Diffstat (limited to 'src/tools/tidy')
-rw-r--r--  src/tools/tidy/Cargo.toml                    |  16
-rw-r--r--  src/tools/tidy/src/bins.rs                   | 151
-rw-r--r--  src/tools/tidy/src/debug_artifacts.rs        |  23
-rw-r--r--  src/tools/tidy/src/deps.rs                   | 621
-rw-r--r--  src/tools/tidy/src/edition.rs                |  31
-rw-r--r--  src/tools/tidy/src/error_codes_check.rs      | 319
-rw-r--r--  src/tools/tidy/src/errors.rs                 |  76
-rw-r--r--  src/tools/tidy/src/extdeps.rs                |  33
-rw-r--r--  src/tools/tidy/src/features.rs               | 550
-rw-r--r--  src/tools/tidy/src/features/tests.rs         |   9
-rw-r--r--  src/tools/tidy/src/features/version.rs       |  48
-rw-r--r--  src/tools/tidy/src/features/version/tests.rs |  38
-rw-r--r--  src/tools/tidy/src/lib.rs                    | 110
-rw-r--r--  src/tools/tidy/src/main.rs                   | 112
-rw-r--r--  src/tools/tidy/src/pal.rs                    | 209
-rw-r--r--  src/tools/tidy/src/primitive_docs.rs         |  17
-rw-r--r--  src/tools/tidy/src/style.rs                  | 423
-rw-r--r--  src/tools/tidy/src/target_specific_tests.rs  |  96
-rw-r--r--  src/tools/tidy/src/ui_tests.rs               |  82
-rw-r--r--  src/tools/tidy/src/unit_tests.rs             |  66
-rw-r--r--  src/tools/tidy/src/unstable_book.rs          | 132
21 files changed, 3162 insertions(+), 0 deletions(-)
diff --git a/src/tools/tidy/Cargo.toml b/src/tools/tidy/Cargo.toml
new file mode 100644
index 000000000..96ab42b47
--- /dev/null
+++ b/src/tools/tidy/Cargo.toml
@@ -0,0 +1,16 @@
+[package]
+name = "tidy"
+version = "0.1.0"
+edition = "2021"
+autobins = false
+
+[dependencies]
+cargo_metadata = "0.14"
+regex = "1"
+lazy_static = "1"
+walkdir = "2"
+crossbeam-utils = "0.8.0"
+
+[[bin]]
+name = "rust-tidy"
+path = "src/main.rs"
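Since `main.rs` (added later in this diff) drives the checks with `crossbeam-utils` scoped threads, here is a minimal standalone sketch of that pattern; the two-slot vector and the values written to it are illustrative only:

```rust
use crossbeam_utils::thread::scope;

fn main() {
    let mut results = vec![0u32; 2];
    // Scoped threads may borrow local state; the scope joins them all
    // before `results` is read again.
    scope(|s| {
        for (i, slot) in results.iter_mut().enumerate() {
            s.spawn(move |_| *slot = i as u32 + 1);
        }
    })
    .unwrap();
    assert_eq!(results, vec![1, 2]);
}
```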
diff --git a/src/tools/tidy/src/bins.rs b/src/tools/tidy/src/bins.rs
new file mode 100644
index 000000000..9615c4db6
--- /dev/null
+++ b/src/tools/tidy/src/bins.rs
@@ -0,0 +1,151 @@
+//! Tidy check to ensure that there are no binaries checked into the source tree
+//! by accident.
+//!
+//! In the past we've accidentally checked in test binaries and such which add a
+//! huge amount of bloat to the Git history, so it's good to just ensure we
+//! don't do that again.
+
+pub use os_impl::*;
+
+// All files are executable on Windows, so just check on Unix.
+#[cfg(windows)]
+mod os_impl {
+ use std::path::Path;
+
+ pub fn check_filesystem_support(_sources: &[&Path], _output: &Path) -> bool {
+ return false;
+ }
+
+ pub fn check(_path: &Path, _bad: &mut bool) {}
+}
+
+#[cfg(unix)]
+mod os_impl {
+ use std::fs;
+ use std::os::unix::prelude::*;
+ use std::path::Path;
+ use std::process::{Command, Stdio};
+
+ enum FilesystemSupport {
+ Supported,
+ Unsupported,
+ ReadOnlyFs,
+ }
+
+ use FilesystemSupport::*;
+
+ fn is_executable(path: &Path) -> std::io::Result<bool> {
+ Ok(path.metadata()?.mode() & 0o111 != 0)
+ }
+
+ pub fn check_filesystem_support(sources: &[&Path], output: &Path) -> bool {
+ // We want to avoid false positives on filesystems that do not support the
+        // executable bit. This occurs on some versions of the Windows Subsystem for Linux,
+ // for example.
+ //
+ // We try to create the temporary file first in the src directory, which is
+ // the preferred location as it's most likely to be on the same filesystem,
+ // and then in the output (`build`) directory if that fails. Sometimes we
+ // see the source directory mounted as read-only which means we can't
+ // readily create a file there to test.
+ //
+ // See #36706 and #74753 for context.
+
+ fn check_dir(dir: &Path) -> FilesystemSupport {
+ let path = dir.join("tidy-test-file");
+ match fs::File::create(&path) {
+ Ok(file) => {
+ let exec = is_executable(&path).unwrap_or(false);
+ std::mem::drop(file);
+                    std::fs::remove_file(&path).expect("failed to delete temporary file");
+ // If the file is executable, then we assume that this
+ // filesystem does not track executability, so skip this check.
+ return if exec { Unsupported } else { Supported };
+ }
+ Err(e) => {
+ // If the directory is read-only or we otherwise don't have rights,
+ // just don't run this check.
+ //
+ // 30 is the "Read-only filesystem" code at least in one CI
+ // environment.
+ if e.raw_os_error() == Some(30) {
+ eprintln!("tidy: Skipping binary file check, read-only filesystem");
+ return ReadOnlyFs;
+ }
+
+ panic!("unable to create temporary file `{:?}`: {:?}", path, e);
+ }
+ };
+ }
+
+ for &source_dir in sources {
+ match check_dir(source_dir) {
+ Unsupported => return false,
+ ReadOnlyFs => {
+ return match check_dir(output) {
+ Supported => true,
+ _ => false,
+ };
+ }
+ _ => {}
+ }
+ }
+
+ return true;
+ }
+
+ #[cfg(unix)]
+ pub fn check(path: &Path, bad: &mut bool) {
+ const ALLOWED: &[&str] = &["configure"];
+
+ crate::walk_no_read(
+ path,
+ &mut |path| {
+ crate::filter_dirs(path)
+ || path.ends_with("src/etc")
+ // This is a list of directories that we almost certainly
+ // don't need to walk. A future PR will likely want to
+ // remove these in favor of crate::walk_no_read using git
+ // ls-files to discover the paths we should check, which
+ // would naturally ignore all of these directories. It's
+ // also likely faster than walking the directory tree
+ // directly (since git is just reading from a couple files
+ // to produce the results).
+ || path.ends_with("target")
+ || path.ends_with("build")
+ || path.ends_with(".git")
+ },
+ &mut |entry| {
+ let file = entry.path();
+ let filename = file.file_name().unwrap().to_string_lossy();
+ let extensions = [".py", ".sh"];
+ if extensions.iter().any(|e| filename.ends_with(e)) {
+ return;
+ }
+
+ if t!(is_executable(&file), file) {
+ let rel_path = file.strip_prefix(path).unwrap();
+ let git_friendly_path = rel_path.to_str().unwrap().replace("\\", "/");
+
+ if ALLOWED.contains(&git_friendly_path.as_str()) {
+ return;
+ }
+
+ let output = Command::new("git")
+ .arg("ls-files")
+ .arg(&git_friendly_path)
+ .current_dir(path)
+ .stderr(Stdio::null())
+ .output()
+ .unwrap_or_else(|e| {
+ panic!("could not run git ls-files: {e}");
+ });
+ let path_bytes = rel_path.as_os_str().as_bytes();
+ if output.status.success() && output.stdout.starts_with(path_bytes) {
+ tidy_error!(bad, "binary checked into source: {}", file.display());
+ }
+ }
+ },
+ )
+ }
+}
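For reference, a minimal Unix-only sketch of the mode-bit test that `is_executable` performs; the `/bin/sh` path is just an example:

```rust
use std::os::unix::fs::PermissionsExt;

fn main() -> std::io::Result<()> {
    let meta = std::fs::metadata("/bin/sh")?;
    // 0o111 masks the owner/group/other execute bits; a non-zero
    // result means at least one of them is set.
    let is_exec = meta.permissions().mode() & 0o111 != 0;
    println!("executable: {is_exec}");
    Ok(())
}
```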
diff --git a/src/tools/tidy/src/debug_artifacts.rs b/src/tools/tidy/src/debug_artifacts.rs
new file mode 100644
index 000000000..ab87230f8
--- /dev/null
+++ b/src/tools/tidy/src/debug_artifacts.rs
@@ -0,0 +1,23 @@
+//! Tidy check to prevent creation of unnecessary debug artifacts while running tests.
+
+use std::path::{Path, PathBuf};
+
+const GRAPHVIZ_POSTFLOW_MSG: &str = "`borrowck_graphviz_postflow` attribute in test";
+
+pub fn check(path: &Path, bad: &mut bool) {
+ let test_dir: PathBuf = path.join("test");
+
+ super::walk(&test_dir, &mut super::filter_dirs, &mut |entry, contents| {
+ let filename = entry.path();
+ let is_rust = filename.extension().map_or(false, |ext| ext == "rs");
+ if !is_rust {
+ return;
+ }
+
+ for (i, line) in contents.lines().enumerate() {
+ if line.contains("borrowck_graphviz_postflow") {
+ tidy_error!(bad, "{}:{}: {}", filename.display(), i + 1, GRAPHVIZ_POSTFLOW_MSG);
+ }
+ }
+ });
+}
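A standalone sketch of the scan this check performs; the two-line input is made up for illustration:

```rust
fn main() {
    let contents = "fn main() {}\n// borrowck_graphviz_postflow\n";
    for (i, line) in contents.lines().enumerate() {
        if line.contains("borrowck_graphviz_postflow") {
            // Line numbers are reported 1-based, hence `i + 1`.
            println!("{}: {}", i + 1, line);
        }
    }
}
```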
diff --git a/src/tools/tidy/src/deps.rs b/src/tools/tidy/src/deps.rs
new file mode 100644
index 000000000..333f85f6d
--- /dev/null
+++ b/src/tools/tidy/src/deps.rs
@@ -0,0 +1,621 @@
+//! Checks the licenses of third-party dependencies.
+
+use cargo_metadata::{Metadata, Package, PackageId, Resolve};
+use std::collections::{BTreeSet, HashSet};
+use std::path::Path;
+
+/// These are licenses that are allowed for all crates, including the runtime,
+/// rustc, tools, etc.
+const LICENSES: &[&str] = &[
+ "MIT/Apache-2.0",
+ "MIT / Apache-2.0",
+ "Apache-2.0/MIT",
+ "Apache-2.0 / MIT",
+ "MIT OR Apache-2.0",
+ "Apache-2.0 OR MIT",
+ "Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT", // wasi license
+ "MIT",
+ "ISC",
+ "Unlicense/MIT",
+ "Unlicense OR MIT",
+ "0BSD OR MIT OR Apache-2.0", // adler license
+ "Zlib OR Apache-2.0 OR MIT", // tinyvec
+ "MIT OR Zlib OR Apache-2.0", // miniz_oxide
+];
+
+/// These are exceptions to Rust's permissive licensing policy, and
+/// should be considered bugs. Exceptions are only allowed in Rust
+/// tooling. It is _crucial_ that no exception crates be dependencies
+/// of the Rust runtime (std/test).
+const EXCEPTIONS: &[(&str, &str)] = &[
+ ("mdbook", "MPL-2.0"), // mdbook
+ ("openssl", "Apache-2.0"), // cargo, mdbook
+ ("colored", "MPL-2.0"), // rustfmt
+ ("ordslice", "Apache-2.0"), // rls
+ ("ryu", "Apache-2.0 OR BSL-1.0"), // rls/cargo/... (because of serde)
+ ("bytesize", "Apache-2.0"), // cargo
+ ("im-rc", "MPL-2.0+"), // cargo
+ ("sized-chunks", "MPL-2.0+"), // cargo via im-rc
+ ("bitmaps", "MPL-2.0+"), // cargo via im-rc
+ ("instant", "BSD-3-Clause"), // rustc_driver/tracing-subscriber/parking_lot
+ ("snap", "BSD-3-Clause"), // rustc
+ ("fluent-langneg", "Apache-2.0"), // rustc (fluent translations)
+ ("self_cell", "Apache-2.0"), // rustc (fluent translations)
+ // FIXME: this dependency violates the documentation comment above:
+ ("fortanix-sgx-abi", "MPL-2.0"), // libstd but only for `sgx` target
+ ("dunce", "CC0-1.0"), // cargo (dev dependency)
+ ("similar", "Apache-2.0"), // cargo (dev dependency)
+ ("normalize-line-endings", "Apache-2.0"), // cargo (dev dependency)
+];
+
+const EXCEPTIONS_CRANELIFT: &[(&str, &str)] = &[
+ ("cranelift-bforest", "Apache-2.0 WITH LLVM-exception"),
+ ("cranelift-codegen", "Apache-2.0 WITH LLVM-exception"),
+ ("cranelift-codegen-meta", "Apache-2.0 WITH LLVM-exception"),
+ ("cranelift-codegen-shared", "Apache-2.0 WITH LLVM-exception"),
+ ("cranelift-entity", "Apache-2.0 WITH LLVM-exception"),
+ ("cranelift-frontend", "Apache-2.0 WITH LLVM-exception"),
+ ("cranelift-isle", "Apache-2.0 WITH LLVM-exception"),
+ ("cranelift-jit", "Apache-2.0 WITH LLVM-exception"),
+ ("cranelift-module", "Apache-2.0 WITH LLVM-exception"),
+ ("cranelift-native", "Apache-2.0 WITH LLVM-exception"),
+ ("cranelift-object", "Apache-2.0 WITH LLVM-exception"),
+ ("mach", "BSD-2-Clause"),
+ ("regalloc2", "Apache-2.0 WITH LLVM-exception"),
+ ("target-lexicon", "Apache-2.0 WITH LLVM-exception"),
+];
+
+const EXCEPTIONS_BOOTSTRAP: &[(&str, &str)] = &[
+ ("ryu", "Apache-2.0 OR BSL-1.0"), // through serde
+];
+
+/// These are the root crates that are part of the runtime. The licenses for
+/// these and all their dependencies *must not* be in the exception list.
+const RUNTIME_CRATES: &[&str] = &["std", "core", "alloc", "test", "panic_abort", "panic_unwind"];
+
+/// Crates whose dependencies must be explicitly permitted.
+const RESTRICTED_DEPENDENCY_CRATES: &[&str] = &["rustc_driver", "rustc_codegen_llvm"];
+
+/// Crates rustc is allowed to depend on. Avoid adding to the list if possible.
+///
+/// This list is here to provide a speed-bump to adding a new dependency to
+/// rustc. Please check with the compiler team before adding an entry.
+const PERMITTED_DEPENDENCIES: &[&str] = &[
+ "addr2line",
+ "adler",
+ "ahash",
+ "aho-corasick",
+ "annotate-snippets",
+ "ansi_term",
+ "arrayvec",
+ "atty",
+ "autocfg",
+ "bitflags",
+ "block-buffer",
+ "block-padding",
+ "byte-tools",
+ "byteorder",
+ "cc",
+ "cfg-if",
+ "chalk-derive",
+ "chalk-engine",
+ "chalk-ir",
+ "chalk-solve",
+ "chrono",
+ "compiler_builtins",
+ "cpufeatures",
+ "crc32fast",
+ "crossbeam-channel",
+ "crossbeam-deque",
+ "crossbeam-epoch",
+ "crossbeam-utils",
+ "crypto-common",
+ "cstr",
+ "datafrog",
+ "difference",
+ "digest",
+ "dlmalloc",
+ "either",
+ "ena",
+ "env_logger",
+ "expect-test",
+ "fake-simd",
+ "fallible-iterator", // dependency of `thorin`
+ "filetime",
+ "fixedbitset",
+ "flate2",
+ "fluent-bundle",
+ "fluent-langneg",
+ "fluent-syntax",
+ "fortanix-sgx-abi",
+ "generic-array",
+ "getopts",
+ "getrandom",
+ "gimli",
+ "gsgdt",
+ "hashbrown",
+ "hermit-abi",
+ "humantime",
+ "if_chain",
+ "indexmap",
+ "instant",
+ "intl-memoizer",
+ "intl_pluralrules",
+ "itertools",
+ "itoa",
+ "jobserver",
+ "lazy_static",
+ "libc",
+ "libloading",
+ "libz-sys",
+ "lock_api",
+ "log",
+ "matchers",
+ "md-5",
+ "measureme",
+ "memchr",
+ "memmap2",
+ "memoffset",
+ "miniz_oxide",
+ "num-integer",
+ "num-traits",
+ "num_cpus",
+ "object",
+ "odht",
+ "once_cell",
+ "opaque-debug",
+ "parking_lot",
+ "parking_lot_core",
+ "pathdiff",
+ "perf-event-open-sys",
+ "petgraph",
+ "pin-project-lite",
+ "pkg-config",
+ "polonius-engine",
+ "ppv-lite86",
+ "proc-macro-hack",
+ "proc-macro2",
+ "psm",
+ "punycode",
+ "quick-error",
+ "quote",
+ "rand",
+ "rand_chacha",
+ "rand_core",
+ "rand_hc",
+ "rand_xorshift",
+ "rand_xoshiro",
+ "redox_syscall",
+ "regex",
+ "regex-automata",
+ "regex-syntax",
+ "remove_dir_all",
+ "rls-data",
+ "rls-span",
+ "rustc-demangle",
+ "rustc-hash",
+ "rustc-rayon",
+ "rustc-rayon-core",
+ "rustc_version",
+ "ryu",
+ "scoped-tls",
+ "scopeguard",
+ "self_cell",
+ "semver",
+ "serde",
+ "serde_derive",
+ "serde_json",
+ "sha-1",
+ "sha2",
+ "sharded-slab",
+ "smallvec",
+ "snap",
+ "stable_deref_trait",
+ "stacker",
+ "syn",
+ "synstructure",
+ "tempfile",
+ "termcolor",
+ "termize",
+ "thiserror",
+ "thiserror-impl",
+ "thorin-dwp",
+ "thread_local",
+ "time",
+ "tinystr",
+ "tinyvec",
+ "tracing",
+ "tracing-attributes",
+ "tracing-core",
+ "tracing-log",
+ "tracing-subscriber",
+ "tracing-tree",
+ "type-map",
+ "typenum",
+ "unic-char-property",
+ "unic-char-range",
+ "unic-common",
+ "unic-emoji-char",
+ "unic-langid",
+ "unic-langid-impl",
+ "unic-langid-macros",
+ "unic-langid-macros-impl",
+ "unic-ucd-version",
+ "unicode-normalization",
+ "unicode-script",
+ "unicode-security",
+ "unicode-width",
+ "unicode-xid",
+ "vcpkg",
+ "version_check",
+ "wasi",
+ "winapi",
+ "winapi-i686-pc-windows-gnu",
+ "winapi-util",
+ "winapi-x86_64-pc-windows-gnu",
+ // this is a false-positive: it's only used by rustfmt, but because it's enabled through a
+ // feature, tidy thinks it's used by rustc as well.
+ "yansi-term",
+];
+
+const PERMITTED_CRANELIFT_DEPENDENCIES: &[&str] = &[
+ "ahash",
+ "anyhow",
+ "ar",
+ "autocfg",
+ "bitflags",
+ "byteorder",
+ "cfg-if",
+ "cranelift-bforest",
+ "cranelift-codegen",
+ "cranelift-codegen-meta",
+ "cranelift-codegen-shared",
+ "cranelift-entity",
+ "cranelift-frontend",
+ "cranelift-isle",
+ "cranelift-jit",
+ "cranelift-module",
+ "cranelift-native",
+ "cranelift-object",
+ "crc32fast",
+ "fxhash",
+ "getrandom",
+ "gimli",
+ "hashbrown",
+ "indexmap",
+ "libc",
+ "libloading",
+ "log",
+ "mach",
+ "memchr",
+ "object",
+ "once_cell",
+ "regalloc2",
+ "region",
+ "slice-group-by",
+ "smallvec",
+ "target-lexicon",
+ "version_check",
+ "wasi",
+ "winapi",
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+];
+
+const FORBIDDEN_TO_HAVE_DUPLICATES: &[&str] = &[
+    // This crate takes quite a long time to build, so don't allow two versions of it
+    // to accidentally sneak into our dependency graph, in order to ensure we keep our
+    // CI times under control.
+ "cargo",
+];
+
+/// Dependency checks.
+///
+/// `root` is the path to the directory containing the workspace-root `Cargo.toml`. `cargo` is the
+/// path to the cargo executable.
+pub fn check(root: &Path, cargo: &Path, bad: &mut bool) {
+ let mut cmd = cargo_metadata::MetadataCommand::new();
+ cmd.cargo_path(cargo)
+ .manifest_path(root.join("Cargo.toml"))
+ .features(cargo_metadata::CargoOpt::AllFeatures);
+ let metadata = t!(cmd.exec());
+ let runtime_ids = compute_runtime_crates(&metadata);
+ check_exceptions(&metadata, EXCEPTIONS, runtime_ids, bad);
+ check_dependencies(
+ &metadata,
+ "main workspace",
+ PERMITTED_DEPENDENCIES,
+ RESTRICTED_DEPENDENCY_CRATES,
+ bad,
+ );
+ check_crate_duplicate(&metadata, FORBIDDEN_TO_HAVE_DUPLICATES, bad);
+ check_rustfix(&metadata, bad);
+
+    // Check rustc_codegen_cranelift independently as it has its own workspace.
+ let mut cmd = cargo_metadata::MetadataCommand::new();
+ cmd.cargo_path(cargo)
+ .manifest_path(root.join("compiler/rustc_codegen_cranelift/Cargo.toml"))
+ .features(cargo_metadata::CargoOpt::AllFeatures);
+ let metadata = t!(cmd.exec());
+ let runtime_ids = HashSet::new();
+ check_exceptions(&metadata, EXCEPTIONS_CRANELIFT, runtime_ids, bad);
+ check_dependencies(
+ &metadata,
+ "cranelift",
+ PERMITTED_CRANELIFT_DEPENDENCIES,
+ &["rustc_codegen_cranelift"],
+ bad,
+ );
+ check_crate_duplicate(&metadata, &[], bad);
+
+ let mut cmd = cargo_metadata::MetadataCommand::new();
+ cmd.cargo_path(cargo)
+ .manifest_path(root.join("src/bootstrap/Cargo.toml"))
+ .features(cargo_metadata::CargoOpt::AllFeatures);
+ let metadata = t!(cmd.exec());
+ let runtime_ids = HashSet::new();
+ check_exceptions(&metadata, EXCEPTIONS_BOOTSTRAP, runtime_ids, bad);
+}
+
+/// Checks that all licenses are in the `LICENSES` allow list.
+///
+/// Packages listed in `EXCEPTIONS` are allowed for tools.
+fn check_exceptions(
+ metadata: &Metadata,
+ exceptions: &[(&str, &str)],
+ runtime_ids: HashSet<&PackageId>,
+ bad: &mut bool,
+) {
+ // Validate the EXCEPTIONS list hasn't changed.
+ for (name, license) in exceptions {
+ // Check that the package actually exists.
+ if !metadata.packages.iter().any(|p| p.name == *name) {
+ tidy_error!(
+ bad,
+ "could not find exception package `{}`\n\
+ Remove from EXCEPTIONS list if it is no longer used.",
+ name
+ );
+ }
+ // Check that the license hasn't changed.
+ for pkg in metadata.packages.iter().filter(|p| p.name == *name) {
+ match &pkg.license {
+ None => {
+ tidy_error!(
+ bad,
+ "dependency exception `{}` does not declare a license expression",
+ pkg.id
+ );
+ }
+ Some(pkg_license) => {
+ if pkg_license.as_str() != *license {
+ println!("dependency exception `{name}` license has changed");
+ println!(" previously `{license}` now `{pkg_license}`");
+ println!(" update EXCEPTIONS for the new license");
+ *bad = true;
+ }
+ }
+ }
+ }
+ }
+
+ let exception_names: Vec<_> = exceptions.iter().map(|(name, _license)| *name).collect();
+
+ // Check if any package does not have a valid license.
+ for pkg in &metadata.packages {
+ if pkg.source.is_none() {
+ // No need to check local packages.
+ continue;
+ }
+ if !runtime_ids.contains(&pkg.id) && exception_names.contains(&pkg.name.as_str()) {
+ continue;
+ }
+ let license = match &pkg.license {
+ Some(license) => license,
+ None => {
+ tidy_error!(bad, "dependency `{}` does not define a license expression", pkg.id);
+ continue;
+ }
+ };
+ if !LICENSES.contains(&license.as_str()) {
+ if pkg.name == "fortanix-sgx-abi" {
+ // This is a specific exception because SGX is considered
+ // "third party". See
+ // https://github.com/rust-lang/rust/issues/62620 for more. In
+ // general, these should never be added.
+ continue;
+ }
+ tidy_error!(bad, "invalid license `{}` in `{}`", license, pkg.id);
+ }
+ }
+}
+
+/// Checks the dependencies of `RESTRICTED_DEPENDENCY_CRATES` at the given path. Sets `bad` to
+/// `true` if a check fails.
+///
+/// Specifically, this checks that the dependencies are in the `PERMITTED_DEPENDENCIES` list.
+fn check_dependencies(
+ metadata: &Metadata,
+ descr: &str,
+ permitted_dependencies: &[&'static str],
+ restricted_dependency_crates: &[&'static str],
+ bad: &mut bool,
+) {
+ // Check that the PERMITTED_DEPENDENCIES does not have unused entries.
+ for name in permitted_dependencies {
+ if !metadata.packages.iter().any(|p| p.name == *name) {
+ tidy_error!(
+ bad,
+ "could not find allowed package `{}`\n\
+ Remove from PERMITTED_DEPENDENCIES list if it is no longer used.",
+ name
+ );
+ }
+ }
+ // Get the list in a convenient form.
+ let permitted_dependencies: HashSet<_> = permitted_dependencies.iter().cloned().collect();
+
+ // Check dependencies.
+ let mut visited = BTreeSet::new();
+ let mut unapproved = BTreeSet::new();
+ for &krate in restricted_dependency_crates.iter() {
+ let pkg = pkg_from_name(metadata, krate);
+ let mut bad =
+ check_crate_dependencies(&permitted_dependencies, metadata, &mut visited, pkg);
+ unapproved.append(&mut bad);
+ }
+
+ if !unapproved.is_empty() {
+ tidy_error!(bad, "Dependencies for {} not explicitly permitted:", descr);
+ for dep in unapproved {
+ println!("* {dep}");
+ }
+ }
+}
+
+/// Checks the dependencies of the given crate from the given cargo metadata to see if they are on
+/// the list of permitted dependencies. Returns a list of disallowed dependencies.
+fn check_crate_dependencies<'a>(
+ permitted_dependencies: &'a HashSet<&'static str>,
+ metadata: &'a Metadata,
+ visited: &mut BTreeSet<&'a PackageId>,
+ krate: &'a Package,
+) -> BTreeSet<&'a PackageId> {
+ // This will contain bad deps.
+ let mut unapproved = BTreeSet::new();
+
+ // Check if we have already visited this crate.
+ if visited.contains(&krate.id) {
+ return unapproved;
+ }
+
+ visited.insert(&krate.id);
+
+ // If this path is in-tree, we don't require it to be explicitly permitted.
+ if krate.source.is_some() {
+ // If this dependency is not on `PERMITTED_DEPENDENCIES`, add to bad set.
+ if !permitted_dependencies.contains(krate.name.as_str()) {
+ unapproved.insert(&krate.id);
+ }
+ }
+
+ // Do a DFS in the crate graph.
+ let to_check = deps_of(metadata, &krate.id);
+
+ for dep in to_check {
+ let mut bad = check_crate_dependencies(permitted_dependencies, metadata, visited, dep);
+ unapproved.append(&mut bad);
+ }
+
+ unapproved
+}
+
+/// Prevents multiple versions of some expensive crates.
+fn check_crate_duplicate(
+ metadata: &Metadata,
+ forbidden_to_have_duplicates: &[&str],
+ bad: &mut bool,
+) {
+ for &name in forbidden_to_have_duplicates {
+ let matches: Vec<_> = metadata.packages.iter().filter(|pkg| pkg.name == name).collect();
+ match matches.len() {
+ 0 => {
+ tidy_error!(
+ bad,
+ "crate `{}` is missing, update `check_crate_duplicate` \
+ if it is no longer used",
+ name
+ );
+ }
+ 1 => {}
+ _ => {
+ tidy_error!(
+ bad,
+ "crate `{}` is duplicated in `Cargo.lock`, \
+ it is too expensive to build multiple times, \
+ so make sure only one version appears across all dependencies",
+ name
+ );
+ for pkg in matches {
+ println!(" * {}", pkg.id);
+ }
+ }
+ }
+ }
+}
+
+/// Returns a list of dependencies for the given package.
+fn deps_of<'a>(metadata: &'a Metadata, pkg_id: &'a PackageId) -> Vec<&'a Package> {
+ let resolve = metadata.resolve.as_ref().unwrap();
+ let node = resolve
+ .nodes
+ .iter()
+ .find(|n| &n.id == pkg_id)
+ .unwrap_or_else(|| panic!("could not find `{pkg_id}` in resolve"));
+ node.deps
+ .iter()
+ .map(|dep| {
+ metadata.packages.iter().find(|pkg| pkg.id == dep.pkg).unwrap_or_else(|| {
+ panic!("could not find dep `{}` for pkg `{}` in resolve", dep.pkg, pkg_id)
+ })
+ })
+ .collect()
+}
+
+/// Finds a package with the given name.
+fn pkg_from_name<'a>(metadata: &'a Metadata, name: &'static str) -> &'a Package {
+ let mut i = metadata.packages.iter().filter(|p| p.name == name);
+ let result =
+ i.next().unwrap_or_else(|| panic!("could not find package `{name}` in package list"));
+ assert!(i.next().is_none(), "more than one package found for `{name}`");
+ result
+}
+
+/// Finds all the packages that are in the Rust runtime.
+fn compute_runtime_crates<'a>(metadata: &'a Metadata) -> HashSet<&'a PackageId> {
+ let resolve = metadata.resolve.as_ref().unwrap();
+ let mut result = HashSet::new();
+ for name in RUNTIME_CRATES {
+ let id = &pkg_from_name(metadata, name).id;
+ normal_deps_of_r(resolve, id, &mut result);
+ }
+ result
+}
+
+/// Recursively find all normal dependencies.
+fn normal_deps_of_r<'a>(
+ resolve: &'a Resolve,
+ pkg_id: &'a PackageId,
+ result: &mut HashSet<&'a PackageId>,
+) {
+ if !result.insert(pkg_id) {
+ return;
+ }
+ let node = resolve
+ .nodes
+ .iter()
+ .find(|n| &n.id == pkg_id)
+ .unwrap_or_else(|| panic!("could not find `{pkg_id}` in resolve"));
+ for dep in &node.deps {
+ normal_deps_of_r(resolve, &dep.pkg, result);
+ }
+}
+
+fn check_rustfix(metadata: &Metadata, bad: &mut bool) {
+ let cargo = pkg_from_name(metadata, "cargo");
+ let compiletest = pkg_from_name(metadata, "compiletest");
+ let cargo_deps = deps_of(metadata, &cargo.id);
+ let compiletest_deps = deps_of(metadata, &compiletest.id);
+ let cargo_rustfix = cargo_deps.iter().find(|p| p.name == "rustfix").unwrap();
+ let compiletest_rustfix = compiletest_deps.iter().find(|p| p.name == "rustfix").unwrap();
+ if cargo_rustfix.version != compiletest_rustfix.version {
+ tidy_error!(
+ bad,
+ "cargo's rustfix version {} does not match compiletest's rustfix version {}\n\
+ rustfix should be kept in sync, update the cargo side first, and then update \
+ compiletest along with cargo.",
+ cargo_rustfix.version,
+ compiletest_rustfix.version
+ );
+ }
+}
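For context, a minimal sketch of the `cargo_metadata` calls these checks are built on, printing each package with its declared license; the `Cargo.toml` manifest path is a placeholder:

```rust
use cargo_metadata::MetadataCommand;

fn main() {
    let metadata = MetadataCommand::new()
        .manifest_path("Cargo.toml") // placeholder; point at a real manifest
        .exec()
        .expect("failed to run `cargo metadata`");
    for pkg in &metadata.packages {
        // `license` is `None` when the manifest declares no license expression.
        println!("{} {}", pkg.name, pkg.license.as_deref().unwrap_or("<none>"));
    }
}
```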
diff --git a/src/tools/tidy/src/edition.rs b/src/tools/tidy/src/edition.rs
new file mode 100644
index 000000000..b0abee459
--- /dev/null
+++ b/src/tools/tidy/src/edition.rs
@@ -0,0 +1,31 @@
+//! Tidy check to ensure that crate `edition` is '2021'.
+
+use std::path::Path;
+
+fn is_edition_2021(mut line: &str) -> bool {
+ line = line.trim();
+ line == "edition = \"2021\""
+}
+
+pub fn check(path: &Path, bad: &mut bool) {
+ super::walk(
+ path,
+ &mut |path| super::filter_dirs(path) || path.ends_with("src/test"),
+ &mut |entry, contents| {
+ let file = entry.path();
+ let filename = file.file_name().unwrap();
+ if filename != "Cargo.toml" {
+ return;
+ }
+
+ let is_2021 = contents.lines().any(is_edition_2021);
+ if !is_2021 {
+ tidy_error!(
+ bad,
+ "{} doesn't have `edition = \"2021\"` on a separate line",
+ file.display()
+ );
+ }
+ },
+ );
+}
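A small standalone sketch of the exact-line match this check relies on; the sample manifest is made up:

```rust
fn is_edition_2021(mut line: &str) -> bool {
    line = line.trim();
    line == "edition = \"2021\""
}

fn main() {
    let manifest = "[package]\nname = \"demo\"\nedition = \"2021\"\n";
    assert!(manifest.lines().any(is_edition_2021));
    // An inline table does not match: the key must sit on its own line.
    assert!(!is_edition_2021("package = { edition = \"2021\" }"));
}
```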
diff --git a/src/tools/tidy/src/error_codes_check.rs b/src/tools/tidy/src/error_codes_check.rs
new file mode 100644
index 000000000..f0054a1c1
--- /dev/null
+++ b/src/tools/tidy/src/error_codes_check.rs
@@ -0,0 +1,319 @@
+//! Checks that all error codes have at least one test, to catch error codes that
+//! the compiler silently no longer emits.
+
+use std::collections::{HashMap, HashSet};
+use std::ffi::OsStr;
+use std::fs::read_to_string;
+use std::path::Path;
+
+use regex::Regex;
+
+// A few of these error codes can't be tested, but all the others can and *should* be tested!
+const EXEMPTED_FROM_TEST: &[&str] = &[
+ "E0313", "E0377", "E0461", "E0462", "E0465", "E0476", "E0490", "E0514", "E0519", "E0523",
+ "E0554", "E0640", "E0717", "E0729", "E0789",
+];
+
+// Error codes whose explanations don't (yet) include a compile_fail example using their own code.
+const IGNORE_EXPLANATION_CHECK: &[&str] = &["E0464", "E0570", "E0601", "E0602", "E0729"];
+
+// If the file path contains any of these, we don't want to try to extract error codes from it.
+//
+// Each path must be declared both in Unix form and in Windows form (with backslashes).
+const PATHS_TO_IGNORE_FOR_EXTRACTION: &[&str] =
+ &["src/test/", "src\\test\\", "src/doc/", "src\\doc\\", "src/tools/", "src\\tools\\"];
+
+#[derive(Default, Debug)]
+struct ErrorCodeStatus {
+ has_test: bool,
+ has_explanation: bool,
+ is_used: bool,
+}
+
+fn check_error_code_explanation(
+ f: &str,
+ error_codes: &mut HashMap<String, ErrorCodeStatus>,
+ err_code: String,
+) -> bool {
+ let mut invalid_compile_fail_format = false;
+ let mut found_error_code = false;
+
+ for line in f.lines() {
+ let s = line.trim();
+ if s.starts_with("```") {
+ if s.contains("compile_fail") && s.contains('E') {
+ if !found_error_code {
+                    if let Some(status) = error_codes.get_mut(&err_code) {
+                        status.has_test = true;
+                    }
+ found_error_code = true;
+ }
+ } else if s.contains("compile-fail") {
+ invalid_compile_fail_format = true;
+ }
+ } else if s.starts_with("#### Note: this error code is no longer emitted by the compiler") {
+ if !found_error_code {
+                if let Some(status) = error_codes.get_mut(&err_code) {
+                    status.has_test = true;
+                }
+ found_error_code = true;
+ }
+ }
+ }
+ invalid_compile_fail_format
+}
+
+fn check_if_error_code_is_test_in_explanation(f: &str, err_code: &str) -> bool {
+ let mut ignore_found = false;
+
+ for line in f.lines() {
+ let s = line.trim();
+ if s.starts_with("#### Note: this error code is no longer emitted by the compiler") {
+ return true;
+ }
+ if s.starts_with("```") {
+ if s.contains("compile_fail") && s.contains(err_code) {
+ return true;
+ } else if s.contains("ignore") {
+ // It's very likely that we can't actually make it fail compilation...
+ ignore_found = true;
+ }
+ }
+ }
+ ignore_found
+}
+
+macro_rules! some_or_continue {
+ ($e:expr) => {
+ match $e {
+ Some(e) => e,
+ None => continue,
+ }
+ };
+}
+
+fn extract_error_codes(
+ f: &str,
+ error_codes: &mut HashMap<String, ErrorCodeStatus>,
+ path: &Path,
+ errors: &mut Vec<String>,
+) {
+ let mut reached_no_explanation = false;
+
+ for line in f.lines() {
+ let s = line.trim();
+ if !reached_no_explanation && s.starts_with('E') && s.contains("include_str!(\"") {
+            let err_code = s
+                .split_once(':')
+                .unwrap_or_else(|| {
+                    panic!(
+                        "Expected a line with the format `E0xxx: include_str!(\"..\")`, \
+                         but got {} without a `:` delimiter",
+                        s,
+                    )
+                })
+                .0
+                .to_owned();
+ error_codes.entry(err_code.clone()).or_default().has_explanation = true;
+
+ // Now we extract the tests from the markdown file!
+ let md_file_name = match s.split_once("include_str!(\"") {
+ None => continue,
+ Some((_, md)) => match md.split_once("\")") {
+ None => continue,
+ Some((file_name, _)) => file_name,
+ },
+ };
+ let path = some_or_continue!(path.parent())
+ .join(md_file_name)
+ .canonicalize()
+ .expect("failed to canonicalize error explanation file path");
+ match read_to_string(&path) {
+ Ok(content) => {
+ let has_test = check_if_error_code_is_test_in_explanation(&content, &err_code);
+ if !has_test && !IGNORE_EXPLANATION_CHECK.contains(&err_code.as_str()) {
+ errors.push(format!(
+ "`{}` doesn't use its own error code in compile_fail example",
+ path.display(),
+ ));
+ } else if has_test && IGNORE_EXPLANATION_CHECK.contains(&err_code.as_str()) {
+ errors.push(format!(
+ "`{}` has a compile_fail example with its own error code, it shouldn't \
+ be listed in IGNORE_EXPLANATION_CHECK!",
+ path.display(),
+ ));
+ }
+ if check_error_code_explanation(&content, error_codes, err_code) {
+ errors.push(format!(
+ "`{}` uses invalid tag `compile-fail` instead of `compile_fail`",
+ path.display(),
+ ));
+ }
+ }
+ Err(e) => {
+ eprintln!("Couldn't read `{}`: {}", path.display(), e);
+ }
+ }
+ } else if reached_no_explanation && s.starts_with('E') {
+ let err_code = match s.split_once(',') {
+ None => s,
+ Some((err_code, _)) => err_code,
+ }
+ .to_string();
+ if !error_codes.contains_key(&err_code) {
+ // this check should *never* fail!
+ error_codes.insert(err_code, ErrorCodeStatus::default());
+ }
+ } else if s == ";" {
+ reached_no_explanation = true;
+ }
+ }
+}
+
+fn extract_error_codes_from_tests(f: &str, error_codes: &mut HashMap<String, ErrorCodeStatus>) {
+ for line in f.lines() {
+ let s = line.trim();
+ if s.starts_with("error[E") || s.starts_with("warning[E") {
+ let err_code = match s.split_once(']') {
+ None => continue,
+ Some((err_code, _)) => match err_code.split_once('[') {
+ None => continue,
+ Some((_, err_code)) => err_code,
+ },
+ };
+ error_codes.entry(err_code.to_owned()).or_default().has_test = true;
+ }
+ }
+}
+
+fn extract_error_codes_from_source(
+ f: &str,
+ error_codes: &mut HashMap<String, ErrorCodeStatus>,
+ regex: &Regex,
+) {
+ for line in f.lines() {
+ if line.trim_start().starts_with("//") {
+ continue;
+ }
+ for cap in regex.captures_iter(line) {
+ if let Some(error_code) = cap.get(1) {
+ error_codes.entry(error_code.as_str().to_owned()).or_default().is_used = true;
+ }
+ }
+ }
+}
+
+pub fn check(paths: &[&Path], bad: &mut bool) {
+ let mut errors = Vec::new();
+ let mut found_explanations = 0;
+ let mut found_tests = 0;
+ let mut error_codes: HashMap<String, ErrorCodeStatus> = HashMap::new();
+ let mut explanations: HashSet<String> = HashSet::new();
+ // We want error codes which match the following cases:
+ //
+ // * foo(a, E0111, a)
+ // * foo(a, E0111)
+ // * foo(E0111, a)
+ // * #[error = "E0111"]
+ let regex = Regex::new(r#"[(,"\s](E\d{4})[,)"]"#).unwrap();
+
+ println!("Checking which error codes lack tests...");
+
+ for path in paths {
+ super::walk(path, &mut |path| super::filter_dirs(path), &mut |entry, contents| {
+ let file_name = entry.file_name();
+ let entry_path = entry.path();
+
+ if file_name == "error_codes.rs" {
+ extract_error_codes(contents, &mut error_codes, entry.path(), &mut errors);
+ found_explanations += 1;
+ } else if entry_path.extension() == Some(OsStr::new("stderr")) {
+ extract_error_codes_from_tests(contents, &mut error_codes);
+ found_tests += 1;
+ } else if entry_path.extension() == Some(OsStr::new("rs")) {
+ let path = entry.path().to_string_lossy();
+ if PATHS_TO_IGNORE_FOR_EXTRACTION.iter().all(|c| !path.contains(c)) {
+ extract_error_codes_from_source(contents, &mut error_codes, &regex);
+ }
+ } else if entry_path
+ .parent()
+ .and_then(|p| p.file_name())
+ .map(|p| p == "error_codes")
+ .unwrap_or(false)
+ && entry_path.extension() == Some(OsStr::new("md"))
+ {
+ explanations.insert(file_name.to_str().unwrap().replace(".md", ""));
+ }
+ });
+ }
+ if found_explanations == 0 {
+ eprintln!("No error code explanation was tested!");
+ *bad = true;
+ }
+ if found_tests == 0 {
+ eprintln!("No error code was found in compilation errors!");
+ *bad = true;
+ }
+ if explanations.is_empty() {
+ eprintln!("No error code explanation was found!");
+ *bad = true;
+ }
+ if errors.is_empty() {
+ println!("Found {} error codes", error_codes.len());
+
+ for (err_code, error_status) in &error_codes {
+ if !error_status.has_test && !EXEMPTED_FROM_TEST.contains(&err_code.as_str()) {
+ errors.push(format!("Error code {err_code} needs to have at least one UI test!"));
+ } else if error_status.has_test && EXEMPTED_FROM_TEST.contains(&err_code.as_str()) {
+ errors.push(format!(
+                    "Error code {} has a UI test; it shouldn't be listed in EXEMPTED_FROM_TEST!",
+ err_code
+ ));
+ }
+ if !error_status.is_used && !error_status.has_explanation {
+ errors.push(format!(
+                    "Error code {} isn't used and doesn't have an error explanation; it should be \
+                    commented out in the error_codes.rs file",
+ err_code
+ ));
+ }
+ }
+ }
+ if errors.is_empty() {
+ // Checking if local constants need to be cleaned.
+ for err_code in EXEMPTED_FROM_TEST {
+ match error_codes.get(err_code.to_owned()) {
+ Some(status) => {
+ if status.has_test {
+ errors.push(format!(
+ "{} error code has a test and therefore should be \
+ removed from the `EXEMPTED_FROM_TEST` constant",
+ err_code
+ ));
+ }
+ }
+ None => errors.push(format!(
+                "{} error code isn't used anymore and therefore should be removed \
+                from the `EXEMPTED_FROM_TEST` constant",
+ err_code
+ )),
+ }
+ }
+ }
+ if errors.is_empty() {
+ for explanation in explanations {
+ if !error_codes.contains_key(&explanation) {
+ errors.push(format!(
+ "{} error code explanation should be listed in `error_codes.rs`",
+ explanation
+ ));
+ }
+ }
+ }
+ errors.sort();
+ for err in &errors {
+ eprintln!("{err}");
+ }
+ println!("Found {} error(s) in error codes", errors.len());
+ if !errors.is_empty() {
+ *bad = true;
+ }
+ println!("Done!");
+}
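For reference, a sketch of the capture regex above, run against two of the line shapes listed in its comment (requires the `regex` crate):

```rust
use regex::Regex;

fn main() {
    let re = Regex::new(r#"[(,"\s](E\d{4})[,)"]"#).unwrap();
    for line in ["foo(a, E0111, a)", "#[error = \"E0111\"]"] {
        for cap in re.captures_iter(line) {
            // Group 1 is the bare error code without its delimiters.
            println!("{line} -> {}", &cap[1]);
        }
    }
}
```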
diff --git a/src/tools/tidy/src/errors.rs b/src/tools/tidy/src/errors.rs
new file mode 100644
index 000000000..dbcc9341a
--- /dev/null
+++ b/src/tools/tidy/src/errors.rs
@@ -0,0 +1,76 @@
+//! Tidy check to verify the validity of long error diagnostic codes.
+//!
+//! This ensures that error codes are used at most once and also prints out some
+//! statistics about the error codes.
+
+use std::collections::HashMap;
+use std::path::Path;
+
+pub fn check(path: &Path, bad: &mut bool) {
+ let mut map: HashMap<_, Vec<_>> = HashMap::new();
+ super::walk(
+ path,
+ &mut |path| super::filter_dirs(path) || path.ends_with("src/test"),
+ &mut |entry, contents| {
+ let file = entry.path();
+ let filename = file.file_name().unwrap().to_string_lossy();
+ if filename != "error_codes.rs" {
+ return;
+ }
+
+ // In the `register_long_diagnostics!` macro, entries look like this:
+ //
+ // ```
+ // EXXXX: r##"
+ // <Long diagnostic message>
+ // "##,
+ // ```
+ //
+ // and these long messages often have error codes themselves inside
+ // them, but we don't want to report duplicates in these cases. This
+ // variable keeps track of whether we're currently inside one of these
+ // long diagnostic messages.
+ let mut inside_long_diag = false;
+ for (num, line) in contents.lines().enumerate() {
+ if inside_long_diag {
+ inside_long_diag = !line.contains("\"##");
+ continue;
+ }
+
+ let mut search = line;
+ while let Some(i) = search.find('E') {
+ search = &search[i + 1..];
+ let code = if search.len() > 4 { search[..4].parse::<u32>() } else { continue };
+ let code = match code {
+ Ok(n) => n,
+ Err(..) => continue,
+ };
+ map.entry(code).or_default().push((file.to_owned(), num + 1, line.to_owned()));
+ break;
+ }
+
+ inside_long_diag = line.contains("r##\"");
+ }
+ },
+ );
+
+ let mut max = 0;
+ for (&code, entries) in map.iter() {
+ if code > max {
+ max = code;
+ }
+ if entries.len() == 1 {
+ continue;
+ }
+
+ tidy_error!(bad, "duplicate error code: {}", code);
+ for &(ref file, line_num, ref line) in entries.iter() {
+ tidy_error!(bad, "{}:{}: {}", file.display(), line_num, line);
+ }
+ }
+
+ if !*bad {
+ println!("* {} error codes", map.len());
+ println!("* highest error code: E{:04}", max);
+ }
+}
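A standalone sketch of the `E`-plus-four-digits scan above; the input line is made up, and unlike the check it keeps scanning instead of breaking after the first hit:

```rust
fn main() {
    let mut search = "E0123: r##\" see also E99 \"##,";
    while let Some(i) = search.find('E') {
        search = &search[i + 1..];
        // Anything that is not four digits after the `E` (like `E99 "`)
        // fails to parse and is skipped.
        if search.len() > 4 {
            if let Ok(n) = search[..4].parse::<u32>() {
                println!("found error code E{n:04}");
            }
        }
    }
}
```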
diff --git a/src/tools/tidy/src/extdeps.rs b/src/tools/tidy/src/extdeps.rs
new file mode 100644
index 000000000..aad57cacb
--- /dev/null
+++ b/src/tools/tidy/src/extdeps.rs
@@ -0,0 +1,33 @@
+//! Check for external package sources. Allow only vendorable packages.
+
+use std::fs;
+use std::path::Path;
+
+/// List of allowed sources for packages.
+const ALLOWED_SOURCES: &[&str] = &["\"registry+https://github.com/rust-lang/crates.io-index\""];
+
+/// Checks for external package sources. `root` is the path to the directory that contains the
+/// workspace `Cargo.toml`.
+pub fn check(root: &Path, bad: &mut bool) {
+    // The `Cargo.lock` of the Rust repository.
+ let path = root.join("Cargo.lock");
+
+ // Open and read the whole file.
+ let cargo_lock = t!(fs::read_to_string(&path));
+
+ // Process each line.
+ for line in cargo_lock.lines() {
+ // Consider only source entries.
+ if !line.starts_with("source = ") {
+ continue;
+ }
+
+ // Extract source value.
+ let source = line.split_once('=').unwrap().1.trim();
+
+ // Ensure source is allowed.
+ if !ALLOWED_SOURCES.contains(&&*source) {
+ tidy_error!(bad, "invalid source: {}", source);
+ }
+ }
+}
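A standalone sketch of the line handling above, run on a made-up `Cargo.lock` excerpt:

```rust
fn main() {
    let lock = "name = \"adler\"\n\
                source = \"registry+https://github.com/rust-lang/crates.io-index\"\n";
    for line in lock.lines() {
        if !line.starts_with("source = ") {
            continue;
        }
        // Everything after the first `=`, trimmed, is the quoted source.
        let source = line.split_once('=').unwrap().1.trim();
        println!("found source: {source}");
    }
}
```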
diff --git a/src/tools/tidy/src/features.rs b/src/tools/tidy/src/features.rs
new file mode 100644
index 000000000..2f22c081a
--- /dev/null
+++ b/src/tools/tidy/src/features.rs
@@ -0,0 +1,550 @@
+//! Tidy check to ensure that unstable features are all in order.
+//!
+//! This check will ensure properties like:
+//!
+//! * All stability attributes look reasonably well formed.
+//! * The set of library features is disjoint from the set of language features.
+//! * Library features have at most one stability level.
+//! * Library features have at most one `since` value.
+//! * All unstable lang features have tests to ensure they are actually unstable.
+//! * Language features in a group are sorted by feature name.
+
+use std::collections::HashMap;
+use std::fmt;
+use std::fs;
+use std::num::NonZeroU32;
+use std::path::Path;
+
+use regex::Regex;
+
+#[cfg(test)]
+mod tests;
+
+mod version;
+use version::Version;
+
+const FEATURE_GROUP_START_PREFIX: &str = "// feature-group-start";
+const FEATURE_GROUP_END_PREFIX: &str = "// feature-group-end";
+
+#[derive(Debug, PartialEq, Clone)]
+pub enum Status {
+ Stable,
+ Removed,
+ Unstable,
+}
+
+impl fmt::Display for Status {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let as_str = match *self {
+ Status::Stable => "stable",
+ Status::Unstable => "unstable",
+ Status::Removed => "removed",
+ };
+ fmt::Display::fmt(as_str, f)
+ }
+}
+
+#[derive(Debug, Clone)]
+pub struct Feature {
+ pub level: Status,
+ pub since: Option<Version>,
+ pub has_gate_test: bool,
+ pub tracking_issue: Option<NonZeroU32>,
+}
+impl Feature {
+ fn tracking_issue_display(&self) -> impl fmt::Display {
+ match self.tracking_issue {
+ None => "none".to_string(),
+ Some(x) => x.to_string(),
+ }
+ }
+}
+
+pub type Features = HashMap<String, Feature>;
+
+pub struct CollectedFeatures {
+ pub lib: Features,
+ pub lang: Features,
+}
+
+// Currently only used for unstable book generation
+pub fn collect_lib_features(base_src_path: &Path) -> Features {
+ let mut lib_features = Features::new();
+
+ map_lib_features(base_src_path, &mut |res, _, _| {
+ if let Ok((name, feature)) = res {
+ lib_features.insert(name.to_owned(), feature);
+ }
+ });
+ lib_features
+}
+
+pub fn check(
+ src_path: &Path,
+ compiler_path: &Path,
+ lib_path: &Path,
+ bad: &mut bool,
+ verbose: bool,
+) -> CollectedFeatures {
+ let mut features = collect_lang_features(compiler_path, bad);
+ assert!(!features.is_empty());
+
+ let lib_features = get_and_check_lib_features(lib_path, bad, &features);
+ assert!(!lib_features.is_empty());
+
+ super::walk_many(
+ &[
+ &src_path.join("test/ui"),
+ &src_path.join("test/ui-fulldeps"),
+ &src_path.join("test/rustdoc-ui"),
+ &src_path.join("test/rustdoc"),
+ ],
+ &mut |path| super::filter_dirs(path),
+ &mut |entry, contents| {
+ let file = entry.path();
+ let filename = file.file_name().unwrap().to_string_lossy();
+ if !filename.ends_with(".rs")
+ || filename == "features.rs"
+ || filename == "diagnostic_list.rs"
+ {
+ return;
+ }
+
+ let filen_underscore = filename.replace('-', "_").replace(".rs", "");
+ let filename_is_gate_test = test_filen_gate(&filen_underscore, &mut features);
+
+ for (i, line) in contents.lines().enumerate() {
+ let mut err = |msg: &str| {
+ tidy_error!(bad, "{}:{}: {}", file.display(), i + 1, msg);
+ };
+
+ let gate_test_str = "gate-test-";
+
+ let feature_name = match line.find(gate_test_str) {
+ // NB: the `splitn` always succeeds, even if the delimiter is not present.
+ Some(i) => line[i + gate_test_str.len()..].splitn(2, ' ').next().unwrap(),
+ None => continue,
+ };
+ match features.get_mut(feature_name) {
+ Some(f) => {
+ if filename_is_gate_test {
+ err(&format!(
+                            "The file is already marked as a gate test \
+                                 through its name; no need for a \
+                                 'gate-test-{}' comment",
+ feature_name
+ ));
+ }
+ f.has_gate_test = true;
+ }
+ None => {
+ err(&format!(
+                            "gate-test comment found referencing a nonexistent feature '{}'",
+ feature_name
+ ));
+ }
+ }
+ }
+ },
+ );
+
+ // Only check the number of lang features.
+ // Obligatory testing for library features is dumb.
+ let gate_untested = features
+ .iter()
+ .filter(|&(_, f)| f.level == Status::Unstable)
+ .filter(|&(_, f)| !f.has_gate_test)
+ .collect::<Vec<_>>();
+
+ for &(name, _) in gate_untested.iter() {
+ println!("Expected a gate test for the feature '{name}'.");
+ println!(
+ "Hint: create a failing test file named 'feature-gate-{}.rs'\
+ \n in the 'ui' test suite, with its failures due to\
+ \n missing usage of `#![feature({})]`.",
+ name, name
+ );
+ println!(
+ "Hint: If you already have such a test and don't want to rename it,\
+ \n you can also add a // gate-test-{} line to the test file.",
+ name
+ );
+ }
+
+ if !gate_untested.is_empty() {
+ tidy_error!(bad, "Found {} features without a gate test.", gate_untested.len());
+ }
+
+ if *bad {
+ return CollectedFeatures { lib: lib_features, lang: features };
+ }
+
+ if verbose {
+ let mut lines = Vec::new();
+ lines.extend(format_features(&features, "lang"));
+ lines.extend(format_features(&lib_features, "lib"));
+
+ lines.sort();
+ for line in lines {
+ println!("* {line}");
+ }
+ } else {
+ println!("* {} features", features.len());
+ }
+
+ CollectedFeatures { lib: lib_features, lang: features }
+}
+
+fn format_features<'a>(
+ features: &'a Features,
+ family: &'a str,
+) -> impl Iterator<Item = String> + 'a {
+ features.iter().map(move |(name, feature)| {
+ format!(
+ "{:<32} {:<8} {:<12} {:<8}",
+ name,
+ family,
+ feature.level,
+ feature.since.map_or("None".to_owned(), |since| since.to_string())
+ )
+ })
+}
+
+fn find_attr_val<'a>(line: &'a str, attr: &str) -> Option<&'a str> {
+ lazy_static::lazy_static! {
+ static ref ISSUE: Regex = Regex::new(r#"issue\s*=\s*"([^"]*)""#).unwrap();
+ static ref FEATURE: Regex = Regex::new(r#"feature\s*=\s*"([^"]*)""#).unwrap();
+ static ref SINCE: Regex = Regex::new(r#"since\s*=\s*"([^"]*)""#).unwrap();
+ }
+
+ let r = match attr {
+ "issue" => &*ISSUE,
+ "feature" => &*FEATURE,
+ "since" => &*SINCE,
+ _ => unimplemented!("{attr} not handled"),
+ };
+
+ r.captures(line).and_then(|c| c.get(1)).map(|m| m.as_str())
+}
+
+fn test_filen_gate(filen_underscore: &str, features: &mut Features) -> bool {
+ let prefix = "feature_gate_";
+ if filen_underscore.starts_with(prefix) {
+ for (n, f) in features.iter_mut() {
+ // Equivalent to filen_underscore == format!("feature_gate_{n}")
+ if &filen_underscore[prefix.len()..] == n {
+ f.has_gate_test = true;
+ return true;
+ }
+ }
+ }
+ false
+}
+
+pub fn collect_lang_features(base_compiler_path: &Path, bad: &mut bool) -> Features {
+ let mut all = collect_lang_features_in(base_compiler_path, "active.rs", bad);
+ all.extend(collect_lang_features_in(base_compiler_path, "accepted.rs", bad));
+ all.extend(collect_lang_features_in(base_compiler_path, "removed.rs", bad));
+ all
+}
+
+fn collect_lang_features_in(base: &Path, file: &str, bad: &mut bool) -> Features {
+ let path = base.join("rustc_feature").join("src").join(file);
+ let contents = t!(fs::read_to_string(&path));
+
+ // We allow rustc-internal features to omit a tracking issue.
+    // To make tidy accept omitting a tracking issue, group the list of features
+    // without one between `// no-tracking-issue-start` and `// no-tracking-issue-end`.
+ let mut next_feature_omits_tracking_issue = false;
+
+ let mut in_feature_group = false;
+ let mut prev_names = vec![];
+
+ contents
+ .lines()
+ .zip(1..)
+ .filter_map(|(line, line_number)| {
+ let line = line.trim();
+
+ // Within -start and -end, the tracking issue can be omitted.
+ match line {
+ "// no-tracking-issue-start" => {
+ next_feature_omits_tracking_issue = true;
+ return None;
+ }
+ "// no-tracking-issue-end" => {
+ next_feature_omits_tracking_issue = false;
+ return None;
+ }
+ _ => {}
+ }
+
+ if line.starts_with(FEATURE_GROUP_START_PREFIX) {
+ if in_feature_group {
+ tidy_error!(
+ bad,
+ "{}:{}: \
+ new feature group is started without ending the previous one",
+ path.display(),
+ line_number,
+ );
+ }
+
+ in_feature_group = true;
+ prev_names = vec![];
+ return None;
+ } else if line.starts_with(FEATURE_GROUP_END_PREFIX) {
+ in_feature_group = false;
+ prev_names = vec![];
+ return None;
+ }
+
+ let mut parts = line.split(',');
+ let level = match parts.next().map(|l| l.trim().trim_start_matches('(')) {
+ Some("active") => Status::Unstable,
+ Some("incomplete") => Status::Unstable,
+ Some("removed") => Status::Removed,
+ Some("accepted") => Status::Stable,
+ _ => return None,
+ };
+ let name = parts.next().unwrap().trim();
+
+ let since_str = parts.next().unwrap().trim().trim_matches('"');
+ let since = match since_str.parse() {
+ Ok(since) => Some(since),
+ Err(err) => {
+ tidy_error!(
+ bad,
+ "{}:{}: failed to parse since: {} ({:?})",
+ path.display(),
+ line_number,
+ since_str,
+ err,
+ );
+ None
+ }
+ };
+ if in_feature_group {
+ if prev_names.last() > Some(&name) {
+ // This assumes the user adds the feature name at the end of the list, as we're
+ // not looking ahead.
+ let correct_index = match prev_names.binary_search(&name) {
+ Ok(_) => {
+ // This only occurs when the feature name has already been declared.
+ tidy_error!(
+ bad,
+ "{}:{}: duplicate feature {}",
+ path.display(),
+ line_number,
+ name,
+ );
+ // skip any additional checks for this line
+ return None;
+ }
+ Err(index) => index,
+ };
+
+ let correct_placement = if correct_index == 0 {
+ "at the beginning of the feature group".to_owned()
+ } else if correct_index == prev_names.len() {
+ // I don't believe this is reachable given the above assumption, but it
+ // doesn't hurt to be safe.
+ "at the end of the feature group".to_owned()
+ } else {
+ format!(
+ "between {} and {}",
+ prev_names[correct_index - 1],
+ prev_names[correct_index],
+ )
+ };
+
+ tidy_error!(
+ bad,
+ "{}:{}: feature {} is not sorted by feature name (should be {})",
+ path.display(),
+ line_number,
+ name,
+ correct_placement,
+ );
+ }
+ prev_names.push(name);
+ }
+
+ let issue_str = parts.next().unwrap().trim();
+ let tracking_issue = if issue_str.starts_with("None") {
+ if level == Status::Unstable && !next_feature_omits_tracking_issue {
+ tidy_error!(
+ bad,
+ "{}:{}: no tracking issue for feature {}",
+ path.display(),
+ line_number,
+ name,
+ );
+ }
+ None
+ } else {
+ let s = issue_str.split('(').nth(1).unwrap().split(')').next().unwrap();
+ Some(s.parse().unwrap())
+ };
+ Some((name.to_owned(), Feature { level, since, has_gate_test: false, tracking_issue }))
+ })
+ .collect()
+}
+
+fn get_and_check_lib_features(
+ base_src_path: &Path,
+ bad: &mut bool,
+ lang_features: &Features,
+) -> Features {
+ let mut lib_features = Features::new();
+ map_lib_features(base_src_path, &mut |res, file, line| match res {
+ Ok((name, f)) => {
+ let mut check_features = |f: &Feature, list: &Features, display: &str| {
+ if let Some(ref s) = list.get(name) {
+ if f.tracking_issue != s.tracking_issue && f.level != Status::Stable {
+ tidy_error!(
+ bad,
+ "{}:{}: `issue` \"{}\" mismatches the {} `issue` of \"{}\"",
+ file.display(),
+ line,
+ f.tracking_issue_display(),
+ display,
+ s.tracking_issue_display(),
+ );
+ }
+ }
+ };
+ check_features(&f, &lang_features, "corresponding lang feature");
+ check_features(&f, &lib_features, "previous");
+ lib_features.insert(name.to_owned(), f);
+ }
+ Err(msg) => {
+ tidy_error!(bad, "{}:{}: {}", file.display(), line, msg);
+ }
+ });
+ lib_features
+}
+
+fn map_lib_features(
+ base_src_path: &Path,
+ mf: &mut dyn FnMut(Result<(&str, Feature), &str>, &Path, usize),
+) {
+ super::walk(
+ base_src_path,
+ &mut |path| super::filter_dirs(path) || path.ends_with("src/test"),
+ &mut |entry, contents| {
+ let file = entry.path();
+ let filename = file.file_name().unwrap().to_string_lossy();
+ if !filename.ends_with(".rs")
+ || filename == "features.rs"
+ || filename == "diagnostic_list.rs"
+ || filename == "error_codes.rs"
+ {
+ return;
+ }
+
+ // This is an early exit -- all the attributes we're concerned with must contain this:
+ // * rustc_const_unstable(
+ // * unstable(
+ // * stable(
+ if !contents.contains("stable(") {
+ return;
+ }
+
+ let handle_issue_none = |s| match s {
+ "none" => None,
+ issue => {
+ let n = issue.parse().expect("issue number is not a valid integer");
+ assert_ne!(n, 0, "\"none\" should be used when there is no issue, not \"0\"");
+ NonZeroU32::new(n)
+ }
+ };
+ let mut becoming_feature: Option<(&str, Feature)> = None;
+ let mut iter_lines = contents.lines().enumerate().peekable();
+ while let Some((i, line)) = iter_lines.next() {
+ macro_rules! err {
+ ($msg:expr) => {{
+ mf(Err($msg), file, i + 1);
+ continue;
+ }};
+ }
+
+ lazy_static::lazy_static! {
+ static ref COMMENT_LINE: Regex = Regex::new(r"^\s*//").unwrap();
+ }
+ // exclude commented out lines
+ if COMMENT_LINE.is_match(line) {
+ continue;
+ }
+
+ if let Some((ref name, ref mut f)) = becoming_feature {
+ if f.tracking_issue.is_none() {
+ f.tracking_issue = find_attr_val(line, "issue").and_then(handle_issue_none);
+ }
+ if line.ends_with(']') {
+ mf(Ok((name, f.clone())), file, i + 1);
+ } else if !line.ends_with(',') && !line.ends_with('\\') && !line.ends_with('"')
+ {
+ // We need to bail here because we might have missed the
+ // end of a stability attribute above because the ']'
+ // might not have been at the end of the line.
+ // We could then get into the very unfortunate situation that
+ // we continue parsing the file assuming the current stability
+ // attribute has not ended, and ignoring possible feature
+ // attributes in the process.
+ err!("malformed stability attribute");
+ } else {
+ continue;
+ }
+ }
+ becoming_feature = None;
+ if line.contains("rustc_const_unstable(") {
+ // `const fn` features are handled specially.
+ let feature_name = match find_attr_val(line, "feature") {
+ Some(name) => name,
+ None => err!("malformed stability attribute: missing `feature` key"),
+ };
+ let feature = Feature {
+ level: Status::Unstable,
+ since: None,
+ has_gate_test: false,
+ tracking_issue: find_attr_val(line, "issue").and_then(handle_issue_none),
+ };
+ mf(Ok((feature_name, feature)), file, i + 1);
+ continue;
+ }
+ let level = if line.contains("[unstable(") {
+ Status::Unstable
+ } else if line.contains("[stable(") {
+ Status::Stable
+ } else {
+ continue;
+ };
+ let feature_name = match find_attr_val(line, "feature")
+ .or_else(|| iter_lines.peek().and_then(|next| find_attr_val(next.1, "feature")))
+ {
+ Some(name) => name,
+ None => err!("malformed stability attribute: missing `feature` key"),
+ };
+ let since = match find_attr_val(line, "since").map(|x| x.parse()) {
+ Some(Ok(since)) => Some(since),
+ Some(Err(_err)) => {
+ err!("malformed stability attribute: can't parse `since` key");
+ }
+ None if level == Status::Stable => {
+ err!("malformed stability attribute: missing the `since` key");
+ }
+ None => None,
+ };
+ let tracking_issue = find_attr_val(line, "issue").and_then(handle_issue_none);
+
+ let feature = Feature { level, since, has_gate_test: false, tracking_issue };
+ if line.contains(']') {
+ mf(Ok((feature_name, feature)), file, i + 1);
+ } else {
+ becoming_feature = Some((feature_name, feature));
+ }
+ }
+ },
+ );
+}
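For reference, a minimal sketch of the extraction `find_attr_val` performs, using the same `since` regex on a sample stability attribute (requires the `regex` crate):

```rust
use regex::Regex;

fn main() {
    let since = Regex::new(r#"since\s*=\s*"([^"]*)""#).unwrap();
    let line = r#"#[stable(feature = "rust1", since = "1.0.0")]"#;
    // Group 1 captures the quoted value, if the key is present.
    let val = since.captures(line).and_then(|c| c.get(1)).map(|m| m.as_str());
    assert_eq!(val, Some("1.0.0"));
}
```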
diff --git a/src/tools/tidy/src/features/tests.rs b/src/tools/tidy/src/features/tests.rs
new file mode 100644
index 000000000..994523ac1
--- /dev/null
+++ b/src/tools/tidy/src/features/tests.rs
@@ -0,0 +1,9 @@
+use super::*;
+
+#[test]
+fn test_find_attr_val() {
+ let s = r#"#[unstable(feature = "tidy_test_never_used_anywhere_else", issue = "58402")]"#;
+ assert_eq!(find_attr_val(s, "feature"), Some("tidy_test_never_used_anywhere_else"));
+ assert_eq!(find_attr_val(s, "issue"), Some("58402"));
+ assert_eq!(find_attr_val(s, "since"), None);
+}
diff --git a/src/tools/tidy/src/features/version.rs b/src/tools/tidy/src/features/version.rs
new file mode 100644
index 000000000..620be2f98
--- /dev/null
+++ b/src/tools/tidy/src/features/version.rs
@@ -0,0 +1,48 @@
+use std::fmt;
+use std::num::ParseIntError;
+use std::str::FromStr;
+
+#[cfg(test)]
+mod tests;
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+pub struct Version {
+ parts: [u32; 3],
+}
+
+impl fmt::Display for Version {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad(&format!("{}.{}.{}", self.parts[0], self.parts[1], self.parts[2]))
+ }
+}
+
+#[derive(Debug, PartialEq, Eq)]
+pub enum ParseVersionError {
+ ParseIntError(ParseIntError),
+ WrongNumberOfParts,
+}
+
+impl From<ParseIntError> for ParseVersionError {
+ fn from(err: ParseIntError) -> Self {
+ ParseVersionError::ParseIntError(err)
+ }
+}
+
+impl FromStr for Version {
+ type Err = ParseVersionError;
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ let mut iter = s.split('.').map(|part| Ok(part.parse()?));
+
+ let mut part = || iter.next().unwrap_or(Err(ParseVersionError::WrongNumberOfParts));
+
+ let parts = [part()?, part()?, part()?];
+
+ if iter.next().is_some() {
+ // Ensure we don't have more than 3 parts.
+ return Err(ParseVersionError::WrongNumberOfParts);
+ }
+
+ Ok(Self { parts })
+ }
+}
diff --git a/src/tools/tidy/src/features/version/tests.rs b/src/tools/tidy/src/features/version/tests.rs
new file mode 100644
index 000000000..31224fdf1
--- /dev/null
+++ b/src/tools/tidy/src/features/version/tests.rs
@@ -0,0 +1,38 @@
+use super::*;
+
+#[test]
+fn test_try_from_invalid_version() {
+ assert!("".parse::<Version>().is_err());
+ assert!("hello".parse::<Version>().is_err());
+ assert!("1.32.hi".parse::<Version>().is_err());
+ assert!("1.32..1".parse::<Version>().is_err());
+ assert!("1.32".parse::<Version>().is_err());
+ assert!("1.32.0.1".parse::<Version>().is_err());
+}
+
+#[test]
+fn test_try_from_single() {
+ assert_eq!("1.32.0".parse(), Ok(Version { parts: [1, 32, 0] }));
+ assert_eq!("1.0.0".parse(), Ok(Version { parts: [1, 0, 0] }));
+}
+
+#[test]
+fn test_compare() {
+ let v_1_0_0 = "1.0.0".parse::<Version>().unwrap();
+ let v_1_32_0 = "1.32.0".parse::<Version>().unwrap();
+ let v_1_32_1 = "1.32.1".parse::<Version>().unwrap();
+ assert!(v_1_0_0 < v_1_32_1);
+ assert!(v_1_0_0 < v_1_32_0);
+ assert!(v_1_32_0 < v_1_32_1);
+}
+
+#[test]
+fn test_to_string() {
+ let v_1_0_0 = "1.0.0".parse::<Version>().unwrap();
+ let v_1_32_1 = "1.32.1".parse::<Version>().unwrap();
+
+ assert_eq!(v_1_0_0.to_string(), "1.0.0");
+ assert_eq!(v_1_32_1.to_string(), "1.32.1");
+ assert_eq!(format!("{:<8}", v_1_32_1), "1.32.1 ");
+ assert_eq!(format!("{:>8}", v_1_32_1), " 1.32.1");
+}
diff --git a/src/tools/tidy/src/lib.rs b/src/tools/tidy/src/lib.rs
new file mode 100644
index 000000000..09848462a
--- /dev/null
+++ b/src/tools/tidy/src/lib.rs
@@ -0,0 +1,110 @@
+//! Library used by tidy and other tools.
+//!
+//! This library contains the tidy lints and exposes them
+//! for use by other tools.
+
+use std::fs::File;
+use std::io::Read;
+use walkdir::{DirEntry, WalkDir};
+
+use std::path::Path;
+
+macro_rules! t {
+ ($e:expr, $p:expr) => {
+ match $e {
+ Ok(e) => e,
+ Err(e) => panic!("{} failed on {} with {}", stringify!($e), ($p).display(), e),
+ }
+ };
+
+ ($e:expr) => {
+ match $e {
+ Ok(e) => e,
+ Err(e) => panic!("{} failed with {}", stringify!($e), e),
+ }
+ };
+}
+
+macro_rules! tidy_error {
+ ($bad:expr, $fmt:expr) => ({
+ *$bad = true;
+ eprintln!("tidy error: {}", $fmt);
+ });
+ ($bad:expr, $fmt:expr, $($arg:tt)*) => ({
+ *$bad = true;
+ eprint!("tidy error: ");
+ eprintln!($fmt, $($arg)*);
+ });
+}
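+
+// Illustrative only (not part of the original source): a minimal sketch of
+// how the two macros above behave, kept in a `#[cfg(test)]` module so it is
+// compiled out of normal builds.
+#[cfg(test)]
+mod macro_usage_sketch {
+    #[test]
+    fn macros_work_as_documented() {
+        // `t!` unwraps a `Result`, panicking with context on `Err`.
+        let parsed = t!("42".parse::<u32>());
+        assert_eq!(parsed, 42);
+
+        // `tidy_error!` prints to stderr and flips the shared "bad" flag.
+        let mut bad = false;
+        tidy_error!(&mut bad, "{}: example diagnostic", "some/path");
+        assert!(bad);
+    }
+}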
+
+pub mod bins;
+pub mod debug_artifacts;
+pub mod deps;
+pub mod edition;
+pub mod error_codes_check;
+pub mod errors;
+pub mod extdeps;
+pub mod features;
+pub mod pal;
+pub mod primitive_docs;
+pub mod style;
+pub mod target_specific_tests;
+pub mod ui_tests;
+pub mod unit_tests;
+pub mod unstable_book;
+
+fn filter_dirs(path: &Path) -> bool {
+ let skip = [
+ "tidy-test-file",
+ "compiler/rustc_codegen_cranelift",
+ "compiler/rustc_codegen_gcc",
+ "src/llvm-project",
+ "library/backtrace",
+ "library/portable-simd",
+ "library/stdarch",
+ "src/tools/cargo",
+ "src/tools/clippy",
+ "src/tools/miri",
+ "src/tools/rls",
+ "src/tools/rust-analyzer",
+ "src/tools/rust-installer",
+ "src/tools/rustfmt",
+ "src/doc/book",
+ // Filter RLS output directories
+ "target/rls",
+ ];
+ skip.iter().any(|p| path.ends_with(p))
+}
+
+fn walk_many(
+ paths: &[&Path],
+ skip: &mut dyn FnMut(&Path) -> bool,
+ f: &mut dyn FnMut(&DirEntry, &str),
+) {
+ for path in paths {
+ walk(path, skip, f);
+ }
+}
+
+fn walk(path: &Path, skip: &mut dyn FnMut(&Path) -> bool, f: &mut dyn FnMut(&DirEntry, &str)) {
+ let mut contents = String::new();
+ walk_no_read(path, skip, &mut |entry| {
+ contents.clear();
+ if t!(File::open(entry.path()), entry.path()).read_to_string(&mut contents).is_err() {
+ contents.clear();
+ }
+ f(&entry, &contents);
+ });
+}
+
+fn walk_no_read(path: &Path, skip: &mut dyn FnMut(&Path) -> bool, f: &mut dyn FnMut(&DirEntry)) {
+ let walker = WalkDir::new(path).into_iter().filter_entry(|e| !skip(e.path()));
+ for entry in walker {
+ if let Ok(entry) = entry {
+ if entry.file_type().is_dir() {
+ continue;
+ }
+ f(&entry);
+ }
+ }
+}
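+
+// Illustrative only (hypothetical check, not part of the original source):
+// the shape of a typical lint built on the walk helpers above.
+#[allow(dead_code)]
+fn example_cr_check(path: &Path, bad: &mut bool) {
+    walk(path, &mut filter_dirs, &mut |entry, contents| {
+        if contents.contains('\r') {
+            tidy_error!(bad, "{}: CR character", entry.path().display());
+        }
+    });
+}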
diff --git a/src/tools/tidy/src/main.rs b/src/tools/tidy/src/main.rs
new file mode 100644
index 000000000..aa8d8b4f6
--- /dev/null
+++ b/src/tools/tidy/src/main.rs
@@ -0,0 +1,112 @@
+//! Tidy checks source code in this repository.
+//!
+//! This program runs all of the various tidy checks for style, cleanliness,
+//! etc. This is run by default on `./x.py test` and as part of the auto
+//! builders. The tidy checks can be executed with `./x.py test tidy`.
+
+use tidy::*;
+
+use crossbeam_utils::thread::{scope, ScopedJoinHandle};
+use std::collections::VecDeque;
+use std::env;
+use std::num::NonZeroUsize;
+use std::path::PathBuf;
+use std::process;
+use std::str::FromStr;
+use std::sync::atomic::{AtomicBool, Ordering};
+
+fn main() {
+ let root_path: PathBuf = env::args_os().nth(1).expect("need path to root of repo").into();
+ let cargo: PathBuf = env::args_os().nth(2).expect("need path to cargo").into();
+ let output_directory: PathBuf =
+ env::args_os().nth(3).expect("need path to output directory").into();
+ let concurrency: NonZeroUsize =
+ FromStr::from_str(&env::args().nth(4).expect("need concurrency"))
+ .expect("concurrency must be a number");
+
+ let src_path = root_path.join("src");
+ let library_path = root_path.join("library");
+ let compiler_path = root_path.join("compiler");
+
+ let args: Vec<String> = env::args().skip(1).collect();
+
+ let verbose = args.iter().any(|s| *s == "--verbose");
+
+ let bad = std::sync::Arc::new(AtomicBool::new(false));
+
+ scope(|s| {
+ let mut handles: VecDeque<ScopedJoinHandle<'_, ()>> =
+ VecDeque::with_capacity(concurrency.get());
+
+ macro_rules! check {
+ ($p:ident $(, $args:expr)* ) => {
+ while handles.len() >= concurrency.get() {
+ handles.pop_front().unwrap().join().unwrap();
+ }
+
+ let handle = s.spawn(|_| {
+ let mut flag = false;
+ $p::check($($args),* , &mut flag);
+ if flag {
+ bad.store(true, Ordering::Relaxed);
+ }
+ });
+ handles.push_back(handle);
+ }
+ }
+
+ check!(target_specific_tests, &src_path);
+
+ // Checks that are done on the cargo workspace.
+ check!(deps, &root_path, &cargo);
+ check!(extdeps, &root_path);
+
+ // Checks over tests.
+ check!(debug_artifacts, &src_path);
+ check!(ui_tests, &src_path);
+
+ // Checks that only make sense for the compiler.
+ check!(errors, &compiler_path);
+ check!(error_codes_check, &[&src_path, &compiler_path]);
+
+ // Checks that only make sense for the std libs.
+ check!(pal, &library_path);
+ check!(primitive_docs, &library_path);
+
+ // Checks that need to be done for both the compiler and std libraries.
+ check!(unit_tests, &src_path);
+ check!(unit_tests, &compiler_path);
+ check!(unit_tests, &library_path);
+
+ if bins::check_filesystem_support(&[&root_path], &output_directory) {
+ check!(bins, &root_path);
+ }
+
+ check!(style, &src_path);
+ check!(style, &compiler_path);
+ check!(style, &library_path);
+
+ check!(edition, &src_path);
+ check!(edition, &compiler_path);
+ check!(edition, &library_path);
+
+ let collected = {
+ while handles.len() >= concurrency.get() {
+ handles.pop_front().unwrap().join().unwrap();
+ }
+ let mut flag = false;
+ let r = features::check(&src_path, &compiler_path, &library_path, &mut flag, verbose);
+ if flag {
+ bad.store(true, Ordering::Relaxed);
+ }
+ r
+ };
+ check!(unstable_book, &src_path, collected);
+ })
+ .unwrap();
+
+ if bad.load(Ordering::Relaxed) {
+ eprintln!("some tidy checks failed");
+ process::exit(1);
+ }
+}
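+
+// Illustrative only (hypothetical helper, not part of the original source):
+// the bounded-concurrency pattern implemented by the `check!` macro above,
+// written out as a standalone function. At most `concurrency` scoped threads
+// are alive at once; the oldest is joined before a new one is spawned.
+#[allow(dead_code)]
+fn run_bounded(jobs: Vec<Box<dyn Fn() + Send>>, concurrency: usize) {
+    scope(|s| {
+        let mut handles: VecDeque<ScopedJoinHandle<'_, ()>> =
+            VecDeque::with_capacity(concurrency);
+        for job in jobs {
+            while handles.len() >= concurrency {
+                handles.pop_front().unwrap().join().unwrap();
+            }
+            handles.push_back(s.spawn(move |_| job()));
+        }
+        // Any handles still queued are joined when the scope ends.
+    })
+    .unwrap();
+}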
diff --git a/src/tools/tidy/src/pal.rs b/src/tools/tidy/src/pal.rs
new file mode 100644
index 000000000..c5414233f
--- /dev/null
+++ b/src/tools/tidy/src/pal.rs
@@ -0,0 +1,209 @@
+//! Tidy check to enforce rules about platform-specific code in std.
+//!
+//! This is intended to maintain existing standards of code
+//! organization in hopes that the standard library will continue to
+//! be refactored to isolate platform-specific bits, making porting
+//! easier; where "standard library" roughly means "all the
+//! dependencies of the std and test crates".
+//!
+//! This generally means placing restrictions on where `cfg(unix)`,
+//! `cfg(windows)`, `cfg(target_os)` and `cfg(target_env)` may appear,
+//! the basic objective being to isolate platform-specific code to the
+//! platform-specific `std::sys` modules, and to the allocation,
+//! unwinding, and libc crates.
+//!
+//! Following are the basic rules, though there are currently
+//! exceptions:
+//!
+//! - core may not have platform-specific code.
+//! - libpanic_abort may have platform-specific code.
+//! - libpanic_unwind may have platform-specific code.
+//! - libunwind may have platform-specific code.
+//! - other crates in the std facade may not.
+//! - std may have platform-specific code in the following places:
+//! - `sys/`
+//! - `os/`
+//!
+//! `std/sys_common` should _not_ contain platform-specific code.
+//! Finally, because std contains tests with platform-specific
+//! `ignore` attributes, once the parser encounters `mod tests`,
+//! platform-specific cfgs are allowed. Not sure yet how to deal with
+//! this in the long term.
+
+use std::iter::Iterator;
+use std::path::Path;
+
+// Paths that may contain platform-specific code.
+const EXCEPTION_PATHS: &[&str] = &[
+ "library/panic_abort",
+ "library/panic_unwind",
+ "library/unwind",
+ "library/rtstartup", // Not sure what to do about this. magic stuff for mingw
+ "library/term", // Not sure how to make this crate portable, but test crate needs it.
+ "library/test", // Probably should defer to unstable `std::sys` APIs.
+ // The `VaList` implementation must have platform specific code.
+ // The Windows implementation of a `va_list` is always a character
+ // pointer regardless of the target architecture. As a result,
+ // we must use `#[cfg(windows)]` to conditionally compile the
+ // correct `VaList` structure for Windows.
+ "library/core/src/ffi/mod.rs",
+ "library/std/src/sys/", // Platform-specific code for std lives here.
+ "library/std/src/os", // Platform-specific public interfaces
+ // Temporary `std` exceptions
+ // FIXME: platform-specific code should be moved to `sys`
+ "library/std/src/io/copy.rs",
+ "library/std/src/io/stdio.rs",
+ "library/std/src/f32.rs",
+ "library/std/src/f64.rs",
+ "library/std/src/path.rs",
+ "library/std/src/sys_common", // Should only contain abstractions over platforms
+ "library/std/src/net/test.rs", // Utility helpers for tests
+ "library/std/src/panic.rs", // fuchsia-specific panic backtrace handling
+];
+
+pub fn check(path: &Path, bad: &mut bool) {
+ // Sanity check that the complex parsing here works.
+ let mut saw_target_arch = false;
+ let mut saw_cfg_bang = false;
+ super::walk(path, &mut super::filter_dirs, &mut |entry, contents| {
+ let file = entry.path();
+ let filestr = file.to_string_lossy().replace("\\", "/");
+ if !filestr.ends_with(".rs") {
+ return;
+ }
+
+ let is_exception_path = EXCEPTION_PATHS.iter().any(|s| filestr.contains(&**s));
+ if is_exception_path {
+ return;
+ }
+
+ // exclude tests and benchmarks as some platforms do not support all tests
+ if filestr.contains("tests") || filestr.contains("benches") {
+ return;
+ }
+
+ check_cfgs(contents, &file, bad, &mut saw_target_arch, &mut saw_cfg_bang);
+ });
+
+ assert!(saw_target_arch);
+ assert!(saw_cfg_bang);
+}
+
+fn check_cfgs(
+ contents: &str,
+ file: &Path,
+ bad: &mut bool,
+ saw_target_arch: &mut bool,
+ saw_cfg_bang: &mut bool,
+) {
+ // Pull out all `cfg(...)` and `cfg!(...)` strings.
+ let cfgs = parse_cfgs(contents);
+
+ let mut line_numbers: Option<Vec<usize>> = None;
+ let mut err = |idx: usize, cfg: &str| {
+ if line_numbers.is_none() {
+ line_numbers = Some(contents.match_indices('\n').map(|(i, _)| i).collect());
+ }
+ let line_numbers = line_numbers.as_ref().expect("line numbers were just computed");
+ let line = match line_numbers.binary_search(&idx) {
+ Ok(_) => unreachable!(),
+ Err(i) => i + 1,
+ };
+ tidy_error!(bad, "{}:{}: platform-specific cfg: {}", file.display(), line, cfg);
+ };
+
+ for (idx, cfg) in cfgs {
+ // Sanity check that the parsing here works.
+ if !*saw_target_arch && cfg.contains("target_arch") {
+ *saw_target_arch = true
+ }
+ if !*saw_cfg_bang && cfg.contains("cfg!") {
+ *saw_cfg_bang = true
+ }
+
+ let contains_platform_specific_cfg = cfg.contains("target_os")
+ || cfg.contains("target_env")
+ || cfg.contains("target_abi")
+ || cfg.contains("target_vendor")
+ || cfg.contains("unix")
+ || cfg.contains("windows");
+
+ if !contains_platform_specific_cfg {
+ continue;
+ }
+
+ let preceded_by_doc_comment = {
+ let pre_contents = &contents[..idx];
+ let pre_newline = pre_contents.rfind('\n');
+ let pre_doc_comment = pre_contents.rfind("///");
+ match (pre_newline, pre_doc_comment) {
+ (Some(n), Some(c)) => n < c,
+ (None, Some(_)) => true,
+ (_, None) => false,
+ }
+ };
+
+ if preceded_by_doc_comment {
+ continue;
+ }
+
+ // exclude tests as some platforms do not support all tests
+ if cfg.contains("test") {
+ continue;
+ }
+
+ err(idx, cfg);
+ }
+}
+
+fn parse_cfgs(contents: &str) -> Vec<(usize, &str)> {
+ let candidate_cfgs = contents.match_indices("cfg");
+ let candidate_cfg_idxs = candidate_cfgs.map(|(i, _)| i);
+ // This is pulling out the indices of all "cfg" strings
+ // that appear to be tokens followed by a parenthesis.
+ let cfgs = candidate_cfg_idxs.filter(|i| {
+ let pre_idx = i.saturating_sub(1);
+ let succeeds_non_ident = !contents
+ .as_bytes()
+ .get(pre_idx)
+ .cloned()
+ .map(char::from)
+ .map(char::is_alphanumeric)
+ .unwrap_or(false);
+ let contents_after = &contents[*i..];
+ let first_paren = contents_after.find('(');
+ let paren_idx = first_paren.map(|ip| i + ip);
+ let precedes_whitespace_and_paren = paren_idx
+ .map(|ip| {
+ let maybe_space = &contents[*i + "cfg".len()..ip];
+ maybe_space.chars().all(|c| char::is_whitespace(c) || c == '!')
+ })
+ .unwrap_or(false);
+
+ succeeds_non_ident && precedes_whitespace_and_paren
+ });
+
+ cfgs.flat_map(|i| {
+ let mut depth = 0;
+ let contents_from = &contents[i..];
+ for (j, byte) in contents_from.bytes().enumerate() {
+ match byte {
+ b'(' => {
+ depth += 1;
+ }
+ b')' => {
+ depth -= 1;
+ if depth == 0 {
+ return Some((i, &contents_from[..=j]));
+ }
+ }
+ _ => {}
+ }
+ }
+
+ // if the parentheses are unbalanced just ignore this cfg -- it'll be caught when attempting
+ // to run the compiler, and there's no real reason to lint it separately here
+ None
+ })
+ .collect()
+}
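+
+// Illustrative only (hypothetical test, not part of the original source):
+// `parse_cfgs` should find both attribute-style `cfg(...)` and macro-style
+// `cfg!(...)` occurrences, keeping the balanced parentheses intact.
+#[cfg(test)]
+mod parse_cfgs_sketch {
+    #[test]
+    fn finds_balanced_cfg_groups() {
+        let src = "#[cfg(unix)] fn f() { let _ = cfg!(target_os = \"linux\"); }";
+        let found = super::parse_cfgs(src);
+        assert_eq!(found.len(), 2);
+        assert_eq!(found[0].1, "cfg(unix)");
+        assert_eq!(found[1].1, "cfg!(target_os = \"linux\")");
+    }
+}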
diff --git a/src/tools/tidy/src/primitive_docs.rs b/src/tools/tidy/src/primitive_docs.rs
new file mode 100644
index 000000000..f3200e0af
--- /dev/null
+++ b/src/tools/tidy/src/primitive_docs.rs
@@ -0,0 +1,17 @@
+//! Tidy check to make sure `library/{std,core}/src/primitive_docs.rs` have identical contents.
+//! They are kept as two separate files so that relative links work properly without having to
+//! have `CARGO_PKG_NAME` set, but conceptually they should always be the same.
+
+use std::path::Path;
+
+pub fn check(library_path: &Path, bad: &mut bool) {
+ let std_name = "std/src/primitive_docs.rs";
+ let core_name = "core/src/primitive_docs.rs";
+ let std_contents = std::fs::read_to_string(library_path.join(std_name))
+ .unwrap_or_else(|e| panic!("failed to read library/{std_name}: {e}"));
+ let core_contents = std::fs::read_to_string(library_path.join(core_name))
+ .unwrap_or_else(|e| panic!("failed to read library/{core_name}: {e}"));
+ if std_contents != core_contents {
+ tidy_error!(bad, "library/{core_name} and library/{std_name} have different contents");
+ }
+}
diff --git a/src/tools/tidy/src/style.rs b/src/tools/tidy/src/style.rs
new file mode 100644
index 000000000..3cf44a2d7
--- /dev/null
+++ b/src/tools/tidy/src/style.rs
@@ -0,0 +1,423 @@
+//! Tidy check to enforce various stylistic guidelines on the Rust codebase.
+//!
+//! Example checks are:
+//!
+//! * No lines over 100 characters (in non-Rust files).
+//! * No files with over 3000 lines.
+//! * No tabs.
+//! * No trailing whitespace.
+//! * No CR characters.
+//! * No `TODO` or `XXX` directives.
+//! * No unexplained ` ```ignore ` or ` ```rust,ignore ` doc tests.
+//!
+//! Note that some of these rules are excluded from Rust files because we enforce rustfmt. It is
+//! preferable to be formatted rather than tidy-clean.
+//!
+//! A number of these checks can be opted-out of with various directives of the form:
+//! `// ignore-tidy-CHECK-NAME`.
+
+use regex::Regex;
+use std::path::Path;
+
+/// Error code markdown is restricted to 80 columns because they can be
+/// displayed on the console with --explain.
+const ERROR_CODE_COLS: usize = 80;
+const COLS: usize = 100;
+
+const LINES: usize = 3000;
+
+const UNEXPLAINED_IGNORE_DOCTEST_INFO: &str = r#"unexplained "```ignore" doctest; try one:
+
+* make the test actually pass, by adding necessary imports and declarations, or
+* use "```text", if the code is not Rust code, or
+* use "```compile_fail,Ennnn", if the code is expected to fail at compile time, or
+* use "```should_panic", if the code is expected to fail at run time, or
+* use "```no_run", if the code should type-check but is not necessarily linkable/runnable, or
+* explain it like "```ignore (cannot-test-this-because-xxxx)", if the annotation cannot be avoided.
+
+"#;
+
+const LLVM_UNREACHABLE_INFO: &str = r"\
+C++ code used llvm_unreachable, which triggers undefined behavior
+when executed with assertions disabled.
+Use llvm::report_fatal_error for increased robustness.";
+
+const ANNOTATIONS_TO_IGNORE: &[&str] = &[
+ "// @!has",
+ "// @has",
+ "// @matches",
+ "// CHECK",
+ "// EMIT_MIR",
+ "// compile-flags",
+ "// error-pattern",
+ "// gdb",
+ "// lldb",
+ "// cdb",
+ "// normalize-stderr-test",
+];
+
+// Intentionally written in decimal rather than hex
+const PROBLEMATIC_CONSTS: &[u32] = &[
+ 184594741, 2880289470, 2881141438, 2965027518, 2976579765, 3203381950, 3405691582, 3405697037,
+ 3735927486, 4027431614, 4276992702,
+];
+
+/// Parser states for `line_is_url`.
+#[derive(Clone, Copy, PartialEq)]
+#[allow(non_camel_case_types)]
+enum LIUState {
+ EXP_COMMENT_START,
+ EXP_LINK_LABEL_OR_URL,
+ EXP_URL,
+ EXP_END,
+}
+
+/// Returns `true` if `line` appears to be a line comment containing a URL,
+/// possibly with a Markdown link label in front, and nothing else.
+/// The Markdown link label, if present, may not contain whitespace.
+/// Lines of this form are allowed to be overlength, because Markdown
+/// offers no way to split a line in the middle of a URL, and the lengths
+/// of URLs to external references are beyond our control.
+fn line_is_url(is_error_code: bool, columns: usize, line: &str) -> bool {
+ // more basic check for markdown, to avoid complexity in implementing two state machines
+ if is_error_code {
+ return line.starts_with('[') && line.contains("]:") && line.contains("http");
+ }
+
+ use self::LIUState::*;
+ let mut state: LIUState = EXP_COMMENT_START;
+ let is_url = |w: &str| w.starts_with("http://") || w.starts_with("https://");
+
+ for tok in line.split_whitespace() {
+ match (state, tok) {
+ (EXP_COMMENT_START, "//") | (EXP_COMMENT_START, "///") | (EXP_COMMENT_START, "//!") => {
+ state = EXP_LINK_LABEL_OR_URL
+ }
+
+ (EXP_LINK_LABEL_OR_URL, w)
+ if w.len() >= 4 && w.starts_with('[') && w.ends_with("]:") =>
+ {
+ state = EXP_URL
+ }
+
+ (EXP_LINK_LABEL_OR_URL, w) if is_url(w) => state = EXP_END,
+
+ (EXP_URL, w) if is_url(w) || w.starts_with("../") => state = EXP_END,
+
+ (_, w) if w.len() > columns && is_url(w) => state = EXP_END,
+
+ (_, _) => {}
+ }
+ }
+
+ state == EXP_END
+}
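+
+// Illustrative only (hypothetical test, not part of the original source):
+// spot-checks for the URL state machine above.
+#[cfg(test)]
+mod line_is_url_sketch {
+    #[test]
+    fn recognizes_comment_urls() {
+        assert!(super::line_is_url(false, 100, "// https://example.com/some/long/path"));
+        assert!(super::line_is_url(false, 100, "/// [label]: https://example.com"));
+        assert!(!super::line_is_url(false, 100, "// an ordinary comment"));
+    }
+}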
+
+/// Returns `true` if `line` can be ignored. This is the case when it contains
+/// an annotation that is explicitly ignored.
+fn should_ignore(line: &str) -> bool {
+ // Matches test annotations like `//~ ERROR text`.
+ // This mirrors the regex in src/tools/compiletest/src/runtest.rs, please
+ // update both if either are changed.
+ let re = Regex::new("\\s*//(\\[.*\\])?~.*").unwrap();
+ re.is_match(line) || ANNOTATIONS_TO_IGNORE.iter().any(|a| line.contains(a))
+}
+
+/// Returns `true` if `line` is allowed to be longer than the normal limit.
+fn long_line_is_ok(extension: &str, is_error_code: bool, max_columns: usize, line: &str) -> bool {
+ if extension != "md" || is_error_code {
+ if line_is_url(is_error_code, max_columns, line) || should_ignore(line) {
+ return true;
+ }
+ } else {
+ // non-error code markdown is allowed to be any length
+ return true;
+ }
+
+ false
+}
+
+enum Directive {
+ /// By default, tidy always warns against style issues.
+ Deny,
+
+ /// `Ignore(false)` means that an `ignore-tidy-*` directive
+ /// has been provided, but is unnecessary. `Ignore(true)`
+ /// means that it is necessary (i.e. a warning would be
+ /// produced if `ignore-tidy-*` was not present).
+ Ignore(bool),
+}
+
+fn contains_ignore_directive(can_contain: bool, contents: &str, check: &str) -> Directive {
+ if !can_contain {
+ return Directive::Deny;
+ }
+ // Update `can_contain` when changing this
+ if contents.contains(&format!("// ignore-tidy-{check}"))
+ || contents.contains(&format!("# ignore-tidy-{check}"))
+ || contents.contains(&format!("/* ignore-tidy-{check} */"))
+ {
+ Directive::Ignore(false)
+ } else {
+ Directive::Deny
+ }
+}
+
+macro_rules! suppressible_tidy_err {
+ ($err:ident, $skip:ident, $msg:expr) => {
+ if let Directive::Deny = $skip {
+ $err($msg);
+ } else {
+ $skip = Directive::Ignore(true);
+ }
+ };
+}
+
+pub fn is_in(full_path: &Path, parent_folder_to_find: &str, folder_to_find: &str) -> bool {
+ if let Some(parent) = full_path.parent() {
+ if parent.file_name().map_or_else(
+ || false,
+ |f| {
+ f.to_string_lossy() == folder_to_find
+ && parent
+ .parent()
+ .and_then(|f| f.file_name())
+ .map_or_else(|| false, |f| f == parent_folder_to_find)
+ },
+ ) {
+ true
+ } else {
+ is_in(parent, parent_folder_to_find, folder_to_find)
+ }
+ } else {
+ false
+ }
+}
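+
+// Illustrative only (hypothetical test, not part of the original source):
+// `is_in` walks up the path, matching a directory together with its
+// immediate parent.
+#[cfg(test)]
+mod is_in_sketch {
+    use std::path::Path;
+
+    #[test]
+    fn matches_parent_child_pairs() {
+        let p = Path::new("src/librustdoc/html/static/css/rustdoc.css");
+        assert!(super::is_in(p, "src", "librustdoc"));
+        assert!(!super::is_in(p, "library", "librustdoc"));
+    }
+}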
+
+fn skip_markdown_path(path: &Path) -> bool {
+ // These aren't ready for tidy.
+ const SKIP_MD: &[&str] = &[
+ "src/doc/edition-guide",
+ "src/doc/embedded-book",
+ "src/doc/nomicon",
+ "src/doc/reference",
+ "src/doc/rust-by-example",
+ "src/doc/rustc-dev-guide",
+ ];
+ SKIP_MD.iter().any(|p| path.ends_with(p))
+}
+
+fn is_unexplained_ignore(extension: &str, line: &str) -> bool {
+ if !line.ends_with("```ignore") && !line.ends_with("```rust,ignore") {
+ return false;
+ }
+ if extension == "md" && line.trim().starts_with("//") {
+ // Markdown examples may include doc comments with ignore inside a
+ // code block.
+ return false;
+ }
+ true
+}
+
+pub fn check(path: &Path, bad: &mut bool) {
+ fn skip(path: &Path) -> bool {
+ super::filter_dirs(path) || skip_markdown_path(path)
+ }
+ let problematic_consts_strings: Vec<String> = (PROBLEMATIC_CONSTS.iter().map(u32::to_string))
+ .chain(PROBLEMATIC_CONSTS.iter().map(|v| format!("{:x}", v)))
+ .chain(PROBLEMATIC_CONSTS.iter().map(|v| format!("{:X}", v)))
+ .collect();
+ super::walk(path, &mut skip, &mut |entry, contents| {
+ let file = entry.path();
+ let filename = file.file_name().unwrap().to_string_lossy();
+ let extensions = [".rs", ".py", ".js", ".sh", ".c", ".cpp", ".h", ".md", ".css"];
+ if extensions.iter().all(|e| !filename.ends_with(e)) || filename.starts_with(".#") {
+ return;
+ }
+
+ let is_style_file = filename.ends_with(".css");
+ let under_rustfmt = filename.ends_with(".rs") &&
+ // This list should ideally be sourced from rustfmt.toml but we don't want to add a toml
+ // parser to tidy.
+ !file.ancestors().any(|a| {
+ a.ends_with("src/test") ||
+ a.ends_with("src/doc/book")
+ });
+
+ if is_style_file && !is_in(file, "src", "librustdoc") {
+ // We only check CSS files in rustdoc.
+ return;
+ }
+
+ if contents.is_empty() {
+ tidy_error!(bad, "{}: empty file", file.display());
+ }
+
+ let extension = file.extension().unwrap().to_string_lossy();
+ let is_error_code = extension == "md" && is_in(file, "src", "error_codes");
+
+ let max_columns = if is_error_code { ERROR_CODE_COLS } else { COLS };
+
+ let can_contain = contents.contains("// ignore-tidy-")
+ || contents.contains("# ignore-tidy-")
+ || contents.contains("/* ignore-tidy-");
+ // Enable testing ICEs that require specific (untidy)
+ // file formats easily, e.g. `issue-1234-ignore-tidy.rs`.
+ if filename.contains("ignore-tidy") {
+ return;
+ }
+ let mut skip_cr = contains_ignore_directive(can_contain, &contents, "cr");
+ let mut skip_undocumented_unsafe =
+ contains_ignore_directive(can_contain, &contents, "undocumented-unsafe");
+ let mut skip_tab = contains_ignore_directive(can_contain, &contents, "tab");
+ let mut skip_line_length = contains_ignore_directive(can_contain, &contents, "linelength");
+ let mut skip_file_length = contains_ignore_directive(can_contain, &contents, "filelength");
+ let mut skip_end_whitespace =
+ contains_ignore_directive(can_contain, &contents, "end-whitespace");
+ let mut skip_trailing_newlines =
+ contains_ignore_directive(can_contain, &contents, "trailing-newlines");
+ let mut skip_leading_newlines =
+ contains_ignore_directive(can_contain, &contents, "leading-newlines");
+ let mut skip_copyright = contains_ignore_directive(can_contain, &contents, "copyright");
+ let mut leading_new_lines = false;
+ let mut trailing_new_lines = 0;
+ let mut lines = 0;
+ let mut last_safety_comment = false;
+ for (i, line) in contents.split('\n').enumerate() {
+ let mut err = |msg: &str| {
+ tidy_error!(bad, "{}:{}: {}", file.display(), i + 1, msg);
+ };
+ if !under_rustfmt
+ && line.chars().count() > max_columns
+ && !long_line_is_ok(&extension, is_error_code, max_columns, line)
+ {
+ suppressible_tidy_err!(
+ err,
+ skip_line_length,
+ &format!("line longer than {max_columns} chars")
+ );
+ }
+ if !is_style_file && line.contains('\t') {
+ suppressible_tidy_err!(err, skip_tab, "tab character");
+ }
+ if line.ends_with(' ') || line.ends_with('\t') {
+ suppressible_tidy_err!(err, skip_end_whitespace, "trailing whitespace");
+ }
+ if is_style_file && line.starts_with(' ') {
+ err("CSS files use tabs for indent");
+ }
+ if line.contains('\r') {
+ suppressible_tidy_err!(err, skip_cr, "CR character");
+ }
+ if filename != "style.rs" {
+ if line.contains("TODO") {
+ err("TODO is deprecated; use FIXME")
+ }
+ if line.contains("//") && line.contains(" XXX") {
+ err("XXX is deprecated; use FIXME")
+ }
+ for s in problematic_consts_strings.iter() {
+ if line.contains(s) {
+ err("Don't use magic numbers that spell things (consider 0x12345678)");
+ }
+ }
+ }
+ let is_test = || file.components().any(|c| c.as_os_str() == "tests");
+ // for now we just check libcore
+ if line.contains("unsafe {") && !line.trim().starts_with("//") && !last_safety_comment {
+ if file.components().any(|c| c.as_os_str() == "core") && !is_test() {
+ suppressible_tidy_err!(err, skip_undocumented_unsafe, "undocumented unsafe");
+ }
+ }
+ if line.contains("// SAFETY:") {
+ last_safety_comment = true;
+ } else if line.trim().starts_with("//") || line.trim().is_empty() {
+ // keep previous value
+ } else {
+ last_safety_comment = false;
+ }
+ if (line.starts_with("// Copyright")
+ || line.starts_with("# Copyright")
+ || line.starts_with("Copyright"))
+ && (line.contains("Rust Developers") || line.contains("Rust Project Developers"))
+ {
+ suppressible_tidy_err!(
+ err,
+ skip_copyright,
+ "copyright notices attributed to the Rust Project Developers are deprecated"
+ );
+ }
+ if is_unexplained_ignore(&extension, line) {
+ err(UNEXPLAINED_IGNORE_DOCTEST_INFO);
+ }
+ if filename.ends_with(".cpp") && line.contains("llvm_unreachable") {
+ err(LLVM_UNREACHABLE_INFO);
+ }
+ if line.is_empty() {
+ if i == 0 {
+ leading_new_lines = true;
+ }
+ trailing_new_lines += 1;
+ } else {
+ trailing_new_lines = 0;
+ }
+
+ if !line.trim().starts_with("//") {
+ lines += 1;
+ }
+ }
+ if leading_new_lines {
+ let mut err = |_| {
+ tidy_error!(bad, "{}: leading newline", file.display());
+ };
+ suppressible_tidy_err!(err, skip_leading_newlines, "leading newline");
+ }
+ let mut err = |msg: &str| {
+ tidy_error!(bad, "{}: {}", file.display(), msg);
+ };
+ match trailing_new_lines {
+ 0 => suppressible_tidy_err!(err, skip_trailing_newlines, "missing trailing newline"),
+ 1 => {}
+ n => suppressible_tidy_err!(
+ err,
+ skip_trailing_newlines,
+ &format!("too many trailing newlines ({n})")
+ ),
+ };
+ if lines > LINES {
+ let mut err = |_| {
+ tidy_error!(
+ bad,
+ "{}: too many lines ({}) (add `// \
+ ignore-tidy-filelength` to the file to suppress this error)",
+ file.display(),
+ lines
+ );
+ };
+ suppressible_tidy_err!(err, skip_file_length, "");
+ }
+
+ if let Directive::Ignore(false) = skip_cr {
+ tidy_error!(bad, "{}: ignoring CR characters unnecessarily", file.display());
+ }
+ if let Directive::Ignore(false) = skip_tab {
+ tidy_error!(bad, "{}: ignoring tab characters unnecessarily", file.display());
+ }
+ if let Directive::Ignore(false) = skip_end_whitespace {
+ tidy_error!(bad, "{}: ignoring trailing whitespace unnecessarily", file.display());
+ }
+ if let Directive::Ignore(false) = skip_trailing_newlines {
+ tidy_error!(bad, "{}: ignoring trailing newlines unnecessarily", file.display());
+ }
+ if let Directive::Ignore(false) = skip_leading_newlines {
+ tidy_error!(bad, "{}: ignoring leading newlines unnecessarily", file.display());
+ }
+ if let Directive::Ignore(false) = skip_copyright {
+ tidy_error!(bad, "{}: ignoring copyright unnecessarily", file.display());
+ }
+ // We deliberately do not warn about these being unnecessary,
+ // that would just lead to annoying churn.
+ let _unused = skip_line_length;
+ let _unused = skip_file_length;
+ })
+}
diff --git a/src/tools/tidy/src/target_specific_tests.rs b/src/tools/tidy/src/target_specific_tests.rs
new file mode 100644
index 000000000..723684bfa
--- /dev/null
+++ b/src/tools/tidy/src/target_specific_tests.rs
@@ -0,0 +1,96 @@
+//! Tidy check to ensure that all target-specific tests (those that require a `--target` flag)
+//! also require the prerequisite LLVM components to run.
+
+use std::collections::BTreeMap;
+use std::path::Path;
+
+const COMMENT: &str = "//";
+const LLVM_COMPONENTS_HEADER: &str = "needs-llvm-components:";
+const COMPILE_FLAGS_HEADER: &str = "compile-flags:";
+
+/// Iterates through compiletest headers in a test file's contents.
+///
+/// Adjusted from compiletest/src/header.rs.
+fn iter_header<'a>(contents: &'a str, it: &mut dyn FnMut(Option<&'a str>, &'a str)) {
+ for ln in contents.lines() {
+ let ln = ln.trim();
+ if ln.starts_with(COMMENT) && ln[COMMENT.len()..].trim_start().starts_with('[') {
+ if let Some(close_brace) = ln.find(']') {
+ let open_brace = ln.find('[').unwrap();
+ let lncfg = &ln[open_brace + 1..close_brace];
+ it(Some(lncfg), ln[(close_brace + 1)..].trim_start());
+ } else {
+ panic!("malformed condition directive: expected `//[foo]`, found `{ln}`")
+ }
+ } else if ln.starts_with(COMMENT) {
+ it(None, ln[COMMENT.len()..].trim_start());
+ }
+ }
+}
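+
+// Illustrative only (hypothetical test, not part of the original source):
+// `iter_header` yields each comment directive together with its optional
+// `[revision]` condition.
+#[cfg(test)]
+mod iter_header_sketch {
+    #[test]
+    fn splits_conditions_from_directives() {
+        let src = "//[aarch64] compile-flags: --target aarch64-unknown-linux-gnu\n\
+                   // needs-llvm-components: aarch64\n";
+        let mut seen = Vec::new();
+        super::iter_header(src, &mut |cfg, directive| seen.push((cfg, directive)));
+        assert_eq!(seen[0], (Some("aarch64"), "compile-flags: --target aarch64-unknown-linux-gnu"));
+        assert_eq!(seen[1], (None, "needs-llvm-components: aarch64"));
+    }
+}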
+
+#[derive(Default, Debug)]
+struct RevisionInfo<'a> {
+ target_arch: Option<&'a str>,
+ llvm_components: Option<Vec<&'a str>>,
+}
+
+pub fn check(path: &Path, bad: &mut bool) {
+ let tests = path.join("test");
+ super::walk(
+ &tests,
+ &mut |path| path.extension().map(|p| p == "rs") == Some(false),
+ &mut |entry, content| {
+ let file = entry.path().display();
+ let mut header_map = BTreeMap::new();
+ iter_header(content, &mut |cfg, directive| {
+ if let Some(value) = directive.strip_prefix(LLVM_COMPONENTS_HEADER) {
+ let info = header_map.entry(cfg).or_insert(RevisionInfo::default());
+ let comp_vec = info.llvm_components.get_or_insert(Vec::new());
+ for component in value.split(' ') {
+ let component = component.trim();
+ if !component.is_empty() {
+ comp_vec.push(component);
+ }
+ }
+ } else if directive.starts_with(COMPILE_FLAGS_HEADER) {
+ let compile_flags = &directive[COMPILE_FLAGS_HEADER.len()..];
+ if let Some((_, v)) = compile_flags.split_once("--target") {
+ if let Some((arch, _)) =
+ v.trim_start_matches(|c| c == ' ' || c == '=').split_once("-")
+ {
+ let info = header_map.entry(cfg).or_insert(RevisionInfo::default());
+ info.target_arch.replace(arch);
+ } else {
+ eprintln!("{file}: seems to have a malformed --target value");
+ *bad = true;
+ }
+ }
+ }
+ });
+ for (rev, RevisionInfo { target_arch, llvm_components }) in &header_map {
+ let rev = rev.unwrap_or("[unspecified]");
+ match (target_arch, llvm_components) {
+ (None, None) => {}
+ (Some(_), None) => {
+ eprintln!(
+ "{}: revision {} should specify `{}` as it has `--target` set",
+ file, rev, LLVM_COMPONENTS_HEADER
+ );
+ *bad = true;
+ }
+ (None, Some(_)) => {
+ eprintln!(
+ "{}: revision {} should not specify `{}` as it doesn't need `--target`",
+ file, rev, LLVM_COMPONENTS_HEADER
+ );
+ *bad = true;
+ }
+ (Some(_), Some(_)) => {
+ // FIXME: check specified components against the target architectures we
+ // gathered.
+ }
+ }
+ }
+ },
+ );
+}
diff --git a/src/tools/tidy/src/ui_tests.rs b/src/tools/tidy/src/ui_tests.rs
new file mode 100644
index 000000000..8ec5c3324
--- /dev/null
+++ b/src/tools/tidy/src/ui_tests.rs
@@ -0,0 +1,82 @@
+//! Tidy check to ensure the following in UI test directories:
+//! - the number of entries in each directory must be less than `ENTRY_LIMIT`
+//! - there are no stray `.stderr` files
+
+use std::fs;
+use std::path::Path;
+
+const ENTRY_LIMIT: usize = 1000;
+// FIXME: The following limits should be reduced eventually.
+const ROOT_ENTRY_LIMIT: usize = 968;
+const ISSUES_ENTRY_LIMIT: usize = 2147;
+
+fn check_entries(path: &Path, bad: &mut bool) {
+ let dirs = walkdir::WalkDir::new(&path.join("test/ui"))
+ .into_iter()
+ .filter_entry(|e| e.file_type().is_dir());
+ for dir in dirs {
+ if let Ok(dir) = dir {
+ let dir_path = dir.path();
+
+ // Use special values for these dirs.
+ let is_root = path.join("test/ui") == dir_path;
+ let is_issues_dir = path.join("test/ui/issues") == dir_path;
+ let limit = if is_root {
+ ROOT_ENTRY_LIMIT
+ } else if is_issues_dir {
+ ISSUES_ENTRY_LIMIT
+ } else {
+ ENTRY_LIMIT
+ };
+
+ let count = std::fs::read_dir(dir_path).unwrap().count();
+ if count > limit {
+ tidy_error!(
+ bad,
+ "the following path contains more than {} entries, \
+ you should move the test to some relevant subdirectory (current: {}): {}",
+ limit,
+ count,
+ dir_path.display()
+ );
+ }
+ }
+ }
+}
+
+pub fn check(path: &Path, bad: &mut bool) {
+ check_entries(&path, bad);
+ for path in &[&path.join("test/ui"), &path.join("test/ui-fulldeps")] {
+ super::walk_no_read(path, &mut |_| false, &mut |entry| {
+ let file_path = entry.path();
+ if let Some(ext) = file_path.extension() {
+ if ext == "stderr" || ext == "stdout" {
+ // Test output filenames have one of the formats:
+ // ```
+ // $testname.stderr
+ // $testname.$mode.stderr
+ // $testname.$revision.stderr
+ // $testname.$revision.$mode.stderr
+ // ```
+ //
+ // For now, just make sure that there is a corresponding
+ // `$testname.rs` file.
+ //
+ // NB: We do not use file_stem() as some file names have multiple `.`s and we
+ // must strip all of them.
+ let testname =
+ file_path.file_name().unwrap().to_str().unwrap().split_once('.').unwrap().0;
+ if !file_path.with_file_name(testname).with_extension("rs").exists() {
+ tidy_error!(bad, "Stray file with UI testing output: {:?}", file_path);
+ }
+
+ if let Ok(metadata) = fs::metadata(file_path) {
+ if metadata.len() == 0 {
+ tidy_error!(bad, "Empty file with UI testing output: {:?}", file_path);
+ }
+ }
+ }
+ }
+ });
+ }
+}
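+
+// Illustrative only (hypothetical test, not part of the original source):
+// the test name is everything before the first `.` of an output file name,
+// so multi-part names like `$testname.$revision.$mode.stderr` still map
+// back to `$testname.rs`.
+#[cfg(test)]
+mod testname_sketch {
+    #[test]
+    fn strips_everything_after_the_first_dot() {
+        let file_name = "move-error.full.stderr";
+        let testname = file_name.split_once('.').unwrap().0;
+        assert_eq!(testname, "move-error");
+    }
+}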
diff --git a/src/tools/tidy/src/unit_tests.rs b/src/tools/tidy/src/unit_tests.rs
new file mode 100644
index 000000000..f675b7865
--- /dev/null
+++ b/src/tools/tidy/src/unit_tests.rs
@@ -0,0 +1,66 @@
+//! Tidy check to ensure `#[test]` and `#[bench]` are not used directly inside `core`.
+//!
+//! `#![no_core]` libraries cannot be tested directly due to duplicating lang
+//! items. All tests and benchmarks must be written externally in `core/{tests,benches}`.
+//!
+//! Outside of core, tests and benchmarks should be moved into separate files
+//! named `tests.rs` or `benches.rs`, or into directories named `tests` or
+//! `benches`, which are left unconfigured during a normal build.
+
+use std::path::Path;
+
+pub fn check(root_path: &Path, bad: &mut bool) {
+ let core = &root_path.join("core");
+ let core_tests = &core.join("tests");
+ let core_benches = &core.join("benches");
+ let is_core = |path: &Path| {
+ path.starts_with(core) && !(path.starts_with(core_tests) || path.starts_with(core_benches))
+ };
+
+ let mut skip = |path: &Path| {
+ let file_name = path.file_name().unwrap_or_default();
+ if path.is_dir() {
+ super::filter_dirs(path)
+ || path.ends_with("src/test")
+ || path.ends_with("src/doc")
+ || (file_name == "tests" || file_name == "benches") && !is_core(path)
+ } else {
+ let extension = path.extension().unwrap_or_default();
+ extension != "rs"
+ || (file_name == "tests.rs" || file_name == "benches.rs") && !is_core(path)
+ // UI tests with different names
+ || path.ends_with("src/thread/local/dynamic_tests.rs")
+ || path.ends_with("src/sync/mpsc/sync_tests.rs")
+ }
+ };
+
+ super::walk(root_path, &mut skip, &mut |entry, contents| {
+ let path = entry.path();
+ let is_core = path.starts_with(core);
+ for (i, line) in contents.lines().enumerate() {
+ let line = line.trim();
+ let is_test = || line.contains("#[test]") && !line.contains("`#[test]");
+ let is_bench = || line.contains("#[bench]") && !line.contains("`#[bench]");
+ if !line.starts_with("//") && (is_test() || is_bench()) {
+ let explanation = if is_core {
+ "core unit tests and benchmarks must be placed into \
+ `core/tests` or `core/benches`"
+ } else {
+ "unit tests and benchmarks must be placed into \
+ separate files or directories named \
+ `tests.rs`, `benches.rs`, `tests` or `benches`"
+ };
+ let name = if is_test() { "test" } else { "bench" };
+ tidy_error!(
+ bad,
+ "`{}:{}` contains `#[{}]`; {}",
+ path.display(),
+ i + 1,
+ name,
+ explanation,
+ );
+ return;
+ }
+ }
+ });
+}
diff --git a/src/tools/tidy/src/unstable_book.rs b/src/tools/tidy/src/unstable_book.rs
new file mode 100644
index 000000000..7dfb6224d
--- /dev/null
+++ b/src/tools/tidy/src/unstable_book.rs
@@ -0,0 +1,132 @@
+use crate::features::{CollectedFeatures, Features, Status};
+use std::collections::BTreeSet;
+use std::fs;
+use std::path::{Path, PathBuf};
+
+pub const PATH_STR: &str = "doc/unstable-book";
+
+pub const COMPILER_FLAGS_DIR: &str = "src/compiler-flags";
+
+pub const LANG_FEATURES_DIR: &str = "src/language-features";
+
+pub const LIB_FEATURES_DIR: &str = "src/library-features";
+
+/// Builds the path to the Unstable Book source directory from the Rust 'src' directory.
+pub fn unstable_book_path(base_src_path: &Path) -> PathBuf {
+ base_src_path.join(PATH_STR)
+}
+
+/// Builds the path to the directory where the features are documented within the Unstable Book
+/// source directory.
+pub fn unstable_book_lang_features_path(base_src_path: &Path) -> PathBuf {
+ unstable_book_path(base_src_path).join(LANG_FEATURES_DIR)
+}
+
+/// Builds the path to the directory where the features are documented within the Unstable Book
+/// source directory.
+pub fn unstable_book_lib_features_path(base_src_path: &Path) -> PathBuf {
+ unstable_book_path(base_src_path).join(LIB_FEATURES_DIR)
+}
+
+/// Tests whether `DirEntry` is a file.
+fn dir_entry_is_file(dir_entry: &fs::DirEntry) -> bool {
+ dir_entry.file_type().expect("could not determine file type of directory entry").is_file()
+}
+
+/// Retrieves names of all unstable features.
+pub fn collect_unstable_feature_names(features: &Features) -> BTreeSet<String> {
+ features
+ .iter()
+ .filter(|&(_, ref f)| f.level == Status::Unstable)
+ .map(|(name, _)| name.replace('_', "-"))
+ .collect()
+}
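+
+// Illustrative only (hypothetical test, not part of the original source):
+// the normalization above turns feature identifiers into the hyphenated
+// form used for section file names.
+#[cfg(test)]
+mod name_normalization_sketch {
+    #[test]
+    fn underscores_become_hyphens() {
+        assert_eq!("non_ascii_idents".replace('_', "-"), "non-ascii-idents");
+    }
+}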
+
+pub fn collect_unstable_book_section_file_names(dir: &Path) -> BTreeSet<String> {
+ fs::read_dir(dir)
+ .expect("could not read directory")
+ .map(|entry| entry.expect("could not read directory entry"))
+ .filter(dir_entry_is_file)
+ .map(|entry| entry.path())
+ .filter(|path| path.extension().map(|e| e.to_str().unwrap()) == Some("md"))
+ .map(|path| path.file_stem().unwrap().to_str().unwrap().into())
+ .collect()
+}
+
+/// Retrieves file names of all language feature sections in the Unstable Book,
+/// with the markdown suffix ('.md') removed.
+fn collect_unstable_book_lang_features_section_file_names(
+ base_src_path: &Path,
+) -> BTreeSet<String> {
+ collect_unstable_book_section_file_names(&unstable_book_lang_features_path(base_src_path))
+}
+
+/// Retrieves file names of all library feature sections in the Unstable Book,
+/// with the markdown suffix ('.md') removed.
+fn collect_unstable_book_lib_features_section_file_names(base_src_path: &Path) -> BTreeSet<String> {
+ collect_unstable_book_section_file_names(&unstable_book_lib_features_path(base_src_path))
+}
+
+pub fn check(path: &Path, features: CollectedFeatures, bad: &mut bool) {
+ let lang_features = features.lang;
+ let lib_features = features
+ .lib
+ .into_iter()
+ .filter(|&(ref name, _)| !lang_features.contains_key(name))
+ .collect::<Features>();
+
+ // Library features
+ let unstable_lib_feature_names = collect_unstable_feature_names(&lib_features);
+ let unstable_book_lib_features_section_file_names =
+ collect_unstable_book_lib_features_section_file_names(path);
+
+ // Language features
+ let unstable_lang_feature_names = collect_unstable_feature_names(&lang_features);
+ let unstable_book_lang_features_section_file_names =
+ collect_unstable_book_lang_features_section_file_names(path);
+
+ // Check for Unstable Book sections that don't have a corresponding unstable feature
+ for feature_name in &unstable_book_lib_features_section_file_names - &unstable_lib_feature_names
+ {
+ if !unstable_lang_feature_names.contains(&feature_name) {
+ tidy_error!(
+ bad,
+ "The Unstable Book has a 'library feature' section '{}' which doesn't \
+ correspond to an unstable library feature",
+ feature_name
+ );
+ }
+ }
+
+ // Check for Unstable Book sections that don't have a corresponding unstable feature.
+ for feature_name in
+ &unstable_book_lang_features_section_file_names - &unstable_lang_feature_names
+ {
+ tidy_error!(
+ bad,
+ "The Unstable Book has a 'language feature' section '{}' which doesn't \
+ correspond to an unstable language feature",
+ feature_name
+ )
+ }
+
+ // List unstable features that don't have Unstable Book sections.
+ // Remove the comment marker if you want the list printed.
+ /*
+ println!("Lib features without unstable book sections:");
+ for feature_name in &unstable_lib_feature_names -
+ &unstable_book_lib_features_section_file_names {
+ println!(" * {} {:?}", feature_name, lib_features[&feature_name].tracking_issue);
+ }
+
+ println!("Lang features without unstable book sections:");
+ for feature_name in &unstable_lang_feature_names -
+ &unstable_book_lang_features_section_file_names {
+ println!(" * {} {:?}", feature_name, lang_features[&feature_name].tracking_issue);
+ }
+ // */
+}
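+
+// Illustrative only (hypothetical test, not part of the original source):
+// the core of the check above is a `BTreeSet` difference: section file
+// names minus known unstable feature names leaves the stray sections.
+#[cfg(test)]
+mod set_difference_sketch {
+    use std::collections::BTreeSet;
+
+    #[test]
+    fn difference_finds_undocumented_sections() {
+        let sections: BTreeSet<String> =
+            ["let-chains", "not-a-feature"].iter().map(|s| s.to_string()).collect();
+        let features: BTreeSet<String> =
+            ["let-chains"].iter().map(|s| s.to_string()).collect();
+        let stray: Vec<String> = (&sections - &features).into_iter().collect();
+        assert_eq!(stray, vec!["not-a-feature".to_string()]);
+    }
+}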