summaryrefslogtreecommitdiffstats
path: root/library/test/src
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-17 12:02:58 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-17 12:02:58 +0000
commit698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree173a775858bd501c378080a10dca74132f05bc50 /library/test/src
parentInitial commit. (diff)
downloadrustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.tar.xz
rustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.zip
Adding upstream version 1.64.0+dfsg1.upstream/1.64.0+dfsg1
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library/test/src')
-rw-r--r--library/test/src/bench.rs242
-rw-r--r--library/test/src/cli.rs493
-rw-r--r--library/test/src/console.rs307
-rw-r--r--library/test/src/event.rs36
-rw-r--r--library/test/src/formatters/json.rs260
-rw-r--r--library/test/src/formatters/junit.rs180
-rw-r--r--library/test/src/formatters/mod.rs42
-rw-r--r--library/test/src/formatters/pretty.rs281
-rw-r--r--library/test/src/formatters/terse.rs259
-rw-r--r--library/test/src/helpers/concurrency.rs14
-rw-r--r--library/test/src/helpers/exit_code.rs20
-rw-r--r--library/test/src/helpers/isatty.rs32
-rw-r--r--library/test/src/helpers/metrics.rs50
-rw-r--r--library/test/src/helpers/mod.rs8
-rw-r--r--library/test/src/helpers/shuffle.rs67
-rw-r--r--library/test/src/lib.rs696
-rw-r--r--library/test/src/options.rs89
-rw-r--r--library/test/src/stats.rs302
-rw-r--r--library/test/src/stats/tests.rs591
-rw-r--r--library/test/src/term.rs85
-rw-r--r--library/test/src/term/terminfo/mod.rs185
-rw-r--r--library/test/src/term/terminfo/parm.rs532
-rw-r--r--library/test/src/term/terminfo/parm/tests.rs124
-rw-r--r--library/test/src/term/terminfo/parser/compiled.rs336
-rw-r--r--library/test/src/term/terminfo/parser/compiled/tests.rs8
-rw-r--r--library/test/src/term/terminfo/searcher.rs69
-rw-r--r--library/test/src/term/terminfo/searcher/tests.rs19
-rw-r--r--library/test/src/term/win.rs170
-rw-r--r--library/test/src/test_result.rs108
-rw-r--r--library/test/src/tests.rs823
-rw-r--r--library/test/src/time.rs197
-rw-r--r--library/test/src/types.rs167
32 files changed, 6792 insertions, 0 deletions
diff --git a/library/test/src/bench.rs b/library/test/src/bench.rs
new file mode 100644
index 000000000..7869ba2c0
--- /dev/null
+++ b/library/test/src/bench.rs
@@ -0,0 +1,242 @@
+//! Benchmarking module.
+use super::{
+ event::CompletedTest,
+ options::BenchMode,
+ test_result::TestResult,
+ types::{TestDesc, TestId},
+ Sender,
+};
+
+use crate::stats;
+use std::cmp;
+use std::io;
+use std::panic::{catch_unwind, AssertUnwindSafe};
+use std::sync::{Arc, Mutex};
+use std::time::{Duration, Instant};
+
/// Identity function that opaquely passes `dummy` through, *hinting* to the
/// compiler that the value could be used in arbitrary ways, which prevents
/// benchmarked computations from being optimized away.
///
/// Thin forwarding wrapper around [`std::hint::black_box`].
#[inline(always)]
pub fn black_box<T>(dummy: T) -> T {
    std::hint::black_box(dummy)
}
+
/// Manager of the benchmarking runs.
///
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up & tear-down before running a piece of code repeatedly via a
/// call to `iter`.
#[derive(Clone)]
pub struct Bencher {
    // Whether to run the full convergence loop or just one pass (`Single`).
    mode: BenchMode,
    // Summary of the most recent `iter` call; `None` until `iter` has run.
    summary: Option<stats::Summary>,
    // Bytes processed per iteration; `benchmark` uses this to derive MB/s.
    pub bytes: u64,
}
+
impl Bencher {
    /// Callback for benchmark functions to run in their body.
    ///
    /// Repeatedly times `inner` and stores a statistical summary, except in
    /// `BenchMode::Single` where it runs once and records nothing.
    pub fn iter<T, F>(&mut self, mut inner: F)
    where
        F: FnMut() -> T,
    {
        if self.mode == BenchMode::Single {
            ns_iter_inner(&mut inner, 1);
            return;
        }

        self.summary = Some(iter(&mut inner));
    }

    /// Runs the benchmark function `f` once; returns the summary recorded by
    /// its (expected) call to `iter`, or `None` if `iter` was never called.
    pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
    where
        F: FnMut(&mut Bencher),
    {
        f(self);
        self.summary
    }
}
+
/// Timing samples collected from a single benchmark run.
#[derive(Debug, Clone, PartialEq)]
pub struct BenchSamples {
    // Summary statistics of the nanoseconds-per-iteration measurements.
    pub ns_iter_summ: stats::Summary,
    // Throughput in MB/s; 0 when `Bencher::bytes` was left at 0.
    pub mb_s: usize,
}
+
+pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
+ use std::fmt::Write;
+ let mut output = String::new();
+
+ let median = bs.ns_iter_summ.median as usize;
+ let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
+
+ write!(
+ output,
+ "{:>11} ns/iter (+/- {})",
+ fmt_thousands_sep(median, ','),
+ fmt_thousands_sep(deviation, ',')
+ )
+ .unwrap();
+ if bs.mb_s != 0 {
+ write!(output, " = {} MB/s", bs.mb_s).unwrap();
+ }
+ output
+}
+
// Format a number with thousands separators, e.g. 1234567 -> "1,234,567".
//
// Unlike the previous hard-coded power-of-ten approach (which only handled
// groups up to 10^9 and mis-rendered values >= 10^12 by leaving the leading
// group unseparated), this groups digits from the least-significant end and
// works for any `usize` value.
fn fmt_thousands_sep(n: usize, sep: char) -> String {
    let digits = n.to_string();
    let mut output = String::with_capacity(digits.len() + digits.len() / 3);
    for (i, digit) in digits.chars().enumerate() {
        // Insert a separator before every group of three digits counted from
        // the right, but never before the very first digit.
        if i > 0 && (digits.len() - i) % 3 == 0 {
            output.push(sep);
        }
        output.push(digit);
    }
    output
}
+
// Times `k` invocations of `inner`, returning the total wall-clock duration
// in nanoseconds. Results are passed through `black_box` so the optimizer
// cannot discard the benchmarked work.
fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
where
    F: FnMut() -> T,
{
    let timer = Instant::now();
    (0..k).for_each(|_| {
        black_box(inner());
    });
    timer.elapsed().as_nanos() as u64
}
+
/// Repeatedly times `inner` until the per-iteration timings appear to have
/// converged (or a ~3s budget is exhausted) and returns summary statistics
/// of the nanoseconds-per-iteration measurements.
pub fn iter<T, F>(inner: &mut F) -> stats::Summary
where
    F: FnMut() -> T,
{
    // Initial bench run to get ballpark figure.
    let ns_single = ns_iter_inner(inner, 1);

    // Try to estimate iter count for 1ms falling back to 1m
    // iterations if first run took < 1ns.
    let ns_target_total = 1_000_000; // 1ms
    let mut n = ns_target_total / cmp::max(1, ns_single);

    // if the first run took more than 1ms we don't want to just
    // be left doing 0 iterations on every loop. The unfortunate
    // side effect of not being able to do as many runs is
    // automatically handled by the statistical analysis below
    // (i.e., larger error bars).
    n = cmp::max(1, n);

    let mut total_run = Duration::new(0, 0);
    // One fixed-size buffer of 50 samples, reused for both measurement passes.
    let samples: &mut [f64] = &mut [0.0_f64; 50];
    loop {
        let loop_start = Instant::now();

        // First pass: 50 samples of `n` iterations each.
        for p in &mut *samples {
            *p = ns_iter_inner(inner, n) as f64 / n as f64;
        }

        // Clamp outliers before summarizing.
        stats::winsorize(samples, 5.0);
        let summ = stats::Summary::new(samples);

        // Second pass: 50 samples at 5x the iteration count, as a
        // consistency check against the first pass.
        for p in &mut *samples {
            let ns = ns_iter_inner(inner, 5 * n);
            *p = ns as f64 / (5 * n) as f64;
        }

        stats::winsorize(samples, 5.0);
        let summ5 = stats::Summary::new(samples);

        let loop_run = loop_start.elapsed();

        // If we've run for 100ms and seem to have converged to a
        // stable median.
        if loop_run > Duration::from_millis(100)
            && summ.median_abs_dev_pct < 1.0
            && summ.median - summ5.median < summ5.median_abs_dev
        {
            return summ5;
        }

        total_run += loop_run;
        // Longest we ever run for is 3s.
        if total_run > Duration::from_secs(3) {
            return summ5;
        }

        // If we overflow here just return the results so far. We check a
        // multiplier of 10 because we're about to multiply by 2 and the
        // next iteration of the loop will also multiply by 5 (to calculate
        // the summ5 result)
        n = match n.checked_mul(10) {
            Some(_) => n * 2,
            None => {
                return summ5;
            }
        };
    }
}
+
/// Runs the benchmark function `f` to completion, capturing its stdout/stderr
/// (unless `nocapture`), and reports the outcome for test `id`/`desc` over
/// `monitor_ch` as a `CompletedTest` message.
pub fn benchmark<F>(
    id: TestId,
    desc: TestDesc,
    monitor_ch: Sender<CompletedTest>,
    nocapture: bool,
    f: F,
) where
    F: FnMut(&mut Bencher),
{
    let mut bs = Bencher { mode: BenchMode::Auto, summary: None, bytes: 0 };

    // Shared buffer receiving any output the benchmark prints while captured.
    let data = Arc::new(Mutex::new(Vec::new()));

    if !nocapture {
        io::set_output_capture(Some(data.clone()));
    }

    // Catch panics so a crashing benchmark is reported as a failure rather
    // than taking down the harness.
    let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));

    io::set_output_capture(None);

    let test_result = match result {
        Ok(Some(ns_iter_summ)) => {
            let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
            // bytes/iter divided by ns/iter, scaled by 1000 == MB/s.
            let mb_s = bs.bytes * 1000 / ns_iter;

            let bs = BenchSamples { ns_iter_summ, mb_s: mb_s as usize };
            TestResult::TrBench(bs)
        }
        Ok(None) => {
            // iter not called, so no data.
            // FIXME: error in this case?
            let samples: &mut [f64] = &mut [0.0_f64; 1];
            let bs = BenchSamples { ns_iter_summ: stats::Summary::new(samples), mb_s: 0 };
            TestResult::TrBench(bs)
        }
        Err(_) => TestResult::TrFailed,
    };

    let stdout = data.lock().unwrap().to_vec();
    let message = CompletedTest::new(id, desc, test_result, None, stdout);
    monitor_ch.send(message).unwrap();
}
+
+pub fn run_once<F>(f: F)
+where
+ F: FnMut(&mut Bencher),
+{
+ let mut bs = Bencher { mode: BenchMode::Single, summary: None, bytes: 0 };
+ bs.bench(f);
+}
diff --git a/library/test/src/cli.rs b/library/test/src/cli.rs
new file mode 100644
index 000000000..f981b9c49
--- /dev/null
+++ b/library/test/src/cli.rs
@@ -0,0 +1,493 @@
+//! Module converting command-line arguments into test configuration.
+
+use std::env;
+use std::path::PathBuf;
+
+use super::helpers::isatty;
+use super::options::{ColorConfig, Options, OutputFormat, RunIgnored};
+use super::time::TestTimeOptions;
+
/// Fully parsed configuration for a test-harness invocation.
#[derive(Debug)]
pub struct TestOpts {
    // List tests instead of running them.
    pub list: bool,
    // Substring (or exact, see `filter_exact`) name filters; empty = run all.
    pub filters: Vec<String>,
    // Match `filters` exactly rather than by substring.
    pub filter_exact: bool,
    // Force in-process execution even when panic=abort (unstable).
    pub force_run_in_process: bool,
    // Skip tests marked `should_panic` (unstable).
    pub exclude_should_panic: bool,
    // How `#[ignore]`d tests are treated.
    pub run_ignored: RunIgnored,
    pub run_tests: bool,
    pub bench_benchmarks: bool,
    // Optional file receiving a log of test results.
    pub logfile: Option<PathBuf>,
    // Disable capturing of test stdout/stderr.
    pub nocapture: bool,
    pub color: ColorConfig,
    pub format: OutputFormat,
    // Shuffle execution order; `shuffle_seed` makes the order reproducible.
    pub shuffle: bool,
    pub shuffle_seed: Option<u64>,
    // Worker-thread count; `None` = pick based on available concurrency.
    pub test_threads: Option<usize>,
    // Name fragments to exclude.
    pub skip: Vec<String>,
    // Execution-time reporting/enforcement; `None` = disabled.
    pub time_options: Option<TestTimeOptions>,
    pub options: Options,
}
+
impl TestOpts {
    /// Whether output should be colorized: `AutoColor` colorizes only when
    /// capturing is enabled and stdout is a terminal; the other settings are
    /// unconditional.
    pub fn use_color(&self) -> bool {
        match self.color {
            ColorConfig::AutoColor => !self.nocapture && isatty::stdout_isatty(),
            ColorConfig::AlwaysColor => true,
            ColorConfig::NeverColor => false,
        }
    }
}
+
/// Result of parsing all command-line options; `Err` carries a user-facing message.
pub type OptRes = Result<TestOpts, String>;
/// Result of parsing one option (or a group of related options).
type OptPartRes<T> = Result<T, String>;
+
/// Builds the `getopts` table of every flag and option the test harness accepts.
///
/// Note: several entries are nightly-only and are gated later (see the
/// `unstable_optflag!`/`unstable_optopt!` macros), not here.
fn optgroups() -> getopts::Options {
    let mut opts = getopts::Options::new();
    opts.optflag("", "include-ignored", "Run ignored and not ignored tests")
        .optflag("", "ignored", "Run only ignored tests")
        .optflag("", "force-run-in-process", "Forces tests to run in-process when panic=abort")
        .optflag("", "exclude-should-panic", "Excludes tests marked as should_panic")
        .optflag("", "test", "Run tests and not benchmarks")
        .optflag("", "bench", "Run benchmarks instead of tests")
        .optflag("", "list", "List all tests and benchmarks")
        .optflag("h", "help", "Display this message")
        .optopt("", "logfile", "Write logs to the specified file", "PATH")
        .optflag(
            "",
            "nocapture",
            "don't capture stdout/stderr of each \
             task, allow printing directly",
        )
        .optopt(
            "",
            "test-threads",
            "Number of threads used for running tests \
             in parallel",
            "n_threads",
        )
        .optmulti(
            "",
            "skip",
            "Skip tests whose names contain FILTER (this flag can \
             be used multiple times)",
            "FILTER",
        )
        .optflag(
            "q",
            "quiet",
            "Display one character per test instead of one line. \
             Alias to --format=terse",
        )
        .optflag("", "exact", "Exactly match filters rather than by substring")
        .optopt(
            "",
            "color",
            "Configure coloring of output:
            auto = colorize if stdout is a tty and tests are run on serially (default);
            always = always colorize output;
            never = never colorize output;",
            "auto|always|never",
        )
        .optopt(
            "",
            "format",
            "Configure formatting of output:
            pretty = Print verbose output;
            terse = Display one character per test;
            json = Output a json document;
            junit = Output a JUnit document",
            "pretty|terse|json|junit",
        )
        .optflag("", "show-output", "Show captured stdout of successful tests")
        .optopt(
            "Z",
            "",
            "Enable nightly-only flags:
            unstable-options = Allow use of experimental features",
            "unstable-options",
        )
        .optflag(
            "",
            "report-time",
            "Show execution time of each test.

            Threshold values for colorized output can be configured via
            `RUST_TEST_TIME_UNIT`, `RUST_TEST_TIME_INTEGRATION` and
            `RUST_TEST_TIME_DOCTEST` environment variables.

            Expected format of environment variable is `VARIABLE=WARN_TIME,CRITICAL_TIME`.
            Durations must be specified in milliseconds, e.g. `500,2000` means that the warn time
            is 0.5 seconds, and the critical time is 2 seconds.

            Not available for --format=terse",
        )
        .optflag(
            "",
            "ensure-time",
            "Treat excess of the test execution time limit as error.

            Threshold values for this option can be configured via
            `RUST_TEST_TIME_UNIT`, `RUST_TEST_TIME_INTEGRATION` and
            `RUST_TEST_TIME_DOCTEST` environment variables.

            Expected format of environment variable is `VARIABLE=WARN_TIME,CRITICAL_TIME`.

            `CRITICAL_TIME` here means the limit that should not be exceeded by test.
            ",
        )
        .optflag("", "shuffle", "Run tests in random order")
        .optopt(
            "",
            "shuffle-seed",
            "Run tests in random order; seed the random number generator with SEED",
            "SEED",
        );
    opts
}
+
/// Prints the full `--help` text (usage line plus filter/attribute notes)
/// for the test binary named `binary` to stdout.
fn usage(binary: &str, options: &getopts::Options) {
    let message = format!("Usage: {binary} [OPTIONS] [FILTERS...]");
    println!(
        r#"{usage}

The FILTER string is tested against the name of all tests, and only those
tests whose names contain the filter are run. Multiple filter strings may
be passed, which will run all tests matching any of the filters.

By default, all tests are run in parallel. This can be altered with the
--test-threads flag or the RUST_TEST_THREADS environment variable when running
tests (set it to 1).

By default, the tests are run in alphabetical order. Use --shuffle or set
RUST_TEST_SHUFFLE to run the tests in random order. Pass the generated
"shuffle seed" to --shuffle-seed (or set RUST_TEST_SHUFFLE_SEED) to run the
tests in the same order again. Note that --shuffle and --shuffle-seed do not
affect whether the tests are run in parallel.

All tests have their standard output and standard error captured by default.
This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
environment variable to a value other than "0". Logging is not captured by default.

Test Attributes:

    `#[test]`        - Indicates a function is a test to be run. This function
                       takes no arguments.
    `#[bench]`       - Indicates a function is a benchmark to be run. This
                       function takes one argument (test::Bencher).
    `#[should_panic]` - This function (also labeled with `#[test]`) will only pass if
                        the code causes a panic (an assertion failure or panic!)
                        A message may be provided, which the failure string must
                        contain: #[should_panic(expected = "foo")].
    `#[ignore]`       - When applied to a function which is already attributed as a
                        test, then the test runner will ignore these tests during
                        normal test runs. Running with --ignored or --include-ignored will run
                        these tests."#,
        usage = options.usage(&message)
    );
}
+
/// Parses command line arguments into test options.
/// Returns `None` if help was requested (since we only show help message and don't run tests),
/// returns `Some(Err(..))` if provided arguments are incorrect,
/// otherwise creates a `TestOpts` object and returns it.
pub fn parse_opts(args: &[String]) -> Option<OptRes> {
    // Parse matches.
    let opts = optgroups();
    // args[0] is the binary name (used in the usage string); fall back to a
    // placeholder when the argument vector is empty.
    let binary = args.get(0).map(|c| &**c).unwrap_or("...");
    let args = args.get(1..).unwrap_or(args);
    let matches = match opts.parse(args) {
        Ok(m) => m,
        Err(f) => return Some(Err(f.to_string())),
    };

    // Check if help was requested.
    if matches.opt_present("h") {
        // Show help and do nothing more.
        usage(binary, &opts);
        return None;
    }

    // Actually parse the opts.
    let opts_result = parse_opts_impl(matches);

    Some(opts_result)
}
+
// Gets a boolean flag's value, erroring out if the flag was passed without
// unstable features (`-Z unstable-options`) being enabled.
macro_rules! unstable_optflag {
    ($matches:ident, $allow_unstable:ident, $option_name:literal) => {{
        let opt = $matches.opt_present($option_name);
        if !$allow_unstable && opt {
            return Err(format!(
                "The \"{}\" flag is only accepted on the nightly compiler with -Z unstable-options",
                $option_name
            ));
        }

        opt
    }};
}
+
// Gets a string option's value, erroring out if the option was passed without
// unstable features (`-Z unstable-options`) being enabled.
macro_rules! unstable_optopt {
    ($matches:ident, $allow_unstable:ident, $option_name:literal) => {{
        let opt = $matches.opt_str($option_name);
        if !$allow_unstable && opt.is_some() {
            return Err(format!(
                "The \"{}\" option is only accepted on the nightly compiler with -Z unstable-options",
                $option_name
            ));
        }

        opt
    }};
}
+
// Implementation of `parse_opts` that doesn't care about help message
// and returns a `Result`. Each group of options is delegated to a small
// `get_*` helper; the first helper to fail short-circuits via `?`.
fn parse_opts_impl(matches: getopts::Matches) -> OptRes {
    let allow_unstable = get_allow_unstable(&matches)?;

    // Unstable flags
    let force_run_in_process = unstable_optflag!(matches, allow_unstable, "force-run-in-process");
    let exclude_should_panic = unstable_optflag!(matches, allow_unstable, "exclude-should-panic");
    let time_options = get_time_options(&matches, allow_unstable)?;
    let shuffle = get_shuffle(&matches, allow_unstable)?;
    let shuffle_seed = get_shuffle_seed(&matches, allow_unstable)?;

    let include_ignored = matches.opt_present("include-ignored");
    let quiet = matches.opt_present("quiet");
    let exact = matches.opt_present("exact");
    let list = matches.opt_present("list");
    let skip = matches.opt_strs("skip");

    // `--bench` selects benchmarks; tests still run unless benchmarks were
    // requested without `--test`.
    let bench_benchmarks = matches.opt_present("bench");
    let run_tests = !bench_benchmarks || matches.opt_present("test");

    let logfile = get_log_file(&matches)?;
    let run_ignored = get_run_ignored(&matches, include_ignored)?;
    // Free (non-flag) arguments are the name filters.
    let filters = matches.free.clone();
    let nocapture = get_nocapture(&matches)?;
    let test_threads = get_test_threads(&matches)?;
    let color = get_color_config(&matches)?;
    let format = get_format(&matches, quiet, allow_unstable)?;

    let options = Options::new().display_output(matches.opt_present("show-output"));

    let test_opts = TestOpts {
        list,
        filters,
        filter_exact: exact,
        force_run_in_process,
        exclude_should_panic,
        run_ignored,
        run_tests,
        bench_benchmarks,
        logfile,
        nocapture,
        color,
        format,
        shuffle,
        shuffle_seed,
        test_threads,
        skip,
        time_options,
        options,
    };

    Ok(test_opts)
}
+
// FIXME: Copied from librustc_ast until linkage errors are resolved. Issue #47566
//
// Returns `true` when unstable features may be used: either this is not a
// feature-staged (beta/stable) build, or `RUSTC_BOOTSTRAP` is set in the
// environment to re-enable them for bootstrapping.
fn is_nightly() -> bool {
    // Compile-time marker set on feature-staged (beta/stable channel) builds.
    let staged = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
    // Run-time escape hatch used while bootstrapping the compiler.
    let bootstrapping = env::var("RUSTC_BOOTSTRAP").is_ok();

    bootstrapping || !staged
}
+
// Gets the CLI options associated with `report-time` feature.
// Both flags are unstable and require `-Z unstable-options`.
fn get_time_options(
    matches: &getopts::Matches,
    allow_unstable: bool,
) -> OptPartRes<Option<TestTimeOptions>> {
    let report_time = unstable_optflag!(matches, allow_unstable, "report-time");
    let ensure_test_time = unstable_optflag!(matches, allow_unstable, "ensure-time");

    // If `ensure-test-time` option is provided, time output is enforced,
    // so user won't be confused if any of tests will silently fail.
    let options = if report_time || ensure_test_time {
        Some(TestTimeOptions::new_from_env(ensure_test_time))
    } else {
        None
    };

    Ok(options)
}
+
// Determines whether to shuffle tests: the unstable `--shuffle` flag wins;
// otherwise (when unstable options are allowed) the `RUST_TEST_SHUFFLE`
// environment variable enables shuffling for any value other than "0".
fn get_shuffle(matches: &getopts::Matches, allow_unstable: bool) -> OptPartRes<bool> {
    let mut shuffle = unstable_optflag!(matches, allow_unstable, "shuffle");
    if !shuffle && allow_unstable {
        shuffle = match env::var("RUST_TEST_SHUFFLE") {
            Ok(val) => &val != "0",
            Err(_) => false,
        };
    }

    Ok(shuffle)
}
+
+fn get_shuffle_seed(matches: &getopts::Matches, allow_unstable: bool) -> OptPartRes<Option<u64>> {
+ let mut shuffle_seed = match unstable_optopt!(matches, allow_unstable, "shuffle-seed") {
+ Some(n_str) => match n_str.parse::<u64>() {
+ Ok(n) => Some(n),
+ Err(e) => {
+ return Err(format!(
+ "argument for --shuffle-seed must be a number \
+ (error: {})",
+ e
+ ));
+ }
+ },
+ None => None,
+ };
+
+ if shuffle_seed.is_none() && allow_unstable {
+ shuffle_seed = match env::var("RUST_TEST_SHUFFLE_SEED") {
+ Ok(val) => match val.parse::<u64>() {
+ Ok(n) => Some(n),
+ Err(_) => panic!("RUST_TEST_SHUFFLE_SEED is `{val}`, should be a number."),
+ },
+ Err(_) => None,
+ };
+ }
+
+ Ok(shuffle_seed)
+}
+
// Parses `--test-threads`, rejecting zero and non-numeric values;
// `None` means the caller should pick a default concurrency.
fn get_test_threads(matches: &getopts::Matches) -> OptPartRes<Option<usize>> {
    let test_threads = match matches.opt_str("test-threads") {
        Some(n_str) => match n_str.parse::<usize>() {
            Ok(0) => return Err("argument for --test-threads must not be 0".to_string()),
            Ok(n) => Some(n),
            Err(e) => {
                return Err(format!(
                    "argument for --test-threads must be a number > 0 \
                     (error: {})",
                    e
                ));
            }
        },
        None => None,
    };

    Ok(test_threads)
}
+
// Parses `--format`. With no explicit format, `--quiet` selects terse and
// everything else defaults to pretty; the json and junit formats are
// nightly-only.
fn get_format(
    matches: &getopts::Matches,
    quiet: bool,
    allow_unstable: bool,
) -> OptPartRes<OutputFormat> {
    let format = match matches.opt_str("format").as_deref() {
        None if quiet => OutputFormat::Terse,
        Some("pretty") | None => OutputFormat::Pretty,
        Some("terse") => OutputFormat::Terse,
        Some("json") => {
            if !allow_unstable {
                return Err("The \"json\" format is only accepted on the nightly compiler".into());
            }
            OutputFormat::Json
        }
        Some("junit") => {
            if !allow_unstable {
                return Err("The \"junit\" format is only accepted on the nightly compiler".into());
            }
            OutputFormat::Junit
        }
        Some(v) => {
            return Err(format!(
                "argument for --format must be pretty, terse, json or junit (was \
                 {})",
                v
            ));
        }
    };

    Ok(format)
}
+
// Parses `--color`; absent means auto-detection.
fn get_color_config(matches: &getopts::Matches) -> OptPartRes<ColorConfig> {
    let color = match matches.opt_str("color").as_deref() {
        Some("auto") | None => ColorConfig::AutoColor,
        Some("always") => ColorConfig::AlwaysColor,
        Some("never") => ColorConfig::NeverColor,

        Some(v) => {
            return Err(format!(
                "argument for --color must be auto, always, or never (was \
                 {})",
                v
            ));
        }
    };

    Ok(color)
}
+
+fn get_nocapture(matches: &getopts::Matches) -> OptPartRes<bool> {
+ let mut nocapture = matches.opt_present("nocapture");
+ if !nocapture {
+ nocapture = match env::var("RUST_TEST_NOCAPTURE") {
+ Ok(val) => &val != "0",
+ Err(_) => false,
+ };
+ }
+
+ Ok(nocapture)
+}
+
// Resolves the `--ignored`/`--include-ignored` pair into a `RunIgnored`
// policy; passing both is an error.
fn get_run_ignored(matches: &getopts::Matches, include_ignored: bool) -> OptPartRes<RunIgnored> {
    let run_ignored = match (include_ignored, matches.opt_present("ignored")) {
        (true, true) => {
            return Err("the options --include-ignored and --ignored are mutually exclusive".into());
        }
        (true, false) => RunIgnored::Yes,
        (false, true) => RunIgnored::Only,
        (false, false) => RunIgnored::No,
    };

    Ok(run_ignored)
}
+
// Handles `-Z`: only `-Z unstable-options` is recognized, and only on a
// nightly (or bootstrapping) compiler.
fn get_allow_unstable(matches: &getopts::Matches) -> OptPartRes<bool> {
    let mut allow_unstable = false;

    if let Some(opt) = matches.opt_str("Z") {
        if !is_nightly() {
            return Err("the option `Z` is only accepted on the nightly compiler".into());
        }

        match &*opt {
            "unstable-options" => {
                allow_unstable = true;
            }
            _ => {
                return Err("Unrecognized option to `Z`".into());
            }
        }
    };

    Ok(allow_unstable)
}
+
+fn get_log_file(matches: &getopts::Matches) -> OptPartRes<Option<PathBuf>> {
+ let logfile = matches.opt_str("logfile").map(|s| PathBuf::from(&s));
+
+ Ok(logfile)
+}
diff --git a/library/test/src/console.rs b/library/test/src/console.rs
new file mode 100644
index 000000000..e9dda9896
--- /dev/null
+++ b/library/test/src/console.rs
@@ -0,0 +1,307 @@
+//! Module providing interface for running tests in the console.
+
+use std::fs::File;
+use std::io;
+use std::io::prelude::Write;
+use std::time::Instant;
+
+use super::{
+ bench::fmt_bench_samples,
+ cli::TestOpts,
+ event::{CompletedTest, TestEvent},
+ filter_tests,
+ formatters::{JsonFormatter, JunitFormatter, OutputFormatter, PrettyFormatter, TerseFormatter},
+ helpers::{concurrency::get_concurrency, metrics::MetricMap},
+ options::{Options, OutputFormat},
+ run_tests, term,
+ test_result::TestResult,
+ time::{TestExecTime, TestSuiteExecTime},
+ types::{NamePadding, TestDesc, TestDescAndFn},
+};
+
/// Generic wrapper over stdout.
///
/// `Pretty` writes through a terminal handle (which supports the `term`
/// coloring API); `Raw` writes to a plain `Write` sink.
pub enum OutputLocation<T> {
    Pretty(Box<term::StdoutTerminal>),
    Raw(T),
}
+
// Delegates `Write` to whichever underlying sink is wrapped.
impl<T: Write> Write for OutputLocation<T> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        match *self {
            OutputLocation::Pretty(ref mut term) => term.write(buf),
            OutputLocation::Raw(ref mut stdout) => stdout.write(buf),
        }
    }

    fn flush(&mut self) -> io::Result<()> {
        match *self {
            OutputLocation::Pretty(ref mut term) => term.flush(),
            OutputLocation::Raw(ref mut stdout) => stdout.flush(),
        }
    }
}
+
/// Mutable state accumulated while a test run is reported to the console.
pub struct ConsoleTestState {
    // Open log file when `--logfile` was given.
    pub log_out: Option<File>,
    // Number of tests selected to run.
    pub total: usize,
    pub passed: usize,
    pub failed: usize,
    pub ignored: usize,
    // Tests excluded by the name filters.
    pub filtered_out: usize,
    // Benchmarks that produced measurements.
    pub measured: usize,
    // Wall-clock duration of the whole run, when measurable.
    pub exec_time: Option<TestSuiteExecTime>,
    pub metrics: MetricMap,
    // (descriptor, captured output) for each category of completed test.
    pub failures: Vec<(TestDesc, Vec<u8>)>,
    pub not_failures: Vec<(TestDesc, Vec<u8>)>,
    pub time_failures: Vec<(TestDesc, Vec<u8>)>,
    pub options: Options,
}
+
impl ConsoleTestState {
    /// Creates zeroed run state, opening the logfile from `opts` if one was
    /// requested (propagating any I/O error from `File::create`).
    pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
        let log_out = match opts.logfile {
            Some(ref path) => Some(File::create(path)?),
            None => None,
        };

        Ok(ConsoleTestState {
            log_out,
            total: 0,
            passed: 0,
            failed: 0,
            ignored: 0,
            filtered_out: 0,
            measured: 0,
            exec_time: None,
            metrics: MetricMap::new(),
            failures: Vec::new(),
            not_failures: Vec::new(),
            time_failures: Vec::new(),
            options: opts.options,
        })
    }

    /// Writes `msg` to the logfile, if any. The message is built lazily so
    /// no formatting work happens when logging is disabled.
    pub fn write_log<F, S>(&mut self, msg: F) -> io::Result<()>
    where
        S: AsRef<str>,
        F: FnOnce() -> S,
    {
        match self.log_out {
            None => Ok(()),
            Some(ref mut o) => {
                let msg = msg();
                let msg = msg.as_ref();
                o.write_all(msg.as_bytes())
            }
        }
    }

    /// Logs one finished test as a `<status> <name>[ <time>]` line.
    pub fn write_log_result(
        &mut self,
        test: &TestDesc,
        result: &TestResult,
        exec_time: Option<&TestExecTime>,
    ) -> io::Result<()> {
        self.write_log(|| {
            let TestDesc { name, ignore_message, .. } = test;
            format!(
                "{} {}",
                match *result {
                    TestResult::TrOk => "ok".to_owned(),
                    TestResult::TrFailed => "failed".to_owned(),
                    TestResult::TrFailedMsg(ref msg) => format!("failed: {msg}"),
                    TestResult::TrIgnored => {
                        if let Some(msg) = ignore_message {
                            format!("ignored: {msg}")
                        } else {
                            "ignored".to_owned()
                        }
                    }
                    TestResult::TrBench(ref bs) => fmt_bench_samples(bs),
                    TestResult::TrTimedFail => "failed (time limit exceeded)".to_owned(),
                },
                name,
            )
        })?;
        if let Some(exec_time) = exec_time {
            self.write_log(|| format!(" <{exec_time}>"))?;
        }
        self.write_log(|| "\n")
    }

    // Number of tests that have produced a result so far.
    fn current_test_count(&self) -> usize {
        self.passed + self.failed + self.ignored + self.measured
    }
}
+
// List the tests to console, and optionally to logfile. Filters are honored.
pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
    let mut output = match term::stdout() {
        None => OutputLocation::Raw(io::stdout().lock()),
        Some(t) => OutputLocation::Pretty(t),
    };

    // Terse mode suppresses the trailing totals line.
    let quiet = opts.format == OutputFormat::Terse;
    let mut st = ConsoleTestState::new(opts)?;

    let mut ntest = 0;
    let mut nbench = 0;

    for test in filter_tests(&opts, tests) {
        use crate::TestFn::*;

        let TestDescAndFn { desc: TestDesc { name, .. }, testfn } = test;

        // Classify each entry as a test or a benchmark and count it.
        let fntype = match testfn {
            StaticTestFn(..) | DynTestFn(..) => {
                ntest += 1;
                "test"
            }
            StaticBenchFn(..) | DynBenchFn(..) => {
                nbench += 1;
                "benchmark"
            }
        };

        writeln!(output, "{name}: {fntype}")?;
        st.write_log(|| format!("{fntype} {name}\n"))?;
    }

    // Naive English pluralization for the totals line.
    fn plural(count: u32, s: &str) -> String {
        match count {
            1 => format!("1 {s}"),
            n => format!("{n} {s}s"),
        }
    }

    if !quiet {
        if ntest != 0 || nbench != 0 {
            writeln!(output)?;
        }

        writeln!(output, "{}, {}", plural(ntest, "test"), plural(nbench, "benchmark"))?;
    }

    Ok(())
}
+
// Updates `ConsoleTestState` depending on result of the test execution:
// bumps the matching counter and files the test (with its captured output)
// into the appropriate list.
fn handle_test_result(st: &mut ConsoleTestState, completed_test: CompletedTest) {
    let test = completed_test.desc;
    let stdout = completed_test.stdout;
    match completed_test.result {
        TestResult::TrOk => {
            st.passed += 1;
            st.not_failures.push((test, stdout));
        }
        TestResult::TrIgnored => st.ignored += 1,
        TestResult::TrBench(bs) => {
            // Record median and max-min spread as the benchmark's metric.
            st.metrics.insert_metric(
                test.name.as_slice(),
                bs.ns_iter_summ.median,
                bs.ns_iter_summ.max - bs.ns_iter_summ.min,
            );
            st.measured += 1
        }
        TestResult::TrFailed => {
            st.failed += 1;
            st.failures.push((test, stdout));
        }
        TestResult::TrFailedMsg(msg) => {
            st.failed += 1;
            // Append the failure message to the captured output so it shows
            // up alongside whatever the test printed.
            let mut stdout = stdout;
            stdout.extend_from_slice(format!("note: {msg}").as_bytes());
            st.failures.push((test, stdout));
        }
        TestResult::TrTimedFail => {
            st.failed += 1;
            st.time_failures.push((test, stdout));
        }
    }
}
+
// Handler for events that occur during test execution.
// It is provided as a callback to the `run_tests` function: it forwards each
// event to the active formatter and folds results into the console state.
fn on_test_event(
    event: &TestEvent,
    st: &mut ConsoleTestState,
    out: &mut dyn OutputFormatter,
) -> io::Result<()> {
    match (*event).clone() {
        TestEvent::TeFiltered(ref filtered_tests, shuffle_seed) => {
            st.total = filtered_tests.len();
            out.write_run_start(filtered_tests.len(), shuffle_seed)?;
        }
        TestEvent::TeFilteredOut(filtered_out) => {
            st.filtered_out = filtered_out;
        }
        TestEvent::TeWait(ref test) => out.write_test_start(test)?,
        TestEvent::TeTimeout(ref test) => out.write_timeout(test)?,
        TestEvent::TeResult(completed_test) => {
            let test = &completed_test.desc;
            let result = &completed_test.result;
            let exec_time = &completed_test.exec_time;
            let stdout = &completed_test.stdout;

            // Log first, then display, then update counters (which consumes
            // the completed test).
            st.write_log_result(test, result, exec_time.as_ref())?;
            out.write_result(test, result, exec_time.as_ref(), &*stdout, st)?;
            handle_test_result(st, completed_test);
        }
    }

    Ok(())
}
+
/// A simple console test runner.
/// Runs provided tests reporting process and results to the stdout.
/// Returns `Ok(true)` / `Ok(false)` according to the formatter's verdict on
/// the finished run.
pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
    let output = match term::stdout() {
        None => OutputLocation::Raw(io::stdout()),
        Some(t) => OutputLocation::Pretty(t),
    };

    // Padding width: the name length of the test maximizing `len_if_padded`
    // (tests that opt out of padding contribute a key of 0).
    let max_name_len = tests
        .iter()
        .max_by_key(|t| len_if_padded(*t))
        .map(|t| t.desc.name.as_slice().len())
        .unwrap_or(0);

    let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;

    // Pick the formatter matching `--format`.
    let mut out: Box<dyn OutputFormatter> = match opts.format {
        OutputFormat::Pretty => Box::new(PrettyFormatter::new(
            output,
            opts.use_color(),
            max_name_len,
            is_multithreaded,
            opts.time_options,
        )),
        OutputFormat::Terse => {
            Box::new(TerseFormatter::new(output, opts.use_color(), max_name_len, is_multithreaded))
        }
        OutputFormat::Json => Box::new(JsonFormatter::new(output)),
        OutputFormat::Junit => Box::new(JunitFormatter::new(output)),
    };
    let mut st = ConsoleTestState::new(opts)?;

    // Prevent the usage of `Instant` in some cases:
    // - It's currently not supported for wasm targets.
    // - We disable it for miri because it's not available when isolation is enabled.
    let is_instant_supported = !cfg!(target_family = "wasm") && !cfg!(miri);

    let start_time = is_instant_supported.then(Instant::now);
    run_tests(opts, tests, |x| on_test_event(&x, &mut st, &mut *out))?;
    st.exec_time = start_time.map(|t| TestSuiteExecTime(t.elapsed()));

    // Every selected test must have produced exactly one result by now.
    assert!(st.current_test_count() == st.total);

    out.write_run_finish(&st)
}
+
// Calculates padding for given test description: the name length when the
// test asks to be right-padded, 0 when it opts out of padding.
fn len_if_padded(t: &TestDescAndFn) -> usize {
    match t.testfn.padding() {
        NamePadding::PadNone => 0,
        NamePadding::PadOnRight => t.desc.name.as_slice().len(),
    }
}
diff --git a/library/test/src/event.rs b/library/test/src/event.rs
new file mode 100644
index 000000000..6ff1a615e
--- /dev/null
+++ b/library/test/src/event.rs
@@ -0,0 +1,36 @@
+//! Module containing different events that can occur
+//! during tests execution process.
+
+use super::test_result::TestResult;
+use super::time::TestExecTime;
+use super::types::{TestDesc, TestId};
+
+#[derive(Debug, Clone)]
+pub struct CompletedTest {
+ pub id: TestId,
+ pub desc: TestDesc,
+ pub result: TestResult,
+ pub exec_time: Option<TestExecTime>,
+ pub stdout: Vec<u8>,
+}
+
+impl CompletedTest {
+ pub fn new(
+ id: TestId,
+ desc: TestDesc,
+ result: TestResult,
+ exec_time: Option<TestExecTime>,
+ stdout: Vec<u8>,
+ ) -> Self {
+ Self { id, desc, result, exec_time, stdout }
+ }
+}
+
+#[derive(Debug, Clone)]
+pub enum TestEvent {
+ TeFiltered(Vec<TestDesc>, Option<u64>),
+ TeWait(TestDesc),
+ TeResult(CompletedTest),
+ TeTimeout(TestDesc),
+ TeFilteredOut(usize),
+}
diff --git a/library/test/src/formatters/json.rs b/library/test/src/formatters/json.rs
new file mode 100644
index 000000000..c07fdafb1
--- /dev/null
+++ b/library/test/src/formatters/json.rs
@@ -0,0 +1,260 @@
+use std::{borrow::Cow, io, io::prelude::Write};
+
+use super::OutputFormatter;
+use crate::{
+ console::{ConsoleTestState, OutputLocation},
+ test_result::TestResult,
+ time,
+ types::TestDesc,
+};
+
+pub(crate) struct JsonFormatter<T> {
+ out: OutputLocation<T>,
+}
+
+impl<T: Write> JsonFormatter<T> {
+ pub fn new(out: OutputLocation<T>) -> Self {
+ Self { out }
+ }
+
+ fn writeln_message(&mut self, s: &str) -> io::Result<()> {
+ assert!(!s.contains('\n'));
+
+ self.out.write_all(s.as_ref())?;
+ self.out.write_all(b"\n")
+ }
+
+ fn write_message(&mut self, s: &str) -> io::Result<()> {
+ assert!(!s.contains('\n'));
+
+ self.out.write_all(s.as_ref())
+ }
+
+ fn write_event(
+ &mut self,
+ ty: &str,
+ name: &str,
+ evt: &str,
+ exec_time: Option<&time::TestExecTime>,
+ stdout: Option<Cow<'_, str>>,
+ extra: Option<&str>,
+ ) -> io::Result<()> {
+ // A doc test's name includes a filename which must be escaped for correct json.
+ self.write_message(&*format!(
+ r#"{{ "type": "{}", "name": "{}", "event": "{}""#,
+ ty,
+ EscapedString(name),
+ evt
+ ))?;
+ if let Some(exec_time) = exec_time {
+ self.write_message(&*format!(r#", "exec_time": {}"#, exec_time.0.as_secs_f64()))?;
+ }
+ if let Some(stdout) = stdout {
+ self.write_message(&*format!(r#", "stdout": "{}""#, EscapedString(stdout)))?;
+ }
+ if let Some(extra) = extra {
+ self.write_message(&*format!(r#", {}"#, extra))?;
+ }
+ self.writeln_message(" }")
+ }
+}
+
+impl<T: Write> OutputFormatter for JsonFormatter<T> {
+ fn write_run_start(&mut self, test_count: usize, shuffle_seed: Option<u64>) -> io::Result<()> {
+ let shuffle_seed_json = if let Some(shuffle_seed) = shuffle_seed {
+ format!(r#", "shuffle_seed": {}"#, shuffle_seed)
+ } else {
+ String::new()
+ };
+ self.writeln_message(&*format!(
+ r#"{{ "type": "suite", "event": "started", "test_count": {}{} }}"#,
+ test_count, shuffle_seed_json
+ ))
+ }
+
+ fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()> {
+ self.writeln_message(&*format!(
+ r#"{{ "type": "test", "event": "started", "name": "{}" }}"#,
+ EscapedString(desc.name.as_slice())
+ ))
+ }
+
+ fn write_result(
+ &mut self,
+ desc: &TestDesc,
+ result: &TestResult,
+ exec_time: Option<&time::TestExecTime>,
+ stdout: &[u8],
+ state: &ConsoleTestState,
+ ) -> io::Result<()> {
+ let display_stdout = state.options.display_output || *result != TestResult::TrOk;
+ let stdout = if display_stdout && !stdout.is_empty() {
+ Some(String::from_utf8_lossy(stdout))
+ } else {
+ None
+ };
+ match *result {
+ TestResult::TrOk => {
+ self.write_event("test", desc.name.as_slice(), "ok", exec_time, stdout, None)
+ }
+
+ TestResult::TrFailed => {
+ self.write_event("test", desc.name.as_slice(), "failed", exec_time, stdout, None)
+ }
+
+ TestResult::TrTimedFail => self.write_event(
+ "test",
+ desc.name.as_slice(),
+ "failed",
+ exec_time,
+ stdout,
+ Some(r#""reason": "time limit exceeded""#),
+ ),
+
+ TestResult::TrFailedMsg(ref m) => self.write_event(
+ "test",
+ desc.name.as_slice(),
+ "failed",
+ exec_time,
+ stdout,
+ Some(&*format!(r#""message": "{}""#, EscapedString(m))),
+ ),
+
+ TestResult::TrIgnored => self.write_event(
+ "test",
+ desc.name.as_slice(),
+ "ignored",
+ exec_time,
+ stdout,
+ desc.ignore_message
+ .map(|msg| format!(r#""message": "{}""#, EscapedString(msg)))
+ .as_deref(),
+ ),
+
+ TestResult::TrBench(ref bs) => {
+ let median = bs.ns_iter_summ.median as usize;
+ let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
+
+ let mbps = if bs.mb_s == 0 {
+ String::new()
+ } else {
+ format!(r#", "mib_per_second": {}"#, bs.mb_s)
+ };
+
+ let line = format!(
+ "{{ \"type\": \"bench\", \
+ \"name\": \"{}\", \
+ \"median\": {}, \
+ \"deviation\": {}{} }}",
+ EscapedString(desc.name.as_slice()),
+ median,
+ deviation,
+ mbps
+ );
+
+ self.writeln_message(&*line)
+ }
+ }
+ }
+
+ fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
+ self.writeln_message(&*format!(
+ r#"{{ "type": "test", "event": "timeout", "name": "{}" }}"#,
+ EscapedString(desc.name.as_slice())
+ ))
+ }
+
+ fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result<bool> {
+ self.write_message(&*format!(
+ "{{ \"type\": \"suite\", \
+ \"event\": \"{}\", \
+ \"passed\": {}, \
+ \"failed\": {}, \
+ \"ignored\": {}, \
+ \"measured\": {}, \
+ \"filtered_out\": {}",
+ if state.failed == 0 { "ok" } else { "failed" },
+ state.passed,
+ state.failed,
+ state.ignored,
+ state.measured,
+ state.filtered_out,
+ ))?;
+
+ if let Some(ref exec_time) = state.exec_time {
+ let time_str = format!(", \"exec_time\": {}", exec_time.0.as_secs_f64());
+ self.write_message(&time_str)?;
+ }
+
+ self.writeln_message(" }")?;
+
+ Ok(state.failed == 0)
+ }
+}
+
+/// A formatting utility used to print strings with characters in need of escaping.
+/// Base code taken form `libserialize::json::escape_str`
+struct EscapedString<S: AsRef<str>>(S);
+
+impl<S: AsRef<str>> std::fmt::Display for EscapedString<S> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+ let mut start = 0;
+
+ for (i, byte) in self.0.as_ref().bytes().enumerate() {
+ let escaped = match byte {
+ b'"' => "\\\"",
+ b'\\' => "\\\\",
+ b'\x00' => "\\u0000",
+ b'\x01' => "\\u0001",
+ b'\x02' => "\\u0002",
+ b'\x03' => "\\u0003",
+ b'\x04' => "\\u0004",
+ b'\x05' => "\\u0005",
+ b'\x06' => "\\u0006",
+ b'\x07' => "\\u0007",
+ b'\x08' => "\\b",
+ b'\t' => "\\t",
+ b'\n' => "\\n",
+ b'\x0b' => "\\u000b",
+ b'\x0c' => "\\f",
+ b'\r' => "\\r",
+ b'\x0e' => "\\u000e",
+ b'\x0f' => "\\u000f",
+ b'\x10' => "\\u0010",
+ b'\x11' => "\\u0011",
+ b'\x12' => "\\u0012",
+ b'\x13' => "\\u0013",
+ b'\x14' => "\\u0014",
+ b'\x15' => "\\u0015",
+ b'\x16' => "\\u0016",
+ b'\x17' => "\\u0017",
+ b'\x18' => "\\u0018",
+ b'\x19' => "\\u0019",
+ b'\x1a' => "\\u001a",
+ b'\x1b' => "\\u001b",
+ b'\x1c' => "\\u001c",
+ b'\x1d' => "\\u001d",
+ b'\x1e' => "\\u001e",
+ b'\x1f' => "\\u001f",
+ b'\x7f' => "\\u007f",
+ _ => {
+ continue;
+ }
+ };
+
+ if start < i {
+ f.write_str(&self.0.as_ref()[start..i])?;
+ }
+
+ f.write_str(escaped)?;
+
+ start = i + 1;
+ }
+
+ if start != self.0.as_ref().len() {
+ f.write_str(&self.0.as_ref()[start..])?;
+ }
+
+ Ok(())
+ }
+}
diff --git a/library/test/src/formatters/junit.rs b/library/test/src/formatters/junit.rs
new file mode 100644
index 000000000..e6fb4f570
--- /dev/null
+++ b/library/test/src/formatters/junit.rs
@@ -0,0 +1,180 @@
+use std::io::{self, prelude::Write};
+use std::time::Duration;
+
+use super::OutputFormatter;
+use crate::{
+ console::{ConsoleTestState, OutputLocation},
+ test_result::TestResult,
+ time,
+ types::{TestDesc, TestType},
+};
+
+pub struct JunitFormatter<T> {
+ out: OutputLocation<T>,
+ results: Vec<(TestDesc, TestResult, Duration)>,
+}
+
+impl<T: Write> JunitFormatter<T> {
+ pub fn new(out: OutputLocation<T>) -> Self {
+ Self { out, results: Vec::new() }
+ }
+
+ fn write_message(&mut self, s: &str) -> io::Result<()> {
+ assert!(!s.contains('\n'));
+
+ self.out.write_all(s.as_ref())
+ }
+}
+
+impl<T: Write> OutputFormatter for JunitFormatter<T> {
+ fn write_run_start(
+ &mut self,
+ _test_count: usize,
+ _shuffle_seed: Option<u64>,
+ ) -> io::Result<()> {
+ // We write xml header on run start
+ self.write_message("<?xml version=\"1.0\" encoding=\"UTF-8\"?>")
+ }
+
+ fn write_test_start(&mut self, _desc: &TestDesc) -> io::Result<()> {
+ // We do not output anything on test start.
+ Ok(())
+ }
+
+ fn write_timeout(&mut self, _desc: &TestDesc) -> io::Result<()> {
+ // We do not output anything on test timeout.
+ Ok(())
+ }
+
+ fn write_result(
+ &mut self,
+ desc: &TestDesc,
+ result: &TestResult,
+ exec_time: Option<&time::TestExecTime>,
+ _stdout: &[u8],
+ _state: &ConsoleTestState,
+ ) -> io::Result<()> {
+ // Because the testsuite node holds some of the information as attributes, we can't write it
+ // until all of the tests have finished. Instead of writing every result as they come in, we add
+ // them to a Vec and write them all at once when run is complete.
+ let duration = exec_time.map(|t| t.0).unwrap_or_default();
+ self.results.push((desc.clone(), result.clone(), duration));
+ Ok(())
+ }
+ fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result<bool> {
+ self.write_message("<testsuites>")?;
+
+ self.write_message(&*format!(
+ "<testsuite name=\"test\" package=\"test\" id=\"0\" \
+ errors=\"0\" \
+ failures=\"{}\" \
+ tests=\"{}\" \
+ skipped=\"{}\" \
+ >",
+ state.failed, state.total, state.ignored
+ ))?;
+ for (desc, result, duration) in std::mem::replace(&mut self.results, Vec::new()) {
+ let (class_name, test_name) = parse_class_name(&desc);
+ match result {
+ TestResult::TrIgnored => { /* no-op */ }
+ TestResult::TrFailed => {
+ self.write_message(&*format!(
+ "<testcase classname=\"{}\" \
+ name=\"{}\" time=\"{}\">",
+ class_name,
+ test_name,
+ duration.as_secs_f64()
+ ))?;
+ self.write_message("<failure type=\"assert\"/>")?;
+ self.write_message("</testcase>")?;
+ }
+
+ TestResult::TrFailedMsg(ref m) => {
+ self.write_message(&*format!(
+ "<testcase classname=\"{}\" \
+ name=\"{}\" time=\"{}\">",
+ class_name,
+ test_name,
+ duration.as_secs_f64()
+ ))?;
+ self.write_message(&*format!("<failure message=\"{m}\" type=\"assert\"/>"))?;
+ self.write_message("</testcase>")?;
+ }
+
+ TestResult::TrTimedFail => {
+ self.write_message(&*format!(
+ "<testcase classname=\"{}\" \
+ name=\"{}\" time=\"{}\">",
+ class_name,
+ test_name,
+ duration.as_secs_f64()
+ ))?;
+ self.write_message("<failure type=\"timeout\"/>")?;
+ self.write_message("</testcase>")?;
+ }
+
+ TestResult::TrBench(ref b) => {
+ self.write_message(&*format!(
+ "<testcase classname=\"benchmark::{}\" \
+ name=\"{}\" time=\"{}\" />",
+ class_name, test_name, b.ns_iter_summ.sum
+ ))?;
+ }
+
+ TestResult::TrOk => {
+ self.write_message(&*format!(
+ "<testcase classname=\"{}\" \
+ name=\"{}\" time=\"{}\"/>",
+ class_name,
+ test_name,
+ duration.as_secs_f64()
+ ))?;
+ }
+ }
+ }
+ self.write_message("<system-out/>")?;
+ self.write_message("<system-err/>")?;
+ self.write_message("</testsuite>")?;
+ self.write_message("</testsuites>")?;
+
+ self.out.write_all(b"\n")?;
+
+ Ok(state.failed == 0)
+ }
+}
+
+fn parse_class_name(desc: &TestDesc) -> (String, String) {
+ match desc.test_type {
+ TestType::UnitTest => parse_class_name_unit(desc),
+ TestType::DocTest => parse_class_name_doc(desc),
+ TestType::IntegrationTest => parse_class_name_integration(desc),
+ TestType::Unknown => (String::from("unknown"), String::from(desc.name.as_slice())),
+ }
+}
+
+fn parse_class_name_unit(desc: &TestDesc) -> (String, String) {
+ // Module path => classname
+ // Function name => name
+ let module_segments: Vec<&str> = desc.name.as_slice().split("::").collect();
+ let (class_name, test_name) = match module_segments[..] {
+ [test] => (String::from("crate"), String::from(test)),
+ [ref path @ .., test] => (path.join("::"), String::from(test)),
+ [..] => unreachable!(),
+ };
+ (class_name, test_name)
+}
+
+fn parse_class_name_doc(desc: &TestDesc) -> (String, String) {
+ // File path => classname
+ // Line # => test name
+ let segments: Vec<&str> = desc.name.as_slice().split(" - ").collect();
+ let (class_name, test_name) = match segments[..] {
+ [file, line] => (String::from(file.trim()), String::from(line.trim())),
+ [..] => unreachable!(),
+ };
+ (class_name, test_name)
+}
+
+fn parse_class_name_integration(desc: &TestDesc) -> (String, String) {
+ (String::from("integration"), String::from(desc.name.as_slice()))
+}
diff --git a/library/test/src/formatters/mod.rs b/library/test/src/formatters/mod.rs
new file mode 100644
index 000000000..cb8085975
--- /dev/null
+++ b/library/test/src/formatters/mod.rs
@@ -0,0 +1,42 @@
+use std::{io, io::prelude::Write};
+
+use crate::{
+ console::ConsoleTestState,
+ test_result::TestResult,
+ time,
+ types::{TestDesc, TestName},
+};
+
+mod json;
+mod junit;
+mod pretty;
+mod terse;
+
+pub(crate) use self::json::JsonFormatter;
+pub(crate) use self::junit::JunitFormatter;
+pub(crate) use self::pretty::PrettyFormatter;
+pub(crate) use self::terse::TerseFormatter;
+
+pub(crate) trait OutputFormatter {
+ fn write_run_start(&mut self, test_count: usize, shuffle_seed: Option<u64>) -> io::Result<()>;
+ fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()>;
+ fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()>;
+ fn write_result(
+ &mut self,
+ desc: &TestDesc,
+ result: &TestResult,
+ exec_time: Option<&time::TestExecTime>,
+ stdout: &[u8],
+ state: &ConsoleTestState,
+ ) -> io::Result<()>;
+ fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result<bool>;
+}
+
+pub(crate) fn write_stderr_delimiter(test_output: &mut Vec<u8>, test_name: &TestName) {
+ match test_output.last() {
+ Some(b'\n') => (),
+ Some(_) => test_output.push(b'\n'),
+ None => (),
+ }
+ writeln!(test_output, "---- {} stderr ----", test_name).unwrap();
+}
diff --git a/library/test/src/formatters/pretty.rs b/library/test/src/formatters/pretty.rs
new file mode 100644
index 000000000..694202229
--- /dev/null
+++ b/library/test/src/formatters/pretty.rs
@@ -0,0 +1,281 @@
+use std::{io, io::prelude::Write};
+
+use super::OutputFormatter;
+use crate::{
+ bench::fmt_bench_samples,
+ console::{ConsoleTestState, OutputLocation},
+ term,
+ test_result::TestResult,
+ time,
+ types::TestDesc,
+};
+
+pub(crate) struct PrettyFormatter<T> {
+ out: OutputLocation<T>,
+ use_color: bool,
+ time_options: Option<time::TestTimeOptions>,
+
+ /// Number of columns to fill when aligning names
+ max_name_len: usize,
+
+ is_multithreaded: bool,
+}
+
+impl<T: Write> PrettyFormatter<T> {
+ pub fn new(
+ out: OutputLocation<T>,
+ use_color: bool,
+ max_name_len: usize,
+ is_multithreaded: bool,
+ time_options: Option<time::TestTimeOptions>,
+ ) -> Self {
+ PrettyFormatter { out, use_color, max_name_len, is_multithreaded, time_options }
+ }
+
+ #[cfg(test)]
+ pub fn output_location(&self) -> &OutputLocation<T> {
+ &self.out
+ }
+
+ pub fn write_ok(&mut self) -> io::Result<()> {
+ self.write_short_result("ok", term::color::GREEN)
+ }
+
+ pub fn write_failed(&mut self) -> io::Result<()> {
+ self.write_short_result("FAILED", term::color::RED)
+ }
+
+ pub fn write_ignored(&mut self, message: Option<&'static str>) -> io::Result<()> {
+ if let Some(message) = message {
+ self.write_short_result(&format!("ignored, {}", message), term::color::YELLOW)
+ } else {
+ self.write_short_result("ignored", term::color::YELLOW)
+ }
+ }
+
+ pub fn write_time_failed(&mut self) -> io::Result<()> {
+ self.write_short_result("FAILED (time limit exceeded)", term::color::RED)
+ }
+
+ pub fn write_bench(&mut self) -> io::Result<()> {
+ self.write_pretty("bench", term::color::CYAN)
+ }
+
+ pub fn write_short_result(
+ &mut self,
+ result: &str,
+ color: term::color::Color,
+ ) -> io::Result<()> {
+ self.write_pretty(result, color)
+ }
+
+ pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> {
+ match self.out {
+ OutputLocation::Pretty(ref mut term) => {
+ if self.use_color {
+ term.fg(color)?;
+ }
+ term.write_all(word.as_bytes())?;
+ if self.use_color {
+ term.reset()?;
+ }
+ term.flush()
+ }
+ OutputLocation::Raw(ref mut stdout) => {
+ stdout.write_all(word.as_bytes())?;
+ stdout.flush()
+ }
+ }
+ }
+
+ pub fn write_plain<S: AsRef<str>>(&mut self, s: S) -> io::Result<()> {
+ let s = s.as_ref();
+ self.out.write_all(s.as_bytes())?;
+ self.out.flush()
+ }
+
+ fn write_time(
+ &mut self,
+ desc: &TestDesc,
+ exec_time: Option<&time::TestExecTime>,
+ ) -> io::Result<()> {
+ if let (Some(opts), Some(time)) = (self.time_options, exec_time) {
+ let time_str = format!(" <{time}>");
+
+ let color = if self.use_color {
+ if opts.is_critical(desc, time) {
+ Some(term::color::RED)
+ } else if opts.is_warn(desc, time) {
+ Some(term::color::YELLOW)
+ } else {
+ None
+ }
+ } else {
+ None
+ };
+
+ match color {
+ Some(color) => self.write_pretty(&time_str, color)?,
+ None => self.write_plain(&time_str)?,
+ }
+ }
+
+ Ok(())
+ }
+
+ fn write_results(
+ &mut self,
+ inputs: &Vec<(TestDesc, Vec<u8>)>,
+ results_type: &str,
+ ) -> io::Result<()> {
+ let results_out_str = format!("\n{results_type}:\n");
+
+ self.write_plain(&results_out_str)?;
+
+ let mut results = Vec::new();
+ let mut stdouts = String::new();
+ for &(ref f, ref stdout) in inputs {
+ results.push(f.name.to_string());
+ if !stdout.is_empty() {
+ stdouts.push_str(&format!("---- {} stdout ----\n", f.name));
+ let output = String::from_utf8_lossy(stdout);
+ stdouts.push_str(&output);
+ stdouts.push('\n');
+ }
+ }
+ if !stdouts.is_empty() {
+ self.write_plain("\n")?;
+ self.write_plain(&stdouts)?;
+ }
+
+ self.write_plain(&results_out_str)?;
+ results.sort();
+ for name in &results {
+ self.write_plain(&format!(" {name}\n"))?;
+ }
+ Ok(())
+ }
+
+ pub fn write_successes(&mut self, state: &ConsoleTestState) -> io::Result<()> {
+ self.write_results(&state.not_failures, "successes")
+ }
+
+ pub fn write_failures(&mut self, state: &ConsoleTestState) -> io::Result<()> {
+ self.write_results(&state.failures, "failures")
+ }
+
+ pub fn write_time_failures(&mut self, state: &ConsoleTestState) -> io::Result<()> {
+ self.write_results(&state.time_failures, "failures (time limit exceeded)")
+ }
+
+ fn write_test_name(&mut self, desc: &TestDesc) -> io::Result<()> {
+ let name = desc.padded_name(self.max_name_len, desc.name.padding());
+ if let Some(test_mode) = desc.test_mode() {
+ self.write_plain(&format!("test {name} - {test_mode} ... "))?;
+ } else {
+ self.write_plain(&format!("test {name} ... "))?;
+ }
+
+ Ok(())
+ }
+}
+
+impl<T: Write> OutputFormatter for PrettyFormatter<T> {
+ fn write_run_start(&mut self, test_count: usize, shuffle_seed: Option<u64>) -> io::Result<()> {
+ let noun = if test_count != 1 { "tests" } else { "test" };
+ let shuffle_seed_msg = if let Some(shuffle_seed) = shuffle_seed {
+ format!(" (shuffle seed: {shuffle_seed})")
+ } else {
+ String::new()
+ };
+ self.write_plain(&format!("\nrunning {test_count} {noun}{shuffle_seed_msg}\n"))
+ }
+
+ fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()> {
+ // When running tests concurrently, we should not print
+ // the test's name as the result will be mis-aligned.
+ // When running the tests serially, we print the name here so
+ // that the user can see which test hangs.
+ if !self.is_multithreaded {
+ self.write_test_name(desc)?;
+ }
+
+ Ok(())
+ }
+
+ fn write_result(
+ &mut self,
+ desc: &TestDesc,
+ result: &TestResult,
+ exec_time: Option<&time::TestExecTime>,
+ _: &[u8],
+ _: &ConsoleTestState,
+ ) -> io::Result<()> {
+ if self.is_multithreaded {
+ self.write_test_name(desc)?;
+ }
+
+ match *result {
+ TestResult::TrOk => self.write_ok()?,
+ TestResult::TrFailed | TestResult::TrFailedMsg(_) => self.write_failed()?,
+ TestResult::TrIgnored => self.write_ignored(desc.ignore_message)?,
+ TestResult::TrBench(ref bs) => {
+ self.write_bench()?;
+ self.write_plain(&format!(": {}", fmt_bench_samples(bs)))?;
+ }
+ TestResult::TrTimedFail => self.write_time_failed()?,
+ }
+
+ self.write_time(desc, exec_time)?;
+ self.write_plain("\n")
+ }
+
+ fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
+ self.write_plain(&format!(
+ "test {} has been running for over {} seconds\n",
+ desc.name,
+ time::TEST_WARN_TIMEOUT_S
+ ))
+ }
+
+ fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result<bool> {
+ if state.options.display_output {
+ self.write_successes(state)?;
+ }
+ let success = state.failed == 0;
+ if !success {
+ if !state.failures.is_empty() {
+ self.write_failures(state)?;
+ }
+
+ if !state.time_failures.is_empty() {
+ self.write_time_failures(state)?;
+ }
+ }
+
+ self.write_plain("\ntest result: ")?;
+
+ if success {
+ // There's no parallelism at this point so it's safe to use color
+ self.write_pretty("ok", term::color::GREEN)?;
+ } else {
+ self.write_pretty("FAILED", term::color::RED)?;
+ }
+
+ let s = format!(
+ ". {} passed; {} failed; {} ignored; {} measured; {} filtered out",
+ state.passed, state.failed, state.ignored, state.measured, state.filtered_out
+ );
+
+ self.write_plain(&s)?;
+
+ if let Some(ref exec_time) = state.exec_time {
+ let time_str = format!("; finished in {exec_time}");
+ self.write_plain(&time_str)?;
+ }
+
+ self.write_plain("\n\n")?;
+
+ Ok(success)
+ }
+}
diff --git a/library/test/src/formatters/terse.rs b/library/test/src/formatters/terse.rs
new file mode 100644
index 000000000..5dace8bae
--- /dev/null
+++ b/library/test/src/formatters/terse.rs
@@ -0,0 +1,259 @@
+use std::{io, io::prelude::Write};
+
+use super::OutputFormatter;
+use crate::{
+ bench::fmt_bench_samples,
+ console::{ConsoleTestState, OutputLocation},
+ term,
+ test_result::TestResult,
+ time,
+ types::NamePadding,
+ types::TestDesc,
+};
+
+// We insert a '\n' when the output hits 100 columns in quiet mode. 88 test
+// result chars leaves 12 chars for a progress count like " 11704/12853".
+const QUIET_MODE_MAX_COLUMN: usize = 88;
+
+pub(crate) struct TerseFormatter<T> {
+ out: OutputLocation<T>,
+ use_color: bool,
+ is_multithreaded: bool,
+ /// Number of columns to fill when aligning names
+ max_name_len: usize,
+
+ test_count: usize,
+ total_test_count: usize,
+}
+
+impl<T: Write> TerseFormatter<T> {
+ pub fn new(
+ out: OutputLocation<T>,
+ use_color: bool,
+ max_name_len: usize,
+ is_multithreaded: bool,
+ ) -> Self {
+ TerseFormatter {
+ out,
+ use_color,
+ max_name_len,
+ is_multithreaded,
+ test_count: 0,
+ total_test_count: 0, // initialized later, when write_run_start is called
+ }
+ }
+
+ pub fn write_ok(&mut self) -> io::Result<()> {
+ self.write_short_result(".", term::color::GREEN)
+ }
+
+ pub fn write_failed(&mut self) -> io::Result<()> {
+ self.write_short_result("F", term::color::RED)
+ }
+
+ pub fn write_ignored(&mut self) -> io::Result<()> {
+ self.write_short_result("i", term::color::YELLOW)
+ }
+
+ pub fn write_bench(&mut self) -> io::Result<()> {
+ self.write_pretty("bench", term::color::CYAN)
+ }
+
+ pub fn write_short_result(
+ &mut self,
+ result: &str,
+ color: term::color::Color,
+ ) -> io::Result<()> {
+ self.write_pretty(result, color)?;
+ if self.test_count % QUIET_MODE_MAX_COLUMN == QUIET_MODE_MAX_COLUMN - 1 {
+ // We insert a new line regularly in order to flush the
+ // screen when dealing with line-buffered output (e.g., piping to
+ // `stamp` in the rust CI).
+ let out = format!(" {}/{}\n", self.test_count + 1, self.total_test_count);
+ self.write_plain(&out)?;
+ }
+
+ self.test_count += 1;
+ Ok(())
+ }
+
+ pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> {
+ match self.out {
+ OutputLocation::Pretty(ref mut term) => {
+ if self.use_color {
+ term.fg(color)?;
+ }
+ term.write_all(word.as_bytes())?;
+ if self.use_color {
+ term.reset()?;
+ }
+ term.flush()
+ }
+ OutputLocation::Raw(ref mut stdout) => {
+ stdout.write_all(word.as_bytes())?;
+ stdout.flush()
+ }
+ }
+ }
+
+ pub fn write_plain<S: AsRef<str>>(&mut self, s: S) -> io::Result<()> {
+ let s = s.as_ref();
+ self.out.write_all(s.as_bytes())?;
+ self.out.flush()
+ }
+
+ pub fn write_outputs(&mut self, state: &ConsoleTestState) -> io::Result<()> {
+ self.write_plain("\nsuccesses:\n")?;
+ let mut successes = Vec::new();
+ let mut stdouts = String::new();
+ for &(ref f, ref stdout) in &state.not_failures {
+ successes.push(f.name.to_string());
+ if !stdout.is_empty() {
+ stdouts.push_str(&format!("---- {} stdout ----\n", f.name));
+ let output = String::from_utf8_lossy(stdout);
+ stdouts.push_str(&output);
+ stdouts.push('\n');
+ }
+ }
+ if !stdouts.is_empty() {
+ self.write_plain("\n")?;
+ self.write_plain(&stdouts)?;
+ }
+
+ self.write_plain("\nsuccesses:\n")?;
+ successes.sort();
+ for name in &successes {
+ self.write_plain(&format!(" {name}\n"))?;
+ }
+ Ok(())
+ }
+
+ pub fn write_failures(&mut self, state: &ConsoleTestState) -> io::Result<()> {
+ self.write_plain("\nfailures:\n")?;
+ let mut failures = Vec::new();
+ let mut fail_out = String::new();
+ for &(ref f, ref stdout) in &state.failures {
+ failures.push(f.name.to_string());
+ if !stdout.is_empty() {
+ fail_out.push_str(&format!("---- {} stdout ----\n", f.name));
+ let output = String::from_utf8_lossy(stdout);
+ fail_out.push_str(&output);
+ fail_out.push('\n');
+ }
+ }
+ if !fail_out.is_empty() {
+ self.write_plain("\n")?;
+ self.write_plain(&fail_out)?;
+ }
+
+ self.write_plain("\nfailures:\n")?;
+ failures.sort();
+ for name in &failures {
+ self.write_plain(&format!(" {name}\n"))?;
+ }
+ Ok(())
+ }
+
+ fn write_test_name(&mut self, desc: &TestDesc) -> io::Result<()> {
+ let name = desc.padded_name(self.max_name_len, desc.name.padding());
+ if let Some(test_mode) = desc.test_mode() {
+ self.write_plain(&format!("test {name} - {test_mode} ... "))?;
+ } else {
+ self.write_plain(&format!("test {name} ... "))?;
+ }
+
+ Ok(())
+ }
+}
+
+impl<T: Write> OutputFormatter for TerseFormatter<T> {
+ fn write_run_start(&mut self, test_count: usize, shuffle_seed: Option<u64>) -> io::Result<()> {
+ self.total_test_count = test_count;
+ let noun = if test_count != 1 { "tests" } else { "test" };
+ let shuffle_seed_msg = if let Some(shuffle_seed) = shuffle_seed {
+ format!(" (shuffle seed: {shuffle_seed})")
+ } else {
+ String::new()
+ };
+ self.write_plain(&format!("\nrunning {test_count} {noun}{shuffle_seed_msg}\n"))
+ }
+
+ fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()> {
+ // Remnants from old libtest code that used the padding value
+ // in order to indicate benchmarks.
+ // When running benchmarks, terse-mode should still print their name as if
+ // it is the Pretty formatter.
+ if !self.is_multithreaded && desc.name.padding() == NamePadding::PadOnRight {
+ self.write_test_name(desc)?;
+ }
+
+ Ok(())
+ }
+
+ fn write_result(
+ &mut self,
+ desc: &TestDesc,
+ result: &TestResult,
+ _: Option<&time::TestExecTime>,
+ _: &[u8],
+ _: &ConsoleTestState,
+ ) -> io::Result<()> {
+ match *result {
+ TestResult::TrOk => self.write_ok(),
+ TestResult::TrFailed | TestResult::TrFailedMsg(_) | TestResult::TrTimedFail => {
+ self.write_failed()
+ }
+ TestResult::TrIgnored => self.write_ignored(),
+ TestResult::TrBench(ref bs) => {
+ if self.is_multithreaded {
+ self.write_test_name(desc)?;
+ }
+ self.write_bench()?;
+ self.write_plain(&format!(": {}\n", fmt_bench_samples(bs)))
+ }
+ }
+ }
+
+ fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
+ self.write_plain(&format!(
+ "test {} has been running for over {} seconds\n",
+ desc.name,
+ time::TEST_WARN_TIMEOUT_S
+ ))
+ }
+
+ fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result<bool> {
+ if state.options.display_output {
+ self.write_outputs(state)?;
+ }
+ let success = state.failed == 0;
+ if !success {
+ self.write_failures(state)?;
+ }
+
+ self.write_plain("\ntest result: ")?;
+
+ if success {
+ // There's no parallelism at this point so it's safe to use color
+ self.write_pretty("ok", term::color::GREEN)?;
+ } else {
+ self.write_pretty("FAILED", term::color::RED)?;
+ }
+
+ let s = format!(
+ ". {} passed; {} failed; {} ignored; {} measured; {} filtered out",
+ state.passed, state.failed, state.ignored, state.measured, state.filtered_out
+ );
+
+ self.write_plain(&s)?;
+
+ if let Some(ref exec_time) = state.exec_time {
+ let time_str = format!("; finished in {exec_time}");
+ self.write_plain(&time_str)?;
+ }
+
+ self.write_plain("\n\n")?;
+
+ Ok(success)
+ }
+}
diff --git a/library/test/src/helpers/concurrency.rs b/library/test/src/helpers/concurrency.rs
new file mode 100644
index 000000000..eb2111573
--- /dev/null
+++ b/library/test/src/helpers/concurrency.rs
@@ -0,0 +1,14 @@
+//! Helper module which helps to determine amount of threads to be used
+//! during tests execution.
+use std::{env, num::NonZeroUsize, thread};
+
+pub fn get_concurrency() -> usize {
+ if let Ok(value) = env::var("RUST_TEST_THREADS") {
+ match value.parse::<NonZeroUsize>().ok() {
+ Some(n) => n.get(),
+ _ => panic!("RUST_TEST_THREADS is `{value}`, should be a positive integer."),
+ }
+ } else {
+ thread::available_parallelism().map(|n| n.get()).unwrap_or(1)
+ }
+}
diff --git a/library/test/src/helpers/exit_code.rs b/library/test/src/helpers/exit_code.rs
new file mode 100644
index 000000000..f762f8881
--- /dev/null
+++ b/library/test/src/helpers/exit_code.rs
@@ -0,0 +1,20 @@
+//! Helper module to detect subprocess exit code.
+
+use std::process::ExitStatus;
+
+#[cfg(not(unix))]
+pub fn get_exit_code(status: ExitStatus) -> Result<i32, String> {
+ status.code().ok_or_else(|| "received no exit code from child process".into())
+}
+
+#[cfg(unix)]
+pub fn get_exit_code(status: ExitStatus) -> Result<i32, String> {
+ use std::os::unix::process::ExitStatusExt;
+ match status.code() {
+ Some(code) => Ok(code),
+ None => match status.signal() {
+ Some(signal) => Err(format!("child process exited with signal {signal}")),
+ None => Err("child process exited with unknown signal".into()),
+ },
+ }
+}
diff --git a/library/test/src/helpers/isatty.rs b/library/test/src/helpers/isatty.rs
new file mode 100644
index 000000000..874ecc376
--- /dev/null
+++ b/library/test/src/helpers/isatty.rs
@@ -0,0 +1,32 @@
+//! Helper module which provides a function to test
+//! if stdout is a tty.
+
+cfg_if::cfg_if! {
+ if #[cfg(unix)] {
+ pub fn stdout_isatty() -> bool {
+ unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
+ }
+ } else if #[cfg(windows)] {
+ pub fn stdout_isatty() -> bool {
+ type DWORD = u32;
+ type BOOL = i32;
+ type HANDLE = *mut u8;
+ type LPDWORD = *mut u32;
+ const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
+ extern "system" {
+ fn GetStdHandle(which: DWORD) -> HANDLE;
+ fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
+ }
+ unsafe {
+ let handle = GetStdHandle(STD_OUTPUT_HANDLE);
+ let mut out = 0;
+ GetConsoleMode(handle, &mut out) != 0
+ }
+ }
+ } else {
+ // FIXME: Implement isatty on SGX
+ pub fn stdout_isatty() -> bool {
+ false
+ }
+ }
+}
diff --git a/library/test/src/helpers/metrics.rs b/library/test/src/helpers/metrics.rs
new file mode 100644
index 000000000..f77a23e68
--- /dev/null
+++ b/library/test/src/helpers/metrics.rs
@@ -0,0 +1,50 @@
+//! Benchmark metrics.
+use std::collections::BTreeMap;
+
+#[derive(Clone, PartialEq, Debug, Copy)]
+pub struct Metric {
+ value: f64,
+ noise: f64,
+}
+
+impl Metric {
+ pub fn new(value: f64, noise: f64) -> Metric {
+ Metric { value, noise }
+ }
+}
+
+#[derive(Clone, PartialEq)]
+pub struct MetricMap(BTreeMap<String, Metric>);
+
+impl MetricMap {
+ pub fn new() -> MetricMap {
+ MetricMap(BTreeMap::new())
+ }
+
+ /// Insert a named `value` (+/- `noise`) metric into the map. The value
+ /// must be non-negative. The `noise` indicates the uncertainty of the
+ /// metric, which doubles as the "noise range" of acceptable
+ /// pairwise-regressions on this named value, when comparing from one
+ /// metric to the next using `compare_to_old`.
+ ///
+ /// If `noise` is positive, then it means this metric is of a value
+ /// you want to see grow smaller, so a change larger than `noise` in the
+ /// positive direction represents a regression.
+ ///
+ /// If `noise` is negative, then it means this metric is of a value
+ /// you want to see grow larger, so a change larger than `noise` in the
+ /// negative direction represents a regression.
+ pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
+ let m = Metric { value, noise };
+ self.0.insert(name.to_owned(), m);
+ }
+
+ pub fn fmt_metrics(&self) -> String {
+ let v = self
+ .0
+ .iter()
+ .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
+ .collect::<Vec<_>>();
+ v.join(", ")
+ }
+}
diff --git a/library/test/src/helpers/mod.rs b/library/test/src/helpers/mod.rs
new file mode 100644
index 000000000..049cadf86
--- /dev/null
+++ b/library/test/src/helpers/mod.rs
@@ -0,0 +1,8 @@
+//! Module with common helpers not directly related to tests
+//! but used in `libtest`.
+
+pub mod concurrency;
+pub mod exit_code;
+pub mod isatty;
+pub mod metrics;
+pub mod shuffle;
diff --git a/library/test/src/helpers/shuffle.rs b/library/test/src/helpers/shuffle.rs
new file mode 100644
index 000000000..ca503106c
--- /dev/null
+++ b/library/test/src/helpers/shuffle.rs
@@ -0,0 +1,67 @@
+use crate::cli::TestOpts;
+use crate::types::{TestDescAndFn, TestId, TestName};
+use std::collections::hash_map::DefaultHasher;
+use std::hash::Hasher;
+use std::time::{SystemTime, UNIX_EPOCH};
+
+pub fn get_shuffle_seed(opts: &TestOpts) -> Option<u64> {
+ opts.shuffle_seed.or_else(|| {
+ if opts.shuffle {
+ Some(
+ SystemTime::now()
+ .duration_since(UNIX_EPOCH)
+ .expect("Failed to get system time")
+ .as_nanos() as u64,
+ )
+ } else {
+ None
+ }
+ })
+}
+
+pub fn shuffle_tests(shuffle_seed: u64, tests: &mut [(TestId, TestDescAndFn)]) {
+ let test_names: Vec<&TestName> = tests.iter().map(|test| &test.1.desc.name).collect();
+ let test_names_hash = calculate_hash(&test_names);
+ let mut rng = Rng::new(shuffle_seed, test_names_hash);
+ shuffle(&mut rng, tests);
+}
+
+// `shuffle` is from `rust-analyzer/src/cli/analysis_stats.rs`.
+fn shuffle<T>(rng: &mut Rng, slice: &mut [T]) {
+ for i in 0..slice.len() {
+ randomize_first(rng, &mut slice[i..]);
+ }
+
+ fn randomize_first<T>(rng: &mut Rng, slice: &mut [T]) {
+ assert!(!slice.is_empty());
+ let idx = rng.rand_range(0..slice.len() as u64) as usize;
+ slice.swap(0, idx);
+ }
+}
+
+struct Rng {
+ state: u64,
+ extra: u64,
+}
+
+impl Rng {
+ fn new(seed: u64, extra: u64) -> Self {
+ Self { state: seed, extra }
+ }
+
+ fn rand_range(&mut self, range: core::ops::Range<u64>) -> u64 {
+ self.rand_u64() % (range.end - range.start) + range.start
+ }
+
+ fn rand_u64(&mut self) -> u64 {
+ self.state = calculate_hash(&(self.state, self.extra));
+ self.state
+ }
+}
+
+// `calculate_hash` is from `core/src/hash/mod.rs`.
+fn calculate_hash<T: core::hash::Hash>(t: &T) -> u64 {
+ let mut s = DefaultHasher::new();
+ t.hash(&mut s);
+ s.finish()
+}
diff --git a/library/test/src/lib.rs b/library/test/src/lib.rs
new file mode 100644
index 000000000..3b7193adc
--- /dev/null
+++ b/library/test/src/lib.rs
@@ -0,0 +1,696 @@
+//! Support code for rustc's built in unit-test and micro-benchmarking
+//! framework.
+//!
+//! Almost all user code will only be interested in `Bencher` and
+//! `black_box`. All other interactions (such as writing tests and
+//! benchmarks themselves) should be done via the `#[test]` and
+//! `#[bench]` attributes.
+//!
+//! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more details.
+
+// Currently, not much of this is meant for users. It is intended to
+// support the simplest interface possible for representing and
+// running tests while providing a base that other test frameworks may
+// build off of.
+
+#![unstable(feature = "test", issue = "50297")]
+#![doc(test(attr(deny(warnings))))]
+#![feature(bench_black_box)]
+#![feature(internal_output_capture)]
+#![feature(staged_api)]
+#![feature(process_exitcode_internals)]
+#![feature(test)]
+
+// Public reexports
+pub use self::bench::{black_box, Bencher};
+pub use self::console::run_tests_console;
+pub use self::options::{ColorConfig, Options, OutputFormat, RunIgnored, ShouldPanic};
+pub use self::types::TestName::*;
+pub use self::types::*;
+pub use self::ColorConfig::*;
+pub use cli::TestOpts;
+
+// Module to be used by rustc to compile tests in libtest
+pub mod test {
+ pub use crate::{
+ assert_test_result,
+ bench::Bencher,
+ cli::{parse_opts, TestOpts},
+ filter_tests,
+ helpers::metrics::{Metric, MetricMap},
+ options::{Concurrent, Options, RunIgnored, RunStrategy, ShouldPanic},
+ run_test, test_main, test_main_static,
+ test_result::{TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk},
+ time::{TestExecTime, TestTimeOptions},
+ types::{
+ DynTestFn, DynTestName, StaticBenchFn, StaticTestFn, StaticTestName, TestDesc,
+ TestDescAndFn, TestId, TestName, TestType,
+ },
+ };
+}
+
+use std::{
+ collections::VecDeque,
+ env, io,
+ io::prelude::Write,
+ panic::{self, catch_unwind, AssertUnwindSafe, PanicInfo},
+ process::{self, Command, Termination},
+ sync::mpsc::{channel, Sender},
+ sync::{Arc, Mutex},
+ thread,
+ time::{Duration, Instant},
+};
+
+pub mod bench;
+mod cli;
+mod console;
+mod event;
+mod formatters;
+mod helpers;
+mod options;
+pub mod stats;
+mod term;
+mod test_result;
+mod time;
+mod types;
+
+#[cfg(test)]
+mod tests;
+
+use event::{CompletedTest, TestEvent};
+use helpers::concurrency::get_concurrency;
+use helpers::exit_code::get_exit_code;
+use helpers::shuffle::{get_shuffle_seed, shuffle_tests};
+use options::{Concurrent, RunStrategy};
+use test_result::*;
+use time::TestExecTime;
+
+// Process exit code to be used to indicate test failures.
+const ERROR_EXIT_CODE: i32 = 101;
+
+const SECONDARY_TEST_INVOKER_VAR: &str = "__RUST_TEST_INVOKE";
+
+// The default console test runner. It accepts the command line
+// arguments and a vector of test_descs.
+pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Option<Options>) {
+ let mut opts = match cli::parse_opts(args) {
+ Some(Ok(o)) => o,
+ Some(Err(msg)) => {
+ eprintln!("error: {msg}");
+ process::exit(ERROR_EXIT_CODE);
+ }
+ None => return,
+ };
+ if let Some(options) = options {
+ opts.options = options;
+ }
+ if opts.list {
+ if let Err(e) = console::list_tests_console(&opts, tests) {
+ eprintln!("error: io error when listing tests: {e:?}");
+ process::exit(ERROR_EXIT_CODE);
+ }
+ } else {
+ match console::run_tests_console(&opts, tests) {
+ Ok(true) => {}
+ Ok(false) => process::exit(ERROR_EXIT_CODE),
+ Err(e) => {
+ eprintln!("error: io error when listing tests: {e:?}");
+ process::exit(ERROR_EXIT_CODE);
+ }
+ }
+ }
+}
+
+/// A variant optimized for invocation with a static test vector.
+/// This will panic (intentionally) when fed any dynamic tests.
+///
+/// This is the entry point for the main function generated by `rustc --test`
+/// when panic=unwind.
+pub fn test_main_static(tests: &[&TestDescAndFn]) {
+ let args = env::args().collect::<Vec<_>>();
+ let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
+ test_main(&args, owned_tests, None)
+}
+
+/// A variant optimized for invocation with a static test vector.
+/// This will panic (intentionally) when fed any dynamic tests.
+///
+/// Runs tests in panic=abort mode, which involves spawning subprocesses for
+/// tests.
+///
+/// This is the entry point for the main function generated by `rustc --test`
+/// when panic=abort.
+pub fn test_main_static_abort(tests: &[&TestDescAndFn]) {
+ // If we're being run in SpawnedSecondary mode, run the test here. run_test
+ // will then exit the process.
+ if let Ok(name) = env::var(SECONDARY_TEST_INVOKER_VAR) {
+ env::remove_var(SECONDARY_TEST_INVOKER_VAR);
+ let test = tests
+ .iter()
+ .filter(|test| test.desc.name.as_slice() == name)
+ .map(make_owned_test)
+ .next()
+ .unwrap_or_else(|| panic!("couldn't find a test with the provided name '{name}'"));
+ let TestDescAndFn { desc, testfn } = test;
+ let testfn = match testfn {
+ StaticTestFn(f) => f,
+ _ => panic!("only static tests are supported"),
+ };
+ run_test_in_spawned_subprocess(desc, Box::new(testfn));
+ }
+
+ let args = env::args().collect::<Vec<_>>();
+ let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
+ test_main(&args, owned_tests, Some(Options::new().panic_abort(true)))
+}
+
+/// Clones static values for putting into a dynamic vector, which test_main()
+/// needs to hand out ownership of tests to parallel test runners.
+///
+/// This will panic when fed any dynamic tests, because they cannot be cloned.
+fn make_owned_test(test: &&TestDescAndFn) -> TestDescAndFn {
+ match test.testfn {
+ StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: test.desc.clone() },
+ StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: test.desc.clone() },
+ _ => panic!("non-static tests passed to test::test_main_static"),
+ }
+}
+
+/// Invoked when unit tests terminate. Should panic if the unit
+/// test is considered a failure. By default, invokes `report()`
+/// and checks for a `0` result.
+pub fn assert_test_result<T: Termination>(result: T) {
+ let code = result.report().to_i32();
+ assert_eq!(
+ code, 0,
+ "the test returned a termination value with a non-zero status code ({}) \
+ which indicates a failure",
+ code
+ );
+}
+
+pub fn run_tests<F>(
+ opts: &TestOpts,
+ tests: Vec<TestDescAndFn>,
+ mut notify_about_test_event: F,
+) -> io::Result<()>
+where
+ F: FnMut(TestEvent) -> io::Result<()>,
+{
+ use std::collections::{self, HashMap};
+ use std::hash::BuildHasherDefault;
+ use std::sync::mpsc::RecvTimeoutError;
+
+ struct RunningTest {
+ join_handle: Option<thread::JoinHandle<()>>,
+ }
+
+ // Use a deterministic hasher
+ type TestMap =
+ HashMap<TestId, RunningTest, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
+
+ struct TimeoutEntry {
+ id: TestId,
+ desc: TestDesc,
+ timeout: Instant,
+ }
+
+ let tests_len = tests.len();
+
+ let mut filtered_tests = filter_tests(opts, tests);
+ if !opts.bench_benchmarks {
+ filtered_tests = convert_benchmarks_to_tests(filtered_tests);
+ }
+
+ let filtered_tests = {
+ let mut filtered_tests = filtered_tests;
+ for test in filtered_tests.iter_mut() {
+ test.desc.name = test.desc.name.with_padding(test.testfn.padding());
+ }
+
+ filtered_tests
+ };
+
+ let filtered_out = tests_len - filtered_tests.len();
+ let event = TestEvent::TeFilteredOut(filtered_out);
+ notify_about_test_event(event)?;
+
+ let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
+
+ let shuffle_seed = get_shuffle_seed(opts);
+
+ let event = TestEvent::TeFiltered(filtered_descs, shuffle_seed);
+ notify_about_test_event(event)?;
+
+ let (filtered_tests, filtered_benchs): (Vec<_>, _) = filtered_tests
+ .into_iter()
+ .enumerate()
+ .map(|(i, e)| (TestId(i), e))
+ .partition(|(_, e)| matches!(e.testfn, StaticTestFn(_) | DynTestFn(_)));
+
+ let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
+
+ let mut remaining = filtered_tests;
+ if let Some(shuffle_seed) = shuffle_seed {
+ shuffle_tests(shuffle_seed, &mut remaining);
+ } else {
+ remaining.reverse();
+ }
+ let mut pending = 0;
+
+ let (tx, rx) = channel::<CompletedTest>();
+ let run_strategy = if opts.options.panic_abort && !opts.force_run_in_process {
+ RunStrategy::SpawnPrimary
+ } else {
+ RunStrategy::InProcess
+ };
+
+ let mut running_tests: TestMap = HashMap::default();
+ let mut timeout_queue: VecDeque<TimeoutEntry> = VecDeque::new();
+
+ fn get_timed_out_tests(
+ running_tests: &TestMap,
+ timeout_queue: &mut VecDeque<TimeoutEntry>,
+ ) -> Vec<TestDesc> {
+ let now = Instant::now();
+ let mut timed_out = Vec::new();
+ while let Some(timeout_entry) = timeout_queue.front() {
+ if now < timeout_entry.timeout {
+ break;
+ }
+ let timeout_entry = timeout_queue.pop_front().unwrap();
+ if running_tests.contains_key(&timeout_entry.id) {
+ timed_out.push(timeout_entry.desc);
+ }
+ }
+ timed_out
+ }
+
+ fn calc_timeout(timeout_queue: &VecDeque<TimeoutEntry>) -> Option<Duration> {
+ timeout_queue.front().map(|&TimeoutEntry { timeout: next_timeout, .. }| {
+ let now = Instant::now();
+ if next_timeout >= now { next_timeout - now } else { Duration::new(0, 0) }
+ })
+ }
+
+ if concurrency == 1 {
+ while !remaining.is_empty() {
+ let (id, test) = remaining.pop().unwrap();
+ let event = TestEvent::TeWait(test.desc.clone());
+ notify_about_test_event(event)?;
+ let join_handle =
+ run_test(opts, !opts.run_tests, id, test, run_strategy, tx.clone(), Concurrent::No);
+ assert!(join_handle.is_none());
+ let completed_test = rx.recv().unwrap();
+
+ let event = TestEvent::TeResult(completed_test);
+ notify_about_test_event(event)?;
+ }
+ } else {
+ while pending > 0 || !remaining.is_empty() {
+ while pending < concurrency && !remaining.is_empty() {
+ let (id, test) = remaining.pop().unwrap();
+ let timeout = time::get_default_test_timeout();
+ let desc = test.desc.clone();
+
+ let event = TestEvent::TeWait(desc.clone());
+ notify_about_test_event(event)?; //here no pad
+ let join_handle = run_test(
+ opts,
+ !opts.run_tests,
+ id,
+ test,
+ run_strategy,
+ tx.clone(),
+ Concurrent::Yes,
+ );
+ running_tests.insert(id, RunningTest { join_handle });
+ timeout_queue.push_back(TimeoutEntry { id, desc, timeout });
+ pending += 1;
+ }
+
+ let mut res;
+ loop {
+ if let Some(timeout) = calc_timeout(&timeout_queue) {
+ res = rx.recv_timeout(timeout);
+ for test in get_timed_out_tests(&running_tests, &mut timeout_queue) {
+ let event = TestEvent::TeTimeout(test);
+ notify_about_test_event(event)?;
+ }
+
+ match res {
+ Err(RecvTimeoutError::Timeout) => {
+ // Result is not yet ready, continue waiting.
+ }
+ _ => {
+ // We've got a result, stop the loop.
+ break;
+ }
+ }
+ } else {
+ res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
+ break;
+ }
+ }
+
+ let mut completed_test = res.unwrap();
+ let running_test = running_tests.remove(&completed_test.id).unwrap();
+ if let Some(join_handle) = running_test.join_handle {
+ if let Err(_) = join_handle.join() {
+ if let TrOk = completed_test.result {
+ completed_test.result =
+ TrFailedMsg("panicked after reporting success".to_string());
+ }
+ }
+ }
+
+ let event = TestEvent::TeResult(completed_test);
+ notify_about_test_event(event)?;
+ pending -= 1;
+ }
+ }
+
+ if opts.bench_benchmarks {
+ // All benchmarks run at the end, in serial.
+ for (id, b) in filtered_benchs {
+ let event = TestEvent::TeWait(b.desc.clone());
+ notify_about_test_event(event)?;
+ run_test(opts, false, id, b, run_strategy, tx.clone(), Concurrent::No);
+ let completed_test = rx.recv().unwrap();
+
+ let event = TestEvent::TeResult(completed_test);
+ notify_about_test_event(event)?;
+ }
+ }
+ Ok(())
+}
+
+pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
+ let mut filtered = tests;
+ let matches_filter = |test: &TestDescAndFn, filter: &str| {
+ let test_name = test.desc.name.as_slice();
+
+ match opts.filter_exact {
+ true => test_name == filter,
+ false => test_name.contains(filter),
+ }
+ };
+
+ // Remove tests that don't match the test filter
+ if !opts.filters.is_empty() {
+ filtered.retain(|test| opts.filters.iter().any(|filter| matches_filter(test, filter)));
+ }
+
+ // Skip tests that match any of the skip filters
+ filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));
+
+ // Excludes #[should_panic] tests
+ if opts.exclude_should_panic {
+ filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
+ }
+
+ // maybe unignore tests
+ match opts.run_ignored {
+ RunIgnored::Yes => {
+ filtered.iter_mut().for_each(|test| test.desc.ignore = false);
+ }
+ RunIgnored::Only => {
+ filtered.retain(|test| test.desc.ignore);
+ filtered.iter_mut().for_each(|test| test.desc.ignore = false);
+ }
+ RunIgnored::No => {}
+ }
+
+ // Sort the tests alphabetically
+ filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
+
+ filtered
+}
+
+pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
+ // convert benchmarks to tests, if we're not benchmarking them
+ tests
+ .into_iter()
+ .map(|x| {
+ let testfn = match x.testfn {
+ DynBenchFn(benchfn) => DynTestFn(Box::new(move || {
+ bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
+ })),
+ StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
+ bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
+ })),
+ f => f,
+ };
+ TestDescAndFn { desc: x.desc, testfn }
+ })
+ .collect()
+}
+
+pub fn run_test(
+ opts: &TestOpts,
+ force_ignore: bool,
+ id: TestId,
+ test: TestDescAndFn,
+ strategy: RunStrategy,
+ monitor_ch: Sender<CompletedTest>,
+ concurrency: Concurrent,
+) -> Option<thread::JoinHandle<()>> {
+ let TestDescAndFn { desc, testfn } = test;
+
+ // Emscripten can catch panics but other wasm targets cannot
+ let ignore_because_no_process_support = desc.should_panic != ShouldPanic::No
+ && cfg!(target_family = "wasm")
+ && !cfg!(target_os = "emscripten");
+
+ if force_ignore || desc.ignore || ignore_because_no_process_support {
+ let message = CompletedTest::new(id, desc, TrIgnored, None, Vec::new());
+ monitor_ch.send(message).unwrap();
+ return None;
+ }
+
+ struct TestRunOpts {
+ pub strategy: RunStrategy,
+ pub nocapture: bool,
+ pub concurrency: Concurrent,
+ pub time: Option<time::TestTimeOptions>,
+ }
+
+ fn run_test_inner(
+ id: TestId,
+ desc: TestDesc,
+ monitor_ch: Sender<CompletedTest>,
+ testfn: Box<dyn FnOnce() + Send>,
+ opts: TestRunOpts,
+ ) -> Option<thread::JoinHandle<()>> {
+ let concurrency = opts.concurrency;
+ let name = desc.name.clone();
+
+ let runtest = move || match opts.strategy {
+ RunStrategy::InProcess => run_test_in_process(
+ id,
+ desc,
+ opts.nocapture,
+ opts.time.is_some(),
+ testfn,
+ monitor_ch,
+ opts.time,
+ ),
+ RunStrategy::SpawnPrimary => spawn_test_subprocess(
+ id,
+ desc,
+ opts.nocapture,
+ opts.time.is_some(),
+ monitor_ch,
+ opts.time,
+ ),
+ };
+
+ // If the platform is single-threaded we're just going to run
+ // the test synchronously, regardless of the concurrency
+ // level.
+ let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_family = "wasm");
+ if concurrency == Concurrent::Yes && supports_threads {
+ let cfg = thread::Builder::new().name(name.as_slice().to_owned());
+ let mut runtest = Arc::new(Mutex::new(Some(runtest)));
+ let runtest2 = runtest.clone();
+ match cfg.spawn(move || runtest2.lock().unwrap().take().unwrap()()) {
+ Ok(handle) => Some(handle),
+ Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
+ // `ErrorKind::WouldBlock` means hitting the thread limit on some
+ // platforms, so run the test synchronously here instead.
+ Arc::get_mut(&mut runtest).unwrap().get_mut().unwrap().take().unwrap()();
+ None
+ }
+ Err(e) => panic!("failed to spawn thread to run test: {e}"),
+ }
+ } else {
+ runtest();
+ None
+ }
+ }
+
+ let test_run_opts =
+ TestRunOpts { strategy, nocapture: opts.nocapture, concurrency, time: opts.time_options };
+
+ match testfn {
+ DynBenchFn(benchfn) => {
+ // Benchmarks aren't expected to panic, so we run them all in-process.
+ crate::bench::benchmark(id, desc, monitor_ch, opts.nocapture, benchfn);
+ None
+ }
+ StaticBenchFn(benchfn) => {
+ // Benchmarks aren't expected to panic, so we run them all in-process.
+ crate::bench::benchmark(id, desc, monitor_ch, opts.nocapture, benchfn);
+ None
+ }
+ DynTestFn(f) => {
+ match strategy {
+ RunStrategy::InProcess => (),
+ _ => panic!("Cannot run dynamic test fn out-of-process"),
+ };
+ run_test_inner(
+ id,
+ desc,
+ monitor_ch,
+ Box::new(move || __rust_begin_short_backtrace(f)),
+ test_run_opts,
+ )
+ }
+ StaticTestFn(f) => run_test_inner(
+ id,
+ desc,
+ monitor_ch,
+ Box::new(move || __rust_begin_short_backtrace(f)),
+ test_run_opts,
+ ),
+ }
+}
+
+/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
+#[inline(never)]
+fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
+ f();
+
+ // prevent this frame from being tail-call optimised away
+ black_box(());
+}
+
+fn run_test_in_process(
+ id: TestId,
+ desc: TestDesc,
+ nocapture: bool,
+ report_time: bool,
+ testfn: Box<dyn FnOnce() + Send>,
+ monitor_ch: Sender<CompletedTest>,
+ time_opts: Option<time::TestTimeOptions>,
+) {
+ // Buffer for capturing standard I/O
+ let data = Arc::new(Mutex::new(Vec::new()));
+
+ if !nocapture {
+ io::set_output_capture(Some(data.clone()));
+ }
+
+ let start = report_time.then(Instant::now);
+ let result = catch_unwind(AssertUnwindSafe(testfn));
+ let exec_time = start.map(|start| {
+ let duration = start.elapsed();
+ TestExecTime(duration)
+ });
+
+ io::set_output_capture(None);
+
+ let test_result = match result {
+ Ok(()) => calc_result(&desc, Ok(()), &time_opts, &exec_time),
+ Err(e) => calc_result(&desc, Err(e.as_ref()), &time_opts, &exec_time),
+ };
+ let stdout = data.lock().unwrap_or_else(|e| e.into_inner()).to_vec();
+ let message = CompletedTest::new(id, desc, test_result, exec_time, stdout);
+ monitor_ch.send(message).unwrap();
+}
+
+fn spawn_test_subprocess(
+ id: TestId,
+ desc: TestDesc,
+ nocapture: bool,
+ report_time: bool,
+ monitor_ch: Sender<CompletedTest>,
+ time_opts: Option<time::TestTimeOptions>,
+) {
+ let (result, test_output, exec_time) = (|| {
+ let args = env::args().collect::<Vec<_>>();
+ let current_exe = &args[0];
+
+ let mut command = Command::new(current_exe);
+ command.env(SECONDARY_TEST_INVOKER_VAR, desc.name.as_slice());
+ if nocapture {
+ command.stdout(process::Stdio::inherit());
+ command.stderr(process::Stdio::inherit());
+ }
+
+ let start = report_time.then(Instant::now);
+ let output = match command.output() {
+ Ok(out) => out,
+ Err(e) => {
+ let err = format!("Failed to spawn {} as child for test: {:?}", args[0], e);
+ return (TrFailed, err.into_bytes(), None);
+ }
+ };
+ let exec_time = start.map(|start| {
+ let duration = start.elapsed();
+ TestExecTime(duration)
+ });
+
+ let std::process::Output { stdout, stderr, status } = output;
+ let mut test_output = stdout;
+ formatters::write_stderr_delimiter(&mut test_output, &desc.name);
+ test_output.extend_from_slice(&stderr);
+
+ let result = match (|| -> Result<TestResult, String> {
+ let exit_code = get_exit_code(status)?;
+ Ok(get_result_from_exit_code(&desc, exit_code, &time_opts, &exec_time))
+ })() {
+ Ok(r) => r,
+ Err(e) => {
+ write!(&mut test_output, "Unexpected error: {}", e).unwrap();
+ TrFailed
+ }
+ };
+
+ (result, test_output, exec_time)
+ })();
+
+ let message = CompletedTest::new(id, desc, result, exec_time, test_output);
+ monitor_ch.send(message).unwrap();
+}
+
+fn run_test_in_spawned_subprocess(desc: TestDesc, testfn: Box<dyn FnOnce() + Send>) -> ! {
+ let builtin_panic_hook = panic::take_hook();
+ let record_result = Arc::new(move |panic_info: Option<&'_ PanicInfo<'_>>| {
+ let test_result = match panic_info {
+ Some(info) => calc_result(&desc, Err(info.payload()), &None, &None),
+ None => calc_result(&desc, Ok(()), &None, &None),
+ };
+
+ // We don't support serializing TrFailedMsg, so just
+ // print the message out to stderr.
+ if let TrFailedMsg(msg) = &test_result {
+ eprintln!("{msg}");
+ }
+
+ if let Some(info) = panic_info {
+ builtin_panic_hook(info);
+ }
+
+ if let TrOk = test_result {
+ process::exit(test_result::TR_OK);
+ } else {
+ process::exit(test_result::TR_FAILED);
+ }
+ });
+ let record_result2 = record_result.clone();
+ panic::set_hook(Box::new(move |info| record_result2(Some(&info))));
+ testfn();
+ record_result(None);
+ unreachable!("panic=abort callback should have exited the process")
+}
diff --git a/library/test/src/options.rs b/library/test/src/options.rs
new file mode 100644
index 000000000..baf36b5f1
--- /dev/null
+++ b/library/test/src/options.rs
@@ -0,0 +1,89 @@
+//! Enums denoting options for test execution.
+
+/// Whether to execute tests concurrently or not
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum Concurrent {
+ Yes,
+ No,
+}
+
+/// Number of times to run a benchmarked function
+#[derive(Clone, PartialEq, Eq)]
+pub enum BenchMode {
+ Auto,
+ Single,
+}
+
+/// Whether test is expected to panic or not
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub enum ShouldPanic {
+ No,
+ Yes,
+ YesWithMessage(&'static str),
+}
+
+/// Whether should console output be colored or not
+#[derive(Copy, Clone, Debug)]
+pub enum ColorConfig {
+ AutoColor,
+ AlwaysColor,
+ NeverColor,
+}
+
+/// Format of the test results output
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum OutputFormat {
+ /// Verbose output
+ Pretty,
+ /// Quiet output
+ Terse,
+ /// JSON output
+ Json,
+ /// JUnit output
+ Junit,
+}
+
+/// Whether ignored test should be run or not
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum RunIgnored {
+ Yes,
+ No,
+ /// Run only ignored tests
+ Only,
+}
+
+#[derive(Clone, Copy)]
+pub enum RunStrategy {
+ /// Runs the test in the current process, and sends the result back over the
+ /// supplied channel.
+ InProcess,
+
+ /// Spawns a subprocess to run the test, and sends the result back over the
+ /// supplied channel. Requires `argv[0]` to exist and point to the binary
+ /// that's currently running.
+ SpawnPrimary,
+}
+
+/// Options for the test run defined by the caller (instead of CLI arguments).
+/// In case we want to add other options as well, just add them in this struct.
+#[derive(Copy, Clone, Debug)]
+pub struct Options {
+ pub display_output: bool,
+ pub panic_abort: bool,
+}
+
+impl Options {
+ pub fn new() -> Options {
+ Options { display_output: false, panic_abort: false }
+ }
+
+ pub fn display_output(mut self, display_output: bool) -> Options {
+ self.display_output = display_output;
+ self
+ }
+
+ pub fn panic_abort(mut self, panic_abort: bool) -> Options {
+ self.panic_abort = panic_abort;
+ self
+ }
+}
diff --git a/library/test/src/stats.rs b/library/test/src/stats.rs
new file mode 100644
index 000000000..40b05704b
--- /dev/null
+++ b/library/test/src/stats.rs
@@ -0,0 +1,302 @@
+#![allow(missing_docs)]
+
+use std::mem;
+
+#[cfg(test)]
+mod tests;
+
+fn local_sort(v: &mut [f64]) {
+ v.sort_by(|x: &f64, y: &f64| x.total_cmp(y));
+}
+
+/// Trait that provides simple descriptive statistics on a univariate set of numeric samples.
+pub trait Stats {
+ /// Sum of the samples.
+ ///
+ /// Note: this method sacrifices performance at the altar of accuracy
+ /// Depends on IEEE-754 arithmetic guarantees. See proof of correctness at:
+ /// ["Adaptive Precision Floating-Point Arithmetic and Fast Robust Geometric
+ /// Predicates"][paper]
+ ///
+ /// [paper]: https://www.cs.cmu.edu/~quake-papers/robust-arithmetic.ps
+ fn sum(&self) -> f64;
+
+ /// Minimum value of the samples.
+ fn min(&self) -> f64;
+
+ /// Maximum value of the samples.
+ fn max(&self) -> f64;
+
+ /// Arithmetic mean (average) of the samples: sum divided by sample-count.
+ ///
+ /// See: <https://en.wikipedia.org/wiki/Arithmetic_mean>
+ fn mean(&self) -> f64;
+
+ /// Median of the samples: value separating the lower half of the samples from the higher half.
+ /// Equal to `self.percentile(50.0)`.
+ ///
+ /// See: <https://en.wikipedia.org/wiki/Median>
+ fn median(&self) -> f64;
+
+ /// Variance of the samples: bias-corrected mean of the squares of the differences of each
+ /// sample from the sample mean. Note that this calculates the _sample variance_ rather than the
+ /// population variance, which is assumed to be unknown. It therefore corrects the `(n-1)/n`
+ /// bias that would appear if we calculated a population variance, by dividing by `(n-1)` rather
+ /// than `n`.
+ ///
+ /// See: <https://en.wikipedia.org/wiki/Variance>
+ fn var(&self) -> f64;
+
+ /// Standard deviation: the square root of the sample variance.
+ ///
+ /// Note: this is not a robust statistic for non-normal distributions. Prefer the
+ /// `median_abs_dev` for unknown distributions.
+ ///
+ /// See: <https://en.wikipedia.org/wiki/Standard_deviation>
+ fn std_dev(&self) -> f64;
+
+ /// Standard deviation as a percent of the mean value. See `std_dev` and `mean`.
+ ///
+ /// Note: this is not a robust statistic for non-normal distributions. Prefer the
+ /// `median_abs_dev_pct` for unknown distributions.
+ fn std_dev_pct(&self) -> f64;
+
+ /// Scaled median of the absolute deviations of each sample from the sample median. This is a
+ /// robust (distribution-agnostic) estimator of sample variability. Use this in preference to
+ /// `std_dev` if you cannot assume your sample is normally distributed. Note that this is scaled
+ /// by the constant `1.4826` to allow its use as a consistent estimator for the standard
+ /// deviation.
+ ///
+ /// See: <https://en.wikipedia.org/wiki/Median_absolute_deviation>
+ fn median_abs_dev(&self) -> f64;
+
+ /// Median absolute deviation as a percent of the median. See `median_abs_dev` and `median`.
+ fn median_abs_dev_pct(&self) -> f64;
+
+ /// Percentile: the value below which `pct` percent of the values in `self` fall. For example,
+ /// percentile(95.0) will return the value `v` such that 95% of the samples `s` in `self`
+ /// satisfy `s <= v`.
+ ///
+ /// Calculated by linear interpolation between closest ranks.
+ ///
+ /// See: <https://en.wikipedia.org/wiki/Percentile>
+ fn percentile(&self, pct: f64) -> f64;
+
+ /// Quartiles of the sample: three values that divide the sample into four equal groups, each
+ /// with 1/4 of the data. The middle value is the median. See `median` and `percentile`. This
+ /// function may calculate the 3 quartiles more efficiently than 3 calls to `percentile`, but
+ /// is otherwise equivalent.
+ ///
+ /// See also: <https://en.wikipedia.org/wiki/Quartile>
+ fn quartiles(&self) -> (f64, f64, f64);
+
+ /// Inter-quartile range: the difference between the 25th percentile (1st quartile) and the 75th
+ /// percentile (3rd quartile). See `quartiles`.
+ ///
+ /// See also: <https://en.wikipedia.org/wiki/Interquartile_range>
+ fn iqr(&self) -> f64;
+}
+
+/// Extracted collection of all the summary statistics of a sample set.
+#[derive(Debug, Clone, PartialEq, Copy)]
+#[allow(missing_docs)]
+pub struct Summary {
+ pub sum: f64,
+ pub min: f64,
+ pub max: f64,
+ pub mean: f64,
+ pub median: f64,
+ pub var: f64,
+ pub std_dev: f64,
+ pub std_dev_pct: f64,
+ pub median_abs_dev: f64,
+ pub median_abs_dev_pct: f64,
+ pub quartiles: (f64, f64, f64),
+ pub iqr: f64,
+}
+
+impl Summary {
+ /// Construct a new summary of a sample set.
+ pub fn new(samples: &[f64]) -> Summary {
+ Summary {
+ sum: samples.sum(),
+ min: samples.min(),
+ max: samples.max(),
+ mean: samples.mean(),
+ median: samples.median(),
+ var: samples.var(),
+ std_dev: samples.std_dev(),
+ std_dev_pct: samples.std_dev_pct(),
+ median_abs_dev: samples.median_abs_dev(),
+ median_abs_dev_pct: samples.median_abs_dev_pct(),
+ quartiles: samples.quartiles(),
+ iqr: samples.iqr(),
+ }
+ }
+}
+
+impl Stats for [f64] {
+ // FIXME #11059 handle NaN, inf and overflow
+ fn sum(&self) -> f64 {
+ let mut partials = vec![];
+
+ for &x in self {
+ let mut x = x;
+ let mut j = 0;
+ // This inner loop applies `hi`/`lo` summation to each
+ // partial so that the list of partial sums remains exact.
+ for i in 0..partials.len() {
+ let mut y: f64 = partials[i];
+ if x.abs() < y.abs() {
+ mem::swap(&mut x, &mut y);
+ }
+ // Rounded `x+y` is stored in `hi` with round-off stored in
+ // `lo`. Together `hi+lo` are exactly equal to `x+y`.
+ let hi = x + y;
+ let lo = y - (hi - x);
+ if lo != 0.0 {
+ partials[j] = lo;
+ j += 1;
+ }
+ x = hi;
+ }
+ if j >= partials.len() {
+ partials.push(x);
+ } else {
+ partials[j] = x;
+ partials.truncate(j + 1);
+ }
+ }
+ let zero: f64 = 0.0;
+ partials.iter().fold(zero, |p, q| p + *q)
+ }
+
+ fn min(&self) -> f64 {
+ assert!(!self.is_empty());
+ self.iter().fold(self[0], |p, q| p.min(*q))
+ }
+
+ fn max(&self) -> f64 {
+ assert!(!self.is_empty());
+ self.iter().fold(self[0], |p, q| p.max(*q))
+ }
+
+ fn mean(&self) -> f64 {
+ assert!(!self.is_empty());
+ self.sum() / (self.len() as f64)
+ }
+
+ fn median(&self) -> f64 {
+ self.percentile(50_f64)
+ }
+
+ fn var(&self) -> f64 {
+ if self.len() < 2 {
+ 0.0
+ } else {
+ let mean = self.mean();
+ let mut v: f64 = 0.0;
+ for s in self {
+ let x = *s - mean;
+ v += x * x;
+ }
+ // N.B., this is _supposed to be_ len-1, not len. If you
+ // change it back to len, you will be calculating a
+ // population variance, not a sample variance.
+ let denom = (self.len() - 1) as f64;
+ v / denom
+ }
+ }
+
+ fn std_dev(&self) -> f64 {
+ self.var().sqrt()
+ }
+
+ fn std_dev_pct(&self) -> f64 {
+ let hundred = 100_f64;
+ (self.std_dev() / self.mean()) * hundred
+ }
+
+ fn median_abs_dev(&self) -> f64 {
+ let med = self.median();
+ let abs_devs: Vec<f64> = self.iter().map(|&v| (med - v).abs()).collect();
+ // This constant is derived by smarter statistics brains than me, but it is
+ // consistent with how R and other packages treat the MAD.
+ let number = 1.4826;
+ abs_devs.median() * number
+ }
+
+ fn median_abs_dev_pct(&self) -> f64 {
+ let hundred = 100_f64;
+ (self.median_abs_dev() / self.median()) * hundred
+ }
+
+ fn percentile(&self, pct: f64) -> f64 {
+ let mut tmp = self.to_vec();
+ local_sort(&mut tmp);
+ percentile_of_sorted(&tmp, pct)
+ }
+
+ fn quartiles(&self) -> (f64, f64, f64) {
+ let mut tmp = self.to_vec();
+ local_sort(&mut tmp);
+ let first = 25_f64;
+ let a = percentile_of_sorted(&tmp, first);
+ let second = 50_f64;
+ let b = percentile_of_sorted(&tmp, second);
+ let third = 75_f64;
+ let c = percentile_of_sorted(&tmp, third);
+ (a, b, c)
+ }
+
+ fn iqr(&self) -> f64 {
+ let (a, _, c) = self.quartiles();
+ c - a
+ }
+}
+
+// Helper function: extract a value representing the `pct` percentile of a sorted sample-set, using
+// linear interpolation. If samples are not sorted, return nonsensical value.
+fn percentile_of_sorted(sorted_samples: &[f64], pct: f64) -> f64 {
+ assert!(!sorted_samples.is_empty());
+ if sorted_samples.len() == 1 {
+ return sorted_samples[0];
+ }
+ let zero: f64 = 0.0;
+ assert!(zero <= pct);
+ let hundred = 100_f64;
+ assert!(pct <= hundred);
+ if pct == hundred {
+ return sorted_samples[sorted_samples.len() - 1];
+ }
+ let length = (sorted_samples.len() - 1) as f64;
+ let rank = (pct / hundred) * length;
+ let lrank = rank.floor();
+ let d = rank - lrank;
+ let n = lrank as usize;
+ let lo = sorted_samples[n];
+ let hi = sorted_samples[n + 1];
+ lo + (hi - lo) * d
+}
+
+/// Winsorize a set of samples, replacing values above the `100-pct` percentile
+/// and below the `pct` percentile with those percentiles themselves. This is a
+/// way of minimizing the effect of outliers, at the cost of biasing the sample.
+/// It differs from trimming in that it does not change the number of samples,
+/// just changes the values of those that are outliers.
+///
+/// See: <https://en.wikipedia.org/wiki/Winsorising>
+pub fn winsorize(samples: &mut [f64], pct: f64) {
+ let mut tmp = samples.to_vec();
+ local_sort(&mut tmp);
+ let lo = percentile_of_sorted(&tmp, pct);
+ let hundred = 100_f64;
+ let hi = percentile_of_sorted(&tmp, hundred - pct);
+ for samp in samples {
+ if *samp > hi {
+ *samp = hi
+ } else if *samp < lo {
+ *samp = lo
+ }
+ }
+}
diff --git a/library/test/src/stats/tests.rs b/library/test/src/stats/tests.rs
new file mode 100644
index 000000000..3a6e8401b
--- /dev/null
+++ b/library/test/src/stats/tests.rs
@@ -0,0 +1,591 @@
+use super::*;
+
+extern crate test;
+use self::test::test::Bencher;
+use std::io;
+use std::io::prelude::*;
+
+// Test vectors generated from R, using the script src/etc/stat-test-vectors.r.
+
+macro_rules! assert_approx_eq {
+ ($a: expr, $b: expr) => {{
+ let (a, b) = (&$a, &$b);
+ assert!((*a - *b).abs() < 1.0e-6, "{} is not approximately equal to {}", *a, *b);
+ }};
+}
+
+fn check(samples: &[f64], summ: &Summary) {
+ let summ2 = Summary::new(samples);
+
+ let mut w = io::sink();
+ let w = &mut w;
+ (write!(w, "\n")).unwrap();
+
+ assert_eq!(summ.sum, summ2.sum);
+ assert_eq!(summ.min, summ2.min);
+ assert_eq!(summ.max, summ2.max);
+ assert_eq!(summ.mean, summ2.mean);
+ assert_eq!(summ.median, summ2.median);
+
+ // We needed a few more digits to get exact equality on these
+ // but they're within float epsilon, which is 1.0e-6.
+ assert_approx_eq!(summ.var, summ2.var);
+ assert_approx_eq!(summ.std_dev, summ2.std_dev);
+ assert_approx_eq!(summ.std_dev_pct, summ2.std_dev_pct);
+ assert_approx_eq!(summ.median_abs_dev, summ2.median_abs_dev);
+ assert_approx_eq!(summ.median_abs_dev_pct, summ2.median_abs_dev_pct);
+
+ assert_eq!(summ.quartiles, summ2.quartiles);
+ assert_eq!(summ.iqr, summ2.iqr);
+}
+
+#[test]
+fn test_min_max_nan() {
+ let xs = &[1.0, 2.0, f64::NAN, 3.0, 4.0];
+ let summary = Summary::new(xs);
+ assert_eq!(summary.min, 1.0);
+ assert_eq!(summary.max, 4.0);
+}
+
+#[test]
+fn test_norm2() {
+ let val = &[958.0000000000, 924.0000000000];
+ let summ = &Summary {
+ sum: 1882.0000000000,
+ min: 924.0000000000,
+ max: 958.0000000000,
+ mean: 941.0000000000,
+ median: 941.0000000000,
+ var: 578.0000000000,
+ std_dev: 24.0416305603,
+ std_dev_pct: 2.5549022912,
+ median_abs_dev: 25.2042000000,
+ median_abs_dev_pct: 2.6784484591,
+ quartiles: (932.5000000000, 941.0000000000, 949.5000000000),
+ iqr: 17.0000000000,
+ };
+ check(val, summ);
+}
+#[test]
+fn test_norm10narrow() {
+ let val = &[
+ 966.0000000000,
+ 985.0000000000,
+ 1110.0000000000,
+ 848.0000000000,
+ 821.0000000000,
+ 975.0000000000,
+ 962.0000000000,
+ 1157.0000000000,
+ 1217.0000000000,
+ 955.0000000000,
+ ];
+ let summ = &Summary {
+ sum: 9996.0000000000,
+ min: 821.0000000000,
+ max: 1217.0000000000,
+ mean: 999.6000000000,
+ median: 970.5000000000,
+ var: 16050.7111111111,
+ std_dev: 126.6914010938,
+ std_dev_pct: 12.6742097933,
+ median_abs_dev: 102.2994000000,
+ median_abs_dev_pct: 10.5408964451,
+ quartiles: (956.7500000000, 970.5000000000, 1078.7500000000),
+ iqr: 122.0000000000,
+ };
+ check(val, summ);
+}
+#[test]
+fn test_norm10medium() {
+ let val = &[
+ 954.0000000000,
+ 1064.0000000000,
+ 855.0000000000,
+ 1000.0000000000,
+ 743.0000000000,
+ 1084.0000000000,
+ 704.0000000000,
+ 1023.0000000000,
+ 357.0000000000,
+ 869.0000000000,
+ ];
+ let summ = &Summary {
+ sum: 8653.0000000000,
+ min: 357.0000000000,
+ max: 1084.0000000000,
+ mean: 865.3000000000,
+ median: 911.5000000000,
+ var: 48628.4555555556,
+ std_dev: 220.5186059170,
+ std_dev_pct: 25.4846418487,
+ median_abs_dev: 195.7032000000,
+ median_abs_dev_pct: 21.4704552935,
+ quartiles: (771.0000000000, 911.5000000000, 1017.2500000000),
+ iqr: 246.2500000000,
+ };
+ check(val, summ);
+}
+#[test]
+fn test_norm10wide() {
+ let val = &[
+ 505.0000000000,
+ 497.0000000000,
+ 1591.0000000000,
+ 887.0000000000,
+ 1026.0000000000,
+ 136.0000000000,
+ 1580.0000000000,
+ 940.0000000000,
+ 754.0000000000,
+ 1433.0000000000,
+ ];
+ let summ = &Summary {
+ sum: 9349.0000000000,
+ min: 136.0000000000,
+ max: 1591.0000000000,
+ mean: 934.9000000000,
+ median: 913.5000000000,
+ var: 239208.9888888889,
+ std_dev: 489.0899599142,
+ std_dev_pct: 52.3146817750,
+ median_abs_dev: 611.5725000000,
+ median_abs_dev_pct: 66.9482758621,
+ quartiles: (567.2500000000, 913.5000000000, 1331.2500000000),
+ iqr: 764.0000000000,
+ };
+ check(val, summ);
+}
+#[test]
+fn test_norm25verynarrow() {
+ let val = &[
+ 991.0000000000,
+ 1018.0000000000,
+ 998.0000000000,
+ 1013.0000000000,
+ 974.0000000000,
+ 1007.0000000000,
+ 1014.0000000000,
+ 999.0000000000,
+ 1011.0000000000,
+ 978.0000000000,
+ 985.0000000000,
+ 999.0000000000,
+ 983.0000000000,
+ 982.0000000000,
+ 1015.0000000000,
+ 1002.0000000000,
+ 977.0000000000,
+ 948.0000000000,
+ 1040.0000000000,
+ 974.0000000000,
+ 996.0000000000,
+ 989.0000000000,
+ 1015.0000000000,
+ 994.0000000000,
+ 1024.0000000000,
+ ];
+ let summ = &Summary {
+ sum: 24926.0000000000,
+ min: 948.0000000000,
+ max: 1040.0000000000,
+ mean: 997.0400000000,
+ median: 998.0000000000,
+ var: 393.2066666667,
+ std_dev: 19.8294393937,
+ std_dev_pct: 1.9888308788,
+ median_abs_dev: 22.2390000000,
+ median_abs_dev_pct: 2.2283567134,
+ quartiles: (983.0000000000, 998.0000000000, 1013.0000000000),
+ iqr: 30.0000000000,
+ };
+ check(val, summ);
+}
+#[test]
+fn test_exp10a() {
+ let val = &[
+ 23.0000000000,
+ 11.0000000000,
+ 2.0000000000,
+ 57.0000000000,
+ 4.0000000000,
+ 12.0000000000,
+ 5.0000000000,
+ 29.0000000000,
+ 3.0000000000,
+ 21.0000000000,
+ ];
+ let summ = &Summary {
+ sum: 167.0000000000,
+ min: 2.0000000000,
+ max: 57.0000000000,
+ mean: 16.7000000000,
+ median: 11.5000000000,
+ var: 287.7888888889,
+ std_dev: 16.9643416875,
+ std_dev_pct: 101.5828843560,
+ median_abs_dev: 13.3434000000,
+ median_abs_dev_pct: 116.0295652174,
+ quartiles: (4.2500000000, 11.5000000000, 22.5000000000),
+ iqr: 18.2500000000,
+ };
+ check(val, summ);
+}
+#[test]
+fn test_exp10b() {
+ let val = &[
+ 24.0000000000,
+ 17.0000000000,
+ 6.0000000000,
+ 38.0000000000,
+ 25.0000000000,
+ 7.0000000000,
+ 51.0000000000,
+ 2.0000000000,
+ 61.0000000000,
+ 32.0000000000,
+ ];
+ let summ = &Summary {
+ sum: 263.0000000000,
+ min: 2.0000000000,
+ max: 61.0000000000,
+ mean: 26.3000000000,
+ median: 24.5000000000,
+ var: 383.5666666667,
+ std_dev: 19.5848580967,
+ std_dev_pct: 74.4671410520,
+ median_abs_dev: 22.9803000000,
+ median_abs_dev_pct: 93.7971428571,
+ quartiles: (9.5000000000, 24.5000000000, 36.5000000000),
+ iqr: 27.0000000000,
+ };
+ check(val, summ);
+}
+#[test]
+fn test_exp10c() {
+ let val = &[
+ 71.0000000000,
+ 2.0000000000,
+ 32.0000000000,
+ 1.0000000000,
+ 6.0000000000,
+ 28.0000000000,
+ 13.0000000000,
+ 37.0000000000,
+ 16.0000000000,
+ 36.0000000000,
+ ];
+ let summ = &Summary {
+ sum: 242.0000000000,
+ min: 1.0000000000,
+ max: 71.0000000000,
+ mean: 24.2000000000,
+ median: 22.0000000000,
+ var: 458.1777777778,
+ std_dev: 21.4050876611,
+ std_dev_pct: 88.4507754589,
+ median_abs_dev: 21.4977000000,
+ median_abs_dev_pct: 97.7168181818,
+ quartiles: (7.7500000000, 22.0000000000, 35.0000000000),
+ iqr: 27.2500000000,
+ };
+ check(val, summ);
+}
+#[test]
+fn test_exp25() {
+ let val = &[
+ 3.0000000000,
+ 24.0000000000,
+ 1.0000000000,
+ 19.0000000000,
+ 7.0000000000,
+ 5.0000000000,
+ 30.0000000000,
+ 39.0000000000,
+ 31.0000000000,
+ 13.0000000000,
+ 25.0000000000,
+ 48.0000000000,
+ 1.0000000000,
+ 6.0000000000,
+ 42.0000000000,
+ 63.0000000000,
+ 2.0000000000,
+ 12.0000000000,
+ 108.0000000000,
+ 26.0000000000,
+ 1.0000000000,
+ 7.0000000000,
+ 44.0000000000,
+ 25.0000000000,
+ 11.0000000000,
+ ];
+ let summ = &Summary {
+ sum: 593.0000000000,
+ min: 1.0000000000,
+ max: 108.0000000000,
+ mean: 23.7200000000,
+ median: 19.0000000000,
+ var: 601.0433333333,
+ std_dev: 24.5161851301,
+ std_dev_pct: 103.3565983562,
+ median_abs_dev: 19.2738000000,
+ median_abs_dev_pct: 101.4410526316,
+ quartiles: (6.0000000000, 19.0000000000, 31.0000000000),
+ iqr: 25.0000000000,
+ };
+ check(val, summ);
+}
+#[test]
+fn test_binom25() {
+ let val = &[
+ 18.0000000000,
+ 17.0000000000,
+ 27.0000000000,
+ 15.0000000000,
+ 21.0000000000,
+ 25.0000000000,
+ 17.0000000000,
+ 24.0000000000,
+ 25.0000000000,
+ 24.0000000000,
+ 26.0000000000,
+ 26.0000000000,
+ 23.0000000000,
+ 15.0000000000,
+ 23.0000000000,
+ 17.0000000000,
+ 18.0000000000,
+ 18.0000000000,
+ 21.0000000000,
+ 16.0000000000,
+ 15.0000000000,
+ 31.0000000000,
+ 20.0000000000,
+ 17.0000000000,
+ 15.0000000000,
+ ];
+ let summ = &Summary {
+ sum: 514.0000000000,
+ min: 15.0000000000,
+ max: 31.0000000000,
+ mean: 20.5600000000,
+ median: 20.0000000000,
+ var: 20.8400000000,
+ std_dev: 4.5650848842,
+ std_dev_pct: 22.2037202539,
+ median_abs_dev: 5.9304000000,
+ median_abs_dev_pct: 29.6520000000,
+ quartiles: (17.0000000000, 20.0000000000, 24.0000000000),
+ iqr: 7.0000000000,
+ };
+ check(val, summ);
+}
+#[test]
+fn test_pois25lambda30() {
+ let val = &[
+ 27.0000000000,
+ 33.0000000000,
+ 34.0000000000,
+ 34.0000000000,
+ 24.0000000000,
+ 39.0000000000,
+ 28.0000000000,
+ 27.0000000000,
+ 31.0000000000,
+ 28.0000000000,
+ 38.0000000000,
+ 21.0000000000,
+ 33.0000000000,
+ 36.0000000000,
+ 29.0000000000,
+ 37.0000000000,
+ 32.0000000000,
+ 34.0000000000,
+ 31.0000000000,
+ 39.0000000000,
+ 25.0000000000,
+ 31.0000000000,
+ 32.0000000000,
+ 40.0000000000,
+ 24.0000000000,
+ ];
+ let summ = &Summary {
+ sum: 787.0000000000,
+ min: 21.0000000000,
+ max: 40.0000000000,
+ mean: 31.4800000000,
+ median: 32.0000000000,
+ var: 26.5933333333,
+ std_dev: 5.1568724372,
+ std_dev_pct: 16.3814245145,
+ median_abs_dev: 5.9304000000,
+ median_abs_dev_pct: 18.5325000000,
+ quartiles: (28.0000000000, 32.0000000000, 34.0000000000),
+ iqr: 6.0000000000,
+ };
+ check(val, summ);
+}
+#[test]
+fn test_pois25lambda40() {
+ let val = &[
+ 42.0000000000,
+ 50.0000000000,
+ 42.0000000000,
+ 46.0000000000,
+ 34.0000000000,
+ 45.0000000000,
+ 34.0000000000,
+ 49.0000000000,
+ 39.0000000000,
+ 28.0000000000,
+ 40.0000000000,
+ 35.0000000000,
+ 37.0000000000,
+ 39.0000000000,
+ 46.0000000000,
+ 44.0000000000,
+ 32.0000000000,
+ 45.0000000000,
+ 42.0000000000,
+ 37.0000000000,
+ 48.0000000000,
+ 42.0000000000,
+ 33.0000000000,
+ 42.0000000000,
+ 48.0000000000,
+ ];
+ let summ = &Summary {
+ sum: 1019.0000000000,
+ min: 28.0000000000,
+ max: 50.0000000000,
+ mean: 40.7600000000,
+ median: 42.0000000000,
+ var: 34.4400000000,
+ std_dev: 5.8685603004,
+ std_dev_pct: 14.3978417577,
+ median_abs_dev: 5.9304000000,
+ median_abs_dev_pct: 14.1200000000,
+ quartiles: (37.0000000000, 42.0000000000, 45.0000000000),
+ iqr: 8.0000000000,
+ };
+ check(val, summ);
+}
+#[test]
+fn test_pois25lambda50() {
+ let val = &[
+ 45.0000000000,
+ 43.0000000000,
+ 44.0000000000,
+ 61.0000000000,
+ 51.0000000000,
+ 53.0000000000,
+ 59.0000000000,
+ 52.0000000000,
+ 49.0000000000,
+ 51.0000000000,
+ 51.0000000000,
+ 50.0000000000,
+ 49.0000000000,
+ 56.0000000000,
+ 42.0000000000,
+ 52.0000000000,
+ 51.0000000000,
+ 43.0000000000,
+ 48.0000000000,
+ 48.0000000000,
+ 50.0000000000,
+ 42.0000000000,
+ 43.0000000000,
+ 42.0000000000,
+ 60.0000000000,
+ ];
+ let summ = &Summary {
+ sum: 1235.0000000000,
+ min: 42.0000000000,
+ max: 61.0000000000,
+ mean: 49.4000000000,
+ median: 50.0000000000,
+ var: 31.6666666667,
+ std_dev: 5.6273143387,
+ std_dev_pct: 11.3913245723,
+ median_abs_dev: 4.4478000000,
+ median_abs_dev_pct: 8.8956000000,
+ quartiles: (44.0000000000, 50.0000000000, 52.0000000000),
+ iqr: 8.0000000000,
+ };
+ check(val, summ);
+}
+#[test]
+fn test_unif25() {
+ let val = &[
+ 99.0000000000,
+ 55.0000000000,
+ 92.0000000000,
+ 79.0000000000,
+ 14.0000000000,
+ 2.0000000000,
+ 33.0000000000,
+ 49.0000000000,
+ 3.0000000000,
+ 32.0000000000,
+ 84.0000000000,
+ 59.0000000000,
+ 22.0000000000,
+ 86.0000000000,
+ 76.0000000000,
+ 31.0000000000,
+ 29.0000000000,
+ 11.0000000000,
+ 41.0000000000,
+ 53.0000000000,
+ 45.0000000000,
+ 44.0000000000,
+ 98.0000000000,
+ 98.0000000000,
+ 7.0000000000,
+ ];
+ let summ = &Summary {
+ sum: 1242.0000000000,
+ min: 2.0000000000,
+ max: 99.0000000000,
+ mean: 49.6800000000,
+ median: 45.0000000000,
+ var: 1015.6433333333,
+ std_dev: 31.8691595957,
+ std_dev_pct: 64.1488719719,
+ median_abs_dev: 45.9606000000,
+ median_abs_dev_pct: 102.1346666667,
+ quartiles: (29.0000000000, 45.0000000000, 79.0000000000),
+ iqr: 50.0000000000,
+ };
+ check(val, summ);
+}
+
+#[test]
+fn test_sum_f64s() {
+ assert_eq!([0.5f64, 3.2321f64, 1.5678f64].sum(), 5.2999);
+}
+#[test]
+fn test_sum_f64_between_ints_that_sum_to_0() {
+ assert_eq!([1e30f64, 1.2f64, -1e30f64].sum(), 1.2);
+}
+
+#[bench]
+pub fn sum_three_items(b: &mut Bencher) {
+ b.iter(|| {
+ [1e20f64, 1.5f64, -1e20f64].sum();
+ })
+}
+#[bench]
+pub fn sum_many_f64(b: &mut Bencher) {
+ let nums = [-1e30f64, 1e60, 1e30, 1.0, -1e60];
+ let v = (0..500).map(|i| nums[i % 5]).collect::<Vec<_>>();
+
+ b.iter(|| {
+ v.sum();
+ })
+}
+
+#[bench]
+pub fn no_iter(_: &mut Bencher) {}
diff --git a/library/test/src/term.rs b/library/test/src/term.rs
new file mode 100644
index 000000000..b256ab7b8
--- /dev/null
+++ b/library/test/src/term.rs
@@ -0,0 +1,85 @@
+//! Terminal formatting module.
+//!
+//! This module provides the `Terminal` trait, which abstracts over an [ANSI
+//! Terminal][ansi] to provide color printing, among other things. There are two
+//! implementations, the `TerminfoTerminal`, which uses control characters from
+//! a [terminfo][ti] database, and `WinConsole`, which uses the [Win32 Console
+//! API][win].
+//!
+//! [ansi]: https://en.wikipedia.org/wiki/ANSI_escape_code
+//! [win]: https://docs.microsoft.com/en-us/windows/console/character-mode-applications
+//! [ti]: https://en.wikipedia.org/wiki/Terminfo
+
+#![deny(missing_docs)]
+
+use std::io::{self, prelude::*};
+
+pub(crate) use terminfo::TerminfoTerminal;
+#[cfg(windows)]
+pub(crate) use win::WinConsole;
+
+pub(crate) mod terminfo;
+
+#[cfg(windows)]
+mod win;
+
+/// Alias for stdout terminals.
+pub(crate) type StdoutTerminal = dyn Terminal + Send;
+
+#[cfg(not(windows))]
+/// Returns a Terminal wrapping stdout, or None if a terminal couldn't be
+/// opened.
+pub(crate) fn stdout() -> Option<Box<StdoutTerminal>> {
+ TerminfoTerminal::new(io::stdout()).map(|t| Box::new(t) as Box<StdoutTerminal>)
+}
+
+#[cfg(windows)]
+/// Returns a Terminal wrapping stdout, or None if a terminal couldn't be
+/// opened.
+pub(crate) fn stdout() -> Option<Box<StdoutTerminal>> {
+ TerminfoTerminal::new(io::stdout())
+ .map(|t| Box::new(t) as Box<StdoutTerminal>)
+ .or_else(|| WinConsole::new(io::stdout()).ok().map(|t| Box::new(t) as Box<StdoutTerminal>))
+}
+
+/// Terminal color definitions
+#[allow(missing_docs)]
+#[cfg_attr(not(windows), allow(dead_code))]
+pub(crate) mod color {
+ /// Number for a terminal color
+ pub(crate) type Color = u32;
+
+ pub(crate) const BLACK: Color = 0;
+ pub(crate) const RED: Color = 1;
+ pub(crate) const GREEN: Color = 2;
+ pub(crate) const YELLOW: Color = 3;
+ pub(crate) const BLUE: Color = 4;
+ pub(crate) const MAGENTA: Color = 5;
+ pub(crate) const CYAN: Color = 6;
+ pub(crate) const WHITE: Color = 7;
+}
+
+/// A terminal with similar capabilities to an ANSI Terminal
+/// (foreground/background colors etc).
+pub trait Terminal: Write {
+ /// Sets the foreground color to the given color.
+ ///
+ /// If the color is a bright color, but the terminal only supports 8 colors,
+ /// the corresponding normal color will be used instead.
+ ///
+ /// Returns `Ok(true)` if the color was set, `Ok(false)` otherwise, and `Err(e)`
+ /// if there was an I/O error.
+ fn fg(&mut self, color: color::Color) -> io::Result<bool>;
+
+ /// Resets all terminal attributes and colors to their defaults.
+ ///
+ /// Returns `Ok(true)` if the terminal was reset, `Ok(false)` otherwise, and `Err(e)` if there
+ /// was an I/O error.
+ ///
+ /// *Note: This does not flush.*
+ ///
+ /// That means the reset command may get buffered so, if you aren't planning on doing anything
+ /// else that might flush stdout's buffer (e.g., writing a line of text), you should flush after
+ /// calling reset.
+ fn reset(&mut self) -> io::Result<bool>;
+}
diff --git a/library/test/src/term/terminfo/mod.rs b/library/test/src/term/terminfo/mod.rs
new file mode 100644
index 000000000..694473f52
--- /dev/null
+++ b/library/test/src/term/terminfo/mod.rs
@@ -0,0 +1,185 @@
+//! Terminfo database interface.
+
+use std::collections::HashMap;
+use std::env;
+use std::error;
+use std::fmt;
+use std::fs::File;
+use std::io::{self, prelude::*, BufReader};
+use std::path::Path;
+
+use super::color;
+use super::Terminal;
+
+use parm::{expand, Param, Variables};
+use parser::compiled::{msys_terminfo, parse};
+use searcher::get_dbpath_for_term;
+
+/// A parsed terminfo database entry.
+#[allow(unused)]
+#[derive(Debug)]
+pub(crate) struct TermInfo {
+ /// Names for the terminal
+ pub(crate) names: Vec<String>,
+ /// Map of capability name to boolean value
+ pub(crate) bools: HashMap<String, bool>,
+ /// Map of capability name to numeric value
+ pub(crate) numbers: HashMap<String, u32>,
+ /// Map of capability name to raw (unexpanded) string
+ pub(crate) strings: HashMap<String, Vec<u8>>,
+}
+
+/// A terminfo creation error.
+#[derive(Debug)]
+pub(crate) enum Error {
+ /// TermUnset Indicates that the environment doesn't include enough information to find
+ /// the terminfo entry.
+ TermUnset,
+ /// MalformedTerminfo indicates that parsing the terminfo entry failed.
+ MalformedTerminfo(String),
+ /// io::Error forwards any io::Errors encountered when finding or reading the terminfo entry.
+ IoError(io::Error),
+}
+
+impl error::Error for Error {
+ fn source(&self) -> Option<&(dyn error::Error + 'static)> {
+ use Error::*;
+ match self {
+ IoError(e) => Some(e),
+ _ => None,
+ }
+ }
+}
+
+impl fmt::Display for Error {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use Error::*;
+ match *self {
+ TermUnset => Ok(()),
+ MalformedTerminfo(ref e) => e.fmt(f),
+ IoError(ref e) => e.fmt(f),
+ }
+ }
+}
+
+impl TermInfo {
+ /// Creates a TermInfo based on current environment.
+ pub(crate) fn from_env() -> Result<TermInfo, Error> {
+ let term = match env::var("TERM") {
+ Ok(name) => TermInfo::from_name(&name),
+ Err(..) => return Err(Error::TermUnset),
+ };
+
+ if term.is_err() && env::var("MSYSCON").map_or(false, |s| "mintty.exe" == s) {
+ // msys terminal
+ Ok(msys_terminfo())
+ } else {
+ term
+ }
+ }
+
+ /// Creates a TermInfo for the named terminal.
+ pub(crate) fn from_name(name: &str) -> Result<TermInfo, Error> {
+ get_dbpath_for_term(name)
+ .ok_or_else(|| {
+ Error::IoError(io::Error::new(io::ErrorKind::NotFound, "terminfo file not found"))
+ })
+ .and_then(|p| TermInfo::from_path(&(*p)))
+ }
+
+ /// Parse the given TermInfo.
+ pub(crate) fn from_path<P: AsRef<Path>>(path: P) -> Result<TermInfo, Error> {
+ Self::_from_path(path.as_ref())
+ }
+ // Keep the metadata small
+ fn _from_path(path: &Path) -> Result<TermInfo, Error> {
+ let file = File::open(path).map_err(Error::IoError)?;
+ let mut reader = BufReader::new(file);
+ parse(&mut reader, false).map_err(Error::MalformedTerminfo)
+ }
+}
+
+pub(crate) mod searcher;
+
+/// TermInfo format parsing.
+pub(crate) mod parser {
+ //! ncurses-compatible compiled terminfo format parsing (term(5))
+ pub(crate) mod compiled;
+}
+pub(crate) mod parm;
+
+/// A Terminal that knows how many colors it supports, with a reference to its
+/// parsed Terminfo database record.
+pub(crate) struct TerminfoTerminal<T> {
+ num_colors: u32,
+ out: T,
+ ti: TermInfo,
+}
+
+impl<T: Write + Send> Terminal for TerminfoTerminal<T> {
+ fn fg(&mut self, color: color::Color) -> io::Result<bool> {
+ let color = self.dim_if_necessary(color);
+ if self.num_colors > color {
+ return self.apply_cap("setaf", &[Param::Number(color as i32)]);
+ }
+ Ok(false)
+ }
+
+ fn reset(&mut self) -> io::Result<bool> {
+ // are there any terminals that have color/attrs and not sgr0?
+ // Try falling back to sgr, then op
+ let cmd = match ["sgr0", "sgr", "op"].iter().find_map(|cap| self.ti.strings.get(*cap)) {
+ Some(op) => match expand(&op, &[], &mut Variables::new()) {
+ Ok(cmd) => cmd,
+ Err(e) => return Err(io::Error::new(io::ErrorKind::InvalidData, e)),
+ },
+ None => return Ok(false),
+ };
+ self.out.write_all(&cmd).and(Ok(true))
+ }
+}
+
+impl<T: Write + Send> TerminfoTerminal<T> {
+ /// Creates a new TerminfoTerminal with the given TermInfo and Write.
+ pub(crate) fn new_with_terminfo(out: T, terminfo: TermInfo) -> TerminfoTerminal<T> {
+ let nc = if terminfo.strings.contains_key("setaf") && terminfo.strings.contains_key("setab")
+ {
+ terminfo.numbers.get("colors").map_or(0, |&n| n)
+ } else {
+ 0
+ };
+
+ TerminfoTerminal { out, ti: terminfo, num_colors: nc }
+ }
+
+ /// Creates a new TerminfoTerminal for the current environment with the given Write.
+ ///
+ /// Returns `None` when the terminfo cannot be found or parsed.
+ pub(crate) fn new(out: T) -> Option<TerminfoTerminal<T>> {
+ TermInfo::from_env().map(move |ti| TerminfoTerminal::new_with_terminfo(out, ti)).ok()
+ }
+
+ fn dim_if_necessary(&self, color: color::Color) -> color::Color {
+ if color >= self.num_colors && color >= 8 && color < 16 { color - 8 } else { color }
+ }
+
+ fn apply_cap(&mut self, cmd: &str, params: &[Param]) -> io::Result<bool> {
+ match self.ti.strings.get(cmd) {
+ Some(cmd) => match expand(&cmd, params, &mut Variables::new()) {
+ Ok(s) => self.out.write_all(&s).and(Ok(true)),
+ Err(e) => Err(io::Error::new(io::ErrorKind::InvalidData, e)),
+ },
+ None => Ok(false),
+ }
+ }
+}
+
+impl<T: Write> Write for TerminfoTerminal<T> {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.out.write(buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.out.flush()
+ }
+}
diff --git a/library/test/src/term/terminfo/parm.rs b/library/test/src/term/terminfo/parm.rs
new file mode 100644
index 000000000..0756c8374
--- /dev/null
+++ b/library/test/src/term/terminfo/parm.rs
@@ -0,0 +1,532 @@
+//! Parameterized string expansion
+
+use self::Param::*;
+use self::States::*;
+
+use std::iter::repeat;
+
+#[cfg(test)]
+mod tests;
+
+#[derive(Clone, Copy, PartialEq)]
+enum States {
+ Nothing,
+ Percent,
+ SetVar,
+ GetVar,
+ PushParam,
+ CharConstant,
+ CharClose,
+ IntConstant(i32),
+ FormatPattern(Flags, FormatState),
+ SeekIfElse(usize),
+ SeekIfElsePercent(usize),
+ SeekIfEnd(usize),
+ SeekIfEndPercent(usize),
+}
+
+#[derive(Copy, PartialEq, Clone)]
+enum FormatState {
+ Flags,
+ Width,
+ Precision,
+}
+
+/// Types of parameters a capability can use
+#[allow(missing_docs)]
+#[derive(Clone)]
+pub(crate) enum Param {
+ Number(i32),
+}
+
+/// Container for static and dynamic variable arrays
+pub(crate) struct Variables {
+ /// Static variables A-Z
+ sta_va: [Param; 26],
+ /// Dynamic variables a-z
+ dyn_va: [Param; 26],
+}
+
+impl Variables {
+ /// Returns a new zero-initialized Variables
+ pub(crate) fn new() -> Variables {
+ Variables {
+ sta_va: [
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ ],
+ dyn_va: [
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ ],
+ }
+ }
+}
+
+/// Expand a parameterized capability
+///
+/// # Arguments
+/// * `cap` - string to expand
+/// * `params` - vector of params for %p1 etc
+/// * `vars` - Variables struct for %Pa etc
+///
+/// To be compatible with ncurses, `vars` should be the same between calls to `expand` for
+/// multiple capabilities for the same terminal.
+pub(crate) fn expand(
+ cap: &[u8],
+ params: &[Param],
+ vars: &mut Variables,
+) -> Result<Vec<u8>, String> {
+ let mut state = Nothing;
+
+ // expanded cap will only rarely be larger than the cap itself
+ let mut output = Vec::with_capacity(cap.len());
+
+ let mut stack: Vec<Param> = Vec::new();
+
+ // Copy parameters into a local vector for mutability
+ let mut mparams = [
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ Number(0),
+ ];
+ for (dst, src) in mparams.iter_mut().zip(params.iter()) {
+ *dst = (*src).clone();
+ }
+
+ for &c in cap.iter() {
+ let cur = c as char;
+ let mut old_state = state;
+ match state {
+ Nothing => {
+ if cur == '%' {
+ state = Percent;
+ } else {
+ output.push(c);
+ }
+ }
+ Percent => {
+ match cur {
+ '%' => {
+ output.push(c);
+ state = Nothing
+ }
+ 'c' => {
+ match stack.pop() {
+ // if c is 0, use 0200 (128) for ncurses compatibility
+ Some(Number(0)) => output.push(128u8),
+ // Don't check bounds. ncurses just casts and truncates.
+ Some(Number(c)) => output.push(c as u8),
+ None => return Err("stack is empty".to_string()),
+ }
+ }
+ 'p' => state = PushParam,
+ 'P' => state = SetVar,
+ 'g' => state = GetVar,
+ '\'' => state = CharConstant,
+ '{' => state = IntConstant(0),
+ 'l' => match stack.pop() {
+ Some(_) => return Err("a non-str was used with %l".to_string()),
+ None => return Err("stack is empty".to_string()),
+ },
+ '+' | '-' | '/' | '*' | '^' | '&' | '|' | 'm' => {
+ match (stack.pop(), stack.pop()) {
+ (Some(Number(y)), Some(Number(x))) => stack.push(Number(match cur {
+ '+' => x + y,
+ '-' => x - y,
+ '*' => x * y,
+ '/' => x / y,
+ '|' => x | y,
+ '&' => x & y,
+ '^' => x ^ y,
+ 'm' => x % y,
+ _ => unreachable!("All cases handled"),
+ })),
+ _ => return Err("stack is empty".to_string()),
+ }
+ }
+ '=' | '>' | '<' | 'A' | 'O' => match (stack.pop(), stack.pop()) {
+ (Some(Number(y)), Some(Number(x))) => stack.push(Number(
+ if match cur {
+ '=' => x == y,
+ '<' => x < y,
+ '>' => x > y,
+ 'A' => x > 0 && y > 0,
+ 'O' => x > 0 || y > 0,
+ _ => unreachable!(),
+ } {
+ 1
+ } else {
+ 0
+ },
+ )),
+ _ => return Err("stack is empty".to_string()),
+ },
+ '!' | '~' => match stack.pop() {
+ Some(Number(x)) => stack.push(Number(match cur {
+ '!' if x > 0 => 0,
+ '!' => 1,
+ '~' => !x,
+ _ => unreachable!(),
+ })),
+ None => return Err("stack is empty".to_string()),
+ },
+ 'i' => match (&mparams[0], &mparams[1]) {
+ (&Number(x), &Number(y)) => {
+ mparams[0] = Number(x + 1);
+ mparams[1] = Number(y + 1);
+ }
+ },
+
+ // printf-style support for %doxXs
+ 'd' | 'o' | 'x' | 'X' | 's' => {
+ if let Some(arg) = stack.pop() {
+ let flags = Flags::new();
+ let res = format(arg, FormatOp::from_char(cur), flags)?;
+ output.extend(res.iter().cloned());
+ } else {
+ return Err("stack is empty".to_string());
+ }
+ }
+ ':' | '#' | ' ' | '.' | '0'..='9' => {
+ let mut flags = Flags::new();
+ let mut fstate = FormatState::Flags;
+ match cur {
+ ':' => (),
+ '#' => flags.alternate = true,
+ ' ' => flags.space = true,
+ '.' => fstate = FormatState::Precision,
+ '0'..='9' => {
+ flags.width = cur as usize - '0' as usize;
+ fstate = FormatState::Width;
+ }
+ _ => unreachable!(),
+ }
+ state = FormatPattern(flags, fstate);
+ }
+
+ // conditionals
+ '?' => (),
+ 't' => match stack.pop() {
+ Some(Number(0)) => state = SeekIfElse(0),
+ Some(Number(_)) => (),
+ None => return Err("stack is empty".to_string()),
+ },
+ 'e' => state = SeekIfEnd(0),
+ ';' => (),
+ _ => return Err(format!("unrecognized format option {cur}")),
+ }
+ }
+ PushParam => {
+ // params are 1-indexed
+ stack.push(
+ mparams[match cur.to_digit(10) {
+ Some(d) => d as usize - 1,
+ None => return Err("bad param number".to_string()),
+ }]
+ .clone(),
+ );
+ }
+ SetVar => {
+ if cur >= 'A' && cur <= 'Z' {
+ if let Some(arg) = stack.pop() {
+ let idx = (cur as u8) - b'A';
+ vars.sta_va[idx as usize] = arg;
+ } else {
+ return Err("stack is empty".to_string());
+ }
+ } else if cur >= 'a' && cur <= 'z' {
+ if let Some(arg) = stack.pop() {
+ let idx = (cur as u8) - b'a';
+ vars.dyn_va[idx as usize] = arg;
+ } else {
+ return Err("stack is empty".to_string());
+ }
+ } else {
+ return Err("bad variable name in %P".to_string());
+ }
+ }
+ GetVar => {
+ if cur >= 'A' && cur <= 'Z' {
+ let idx = (cur as u8) - b'A';
+ stack.push(vars.sta_va[idx as usize].clone());
+ } else if cur >= 'a' && cur <= 'z' {
+ let idx = (cur as u8) - b'a';
+ stack.push(vars.dyn_va[idx as usize].clone());
+ } else {
+ return Err("bad variable name in %g".to_string());
+ }
+ }
+ CharConstant => {
+ stack.push(Number(c as i32));
+ state = CharClose;
+ }
+ CharClose => {
+ if cur != '\'' {
+ return Err("malformed character constant".to_string());
+ }
+ }
+ IntConstant(i) => {
+ if cur == '}' {
+ stack.push(Number(i));
+ state = Nothing;
+ } else if let Some(digit) = cur.to_digit(10) {
+ match i.checked_mul(10).and_then(|i_ten| i_ten.checked_add(digit as i32)) {
+ Some(i) => {
+ state = IntConstant(i);
+ old_state = Nothing;
+ }
+ None => return Err("int constant too large".to_string()),
+ }
+ } else {
+ return Err("bad int constant".to_string());
+ }
+ }
+ FormatPattern(ref mut flags, ref mut fstate) => {
+ old_state = Nothing;
+ match (*fstate, cur) {
+ (_, 'd') | (_, 'o') | (_, 'x') | (_, 'X') | (_, 's') => {
+ if let Some(arg) = stack.pop() {
+ let res = format(arg, FormatOp::from_char(cur), *flags)?;
+ output.extend(res.iter().cloned());
+ // will cause state to go to Nothing
+ old_state = FormatPattern(*flags, *fstate);
+ } else {
+ return Err("stack is empty".to_string());
+ }
+ }
+ (FormatState::Flags, '#') => {
+ flags.alternate = true;
+ }
+ (FormatState::Flags, '-') => {
+ flags.left = true;
+ }
+ (FormatState::Flags, '+') => {
+ flags.sign = true;
+ }
+ (FormatState::Flags, ' ') => {
+ flags.space = true;
+ }
+ (FormatState::Flags, '0'..='9') => {
+ flags.width = cur as usize - '0' as usize;
+ *fstate = FormatState::Width;
+ }
+ (FormatState::Flags, '.') => {
+ *fstate = FormatState::Precision;
+ }
+ (FormatState::Width, '0'..='9') => {
+ let old = flags.width;
+ flags.width = flags.width * 10 + (cur as usize - '0' as usize);
+ if flags.width < old {
+ return Err("format width overflow".to_string());
+ }
+ }
+ (FormatState::Width, '.') => {
+ *fstate = FormatState::Precision;
+ }
+ (FormatState::Precision, '0'..='9') => {
+ let old = flags.precision;
+ flags.precision = flags.precision * 10 + (cur as usize - '0' as usize);
+ if flags.precision < old {
+ return Err("format precision overflow".to_string());
+ }
+ }
+ _ => return Err("invalid format specifier".to_string()),
+ }
+ }
+ SeekIfElse(level) => {
+ if cur == '%' {
+ state = SeekIfElsePercent(level);
+ }
+ old_state = Nothing;
+ }
+ SeekIfElsePercent(level) => {
+ if cur == ';' {
+ if level == 0 {
+ state = Nothing;
+ } else {
+ state = SeekIfElse(level - 1);
+ }
+ } else if cur == 'e' && level == 0 {
+ state = Nothing;
+ } else if cur == '?' {
+ state = SeekIfElse(level + 1);
+ } else {
+ state = SeekIfElse(level);
+ }
+ }
+ SeekIfEnd(level) => {
+ if cur == '%' {
+ state = SeekIfEndPercent(level);
+ }
+ old_state = Nothing;
+ }
+ SeekIfEndPercent(level) => {
+ if cur == ';' {
+ if level == 0 {
+ state = Nothing;
+ } else {
+ state = SeekIfEnd(level - 1);
+ }
+ } else if cur == '?' {
+ state = SeekIfEnd(level + 1);
+ } else {
+ state = SeekIfEnd(level);
+ }
+ }
+ }
+ if state == old_state {
+ state = Nothing;
+ }
+ }
+ Ok(output)
+}
+
+#[derive(Copy, PartialEq, Clone)]
+struct Flags {
+ width: usize,
+ precision: usize,
+ alternate: bool,
+ left: bool,
+ sign: bool,
+ space: bool,
+}
+
+impl Flags {
+ fn new() -> Flags {
+ Flags { width: 0, precision: 0, alternate: false, left: false, sign: false, space: false }
+ }
+}
+
+#[derive(Copy, Clone)]
+enum FormatOp {
+ Digit,
+ Octal,
+ LowerHex,
+ UpperHex,
+ String,
+}
+
+impl FormatOp {
+ fn from_char(c: char) -> FormatOp {
+ match c {
+ 'd' => FormatOp::Digit,
+ 'o' => FormatOp::Octal,
+ 'x' => FormatOp::LowerHex,
+ 'X' => FormatOp::UpperHex,
+ 's' => FormatOp::String,
+ _ => panic!("bad FormatOp char"),
+ }
+ }
+}
+
+fn format(val: Param, op: FormatOp, flags: Flags) -> Result<Vec<u8>, String> {
+ let mut s = match val {
+ Number(d) => {
+ match op {
+ FormatOp::Digit => {
+ if flags.sign {
+ format!("{:+01$}", d, flags.precision)
+ } else if d < 0 {
+ // C doesn't take sign into account in precision calculation.
+ format!("{:01$}", d, flags.precision + 1)
+ } else if flags.space {
+ format!(" {:01$}", d, flags.precision)
+ } else {
+ format!("{:01$}", d, flags.precision)
+ }
+ }
+ FormatOp::Octal => {
+ if flags.alternate {
+ // Leading octal zero counts against precision.
+ format!("0{:01$o}", d, flags.precision.saturating_sub(1))
+ } else {
+ format!("{:01$o}", d, flags.precision)
+ }
+ }
+ FormatOp::LowerHex => {
+ if flags.alternate && d != 0 {
+ format!("0x{:01$x}", d, flags.precision)
+ } else {
+ format!("{:01$x}", d, flags.precision)
+ }
+ }
+ FormatOp::UpperHex => {
+ if flags.alternate && d != 0 {
+ format!("0X{:01$X}", d, flags.precision)
+ } else {
+ format!("{:01$X}", d, flags.precision)
+ }
+ }
+ FormatOp::String => return Err("non-number on stack with %s".to_string()),
+ }
+ .into_bytes()
+ }
+ };
+ if flags.width > s.len() {
+ let n = flags.width - s.len();
+ if flags.left {
+ s.extend(repeat(b' ').take(n));
+ } else {
+ let mut s_ = Vec::with_capacity(flags.width);
+ s_.extend(repeat(b' ').take(n));
+ s_.extend(s.into_iter());
+ s = s_;
+ }
+ }
+ Ok(s)
+}
diff --git a/library/test/src/term/terminfo/parm/tests.rs b/library/test/src/term/terminfo/parm/tests.rs
new file mode 100644
index 000000000..c738f3ba0
--- /dev/null
+++ b/library/test/src/term/terminfo/parm/tests.rs
@@ -0,0 +1,124 @@
+use super::*;
+
+use std::result::Result::Ok;
+
#[test]
fn test_basic_setabf() {
    // `%p1%d` pushes parameter 1 and prints it as decimal in place.
    let s = b"\\E[48;5;%p1%dm";
    assert_eq!(
        expand(s, &[Number(1)], &mut Variables::new()).unwrap(),
        "\\E[48;5;1m".bytes().collect::<Vec<_>>()
    );
}
+
#[test]
fn test_multiple_int_constants() {
    // Constants push onto a stack, so two `%d` pops print them in
    // reverse (LIFO) order: "21", not "12".
    assert_eq!(
        expand(b"%{1}%{2}%d%d", &[], &mut Variables::new()).unwrap(),
        "21".bytes().collect::<Vec<_>>()
    );
}
+
#[test]
fn test_op_i() {
    let mut vars = Variables::new();
    // `%i` increments the first two parameters (used for 1-based cursor
    // addressing); the third parameter is untouched.
    assert_eq!(
        expand(b"%p1%d%p2%d%p3%d%i%p1%d%p2%d%p3%d", &[Number(1), Number(2), Number(3)], &mut vars),
        Ok("123233".bytes().collect::<Vec<_>>())
    );
    // Missing parameters default to 0, so after `%i` they print as 1.
    assert_eq!(
        expand(b"%p1%d%p2%d%i%p1%d%p2%d", &[], &mut vars),
        Ok("0011".bytes().collect::<Vec<_>>())
    );
}
+
#[test]
fn test_param_stack_failure_conditions() {
    let mut varstruct = Variables::new();
    let vars = &mut varstruct;
    // Expands `fmt` followed by `cap` as a single capability string.
    fn get_res(
        fmt: &str,
        cap: &str,
        params: &[Param],
        vars: &mut Variables,
    ) -> Result<Vec<u8>, String> {
        let mut u8v: Vec<_> = fmt.bytes().collect();
        u8v.extend(cap.as_bytes().iter().map(|&b| b));
        expand(&u8v, params, vars)
    }

    // Unary ops must fail on an empty stack and succeed with one entry.
    let caps = ["%d", "%c", "%s", "%Pa", "%l", "%!", "%~"];
    for &cap in caps.iter() {
        let res = get_res("", cap, &[], vars);
        assert!(res.is_err(), "Op {} succeeded incorrectly with 0 stack entries", cap);
        // %s and %l require a string operand, so a pushed number won't do.
        if cap == "%s" || cap == "%l" {
            continue;
        }
        let p = Number(97);
        let res = get_res("%p1", cap, &[p], vars);
        assert!(res.is_ok(), "Op {} failed with 1 stack entry: {}", cap, res.unwrap_err());
    }
    // Binary ops require exactly two operands on the stack.
    let caps = ["%+", "%-", "%*", "%/", "%m", "%&", "%|", "%A", "%O"];
    for &cap in caps.iter() {
        let res = expand(cap.as_bytes(), &[], vars);
        assert!(res.is_err(), "Binop {} succeeded incorrectly with 0 stack entries", cap);
        let res = get_res("%{1}", cap, &[], vars);
        assert!(res.is_err(), "Binop {} succeeded incorrectly with 1 stack entry", cap);
        let res = get_res("%{1}%{2}", cap, &[], vars);
        assert!(res.is_ok(), "Binop {} failed with 2 stack entries: {}", cap, res.unwrap_err());
    }
}
+
#[test]
fn test_push_bad_param() {
    // `%p` must be followed by a parameter digit; `%pa` is rejected.
    assert!(expand(b"%pa", &[], &mut Variables::new()).is_err());
}
+
#[test]
fn test_comparison_ops() {
    // For each operator, the triple holds the expected 0/1 result of
    // applying it to the operand pairs (1,2), (1,1), and (2,1) in turn.
    let v = [('<', [1u8, 0u8, 0u8]), ('=', [0u8, 1u8, 0u8]), ('>', [0u8, 0u8, 1u8])];
    for &(op, bs) in v.iter() {
        let s = format!("%{{1}}%{{2}}%{op}%d");
        let res = expand(s.as_bytes(), &[], &mut Variables::new());
        assert!(res.is_ok(), "{}", res.unwrap_err());
        assert_eq!(res.unwrap(), vec![b'0' + bs[0]]);
        let s = format!("%{{1}}%{{1}}%{op}%d");
        let res = expand(s.as_bytes(), &[], &mut Variables::new());
        assert!(res.is_ok(), "{}", res.unwrap_err());
        assert_eq!(res.unwrap(), vec![b'0' + bs[1]]);
        let s = format!("%{{2}}%{{1}}%{op}%d");
        let res = expand(s.as_bytes(), &[], &mut Variables::new());
        assert!(res.is_ok(), "{}", res.unwrap_err());
        assert_eq!(res.unwrap(), vec![b'0' + bs[2]]);
    }
}
+
#[test]
fn test_conditionals() {
    let mut vars = Variables::new();
    // setaf-style capability with an if/then/else-if/else chain:
    // p1 < 8 -> `3<p1>`, p1 < 16 -> `9<p1-8>`, otherwise `38;5;<p1>`.
    let s = b"\\E[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m";
    let res = expand(s, &[Number(1)], &mut vars);
    assert!(res.is_ok(), "{}", res.unwrap_err());
    assert_eq!(res.unwrap(), "\\E[31m".bytes().collect::<Vec<_>>());
    let res = expand(s, &[Number(8)], &mut vars);
    assert!(res.is_ok(), "{}", res.unwrap_err());
    assert_eq!(res.unwrap(), "\\E[90m".bytes().collect::<Vec<_>>());
    let res = expand(s, &[Number(42)], &mut vars);
    assert!(res.is_ok(), "{}", res.unwrap_err());
    assert_eq!(res.unwrap(), "\\E[38;5;42m".bytes().collect::<Vec<_>>());
}
+
#[test]
fn test_format() {
    let mut varstruct = Variables::new();
    let vars = &mut varstruct;

    // Precision (%.3d), width (%5d), and the forced-sign flag (%:+d).
    assert_eq!(
        expand(b"%p1%d%p1%.3d%p1%5d%p1%:+d", &[Number(1)], vars),
        Ok("1001    1+1".bytes().collect::<Vec<_>>())
    );
    // Octal/hex with the alternate (#) flag and combined width.precision.
    assert_eq!(
        expand(b"%p1%o%p1%#o%p2%6.4x%p2%#6.4X", &[Number(15), Number(27)], vars),
        Ok("17017  001b0X001B".bytes().collect::<Vec<_>>())
    );
}
diff --git a/library/test/src/term/terminfo/parser/compiled.rs b/library/test/src/term/terminfo/parser/compiled.rs
new file mode 100644
index 000000000..5d40b7988
--- /dev/null
+++ b/library/test/src/term/terminfo/parser/compiled.rs
@@ -0,0 +1,336 @@
+#![allow(non_upper_case_globals, missing_docs)]
+
+//! ncurses-compatible compiled terminfo format parsing (term(5))
+
+use super::super::TermInfo;
+use std::collections::HashMap;
+use std::io;
+use std::io::prelude::*;
+
+#[cfg(test)]
+mod tests;
+
// These are the orders ncurses uses in its compiled format (as of 5.9). Not sure if portable.

// Long ("full") names for boolean capabilities, index-aligned with `boolnames`.
#[rustfmt::skip]
pub(crate) static boolfnames: &[&str] = &["auto_left_margin", "auto_right_margin",
    "no_esc_ctlc", "ceol_standout_glitch", "eat_newline_glitch", "erase_overstrike", "generic_type",
    "hard_copy", "has_meta_key", "has_status_line", "insert_null_glitch", "memory_above",
    "memory_below", "move_insert_mode", "move_standout_mode", "over_strike", "status_line_esc_ok",
    "dest_tabs_magic_smso", "tilde_glitch", "transparent_underline", "xon_xoff", "needs_xon_xoff",
    "prtr_silent", "hard_cursor", "non_rev_rmcup", "no_pad_char", "non_dest_scroll_region",
    "can_change", "back_color_erase", "hue_lightness_saturation", "col_addr_glitch",
    "cr_cancels_micro_mode", "has_print_wheel", "row_addr_glitch", "semi_auto_right_margin",
    "cpi_changes_res", "lpi_changes_res", "backspaces_with_bs", "crt_no_scrolling",
    "no_correctly_working_cr", "gnu_has_meta_key", "linefeed_is_newline", "has_hardware_tabs",
    "return_does_clr_eol"];

// Short (terminfo) names for boolean capabilities, index-aligned with `boolfnames`.
#[rustfmt::skip]
pub(crate) static boolnames: &[&str] = &["bw", "am", "xsb", "xhp", "xenl", "eo",
    "gn", "hc", "km", "hs", "in", "db", "da", "mir", "msgr", "os", "eslok", "xt", "hz", "ul", "xon",
    "nxon", "mc5i", "chts", "nrrmc", "npc", "ndscr", "ccc", "bce", "hls", "xhpa", "crxm", "daisy",
    "xvpa", "sam", "cpix", "lpix", "OTbs", "OTns", "OTnc", "OTMT", "OTNL", "OTpt", "OTxr"];

// Long names for numeric capabilities, index-aligned with `numnames`.
#[rustfmt::skip]
pub(crate) static numfnames: &[&str] = &[ "columns", "init_tabs", "lines",
    "lines_of_memory", "magic_cookie_glitch", "padding_baud_rate", "virtual_terminal",
    "width_status_line", "num_labels", "label_height", "label_width", "max_attributes",
    "maximum_windows", "max_colors", "max_pairs", "no_color_video", "buffer_capacity",
    "dot_vert_spacing", "dot_horz_spacing", "max_micro_address", "max_micro_jump", "micro_col_size",
    "micro_line_size", "number_of_pins", "output_res_char", "output_res_line",
    "output_res_horz_inch", "output_res_vert_inch", "print_rate", "wide_char_size", "buttons",
    "bit_image_entwining", "bit_image_type", "magic_cookie_glitch_ul", "carriage_return_delay",
    "new_line_delay", "backspace_delay", "horizontal_tab_delay", "number_of_function_keys"];

// Short names for numeric capabilities, index-aligned with `numfnames`.
#[rustfmt::skip]
pub(crate) static numnames: &[&str] = &[ "cols", "it", "lines", "lm", "xmc", "pb",
    "vt", "wsl", "nlab", "lh", "lw", "ma", "wnum", "colors", "pairs", "ncv", "bufsz", "spinv",
    "spinh", "maddr", "mjump", "mcs", "mls", "npins", "orc", "orl", "orhi", "orvi", "cps", "widcs",
    "btns", "bitwin", "bitype", "UTug", "OTdC", "OTdN", "OTdB", "OTdT", "OTkn"];

// Long names for string capabilities, index-aligned with `stringnames`.
#[rustfmt::skip]
pub(crate) static stringfnames: &[&str] = &[ "back_tab", "bell", "carriage_return",
    "change_scroll_region", "clear_all_tabs", "clear_screen", "clr_eol", "clr_eos",
    "column_address", "command_character", "cursor_address", "cursor_down", "cursor_home",
    "cursor_invisible", "cursor_left", "cursor_mem_address", "cursor_normal", "cursor_right",
    "cursor_to_ll", "cursor_up", "cursor_visible", "delete_character", "delete_line",
    "dis_status_line", "down_half_line", "enter_alt_charset_mode", "enter_blink_mode",
    "enter_bold_mode", "enter_ca_mode", "enter_delete_mode", "enter_dim_mode", "enter_insert_mode",
    "enter_secure_mode", "enter_protected_mode", "enter_reverse_mode", "enter_standout_mode",
    "enter_underline_mode", "erase_chars", "exit_alt_charset_mode", "exit_attribute_mode",
    "exit_ca_mode", "exit_delete_mode", "exit_insert_mode", "exit_standout_mode",
    "exit_underline_mode", "flash_screen", "form_feed", "from_status_line", "init_1string",
    "init_2string", "init_3string", "init_file", "insert_character", "insert_line",
    "insert_padding", "key_backspace", "key_catab", "key_clear", "key_ctab", "key_dc", "key_dl",
    "key_down", "key_eic", "key_eol", "key_eos", "key_f0", "key_f1", "key_f10", "key_f2", "key_f3",
    "key_f4", "key_f5", "key_f6", "key_f7", "key_f8", "key_f9", "key_home", "key_ic", "key_il",
    "key_left", "key_ll", "key_npage", "key_ppage", "key_right", "key_sf", "key_sr", "key_stab",
    "key_up", "keypad_local", "keypad_xmit", "lab_f0", "lab_f1", "lab_f10", "lab_f2", "lab_f3",
    "lab_f4", "lab_f5", "lab_f6", "lab_f7", "lab_f8", "lab_f9", "meta_off", "meta_on", "newline",
    "pad_char", "parm_dch", "parm_delete_line", "parm_down_cursor", "parm_ich", "parm_index",
    "parm_insert_line", "parm_left_cursor", "parm_right_cursor", "parm_rindex", "parm_up_cursor",
    "pkey_key", "pkey_local", "pkey_xmit", "print_screen", "prtr_off", "prtr_on", "repeat_char",
    "reset_1string", "reset_2string", "reset_3string", "reset_file", "restore_cursor",
    "row_address", "save_cursor", "scroll_forward", "scroll_reverse", "set_attributes", "set_tab",
    "set_window", "tab", "to_status_line", "underline_char", "up_half_line", "init_prog", "key_a1",
    "key_a3", "key_b2", "key_c1", "key_c3", "prtr_non", "char_padding", "acs_chars", "plab_norm",
    "key_btab", "enter_xon_mode", "exit_xon_mode", "enter_am_mode", "exit_am_mode", "xon_character",
    "xoff_character", "ena_acs", "label_on", "label_off", "key_beg", "key_cancel", "key_close",
    "key_command", "key_copy", "key_create", "key_end", "key_enter", "key_exit", "key_find",
    "key_help", "key_mark", "key_message", "key_move", "key_next", "key_open", "key_options",
    "key_previous", "key_print", "key_redo", "key_reference", "key_refresh", "key_replace",
    "key_restart", "key_resume", "key_save", "key_suspend", "key_undo", "key_sbeg", "key_scancel",
    "key_scommand", "key_scopy", "key_screate", "key_sdc", "key_sdl", "key_select", "key_send",
    "key_seol", "key_sexit", "key_sfind", "key_shelp", "key_shome", "key_sic", "key_sleft",
    "key_smessage", "key_smove", "key_snext", "key_soptions", "key_sprevious", "key_sprint",
    "key_sredo", "key_sreplace", "key_sright", "key_srsume", "key_ssave", "key_ssuspend",
    "key_sundo", "req_for_input", "key_f11", "key_f12", "key_f13", "key_f14", "key_f15", "key_f16",
    "key_f17", "key_f18", "key_f19", "key_f20", "key_f21", "key_f22", "key_f23", "key_f24",
    "key_f25", "key_f26", "key_f27", "key_f28", "key_f29", "key_f30", "key_f31", "key_f32",
    "key_f33", "key_f34", "key_f35", "key_f36", "key_f37", "key_f38", "key_f39", "key_f40",
    "key_f41", "key_f42", "key_f43", "key_f44", "key_f45", "key_f46", "key_f47", "key_f48",
    "key_f49", "key_f50", "key_f51", "key_f52", "key_f53", "key_f54", "key_f55", "key_f56",
    "key_f57", "key_f58", "key_f59", "key_f60", "key_f61", "key_f62", "key_f63", "clr_bol",
    "clear_margins", "set_left_margin", "set_right_margin", "label_format", "set_clock",
    "display_clock", "remove_clock", "create_window", "goto_window", "hangup", "dial_phone",
    "quick_dial", "tone", "pulse", "flash_hook", "fixed_pause", "wait_tone", "user0", "user1",
    "user2", "user3", "user4", "user5", "user6", "user7", "user8", "user9", "orig_pair",
    "orig_colors", "initialize_color", "initialize_pair", "set_color_pair", "set_foreground",
    "set_background", "change_char_pitch", "change_line_pitch", "change_res_horz",
    "change_res_vert", "define_char", "enter_doublewide_mode", "enter_draft_quality",
    "enter_italics_mode", "enter_leftward_mode", "enter_micro_mode", "enter_near_letter_quality",
    "enter_normal_quality", "enter_shadow_mode", "enter_subscript_mode", "enter_superscript_mode",
    "enter_upward_mode", "exit_doublewide_mode", "exit_italics_mode", "exit_leftward_mode",
    "exit_micro_mode", "exit_shadow_mode", "exit_subscript_mode", "exit_superscript_mode",
    "exit_upward_mode", "micro_column_address", "micro_down", "micro_left", "micro_right",
    "micro_row_address", "micro_up", "order_of_pins", "parm_down_micro", "parm_left_micro",
    "parm_right_micro", "parm_up_micro", "select_char_set", "set_bottom_margin",
    "set_bottom_margin_parm", "set_left_margin_parm", "set_right_margin_parm", "set_top_margin",
    "set_top_margin_parm", "start_bit_image", "start_char_set_def", "stop_bit_image",
    "stop_char_set_def", "subscript_characters", "superscript_characters", "these_cause_cr",
    "zero_motion", "char_set_names", "key_mouse", "mouse_info", "req_mouse_pos", "get_mouse",
    "set_a_foreground", "set_a_background", "pkey_plab", "device_type", "code_set_init",
    "set0_des_seq", "set1_des_seq", "set2_des_seq", "set3_des_seq", "set_lr_margin",
    "set_tb_margin", "bit_image_repeat", "bit_image_newline", "bit_image_carriage_return",
    "color_names", "define_bit_image_region", "end_bit_image_region", "set_color_band",
    "set_page_length", "display_pc_char", "enter_pc_charset_mode", "exit_pc_charset_mode",
    "enter_scancode_mode", "exit_scancode_mode", "pc_term_options", "scancode_escape",
    "alt_scancode_esc", "enter_horizontal_hl_mode", "enter_left_hl_mode", "enter_low_hl_mode",
    "enter_right_hl_mode", "enter_top_hl_mode", "enter_vertical_hl_mode", "set_a_attributes",
    "set_pglen_inch", "termcap_init2", "termcap_reset", "linefeed_if_not_lf", "backspace_if_not_bs",
    "other_non_function_keys", "arrow_key_map", "acs_ulcorner", "acs_llcorner", "acs_urcorner",
    "acs_lrcorner", "acs_ltee", "acs_rtee", "acs_btee", "acs_ttee", "acs_hline", "acs_vline",
    "acs_plus", "memory_lock", "memory_unlock", "box_chars_1"];

// Short names for string capabilities, index-aligned with `stringfnames`.
// An entry of "_" means the capability has no short name; `parse` falls
// back to the long name in that case.
#[rustfmt::skip]
pub(crate) static stringnames: &[&str] = &[ "cbt", "_", "cr", "csr", "tbc", "clear",
    "_", "_", "hpa", "cmdch", "cup", "cud1", "home", "civis", "cub1", "mrcup", "cnorm", "cuf1",
    "ll", "cuu1", "cvvis", "dch1", "dl1", "dsl", "hd", "smacs", "blink", "bold", "smcup", "smdc",
    "dim", "smir", "invis", "prot", "rev", "smso", "smul", "ech", "rmacs", "sgr0", "rmcup", "rmdc",
    "rmir", "rmso", "rmul", "flash", "ff", "fsl", "is1", "is2", "is3", "if", "ich1", "il1", "ip",
    "kbs", "ktbc", "kclr", "kctab", "_", "_", "kcud1", "_", "_", "_", "_", "_", "_", "_", "_", "_",
    "_", "_", "_", "_", "_", "khome", "_", "_", "kcub1", "_", "knp", "kpp", "kcuf1", "_", "_",
    "khts", "_", "rmkx", "smkx", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "rmm", "_",
    "_", "pad", "dch", "dl", "cud", "ich", "indn", "il", "cub", "cuf", "rin", "cuu", "pfkey",
    "pfloc", "pfx", "mc0", "mc4", "_", "rep", "rs1", "rs2", "rs3", "rf", "rc", "vpa", "sc", "ind",
    "ri", "sgr", "_", "wind", "_", "tsl", "uc", "hu", "iprog", "_", "_", "_", "_", "_", "mc5p",
    "rmp", "acsc", "pln", "kcbt", "smxon", "rmxon", "smam", "rmam", "xonc", "xoffc", "_", "smln",
    "rmln", "_", "kcan", "kclo", "kcmd", "kcpy", "kcrt", "_", "kent", "kext", "kfnd", "khlp",
    "kmrk", "kmsg", "kmov", "knxt", "kopn", "kopt", "kprv", "kprt", "krdo", "kref", "krfr", "krpl",
    "krst", "kres", "ksav", "kspd", "kund", "kBEG", "kCAN", "kCMD", "kCPY", "kCRT", "_", "_",
    "kslt", "kEND", "kEOL", "kEXT", "kFND", "kHLP", "kHOM", "_", "kLFT", "kMSG", "kMOV", "kNXT",
    "kOPT", "kPRV", "kPRT", "kRDO", "kRPL", "kRIT", "kRES", "kSAV", "kSPD", "kUND", "rfi", "_", "_",
    "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_",
    "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_",
    "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_", "_",
    "dclk", "rmclk", "cwin", "wingo", "_", "dial", "qdial", "_", "_", "hook", "pause", "wait", "_",
    "_", "_", "_", "_", "_", "_", "_", "_", "_", "op", "oc", "initc", "initp", "scp", "setf",
    "setb", "cpi", "lpi", "chr", "cvr", "defc", "swidm", "sdrfq", "sitm", "slm", "smicm", "snlq",
    "snrmq", "sshm", "ssubm", "ssupm", "sum", "rwidm", "ritm", "rlm", "rmicm", "rshm", "rsubm",
    "rsupm", "rum", "mhpa", "mcud1", "mcub1", "mcuf1", "mvpa", "mcuu1", "porder", "mcud", "mcub",
    "mcuf", "mcuu", "scs", "smgb", "smgbp", "smglp", "smgrp", "smgt", "smgtp", "sbim", "scsd",
    "rbim", "rcsd", "subcs", "supcs", "docr", "zerom", "csnm", "kmous", "minfo", "reqmp", "getm",
    "setaf", "setab", "pfxl", "devt", "csin", "s0ds", "s1ds", "s2ds", "s3ds", "smglr", "smgtb",
    "birep", "binel", "bicr", "colornm", "defbi", "endbi", "setcolor", "slines", "dispc", "smpch",
    "rmpch", "smsc", "rmsc", "pctrm", "scesc", "scesa", "ehhlm", "elhlm", "elohlm", "erhlm",
    "ethlm", "evhlm", "sgr1", "slength", "OTi2", "OTrs", "OTnl", "OTbs", "OTko", "OTma", "OTG2",
    "OTG3", "OTG1", "OTG4", "OTGR", "OTGL", "OTGU", "OTGD", "OTGH", "OTGV", "OTGC", "meml", "memu",
    "box1"];
+
/// Reads a little-endian `u16` from `r`, failing if fewer than two bytes
/// are available.
fn read_le_u16(r: &mut dyn io::Read) -> io::Result<u16> {
    let mut raw = [0u8; 2];
    r.read_exact(&mut raw)?;
    Ok(u16::from_le_bytes(raw))
}
+
/// Reads a little-endian `u32` from `r`, failing if fewer than four bytes
/// are available.
fn read_le_u32(r: &mut dyn io::Read) -> io::Result<u32> {
    let mut raw = [0u8; 4];
    r.read_exact(&mut raw)?;
    Ok(u32::from_le_bytes(raw))
}
+
/// Reads a single byte from `r`, mapping end-of-input to an
/// `io::ErrorKind::Other` error with the message "end of file".
fn read_byte(r: &mut dyn io::Read) -> io::Result<u8> {
    r.bytes()
        .next()
        .unwrap_or_else(|| Err(io::Error::new(io::ErrorKind::Other, "end of file")))
}
+
+/// Parse a compiled terminfo entry, using long capability names if `longnames`
+/// is true
+pub(crate) fn parse(file: &mut dyn io::Read, longnames: bool) -> Result<TermInfo, String> {
+ macro_rules! t( ($e:expr) => (
+ match $e {
+ Ok(e) => e,
+ Err(e) => return Err(e.to_string())
+ }
+ ) );
+
+ let (bnames, snames, nnames) = if longnames {
+ (boolfnames, stringfnames, numfnames)
+ } else {
+ (boolnames, stringnames, numnames)
+ };
+
+ // Check magic number
+ let magic = t!(read_le_u16(file));
+
+ let extended = match magic {
+ 0o0432 => false,
+ 0o01036 => true,
+ _ => return Err(format!("invalid magic number, found {magic:o}")),
+ };
+
+ // According to the spec, these fields must be >= -1 where -1 means that the feature is not
+ // supported. Using 0 instead of -1 works because we skip sections with length 0.
+ macro_rules! read_nonneg {
+ () => {{
+ match t!(read_le_u16(file)) as i16 {
+ n if n >= 0 => n as usize,
+ -1 => 0,
+ _ => return Err("incompatible file: length fields must be >= -1".to_string()),
+ }
+ }};
+ }
+
+ let names_bytes = read_nonneg!();
+ let bools_bytes = read_nonneg!();
+ let numbers_count = read_nonneg!();
+ let string_offsets_count = read_nonneg!();
+ let string_table_bytes = read_nonneg!();
+
+ if names_bytes == 0 {
+ return Err("incompatible file: names field must be at least 1 byte wide".to_string());
+ }
+
+ if bools_bytes > boolnames.len() {
+ return Err("incompatible file: more booleans than expected".to_string());
+ }
+
+ if numbers_count > numnames.len() {
+ return Err("incompatible file: more numbers than expected".to_string());
+ }
+
+ if string_offsets_count > stringnames.len() {
+ return Err("incompatible file: more string offsets than expected".to_string());
+ }
+
+ // don't read NUL
+ let mut bytes = Vec::new();
+ t!(file.take((names_bytes - 1) as u64).read_to_end(&mut bytes));
+ let names_str = match String::from_utf8(bytes) {
+ Ok(s) => s,
+ Err(_) => return Err("input not utf-8".to_string()),
+ };
+
+ let term_names: Vec<String> = names_str.split('|').map(|s| s.to_string()).collect();
+ // consume NUL
+ if t!(read_byte(file)) != b'\0' {
+ return Err("incompatible file: missing null terminator for names section".to_string());
+ }
+
+ let bools_map: HashMap<String, bool> = t! {
+ (0..bools_bytes).filter_map(|i| match read_byte(file) {
+ Err(e) => Some(Err(e)),
+ Ok(1) => Some(Ok((bnames[i].to_string(), true))),
+ Ok(_) => None
+ }).collect()
+ };
+
+ if (bools_bytes + names_bytes) % 2 == 1 {
+ t!(read_byte(file)); // compensate for padding
+ }
+
+ let numbers_map: HashMap<String, u32> = t! {
+ (0..numbers_count).filter_map(|i| {
+ let number = if extended { read_le_u32(file) } else { read_le_u16(file).map(Into::into) };
+
+ match number {
+ Ok(0xFFFF) => None,
+ Ok(n) => Some(Ok((nnames[i].to_string(), n))),
+ Err(e) => Some(Err(e))
+ }
+ }).collect()
+ };
+
+ let string_map: HashMap<String, Vec<u8>> = if string_offsets_count > 0 {
+ let string_offsets: Vec<u16> =
+ t!((0..string_offsets_count).map(|_| read_le_u16(file)).collect());
+
+ let mut string_table = Vec::new();
+ t!(file.take(string_table_bytes as u64).read_to_end(&mut string_table));
+
+ t!(string_offsets
+ .into_iter()
+ .enumerate()
+ .filter(|&(_, offset)| {
+ // non-entry
+ offset != 0xFFFF
+ })
+ .map(|(i, offset)| {
+ let offset = offset as usize;
+
+ let name = if snames[i] == "_" { stringfnames[i] } else { snames[i] };
+
+ if offset == 0xFFFE {
+ // undocumented: FFFE indicates cap@, which means the capability is not present
+ // unsure if the handling for this is correct
+ return Ok((name.to_string(), Vec::new()));
+ }
+
+ // Find the offset of the NUL we want to go to
+ let nulpos = string_table[offset..string_table_bytes].iter().position(|&b| b == 0);
+ match nulpos {
+ Some(len) => {
+ Ok((name.to_string(), string_table[offset..offset + len].to_vec()))
+ }
+ None => Err("invalid file: missing NUL in string_table".to_string()),
+ }
+ })
+ .collect())
+ } else {
+ HashMap::new()
+ };
+
+ // And that's all there is to it
+ Ok(TermInfo { names: term_names, bools: bools_map, numbers: numbers_map, strings: string_map })
+}
+
+/// Creates a dummy TermInfo struct for msys terminals
+pub(crate) fn msys_terminfo() -> TermInfo {
+ let mut strings = HashMap::new();
+ strings.insert("sgr0".to_string(), b"\x1B[0m".to_vec());
+ strings.insert("bold".to_string(), b"\x1B[1m".to_vec());
+ strings.insert("setaf".to_string(), b"\x1B[3%p1%dm".to_vec());
+ strings.insert("setab".to_string(), b"\x1B[4%p1%dm".to_vec());
+
+ let mut numbers = HashMap::new();
+ numbers.insert("colors".to_string(), 8);
+
+ TermInfo {
+ names: vec!["cygwin".to_string()], // msys is a fork of an older cygwin version
+ bools: HashMap::new(),
+ numbers,
+ strings,
+ }
+}
diff --git a/library/test/src/term/terminfo/parser/compiled/tests.rs b/library/test/src/term/terminfo/parser/compiled/tests.rs
new file mode 100644
index 000000000..8a9187b04
--- /dev/null
+++ b/library/test/src/term/terminfo/parser/compiled/tests.rs
@@ -0,0 +1,8 @@
+use super::*;
+
#[test]
fn test_veclens() {
    // The long- and short-name tables are indexed in lockstep by `parse`,
    // so they must stay the same length.
    assert_eq!(boolfnames.len(), boolnames.len());
    assert_eq!(numfnames.len(), numnames.len());
    assert_eq!(stringfnames.len(), stringnames.len());
}
diff --git a/library/test/src/term/terminfo/searcher.rs b/library/test/src/term/terminfo/searcher.rs
new file mode 100644
index 000000000..68e181a68
--- /dev/null
+++ b/library/test/src/term/terminfo/searcher.rs
@@ -0,0 +1,69 @@
+//! ncurses-compatible database discovery.
+//!
+//! Does not support hashed database, only filesystem!
+
+use std::env;
+use std::fs;
+use std::path::PathBuf;
+
+#[cfg(test)]
+mod tests;
+
/// Return path to database entry for `term`.
///
/// Searches, in order: `$TERMINFO`, each entry of the colon-separated
/// `$TERMINFO_DIRS` (an empty entry meaning `/usr/share/terminfo`), and
/// otherwise the conventional system locations. Returns `None` when `term`
/// is empty or no entry exists.
#[allow(deprecated)]
pub(crate) fn get_dbpath_for_term(term: &str) -> Option<PathBuf> {
    // Entries are filed under a directory named after the first character of
    // the terminal name, so an empty name can never match.
    let first_char = term.chars().next()?;

    let mut dirs_to_search = Vec::new();

    // Find search directory
    if let Some(dir) = env::var_os("TERMINFO") {
        dirs_to_search.push(PathBuf::from(dir));
    }

    if let Ok(dirs) = env::var("TERMINFO_DIRS") {
        for i in dirs.split(':') {
            // An empty list element stands for the compiled-in default.
            if i.is_empty() {
                dirs_to_search.push(PathBuf::from("/usr/share/terminfo"));
            } else {
                dirs_to_search.push(PathBuf::from(i));
            }
        }
    } else {
        // Found nothing in TERMINFO_DIRS, use the default paths:
        // According to /etc/terminfo/README, after looking at
        // ~/.terminfo, ncurses will search /etc/terminfo, then
        // /lib/terminfo, and eventually /usr/share/terminfo.
        // On Haiku the database can be found at /boot/system/data/terminfo
        if let Some(mut homedir) = env::home_dir() {
            homedir.push(".terminfo");
            dirs_to_search.push(homedir)
        }

        dirs_to_search.push(PathBuf::from("/etc/terminfo"));
        dirs_to_search.push(PathBuf::from("/lib/terminfo"));
        dirs_to_search.push(PathBuf::from("/usr/share/terminfo"));
        dirs_to_search.push(PathBuf::from("/boot/system/data/terminfo"));
    }

    // Look for the terminal in all of the search directories
    for mut p in dirs_to_search {
        if fs::metadata(&p).is_ok() {
            // Standard layout: <dir>/<first char>/<term name>
            p.push(first_char.to_string());
            p.push(term);
            if fs::metadata(&p).is_ok() {
                return Some(p);
            }
            p.pop();
            p.pop();

            // on some installations the dir is named after the hex of the char
            // (e.g., macOS)
            p.push(format!("{:x}", first_char as usize));
            p.push(term);
            if fs::metadata(&p).is_ok() {
                return Some(p);
            }
        }
    }
    None
}
diff --git a/library/test/src/term/terminfo/searcher/tests.rs b/library/test/src/term/terminfo/searcher/tests.rs
new file mode 100644
index 000000000..4227a585e
--- /dev/null
+++ b/library/test/src/term/terminfo/searcher/tests.rs
@@ -0,0 +1,19 @@
+use super::*;
+
#[test]
#[ignore = "buildbots don't have ncurses installed and I can't mock everything I need"]
fn test_get_dbpath_for_term() {
    // woefully inadequate test coverage
    // note: current tests won't work with non-standard terminfo hierarchies (e.g., macOS's)
    // NOTE(review): mutates TERMINFO_DIRS for the whole process, so this must
    // not run concurrently with other env-sensitive tests.
    use std::env;
    // FIXME (#9639): This needs to handle non-utf8 paths
    fn x(t: &str) -> String {
        let p = get_dbpath_for_term(t).expect("no terminfo entry found");
        p.to_str().unwrap().to_string()
    }
    assert!(x("screen") == "/usr/share/terminfo/s/screen");
    // An empty terminal name can never resolve.
    assert!(get_dbpath_for_term("") == None);
    // ":" is two empty list elements, i.e. the default path twice.
    env::set_var("TERMINFO_DIRS", ":");
    assert!(x("screen") == "/usr/share/terminfo/s/screen");
    env::remove_var("TERMINFO_DIRS");
}
diff --git a/library/test/src/term/win.rs b/library/test/src/term/win.rs
new file mode 100644
index 000000000..4bdbd6ee7
--- /dev/null
+++ b/library/test/src/term/win.rs
@@ -0,0 +1,170 @@
+//! Windows console handling
+
+// FIXME (#13400): this is only a tiny fraction of the Windows console api
+
+use std::io;
+use std::io::prelude::*;
+
+use super::color;
+use super::Terminal;
+
/// A Terminal implementation that uses the Win32 Console API.
pub(crate) struct WinConsole<T> {
    // Wrapped output stream; flushed before console attributes are changed.
    buf: T,
    // Colors captured at construction time, restored by `reset`.
    def_foreground: color::Color,
    def_background: color::Color,
    // Currently selected colors.
    foreground: color::Color,
    background: color::Color,
}
+
// Win32 primitive type aliases, matching the Windows SDK definitions.
type SHORT = i16;
type WORD = u16;
type DWORD = u32;
type BOOL = i32;
type HANDLE = *mut u8;

// C-layout mirror of the Console API's SMALL_RECT structure.
#[allow(non_snake_case)]
#[repr(C)]
struct SMALL_RECT {
    Left: SHORT,
    Top: SHORT,
    Right: SHORT,
    Bottom: SHORT,
}

// C-layout mirror of the Console API's COORD structure.
#[allow(non_snake_case)]
#[repr(C)]
struct COORD {
    X: SHORT,
    Y: SHORT,
}

// C-layout mirror of CONSOLE_SCREEN_BUFFER_INFO; only `wAttributes` is
// actually read by this module.
#[allow(non_snake_case)]
#[repr(C)]
struct CONSOLE_SCREEN_BUFFER_INFO {
    dwSize: COORD,
    dwCursorPosition: COORD,
    wAttributes: WORD,
    srWindow: SMALL_RECT,
    dwMaximumWindowSize: COORD,
}

// The small slice of the Console API this module needs, linked from kernel32.
#[allow(non_snake_case)]
#[link(name = "kernel32")]
extern "system" {
    fn SetConsoleTextAttribute(handle: HANDLE, attr: WORD) -> BOOL;
    fn GetStdHandle(which: DWORD) -> HANDLE;
    fn GetConsoleScreenBufferInfo(handle: HANDLE, info: *mut CONSOLE_SCREEN_BUFFER_INFO) -> BOOL;
}
+
/// Converts a `color::Color` to Win32 console attribute bits.
///
/// Colors 8 and above are treated as the bright variants of the low eight;
/// for those, the high-intensity bit (0x8) is set.
fn color_to_bits(color: color::Color) -> u16 {
    // magic numbers from mingw-w64's wincon.h

    let bits = match color % 8 {
        color::BLACK => 0,
        color::BLUE => 0x1,
        color::GREEN => 0x2,
        color::RED => 0x4,
        color::YELLOW => 0x2 | 0x4,
        color::MAGENTA => 0x1 | 0x4,
        color::CYAN => 0x1 | 0x2,
        color::WHITE => 0x1 | 0x2 | 0x4,
        _ => unreachable!(),
    };

    if color >= 8 { bits | 0x8 } else { bits }
}
+
/// Converts Win32 console attribute bits back to a `color::Color`.
///
/// Inverse of `color_to_bits`: the low three bits select the base color and
/// the high-intensity bit (0x8) is carried over to mark the bright variant.
fn bits_to_color(bits: u16) -> color::Color {
    let color = match bits & 0x7 {
        0 => color::BLACK,
        0x1 => color::BLUE,
        0x2 => color::GREEN,
        0x4 => color::RED,
        0x6 => color::YELLOW,
        0x5 => color::MAGENTA,
        0x3 => color::CYAN,
        0x7 => color::WHITE,
        _ => unreachable!(),
    };

    color | (u32::from(bits) & 0x8) // copy the hi-intensity bit
}
+
impl<T: Write + Send + 'static> WinConsole<T> {
    /// Pushes the currently selected colors to the console.
    ///
    /// The buffer is flushed first so already-written text keeps its old
    /// attributes; the new attributes apply to everything printed after.
    fn apply(&mut self) {
        let _unused = self.buf.flush();
        // Foreground occupies the low nibble, background the next one up.
        let mut accum: WORD = 0;
        accum |= color_to_bits(self.foreground);
        accum |= color_to_bits(self.background) << 4;

        unsafe {
            // Magic -11 means stdout, from
            // https://docs.microsoft.com/en-us/windows/console/getstdhandle
            //
            // You may be wondering, "but what about stderr?", and the answer
            // to that is that setting terminal attributes on the stdout
            // handle also sets them for stderr, since they go to the same
            // terminal! Admittedly, this is fragile, since stderr could be
            // redirected to a different console. This is good enough for
            // rustc though. See #13400.
            let out = GetStdHandle(-11i32 as DWORD);
            SetConsoleTextAttribute(out, accum);
        }
    }

    /// Wraps `out` in a console terminal, capturing the console's current
    /// colors as the defaults used by `reset`. If the attributes cannot be
    /// queried (e.g. output is not a console), white-on-black is assumed.
    /// The `io::Result` is currently always `Ok`.
    pub(crate) fn new(out: T) -> io::Result<WinConsole<T>> {
        use std::mem::MaybeUninit;

        let fg;
        let bg;
        unsafe {
            let mut buffer_info = MaybeUninit::<CONSOLE_SCREEN_BUFFER_INFO>::uninit();
            if GetConsoleScreenBufferInfo(GetStdHandle(-11i32 as DWORD), buffer_info.as_mut_ptr())
                != 0
            {
                // SAFETY: a non-zero return means the struct was filled in.
                let buffer_info = buffer_info.assume_init();
                fg = bits_to_color(buffer_info.wAttributes);
                bg = bits_to_color(buffer_info.wAttributes >> 4);
            } else {
                fg = color::WHITE;
                bg = color::BLACK;
            }
        }
        Ok(WinConsole {
            buf: out,
            def_foreground: fg,
            def_background: bg,
            foreground: fg,
            background: bg,
        })
    }
}
+
// Plain delegation: writing goes straight to the wrapped stream; color
// handling happens separately via the Console API in `apply`.
impl<T: Write> Write for WinConsole<T> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.buf.write(buf)
    }

    fn flush(&mut self) -> io::Result<()> {
        self.buf.flush()
    }
}
+
impl<T: Write + Send + 'static> Terminal for WinConsole<T> {
    /// Sets the foreground color and applies it to the console immediately.
    fn fg(&mut self, color: color::Color) -> io::Result<bool> {
        self.foreground = color;
        self.apply();

        Ok(true)
    }

    /// Restores the colors captured when this console wrapper was created.
    fn reset(&mut self) -> io::Result<bool> {
        self.foreground = self.def_foreground;
        self.background = self.def_background;
        self.apply();

        Ok(true)
    }
}
diff --git a/library/test/src/test_result.rs b/library/test/src/test_result.rs
new file mode 100644
index 000000000..7f44d6e3d
--- /dev/null
+++ b/library/test/src/test_result.rs
@@ -0,0 +1,108 @@
+use std::any::Any;
+
+use super::bench::BenchSamples;
+use super::options::ShouldPanic;
+use super::time;
+use super::types::TestDesc;
+
+pub use self::TestResult::*;
+
// Return codes for secondary process.
// Start somewhere other than 0 so we know the return code means what we think
// it means.
/// Exit code a secondary test process uses to report success.
pub const TR_OK: i32 = 50;
/// Exit code a secondary test process uses to report failure.
pub const TR_FAILED: i32 = 51;
+
/// Outcome of running a single test or benchmark.
#[derive(Debug, Clone, PartialEq)]
pub enum TestResult {
    /// Test passed.
    TrOk,
    /// Test failed with no additional detail.
    TrFailed,
    /// Test failed; the string carries the reason.
    TrFailedMsg(String),
    /// Test was ignored.
    TrIgnored,
    /// Benchmark completed; carries the measured samples.
    TrBench(BenchSamples),
    /// Test exceeded a configured critical time limit (see `calc_result`).
    TrTimedFail,
}
+
/// Creates a `TestResult` depending on the raw result of test execution
/// and associated data.
///
/// `task_result` carries the panic payload (if the test panicked); it is
/// checked against the test's `should_panic` expectation. A result that
/// passes that check can still be demoted to `TrTimedFail` when time
/// limits are configured and exceeded.
pub fn calc_result<'a>(
    desc: &TestDesc,
    task_result: Result<(), &'a (dyn Any + 'static + Send)>,
    time_opts: &Option<time::TestTimeOptions>,
    exec_time: &Option<time::TestExecTime>,
) -> TestResult {
    let result = match (&desc.should_panic, task_result) {
        (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TestResult::TrOk,
        (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
            // Panic payloads are `String` for format-style panics and
            // `&'static str` for literal messages; accept either.
            let maybe_panic_str = err
                .downcast_ref::<String>()
                .map(|e| &**e)
                .or_else(|| err.downcast_ref::<&'static str>().copied());

            if maybe_panic_str.map(|e| e.contains(msg)).unwrap_or(false) {
                TestResult::TrOk
            } else if let Some(panic_str) = maybe_panic_str {
                TestResult::TrFailedMsg(format!(
                    r#"panic did not contain expected string
 panic message: `{:?}`,
 expected substring: `{:?}`"#,
                    panic_str, msg
                ))
            } else {
                // Non-string payload: report its TypeId for diagnostics.
                TestResult::TrFailedMsg(format!(
                    r#"expected panic with string value,
 found non-string value: `{:?}`
 expected substring: `{:?}`"#,
                    (**err).type_id(),
                    msg
                ))
            }
        }
        (&ShouldPanic::Yes, Ok(())) | (&ShouldPanic::YesWithMessage(_), Ok(())) => {
            TestResult::TrFailedMsg("test did not panic as expected".to_string())
        }
        _ => TestResult::TrFailed,
    };

    // If test is already failed (or allowed to fail), do not change the result.
    if result != TestResult::TrOk {
        return result;
    }

    // Check if test is failed due to timeout.
    if let (Some(opts), Some(time)) = (time_opts, exec_time) {
        if opts.error_on_excess && opts.is_critical(desc, time) {
            return TestResult::TrTimedFail;
        }
    }

    result
}
+
+/// Creates a `TestResult` depending on the exit code of test subprocess.
+pub fn get_result_from_exit_code(
+ desc: &TestDesc,
+ code: i32,
+ time_opts: &Option<time::TestTimeOptions>,
+ exec_time: &Option<time::TestExecTime>,
+) -> TestResult {
+ let result = match code {
+ TR_OK => TestResult::TrOk,
+ TR_FAILED => TestResult::TrFailed,
+ _ => TestResult::TrFailedMsg(format!("got unexpected return code {code}")),
+ };
+
+ // If test is already failed (or allowed to fail), do not change the result.
+ if result != TestResult::TrOk {
+ return result;
+ }
+
+ // Check if test is failed due to timeout.
+ if let (Some(opts), Some(time)) = (time_opts, exec_time) {
+ if opts.error_on_excess && opts.is_critical(desc, time) {
+ return TestResult::TrTimedFail;
+ }
+ }
+
+ result
+}
diff --git a/library/test/src/tests.rs b/library/test/src/tests.rs
new file mode 100644
index 000000000..0b81aff59
--- /dev/null
+++ b/library/test/src/tests.rs
@@ -0,0 +1,823 @@
+use super::*;
+
+use crate::{
+    bench::Bencher,
+    console::OutputLocation,
+    formatters::PrettyFormatter,
+    options::OutputFormat,
+    test::{
+        filter_tests,
+        parse_opts,
+        run_test,
+        DynTestFn,
+        DynTestName,
+        MetricMap,
+        RunIgnored,
+        RunStrategy,
+        ShouldPanic,
+        StaticTestName,
+        TestDesc,
+        TestDescAndFn,
+        TestOpts,
+        TrIgnored,
+        TrOk,
+        // FIXME (introduced by #65251)
+        // ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, TestTimeOptions,
+        // TestType, TrFailedMsg, TrIgnored, TrOk,
+    },
+    time::{TestTimeOptions, TimeThreshold},
+};
+use std::sync::mpsc::channel;
+use std::time::Duration;
+
+impl TestOpts {
+    /// Baseline options for the tests in this module: everything disabled or
+    /// empty. Individual tests override only the fields they exercise (often
+    /// via struct-update syntax, e.g. `TestOpts { shuffle: true, ..TestOpts::new() }`).
+    fn new() -> TestOpts {
+        TestOpts {
+            list: false,
+            filters: vec![],
+            filter_exact: false,
+            force_run_in_process: false,
+            exclude_should_panic: false,
+            run_ignored: RunIgnored::No,
+            run_tests: false,
+            bench_benchmarks: false,
+            logfile: None,
+            nocapture: false,
+            color: AutoColor,
+            format: OutputFormat::Pretty,
+            shuffle: false,
+            shuffle_seed: None,
+            test_threads: None,
+            skip: vec![],
+            time_options: None,
+            options: Options::new(),
+        }
+    }
+}
+
+/// Fixture: test "1" is marked `ignore`, test "2" is not; both bodies are no-ops.
+fn one_ignored_one_unignored_test() -> Vec<TestDescAndFn> {
+    vec![
+        TestDescAndFn {
+            desc: TestDesc {
+                name: StaticTestName("1"),
+                ignore: true,
+                ignore_message: None,
+                should_panic: ShouldPanic::No,
+                compile_fail: false,
+                no_run: false,
+                test_type: TestType::Unknown,
+            },
+            testfn: DynTestFn(Box::new(move || {})),
+        },
+        TestDescAndFn {
+            desc: TestDesc {
+                name: StaticTestName("2"),
+                ignore: false,
+                ignore_message: None,
+                should_panic: ShouldPanic::No,
+                compile_fail: false,
+                no_run: false,
+                test_type: TestType::Unknown,
+            },
+            testfn: DynTestFn(Box::new(move || {})),
+        },
+    ]
+}
+
+/// An `ignore`d test must not be executed: its body panics, so any result
+/// other than `TrOk` proves the body never ran successfully.
+#[test]
+pub fn do_not_run_ignored_tests() {
+    fn f() {
+        panic!();
+    }
+    let desc = TestDescAndFn {
+        desc: TestDesc {
+            name: StaticTestName("whatever"),
+            ignore: true,
+            ignore_message: None,
+            should_panic: ShouldPanic::No,
+            compile_fail: false,
+            no_run: false,
+            test_type: TestType::Unknown,
+        },
+        testfn: DynTestFn(Box::new(f)),
+    };
+    let (tx, rx) = channel();
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
+    let result = rx.recv().unwrap().result;
+    assert_ne!(result, TrOk);
+}
+
+/// An `ignore`d test must be reported as `TrIgnored`, not merely skipped silently.
+#[test]
+pub fn ignored_tests_result_in_ignored() {
+    fn f() {}
+    let desc = TestDescAndFn {
+        desc: TestDesc {
+            name: StaticTestName("whatever"),
+            ignore: true,
+            ignore_message: None,
+            should_panic: ShouldPanic::No,
+            compile_fail: false,
+            no_run: false,
+            test_type: TestType::Unknown,
+        },
+        testfn: DynTestFn(Box::new(f)),
+    };
+    let (tx, rx) = channel();
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
+    let result = rx.recv().unwrap().result;
+    assert_eq!(result, TrIgnored);
+}
+
+// FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251)
+/// A `ShouldPanic::Yes` test whose body panics is a success (`TrOk`).
+#[test]
+#[cfg(not(target_os = "emscripten"))]
+fn test_should_panic() {
+    fn f() {
+        panic!();
+    }
+    let desc = TestDescAndFn {
+        desc: TestDesc {
+            name: StaticTestName("whatever"),
+            ignore: false,
+            ignore_message: None,
+            should_panic: ShouldPanic::Yes,
+            compile_fail: false,
+            no_run: false,
+            test_type: TestType::Unknown,
+        },
+        testfn: DynTestFn(Box::new(f)),
+    };
+    let (tx, rx) = channel();
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
+    let result = rx.recv().unwrap().result;
+    assert_eq!(result, TrOk);
+}
+
+// FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251)
+/// `YesWithMessage` matches by substring: "error message" is contained in
+/// the actual panic message "an error message", so the test passes.
+#[test]
+#[cfg(not(target_os = "emscripten"))]
+fn test_should_panic_good_message() {
+    fn f() {
+        panic!("an error message");
+    }
+    let desc = TestDescAndFn {
+        desc: TestDesc {
+            name: StaticTestName("whatever"),
+            ignore: false,
+            ignore_message: None,
+            should_panic: ShouldPanic::YesWithMessage("error message"),
+            compile_fail: false,
+            no_run: false,
+            test_type: TestType::Unknown,
+        },
+        testfn: DynTestFn(Box::new(f)),
+    };
+    let (tx, rx) = channel();
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
+    let result = rx.recv().unwrap().result;
+    assert_eq!(result, TrOk);
+}
+
+// FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251)
+/// When the expected substring is absent, the failure message must match the
+/// exact multi-line text produced by the result-calculation code (the expected
+/// string literal below mirrors that format byte-for-byte).
+#[test]
+#[cfg(not(target_os = "emscripten"))]
+fn test_should_panic_bad_message() {
+    use crate::tests::TrFailedMsg;
+    fn f() {
+        panic!("an error message");
+    }
+    let expected = "foobar";
+    let failed_msg = r#"panic did not contain expected string
+ panic message: `"an error message"`,
+ expected substring: `"foobar"`"#;
+    let desc = TestDescAndFn {
+        desc: TestDesc {
+            name: StaticTestName("whatever"),
+            ignore: false,
+            ignore_message: None,
+            should_panic: ShouldPanic::YesWithMessage(expected),
+            compile_fail: false,
+            no_run: false,
+            test_type: TestType::Unknown,
+        },
+        testfn: DynTestFn(Box::new(f)),
+    };
+    let (tx, rx) = channel();
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
+    let result = rx.recv().unwrap().result;
+    assert_eq!(result, TrFailedMsg(failed_msg.to_string()));
+}
+
+// FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251)
+/// A non-string panic payload (`panic_any(1i32)`) cannot be matched against a
+/// substring; the failure message reports the payload's `TypeId` instead.
+#[test]
+#[cfg(not(target_os = "emscripten"))]
+fn test_should_panic_non_string_message_type() {
+    use crate::tests::TrFailedMsg;
+    use std::any::TypeId;
+    fn f() {
+        std::panic::panic_any(1i32);
+    }
+    let expected = "foobar";
+    let failed_msg = format!(
+        r#"expected panic with string value,
+ found non-string value: `{:?}`
+ expected substring: `"foobar"`"#,
+        TypeId::of::<i32>()
+    );
+    let desc = TestDescAndFn {
+        desc: TestDesc {
+            name: StaticTestName("whatever"),
+            ignore: false,
+            ignore_message: None,
+            should_panic: ShouldPanic::YesWithMessage(expected),
+            compile_fail: false,
+            no_run: false,
+            test_type: TestType::Unknown,
+        },
+        testfn: DynTestFn(Box::new(f)),
+    };
+    let (tx, rx) = channel();
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
+    let result = rx.recv().unwrap().result;
+    assert_eq!(result, TrFailedMsg(failed_msg));
+}
+
+// FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251)
+/// Both should-panic variants must report a failure when the body does NOT panic.
+#[test]
+#[cfg(not(target_os = "emscripten"))]
+fn test_should_panic_but_succeeds() {
+    let should_panic_variants = [ShouldPanic::Yes, ShouldPanic::YesWithMessage("error message")];
+
+    for &should_panic in should_panic_variants.iter() {
+        fn f() {}
+        let desc = TestDescAndFn {
+            desc: TestDesc {
+                name: StaticTestName("whatever"),
+                ignore: false,
+                ignore_message: None,
+                should_panic,
+                compile_fail: false,
+                no_run: false,
+                test_type: TestType::Unknown,
+            },
+            testfn: DynTestFn(Box::new(f)),
+        };
+        let (tx, rx) = channel();
+        run_test(
+            &TestOpts::new(),
+            false,
+            TestId(0),
+            desc,
+            RunStrategy::InProcess,
+            tx,
+            Concurrent::No,
+        );
+        let result = rx.recv().unwrap().result;
+        assert_eq!(
+            result,
+            TrFailedMsg("test did not panic as expected".to_string()),
+            "should_panic == {:?}",
+            should_panic
+        );
+    }
+}
+
+/// Runs one trivial test with time reporting toggled by `report_time` and
+/// returns the measured `exec_time` (expected to be `Some` only when enabled).
+fn report_time_test_template(report_time: bool) -> Option<TestExecTime> {
+    fn f() {}
+    let desc = TestDescAndFn {
+        desc: TestDesc {
+            name: StaticTestName("whatever"),
+            ignore: false,
+            ignore_message: None,
+            should_panic: ShouldPanic::No,
+            compile_fail: false,
+            no_run: false,
+            test_type: TestType::Unknown,
+        },
+        testfn: DynTestFn(Box::new(f)),
+    };
+    let time_options = if report_time { Some(TestTimeOptions::default()) } else { None };
+
+    let test_opts = TestOpts { time_options, ..TestOpts::new() };
+    let (tx, rx) = channel();
+    run_test(&test_opts, false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
+    let exec_time = rx.recv().unwrap().exec_time;
+    exec_time
+}
+
+/// No time options => no execution time in the completed-test event.
+#[test]
+fn test_should_not_report_time() {
+    let exec_time = report_time_test_template(false);
+    assert!(exec_time.is_none());
+}
+
+/// Time options present => execution time is measured and reported.
+#[test]
+fn test_should_report_time() {
+    let exec_time = report_time_test_template(true);
+    assert!(exec_time.is_some());
+}
+
+/// Runs one trivial test of the given `test_type` with zero time thresholds
+/// and `error_on_excess` set, returning the resulting `TestResult`.
+fn time_test_failure_template(test_type: TestType) -> TestResult {
+    fn f() {}
+    let desc = TestDescAndFn {
+        desc: TestDesc {
+            name: StaticTestName("whatever"),
+            ignore: false,
+            ignore_message: None,
+            should_panic: ShouldPanic::No,
+            compile_fail: false,
+            no_run: false,
+            test_type,
+        },
+        testfn: DynTestFn(Box::new(f)),
+    };
+    // `Default` will initialize all the thresholds to 0 milliseconds.
+    let mut time_options = TestTimeOptions::default();
+    time_options.error_on_excess = true;
+
+    let test_opts = TestOpts { time_options: Some(time_options), ..TestOpts::new() };
+    let (tx, rx) = channel();
+    run_test(&test_opts, false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
+    let result = rx.recv().unwrap().result;
+
+    result
+}
+
+/// With zero thresholds, every typed test exceeds its critical time and fails
+/// with `TrTimedFail`; `Unknown` tests are exempt from the typed thresholds.
+#[test]
+fn test_error_on_exceed() {
+    let types = [TestType::UnitTest, TestType::IntegrationTest, TestType::DocTest];
+
+    for test_type in types.iter() {
+        let result = time_test_failure_template(*test_type);
+
+        assert_eq!(result, TestResult::TrTimedFail);
+    }
+
+    // Check that for unknown tests thresholds aren't applied.
+    let result = time_test_failure_template(TestType::Unknown);
+    assert_eq!(result, TestResult::TrOk);
+}
+
+/// Fixture: a minimal `TestDesc` differing only in `test_type`.
+fn typed_test_desc(test_type: TestType) -> TestDesc {
+    TestDesc {
+        name: StaticTestName("whatever"),
+        ignore: false,
+        ignore_message: None,
+        should_panic: ShouldPanic::No,
+        compile_fail: false,
+        no_run: false,
+        test_type,
+    }
+}
+
+/// Fixture: wraps a millisecond count into a `TestExecTime`.
+fn test_exec_time(millis: u64) -> TestExecTime {
+    TestExecTime(Duration::from_millis(millis))
+}
+
+/// Threshold comparisons are inclusive: exactly the warn/critical duration
+/// already counts as warn/critical; one millisecond less does not.
+#[test]
+fn test_time_options_threshold() {
+    let unit = TimeThreshold::new(Duration::from_millis(50), Duration::from_millis(100));
+    let integration = TimeThreshold::new(Duration::from_millis(500), Duration::from_millis(1000));
+    let doc = TimeThreshold::new(Duration::from_millis(5000), Duration::from_millis(10000));
+
+    let options = TestTimeOptions {
+        error_on_excess: false,
+        unit_threshold: unit.clone(),
+        integration_threshold: integration.clone(),
+        doctest_threshold: doc.clone(),
+    };
+
+    // (test type, exec time in ms, expected is_warn, expected is_critical)
+    let test_vector = [
+        (TestType::UnitTest, unit.warn.as_millis() - 1, false, false),
+        (TestType::UnitTest, unit.warn.as_millis(), true, false),
+        (TestType::UnitTest, unit.critical.as_millis(), true, true),
+        (TestType::IntegrationTest, integration.warn.as_millis() - 1, false, false),
+        (TestType::IntegrationTest, integration.warn.as_millis(), true, false),
+        (TestType::IntegrationTest, integration.critical.as_millis(), true, true),
+        (TestType::DocTest, doc.warn.as_millis() - 1, false, false),
+        (TestType::DocTest, doc.warn.as_millis(), true, false),
+        (TestType::DocTest, doc.critical.as_millis(), true, true),
+    ];
+
+    for (test_type, time, expected_warn, expected_critical) in test_vector.iter() {
+        let test_desc = typed_test_desc(*test_type);
+        let exec_time = test_exec_time(*time as u64);
+
+        assert_eq!(options.is_warn(&test_desc, &exec_time), *expected_warn);
+        assert_eq!(options.is_critical(&test_desc, &exec_time), *expected_critical);
+    }
+}
+
+/// `--ignored` selects only the ignored tests.
+#[test]
+fn parse_ignored_flag() {
+    let args = vec!["progname".to_string(), "filter".to_string(), "--ignored".to_string()];
+    let opts = parse_opts(&args).unwrap().unwrap();
+    assert_eq!(opts.run_ignored, RunIgnored::Only);
+}
+
+/// `--show-output` turns on the display of successful-test output.
+#[test]
+fn parse_show_output_flag() {
+    let args = vec!["progname".to_string(), "filter".to_string(), "--show-output".to_string()];
+    let opts = parse_opts(&args).unwrap().unwrap();
+    assert!(opts.options.display_output);
+}
+
+/// `--include-ignored` runs ignored tests alongside the rest.
+#[test]
+fn parse_include_ignored_flag() {
+    let args = vec!["progname".to_string(), "filter".to_string(), "--include-ignored".to_string()];
+    let opts = parse_opts(&args).unwrap().unwrap();
+    assert_eq!(opts.run_ignored, RunIgnored::Yes);
+}
+
+#[test]
+pub fn filter_for_ignored_option() {
+    // When we run ignored tests the test filter should filter out all the
+    // unignored tests and flip the ignore flag on the rest to false
+
+    let mut opts = TestOpts::new();
+    opts.run_tests = true;
+    opts.run_ignored = RunIgnored::Only;
+
+    let tests = one_ignored_one_unignored_test();
+    let filtered = filter_tests(&opts, tests);
+
+    assert_eq!(filtered.len(), 1);
+    assert_eq!(filtered[0].desc.name.to_string(), "1");
+    assert!(!filtered[0].desc.ignore);
+}
+
+#[test]
+pub fn run_include_ignored_option() {
+    // When we "--include-ignored" tests, the ignore flag should be set to false on
+    // all tests and no test filtered out
+
+    let mut opts = TestOpts::new();
+    opts.run_tests = true;
+    opts.run_ignored = RunIgnored::Yes;
+
+    let tests = one_ignored_one_unignored_test();
+    let filtered = filter_tests(&opts, tests);
+
+    assert_eq!(filtered.len(), 2);
+    assert!(!filtered[0].desc.ignore);
+    assert!(!filtered[1].desc.ignore);
+}
+
+/// `exclude_should_panic` drops every test whose `should_panic` is not `No`.
+#[test]
+pub fn exclude_should_panic_option() {
+    let mut opts = TestOpts::new();
+    opts.run_tests = true;
+    opts.exclude_should_panic = true;
+
+    let mut tests = one_ignored_one_unignored_test();
+    tests.push(TestDescAndFn {
+        desc: TestDesc {
+            name: StaticTestName("3"),
+            ignore: false,
+            ignore_message: None,
+            should_panic: ShouldPanic::Yes,
+            compile_fail: false,
+            no_run: false,
+            test_type: TestType::Unknown,
+        },
+        testfn: DynTestFn(Box::new(move || {})),
+    });
+
+    let filtered = filter_tests(&opts, tests);
+
+    assert_eq!(filtered.len(), 2);
+    assert!(filtered.iter().all(|test| test.desc.should_panic == ShouldPanic::No));
+}
+
+/// Substring filters match anywhere in the name; `filter_exact` requires the
+/// whole name to equal the filter. Multiple filters are OR-ed together.
+#[test]
+pub fn exact_filter_match() {
+    fn tests() -> Vec<TestDescAndFn> {
+        ["base", "base::test", "base::test1", "base::test2"]
+            .into_iter()
+            .map(|name| TestDescAndFn {
+                desc: TestDesc {
+                    name: StaticTestName(name),
+                    ignore: false,
+                    ignore_message: None,
+                    should_panic: ShouldPanic::No,
+                    compile_fail: false,
+                    no_run: false,
+                    test_type: TestType::Unknown,
+                },
+                testfn: DynTestFn(Box::new(move || {})),
+            })
+            .collect()
+    }
+
+    let substr =
+        filter_tests(&TestOpts { filters: vec!["base".into()], ..TestOpts::new() }, tests());
+    assert_eq!(substr.len(), 4);
+
+    let substr =
+        filter_tests(&TestOpts { filters: vec!["bas".into()], ..TestOpts::new() }, tests());
+    assert_eq!(substr.len(), 4);
+
+    let substr =
+        filter_tests(&TestOpts { filters: vec!["::test".into()], ..TestOpts::new() }, tests());
+    assert_eq!(substr.len(), 3);
+
+    let substr =
+        filter_tests(&TestOpts { filters: vec!["base::test".into()], ..TestOpts::new() }, tests());
+    assert_eq!(substr.len(), 3);
+
+    let substr = filter_tests(
+        &TestOpts { filters: vec!["test1".into(), "test2".into()], ..TestOpts::new() },
+        tests(),
+    );
+    assert_eq!(substr.len(), 2);
+
+    let exact = filter_tests(
+        &TestOpts { filters: vec!["base".into()], filter_exact: true, ..TestOpts::new() },
+        tests(),
+    );
+    assert_eq!(exact.len(), 1);
+
+    let exact = filter_tests(
+        &TestOpts { filters: vec!["bas".into()], filter_exact: true, ..TestOpts::new() },
+        tests(),
+    );
+    assert_eq!(exact.len(), 0);
+
+    let exact = filter_tests(
+        &TestOpts { filters: vec!["::test".into()], filter_exact: true, ..TestOpts::new() },
+        tests(),
+    );
+    assert_eq!(exact.len(), 0);
+
+    let exact = filter_tests(
+        &TestOpts { filters: vec!["base::test".into()], filter_exact: true, ..TestOpts::new() },
+        tests(),
+    );
+    assert_eq!(exact.len(), 1);
+
+    let exact = filter_tests(
+        &TestOpts {
+            filters: vec!["base".into(), "base::test".into()],
+            filter_exact: true,
+            ..TestOpts::new()
+        },
+        tests(),
+    );
+    assert_eq!(exact.len(), 2);
+}
+
+/// Fixture: eleven trivially-passing tests with realistic, unsorted names.
+fn sample_tests() -> Vec<TestDescAndFn> {
+    let names = vec![
+        "sha1::test".to_string(),
+        "isize::test_to_str".to_string(),
+        "isize::test_pow".to_string(),
+        "test::do_not_run_ignored_tests".to_string(),
+        "test::ignored_tests_result_in_ignored".to_string(),
+        "test::first_free_arg_should_be_a_filter".to_string(),
+        "test::parse_ignored_flag".to_string(),
+        "test::parse_include_ignored_flag".to_string(),
+        "test::filter_for_ignored_option".to_string(),
+        "test::run_include_ignored_option".to_string(),
+        "test::sort_tests".to_string(),
+    ];
+    fn testfn() {}
+    let mut tests = Vec::new();
+    for name in &names {
+        let test = TestDescAndFn {
+            desc: TestDesc {
+                name: DynTestName((*name).clone()),
+                ignore: false,
+                ignore_message: None,
+                should_panic: ShouldPanic::No,
+                compile_fail: false,
+                no_run: false,
+                test_type: TestType::Unknown,
+            },
+            testfn: DynTestFn(Box::new(testfn)),
+        };
+        tests.push(test);
+    }
+    tests
+}
+
+/// `filter_tests` returns tests sorted by name (lexicographically).
+#[test]
+pub fn sort_tests() {
+    let mut opts = TestOpts::new();
+    opts.run_tests = true;
+
+    let tests = sample_tests();
+    let filtered = filter_tests(&opts, tests);
+
+    let expected = vec![
+        "isize::test_pow".to_string(),
+        "isize::test_to_str".to_string(),
+        "sha1::test".to_string(),
+        "test::do_not_run_ignored_tests".to_string(),
+        "test::filter_for_ignored_option".to_string(),
+        "test::first_free_arg_should_be_a_filter".to_string(),
+        "test::ignored_tests_result_in_ignored".to_string(),
+        "test::parse_ignored_flag".to_string(),
+        "test::parse_include_ignored_flag".to_string(),
+        "test::run_include_ignored_option".to_string(),
+        "test::sort_tests".to_string(),
+    ];
+
+    for (a, b) in expected.iter().zip(filtered) {
+        assert_eq!(*a, b.desc.name.to_string());
+    }
+}
+
+/// Shuffling must actually change the order for this sample (the seed making
+/// the shuffle a no-op would be astronomically unlikely but not impossible).
+#[test]
+pub fn shuffle_tests() {
+    let mut opts = TestOpts::new();
+    opts.shuffle = true;
+
+    let shuffle_seed = get_shuffle_seed(&opts).unwrap();
+
+    let left =
+        sample_tests().into_iter().enumerate().map(|(i, e)| (TestId(i), e)).collect::<Vec<_>>();
+    let mut right =
+        sample_tests().into_iter().enumerate().map(|(i, e)| (TestId(i), e)).collect::<Vec<_>>();
+
+    assert!(left.iter().zip(&right).all(|(a, b)| a.1.desc.name == b.1.desc.name));
+
+    helpers::shuffle::shuffle_tests(shuffle_seed, right.as_mut_slice());
+
+    assert!(left.iter().zip(right).any(|(a, b)| a.1.desc.name != b.1.desc.name));
+}
+
+/// The same seed applied to identical test lists must produce identical orders.
+#[test]
+pub fn shuffle_tests_with_seed() {
+    let mut opts = TestOpts::new();
+    opts.shuffle = true;
+
+    let shuffle_seed = get_shuffle_seed(&opts).unwrap();
+
+    let mut left =
+        sample_tests().into_iter().enumerate().map(|(i, e)| (TestId(i), e)).collect::<Vec<_>>();
+    let mut right =
+        sample_tests().into_iter().enumerate().map(|(i, e)| (TestId(i), e)).collect::<Vec<_>>();
+
+    helpers::shuffle::shuffle_tests(shuffle_seed, left.as_mut_slice());
+    helpers::shuffle::shuffle_tests(shuffle_seed, right.as_mut_slice());
+
+    assert!(left.iter().zip(right).all(|(a, b)| a.1.desc.name == b.1.desc.name));
+}
+
+/// The shuffled order must depend on the test set itself, not only the seed:
+/// two lists of equal length but different contents shuffle differently.
+#[test]
+pub fn order_depends_on_more_than_seed() {
+    let mut opts = TestOpts::new();
+    opts.shuffle = true;
+
+    let shuffle_seed = get_shuffle_seed(&opts).unwrap();
+
+    let mut left_tests = sample_tests();
+    let mut right_tests = sample_tests();
+
+    // Drop a different element from each side so lengths match but names differ.
+    left_tests.pop();
+    right_tests.remove(0);
+
+    let mut left =
+        left_tests.into_iter().enumerate().map(|(i, e)| (TestId(i), e)).collect::<Vec<_>>();
+    let mut right =
+        right_tests.into_iter().enumerate().map(|(i, e)| (TestId(i), e)).collect::<Vec<_>>();
+
+    assert_eq!(left.len(), right.len());
+
+    assert!(left.iter().zip(&right).all(|(a, b)| a.0 == b.0));
+
+    helpers::shuffle::shuffle_tests(shuffle_seed, left.as_mut_slice());
+    helpers::shuffle::shuffle_tests(shuffle_seed, right.as_mut_slice());
+
+    assert!(left.iter().zip(right).any(|(a, b)| a.0 != b.0));
+}
+
+/// Exercises `MetricMap::insert_metric` with various value/noise combinations.
+/// NOTE(review): this test only builds the maps and makes no assertions — it
+/// checks that insertion doesn't panic, nothing more.
+#[test]
+pub fn test_metricmap_compare() {
+    let mut m1 = MetricMap::new();
+    let mut m2 = MetricMap::new();
+    m1.insert_metric("in-both-noise", 1000.0, 200.0);
+    m2.insert_metric("in-both-noise", 1100.0, 200.0);
+
+    m1.insert_metric("in-first-noise", 1000.0, 2.0);
+    m2.insert_metric("in-second-noise", 1000.0, 2.0);
+
+    m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
+    m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
+
+    m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
+    m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
+
+    m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
+    m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
+
+    m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
+    m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
+}
+
+/// `bench::run_once` must accept a bench function that never calls `iter`.
+#[test]
+pub fn test_bench_once_no_iter() {
+    fn f(_: &mut Bencher) {}
+    bench::run_once(f);
+}
+
+/// `bench::run_once` with a bench function that does call `iter`.
+#[test]
+pub fn test_bench_once_iter() {
+    fn f(b: &mut Bencher) {
+        b.iter(|| {})
+    }
+    bench::run_once(f);
+}
+
+/// `bench::benchmark` must complete (send an event) even if `iter` is never called.
+#[test]
+pub fn test_bench_no_iter() {
+    fn f(_: &mut Bencher) {}
+
+    let (tx, rx) = channel();
+
+    let desc = TestDesc {
+        name: StaticTestName("f"),
+        ignore: false,
+        ignore_message: None,
+        should_panic: ShouldPanic::No,
+        compile_fail: false,
+        no_run: false,
+        test_type: TestType::Unknown,
+    };
+
+    crate::bench::benchmark(TestId(0), desc, tx, true, f);
+    rx.recv().unwrap();
+}
+
+/// `bench::benchmark` with a benchmark that does iterate.
+#[test]
+pub fn test_bench_iter() {
+    fn f(b: &mut Bencher) {
+        b.iter(|| {})
+    }
+
+    let (tx, rx) = channel();
+
+    let desc = TestDesc {
+        name: StaticTestName("f"),
+        ignore: false,
+        ignore_message: None,
+        should_panic: ShouldPanic::No,
+        compile_fail: false,
+        no_run: false,
+        test_type: TestType::Unknown,
+    };
+
+    crate::bench::benchmark(TestId(0), desc, tx, true, f);
+    rx.recv().unwrap();
+}
+
+/// Failures are stored out of order ("b" before "a"); the pretty formatter must
+/// sort them before printing, so "a" appears before "b" in the raw output.
+#[test]
+fn should_sort_failures_before_printing_them() {
+    let test_a = TestDesc {
+        name: StaticTestName("a"),
+        ignore: false,
+        ignore_message: None,
+        should_panic: ShouldPanic::No,
+        compile_fail: false,
+        no_run: false,
+        test_type: TestType::Unknown,
+    };
+
+    let test_b = TestDesc {
+        name: StaticTestName("b"),
+        ignore: false,
+        ignore_message: None,
+        should_panic: ShouldPanic::No,
+        compile_fail: false,
+        no_run: false,
+        test_type: TestType::Unknown,
+    };
+
+    let mut out = PrettyFormatter::new(OutputLocation::Raw(Vec::new()), false, 10, false, None);
+
+    let st = console::ConsoleTestState {
+        log_out: None,
+        total: 0,
+        passed: 0,
+        failed: 0,
+        ignored: 0,
+        filtered_out: 0,
+        measured: 0,
+        exec_time: None,
+        metrics: MetricMap::new(),
+        failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
+        options: Options::new(),
+        not_failures: Vec::new(),
+        time_failures: Vec::new(),
+    };
+
+    out.write_failures(&st).unwrap();
+    let s = match out.output_location() {
+        &OutputLocation::Raw(ref m) => String::from_utf8_lossy(&m[..]),
+        &OutputLocation::Pretty(_) => unreachable!(),
+    };
+
+    let apos = s.find("a").unwrap();
+    let bpos = s.find("b").unwrap();
+    assert!(apos < bpos);
+}
diff --git a/library/test/src/time.rs b/library/test/src/time.rs
new file mode 100644
index 000000000..8c64e5d1b
--- /dev/null
+++ b/library/test/src/time.rs
@@ -0,0 +1,197 @@
+//! Module `time` contains everything related to the time measurement of unit tests
+//! execution.
+//! The purposes of this module:
+//! - Check whether test is timed out.
+//! - Provide helpers for `report-time` and `measure-time` options.
+//! - Provide newtypes for executions times.
+
+use std::env;
+use std::fmt;
+use std::str::FromStr;
+use std::time::{Duration, Instant};
+
+use super::types::{TestDesc, TestType};
+
+/// Default warn timeout in seconds; used by `get_default_test_timeout` and as
+/// the base for the `Unknown` test-type thresholds in `time_constants`.
+pub const TEST_WARN_TIMEOUT_S: u64 = 60;
+
+/// This small module contains constants used by `report-time` option.
+/// Those constants values will be used if corresponding environment variables are not set.
+///
+/// To override values for unit-tests, use a constant `RUST_TEST_TIME_UNIT`,
+/// To override values for integration tests, use a constant `RUST_TEST_TIME_INTEGRATION`,
+/// To override values for doctests, use a constant `RUST_TEST_TIME_DOCTEST`.
+///
+/// Example of the expected format is `RUST_TEST_TIME_xxx=100,200`, where 100 means
+/// warn time, and 200 means critical time.
+pub mod time_constants {
+    use super::TEST_WARN_TIMEOUT_S;
+    use std::time::Duration;
+
+    /// Environment variable for overriding default threshold for unit-tests.
+    pub const UNIT_ENV_NAME: &str = "RUST_TEST_TIME_UNIT";
+
+    // Unit tests are supposed to be really quick.
+    pub const UNIT_WARN: Duration = Duration::from_millis(50);
+    pub const UNIT_CRITICAL: Duration = Duration::from_millis(100);
+
+    /// Environment variable for overriding default threshold for integration tests.
+    pub const INTEGRATION_ENV_NAME: &str = "RUST_TEST_TIME_INTEGRATION";
+
+    // Integration tests may have a lot of work, so they can take longer to execute.
+    pub const INTEGRATION_WARN: Duration = Duration::from_millis(500);
+    pub const INTEGRATION_CRITICAL: Duration = Duration::from_millis(1000);
+
+    /// Environment variable for overriding default threshold for doctests.
+    pub const DOCTEST_ENV_NAME: &str = "RUST_TEST_TIME_DOCTEST";
+
+    // Doctests are similar to integration tests, because they can include a lot of
+    // initialization code.
+    pub const DOCTEST_WARN: Duration = INTEGRATION_WARN;
+    pub const DOCTEST_CRITICAL: Duration = INTEGRATION_CRITICAL;
+
+    // Do not suppose anything about unknown tests, base limits on the
+    // `TEST_WARN_TIMEOUT_S` constant.
+    pub const UNKNOWN_WARN: Duration = Duration::from_secs(TEST_WARN_TIMEOUT_S);
+    pub const UNKNOWN_CRITICAL: Duration = Duration::from_secs(TEST_WARN_TIMEOUT_S * 2);
+}
+
+/// Returns an `Instant` object denoting when the test should be considered
+/// timed out.
+pub fn get_default_test_timeout() -> Instant {
+    Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S)
+}
+
+/// The measured execution time of a unit test.
+#[derive(Debug, Clone, PartialEq)]
+pub struct TestExecTime(pub Duration);
+
+impl fmt::Display for TestExecTime {
+    // Rendered as fractional seconds with millisecond precision, e.g. "0.123s".
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:.3}s", self.0.as_secs_f64())
+    }
+}
+
+/// The measured execution time of the whole test suite.
+#[derive(Debug, Clone, Default, PartialEq)]
+pub struct TestSuiteExecTime(pub Duration);
+
+impl fmt::Display for TestSuiteExecTime {
+    // Rendered as fractional seconds with centisecond precision, e.g. "1.23s".
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:.2}s", self.0.as_secs_f64())
+    }
+}
+
+/// Structure denoting time limits for test execution.
+#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
+pub struct TimeThreshold {
+    pub warn: Duration,
+    pub critical: Duration,
+}
+
+impl TimeThreshold {
+    /// Creates a new `TimeThreshold` instance with provided durations.
+    pub fn new(warn: Duration, critical: Duration) -> Self {
+        Self { warn, critical }
+    }
+
+    /// Attempts to create a `TimeThreshold` instance with values obtained
+    /// from the environment variable, and returns `None` if the variable
+    /// is not set.
+    /// Environment variable format is expected to match `\d+,\d+`, where both
+    /// numbers are interpreted as milliseconds (warn time, then critical time).
+    ///
+    /// # Panics
+    ///
+    /// Panics if variable with provided name is set but contains inappropriate
+    /// value.
+    pub fn from_env_var(env_var_name: &str) -> Option<Self> {
+        let durations_str = env::var(env_var_name).ok()?;
+        // Expect exactly two comma-separated values; anything else is a
+        // configuration error and aborts with a descriptive panic.
+        let (warn_str, critical_str) = durations_str.split_once(',').unwrap_or_else(|| {
+            panic!(
+                "Duration variable {} expected to have 2 numbers separated by comma, but got {}",
+                env_var_name, durations_str
+            )
+        });
+
+        let parse_u64 = |v| {
+            u64::from_str(v).unwrap_or_else(|_| {
+                panic!(
+                    "Duration value in variable {} is expected to be a number, but got {}",
+                    env_var_name, v
+                )
+            })
+        };
+
+        let warn = parse_u64(warn_str);
+        let critical = parse_u64(critical_str);
+        if warn > critical {
+            panic!("Test execution warn time should be less or equal to the critical time");
+        }
+
+        Some(Self::new(Duration::from_millis(warn), Duration::from_millis(critical)))
+    }
+}
+
+/// Structure with parameters for calculating test execution time.
+#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
+pub struct TestTimeOptions {
+    /// Denotes if the test critical execution time limit excess should be considered
+    /// a test failure.
+    pub error_on_excess: bool,
+    pub unit_threshold: TimeThreshold,
+    pub integration_threshold: TimeThreshold,
+    pub doctest_threshold: TimeThreshold,
+}
+
+impl TestTimeOptions {
+    /// Builds options from the `RUST_TEST_TIME_*` environment variables,
+    /// falling back to the compiled-in defaults for each test type.
+    pub fn new_from_env(error_on_excess: bool) -> Self {
+        let unit_threshold = TimeThreshold::from_env_var(time_constants::UNIT_ENV_NAME)
+            .unwrap_or_else(Self::default_unit);
+
+        let integration_threshold =
+            TimeThreshold::from_env_var(time_constants::INTEGRATION_ENV_NAME)
+                .unwrap_or_else(Self::default_integration);
+
+        let doctest_threshold = TimeThreshold::from_env_var(time_constants::DOCTEST_ENV_NAME)
+            .unwrap_or_else(Self::default_doctest);
+
+        Self { error_on_excess, unit_threshold, integration_threshold, doctest_threshold }
+    }
+
+    /// Whether `exec_time` reached the warn threshold for this test's type
+    /// (comparison is inclusive: exactly the threshold counts).
+    pub fn is_warn(&self, test: &TestDesc, exec_time: &TestExecTime) -> bool {
+        exec_time.0 >= self.warn_time(test)
+    }
+
+    /// Whether `exec_time` reached the critical threshold for this test's type.
+    pub fn is_critical(&self, test: &TestDesc, exec_time: &TestExecTime) -> bool {
+        exec_time.0 >= self.critical_time(test)
+    }
+
+    // Selects the warn threshold appropriate for the test's type.
+    fn warn_time(&self, test: &TestDesc) -> Duration {
+        match test.test_type {
+            TestType::UnitTest => self.unit_threshold.warn,
+            TestType::IntegrationTest => self.integration_threshold.warn,
+            TestType::DocTest => self.doctest_threshold.warn,
+            TestType::Unknown => time_constants::UNKNOWN_WARN,
+        }
+    }
+
+    // Selects the critical threshold appropriate for the test's type.
+    fn critical_time(&self, test: &TestDesc) -> Duration {
+        match test.test_type {
+            TestType::UnitTest => self.unit_threshold.critical,
+            TestType::IntegrationTest => self.integration_threshold.critical,
+            TestType::DocTest => self.doctest_threshold.critical,
+            TestType::Unknown => time_constants::UNKNOWN_CRITICAL,
+        }
+    }
+
+    fn default_unit() -> TimeThreshold {
+        TimeThreshold::new(time_constants::UNIT_WARN, time_constants::UNIT_CRITICAL)
+    }
+
+    fn default_integration() -> TimeThreshold {
+        TimeThreshold::new(time_constants::INTEGRATION_WARN, time_constants::INTEGRATION_CRITICAL)
+    }
+
+    fn default_doctest() -> TimeThreshold {
+        TimeThreshold::new(time_constants::DOCTEST_WARN, time_constants::DOCTEST_CRITICAL)
+    }
+}
diff --git a/library/test/src/types.rs b/library/test/src/types.rs
new file mode 100644
index 000000000..ffb1efe18
--- /dev/null
+++ b/library/test/src/types.rs
@@ -0,0 +1,167 @@
+//! Common types used by `libtest`.
+
+use std::borrow::Cow;
+use std::fmt;
+
+use super::bench::Bencher;
+use super::options;
+
+pub use NamePadding::*;
+pub use TestFn::*;
+pub use TestName::*;
+
+/// Type of the test according to the [rust book](https://doc.rust-lang.org/cargo/guide/tests.html)
+/// conventions.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub enum TestType {
+    /// Unit-tests are expected to be in the `src` folder of the crate.
+    UnitTest,
+    /// Integration-style tests are expected to be in the `tests` folder of the crate.
+    IntegrationTest,
+    /// Doctests are created by the `librustdoc` manually, so it's a different type of test.
+    DocTest,
+    /// Tests for the sources that don't follow the project layout convention
+    /// (e.g. tests in raw `main.rs` compiled by calling `rustc --test` directly).
+    Unknown,
+}
+
+/// How a test name should be padded when rendered in a column of results.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+pub enum NamePadding {
+    /// Print the name as-is, with no padding.
+    PadNone,
+    /// Pad the name on the right (trailing spaces) up to the column width.
+    PadOnRight,
+}
+
+// The name of a test. By convention this follows the rules for rust
+// paths; i.e., it should be a series of identifiers separated by double
+// colons. This way if some test runner wants to arrange the tests
+// hierarchically it may.
+#[derive(Clone, PartialEq, Eq, Hash, Debug)]
+pub enum TestName {
+    /// A name known at compile time (the common case for `#[test]` fns).
+    StaticTestName(&'static str),
+    /// A name constructed at runtime.
+    DynTestName(String),
+    /// A name paired with an explicit padding mode for aligned output.
+    AlignedTestName(Cow<'static, str>, NamePadding),
+}
+
+impl TestName {
+    /// Returns the name as a string slice, regardless of which variant
+    /// holds it.
+    pub fn as_slice(&self) -> &str {
+        match *self {
+            StaticTestName(s) => s,
+            DynTestName(ref s) => s,
+            AlignedTestName(ref s, _) => &*s,
+        }
+    }
+
+    /// Returns the padding mode attached to this name; only
+    /// `AlignedTestName` carries one, all other variants default to
+    /// `PadNone`.
+    pub fn padding(&self) -> NamePadding {
+        match self {
+            &AlignedTestName(_, p) => p,
+            _ => PadNone,
+        }
+    }
+
+    /// Returns a copy of this name converted to the `AlignedTestName`
+    /// variant with the given padding mode (replacing any existing one).
+    pub fn with_padding(&self, padding: NamePadding) -> TestName {
+        let name = match *self {
+            TestName::StaticTestName(name) => Cow::Borrowed(name),
+            TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
+            TestName::AlignedTestName(ref name, _) => name.clone(),
+        };
+
+        TestName::AlignedTestName(name, padding)
+    }
+}
+// Display delegates to the underlying string, so `{}` prints the bare
+// test name for every variant.
+impl fmt::Display for TestName {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(self.as_slice(), f)
+    }
+}
+
+// A function that runs a test. If the function returns successfully,
+// the test succeeds; if the function panics then the test fails. We
+// may need to come up with a more clever definition of test in order
+// to support isolation of tests into threads.
+pub enum TestFn {
+    /// An ordinary test given as a plain fn pointer.
+    StaticTestFn(fn()),
+    /// A benchmark given as a plain fn pointer taking a `Bencher`.
+    StaticBenchFn(fn(&mut Bencher)),
+    /// An ordinary test given as a boxed one-shot closure.
+    DynTestFn(Box<dyn FnOnce() + Send>),
+    /// A benchmark given as a boxed closure taking a `Bencher`.
+    DynBenchFn(Box<dyn Fn(&mut Bencher) + Send>),
+}
+
+impl TestFn {
+    /// Returns the name-padding mode appropriate for this kind of test:
+    /// benchmarks pad on the right, ordinary tests are not padded.
+    pub fn padding(&self) -> NamePadding {
+        match *self {
+            StaticTestFn(..) => PadNone,
+            StaticBenchFn(..) => PadOnRight,
+            DynTestFn(..) => PadNone,
+            DynBenchFn(..) => PadOnRight,
+        }
+    }
+}
+
+// Debug is implemented by hand because the boxed closure variants cannot
+// derive it; only the variant name is printed, never the payload.
+impl fmt::Debug for TestFn {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(match *self {
+            StaticTestFn(..) => "StaticTestFn(..)",
+            StaticBenchFn(..) => "StaticBenchFn(..)",
+            DynTestFn(..) => "DynTestFn(..)",
+            DynBenchFn(..) => "DynBenchFn(..)",
+        })
+    }
+}
+
+// A unique integer associated with each test.
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
+pub struct TestId(pub usize);
+
+// The definition of a single test. A test runner will run a list of
+// these.
+#[derive(Clone, Debug)]
+pub struct TestDesc {
+    /// The (possibly padded) name of the test.
+    pub name: TestName,
+    /// Whether the test is marked `#[ignore]`.
+    pub ignore: bool,
+    /// Optional reason string given with `#[ignore = "..."]`.
+    pub ignore_message: Option<&'static str>,
+    /// Whether (and with what message) the test is expected to panic.
+    pub should_panic: options::ShouldPanic,
+    /// Whether the test is expected to fail to compile (doctests).
+    pub compile_fail: bool,
+    /// Whether the test should be compiled but not executed (doctests).
+    pub no_run: bool,
+    /// Which kind of test this is (unit, integration, doctest, unknown).
+    pub test_type: TestType,
+}
+
+impl TestDesc {
+    /// Returns the test name formatted for a column of width
+    /// `column_count`: unchanged for `PadNone`, right-padded with spaces
+    /// for `PadOnRight`. Names longer than the column are left untouched
+    /// (`saturating_sub` yields zero fill).
+    pub fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
+        let mut name = String::from(self.name.as_slice());
+        let fill = column_count.saturating_sub(name.len());
+        let pad = " ".repeat(fill);
+        match align {
+            PadNone => name,
+            PadOnRight => {
+                name.push_str(&pad);
+                name
+            }
+        }
+    }
+
+    /// Returns `None` for ignored tests or tests that are run in the usual
+    /// way; otherwise gives a description of the type of test.
+    /// Descriptions include "should panic", "compile fail" and "compile".
+    pub fn test_mode(&self) -> Option<&'static str> {
+        if self.ignore {
+            return None;
+        }
+        // `should_panic` takes precedence over the doctest-only flags below.
+        match self.should_panic {
+            options::ShouldPanic::Yes | options::ShouldPanic::YesWithMessage(_) => {
+                return Some("should panic");
+            }
+            options::ShouldPanic::No => {}
+        }
+        if self.compile_fail {
+            return Some("compile fail");
+        }
+        if self.no_run {
+            return Some("compile");
+        }
+        None
+    }
+}
+
+/// A test description paired with the function that runs it — the unit
+/// handed to the test runner.
+#[derive(Debug)]
+pub struct TestDescAndFn {
+    pub desc: TestDesc,
+    pub testfn: TestFn,
+}