author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-17 12:18:25 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-17 12:18:25 +0000
commit     5363f350887b1e5b5dd21a86f88c8af9d7fea6da (patch)
tree       35ca005eb6e0e9a1ba3bb5dbc033209ad445dc17 /library/test
parent     Adding debian version 1.66.0+dfsg1-1. (diff)
download   rustc-5363f350887b1e5b5dd21a86f88c8af9d7fea6da.tar.xz
           rustc-5363f350887b1e5b5dd21a86f88c8af9d7fea6da.zip
Merging upstream version 1.67.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library/test')
-rw-r--r--   library/test/Cargo.toml       |    2
-rw-r--r--   library/test/src/cli.rs       |    5
-rw-r--r--   library/test/src/console.rs   |    8
-rw-r--r--   library/test/src/event.rs     |    2
-rw-r--r--   library/test/src/lib.rs       |  168
-rw-r--r--   library/test/src/options.rs   |    7
-rw-r--r--   library/test/src/tests.rs     |   27
7 files changed, 134 insertions, 85 deletions
diff --git a/library/test/Cargo.toml b/library/test/Cargo.toml
index 2da41484c..61b6f33bc 100644
--- a/library/test/Cargo.toml
+++ b/library/test/Cargo.toml
@@ -7,7 +7,7 @@ edition = "2021"
 crate-type = ["dylib", "rlib"]

 [dependencies]
-cfg-if = { version = "0.1.8", features = ['rustc-dep-of-std'] }
+cfg-if = { version = "1.0", features = ['rustc-dep-of-std'] }
 getopts = { version = "0.2.21", features = ['rustc-dep-of-std'] }
 std = { path = "../std" }
 core = { path = "../core" }
diff --git a/library/test/src/cli.rs b/library/test/src/cli.rs
index 8be32183f..524658bce 100644
--- a/library/test/src/cli.rs
+++ b/library/test/src/cli.rs
@@ -26,6 +26,10 @@ pub struct TestOpts {
     pub test_threads: Option<usize>,
     pub skip: Vec<String>,
     pub time_options: Option<TestTimeOptions>,
+    /// Stop at first failing test.
+    /// May run a few more tests due to threading, but will
+    /// abort as soon as possible.
+    pub fail_fast: bool,
     pub options: Options,
 }

@@ -296,6 +300,7 @@ fn parse_opts_impl(matches: getopts::Matches) -> OptRes {
         skip,
         time_options,
         options,
+        fail_fast: false,
     };

     Ok(test_opts)
diff --git a/library/test/src/console.rs b/library/test/src/console.rs
index b1270c272..a3c39f71f 100644
--- a/library/test/src/console.rs
+++ b/library/test/src/console.rs
@@ -228,9 +228,9 @@ fn on_test_event(
     out: &mut dyn OutputFormatter,
 ) -> io::Result<()> {
     match (*event).clone() {
-        TestEvent::TeFiltered(ref filtered_tests, shuffle_seed) => {
-            st.total = filtered_tests.len();
-            out.write_run_start(filtered_tests.len(), shuffle_seed)?;
+        TestEvent::TeFiltered(filtered_tests, shuffle_seed) => {
+            st.total = filtered_tests;
+            out.write_run_start(filtered_tests, shuffle_seed)?;
         }
         TestEvent::TeFilteredOut(filtered_out) => {
             st.filtered_out = filtered_out;
@@ -293,7 +293,7 @@ pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
     run_tests(opts, tests, |x| on_test_event(&x, &mut st, &mut *out))?;

     st.exec_time = start_time.map(|t| TestSuiteExecTime(t.elapsed()));
-    assert!(st.current_test_count() == st.total);
+    assert!(opts.fail_fast || st.current_test_count() == st.total);

     out.write_run_finish(&st)
 }
diff --git a/library/test/src/event.rs b/library/test/src/event.rs
index 6ff1a615e..80281ebd2 100644
--- a/library/test/src/event.rs
+++ b/library/test/src/event.rs
@@ -28,7 +28,7 @@ impl CompletedTest {

 #[derive(Debug, Clone)]
 pub enum TestEvent {
-    TeFiltered(Vec<TestDesc>, Option<u64>),
+    TeFiltered(usize, Option<u64>),
     TeWait(TestDesc),
     TeResult(CompletedTest),
     TeTimeout(TestDesc),
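The cli.rs, console.rs and event.rs hunks above add the new fail_fast option and shrink TeFiltered down to the number of tests that will actually run. A minimal sketch of how the flag can be reached today, assuming the test-only TestOpts::new() constructor from src/tests.rs (the CLI path, parse_opts, still hard-codes fail_fast: false, so there is no command-line switch yet):

    // Hypothetical sketch, not part of the patch: written as it would appear
    // inside library/test's own unit tests, where TestOpts::new() is in scope.
    #[test]
    fn fail_fast_can_be_enabled() {
        let opts = TestOpts { fail_fast: true, ..TestOpts::new() };
        assert!(opts.fail_fast);
    }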
diff --git a/library/test/src/lib.rs b/library/test/src/lib.rs
index 141f16d17..256c9e8d1 100644
--- a/library/test/src/lib.rs
+++ b/library/test/src/lib.rs
@@ -40,7 +40,7 @@ pub mod test {
         cli::{parse_opts, TestOpts},
         filter_tests,
         helpers::metrics::{Metric, MetricMap},
-        options::{Concurrent, Options, RunIgnored, RunStrategy, ShouldPanic},
+        options::{Options, RunIgnored, RunStrategy, ShouldPanic},
         run_test, test_main, test_main_static,
         test_result::{TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk},
         time::{TestExecTime, TestTimeOptions},
@@ -85,7 +85,7 @@ use event::{CompletedTest, TestEvent};
 use helpers::concurrency::get_concurrency;
 use helpers::exit_code::get_exit_code;
 use helpers::shuffle::{get_shuffle_seed, shuffle_tests};
-use options::{Concurrent, RunStrategy};
+use options::RunStrategy;
 use test_result::*;
 use time::TestExecTime;

@@ -219,6 +219,38 @@ pub fn assert_test_result<T: Termination>(result: T) -> Result<(), String> {
     }
 }

+struct FilteredTests {
+    tests: Vec<(TestId, TestDescAndFn)>,
+    benchs: Vec<(TestId, TestDescAndFn)>,
+    next_id: usize,
+}
+
+impl FilteredTests {
+    fn add_bench(&mut self, desc: TestDesc, testfn: TestFn) {
+        let test = TestDescAndFn { desc, testfn };
+        self.benchs.push((TestId(self.next_id), test));
+        self.next_id += 1;
+    }
+    fn add_test(&mut self, desc: TestDesc, testfn: TestFn) {
+        let test = TestDescAndFn { desc, testfn };
+        self.tests.push((TestId(self.next_id), test));
+        self.next_id += 1;
+    }
+    fn add_bench_as_test(
+        &mut self,
+        desc: TestDesc,
+        benchfn: impl Fn(&mut Bencher) -> Result<(), String> + Send + 'static,
+    ) {
+        let testfn = DynTestFn(Box::new(move || {
+            bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
+        }));
+        self.add_test(desc, testfn);
+    }
+    fn total_len(&self) -> usize {
+        self.tests.len() + self.benchs.len()
+    }
+}
+
 pub fn run_tests<F>(
     opts: &TestOpts,
     tests: Vec<TestDescAndFn>,
@@ -235,6 +267,19 @@ where
         join_handle: Option<thread::JoinHandle<()>>,
     }

+    impl RunningTest {
+        fn join(self, completed_test: &mut CompletedTest) {
+            if let Some(join_handle) = self.join_handle {
+                if let Err(_) = join_handle.join() {
+                    if let TrOk = completed_test.result {
+                        completed_test.result =
+                            TrFailedMsg("panicked after reporting success".to_string());
+                    }
+                }
+            }
+        }
+    }
+
     // Use a deterministic hasher
     type TestMap =
         HashMap<TestId, RunningTest, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
@@ -247,45 +292,51 @@ where

     let tests_len = tests.len();

-    let mut filtered_tests = filter_tests(opts, tests);
-    if !opts.bench_benchmarks {
-        filtered_tests = convert_benchmarks_to_tests(filtered_tests);
-    }
+    let mut filtered = FilteredTests { tests: Vec::new(), benchs: Vec::new(), next_id: 0 };

-    let filtered_tests = {
-        let mut filtered_tests = filtered_tests;
-        for test in filtered_tests.iter_mut() {
-            test.desc.name = test.desc.name.with_padding(test.testfn.padding());
-        }
+    for test in filter_tests(opts, tests) {
+        let mut desc = test.desc;
+        desc.name = desc.name.with_padding(test.testfn.padding());

-        filtered_tests
-    };
+        match test.testfn {
+            DynBenchFn(benchfn) => {
+                if opts.bench_benchmarks {
+                    filtered.add_bench(desc, DynBenchFn(benchfn));
+                } else {
+                    filtered.add_bench_as_test(desc, benchfn);
+                }
+            }
+            StaticBenchFn(benchfn) => {
+                if opts.bench_benchmarks {
+                    filtered.add_bench(desc, StaticBenchFn(benchfn));
+                } else {
+                    filtered.add_bench_as_test(desc, benchfn);
+                }
+            }
+            testfn => {
+                filtered.add_test(desc, testfn);
+            }
+        };
+    }

-    let filtered_out = tests_len - filtered_tests.len();
+    let filtered_out = tests_len - filtered.total_len();
     let event = TestEvent::TeFilteredOut(filtered_out);
     notify_about_test_event(event)?;

-    let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
-
     let shuffle_seed = get_shuffle_seed(opts);

-    let event = TestEvent::TeFiltered(filtered_descs, shuffle_seed);
+    let event = TestEvent::TeFiltered(filtered.total_len(), shuffle_seed);
     notify_about_test_event(event)?;

-    let (mut filtered_tests, filtered_benchs): (Vec<_>, _) = filtered_tests
-        .into_iter()
-        .enumerate()
-        .map(|(i, e)| (TestId(i), e))
-        .partition(|(_, e)| matches!(e.testfn, StaticTestFn(_) | DynTestFn(_)));
-
     let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);

+    let mut remaining = filtered.tests;
     if let Some(shuffle_seed) = shuffle_seed {
-        shuffle_tests(shuffle_seed, &mut filtered_tests);
+        shuffle_tests(shuffle_seed, &mut remaining);
     }
     // Store the tests in a VecDeque so we can efficiently remove the first element to run the
     // tests in the order they were passed (unless shuffled).
-    let mut remaining = VecDeque::from(filtered_tests);
+    let mut remaining = VecDeque::from(remaining);
     let mut pending = 0;

     let (tx, rx) = channel::<CompletedTest>();
@@ -328,13 +379,22 @@ where
             let (id, test) = remaining.pop_front().unwrap();
             let event = TestEvent::TeWait(test.desc.clone());
             notify_about_test_event(event)?;
-            let join_handle =
-                run_test(opts, !opts.run_tests, id, test, run_strategy, tx.clone(), Concurrent::No);
-            assert!(join_handle.is_none());
-            let completed_test = rx.recv().unwrap();
+            let join_handle = run_test(opts, !opts.run_tests, id, test, run_strategy, tx.clone());
+            // Wait for the test to complete.
+            let mut completed_test = rx.recv().unwrap();
+            RunningTest { join_handle }.join(&mut completed_test);
+
+            let fail_fast = match completed_test.result {
+                TrIgnored | TrOk | TrBench(_) => false,
+                TrFailed | TrFailedMsg(_) | TrTimedFail => opts.fail_fast,
+            };

             let event = TestEvent::TeResult(completed_test);
             notify_about_test_event(event)?;
+
+            if fail_fast {
+                return Ok(());
+            }
         }
     } else {
         while pending > 0 || !remaining.is_empty() {
@@ -345,15 +405,8 @@ where

                 let event = TestEvent::TeWait(desc.clone());
                 notify_about_test_event(event)?; //here no pad
-                let join_handle = run_test(
-                    opts,
-                    !opts.run_tests,
-                    id,
-                    test,
-                    run_strategy,
-                    tx.clone(),
-                    Concurrent::Yes,
-                );
+                let join_handle =
+                    run_test(opts, !opts.run_tests, id, test, run_strategy, tx.clone());
                 running_tests.insert(id, RunningTest { join_handle });
                 timeout_queue.push_back(TimeoutEntry { id, desc, timeout });
                 pending += 1;
@@ -385,28 +438,34 @@ where

             let mut completed_test = res.unwrap();
             let running_test = running_tests.remove(&completed_test.id).unwrap();
-            if let Some(join_handle) = running_test.join_handle {
-                if let Err(_) = join_handle.join() {
-                    if let TrOk = completed_test.result {
-                        completed_test.result =
-                            TrFailedMsg("panicked after reporting success".to_string());
-                    }
-                }
-            }
+            running_test.join(&mut completed_test);
+
+            let fail_fast = match completed_test.result {
+                TrIgnored | TrOk | TrBench(_) => false,
+                TrFailed | TrFailedMsg(_) | TrTimedFail => opts.fail_fast,
+            };

             let event = TestEvent::TeResult(completed_test);
             notify_about_test_event(event)?;
             pending -= 1;
+
+            if fail_fast {
+                // Prevent remaining test threads from panicking
+                std::mem::forget(rx);
+                return Ok(());
+            }
         }
     }

     if opts.bench_benchmarks {
         // All benchmarks run at the end, in serial.
-        for (id, b) in filtered_benchs {
+        for (id, b) in filtered.benchs {
             let event = TestEvent::TeWait(b.desc.clone());
             notify_about_test_event(event)?;
-            run_test(opts, false, id, b, run_strategy, tx.clone(), Concurrent::No);
-            let completed_test = rx.recv().unwrap();
+            let join_handle = run_test(opts, false, id, b, run_strategy, tx.clone());
+            // Wait for the test to complete.
+            let mut completed_test = rx.recv().unwrap();
+            RunningTest { join_handle }.join(&mut completed_test);

             let event = TestEvent::TeResult(completed_test);
             notify_about_test_event(event)?;
@@ -432,7 +491,9 @@ pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
     }

     // Skip tests that match any of the skip filters
-    filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));
+    if !opts.skip.is_empty() {
+        filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));
+    }

     // Excludes #[should_panic] tests
     if opts.exclude_should_panic {
@@ -480,7 +541,6 @@ pub fn run_test(
     test: TestDescAndFn,
     strategy: RunStrategy,
     monitor_ch: Sender<CompletedTest>,
-    concurrency: Concurrent,
 ) -> Option<thread::JoinHandle<()>> {
     let TestDescAndFn { desc, testfn } = test;

@@ -498,7 +558,6 @@ pub fn run_test(
     struct TestRunOpts {
         pub strategy: RunStrategy,
         pub nocapture: bool,
-        pub concurrency: Concurrent,
         pub time: Option<time::TestTimeOptions>,
     }

@@ -509,7 +568,6 @@ pub fn run_test(
         testfn: Box<dyn FnOnce() -> Result<(), String> + Send>,
         opts: TestRunOpts,
     ) -> Option<thread::JoinHandle<()>> {
-        let concurrency = opts.concurrency;
         let name = desc.name.clone();

         let runtest = move || match opts.strategy {
@@ -536,7 +594,7 @@ pub fn run_test(
         // the test synchronously, regardless of the concurrency
         // level.
         let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_family = "wasm");
-        if concurrency == Concurrent::Yes && supports_threads {
+        if supports_threads {
             let cfg = thread::Builder::new().name(name.as_slice().to_owned());
             let mut runtest = Arc::new(Mutex::new(Some(runtest)));
             let runtest2 = runtest.clone();
@@ -557,7 +615,7 @@ pub fn run_test(
     }

     let test_run_opts =
-        TestRunOpts { strategy, nocapture: opts.nocapture, concurrency, time: opts.time_options };
+        TestRunOpts { strategy, nocapture: opts.nocapture, time: opts.time_options };

     match testfn {
         DynBenchFn(benchfn) => {
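The fail-fast early return in lib.rs leaks the receiver on purpose: dropping rx would disconnect the channel, and a worker thread still finishing a test would then panic on the unwrap of its monitor_ch.send(...). A small standalone demo of the pattern, assuming only std (the function name is made up for illustration):

    // Hypothetical sketch, not part of the patch: why the Receiver is leaked
    // rather than dropped when run_tests bails out early.
    use std::sync::mpsc::channel;

    fn early_exit_demo() {
        let (tx, rx) = channel::<u32>();
        let worker = std::thread::spawn(move || {
            // With the receiver leaked, this send still succeeds even though
            // the coordinator has stopped receiving.
            tx.send(42).unwrap();
        });
        std::mem::forget(rx); // deliberate leak, mirroring the fail-fast path
        worker.join().unwrap();
    }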
diff --git a/library/test/src/options.rs b/library/test/src/options.rs
index baf36b5f1..75ec0b616 100644
--- a/library/test/src/options.rs
+++ b/library/test/src/options.rs
@@ -1,12 +1,5 @@
 //! Enums denoting options for test execution.

-/// Whether to execute tests concurrently or not
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum Concurrent {
-    Yes,
-    No,
-}
-
 /// Number of times to run a benchmarked function
 #[derive(Clone, PartialEq, Eq)]
 pub enum BenchMode {
diff --git a/library/test/src/tests.rs b/library/test/src/tests.rs
index b54be64ef..3a0260f86 100644
--- a/library/test/src/tests.rs
+++ b/library/test/src/tests.rs
@@ -51,6 +51,7 @@ impl TestOpts {
             skip: vec![],
             time_options: None,
             options: Options::new(),
+            fail_fast: false,
         }
     }
 }
@@ -102,7 +103,7 @@ pub fn do_not_run_ignored_tests() {
         testfn: DynTestFn(Box::new(f)),
     };
     let (tx, rx) = channel();
-    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx);
     let result = rx.recv().unwrap().result;
     assert_ne!(result, TrOk);
 }
@@ -125,7 +126,7 @@ pub fn ignored_tests_result_in_ignored() {
         testfn: DynTestFn(Box::new(f)),
     };
     let (tx, rx) = channel();
-    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx);
     let result = rx.recv().unwrap().result;
     assert_eq!(result, TrIgnored);
 }
@@ -150,7 +151,7 @@ fn test_should_panic() {
         testfn: DynTestFn(Box::new(f)),
     };
     let (tx, rx) = channel();
-    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx);
     let result = rx.recv().unwrap().result;
     assert_eq!(result, TrOk);
 }
@@ -175,7 +176,7 @@ fn test_should_panic_good_message() {
         testfn: DynTestFn(Box::new(f)),
     };
     let (tx, rx) = channel();
-    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx);
     let result = rx.recv().unwrap().result;
     assert_eq!(result, TrOk);
 }
@@ -205,7 +206,7 @@ fn test_should_panic_bad_message() {
         testfn: DynTestFn(Box::new(f)),
     };
     let (tx, rx) = channel();
-    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx);
     let result = rx.recv().unwrap().result;
     assert_eq!(result, TrFailedMsg(failed_msg.to_string()));
 }
@@ -239,7 +240,7 @@ fn test_should_panic_non_string_message_type() {
         testfn: DynTestFn(Box::new(f)),
     };
     let (tx, rx) = channel();
-    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx);
     let result = rx.recv().unwrap().result;
     assert_eq!(result, TrFailedMsg(failed_msg));
 }
@@ -267,15 +268,7 @@ fn test_should_panic_but_succeeds() {
         testfn: DynTestFn(Box::new(f)),
     };
     let (tx, rx) = channel();
-    run_test(
-        &TestOpts::new(),
-        false,
-        TestId(0),
-        desc,
-        RunStrategy::InProcess,
-        tx,
-        Concurrent::No,
-    );
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx);
     let result = rx.recv().unwrap().result;
     assert_eq!(
         result,
@@ -306,7 +299,7 @@ fn report_time_test_template(report_time: bool) -> Option<TestExecTime> {

     let test_opts = TestOpts { time_options, ..TestOpts::new() };
     let (tx, rx) = channel();
-    run_test(&test_opts, false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(&test_opts, false, TestId(0), desc, RunStrategy::InProcess, tx);
     let exec_time = rx.recv().unwrap().exec_time;
     exec_time
 }
@@ -345,7 +338,7 @@ fn time_test_failure_template(test_type: TestType) -> TestResult {

     let test_opts = TestOpts { time_options: Some(time_options), ..TestOpts::new() };
     let (tx, rx) = channel();
-    run_test(&test_opts, false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(&test_opts, false, TestId(0), desc, RunStrategy::InProcess, tx);
     let result = rx.recv().unwrap().result;

     result
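With the Concurrent enum gone, run_test takes one argument fewer and the decision to spawn a thread now rests solely on platform support. A sketch of the new call shape, written as it would appear in src/tests.rs, where TestId, RunStrategy, run_test and channel are already in scope and desc is a TestDescAndFn built by the surrounding test; the final assertion assumes the wrapped test function succeeds:

    // Hypothetical sketch, not part of the patch.
    let (tx, rx) = channel();
    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx);
    let completed = rx.recv().unwrap();
    assert_eq!(completed.result, TrOk);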