author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 03:59:35 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 03:59:35 +0000
commit     d1b2d29528b7794b41e66fc2136e395a02f8529b (patch)
tree       a4a17504b260206dec3cf55b2dca82929a348ac2 /vendor/criterion/src
parent     Releasing progress-linux version 1.72.1+dfsg1-1~progress7.99u1. (diff)
download   rustc-d1b2d29528b7794b41e66fc2136e395a02f8529b.tar.xz
           rustc-d1b2d29528b7794b41e66fc2136e395a02f8529b.zip

Merging upstream version 1.73.0+dfsg1.

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/criterion/src')
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/analysis/compare.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/analysis/mod.rs  |  15
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/async_executor.rs  |  2
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/bencher.rs  |  9
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/benchmark.rs  |  558
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/benchmark_group.rs  |  19
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/connection.rs  |  38
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/csv_report.rs  |  1
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/error.rs  |  8
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/estimate.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/format.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/fs.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/html/benchmark_report.html.tt  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/html/index.html.tt  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/html/mod.rs  |  18
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/html/report_link.html.tt  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/html/summary_report.html.tt  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/kde.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/lib.rs  |  884
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/macros.rs  |  3
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/macros_private.rs  |  2
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/measurement.rs  |  31
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/gnuplot_backend/distributions.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/gnuplot_backend/iteration_times.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/gnuplot_backend/mod.rs  |  2
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/gnuplot_backend/pdf.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/gnuplot_backend/regression.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/gnuplot_backend/summary.rs  |  15
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/gnuplot_backend/t_test.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/mod.rs  |  2
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/plotters_backend/distributions.rs  |  12
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/plotters_backend/iteration_times.rs  |  4
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/plotters_backend/mod.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/plotters_backend/pdf.rs  |  22
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/plotters_backend/regression.rs  |  10
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/plotters_backend/summary.rs  |  10
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/plotters_backend/t_test.rs  |  4
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/profiler.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/report.rs  |  278
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/routine.rs  |  59
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/bivariate/bootstrap.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/bivariate/mod.rs  |  53
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/bivariate/regression.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/bivariate/resamples.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/float.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/mod.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/rand_util.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/test.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/tuple.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/univariate/bootstrap.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/univariate/kde/kernel.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/univariate/kde/mod.rs  |  10
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/univariate/mixed.rs  |  61
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/univariate/mod.rs  |  55
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/univariate/outliers/mod.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/univariate/outliers/tukey.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/univariate/percentiles.rs  |  22
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/univariate/resamples.rs  |  0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/univariate/sample.rs  |  57
59 files changed, 884 insertions, 1380 deletions
diff --git a/vendor/criterion/src/analysis/compare.rs b/vendor/criterion/src/analysis/compare.rs
index a49407d85..a49407d85 100755..100644
--- a/vendor/criterion/src/analysis/compare.rs
+++ b/vendor/criterion/src/analysis/compare.rs
diff --git a/vendor/criterion/src/analysis/mod.rs b/vendor/criterion/src/analysis/mod.rs
index 5d84bef16..1851d7186 100755..100644
--- a/vendor/criterion/src/analysis/mod.rs
+++ b/vendor/criterion/src/analysis/mod.rs
@@ -26,7 +26,7 @@ macro_rules! elapsed {
info!(
"{} took {}",
$msg,
- crate::format::time(crate::DurationExt::to_nanos(elapsed) as f64)
+ crate::format::time(elapsed.as_nanos() as f64)
);
out
@@ -47,7 +47,7 @@ pub(crate) fn common<M: Measurement, T: ?Sized>(
) {
criterion.report.benchmark_start(id, report_context);
- if let Baseline::Compare = criterion.baseline {
+ if let Baseline::CompareStrict = criterion.baseline {
if !base_dir_exists(
id,
&criterion.baseline_directory,
@@ -128,7 +128,7 @@ pub(crate) fn common<M: Measurement, T: ?Sized>(
.collect::<Vec<f64>>();
let avg_times = Sample::new(&avg_times);
- if criterion.connection.is_none() && criterion.load_baseline.is_none() {
+ if criterion.should_save_baseline() {
log_if_err!({
let mut new_dir = criterion.output_directory.clone();
new_dir.push(id.as_directory_name());
@@ -139,7 +139,7 @@ pub(crate) fn common<M: Measurement, T: ?Sized>(
let data = Data::new(&iters, &times);
let labeled_sample = tukey::classify(avg_times);
- if criterion.connection.is_none() {
+ if criterion.should_save_baseline() {
log_if_err!({
let mut tukey_file = criterion.output_directory.to_owned();
tukey_file.push(id.as_directory_name());
@@ -156,7 +156,7 @@ pub(crate) fn common<M: Measurement, T: ?Sized>(
distributions.slope = Some(distribution);
}
- if criterion.connection.is_none() && criterion.load_baseline.is_none() {
+ if criterion.should_save_baseline() {
log_if_err!({
let mut sample_file = criterion.output_directory.clone();
sample_file.push(id.as_directory_name());
@@ -222,7 +222,7 @@ pub(crate) fn common<M: Measurement, T: ?Sized>(
};
let measurement_data = crate::report::MeasurementData {
- data: Data::new(&*iters, &*times),
+ data: Data::new(&iters, &times),
avg_times: labeled_sample,
absolute_estimates: estimates,
distributions,
@@ -237,7 +237,7 @@ pub(crate) fn common<M: Measurement, T: ?Sized>(
criterion.measurement.formatter(),
);
- if criterion.connection.is_none() && criterion.load_baseline.is_none() {
+ if criterion.should_save_baseline() {
log_if_err!({
let mut benchmark_file = criterion.output_directory.clone();
benchmark_file.push(id.as_directory_name());
@@ -365,5 +365,6 @@ fn copy_new_dir_to_base(id: &str, baseline: &str, output_directory: &Path) {
&new_dir.join("benchmark.json"),
&base_dir.join("benchmark.json")
));
+ #[cfg(feature = "csv_output")]
try_else_return!(fs::cp(&new_dir.join("raw.csv"), &base_dir.join("raw.csv")));
}
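The first hunk above swaps the crate-private `DurationExt::to_nanos` helper for the standard library's `Duration::as_nanos`. A minimal standalone sketch of that std call (illustrative code, not Criterion's internals; the function name is made up):

use std::time::{Duration, Instant};

// Time a closure and return the elapsed wall-clock time in nanoseconds as f64,
// using Duration::as_nanos (which returns u128) exactly as the hunk above does.
fn time_nanos<F: FnOnce()>(f: F) -> f64 {
    let start = Instant::now();
    f();
    let elapsed: Duration = start.elapsed();
    elapsed.as_nanos() as f64
}

fn main() {
    let nanos = time_nanos(|| std::thread::sleep(Duration::from_millis(1)));
    println!("took {} ns", nanos);
}
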
diff --git a/vendor/criterion/src/async_executor.rs b/vendor/criterion/src/async_executor.rs
index 127af2768..a7337fb1f 100755..100644
--- a/vendor/criterion/src/async_executor.rs
+++ b/vendor/criterion/src/async_executor.rs
@@ -32,7 +32,7 @@ impl AsyncExecutor for FuturesExecutor {
}
}
-/// Runs futures on the 'soml' crate's global executor
+/// Runs futures on the 'smol' crate's global executor
#[cfg(feature = "async_smol")]
pub struct SmolExecutor;
#[cfg(feature = "async_smol")]
diff --git a/vendor/criterion/src/bencher.rs b/vendor/criterion/src/bencher.rs
index c5e90af5c..016aa2841 100755..100644
--- a/vendor/criterion/src/bencher.rs
+++ b/vendor/criterion/src/bencher.rs
@@ -189,15 +189,6 @@ impl<'a, M: Measurement> Bencher<'a, M> {
self.iter_batched(|| (), |_| routine(), BatchSize::SmallInput);
}
- #[doc(hidden)]
- pub fn iter_with_large_setup<I, O, S, R>(&mut self, setup: S, routine: R)
- where
- S: FnMut() -> I,
- R: FnMut(I) -> O,
- {
- self.iter_batched(setup, routine, BatchSize::NumBatches(1));
- }
-
/// Times a `routine` that requires some input by generating a batch of input, then timing the
/// iteration of the benchmark over the input. See [`BatchSize`](enum.BatchSize.html) for
/// details on choosing the batch size. Use this when the routine must consume its input.
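The hunk above deletes the hidden `iter_with_large_setup` wrapper, whose body was just a call to `iter_batched` with `BatchSize::NumBatches(1)`. A minimal sketch of calling `iter_batched` directly (benchmark and helper names are illustrative, not from the diff):

use criterion::{BatchSize, Criterion};

fn build_big_input() -> Vec<u64> {
    (0..1_000_000).collect()
}

fn consume(v: Vec<u64>) -> u64 {
    v.iter().sum()
}

fn bench_consume(c: &mut Criterion) {
    c.bench_function("consume_big_input", |b| {
        b.iter_batched(
            build_big_input,          // setup: runs outside the timed section
            consume,                  // routine: takes ownership of its input
            BatchSize::NumBatches(1), // what the removed helper passed
        )
    });
}
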
diff --git a/vendor/criterion/src/benchmark.rs b/vendor/criterion/src/benchmark.rs
index fccb791f2..3a1cb0012 100755..100644
--- a/vendor/criterion/src/benchmark.rs
+++ b/vendor/criterion/src/benchmark.rs
@@ -1,14 +1,4 @@
-#![allow(deprecated)]
-
-use crate::analysis;
-use crate::connection::OutgoingMessage;
-use crate::measurement::{Measurement, WallTime};
-use crate::report::{BenchmarkId, Report, ReportContext};
-use crate::routine::{Function, Routine};
-use crate::{Bencher, Criterion, DurationExt, Mode, PlotConfiguration, SamplingMode, Throughput};
-use std::cell::RefCell;
-use std::fmt::Debug;
-use std::marker::Sized;
+use crate::{PlotConfiguration, SamplingMode};
use std::time::Duration;
// TODO: Move the benchmark config stuff to a separate module for easier use.
@@ -23,6 +13,7 @@ pub struct BenchmarkConfig {
pub significance_level: f64,
pub warm_up_time: Duration,
pub sampling_mode: SamplingMode,
+ pub quick_mode: bool,
}
/// Struct representing a partially-complete per-benchmark configuration.
@@ -36,6 +27,7 @@ pub(crate) struct PartialBenchmarkConfig {
pub(crate) significance_level: Option<f64>,
pub(crate) warm_up_time: Option<Duration>,
pub(crate) sampling_mode: Option<SamplingMode>,
+ pub(crate) quick_mode: Option<bool>,
pub(crate) plot_config: PlotConfiguration,
}
@@ -52,549 +44,7 @@ impl PartialBenchmarkConfig {
.unwrap_or(defaults.significance_level),
warm_up_time: self.warm_up_time.unwrap_or(defaults.warm_up_time),
sampling_mode: self.sampling_mode.unwrap_or(defaults.sampling_mode),
- }
- }
-}
-
-pub(crate) struct NamedRoutine<T, M: Measurement = WallTime> {
- pub id: String,
- pub(crate) f: Box<RefCell<dyn Routine<M, T>>>,
-}
-
-/// Structure representing a benchmark (or group of benchmarks)
-/// which take one parameter.
-#[doc(hidden)]
-#[allow(clippy::type_complexity)]
-#[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
-pub struct ParameterizedBenchmark<T: Debug, M: Measurement = WallTime> {
- config: PartialBenchmarkConfig,
- values: Vec<T>,
- routines: Vec<NamedRoutine<T, M>>,
- throughput: Option<Box<dyn Fn(&T) -> Throughput>>,
-}
-
-/// Structure representing a benchmark (or group of benchmarks)
-/// which takes no parameters.
-#[doc(hidden)]
-#[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
-pub struct Benchmark<M: Measurement = WallTime> {
- config: PartialBenchmarkConfig,
- routines: Vec<NamedRoutine<(), M>>,
- throughput: Option<Throughput>,
-}
-
-/// Common trait for `Benchmark` and `ParameterizedBenchmark`. Not intended to be
-/// used outside of Criterion.rs.
-#[doc(hidden)]
-pub trait BenchmarkDefinition<M: Measurement = WallTime>: Sized {
- #[doc(hidden)]
- fn run(self, group_id: &str, c: &mut Criterion<M>);
-}
-
-macro_rules! benchmark_config {
- ($type:tt) => {
- /// Changes the size of the sample for this benchmark
- ///
- /// A bigger sample should yield more accurate results if paired with a sufficiently large
- /// measurement time.
- ///
- /// Sample size must be at least 10.
- ///
- /// # Panics
- ///
- /// Panics if n < 10.
- pub fn sample_size(mut self, n: usize) -> Self {
- assert!(n >= 10);
-
- self.config.sample_size = Some(n);
- self
- }
-
- /// Changes the warm up time for this benchmark
- ///
- /// # Panics
- ///
- /// Panics if the input duration is zero
- pub fn warm_up_time(mut self, dur: Duration) -> Self {
- assert!(dur.to_nanos() > 0);
-
- self.config.warm_up_time = Some(dur);
- self
- }
-
- /// Changes the target measurement time for this benchmark. Criterion will attempt
- /// to spend approximately this amount of time measuring the benchmark.
- /// With a longer time, the measurement will become more resilient to transitory peak loads
- /// caused by external programs.
- ///
- /// # Panics
- ///
- /// Panics if the input duration in zero
- pub fn measurement_time(mut self, dur: Duration) -> Self {
- assert!(dur.to_nanos() > 0);
-
- self.config.measurement_time = Some(dur);
- self
- }
-
- /// Changes the number of resamples for this benchmark
- ///
- /// Number of resamples to use for the
- /// [bootstrap](http://en.wikipedia.org/wiki/Bootstrapping_(statistics)#Case_resampling)
- ///
- /// A larger number of resamples reduces the random sampling errors, which are inherent to the
- /// bootstrap method, but also increases the analysis time.
- ///
- /// # Panics
- ///
- /// Panics if the number of resamples is set to zero
- pub fn nresamples(mut self, n: usize) -> Self {
- assert!(n > 0);
- if n <= 1000 {
- println!("\nWarning: It is not recommended to reduce nresamples below 1000.");
- }
-
- self.config.nresamples = Some(n);
- self
- }
-
- /// Changes the default noise threshold for this benchmark. The noise threshold
- /// is used to filter out small changes in performance, even if they are statistically
- /// significant. Sometimes benchmarking the same code twice will result in small but
- /// statistically significant differences solely because of noise. This provides a way to filter
- /// out some of these false positives at the cost of making it harder to detect small changes
- /// to the true performance of the benchmark.
- ///
- /// The default is 0.01, meaning that changes smaller than 1% will be ignored.
- ///
- /// # Panics
- ///
- /// Panics if the threshold is set to a negative value
- pub fn noise_threshold(mut self, threshold: f64) -> Self {
- assert!(threshold >= 0.0);
-
- self.config.noise_threshold = Some(threshold);
- self
- }
-
- /// Changes the default confidence level for this benchmark. The confidence
- /// level is the desired probability that the true runtime lies within the estimated
- /// [confidence interval](https://en.wikipedia.org/wiki/Confidence_interval). The default is
- /// 0.95, meaning that the confidence interval should capture the true value 95% of the time.
- ///
- /// # Panics
- ///
- /// Panics if the confidence level is set to a value outside the `(0, 1)` range
- pub fn confidence_level(mut self, cl: f64) -> Self {
- assert!(cl > 0.0 && cl < 1.0);
- if cl < 0.5 {
- println!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
- }
-
- self.config.confidence_level = Some(cl);
- self
- }
-
- /// Changes the default [significance level](https://en.wikipedia.org/wiki/Statistical_significance)
- /// for this benchmark. This is used to perform a
- /// [hypothesis test](https://en.wikipedia.org/wiki/Statistical_hypothesis_testing) to see if
- /// the measurements from this run are different from the measured performance of the last run.
- /// The significance level is the desired probability that two measurements of identical code
- /// will be considered 'different' due to noise in the measurements. The default value is 0.05,
- /// meaning that approximately 5% of identical benchmarks will register as different due to
- /// noise.
- ///
- /// This presents a trade-off. By setting the significance level closer to 0.0, you can increase
- /// the statistical robustness against noise, but it also weakens Criterion.rs' ability to
- /// detect small but real changes in the performance. By setting the significance level
- /// closer to 1.0, Criterion.rs will be more able to detect small true changes, but will also
- /// report more spurious differences.
- ///
- /// See also the noise threshold setting.
- ///
- /// # Panics
- ///
- /// Panics if the significance level is set to a value outside the `(0, 1)` range
- pub fn significance_level(mut self, sl: f64) -> Self {
- assert!(sl > 0.0 && sl < 1.0);
-
- self.config.significance_level = Some(sl);
- self
- }
-
- /// Changes the plot configuration for this benchmark.
- pub fn plot_config(mut self, new_config: PlotConfiguration) -> Self {
- self.config.plot_config = new_config;
- self
- }
-
- /// Changes the sampling mode for this benchmark.
- pub fn sampling_mode(mut self, new_mode: SamplingMode) -> Self {
- self.config.sampling_mode = Some(new_mode);
- self
- }
- };
-}
-
-impl<M> Benchmark<M>
-where
- M: Measurement + 'static,
-{
- benchmark_config!(Benchmark);
-
- /// Create a new benchmark group and adds the given function to it.
- ///
- /// # Example
- ///
- /// ```rust
- /// # #[macro_use] extern crate criterion;
- /// # use criterion::*;
- ///
- /// fn bench(c: &mut Criterion) {
- /// // One-time setup goes here
- /// c.bench(
- /// "my_group",
- /// Benchmark::new("my_function", |b| b.iter(|| {
- /// // Code to benchmark goes here
- /// })),
- /// );
- /// }
- ///
- /// criterion_group!(benches, bench);
- /// criterion_main!(benches);
- /// ```
- pub fn new<S, F>(id: S, f: F) -> Benchmark<M>
- where
- S: Into<String>,
- F: FnMut(&mut Bencher<'_, M>) + 'static,
- {
- Benchmark {
- config: PartialBenchmarkConfig::default(),
- routines: vec![],
- throughput: None,
- }
- .with_function(id, f)
- }
-
- /// Add a function to the benchmark group.
- pub fn with_function<S, F>(mut self, id: S, mut f: F) -> Benchmark<M>
- where
- S: Into<String>,
- F: FnMut(&mut Bencher<'_, M>) + 'static,
- {
- let routine = NamedRoutine {
- id: id.into(),
- f: Box::new(RefCell::new(Function::new(move |b, _| f(b)))),
- };
- self.routines.push(routine);
- self
- }
-
- /// Set the input size for this benchmark group. Used for reporting the
- /// throughput.
- pub fn throughput(mut self, throughput: Throughput) -> Benchmark<M> {
- self.throughput = Some(throughput);
- self
- }
-}
-
-impl<M: Measurement> BenchmarkDefinition<M> for Benchmark<M> {
- fn run(self, group_id: &str, c: &mut Criterion<M>) {
- let report_context = ReportContext {
- output_directory: c.output_directory.clone(),
- plot_config: self.config.plot_config.clone(),
- };
-
- let config = self.config.to_complete(&c.config);
- let num_routines = self.routines.len();
-
- let mut all_ids = vec![];
- let mut any_matched = false;
-
- if let Some(conn) = &c.connection {
- conn.send(&OutgoingMessage::BeginningBenchmarkGroup { group: group_id })
- .unwrap();
- }
-
- for routine in self.routines {
- let function_id = if num_routines == 1 && group_id == routine.id {
- None
- } else {
- Some(routine.id)
- };
-
- let mut id = BenchmarkId::new(
- group_id.to_owned(),
- function_id,
- None,
- self.throughput.clone(),
- );
-
- id.ensure_directory_name_unique(&c.all_directories);
- c.all_directories.insert(id.as_directory_name().to_owned());
- id.ensure_title_unique(&c.all_titles);
- c.all_titles.insert(id.as_title().to_owned());
-
- let do_run = c.filter_matches(id.id());
- any_matched |= do_run;
-
- execute_benchmark(
- do_run,
- &id,
- c,
- &config,
- &mut *routine.f.borrow_mut(),
- &report_context,
- &(),
- self.throughput.clone(),
- );
-
- all_ids.push(id);
- }
-
- if let Some(conn) = &c.connection {
- conn.send(&OutgoingMessage::FinishedBenchmarkGroup { group: group_id })
- .unwrap();
- conn.serve_value_formatter(c.measurement.formatter())
- .unwrap();
- }
-
- if all_ids.len() > 1 && any_matched && c.mode.is_benchmark() {
- c.report
- .summarize(&report_context, &all_ids, c.measurement.formatter());
- }
- if any_matched {
- c.report.group_separator();
- }
- }
-}
-
-impl<T, M> ParameterizedBenchmark<T, M>
-where
- T: Debug + 'static,
- M: Measurement + 'static,
-{
- benchmark_config!(ParameterizedBenchmark);
-
- pub(crate) fn with_functions(
- functions: Vec<NamedRoutine<T, M>>,
- parameters: Vec<T>,
- ) -> ParameterizedBenchmark<T, M> {
- ParameterizedBenchmark {
- config: PartialBenchmarkConfig::default(),
- values: parameters,
- routines: functions,
- throughput: None,
- }
- }
-
- /// Create a new parameterized benchmark group and adds the given function
- /// to it.
- /// The function under test must follow the setup - bench - teardown pattern:
- ///
- /// # Example
- ///
- /// ```rust
- /// # #[macro_use] extern crate criterion;
- /// # use criterion::*;
- ///
- /// fn bench(c: &mut Criterion) {
- /// let parameters = vec![1u64, 2u64, 3u64];
- ///
- /// // One-time setup goes here
- /// c.bench(
- /// "my_group",
- /// ParameterizedBenchmark::new(
- /// "my_function",
- /// |b, param| b.iter(|| {
- /// // Code to benchmark using param goes here
- /// }),
- /// parameters
- /// )
- /// );
- /// }
- ///
- /// criterion_group!(benches, bench);
- /// criterion_main!(benches);
- /// ```
- pub fn new<S, F, I>(id: S, f: F, parameters: I) -> ParameterizedBenchmark<T, M>
- where
- S: Into<String>,
- F: FnMut(&mut Bencher<'_, M>, &T) + 'static,
- I: IntoIterator<Item = T>,
- {
- ParameterizedBenchmark {
- config: PartialBenchmarkConfig::default(),
- values: parameters.into_iter().collect(),
- routines: vec![],
- throughput: None,
- }
- .with_function(id, f)
- }
-
- /// Add a function to the benchmark group.
- pub fn with_function<S, F>(mut self, id: S, f: F) -> ParameterizedBenchmark<T, M>
- where
- S: Into<String>,
- F: FnMut(&mut Bencher<'_, M>, &T) + 'static,
- {
- let routine = NamedRoutine {
- id: id.into(),
- f: Box::new(RefCell::new(Function::new(f))),
- };
- self.routines.push(routine);
- self
- }
-
- /// Use the given function to calculate the input size for a given input.
- pub fn throughput<F>(mut self, throughput: F) -> ParameterizedBenchmark<T, M>
- where
- F: Fn(&T) -> Throughput + 'static,
- {
- self.throughput = Some(Box::new(throughput));
- self
- }
-}
-impl<T, M> BenchmarkDefinition<M> for ParameterizedBenchmark<T, M>
-where
- T: Debug + 'static,
- M: Measurement + 'static,
-{
- fn run(self, group_id: &str, c: &mut Criterion<M>) {
- let report_context = ReportContext {
- output_directory: c.output_directory.clone(),
- plot_config: self.config.plot_config.clone(),
- };
-
- let config = self.config.to_complete(&c.config);
- let num_parameters = self.values.len();
- let num_routines = self.routines.len();
-
- let mut all_ids = vec![];
- let mut any_matched = false;
-
- if let Some(conn) = &c.connection {
- conn.send(&OutgoingMessage::BeginningBenchmarkGroup { group: group_id })
- .unwrap();
- }
-
- for routine in self.routines {
- for value in &self.values {
- let function_id = if num_routines == 1 && group_id == routine.id {
- None
- } else {
- Some(routine.id.clone())
- };
-
- let value_str = if num_parameters == 1 {
- None
- } else {
- Some(format!("{:?}", value))
- };
-
- let throughput = self.throughput.as_ref().map(|func| func(value));
- let mut id = BenchmarkId::new(
- group_id.to_owned(),
- function_id,
- value_str,
- throughput.clone(),
- );
-
- id.ensure_directory_name_unique(&c.all_directories);
- c.all_directories.insert(id.as_directory_name().to_owned());
- id.ensure_title_unique(&c.all_titles);
- c.all_titles.insert(id.as_title().to_owned());
-
- let do_run = c.filter_matches(id.id());
- any_matched |= do_run;
-
- execute_benchmark(
- do_run,
- &id,
- c,
- &config,
- &mut *routine.f.borrow_mut(),
- &report_context,
- value,
- throughput,
- );
-
- all_ids.push(id);
- }
- }
-
- if let Some(conn) = &c.connection {
- conn.send(&OutgoingMessage::FinishedBenchmarkGroup { group: group_id })
- .unwrap();
- conn.serve_value_formatter(c.measurement.formatter())
- .unwrap();
- }
-
- if all_ids.len() > 1 && any_matched && c.mode.is_benchmark() {
- c.report
- .summarize(&report_context, &all_ids, c.measurement.formatter());
- }
- if any_matched {
- c.report.group_separator();
- }
- }
-}
-
-#[cfg_attr(feature = "cargo-clippy", allow(clippy::too_many_arguments))]
-fn execute_benchmark<T, M>(
- do_run: bool,
- id: &BenchmarkId,
- c: &Criterion<M>,
- config: &BenchmarkConfig,
- routine: &mut dyn Routine<M, T>,
- report_context: &ReportContext,
- parameter: &T,
- throughput: Option<Throughput>,
-) where
- T: Debug,
- M: Measurement,
-{
- match &c.mode {
- Mode::Benchmark => {
- if let Some(conn) = &c.connection {
- if do_run {
- conn.send(&OutgoingMessage::BeginningBenchmark { id: id.into() })
- .unwrap();
- } else {
- conn.send(&OutgoingMessage::SkippingBenchmark { id: id.into() })
- .unwrap();
- }
- }
-
- if do_run {
- analysis::common(
- id,
- routine,
- config,
- c,
- report_context,
- parameter,
- throughput,
- );
- }
- }
- Mode::List => {
- if do_run {
- println!("{}: bench", id);
- }
- }
- Mode::Test => {
- if do_run {
- // In test mode, run the benchmark exactly once, then exit.
- c.report.test_start(id, report_context);
- routine.test(&c.measurement, parameter);
- c.report.test_pass(id, report_context);
- }
- }
- &Mode::Profile(duration) => {
- if do_run {
- routine.profile(&c.measurement, id, c, report_context, duration, parameter);
- }
+ quick_mode: self.quick_mode.unwrap_or(defaults.quick_mode),
}
}
}
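This file removes the `Benchmark` and `ParameterizedBenchmark` types, both deprecated since 0.3.4 with the note "Please use BenchmarkGroups instead." A minimal sketch of the `BenchmarkGroup` replacement for both shapes of benchmark (group name, inputs, and routine are illustrative):

use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};

fn bench(c: &mut Criterion) {
    // One group covers both the unparameterized Benchmark case and the
    // ParameterizedBenchmark case (one benchmark per input value).
    let mut group = c.benchmark_group("my_group");
    for param in [1u64, 2, 3] {
        group.bench_with_input(BenchmarkId::new("my_function", param), &param, |b, &p| {
            b.iter(|| p.wrapping_mul(2))
        });
    }
    group.finish();
}

criterion_group!(benches, bench);
criterion_main!(benches);
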
diff --git a/vendor/criterion/src/benchmark_group.rs b/vendor/criterion/src/benchmark_group.rs
index 9ed88ef19..687fb2f21 100755..100644
--- a/vendor/criterion/src/benchmark_group.rs
+++ b/vendor/criterion/src/benchmark_group.rs
@@ -6,7 +6,7 @@ use crate::report::BenchmarkId as InternalBenchmarkId;
use crate::report::Report;
use crate::report::ReportContext;
use crate::routine::{Function, Routine};
-use crate::{Bencher, Criterion, DurationExt, Mode, PlotConfiguration, SamplingMode, Throughput};
+use crate::{Bencher, Criterion, Mode, PlotConfiguration, SamplingMode, Throughput};
use std::time::Duration;
/// Structure used to group together a set of related benchmarks, along with custom configuration
@@ -107,7 +107,7 @@ impl<'a, M: Measurement> BenchmarkGroup<'a, M> {
///
/// Panics if the input duration is zero
pub fn warm_up_time(&mut self, dur: Duration) -> &mut Self {
- assert!(dur.to_nanos() > 0);
+ assert!(dur.as_nanos() > 0);
self.partial_config.warm_up_time = Some(dur);
self
@@ -125,7 +125,7 @@ impl<'a, M: Measurement> BenchmarkGroup<'a, M> {
///
/// Panics if the input duration is zero
pub fn measurement_time(&mut self, dur: Duration) -> &mut Self {
- assert!(dur.to_nanos() > 0);
+ assert!(dur.as_nanos() > 0);
self.partial_config.measurement_time = Some(dur);
self
@@ -145,7 +145,7 @@ impl<'a, M: Measurement> BenchmarkGroup<'a, M> {
pub fn nresamples(&mut self, n: usize) -> &mut Self {
assert!(n > 0);
if n <= 1000 {
- println!("\nWarning: It is not recommended to reduce nresamples below 1000.");
+ eprintln!("\nWarning: It is not recommended to reduce nresamples below 1000.");
}
self.partial_config.nresamples = Some(n);
@@ -182,7 +182,7 @@ impl<'a, M: Measurement> BenchmarkGroup<'a, M> {
pub fn confidence_level(&mut self, cl: f64) -> &mut Self {
assert!(cl > 0.0 && cl < 1.0);
if cl < 0.5 {
- println!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
+ eprintln!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
}
self.partial_config.confidence_level = Some(cl);
@@ -290,7 +290,8 @@ impl<'a, M: Measurement> BenchmarkGroup<'a, M> {
assert!(
!self.all_ids.contains(&id),
- "Benchmark IDs must be unique within a group."
+ "Benchmark IDs must be unique within a group. Encountered duplicated benchmark ID {}",
+ &id
);
id.ensure_directory_name_unique(&self.criterion.all_directories);
@@ -327,9 +328,9 @@ impl<'a, M: Measurement> BenchmarkGroup<'a, M> {
);
}
}
- Mode::List => {
+ Mode::List(_) => {
if do_run {
- println!("{}: bench", id);
+ println!("{}: benchmark", id);
}
}
Mode::Test => {
@@ -391,7 +392,7 @@ impl<'a, M: Measurement> Drop for BenchmarkGroup<'a, M> {
self.criterion.measurement.formatter(),
);
}
- if self.any_matched {
+ if self.any_matched && !self.criterion.mode.is_terse() {
self.criterion.report.group_separator();
}
}
diff --git a/vendor/criterion/src/connection.rs b/vendor/criterion/src/connection.rs
index 53ad16da1..53706d608 100755..100644
--- a/vendor/criterion/src/connection.rs
+++ b/vendor/criterion/src/connection.rs
@@ -8,28 +8,39 @@ use std::net::TcpStream;
#[derive(Debug)]
pub enum MessageError {
- SerializationError(serde_cbor::Error),
- IoError(std::io::Error),
+ Deserialization(ciborium::de::Error<std::io::Error>),
+ Serialization(ciborium::ser::Error<std::io::Error>),
+ Io(std::io::Error),
}
-impl From<serde_cbor::Error> for MessageError {
- fn from(other: serde_cbor::Error) -> Self {
- MessageError::SerializationError(other)
+impl From<ciborium::de::Error<std::io::Error>> for MessageError {
+ fn from(other: ciborium::de::Error<std::io::Error>) -> Self {
+ MessageError::Deserialization(other)
+ }
+}
+impl From<ciborium::ser::Error<std::io::Error>> for MessageError {
+ fn from(other: ciborium::ser::Error<std::io::Error>) -> Self {
+ MessageError::Serialization(other)
}
}
impl From<std::io::Error> for MessageError {
fn from(other: std::io::Error) -> Self {
- MessageError::IoError(other)
+ MessageError::Io(other)
}
}
impl std::fmt::Display for MessageError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
- MessageError::SerializationError(error) => write!(
+ MessageError::Deserialization(error) => write!(
+ f,
+ "Failed to deserialize message to Criterion.rs benchmark:\n{}",
+ error
+ ),
+ MessageError::Serialization(error) => write!(
f,
- "Failed to serialize or deserialize message to Criterion.rs benchmark:\n{}",
+ "Failed to serialize message to Criterion.rs benchmark:\n{}",
error
),
- MessageError::IoError(error) => write!(
+ MessageError::Io(error) => write!(
f,
"Failed to read or write message to Criterion.rs benchmark:\n{}",
error
@@ -40,8 +51,9 @@ impl std::fmt::Display for MessageError {
impl std::error::Error for MessageError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
- MessageError::SerializationError(err) => Some(err),
- MessageError::IoError(err) => Some(err),
+ MessageError::Deserialization(err) => Some(err),
+ MessageError::Serialization(err) => Some(err),
+ MessageError::Io(err) => Some(err),
}
}
}
@@ -112,13 +124,13 @@ impl InnerConnection {
let length = u32::from_be_bytes(length_buf);
self.receive_buffer.resize(length as usize, 0u8);
self.socket.read_exact(&mut self.receive_buffer)?;
- let value = serde_cbor::from_slice(&self.receive_buffer)?;
+ let value = ciborium::de::from_reader(&self.receive_buffer[..])?;
Ok(value)
}
pub fn send(&mut self, message: &OutgoingMessage) -> Result<(), MessageError> {
self.send_buffer.truncate(0);
- serde_cbor::to_writer(&mut self.send_buffer, message)?;
+ ciborium::ser::into_writer(message, &mut self.send_buffer)?;
let size = u32::try_from(self.send_buffer.len()).unwrap();
let length_buf = size.to_be_bytes();
self.socket.write_all(&length_buf)?;
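This file's hunks migrate the connection protocol from serde_cbor to ciborium: `ciborium::ser::into_writer` for encoding and `ciborium::de::from_reader` for decoding, each with separate error types. A minimal sketch of the same length-prefixed CBOR framing, under assumed names (not Criterion's actual code):

use serde::{de::DeserializeOwned, Serialize};

fn encode<T: Serialize>(msg: &T) -> Result<Vec<u8>, ciborium::ser::Error<std::io::Error>> {
    // Serialize the message to CBOR, then prepend the 4-byte big-endian length,
    // mirroring the framing used by InnerConnection::send above.
    let mut payload = Vec::new();
    ciborium::ser::into_writer(msg, &mut payload)?;
    let mut framed = (payload.len() as u32).to_be_bytes().to_vec();
    framed.extend_from_slice(&payload);
    Ok(framed)
}

fn decode<T: DeserializeOwned>(payload: &[u8]) -> Result<T, ciborium::de::Error<std::io::Error>> {
    // The 4-byte length prefix is assumed to have been read and stripped already.
    ciborium::de::from_reader(payload)
}
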
diff --git a/vendor/criterion/src/csv_report.rs b/vendor/criterion/src/csv_report.rs
index 3b744df08..f8e2a05f7 100755..100644
--- a/vendor/criterion/src/csv_report.rs
+++ b/vendor/criterion/src/csv_report.rs
@@ -35,6 +35,7 @@ impl<W: Write> CsvReportWriter<W> {
let value = id.value_str.as_deref();
let (throughput_num, throughput_type) = match id.throughput {
Some(Throughput::Bytes(bytes)) => (Some(format!("{}", bytes)), Some("bytes")),
+ Some(Throughput::BytesDecimal(bytes)) => (Some(format!("{}", bytes)), Some("bytes")),
Some(Throughput::Elements(elems)) => (Some(format!("{}", elems)), Some("elements")),
None => (None, None),
};
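The new `Throughput::BytesDecimal` arm above is written to CSV with the same "bytes" label as `Throughput::Bytes`; the variant differs only in reporting decimal (KB/MB) rather than binary (KiB/MiB) multiples. A small, illustrative sketch of setting it on a benchmark group (group name and data are made up):

use criterion::{Criterion, Throughput};

fn bench_copy(c: &mut Criterion) {
    let data = vec![0u8; 1_000_000];
    let mut group = c.benchmark_group("copy");
    // BytesDecimal reports throughput using powers of 1000 instead of 1024.
    group.throughput(Throughput::BytesDecimal(data.len() as u64));
    group.bench_function("clone", |b| b.iter(|| data.clone()));
    group.finish();
}
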
diff --git a/vendor/criterion/src/error.rs b/vendor/criterion/src/error.rs
index 9b7eb17b1..459a716f5 100755..100644
--- a/vendor/criterion/src/error.rs
+++ b/vendor/criterion/src/error.rs
@@ -1,3 +1,4 @@
+#[cfg(feature = "csv_output")]
use csv::Error as CsvError;
use serde_json::Error as SerdeError;
use std::error::Error as StdError;
@@ -21,6 +22,8 @@ pub enum Error {
path: PathBuf,
inner: SerdeError,
},
+ #[cfg(feature = "csv_output")]
+ /// This API requires the following crate features to be activated: csv_output
CsvError(CsvError),
}
impl fmt::Display for Error {
@@ -37,6 +40,7 @@ impl fmt::Display for Error {
"Failed to read or write file {:?} due to serialization error: {}",
path, inner
),
+ #[cfg(feature = "csv_output")]
Error::CsvError(inner) => write!(f, "CSV error: {}", inner),
}
}
@@ -47,6 +51,7 @@ impl StdError for Error {
Error::AccessError { .. } => "AccessError",
Error::CopyError { .. } => "CopyError",
Error::SerdeError { .. } => "SerdeError",
+ #[cfg(feature = "csv_output")]
Error::CsvError(_) => "CsvError",
}
}
@@ -56,10 +61,13 @@ impl StdError for Error {
Error::AccessError { inner, .. } => Some(inner),
Error::CopyError { inner, .. } => Some(inner),
Error::SerdeError { inner, .. } => Some(inner),
+ #[cfg(feature = "csv_output")]
Error::CsvError(inner) => Some(inner),
}
}
}
+
+#[cfg(feature = "csv_output")]
impl From<CsvError> for Error {
fn from(other: CsvError) -> Error {
Error::CsvError(other)
diff --git a/vendor/criterion/src/estimate.rs b/vendor/criterion/src/estimate.rs
index 8a79d27a8..8a79d27a8 100755..100644
--- a/vendor/criterion/src/estimate.rs
+++ b/vendor/criterion/src/estimate.rs
diff --git a/vendor/criterion/src/format.rs b/vendor/criterion/src/format.rs
index 53c4a4dbd..53c4a4dbd 100755..100644
--- a/vendor/criterion/src/format.rs
+++ b/vendor/criterion/src/format.rs
diff --git a/vendor/criterion/src/fs.rs b/vendor/criterion/src/fs.rs
index f47508be7..f47508be7 100755..100644
--- a/vendor/criterion/src/fs.rs
+++ b/vendor/criterion/src/fs.rs
diff --git a/vendor/criterion/src/html/benchmark_report.html.tt b/vendor/criterion/src/html/benchmark_report.html.tt
index babd0032e..babd0032e 100755..100644
--- a/vendor/criterion/src/html/benchmark_report.html.tt
+++ b/vendor/criterion/src/html/benchmark_report.html.tt
diff --git a/vendor/criterion/src/html/index.html.tt b/vendor/criterion/src/html/index.html.tt
index 7c307ed62..7c307ed62 100755..100644
--- a/vendor/criterion/src/html/index.html.tt
+++ b/vendor/criterion/src/html/index.html.tt
diff --git a/vendor/criterion/src/html/mod.rs b/vendor/criterion/src/html/mod.rs
index d36065623..eb31a8168 100755..100644
--- a/vendor/criterion/src/html/mod.rs
+++ b/vendor/criterion/src/html/mod.rs
@@ -438,7 +438,7 @@ impl Report for Html {
// If all of the value strings can be parsed into a number, sort/dedupe
// numerically. Otherwise sort lexicographically.
- if value_strs.iter().all(|os| try_parse(*os).is_some()) {
+ if value_strs.iter().all(|os| try_parse(os).is_some()) {
value_strs.sort_unstable_by(|v1, v2| {
let num1 = try_parse(v1);
let num2 = try_parse(v2);
@@ -464,7 +464,7 @@ impl Report for Html {
self.generate_summary(
&subgroup_id,
- &*samples_with_function,
+ &samples_with_function,
context,
formatter,
false,
@@ -483,13 +483,7 @@ impl Report for Html {
let subgroup_id =
BenchmarkId::new(group_id.clone(), None, Some(value_str.clone()), None);
- self.generate_summary(
- &subgroup_id,
- &*samples_with_value,
- context,
- formatter,
- false,
- );
+ self.generate_summary(&subgroup_id, &samples_with_value, context, formatter, false);
}
}
@@ -516,7 +510,7 @@ impl Report for Html {
self.generate_summary(
&BenchmarkId::new(group_id, None, None, None),
- &*(all_data),
+ &all_data,
context,
formatter,
true,
@@ -543,8 +537,8 @@ impl Report for Html {
}
let mut groups = id_groups
- .into_iter()
- .map(|(_, group)| BenchmarkGroup::new(output_directory, &group))
+ .into_values()
+ .map(|group| BenchmarkGroup::new(output_directory, &group))
.collect::<Vec<BenchmarkGroup<'_>>>();
groups.sort_unstable_by_key(|g| g.group_report.name);
diff --git a/vendor/criterion/src/html/report_link.html.tt b/vendor/criterion/src/html/report_link.html.tt
index 6013114c9..6013114c9 100755..100644
--- a/vendor/criterion/src/html/report_link.html.tt
+++ b/vendor/criterion/src/html/report_link.html.tt
diff --git a/vendor/criterion/src/html/summary_report.html.tt b/vendor/criterion/src/html/summary_report.html.tt
index 4f36f62ba..4f36f62ba 100755..100644
--- a/vendor/criterion/src/html/summary_report.html.tt
+++ b/vendor/criterion/src/html/summary_report.html.tt
diff --git a/vendor/criterion/src/kde.rs b/vendor/criterion/src/kde.rs
index 8812142eb..8812142eb 100755..100644
--- a/vendor/criterion/src/kde.rs
+++ b/vendor/criterion/src/kde.rs
diff --git a/vendor/criterion/src/lib.rs b/vendor/criterion/src/lib.rs
index 426437193..855c68ff2 100755..100644
--- a/vendor/criterion/src/lib.rs
+++ b/vendor/criterion/src/lib.rs
@@ -27,18 +27,18 @@
)
)]
+#[cfg(all(feature = "rayon", target_arch = "wasm32"))]
+compile_error!("Rayon cannot be used when targeting wasi32. Try disabling default features.");
+
#[cfg(test)]
extern crate approx;
#[cfg(test)]
extern crate quickcheck;
-use clap::value_t;
+use is_terminal::IsTerminal;
use regex::Regex;
-#[macro_use]
-extern crate lazy_static;
-
#[cfg(feature = "real_blackbox")]
extern crate test;
@@ -57,6 +57,7 @@ mod benchmark_group;
pub mod async_executor;
mod bencher;
mod connection;
+#[cfg(feature = "csv_output")]
mod csv_report;
mod error;
mod estimate;
@@ -76,9 +77,7 @@ use std::cell::RefCell;
use std::collections::HashSet;
use std::default::Default;
use std::env;
-use std::fmt;
-use std::iter::IntoIterator;
-use std::marker::PhantomData;
+use std::io::stdout;
use std::net::TcpStream;
use std::path::{Path, PathBuf};
use std::process::Command;
@@ -86,69 +85,65 @@ use std::sync::{Mutex, MutexGuard};
use std::time::Duration;
use criterion_plot::{Version, VersionError};
+use once_cell::sync::Lazy;
use crate::benchmark::BenchmarkConfig;
-use crate::benchmark::NamedRoutine;
use crate::connection::Connection;
use crate::connection::OutgoingMessage;
-use crate::csv_report::FileCsvReport;
use crate::html::Html;
use crate::measurement::{Measurement, WallTime};
-use crate::plot::{Gnuplot, Plotter, PlottersBackend};
+#[cfg(feature = "plotters")]
+use crate::plot::PlottersBackend;
+use crate::plot::{Gnuplot, Plotter};
use crate::profiler::{ExternalProfiler, Profiler};
-use crate::report::{BencherReport, CliReport, Report, ReportContext, Reports};
-use crate::routine::Function;
+use crate::report::{BencherReport, CliReport, CliVerbosity, Report, ReportContext, Reports};
#[cfg(feature = "async")]
pub use crate::bencher::AsyncBencher;
pub use crate::bencher::Bencher;
-#[allow(deprecated)]
-pub use crate::benchmark::{Benchmark, BenchmarkDefinition, ParameterizedBenchmark};
pub use crate::benchmark_group::{BenchmarkGroup, BenchmarkId};
-lazy_static! {
- static ref DEBUG_ENABLED: bool = std::env::var_os("CRITERION_DEBUG").is_some();
- static ref GNUPLOT_VERSION: Result<Version, VersionError> = criterion_plot::version();
- static ref DEFAULT_PLOTTING_BACKEND: PlottingBackend = {
- match &*GNUPLOT_VERSION {
- Ok(_) => PlottingBackend::Gnuplot,
- Err(e) => {
- match e {
- VersionError::Exec(_) => println!("Gnuplot not found, using plotters backend"),
- e => println!(
- "Gnuplot not found or not usable, using plotters backend\n{}",
- e
- ),
- };
- PlottingBackend::Plotters
- }
- }
- };
- static ref CARGO_CRITERION_CONNECTION: Option<Mutex<Connection>> = {
- match std::env::var("CARGO_CRITERION_PORT") {
- Ok(port_str) => {
- let port: u16 = port_str.parse().ok()?;
- let stream = TcpStream::connect(("localhost", port)).ok()?;
- Some(Mutex::new(Connection::new(stream).ok()?))
- }
- Err(_) => None,
- }
- };
- static ref DEFAULT_OUTPUT_DIRECTORY: PathBuf = {
- // Set criterion home to (in descending order of preference):
- // - $CRITERION_HOME (cargo-criterion sets this, but other users could as well)
- // - $CARGO_TARGET_DIR/criterion
- // - the cargo target dir from `cargo metadata`
- // - ./target/criterion
- if let Some(value) = env::var_os("CRITERION_HOME") {
- PathBuf::from(value)
- } else if let Some(path) = cargo_target_directory() {
- path.join("criterion")
- } else {
- PathBuf::from("target/criterion")
+static DEBUG_ENABLED: Lazy<bool> = Lazy::new(|| std::env::var_os("CRITERION_DEBUG").is_some());
+static GNUPLOT_VERSION: Lazy<Result<Version, VersionError>> = Lazy::new(criterion_plot::version);
+static DEFAULT_PLOTTING_BACKEND: Lazy<PlottingBackend> = Lazy::new(|| match &*GNUPLOT_VERSION {
+ Ok(_) => PlottingBackend::Gnuplot,
+ #[cfg(feature = "plotters")]
+ Err(e) => {
+ match e {
+ VersionError::Exec(_) => eprintln!("Gnuplot not found, using plotters backend"),
+ e => eprintln!(
+ "Gnuplot not found or not usable, using plotters backend\n{}",
+ e
+ ),
+ };
+ PlottingBackend::Plotters
+ }
+ #[cfg(not(feature = "plotters"))]
+ Err(_) => PlottingBackend::None,
+});
+static CARGO_CRITERION_CONNECTION: Lazy<Option<Mutex<Connection>>> =
+ Lazy::new(|| match std::env::var("CARGO_CRITERION_PORT") {
+ Ok(port_str) => {
+ let port: u16 = port_str.parse().ok()?;
+ let stream = TcpStream::connect(("localhost", port)).ok()?;
+ Some(Mutex::new(Connection::new(stream).ok()?))
}
- };
-}
+ Err(_) => None,
+ });
+static DEFAULT_OUTPUT_DIRECTORY: Lazy<PathBuf> = Lazy::new(|| {
+ // Set criterion home to (in descending order of preference):
+ // - $CRITERION_HOME (cargo-criterion sets this, but other users could as well)
+ // - $CARGO_TARGET_DIR/criterion
+ // - the cargo target dir from `cargo metadata`
+ // - ./target/criterion
+ if let Some(value) = env::var_os("CRITERION_HOME") {
+ PathBuf::from(value)
+ } else if let Some(path) = cargo_target_directory() {
+ path.join("criterion")
+ } else {
+ PathBuf::from("target/criterion")
+ }
+});
fn debug_enabled() -> bool {
*DEBUG_ENABLED
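The hunk above replaces the `lazy_static!` block with `once_cell::sync::Lazy` statics. A minimal sketch of the pattern, using an illustrative static name and environment variable rather than Criterion's:

use once_cell::sync::Lazy;

// A static whose initializer runs once, on first dereference, replacing the
// removed lazy_static! block.
static DEBUG: Lazy<bool> = Lazy::new(|| std::env::var_os("MY_DEBUG").is_some());

fn my_debug_enabled() -> bool {
    *DEBUG
}
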
@@ -177,36 +172,6 @@ pub fn black_box<T>(dummy: T) -> T {
}
}
-/// Representing a function to benchmark together with a name of that function.
-/// Used together with `bench_functions` to represent one out of multiple functions
-/// under benchmark.
-#[doc(hidden)]
-pub struct Fun<I: fmt::Debug, M: Measurement + 'static = WallTime> {
- f: NamedRoutine<I, M>,
- _phantom: PhantomData<M>,
-}
-
-impl<I, M: Measurement> Fun<I, M>
-where
- I: fmt::Debug + 'static,
-{
- /// Create a new `Fun` given a name and a closure
- pub fn new<F>(name: &str, f: F) -> Fun<I, M>
- where
- F: FnMut(&mut Bencher<'_, M>, &I) + 'static,
- {
- let routine = NamedRoutine {
- id: name.to_owned(),
- f: Box::new(RefCell::new(Function::new(f))),
- };
-
- Fun {
- f: routine,
- _phantom: PhantomData,
- }
- }
-}
-
/// Argument to [`Bencher::iter_batched`](struct.Bencher.html#method.iter_batched) and
/// [`Bencher::iter_batched_ref`](struct.Bencher.html#method.iter_batched_ref) which controls the
/// batch size.
@@ -296,12 +261,17 @@ impl BatchSize {
/// Baseline describes how the baseline_directory is handled.
#[derive(Debug, Clone, Copy)]
pub enum Baseline {
- /// Compare ensures a previous saved version of the baseline
- /// exists and runs comparison against that.
- Compare,
+ /// CompareLenient compares against a previous saved version of the baseline.
+ /// If a previous baseline does not exist, the benchmark is run as normal but no comparison occurs.
+ CompareLenient,
+ /// CompareStrict compares against a previous saved version of the baseline.
+ /// If a previous baseline does not exist, a panic occurs.
+ CompareStrict,
/// Save writes the benchmark results to the baseline directory,
/// overwriting any results that were previously there.
Save,
+ /// Discard benchmark results.
+ Discard,
}
/// Enum used to select the plotting backend.
@@ -313,12 +283,18 @@ pub enum PlottingBackend {
/// Plotting backend which uses the rust 'Plotters' library. This is the default if `gnuplot`
/// is not installed.
Plotters,
+ /// Null plotting backend which outputs nothing,
+ None,
}
impl PlottingBackend {
- fn create_plotter(&self) -> Box<dyn Plotter> {
+ fn create_plotter(&self) -> Option<Box<dyn Plotter>> {
match self {
- PlottingBackend::Gnuplot => Box::new(Gnuplot::default()),
- PlottingBackend::Plotters => Box::new(PlottersBackend::default()),
+ PlottingBackend::Gnuplot => Some(Box::<Gnuplot>::default()),
+ #[cfg(feature = "plotters")]
+ PlottingBackend::Plotters => Some(Box::<PlottersBackend>::default()),
+ #[cfg(not(feature = "plotters"))]
+ PlottingBackend::Plotters => panic!("Criterion was built without plotters support."),
+ PlottingBackend::None => None,
}
}
}
@@ -329,7 +305,7 @@ pub(crate) enum Mode {
/// Run benchmarks normally.
Benchmark,
/// List all benchmarks but do not run them.
- List,
+ List(ListFormat),
/// Run benchmarks once to verify that they work, but otherwise do not measure them.
Test,
/// Iterate benchmarks for a given length of time but do not analyze or report on them.
@@ -339,6 +315,39 @@ impl Mode {
pub fn is_benchmark(&self) -> bool {
matches!(self, Mode::Benchmark)
}
+
+ pub fn is_terse(&self) -> bool {
+ matches!(self, Mode::List(ListFormat::Terse))
+ }
+}
+
+#[derive(Debug, Clone)]
+/// Enum representing the list format.
+pub(crate) enum ListFormat {
+ /// The regular, default format.
+ Pretty,
+ /// The terse format, where nothing other than the name of the test and ": benchmark" at the end
+ /// is printed out.
+ Terse,
+}
+
+impl Default for ListFormat {
+ fn default() -> Self {
+ Self::Pretty
+ }
+}
+
+/// Benchmark filtering support.
+#[derive(Clone, Debug)]
+pub enum BenchmarkFilter {
+ /// Run all benchmarks.
+ AcceptAll,
+ /// Run benchmarks matching this regex.
+ Regex(Regex),
+ /// Run the benchmark matching this string exactly.
+ Exact(String),
+ /// Do not run any benchmarks.
+ RejectAll,
}
/// The benchmark manager
@@ -357,7 +366,7 @@ impl Mode {
/// benchmark.
pub struct Criterion<M: Measurement = WallTime> {
config: BenchmarkConfig,
- filter: Option<Regex>,
+ filter: BenchmarkFilter,
report: Reports,
output_directory: PathBuf,
baseline_directory: String,
@@ -383,7 +392,7 @@ fn cargo_target_directory() -> Option<PathBuf> {
.map(PathBuf::from)
.or_else(|| {
let output = Command::new(env::var_os("CARGO")?)
- .args(&["metadata", "--format-version", "1"])
+ .args(["metadata", "--format-version", "1"])
.output()
.ok()?;
let metadata: Metadata = serde_json::from_slice(&output.stdout).ok()?;
@@ -406,27 +415,26 @@ impl Default for Criterion {
fn default() -> Criterion {
let reports = Reports {
cli_enabled: true,
- cli: CliReport::new(false, false, false),
+ cli: CliReport::new(false, false, CliVerbosity::Normal),
bencher_enabled: false,
bencher: BencherReport,
- html_enabled: true,
- html: Html::new(DEFAULT_PLOTTING_BACKEND.create_plotter()),
- csv_enabled: true,
- csv: FileCsvReport,
+ html: DEFAULT_PLOTTING_BACKEND.create_plotter().map(Html::new),
+ csv_enabled: cfg!(feature = "csv_output"),
};
let mut criterion = Criterion {
config: BenchmarkConfig {
confidence_level: 0.95,
- measurement_time: Duration::new(5, 0),
+ measurement_time: Duration::from_secs(5),
noise_threshold: 0.01,
nresamples: 100_000,
sample_size: 100,
significance_level: 0.05,
- warm_up_time: Duration::new(3, 0),
+ warm_up_time: Duration::from_secs(3),
sampling_mode: SamplingMode::Auto,
+ quick_mode: false,
},
- filter: None,
+ filter: BenchmarkFilter::AcceptAll,
report: reports,
baseline_directory: "base".to_owned(),
baseline: Baseline::Save,
@@ -447,7 +455,7 @@ impl Default for Criterion {
criterion.report.cli_enabled = false;
criterion.report.bencher_enabled = false;
criterion.report.csv_enabled = false;
- criterion.report.html_enabled = false;
+ criterion.report.html = None;
}
criterion
}
@@ -475,6 +483,7 @@ impl<M: Measurement> Criterion<M> {
}
}
+ #[must_use]
/// Changes the internal profiler for benchmarks run with this runner. See
/// the Profiler trait for more details.
pub fn with_profiler<P: Profiler + 'static>(self, p: P) -> Criterion<M> {
@@ -484,6 +493,7 @@ impl<M: Measurement> Criterion<M> {
}
}
+ #[must_use]
/// Set the plotting backend. By default, Criterion will use gnuplot if available, or plotters
/// if not.
///
@@ -498,10 +508,11 @@ impl<M: Measurement> Criterion<M> {
);
}
- self.report.html = Html::new(backend.create_plotter());
+ self.report.html = backend.create_plotter().map(Html::new);
self
}
+ #[must_use]
/// Changes the default size of the sample for benchmarks run with this runner.
///
/// A bigger sample should yield more accurate results if paired with a sufficiently large
@@ -519,18 +530,20 @@ impl<M: Measurement> Criterion<M> {
self
}
+ #[must_use]
/// Changes the default warm up time for benchmarks run with this runner.
///
/// # Panics
///
/// Panics if the input duration is zero
pub fn warm_up_time(mut self, dur: Duration) -> Criterion<M> {
- assert!(dur.to_nanos() > 0);
+ assert!(dur.as_nanos() > 0);
self.config.warm_up_time = dur;
self
}
+ #[must_use]
/// Changes the default measurement time for benchmarks run with this runner.
///
/// With a longer time, the measurement will become more resilient to transitory peak loads
@@ -542,12 +555,13 @@ impl<M: Measurement> Criterion<M> {
///
/// Panics if the input duration in zero
pub fn measurement_time(mut self, dur: Duration) -> Criterion<M> {
- assert!(dur.to_nanos() > 0);
+ assert!(dur.as_nanos() > 0);
self.config.measurement_time = dur;
self
}
+ #[must_use]
/// Changes the default number of resamples for benchmarks run with this runner.
///
/// Number of resamples to use for the
@@ -562,13 +576,14 @@ impl<M: Measurement> Criterion<M> {
pub fn nresamples(mut self, n: usize) -> Criterion<M> {
assert!(n > 0);
if n <= 1000 {
- println!("\nWarning: It is not recommended to reduce nresamples below 1000.");
+ eprintln!("\nWarning: It is not recommended to reduce nresamples below 1000.");
}
self.config.nresamples = n;
self
}
+ #[must_use]
/// Changes the default noise threshold for benchmarks run with this runner. The noise threshold
/// is used to filter out small changes in performance, even if they are statistically
/// significant. Sometimes benchmarking the same code twice will result in small but
@@ -588,6 +603,7 @@ impl<M: Measurement> Criterion<M> {
self
}
+ #[must_use]
/// Changes the default confidence level for benchmarks run with this runner. The confidence
/// level is the desired probability that the true runtime lies within the estimated
/// [confidence interval](https://en.wikipedia.org/wiki/Confidence_interval). The default is
@@ -599,13 +615,14 @@ impl<M: Measurement> Criterion<M> {
pub fn confidence_level(mut self, cl: f64) -> Criterion<M> {
assert!(cl > 0.0 && cl < 1.0);
if cl < 0.5 {
- println!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
+ eprintln!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
}
self.config.confidence_level = cl;
self
}
+ #[must_use]
/// Changes the default [significance level](https://en.wikipedia.org/wiki/Statistical_significance)
/// for benchmarks run with this runner. This is used to perform a
/// [hypothesis test](https://en.wikipedia.org/wiki/Statistical_hypothesis_testing) to see if
@@ -633,32 +650,29 @@ impl<M: Measurement> Criterion<M> {
self
}
+ #[must_use]
/// Enables plotting
pub fn with_plots(mut self) -> Criterion<M> {
// If running under cargo-criterion then don't re-enable the reports; let it do the reporting.
- if self.connection.is_none() {
- self.report.html_enabled = true;
+ if self.connection.is_none() && self.report.html.is_none() {
+ let default_backend = DEFAULT_PLOTTING_BACKEND.create_plotter();
+ if let Some(backend) = default_backend {
+ self.report.html = Some(Html::new(backend));
+ } else {
+ panic!("Cannot find a default plotting backend!");
+ }
}
self
}
+ #[must_use]
/// Disables plotting
pub fn without_plots(mut self) -> Criterion<M> {
- self.report.html_enabled = false;
+ self.report.html = None;
self
}
- /// Return true if generation of the plots is possible.
- #[deprecated(
- since = "0.3.4",
- note = "No longer useful; since the plotters backend is available Criterion.rs can always generate plots"
- )]
- pub fn can_plot(&self) -> bool {
- // Trivially true now that we have plotters.
- // TODO: Deprecate and remove this.
- true
- }
-
+ #[must_use]
/// Names an explicit baseline and enables overwriting the previous results.
pub fn save_baseline(mut self, baseline: String) -> Criterion<M> {
self.baseline_directory = baseline;
@@ -666,15 +680,23 @@ impl<M: Measurement> Criterion<M> {
self
}
+ #[must_use]
/// Names an explicit baseline and disables overwriting the previous results.
- pub fn retain_baseline(mut self, baseline: String) -> Criterion<M> {
+ pub fn retain_baseline(mut self, baseline: String, strict: bool) -> Criterion<M> {
self.baseline_directory = baseline;
- self.baseline = Baseline::Compare;
+ self.baseline = if strict {
+ Baseline::CompareStrict
+ } else {
+ Baseline::CompareLenient
+ };
self
}
+ #[must_use]
/// Filters the benchmarks. Only benchmarks with names that contain the
/// given string will be executed.
+ ///
+ /// This overwrites [`Self::with_benchmark_filter`].
pub fn with_filter<S: Into<String>>(mut self, filter: S) -> Criterion<M> {
let filter_text = filter.into();
let filter = Regex::new(&filter_text).unwrap_or_else(|err| {
@@ -683,11 +705,21 @@ impl<M: Measurement> Criterion<M> {
filter_text, err
)
});
- self.filter = Some(filter);
+ self.filter = BenchmarkFilter::Regex(filter);
+
+ self
+ }
+
+ /// Only run benchmarks specified by the given filter.
+ ///
+ /// This overwrites [`Self::with_filter`].
+ pub fn with_benchmark_filter(mut self, filter: BenchmarkFilter) -> Criterion<M> {
+ self.filter = filter;
self
}
+ #[must_use]
/// Override whether the CLI output will be colored or not. Usually you would use the `--color`
/// CLI argument, but this is available for programmmatic use as well.
pub fn with_output_color(mut self, enabled: bool) -> Criterion<M> {
@@ -696,6 +728,7 @@ impl<M: Measurement> Criterion<M> {
}
/// Set the output directory (currently for testing only)
+ #[must_use]
#[doc(hidden)]
pub fn output_directory(mut self, path: &Path) -> Criterion<M> {
self.output_directory = path.to_owned();
@@ -704,6 +737,7 @@ impl<M: Measurement> Criterion<M> {
}
/// Set the profile time (currently for testing only)
+ #[must_use]
#[doc(hidden)]
pub fn profile_time(mut self, profile_time: Option<Duration>) -> Criterion<M> {
match profile_time {
@@ -731,114 +765,152 @@ impl<M: Measurement> Criterion<M> {
/// Configure this criterion struct based on the command-line arguments to
/// this process.
+ #[must_use]
#[cfg_attr(feature = "cargo-clippy", allow(clippy::cognitive_complexity))]
pub fn configure_from_args(mut self) -> Criterion<M> {
- use clap::{App, Arg};
- let matches = App::new("Criterion Benchmark")
- .arg(Arg::with_name("FILTER")
+ use clap::{value_parser, Arg, Command};
+ let matches = Command::new("Criterion Benchmark")
+ .arg(Arg::new("FILTER")
.help("Skip benchmarks whose names do not contain FILTER.")
.index(1))
- .arg(Arg::with_name("color")
- .short("c")
+ .arg(Arg::new("color")
+ .short('c')
.long("color")
.alias("colour")
- .takes_value(true)
- .possible_values(&["auto", "always", "never"])
+ .value_parser(["auto", "always", "never"])
.default_value("auto")
.help("Configure coloring of output. always = always colorize output, never = never colorize output, auto = colorize output if output is a tty and compiled for unix."))
- .arg(Arg::with_name("verbose")
- .short("v")
+ .arg(Arg::new("verbose")
+ .short('v')
.long("verbose")
+ .num_args(0)
.help("Print additional statistical information."))
- .arg(Arg::with_name("noplot")
- .short("n")
+ .arg(Arg::new("quiet")
+ .long("quiet")
+ .num_args(0)
+ .conflicts_with("verbose")
+ .help("Print only the benchmark results."))
+ .arg(Arg::new("noplot")
+ .short('n')
.long("noplot")
+ .num_args(0)
.help("Disable plot and HTML generation."))
- .arg(Arg::with_name("save-baseline")
- .short("s")
+ .arg(Arg::new("save-baseline")
+ .short('s')
.long("save-baseline")
.default_value("base")
.help("Save results under a named baseline."))
- .arg(Arg::with_name("baseline")
- .short("b")
+ .arg(Arg::new("discard-baseline")
+ .long("discard-baseline")
+ .num_args(0)
+ .conflicts_with_all(["save-baseline", "baseline", "baseline-lenient"])
+ .help("Discard benchmark results."))
+ .arg(Arg::new("baseline")
+ .short('b')
.long("baseline")
- .takes_value(true)
- .conflicts_with("save-baseline")
- .help("Compare to a named baseline."))
- .arg(Arg::with_name("list")
+ .conflicts_with_all(["save-baseline", "baseline-lenient"])
+ .help("Compare to a named baseline. If any benchmarks do not have the specified baseline this command fails."))
+ .arg(Arg::new("baseline-lenient")
+ .long("baseline-lenient")
+ .conflicts_with_all(["save-baseline", "baseline"])
+ .help("Compare to a named baseline. If any benchmarks do not have the specified baseline then just those benchmarks are not compared against the baseline while every other benchmark is compared against the baseline."))
+ .arg(Arg::new("list")
.long("list")
+ .num_args(0)
.help("List all benchmarks")
- .conflicts_with_all(&["test", "profile-time"]))
- .arg(Arg::with_name("profile-time")
+ .conflicts_with_all(["test", "profile-time"]))
+ .arg(Arg::new("format")
+ .long("format")
+ .value_parser(["pretty", "terse"])
+ .default_value("pretty")
+ // Note that libtest's --format also works during test execution, but criterion
+ // doesn't support that at the moment.
+ .help("Output formatting"))
+ .arg(Arg::new("ignored")
+ .long("ignored")
+ .num_args(0)
+ .help("List or run ignored benchmarks (currently means skip all benchmarks)"))
+ .arg(Arg::new("exact")
+ .long("exact")
+ .num_args(0)
+ .help("Run benchmarks that exactly match the provided filter"))
+ .arg(Arg::new("profile-time")
.long("profile-time")
- .takes_value(true)
+ .value_parser(value_parser!(f64))
.help("Iterate each benchmark for approximately the given number of seconds, doing no analysis and without storing the results. Useful for running the benchmarks in a profiler.")
- .conflicts_with_all(&["test", "list"]))
- .arg(Arg::with_name("load-baseline")
+ .conflicts_with_all(["test", "list"]))
+ .arg(Arg::new("load-baseline")
.long("load-baseline")
- .takes_value(true)
.conflicts_with("profile-time")
.requires("baseline")
.help("Load a previous baseline instead of sampling new data."))
- .arg(Arg::with_name("sample-size")
+ .arg(Arg::new("sample-size")
.long("sample-size")
- .takes_value(true)
- .help(&format!("Changes the default size of the sample for this run. [default: {}]", self.config.sample_size)))
- .arg(Arg::with_name("warm-up-time")
+ .value_parser(value_parser!(usize))
+ .help(format!("Changes the default size of the sample for this run. [default: {}]", self.config.sample_size)))
+ .arg(Arg::new("warm-up-time")
.long("warm-up-time")
- .takes_value(true)
- .help(&format!("Changes the default warm up time for this run. [default: {}]", self.config.warm_up_time.as_secs())))
- .arg(Arg::with_name("measurement-time")
+ .value_parser(value_parser!(f64))
+ .help(format!("Changes the default warm up time for this run. [default: {}]", self.config.warm_up_time.as_secs())))
+ .arg(Arg::new("measurement-time")
.long("measurement-time")
- .takes_value(true)
- .help(&format!("Changes the default measurement time for this run. [default: {}]", self.config.measurement_time.as_secs())))
- .arg(Arg::with_name("nresamples")
+ .value_parser(value_parser!(f64))
+ .help(format!("Changes the default measurement time for this run. [default: {}]", self.config.measurement_time.as_secs())))
+ .arg(Arg::new("nresamples")
.long("nresamples")
- .takes_value(true)
- .help(&format!("Changes the default number of resamples for this run. [default: {}]", self.config.nresamples)))
- .arg(Arg::with_name("noise-threshold")
+ .value_parser(value_parser!(usize))
+ .help(format!("Changes the default number of resamples for this run. [default: {}]", self.config.nresamples)))
+ .arg(Arg::new("noise-threshold")
.long("noise-threshold")
- .takes_value(true)
- .help(&format!("Changes the default noise threshold for this run. [default: {}]", self.config.noise_threshold)))
- .arg(Arg::with_name("confidence-level")
+ .value_parser(value_parser!(f64))
+ .help(format!("Changes the default noise threshold for this run. [default: {}]", self.config.noise_threshold)))
+ .arg(Arg::new("confidence-level")
.long("confidence-level")
- .takes_value(true)
- .help(&format!("Changes the default confidence level for this run. [default: {}]", self.config.confidence_level)))
- .arg(Arg::with_name("significance-level")
+ .value_parser(value_parser!(f64))
+ .help(format!("Changes the default confidence level for this run. [default: {}]", self.config.confidence_level)))
+ .arg(Arg::new("significance-level")
.long("significance-level")
- .takes_value(true)
- .help(&format!("Changes the default significance level for this run. [default: {}]", self.config.significance_level)))
- .arg(Arg::with_name("test")
- .hidden(true)
+ .value_parser(value_parser!(f64))
+ .help(format!("Changes the default significance level for this run. [default: {}]", self.config.significance_level)))
+ .arg(Arg::new("quick")
+ .long("quick")
+ .num_args(0)
+ .conflicts_with("sample-size")
+ .help(format!("Benchmark only until the significance level has been reached [default: {}]", self.config.quick_mode)))
+ .arg(Arg::new("test")
+ .hide(true)
.long("test")
+ .num_args(0)
.help("Run the benchmarks once, to verify that they execute successfully, but do not measure or report the results.")
- .conflicts_with_all(&["list", "profile-time"]))
- .arg(Arg::with_name("bench")
- .hidden(true)
- .long("bench"))
- .arg(Arg::with_name("plotting-backend")
+ .conflicts_with_all(["list", "profile-time"]))
+ .arg(Arg::new("bench")
+ .hide(true)
+ .long("bench")
+ .num_args(0))
+ .arg(Arg::new("plotting-backend")
.long("plotting-backend")
- .takes_value(true)
- .possible_values(&["gnuplot", "plotters"])
+ .value_parser(["gnuplot", "plotters"])
.help("Set the plotting backend. By default, Criterion.rs will use the gnuplot backend if gnuplot is available, or the plotters backend if it isn't."))
- .arg(Arg::with_name("output-format")
+ .arg(Arg::new("output-format")
.long("output-format")
- .takes_value(true)
- .possible_values(&["criterion", "bencher"])
+ .value_parser(["criterion", "bencher"])
.default_value("criterion")
.help("Change the CLI output format. By default, Criterion.rs will use its own format. If output format is set to 'bencher', Criterion.rs will print output in a format that resembles the 'bencher' crate."))
- .arg(Arg::with_name("nocapture")
+ .arg(Arg::new("nocapture")
.long("nocapture")
- .hidden(true)
+ .num_args(0)
+ .hide(true)
.help("Ignored, but added for compatibility with libtest."))
- .arg(Arg::with_name("show-output")
+ .arg(Arg::new("show-output")
.long("show-output")
- .hidden(true)
+ .num_args(0)
+ .hide(true)
.help("Ignored, but added for compatibility with libtest."))
- .arg(Arg::with_name("version")
- .hidden(true)
- .short("V")
- .long("version"))
+ .arg(Arg::new("version")
+ .hide(true)
+ .short('V')
+ .long("version")
+ .num_args(0))
.after_help("
This executable is a Criterion.rs benchmark.
See https://github.com/bheisler/criterion.rs for more details.
@@ -855,62 +927,68 @@ https://bheisler.github.io/criterion.rs/book/faq.html
.get_matches();
if self.connection.is_some() {
- if let Some(color) = matches.value_of("color") {
+ if let Some(color) = matches.get_one::<String>("color") {
if color != "auto" {
- println!("Warning: --color will be ignored when running with cargo-criterion. Use `cargo criterion --color {} -- <args>` instead.", color);
+ eprintln!("Warning: --color will be ignored when running with cargo-criterion. Use `cargo criterion --color {} -- <args>` instead.", color);
}
}
- if matches.is_present("verbose") {
- println!("Warning: --verbose will be ignored when running with cargo-criterion. Use `cargo criterion --output-format verbose -- <args>` instead.");
+ if matches.get_flag("verbose") {
+ eprintln!("Warning: --verbose will be ignored when running with cargo-criterion. Use `cargo criterion --output-format verbose -- <args>` instead.");
}
- if matches.is_present("noplot") {
- println!("Warning: --noplot will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend disabled -- <args>` instead.");
+ if matches.get_flag("noplot") {
+ eprintln!("Warning: --noplot will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend disabled -- <args>` instead.");
}
- if let Some(backend) = matches.value_of("plotting-backend") {
- println!("Warning: --plotting-backend will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend {} -- <args>` instead.", backend);
+ if let Some(backend) = matches.get_one::<String>("plotting-backend") {
+ eprintln!("Warning: --plotting-backend will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend {} -- <args>` instead.", backend);
}
- if let Some(format) = matches.value_of("output-format") {
+ if let Some(format) = matches.get_one::<String>("output-format") {
if format != "criterion" {
- println!("Warning: --output-format will be ignored when running with cargo-criterion. Use `cargo criterion --output-format {} -- <args>` instead.", format);
+ eprintln!("Warning: --output-format will be ignored when running with cargo-criterion. Use `cargo criterion --output-format {} -- <args>` instead.", format);
}
}
- if matches.is_present("baseline")
+ if matches.contains_id("baseline")
|| matches
- .value_of("save-baseline")
- .map(|base| base != "base")
- .unwrap_or(false)
- || matches.is_present("load-baseline")
+ .get_one::<String>("save-baseline")
+ .map_or(false, |base| base != "base")
+ || matches.contains_id("load-baseline")
{
- println!("Error: baselines are not supported when running with cargo-criterion.");
+ eprintln!("Error: baselines are not supported when running with cargo-criterion.");
std::process::exit(1);
}
}
- let bench = matches.is_present("bench");
- let test = matches.is_present("test");
+ let bench = matches.get_flag("bench");
+ let test = matches.get_flag("test");
let test_mode = match (bench, test) {
(true, true) => true, // cargo bench -- --test should run tests
(true, false) => false, // cargo bench should run benchmarks
(false, _) => true, // cargo test --benches should run tests
};
- self.mode = if test_mode {
+ self.mode = if matches.get_flag("list") {
+ let list_format = match matches
+ .get_one::<String>("format")
+ .expect("a default value was provided for this")
+ .as_str()
+ {
+ "pretty" => ListFormat::Pretty,
+ "terse" => ListFormat::Terse,
+ other => unreachable!(
+ "unrecognized value for --format that isn't part of possible-values: {}",
+ other
+ ),
+ };
+ Mode::List(list_format)
+ } else if test_mode {
Mode::Test
- } else if matches.is_present("list") {
- Mode::List
- } else if matches.is_present("profile-time") {
- let num_seconds = value_t!(matches.value_of("profile-time"), u64).unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
-
- if num_seconds < 1 {
- println!("Profile time must be at least one second.");
+ } else if let Some(&num_seconds) = matches.get_one("profile-time") {
+ if num_seconds < 1.0 {
+ eprintln!("Profile time must be at least one second.");
std::process::exit(1);
}
- Mode::Profile(Duration::from_secs(num_seconds))
+ Mode::Profile(Duration::from_secs_f64(num_seconds))
} else {
Mode::Benchmark
};
@@ -920,11 +998,27 @@ https://bheisler.github.io/criterion.rs/book/faq.html
self.connection = None;
}
- if let Some(filter) = matches.value_of("FILTER") {
- self = self.with_filter(filter);
- }
+ let filter = if matches.get_flag("ignored") {
+ // --ignored overwrites any name-based filters passed in.
+ BenchmarkFilter::RejectAll
+ } else if let Some(filter) = matches.get_one::<String>("FILTER") {
+ if matches.get_flag("exact") {
+ BenchmarkFilter::Exact(filter.to_owned())
+ } else {
+ let regex = Regex::new(filter).unwrap_or_else(|err| {
+ panic!(
+ "Unable to parse '{}' as a regular expression: {}",
+ filter, err
+ )
+ });
+ BenchmarkFilter::Regex(regex)
+ }
+ } else {
+ BenchmarkFilter::AcceptAll
+ };
+ self = self.with_benchmark_filter(filter);
- match matches.value_of("plotting-backend") {
+ match matches.get_one("plotting-backend").map(String::as_str) {
// Use plotting_backend() here to re-use the panic behavior if Gnuplot is not available.
Some("gnuplot") => self = self.plotting_backend(PlottingBackend::Gnuplot),
Some("plotters") => self = self.plotting_backend(PlottingBackend::Plotters),
@@ -932,18 +1026,23 @@ https://bheisler.github.io/criterion.rs/book/faq.html
None => {}
}
- if matches.is_present("noplot") {
+ if matches.get_flag("noplot") {
self = self.without_plots();
- } else {
- self = self.with_plots();
}
- if let Some(dir) = matches.value_of("save-baseline") {
+ if let Some(dir) = matches.get_one::<String>("save-baseline") {
self.baseline = Baseline::Save;
self.baseline_directory = dir.to_owned()
}
- if let Some(dir) = matches.value_of("baseline") {
- self.baseline = Baseline::Compare;
+ if matches.get_flag("discard-baseline") {
+ self.baseline = Baseline::Discard;
+ }
+ if let Some(dir) = matches.get_one::<String>("baseline") {
+ self.baseline = Baseline::CompareStrict;
+ self.baseline_directory = dir.to_owned();
+ }
+ if let Some(dir) = matches.get_one::<String>("baseline-lenient") {
+ self.baseline = Baseline::CompareLenient;
self.baseline_directory = dir.to_owned();
}
@@ -952,19 +1051,26 @@ https://bheisler.github.io/criterion.rs/book/faq.html
self.report.cli_enabled = false;
self.report.bencher_enabled = false;
self.report.csv_enabled = false;
- self.report.html_enabled = false;
+ self.report.html = None;
} else {
- match matches.value_of("output-format") {
+ match matches.get_one("output-format").map(String::as_str) {
Some("bencher") => {
self.report.bencher_enabled = true;
self.report.cli_enabled = false;
}
_ => {
- let verbose = matches.is_present("verbose");
- let stdout_isatty = atty::is(atty::Stream::Stdout);
+ let verbose = matches.get_flag("verbose");
+ let verbosity = if verbose {
+ CliVerbosity::Verbose
+ } else if matches.get_flag("quiet") {
+ CliVerbosity::Quiet
+ } else {
+ CliVerbosity::Normal
+ };
+ let stdout_isatty = stdout().is_terminal();
let mut enable_text_overwrite = stdout_isatty && !verbose && !debug_enabled();
let enable_text_coloring;
- match matches.value_of("color") {
+ match matches.get_one("color").map(String::as_str) {
Some("always") => {
enable_text_coloring = true;
}
@@ -977,102 +1083,76 @@ https://bheisler.github.io/criterion.rs/book/faq.html
self.report.bencher_enabled = false;
self.report.cli_enabled = true;
self.report.cli =
- CliReport::new(enable_text_overwrite, enable_text_coloring, verbose);
+ CliReport::new(enable_text_overwrite, enable_text_coloring, verbosity);
}
};
}
- if let Some(dir) = matches.value_of("load-baseline") {
+ if let Some(dir) = matches.get_one::<String>("load-baseline") {
self.load_baseline = Some(dir.to_owned());
}
- if matches.is_present("sample-size") {
- let num_size = value_t!(matches.value_of("sample-size"), usize).unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
-
+ if let Some(&num_size) = matches.get_one("sample-size") {
assert!(num_size >= 10);
self.config.sample_size = num_size;
}
- if matches.is_present("warm-up-time") {
- let num_seconds = value_t!(matches.value_of("warm-up-time"), u64).unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
-
- let dur = std::time::Duration::new(num_seconds, 0);
- assert!(dur.to_nanos() > 0);
+ if let Some(&num_seconds) = matches.get_one("warm-up-time") {
+ let dur = std::time::Duration::from_secs_f64(num_seconds);
+ assert!(dur.as_nanos() > 0);
self.config.warm_up_time = dur;
}
- if matches.is_present("measurement-time") {
- let num_seconds =
- value_t!(matches.value_of("measurement-time"), u64).unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
-
- let dur = std::time::Duration::new(num_seconds, 0);
- assert!(dur.to_nanos() > 0);
+ if let Some(&num_seconds) = matches.get_one("measurement-time") {
+ let dur = std::time::Duration::from_secs_f64(num_seconds);
+ assert!(dur.as_nanos() > 0);
self.config.measurement_time = dur;
}
- if matches.is_present("nresamples") {
- let num_resamples =
- value_t!(matches.value_of("nresamples"), usize).unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
-
+ if let Some(&num_resamples) = matches.get_one("nresamples") {
assert!(num_resamples > 0);
self.config.nresamples = num_resamples;
}
- if matches.is_present("noise-threshold") {
- let num_noise_threshold = value_t!(matches.value_of("noise-threshold"), f64)
- .unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
-
+ if let Some(&num_noise_threshold) = matches.get_one("noise-threshold") {
assert!(num_noise_threshold > 0.0);
self.config.noise_threshold = num_noise_threshold;
}
- if matches.is_present("confidence-level") {
- let num_confidence_level = value_t!(matches.value_of("confidence-level"), f64)
- .unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
-
+ if let Some(&num_confidence_level) = matches.get_one("confidence-level") {
assert!(num_confidence_level > 0.0 && num_confidence_level < 1.0);
self.config.confidence_level = num_confidence_level;
}
- if matches.is_present("significance-level") {
- let num_significance_level = value_t!(matches.value_of("significance-level"), f64)
- .unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
-
+ if let Some(&num_significance_level) = matches.get_one("significance-level") {
assert!(num_significance_level > 0.0 && num_significance_level < 1.0);
self.config.significance_level = num_significance_level;
}
+ if matches.get_flag("quick") {
+ self.config.quick_mode = true;
+ }
+
self
}
fn filter_matches(&self, id: &str) -> bool {
match &self.filter {
- Some(regex) => regex.is_match(id),
- None => true,
+ BenchmarkFilter::AcceptAll => true,
+ BenchmarkFilter::Regex(regex) => regex.is_match(id),
+ BenchmarkFilter::Exact(exact) => id == exact,
+ BenchmarkFilter::RejectAll => false,
}
}
+ /// Returns true iff we should save the benchmark results in
+ /// json files on the local disk.
+ fn should_save_baseline(&self) -> bool {
+ self.connection.is_none()
+ && self.load_baseline.is_none()
+ && !matches!(self.baseline, Baseline::Discard)
+ }
+
/// Return a benchmark group. All benchmarks performed using a benchmark group will be
/// grouped together in the final report.
///
@@ -1185,150 +1265,6 @@ where
);
self
}
-
- /// Benchmarks a function under various inputs
- ///
- /// This is a convenience method to execute several related benchmarks. Each benchmark will
- /// receive the id: `${id}/${input}`.
- ///
- /// # Example
- ///
- /// ```rust
- /// # #[macro_use] extern crate criterion;
- /// # use self::criterion::*;
- ///
- /// fn bench(c: &mut Criterion) {
- /// c.bench_function_over_inputs("from_elem",
- /// |b: &mut Bencher, size: &usize| {
- /// b.iter(|| vec![0u8; *size]);
- /// },
- /// vec![1024, 2048, 4096]
- /// );
- /// }
- ///
- /// criterion_group!(benches, bench);
- /// criterion_main!(benches);
- /// ```
- #[doc(hidden)]
- #[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
- #[allow(deprecated)]
- pub fn bench_function_over_inputs<I, F>(
- &mut self,
- id: &str,
- f: F,
- inputs: I,
- ) -> &mut Criterion<M>
- where
- I: IntoIterator,
- I::Item: fmt::Debug + 'static,
- F: FnMut(&mut Bencher<'_, M>, &I::Item) + 'static,
- {
- self.bench(id, ParameterizedBenchmark::new(id, f, inputs))
- }
-
- /// Benchmarks multiple functions
- ///
- /// All functions get the same input and are compared with the other implementations.
- /// Works similar to `bench_function`, but with multiple functions.
- ///
- /// # Example
- ///
- /// ``` rust
- /// # #[macro_use] extern crate criterion;
- /// # use self::criterion::*;
- /// # fn seq_fib(i: &u32) {}
- /// # fn par_fib(i: &u32) {}
- ///
- /// fn bench_seq_fib(b: &mut Bencher, i: &u32) {
- /// b.iter(|| {
- /// seq_fib(i);
- /// });
- /// }
- ///
- /// fn bench_par_fib(b: &mut Bencher, i: &u32) {
- /// b.iter(|| {
- /// par_fib(i);
- /// });
- /// }
- ///
- /// fn bench(c: &mut Criterion) {
- /// let sequential_fib = Fun::new("Sequential", bench_seq_fib);
- /// let parallel_fib = Fun::new("Parallel", bench_par_fib);
- /// let funs = vec![sequential_fib, parallel_fib];
- ///
- /// c.bench_functions("Fibonacci", funs, 14);
- /// }
- ///
- /// criterion_group!(benches, bench);
- /// criterion_main!(benches);
- /// ```
- #[doc(hidden)]
- #[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
- #[allow(deprecated)]
- pub fn bench_functions<I>(
- &mut self,
- id: &str,
- funs: Vec<Fun<I, M>>,
- input: I,
- ) -> &mut Criterion<M>
- where
- I: fmt::Debug + 'static,
- {
- let benchmark = ParameterizedBenchmark::with_functions(
- funs.into_iter().map(|fun| fun.f).collect(),
- vec![input],
- );
-
- self.bench(id, benchmark)
- }
-
- /// Executes the given benchmark. Use this variant to execute benchmarks
- /// with complex configuration. This can be used to compare multiple
- /// functions, execute benchmarks with custom configuration settings and
- /// more. See the Benchmark and ParameterizedBenchmark structs for more
- /// information.
- ///
- /// ```rust
- /// # #[macro_use] extern crate criterion;
- /// # use criterion::*;
- /// # fn routine_1() {}
- /// # fn routine_2() {}
- ///
- /// fn bench(c: &mut Criterion) {
- /// // Setup (construct data, allocate memory, etc)
- /// c.bench(
- /// "routines",
- /// Benchmark::new("routine_1", |b| b.iter(|| routine_1()))
- /// .with_function("routine_2", |b| b.iter(|| routine_2()))
- /// .sample_size(50)
- /// );
- /// }
- ///
- /// criterion_group!(benches, bench);
- /// criterion_main!(benches);
- /// ```
- #[doc(hidden)]
- #[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
- pub fn bench<B: BenchmarkDefinition<M>>(
- &mut self,
- group_id: &str,
- benchmark: B,
- ) -> &mut Criterion<M> {
- benchmark.run(group_id, self);
- self
- }
-}
-
-trait DurationExt {
- fn to_nanos(&self) -> u64;
-}
-
-const NANOS_PER_SEC: u64 = 1_000_000_000;
-
-impl DurationExt for Duration {
- fn to_nanos(&self) -> u64 {
- self.as_secs() * NANOS_PER_SEC + u64::from(self.subsec_nanos())
- }
}
/// Enum representing different ways of measuring the throughput of benchmarked code.
@@ -1342,6 +1278,11 @@ pub enum Throughput {
/// an input string or `&[u8]`.
Bytes(u64),
+ /// Equivalent to Bytes, but the value will be reported in terms of
+ /// kilobytes (1000 bytes) per second instead of kibibytes (1024 bytes) per
+    /// second, megabytes instead of mebibytes, and gigabytes instead of gibibytes.
+ BytesDecimal(u64),
+
/// Measure throughput in terms of elements/second. The value should be the number of elements
/// processed by one iteration of the benchmarked code. Typically, this would be the size of a
/// collection, but could also be the number of lines of input text or the number of values to
@@ -1363,7 +1304,7 @@ pub enum AxisScale {
/// or benchmark group.
///
/// ```rust
-/// use self::criterion::{Bencher, Criterion, Benchmark, PlotConfiguration, AxisScale};
+/// use self::criterion::{Bencher, Criterion, PlotConfiguration, AxisScale};
///
/// let plot_config = PlotConfiguration::default()
/// .summary_scale(AxisScale::Logarithmic);
@@ -1388,6 +1329,7 @@ impl Default for PlotConfiguration {
}
impl PlotConfiguration {
+ #[must_use]
/// Set the axis scale (linear or logarithmic) for the summary plots. Typically, you would
/// set this to logarithmic if benchmarking over a range of inputs which scale exponentially.
/// Defaults to linear.
@@ -1460,7 +1402,7 @@ impl ActualSamplingMode {
ActualSamplingMode::Linear => {
let n = sample_count;
let met = warmup_mean_execution_time;
- let m_ns = target_time.to_nanos();
+ let m_ns = target_time.as_nanos();
// Solve: [d + 2*d + 3*d + ... + n*d] * met = m_ns
let total_runs = n * (n + 1) / 2;
let d = ((m_ns as f64 / met / total_runs as f64).ceil() as u64).max(1);
@@ -1470,25 +1412,25 @@ impl ActualSamplingMode {
let recommended_sample_size =
ActualSamplingMode::recommend_linear_sample_size(m_ns as f64, met);
let actual_time = Duration::from_nanos(expected_ns as u64);
- print!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
+ eprint!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
n, target_time, actual_time);
if recommended_sample_size != n {
- println!(
+ eprintln!(
", enable flat sampling, or reduce sample count to {}.",
recommended_sample_size
);
} else {
- println!(" or enable flat sampling.");
+ eprintln!(" or enable flat sampling.");
}
}
- (1..(n + 1) as u64).map(|a| a * d).collect::<Vec<u64>>()
+ (1..(n + 1)).map(|a| a * d).collect::<Vec<u64>>()
}
ActualSamplingMode::Flat => {
let n = sample_count;
let met = warmup_mean_execution_time;
- let m_ns = target_time.to_nanos() as f64;
+ let m_ns = target_time.as_nanos() as f64;
let time_per_sample = m_ns / (n as f64);
// This is pretty simplistic; we could do something smarter to fit into the allotted time.
let iterations_per_sample = ((time_per_sample / met).ceil() as u64).max(1);
@@ -1499,13 +1441,13 @@ impl ActualSamplingMode {
let recommended_sample_size =
ActualSamplingMode::recommend_flat_sample_size(m_ns, met);
let actual_time = Duration::from_nanos(expected_ns as u64);
- print!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
+ eprint!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
n, target_time, actual_time);
if recommended_sample_size != n {
- println!(", or reduce sample count to {}.", recommended_sample_size);
+ eprintln!(", or reduce sample count to {}.", recommended_sample_size);
} else {
- println!(".");
+ eprintln!(".");
}
}
@@ -1571,53 +1513,3 @@ pub fn runner(benches: &[&dyn Fn()]) {
}
Criterion::default().configure_from_args().final_summary();
}
-
-/// Print a warning informing users about upcoming changes to features
-#[cfg(not(feature = "html_reports"))]
-#[doc(hidden)]
-pub fn __warn_about_html_reports_feature() {
- if CARGO_CRITERION_CONNECTION.is_none() {
- println!(
- "WARNING: HTML report generation will become a non-default optional feature in Criterion.rs 0.4.0."
- );
- println!(
- "This feature is being moved to cargo-criterion \
- (https://github.com/bheisler/cargo-criterion) and will be optional in a future \
- version of Criterion.rs. To silence this warning, either switch to cargo-criterion or \
- enable the 'html_reports' feature in your Cargo.toml."
- );
- println!();
- }
-}
-
-/// Print a warning informing users about upcoming changes to features
-#[cfg(feature = "html_reports")]
-#[doc(hidden)]
-pub fn __warn_about_html_reports_feature() {
- // They have the feature enabled, so they're ready for the update.
-}
-
-/// Print a warning informing users about upcoming changes to features
-#[cfg(not(feature = "cargo_bench_support"))]
-#[doc(hidden)]
-pub fn __warn_about_cargo_bench_support_feature() {
- if CARGO_CRITERION_CONNECTION.is_none() {
- println!(
- "WARNING: In Criterion.rs 0.4.0, running criterion benchmarks outside of cargo-criterion will become a default optional feature."
- );
- println!(
- "The statistical analysis and reporting is being moved to cargo-criterion \
- (https://github.com/bheisler/cargo-criterion) and will be optional in a future \
- version of Criterion.rs. To silence this warning, either switch to cargo-criterion or \
- enable the 'cargo_bench_support' feature in your Cargo.toml."
- );
- println!();
- }
-}
-
-/// Print a warning informing users about upcoming changes to features
-#[cfg(feature = "cargo_bench_support")]
-#[doc(hidden)]
-pub fn __warn_about_cargo_bench_support_feature() {
- // They have the feature enabled, so they're ready for the update.
-}
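
The new Throughput::BytesDecimal variant introduced in this file plugs into the existing throughput API on benchmark groups. A minimal usage sketch, assuming the usual BenchmarkGroup::throughput entry point (the group and function names are illustrative, not part of the diff):

    use criterion::{criterion_group, criterion_main, Criterion, Throughput};

    fn bench_decode(c: &mut Criterion) {
        let mut group = c.benchmark_group("decode");
        // Report throughput in KB/MB/GB (powers of 1000) instead of KiB/MiB/GiB.
        group.throughput(Throughput::BytesDecimal(1_000_000));
        group.bench_function("one_megabyte", |b| {
            b.iter(|| {
                // decode 1,000,000 bytes of input here
            })
        });
        group.finish();
    }

    criterion_group!(benches, bench_decode);
    criterion_main!(benches);
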
diff --git a/vendor/criterion/src/macros.rs b/vendor/criterion/src/macros.rs
index 85d8c5956..df7a44d9a 100755..100644
--- a/vendor/criterion/src/macros.rs
+++ b/vendor/criterion/src/macros.rs
@@ -120,9 +120,6 @@ macro_rules! criterion_group {
macro_rules! criterion_main {
( $( $group:path ),+ $(,)* ) => {
fn main() {
- $crate::__warn_about_html_reports_feature();
- $crate::__warn_about_cargo_bench_support_feature();
-
$(
$group();
)+
diff --git a/vendor/criterion/src/macros_private.rs b/vendor/criterion/src/macros_private.rs
index ebad20465..26203d1da 100755..100644
--- a/vendor/criterion/src/macros_private.rs
+++ b/vendor/criterion/src/macros_private.rs
@@ -41,7 +41,7 @@ macro_rules! error {
macro_rules! info {
($($arg:tt)*) => (
if $crate::debug_enabled() {
- println!("Criterion.rs DEBUG: {}", &format!($($arg)*));
+ println!("Criterion.rs DEBUG: {}", &format!($($arg)*))
}
)
}
diff --git a/vendor/criterion/src/measurement.rs b/vendor/criterion/src/measurement.rs
index 3e1e24c4b..63719753d 100755..100644
--- a/vendor/criterion/src/measurement.rs
+++ b/vendor/criterion/src/measurement.rs
@@ -4,7 +4,6 @@
//! measurement.
use crate::format::short;
-use crate::DurationExt;
use crate::Throughput;
use std::time::{Duration, Instant};
@@ -125,6 +124,31 @@ impl DurationFormatter {
unit
}
+ fn bytes_per_second_decimal(
+ &self,
+ bytes: f64,
+ typical: f64,
+ values: &mut [f64],
+ ) -> &'static str {
+ let bytes_per_second = bytes * (1e9 / typical);
+ let (denominator, unit) = if bytes_per_second < 1000.0 {
+ (1.0, " B/s")
+ } else if bytes_per_second < 1000.0 * 1000.0 {
+ (1000.0, "KB/s")
+ } else if bytes_per_second < 1000.0 * 1000.0 * 1000.0 {
+ (1000.0 * 1000.0, "MB/s")
+ } else {
+ (1000.0 * 1000.0 * 1000.0, "GB/s")
+ };
+
+ for val in values {
+ let bytes_per_second = bytes * (1e9 / *val);
+ *val = bytes_per_second / denominator;
+ }
+
+ unit
+ }
+
fn elements_per_second(&self, elems: f64, typical: f64, values: &mut [f64]) -> &'static str {
let elems_per_second = elems * (1e9 / typical);
let (denominator, unit) = if elems_per_second < 1000.0 {
@@ -154,6 +178,9 @@ impl ValueFormatter for DurationFormatter {
) -> &'static str {
match *throughput {
Throughput::Bytes(bytes) => self.bytes_per_second(bytes as f64, typical, values),
+ Throughput::BytesDecimal(bytes) => {
+ self.bytes_per_second_decimal(bytes as f64, typical, values)
+ }
Throughput::Elements(elems) => self.elements_per_second(elems as f64, typical, values),
}
}
@@ -204,7 +231,7 @@ impl Measurement for WallTime {
Duration::from_secs(0)
}
fn to_f64(&self, val: &Self::Value) -> f64 {
- val.to_nanos() as f64
+ val.as_nanos() as f64
}
fn formatter(&self) -> &dyn ValueFormatter {
&DurationFormatter
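
The decimal formatter added above keeps the bytes * (1e9 / typical) conversion, where typical is the typical per-iteration time in nanoseconds, and only swaps the unit boundaries to powers of 1000. A standalone sketch of that scaling to make the boundaries concrete (decimal_unit is a hypothetical name, not part of the crate):

    // Mirrors bytes_per_second_decimal: choose a decimal unit for a bytes/second rate.
    fn decimal_unit(bytes: f64, typical_ns: f64) -> (f64, &'static str) {
        let bytes_per_second = bytes * (1e9 / typical_ns);
        if bytes_per_second < 1e3 {
            (bytes_per_second, "B/s")
        } else if bytes_per_second < 1e6 {
            (bytes_per_second / 1e3, "KB/s")
        } else if bytes_per_second < 1e9 {
            (bytes_per_second / 1e6, "MB/s")
        } else {
            (bytes_per_second / 1e9, "GB/s")
        }
    }

    fn main() {
        // 1 MiB (1,048,576 bytes) at a typical time of 1 ms is about 1.05e9 bytes/s,
        // so the decimal path reports roughly 1.05 GB/s.
        let (value, unit) = decimal_unit(1_048_576.0, 1_000_000.0);
        println!("{:.2} {}", value, unit);
    }
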
diff --git a/vendor/criterion/src/plot/gnuplot_backend/distributions.rs b/vendor/criterion/src/plot/gnuplot_backend/distributions.rs
index 1ccbc1a25..1ccbc1a25 100755..100644
--- a/vendor/criterion/src/plot/gnuplot_backend/distributions.rs
+++ b/vendor/criterion/src/plot/gnuplot_backend/distributions.rs
diff --git a/vendor/criterion/src/plot/gnuplot_backend/iteration_times.rs b/vendor/criterion/src/plot/gnuplot_backend/iteration_times.rs
index 4db4de8d5..4db4de8d5 100755..100644
--- a/vendor/criterion/src/plot/gnuplot_backend/iteration_times.rs
+++ b/vendor/criterion/src/plot/gnuplot_backend/iteration_times.rs
diff --git a/vendor/criterion/src/plot/gnuplot_backend/mod.rs b/vendor/criterion/src/plot/gnuplot_backend/mod.rs
index 987e324c4..27cc48be3 100755..100644
--- a/vendor/criterion/src/plot/gnuplot_backend/mod.rs
+++ b/vendor/criterion/src/plot/gnuplot_backend/mod.rs
@@ -248,7 +248,7 @@ impl Plotter for Gnuplot {
info!(
"Waiting for {} gnuplot processes took {}",
child_count,
- format::time(crate::DurationExt::to_nanos(elapsed) as f64)
+ format::time(elapsed.as_nanos() as f64)
);
}
}
diff --git a/vendor/criterion/src/plot/gnuplot_backend/pdf.rs b/vendor/criterion/src/plot/gnuplot_backend/pdf.rs
index a0b85c7aa..a0b85c7aa 100755..100644
--- a/vendor/criterion/src/plot/gnuplot_backend/pdf.rs
+++ b/vendor/criterion/src/plot/gnuplot_backend/pdf.rs
diff --git a/vendor/criterion/src/plot/gnuplot_backend/regression.rs b/vendor/criterion/src/plot/gnuplot_backend/regression.rs
index 82de357c4..82de357c4 100755..100644
--- a/vendor/criterion/src/plot/gnuplot_backend/regression.rs
+++ b/vendor/criterion/src/plot/gnuplot_backend/regression.rs
diff --git a/vendor/criterion/src/plot/gnuplot_backend/summary.rs b/vendor/criterion/src/plot/gnuplot_backend/summary.rs
index d57a17493..e5d2ab6be 100755..100644
--- a/vendor/criterion/src/plot/gnuplot_backend/summary.rs
+++ b/vendor/criterion/src/plot/gnuplot_backend/summary.rs
@@ -67,7 +67,7 @@ pub fn line_comparison(
let max = all_curves
.iter()
- .map(|&&(_, ref data)| Sample::new(data).mean())
+ .map(|&(_, data)| Sample::new(data).mean())
.fold(::std::f64::NAN, f64::max);
let mut dummy = [1.0];
@@ -130,11 +130,11 @@ pub fn violin(
) -> Child {
let path = PathBuf::from(&path);
let all_curves_vec = all_curves.iter().rev().cloned().collect::<Vec<_>>();
- let all_curves: &[&(&BenchmarkId, Vec<f64>)] = &*all_curves_vec;
+ let all_curves: &[&(&BenchmarkId, Vec<f64>)] = &all_curves_vec;
let kdes = all_curves
.iter()
- .map(|&&(_, ref sample)| {
+ .map(|&(_, sample)| {
let (x, mut y) = kde::sweep(Sample::new(sample), KDE_POINTS, None);
let y_max = Sample::new(&y).max();
for y in y.iter_mut() {
@@ -144,10 +144,7 @@ pub fn violin(
(x, y)
})
.collect::<Vec<_>>();
- let mut xs = kdes
- .iter()
- .flat_map(|&(ref x, _)| x.iter())
- .filter(|&&x| x > 0.);
+ let mut xs = kdes.iter().flat_map(|(x, _)| x.iter()).filter(|&&x| x > 0.);
let (mut min, mut max) = {
let &first = xs.next().unwrap();
(first, first)
@@ -174,7 +171,7 @@ pub fn violin(
.configure(Axis::BottomX, |a| {
a.configure(Grid::Major, |g| g.show())
.configure(Grid::Minor, |g| g.hide())
- .set(Range::Limits(0., max as f64 * one[0]))
+ .set(Range::Limits(0., max * one[0]))
.set(Label(format!("Average time ({})", unit)))
.set(axis_scale.to_gnuplot())
})
@@ -190,7 +187,7 @@ pub fn violin(
});
let mut is_first = true;
- for (i, &(ref x, ref y)) in kdes.iter().enumerate() {
+ for (i, (x, y)) in kdes.iter().enumerate() {
let i = i as f64 + 0.5;
let y1: Vec<_> = y.iter().map(|&y| i + y * 0.45).collect();
let y2: Vec<_> = y.iter().map(|&y| i - y * 0.45).collect();
diff --git a/vendor/criterion/src/plot/gnuplot_backend/t_test.rs b/vendor/criterion/src/plot/gnuplot_backend/t_test.rs
index 47b4a110e..47b4a110e 100755..100644
--- a/vendor/criterion/src/plot/gnuplot_backend/t_test.rs
+++ b/vendor/criterion/src/plot/gnuplot_backend/t_test.rs
diff --git a/vendor/criterion/src/plot/mod.rs b/vendor/criterion/src/plot/mod.rs
index cb836a394..4bce39468 100755..100644
--- a/vendor/criterion/src/plot/mod.rs
+++ b/vendor/criterion/src/plot/mod.rs
@@ -1,7 +1,9 @@
mod gnuplot_backend;
+#[cfg(feature = "plotters")]
mod plotters_backend;
pub(crate) use gnuplot_backend::Gnuplot;
+#[cfg(feature = "plotters")]
pub(crate) use plotters_backend::PlottersBackend;
use crate::estimate::Statistic;
diff --git a/vendor/criterion/src/plot/plotters_backend/distributions.rs b/vendor/criterion/src/plot/plotters_backend/distributions.rs
index 82f9eae59..8de114058 100755..100644
--- a/vendor/criterion/src/plot/plotters_backend/distributions.rs
+++ b/vendor/criterion/src/plot/plotters_backend/distributions.rs
@@ -85,11 +85,11 @@ fn abs_distribution(
chart
.draw_series(LineSeries::new(
kde_xs.iter().zip(ys.iter()).map(|(&x, &y)| (x, y)),
- &DARK_BLUE,
+ DARK_BLUE,
))
.unwrap()
.label("Bootstrap distribution")
- .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], &DARK_BLUE));
+ .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], DARK_BLUE));
chart
.draw_series(AreaSeries::new(
@@ -115,7 +115,7 @@ fn abs_distribution(
)))
.unwrap()
.label("Point estimate")
- .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], &DARK_BLUE));
+ .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], DARK_BLUE));
chart
.configure_series_labels()
@@ -240,11 +240,11 @@ fn rel_distribution(
chart
.draw_series(LineSeries::new(
xs.iter().zip(ys.iter()).map(|(x, y)| (*x, *y)),
- &DARK_BLUE,
+ DARK_BLUE,
))
.unwrap()
.label("Bootstrap distribution")
- .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], &DARK_BLUE));
+ .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], DARK_BLUE));
chart
.draw_series(AreaSeries::new(
@@ -269,7 +269,7 @@ fn rel_distribution(
)))
.unwrap()
.label("Point estimate")
- .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], &DARK_BLUE));
+ .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], DARK_BLUE));
chart
.draw_series(std::iter::once(Rectangle::new(
diff --git a/vendor/criterion/src/plot/plotters_backend/iteration_times.rs b/vendor/criterion/src/plot/plotters_backend/iteration_times.rs
index 4d0a22a96..3ac4f1cc7 100755..100644
--- a/vendor/criterion/src/plot/plotters_backend/iteration_times.rs
+++ b/vendor/criterion/src/plot/plotters_backend/iteration_times.rs
@@ -37,7 +37,7 @@ pub(crate) fn iteration_times_figure(
.configure_mesh()
.y_desc(format!("Average Iteration Time ({})", unit))
.x_label_formatter(&|x| pretty_print_float(*x, true))
- .light_line_style(&TRANSPARENT)
+ .light_line_style(TRANSPARENT)
.draw()
.unwrap();
@@ -104,7 +104,7 @@ pub(crate) fn iteration_times_comparison_figure(
.configure_mesh()
.y_desc(format!("Average Iteration Time ({})", unit))
.x_label_formatter(&|x| pretty_print_float(*x, true))
- .light_line_style(&TRANSPARENT)
+ .light_line_style(TRANSPARENT)
.draw()
.unwrap();
diff --git a/vendor/criterion/src/plot/plotters_backend/mod.rs b/vendor/criterion/src/plot/plotters_backend/mod.rs
index 4cd1b183d..4cd1b183d 100755..100644
--- a/vendor/criterion/src/plot/plotters_backend/mod.rs
+++ b/vendor/criterion/src/plot/plotters_backend/mod.rs
diff --git a/vendor/criterion/src/plot/plotters_backend/pdf.rs b/vendor/criterion/src/plot/plotters_backend/pdf.rs
index 333893fc2..e55de4e6e 100755..100644
--- a/vendor/criterion/src/plot/plotters_backend/pdf.rs
+++ b/vendor/criterion/src/plot/plotters_backend/pdf.rs
@@ -38,7 +38,7 @@ pub(crate) fn pdf_comparison_figure(
let y_range = data::fitting_range(base_ys.iter().chain(ys.iter()));
let size = size.unwrap_or(SIZE);
- let root_area = SVGBackend::new(&path, (size.0 as u32, size.1 as u32)).into_drawing_area();
+ let root_area = SVGBackend::new(&path, (size.0, size.1)).into_drawing_area();
let mut cb = ChartBuilder::on(&root_area);
@@ -93,7 +93,7 @@ pub(crate) fn pdf_comparison_figure(
)))
.unwrap()
.label("Base Mean")
- .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], &DARK_RED));
+ .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], DARK_RED));
chart
.draw_series(std::iter::once(PathElement::new(
@@ -102,7 +102,7 @@ pub(crate) fn pdf_comparison_figure(
)))
.unwrap()
.label("New Mean")
- .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], &DARK_BLUE));
+ .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], DARK_BLUE));
if title.is_some() {
chart.configure_series_labels().draw().unwrap();
@@ -132,7 +132,7 @@ pub(crate) fn pdf_small(
let path = context.report_path(id, "pdf_small.svg");
let size = size.unwrap_or(SIZE);
- let root_area = SVGBackend::new(&path, (size.0 as u32, size.1 as u32)).into_drawing_area();
+ let root_area = SVGBackend::new(&path, (size.0, size.1)).into_drawing_area();
let mut chart = ChartBuilder::on(&root_area)
.margin((5).percent())
@@ -208,7 +208,7 @@ pub(crate) fn pdf(
let xs_ = Sample::new(&xs);
let size = size.unwrap_or(SIZE);
- let root_area = SVGBackend::new(&path, (size.0 as u32, size.1 as u32)).into_drawing_area();
+ let root_area = SVGBackend::new(&path, (size.0, size.1)).into_drawing_area();
let range = data::fitting_range(ys.iter());
@@ -255,18 +255,18 @@ pub(crate) fn pdf(
chart
.draw_series(std::iter::once(PathElement::new(
vec![(mean, 0.0), (mean, max_iters)],
- &DARK_BLUE,
+ DARK_BLUE,
)))
.unwrap()
.label("Mean")
- .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], &DARK_BLUE));
+ .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], DARK_BLUE));
chart
.draw_series(vec![
- PathElement::new(vec![(lomt, 0.0), (lomt, max_iters)], &DARK_ORANGE),
- PathElement::new(vec![(himt, 0.0), (himt, max_iters)], &DARK_ORANGE),
- PathElement::new(vec![(lost, 0.0), (lost, max_iters)], &DARK_RED),
- PathElement::new(vec![(hist, 0.0), (hist, max_iters)], &DARK_RED),
+ PathElement::new(vec![(lomt, 0.0), (lomt, max_iters)], DARK_ORANGE),
+ PathElement::new(vec![(himt, 0.0), (himt, max_iters)], DARK_ORANGE),
+ PathElement::new(vec![(lost, 0.0), (lost, max_iters)], DARK_RED),
+ PathElement::new(vec![(hist, 0.0), (hist, max_iters)], DARK_RED),
])
.unwrap();
use crate::stats::univariate::outliers::tukey::Label;
diff --git a/vendor/criterion/src/plot/plotters_backend/regression.rs b/vendor/criterion/src/plot/plotters_backend/regression.rs
index c944dbbe6..1a9adece0 100755..100644
--- a/vendor/criterion/src/plot/plotters_backend/regression.rs
+++ b/vendor/criterion/src/plot/plotters_backend/regression.rs
@@ -61,7 +61,7 @@ pub(crate) fn regression_figure(
.x_desc(x_label)
.y_desc(format!("Total sample time ({})", unit))
.x_label_formatter(&|x| pretty_print_float(x * x_scale, true))
- .light_line_style(&TRANSPARENT)
+ .light_line_style(TRANSPARENT)
.draw()
.unwrap();
@@ -79,7 +79,7 @@ pub(crate) fn regression_figure(
chart
.draw_series(std::iter::once(PathElement::new(
vec![(0.0, 0.0), (max_iters, point)],
- &DARK_BLUE,
+ DARK_BLUE,
)))
.unwrap()
.label("Linear regression")
@@ -187,13 +187,13 @@ pub(crate) fn regression_comparison_figure(
.x_desc(x_label)
.y_desc(format!("Total sample time ({})", unit))
.x_label_formatter(&|x| pretty_print_float(x * x_scale, true))
- .light_line_style(&TRANSPARENT)
+ .light_line_style(TRANSPARENT)
.draw()
.unwrap();
chart
.draw_series(vec![
- PathElement::new(vec![(0.0, 0.0), (max_iters, base_point)], &DARK_RED).into_dyn(),
+ PathElement::new(vec![(0.0, 0.0), (max_iters, base_point)], DARK_RED).into_dyn(),
Polygon::new(
vec![(0.0, 0.0), (max_iters, base_lb), (max_iters, base_ub)],
DARK_RED.mix(0.25).filled(),
@@ -208,7 +208,7 @@ pub(crate) fn regression_comparison_figure(
chart
.draw_series(vec![
- PathElement::new(vec![(0.0, 0.0), (max_iters, point)], &DARK_BLUE).into_dyn(),
+ PathElement::new(vec![(0.0, 0.0), (max_iters, point)], DARK_BLUE).into_dyn(),
Polygon::new(
vec![(0.0, 0.0), (max_iters, lb), (max_iters, ub)],
DARK_BLUE.mix(0.25).filled(),
diff --git a/vendor/criterion/src/plot/plotters_backend/summary.rs b/vendor/criterion/src/plot/plotters_backend/summary.rs
index a5a410d6e..0ebb851e2 100755..100644
--- a/vendor/criterion/src/plot/plotters_backend/summary.rs
+++ b/vendor/criterion/src/plot/plotters_backend/summary.rs
@@ -120,7 +120,7 @@ fn line_comparison_series_data<'a>(
) -> (&'static str, Vec<(Option<&'a String>, Vec<f64>, Vec<f64>)>) {
let max = all_curves
.iter()
- .map(|&&(_, ref data)| Sample::new(data).mean())
+ .map(|&(_, data)| Sample::new(data).mean())
.fold(::std::f64::NAN, f64::max);
let mut dummy = [1.0];
@@ -159,7 +159,7 @@ pub fn violin(
axis_scale: AxisScale,
) {
let all_curves_vec = all_curves.iter().rev().cloned().collect::<Vec<_>>();
- let all_curves: &[&(&BenchmarkId, Vec<f64>)] = &*all_curves_vec;
+ let all_curves: &[&(&BenchmarkId, Vec<f64>)] = &all_curves_vec;
let mut kdes = all_curves
.iter()
@@ -176,7 +176,7 @@ pub fn violin(
let mut xs = kdes
.iter()
- .flat_map(|&(_, ref x, _)| x.iter())
+ .flat_map(|(_, x, _)| x.iter())
.filter(|&&x| x > 0.);
let (mut min, mut max) = {
let &first = xs.next().unwrap();
@@ -250,7 +250,7 @@ fn draw_violin_figure<XR: AsRangedCoord<Value = f64>, YR: AsRangedCoord<Value =
.draw_series(AreaSeries::new(
x.iter().zip(y.iter()).map(|(x, y)| (*x, base + *y / 2.0)),
base,
- &DARK_BLUE,
+ DARK_BLUE,
))
.unwrap();
@@ -258,7 +258,7 @@ fn draw_violin_figure<XR: AsRangedCoord<Value = f64>, YR: AsRangedCoord<Value =
.draw_series(AreaSeries::new(
x.iter().zip(y.iter()).map(|(x, y)| (*x, base - *y / 2.0)),
base,
- &DARK_BLUE,
+ DARK_BLUE,
))
.unwrap();
}
diff --git a/vendor/criterion/src/plot/plotters_backend/t_test.rs b/vendor/criterion/src/plot/plotters_backend/t_test.rs
index d9c15081f..c575c2ff6 100755..100644
--- a/vendor/criterion/src/plot/plotters_backend/t_test.rs
+++ b/vendor/criterion/src/plot/plotters_backend/t_test.rs
@@ -38,7 +38,7 @@ pub(crate) fn t_test(
.draw_series(AreaSeries::new(
xs.iter().zip(ys.iter()).map(|(x, y)| (*x, *y)),
0.0,
- &DARK_BLUE.mix(0.25),
+ DARK_BLUE.mix(0.25),
))
.unwrap()
.label("t distribution")
@@ -53,7 +53,7 @@ pub(crate) fn t_test(
)))
.unwrap()
.label("t statistic")
- .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], &DARK_BLUE));
+ .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], DARK_BLUE));
chart.configure_series_labels().draw().unwrap();
}
diff --git a/vendor/criterion/src/profiler.rs b/vendor/criterion/src/profiler.rs
index 906af5f03..906af5f03 100755..100644
--- a/vendor/criterion/src/profiler.rs
+++ b/vendor/criterion/src/profiler.rs
diff --git a/vendor/criterion/src/report.rs b/vendor/criterion/src/report.rs
index 9032a38e7..c5448fdbb 100755..100644
--- a/vendor/criterion/src/report.rs
+++ b/vendor/criterion/src/report.rs
@@ -1,5 +1,7 @@
+#[cfg(feature = "csv_output")]
+use crate::csv_report::FileCsvReport;
+use crate::stats::bivariate::regression::Slope;
use crate::stats::univariate::outliers::tukey::LabeledSample;
-use crate::{csv_report::FileCsvReport, stats::bivariate::regression::Slope};
use crate::{html::Html, stats::bivariate::Data};
use crate::estimate::{ChangeDistributions, ChangeEstimates, Distributions, Estimate, Estimates};
@@ -8,11 +10,11 @@ use crate::measurement::ValueFormatter;
use crate::stats::univariate::Sample;
use crate::stats::Distribution;
use crate::{PlotConfiguration, Throughput};
-use std::cell::Cell;
+use anes::{Attribute, ClearLine, Color, ResetAttributes, SetAttribute, SetForegroundColor};
use std::cmp;
use std::collections::HashSet;
use std::fmt;
-use std::io::stdout;
+use std::io::stderr;
use std::io::Write;
use std::path::{Path, PathBuf};
@@ -46,6 +48,7 @@ impl<'a> MeasurementData<'a> {
self.data.x()
}
+ #[cfg(feature = "csv_output")]
pub fn sample_times(&self) -> &Sample<f64> {
self.data.y()
}
@@ -113,9 +116,9 @@ impl BenchmarkId {
throughput: Option<Throughput>,
) -> BenchmarkId {
let full_id = match (&function_id, &value_str) {
- (&Some(ref func), &Some(ref val)) => format!("{}/{}/{}", group_id, func, val),
- (&Some(ref func), &None) => format!("{}/{}", group_id, func),
- (&None, &Some(ref val)) => format!("{}/{}", group_id, val),
+ (Some(func), Some(val)) => format!("{}/{}/{}", group_id, func, val),
+ (Some(func), &None) => format!("{}/{}", group_id, func),
+ (&None, Some(val)) => format!("{}/{}", group_id, val),
(&None, &None) => group_id.clone(),
};
@@ -126,18 +129,18 @@ impl BenchmarkId {
}
let directory_name = match (&function_id, &value_str) {
- (&Some(ref func), &Some(ref val)) => format!(
+ (Some(func), Some(val)) => format!(
"{}/{}/{}",
make_filename_safe(&group_id),
make_filename_safe(func),
make_filename_safe(val)
),
- (&Some(ref func), &None) => format!(
+ (Some(func), &None) => format!(
"{}/{}",
make_filename_safe(&group_id),
make_filename_safe(func)
),
- (&None, &Some(ref val)) => format!(
+ (&None, Some(val)) => format!(
"{}/{}",
make_filename_safe(&group_id),
make_filename_safe(val)
@@ -170,7 +173,9 @@ impl BenchmarkId {
pub fn as_number(&self) -> Option<f64> {
match self.throughput {
- Some(Throughput::Bytes(n)) | Some(Throughput::Elements(n)) => Some(n as f64),
+ Some(Throughput::Bytes(n))
+ | Some(Throughput::Elements(n))
+ | Some(Throughput::BytesDecimal(n)) => Some(n as f64),
None => self
.value_str
.as_ref()
@@ -181,6 +186,7 @@ impl BenchmarkId {
pub fn value_type(&self) -> Option<ValueType> {
match self.throughput {
Some(Throughput::Bytes(_)) => Some(ValueType::Bytes),
+ Some(Throughput::BytesDecimal(_)) => Some(ValueType::Bytes),
Some(Throughput::Elements(_)) => Some(ValueType::Elements),
None => self
.value_str
@@ -304,9 +310,7 @@ pub(crate) struct Reports {
pub(crate) bencher_enabled: bool,
pub(crate) bencher: BencherReport,
pub(crate) csv_enabled: bool,
- pub(crate) csv: FileCsvReport,
- pub(crate) html_enabled: bool,
- pub(crate) html: Html,
+ pub(crate) html: Option<Html>,
}
macro_rules! reports_impl {
(fn $name:ident(&self, $($argn:ident: $argt:ty),*)) => {
@@ -317,11 +321,12 @@ macro_rules! reports_impl {
if self.bencher_enabled {
self.bencher.$name($($argn),*);
}
+ #[cfg(feature = "csv_output")]
if self.csv_enabled {
- self.csv.$name($($argn),*);
+ FileCsvReport.$name($($argn),*);
}
- if self.html_enabled {
- self.html.$name($($argn),*);
+ if let Some(reporter) = &self.html {
+ reporter.$name($($argn),*);
}
}
};
@@ -363,35 +368,34 @@ impl Report for Reports {
reports_impl!(fn group_separator(&self, ));
}
+#[derive(Debug, Clone, Copy, Eq, PartialEq)]
+pub(crate) enum CliVerbosity {
+ Quiet,
+ Normal,
+ Verbose,
+}
+
pub(crate) struct CliReport {
pub enable_text_overwrite: bool,
pub enable_text_coloring: bool,
- pub verbose: bool,
-
- last_line_len: Cell<usize>,
+ pub verbosity: CliVerbosity,
}
impl CliReport {
pub fn new(
enable_text_overwrite: bool,
enable_text_coloring: bool,
- verbose: bool,
+ verbosity: CliVerbosity,
) -> CliReport {
CliReport {
enable_text_overwrite,
enable_text_coloring,
- verbose,
-
- last_line_len: Cell::new(0),
+ verbosity,
}
}
fn text_overwrite(&self) {
if self.enable_text_overwrite {
- print!("\r");
- for _ in 0..self.last_line_len.get() {
- print!(" ");
- }
- print!("\r");
+ eprint!("\r{}", ClearLine::All)
}
}
@@ -399,41 +403,36 @@ impl CliReport {
#[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))]
fn print_overwritable(&self, s: String) {
if self.enable_text_overwrite {
- self.last_line_len.set(s.len());
- print!("{}", s);
- stdout().flush().unwrap();
+ eprint!("{}", s);
+ stderr().flush().unwrap();
} else {
- println!("{}", s);
+ eprintln!("{}", s);
}
}
- fn green(&self, s: String) -> String {
+ fn with_color(&self, color: Color, s: &str) -> String {
if self.enable_text_coloring {
- format!("\x1B[32m{}\x1B[39m", s)
+ format!("{}{}{}", SetForegroundColor(color), s, ResetAttributes)
} else {
- s
+ String::from(s)
}
}
- fn yellow(&self, s: String) -> String {
- if self.enable_text_coloring {
- format!("\x1B[33m{}\x1B[39m", s)
- } else {
- s
- }
+ fn green(&self, s: &str) -> String {
+ self.with_color(Color::DarkGreen, s)
}
- fn red(&self, s: String) -> String {
- if self.enable_text_coloring {
- format!("\x1B[31m{}\x1B[39m", s)
- } else {
- s
- }
+ fn yellow(&self, s: &str) -> String {
+ self.with_color(Color::DarkYellow, s)
+ }
+
+ fn red(&self, s: &str) -> String {
+ self.with_color(Color::DarkRed, s)
}
fn bold(&self, s: String) -> String {
if self.enable_text_coloring {
- format!("\x1B[1m{}\x1B[22m", s)
+ format!("{}{}{}", SetAttribute(Attribute::Bold), s, ResetAttributes)
} else {
s
}
@@ -441,7 +440,7 @@ impl CliReport {
fn faint(&self, s: String) -> String {
if self.enable_text_coloring {
- format!("\x1B[2m{}\x1B[22m", s)
+ format!("{}{}{}", SetAttribute(Attribute::Faint), s, ResetAttributes)
} else {
s
}
@@ -460,7 +459,7 @@ impl CliReport {
println!(
"{}",
- self.yellow(format!(
+ self.yellow(&format!(
"Found {} outliers among {} measurements ({:.2}%)",
noutliers,
sample_size,
@@ -529,7 +528,7 @@ impl Report for CliReport {
iter_count: u64,
) {
self.text_overwrite();
- let iter_string = if self.verbose {
+ let iter_string = if matches!(self.verbosity, CliVerbosity::Verbose) {
format!("{} iterations", iter_count)
} else {
format::iter_count(iter_count)
@@ -559,14 +558,14 @@ impl Report for CliReport {
let mut id = id.as_title().to_owned();
if id.len() > 23 {
- println!("{}", self.green(id.clone()));
+ println!("{}", self.green(&id));
id.clear();
}
let id_len = id.len();
println!(
"{}{}time: [{} {} {}]",
- self.green(id),
+ self.green(&id),
" ".repeat(24 - id_len),
self.faint(
formatter.format_value(typical_estimate.confidence_interval.lower_bound)
@@ -594,98 +593,103 @@ impl Report for CliReport {
)
}
- if let Some(ref comp) = meas.comparison {
- let different_mean = comp.p_value < comp.significance_threshold;
- let mean_est = &comp.relative_estimates.mean;
- let point_estimate = mean_est.point_estimate;
- let mut point_estimate_str = format::change(point_estimate, true);
- // The change in throughput is related to the change in timing. Reducing the timing by
- // 50% increases the throughput by 100%.
- let to_thrpt_estimate = |ratio: f64| 1.0 / (1.0 + ratio) - 1.0;
- let mut thrpt_point_estimate_str =
- format::change(to_thrpt_estimate(point_estimate), true);
- let explanation_str: String;
-
- if !different_mean {
- explanation_str = "No change in performance detected.".to_owned();
- } else {
- let comparison = compare_to_threshold(mean_est, comp.noise_threshold);
- match comparison {
- ComparisonResult::Improved => {
- point_estimate_str = self.green(self.bold(point_estimate_str));
- thrpt_point_estimate_str = self.green(self.bold(thrpt_point_estimate_str));
- explanation_str =
- format!("Performance has {}.", self.green("improved".to_owned()));
- }
- ComparisonResult::Regressed => {
- point_estimate_str = self.red(self.bold(point_estimate_str));
- thrpt_point_estimate_str = self.red(self.bold(thrpt_point_estimate_str));
- explanation_str =
- format!("Performance has {}.", self.red("regressed".to_owned()));
- }
- ComparisonResult::NonSignificant => {
- explanation_str = "Change within noise threshold.".to_owned();
+ if !matches!(self.verbosity, CliVerbosity::Quiet) {
+ if let Some(ref comp) = meas.comparison {
+ let different_mean = comp.p_value < comp.significance_threshold;
+ let mean_est = &comp.relative_estimates.mean;
+ let point_estimate = mean_est.point_estimate;
+ let mut point_estimate_str = format::change(point_estimate, true);
+ // The change in throughput is related to the change in timing. Reducing the timing by
+ // 50% increases the throughput by 100%.
+ let to_thrpt_estimate = |ratio: f64| 1.0 / (1.0 + ratio) - 1.0;
+ let mut thrpt_point_estimate_str =
+ format::change(to_thrpt_estimate(point_estimate), true);
+ let explanation_str: String;
+
+ if !different_mean {
+ explanation_str = "No change in performance detected.".to_owned();
+ } else {
+ let comparison = compare_to_threshold(mean_est, comp.noise_threshold);
+ match comparison {
+ ComparisonResult::Improved => {
+ point_estimate_str = self.green(&self.bold(point_estimate_str));
+ thrpt_point_estimate_str =
+ self.green(&self.bold(thrpt_point_estimate_str));
+ explanation_str =
+ format!("Performance has {}.", self.green("improved"));
+ }
+ ComparisonResult::Regressed => {
+ point_estimate_str = self.red(&self.bold(point_estimate_str));
+ thrpt_point_estimate_str =
+ self.red(&self.bold(thrpt_point_estimate_str));
+ explanation_str = format!("Performance has {}.", self.red("regressed"));
+ }
+ ComparisonResult::NonSignificant => {
+ explanation_str = "Change within noise threshold.".to_owned();
+ }
}
}
- }
- if meas.throughput.is_some() {
- println!("{}change:", " ".repeat(17));
+ if meas.throughput.is_some() {
+ println!("{}change:", " ".repeat(17));
+
+ println!(
+ "{}time: [{} {} {}] (p = {:.2} {} {:.2})",
+ " ".repeat(24),
+ self.faint(format::change(
+ mean_est.confidence_interval.lower_bound,
+ true
+ )),
+ point_estimate_str,
+ self.faint(format::change(
+ mean_est.confidence_interval.upper_bound,
+ true
+ )),
+ comp.p_value,
+ if different_mean { "<" } else { ">" },
+ comp.significance_threshold
+ );
+ println!(
+ "{}thrpt: [{} {} {}]",
+ " ".repeat(24),
+ self.faint(format::change(
+ to_thrpt_estimate(mean_est.confidence_interval.upper_bound),
+ true
+ )),
+ thrpt_point_estimate_str,
+ self.faint(format::change(
+ to_thrpt_estimate(mean_est.confidence_interval.lower_bound),
+ true
+ )),
+ );
+ } else {
+ println!(
+ "{}change: [{} {} {}] (p = {:.2} {} {:.2})",
+ " ".repeat(24),
+ self.faint(format::change(
+ mean_est.confidence_interval.lower_bound,
+ true
+ )),
+ point_estimate_str,
+ self.faint(format::change(
+ mean_est.confidence_interval.upper_bound,
+ true
+ )),
+ comp.p_value,
+ if different_mean { "<" } else { ">" },
+ comp.significance_threshold
+ );
+ }
- println!(
- "{}time: [{} {} {}] (p = {:.2} {} {:.2})",
- " ".repeat(24),
- self.faint(format::change(
- mean_est.confidence_interval.lower_bound,
- true
- )),
- point_estimate_str,
- self.faint(format::change(
- mean_est.confidence_interval.upper_bound,
- true
- )),
- comp.p_value,
- if different_mean { "<" } else { ">" },
- comp.significance_threshold
- );
- println!(
- "{}thrpt: [{} {} {}]",
- " ".repeat(24),
- self.faint(format::change(
- to_thrpt_estimate(mean_est.confidence_interval.upper_bound),
- true
- )),
- thrpt_point_estimate_str,
- self.faint(format::change(
- to_thrpt_estimate(mean_est.confidence_interval.lower_bound),
- true
- )),
- );
- } else {
- println!(
- "{}change: [{} {} {}] (p = {:.2} {} {:.2})",
- " ".repeat(24),
- self.faint(format::change(
- mean_est.confidence_interval.lower_bound,
- true
- )),
- point_estimate_str,
- self.faint(format::change(
- mean_est.confidence_interval.upper_bound,
- true
- )),
- comp.p_value,
- if different_mean { "<" } else { ">" },
- comp.significance_threshold
- );
+ println!("{}{}", " ".repeat(24), explanation_str);
}
-
- println!("{}{}", " ".repeat(24), explanation_str);
}
- self.outliers(&meas.avg_times);
+ if !matches!(self.verbosity, CliVerbosity::Quiet) {
+ self.outliers(&meas.avg_times);
+ }
- if self.verbose {
+ if matches!(self.verbosity, CliVerbosity::Verbose) {
let format_short_estimate = |estimate: &Estimate| -> String {
format!(
"[{} {}]",
diff --git a/vendor/criterion/src/routine.rs b/vendor/criterion/src/routine.rs
index 5831415ac..88e4318bb 100755..100644
--- a/vendor/criterion/src/routine.rs
+++ b/vendor/criterion/src/routine.rs
@@ -2,7 +2,7 @@ use crate::benchmark::BenchmarkConfig;
use crate::connection::OutgoingMessage;
use crate::measurement::Measurement;
use crate::report::{BenchmarkId, Report, ReportContext};
-use crate::{ActualSamplingMode, Bencher, Criterion, DurationExt};
+use crate::{black_box, ActualSamplingMode, Bencher, Criterion};
use std::marker::PhantomData;
use std::time::Duration;
@@ -34,7 +34,7 @@ pub(crate) trait Routine<M: Measurement, T: ?Sized> {
) {
criterion
.report
- .profile(id, report_context, time.to_nanos() as f64);
+ .profile(id, report_context, time.as_nanos() as f64);
let mut profile_path = report_context.output_directory.clone();
if (*crate::CARGO_CRITERION_CONNECTION).is_some() {
@@ -51,7 +51,7 @@ pub(crate) trait Routine<M: Measurement, T: ?Sized> {
.borrow_mut()
.start_profiling(id.id(), &profile_path);
- let time = time.to_nanos();
+ let time = time.as_nanos() as u64;
// TODO: Some profilers will show the two batches of iterations as
// being different code-paths even though they aren't really.
@@ -88,17 +88,58 @@ pub(crate) trait Routine<M: Measurement, T: ?Sized> {
report_context: &ReportContext,
parameter: &T,
) -> (ActualSamplingMode, Box<[f64]>, Box<[f64]>) {
+ if config.quick_mode {
+ let minimum_bench_duration = Duration::from_millis(100);
+ let maximum_bench_duration = config.measurement_time; // default: 5 seconds
+ let target_rel_stdev = config.significance_level; // default: 5%, 0.05
+
+ use std::time::Instant;
+ let time_start = Instant::now();
+
+ let sq = |val| val * val;
+ let mut n = 1;
+ let mut t_prev = *self.bench(measurement, &[n], parameter).first().unwrap();
+
+ // Early exit for extremely long running benchmarks:
+ if time_start.elapsed() > maximum_bench_duration {
+ let iters = vec![n as f64, n as f64].into_boxed_slice();
+ // prevent gnuplot bug when all values are equal
+ let elapsed = vec![t_prev, t_prev + 0.000001].into_boxed_slice();
+ return (ActualSamplingMode::Flat, iters, elapsed);
+ }
+
+ // Main data collection loop.
+ loop {
+ let t_now = *self
+ .bench(measurement, &[n * 2], parameter)
+ .first()
+ .unwrap();
+ let t = (t_prev + 2. * t_now) / 5.;
+ let stdev = (sq(t_prev - t) + sq(t_now - 2. * t)).sqrt();
+ // println!("Sample: {} {:.2}", n, stdev / t);
+ let elapsed = time_start.elapsed();
+ if (stdev < target_rel_stdev * t && elapsed > minimum_bench_duration)
+ || elapsed > maximum_bench_duration
+ {
+ let iters = vec![n as f64, (n * 2) as f64].into_boxed_slice();
+ let elapsed = vec![t_prev, t_now].into_boxed_slice();
+ return (ActualSamplingMode::Linear, iters, elapsed);
+ }
+ n *= 2;
+ t_prev = t_now;
+ }
+ }
let wu = config.warm_up_time;
- let m_ns = config.measurement_time.to_nanos();
+ let m_ns = config.measurement_time.as_nanos();
criterion
.report
- .warmup(id, report_context, wu.to_nanos() as f64);
+ .warmup(id, report_context, wu.as_nanos() as f64);
if let Some(conn) = &criterion.connection {
conn.send(&OutgoingMessage::Warmup {
id: id.into(),
- nanos: wu.to_nanos() as f64,
+ nanos: wu.as_nanos() as f64,
})
.unwrap();
}
@@ -206,7 +247,7 @@ where
.iter()
.map(|iters| {
b.iters = *iters;
- (*f)(&mut b, parameter);
+ (*f)(&mut b, black_box(parameter));
b.assert_iterated();
m.to_f64(&b.value)
})
@@ -226,14 +267,14 @@ where
let mut total_iters = 0;
let mut elapsed_time = Duration::from_millis(0);
loop {
- (*f)(&mut b, parameter);
+ (*f)(&mut b, black_box(parameter));
b.assert_iterated();
total_iters += b.iters;
elapsed_time += b.elapsed_time;
if elapsed_time > how_long {
- return (elapsed_time.to_nanos(), total_iters);
+ return (elapsed_time.as_nanos() as u64, total_iters);
}
b.iters = b.iters.wrapping_mul(2);
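
The largest routine.rs addition is the quick_mode sampling loop: it benchmarks n and then 2n iterations, combines the two timings into a weighted estimate, and stops once the spread between them falls below the configured relative standard deviation, or once the measurement-time budget is exhausted. The sketch below is a standalone illustration of that stopping rule; quick_sample and the measure closure are hypothetical stand-ins for Routine::bench, and the constants mirror the defaults noted in the diff.

use std::time::{Duration, Instant};

// Standalone sketch of the quick-mode stopping rule. `measure(n)` returns the
// total time (in nanoseconds) for n iterations of the workload under test.
fn quick_sample(measure: impl Fn(u64) -> f64) -> (u64, f64) {
    let minimum = Duration::from_millis(100);   // minimum_bench_duration
    let maximum = Duration::from_secs(5);       // maximum_bench_duration
    let target_rel_stdev = 0.05;                // target relative stdev

    let start = Instant::now();
    let sq = |v: f64| v * v;
    let mut n: u64 = 1;
    let mut t_prev = measure(n);

    loop {
        let t_now = measure(n * 2);
        // Weighted estimate of the n-iteration time: t_now covers twice the
        // iterations, so it carries twice the weight of t_prev.
        let t = (t_prev + 2.0 * t_now) / 5.0;
        // Spread of the two runs around that weighted estimate.
        let stdev = (sq(t_prev - t) + sq(t_now - 2.0 * t)).sqrt();
        let elapsed = start.elapsed();
        if (stdev < target_rel_stdev * t && elapsed > minimum) || elapsed > maximum {
            return (n, t_now / (n * 2) as f64); // iterations and ns per iteration
        }
        n *= 2;
        t_prev = t_now;
    }
}

fn main() {
    // Measure a trivial workload: summing 0..n, timed in nanoseconds.
    let (iters, per_iter) = quick_sample(|n| {
        let t0 = Instant::now();
        let mut acc = 0u64;
        for i in 0..n {
            acc = acc.wrapping_add(std::hint::black_box(i));
        }
        std::hint::black_box(acc);
        t0.elapsed().as_nanos() as f64
    });
    println!("settled at n = {iters} iterations, ~{per_iter:.2} ns per iteration");
}

For a per-iteration cost u, t_prev is roughly n*u and t_now roughly 2n*u, so the weighted mean (t_prev + 2*t_now)/5 estimates the n-iteration time and the stdev term measures how far the two runs disagree from it relative to that estimate.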
diff --git a/vendor/criterion/src/stats/bivariate/bootstrap.rs b/vendor/criterion/src/stats/bivariate/bootstrap.rs
index 9eb7fa7b5..9eb7fa7b5 100755..100644
--- a/vendor/criterion/src/stats/bivariate/bootstrap.rs
+++ b/vendor/criterion/src/stats/bivariate/bootstrap.rs
diff --git a/vendor/criterion/src/stats/bivariate/mod.rs b/vendor/criterion/src/stats/bivariate/mod.rs
index d1e8df703..2351c9ef6 100755..100644
--- a/vendor/criterion/src/stats/bivariate/mod.rs
+++ b/vendor/criterion/src/stats/bivariate/mod.rs
@@ -8,6 +8,7 @@ use crate::stats::bivariate::resamples::Resamples;
use crate::stats::float::Float;
use crate::stats::tuple::{Tuple, TupledDistributionsBuilder};
use crate::stats::univariate::Sample;
+#[cfg(feature = "rayon")]
use rayon::iter::{IntoParallelIterator, ParallelIterator};
/// Bivariate `(X, Y)` data
@@ -72,27 +73,41 @@ where
T::Distributions: Send,
T::Builder: Send,
{
- (0..nresamples)
- .into_par_iter()
- .map_init(
- || Resamples::new(*self),
- |resamples, _| statistic(resamples.next()),
- )
- .fold(
- || T::Builder::new(0),
- |mut sub_distributions, sample| {
+ #[cfg(feature = "rayon")]
+ {
+ (0..nresamples)
+ .into_par_iter()
+ .map_init(
+ || Resamples::new(*self),
+ |resamples, _| statistic(resamples.next()),
+ )
+ .fold(
+ || T::Builder::new(0),
+ |mut sub_distributions, sample| {
+ sub_distributions.push(sample);
+ sub_distributions
+ },
+ )
+ .reduce(
+ || T::Builder::new(0),
+ |mut a, mut b| {
+ a.extend(&mut b);
+ a
+ },
+ )
+ .complete()
+ }
+ #[cfg(not(feature = "rayon"))]
+ {
+ let mut resamples = Resamples::new(*self);
+ (0..nresamples)
+ .map(|_| statistic(resamples.next()))
+ .fold(T::Builder::new(0), |mut sub_distributions, sample| {
sub_distributions.push(sample);
sub_distributions
- },
- )
- .reduce(
- || T::Builder::new(0),
- |mut a, mut b| {
- a.extend(&mut b);
- a
- },
- )
- .complete()
+ })
+ .complete()
+ }
}
/// Returns a view into the `X` data
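
The bivariate bootstrap above is the first of several places where the rayon dependency becomes optional: the parallel map_init/fold/reduce pipeline is kept behind #[cfg(feature = "rayon")] and a single-threaded fold is added as the fallback. A stripped-down sketch of that feature-gating pattern follows, using a plain Vec<f64> statistic and a deterministic placeholder resampler instead of criterion's Resamples/TupledDistributionsBuilder machinery; all names below are hypothetical.

// Stripped-down version of the feature-gated bootstrap: with the "rayon"
// feature the resamples are processed in parallel, otherwise a plain serial
// iterator produces the same distribution.
fn bootstrap_means(data: &[f64], nresamples: usize) -> Vec<f64> {
    // Deterministic "resampler" for the sketch: rotate the data by i places.
    let resample = |i: usize| -> Vec<f64> {
        let k = i % data.len();
        data[k..].iter().chain(&data[..k]).copied().collect()
    };
    // The statistic computed on each resample (here: the mean).
    let mean_of = |i: usize| {
        let r = resample(i);
        r.iter().sum::<f64>() / r.len() as f64
    };

    #[cfg(feature = "rayon")]
    {
        use rayon::prelude::*;
        (0..nresamples).into_par_iter().map(mean_of).collect()
    }
    #[cfg(not(feature = "rayon"))]
    {
        (0..nresamples).map(mean_of).collect()
    }
}

fn main() {
    println!("{:?}", bootstrap_means(&[1.0, 2.0, 3.0, 4.0], 8));
}

The same duplication shows up again in univariate/mixed.rs, univariate/mod.rs and univariate/sample.rs further down; only the statistic being resampled differs.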
diff --git a/vendor/criterion/src/stats/bivariate/regression.rs b/vendor/criterion/src/stats/bivariate/regression.rs
index f09443f10..f09443f10 100755..100644
--- a/vendor/criterion/src/stats/bivariate/regression.rs
+++ b/vendor/criterion/src/stats/bivariate/regression.rs
diff --git a/vendor/criterion/src/stats/bivariate/resamples.rs b/vendor/criterion/src/stats/bivariate/resamples.rs
index e254dc792..e254dc792 100755..100644
--- a/vendor/criterion/src/stats/bivariate/resamples.rs
+++ b/vendor/criterion/src/stats/bivariate/resamples.rs
diff --git a/vendor/criterion/src/stats/float.rs b/vendor/criterion/src/stats/float.rs
index b7748ddb5..b7748ddb5 100755..100644
--- a/vendor/criterion/src/stats/float.rs
+++ b/vendor/criterion/src/stats/float.rs
diff --git a/vendor/criterion/src/stats/mod.rs b/vendor/criterion/src/stats/mod.rs
index 4f926debd..4f926debd 100755..100644
--- a/vendor/criterion/src/stats/mod.rs
+++ b/vendor/criterion/src/stats/mod.rs
diff --git a/vendor/criterion/src/stats/rand_util.rs b/vendor/criterion/src/stats/rand_util.rs
index ed374cf98..ed374cf98 100755..100644
--- a/vendor/criterion/src/stats/rand_util.rs
+++ b/vendor/criterion/src/stats/rand_util.rs
diff --git a/vendor/criterion/src/stats/test.rs b/vendor/criterion/src/stats/test.rs
index 9e13f3084..9e13f3084 100755..100644
--- a/vendor/criterion/src/stats/test.rs
+++ b/vendor/criterion/src/stats/test.rs
diff --git a/vendor/criterion/src/stats/tuple.rs b/vendor/criterion/src/stats/tuple.rs
index 1c075159e..1c075159e 100755..100644
--- a/vendor/criterion/src/stats/tuple.rs
+++ b/vendor/criterion/src/stats/tuple.rs
diff --git a/vendor/criterion/src/stats/univariate/bootstrap.rs b/vendor/criterion/src/stats/univariate/bootstrap.rs
index 21c914011..21c914011 100755..100644
--- a/vendor/criterion/src/stats/univariate/bootstrap.rs
+++ b/vendor/criterion/src/stats/univariate/bootstrap.rs
diff --git a/vendor/criterion/src/stats/univariate/kde/kernel.rs b/vendor/criterion/src/stats/univariate/kde/kernel.rs
index c3d0ff513..c3d0ff513 100755..100644
--- a/vendor/criterion/src/stats/univariate/kde/kernel.rs
+++ b/vendor/criterion/src/stats/univariate/kde/kernel.rs
diff --git a/vendor/criterion/src/stats/univariate/kde/mod.rs b/vendor/criterion/src/stats/univariate/kde/mod.rs
index 9b0836d74..c54de55a2 100755..100644
--- a/vendor/criterion/src/stats/univariate/kde/mod.rs
+++ b/vendor/criterion/src/stats/univariate/kde/mod.rs
@@ -5,6 +5,7 @@ pub mod kernel;
use self::kernel::Kernel;
use crate::stats::float::Float;
use crate::stats::univariate::Sample;
+#[cfg(feature = "rayon")]
use rayon::prelude::*;
/// Univariate kernel density estimator
@@ -42,8 +43,13 @@ where
///
/// - Multithreaded
pub fn map(&self, xs: &[A]) -> Box<[A]> {
- xs.par_iter()
- .map(|&x| self.estimate(x))
+ #[cfg(feature = "rayon")]
+ let iter = xs.par_iter();
+
+ #[cfg(not(feature = "rayon"))]
+ let iter = xs.iter();
+
+ iter.map(|&x| self.estimate(x))
.collect::<Vec<_>>()
.into_boxed_slice()
}
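
The KDE hunk uses a lighter variant of the same gating: only the iterator construction is cfg-selected, and the map/collect tail is shared between builds, which works because rayon's parallel iterators mirror the std Iterator combinators used here. A minimal sketch, with square_all standing in for Kde::map:

// Only the iterator construction is feature-gated; the map/collect tail is
// shared between the parallel and serial builds.
fn square_all(xs: &[f64]) -> Box<[f64]> {
    #[cfg(feature = "rayon")]
    use rayon::prelude::*;

    #[cfg(feature = "rayon")]
    let iter = xs.par_iter();
    #[cfg(not(feature = "rayon"))]
    let iter = xs.iter();

    iter.map(|&x| x * x).collect::<Vec<_>>().into_boxed_slice()
}

fn main() {
    println!("{:?}", square_all(&[1.0, 2.0, 3.0]));
}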
diff --git a/vendor/criterion/src/stats/univariate/mixed.rs b/vendor/criterion/src/stats/univariate/mixed.rs
index 5c0a59fac..d6b845d1b 100755..100644
--- a/vendor/criterion/src/stats/univariate/mixed.rs
+++ b/vendor/criterion/src/stats/univariate/mixed.rs
@@ -4,6 +4,7 @@ use crate::stats::float::Float;
use crate::stats::tuple::{Tuple, TupledDistributionsBuilder};
use crate::stats::univariate::Resamples;
use crate::stats::univariate::Sample;
+#[cfg(feature = "rayon")]
use rayon::prelude::*;
/// Performs a *mixed* two-sample bootstrap
@@ -27,31 +28,51 @@ where
c.extend_from_slice(b);
let c = Sample::new(&c);
- (0..nresamples)
- .into_par_iter()
- .map_init(
- || Resamples::new(c),
- |resamples, _| {
+ #[cfg(feature = "rayon")]
+ {
+ (0..nresamples)
+ .into_par_iter()
+ .map_init(
+ || Resamples::new(c),
+ |resamples, _| {
+ let resample = resamples.next();
+ let a: &Sample<A> = Sample::new(&resample[..n_a]);
+ let b: &Sample<A> = Sample::new(&resample[n_a..]);
+
+ statistic(a, b)
+ },
+ )
+ .fold(
+ || T::Builder::new(0),
+ |mut sub_distributions, sample| {
+ sub_distributions.push(sample);
+ sub_distributions
+ },
+ )
+ .reduce(
+ || T::Builder::new(0),
+ |mut a, mut b| {
+ a.extend(&mut b);
+ a
+ },
+ )
+ .complete()
+ }
+ #[cfg(not(feature = "rayon"))]
+ {
+ let mut resamples = Resamples::new(c);
+ (0..nresamples)
+ .map(|_| {
let resample = resamples.next();
let a: &Sample<A> = Sample::new(&resample[..n_a]);
let b: &Sample<A> = Sample::new(&resample[n_a..]);
statistic(a, b)
- },
- )
- .fold(
- || T::Builder::new(0),
- |mut sub_distributions, sample| {
+ })
+ .fold(T::Builder::new(0), |mut sub_distributions, sample| {
sub_distributions.push(sample);
sub_distributions
- },
- )
- .reduce(
- || T::Builder::new(0),
- |mut a, mut b| {
- a.extend(&mut b);
- a
- },
- )
- .complete()
+ })
+ .complete()
+ }
}
diff --git a/vendor/criterion/src/stats/univariate/mod.rs b/vendor/criterion/src/stats/univariate/mod.rs
index 8dfb5f8a9..5b221272d 100755..100644
--- a/vendor/criterion/src/stats/univariate/mod.rs
+++ b/vendor/criterion/src/stats/univariate/mod.rs
@@ -11,6 +11,7 @@ pub mod outliers;
use crate::stats::float::Float;
use crate::stats::tuple::{Tuple, TupledDistributionsBuilder};
+#[cfg(feature = "rayon")]
use rayon::prelude::*;
use std::cmp;
@@ -42,11 +43,42 @@ where
let nresamples_sqrt = (nresamples as f64).sqrt().ceil() as usize;
let per_chunk = (nresamples + nresamples_sqrt - 1) / nresamples_sqrt;
- (0..nresamples_sqrt)
- .into_par_iter()
- .map_init(
- || (Resamples::new(a), Resamples::new(b)),
- |(a_resamples, b_resamples), i| {
+ #[cfg(feature = "rayon")]
+ {
+ (0..nresamples_sqrt)
+ .into_par_iter()
+ .map_init(
+ || (Resamples::new(a), Resamples::new(b)),
+ |(a_resamples, b_resamples), i| {
+ let start = i * per_chunk;
+ let end = cmp::min((i + 1) * per_chunk, nresamples);
+ let a_resample = a_resamples.next();
+
+ let mut sub_distributions: T::Builder =
+ TupledDistributionsBuilder::new(end - start);
+
+ for _ in start..end {
+ let b_resample = b_resamples.next();
+ sub_distributions.push(statistic(a_resample, b_resample));
+ }
+ sub_distributions
+ },
+ )
+ .reduce(
+ || T::Builder::new(0),
+ |mut a, mut b| {
+ a.extend(&mut b);
+ a
+ },
+ )
+ .complete()
+ }
+ #[cfg(not(feature = "rayon"))]
+ {
+ let mut a_resamples = Resamples::new(a);
+ let mut b_resamples = Resamples::new(b);
+ (0..nresamples_sqrt)
+ .map(|i| {
let start = i * per_chunk;
let end = cmp::min((i + 1) * per_chunk, nresamples);
let a_resample = a_resamples.next();
@@ -59,14 +91,11 @@ where
sub_distributions.push(statistic(a_resample, b_resample));
}
sub_distributions
- },
- )
- .reduce(
- || T::Builder::new(0),
- |mut a, mut b| {
+ })
+ .fold(T::Builder::new(0), |mut a, mut b| {
a.extend(&mut b);
a
- },
- )
- .complete()
+ })
+ .complete()
+ }
}
diff --git a/vendor/criterion/src/stats/univariate/outliers/mod.rs b/vendor/criterion/src/stats/univariate/outliers/mod.rs
index b8ed7c744..b8ed7c744 100755..100644
--- a/vendor/criterion/src/stats/univariate/outliers/mod.rs
+++ b/vendor/criterion/src/stats/univariate/outliers/mod.rs
diff --git a/vendor/criterion/src/stats/univariate/outliers/tukey.rs b/vendor/criterion/src/stats/univariate/outliers/tukey.rs
index 70713ac57..70713ac57 100755..100644
--- a/vendor/criterion/src/stats/univariate/outliers/tukey.rs
+++ b/vendor/criterion/src/stats/univariate/outliers/tukey.rs
diff --git a/vendor/criterion/src/stats/univariate/percentiles.rs b/vendor/criterion/src/stats/univariate/percentiles.rs
index be6bcf391..39def18e7 100755..100644
--- a/vendor/criterion/src/stats/univariate/percentiles.rs
+++ b/vendor/criterion/src/stats/univariate/percentiles.rs
@@ -54,27 +54,23 @@ where
/// Returns the interquartile range
pub fn iqr(&self) -> A {
- unsafe {
- let q1 = self.at_unchecked(A::cast(25));
- let q3 = self.at_unchecked(A::cast(75));
+ let q1 = self.at(A::cast(25));
+ let q3 = self.at(A::cast(75));
- q3 - q1
- }
+ q3 - q1
}
/// Returns the 50th percentile
pub fn median(&self) -> A {
- unsafe { self.at_unchecked(A::cast(50)) }
+ self.at(A::cast(50))
}
/// Returns the 25th, 50th and 75th percentiles
pub fn quartiles(&self) -> (A, A, A) {
- unsafe {
- (
- self.at_unchecked(A::cast(25)),
- self.at_unchecked(A::cast(50)),
- self.at_unchecked(A::cast(75)),
- )
- }
+ (
+ self.at(A::cast(25)),
+ self.at(A::cast(50)),
+ self.at(A::cast(75)),
+ )
}
}
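
The percentiles hunk above replaces the unsafe at_unchecked calls with the bounds-checked at while keeping the same arithmetic: the IQR is the 75th minus the 25th percentile, and the median is the 50th. Below is a standalone sketch of a linear-interpolation percentile and the derived IQR; the interpolation scheme is an assumption for illustration, not a copy of criterion's Percentiles implementation.

// Linear-interpolation percentile on already-sorted data, plus the IQR derived
// from it. This approximates what Percentiles::at/iqr compute.
fn percentile(sorted: &[f64], p: f64) -> f64 {
    assert!(!sorted.is_empty() && (0.0..=100.0).contains(&p));
    let rank = p / 100.0 * (sorted.len() - 1) as f64;
    let lo = rank.floor() as usize;
    let hi = rank.ceil() as usize;
    let frac = rank - lo as f64;
    sorted[lo] + (sorted[hi] - sorted[lo]) * frac
}

fn main() {
    let data = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0];
    let q1 = percentile(&data, 25.0);
    let q3 = percentile(&data, 75.0);
    println!("q1 = {q1}, q3 = {q3}, iqr = {}", q3 - q1); // q1 = 3, q3 = 7, iqr = 4
}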
diff --git a/vendor/criterion/src/stats/univariate/resamples.rs b/vendor/criterion/src/stats/univariate/resamples.rs
index 923669d59..923669d59 100755..100644
--- a/vendor/criterion/src/stats/univariate/resamples.rs
+++ b/vendor/criterion/src/stats/univariate/resamples.rs
diff --git a/vendor/criterion/src/stats/univariate/sample.rs b/vendor/criterion/src/stats/univariate/sample.rs
index 8f10db7b1..6fbb4fb2d 100755..100644
--- a/vendor/criterion/src/stats/univariate/sample.rs
+++ b/vendor/criterion/src/stats/univariate/sample.rs
@@ -4,6 +4,7 @@ use crate::stats::float::Float;
use crate::stats::tuple::{Tuple, TupledDistributionsBuilder};
use crate::stats::univariate::Percentiles;
use crate::stats::univariate::Resamples;
+#[cfg(feature = "rayon")]
use rayon::prelude::*;
/// A collection of data points drawn from a population
@@ -12,6 +13,7 @@ use rayon::prelude::*;
///
/// - The sample contains at least 2 data points
/// - The sample contains no `NaN`s
+#[repr(transparent)]
pub struct Sample<A>([A]);
// TODO(rust-lang/rfcs#735) move this `impl` into a private percentiles module
@@ -127,7 +129,10 @@ where
}
let mut v = self.to_vec().into_boxed_slice();
+ #[cfg(feature = "rayon")]
v.par_sort_unstable_by(cmp);
+ #[cfg(not(feature = "rayon"))]
+ v.sort_unstable_by(cmp);
// NB :-1: to intra-crate privacy rules
unsafe { mem::transmute(v) }
@@ -206,27 +211,41 @@ where
T::Distributions: Send,
T::Builder: Send,
{
- (0..nresamples)
- .into_par_iter()
- .map_init(
- || Resamples::new(self),
- |resamples, _| statistic(resamples.next()),
- )
- .fold(
- || T::Builder::new(0),
- |mut sub_distributions, sample| {
+ #[cfg(feature = "rayon")]
+ {
+ (0..nresamples)
+ .into_par_iter()
+ .map_init(
+ || Resamples::new(self),
+ |resamples, _| statistic(resamples.next()),
+ )
+ .fold(
+ || T::Builder::new(0),
+ |mut sub_distributions, sample| {
+ sub_distributions.push(sample);
+ sub_distributions
+ },
+ )
+ .reduce(
+ || T::Builder::new(0),
+ |mut a, mut b| {
+ a.extend(&mut b);
+ a
+ },
+ )
+ .complete()
+ }
+ #[cfg(not(feature = "rayon"))]
+ {
+ let mut resamples = Resamples::new(self);
+ (0..nresamples)
+ .map(|_| statistic(resamples.next()))
+ .fold(T::Builder::new(0), |mut sub_distributions, sample| {
sub_distributions.push(sample);
sub_distributions
- },
- )
- .reduce(
- || T::Builder::new(0),
- |mut a, mut b| {
- a.extend(&mut b);
- a
- },
- )
- .complete()
+ })
+ .complete()
+ }
}
#[cfg(test)]
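
The #[repr(transparent)] added to Sample above is what makes the unchecked slice-to-Sample casts elsewhere in the file (such as the `unsafe { mem::transmute(v) }` visible in the sort hunk) defensible: the newtype is guaranteed to share the layout of the slice it wraps. A minimal sketch of the same pattern with a hypothetical Sorted newtype, mirroring the shape of Sample::new rather than criterion's actual type:

use std::mem;

// #[repr(transparent)] guarantees Sorted<A> has exactly the layout of [A],
// which is what justifies the &[A] -> &Sorted<A> transmute below.
#[repr(transparent)]
struct Sorted<A>([A]);

impl<A: PartialOrd> Sorted<A> {
    fn new(slice: &[A]) -> &Sorted<A> {
        assert!(slice.windows(2).all(|w| w[0] <= w[1]), "input must be sorted");
        // SAFETY: the two reference types are layout-compatible thanks to
        // #[repr(transparent)].
        unsafe { mem::transmute(slice) }
    }

    fn min(&self) -> &A {
        &self.0[0]
    }
}

fn main() {
    let s = Sorted::new(&[1.0_f64, 2.0, 3.0]);
    println!("min = {}", s.min());
}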