Diffstat (limited to 'vendor/criterion')
-rw-r--r--  vendor/criterion/.cargo-checksum.json | 2
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/CHANGELOG.md | 180
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/CONTRIBUTING.md | 0
-rw-r--r--  vendor/criterion/Cargo.toml | 48
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/LICENSE-APACHE | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/LICENSE-MIT | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/README.md | 8
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/appveyor.yml | 9
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/benches/bench_main.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/benches/benchmarks/async_measurement_overhead.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/benches/benchmarks/compare_functions.rs | 49
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/benches/benchmarks/custom_measurement.rs | 4
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/benches/benchmarks/external_process.py | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/benches/benchmarks/external_process.rs | 1
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/benches/benchmarks/iter_with_large_drop.rs | 28
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/benches/benchmarks/iter_with_large_setup.rs | 32
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/benches/benchmarks/iter_with_setup.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/benches/benchmarks/measurement_overhead.rs | 2
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/benches/benchmarks/mod.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/benches/benchmarks/sampling_mode.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/benches/benchmarks/special_characters.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/benches/benchmarks/with_inputs.rs | 9
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/ci/install.sh | 0
-rwxr-xr-x  vendor/criterion/ci/nextest-compat.sh | 11
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/ci/script.sh | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/analysis/compare.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/analysis/mod.rs | 15
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/async_executor.rs | 2
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/bencher.rs | 9
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/benchmark.rs | 558
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/benchmark_group.rs | 19
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/connection.rs | 38
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/csv_report.rs | 1
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/error.rs | 8
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/estimate.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/format.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/fs.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/html/benchmark_report.html.tt | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/html/index.html.tt | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/html/mod.rs | 18
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/html/report_link.html.tt | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/html/summary_report.html.tt | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/kde.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/lib.rs | 884
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/macros.rs | 3
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/macros_private.rs | 2
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/measurement.rs | 31
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/gnuplot_backend/distributions.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/gnuplot_backend/iteration_times.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/gnuplot_backend/mod.rs | 2
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/gnuplot_backend/pdf.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/gnuplot_backend/regression.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/gnuplot_backend/summary.rs | 15
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/gnuplot_backend/t_test.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/mod.rs | 2
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/plotters_backend/distributions.rs | 12
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/plotters_backend/iteration_times.rs | 4
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/plotters_backend/mod.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/plotters_backend/pdf.rs | 22
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/plotters_backend/regression.rs | 10
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/plotters_backend/summary.rs | 10
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/plot/plotters_backend/t_test.rs | 4
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/profiler.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/report.rs | 278
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/routine.rs | 59
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/bivariate/bootstrap.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/bivariate/mod.rs | 53
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/bivariate/regression.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/bivariate/resamples.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/float.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/mod.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/rand_util.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/test.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/tuple.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/univariate/bootstrap.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/univariate/kde/kernel.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/univariate/kde/mod.rs | 10
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/univariate/mixed.rs | 61
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/univariate/mod.rs | 55
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/univariate/outliers/mod.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/univariate/outliers/tukey.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/univariate/percentiles.rs | 22
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/univariate/resamples.rs | 0
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/src/stats/univariate/sample.rs | 57
-rw-r--r-- [-rwxr-xr-x]  vendor/criterion/tests/criterion_tests.rs | 207
85 files changed, 1215 insertions, 1639 deletions
diff --git a/vendor/criterion/.cargo-checksum.json b/vendor/criterion/.cargo-checksum.json
index 0e74236a0..58f165ab2 100644
--- a/vendor/criterion/.cargo-checksum.json
+++ b/vendor/criterion/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"CHANGELOG.md":"a47896948b6c8d5bdb7c3d589b598738ca3b390cf77871d462c4fdfe2552e09c","CONTRIBUTING.md":"7b480a4b278228ff3fe5d3f5915291336d92468382eb1326b54cbff27fec1b83","Cargo.toml":"6b16b09fd2fba6c9d0c6a222673ca99c976e1bf54675d58e3264304127197e91","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"4594f18866be94a3ce309ed8f94d92699f5b87739b76e1216addca75f5253fa7","README.md":"23210907a22a3e9a281bf05fbc236f1fe49d65e731c8dc08cb5385644d311950","appveyor.yml":"e1df347ab1388d9960752b5b384562d30a417924cd59737e335dde4836874106","benches/bench_main.rs":"2d29d981a40a8984f4aa48f865d7147e27981c609168778ca97c2b97c2abff1d","benches/benchmarks/async_measurement_overhead.rs":"de234b7cf77b29e098e7e954b8c1b70380598d3833d3a2bfd17979debc78f76b","benches/benchmarks/compare_functions.rs":"8f838c306ae726f1ba92c1a3396d7c33f7c1503d1bc64e3995125cc14341492d","benches/benchmarks/custom_measurement.rs":"9a813bc8db07b62a3f04fb33b871d6a7c9b1c15096b6d04e83e6b21f0b5d32ef","benches/benchmarks/external_process.py":"76b4af906b47d14723b9c78676424c6e2b1f9941c985f4d12945cc0ad61d3fa9","benches/benchmarks/external_process.rs":"5a69edb7e6814359a2932aea24b1a617da8faf4b9ce6d97cf25121fc36d70762","benches/benchmarks/iter_with_large_drop.rs":"00444a1ce62fd33efa97e4c0e83d1a55e403a9d6080c19a00b7632a1c0d83287","benches/benchmarks/iter_with_large_setup.rs":"e9a93cad250f8585ce713c3b0067600c1147f544a70a534c34570db64963ff7e","benches/benchmarks/iter_with_setup.rs":"47e71edd7cbc881cf52293b710eae1e89cb02dc0e6d58b2b378bc331362079ec","benches/benchmarks/measurement_overhead.rs":"8e24bfeef221bf470a8bc5b9e3415982741e63c9f62a1beb59ebe4f55bb76096","benches/benchmarks/mod.rs":"ef9ab1efc1659c68562bd2861aa732d5019e5ba936f0c2d6c9b4e34d6c76279f","benches/benchmarks/sampling_mode.rs":"84d9f81d531104be6af210b6cc2e95e1f541e8c02eee8fd5e13f80859d33a276","benches/benchmarks/special_characters.rs":"73e1276b0bf89b1c36f3eb43b3da9b67e84fd27d6687ee43e90c57a82724009c","benches/benchmarks/with_inputs.rs":"f65a7b93a29079a95c4402c12037b0f93ba07f3066b0ae8d97a3a362eba77528","ci/install.sh":"444f0cc23e7bfedef1f51505c9b7c466873fad7e496dc17a1b55d87472ef9a6b","ci/script.sh":"bf3e29198084937145cbdc8fcab27a46e104bdd76a7b72076d02d2cae4f12348","src/analysis/compare.rs":"2633cb685478f48df82e5d849d7df6b573458742778a019a0b4b06f57cb922f0","src/analysis/mod.rs":"fb8787c0f8172c446e32580c24cc9af7c5ffa8842736476282b136897c677f34","src/async_executor.rs":"6b455f8f02ce797aa1bed855fcc76fab415b889b7e636202fd932eed7262d699","src/bencher.rs":"919dd785be79e0a2adf9ec626e7569d20ff94e1ab8bbecf1123757386f552d6e","src/benchmark.rs":"e2b692368ce9493fc21300c2f6162094d525bcc69e401ab6d6c5906d16d1af70","src/benchmark_group.rs":"a031ce2bf3a47df3ad6f04eef32e376755b5ddbc7912fba813dd3c2cb62652dc","src/connection.rs":"f69dadeb373ccbba2d89c85cbf4c51b359cb78152b42b3680319a6871594f490","src/csv_report.rs":"c1fe1bc2ae9c99f65c2a0da3ee1e0aba75bd249f5c48def1a783f1f46ea2053a","src/error.rs":"fd0a68a4d438121c66636b71b92fb4d3cb932e20b46a2738865b057d2b1b1da6","src/estimate.rs":"ac75b202a5d88be166f7e1ce38fc08ac68f061a2ded4d0f0cd001b04974e65c6","src/format.rs":"89892501ba7b62041739e4a0eabd44a92073e9aa38ea26bb4be7e2cdd6a0591b","src/fs.rs":"e2887cffc0583aab6d4e4ac932115149382d9407640e7133e475b21b4c1667af","src/html/benchmark_report.html.tt":"04d00f1c8743707ed4faed2fa8abd7c4341a21115b4d4e955f3f582e3aced0ce","src/html/index.html.tt":"35d84fe72107a3d48118e7946db84b9599e40526729452b65a4131c0ae268fa9","src/html/mod.rs":"9bb6ae1e7a675a15fdeb105a9c78747895e3a19ac6b4df8cef97b
96213d783c3","src/html/report_link.html.tt":"ee814ac364f6a224f8fedef451f61b407162da23ad5f5993206adaf04623317b","src/html/summary_report.html.tt":"16a5ed96d4786899b95121c34f51c9d600636bfc2c083b39967e09367266d05c","src/kde.rs":"1ea0287829cdd386af03167ed2550b25e70b5f6001a958cf1cc38346faefb110","src/lib.rs":"ad90e6e672093c323ca303bc535ec9587552a5c3b966f34674f5d2b858a84521","src/macros.rs":"63cc83c275f00c22fe9dd71e568eff9cea7251a7830314f6bf83e3ade236c1b1","src/macros_private.rs":"af5429b94f1dc98a677e511a81718657840fe25a602791249586daf8d7988733","src/measurement.rs":"406470925883fa293dde66937ecc4f9e19b05b29357e6f68cd5ffe64b63e0111","src/plot/gnuplot_backend/distributions.rs":"4b8bf45c19af224853fba4abe891ad25e5043446c8f7e2aae0bc308c52cf8b4e","src/plot/gnuplot_backend/iteration_times.rs":"7e6b4aaffbca5e39d9761b6619e483559bb03977c418abe58a96c75d80482eab","src/plot/gnuplot_backend/mod.rs":"51000b9a777c99c33db92d6c792e9e31fa40ef88f235dfb6997030327a256233","src/plot/gnuplot_backend/pdf.rs":"7f2250a231ec93fdd2f8691cfd71609780901bcd4077935b9deb1a95a917c885","src/plot/gnuplot_backend/regression.rs":"e3dfc41e33669811cda72363488e4c371b989924dd2171aa954fa0cdb0f86788","src/plot/gnuplot_backend/summary.rs":"d0e1cda6ae185727342c7404fe43d5b462a0b087552810321c6dd9b94c60aa43","src/plot/gnuplot_backend/t_test.rs":"387010059ebe23713ea9d2b562e4160382f244787123ec4d9feed1b6a6197acc","src/plot/mod.rs":"7d798fb71840f9cc81714d2421b9639dc5acf6ab7a8c4f373a5fed5cafb0c682","src/plot/plotters_backend/distributions.rs":"0ec04369aabf6531bc6494c3df3aabe0f5815c10a56b95f73c2ef88e6bedce15","src/plot/plotters_backend/iteration_times.rs":"a5e4b73ef4e9e8fc7040f9deb6930c219beaf462311cdb1f162d7fa3fba62b65","src/plot/plotters_backend/mod.rs":"5c15257a801b442b7e84dbf7176208e67ba0dd88051c5858498a1f9f77f4c359","src/plot/plotters_backend/pdf.rs":"49e2a8da5dafd7a1a05a29d9b53cb800f49e65b0c7bf8a90f046632a2bbdcb37","src/plot/plotters_backend/regression.rs":"4d0af365204efccdf352f8893634c4ca376ff15e802bf22388224bcf94976bd0","src/plot/plotters_backend/summary.rs":"92f535e0391f2931b17ccb23ed9becd24deaf5790970eeb90bcc56919dc50bdc","src/plot/plotters_backend/t_test.rs":"a2fb052dff1bbc3c1dba2b9327b4888aa4a240b613ded3582a3db11775214c80","src/profiler.rs":"4a74ed8868872a7aada2b1bf8b0799cf621197cf11f2949af8e2870f800066d3","src/report.rs":"467ce023fbf1b5099d5f817b4030666efae966dc1471c3ea894ec5d1a1c3047e","src/routine.rs":"c24a2621101fa5d7cfcd76c412cb8c972193de0fef1e8928b0edb2abbce07eb5","src/stats/bivariate/bootstrap.rs":"7595203e14f1b7139b5f4fbe7d0c802726be44e9a98353f4235253c21079d3c8","src/stats/bivariate/mod.rs":"9c893b62e6480b218b60ad99304b3a72a7937feaa8ff93806171e52549277662","src/stats/bivariate/regression.rs":"4f4ad06cab2a9a78e218676029a9fe5d3a3b7f5289b5297395b30b9735228b92","src/stats/bivariate/resamples.rs":"bb61d19b021672578c4b35192a57ee638af2e72331c25cf27c3698f65e8d2e4f","src/stats/float.rs":"0dcabe15e74ba1dea4878bb5dbd05e85ea3c450f632bc87adc52074ef683671f","src/stats/mod.rs":"fbd06d54e2deb98d8de43709eaa95193069fc6a99141aa3bb06e8f4235dae8a9","src/stats/rand_util.rs":"fcadb36c2dd65534d9ad01f5a1e167f208ab965919608220abc02bbdf9a4a95b","src/stats/test.rs":"95f600f3f581513c1c370c9f9f70a7d650f77e57c2fe23d5ddf68cedb366f3cc","src/stats/tuple.rs":"3fcfa4077ca5585787694cb81290b85f5cfa26ca8e5f0a263a82beb1736ee64b","src/stats/univariate/bootstrap.rs":"44cf684cd5c77110d160feb52855ab4870439a28a5634ec4ed32bad50a743dd7","src/stats/univariate/kde/kernel.rs":"90af975f4c4bc0d37bba619655ce4decf892636a3d007b9a66d46d8a305afa63","src/stats/univariate/kde/mod.rs":"6504c
6fd40e226153dbf0276fab5f2afdfbd63309b78d019d3581163eb729bf7","src/stats/univariate/mixed.rs":"aa74eee3437d6cb1ce988581cea8b435929b978705e60df30f00122cbdf50038","src/stats/univariate/mod.rs":"f4c6ae9d0a4f0cb07d22443b9488f1949b4bd8ca0445276ba36fc955e841f101","src/stats/univariate/outliers/mod.rs":"8977fca40f49c77d833e1f3cd025cdee6939f0ce24e876a4afc54fb87785e03e","src/stats/univariate/outliers/tukey.rs":"03f561025f773ce31947fe2e889a2a71fa8bac24621a90d112507dd19ceab72c","src/stats/univariate/percentiles.rs":"00bb65541b400360a5fdaad34df45874f81ac39d2c8c019e69740ffcd40ab43e","src/stats/univariate/resamples.rs":"a3013f0a53f465ee8f4c9b95afdece74e6521213cf4525d75ab1d7f390cfed70","src/stats/univariate/sample.rs":"c35ed61f86739a35b19a2ce6e550e9b7db9fa1ed8b85fa2799a4018eae1ba06c","tests/criterion_tests.rs":"c7f28bb7b9d5a9db2cd3521334725cc44f015fd04a92b483e8bb1fe4479a718c"},"package":"b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f"} \ No newline at end of file
+{"files":{"CHANGELOG.md":"6c1b5e263a22a483f065c1dee9b5961584c6db43f45660e072920865fbcf291a","CONTRIBUTING.md":"7b480a4b278228ff3fe5d3f5915291336d92468382eb1326b54cbff27fec1b83","Cargo.toml":"7afcd2304ffdd3fc9e0ecf5169cb6def37441e6fd37291cde3fe29bfb6d15bb1","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"4594f18866be94a3ce309ed8f94d92699f5b87739b76e1216addca75f5253fa7","README.md":"2ea6309bfe8af5c2d8e5289983ae5c89d61deddf35dfaab7039b59f26d8d8a5f","appveyor.yml":"fde52eb6f9c8ad3a188438109f4831b6c764a12338b5207d4e58d6220bbfd19d","benches/bench_main.rs":"2d29d981a40a8984f4aa48f865d7147e27981c609168778ca97c2b97c2abff1d","benches/benchmarks/async_measurement_overhead.rs":"de234b7cf77b29e098e7e954b8c1b70380598d3833d3a2bfd17979debc78f76b","benches/benchmarks/compare_functions.rs":"c1a9dccc11fba8efa9526794939ce8eda344636f48ecf347178d9ac88a799562","benches/benchmarks/custom_measurement.rs":"ba6f854519605ea9b3ed17e455e29244e9c0085a78ea366649f65d17decef4fa","benches/benchmarks/external_process.py":"76b4af906b47d14723b9c78676424c6e2b1f9941c985f4d12945cc0ad61d3fa9","benches/benchmarks/external_process.rs":"fcf4480dac3280e2097c78bafaa0463363052a692f99826d95d5c041fedd4448","benches/benchmarks/iter_with_large_drop.rs":"496da444f44c03d76505a78d3bc88ba28a72547ac87926929673043b3ce1c899","benches/benchmarks/iter_with_large_setup.rs":"3e80d03e65b0a02589bcc63336f3a77b38737ac2e5755f9e0b9d90dc3a0a5937","benches/benchmarks/iter_with_setup.rs":"47e71edd7cbc881cf52293b710eae1e89cb02dc0e6d58b2b378bc331362079ec","benches/benchmarks/measurement_overhead.rs":"3a4dbd0f5e6b716080a7aced126ec2c7a48e2bc9412393548c6f1b055e8bde9b","benches/benchmarks/mod.rs":"ef9ab1efc1659c68562bd2861aa732d5019e5ba936f0c2d6c9b4e34d6c76279f","benches/benchmarks/sampling_mode.rs":"84d9f81d531104be6af210b6cc2e95e1f541e8c02eee8fd5e13f80859d33a276","benches/benchmarks/special_characters.rs":"73e1276b0bf89b1c36f3eb43b3da9b67e84fd27d6687ee43e90c57a82724009c","benches/benchmarks/with_inputs.rs":"c5d56276793d6d851a3592924a42a173a1fe8ced5f60034d91be66b03dc7f37a","ci/install.sh":"444f0cc23e7bfedef1f51505c9b7c466873fad7e496dc17a1b55d87472ef9a6b","ci/nextest-compat.sh":"92778d7d6889b4dca70c72bd0d1faada15fd0242ec0cd9497ff6240589af5159","ci/script.sh":"bf3e29198084937145cbdc8fcab27a46e104bdd76a7b72076d02d2cae4f12348","src/analysis/compare.rs":"2633cb685478f48df82e5d849d7df6b573458742778a019a0b4b06f57cb922f0","src/analysis/mod.rs":"9975ca2d691625e310919eb2709d552630ff8010ec06877315cc3834f68682d6","src/async_executor.rs":"f79347cc0268f80675071e6dda9befabf193298bf553e160b2524df52ef33ac7","src/bencher.rs":"1b88a80685d838fcf2fe53736c6a3d569253f0fe66fefc516bab5af67e864021","src/benchmark.rs":"65741fdef1b83765307834bd977bc73981a9c772c0dd27e7f21226854153f662","src/benchmark_group.rs":"4c4c2fe39f843d4ca9bae0e49d7e78e670b9aaa004a2847d9740e4b79d7f1ee4","src/connection.rs":"2fa662a638fe0d3d54415539cf6628cd69805df49b31522413dabf9c288288dc","src/csv_report.rs":"c71c7f77851a109d3424be0e817ac59e7578e76d75c84a980b6ec99ed151d63c","src/error.rs":"40ba8920ae1734007000e935c11a7a6b89e7698b15d1b0f7c47207f659b9dde4","src/estimate.rs":"ac75b202a5d88be166f7e1ce38fc08ac68f061a2ded4d0f0cd001b04974e65c6","src/format.rs":"89892501ba7b62041739e4a0eabd44a92073e9aa38ea26bb4be7e2cdd6a0591b","src/fs.rs":"e2887cffc0583aab6d4e4ac932115149382d9407640e7133e475b21b4c1667af","src/html/benchmark_report.html.tt":"04d00f1c8743707ed4faed2fa8abd7c4341a21115b4d4e955f3f582e3aced0ce","src/html/index.html.tt":"35d84fe72107a3d48118e7946db84b9599e40526729452b6
5a4131c0ae268fa9","src/html/mod.rs":"f85debcb50e4f2d015e572a91f2b71ffcbf8574be2a728c5ee40f14957358d13","src/html/report_link.html.tt":"ee814ac364f6a224f8fedef451f61b407162da23ad5f5993206adaf04623317b","src/html/summary_report.html.tt":"16a5ed96d4786899b95121c34f51c9d600636bfc2c083b39967e09367266d05c","src/kde.rs":"1ea0287829cdd386af03167ed2550b25e70b5f6001a958cf1cc38346faefb110","src/lib.rs":"4040d96ca39ed17ae736366cf0cd2ea69704020280aed8ec9038e0d70c5b8673","src/macros.rs":"e07a2adcd58cad99301238c69adc4853d562cec806e73a08256fe37e1117212a","src/macros_private.rs":"e9a810c451eb79f33a88c4d0e31d5c29b9d0b8b33c2502638f5bc132a931dba3","src/measurement.rs":"14a50a34feb58d671942bc5c0c05db0d184fabd1c0400ae51d16de4d7750d915","src/plot/gnuplot_backend/distributions.rs":"4b8bf45c19af224853fba4abe891ad25e5043446c8f7e2aae0bc308c52cf8b4e","src/plot/gnuplot_backend/iteration_times.rs":"7e6b4aaffbca5e39d9761b6619e483559bb03977c418abe58a96c75d80482eab","src/plot/gnuplot_backend/mod.rs":"aa1faad94b311848c906fc939617645d9f65918356f58fc6aa2607ab4a901197","src/plot/gnuplot_backend/pdf.rs":"7f2250a231ec93fdd2f8691cfd71609780901bcd4077935b9deb1a95a917c885","src/plot/gnuplot_backend/regression.rs":"e3dfc41e33669811cda72363488e4c371b989924dd2171aa954fa0cdb0f86788","src/plot/gnuplot_backend/summary.rs":"e57a83f5ce14eafbbdb16c668f1ace14aff77d3cf5ca016cca085212d245098c","src/plot/gnuplot_backend/t_test.rs":"387010059ebe23713ea9d2b562e4160382f244787123ec4d9feed1b6a6197acc","src/plot/mod.rs":"f686be7ca3b9ba769c88e041b8b498a605f847e57395f3dd7ad80a9f1ce6011d","src/plot/plotters_backend/distributions.rs":"849bae9047f233876c77cb6ac55f75bb128eb18483739bceb501fad82c6a68b6","src/plot/plotters_backend/iteration_times.rs":"bae66df2c68e90dd124cbfd58defe247b70557ba5e6235f10aa9eda27cff432d","src/plot/plotters_backend/mod.rs":"5c15257a801b442b7e84dbf7176208e67ba0dd88051c5858498a1f9f77f4c359","src/plot/plotters_backend/pdf.rs":"be018ab95783ec3f5de17ceddb7797b499721a871fb9501437fdaacb33a06dce","src/plot/plotters_backend/regression.rs":"1e71e780b55b9c783ee7d0f1b3437b46187115a702016fb57c29de47f7ba6e73","src/plot/plotters_backend/summary.rs":"0074764f5c0fe8bf466fbe4a683772f8d39505d71d10bae2491746ba14155325","src/plot/plotters_backend/t_test.rs":"8750d605e89bba64b469c4c1104c21d726b874886d2597dc72fcce11f2adccbc","src/profiler.rs":"4a74ed8868872a7aada2b1bf8b0799cf621197cf11f2949af8e2870f800066d3","src/report.rs":"d2119a042079ff348a0ee2411247aa5bfebea09d7bb8b8eec7ac68b5a43caee3","src/routine.rs":"502be0e03e758f271c209bb47f852880258103c36a07651d2bf27ee8412ede75","src/stats/bivariate/bootstrap.rs":"7595203e14f1b7139b5f4fbe7d0c802726be44e9a98353f4235253c21079d3c8","src/stats/bivariate/mod.rs":"6696e9b7d92e69d67f9c0b38007eb13096e1527c56604ee969abf3c78979db74","src/stats/bivariate/regression.rs":"4f4ad06cab2a9a78e218676029a9fe5d3a3b7f5289b5297395b30b9735228b92","src/stats/bivariate/resamples.rs":"bb61d19b021672578c4b35192a57ee638af2e72331c25cf27c3698f65e8d2e4f","src/stats/float.rs":"0dcabe15e74ba1dea4878bb5dbd05e85ea3c450f632bc87adc52074ef683671f","src/stats/mod.rs":"fbd06d54e2deb98d8de43709eaa95193069fc6a99141aa3bb06e8f4235dae8a9","src/stats/rand_util.rs":"fcadb36c2dd65534d9ad01f5a1e167f208ab965919608220abc02bbdf9a4a95b","src/stats/test.rs":"95f600f3f581513c1c370c9f9f70a7d650f77e57c2fe23d5ddf68cedb366f3cc","src/stats/tuple.rs":"3fcfa4077ca5585787694cb81290b85f5cfa26ca8e5f0a263a82beb1736ee64b","src/stats/univariate/bootstrap.rs":"44cf684cd5c77110d160feb52855ab4870439a28a5634ec4ed32bad50a743dd7","src/stats/univariate/kde/kernel.rs":"90af975f4c4bc0d3
7bba619655ce4decf892636a3d007b9a66d46d8a305afa63","src/stats/univariate/kde/mod.rs":"e38f26d97025b3795d677a1b771d181183bd052eb955b6de5680c15413b6cab2","src/stats/univariate/mixed.rs":"4e5d8a59071dd7f601ccb57042b2c78ade53de5b755b92bde94cd226da197a5d","src/stats/univariate/mod.rs":"c556cdf169cd2e59bfd4ba9b8e1a9c60b208ef1330feb98a37a4b2e5f79dd501","src/stats/univariate/outliers/mod.rs":"8977fca40f49c77d833e1f3cd025cdee6939f0ce24e876a4afc54fb87785e03e","src/stats/univariate/outliers/tukey.rs":"03f561025f773ce31947fe2e889a2a71fa8bac24621a90d112507dd19ceab72c","src/stats/univariate/percentiles.rs":"62018b725b78cdf0c9c204d16dc6cb454bc685acad916cf42d605eae09a36ff2","src/stats/univariate/resamples.rs":"a3013f0a53f465ee8f4c9b95afdece74e6521213cf4525d75ab1d7f390cfed70","src/stats/univariate/sample.rs":"949904b9fbcc8ea1c815a5022f9fb2a9ef001821d05845d499ab83238868eb1a","tests/criterion_tests.rs":"92cd1132e8c9612bbcc4e7cd4f3845755b4df75c47d50481d189ddc99bb0dbeb"},"package":"f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f"} \ No newline at end of file
diff --git a/vendor/criterion/CHANGELOG.md b/vendor/criterion/CHANGELOG.md
index aa7dc1951..0f7cd1da0 100755..100644
--- a/vendor/criterion/CHANGELOG.md
+++ b/vendor/criterion/CHANGELOG.md
@@ -1,4 +1,5 @@
# Changelog
+
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
@@ -6,6 +7,57 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
## [Unreleased]
+## [0.5.1] - 2023-05-26
+
+### Fixed
+ - Quick mode (--quick) no longer crashes with measured times over 5 seconds when --noplot is not active
+
+## [0.5.0] - 2023-05-23
+
+### Changed
+- Replaced lazy_static dependency with once_cell
+- Improved documentation of the `html_reports` feature
+- Replaced atty dependency with is-terminal
+- MSRV bumped to 1.64
+- Upgraded clap dependency to v4
+- Upgraded tempfile dependency to v3.5.0
+
+### Fixed
+- Quick mode (`--quick`) no longer outputs 1ms for measured times over 5 seconds
+- Documentation updates
+
+## [0.4.0] - 2022-09-10
+
+### Removed
+
+- The `Criterion::can_plot` function has been removed.
+- The `Criterion::bench_function_over_inputs` function has been removed.
+- The `Criterion::bench_functions` function has been removed.
+- The `Criterion::bench` function has been removed.
+
+### Changed
+
+- HTML report hidden behind non-default feature flag: 'html_reports'
+- Standalone support (ie without cargo-criterion) feature flag: 'cargo_bench_support'
+- MSRV bumped to 1.57
+- `rayon` and `plotters` are optional (and default) dependencies.
+- Status messages ('warming up', 'analyzing', etc) are printed to stderr, benchmark results are printed to stdout.
+- Accept subsecond durations for `--warm-up-time`, `--measurement-time` and `--profile-time`.
+- Replaced serde_cbor with ciborium because the former is no longer maintained.
+- Upgrade clap to v3 and regex to v1.5.
+
+### Added
+
+- A `--discard-baseline` flag for discarding rather than saving benchmark results.
+- Formal support for benchmarking code compiled to web-assembly.
+- A `--quiet` flag for printing just a single line per benchmark.
+- A `Throughput::BytesDecimal` option for measuring throughput in bytes but printing them using
+ decimal units like kilobytes instead of binary units like kibibytes.
+
+### Fixed
+- When using `bench_with_input`, the input parameter will now be passed through `black_box` before
+ passing it to the benchmark.
+
## [0.3.6] - 2022-07-06
### Changed
- MSRV bumped to 1.49
@@ -14,20 +66,26 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- Clippy fixes
## [0.3.5] - 2021-07-26
+
### Fixed
+
- Corrected `Criterion.toml` in the book.
- Corrected configuration typo in the book.
### Changed
+
- Bump plotters dependency to always include a bug-fix.
- MSRV bumped to 1.46.
## [0.3.4] - 2021-01-24
+
### Added
+
- Added support for benchmarking async functions
- Added `with_output_color` for enabling or disabling CLI output coloring programmatically.
### Fixed
+
- Criterion.rs will now give a clear error message in case of benchmarks that take zero time.
- Added some extra code to ensure that every sample has at least one iteration.
- Added a notice to the `--help` output regarding "unrecognized option" errors.
@@ -36,19 +94,20 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- Criterion.rs will now automatically detect the right output directory.
### Deprecated
+
- `Criterion::can_plot` is no longer useful and is deprecated pending deletion in 0.4.0.
-- `Benchmark` and `ParameterizedBenchmark` were already hidden from documentation, but are now
+- `Benchmark` and `ParameterizedBenchmark` were already hidden from documentation, but are now
formally deprecated pending deletion in 0.4.0. Callers should use `BenchmarkGroup` instead.
- `Criterion::bench_function_over_inputs`, `Criterion::bench_functions`, and `Criterion::bench` were
already hidden from documentation, but are now formally deprecated pending deletion in 0.4.0.
Callers should use `BenchmarkGroup` instead.
-- Three new optional features have been added; "html_reports", "csv_output" and
- "cargo_bench_support". These features currently do nothing except disable a warning message at
- runtime, but in version 0.4.0 they will be used to enable HTML report generation, CSV file
- generation, and the ability to run in cargo-bench (as opposed to [cargo-criterion]).
+- Three new optional features have been added; "html_reports", "csv_output" and
+ "cargo_bench_support". These features currently do nothing except disable a warning message at
+ runtime, but in version 0.4.0 they will be used to enable HTML report generation, CSV file
+ generation, and the ability to run in cargo-bench (as opposed to [cargo-criterion]).
"cargo_bench_support" is enabled by default, but "html_reports" and "csv_output"
are not. If you use Criterion.rs' HTML reports, it is recommended to switch to [cargo-criterion].
- If you use CSV output, it is recommended to switch to [cargo-criterion] and use the
+ If you use CSV output, it is recommended to switch to [cargo-criterion] and use the
`--message-format=json` option for machine-readable output instead. A warning message will be
printed at the start of benchmark runs which do not have "html_reports" or "cargo_bench_support"
enabled, but because CSV output is not widely used it has no warning.
@@ -56,11 +115,13 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
[cargo-criterion]: https://github.com/bheisler/cargo-criterion
## [0.3.3] - 2020-06-29
+
### Added
+
- Added `CRITERION_HOME` environment variable to set the directory for Criterion to store
- its results and charts in.
-- Added support for [cargo-criterion]. The long-term goal here is to remove code from Criterion-rs
- itself to improve compile times, as well as to add features to `cargo-criterion` that are
+ its results and charts in.
+- Added support for [cargo-criterion]. The long-term goal here is to remove code from Criterion-rs
+ itself to improve compile times, as well as to add features to `cargo-criterion` that are
difficult to implement in Criterion-rs.
- Add sampling mode option for benchmarks. This allows the user to change how Criterion.rs chooses
the iteration counts in each sample. By default, nothing will change for most benchmarks, but
@@ -68,11 +129,13 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
This affects the statistics and plots generated.
### Changed
+
- The serialization format for some of the files has changed. This may cause your first benchmark
run after updating to produce errors, but they're harmless and will go away after running the
benchmarks once.
### Fixed
+
- Fixed a bug where the current measurement was not shown on the relative regression plot.
- Fixed rare panic in the plotters backend.
- Panic with a clear error message (rather than panicking messily later on) when the user sets the
@@ -80,7 +143,9 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- Escape single quotes in benchmark names when generating Gnuplot scripts.
## [0.3.2] - 2020-04-26
+
### Added
+
- Added `?Sized` bound to benchmark parameter types, which allows dynamically sized types like
`&str` and `&[T]` to be used as benchmark parameters.
- Added the `--output-format <format>` command-line option. If `--output-format bencher` is passed,
@@ -92,15 +157,18 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
when running tests or benchmarks and allowing stdout output from other tests.
### Fixed
+
- Fixed panic when environment variables contains non-UTF8 characters.
-- Fixed panic when `CRITERION_DEBUG` or `CRITERION_TARGET_DIR` environment variables contain
+- Fixed panic when `CRITERION_DEBUG` or `CRITERION_TARGET_DIR` environment variables contain
non-UTF8 characters.
## [0.3.1] - 2020-01-25
+
### Added
-- Added new plotting backend using the `plotters` crate. Implementation generously provided by Hao
+
+- Added new plotting backend using the `plotters` crate. Implementation generously provided by Hao
Hou, author of the `plotters` crate.
-- Added `--plotting-backend` command-line option to select the plotting backend. The existing
+- Added `--plotting-backend` command-line option to select the plotting backend. The existing
gnuplot backend will be used by default when available, and the plotters backend will be used when
gnuplot is not available or when requested.
- Added `Criterion::plotting_backend()` function to configure the plotting backend in code.
@@ -109,6 +177,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- Benchmark filters can now be regular expressions.
### Fixed
+
- Fixed `fibonacci` functions.
- Fixed `#[criterion]` benchmarks ignoring the command-line options.
- Fixed incorrect scaling of the violin plots.
@@ -116,11 +185,13 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
sample count.
- Fix potential panic when `nresamples` is set too low. Also added a warning
against setting `nresamples` too low.
-- Fixed issue where a slow outer closure would cause Criterion.rs to calculate
+- Fixed issue where a slow outer closure would cause Criterion.rs to calculate
the wrong estimated time and number of iterations in the warm-up phase.
## [0.3.0] - 2019-08-25
+
### Added
+
- Added support for plugging in custom measurements (eg. processor counters)
into Criterion.rs' measurement and analysis.
- Added support for plugging in instrumentation for internal profilers such as
@@ -131,7 +202,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
`BenchmarkGroup` performs the same function as all of the above, but is cleaner to use and more
powerful and flexible. All of these types/functions are now soft-deprecated (meaning they're
hidden from the documentation and should not be used in new code). They will be fully deprecated
- at some point in the 0.3.* series and removed in 0.4.0.
+ at some point in the 0.3.\* series and removed in 0.4.0.
- `iter_custom` - a "timing loop" that allows the caller to perform their own measurements. This is
useful for complex measurements that don't fit into the usual mode of calling a lambda in a loop.
- If the benchmark cannot be completed in approximately the requested measurement time,
@@ -140,67 +211,84 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- Added command-line options to set the defaults for warm-up time, measurement-time, etc.
### Changed
+
- The `raw.csv` file format has been changed slightly. The `sample_time_nanos` field has been split
into `sample_measured_value` and `unit` fields to accommodate custom measurements.
- Throughput has been expanded from u32 to u64 to accommodate very large input sizes.
### Fixed
+
- Fixed possible invalid file name error on Windows
- Fixed potential case where data for two different benchmarks would be stored in the same directory.
### Removed
+
- Removed the `--measure-only` command-line argument; it was deprecated in favor of `--profile-time`
in 0.2.6.
-- External program benchmarks have been removed; they were deprecated in 0.2.6. The new
+- External program benchmarks have been removed; they were deprecated in 0.2.6. The new
`iter_custom` timing loop can be used as a substitute; see `benches/external_process.rs` for an
example of this.
### Deprecated
+
- The `--test` argument is now deprecated. To test benchmarks, use `cargo test --benches`.
## [0.2.11] - 2019-04-08
+
### Added
+
- Enabled automatic text-coloring on Windows.
### Fixed
+
- Fixed panic caused by outdated files after benchmark names or types were changed.
- Reduced timing overhead of `Criterion::iter_batched/iter_batched_ref`.
## [0.2.10] - 2019-02-09
+
### Added
-- Added `iter_batched/iter_batched_ref` timing loops, which allow for setup (like
+
+- Added `iter_batched/iter_batched_ref` timing loops, which allow for setup (like
`iter_with_setup/iter_with_large_setup`) and exclude drop (like `iter_with_large_drop`) but
measure the runtime more accurately, use less memory and are more flexible.
### Deprecated
+
- `iter_with_setup/iter_with_large_setup` are now deprecated in favor of `iter_batched`.
## [0.2.9] - 2019-01-24
+
### Changed
+
- Criterion.rs no longer depends on the default features of the `rand-core` crate. This fixes some
downstream crates which use `rand` in a `no_std` context.
## [0.2.8] - 2019-01-20
+
### Changed
+
- Criterion.rs now uses `rayon` internally instead of manual `unsafe` code built with thread-scoped.
- Replaced handlebars templates with [TinyTemplate](https://github.com/bheisler/TinyTemplate)
- Merged `criterion-stats` crate into `criterion` crate. `criterion-stats` will no longer receive
updates.
-- Replaced or removed various other dependencies to reduce the size of Criterion.rs' dependency
+- Replaced or removed various other dependencies to reduce the size of Criterion.rs' dependency
tree.
## [0.2.7] - 2018-12-29
### Fixed
+
- Fixed version numbers to prevent incompatibilities between `criterion` and `criterion-stats`
crates.
## [0.2.6] - 2018-12-27 - Yanked
+
### Added
+
- Added `--list` command line option, which lists the benchmarks but does not run them, to match
`cargo test -- --list`.
- Added README/CONTRIBUTING/LICENSE files to sub-crates.
-- Displays change in throughput in the command-line and HTML output as well as change in iteration
+- Displays change in throughput in the command-line and HTML output as well as change in iteration
time.
- Benchmarks with multiple functions and multiple values will now generate a per-value summary
report file in addition to the existing per-function one.
@@ -209,8 +297,9 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
(now-deprecated) `--measure-only` argument.
### Fixed
-- Functions passed to `Bencher::iter_with_large_setup` can now return output. This is necessary to
- prevent the compiler from optimizing away the benchmark. This is technically a breaking change -
+
+- Functions passed to `Bencher::iter_with_large_setup` can now return output. This is necessary to
+ prevent the compiler from optimizing away the benchmark. This is technically a breaking change -
that function requires a new type parameter. It's so unlikely to break existing code that I
decided not to delay this for a breaking-change release.
- Reduced measurement overhead for the `iter_with_large_setup` and `iter_with_drop` methods.
@@ -220,16 +309,17 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- Criterion.rs macros no longer require user to `use criterion::Criterion;`
- Criterion.rs no longer initializes a logger, meaning that it will no longer conflict with user
code which does.
-- Criterion.rs no longer fails to parse gnuplot version numbers like
+- Criterion.rs no longer fails to parse gnuplot version numbers like
`gnuplot 5.2 patchlevel 5a (Gentoo revision r0)`
-- Criterion.rs no longer prints an error message that gnuplot couldn't be found when chart
- generation is disabled (either by `Criterion::without_plots`, `--noplot` or disabling the
+- Criterion.rs no longer prints an error message that gnuplot couldn't be found when chart
+ generation is disabled (either by `Criterion::without_plots`, `--noplot` or disabling the
HTML reports feature)
- Benchmark names are now automatically truncated to 100 characters and a number may be added to
make them unique. This fixes a problem where gnuplot would crash if the title was extremely long,
and also improves the general usability of Criterion.rs.
### Changed
+
- Changed timing model of `iter_with_large_setup` to exclude time spent dropping values returned
by the routine. Time measurements taken with 0.2.6 using these methods may differ from those taken
with 0.2.5.
@@ -237,6 +327,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
tree in the benchmark index. This is to accommodate the new per-value summary reports.
### Deprecated
+
- Deprecated the `--measure-only` command-line-argument in favor of `--profile-time`. This will be
removed in 0.3.0.
- External-program benchmarks are now deprecated. They will be removed in 0.3.0.
@@ -246,11 +337,13 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
may be breaking changes that are not listed here.
## [0.2.5] - 2018-08-27
+
### Fixed
+
- Fixed links from generated report files to documentation.
- Fixed formatting for very large percentage changes (>1000%)
- Sorted the benchmarks in the index report by name
-- Fixed case where benchmark ID with special characters would cause Criterion.rs to open the wrong
+- Fixed case where benchmark ID with special characters would cause Criterion.rs to open the wrong
file and log an error message.
- Fixed case where running `cargo clean; cargo bench -- <filter>` would cause Criterion.rs to log
an error message.
@@ -261,11 +354,14 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- Criterion.rs now honors the `CARGO_TARGET_DIR` environment variable.
### Added
+
- Criterion.rs will generate a chart showing the effects of changes in input (or input size) for all
benchmarks with numeric inputs or throughput, not just for those which compare multiple functions.
## [0.2.4] 2018-07-08
+
### Added
+
- Added a pair of flags, `--save-baseline` and `--baseline`, which change
how benchmark results are stored and compared. This is useful for
working against a fixed baseline(eg. comparing progress on an
@@ -286,7 +382,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
private implementation details.
### Fixed
-- The `sample_size` method on the `Criterion`, `Benchmark` and
+
+- The `sample_size` method on the `Criterion`, `Benchmark` and
`ParameterizedBenchmark` structs has been changed to panic if the sample size
is less than 2. Other parts of the code require this and will panic if the
sample size is 1, so this is not considered to be a breaking change.
@@ -295,13 +392,16 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
directory paths, to avoid generating invalid or unexpected paths.
## [0.2.3] - 2018-04-14
+
### Fixed
+
- Criterion.rs will now panic with a clear error message if the user attempts to run
a benchmark which doesn't call the `Bencher::iter` function or a related function,
rather than failing in an uncontrolled manner later.
- Fixed broken links in some more summary reports.
### Added
+
- Added a `--measure-only` argument which causes the benchmark executable to run the
warmup and measurement and then move on to the next benchmark without analyzing or
saving data. This is useful to prevent Criterion.rs' analysis code from appearing
@@ -310,12 +410,16 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
the other reports for easy navigation.
## [0.2.2] - 2018-03-25
+
### Fixed
+
- Fixed broken links in some summary reports.
- Work around apparent rustc bug in >= 1.24.0.
## [0.2.1] - 2018-02-24
+
### Added
+
- HTML reports are now a default Cargo feature. If you wish to disable HTML reports,
disable Criterion.rs' default features. Doing so will allow compatibility with
older Rust versions such as 1.20. If you wish to continue using HTML reports, you
@@ -324,14 +428,18 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
inputs.
### Changed
+
- The plots and HTML reports are now generated in a `report` folder.
### Fixed
+
- Underscores in benchmark names will no longer cause subscripted characters to
appear in generated plots.
## [0.2.0] - 2018-02-05
+
### Added
+
- Added `Criterion.bench` function, which accepts either a `Benchmark` or
`ParameterizedBenchmark`. These new structures allow for custom per-benchmark
configuration as well as more complex benchmark grouping (eg. comparing a Rust
@@ -344,6 +452,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- Added `--noplot` command line option to disable plot generation.
### Changed
+
- The builder methods on the Criterion struct now take and return self by value
for easier chaining. Functions which configure a Criterion structure will need
to be updated accordingly, or will need to be changed to work with the
@@ -357,16 +466,20 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- The generated plots are stored in `target/criterion` rather than `.criterion`.
### Removed
+
- The hidden `criterion::ConfidenceInterval` and`criterion::Estimate` types are
no longer publicly accessible.
- The `Criterion.summarize` function has been removed.
### Fixed
+
- Fixed the relative mean and median reports.
- Fixed panic while summarizing benchmarks.
## [0.1.2] - 2018-01-12
+
### Changed
+
- Criterion.rs is now stable-compatible!
- Criterion.rs now includes its own stable-compatible `black_box` function.
Some benchmarks may now be affected by dead-code-elimination where they
@@ -377,34 +490,40 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
and reduce noise.
### Added
+
- Running benchmarks with the variable "CRITERION_DEBUG" in the environment will
cause Criterion.rs to generate extra debug output and save the gnuplot scripts
alongside the generated plots.
### Fixed
+
- Don't panic on IO errors or gnuplot failures
- Fix generation of invalid gnuplot scripts when benchmarking over inputs and inputs include values <= 0.
- Bug where benchmarks would run one sample fewer than was configured.
### Removed
+
- Generated plots will no longer use log-scale.
## [0.1.1] - 2017-12-12
+
### Added
+
- A changelog file.
- Added a chapter to the book on how Criterion.rs collects and analyzes data.
- Added macro rules to generate a test harness for use with `cargo bench`.
Benchmarks defined without these macros should continue to work.
- New contribution guidelines
- Criterion.rs can selectively run benchmarks. See the Command-line page for
-more details
+ more details
## 0.1.0 - 2017-12-02
+
### Added
-- Initial release on Crates.io.
+- Initial release on Crates.io.
-[Unreleased]: https://github.com/bheisler/criterion.rs/compare/0.3.6...HEAD
+[Unreleased]: https://github.com/bheisler/criterion.rs/compare/0.4.0...HEAD
[0.1.1]: https://github.com/bheisler/criterion.rs/compare/0.1.0...0.1.1
[0.1.2]: https://github.com/bheisler/criterion.rs/compare/0.1.1...0.1.2
[0.2.0]: https://github.com/bheisler/criterion.rs/compare/0.1.2...0.2.0
@@ -425,4 +544,7 @@ more details
[0.3.3]: https://github.com/bheisler/criterion.rs/compare/0.3.2...0.3.3
[0.3.4]: https://github.com/bheisler/criterion.rs/compare/0.3.3...0.3.4
[0.3.5]: https://github.com/bheisler/criterion.rs/compare/0.3.4...0.3.5
-[0.3.5]: https://github.com/bheisler/criterion.rs/compare/0.3.5...0.3.6
+[0.3.6]: https://github.com/bheisler/criterion.rs/compare/0.3.5...0.3.6
+[0.4.0]: https://github.com/bheisler/criterion.rs/compare/0.3.6...0.4.0
+[0.5.0]: https://github.com/bheisler/criterion.rs/compare/0.4.0...0.5.0
+[0.5.1]: https://github.com/bheisler/criterion.rs/compare/0.5.0...0.5.1
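The changelog entries above introduce `Throughput::BytesDecimal` (0.4.0) and point migrating callers at `BenchmarkGroup`. A minimal sketch of how the two combine under criterion 0.4/0.5; the group name, payload size, and the `decimal_throughput` function are illustrative, not taken from this diff:

```rust
use criterion::{criterion_group, criterion_main, Criterion, Throughput};

const SIZE: usize = 1024 * 1024; // illustrative 1 MiB payload

fn decimal_throughput(c: &mut Criterion) {
    let mut group = c.benchmark_group("decimal_throughput");
    // BytesDecimal (added in 0.4.0) reports rates in decimal units (kB, MB)
    // rather than binary units (KiB, MiB); the measurements are identical.
    group.throughput(Throughput::BytesDecimal(SIZE as u64));
    group.bench_function("clone", |b| {
        let v: Vec<u8> = vec![0u8; SIZE];
        // Time the clone, but drop the cloned Vec outside the measurement.
        b.iter_with_large_drop(|| v.clone());
    });
    group.finish();
}

criterion_group!(benches, decimal_throughput);
criterion_main!(benches);
```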
diff --git a/vendor/criterion/CONTRIBUTING.md b/vendor/criterion/CONTRIBUTING.md
index 59ae0262e..59ae0262e 100755..100644
--- a/vendor/criterion/CONTRIBUTING.md
+++ b/vendor/criterion/CONTRIBUTING.md
diff --git a/vendor/criterion/Cargo.toml b/vendor/criterion/Cargo.toml
index 85164c711..ad36216b9 100644
--- a/vendor/criterion/Cargo.toml
+++ b/vendor/criterion/Cargo.toml
@@ -12,7 +12,7 @@
[package]
edition = "2018"
name = "criterion"
-version = "0.3.6"
+version = "0.5.1"
authors = [
"Jorge Aparicio <japaricious@gmail.com>",
"Brook Heisler <brookheisler@gmail.com>",
@@ -26,7 +26,7 @@ keywords = [
"benchmark",
]
categories = ["development-tools::profiling"]
-license = "Apache-2.0/MIT"
+license = "Apache-2.0 OR MIT"
repository = "https://github.com/bheisler/criterion.rs"
[package.metadata.docs.rs]
@@ -44,41 +44,50 @@ bench = false
name = "bench_main"
harness = false
+[dependencies.anes]
+version = "0.1.4"
+
[dependencies.async-std]
version = "1.9"
optional = true
-[dependencies.atty]
-version = "~0.2.6"
-
[dependencies.cast]
version = "0.3"
+[dependencies.ciborium]
+version = "0.2.0"
+
[dependencies.clap]
-version = "2.34"
+version = "4"
+features = ["std"]
default-features = false
[dependencies.criterion-plot]
-version = "0.4.4"
+version = "0.5.0"
[dependencies.csv]
version = "1.1"
+optional = true
[dependencies.futures]
version = "0.3"
optional = true
default_features = false
+[dependencies.is-terminal]
+version = "0.4.6"
+
[dependencies.itertools]
version = "0.10"
-[dependencies.lazy_static]
-version = "1.4"
-
[dependencies.num-traits]
version = "0.2"
+features = ["std"]
default-features = false
+[dependencies.once_cell]
+version = "1.14"
+
[dependencies.oorandom]
version = "11.1"
@@ -89,22 +98,21 @@ features = [
"area_series",
"line_series",
]
+optional = true
default-features = false
[dependencies.rayon]
version = "1.3"
+optional = true
[dependencies.regex]
-version = "1.3"
+version = "1.5"
features = ["std"]
default-features = false
[dependencies.serde]
version = "1.0"
-[dependencies.serde_cbor]
-version = "0.11"
-
[dependencies.serde_derive]
version = "1.0"
@@ -144,7 +152,7 @@ default-features = false
version = "0.8"
[dev-dependencies.tempfile]
-version = "3.2.0"
+version = "~3.5.0"
[features]
async = ["futures"]
@@ -165,11 +173,17 @@ async_tokio = [
"async",
]
cargo_bench_support = []
-csv_output = []
-default = ["cargo_bench_support"]
+csv_output = ["csv"]
+default = [
+ "rayon",
+ "plotters",
+ "cargo_bench_support",
+]
html_reports = []
real_blackbox = []
stable = [
+ "csv_output",
+ "html_reports",
"async_futures",
"async_smol",
"async_tokio",
diff --git a/vendor/criterion/LICENSE-APACHE b/vendor/criterion/LICENSE-APACHE
index 16fe87b06..16fe87b06 100755..100644
--- a/vendor/criterion/LICENSE-APACHE
+++ b/vendor/criterion/LICENSE-APACHE
diff --git a/vendor/criterion/LICENSE-MIT b/vendor/criterion/LICENSE-MIT
index ac0fda003..ac0fda003 100755..100644
--- a/vendor/criterion/LICENSE-MIT
+++ b/vendor/criterion/LICENSE-MIT
diff --git a/vendor/criterion/README.md b/vendor/criterion/README.md
index 2f5262e19..b6f24138e 100755..100644
--- a/vendor/criterion/README.md
+++ b/vendor/criterion/README.md
@@ -56,7 +56,7 @@ To start with Criterion.<span></span>rs, add the following to your `Cargo.toml`
```toml
[dev-dependencies]
-criterion = "0.3"
+criterion = { version = "0.4", features = ["html_reports"] }
[[bench]]
name = "my_benchmark"
@@ -107,7 +107,7 @@ First, thank you for contributing.
One great way to contribute to Criterion.<span></span>rs is to use it for your own benchmarking needs and report your experiences, file and comment on issues, etc.
Code or documentation improvements in the form of pull requests are also welcome. If you're not
-sure what to work on, try checking the
+sure what to work on, try checking the
[Beginner label](https://github.com/bheisler/criterion.rs/issues?q=is%3Aissue+is%3Aopen+label%3ABeginner).
If your issues or pull requests have no response after a few days, feel free to ping me (@bheisler).
@@ -117,9 +117,9 @@ For more details, see the [CONTRIBUTING.md file](https://github.com/bheisler/cri
### Compatibility Policy
Criterion.<span></span>rs supports the last three stable minor releases of Rust. At time of
-writing, this means Rust 1.50 or later. Older versions may work, but are not guaranteed.
+writing, this means Rust 1.59 or later. Older versions may work, but are not guaranteed.
-Currently, the oldest version of Rust believed to work is 1.49. Future versions of Criterion.<span></span>rs may
+Currently, the oldest version of Rust believed to work is 1.57. Future versions of Criterion.<span></span>rs may
break support for such old versions, and this will not be considered a breaking change. If you
require Criterion.<span></span>rs to work on old versions of Rust, you will need to stick to a
specific patch version of Criterion.<span></span>rs.
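For context alongside the README's getting-started snippet, this is the usual shape of the benchmark file that the `[[bench]] name = "my_benchmark"` entry above points at; the `fibonacci` function is the stock documentation example, assumed here rather than copied from this diff:

```rust
// benches/my_benchmark.rs
use criterion::{black_box, criterion_group, criterion_main, Criterion};

fn fibonacci(n: u64) -> u64 {
    match n {
        0 => 1,
        1 => 1,
        n => fibonacci(n - 1) + fibonacci(n - 2),
    }
}

fn criterion_benchmark(c: &mut Criterion) {
    // black_box keeps the constant argument opaque to the optimizer,
    // so the call is not folded away at compile time.
    c.bench_function("fib 20", |b| b.iter(|| fibonacci(black_box(20))));
}

criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
```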
diff --git a/vendor/criterion/appveyor.yml b/vendor/criterion/appveyor.yml
index f883c8646..2412672b8 100755..100644
--- a/vendor/criterion/appveyor.yml
+++ b/vendor/criterion/appveyor.yml
@@ -20,13 +20,14 @@ install:
build: false
+# Disabled in favor of github actions
test_script:
- - cargo build --release
- - cargo test --all --release
- - cargo build --benches --all --release
+# - cargo build --release
+# - cargo test --all --release
+# - cargo build --benches --all --release
# Disable benchmarking until performance can be improved.
# - cargo bench
- - cargo doc --release --all --no-deps
+# - cargo doc --release --all --no-deps
branches:
only:
diff --git a/vendor/criterion/benches/bench_main.rs b/vendor/criterion/benches/bench_main.rs
index a153b23ec..a153b23ec 100755..100644
--- a/vendor/criterion/benches/bench_main.rs
+++ b/vendor/criterion/benches/bench_main.rs
diff --git a/vendor/criterion/benches/benchmarks/async_measurement_overhead.rs b/vendor/criterion/benches/benchmarks/async_measurement_overhead.rs
index 0c9605a17..0c9605a17 100755..100644
--- a/vendor/criterion/benches/benchmarks/async_measurement_overhead.rs
+++ b/vendor/criterion/benches/benchmarks/async_measurement_overhead.rs
diff --git a/vendor/criterion/benches/benchmarks/compare_functions.rs b/vendor/criterion/benches/benchmarks/compare_functions.rs
index ce44180f9..d9af837a8 100755..100644
--- a/vendor/criterion/benches/benchmarks/compare_functions.rs
+++ b/vendor/criterion/benches/benchmarks/compare_functions.rs
@@ -1,6 +1,4 @@
-#![allow(deprecated)]
-
-use criterion::{criterion_group, BenchmarkId, Criterion, Fun, ParameterizedBenchmark};
+use criterion::{criterion_group, BenchmarkId, Criterion};
fn fibonacci_slow(n: u64) -> u64 {
match n {
@@ -27,23 +25,10 @@ fn fibonacci_fast(n: u64) -> u64 {
}
fn compare_fibonaccis(c: &mut Criterion) {
- let fib_slow = Fun::new("Recursive", |b, i| b.iter(|| fibonacci_slow(*i)));
- let fib_fast = Fun::new("Iterative", |b, i| b.iter(|| fibonacci_fast(*i)));
-
- let functions = vec![fib_slow, fib_fast];
+ let mut group = c.benchmark_group("Fibonacci");
- c.bench_functions("Fibonacci", functions, 20);
-}
-fn compare_fibonaccis_builder(c: &mut Criterion) {
- c.bench(
- "Fibonacci2",
- ParameterizedBenchmark::new(
- "Recursive",
- |b, i| b.iter(|| fibonacci_slow(*i)),
- vec![20u64, 21u64],
- )
- .with_function("Iterative", |b, i| b.iter(|| fibonacci_fast(*i))),
- );
+ group.bench_with_input("Recursive", &20, |b, i| b.iter(|| fibonacci_slow(*i)));
+ group.bench_with_input("Iterative", &20, |b, i| b.iter(|| fibonacci_fast(*i)));
}
fn compare_fibonaccis_group(c: &mut Criterion) {
let mut group = c.benchmark_group("Fibonacci3");
@@ -58,28 +43,4 @@ fn compare_fibonaccis_group(c: &mut Criterion) {
group.finish()
}
-fn compare_looped(c: &mut Criterion) {
- use criterion::black_box;
-
- c.bench(
- "small",
- ParameterizedBenchmark::new("unlooped", |b, i| b.iter(|| i + 10), vec![10]).with_function(
- "looped",
- |b, i| {
- b.iter(|| {
- for _ in 0..10_000 {
- black_box(i + 10);
- }
- })
- },
- ),
- );
-}
-
-criterion_group!(
- fibonaccis,
- compare_fibonaccis,
- compare_fibonaccis_builder,
- compare_fibonaccis_group,
- compare_looped
-);
+criterion_group!(fibonaccis, compare_fibonaccis, compare_fibonaccis_group,);
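The removed `compare_fibonaccis_builder` exercised `ParameterizedBenchmark` over two inputs (20 and 21), while its replacement above benchmarks a single input. A sketch of the equivalent multi-input comparison using `BenchmarkGroup` with `BenchmarkId`, with stand-in fibonacci helpers so the snippet compiles on its own:

```rust
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};

// Stand-ins for the fibonacci_slow/fibonacci_fast helpers defined in
// compare_functions.rs above.
fn fibonacci_slow(n: u64) -> u64 {
    match n {
        0 | 1 => 1,
        n => fibonacci_slow(n - 1) + fibonacci_slow(n - 2),
    }
}

fn fibonacci_fast(n: u64) -> u64 {
    (0..n).fold((1u64, 1u64), |(a, b), _| (b, a + b)).0
}

fn compare_fibonaccis_inputs(c: &mut Criterion) {
    let mut group = c.benchmark_group("Fibonacci2");
    for i in [20u64, 21u64] {
        // BenchmarkId pairs a function name with the input value, so the
        // report can compare "Recursive" and "Iterative" per input.
        group.bench_with_input(BenchmarkId::new("Recursive", i), &i, |b, i| {
            b.iter(|| fibonacci_slow(*i))
        });
        group.bench_with_input(BenchmarkId::new("Iterative", i), &i, |b, i| {
            b.iter(|| fibonacci_fast(*i))
        });
    }
    group.finish();
}

criterion_group!(benches, compare_fibonaccis_inputs);
criterion_main!(benches);
```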
diff --git a/vendor/criterion/benches/benchmarks/custom_measurement.rs b/vendor/criterion/benches/benchmarks/custom_measurement.rs
index c685f382f..449f9030c 100755..100644
--- a/vendor/criterion/benches/benchmarks/custom_measurement.rs
+++ b/vendor/criterion/benches/benchmarks/custom_measurement.rs
@@ -14,7 +14,7 @@ impl ValueFormatter for HalfSecFormatter {
fn format_throughput(&self, throughput: &Throughput, value: f64) -> String {
match *throughput {
- Throughput::Bytes(bytes) => {
+ Throughput::Bytes(bytes) | Throughput::BytesDecimal(bytes) => {
format!("{} b/s/2", (bytes as f64) / (value * 2f64 * 10f64.powi(-9)))
}
Throughput::Elements(elems) => format!(
@@ -39,7 +39,7 @@ impl ValueFormatter for HalfSecFormatter {
values: &mut [f64],
) -> &'static str {
match *throughput {
- Throughput::Bytes(bytes) => {
+ Throughput::Bytes(bytes) | Throughput::BytesDecimal(bytes) => {
for val in values {
*val = (bytes as f64) / (*val * 2f64 * 10f64.powi(-9))
}
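The two hunks above extend the bench's custom `ValueFormatter` so `Throughput::BytesDecimal` is scaled exactly like `Throughput::Bytes`; the variants differ only in how the resulting rate is printed (decimal vs. binary units), not in the arithmetic. A standalone sketch of that exhaustive match; the `per_second` helper is hypothetical and exists only to show the pattern:

```rust
use criterion::Throughput;

// Hypothetical helper: normalize any Throughput variant to a per-second rate,
// using the same exhaustive match the formatter above needs once
// BytesDecimal is part of the enum.
fn per_second(throughput: &Throughput, elapsed_secs: f64) -> f64 {
    match *throughput {
        Throughput::Bytes(n) | Throughput::BytesDecimal(n) => n as f64 / elapsed_secs,
        Throughput::Elements(n) => n as f64 / elapsed_secs,
    }
}

fn main() {
    let t = Throughput::BytesDecimal(1_000_000);
    println!("{} bytes/sec", per_second(&t, 0.5));
}
```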
diff --git a/vendor/criterion/benches/benchmarks/external_process.py b/vendor/criterion/benches/benchmarks/external_process.py
index 376c4756b..376c4756b 100755..100644
--- a/vendor/criterion/benches/benchmarks/external_process.py
+++ b/vendor/criterion/benches/benchmarks/external_process.py
diff --git a/vendor/criterion/benches/benchmarks/external_process.rs b/vendor/criterion/benches/benchmarks/external_process.rs
index c823df5cd..7667a53b8 100755..100644
--- a/vendor/criterion/benches/benchmarks/external_process.rs
+++ b/vendor/criterion/benches/benchmarks/external_process.rs
@@ -14,7 +14,6 @@ fn create_command() -> Command {
command
}
-#[allow(deprecated)]
fn python_fibonacci(c: &mut Criterion) {
let has_python3 = Command::new("python3")
.arg("--version")
diff --git a/vendor/criterion/benches/benchmarks/iter_with_large_drop.rs b/vendor/criterion/benches/benchmarks/iter_with_large_drop.rs
index ee9a8e932..ee01de057 100755..100644
--- a/vendor/criterion/benches/benchmarks/iter_with_large_drop.rs
+++ b/vendor/criterion/benches/benchmarks/iter_with_large_drop.rs
@@ -1,28 +1,22 @@
-#![allow(deprecated)]
-
-use criterion::{criterion_group, Benchmark, Criterion, Throughput};
+use criterion::{criterion_group, Criterion, Throughput};
use std::time::Duration;
const SIZE: usize = 1024 * 1024;
fn large_drop(c: &mut Criterion) {
- c.bench(
- "iter_with_large_drop",
- Benchmark::new("large_drop", |b| {
- let v: Vec<_> = (0..SIZE).map(|i| i as u8).collect();
- b.iter_with_large_drop(|| v.clone());
- })
- .throughput(Throughput::Bytes(SIZE as u64)),
- );
+ let mut group = c.benchmark_group("iter_with_large_drop");
+ group.throughput(Throughput::Bytes(SIZE as u64));
+ group.bench_function("large_drop", |b| {
+ let v: Vec<_> = (0..SIZE).map(|i| i as u8).collect();
+ b.iter_with_large_drop(|| v.clone());
+ });
}
fn small_drop(c: &mut Criterion) {
- c.bench(
- "iter_with_large_drop",
- Benchmark::new("small_drop", |b| {
- b.iter_with_large_drop(|| SIZE);
- }),
- );
+ let mut group = c.benchmark_group("iter_with_large_drop");
+ group.bench_function("small_drop", |b| {
+ b.iter_with_large_drop(|| SIZE);
+ });
}
fn short_warmup() -> Criterion {
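Note that iter_with_large_drop itself is not deprecated: it times the routine but defers dropping each returned value until after the measurement, so the 1 MiB clone above is timed without its deallocation. A minimal standalone sketch of the same pattern (names illustrative):

    use criterion::Criterion;

    fn large_drop(c: &mut Criterion) {
        c.bench_function("clone_only", |b| {
            let v: Vec<u8> = vec![0u8; 1024 * 1024];
            // The clone is timed; dropping the returned Vecs happens
            // outside the measurement, so deallocation is not counted.
            b.iter_with_large_drop(|| v.clone());
        });
    }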
diff --git a/vendor/criterion/benches/benchmarks/iter_with_large_setup.rs b/vendor/criterion/benches/benchmarks/iter_with_large_setup.rs
index b4d53ab55..9ff2b9d5d 100755..100644
--- a/vendor/criterion/benches/benchmarks/iter_with_large_setup.rs
+++ b/vendor/criterion/benches/benchmarks/iter_with_large_setup.rs
@@ -1,29 +1,25 @@
-#![allow(deprecated)]
-
-use criterion::{criterion_group, Benchmark, Criterion, Throughput};
+use criterion::{criterion_group, BatchSize, Criterion, Throughput};
use std::time::Duration;
const SIZE: usize = 1024 * 1024;
fn large_setup(c: &mut Criterion) {
- c.bench(
- "iter_with_large_setup",
- Benchmark::new("large_setup", |b| {
- // NOTE: iter_with_large_setup is deprecated. Use iter_batched instead.
- b.iter_with_large_setup(|| (0..SIZE).map(|i| i as u8).collect::<Vec<_>>(), |v| v)
- })
- .throughput(Throughput::Bytes(SIZE as u64)),
- );
+ let mut group = c.benchmark_group("iter_with_large_setup");
+ group.throughput(Throughput::Bytes(SIZE as u64));
+ group.bench_function("large_setup", |b| {
+ b.iter_batched(
+ || (0..SIZE).map(|i| i as u8).collect::<Vec<_>>(),
+ |v| v,
+ BatchSize::NumBatches(1),
+ )
+ });
}
fn small_setup(c: &mut Criterion) {
- c.bench(
- "iter_with_large_setup",
- Benchmark::new("small_setup", |b| {
- // NOTE: iter_with_large_setup is deprecated. Use iter_batched instead.
- b.iter_with_large_setup(|| SIZE, |size| size)
- }),
- );
+ let mut group = c.benchmark_group("iter_with_large_setup");
+ group.bench_function("small_setup", |b| {
+ b.iter_batched(|| SIZE, |size| size, BatchSize::NumBatches(1))
+ });
}
fn short_warmup() -> Criterion {
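The deleted iter_with_large_setup shim (removed from bencher.rs later in this diff) was literally iter_batched(setup, routine, BatchSize::NumBatches(1)): every input for a sample is built before the timer starts, keeping setup out of the measurement at the cost of holding all inputs in memory at once. Where that memory cost matters, a different batch size is usually preferable; a hedged sketch (identifiers illustrative):

    use criterion::{BatchSize, Criterion};

    fn bench_sort(c: &mut Criterion) {
        c.bench_function("sort_unstable", |b| {
            b.iter_batched(
                || (0..1_000u32).rev().collect::<Vec<_>>(), // setup, untimed
                |mut v| v.sort_unstable(),                  // routine, timed; consumes input
                // SmallInput lets Criterion pick a large batch size suited to
                // cheap inputs; NumBatches(1) puts the entire sample in one
                // batch, matching the removed iter_with_large_setup.
                BatchSize::SmallInput,
            )
        });
    }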
diff --git a/vendor/criterion/benches/benchmarks/iter_with_setup.rs b/vendor/criterion/benches/benchmarks/iter_with_setup.rs
index e65495ca4..e65495ca4 100755..100644
--- a/vendor/criterion/benches/benchmarks/iter_with_setup.rs
+++ b/vendor/criterion/benches/benchmarks/iter_with_setup.rs
diff --git a/vendor/criterion/benches/benchmarks/measurement_overhead.rs b/vendor/criterion/benches/benchmarks/measurement_overhead.rs
index 15b243dab..c424efb01 100755..100644
--- a/vendor/criterion/benches/benchmarks/measurement_overhead.rs
+++ b/vendor/criterion/benches/benchmarks/measurement_overhead.rs
@@ -5,7 +5,7 @@ fn some_benchmark(c: &mut Criterion) {
group.bench_function("iter", |b| b.iter(|| 1));
group.bench_function("iter_with_setup", |b| b.iter_with_setup(|| (), |_| 1));
group.bench_function("iter_with_large_setup", |b| {
- b.iter_with_large_setup(|| (), |_| 1)
+ b.iter_batched(|| (), |_| 1, BatchSize::NumBatches(1))
});
group.bench_function("iter_with_large_drop", |b| b.iter_with_large_drop(|| 1));
group.bench_function("iter_batched_small_input", |b| {
diff --git a/vendor/criterion/benches/benchmarks/mod.rs b/vendor/criterion/benches/benchmarks/mod.rs
index ef85910fc..ef85910fc 100755..100644
--- a/vendor/criterion/benches/benchmarks/mod.rs
+++ b/vendor/criterion/benches/benchmarks/mod.rs
diff --git a/vendor/criterion/benches/benchmarks/sampling_mode.rs b/vendor/criterion/benches/benchmarks/sampling_mode.rs
index af761273a..af761273a 100755..100644
--- a/vendor/criterion/benches/benchmarks/sampling_mode.rs
+++ b/vendor/criterion/benches/benchmarks/sampling_mode.rs
diff --git a/vendor/criterion/benches/benchmarks/special_characters.rs b/vendor/criterion/benches/benchmarks/special_characters.rs
index 6140f9a42..6140f9a42 100755..100644
--- a/vendor/criterion/benches/benchmarks/special_characters.rs
+++ b/vendor/criterion/benches/benchmarks/special_characters.rs
diff --git a/vendor/criterion/benches/benchmarks/with_inputs.rs b/vendor/criterion/benches/benchmarks/with_inputs.rs
index 8eaaf0081..b0b12a89f 100755..100644
--- a/vendor/criterion/benches/benchmarks/with_inputs.rs
+++ b/vendor/criterion/benches/benchmarks/with_inputs.rs
@@ -13,6 +13,15 @@ fn from_elem(c: &mut Criterion) {
});
}
group.finish();
+
+ let mut group = c.benchmark_group("from_elem_decimal");
+ for size in [KB, 2 * KB].iter() {
+ group.throughput(Throughput::BytesDecimal(*size as u64));
+ group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| {
+ b.iter(|| iter::repeat(0u8).take(size).collect::<Vec<_>>());
+ });
+ }
+ group.finish();
}
criterion_group!(benches, from_elem);
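Throughput::BytesDecimal is new in this release: it carries the same u64 byte count as Throughput::Bytes but is reported using decimal multiples (1 KB = 1000 B) rather than binary multiples (1 KiB = 1024 B), hence the parallel from_elem_decimal group. A small sketch of the call-site difference (values illustrative):

    use criterion::Throughput;

    fn throughput_for(bytes: u64, decimal: bool) -> Throughput {
        if decimal {
            // Reported with powers of 1000 (KB/s, MB/s, GB/s).
            Throughput::BytesDecimal(bytes)
        } else {
            // Reported with powers of 1024 (KiB/s, MiB/s, GiB/s).
            Throughput::Bytes(bytes)
        }
    }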
diff --git a/vendor/criterion/ci/install.sh b/vendor/criterion/ci/install.sh
index f82a80d8e..f82a80d8e 100755..100644
--- a/vendor/criterion/ci/install.sh
+++ b/vendor/criterion/ci/install.sh
diff --git a/vendor/criterion/ci/nextest-compat.sh b/vendor/criterion/ci/nextest-compat.sh
new file mode 100755
index 000000000..fed9f2f98
--- /dev/null
+++ b/vendor/criterion/ci/nextest-compat.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+set -ex -o pipefail
+
+CARGO=${CARGO:-cargo}
+
+cd "$(git rev-parse --show-toplevel)"
+
+echo "Checking benches/bench_main..."
+
+$CARGO nextest list --benches
+$CARGO nextest run --benches
diff --git a/vendor/criterion/ci/script.sh b/vendor/criterion/ci/script.sh
index b9f688696..b9f688696 100755..100644
--- a/vendor/criterion/ci/script.sh
+++ b/vendor/criterion/ci/script.sh
diff --git a/vendor/criterion/src/analysis/compare.rs b/vendor/criterion/src/analysis/compare.rs
index a49407d85..a49407d85 100755..100644
--- a/vendor/criterion/src/analysis/compare.rs
+++ b/vendor/criterion/src/analysis/compare.rs
diff --git a/vendor/criterion/src/analysis/mod.rs b/vendor/criterion/src/analysis/mod.rs
index 5d84bef16..1851d7186 100755..100644
--- a/vendor/criterion/src/analysis/mod.rs
+++ b/vendor/criterion/src/analysis/mod.rs
@@ -26,7 +26,7 @@ macro_rules! elapsed {
info!(
"{} took {}",
$msg,
- crate::format::time(crate::DurationExt::to_nanos(elapsed) as f64)
+ crate::format::time(elapsed.as_nanos() as f64)
);
out
@@ -47,7 +47,7 @@ pub(crate) fn common<M: Measurement, T: ?Sized>(
) {
criterion.report.benchmark_start(id, report_context);
- if let Baseline::Compare = criterion.baseline {
+ if let Baseline::CompareStrict = criterion.baseline {
if !base_dir_exists(
id,
&criterion.baseline_directory,
@@ -128,7 +128,7 @@ pub(crate) fn common<M: Measurement, T: ?Sized>(
.collect::<Vec<f64>>();
let avg_times = Sample::new(&avg_times);
- if criterion.connection.is_none() && criterion.load_baseline.is_none() {
+ if criterion.should_save_baseline() {
log_if_err!({
let mut new_dir = criterion.output_directory.clone();
new_dir.push(id.as_directory_name());
@@ -139,7 +139,7 @@ pub(crate) fn common<M: Measurement, T: ?Sized>(
let data = Data::new(&iters, &times);
let labeled_sample = tukey::classify(avg_times);
- if criterion.connection.is_none() {
+ if criterion.should_save_baseline() {
log_if_err!({
let mut tukey_file = criterion.output_directory.to_owned();
tukey_file.push(id.as_directory_name());
@@ -156,7 +156,7 @@ pub(crate) fn common<M: Measurement, T: ?Sized>(
distributions.slope = Some(distribution);
}
- if criterion.connection.is_none() && criterion.load_baseline.is_none() {
+ if criterion.should_save_baseline() {
log_if_err!({
let mut sample_file = criterion.output_directory.clone();
sample_file.push(id.as_directory_name());
@@ -222,7 +222,7 @@ pub(crate) fn common<M: Measurement, T: ?Sized>(
};
let measurement_data = crate::report::MeasurementData {
- data: Data::new(&*iters, &*times),
+ data: Data::new(&iters, &times),
avg_times: labeled_sample,
absolute_estimates: estimates,
distributions,
@@ -237,7 +237,7 @@ pub(crate) fn common<M: Measurement, T: ?Sized>(
criterion.measurement.formatter(),
);
- if criterion.connection.is_none() && criterion.load_baseline.is_none() {
+ if criterion.should_save_baseline() {
log_if_err!({
let mut benchmark_file = criterion.output_directory.clone();
benchmark_file.push(id.as_directory_name());
@@ -365,5 +365,6 @@ fn copy_new_dir_to_base(id: &str, baseline: &str, output_directory: &Path) {
&new_dir.join("benchmark.json"),
&base_dir.join("benchmark.json")
));
+ #[cfg(feature = "csv_output")]
try_else_return!(fs::cp(&new_dir.join("raw.csv"), &base_dir.join("raw.csv")));
}
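The three previous connection.is_none() && load_baseline.is_none() checks collapse into one should_save_baseline predicate on Criterion. Its body lies outside this hunk; given the Baseline::Discard variant added in lib.rs below, a plausible sketch (an assumption, not the committed code) is:

    impl<M: Measurement> Criterion<M> {
        fn should_save_baseline(&self) -> bool {
            // Save only when running standalone (no cargo-criterion
            // connection), not replaying a stored baseline, and not
            // explicitly discarding results.
            self.connection.is_none()
                && self.load_baseline.is_none()
                && !matches!(self.baseline, Baseline::Discard)
        }
    }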
diff --git a/vendor/criterion/src/async_executor.rs b/vendor/criterion/src/async_executor.rs
index 127af2768..a7337fb1f 100755..100644
--- a/vendor/criterion/src/async_executor.rs
+++ b/vendor/criterion/src/async_executor.rs
@@ -32,7 +32,7 @@ impl AsyncExecutor for FuturesExecutor {
}
}
-/// Runs futures on the 'soml' crate's global executor
+/// Runs futures on the 'smol' crate's global executor
#[cfg(feature = "async_smol")]
pub struct SmolExecutor;
#[cfg(feature = "async_smol")]
diff --git a/vendor/criterion/src/bencher.rs b/vendor/criterion/src/bencher.rs
index c5e90af5c..016aa2841 100755..100644
--- a/vendor/criterion/src/bencher.rs
+++ b/vendor/criterion/src/bencher.rs
@@ -189,15 +189,6 @@ impl<'a, M: Measurement> Bencher<'a, M> {
self.iter_batched(|| (), |_| routine(), BatchSize::SmallInput);
}
- #[doc(hidden)]
- pub fn iter_with_large_setup<I, O, S, R>(&mut self, setup: S, routine: R)
- where
- S: FnMut() -> I,
- R: FnMut(I) -> O,
- {
- self.iter_batched(setup, routine, BatchSize::NumBatches(1));
- }
-
/// Times a `routine` that requires some input by generating a batch of input, then timing the
/// iteration of the benchmark over the input. See [`BatchSize`](enum.BatchSize.html) for
/// details on choosing the batch size. Use this when the routine must consume its input.
diff --git a/vendor/criterion/src/benchmark.rs b/vendor/criterion/src/benchmark.rs
index fccb791f2..3a1cb0012 100755..100644
--- a/vendor/criterion/src/benchmark.rs
+++ b/vendor/criterion/src/benchmark.rs
@@ -1,14 +1,4 @@
-#![allow(deprecated)]
-
-use crate::analysis;
-use crate::connection::OutgoingMessage;
-use crate::measurement::{Measurement, WallTime};
-use crate::report::{BenchmarkId, Report, ReportContext};
-use crate::routine::{Function, Routine};
-use crate::{Bencher, Criterion, DurationExt, Mode, PlotConfiguration, SamplingMode, Throughput};
-use std::cell::RefCell;
-use std::fmt::Debug;
-use std::marker::Sized;
+use crate::{PlotConfiguration, SamplingMode};
use std::time::Duration;
// TODO: Move the benchmark config stuff to a separate module for easier use.
@@ -23,6 +13,7 @@ pub struct BenchmarkConfig {
pub significance_level: f64,
pub warm_up_time: Duration,
pub sampling_mode: SamplingMode,
+ pub quick_mode: bool,
}
/// Struct representing a partially-complete per-benchmark configuration.
@@ -36,6 +27,7 @@ pub(crate) struct PartialBenchmarkConfig {
pub(crate) significance_level: Option<f64>,
pub(crate) warm_up_time: Option<Duration>,
pub(crate) sampling_mode: Option<SamplingMode>,
+ pub(crate) quick_mode: Option<bool>,
pub(crate) plot_config: PlotConfiguration,
}
@@ -52,549 +44,7 @@ impl PartialBenchmarkConfig {
.unwrap_or(defaults.significance_level),
warm_up_time: self.warm_up_time.unwrap_or(defaults.warm_up_time),
sampling_mode: self.sampling_mode.unwrap_or(defaults.sampling_mode),
- }
- }
-}
-
-pub(crate) struct NamedRoutine<T, M: Measurement = WallTime> {
- pub id: String,
- pub(crate) f: Box<RefCell<dyn Routine<M, T>>>,
-}
-
-/// Structure representing a benchmark (or group of benchmarks)
-/// which take one parameter.
-#[doc(hidden)]
-#[allow(clippy::type_complexity)]
-#[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
-pub struct ParameterizedBenchmark<T: Debug, M: Measurement = WallTime> {
- config: PartialBenchmarkConfig,
- values: Vec<T>,
- routines: Vec<NamedRoutine<T, M>>,
- throughput: Option<Box<dyn Fn(&T) -> Throughput>>,
-}
-
-/// Structure representing a benchmark (or group of benchmarks)
-/// which takes no parameters.
-#[doc(hidden)]
-#[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
-pub struct Benchmark<M: Measurement = WallTime> {
- config: PartialBenchmarkConfig,
- routines: Vec<NamedRoutine<(), M>>,
- throughput: Option<Throughput>,
-}
-
-/// Common trait for `Benchmark` and `ParameterizedBenchmark`. Not intended to be
-/// used outside of Criterion.rs.
-#[doc(hidden)]
-pub trait BenchmarkDefinition<M: Measurement = WallTime>: Sized {
- #[doc(hidden)]
- fn run(self, group_id: &str, c: &mut Criterion<M>);
-}
-
-macro_rules! benchmark_config {
- ($type:tt) => {
- /// Changes the size of the sample for this benchmark
- ///
- /// A bigger sample should yield more accurate results if paired with a sufficiently large
- /// measurement time.
- ///
- /// Sample size must be at least 10.
- ///
- /// # Panics
- ///
- /// Panics if n < 10.
- pub fn sample_size(mut self, n: usize) -> Self {
- assert!(n >= 10);
-
- self.config.sample_size = Some(n);
- self
- }
-
- /// Changes the warm up time for this benchmark
- ///
- /// # Panics
- ///
- /// Panics if the input duration is zero
- pub fn warm_up_time(mut self, dur: Duration) -> Self {
- assert!(dur.to_nanos() > 0);
-
- self.config.warm_up_time = Some(dur);
- self
- }
-
- /// Changes the target measurement time for this benchmark. Criterion will attempt
- /// to spend approximately this amount of time measuring the benchmark.
- /// With a longer time, the measurement will become more resilient to transitory peak loads
- /// caused by external programs.
- ///
- /// # Panics
- ///
- /// Panics if the input duration is zero
- pub fn measurement_time(mut self, dur: Duration) -> Self {
- assert!(dur.to_nanos() > 0);
-
- self.config.measurement_time = Some(dur);
- self
- }
-
- /// Changes the number of resamples for this benchmark
- ///
- /// Number of resamples to use for the
- /// [bootstrap](http://en.wikipedia.org/wiki/Bootstrapping_(statistics)#Case_resampling)
- ///
- /// A larger number of resamples reduces the random sampling errors, which are inherent to the
- /// bootstrap method, but also increases the analysis time.
- ///
- /// # Panics
- ///
- /// Panics if the number of resamples is set to zero
- pub fn nresamples(mut self, n: usize) -> Self {
- assert!(n > 0);
- if n <= 1000 {
- println!("\nWarning: It is not recommended to reduce nresamples below 1000.");
- }
-
- self.config.nresamples = Some(n);
- self
- }
-
- /// Changes the default noise threshold for this benchmark. The noise threshold
- /// is used to filter out small changes in performance, even if they are statistically
- /// significant. Sometimes benchmarking the same code twice will result in small but
- /// statistically significant differences solely because of noise. This provides a way to filter
- /// out some of these false positives at the cost of making it harder to detect small changes
- /// to the true performance of the benchmark.
- ///
- /// The default is 0.01, meaning that changes smaller than 1% will be ignored.
- ///
- /// # Panics
- ///
- /// Panics if the threshold is set to a negative value
- pub fn noise_threshold(mut self, threshold: f64) -> Self {
- assert!(threshold >= 0.0);
-
- self.config.noise_threshold = Some(threshold);
- self
- }
-
- /// Changes the default confidence level for this benchmark. The confidence
- /// level is the desired probability that the true runtime lies within the estimated
- /// [confidence interval](https://en.wikipedia.org/wiki/Confidence_interval). The default is
- /// 0.95, meaning that the confidence interval should capture the true value 95% of the time.
- ///
- /// # Panics
- ///
- /// Panics if the confidence level is set to a value outside the `(0, 1)` range
- pub fn confidence_level(mut self, cl: f64) -> Self {
- assert!(cl > 0.0 && cl < 1.0);
- if cl < 0.5 {
- println!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
- }
-
- self.config.confidence_level = Some(cl);
- self
- }
-
- /// Changes the default [significance level](https://en.wikipedia.org/wiki/Statistical_significance)
- /// for this benchmark. This is used to perform a
- /// [hypothesis test](https://en.wikipedia.org/wiki/Statistical_hypothesis_testing) to see if
- /// the measurements from this run are different from the measured performance of the last run.
- /// The significance level is the desired probability that two measurements of identical code
- /// will be considered 'different' due to noise in the measurements. The default value is 0.05,
- /// meaning that approximately 5% of identical benchmarks will register as different due to
- /// noise.
- ///
- /// This presents a trade-off. By setting the significance level closer to 0.0, you can increase
- /// the statistical robustness against noise, but it also weakens Criterion.rs' ability to
- /// detect small but real changes in the performance. By setting the significance level
- /// closer to 1.0, Criterion.rs will be more able to detect small true changes, but will also
- /// report more spurious differences.
- ///
- /// See also the noise threshold setting.
- ///
- /// # Panics
- ///
- /// Panics if the significance level is set to a value outside the `(0, 1)` range
- pub fn significance_level(mut self, sl: f64) -> Self {
- assert!(sl > 0.0 && sl < 1.0);
-
- self.config.significance_level = Some(sl);
- self
- }
-
- /// Changes the plot configuration for this benchmark.
- pub fn plot_config(mut self, new_config: PlotConfiguration) -> Self {
- self.config.plot_config = new_config;
- self
- }
-
- /// Changes the sampling mode for this benchmark.
- pub fn sampling_mode(mut self, new_mode: SamplingMode) -> Self {
- self.config.sampling_mode = Some(new_mode);
- self
- }
- };
-}
-
-impl<M> Benchmark<M>
-where
- M: Measurement + 'static,
-{
- benchmark_config!(Benchmark);
-
- /// Create a new benchmark group and adds the given function to it.
- ///
- /// # Example
- ///
- /// ```rust
- /// # #[macro_use] extern crate criterion;
- /// # use criterion::*;
- ///
- /// fn bench(c: &mut Criterion) {
- /// // One-time setup goes here
- /// c.bench(
- /// "my_group",
- /// Benchmark::new("my_function", |b| b.iter(|| {
- /// // Code to benchmark goes here
- /// })),
- /// );
- /// }
- ///
- /// criterion_group!(benches, bench);
- /// criterion_main!(benches);
- /// ```
- pub fn new<S, F>(id: S, f: F) -> Benchmark<M>
- where
- S: Into<String>,
- F: FnMut(&mut Bencher<'_, M>) + 'static,
- {
- Benchmark {
- config: PartialBenchmarkConfig::default(),
- routines: vec![],
- throughput: None,
- }
- .with_function(id, f)
- }
-
- /// Add a function to the benchmark group.
- pub fn with_function<S, F>(mut self, id: S, mut f: F) -> Benchmark<M>
- where
- S: Into<String>,
- F: FnMut(&mut Bencher<'_, M>) + 'static,
- {
- let routine = NamedRoutine {
- id: id.into(),
- f: Box::new(RefCell::new(Function::new(move |b, _| f(b)))),
- };
- self.routines.push(routine);
- self
- }
-
- /// Set the input size for this benchmark group. Used for reporting the
- /// throughput.
- pub fn throughput(mut self, throughput: Throughput) -> Benchmark<M> {
- self.throughput = Some(throughput);
- self
- }
-}
-
-impl<M: Measurement> BenchmarkDefinition<M> for Benchmark<M> {
- fn run(self, group_id: &str, c: &mut Criterion<M>) {
- let report_context = ReportContext {
- output_directory: c.output_directory.clone(),
- plot_config: self.config.plot_config.clone(),
- };
-
- let config = self.config.to_complete(&c.config);
- let num_routines = self.routines.len();
-
- let mut all_ids = vec![];
- let mut any_matched = false;
-
- if let Some(conn) = &c.connection {
- conn.send(&OutgoingMessage::BeginningBenchmarkGroup { group: group_id })
- .unwrap();
- }
-
- for routine in self.routines {
- let function_id = if num_routines == 1 && group_id == routine.id {
- None
- } else {
- Some(routine.id)
- };
-
- let mut id = BenchmarkId::new(
- group_id.to_owned(),
- function_id,
- None,
- self.throughput.clone(),
- );
-
- id.ensure_directory_name_unique(&c.all_directories);
- c.all_directories.insert(id.as_directory_name().to_owned());
- id.ensure_title_unique(&c.all_titles);
- c.all_titles.insert(id.as_title().to_owned());
-
- let do_run = c.filter_matches(id.id());
- any_matched |= do_run;
-
- execute_benchmark(
- do_run,
- &id,
- c,
- &config,
- &mut *routine.f.borrow_mut(),
- &report_context,
- &(),
- self.throughput.clone(),
- );
-
- all_ids.push(id);
- }
-
- if let Some(conn) = &c.connection {
- conn.send(&OutgoingMessage::FinishedBenchmarkGroup { group: group_id })
- .unwrap();
- conn.serve_value_formatter(c.measurement.formatter())
- .unwrap();
- }
-
- if all_ids.len() > 1 && any_matched && c.mode.is_benchmark() {
- c.report
- .summarize(&report_context, &all_ids, c.measurement.formatter());
- }
- if any_matched {
- c.report.group_separator();
- }
- }
-}
-
-impl<T, M> ParameterizedBenchmark<T, M>
-where
- T: Debug + 'static,
- M: Measurement + 'static,
-{
- benchmark_config!(ParameterizedBenchmark);
-
- pub(crate) fn with_functions(
- functions: Vec<NamedRoutine<T, M>>,
- parameters: Vec<T>,
- ) -> ParameterizedBenchmark<T, M> {
- ParameterizedBenchmark {
- config: PartialBenchmarkConfig::default(),
- values: parameters,
- routines: functions,
- throughput: None,
- }
- }
-
- /// Create a new parameterized benchmark group and adds the given function
- /// to it.
- /// The function under test must follow the setup - bench - teardown pattern:
- ///
- /// # Example
- ///
- /// ```rust
- /// # #[macro_use] extern crate criterion;
- /// # use criterion::*;
- ///
- /// fn bench(c: &mut Criterion) {
- /// let parameters = vec![1u64, 2u64, 3u64];
- ///
- /// // One-time setup goes here
- /// c.bench(
- /// "my_group",
- /// ParameterizedBenchmark::new(
- /// "my_function",
- /// |b, param| b.iter(|| {
- /// // Code to benchmark using param goes here
- /// }),
- /// parameters
- /// )
- /// );
- /// }
- ///
- /// criterion_group!(benches, bench);
- /// criterion_main!(benches);
- /// ```
- pub fn new<S, F, I>(id: S, f: F, parameters: I) -> ParameterizedBenchmark<T, M>
- where
- S: Into<String>,
- F: FnMut(&mut Bencher<'_, M>, &T) + 'static,
- I: IntoIterator<Item = T>,
- {
- ParameterizedBenchmark {
- config: PartialBenchmarkConfig::default(),
- values: parameters.into_iter().collect(),
- routines: vec![],
- throughput: None,
- }
- .with_function(id, f)
- }
-
- /// Add a function to the benchmark group.
- pub fn with_function<S, F>(mut self, id: S, f: F) -> ParameterizedBenchmark<T, M>
- where
- S: Into<String>,
- F: FnMut(&mut Bencher<'_, M>, &T) + 'static,
- {
- let routine = NamedRoutine {
- id: id.into(),
- f: Box::new(RefCell::new(Function::new(f))),
- };
- self.routines.push(routine);
- self
- }
-
- /// Use the given function to calculate the input size for a given input.
- pub fn throughput<F>(mut self, throughput: F) -> ParameterizedBenchmark<T, M>
- where
- F: Fn(&T) -> Throughput + 'static,
- {
- self.throughput = Some(Box::new(throughput));
- self
- }
-}
-impl<T, M> BenchmarkDefinition<M> for ParameterizedBenchmark<T, M>
-where
- T: Debug + 'static,
- M: Measurement + 'static,
-{
- fn run(self, group_id: &str, c: &mut Criterion<M>) {
- let report_context = ReportContext {
- output_directory: c.output_directory.clone(),
- plot_config: self.config.plot_config.clone(),
- };
-
- let config = self.config.to_complete(&c.config);
- let num_parameters = self.values.len();
- let num_routines = self.routines.len();
-
- let mut all_ids = vec![];
- let mut any_matched = false;
-
- if let Some(conn) = &c.connection {
- conn.send(&OutgoingMessage::BeginningBenchmarkGroup { group: group_id })
- .unwrap();
- }
-
- for routine in self.routines {
- for value in &self.values {
- let function_id = if num_routines == 1 && group_id == routine.id {
- None
- } else {
- Some(routine.id.clone())
- };
-
- let value_str = if num_parameters == 1 {
- None
- } else {
- Some(format!("{:?}", value))
- };
-
- let throughput = self.throughput.as_ref().map(|func| func(value));
- let mut id = BenchmarkId::new(
- group_id.to_owned(),
- function_id,
- value_str,
- throughput.clone(),
- );
-
- id.ensure_directory_name_unique(&c.all_directories);
- c.all_directories.insert(id.as_directory_name().to_owned());
- id.ensure_title_unique(&c.all_titles);
- c.all_titles.insert(id.as_title().to_owned());
-
- let do_run = c.filter_matches(id.id());
- any_matched |= do_run;
-
- execute_benchmark(
- do_run,
- &id,
- c,
- &config,
- &mut *routine.f.borrow_mut(),
- &report_context,
- value,
- throughput,
- );
-
- all_ids.push(id);
- }
- }
-
- if let Some(conn) = &c.connection {
- conn.send(&OutgoingMessage::FinishedBenchmarkGroup { group: group_id })
- .unwrap();
- conn.serve_value_formatter(c.measurement.formatter())
- .unwrap();
- }
-
- if all_ids.len() > 1 && any_matched && c.mode.is_benchmark() {
- c.report
- .summarize(&report_context, &all_ids, c.measurement.formatter());
- }
- if any_matched {
- c.report.group_separator();
- }
- }
-}
-
-#[cfg_attr(feature = "cargo-clippy", allow(clippy::too_many_arguments))]
-fn execute_benchmark<T, M>(
- do_run: bool,
- id: &BenchmarkId,
- c: &Criterion<M>,
- config: &BenchmarkConfig,
- routine: &mut dyn Routine<M, T>,
- report_context: &ReportContext,
- parameter: &T,
- throughput: Option<Throughput>,
-) where
- T: Debug,
- M: Measurement,
-{
- match &c.mode {
- Mode::Benchmark => {
- if let Some(conn) = &c.connection {
- if do_run {
- conn.send(&OutgoingMessage::BeginningBenchmark { id: id.into() })
- .unwrap();
- } else {
- conn.send(&OutgoingMessage::SkippingBenchmark { id: id.into() })
- .unwrap();
- }
- }
-
- if do_run {
- analysis::common(
- id,
- routine,
- config,
- c,
- report_context,
- parameter,
- throughput,
- );
- }
- }
- Mode::List => {
- if do_run {
- println!("{}: bench", id);
- }
- }
- Mode::Test => {
- if do_run {
- // In test mode, run the benchmark exactly once, then exit.
- c.report.test_start(id, report_context);
- routine.test(&c.measurement, parameter);
- c.report.test_pass(id, report_context);
- }
- }
- &Mode::Profile(duration) => {
- if do_run {
- routine.profile(&c.measurement, id, c, report_context, duration, parameter);
- }
+ quick_mode: self.quick_mode.unwrap_or(defaults.quick_mode),
}
}
}
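Every configuration method deleted with Benchmark and ParameterizedBenchmark (sample_size, warm_up_time, measurement_time, nresamples, noise_threshold, confidence_level, significance_level, plot_config, sampling_mode) exists under the same name on BenchmarkGroup, so migration is mechanical. A minimal sketch (group and function names illustrative):

    use criterion::Criterion;
    use std::time::Duration;

    fn bench(c: &mut Criterion) {
        let mut group = c.benchmark_group("my_group");
        group
            .sample_size(50)
            .warm_up_time(Duration::from_secs(1))
            .measurement_time(Duration::from_secs(10));
        group.bench_function("my_function", |b| b.iter(|| 2 + 2));
        group.finish();
    }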
diff --git a/vendor/criterion/src/benchmark_group.rs b/vendor/criterion/src/benchmark_group.rs
index 9ed88ef19..687fb2f21 100755..100644
--- a/vendor/criterion/src/benchmark_group.rs
+++ b/vendor/criterion/src/benchmark_group.rs
@@ -6,7 +6,7 @@ use crate::report::BenchmarkId as InternalBenchmarkId;
use crate::report::Report;
use crate::report::ReportContext;
use crate::routine::{Function, Routine};
-use crate::{Bencher, Criterion, DurationExt, Mode, PlotConfiguration, SamplingMode, Throughput};
+use crate::{Bencher, Criterion, Mode, PlotConfiguration, SamplingMode, Throughput};
use std::time::Duration;
/// Structure used to group together a set of related benchmarks, along with custom configuration
@@ -107,7 +107,7 @@ impl<'a, M: Measurement> BenchmarkGroup<'a, M> {
///
/// Panics if the input duration is zero
pub fn warm_up_time(&mut self, dur: Duration) -> &mut Self {
- assert!(dur.to_nanos() > 0);
+ assert!(dur.as_nanos() > 0);
self.partial_config.warm_up_time = Some(dur);
self
@@ -125,7 +125,7 @@ impl<'a, M: Measurement> BenchmarkGroup<'a, M> {
///
/// Panics if the input duration is zero
pub fn measurement_time(&mut self, dur: Duration) -> &mut Self {
- assert!(dur.to_nanos() > 0);
+ assert!(dur.as_nanos() > 0);
self.partial_config.measurement_time = Some(dur);
self
@@ -145,7 +145,7 @@ impl<'a, M: Measurement> BenchmarkGroup<'a, M> {
pub fn nresamples(&mut self, n: usize) -> &mut Self {
assert!(n > 0);
if n <= 1000 {
- println!("\nWarning: It is not recommended to reduce nresamples below 1000.");
+ eprintln!("\nWarning: It is not recommended to reduce nresamples below 1000.");
}
self.partial_config.nresamples = Some(n);
@@ -182,7 +182,7 @@ impl<'a, M: Measurement> BenchmarkGroup<'a, M> {
pub fn confidence_level(&mut self, cl: f64) -> &mut Self {
assert!(cl > 0.0 && cl < 1.0);
if cl < 0.5 {
- println!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
+ eprintln!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
}
self.partial_config.confidence_level = Some(cl);
@@ -290,7 +290,8 @@ impl<'a, M: Measurement> BenchmarkGroup<'a, M> {
assert!(
!self.all_ids.contains(&id),
- "Benchmark IDs must be unique within a group."
+ "Benchmark IDs must be unique within a group. Encountered duplicated benchmark ID {}",
+ &id
);
id.ensure_directory_name_unique(&self.criterion.all_directories);
@@ -327,9 +328,9 @@ impl<'a, M: Measurement> BenchmarkGroup<'a, M> {
);
}
}
- Mode::List => {
+ Mode::List(_) => {
if do_run {
- println!("{}: bench", id);
+ println!("{}: benchmark", id);
}
}
Mode::Test => {
@@ -391,7 +392,7 @@ impl<'a, M: Measurement> Drop for BenchmarkGroup<'a, M> {
self.criterion.measurement.formatter(),
);
}
- if self.any_matched {
+ if self.any_matched && !self.criterion.mode.is_terse() {
self.criterion.report.group_separator();
}
}
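The list-mode changes pair with the new ci/nextest-compat.sh script above: cargo-nextest discovers benchmarks by running the bench binary in list mode and parsing one "<id>: benchmark" line per benchmark, which appears to be why the output changed from "bench" to "benchmark" and why a terse format was added, matching libtest's list output.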
diff --git a/vendor/criterion/src/connection.rs b/vendor/criterion/src/connection.rs
index 53ad16da1..53706d608 100755..100644
--- a/vendor/criterion/src/connection.rs
+++ b/vendor/criterion/src/connection.rs
@@ -8,28 +8,39 @@ use std::net::TcpStream;
#[derive(Debug)]
pub enum MessageError {
- SerializationError(serde_cbor::Error),
- IoError(std::io::Error),
+ Deserialization(ciborium::de::Error<std::io::Error>),
+ Serialization(ciborium::ser::Error<std::io::Error>),
+ Io(std::io::Error),
}
-impl From<serde_cbor::Error> for MessageError {
- fn from(other: serde_cbor::Error) -> Self {
- MessageError::SerializationError(other)
+impl From<ciborium::de::Error<std::io::Error>> for MessageError {
+ fn from(other: ciborium::de::Error<std::io::Error>) -> Self {
+ MessageError::Deserialization(other)
+ }
+}
+impl From<ciborium::ser::Error<std::io::Error>> for MessageError {
+ fn from(other: ciborium::ser::Error<std::io::Error>) -> Self {
+ MessageError::Serialization(other)
}
}
impl From<std::io::Error> for MessageError {
fn from(other: std::io::Error) -> Self {
- MessageError::IoError(other)
+ MessageError::Io(other)
}
}
impl std::fmt::Display for MessageError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
- MessageError::SerializationError(error) => write!(
+ MessageError::Deserialization(error) => write!(
+ f,
+ "Failed to deserialize message to Criterion.rs benchmark:\n{}",
+ error
+ ),
+ MessageError::Serialization(error) => write!(
f,
- "Failed to serialize or deserialize message to Criterion.rs benchmark:\n{}",
+ "Failed to serialize message to Criterion.rs benchmark:\n{}",
error
),
- MessageError::IoError(error) => write!(
+ MessageError::Io(error) => write!(
f,
"Failed to read or write message to Criterion.rs benchmark:\n{}",
error
@@ -40,8 +51,9 @@ impl std::fmt::Display for MessageError {
impl std::error::Error for MessageError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
- MessageError::SerializationError(err) => Some(err),
- MessageError::IoError(err) => Some(err),
+ MessageError::Deserialization(err) => Some(err),
+ MessageError::Serialization(err) => Some(err),
+ MessageError::Io(err) => Some(err),
}
}
}
@@ -112,13 +124,13 @@ impl InnerConnection {
let length = u32::from_be_bytes(length_buf);
self.receive_buffer.resize(length as usize, 0u8);
self.socket.read_exact(&mut self.receive_buffer)?;
- let value = serde_cbor::from_slice(&self.receive_buffer)?;
+ let value = ciborium::de::from_reader(&self.receive_buffer[..])?;
Ok(value)
}
pub fn send(&mut self, message: &OutgoingMessage) -> Result<(), MessageError> {
self.send_buffer.truncate(0);
- serde_cbor::to_writer(&mut self.send_buffer, message)?;
+ ciborium::ser::into_writer(message, &mut self.send_buffer)?;
let size = u32::try_from(self.send_buffer.len()).unwrap();
let length_buf = size.to_be_bytes();
self.socket.write_all(&length_buf)?;
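The length-prefixed framing is unchanged; only the CBOR codec moved from the unmaintained serde_cbor to ciborium, whose serializer and deserializer errors are distinct types (hence the split MessageError variants). A standalone round-trip sketch using the same two calls (the Message type is illustrative):

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize, Debug, PartialEq)]
    struct Message {
        id: u32,
        body: String,
    }

    fn roundtrip(msg: &Message) -> Message {
        let mut buf = Vec::new();
        // Serialize to CBOR, then parse it back, as the connection does.
        ciborium::ser::into_writer(msg, &mut buf).expect("serialize");
        ciborium::de::from_reader(&buf[..]).expect("deserialize")
    }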
diff --git a/vendor/criterion/src/csv_report.rs b/vendor/criterion/src/csv_report.rs
index 3b744df08..f8e2a05f7 100755..100644
--- a/vendor/criterion/src/csv_report.rs
+++ b/vendor/criterion/src/csv_report.rs
@@ -35,6 +35,7 @@ impl<W: Write> CsvReportWriter<W> {
let value = id.value_str.as_deref();
let (throughput_num, throughput_type) = match id.throughput {
Some(Throughput::Bytes(bytes)) => (Some(format!("{}", bytes)), Some("bytes")),
+ Some(Throughput::BytesDecimal(bytes)) => (Some(format!("{}", bytes)), Some("bytes")),
Some(Throughput::Elements(elems)) => (Some(format!("{}", elems)), Some("elements")),
None => (None, None),
};
diff --git a/vendor/criterion/src/error.rs b/vendor/criterion/src/error.rs
index 9b7eb17b1..459a716f5 100755..100644
--- a/vendor/criterion/src/error.rs
+++ b/vendor/criterion/src/error.rs
@@ -1,3 +1,4 @@
+#[cfg(feature = "csv_output")]
use csv::Error as CsvError;
use serde_json::Error as SerdeError;
use std::error::Error as StdError;
@@ -21,6 +22,8 @@ pub enum Error {
path: PathBuf,
inner: SerdeError,
},
+ #[cfg(feature = "csv_output")]
+ /// This API requires the following crate features to be activated: csv_output
CsvError(CsvError),
}
impl fmt::Display for Error {
@@ -37,6 +40,7 @@ impl fmt::Display for Error {
"Failed to read or write file {:?} due to serialization error: {}",
path, inner
),
+ #[cfg(feature = "csv_output")]
Error::CsvError(inner) => write!(f, "CSV error: {}", inner),
}
}
@@ -47,6 +51,7 @@ impl StdError for Error {
Error::AccessError { .. } => "AccessError",
Error::CopyError { .. } => "CopyError",
Error::SerdeError { .. } => "SerdeError",
+ #[cfg(feature = "csv_output")]
Error::CsvError(_) => "CsvError",
}
}
@@ -56,10 +61,13 @@ impl StdError for Error {
Error::AccessError { inner, .. } => Some(inner),
Error::CopyError { inner, .. } => Some(inner),
Error::SerdeError { inner, .. } => Some(inner),
+ #[cfg(feature = "csv_output")]
Error::CsvError(inner) => Some(inner),
}
}
}
+
+#[cfg(feature = "csv_output")]
impl From<CsvError> for Error {
fn from(other: CsvError) -> Error {
Error::CsvError(other)
diff --git a/vendor/criterion/src/estimate.rs b/vendor/criterion/src/estimate.rs
index 8a79d27a8..8a79d27a8 100755..100644
--- a/vendor/criterion/src/estimate.rs
+++ b/vendor/criterion/src/estimate.rs
diff --git a/vendor/criterion/src/format.rs b/vendor/criterion/src/format.rs
index 53c4a4dbd..53c4a4dbd 100755..100644
--- a/vendor/criterion/src/format.rs
+++ b/vendor/criterion/src/format.rs
diff --git a/vendor/criterion/src/fs.rs b/vendor/criterion/src/fs.rs
index f47508be7..f47508be7 100755..100644
--- a/vendor/criterion/src/fs.rs
+++ b/vendor/criterion/src/fs.rs
diff --git a/vendor/criterion/src/html/benchmark_report.html.tt b/vendor/criterion/src/html/benchmark_report.html.tt
index babd0032e..babd0032e 100755..100644
--- a/vendor/criterion/src/html/benchmark_report.html.tt
+++ b/vendor/criterion/src/html/benchmark_report.html.tt
diff --git a/vendor/criterion/src/html/index.html.tt b/vendor/criterion/src/html/index.html.tt
index 7c307ed62..7c307ed62 100755..100644
--- a/vendor/criterion/src/html/index.html.tt
+++ b/vendor/criterion/src/html/index.html.tt
diff --git a/vendor/criterion/src/html/mod.rs b/vendor/criterion/src/html/mod.rs
index d36065623..eb31a8168 100755..100644
--- a/vendor/criterion/src/html/mod.rs
+++ b/vendor/criterion/src/html/mod.rs
@@ -438,7 +438,7 @@ impl Report for Html {
// If all of the value strings can be parsed into a number, sort/dedupe
// numerically. Otherwise sort lexicographically.
- if value_strs.iter().all(|os| try_parse(*os).is_some()) {
+ if value_strs.iter().all(|os| try_parse(os).is_some()) {
value_strs.sort_unstable_by(|v1, v2| {
let num1 = try_parse(v1);
let num2 = try_parse(v2);
@@ -464,7 +464,7 @@ impl Report for Html {
self.generate_summary(
&subgroup_id,
- &*samples_with_function,
+ &samples_with_function,
context,
formatter,
false,
@@ -483,13 +483,7 @@ impl Report for Html {
let subgroup_id =
BenchmarkId::new(group_id.clone(), None, Some(value_str.clone()), None);
- self.generate_summary(
- &subgroup_id,
- &*samples_with_value,
- context,
- formatter,
- false,
- );
+ self.generate_summary(&subgroup_id, &samples_with_value, context, formatter, false);
}
}
@@ -516,7 +510,7 @@ impl Report for Html {
self.generate_summary(
&BenchmarkId::new(group_id, None, None, None),
- &*(all_data),
+ &all_data,
context,
formatter,
true,
@@ -543,8 +537,8 @@ impl Report for Html {
}
let mut groups = id_groups
- .into_iter()
- .map(|(_, group)| BenchmarkGroup::new(output_directory, &group))
+ .into_values()
+ .map(|group| BenchmarkGroup::new(output_directory, &group))
.collect::<Vec<BenchmarkGroup<'_>>>();
groups.sort_unstable_by_key(|g| g.group_report.name);
diff --git a/vendor/criterion/src/html/report_link.html.tt b/vendor/criterion/src/html/report_link.html.tt
index 6013114c9..6013114c9 100755..100644
--- a/vendor/criterion/src/html/report_link.html.tt
+++ b/vendor/criterion/src/html/report_link.html.tt
diff --git a/vendor/criterion/src/html/summary_report.html.tt b/vendor/criterion/src/html/summary_report.html.tt
index 4f36f62ba..4f36f62ba 100755..100644
--- a/vendor/criterion/src/html/summary_report.html.tt
+++ b/vendor/criterion/src/html/summary_report.html.tt
diff --git a/vendor/criterion/src/kde.rs b/vendor/criterion/src/kde.rs
index 8812142eb..8812142eb 100755..100644
--- a/vendor/criterion/src/kde.rs
+++ b/vendor/criterion/src/kde.rs
diff --git a/vendor/criterion/src/lib.rs b/vendor/criterion/src/lib.rs
index 426437193..855c68ff2 100755..100644
--- a/vendor/criterion/src/lib.rs
+++ b/vendor/criterion/src/lib.rs
@@ -27,18 +27,18 @@
)
)]
+#[cfg(all(feature = "rayon", target_arch = "wasm32"))]
+compile_error!("Rayon cannot be used when targeting wasi32. Try disabling default features.");
+
#[cfg(test)]
extern crate approx;
#[cfg(test)]
extern crate quickcheck;
-use clap::value_t;
+use is_terminal::IsTerminal;
use regex::Regex;
-#[macro_use]
-extern crate lazy_static;
-
#[cfg(feature = "real_blackbox")]
extern crate test;
@@ -57,6 +57,7 @@ mod benchmark_group;
pub mod async_executor;
mod bencher;
mod connection;
+#[cfg(feature = "csv_output")]
mod csv_report;
mod error;
mod estimate;
@@ -76,9 +77,7 @@ use std::cell::RefCell;
use std::collections::HashSet;
use std::default::Default;
use std::env;
-use std::fmt;
-use std::iter::IntoIterator;
-use std::marker::PhantomData;
+use std::io::stdout;
use std::net::TcpStream;
use std::path::{Path, PathBuf};
use std::process::Command;
@@ -86,69 +85,65 @@ use std::sync::{Mutex, MutexGuard};
use std::time::Duration;
use criterion_plot::{Version, VersionError};
+use once_cell::sync::Lazy;
use crate::benchmark::BenchmarkConfig;
-use crate::benchmark::NamedRoutine;
use crate::connection::Connection;
use crate::connection::OutgoingMessage;
-use crate::csv_report::FileCsvReport;
use crate::html::Html;
use crate::measurement::{Measurement, WallTime};
-use crate::plot::{Gnuplot, Plotter, PlottersBackend};
+#[cfg(feature = "plotters")]
+use crate::plot::PlottersBackend;
+use crate::plot::{Gnuplot, Plotter};
use crate::profiler::{ExternalProfiler, Profiler};
-use crate::report::{BencherReport, CliReport, Report, ReportContext, Reports};
-use crate::routine::Function;
+use crate::report::{BencherReport, CliReport, CliVerbosity, Report, ReportContext, Reports};
#[cfg(feature = "async")]
pub use crate::bencher::AsyncBencher;
pub use crate::bencher::Bencher;
-#[allow(deprecated)]
-pub use crate::benchmark::{Benchmark, BenchmarkDefinition, ParameterizedBenchmark};
pub use crate::benchmark_group::{BenchmarkGroup, BenchmarkId};
-lazy_static! {
- static ref DEBUG_ENABLED: bool = std::env::var_os("CRITERION_DEBUG").is_some();
- static ref GNUPLOT_VERSION: Result<Version, VersionError> = criterion_plot::version();
- static ref DEFAULT_PLOTTING_BACKEND: PlottingBackend = {
- match &*GNUPLOT_VERSION {
- Ok(_) => PlottingBackend::Gnuplot,
- Err(e) => {
- match e {
- VersionError::Exec(_) => println!("Gnuplot not found, using plotters backend"),
- e => println!(
- "Gnuplot not found or not usable, using plotters backend\n{}",
- e
- ),
- };
- PlottingBackend::Plotters
- }
- }
- };
- static ref CARGO_CRITERION_CONNECTION: Option<Mutex<Connection>> = {
- match std::env::var("CARGO_CRITERION_PORT") {
- Ok(port_str) => {
- let port: u16 = port_str.parse().ok()?;
- let stream = TcpStream::connect(("localhost", port)).ok()?;
- Some(Mutex::new(Connection::new(stream).ok()?))
- }
- Err(_) => None,
- }
- };
- static ref DEFAULT_OUTPUT_DIRECTORY: PathBuf = {
- // Set criterion home to (in descending order of preference):
- // - $CRITERION_HOME (cargo-criterion sets this, but other users could as well)
- // - $CARGO_TARGET_DIR/criterion
- // - the cargo target dir from `cargo metadata`
- // - ./target/criterion
- if let Some(value) = env::var_os("CRITERION_HOME") {
- PathBuf::from(value)
- } else if let Some(path) = cargo_target_directory() {
- path.join("criterion")
- } else {
- PathBuf::from("target/criterion")
+static DEBUG_ENABLED: Lazy<bool> = Lazy::new(|| std::env::var_os("CRITERION_DEBUG").is_some());
+static GNUPLOT_VERSION: Lazy<Result<Version, VersionError>> = Lazy::new(criterion_plot::version);
+static DEFAULT_PLOTTING_BACKEND: Lazy<PlottingBackend> = Lazy::new(|| match &*GNUPLOT_VERSION {
+ Ok(_) => PlottingBackend::Gnuplot,
+ #[cfg(feature = "plotters")]
+ Err(e) => {
+ match e {
+ VersionError::Exec(_) => eprintln!("Gnuplot not found, using plotters backend"),
+ e => eprintln!(
+ "Gnuplot not found or not usable, using plotters backend\n{}",
+ e
+ ),
+ };
+ PlottingBackend::Plotters
+ }
+ #[cfg(not(feature = "plotters"))]
+ Err(_) => PlottingBackend::None,
+});
+static CARGO_CRITERION_CONNECTION: Lazy<Option<Mutex<Connection>>> =
+ Lazy::new(|| match std::env::var("CARGO_CRITERION_PORT") {
+ Ok(port_str) => {
+ let port: u16 = port_str.parse().ok()?;
+ let stream = TcpStream::connect(("localhost", port)).ok()?;
+ Some(Mutex::new(Connection::new(stream).ok()?))
}
- };
-}
+ Err(_) => None,
+ });
+static DEFAULT_OUTPUT_DIRECTORY: Lazy<PathBuf> = Lazy::new(|| {
+ // Set criterion home to (in descending order of preference):
+ // - $CRITERION_HOME (cargo-criterion sets this, but other users could as well)
+ // - $CARGO_TARGET_DIR/criterion
+ // - the cargo target dir from `cargo metadata`
+ // - ./target/criterion
+ if let Some(value) = env::var_os("CRITERION_HOME") {
+ PathBuf::from(value)
+ } else if let Some(path) = cargo_target_directory() {
+ path.join("criterion")
+ } else {
+ PathBuf::from("target/criterion")
+ }
+});
fn debug_enabled() -> bool {
*DEBUG_ENABLED
@@ -177,36 +172,6 @@ pub fn black_box<T>(dummy: T) -> T {
}
}
-/// Representing a function to benchmark together with a name of that function.
-/// Used together with `bench_functions` to represent one out of multiple functions
-/// under benchmark.
-#[doc(hidden)]
-pub struct Fun<I: fmt::Debug, M: Measurement + 'static = WallTime> {
- f: NamedRoutine<I, M>,
- _phantom: PhantomData<M>,
-}
-
-impl<I, M: Measurement> Fun<I, M>
-where
- I: fmt::Debug + 'static,
-{
- /// Create a new `Fun` given a name and a closure
- pub fn new<F>(name: &str, f: F) -> Fun<I, M>
- where
- F: FnMut(&mut Bencher<'_, M>, &I) + 'static,
- {
- let routine = NamedRoutine {
- id: name.to_owned(),
- f: Box::new(RefCell::new(Function::new(f))),
- };
-
- Fun {
- f: routine,
- _phantom: PhantomData,
- }
- }
-}
-
/// Argument to [`Bencher::iter_batched`](struct.Bencher.html#method.iter_batched) and
/// [`Bencher::iter_batched_ref`](struct.Bencher.html#method.iter_batched_ref) which controls the
/// batch size.
@@ -296,12 +261,17 @@ impl BatchSize {
/// Baseline describes how the baseline_directory is handled.
#[derive(Debug, Clone, Copy)]
pub enum Baseline {
- /// Compare ensures a previous saved version of the baseline
- /// exists and runs comparison against that.
- Compare,
+ /// CompareLenient compares against a previous saved version of the baseline.
+ /// If a previous baseline does not exist, the benchmark is run as normal but no comparison occurs.
+ CompareLenient,
+ /// CompareStrict compares against a previous saved version of the baseline.
+ /// If a previous baseline does not exist, a panic occurs.
+ CompareStrict,
/// Save writes the benchmark results to the baseline directory,
/// overwriting any results that were previously there.
Save,
+ /// Discard benchmark results.
+ Discard,
}
/// Enum used to select the plotting backend.
@@ -313,12 +283,18 @@ pub enum PlottingBackend {
/// Plotting backend which uses the rust 'Plotters' library. This is the default if `gnuplot`
/// is not installed.
Plotters,
+ /// Null plotting backend which outputs nothing.
+ None,
}
impl PlottingBackend {
- fn create_plotter(&self) -> Box<dyn Plotter> {
+ fn create_plotter(&self) -> Option<Box<dyn Plotter>> {
match self {
- PlottingBackend::Gnuplot => Box::new(Gnuplot::default()),
- PlottingBackend::Plotters => Box::new(PlottersBackend::default()),
+ PlottingBackend::Gnuplot => Some(Box::<Gnuplot>::default()),
+ #[cfg(feature = "plotters")]
+ PlottingBackend::Plotters => Some(Box::<PlottersBackend>::default()),
+ #[cfg(not(feature = "plotters"))]
+ PlottingBackend::Plotters => panic!("Criterion was built without plotters support."),
+ PlottingBackend::None => None,
}
}
}
@@ -329,7 +305,7 @@ pub(crate) enum Mode {
/// Run benchmarks normally.
Benchmark,
/// List all benchmarks but do not run them.
- List,
+ List(ListFormat),
/// Run benchmarks once to verify that they work, but otherwise do not measure them.
Test,
/// Iterate benchmarks for a given length of time but do not analyze or report on them.
@@ -339,6 +315,39 @@ impl Mode {
pub fn is_benchmark(&self) -> bool {
matches!(self, Mode::Benchmark)
}
+
+ pub fn is_terse(&self) -> bool {
+ matches!(self, Mode::List(ListFormat::Terse))
+ }
+}
+
+#[derive(Debug, Clone)]
+/// Enum representing the list format.
+pub(crate) enum ListFormat {
+ /// The regular, default format.
+ Pretty,
+ /// The terse format, where nothing other than the name of the test and ": benchmark" at the end
+ /// is printed out.
+ Terse,
+}
+
+impl Default for ListFormat {
+ fn default() -> Self {
+ Self::Pretty
+ }
+}
+
+/// Benchmark filtering support.
+#[derive(Clone, Debug)]
+pub enum BenchmarkFilter {
+ /// Run all benchmarks.
+ AcceptAll,
+ /// Run benchmarks matching this regex.
+ Regex(Regex),
+ /// Run the benchmark matching this string exactly.
+ Exact(String),
+ /// Do not run any benchmarks.
+ RejectAll,
}
/// The benchmark manager
@@ -357,7 +366,7 @@ impl Mode {
/// benchmark.
pub struct Criterion<M: Measurement = WallTime> {
config: BenchmarkConfig,
- filter: Option<Regex>,
+ filter: BenchmarkFilter,
report: Reports,
output_directory: PathBuf,
baseline_directory: String,
@@ -383,7 +392,7 @@ fn cargo_target_directory() -> Option<PathBuf> {
.map(PathBuf::from)
.or_else(|| {
let output = Command::new(env::var_os("CARGO")?)
- .args(&["metadata", "--format-version", "1"])
+ .args(["metadata", "--format-version", "1"])
.output()
.ok()?;
let metadata: Metadata = serde_json::from_slice(&output.stdout).ok()?;
@@ -406,27 +415,26 @@ impl Default for Criterion {
fn default() -> Criterion {
let reports = Reports {
cli_enabled: true,
- cli: CliReport::new(false, false, false),
+ cli: CliReport::new(false, false, CliVerbosity::Normal),
bencher_enabled: false,
bencher: BencherReport,
- html_enabled: true,
- html: Html::new(DEFAULT_PLOTTING_BACKEND.create_plotter()),
- csv_enabled: true,
- csv: FileCsvReport,
+ html: DEFAULT_PLOTTING_BACKEND.create_plotter().map(Html::new),
+ csv_enabled: cfg!(feature = "csv_output"),
};
let mut criterion = Criterion {
config: BenchmarkConfig {
confidence_level: 0.95,
- measurement_time: Duration::new(5, 0),
+ measurement_time: Duration::from_secs(5),
noise_threshold: 0.01,
nresamples: 100_000,
sample_size: 100,
significance_level: 0.05,
- warm_up_time: Duration::new(3, 0),
+ warm_up_time: Duration::from_secs(3),
sampling_mode: SamplingMode::Auto,
+ quick_mode: false,
},
- filter: None,
+ filter: BenchmarkFilter::AcceptAll,
report: reports,
baseline_directory: "base".to_owned(),
baseline: Baseline::Save,
@@ -447,7 +455,7 @@ impl Default for Criterion {
criterion.report.cli_enabled = false;
criterion.report.bencher_enabled = false;
criterion.report.csv_enabled = false;
- criterion.report.html_enabled = false;
+ criterion.report.html = None;
}
criterion
}
@@ -475,6 +483,7 @@ impl<M: Measurement> Criterion<M> {
}
}
+ #[must_use]
/// Changes the internal profiler for benchmarks run with this runner. See
/// the Profiler trait for more details.
pub fn with_profiler<P: Profiler + 'static>(self, p: P) -> Criterion<M> {
@@ -484,6 +493,7 @@ impl<M: Measurement> Criterion<M> {
}
}
+ #[must_use]
/// Set the plotting backend. By default, Criterion will use gnuplot if available, or plotters
/// if not.
///
@@ -498,10 +508,11 @@ impl<M: Measurement> Criterion<M> {
);
}
- self.report.html = Html::new(backend.create_plotter());
+ self.report.html = backend.create_plotter().map(Html::new);
self
}
+ #[must_use]
/// Changes the default size of the sample for benchmarks run with this runner.
///
/// A bigger sample should yield more accurate results if paired with a sufficiently large
@@ -519,18 +530,20 @@ impl<M: Measurement> Criterion<M> {
self
}
+ #[must_use]
/// Changes the default warm up time for benchmarks run with this runner.
///
/// # Panics
///
/// Panics if the input duration is zero
pub fn warm_up_time(mut self, dur: Duration) -> Criterion<M> {
- assert!(dur.to_nanos() > 0);
+ assert!(dur.as_nanos() > 0);
self.config.warm_up_time = dur;
self
}
+ #[must_use]
/// Changes the default measurement time for benchmarks run with this runner.
///
/// With a longer time, the measurement will become more resilient to transitory peak loads
@@ -542,12 +555,13 @@ impl<M: Measurement> Criterion<M> {
///
/// Panics if the input duration is zero
pub fn measurement_time(mut self, dur: Duration) -> Criterion<M> {
- assert!(dur.to_nanos() > 0);
+ assert!(dur.as_nanos() > 0);
self.config.measurement_time = dur;
self
}
+ #[must_use]
/// Changes the default number of resamples for benchmarks run with this runner.
///
/// Number of resamples to use for the
@@ -562,13 +576,14 @@ impl<M: Measurement> Criterion<M> {
pub fn nresamples(mut self, n: usize) -> Criterion<M> {
assert!(n > 0);
if n <= 1000 {
- println!("\nWarning: It is not recommended to reduce nresamples below 1000.");
+ eprintln!("\nWarning: It is not recommended to reduce nresamples below 1000.");
}
self.config.nresamples = n;
self
}
+ #[must_use]
/// Changes the default noise threshold for benchmarks run with this runner. The noise threshold
/// is used to filter out small changes in performance, even if they are statistically
/// significant. Sometimes benchmarking the same code twice will result in small but
@@ -588,6 +603,7 @@ impl<M: Measurement> Criterion<M> {
self
}
+ #[must_use]
/// Changes the default confidence level for benchmarks run with this runner. The confidence
/// level is the desired probability that the true runtime lies within the estimated
/// [confidence interval](https://en.wikipedia.org/wiki/Confidence_interval). The default is
@@ -599,13 +615,14 @@ impl<M: Measurement> Criterion<M> {
pub fn confidence_level(mut self, cl: f64) -> Criterion<M> {
assert!(cl > 0.0 && cl < 1.0);
if cl < 0.5 {
- println!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
+ eprintln!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
}
self.config.confidence_level = cl;
self
}
+ #[must_use]
/// Changes the default [significance level](https://en.wikipedia.org/wiki/Statistical_significance)
/// for benchmarks run with this runner. This is used to perform a
/// [hypothesis test](https://en.wikipedia.org/wiki/Statistical_hypothesis_testing) to see if
@@ -633,32 +650,29 @@ impl<M: Measurement> Criterion<M> {
self
}
+ #[must_use]
/// Enables plotting
pub fn with_plots(mut self) -> Criterion<M> {
// If running under cargo-criterion then don't re-enable the reports; let it do the reporting.
- if self.connection.is_none() {
- self.report.html_enabled = true;
+ if self.connection.is_none() && self.report.html.is_none() {
+ let default_backend = DEFAULT_PLOTTING_BACKEND.create_plotter();
+ if let Some(backend) = default_backend {
+ self.report.html = Some(Html::new(backend));
+ } else {
+ panic!("Cannot find a default plotting backend!");
+ }
}
self
}
+ #[must_use]
/// Disables plotting
pub fn without_plots(mut self) -> Criterion<M> {
- self.report.html_enabled = false;
+ self.report.html = None;
self
}
- /// Return true if generation of the plots is possible.
- #[deprecated(
- since = "0.3.4",
- note = "No longer useful; since the plotters backend is available Criterion.rs can always generate plots"
- )]
- pub fn can_plot(&self) -> bool {
- // Trivially true now that we have plotters.
- // TODO: Deprecate and remove this.
- true
- }
-
+ #[must_use]
/// Names an explicit baseline and enables overwriting the previous results.
pub fn save_baseline(mut self, baseline: String) -> Criterion<M> {
self.baseline_directory = baseline;
@@ -666,15 +680,23 @@ impl<M: Measurement> Criterion<M> {
self
}
+ #[must_use]
/// Names an explicit baseline and disables overwriting the previous results.
- pub fn retain_baseline(mut self, baseline: String) -> Criterion<M> {
+ pub fn retain_baseline(mut self, baseline: String, strict: bool) -> Criterion<M> {
self.baseline_directory = baseline;
- self.baseline = Baseline::Compare;
+ self.baseline = if strict {
+ Baseline::CompareStrict
+ } else {
+ Baseline::CompareLenient
+ };
self
}
+ #[must_use]
/// Filters the benchmarks. Only benchmarks with names that contain the
/// given string will be executed.
+ ///
+ /// This overwrites [`Self::with_benchmark_filter`].
pub fn with_filter<S: Into<String>>(mut self, filter: S) -> Criterion<M> {
let filter_text = filter.into();
let filter = Regex::new(&filter_text).unwrap_or_else(|err| {
@@ -683,11 +705,21 @@ impl<M: Measurement> Criterion<M> {
filter_text, err
)
});
- self.filter = Some(filter);
+ self.filter = BenchmarkFilter::Regex(filter);
+
+ self
+ }
+
+ /// Only run benchmarks specified by the given filter.
+ ///
+ /// This overwrites [`Self::with_filter`].
+ pub fn with_benchmark_filter(mut self, filter: BenchmarkFilter) -> Criterion<M> {
+ self.filter = filter;
self
}
+ #[must_use]
/// Override whether the CLI output will be colored or not. Usually you would use the `--color`
/// CLI argument, but this is available for programmatic use as well.
pub fn with_output_color(mut self, enabled: bool) -> Criterion<M> {
@@ -696,6 +728,7 @@ impl<M: Measurement> Criterion<M> {
}
/// Set the output directory (currently for testing only)
+ #[must_use]
#[doc(hidden)]
pub fn output_directory(mut self, path: &Path) -> Criterion<M> {
self.output_directory = path.to_owned();
@@ -704,6 +737,7 @@ impl<M: Measurement> Criterion<M> {
}
/// Set the profile time (currently for testing only)
+ #[must_use]
#[doc(hidden)]
pub fn profile_time(mut self, profile_time: Option<Duration>) -> Criterion<M> {
match profile_time {
@@ -731,114 +765,152 @@ impl<M: Measurement> Criterion<M> {
/// Configure this criterion struct based on the command-line arguments to
/// this process.
+ #[must_use]
#[cfg_attr(feature = "cargo-clippy", allow(clippy::cognitive_complexity))]
pub fn configure_from_args(mut self) -> Criterion<M> {
- use clap::{App, Arg};
- let matches = App::new("Criterion Benchmark")
- .arg(Arg::with_name("FILTER")
+ use clap::{value_parser, Arg, Command};
+ let matches = Command::new("Criterion Benchmark")
+ .arg(Arg::new("FILTER")
.help("Skip benchmarks whose names do not contain FILTER.")
.index(1))
- .arg(Arg::with_name("color")
- .short("c")
+ .arg(Arg::new("color")
+ .short('c')
.long("color")
.alias("colour")
- .takes_value(true)
- .possible_values(&["auto", "always", "never"])
+ .value_parser(["auto", "always", "never"])
.default_value("auto")
.help("Configure coloring of output. always = always colorize output, never = never colorize output, auto = colorize output if output is a tty and compiled for unix."))
- .arg(Arg::with_name("verbose")
- .short("v")
+ .arg(Arg::new("verbose")
+ .short('v')
.long("verbose")
+ .num_args(0)
.help("Print additional statistical information."))
- .arg(Arg::with_name("noplot")
- .short("n")
+ .arg(Arg::new("quiet")
+ .long("quiet")
+ .num_args(0)
+ .conflicts_with("verbose")
+ .help("Print only the benchmark results."))
+ .arg(Arg::new("noplot")
+ .short('n')
.long("noplot")
+ .num_args(0)
.help("Disable plot and HTML generation."))
- .arg(Arg::with_name("save-baseline")
- .short("s")
+ .arg(Arg::new("save-baseline")
+ .short('s')
.long("save-baseline")
.default_value("base")
.help("Save results under a named baseline."))
- .arg(Arg::with_name("baseline")
- .short("b")
+ .arg(Arg::new("discard-baseline")
+ .long("discard-baseline")
+ .num_args(0)
+ .conflicts_with_all(["save-baseline", "baseline", "baseline-lenient"])
+ .help("Discard benchmark results."))
+ .arg(Arg::new("baseline")
+ .short('b')
.long("baseline")
- .takes_value(true)
- .conflicts_with("save-baseline")
- .help("Compare to a named baseline."))
- .arg(Arg::with_name("list")
+ .conflicts_with_all(["save-baseline", "baseline-lenient"])
+ .help("Compare to a named baseline. If any benchmarks do not have the specified baseline this command fails."))
+ .arg(Arg::new("baseline-lenient")
+ .long("baseline-lenient")
+ .conflicts_with_all(["save-baseline", "baseline"])
+ .help("Compare to a named baseline. If any benchmarks do not have the specified baseline then just those benchmarks are not compared against the baseline while every other benchmark is compared against the baseline."))
+ .arg(Arg::new("list")
.long("list")
+ .num_args(0)
.help("List all benchmarks")
- .conflicts_with_all(&["test", "profile-time"]))
- .arg(Arg::with_name("profile-time")
+ .conflicts_with_all(["test", "profile-time"]))
+ .arg(Arg::new("format")
+ .long("format")
+ .value_parser(["pretty", "terse"])
+ .default_value("pretty")
+ // Note that libtest's --format also works during test execution, but criterion
+ // doesn't support that at the moment.
+ .help("Output formatting"))
+ .arg(Arg::new("ignored")
+ .long("ignored")
+ .num_args(0)
+ .help("List or run ignored benchmarks (currently means skip all benchmarks)"))
+ .arg(Arg::new("exact")
+ .long("exact")
+ .num_args(0)
+ .help("Run benchmarks that exactly match the provided filter"))
+ .arg(Arg::new("profile-time")
.long("profile-time")
- .takes_value(true)
+ .value_parser(value_parser!(f64))
.help("Iterate each benchmark for approximately the given number of seconds, doing no analysis and without storing the results. Useful for running the benchmarks in a profiler.")
- .conflicts_with_all(&["test", "list"]))
- .arg(Arg::with_name("load-baseline")
+ .conflicts_with_all(["test", "list"]))
+ .arg(Arg::new("load-baseline")
.long("load-baseline")
- .takes_value(true)
.conflicts_with("profile-time")
.requires("baseline")
.help("Load a previous baseline instead of sampling new data."))
- .arg(Arg::with_name("sample-size")
+ .arg(Arg::new("sample-size")
.long("sample-size")
- .takes_value(true)
- .help(&format!("Changes the default size of the sample for this run. [default: {}]", self.config.sample_size)))
- .arg(Arg::with_name("warm-up-time")
+ .value_parser(value_parser!(usize))
+ .help(format!("Changes the default size of the sample for this run. [default: {}]", self.config.sample_size)))
+ .arg(Arg::new("warm-up-time")
.long("warm-up-time")
- .takes_value(true)
- .help(&format!("Changes the default warm up time for this run. [default: {}]", self.config.warm_up_time.as_secs())))
- .arg(Arg::with_name("measurement-time")
+ .value_parser(value_parser!(f64))
+ .help(format!("Changes the default warm up time for this run. [default: {}]", self.config.warm_up_time.as_secs())))
+ .arg(Arg::new("measurement-time")
.long("measurement-time")
- .takes_value(true)
- .help(&format!("Changes the default measurement time for this run. [default: {}]", self.config.measurement_time.as_secs())))
- .arg(Arg::with_name("nresamples")
+ .value_parser(value_parser!(f64))
+ .help(format!("Changes the default measurement time for this run. [default: {}]", self.config.measurement_time.as_secs())))
+ .arg(Arg::new("nresamples")
.long("nresamples")
- .takes_value(true)
- .help(&format!("Changes the default number of resamples for this run. [default: {}]", self.config.nresamples)))
- .arg(Arg::with_name("noise-threshold")
+ .value_parser(value_parser!(usize))
+ .help(format!("Changes the default number of resamples for this run. [default: {}]", self.config.nresamples)))
+ .arg(Arg::new("noise-threshold")
.long("noise-threshold")
- .takes_value(true)
- .help(&format!("Changes the default noise threshold for this run. [default: {}]", self.config.noise_threshold)))
- .arg(Arg::with_name("confidence-level")
+ .value_parser(value_parser!(f64))
+ .help(format!("Changes the default noise threshold for this run. [default: {}]", self.config.noise_threshold)))
+ .arg(Arg::new("confidence-level")
.long("confidence-level")
- .takes_value(true)
- .help(&format!("Changes the default confidence level for this run. [default: {}]", self.config.confidence_level)))
- .arg(Arg::with_name("significance-level")
+ .value_parser(value_parser!(f64))
+ .help(format!("Changes the default confidence level for this run. [default: {}]", self.config.confidence_level)))
+ .arg(Arg::new("significance-level")
.long("significance-level")
- .takes_value(true)
- .help(&format!("Changes the default significance level for this run. [default: {}]", self.config.significance_level)))
- .arg(Arg::with_name("test")
- .hidden(true)
+ .value_parser(value_parser!(f64))
+ .help(format!("Changes the default significance level for this run. [default: {}]", self.config.significance_level)))
+ .arg(Arg::new("quick")
+ .long("quick")
+ .num_args(0)
+ .conflicts_with("sample-size")
+ .help(format!("Benchmark only until the significance level has been reached [default: {}]", self.config.quick_mode)))
+ .arg(Arg::new("test")
+ .hide(true)
.long("test")
+ .num_args(0)
.help("Run the benchmarks once, to verify that they execute successfully, but do not measure or report the results.")
- .conflicts_with_all(&["list", "profile-time"]))
- .arg(Arg::with_name("bench")
- .hidden(true)
- .long("bench"))
- .arg(Arg::with_name("plotting-backend")
+ .conflicts_with_all(["list", "profile-time"]))
+ .arg(Arg::new("bench")
+ .hide(true)
+ .long("bench")
+ .num_args(0))
+ .arg(Arg::new("plotting-backend")
.long("plotting-backend")
- .takes_value(true)
- .possible_values(&["gnuplot", "plotters"])
+ .value_parser(["gnuplot", "plotters"])
.help("Set the plotting backend. By default, Criterion.rs will use the gnuplot backend if gnuplot is available, or the plotters backend if it isn't."))
- .arg(Arg::with_name("output-format")
+ .arg(Arg::new("output-format")
.long("output-format")
- .takes_value(true)
- .possible_values(&["criterion", "bencher"])
+ .value_parser(["criterion", "bencher"])
.default_value("criterion")
.help("Change the CLI output format. By default, Criterion.rs will use its own format. If output format is set to 'bencher', Criterion.rs will print output in a format that resembles the 'bencher' crate."))
- .arg(Arg::with_name("nocapture")
+ .arg(Arg::new("nocapture")
.long("nocapture")
- .hidden(true)
+ .num_args(0)
+ .hide(true)
.help("Ignored, but added for compatibility with libtest."))
- .arg(Arg::with_name("show-output")
+ .arg(Arg::new("show-output")
.long("show-output")
- .hidden(true)
+ .num_args(0)
+ .hide(true)
.help("Ignored, but added for compatibility with libtest."))
- .arg(Arg::with_name("version")
- .hidden(true)
- .short("V")
- .long("version"))
+ .arg(Arg::new("version")
+ .hide(true)
+ .short('V')
+ .long("version")
+ .num_args(0))
.after_help("
This executable is a Criterion.rs benchmark.
See https://github.com/bheisler/criterion.rs for more details.
@@ -855,62 +927,68 @@ https://bheisler.github.io/criterion.rs/book/faq.html
.get_matches();
if self.connection.is_some() {
- if let Some(color) = matches.value_of("color") {
+ if let Some(color) = matches.get_one::<String>("color") {
if color != "auto" {
- println!("Warning: --color will be ignored when running with cargo-criterion. Use `cargo criterion --color {} -- <args>` instead.", color);
+ eprintln!("Warning: --color will be ignored when running with cargo-criterion. Use `cargo criterion --color {} -- <args>` instead.", color);
}
}
- if matches.is_present("verbose") {
- println!("Warning: --verbose will be ignored when running with cargo-criterion. Use `cargo criterion --output-format verbose -- <args>` instead.");
+ if matches.get_flag("verbose") {
+ eprintln!("Warning: --verbose will be ignored when running with cargo-criterion. Use `cargo criterion --output-format verbose -- <args>` instead.");
}
- if matches.is_present("noplot") {
- println!("Warning: --noplot will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend disabled -- <args>` instead.");
+ if matches.get_flag("noplot") {
+ eprintln!("Warning: --noplot will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend disabled -- <args>` instead.");
}
- if let Some(backend) = matches.value_of("plotting-backend") {
- println!("Warning: --plotting-backend will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend {} -- <args>` instead.", backend);
+ if let Some(backend) = matches.get_one::<String>("plotting-backend") {
+ eprintln!("Warning: --plotting-backend will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend {} -- <args>` instead.", backend);
}
- if let Some(format) = matches.value_of("output-format") {
+ if let Some(format) = matches.get_one::<String>("output-format") {
if format != "criterion" {
- println!("Warning: --output-format will be ignored when running with cargo-criterion. Use `cargo criterion --output-format {} -- <args>` instead.", format);
+ eprintln!("Warning: --output-format will be ignored when running with cargo-criterion. Use `cargo criterion --output-format {} -- <args>` instead.", format);
}
}
- if matches.is_present("baseline")
+ if matches.contains_id("baseline")
|| matches
- .value_of("save-baseline")
- .map(|base| base != "base")
- .unwrap_or(false)
- || matches.is_present("load-baseline")
+ .get_one::<String>("save-baseline")
+ .map_or(false, |base| base != "base")
+ || matches.contains_id("load-baseline")
{
- println!("Error: baselines are not supported when running with cargo-criterion.");
+ eprintln!("Error: baselines are not supported when running with cargo-criterion.");
std::process::exit(1);
}
}
- let bench = matches.is_present("bench");
- let test = matches.is_present("test");
+ let bench = matches.get_flag("bench");
+ let test = matches.get_flag("test");
let test_mode = match (bench, test) {
(true, true) => true, // cargo bench -- --test should run tests
(true, false) => false, // cargo bench should run benchmarks
(false, _) => true, // cargo test --benches should run tests
};
- self.mode = if test_mode {
+ self.mode = if matches.get_flag("list") {
+ let list_format = match matches
+ .get_one::<String>("format")
+ .expect("a default value was provided for this")
+ .as_str()
+ {
+ "pretty" => ListFormat::Pretty,
+ "terse" => ListFormat::Terse,
+ other => unreachable!(
+ "unrecognized value for --format that isn't part of possible-values: {}",
+ other
+ ),
+ };
+ Mode::List(list_format)
+ } else if test_mode {
Mode::Test
- } else if matches.is_present("list") {
- Mode::List
- } else if matches.is_present("profile-time") {
- let num_seconds = value_t!(matches.value_of("profile-time"), u64).unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
-
- if num_seconds < 1 {
- println!("Profile time must be at least one second.");
+ } else if let Some(&num_seconds) = matches.get_one("profile-time") {
+ if num_seconds < 1.0 {
+ eprintln!("Profile time must be at least one second.");
std::process::exit(1);
}
- Mode::Profile(Duration::from_secs(num_seconds))
+ Mode::Profile(Duration::from_secs_f64(num_seconds))
} else {
Mode::Benchmark
};
@@ -920,11 +998,27 @@ https://bheisler.github.io/criterion.rs/book/faq.html
self.connection = None;
}
- if let Some(filter) = matches.value_of("FILTER") {
- self = self.with_filter(filter);
- }
+ let filter = if matches.get_flag("ignored") {
+ // --ignored overwrites any name-based filters passed in.
+ BenchmarkFilter::RejectAll
+ } else if let Some(filter) = matches.get_one::<String>("FILTER") {
+ if matches.get_flag("exact") {
+ BenchmarkFilter::Exact(filter.to_owned())
+ } else {
+ let regex = Regex::new(filter).unwrap_or_else(|err| {
+ panic!(
+ "Unable to parse '{}' as a regular expression: {}",
+ filter, err
+ )
+ });
+ BenchmarkFilter::Regex(regex)
+ }
+ } else {
+ BenchmarkFilter::AcceptAll
+ };
+ self = self.with_benchmark_filter(filter);
- match matches.value_of("plotting-backend") {
+ match matches.get_one("plotting-backend").map(String::as_str) {
// Use plotting_backend() here to re-use the panic behavior if Gnuplot is not available.
Some("gnuplot") => self = self.plotting_backend(PlottingBackend::Gnuplot),
Some("plotters") => self = self.plotting_backend(PlottingBackend::Plotters),
@@ -932,18 +1026,23 @@ https://bheisler.github.io/criterion.rs/book/faq.html
None => {}
}
- if matches.is_present("noplot") {
+ if matches.get_flag("noplot") {
self = self.without_plots();
- } else {
- self = self.with_plots();
}
- if let Some(dir) = matches.value_of("save-baseline") {
+ if let Some(dir) = matches.get_one::<String>("save-baseline") {
self.baseline = Baseline::Save;
self.baseline_directory = dir.to_owned()
}
- if let Some(dir) = matches.value_of("baseline") {
- self.baseline = Baseline::Compare;
+ if matches.get_flag("discard-baseline") {
+ self.baseline = Baseline::Discard;
+ }
+ if let Some(dir) = matches.get_one::<String>("baseline") {
+ self.baseline = Baseline::CompareStrict;
+ self.baseline_directory = dir.to_owned();
+ }
+ if let Some(dir) = matches.get_one::<String>("baseline-lenient") {
+ self.baseline = Baseline::CompareLenient;
self.baseline_directory = dir.to_owned();
}
@@ -952,19 +1051,26 @@ https://bheisler.github.io/criterion.rs/book/faq.html
self.report.cli_enabled = false;
self.report.bencher_enabled = false;
self.report.csv_enabled = false;
- self.report.html_enabled = false;
+ self.report.html = None;
} else {
- match matches.value_of("output-format") {
+ match matches.get_one("output-format").map(String::as_str) {
Some("bencher") => {
self.report.bencher_enabled = true;
self.report.cli_enabled = false;
}
_ => {
- let verbose = matches.is_present("verbose");
- let stdout_isatty = atty::is(atty::Stream::Stdout);
+ let verbose = matches.get_flag("verbose");
+ let verbosity = if verbose {
+ CliVerbosity::Verbose
+ } else if matches.get_flag("quiet") {
+ CliVerbosity::Quiet
+ } else {
+ CliVerbosity::Normal
+ };
+ let stdout_isatty = stdout().is_terminal();
let mut enable_text_overwrite = stdout_isatty && !verbose && !debug_enabled();
let enable_text_coloring;
- match matches.value_of("color") {
+ match matches.get_one("color").map(String::as_str) {
Some("always") => {
enable_text_coloring = true;
}
@@ -977,102 +1083,76 @@ https://bheisler.github.io/criterion.rs/book/faq.html
self.report.bencher_enabled = false;
self.report.cli_enabled = true;
self.report.cli =
- CliReport::new(enable_text_overwrite, enable_text_coloring, verbose);
+ CliReport::new(enable_text_overwrite, enable_text_coloring, verbosity);
}
};
}
- if let Some(dir) = matches.value_of("load-baseline") {
+ if let Some(dir) = matches.get_one::<String>("load-baseline") {
self.load_baseline = Some(dir.to_owned());
}
- if matches.is_present("sample-size") {
- let num_size = value_t!(matches.value_of("sample-size"), usize).unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
-
+ if let Some(&num_size) = matches.get_one("sample-size") {
assert!(num_size >= 10);
self.config.sample_size = num_size;
}
- if matches.is_present("warm-up-time") {
- let num_seconds = value_t!(matches.value_of("warm-up-time"), u64).unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
-
- let dur = std::time::Duration::new(num_seconds, 0);
- assert!(dur.to_nanos() > 0);
+ if let Some(&num_seconds) = matches.get_one("warm-up-time") {
+ let dur = std::time::Duration::from_secs_f64(num_seconds);
+ assert!(dur.as_nanos() > 0);
self.config.warm_up_time = dur;
}
- if matches.is_present("measurement-time") {
- let num_seconds =
- value_t!(matches.value_of("measurement-time"), u64).unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
-
- let dur = std::time::Duration::new(num_seconds, 0);
- assert!(dur.to_nanos() > 0);
+ if let Some(&num_seconds) = matches.get_one("measurement-time") {
+ let dur = std::time::Duration::from_secs_f64(num_seconds);
+ assert!(dur.as_nanos() > 0);
self.config.measurement_time = dur;
}
- if matches.is_present("nresamples") {
- let num_resamples =
- value_t!(matches.value_of("nresamples"), usize).unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
-
+ if let Some(&num_resamples) = matches.get_one("nresamples") {
assert!(num_resamples > 0);
self.config.nresamples = num_resamples;
}
- if matches.is_present("noise-threshold") {
- let num_noise_threshold = value_t!(matches.value_of("noise-threshold"), f64)
- .unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
-
+ if let Some(&num_noise_threshold) = matches.get_one("noise-threshold") {
assert!(num_noise_threshold > 0.0);
self.config.noise_threshold = num_noise_threshold;
}
- if matches.is_present("confidence-level") {
- let num_confidence_level = value_t!(matches.value_of("confidence-level"), f64)
- .unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
-
+ if let Some(&num_confidence_level) = matches.get_one("confidence-level") {
assert!(num_confidence_level > 0.0 && num_confidence_level < 1.0);
self.config.confidence_level = num_confidence_level;
}
- if matches.is_present("significance-level") {
- let num_significance_level = value_t!(matches.value_of("significance-level"), f64)
- .unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
-
+ if let Some(&num_significance_level) = matches.get_one("significance-level") {
assert!(num_significance_level > 0.0 && num_significance_level < 1.0);
self.config.significance_level = num_significance_level;
}
+ if matches.get_flag("quick") {
+ self.config.quick_mode = true;
+ }
+
self
}
fn filter_matches(&self, id: &str) -> bool {
match &self.filter {
- Some(regex) => regex.is_match(id),
- None => true,
+ BenchmarkFilter::AcceptAll => true,
+ BenchmarkFilter::Regex(regex) => regex.is_match(id),
+ BenchmarkFilter::Exact(exact) => id == exact,
+ BenchmarkFilter::RejectAll => false,
}
}
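
Taken together, with_benchmark_filter above and filter_matches here replace the
old Option<Regex> filter with a four-variant enum. A minimal sketch of driving
it programmatically, assuming this criterion version re-exports BenchmarkFilter
at the crate root (the exact export path is an assumption):

    use criterion::{BenchmarkFilter, Criterion};

    fn main() {
        // Run only the benchmark whose full id is exactly "fib/20",
        // mirroring what `--exact` does for a CLI-supplied FILTER.
        let mut c = Criterion::default()
            .with_benchmark_filter(BenchmarkFilter::Exact("fib/20".into()));
        c.bench_function("fib/20", |b| b.iter(|| (1..=20u64).product::<u64>()));
        c.final_summary();
    }
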
+ /// Returns true iff we should save the benchmark results in
+ /// JSON files on the local disk.
+ fn should_save_baseline(&self) -> bool {
+ self.connection.is_none()
+ && self.load_baseline.is_none()
+ && !matches!(self.baseline, Baseline::Discard)
+ }
+
/// Return a benchmark group. All benchmarks performed using a benchmark group will be
/// grouped together in the final report.
///
@@ -1185,150 +1265,6 @@ where
);
self
}
-
- /// Benchmarks a function under various inputs
- ///
- /// This is a convenience method to execute several related benchmarks. Each benchmark will
- /// receive the id: `${id}/${input}`.
- ///
- /// # Example
- ///
- /// ```rust
- /// # #[macro_use] extern crate criterion;
- /// # use self::criterion::*;
- ///
- /// fn bench(c: &mut Criterion) {
- /// c.bench_function_over_inputs("from_elem",
- /// |b: &mut Bencher, size: &usize| {
- /// b.iter(|| vec![0u8; *size]);
- /// },
- /// vec![1024, 2048, 4096]
- /// );
- /// }
- ///
- /// criterion_group!(benches, bench);
- /// criterion_main!(benches);
- /// ```
- #[doc(hidden)]
- #[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
- #[allow(deprecated)]
- pub fn bench_function_over_inputs<I, F>(
- &mut self,
- id: &str,
- f: F,
- inputs: I,
- ) -> &mut Criterion<M>
- where
- I: IntoIterator,
- I::Item: fmt::Debug + 'static,
- F: FnMut(&mut Bencher<'_, M>, &I::Item) + 'static,
- {
- self.bench(id, ParameterizedBenchmark::new(id, f, inputs))
- }
-
- /// Benchmarks multiple functions
- ///
- /// All functions get the same input and are compared with the other implementations.
- /// Works similar to `bench_function`, but with multiple functions.
- ///
- /// # Example
- ///
- /// ``` rust
- /// # #[macro_use] extern crate criterion;
- /// # use self::criterion::*;
- /// # fn seq_fib(i: &u32) {}
- /// # fn par_fib(i: &u32) {}
- ///
- /// fn bench_seq_fib(b: &mut Bencher, i: &u32) {
- /// b.iter(|| {
- /// seq_fib(i);
- /// });
- /// }
- ///
- /// fn bench_par_fib(b: &mut Bencher, i: &u32) {
- /// b.iter(|| {
- /// par_fib(i);
- /// });
- /// }
- ///
- /// fn bench(c: &mut Criterion) {
- /// let sequential_fib = Fun::new("Sequential", bench_seq_fib);
- /// let parallel_fib = Fun::new("Parallel", bench_par_fib);
- /// let funs = vec![sequential_fib, parallel_fib];
- ///
- /// c.bench_functions("Fibonacci", funs, 14);
- /// }
- ///
- /// criterion_group!(benches, bench);
- /// criterion_main!(benches);
- /// ```
- #[doc(hidden)]
- #[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
- #[allow(deprecated)]
- pub fn bench_functions<I>(
- &mut self,
- id: &str,
- funs: Vec<Fun<I, M>>,
- input: I,
- ) -> &mut Criterion<M>
- where
- I: fmt::Debug + 'static,
- {
- let benchmark = ParameterizedBenchmark::with_functions(
- funs.into_iter().map(|fun| fun.f).collect(),
- vec![input],
- );
-
- self.bench(id, benchmark)
- }
-
- /// Executes the given benchmark. Use this variant to execute benchmarks
- /// with complex configuration. This can be used to compare multiple
- /// functions, execute benchmarks with custom configuration settings and
- /// more. See the Benchmark and ParameterizedBenchmark structs for more
- /// information.
- ///
- /// ```rust
- /// # #[macro_use] extern crate criterion;
- /// # use criterion::*;
- /// # fn routine_1() {}
- /// # fn routine_2() {}
- ///
- /// fn bench(c: &mut Criterion) {
- /// // Setup (construct data, allocate memory, etc)
- /// c.bench(
- /// "routines",
- /// Benchmark::new("routine_1", |b| b.iter(|| routine_1()))
- /// .with_function("routine_2", |b| b.iter(|| routine_2()))
- /// .sample_size(50)
- /// );
- /// }
- ///
- /// criterion_group!(benches, bench);
- /// criterion_main!(benches);
- /// ```
- #[doc(hidden)]
- #[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
- pub fn bench<B: BenchmarkDefinition<M>>(
- &mut self,
- group_id: &str,
- benchmark: B,
- ) -> &mut Criterion<M> {
- benchmark.run(group_id, self);
- self
- }
-}
-
-trait DurationExt {
- fn to_nanos(&self) -> u64;
-}
-
-const NANOS_PER_SEC: u64 = 1_000_000_000;
-
-impl DurationExt for Duration {
- fn to_nanos(&self) -> u64 {
- self.as_secs() * NANOS_PER_SEC + u64::from(self.subsec_nanos())
- }
}
/// Enum representing different ways of measuring the throughput of benchmarked code.
@@ -1342,6 +1278,11 @@ pub enum Throughput {
/// an input string or `&[u8]`.
Bytes(u64),
+ /// Equivalent to Bytes, but the value will be reported in terms of
+ /// kilobytes (1000 bytes) per second instead of kibibytes (1024 bytes) per
+ /// second, megabytes instead of mebibytes, and gigabytes instead of gibibytes.
+ BytesDecimal(u64),
+
/// Measure throughput in terms of elements/second. The value should be the number of elements
/// processed by one iteration of the benchmarked code. Typically, this would be the size of a
/// collection, but could also be the number of lines of input text or the number of values to
@@ -1363,7 +1304,7 @@ pub enum AxisScale {
/// or benchmark group.
///
/// ```rust
-/// use self::criterion::{Bencher, Criterion, Benchmark, PlotConfiguration, AxisScale};
+/// use self::criterion::{Bencher, Criterion, PlotConfiguration, AxisScale};
///
/// let plot_config = PlotConfiguration::default()
/// .summary_scale(AxisScale::Logarithmic);
@@ -1388,6 +1329,7 @@ impl Default for PlotConfiguration {
}
impl PlotConfiguration {
+ #[must_use]
/// Set the axis scale (linear or logarithmic) for the summary plots. Typically, you would
/// set this to logarithmic if benchmarking over a range of inputs which scale exponentially.
/// Defaults to linear.
@@ -1460,7 +1402,7 @@ impl ActualSamplingMode {
ActualSamplingMode::Linear => {
let n = sample_count;
let met = warmup_mean_execution_time;
- let m_ns = target_time.to_nanos();
+ let m_ns = target_time.as_nanos();
// Solve: [d + 2*d + 3*d + ... + n*d] * met = m_ns
let total_runs = n * (n + 1) / 2;
let d = ((m_ns as f64 / met / total_runs as f64).ceil() as u64).max(1);
@@ -1470,25 +1412,25 @@ impl ActualSamplingMode {
let recommended_sample_size =
ActualSamplingMode::recommend_linear_sample_size(m_ns as f64, met);
let actual_time = Duration::from_nanos(expected_ns as u64);
- print!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
+ eprint!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
n, target_time, actual_time);
if recommended_sample_size != n {
- println!(
+ eprintln!(
", enable flat sampling, or reduce sample count to {}.",
recommended_sample_size
);
} else {
- println!(" or enable flat sampling.");
+ eprintln!(" or enable flat sampling.");
}
}
- (1..(n + 1) as u64).map(|a| a * d).collect::<Vec<u64>>()
+ (1..(n + 1)).map(|a| a * d).collect::<Vec<u64>>()
}
ActualSamplingMode::Flat => {
let n = sample_count;
let met = warmup_mean_execution_time;
- let m_ns = target_time.to_nanos() as f64;
+ let m_ns = target_time.as_nanos() as f64;
let time_per_sample = m_ns / (n as f64);
// This is pretty simplistic; we could do something smarter to fit into the allotted time.
let iterations_per_sample = ((time_per_sample / met).ceil() as u64).max(1);
@@ -1499,13 +1441,13 @@ impl ActualSamplingMode {
let recommended_sample_size =
ActualSamplingMode::recommend_flat_sample_size(m_ns, met);
let actual_time = Duration::from_nanos(expected_ns as u64);
- print!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
+ eprint!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
n, target_time, actual_time);
if recommended_sample_size != n {
- println!(", or reduce sample count to {}.", recommended_sample_size);
+ eprintln!(", or reduce sample count to {}.", recommended_sample_size);
} else {
- println!(".");
+ eprintln!(".");
}
}
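
As a concrete check of the linear schedule above (hypothetical numbers): with
n = 100 samples, a warmup mean execution time met = 1,000 ns, and a target time
m_ns = 5e9 ns, total_runs = 100 * 101 / 2 = 5050, so
d = ceil(5e9 / 1000 / 5050) = 991, giving iteration counts 991, 1982, ..., 99100
and an expected total of 5050 * 991 * 1000 ns, roughly the 5.0 s target.
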
@@ -1571,53 +1513,3 @@ pub fn runner(benches: &[&dyn Fn()]) {
}
Criterion::default().configure_from_args().final_summary();
}
-
-/// Print a warning informing users about upcoming changes to features
-#[cfg(not(feature = "html_reports"))]
-#[doc(hidden)]
-pub fn __warn_about_html_reports_feature() {
- if CARGO_CRITERION_CONNECTION.is_none() {
- println!(
- "WARNING: HTML report generation will become a non-default optional feature in Criterion.rs 0.4.0."
- );
- println!(
- "This feature is being moved to cargo-criterion \
- (https://github.com/bheisler/cargo-criterion) and will be optional in a future \
- version of Criterion.rs. To silence this warning, either switch to cargo-criterion or \
- enable the 'html_reports' feature in your Cargo.toml."
- );
- println!();
- }
-}
-
-/// Print a warning informing users about upcoming changes to features
-#[cfg(feature = "html_reports")]
-#[doc(hidden)]
-pub fn __warn_about_html_reports_feature() {
- // They have the feature enabled, so they're ready for the update.
-}
-
-/// Print a warning informing users about upcoming changes to features
-#[cfg(not(feature = "cargo_bench_support"))]
-#[doc(hidden)]
-pub fn __warn_about_cargo_bench_support_feature() {
- if CARGO_CRITERION_CONNECTION.is_none() {
- println!(
- "WARNING: In Criterion.rs 0.4.0, running criterion benchmarks outside of cargo-criterion will become a default optional feature."
- );
- println!(
- "The statistical analysis and reporting is being moved to cargo-criterion \
- (https://github.com/bheisler/cargo-criterion) and will be optional in a future \
- version of Criterion.rs. To silence this warning, either switch to cargo-criterion or \
- enable the 'cargo_bench_support' feature in your Cargo.toml."
- );
- println!();
- }
-}
-
-/// Print a warning informing users about upcoming changes to features
-#[cfg(feature = "cargo_bench_support")]
-#[doc(hidden)]
-pub fn __warn_about_cargo_bench_support_feature() {
- // They have the feature enabled, so they're ready for the update.
-}
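
The largest mechanical change above is the clap builder migration
(Arg::with_name/takes_value/possible_values to Arg::new/value_parser/num_args,
and value_of/is_present to get_one/get_flag). A standalone sketch of the new
idioms, using only call patterns that appear in this diff and assuming the clap
version this vendored criterion pins:

    use clap::{value_parser, Arg, Command};

    fn main() {
        let matches = Command::new("demo")
            // Boolean flag: takes no values, read back with get_flag().
            .arg(Arg::new("verbose").long("verbose").num_args(0))
            // Typed option: value_parser! replaces the old value_t! macro.
            .arg(Arg::new("sample-size")
                .long("sample-size")
                .value_parser(value_parser!(usize)))
            .get_matches();

        let verbose = matches.get_flag("verbose");
        let size = matches.get_one::<usize>("sample-size").copied();
        println!("verbose={} size={:?}", verbose, size);
    }
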
diff --git a/vendor/criterion/src/macros.rs b/vendor/criterion/src/macros.rs
index 85d8c5956..df7a44d9a 100755..100644
--- a/vendor/criterion/src/macros.rs
+++ b/vendor/criterion/src/macros.rs
@@ -120,9 +120,6 @@ macro_rules! criterion_group {
macro_rules! criterion_main {
( $( $group:path ),+ $(,)* ) => {
fn main() {
- $crate::__warn_about_html_reports_feature();
- $crate::__warn_about_cargo_bench_support_feature();
-
$(
$group();
)+
diff --git a/vendor/criterion/src/macros_private.rs b/vendor/criterion/src/macros_private.rs
index ebad20465..26203d1da 100755..100644
--- a/vendor/criterion/src/macros_private.rs
+++ b/vendor/criterion/src/macros_private.rs
@@ -41,7 +41,7 @@ macro_rules! error {
macro_rules! info {
($($arg:tt)*) => (
if $crate::debug_enabled() {
- println!("Criterion.rs DEBUG: {}", &format!($($arg)*));
+ println!("Criterion.rs DEBUG: {}", &format!($($arg)*))
}
)
}
diff --git a/vendor/criterion/src/measurement.rs b/vendor/criterion/src/measurement.rs
index 3e1e24c4b..63719753d 100755..100644
--- a/vendor/criterion/src/measurement.rs
+++ b/vendor/criterion/src/measurement.rs
@@ -4,7 +4,6 @@
//! measurement.
use crate::format::short;
-use crate::DurationExt;
use crate::Throughput;
use std::time::{Duration, Instant};
@@ -125,6 +124,31 @@ impl DurationFormatter {
unit
}
+ fn bytes_per_second_decimal(
+ &self,
+ bytes: f64,
+ typical: f64,
+ values: &mut [f64],
+ ) -> &'static str {
+ let bytes_per_second = bytes * (1e9 / typical);
+ let (denominator, unit) = if bytes_per_second < 1000.0 {
+ (1.0, " B/s")
+ } else if bytes_per_second < 1000.0 * 1000.0 {
+ (1000.0, "KB/s")
+ } else if bytes_per_second < 1000.0 * 1000.0 * 1000.0 {
+ (1000.0 * 1000.0, "MB/s")
+ } else {
+ (1000.0 * 1000.0 * 1000.0, "GB/s")
+ };
+
+ for val in values {
+ let bytes_per_second = bytes * (1e9 / *val);
+ *val = bytes_per_second / denominator;
+ }
+
+ unit
+ }
+
fn elements_per_second(&self, elems: f64, typical: f64, values: &mut [f64]) -> &'static str {
let elems_per_second = elems * (1e9 / typical);
let (denominator, unit) = if elems_per_second < 1000.0 {
@@ -154,6 +178,9 @@ impl ValueFormatter for DurationFormatter {
) -> &'static str {
match *throughput {
Throughput::Bytes(bytes) => self.bytes_per_second(bytes as f64, typical, values),
+ Throughput::BytesDecimal(bytes) => {
+ self.bytes_per_second_decimal(bytes as f64, typical, values)
+ }
Throughput::Elements(elems) => self.elements_per_second(elems as f64, typical, values),
}
}
@@ -204,7 +231,7 @@ impl Measurement for WallTime {
Duration::from_secs(0)
}
fn to_f64(&self, val: &Self::Value) -> f64 {
- val.to_nanos() as f64
+ val.as_nanos() as f64
}
fn formatter(&self) -> &dyn ValueFormatter {
&DurationFormatter
diff --git a/vendor/criterion/src/plot/gnuplot_backend/distributions.rs b/vendor/criterion/src/plot/gnuplot_backend/distributions.rs
index 1ccbc1a25..1ccbc1a25 100755..100644
--- a/vendor/criterion/src/plot/gnuplot_backend/distributions.rs
+++ b/vendor/criterion/src/plot/gnuplot_backend/distributions.rs
diff --git a/vendor/criterion/src/plot/gnuplot_backend/iteration_times.rs b/vendor/criterion/src/plot/gnuplot_backend/iteration_times.rs
index 4db4de8d5..4db4de8d5 100755..100644
--- a/vendor/criterion/src/plot/gnuplot_backend/iteration_times.rs
+++ b/vendor/criterion/src/plot/gnuplot_backend/iteration_times.rs
diff --git a/vendor/criterion/src/plot/gnuplot_backend/mod.rs b/vendor/criterion/src/plot/gnuplot_backend/mod.rs
index 987e324c4..27cc48be3 100755..100644
--- a/vendor/criterion/src/plot/gnuplot_backend/mod.rs
+++ b/vendor/criterion/src/plot/gnuplot_backend/mod.rs
@@ -248,7 +248,7 @@ impl Plotter for Gnuplot {
info!(
"Waiting for {} gnuplot processes took {}",
child_count,
- format::time(crate::DurationExt::to_nanos(elapsed) as f64)
+ format::time(elapsed.as_nanos() as f64)
);
}
}
diff --git a/vendor/criterion/src/plot/gnuplot_backend/pdf.rs b/vendor/criterion/src/plot/gnuplot_backend/pdf.rs
index a0b85c7aa..a0b85c7aa 100755..100644
--- a/vendor/criterion/src/plot/gnuplot_backend/pdf.rs
+++ b/vendor/criterion/src/plot/gnuplot_backend/pdf.rs
diff --git a/vendor/criterion/src/plot/gnuplot_backend/regression.rs b/vendor/criterion/src/plot/gnuplot_backend/regression.rs
index 82de357c4..82de357c4 100755..100644
--- a/vendor/criterion/src/plot/gnuplot_backend/regression.rs
+++ b/vendor/criterion/src/plot/gnuplot_backend/regression.rs
diff --git a/vendor/criterion/src/plot/gnuplot_backend/summary.rs b/vendor/criterion/src/plot/gnuplot_backend/summary.rs
index d57a17493..e5d2ab6be 100755..100644
--- a/vendor/criterion/src/plot/gnuplot_backend/summary.rs
+++ b/vendor/criterion/src/plot/gnuplot_backend/summary.rs
@@ -67,7 +67,7 @@ pub fn line_comparison(
let max = all_curves
.iter()
- .map(|&&(_, ref data)| Sample::new(data).mean())
+ .map(|&(_, data)| Sample::new(data).mean())
.fold(::std::f64::NAN, f64::max);
let mut dummy = [1.0];
@@ -130,11 +130,11 @@ pub fn violin(
) -> Child {
let path = PathBuf::from(&path);
let all_curves_vec = all_curves.iter().rev().cloned().collect::<Vec<_>>();
- let all_curves: &[&(&BenchmarkId, Vec<f64>)] = &*all_curves_vec;
+ let all_curves: &[&(&BenchmarkId, Vec<f64>)] = &all_curves_vec;
let kdes = all_curves
.iter()
- .map(|&&(_, ref sample)| {
+ .map(|&(_, sample)| {
let (x, mut y) = kde::sweep(Sample::new(sample), KDE_POINTS, None);
let y_max = Sample::new(&y).max();
for y in y.iter_mut() {
@@ -144,10 +144,7 @@ pub fn violin(
(x, y)
})
.collect::<Vec<_>>();
- let mut xs = kdes
- .iter()
- .flat_map(|&(ref x, _)| x.iter())
- .filter(|&&x| x > 0.);
+ let mut xs = kdes.iter().flat_map(|(x, _)| x.iter()).filter(|&&x| x > 0.);
let (mut min, mut max) = {
let &first = xs.next().unwrap();
(first, first)
@@ -174,7 +171,7 @@ pub fn violin(
.configure(Axis::BottomX, |a| {
a.configure(Grid::Major, |g| g.show())
.configure(Grid::Minor, |g| g.hide())
- .set(Range::Limits(0., max as f64 * one[0]))
+ .set(Range::Limits(0., max * one[0]))
.set(Label(format!("Average time ({})", unit)))
.set(axis_scale.to_gnuplot())
})
@@ -190,7 +187,7 @@ pub fn violin(
});
let mut is_first = true;
- for (i, &(ref x, ref y)) in kdes.iter().enumerate() {
+ for (i, (x, y)) in kdes.iter().enumerate() {
let i = i as f64 + 0.5;
let y1: Vec<_> = y.iter().map(|&y| i + y * 0.45).collect();
let y2: Vec<_> = y.iter().map(|&y| i - y * 0.45).collect();
diff --git a/vendor/criterion/src/plot/gnuplot_backend/t_test.rs b/vendor/criterion/src/plot/gnuplot_backend/t_test.rs
index 47b4a110e..47b4a110e 100755..100644
--- a/vendor/criterion/src/plot/gnuplot_backend/t_test.rs
+++ b/vendor/criterion/src/plot/gnuplot_backend/t_test.rs
diff --git a/vendor/criterion/src/plot/mod.rs b/vendor/criterion/src/plot/mod.rs
index cb836a394..4bce39468 100755..100644
--- a/vendor/criterion/src/plot/mod.rs
+++ b/vendor/criterion/src/plot/mod.rs
@@ -1,7 +1,9 @@
mod gnuplot_backend;
+#[cfg(feature = "plotters")]
mod plotters_backend;
pub(crate) use gnuplot_backend::Gnuplot;
+#[cfg(feature = "plotters")]
pub(crate) use plotters_backend::PlottersBackend;
use crate::estimate::Statistic;
diff --git a/vendor/criterion/src/plot/plotters_backend/distributions.rs b/vendor/criterion/src/plot/plotters_backend/distributions.rs
index 82f9eae59..8de114058 100755..100644
--- a/vendor/criterion/src/plot/plotters_backend/distributions.rs
+++ b/vendor/criterion/src/plot/plotters_backend/distributions.rs
@@ -85,11 +85,11 @@ fn abs_distribution(
chart
.draw_series(LineSeries::new(
kde_xs.iter().zip(ys.iter()).map(|(&x, &y)| (x, y)),
- &DARK_BLUE,
+ DARK_BLUE,
))
.unwrap()
.label("Bootstrap distribution")
- .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], &DARK_BLUE));
+ .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], DARK_BLUE));
chart
.draw_series(AreaSeries::new(
@@ -115,7 +115,7 @@ fn abs_distribution(
)))
.unwrap()
.label("Point estimate")
- .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], &DARK_BLUE));
+ .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], DARK_BLUE));
chart
.configure_series_labels()
@@ -240,11 +240,11 @@ fn rel_distribution(
chart
.draw_series(LineSeries::new(
xs.iter().zip(ys.iter()).map(|(x, y)| (*x, *y)),
- &DARK_BLUE,
+ DARK_BLUE,
))
.unwrap()
.label("Bootstrap distribution")
- .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], &DARK_BLUE));
+ .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], DARK_BLUE));
chart
.draw_series(AreaSeries::new(
@@ -269,7 +269,7 @@ fn rel_distribution(
)))
.unwrap()
.label("Point estimate")
- .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], &DARK_BLUE));
+ .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], DARK_BLUE));
chart
.draw_series(std::iter::once(Rectangle::new(
diff --git a/vendor/criterion/src/plot/plotters_backend/iteration_times.rs b/vendor/criterion/src/plot/plotters_backend/iteration_times.rs
index 4d0a22a96..3ac4f1cc7 100755..100644
--- a/vendor/criterion/src/plot/plotters_backend/iteration_times.rs
+++ b/vendor/criterion/src/plot/plotters_backend/iteration_times.rs
@@ -37,7 +37,7 @@ pub(crate) fn iteration_times_figure(
.configure_mesh()
.y_desc(format!("Average Iteration Time ({})", unit))
.x_label_formatter(&|x| pretty_print_float(*x, true))
- .light_line_style(&TRANSPARENT)
+ .light_line_style(TRANSPARENT)
.draw()
.unwrap();
@@ -104,7 +104,7 @@ pub(crate) fn iteration_times_comparison_figure(
.configure_mesh()
.y_desc(format!("Average Iteration Time ({})", unit))
.x_label_formatter(&|x| pretty_print_float(*x, true))
- .light_line_style(&TRANSPARENT)
+ .light_line_style(TRANSPARENT)
.draw()
.unwrap();
diff --git a/vendor/criterion/src/plot/plotters_backend/mod.rs b/vendor/criterion/src/plot/plotters_backend/mod.rs
index 4cd1b183d..4cd1b183d 100755..100644
--- a/vendor/criterion/src/plot/plotters_backend/mod.rs
+++ b/vendor/criterion/src/plot/plotters_backend/mod.rs
diff --git a/vendor/criterion/src/plot/plotters_backend/pdf.rs b/vendor/criterion/src/plot/plotters_backend/pdf.rs
index 333893fc2..e55de4e6e 100755..100644
--- a/vendor/criterion/src/plot/plotters_backend/pdf.rs
+++ b/vendor/criterion/src/plot/plotters_backend/pdf.rs
@@ -38,7 +38,7 @@ pub(crate) fn pdf_comparison_figure(
let y_range = data::fitting_range(base_ys.iter().chain(ys.iter()));
let size = size.unwrap_or(SIZE);
- let root_area = SVGBackend::new(&path, (size.0 as u32, size.1 as u32)).into_drawing_area();
+ let root_area = SVGBackend::new(&path, (size.0, size.1)).into_drawing_area();
let mut cb = ChartBuilder::on(&root_area);
@@ -93,7 +93,7 @@ pub(crate) fn pdf_comparison_figure(
)))
.unwrap()
.label("Base Mean")
- .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], &DARK_RED));
+ .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], DARK_RED));
chart
.draw_series(std::iter::once(PathElement::new(
@@ -102,7 +102,7 @@ pub(crate) fn pdf_comparison_figure(
)))
.unwrap()
.label("New Mean")
- .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], &DARK_BLUE));
+ .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], DARK_BLUE));
if title.is_some() {
chart.configure_series_labels().draw().unwrap();
@@ -132,7 +132,7 @@ pub(crate) fn pdf_small(
let path = context.report_path(id, "pdf_small.svg");
let size = size.unwrap_or(SIZE);
- let root_area = SVGBackend::new(&path, (size.0 as u32, size.1 as u32)).into_drawing_area();
+ let root_area = SVGBackend::new(&path, (size.0, size.1)).into_drawing_area();
let mut chart = ChartBuilder::on(&root_area)
.margin((5).percent())
@@ -208,7 +208,7 @@ pub(crate) fn pdf(
let xs_ = Sample::new(&xs);
let size = size.unwrap_or(SIZE);
- let root_area = SVGBackend::new(&path, (size.0 as u32, size.1 as u32)).into_drawing_area();
+ let root_area = SVGBackend::new(&path, (size.0, size.1)).into_drawing_area();
let range = data::fitting_range(ys.iter());
@@ -255,18 +255,18 @@ pub(crate) fn pdf(
chart
.draw_series(std::iter::once(PathElement::new(
vec![(mean, 0.0), (mean, max_iters)],
- &DARK_BLUE,
+ DARK_BLUE,
)))
.unwrap()
.label("Mean")
- .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], &DARK_BLUE));
+ .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], DARK_BLUE));
chart
.draw_series(vec![
- PathElement::new(vec![(lomt, 0.0), (lomt, max_iters)], &DARK_ORANGE),
- PathElement::new(vec![(himt, 0.0), (himt, max_iters)], &DARK_ORANGE),
- PathElement::new(vec![(lost, 0.0), (lost, max_iters)], &DARK_RED),
- PathElement::new(vec![(hist, 0.0), (hist, max_iters)], &DARK_RED),
+ PathElement::new(vec![(lomt, 0.0), (lomt, max_iters)], DARK_ORANGE),
+ PathElement::new(vec![(himt, 0.0), (himt, max_iters)], DARK_ORANGE),
+ PathElement::new(vec![(lost, 0.0), (lost, max_iters)], DARK_RED),
+ PathElement::new(vec![(hist, 0.0), (hist, max_iters)], DARK_RED),
])
.unwrap();
use crate::stats::univariate::outliers::tukey::Label;
diff --git a/vendor/criterion/src/plot/plotters_backend/regression.rs b/vendor/criterion/src/plot/plotters_backend/regression.rs
index c944dbbe6..1a9adece0 100755..100644
--- a/vendor/criterion/src/plot/plotters_backend/regression.rs
+++ b/vendor/criterion/src/plot/plotters_backend/regression.rs
@@ -61,7 +61,7 @@ pub(crate) fn regression_figure(
.x_desc(x_label)
.y_desc(format!("Total sample time ({})", unit))
.x_label_formatter(&|x| pretty_print_float(x * x_scale, true))
- .light_line_style(&TRANSPARENT)
+ .light_line_style(TRANSPARENT)
.draw()
.unwrap();
@@ -79,7 +79,7 @@ pub(crate) fn regression_figure(
chart
.draw_series(std::iter::once(PathElement::new(
vec![(0.0, 0.0), (max_iters, point)],
- &DARK_BLUE,
+ DARK_BLUE,
)))
.unwrap()
.label("Linear regression")
@@ -187,13 +187,13 @@ pub(crate) fn regression_comparison_figure(
.x_desc(x_label)
.y_desc(format!("Total sample time ({})", unit))
.x_label_formatter(&|x| pretty_print_float(x * x_scale, true))
- .light_line_style(&TRANSPARENT)
+ .light_line_style(TRANSPARENT)
.draw()
.unwrap();
chart
.draw_series(vec![
- PathElement::new(vec![(0.0, 0.0), (max_iters, base_point)], &DARK_RED).into_dyn(),
+ PathElement::new(vec![(0.0, 0.0), (max_iters, base_point)], DARK_RED).into_dyn(),
Polygon::new(
vec![(0.0, 0.0), (max_iters, base_lb), (max_iters, base_ub)],
DARK_RED.mix(0.25).filled(),
@@ -208,7 +208,7 @@ pub(crate) fn regression_comparison_figure(
chart
.draw_series(vec![
- PathElement::new(vec![(0.0, 0.0), (max_iters, point)], &DARK_BLUE).into_dyn(),
+ PathElement::new(vec![(0.0, 0.0), (max_iters, point)], DARK_BLUE).into_dyn(),
Polygon::new(
vec![(0.0, 0.0), (max_iters, lb), (max_iters, ub)],
DARK_BLUE.mix(0.25).filled(),
diff --git a/vendor/criterion/src/plot/plotters_backend/summary.rs b/vendor/criterion/src/plot/plotters_backend/summary.rs
index a5a410d6e..0ebb851e2 100755..100644
--- a/vendor/criterion/src/plot/plotters_backend/summary.rs
+++ b/vendor/criterion/src/plot/plotters_backend/summary.rs
@@ -120,7 +120,7 @@ fn line_comparison_series_data<'a>(
) -> (&'static str, Vec<(Option<&'a String>, Vec<f64>, Vec<f64>)>) {
let max = all_curves
.iter()
- .map(|&&(_, ref data)| Sample::new(data).mean())
+ .map(|&(_, data)| Sample::new(data).mean())
.fold(::std::f64::NAN, f64::max);
let mut dummy = [1.0];
@@ -159,7 +159,7 @@ pub fn violin(
axis_scale: AxisScale,
) {
let all_curves_vec = all_curves.iter().rev().cloned().collect::<Vec<_>>();
- let all_curves: &[&(&BenchmarkId, Vec<f64>)] = &*all_curves_vec;
+ let all_curves: &[&(&BenchmarkId, Vec<f64>)] = &all_curves_vec;
let mut kdes = all_curves
.iter()
@@ -176,7 +176,7 @@ pub fn violin(
let mut xs = kdes
.iter()
- .flat_map(|&(_, ref x, _)| x.iter())
+ .flat_map(|(_, x, _)| x.iter())
.filter(|&&x| x > 0.);
let (mut min, mut max) = {
let &first = xs.next().unwrap();
@@ -250,7 +250,7 @@ fn draw_violin_figure<XR: AsRangedCoord<Value = f64>, YR: AsRangedCoord<Value =
.draw_series(AreaSeries::new(
x.iter().zip(y.iter()).map(|(x, y)| (*x, base + *y / 2.0)),
base,
- &DARK_BLUE,
+ DARK_BLUE,
))
.unwrap();
@@ -258,7 +258,7 @@ fn draw_violin_figure<XR: AsRangedCoord<Value = f64>, YR: AsRangedCoord<Value =
.draw_series(AreaSeries::new(
x.iter().zip(y.iter()).map(|(x, y)| (*x, base - *y / 2.0)),
base,
- &DARK_BLUE,
+ DARK_BLUE,
))
.unwrap();
}
diff --git a/vendor/criterion/src/plot/plotters_backend/t_test.rs b/vendor/criterion/src/plot/plotters_backend/t_test.rs
index d9c15081f..c575c2ff6 100755..100644
--- a/vendor/criterion/src/plot/plotters_backend/t_test.rs
+++ b/vendor/criterion/src/plot/plotters_backend/t_test.rs
@@ -38,7 +38,7 @@ pub(crate) fn t_test(
.draw_series(AreaSeries::new(
xs.iter().zip(ys.iter()).map(|(x, y)| (*x, *y)),
0.0,
- &DARK_BLUE.mix(0.25),
+ DARK_BLUE.mix(0.25),
))
.unwrap()
.label("t distribution")
@@ -53,7 +53,7 @@ pub(crate) fn t_test(
)))
.unwrap()
.label("t statistic")
- .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], &DARK_BLUE));
+ .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], DARK_BLUE));
chart.configure_series_labels().draw().unwrap();
}
diff --git a/vendor/criterion/src/profiler.rs b/vendor/criterion/src/profiler.rs
index 906af5f03..906af5f03 100755..100644
--- a/vendor/criterion/src/profiler.rs
+++ b/vendor/criterion/src/profiler.rs
diff --git a/vendor/criterion/src/report.rs b/vendor/criterion/src/report.rs
index 9032a38e7..c5448fdbb 100755..100644
--- a/vendor/criterion/src/report.rs
+++ b/vendor/criterion/src/report.rs
@@ -1,5 +1,7 @@
+#[cfg(feature = "csv_output")]
+use crate::csv_report::FileCsvReport;
+use crate::stats::bivariate::regression::Slope;
use crate::stats::univariate::outliers::tukey::LabeledSample;
-use crate::{csv_report::FileCsvReport, stats::bivariate::regression::Slope};
use crate::{html::Html, stats::bivariate::Data};
use crate::estimate::{ChangeDistributions, ChangeEstimates, Distributions, Estimate, Estimates};
@@ -8,11 +10,11 @@ use crate::measurement::ValueFormatter;
use crate::stats::univariate::Sample;
use crate::stats::Distribution;
use crate::{PlotConfiguration, Throughput};
-use std::cell::Cell;
+use anes::{Attribute, ClearLine, Color, ResetAttributes, SetAttribute, SetForegroundColor};
use std::cmp;
use std::collections::HashSet;
use std::fmt;
-use std::io::stdout;
+use std::io::stderr;
use std::io::Write;
use std::path::{Path, PathBuf};
@@ -46,6 +48,7 @@ impl<'a> MeasurementData<'a> {
self.data.x()
}
+ #[cfg(feature = "csv_output")]
pub fn sample_times(&self) -> &Sample<f64> {
self.data.y()
}
@@ -113,9 +116,9 @@ impl BenchmarkId {
throughput: Option<Throughput>,
) -> BenchmarkId {
let full_id = match (&function_id, &value_str) {
- (&Some(ref func), &Some(ref val)) => format!("{}/{}/{}", group_id, func, val),
- (&Some(ref func), &None) => format!("{}/{}", group_id, func),
- (&None, &Some(ref val)) => format!("{}/{}", group_id, val),
+ (Some(func), Some(val)) => format!("{}/{}/{}", group_id, func, val),
+ (Some(func), &None) => format!("{}/{}", group_id, func),
+ (&None, Some(val)) => format!("{}/{}", group_id, val),
(&None, &None) => group_id.clone(),
};
@@ -126,18 +129,18 @@ impl BenchmarkId {
}
let directory_name = match (&function_id, &value_str) {
- (&Some(ref func), &Some(ref val)) => format!(
+ (Some(func), Some(val)) => format!(
"{}/{}/{}",
make_filename_safe(&group_id),
make_filename_safe(func),
make_filename_safe(val)
),
- (&Some(ref func), &None) => format!(
+ (Some(func), &None) => format!(
"{}/{}",
make_filename_safe(&group_id),
make_filename_safe(func)
),
- (&None, &Some(ref val)) => format!(
+ (&None, Some(val)) => format!(
"{}/{}",
make_filename_safe(&group_id),
make_filename_safe(val)
@@ -170,7 +173,9 @@ impl BenchmarkId {
pub fn as_number(&self) -> Option<f64> {
match self.throughput {
- Some(Throughput::Bytes(n)) | Some(Throughput::Elements(n)) => Some(n as f64),
+ Some(Throughput::Bytes(n))
+ | Some(Throughput::Elements(n))
+ | Some(Throughput::BytesDecimal(n)) => Some(n as f64),
None => self
.value_str
.as_ref()
@@ -181,6 +186,7 @@ impl BenchmarkId {
pub fn value_type(&self) -> Option<ValueType> {
match self.throughput {
Some(Throughput::Bytes(_)) => Some(ValueType::Bytes),
+ Some(Throughput::BytesDecimal(_)) => Some(ValueType::Bytes),
Some(Throughput::Elements(_)) => Some(ValueType::Elements),
None => self
.value_str
@@ -304,9 +310,7 @@ pub(crate) struct Reports {
pub(crate) bencher_enabled: bool,
pub(crate) bencher: BencherReport,
pub(crate) csv_enabled: bool,
- pub(crate) csv: FileCsvReport,
- pub(crate) html_enabled: bool,
- pub(crate) html: Html,
+ pub(crate) html: Option<Html>,
}
macro_rules! reports_impl {
(fn $name:ident(&self, $($argn:ident: $argt:ty),*)) => {
@@ -317,11 +321,12 @@ macro_rules! reports_impl {
if self.bencher_enabled {
self.bencher.$name($($argn),*);
}
+ #[cfg(feature = "csv_output")]
if self.csv_enabled {
- self.csv.$name($($argn),*);
+ FileCsvReport.$name($($argn),*);
}
- if self.html_enabled {
- self.html.$name($($argn),*);
+ if let Some(reporter) = &self.html {
+ reporter.$name($($argn),*);
}
}
};
@@ -363,35 +368,34 @@ impl Report for Reports {
reports_impl!(fn group_separator(&self, ));
}
+#[derive(Debug, Clone, Copy, Eq, PartialEq)]
+pub(crate) enum CliVerbosity {
+ Quiet,
+ Normal,
+ Verbose,
+}
+
pub(crate) struct CliReport {
pub enable_text_overwrite: bool,
pub enable_text_coloring: bool,
- pub verbose: bool,
-
- last_line_len: Cell<usize>,
+ pub verbosity: CliVerbosity,
}
impl CliReport {
pub fn new(
enable_text_overwrite: bool,
enable_text_coloring: bool,
- verbose: bool,
+ verbosity: CliVerbosity,
) -> CliReport {
CliReport {
enable_text_overwrite,
enable_text_coloring,
- verbose,
-
- last_line_len: Cell::new(0),
+ verbosity,
}
}
fn text_overwrite(&self) {
if self.enable_text_overwrite {
- print!("\r");
- for _ in 0..self.last_line_len.get() {
- print!(" ");
- }
- print!("\r");
+ eprint!("\r{}", ClearLine::All)
}
}
@@ -399,41 +403,36 @@ impl CliReport {
#[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))]
fn print_overwritable(&self, s: String) {
if self.enable_text_overwrite {
- self.last_line_len.set(s.len());
- print!("{}", s);
- stdout().flush().unwrap();
+ eprint!("{}", s);
+ stderr().flush().unwrap();
} else {
- println!("{}", s);
+ eprintln!("{}", s);
}
}
- fn green(&self, s: String) -> String {
+ fn with_color(&self, color: Color, s: &str) -> String {
if self.enable_text_coloring {
- format!("\x1B[32m{}\x1B[39m", s)
+ format!("{}{}{}", SetForegroundColor(color), s, ResetAttributes)
} else {
- s
+ String::from(s)
}
}
- fn yellow(&self, s: String) -> String {
- if self.enable_text_coloring {
- format!("\x1B[33m{}\x1B[39m", s)
- } else {
- s
- }
+ fn green(&self, s: &str) -> String {
+ self.with_color(Color::DarkGreen, s)
}
- fn red(&self, s: String) -> String {
- if self.enable_text_coloring {
- format!("\x1B[31m{}\x1B[39m", s)
- } else {
- s
- }
+ fn yellow(&self, s: &str) -> String {
+ self.with_color(Color::DarkYellow, s)
+ }
+
+ fn red(&self, s: &str) -> String {
+ self.with_color(Color::DarkRed, s)
}
fn bold(&self, s: String) -> String {
if self.enable_text_coloring {
- format!("\x1B[1m{}\x1B[22m", s)
+ format!("{}{}{}", SetAttribute(Attribute::Bold), s, ResetAttributes)
} else {
s
}
@@ -441,7 +440,7 @@ impl CliReport {
fn faint(&self, s: String) -> String {
if self.enable_text_coloring {
- format!("\x1B[2m{}\x1B[22m", s)
+ format!("{}{}{}", SetAttribute(Attribute::Faint), s, ResetAttributes)
} else {
s
}
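
For reference, with_color/bold/faint now delegate to the anes crate instead of
hand-rolled escape strings. A minimal sketch using only the anes items already
imported in this diff (assuming they implement Display, as their use in format!
above suggests):

    use anes::{Attribute, Color, ResetAttributes, SetAttribute, SetForegroundColor};

    fn main() {
        // Same wrap-then-reset pattern as CliReport::with_color.
        println!("{}improved{}", SetForegroundColor(Color::DarkGreen), ResetAttributes);
        println!("{}bold{}", SetAttribute(Attribute::Bold), ResetAttributes);
    }
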
@@ -460,7 +459,7 @@ impl CliReport {
println!(
"{}",
- self.yellow(format!(
+ self.yellow(&format!(
"Found {} outliers among {} measurements ({:.2}%)",
noutliers,
sample_size,
@@ -529,7 +528,7 @@ impl Report for CliReport {
iter_count: u64,
) {
self.text_overwrite();
- let iter_string = if self.verbose {
+ let iter_string = if matches!(self.verbosity, CliVerbosity::Verbose) {
format!("{} iterations", iter_count)
} else {
format::iter_count(iter_count)
@@ -559,14 +558,14 @@ impl Report for CliReport {
let mut id = id.as_title().to_owned();
if id.len() > 23 {
- println!("{}", self.green(id.clone()));
+ println!("{}", self.green(&id));
id.clear();
}
let id_len = id.len();
println!(
"{}{}time: [{} {} {}]",
- self.green(id),
+ self.green(&id),
" ".repeat(24 - id_len),
self.faint(
formatter.format_value(typical_estimate.confidence_interval.lower_bound)
@@ -594,98 +593,103 @@ impl Report for CliReport {
)
}
- if let Some(ref comp) = meas.comparison {
- let different_mean = comp.p_value < comp.significance_threshold;
- let mean_est = &comp.relative_estimates.mean;
- let point_estimate = mean_est.point_estimate;
- let mut point_estimate_str = format::change(point_estimate, true);
- // The change in throughput is related to the change in timing. Reducing the timing by
- // 50% increases the throughput by 100%.
- let to_thrpt_estimate = |ratio: f64| 1.0 / (1.0 + ratio) - 1.0;
- let mut thrpt_point_estimate_str =
- format::change(to_thrpt_estimate(point_estimate), true);
- let explanation_str: String;
-
- if !different_mean {
- explanation_str = "No change in performance detected.".to_owned();
- } else {
- let comparison = compare_to_threshold(mean_est, comp.noise_threshold);
- match comparison {
- ComparisonResult::Improved => {
- point_estimate_str = self.green(self.bold(point_estimate_str));
- thrpt_point_estimate_str = self.green(self.bold(thrpt_point_estimate_str));
- explanation_str =
- format!("Performance has {}.", self.green("improved".to_owned()));
- }
- ComparisonResult::Regressed => {
- point_estimate_str = self.red(self.bold(point_estimate_str));
- thrpt_point_estimate_str = self.red(self.bold(thrpt_point_estimate_str));
- explanation_str =
- format!("Performance has {}.", self.red("regressed".to_owned()));
- }
- ComparisonResult::NonSignificant => {
- explanation_str = "Change within noise threshold.".to_owned();
+ if !matches!(self.verbosity, CliVerbosity::Quiet) {
+ if let Some(ref comp) = meas.comparison {
+ let different_mean = comp.p_value < comp.significance_threshold;
+ let mean_est = &comp.relative_estimates.mean;
+ let point_estimate = mean_est.point_estimate;
+ let mut point_estimate_str = format::change(point_estimate, true);
+ // The change in throughput is related to the change in timing. Reducing the timing by
+ // 50% increases the throughput by 100%.
+ let to_thrpt_estimate = |ratio: f64| 1.0 / (1.0 + ratio) - 1.0;
+ let mut thrpt_point_estimate_str =
+ format::change(to_thrpt_estimate(point_estimate), true);
+ let explanation_str: String;
+
+ if !different_mean {
+ explanation_str = "No change in performance detected.".to_owned();
+ } else {
+ let comparison = compare_to_threshold(mean_est, comp.noise_threshold);
+ match comparison {
+ ComparisonResult::Improved => {
+ point_estimate_str = self.green(&self.bold(point_estimate_str));
+ thrpt_point_estimate_str =
+ self.green(&self.bold(thrpt_point_estimate_str));
+ explanation_str =
+ format!("Performance has {}.", self.green("improved"));
+ }
+ ComparisonResult::Regressed => {
+ point_estimate_str = self.red(&self.bold(point_estimate_str));
+ thrpt_point_estimate_str =
+ self.red(&self.bold(thrpt_point_estimate_str));
+ explanation_str = format!("Performance has {}.", self.red("regressed"));
+ }
+ ComparisonResult::NonSignificant => {
+ explanation_str = "Change within noise threshold.".to_owned();
+ }
}
}
- }
- if meas.throughput.is_some() {
- println!("{}change:", " ".repeat(17));
+ if meas.throughput.is_some() {
+ println!("{}change:", " ".repeat(17));
+
+ println!(
+ "{}time: [{} {} {}] (p = {:.2} {} {:.2})",
+ " ".repeat(24),
+ self.faint(format::change(
+ mean_est.confidence_interval.lower_bound,
+ true
+ )),
+ point_estimate_str,
+ self.faint(format::change(
+ mean_est.confidence_interval.upper_bound,
+ true
+ )),
+ comp.p_value,
+ if different_mean { "<" } else { ">" },
+ comp.significance_threshold
+ );
+ println!(
+ "{}thrpt: [{} {} {}]",
+ " ".repeat(24),
+ self.faint(format::change(
+ to_thrpt_estimate(mean_est.confidence_interval.upper_bound),
+ true
+ )),
+ thrpt_point_estimate_str,
+ self.faint(format::change(
+ to_thrpt_estimate(mean_est.confidence_interval.lower_bound),
+ true
+ )),
+ );
+ } else {
+ println!(
+ "{}change: [{} {} {}] (p = {:.2} {} {:.2})",
+ " ".repeat(24),
+ self.faint(format::change(
+ mean_est.confidence_interval.lower_bound,
+ true
+ )),
+ point_estimate_str,
+ self.faint(format::change(
+ mean_est.confidence_interval.upper_bound,
+ true
+ )),
+ comp.p_value,
+ if different_mean { "<" } else { ">" },
+ comp.significance_threshold
+ );
+ }
- println!(
- "{}time: [{} {} {}] (p = {:.2} {} {:.2})",
- " ".repeat(24),
- self.faint(format::change(
- mean_est.confidence_interval.lower_bound,
- true
- )),
- point_estimate_str,
- self.faint(format::change(
- mean_est.confidence_interval.upper_bound,
- true
- )),
- comp.p_value,
- if different_mean { "<" } else { ">" },
- comp.significance_threshold
- );
- println!(
- "{}thrpt: [{} {} {}]",
- " ".repeat(24),
- self.faint(format::change(
- to_thrpt_estimate(mean_est.confidence_interval.upper_bound),
- true
- )),
- thrpt_point_estimate_str,
- self.faint(format::change(
- to_thrpt_estimate(mean_est.confidence_interval.lower_bound),
- true
- )),
- );
- } else {
- println!(
- "{}change: [{} {} {}] (p = {:.2} {} {:.2})",
- " ".repeat(24),
- self.faint(format::change(
- mean_est.confidence_interval.lower_bound,
- true
- )),
- point_estimate_str,
- self.faint(format::change(
- mean_est.confidence_interval.upper_bound,
- true
- )),
- comp.p_value,
- if different_mean { "<" } else { ">" },
- comp.significance_threshold
- );
+ println!("{}{}", " ".repeat(24), explanation_str);
}
-
- println!("{}{}", " ".repeat(24), explanation_str);
}
- self.outliers(&meas.avg_times);
+ if !matches!(self.verbosity, CliVerbosity::Quiet) {
+ self.outliers(&meas.avg_times);
+ }
- if self.verbose {
+ if matches!(self.verbosity, CliVerbosity::Verbose) {
let format_short_estimate = |estimate: &Estimate| -> String {
format!(
"[{} {}]",
diff --git a/vendor/criterion/src/routine.rs b/vendor/criterion/src/routine.rs
index 5831415ac..88e4318bb 100755..100644
--- a/vendor/criterion/src/routine.rs
+++ b/vendor/criterion/src/routine.rs
@@ -2,7 +2,7 @@ use crate::benchmark::BenchmarkConfig;
use crate::connection::OutgoingMessage;
use crate::measurement::Measurement;
use crate::report::{BenchmarkId, Report, ReportContext};
-use crate::{ActualSamplingMode, Bencher, Criterion, DurationExt};
+use crate::{black_box, ActualSamplingMode, Bencher, Criterion};
use std::marker::PhantomData;
use std::time::Duration;
@@ -34,7 +34,7 @@ pub(crate) trait Routine<M: Measurement, T: ?Sized> {
) {
criterion
.report
- .profile(id, report_context, time.to_nanos() as f64);
+ .profile(id, report_context, time.as_nanos() as f64);
let mut profile_path = report_context.output_directory.clone();
if (*crate::CARGO_CRITERION_CONNECTION).is_some() {
@@ -51,7 +51,7 @@ pub(crate) trait Routine<M: Measurement, T: ?Sized> {
.borrow_mut()
.start_profiling(id.id(), &profile_path);
- let time = time.to_nanos();
+ let time = time.as_nanos() as u64;
// TODO: Some profilers will show the two batches of iterations as
// being different code-paths even though they aren't really.
@@ -88,17 +88,58 @@ pub(crate) trait Routine<M: Measurement, T: ?Sized> {
report_context: &ReportContext,
parameter: &T,
) -> (ActualSamplingMode, Box<[f64]>, Box<[f64]>) {
+ if config.quick_mode {
+ let minimum_bench_duration = Duration::from_millis(100);
+ let maximum_bench_duration = config.measurement_time; // default: 5 seconds
+ let target_rel_stdev = config.significance_level; // default: 5%, 0.05
+
+ use std::time::Instant;
+ let time_start = Instant::now();
+
+ let sq = |val| val * val;
+ let mut n = 1;
+ let mut t_prev = *self.bench(measurement, &[n], parameter).first().unwrap();
+
+ // Early exit for extremely long-running benchmarks:
+ if time_start.elapsed() > maximum_bench_duration {
+ let iters = vec![n as f64, n as f64].into_boxed_slice();
+ // prevent gnuplot bug when all values are equal
+ let elapsed = vec![t_prev, t_prev + 0.000001].into_boxed_slice();
+ return (ActualSamplingMode::Flat, iters, elapsed);
+ }
+
+ // Main data collection loop.
+ loop {
+ let t_now = *self
+ .bench(measurement, &[n * 2], parameter)
+ .first()
+ .unwrap();
+ let t = (t_prev + 2. * t_now) / 5.;
+ let stdev = (sq(t_prev - t) + sq(t_now - 2. * t)).sqrt();
+ // println!("Sample: {} {:.2}", n, stdev / t);
+ let elapsed = time_start.elapsed();
+ if (stdev < target_rel_stdev * t && elapsed > minimum_bench_duration)
+ || elapsed > maximum_bench_duration
+ {
+ let iters = vec![n as f64, (n * 2) as f64].into_boxed_slice();
+ let elapsed = vec![t_prev, t_now].into_boxed_slice();
+ return (ActualSamplingMode::Linear, iters, elapsed);
+ }
+ n *= 2;
+ t_prev = t_now;
+ }
+ }
let wu = config.warm_up_time;
- let m_ns = config.measurement_time.to_nanos();
+ let m_ns = config.measurement_time.as_nanos();
criterion
.report
- .warmup(id, report_context, wu.to_nanos() as f64);
+ .warmup(id, report_context, wu.as_nanos() as f64);
if let Some(conn) = &criterion.connection {
conn.send(&OutgoingMessage::Warmup {
id: id.into(),
- nanos: wu.to_nanos() as f64,
+ nanos: wu.as_nanos() as f64,
})
.unwrap();
}
@@ -206,7 +247,7 @@ where
.iter()
.map(|iters| {
b.iters = *iters;
- (*f)(&mut b, parameter);
+ (*f)(&mut b, black_box(parameter));
b.assert_iterated();
m.to_f64(&b.value)
})
@@ -226,14 +267,14 @@ where
let mut total_iters = 0;
let mut elapsed_time = Duration::from_millis(0);
loop {
- (*f)(&mut b, parameter);
+ (*f)(&mut b, black_box(parameter));
b.assert_iterated();
total_iters += b.iters;
elapsed_time += b.elapsed_time;
if elapsed_time > how_long {
- return (elapsed_time.to_nanos(), total_iters);
+ return (elapsed_time.as_nanos() as u64, total_iters);
}
b.iters = b.iters.wrapping_mul(2);
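
The new quick mode benches n iterations and then 2n, models total time as linear in the iteration count, and keeps doubling n until the fit stabilizes or the measurement-time budget runs out. Minimizing (t_prev - t)^2 + (t_now - 2t)^2 over t gives t = (t_prev + 2*t_now) / 5, which is exactly the expression in the loop; the residual norm serves as the stdev estimate, and the loop exits once stdev / t drops below the target relative stdev (borrowed from config.significance_level). A minimal sketch of the two-point fit outside the criterion types:

// Two-point fit used by quick mode: model total time as linear in the
// iteration count (t_prev ~ t for n iterations, t_now ~ 2t for 2n) and
// take the least-squares solution t = (t_prev + 2*t_now) / 5.
fn quick_fit(t_prev: f64, t_now: f64) -> (f64, f64) {
    let sq = |v: f64| v * v;
    let t = (t_prev + 2.0 * t_now) / 5.0;
    // Residual norm, used above as the stdev estimate for the stop rule.
    let stdev = (sq(t_prev - t) + sq(t_now - 2.0 * t)).sqrt();
    (t, stdev)
}

fn main() {
    // Perfectly linear timings (1 ms for n, 2 ms for 2n): zero residual.
    let (t, stdev) = quick_fit(1.0e6, 2.0e6);
    assert_eq!((t, stdev), (1.0e6, 0.0));
    // Noisy timings leave a residual, so the loop would keep doubling n.
    let (t, stdev) = quick_fit(1.0e6, 2.4e6);
    println!("t = {:.0} ns, relative stdev = {:.2}", t, stdev / t);
}
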
diff --git a/vendor/criterion/src/stats/bivariate/bootstrap.rs b/vendor/criterion/src/stats/bivariate/bootstrap.rs
index 9eb7fa7b5..9eb7fa7b5 100755..100644
--- a/vendor/criterion/src/stats/bivariate/bootstrap.rs
+++ b/vendor/criterion/src/stats/bivariate/bootstrap.rs
diff --git a/vendor/criterion/src/stats/bivariate/mod.rs b/vendor/criterion/src/stats/bivariate/mod.rs
index d1e8df703..2351c9ef6 100755..100644
--- a/vendor/criterion/src/stats/bivariate/mod.rs
+++ b/vendor/criterion/src/stats/bivariate/mod.rs
@@ -8,6 +8,7 @@ use crate::stats::bivariate::resamples::Resamples;
use crate::stats::float::Float;
use crate::stats::tuple::{Tuple, TupledDistributionsBuilder};
use crate::stats::univariate::Sample;
+#[cfg(feature = "rayon")]
use rayon::iter::{IntoParallelIterator, ParallelIterator};
/// Bivariate `(X, Y)` data
@@ -72,27 +73,41 @@ where
T::Distributions: Send,
T::Builder: Send,
{
- (0..nresamples)
- .into_par_iter()
- .map_init(
- || Resamples::new(*self),
- |resamples, _| statistic(resamples.next()),
- )
- .fold(
- || T::Builder::new(0),
- |mut sub_distributions, sample| {
+ #[cfg(feature = "rayon")]
+ {
+ (0..nresamples)
+ .into_par_iter()
+ .map_init(
+ || Resamples::new(*self),
+ |resamples, _| statistic(resamples.next()),
+ )
+ .fold(
+ || T::Builder::new(0),
+ |mut sub_distributions, sample| {
+ sub_distributions.push(sample);
+ sub_distributions
+ },
+ )
+ .reduce(
+ || T::Builder::new(0),
+ |mut a, mut b| {
+ a.extend(&mut b);
+ a
+ },
+ )
+ .complete()
+ }
+ #[cfg(not(feature = "rayon"))]
+ {
+ let mut resamples = Resamples::new(*self);
+ (0..nresamples)
+ .map(|_| statistic(resamples.next()))
+ .fold(T::Builder::new(0), |mut sub_distributions, sample| {
sub_distributions.push(sample);
sub_distributions
- },
- )
- .reduce(
- || T::Builder::new(0),
- |mut a, mut b| {
- a.extend(&mut b);
- a
- },
- )
- .complete()
+ })
+ .complete()
+ }
}
/// Returns a view into the `X` data
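
This is the first of several stats modules that gain a serial fallback: the rayon map_init/fold/reduce pipeline now compiles only with the "rayon" feature, while the cfg(not(...)) path hoists a single mutable Resamples out of a plain iterator chain, since map_init's per-worker state is unnecessary on one thread. A hedged miniature of the gating pattern; sum_of_squares is an illustrative stand-in, not part of criterion:

// Illustrative stand-in for the gated bootstrap bodies, not criterion
// API: the same computation compiles to a rayon pipeline or to a plain
// iterator depending on the "rayon" feature.
fn sum_of_squares(xs: &[f64]) -> f64 {
    #[cfg(feature = "rayon")]
    {
        use rayon::prelude::*;
        xs.par_iter().map(|x| x * x).sum()
    }
    #[cfg(not(feature = "rayon"))]
    {
        xs.iter().map(|x| x * x).sum()
    }
}
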
diff --git a/vendor/criterion/src/stats/bivariate/regression.rs b/vendor/criterion/src/stats/bivariate/regression.rs
index f09443f10..f09443f10 100755..100644
--- a/vendor/criterion/src/stats/bivariate/regression.rs
+++ b/vendor/criterion/src/stats/bivariate/regression.rs
diff --git a/vendor/criterion/src/stats/bivariate/resamples.rs b/vendor/criterion/src/stats/bivariate/resamples.rs
index e254dc792..e254dc792 100755..100644
--- a/vendor/criterion/src/stats/bivariate/resamples.rs
+++ b/vendor/criterion/src/stats/bivariate/resamples.rs
diff --git a/vendor/criterion/src/stats/float.rs b/vendor/criterion/src/stats/float.rs
index b7748ddb5..b7748ddb5 100755..100644
--- a/vendor/criterion/src/stats/float.rs
+++ b/vendor/criterion/src/stats/float.rs
diff --git a/vendor/criterion/src/stats/mod.rs b/vendor/criterion/src/stats/mod.rs
index 4f926debd..4f926debd 100755..100644
--- a/vendor/criterion/src/stats/mod.rs
+++ b/vendor/criterion/src/stats/mod.rs
diff --git a/vendor/criterion/src/stats/rand_util.rs b/vendor/criterion/src/stats/rand_util.rs
index ed374cf98..ed374cf98 100755..100644
--- a/vendor/criterion/src/stats/rand_util.rs
+++ b/vendor/criterion/src/stats/rand_util.rs
diff --git a/vendor/criterion/src/stats/test.rs b/vendor/criterion/src/stats/test.rs
index 9e13f3084..9e13f3084 100755..100644
--- a/vendor/criterion/src/stats/test.rs
+++ b/vendor/criterion/src/stats/test.rs
diff --git a/vendor/criterion/src/stats/tuple.rs b/vendor/criterion/src/stats/tuple.rs
index 1c075159e..1c075159e 100755..100644
--- a/vendor/criterion/src/stats/tuple.rs
+++ b/vendor/criterion/src/stats/tuple.rs
diff --git a/vendor/criterion/src/stats/univariate/bootstrap.rs b/vendor/criterion/src/stats/univariate/bootstrap.rs
index 21c914011..21c914011 100755..100644
--- a/vendor/criterion/src/stats/univariate/bootstrap.rs
+++ b/vendor/criterion/src/stats/univariate/bootstrap.rs
diff --git a/vendor/criterion/src/stats/univariate/kde/kernel.rs b/vendor/criterion/src/stats/univariate/kde/kernel.rs
index c3d0ff513..c3d0ff513 100755..100644
--- a/vendor/criterion/src/stats/univariate/kde/kernel.rs
+++ b/vendor/criterion/src/stats/univariate/kde/kernel.rs
diff --git a/vendor/criterion/src/stats/univariate/kde/mod.rs b/vendor/criterion/src/stats/univariate/kde/mod.rs
index 9b0836d74..c54de55a2 100755..100644
--- a/vendor/criterion/src/stats/univariate/kde/mod.rs
+++ b/vendor/criterion/src/stats/univariate/kde/mod.rs
@@ -5,6 +5,7 @@ pub mod kernel;
use self::kernel::Kernel;
use crate::stats::float::Float;
use crate::stats::univariate::Sample;
+#[cfg(feature = "rayon")]
use rayon::prelude::*;
/// Univariate kernel density estimator
@@ -42,8 +43,13 @@ where
///
/// - Multithreaded
pub fn map(&self, xs: &[A]) -> Box<[A]> {
- xs.par_iter()
- .map(|&x| self.estimate(x))
+ #[cfg(feature = "rayon")]
+ let iter = xs.par_iter();
+
+ #[cfg(not(feature = "rayon"))]
+ let iter = xs.iter();
+
+ iter.map(|&x| self.estimate(x))
.collect::<Vec<_>>()
.into_boxed_slice()
}
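
Here the gating is lighter: bind whichever iterator the feature provides, then share one combinator chain. This type-checks because Iterator and rayon's ParallelIterator expose compatible map/collect combinators. The same trick in miniature (illustrative only, not criterion API):

// Bind the feature-appropriate iterator, then share the combinator chain.
#[cfg(feature = "rayon")]
use rayon::prelude::*;

fn doubled(xs: &[u32]) -> Vec<u32> {
    #[cfg(feature = "rayon")]
    let iter = xs.par_iter();
    #[cfg(not(feature = "rayon"))]
    let iter = xs.iter();

    iter.map(|&x| x * 2).collect()
}
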
diff --git a/vendor/criterion/src/stats/univariate/mixed.rs b/vendor/criterion/src/stats/univariate/mixed.rs
index 5c0a59fac..d6b845d1b 100755..100644
--- a/vendor/criterion/src/stats/univariate/mixed.rs
+++ b/vendor/criterion/src/stats/univariate/mixed.rs
@@ -4,6 +4,7 @@ use crate::stats::float::Float;
use crate::stats::tuple::{Tuple, TupledDistributionsBuilder};
use crate::stats::univariate::Resamples;
use crate::stats::univariate::Sample;
+#[cfg(feature = "rayon")]
use rayon::prelude::*;
/// Performs a *mixed* two-sample bootstrap
@@ -27,31 +28,51 @@ where
c.extend_from_slice(b);
let c = Sample::new(&c);
- (0..nresamples)
- .into_par_iter()
- .map_init(
- || Resamples::new(c),
- |resamples, _| {
+ #[cfg(feature = "rayon")]
+ {
+ (0..nresamples)
+ .into_par_iter()
+ .map_init(
+ || Resamples::new(c),
+ |resamples, _| {
+ let resample = resamples.next();
+ let a: &Sample<A> = Sample::new(&resample[..n_a]);
+ let b: &Sample<A> = Sample::new(&resample[n_a..]);
+
+ statistic(a, b)
+ },
+ )
+ .fold(
+ || T::Builder::new(0),
+ |mut sub_distributions, sample| {
+ sub_distributions.push(sample);
+ sub_distributions
+ },
+ )
+ .reduce(
+ || T::Builder::new(0),
+ |mut a, mut b| {
+ a.extend(&mut b);
+ a
+ },
+ )
+ .complete()
+ }
+ #[cfg(not(feature = "rayon"))]
+ {
+ let mut resamples = Resamples::new(c);
+ (0..nresamples)
+ .map(|_| {
let resample = resamples.next();
let a: &Sample<A> = Sample::new(&resample[..n_a]);
let b: &Sample<A> = Sample::new(&resample[n_a..]);
statistic(a, b)
- },
- )
- .fold(
- || T::Builder::new(0),
- |mut sub_distributions, sample| {
+ })
+ .fold(T::Builder::new(0), |mut sub_distributions, sample| {
sub_distributions.push(sample);
sub_distributions
- },
- )
- .reduce(
- || T::Builder::new(0),
- |mut a, mut b| {
- a.extend(&mut b);
- a
- },
- )
- .complete()
+ })
+ .complete()
+ }
}
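
The computation being gated here is a mixed two-sample bootstrap: both samples are pooled, the pool is resampled with replacement, and each resample is split at n_a, so the statistic is evaluated under the null hypothesis that both samples come from one population. A minimal serial sketch of that idea, assuming the rand crate's 0.8-style API; criterion's own Resamples type is not shown, and difference of means stands in for the generic statistic closure:

// Minimal serial sketch of the mixed bootstrap: pool both samples,
// resample the pool with replacement, split at the original boundary,
// and evaluate the statistic under the shared-population null.
use rand::Rng;

fn mean(xs: &[f64]) -> f64 {
    xs.iter().sum::<f64>() / xs.len() as f64
}

fn mixed_bootstrap(a: &[f64], b: &[f64], nresamples: usize) -> Vec<f64> {
    let pooled: Vec<f64> = a.iter().chain(b).copied().collect();
    let mut rng = rand::thread_rng();
    (0..nresamples)
        .map(|_| {
            // One resample of the pooled data, drawn with replacement.
            let resample: Vec<f64> = (0..pooled.len())
                .map(|_| pooled[rng.gen_range(0..pooled.len())])
                .collect();
            let (ra, rb) = resample.split_at(a.len());
            mean(ra) - mean(rb)
        })
        .collect()
}
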
diff --git a/vendor/criterion/src/stats/univariate/mod.rs b/vendor/criterion/src/stats/univariate/mod.rs
index 8dfb5f8a9..5b221272d 100755..100644
--- a/vendor/criterion/src/stats/univariate/mod.rs
+++ b/vendor/criterion/src/stats/univariate/mod.rs
@@ -11,6 +11,7 @@ pub mod outliers;
use crate::stats::float::Float;
use crate::stats::tuple::{Tuple, TupledDistributionsBuilder};
+#[cfg(feature = "rayon")]
use rayon::prelude::*;
use std::cmp;
@@ -42,11 +43,42 @@ where
let nresamples_sqrt = (nresamples as f64).sqrt().ceil() as usize;
let per_chunk = (nresamples + nresamples_sqrt - 1) / nresamples_sqrt;
- (0..nresamples_sqrt)
- .into_par_iter()
- .map_init(
- || (Resamples::new(a), Resamples::new(b)),
- |(a_resamples, b_resamples), i| {
+ #[cfg(feature = "rayon")]
+ {
+ (0..nresamples_sqrt)
+ .into_par_iter()
+ .map_init(
+ || (Resamples::new(a), Resamples::new(b)),
+ |(a_resamples, b_resamples), i| {
+ let start = i * per_chunk;
+ let end = cmp::min((i + 1) * per_chunk, nresamples);
+ let a_resample = a_resamples.next();
+
+ let mut sub_distributions: T::Builder =
+ TupledDistributionsBuilder::new(end - start);
+
+ for _ in start..end {
+ let b_resample = b_resamples.next();
+ sub_distributions.push(statistic(a_resample, b_resample));
+ }
+ sub_distributions
+ },
+ )
+ .reduce(
+ || T::Builder::new(0),
+ |mut a, mut b| {
+ a.extend(&mut b);
+ a
+ },
+ )
+ .complete()
+ }
+ #[cfg(not(feature = "rayon"))]
+ {
+ let mut a_resamples = Resamples::new(a);
+ let mut b_resamples = Resamples::new(b);
+ (0..nresamples_sqrt)
+ .map(|i| {
let start = i * per_chunk;
let end = cmp::min((i + 1) * per_chunk, nresamples);
let a_resample = a_resamples.next();
@@ -59,14 +91,11 @@ where
sub_distributions.push(statistic(a_resample, b_resample));
}
sub_distributions
- },
- )
- .reduce(
- || T::Builder::new(0),
- |mut a, mut b| {
+ })
+ .fold(T::Builder::new(0), |mut a, mut b| {
a.extend(&mut b);
a
- },
- )
- .complete()
+ })
+ .complete()
+ }
}
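
This two-sample bootstrap parallelizes over roughly sqrt(nresamples) chunks so that each chunk reuses one `a` resample across `per_chunk` `b` resamples; `(nresamples + nresamples_sqrt - 1) / nresamples_sqrt` is ordinary ceiling division. A small sketch of the chunk boundaries this arithmetic produces:

// The chunking arithmetic above in isolation: split `nresamples` into
// about sqrt(nresamples) half-open ranges via ceiling division.
fn chunks(nresamples: usize) -> Vec<(usize, usize)> {
    let s = (nresamples as f64).sqrt().ceil() as usize;
    let per_chunk = (nresamples + s - 1) / s; // ceil(nresamples / s)
    (0..s)
        .map(|i| (i * per_chunk, usize::min((i + 1) * per_chunk, nresamples)))
        .collect()
}

fn main() {
    // 10 resamples -> 4 chunks: [0,3) [3,6) [6,9) [9,10)
    assert_eq!(chunks(10), vec![(0, 3), (3, 6), (6, 9), (9, 10)]);
}
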
diff --git a/vendor/criterion/src/stats/univariate/outliers/mod.rs b/vendor/criterion/src/stats/univariate/outliers/mod.rs
index b8ed7c744..b8ed7c744 100755..100644
--- a/vendor/criterion/src/stats/univariate/outliers/mod.rs
+++ b/vendor/criterion/src/stats/univariate/outliers/mod.rs
diff --git a/vendor/criterion/src/stats/univariate/outliers/tukey.rs b/vendor/criterion/src/stats/univariate/outliers/tukey.rs
index 70713ac57..70713ac57 100755..100644
--- a/vendor/criterion/src/stats/univariate/outliers/tukey.rs
+++ b/vendor/criterion/src/stats/univariate/outliers/tukey.rs
diff --git a/vendor/criterion/src/stats/univariate/percentiles.rs b/vendor/criterion/src/stats/univariate/percentiles.rs
index be6bcf391..39def18e7 100755..100644
--- a/vendor/criterion/src/stats/univariate/percentiles.rs
+++ b/vendor/criterion/src/stats/univariate/percentiles.rs
@@ -54,27 +54,23 @@ where
/// Returns the interquartile range
pub fn iqr(&self) -> A {
- unsafe {
- let q1 = self.at_unchecked(A::cast(25));
- let q3 = self.at_unchecked(A::cast(75));
+ let q1 = self.at(A::cast(25));
+ let q3 = self.at(A::cast(75));
- q3 - q1
- }
+ q3 - q1
}
/// Returns the 50th percentile
pub fn median(&self) -> A {
- unsafe { self.at_unchecked(A::cast(50)) }
+ self.at(A::cast(50))
}
/// Returns the 25th, 50th and 75th percentiles
pub fn quartiles(&self) -> (A, A, A) {
- unsafe {
- (
- self.at_unchecked(A::cast(25)),
- self.at_unchecked(A::cast(50)),
- self.at_unchecked(A::cast(75)),
- )
- }
+ (
+ self.at(A::cast(25)),
+ self.at(A::cast(50)),
+ self.at(A::cast(75)),
+ )
}
}
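
Replacing at_unchecked with at trades the unsafe blocks for bounds-checked lookups at no algorithmic cost. For orientation, a tiny worked example of the quantities these methods return, using linear interpolation between order statistics (a common percentile convention, shown purely for illustration; not criterion's internal implementation):

// Linear interpolation between order statistics; shown only to
// illustrate what `median`, `quartiles` and `iqr` return.
fn percentile(sorted: &[f64], p: f64) -> f64 {
    let rank = p / 100.0 * (sorted.len() - 1) as f64;
    let (lo, hi) = (rank.floor() as usize, rank.ceil() as usize);
    sorted[lo] + (sorted[hi] - sorted[lo]) * rank.fract()
}

fn main() {
    let xs = [1.0, 2.0, 3.0, 4.0, 5.0];
    let (q1, q3) = (percentile(&xs, 25.0), percentile(&xs, 75.0));
    assert_eq!(percentile(&xs, 50.0), 3.0); // median
    assert_eq!(q3 - q1, 2.0); // interquartile range
}
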
diff --git a/vendor/criterion/src/stats/univariate/resamples.rs b/vendor/criterion/src/stats/univariate/resamples.rs
index 923669d59..923669d59 100755..100644
--- a/vendor/criterion/src/stats/univariate/resamples.rs
+++ b/vendor/criterion/src/stats/univariate/resamples.rs
diff --git a/vendor/criterion/src/stats/univariate/sample.rs b/vendor/criterion/src/stats/univariate/sample.rs
index 8f10db7b1..6fbb4fb2d 100755..100644
--- a/vendor/criterion/src/stats/univariate/sample.rs
+++ b/vendor/criterion/src/stats/univariate/sample.rs
@@ -4,6 +4,7 @@ use crate::stats::float::Float;
use crate::stats::tuple::{Tuple, TupledDistributionsBuilder};
use crate::stats::univariate::Percentiles;
use crate::stats::univariate::Resamples;
+#[cfg(feature = "rayon")]
use rayon::prelude::*;
/// A collection of data points drawn from a population
@@ -12,6 +13,7 @@ use rayon::prelude::*;
///
/// - The sample contains at least 2 data points
/// - The sample contains no `NaN`s
+#[repr(transparent)]
pub struct Sample<A>([A]);
// TODO(rust-lang/rfcs#735) move this `impl` into a private percentiles module
@@ -127,7 +129,10 @@ where
}
let mut v = self.to_vec().into_boxed_slice();
+ #[cfg(feature = "rayon")]
v.par_sort_unstable_by(cmp);
+ #[cfg(not(feature = "rayon"))]
+ v.sort_unstable_by(cmp);
// NB :-1: to intra-crate privacy rules
unsafe { mem::transmute(v) }
@@ -206,27 +211,41 @@ where
T::Distributions: Send,
T::Builder: Send,
{
- (0..nresamples)
- .into_par_iter()
- .map_init(
- || Resamples::new(self),
- |resamples, _| statistic(resamples.next()),
- )
- .fold(
- || T::Builder::new(0),
- |mut sub_distributions, sample| {
+ #[cfg(feature = "rayon")]
+ {
+ (0..nresamples)
+ .into_par_iter()
+ .map_init(
+ || Resamples::new(self),
+ |resamples, _| statistic(resamples.next()),
+ )
+ .fold(
+ || T::Builder::new(0),
+ |mut sub_distributions, sample| {
+ sub_distributions.push(sample);
+ sub_distributions
+ },
+ )
+ .reduce(
+ || T::Builder::new(0),
+ |mut a, mut b| {
+ a.extend(&mut b);
+ a
+ },
+ )
+ .complete()
+ }
+ #[cfg(not(feature = "rayon"))]
+ {
+ let mut resamples = Resamples::new(self);
+ (0..nresamples)
+ .map(|_| statistic(resamples.next()))
+ .fold(T::Builder::new(0), |mut sub_distributions, sample| {
sub_distributions.push(sample);
sub_distributions
- },
- )
- .reduce(
- || T::Builder::new(0),
- |mut a, mut b| {
- a.extend(&mut b);
- a
- },
- )
- .complete()
+ })
+ .complete()
+ }
}
#[cfg(test)]
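
The new #[repr(transparent)] attribute is what justifies the pre-existing mem::transmute a few lines below it: it guarantees Sample<A> has exactly the layout of [A], so a Box<[A]> can be reinterpreted as a boxed Sample (one fat pointer converted into another of identical layout). The pattern in isolation, with Wrapper as an illustrative stand-in for Sample<A>:

// Sound only because of #[repr(transparent)]: Wrapper has exactly the
// layout of [f64], so both boxes are identically shaped fat pointers.
#[repr(transparent)]
struct Wrapper([f64]);

fn into_wrapper(v: Box<[f64]>) -> Box<Wrapper> {
    unsafe { std::mem::transmute(v) }
}

fn main() {
    let w = into_wrapper(vec![1.0, 2.0].into_boxed_slice());
    assert_eq!(w.0.len(), 2);
}
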
diff --git a/vendor/criterion/tests/criterion_tests.rs b/vendor/criterion/tests/criterion_tests.rs
index cca448e02..7b608214b 100755..100644
--- a/vendor/criterion/tests/criterion_tests.rs
+++ b/vendor/criterion/tests/criterion_tests.rs
@@ -1,11 +1,7 @@
-#![allow(deprecated)]
-
-use criterion;
-use serde_json;
-
+#[cfg(feature = "plotters")]
+use criterion::SamplingMode;
use criterion::{
- criterion_group, criterion_main, profiler::Profiler, BatchSize, Benchmark, BenchmarkId,
- Criterion, Fun, ParameterizedBenchmark, SamplingMode, Throughput,
+ criterion_group, criterion_main, profiler::Profiler, BatchSize, BenchmarkId, Criterion,
};
use serde_json::value::Value;
use std::cell::{Cell, RefCell};
@@ -33,7 +29,6 @@ fn short_benchmark(dir: &TempDir) -> Criterion {
.warm_up_time(Duration::from_millis(250))
.measurement_time(Duration::from_millis(500))
.nresamples(2000)
- .with_plots()
}
#[derive(Clone)]
@@ -75,19 +70,22 @@ fn verify_json(dir: &PathBuf, path: &str) {
serde_json::from_reader::<File, Value>(f).unwrap();
}
+#[cfg(feature = "html_reports")]
fn verify_svg(dir: &PathBuf, path: &str) {
verify_file(dir, path);
}
+#[cfg(feature = "html_reports")]
fn verify_html(dir: &PathBuf, path: &str) {
verify_file(dir, path);
}
fn verify_stats(dir: &PathBuf, baseline: &str) {
- verify_json(&dir, &format!("{}/estimates.json", baseline));
- verify_json(&dir, &format!("{}/sample.json", baseline));
- verify_json(&dir, &format!("{}/tukey.json", baseline));
- verify_json(&dir, &format!("{}/benchmark.json", baseline));
+ verify_json(dir, &format!("{}/estimates.json", baseline));
+ verify_json(dir, &format!("{}/sample.json", baseline));
+ verify_json(dir, &format!("{}/tukey.json", baseline));
+ verify_json(dir, &format!("{}/benchmark.json", baseline));
+ #[cfg(feature = "csv_output")]
verify_file(&dir, &format!("{}/raw.csv", baseline));
}
@@ -164,7 +162,7 @@ fn test_retain_baseline() {
let pre_modified = latest_modified(&dir.path().join("test_retain_baseline/some-baseline"));
short_benchmark(&dir)
- .retain_baseline("some-baseline".to_owned())
+ .retain_baseline("some-baseline".to_owned(), true)
.bench_function("test_retain_baseline", |b| b.iter(|| 10));
let post_modified = latest_modified(&dir.path().join("test_retain_baseline/some-baseline"));
@@ -174,11 +172,18 @@ fn test_retain_baseline() {
#[test]
#[should_panic(expected = "Baseline 'some-baseline' must exist before comparison is allowed")]
-fn test_compare_baseline() {
- // Initial benchmark to populate
+fn test_compare_baseline_strict_panics_when_missing_baseline() {
+ let dir = temp_dir();
+ short_benchmark(&dir)
+ .retain_baseline("some-baseline".to_owned(), true)
+ .bench_function("test_compare_baseline", |b| b.iter(|| 10));
+}
+
+#[test]
+fn test_compare_baseline_lenient_when_missing_baseline() {
let dir = temp_dir();
short_benchmark(&dir)
- .retain_baseline("some-baseline".to_owned())
+ .retain_baseline("some-baseline".to_owned(), false)
.bench_function("test_compare_baseline", |b| b.iter(|| 10));
}
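
These tests exercise the strictness flag that retain_baseline now takes: true panics when the named baseline is missing, false lets the run proceed without the comparison. A usage sketch of the same call; the baseline name "main" is only an example:

// Usage sketch of the signature exercised above:
// Criterion::retain_baseline(name, strict).
use criterion::Criterion;

fn compare_against_main(c: Criterion) -> Criterion {
    // `true` requires the "main" baseline to exist; pass `false` to
    // tolerate a missing baseline (e.g. on a fresh checkout).
    c.retain_baseline("main".to_owned(), true)
}
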
@@ -251,27 +256,6 @@ fn test_bench_function() {
}
#[test]
-fn test_bench_functions() {
- let dir = temp_dir();
- let function_1 = Fun::new("times 10", |b, i| b.iter(|| *i * 10));
- let function_2 = Fun::new("times 20", |b, i| b.iter(|| *i * 20));
-
- let functions = vec![function_1, function_2];
-
- short_benchmark(&dir).bench_functions("test_bench_functions", functions, 20);
-}
-
-#[test]
-fn test_bench_function_over_inputs() {
- let dir = temp_dir();
- short_benchmark(&dir).bench_function_over_inputs(
- "test_bench_function_over_inputs",
- |b, i| b.iter(|| *i * 10),
- vec![100, 1000],
- );
-}
-
-#[test]
fn test_filtering() {
let dir = temp_dir();
let counter = Counter::default();
@@ -288,82 +272,62 @@ fn test_filtering() {
#[test]
fn test_timing_loops() {
let dir = temp_dir();
- short_benchmark(&dir).bench(
- "test_timing_loops",
- Benchmark::new("iter", |b| b.iter(|| 10))
- .with_function("iter_with_setup", |b| {
- b.iter_with_setup(|| vec![10], |v| v[0])
- })
- .with_function("iter_with_large_setup", |b| {
- b.iter_with_large_setup(|| vec![10], |v| v[0])
- })
- .with_function("iter_with_large_drop", |b| {
- b.iter_with_large_drop(|| vec![10; 100])
- })
- .with_function("iter_batched_small", |b| {
- b.iter_batched(|| vec![10], |v| v[0], BatchSize::SmallInput)
- })
- .with_function("iter_batched_large", |b| {
- b.iter_batched(|| vec![10], |v| v[0], BatchSize::LargeInput)
- })
- .with_function("iter_batched_per_iteration", |b| {
- b.iter_batched(|| vec![10], |v| v[0], BatchSize::PerIteration)
- })
- .with_function("iter_batched_one_batch", |b| {
- b.iter_batched(|| vec![10], |v| v[0], BatchSize::NumBatches(1))
- })
- .with_function("iter_batched_10_iterations", |b| {
- b.iter_batched(|| vec![10], |v| v[0], BatchSize::NumIterations(10))
- })
- .with_function("iter_batched_ref_small", |b| {
- b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::SmallInput)
- })
- .with_function("iter_batched_ref_large", |b| {
- b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::LargeInput)
- })
- .with_function("iter_batched_ref_per_iteration", |b| {
- b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::PerIteration)
- })
- .with_function("iter_batched_ref_one_batch", |b| {
- b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::NumBatches(1))
- })
- .with_function("iter_batched_ref_10_iterations", |b| {
- b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::NumIterations(10))
- }),
- );
-}
-
-#[test]
-fn test_throughput() {
- let dir = temp_dir();
- short_benchmark(&dir).bench(
- "test_throughput_bytes",
- Benchmark::new("strlen", |b| b.iter(|| "foo".len())).throughput(Throughput::Bytes(3)),
- );
- short_benchmark(&dir).bench(
- "test_throughput_elems",
- ParameterizedBenchmark::new(
- "veclen",
- |b, v| b.iter(|| v.len()),
- vec![vec![1], vec![1, 2, 3]],
- )
- .throughput(|v| Throughput::Elements(v.len() as u64)),
- );
+ let mut c = short_benchmark(&dir);
+ let mut group = c.benchmark_group("test_timing_loops");
+ group.bench_function("iter_with_setup", |b| {
+ b.iter_with_setup(|| vec![10], |v| v[0])
+ });
+ group.bench_function("iter_with_large_setup", |b| {
+ b.iter_batched(|| vec![10], |v| v[0], BatchSize::NumBatches(1))
+ });
+ group.bench_function("iter_with_large_drop", |b| {
+ b.iter_with_large_drop(|| vec![10; 100])
+ });
+ group.bench_function("iter_batched_small", |b| {
+ b.iter_batched(|| vec![10], |v| v[0], BatchSize::SmallInput)
+ });
+ group.bench_function("iter_batched_large", |b| {
+ b.iter_batched(|| vec![10], |v| v[0], BatchSize::LargeInput)
+ });
+ group.bench_function("iter_batched_per_iteration", |b| {
+ b.iter_batched(|| vec![10], |v| v[0], BatchSize::PerIteration)
+ });
+ group.bench_function("iter_batched_one_batch", |b| {
+ b.iter_batched(|| vec![10], |v| v[0], BatchSize::NumBatches(1))
+ });
+ group.bench_function("iter_batched_10_iterations", |b| {
+ b.iter_batched(|| vec![10], |v| v[0], BatchSize::NumIterations(10))
+ });
+ group.bench_function("iter_batched_ref_small", |b| {
+ b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::SmallInput)
+ });
+ group.bench_function("iter_batched_ref_large", |b| {
+ b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::LargeInput)
+ });
+ group.bench_function("iter_batched_ref_per_iteration", |b| {
+ b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::PerIteration)
+ });
+ group.bench_function("iter_batched_ref_one_batch", |b| {
+ b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::NumBatches(1))
+ });
+ group.bench_function("iter_batched_ref_10_iterations", |b| {
+ b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::NumIterations(10))
+ });
}
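
The timing-loop test now builds on benchmark_group, replacing the removed Benchmark and ParameterizedBenchmark builders; note that the old iter_with_large_setup call is expressed as iter_batched with BatchSize::NumBatches(1). For comparison, the deleted test_throughput would look roughly like this under the group API (a hedged sketch; BenchmarkGroup::throughput and Throughput::Bytes are the current equivalents of the removed builder methods):

// Hedged sketch of the removed throughput test under the group API.
use criterion::{Criterion, Throughput};

fn throughput_group(c: &mut Criterion) {
    let mut group = c.benchmark_group("test_throughput_bytes");
    group.throughput(Throughput::Bytes(3));
    group.bench_function("strlen", |b| b.iter(|| "foo".len()));
    group.finish();
}
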
// Verify that all expected output files are present
+#[cfg(feature = "plotters")]
#[test]
fn test_output_files() {
let tempdir = temp_dir();
// Run benchmarks twice to produce comparisons
for _ in 0..2 {
- short_benchmark(&tempdir).bench(
- "test_output",
- Benchmark::new("output_1", |b| b.iter(|| 10))
- .with_function("output_2", |b| b.iter(|| 20))
- .with_function("output_\\/*\"?", |b| b.iter(|| 30))
- .sampling_mode(SamplingMode::Linear),
- );
+ let mut c = short_benchmark(&tempdir);
+ let mut group = c.benchmark_group("test_output");
+ group.sampling_mode(SamplingMode::Linear);
+ group.bench_function("output_1", |b| b.iter(|| 10));
+ group.bench_function("output_2", |b| b.iter(|| 20));
+ group.bench_function("output_\\/*\"?", |b| b.iter(|| 30));
}
// For each benchmark, assert that the expected files are present.
@@ -379,7 +343,8 @@ fn test_output_files() {
verify_stats(&dir, "base");
verify_json(&dir, "change/estimates.json");
- if short_benchmark(&tempdir).can_plot() {
+ #[cfg(feature = "html_reports")]
+ {
verify_svg(&dir, "report/MAD.svg");
verify_svg(&dir, "report/mean.svg");
verify_svg(&dir, "report/median.svg");
@@ -402,8 +367,9 @@ fn test_output_files() {
}
}
- // Check for overall report files
- if short_benchmark(&tempdir).can_plot() {
+ #[cfg(feature = "html_reports")]
+ {
+ // Check for overall report files
let dir = tempdir.path().join("test_output");
verify_svg(&dir, "report/violin.svg");
@@ -412,22 +378,24 @@ fn test_output_files() {
// Run the final summary process and check for the report it produces
short_benchmark(&tempdir).final_summary();
- if short_benchmark(&tempdir).can_plot() {
- let dir = tempdir.path().to_owned();
+ #[cfg(feature = "html_reports")]
+ {
+ let dir = tempdir.path().to_owned();
verify_html(&dir, "report/index.html");
}
}
+#[cfg(feature = "plotters")]
#[test]
fn test_output_files_flat_sampling() {
let tempdir = temp_dir();
// Run the benchmark twice to produce comparisons
for _ in 0..2 {
- short_benchmark(&tempdir).bench(
- "test_output",
- Benchmark::new("output_flat", |b| b.iter(|| 10)).sampling_mode(SamplingMode::Flat),
- );
+ let mut c = short_benchmark(&tempdir);
+ let mut group = c.benchmark_group("test_output");
+ group.sampling_mode(SamplingMode::Flat);
+ group.bench_function("output_flat", |b| b.iter(|| 10));
}
let dir = tempdir.path().join("test_output/output_flat");
@@ -436,7 +404,8 @@ fn test_output_files_flat_sampling() {
verify_stats(&dir, "base");
verify_json(&dir, "change/estimates.json");
- if short_benchmark(&tempdir).can_plot() {
+ #[cfg(feature = "html_reports")]
+ {
verify_svg(&dir, "report/MAD.svg");
verify_svg(&dir, "report/mean.svg");
verify_svg(&dir, "report/median.svg");
@@ -462,7 +431,7 @@ fn test_output_files_flat_sampling() {
#[should_panic(expected = "Benchmark function must call Bencher::iter or related method.")]
fn test_bench_with_no_iteration_panics() {
let dir = temp_dir();
- short_benchmark(&dir).bench("test_no_iter", Benchmark::new("no_iter", |_b| {}));
+ short_benchmark(&dir).bench_function("no_iter", |_b| {});
}
#[test]
@@ -497,7 +466,7 @@ fn test_criterion_doesnt_panic_if_measured_time_is_zero() {
}
mod macros {
- use super::{criterion, criterion_group, criterion_main};
+ use super::{criterion_group, criterion_main, Criterion};
#[test]
#[should_panic(expected = "group executed")]
@@ -532,8 +501,6 @@ mod macros {
#[test]
#[should_panic(expected = "group executed")]
fn criterion_group() {
- use self::criterion::Criterion;
-
fn group(_crit: &mut Criterion) {}
fn group2(_crit: &mut Criterion) {
panic!("group executed");
@@ -547,8 +514,6 @@ mod macros {
#[test]
#[should_panic(expected = "group executed")]
fn criterion_group_trailing_comma() {
- use self::criterion::Criterion;
-
fn group(_crit: &mut Criterion) {}
fn group2(_crit: &mut Criterion) {
panic!("group executed");