Diffstat (limited to 'src/lib.rs')
 -rwxr-xr-x  src/lib.rs | 616
 1 file changed, 221 insertions(+), 395 deletions(-)
diff --git a/src/lib.rs b/src/lib.rs
index 98dcf1e..16e79cc 100755
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -27,13 +27,15 @@
)
)]
+#[cfg(all(feature = "rayon", target_arch = "wasm32"))]
+compile_error!("Rayon cannot be used when targeting wasm32. Try disabling default features.");
+
#[cfg(test)]
extern crate approx;
#[cfg(test)]
extern crate quickcheck;
-use clap::value_t;
use regex::Regex;
#[macro_use]
@@ -57,6 +59,7 @@ mod benchmark_group;
pub mod async_executor;
mod bencher;
mod connection;
+#[cfg(feature = "csv_output")]
mod csv_report;
mod error;
mod estimate;
@@ -76,9 +79,6 @@ use std::cell::RefCell;
use std::collections::HashSet;
use std::default::Default;
use std::env;
-use std::fmt;
-use std::iter::IntoIterator;
-use std::marker::PhantomData;
use std::net::TcpStream;
use std::path::{Path, PathBuf};
use std::process::Command;
@@ -88,40 +88,41 @@ use std::time::Duration;
use criterion_plot::{Version, VersionError};
use crate::benchmark::BenchmarkConfig;
-use crate::benchmark::NamedRoutine;
use crate::connection::Connection;
use crate::connection::OutgoingMessage;
-use crate::csv_report::FileCsvReport;
use crate::html::Html;
use crate::measurement::{Measurement, WallTime};
-use crate::plot::{Gnuplot, Plotter, PlottersBackend};
+#[cfg(feature = "plotters")]
+use crate::plot::PlottersBackend;
+use crate::plot::{Gnuplot, Plotter};
use crate::profiler::{ExternalProfiler, Profiler};
-use crate::report::{BencherReport, CliReport, Report, ReportContext, Reports};
-use crate::routine::Function;
+use crate::report::{BencherReport, CliReport, CliVerbosity, Report, ReportContext, Reports};
#[cfg(feature = "async")]
pub use crate::bencher::AsyncBencher;
pub use crate::bencher::Bencher;
-#[allow(deprecated)]
-pub use crate::benchmark::{Benchmark, BenchmarkDefinition, ParameterizedBenchmark};
pub use crate::benchmark_group::{BenchmarkGroup, BenchmarkId};
lazy_static! {
static ref DEBUG_ENABLED: bool = std::env::var_os("CRITERION_DEBUG").is_some();
static ref GNUPLOT_VERSION: Result<Version, VersionError> = criterion_plot::version();
static ref DEFAULT_PLOTTING_BACKEND: PlottingBackend = {
- match &*GNUPLOT_VERSION {
- Ok(_) => PlottingBackend::Gnuplot,
- Err(e) => {
- match e {
- VersionError::Exec(_) => println!("Gnuplot not found, using plotters backend"),
- e => println!(
- "Gnuplot not found or not usable, using plotters backend\n{}",
- e
- ),
- };
- PlottingBackend::Plotters
+ if cfg!(feature = "html_reports") {
+ match &*GNUPLOT_VERSION {
+ Ok(_) => PlottingBackend::Gnuplot,
+ Err(e) => {
+ match e {
+ VersionError::Exec(_) => eprintln!("Gnuplot not found, using plotters backend"),
+ e => eprintln!(
+ "Gnuplot not found or not usable, using plotters backend\n{}",
+ e
+ ),
+ };
+ PlottingBackend::Plotters
+ }
}
+ } else {
+ PlottingBackend::None
}
};
static ref CARGO_CRITERION_CONNECTION: Option<Mutex<Connection>> = {
@@ -177,36 +178,6 @@ pub fn black_box<T>(dummy: T) -> T {
}
}
-/// Representing a function to benchmark together with a name of that function.
-/// Used together with `bench_functions` to represent one out of multiple functions
-/// under benchmark.
-#[doc(hidden)]
-pub struct Fun<I: fmt::Debug, M: Measurement + 'static = WallTime> {
- f: NamedRoutine<I, M>,
- _phantom: PhantomData<M>,
-}
-
-impl<I, M: Measurement> Fun<I, M>
-where
- I: fmt::Debug + 'static,
-{
- /// Create a new `Fun` given a name and a closure
- pub fn new<F>(name: &str, f: F) -> Fun<I, M>
- where
- F: FnMut(&mut Bencher<'_, M>, &I) + 'static,
- {
- let routine = NamedRoutine {
- id: name.to_owned(),
- f: Box::new(RefCell::new(Function::new(f))),
- };
-
- Fun {
- f: routine,
- _phantom: PhantomData,
- }
- }
-}
-
/// Argument to [`Bencher::iter_batched`](struct.Bencher.html#method.iter_batched) and
/// [`Bencher::iter_batched_ref`](struct.Bencher.html#method.iter_batched_ref) which controls the
/// batch size.
@@ -296,12 +267,17 @@ impl BatchSize {
/// Baseline describes how the baseline_directory is handled.
#[derive(Debug, Clone, Copy)]
pub enum Baseline {
- /// Compare ensures a previous saved version of the baseline
- /// exists and runs comparison against that.
- Compare,
+ /// CompareLenient compares against a previous saved version of the baseline.
+ /// If a previous baseline does not exist, the benchmark is run as normal but no comparison occurs.
+ CompareLenient,
+ /// CompareStrict compares against a previous saved version of the baseline.
+ /// If a previous baseline does not exist, a panic occurs.
+ CompareStrict,
/// Save writes the benchmark results to the baseline directory,
/// overwriting any results that were previously there.
Save,
+ /// Discard benchmark results.
+ Discard,
}
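
The new `Baseline` variants correspond to the command-line flags added further down in this diff. A minimal, illustrative sketch of that mapping (the match below is only for illustration; the actual flag parsing lives in `configure_from_args`):

```rust
use criterion::Baseline;

// Illustrative mapping between the new CLI flags and the Baseline variants:
//   --save-baseline <name>    => Baseline::Save
//   --baseline <name>         => Baseline::CompareStrict
//   --baseline-lenient <name> => Baseline::CompareLenient
//   --discard-baseline        => Baseline::Discard
fn describe(baseline: Baseline) -> &'static str {
    match baseline {
        Baseline::CompareLenient => "compare when a saved baseline exists, otherwise just run",
        Baseline::CompareStrict => "compare, panicking if the saved baseline is missing",
        Baseline::Save => "overwrite the saved baseline with the new results",
        Baseline::Discard => "run the benchmarks but persist nothing",
    }
}
```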
/// Enum used to select the plotting backend.
@@ -313,12 +289,18 @@ pub enum PlottingBackend {
/// Plotting backend which uses the rust 'Plotters' library. This is the default if `gnuplot`
/// is not installed.
Plotters,
+    /// Null plotting backend which outputs nothing.
+ None,
}
impl PlottingBackend {
- fn create_plotter(&self) -> Box<dyn Plotter> {
+ fn create_plotter(&self) -> Option<Box<dyn Plotter>> {
match self {
- PlottingBackend::Gnuplot => Box::new(Gnuplot::default()),
- PlottingBackend::Plotters => Box::new(PlottersBackend::default()),
+ PlottingBackend::Gnuplot => Some(Box::new(Gnuplot::default())),
+ #[cfg(feature = "plotters")]
+ PlottingBackend::Plotters => Some(Box::new(PlottersBackend::default())),
+ #[cfg(not(feature = "plotters"))]
+ PlottingBackend::Plotters => panic!("Criterion was built without plotters support."),
+ PlottingBackend::None => None,
}
}
}
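
Because `create_plotter` can now yield `None`, a caller that wants no plots can request it through the public `plotting_backend` setter instead of the removed `html_enabled` flag. A minimal sketch, assuming the default features are enabled:

```rust
use criterion::{Criterion, PlottingBackend};

fn no_plots() -> Criterion {
    // PlottingBackend::None maps to `report.html = None`, replacing the old
    // `html_enabled = false` flag; `without_plots()` has the same effect.
    Criterion::default().plotting_backend(PlottingBackend::None)
}
```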
@@ -406,25 +388,24 @@ impl Default for Criterion {
fn default() -> Criterion {
let reports = Reports {
cli_enabled: true,
- cli: CliReport::new(false, false, false),
+ cli: CliReport::new(false, false, CliVerbosity::Normal),
bencher_enabled: false,
bencher: BencherReport,
- html_enabled: true,
- html: Html::new(DEFAULT_PLOTTING_BACKEND.create_plotter()),
- csv_enabled: true,
- csv: FileCsvReport,
+ html: DEFAULT_PLOTTING_BACKEND.create_plotter().map(Html::new),
+ csv_enabled: cfg!(feature = "csv_output"),
};
let mut criterion = Criterion {
config: BenchmarkConfig {
confidence_level: 0.95,
- measurement_time: Duration::new(5, 0),
+ measurement_time: Duration::from_secs(5),
noise_threshold: 0.01,
nresamples: 100_000,
sample_size: 100,
significance_level: 0.05,
- warm_up_time: Duration::new(3, 0),
+ warm_up_time: Duration::from_secs(3),
sampling_mode: SamplingMode::Auto,
+ quick_mode: false,
},
filter: None,
report: reports,
@@ -447,7 +428,7 @@ impl Default for Criterion {
criterion.report.cli_enabled = false;
criterion.report.bencher_enabled = false;
criterion.report.csv_enabled = false;
- criterion.report.html_enabled = false;
+ criterion.report.html = None;
}
criterion
}
@@ -475,6 +456,7 @@ impl<M: Measurement> Criterion<M> {
}
}
+ #[must_use]
/// Changes the internal profiler for benchmarks run with this runner. See
/// the Profiler trait for more details.
pub fn with_profiler<P: Profiler + 'static>(self, p: P) -> Criterion<M> {
@@ -484,21 +466,26 @@ impl<M: Measurement> Criterion<M> {
}
}
+ #[must_use]
/// Set the plotting backend. By default, Criterion will use gnuplot if available, or plotters
/// if not.
///
/// Panics if `backend` is `PlottingBackend::Gnuplot` and gnuplot is not available.
pub fn plotting_backend(mut self, backend: PlottingBackend) -> Criterion<M> {
if let PlottingBackend::Gnuplot = backend {
- if GNUPLOT_VERSION.is_err() {
- panic!("Gnuplot plotting backend was requested, but gnuplot is not available. To continue, either install Gnuplot or allow Criterion.rs to fall back to using plotters.");
- }
+ assert!(
+ !GNUPLOT_VERSION.is_err(),
+ "Gnuplot plotting backend was requested, but gnuplot is not available. \
+ To continue, either install Gnuplot or allow Criterion.rs to fall back \
+ to using plotters."
+ );
}
- self.report.html = Html::new(backend.create_plotter());
+ self.report.html = backend.create_plotter().map(Html::new);
self
}
+ #[must_use]
/// Changes the default size of the sample for benchmarks run with this runner.
///
/// A bigger sample should yield more accurate results if paired with a sufficiently large
@@ -516,18 +503,20 @@ impl<M: Measurement> Criterion<M> {
self
}
+ #[must_use]
/// Changes the default warm up time for benchmarks run with this runner.
///
/// # Panics
///
/// Panics if the input duration is zero
pub fn warm_up_time(mut self, dur: Duration) -> Criterion<M> {
- assert!(dur.to_nanos() > 0);
+ assert!(dur.as_nanos() > 0);
self.config.warm_up_time = dur;
self
}
+ #[must_use]
/// Changes the default measurement time for benchmarks run with this runner.
///
/// With a longer time, the measurement will become more resilient to transitory peak loads
@@ -539,12 +528,13 @@ impl<M: Measurement> Criterion<M> {
///
/// Panics if the input duration is zero
pub fn measurement_time(mut self, dur: Duration) -> Criterion<M> {
- assert!(dur.to_nanos() > 0);
+ assert!(dur.as_nanos() > 0);
self.config.measurement_time = dur;
self
}
+ #[must_use]
/// Changes the default number of resamples for benchmarks run with this runner.
///
/// Number of resamples to use for the
@@ -559,13 +549,14 @@ impl<M: Measurement> Criterion<M> {
pub fn nresamples(mut self, n: usize) -> Criterion<M> {
assert!(n > 0);
if n <= 1000 {
- println!("\nWarning: It is not recommended to reduce nresamples below 1000.");
+ eprintln!("\nWarning: It is not recommended to reduce nresamples below 1000.");
}
self.config.nresamples = n;
self
}
+ #[must_use]
/// Changes the default noise threshold for benchmarks run with this runner. The noise threshold
/// is used to filter out small changes in performance, even if they are statistically
/// significant. Sometimes benchmarking the same code twice will result in small but
@@ -585,6 +576,7 @@ impl<M: Measurement> Criterion<M> {
self
}
+ #[must_use]
/// Changes the default confidence level for benchmarks run with this runner. The confidence
/// level is the desired probability that the true runtime lies within the estimated
/// [confidence interval](https://en.wikipedia.org/wiki/Confidence_interval). The default is
@@ -596,13 +588,14 @@ impl<M: Measurement> Criterion<M> {
pub fn confidence_level(mut self, cl: f64) -> Criterion<M> {
assert!(cl > 0.0 && cl < 1.0);
if cl < 0.5 {
- println!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
+ eprintln!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
}
self.config.confidence_level = cl;
self
}
+ #[must_use]
/// Changes the default [significance level](https://en.wikipedia.org/wiki/Statistical_significance)
/// for benchmarks run with this runner. This is used to perform a
/// [hypothesis test](https://en.wikipedia.org/wiki/Statistical_hypothesis_testing) to see if
@@ -630,32 +623,29 @@ impl<M: Measurement> Criterion<M> {
self
}
+ #[must_use]
/// Enables plotting
pub fn with_plots(mut self) -> Criterion<M> {
// If running under cargo-criterion then don't re-enable the reports; let it do the reporting.
- if self.connection.is_none() {
- self.report.html_enabled = true;
+ if self.connection.is_none() && self.report.html.is_none() {
+ let default_backend = DEFAULT_PLOTTING_BACKEND.create_plotter();
+ if let Some(backend) = default_backend {
+ self.report.html = Some(Html::new(backend));
+ } else {
+ panic!("Cannot find a default plotting backend!");
+ }
}
self
}
+ #[must_use]
/// Disables plotting
pub fn without_plots(mut self) -> Criterion<M> {
- self.report.html_enabled = false;
+ self.report.html = None;
self
}
- /// Return true if generation of the plots is possible.
- #[deprecated(
- since = "0.3.4",
- note = "No longer useful; since the plotters backend is available Criterion.rs can always generate plots"
- )]
- pub fn can_plot(&self) -> bool {
- // Trivially true now that we have plotters.
- // TODO: Deprecate and remove this.
- true
- }
-
+ #[must_use]
/// Names an explicit baseline and enables overwriting the previous results.
pub fn save_baseline(mut self, baseline: String) -> Criterion<M> {
self.baseline_directory = baseline;
@@ -663,13 +653,19 @@ impl<M: Measurement> Criterion<M> {
self
}
+ #[must_use]
/// Names an explicit baseline and disables overwriting the previous results.
- pub fn retain_baseline(mut self, baseline: String) -> Criterion<M> {
+ pub fn retain_baseline(mut self, baseline: String, strict: bool) -> Criterion<M> {
self.baseline_directory = baseline;
- self.baseline = Baseline::Compare;
+ self.baseline = if strict {
+ Baseline::CompareStrict
+ } else {
+ Baseline::CompareLenient
+ };
self
}
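
The extra `strict` flag on `retain_baseline` selects between the two new compare variants. A short sketch of a call site as it would look after this change:

```rust
use criterion::Criterion;

fn compare_against_base(strict: bool) -> Criterion {
    // strict = true  -> Baseline::CompareStrict  (panics if the saved baseline is missing)
    // strict = false -> Baseline::CompareLenient (missing baselines are skipped, not compared)
    Criterion::default().retain_baseline("base".to_string(), strict)
}
```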
+ #[must_use]
/// Filters the benchmarks. Only benchmarks with names that contain the
/// given string will be executed.
pub fn with_filter<S: Into<String>>(mut self, filter: S) -> Criterion<M> {
@@ -685,6 +681,7 @@ impl<M: Measurement> Criterion<M> {
self
}
+ #[must_use]
/// Override whether the CLI output will be colored or not. Usually you would use the `--color`
/// CLI argument, but this is available for programmatic use as well.
pub fn with_output_color(mut self, enabled: bool) -> Criterion<M> {
@@ -693,6 +690,7 @@ impl<M: Measurement> Criterion<M> {
}
/// Set the output directory (currently for testing only)
+ #[must_use]
#[doc(hidden)]
pub fn output_directory(mut self, path: &Path) -> Criterion<M> {
self.output_directory = path.to_owned();
@@ -701,6 +699,7 @@ impl<M: Measurement> Criterion<M> {
}
/// Set the profile time (currently for testing only)
+ #[must_use]
#[doc(hidden)]
pub fn profile_time(mut self, profile_time: Option<Duration>) -> Criterion<M> {
match profile_time {
@@ -728,109 +727,131 @@ impl<M: Measurement> Criterion<M> {
/// Configure this criterion struct based on the command-line arguments to
/// this process.
+ #[must_use]
#[cfg_attr(feature = "cargo-clippy", allow(clippy::cognitive_complexity))]
pub fn configure_from_args(mut self) -> Criterion<M> {
- use clap::{App, Arg};
- let matches = App::new("Criterion Benchmark")
- .arg(Arg::with_name("FILTER")
+ use clap::{Arg, Command};
+ let matches = Command::new("Criterion Benchmark")
+ .arg(Arg::new("FILTER")
.help("Skip benchmarks whose names do not contain FILTER.")
.index(1))
- .arg(Arg::with_name("color")
- .short("c")
+ .arg(Arg::new("color")
+ .short('c')
.long("color")
.alias("colour")
.takes_value(true)
.possible_values(&["auto", "always", "never"])
.default_value("auto")
.help("Configure coloring of output. always = always colorize output, never = never colorize output, auto = colorize output if output is a tty and compiled for unix."))
- .arg(Arg::with_name("verbose")
- .short("v")
+ .arg(Arg::new("verbose")
+ .short('v')
.long("verbose")
.help("Print additional statistical information."))
- .arg(Arg::with_name("noplot")
- .short("n")
+ .arg(Arg::new("quiet")
+ .long("quiet")
+ .conflicts_with("verbose")
+ .help("Print only the benchmark results."))
+ .arg(Arg::new("noplot")
+ .short('n')
.long("noplot")
.help("Disable plot and HTML generation."))
- .arg(Arg::with_name("save-baseline")
- .short("s")
+ .arg(Arg::new("save-baseline")
+ .short('s')
.long("save-baseline")
.default_value("base")
.help("Save results under a named baseline."))
- .arg(Arg::with_name("baseline")
- .short("b")
+ .arg(Arg::new("discard-baseline")
+ .long("discard-baseline")
+ .conflicts_with_all(&["save-baseline", "baseline", "baseline-lenient"])
+ .help("Discard benchmark results."))
+ .arg(Arg::new("baseline")
+ .short('b')
.long("baseline")
.takes_value(true)
- .conflicts_with("save-baseline")
- .help("Compare to a named baseline."))
- .arg(Arg::with_name("list")
+ .conflicts_with_all(&["save-baseline", "baseline-lenient"])
+        .help("Compare to a named baseline. If any benchmarks do not have the specified baseline, this command fails."))
+ .arg(Arg::new("baseline-lenient")
+ .long("baseline-lenient")
+ .takes_value(true)
+ .conflicts_with_all(&["save-baseline", "baseline"])
+        .help("Compare to a named baseline. Benchmarks missing from the baseline are run as normal but are not compared against it; all other benchmarks are compared as usual."))
+ .arg(Arg::new("list")
.long("list")
.help("List all benchmarks")
.conflicts_with_all(&["test", "profile-time"]))
- .arg(Arg::with_name("profile-time")
+ .arg(Arg::new("profile-time")
.long("profile-time")
.takes_value(true)
.help("Iterate each benchmark for approximately the given number of seconds, doing no analysis and without storing the results. Useful for running the benchmarks in a profiler.")
.conflicts_with_all(&["test", "list"]))
- .arg(Arg::with_name("load-baseline")
+ .arg(Arg::new("load-baseline")
.long("load-baseline")
.takes_value(true)
.conflicts_with("profile-time")
.requires("baseline")
.help("Load a previous baseline instead of sampling new data."))
- .arg(Arg::with_name("sample-size")
+ .arg(Arg::new("sample-size")
.long("sample-size")
.takes_value(true)
- .help(&format!("Changes the default size of the sample for this run. [default: {}]", self.config.sample_size)))
- .arg(Arg::with_name("warm-up-time")
+ .help(&*format!("Changes the default size of the sample for this run. [default: {}]", self.config.sample_size)))
+ .arg(Arg::new("warm-up-time")
.long("warm-up-time")
.takes_value(true)
- .help(&format!("Changes the default warm up time for this run. [default: {}]", self.config.warm_up_time.as_secs())))
- .arg(Arg::with_name("measurement-time")
+ .help(&*format!("Changes the default warm up time for this run. [default: {}]", self.config.warm_up_time.as_secs())))
+ .arg(Arg::new("measurement-time")
.long("measurement-time")
.takes_value(true)
- .help(&format!("Changes the default measurement time for this run. [default: {}]", self.config.measurement_time.as_secs())))
- .arg(Arg::with_name("nresamples")
+ .help(&*format!("Changes the default measurement time for this run. [default: {}]", self.config.measurement_time.as_secs())))
+ .arg(Arg::new("nresamples")
.long("nresamples")
.takes_value(true)
- .help(&format!("Changes the default number of resamples for this run. [default: {}]", self.config.nresamples)))
- .arg(Arg::with_name("noise-threshold")
+ .help(&*format!("Changes the default number of resamples for this run. [default: {}]", self.config.nresamples)))
+ .arg(Arg::new("noise-threshold")
.long("noise-threshold")
.takes_value(true)
- .help(&format!("Changes the default noise threshold for this run. [default: {}]", self.config.noise_threshold)))
- .arg(Arg::with_name("confidence-level")
+ .help(&*format!("Changes the default noise threshold for this run. [default: {}]", self.config.noise_threshold)))
+ .arg(Arg::new("confidence-level")
.long("confidence-level")
.takes_value(true)
- .help(&format!("Changes the default confidence level for this run. [default: {}]", self.config.confidence_level)))
- .arg(Arg::with_name("significance-level")
+ .help(&*format!("Changes the default confidence level for this run. [default: {}]", self.config.confidence_level)))
+ .arg(Arg::new("significance-level")
.long("significance-level")
.takes_value(true)
- .help(&format!("Changes the default significance level for this run. [default: {}]", self.config.significance_level)))
- .arg(Arg::with_name("test")
- .hidden(true)
+ .help(&*format!("Changes the default significance level for this run. [default: {}]", self.config.significance_level)))
+ .arg(Arg::new("quick")
+ .long("quick")
+ .conflicts_with("sample-size")
+ .help(&*format!("Benchmark only until the significance level has been reached [default: {}]", self.config.quick_mode)))
+ .arg(Arg::new("test")
+ .hide(true)
.long("test")
.help("Run the benchmarks once, to verify that they execute successfully, but do not measure or report the results.")
.conflicts_with_all(&["list", "profile-time"]))
- .arg(Arg::with_name("bench")
- .hidden(true)
+ .arg(Arg::new("bench")
+ .hide(true)
.long("bench"))
- .arg(Arg::with_name("plotting-backend")
+ .arg(Arg::new("plotting-backend")
.long("plotting-backend")
.takes_value(true)
.possible_values(&["gnuplot", "plotters"])
.help("Set the plotting backend. By default, Criterion.rs will use the gnuplot backend if gnuplot is available, or the plotters backend if it isn't."))
- .arg(Arg::with_name("output-format")
+ .arg(Arg::new("output-format")
.long("output-format")
.takes_value(true)
.possible_values(&["criterion", "bencher"])
.default_value("criterion")
.help("Change the CLI output format. By default, Criterion.rs will use its own format. If output format is set to 'bencher', Criterion.rs will print output in a format that resembles the 'bencher' crate."))
- .arg(Arg::with_name("nocapture")
+ .arg(Arg::new("nocapture")
.long("nocapture")
+ .hide(true)
+ .help("Ignored, but added for compatibility with libtest."))
+ .arg(Arg::new("show-output")
+ .long("show-output")
.hidden(true)
.help("Ignored, but added for compatibility with libtest."))
- .arg(Arg::with_name("version")
+ .arg(Arg::new("version")
.hidden(true)
- .short("V")
+ .short('V')
.long("version"))
.after_help("
This executable is a Criterion.rs benchmark.
@@ -850,21 +871,21 @@ https://bheisler.github.io/criterion.rs/book/faq.html
if self.connection.is_some() {
if let Some(color) = matches.value_of("color") {
if color != "auto" {
- println!("Warning: --color will be ignored when running with cargo-criterion. Use `cargo criterion --color {} -- <args>` instead.", color);
+ eprintln!("Warning: --color will be ignored when running with cargo-criterion. Use `cargo criterion --color {} -- <args>` instead.", color);
}
}
if matches.is_present("verbose") {
- println!("Warning: --verbose will be ignored when running with cargo-criterion. Use `cargo criterion --output-format verbose -- <args>` instead.");
+ eprintln!("Warning: --verbose will be ignored when running with cargo-criterion. Use `cargo criterion --output-format verbose -- <args>` instead.");
}
if matches.is_present("noplot") {
- println!("Warning: --noplot will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend disabled -- <args>` instead.");
+ eprintln!("Warning: --noplot will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend disabled -- <args>` instead.");
}
if let Some(backend) = matches.value_of("plotting-backend") {
- println!("Warning: --plotting-backend will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend {} -- <args>` instead.", backend);
+ eprintln!("Warning: --plotting-backend will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend {} -- <args>` instead.", backend);
}
if let Some(format) = matches.value_of("output-format") {
if format != "criterion" {
- println!("Warning: --output-format will be ignored when running with cargo-criterion. Use `cargo criterion --output-format {} -- <args>` instead.", format);
+ eprintln!("Warning: --output-format will be ignored when running with cargo-criterion. Use `cargo criterion --output-format {} -- <args>` instead.", format);
}
}
@@ -875,7 +896,7 @@ https://bheisler.github.io/criterion.rs/book/faq.html
.unwrap_or(false)
|| matches.is_present("load-baseline")
{
- println!("Error: baselines are not supported when running with cargo-criterion.");
+ eprintln!("Error: baselines are not supported when running with cargo-criterion.");
std::process::exit(1);
}
}
@@ -893,17 +914,14 @@ https://bheisler.github.io/criterion.rs/book/faq.html
} else if matches.is_present("list") {
Mode::List
} else if matches.is_present("profile-time") {
- let num_seconds = value_t!(matches.value_of("profile-time"), u64).unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
+ let num_seconds = matches.value_of_t_or_exit("profile-time");
- if num_seconds < 1 {
- println!("Profile time must be at least one second.");
+ if num_seconds < 1.0 {
+ eprintln!("Profile time must be at least one second.");
std::process::exit(1);
}
- Mode::Profile(Duration::from_secs(num_seconds))
+ Mode::Profile(Duration::from_secs_f64(num_seconds))
} else {
Mode::Benchmark
};
@@ -927,16 +945,21 @@ https://bheisler.github.io/criterion.rs/book/faq.html
if matches.is_present("noplot") {
self = self.without_plots();
- } else {
- self = self.with_plots();
}
if let Some(dir) = matches.value_of("save-baseline") {
self.baseline = Baseline::Save;
self.baseline_directory = dir.to_owned()
}
+ if matches.is_present("discard-baseline") {
+ self.baseline = Baseline::Discard;
+ }
if let Some(dir) = matches.value_of("baseline") {
- self.baseline = Baseline::Compare;
+ self.baseline = Baseline::CompareStrict;
+ self.baseline_directory = dir.to_owned();
+ }
+ if let Some(dir) = matches.value_of("baseline-lenient") {
+ self.baseline = Baseline::CompareLenient;
self.baseline_directory = dir.to_owned();
}
@@ -945,7 +968,7 @@ https://bheisler.github.io/criterion.rs/book/faq.html
self.report.cli_enabled = false;
self.report.bencher_enabled = false;
self.report.csv_enabled = false;
- self.report.html_enabled = false;
+ self.report.html = None;
} else {
match matches.value_of("output-format") {
Some("bencher") => {
@@ -954,6 +977,13 @@ https://bheisler.github.io/criterion.rs/book/faq.html
}
_ => {
let verbose = matches.is_present("verbose");
+ let verbosity = if verbose {
+ CliVerbosity::Verbose
+ } else if matches.is_present("quiet") {
+ CliVerbosity::Quiet
+ } else {
+ CliVerbosity::Normal
+ };
let stdout_isatty = atty::is(atty::Stream::Stdout);
let mut enable_text_overwrite = stdout_isatty && !verbose && !debug_enabled();
let enable_text_coloring;
@@ -970,7 +1000,7 @@ https://bheisler.github.io/criterion.rs/book/faq.html
self.report.bencher_enabled = false;
self.report.cli_enabled = true;
self.report.cli =
- CliReport::new(enable_text_overwrite, enable_text_coloring, verbose);
+ CliReport::new(enable_text_overwrite, enable_text_coloring, verbosity);
}
};
}
@@ -980,92 +1010,78 @@ https://bheisler.github.io/criterion.rs/book/faq.html
}
if matches.is_present("sample-size") {
- let num_size = value_t!(matches.value_of("sample-size"), usize).unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
+ let num_size = matches.value_of_t_or_exit("sample-size");
assert!(num_size >= 10);
self.config.sample_size = num_size;
}
if matches.is_present("warm-up-time") {
- let num_seconds = value_t!(matches.value_of("warm-up-time"), u64).unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
+ let num_seconds = matches.value_of_t_or_exit("warm-up-time");
- let dur = std::time::Duration::new(num_seconds, 0);
- assert!(dur.to_nanos() > 0);
+ let dur = std::time::Duration::from_secs_f64(num_seconds);
+ assert!(dur.as_nanos() > 0);
self.config.warm_up_time = dur;
}
if matches.is_present("measurement-time") {
- let num_seconds =
- value_t!(matches.value_of("measurement-time"), u64).unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
+ let num_seconds = matches.value_of_t_or_exit("measurement-time");
- let dur = std::time::Duration::new(num_seconds, 0);
- assert!(dur.to_nanos() > 0);
+ let dur = std::time::Duration::from_secs_f64(num_seconds);
+ assert!(dur.as_nanos() > 0);
self.config.measurement_time = dur;
}
if matches.is_present("nresamples") {
- let num_resamples =
- value_t!(matches.value_of("nresamples"), usize).unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
+ let num_resamples = matches.value_of_t_or_exit("nresamples");
assert!(num_resamples > 0);
self.config.nresamples = num_resamples;
}
if matches.is_present("noise-threshold") {
- let num_noise_threshold = value_t!(matches.value_of("noise-threshold"), f64)
- .unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
+ let num_noise_threshold = matches.value_of_t_or_exit("noise-threshold");
assert!(num_noise_threshold > 0.0);
self.config.noise_threshold = num_noise_threshold;
}
if matches.is_present("confidence-level") {
- let num_confidence_level = value_t!(matches.value_of("confidence-level"), f64)
- .unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
+ let num_confidence_level = matches.value_of_t_or_exit("confidence-level");
assert!(num_confidence_level > 0.0 && num_confidence_level < 1.0);
self.config.confidence_level = num_confidence_level;
}
if matches.is_present("significance-level") {
- let num_significance_level = value_t!(matches.value_of("significance-level"), f64)
- .unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
+ let num_significance_level = matches.value_of_t_or_exit("significance-level");
assert!(num_significance_level > 0.0 && num_significance_level < 1.0);
self.config.significance_level = num_significance_level;
}
+ if matches.is_present("quick") {
+ self.config.quick_mode = true;
+ }
+
self
}
fn filter_matches(&self, id: &str) -> bool {
- match self.filter {
- Some(ref regex) => regex.is_match(id),
+ match &self.filter {
+ Some(regex) => regex.is_match(id),
None => true,
}
}
+ /// Returns true iff we should save the benchmark results in
+ /// json files on the local disk.
+ fn should_save_baseline(&self) -> bool {
+ self.connection.is_none()
+ && self.load_baseline.is_none()
+ && !matches!(self.baseline, Baseline::Discard)
+ }
+
/// Return a benchmark group. All benchmarks performed using a benchmark group will be
/// grouped together in the final report.
///
@@ -1091,9 +1107,7 @@ https://bheisler.github.io/criterion.rs/book/faq.html
/// Panics if the group name is empty
pub fn benchmark_group<S: Into<String>>(&mut self, group_name: S) -> BenchmarkGroup<'_, M> {
let group_name = group_name.into();
- if group_name.is_empty() {
- panic!("Group name must not be empty.");
- }
+ assert!(!group_name.is_empty(), "Group name must not be empty.");
if let Some(conn) = &self.connection {
conn.send(&OutgoingMessage::BeginningBenchmarkGroup { group: &group_name })
@@ -1180,163 +1194,24 @@ where
);
self
}
-
- /// Benchmarks a function under various inputs
- ///
- /// This is a convenience method to execute several related benchmarks. Each benchmark will
- /// receive the id: `${id}/${input}`.
- ///
- /// # Example
- ///
- /// ```rust
- /// # #[macro_use] extern crate criterion;
- /// # use self::criterion::*;
- ///
- /// fn bench(c: &mut Criterion) {
- /// c.bench_function_over_inputs("from_elem",
- /// |b: &mut Bencher, size: &usize| {
- /// b.iter(|| vec![0u8; *size]);
- /// },
- /// vec![1024, 2048, 4096]
- /// );
- /// }
- ///
- /// criterion_group!(benches, bench);
- /// criterion_main!(benches);
- /// ```
- #[doc(hidden)]
- #[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
- #[allow(deprecated)]
- pub fn bench_function_over_inputs<I, F>(
- &mut self,
- id: &str,
- f: F,
- inputs: I,
- ) -> &mut Criterion<M>
- where
- I: IntoIterator,
- I::Item: fmt::Debug + 'static,
- F: FnMut(&mut Bencher<'_, M>, &I::Item) + 'static,
- {
- self.bench(id, ParameterizedBenchmark::new(id, f, inputs))
- }
-
- /// Benchmarks multiple functions
- ///
- /// All functions get the same input and are compared with the other implementations.
- /// Works similar to `bench_function`, but with multiple functions.
- ///
- /// # Example
- ///
- /// ``` rust
- /// # #[macro_use] extern crate criterion;
- /// # use self::criterion::*;
- /// # fn seq_fib(i: &u32) {}
- /// # fn par_fib(i: &u32) {}
- ///
- /// fn bench_seq_fib(b: &mut Bencher, i: &u32) {
- /// b.iter(|| {
- /// seq_fib(i);
- /// });
- /// }
- ///
- /// fn bench_par_fib(b: &mut Bencher, i: &u32) {
- /// b.iter(|| {
- /// par_fib(i);
- /// });
- /// }
- ///
- /// fn bench(c: &mut Criterion) {
- /// let sequential_fib = Fun::new("Sequential", bench_seq_fib);
- /// let parallel_fib = Fun::new("Parallel", bench_par_fib);
- /// let funs = vec![sequential_fib, parallel_fib];
- ///
- /// c.bench_functions("Fibonacci", funs, 14);
- /// }
- ///
- /// criterion_group!(benches, bench);
- /// criterion_main!(benches);
- /// ```
- #[doc(hidden)]
- #[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
- #[allow(deprecated)]
- pub fn bench_functions<I>(
- &mut self,
- id: &str,
- funs: Vec<Fun<I, M>>,
- input: I,
- ) -> &mut Criterion<M>
- where
- I: fmt::Debug + 'static,
- {
- let benchmark = ParameterizedBenchmark::with_functions(
- funs.into_iter().map(|fun| fun.f).collect(),
- vec![input],
- );
-
- self.bench(id, benchmark)
- }
-
- /// Executes the given benchmark. Use this variant to execute benchmarks
- /// with complex configuration. This can be used to compare multiple
- /// functions, execute benchmarks with custom configuration settings and
- /// more. See the Benchmark and ParameterizedBenchmark structs for more
- /// information.
- ///
- /// ```rust
- /// # #[macro_use] extern crate criterion;
- /// # use criterion::*;
- /// # fn routine_1() {}
- /// # fn routine_2() {}
- ///
- /// fn bench(c: &mut Criterion) {
- /// // Setup (construct data, allocate memory, etc)
- /// c.bench(
- /// "routines",
- /// Benchmark::new("routine_1", |b| b.iter(|| routine_1()))
- /// .with_function("routine_2", |b| b.iter(|| routine_2()))
- /// .sample_size(50)
- /// );
- /// }
- ///
- /// criterion_group!(benches, bench);
- /// criterion_main!(benches);
- /// ```
- #[doc(hidden)]
- #[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
- pub fn bench<B: BenchmarkDefinition<M>>(
- &mut self,
- group_id: &str,
- benchmark: B,
- ) -> &mut Criterion<M> {
- benchmark.run(group_id, self);
- self
- }
-}
-
-trait DurationExt {
- fn to_nanos(&self) -> u64;
-}
-
-const NANOS_PER_SEC: u64 = 1_000_000_000;
-
-impl DurationExt for Duration {
- fn to_nanos(&self) -> u64 {
- self.as_secs() * NANOS_PER_SEC + u64::from(self.subsec_nanos())
- }
}
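
With the crate-local `DurationExt::to_nanos` helper removed, call sites rely on `std::time::Duration::as_nanos`, which returns `u128` rather than the helper's `u64`; the sampling-mode code below casts to `f64` where it needs arithmetic. A small illustration:

```rust
use std::time::Duration;

fn main() {
    let target = Duration::from_secs(5);
    let m_ns: u128 = target.as_nanos();  // std replacement for the removed to_nanos() -> u64
    let m_ns_f64 = m_ns as f64;          // cast used where the sampling math needs f64
    assert!(m_ns > 0 && m_ns_f64 > 0.0);
}
```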
/// Enum representing different ways of measuring the throughput of benchmarked code.
/// If the throughput setting is configured for a benchmark then the estimated throughput will
/// be reported as well as the time per iteration.
// TODO: Remove serialize/deserialize from the public API.
-#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum Throughput {
/// Measure throughput in terms of bytes/second. The value should be the number of bytes
/// processed by one iteration of the benchmarked code. Typically, this would be the length of
/// an input string or `&[u8]`.
Bytes(u64),
+ /// Equivalent to Bytes, but the value will be reported in terms of
+ /// kilobytes (1000 bytes) per second instead of kibibytes (1024 bytes) per
+    /// second, megabytes instead of mebibytes, and gigabytes instead of gibibytes.
+ BytesDecimal(u64),
+
/// Measure throughput in terms of elements/second. The value should be the number of elements
/// processed by one iteration of the benchmarked code. Typically, this would be the size of a
/// collection, but could also be the number of lines of input text or the number of values to
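
A hedged sketch of how the new `BytesDecimal` variant would be used from a benchmark group; the group and function names below are made up for illustration. It reports throughput in KB/MB/GB (powers of 1000) where `Bytes` reports KiB/MiB/GiB (powers of 1024):

```rust
use criterion::{Criterion, Throughput};

fn bench(c: &mut Criterion) {
    let mut group = c.benchmark_group("decode"); // hypothetical group name
    // One iteration processes 1_000_000 bytes, reported on a decimal (MB) scale
    // rather than ~0.95 MiB as it would be with Throughput::Bytes.
    group.throughput(Throughput::BytesDecimal(1_000_000));
    group.bench_function("1 MB payload", |b| b.iter(|| { /* code under test */ }));
    group.finish();
}
```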
@@ -1358,7 +1233,7 @@ pub enum AxisScale {
/// or benchmark group.
///
/// ```rust
-/// use self::criterion::{Bencher, Criterion, Benchmark, PlotConfiguration, AxisScale};
+/// use self::criterion::{Bencher, Criterion, PlotConfiguration, AxisScale};
///
/// let plot_config = PlotConfiguration::default()
/// .summary_scale(AxisScale::Logarithmic);
@@ -1383,6 +1258,7 @@ impl Default for PlotConfiguration {
}
impl PlotConfiguration {
+ #[must_use]
/// Set the axis scale (linear or logarithmic) for the summary plots. Typically, you would
/// set this to logarithmic if benchmarking over a range of inputs which scale exponentially.
/// Defaults to linear.
@@ -1455,7 +1331,7 @@ impl ActualSamplingMode {
ActualSamplingMode::Linear => {
let n = sample_count;
let met = warmup_mean_execution_time;
- let m_ns = target_time.to_nanos();
+ let m_ns = target_time.as_nanos();
// Solve: [d + 2*d + 3*d + ... + n*d] * met = m_ns
let total_runs = n * (n + 1) / 2;
let d = ((m_ns as f64 / met / total_runs as f64).ceil() as u64).max(1);
@@ -1465,16 +1341,16 @@ impl ActualSamplingMode {
let recommended_sample_size =
ActualSamplingMode::recommend_linear_sample_size(m_ns as f64, met);
let actual_time = Duration::from_nanos(expected_ns as u64);
- print!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
+ eprint!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
n, target_time, actual_time);
if recommended_sample_size != n {
- println!(
+ eprintln!(
", enable flat sampling, or reduce sample count to {}.",
recommended_sample_size
);
} else {
- println!(" or enable flat sampling.");
+ eprintln!(" or enable flat sampling.");
}
}
@@ -1483,7 +1359,7 @@ impl ActualSamplingMode {
ActualSamplingMode::Flat => {
let n = sample_count;
let met = warmup_mean_execution_time;
- let m_ns = target_time.to_nanos() as f64;
+ let m_ns = target_time.as_nanos() as f64;
let time_per_sample = m_ns / (n as f64);
// This is pretty simplistic; we could do something smarter to fit into the allotted time.
let iterations_per_sample = ((time_per_sample / met).ceil() as u64).max(1);
@@ -1494,13 +1370,13 @@ impl ActualSamplingMode {
let recommended_sample_size =
ActualSamplingMode::recommend_flat_sample_size(m_ns, met);
let actual_time = Duration::from_nanos(expected_ns as u64);
- print!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
+ eprint!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
n, target_time, actual_time);
if recommended_sample_size != n {
- println!(", or reduce sample count to {}.", recommended_sample_size);
+ eprintln!(", or reduce sample count to {}.", recommended_sample_size);
} else {
- println!(".");
+ eprintln!(".");
}
}
@@ -1566,53 +1442,3 @@ pub fn runner(benches: &[&dyn Fn()]) {
}
Criterion::default().configure_from_args().final_summary();
}
-
-/// Print a warning informing users about upcoming changes to features
-#[cfg(not(feature = "html_reports"))]
-#[doc(hidden)]
-pub fn __warn_about_html_reports_feature() {
- if CARGO_CRITERION_CONNECTION.is_none() {
- println!(
- "WARNING: HTML report generation will become a non-default optional feature in Criterion.rs 0.4.0."
- );
- println!(
- "This feature is being moved to cargo-criterion \
- (https://github.com/bheisler/cargo-criterion) and will be optional in a future \
- version of Criterion.rs. To silence this warning, either switch to cargo-criterion or \
- enable the 'html_reports' feature in your Cargo.toml."
- );
- println!();
- }
-}
-
-/// Print a warning informing users about upcoming changes to features
-#[cfg(feature = "html_reports")]
-#[doc(hidden)]
-pub fn __warn_about_html_reports_feature() {
- // They have the feature enabled, so they're ready for the update.
-}
-
-/// Print a warning informing users about upcoming changes to features
-#[cfg(not(feature = "cargo_bench_support"))]
-#[doc(hidden)]
-pub fn __warn_about_cargo_bench_support_feature() {
- if CARGO_CRITERION_CONNECTION.is_none() {
- println!(
- "WARNING: In Criterion.rs 0.4.0, running criterion benchmarks outside of cargo-criterion will become a default optional feature."
- );
- println!(
- "The statistical analysis and reporting is being moved to cargo-criterion \
- (https://github.com/bheisler/cargo-criterion) and will be optional in a future \
- version of Criterion.rs. To silence this warning, either switch to cargo-criterion or \
- enable the 'cargo_bench_support' feature in your Cargo.toml."
- );
- println!();
- }
-}
-
-/// Print a warning informing users about upcoming changes to features
-#[cfg(feature = "cargo_bench_support")]
-#[doc(hidden)]
-pub fn __warn_about_cargo_bench_support_feature() {
- // They have the feature enabled, so they're ready for the update.
-}