author     Jeff Vander Stoep <jeffv@google.com>  2023-03-10 20:32:17 +0000
committer  Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>  2023-03-10 20:32:17 +0000
commit     c7a291994efff0716ff725cb4ab0bf56f38ea0e0 (patch)
tree       233a7ce1b06fc9c6a665f69f59d627b1111a8cf9
parent     84147751e0fdbd38c3def24d5ad557400ef35b7e (diff)
parent     19ca233fef56526b0864105a9061466883eca900 (diff)
download   criterion-c7a291994efff0716ff725cb4ab0bf56f38ea0e0.tar.gz
Upgrade criterion to 0.4.0 am: 3c611a33f1 am: 8f1301829e am: 40e0d40821 am: 19ca233fef
Original change: https://android-review.googlesource.com/c/platform/external/rust/crates/criterion/+/2438096

Change-Id: Ic4a7be53328a27ef5efbe5b108ad3d53bb566024
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
-rw-r--r--  .cargo_vcs_info.json | 7
-rwxr-xr-x  .github/workflows/ci.yaml | 38
-rw-r--r--  Android.bp | 11
-rwxr-xr-x  CHANGELOG.md | 168
-rwxr-xr-x  CONTRIBUTING.md | 9
-rw-r--r--  Cargo.toml | 99
-rwxr-xr-x  Cargo.toml.orig | 51
-rw-r--r--  METADATA | 14
-rwxr-xr-x  README.md | 4
-rwxr-xr-x  benches/benchmarks/compare_functions.rs | 49
-rwxr-xr-x  benches/benchmarks/custom_measurement.rs | 4
-rwxr-xr-x  benches/benchmarks/external_process.rs | 1
-rwxr-xr-x  benches/benchmarks/iter_with_large_drop.rs | 28
-rwxr-xr-x  benches/benchmarks/iter_with_large_setup.rs | 35
-rwxr-xr-x  benches/benchmarks/iter_with_setup.rs | 5
-rwxr-xr-x  benches/benchmarks/measurement_overhead.rs | 2
-rwxr-xr-x  benches/benchmarks/with_inputs.rs | 9
-rw-r--r--  cargo2android.json | 3
-rw-r--r--  patches/Android.bp.diff | 13
-rwxr-xr-x  src/analysis/mod.rs | 13
-rwxr-xr-x  src/bencher.rs | 16
-rwxr-xr-x  src/benchmark.rs | 575
-rwxr-xr-x  src/benchmark_group.rs | 24
-rwxr-xr-x  src/connection.rs | 53
-rwxr-xr-x  src/error.rs | 8
-rwxr-xr-x  src/format.rs | 2
-rwxr-xr-x  src/lib.rs | 616
-rwxr-xr-x  src/macros.rs | 3
-rwxr-xr-x  src/macros_private.rs | 6
-rwxr-xr-x  src/measurement.rs | 33
-rwxr-xr-x  src/plot/gnuplot_backend/mod.rs | 4
-rwxr-xr-x  src/plot/mod.rs | 2
-rwxr-xr-x  src/plot/plotters_backend/summary.rs | 8
-rwxr-xr-x  src/report.rs | 270
-rwxr-xr-x  src/routine.rs | 59
-rwxr-xr-x  src/stats/bivariate/mod.rs | 53
-rwxr-xr-x  src/stats/univariate/kde/mod.rs | 10
-rwxr-xr-x  src/stats/univariate/mixed.rs | 61
-rwxr-xr-x  src/stats/univariate/mod.rs | 55
-rwxr-xr-x  src/stats/univariate/percentiles.rs | 22
-rwxr-xr-x  src/stats/univariate/sample.rs | 57
-rw-r--r--  tests/criterion_tests.rs | 190
42 files changed, 1141 insertions(+), 1549 deletions(-)
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
index 40e1b5b..e6bd1b7 100644
--- a/.cargo_vcs_info.json
+++ b/.cargo_vcs_info.json
@@ -1,5 +1,6 @@
{
"git": {
- "sha1": "4e773a3b8523a73e7105e11f0b2d4b545827712e"
- }
-}
+ "sha1": "5e27b692a43a05736d073fd449e16dcf1c24628c"
+ },
+ "path_in_vcs": ""
+}
\ No newline at end of file
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 2c39904..d7f39e4 100755
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -5,6 +5,7 @@ on:
pull_request:
branches:
- master
+ - version-0.4
name: tests
@@ -16,13 +17,13 @@ jobs:
rust:
- stable
- beta
- - nightly
- - 1.46.0 # MSRV
+ - 1.57 # MSRV
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
+ name: Setup rust toolchain
with:
profile: minimal
toolchain: ${{ matrix.rust }}
@@ -30,25 +31,56 @@ jobs:
components: rustfmt, clippy
- uses: Swatinem/rust-cache@v1
+ name: Load dependencies from cache
- uses: actions-rs/cargo@v1
+ name: Build with stable features
with:
command: build
args: --features stable
- uses: actions-rs/cargo@v1
+ if: ${{ matrix.rust == 'nightly' }}
+ name: Build with unstable features
+ with:
+ command: build
+ args: --all-features
+
+ - uses: actions-rs/cargo@v1
+ name: Build with minimal features
+ with:
+ command: build
+ args: --no-default-features
+
+ - uses: actions-rs/cargo@v1
+ name: Test with stable features
with:
command: test
args: --features stable
- uses: actions-rs/cargo@v1
+ name: Test with minimal features
+ with:
+ command: test
+ args: --no-default-features
+
+ - uses: actions-rs/cargo@v1
+ name: Check for non-standard formatting
if: ${{ matrix.rust == 'stable' }}
with:
command: fmt
args: --all -- --check
- uses: actions-rs/cargo@v1
- if: ${{ matrix.rust != '1.40.0' }} # 1.40 has horrible lints.
+ name: Check for clippy hints
with:
command: clippy
args: -- -D warnings
+
+ - name: Test run targeting WASI
+ run: |
+ curl https://wasmtime.dev/install.sh -sSf | bash
+ source ~/.bashrc
+ export PATH=$HOME/.wasmtime/bin/:$PATH
+ cargo install cargo-wasi
+ cargo wasi bench --no-default-features -- --test
diff --git a/Android.bp b/Android.bp
index 3dd9f93..2448462 100644
--- a/Android.bp
+++ b/Android.bp
@@ -39,23 +39,25 @@ license {
rust_library {
name: "libcriterion",
- // has rustc warnings
host_supported: true,
crate_name: "criterion",
cargo_env_compat: true,
- cargo_pkg_version: "0.3.5",
+ cargo_pkg_version: "0.4.0",
srcs: ["src/lib.rs"],
edition: "2018",
features: [
"cargo_bench_support",
"default",
+ "plotters",
+ "rayon",
],
rustlibs: [
+ "libanes",
"libatty",
"libcast",
- "libclap_deprecated",
+ "libciborium",
+ "libclap_3.2.23",
"libcriterion_plot",
- "libcsv",
"libitertools",
"liblazy_static",
"libnum_traits",
@@ -64,7 +66,6 @@ rust_library {
"librayon",
"libregex",
"libserde",
- "libserde_cbor",
"libserde_json",
"libtinytemplate",
"libwalkdir",
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d583c31..939bc4e 100755
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,5 @@
# Changelog
+
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
@@ -6,21 +7,66 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
## [Unreleased]
+## [0.4.0] - 2022-09-10
+
+### Removed
+
+- The `Criterion::can_plot` function has been removed.
+- The `Criterion::bench_function_over_inputs` function has been removed.
+- The `Criterion::bench_functions` function has been removed.
+- The `Criterion::bench` function has been removed.
+
+### Changed
+
+- HTML report hidden behind non-default feature flag: 'html_reports'
+- Standalone support (ie without cargo-criterion) feature flag: 'cargo_bench_support'
+- MSRV bumped to 1.57
+- `rayon` and `plotters` are optional (and default) dependencies.
+- Status messages ('warming up', 'analyzing', etc) are printed to stderr, benchmark results are printed to stdout.
+- Accept subsecond durations for `--warm-up-time`, `--measurement-time` and `--profile-time`.
+- Replaced serde_cbor with ciborium because the former is no longer maintained.
+- Upgrade clap to v3 and regex to v1.5.
+
+### Added
+
+- A `--discard-baseline` flag for discarding rather than saving benchmark results.
+- Formal support for benchmarking code compiled to web-assembly.
+- A `--quiet` flag for printing just a single line per benchmark.
+- A `Throughput::BytesDecimal` option for measuring throughput in bytes but printing them using
+ decimal units like kilobytes instead of binary units like kibibytes.
+
+### Fixed
+- When using `bench_with_input`, the input parameter will now be passed through `black_box` before
+ passing it to the benchmark.
+
+## [0.3.6] - 2022-07-06
+### Changed
+- MSRV bumped to 1.49
+- Symbol for microseconds changed from ASCII 'us' to unicode 'µs'
+- Documentation fixes
+- Clippy fixes
+
## [0.3.5] - 2021-07-26
+
### Fixed
+
- Corrected `Criterion.toml` in the book.
- Corrected configuration typo in the book.
### Changed
+
- Bump plotters dependency to always include a bug-fix.
- MSRV bumped to 1.46.
## [0.3.4] - 2021-01-24
+
### Added
+
- Added support for benchmarking async functions
- Added `with_output_color` for enabling or disabling CLI output coloring programmatically.
### Fixed
+
- Criterion.rs will now give a clear error message in case of benchmarks that take zero time.
- Added some extra code to ensure that every sample has at least one iteration.
- Added a notice to the `--help` output regarding "unrecognized option" errors.
@@ -29,19 +75,20 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- Criterion.rs will now automatically detect the right output directory.
### Deprecated
+
- `Criterion::can_plot` is no longer useful and is deprecated pending deletion in 0.4.0.
-- `Benchmark` and `ParameterizedBenchmark` were already hidden from documentation, but are now
+- `Benchmark` and `ParameterizedBenchmark` were already hidden from documentation, but are now
formally deprecated pending deletion in 0.4.0. Callers should use `BenchmarkGroup` instead.
- `Criterion::bench_function_over_inputs`, `Criterion::bench_functions`, and `Criterion::bench` were
already hidden from documentation, but are now formally deprecated pending deletion in 0.4.0.
Callers should use `BenchmarkGroup` instead.
-- Three new optional features have been added; "html_reports", "csv_output" and
- "cargo_bench_support". These features currently do nothing except disable a warning message at
- runtime, but in version 0.4.0 they will be used to enable HTML report generation, CSV file
- generation, and the ability to run in cargo-bench (as opposed to [cargo-criterion]).
+- Three new optional features have been added; "html_reports", "csv_output" and
+ "cargo_bench_support". These features currently do nothing except disable a warning message at
+ runtime, but in version 0.4.0 they will be used to enable HTML report generation, CSV file
+ generation, and the ability to run in cargo-bench (as opposed to [cargo-criterion]).
"cargo_bench_support" is enabled by default, but "html_reports" and "csv_output"
are not. If you use Criterion.rs' HTML reports, it is recommended to switch to [cargo-criterion].
- If you use CSV output, it is recommended to switch to [cargo-criterion] and use the
+ If you use CSV output, it is recommended to switch to [cargo-criterion] and use the
`--message-format=json` option for machine-readable output instead. A warning message will be
printed at the start of benchmark runs which do not have "html_reports" or "cargo_bench_support"
enabled, but because CSV output is not widely used it has no warning.
@@ -49,11 +96,13 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
[cargo-criterion]: https://github.com/bheisler/cargo-criterion
## [0.3.3] - 2020-06-29
+
### Added
+
- Added `CRITERION_HOME` environment variable to set the directory for Criterion to store
- its results and charts in.
-- Added support for [cargo-criterion]. The long-term goal here is to remove code from Criterion-rs
- itself to improve compile times, as well as to add features to `cargo-criterion` that are
+ its results and charts in.
+- Added support for [cargo-criterion]. The long-term goal here is to remove code from Criterion-rs
+ itself to improve compile times, as well as to add features to `cargo-criterion` that are
difficult to implement in Criterion-rs.
- Add sampling mode option for benchmarks. This allows the user to change how Criterion.rs chooses
the iteration counts in each sample. By default, nothing will change for most benchmarks, but
@@ -61,11 +110,13 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
This affects the statistics and plots generated.
### Changed
+
- The serialization format for some of the files has changed. This may cause your first benchmark
run after updating to produce errors, but they're harmless and will go away after running the
benchmarks once.
### Fixed
+
- Fixed a bug where the current measurement was not shown on the relative regression plot.
- Fixed rare panic in the plotters backend.
- Panic with a clear error message (rather than panicking messily later on) when the user sets the
@@ -73,7 +124,9 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- Escape single quotes in benchmark names when generating Gnuplot scripts.
## [0.3.2] - 2020-04-26
+
### Added
+
- Added `?Sized` bound to benchmark parameter types, which allows dynamically sized types like
`&str` and `&[T]` to be used as benchmark parameters.
- Added the `--output-format <format>` command-line option. If `--output-format bencher` is passed,
@@ -85,15 +138,18 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
when running tests or benchmarks and allowing stdout output from other tests.
### Fixed
+
- Fixed panic when environment variables contains non-UTF8 characters.
-- Fixed panic when `CRITERION_DEBUG` or `CRITERION_TARGET_DIR` environment variables contain
+- Fixed panic when `CRITERION_DEBUG` or `CRITERION_TARGET_DIR` environment variables contain
non-UTF8 characters.
## [0.3.1] - 2020-01-25
+
### Added
-- Added new plotting backend using the `plotters` crate. Implementation generously provided by Hao
+
+- Added new plotting backend using the `plotters` crate. Implementation generously provided by Hao
Hou, author of the `plotters` crate.
-- Added `--plotting-backend` command-line option to select the plotting backend. The existing
+- Added `--plotting-backend` command-line option to select the plotting backend. The existing
gnuplot backend will be used by default when available, and the plotters backend will be used when
gnuplot is not available or when requested.
- Added `Criterion::plotting_backend()` function to configure the plotting backend in code.
@@ -102,6 +158,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- Benchmark filters can now be regular expressions.
### Fixed
+
- Fixed `fibonacci` functions.
- Fixed `#[criterion]` benchmarks ignoring the command-line options.
- Fixed incorrect scaling of the violin plots.
@@ -109,11 +166,13 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
sample count.
- Fix potential panic when `nresamples` is set too low. Also added a warning
against setting `nresamples` too low.
-- Fixed issue where a slow outer closure would cause Criterion.rs to calculate
+- Fixed issue where a slow outer closure would cause Criterion.rs to calculate
the wrong estimated time and number of iterations in the warm-up phase.
## [0.3.0] - 2019-08-25
+
### Added
+
- Added support for plugging in custom measurements (eg. processor counters)
into Criterion.rs' measurement and analysis.
- Added support for plugging in instrumentation for internal profilers such as
@@ -124,7 +183,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
`BenchmarkGroup` performs the same function as all of the above, but is cleaner to use and more
powerful and flexible. All of these types/functions are now soft-deprecated (meaning they're
hidden from the documentation and should not be used in new code). They will be fully deprecated
- at some point in the 0.3.* series and removed in 0.4.0.
+ at some point in the 0.3.\* series and removed in 0.4.0.
- `iter_custom` - a "timing loop" that allows the caller to perform their own measurements. This is
useful for complex measurements that don't fit into the usual mode of calling a lambda in a loop.
- If the benchmark cannot be completed in approximately the requested measurement time,
@@ -133,67 +192,84 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- Added command-line options to set the defaults for warm-up time, measurement-time, etc.
### Changed
+
- The `raw.csv` file format has been changed slightly. The `sample_time_nanos` field has been split
into `sample_measured_value` and `unit` fields to accommodate custom measurements.
- Throughput has been expanded from u32 to u64 to accommodate very large input sizes.
### Fixed
+
- Fixed possible invalid file name error on Windows
- Fixed potential case where data for two different benchmarks would be stored in the same directory.
### Removed
+
- Removed the `--measure-only` command-line argument; it was deprecated in favor of `--profile-time`
in 0.2.6.
-- External program benchmarks have been removed; they were deprecated in 0.2.6. The new
+- External program benchmarks have been removed; they were deprecated in 0.2.6. The new
`iter_custom` timing loop can be used as a substitute; see `benches/external_process.rs` for an
example of this.
### Deprecated
+
- The `--test` argument is now deprecated. To test benchmarks, use `cargo test --benches`.
## [0.2.11] - 2019-04-08
+
### Added
+
- Enabled automatic text-coloring on Windows.
### Fixed
+
- Fixed panic caused by outdated files after benchmark names or types were changed.
- Reduced timing overhead of `Criterion::iter_batched/iter_batched_ref`.
## [0.2.10] - 2019-02-09
+
### Added
-- Added `iter_batched/iter_batched_ref` timing loops, which allow for setup (like
+
+- Added `iter_batched/iter_batched_ref` timing loops, which allow for setup (like
`iter_with_setup/iter_with_large_setup`) and exclude drop (like `iter_with_large_drop`) but
measure the runtime more accurately, use less memory and are more flexible.
### Deprecated
+
- `iter_with_setup/iter_with_large_setup` are now deprecated in favor of `iter_batched`.
## [0.2.9] - 2019-01-24
+
### Changed
+
- Criterion.rs no longer depends on the default features of the `rand-core` crate. This fixes some
downstream crates which use `rand` in a `no_std` context.
## [0.2.8] - 2019-01-20
+
### Changed
+
- Criterion.rs now uses `rayon` internally instead of manual `unsafe` code built with thread-scoped.
- Replaced handlebars templates with [TinyTemplate](https://github.com/bheisler/TinyTemplate)
- Merged `criterion-stats` crate into `criterion` crate. `criterion-stats` will no longer receive
updates.
-- Replaced or removed various other dependencies to reduce the size of Criterion.rs' dependency
+- Replaced or removed various other dependencies to reduce the size of Criterion.rs' dependency
tree.
## [0.2.7] - 2018-12-29
### Fixed
+
- Fixed version numbers to prevent incompatibilities between `criterion` and `criterion-stats`
crates.
## [0.2.6] - 2018-12-27 - Yanked
+
### Added
+
- Added `--list` command line option, which lists the benchmarks but does not run them, to match
`cargo test -- --list`.
- Added README/CONTRIBUTING/LICENSE files to sub-crates.
-- Displays change in throughput in the command-line and HTML output as well as change in iteration
+- Displays change in throughput in the command-line and HTML output as well as change in iteration
time.
- Benchmarks with multiple functions and multiple values will now generate a per-value summary
report file in addition to the existing per-function one.
@@ -202,8 +278,9 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
(now-deprecated) `--measure-only` argument.
### Fixed
-- Functions passed to `Bencher::iter_with_large_setup` can now return output. This is necessary to
- prevent the compiler from optimizing away the benchmark. This is technically a breaking change -
+
+- Functions passed to `Bencher::iter_with_large_setup` can now return output. This is necessary to
+ prevent the compiler from optimizing away the benchmark. This is technically a breaking change -
that function requires a new type parameter. It's so unlikely to break existing code that I
decided not to delay this for a breaking-change release.
- Reduced measurement overhead for the `iter_with_large_setup` and `iter_with_drop` methods.
@@ -213,16 +290,17 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- Criterion.rs macros no longer require user to `use criterion::Criterion;`
- Criterion.rs no longer initializes a logger, meaning that it will no longer conflict with user
code which does.
-- Criterion.rs no longer fails to parse gnuplot version numbers like
+- Criterion.rs no longer fails to parse gnuplot version numbers like
`gnuplot 5.2 patchlevel 5a (Gentoo revision r0)`
-- Criterion.rs no longer prints an error message that gnuplot couldn't be found when chart
- generation is disabled (either by `Criterion::without_plots`, `--noplot` or disabling the
+- Criterion.rs no longer prints an error message that gnuplot couldn't be found when chart
+ generation is disabled (either by `Criterion::without_plots`, `--noplot` or disabling the
HTML reports feature)
- Benchmark names are now automatically truncated to 100 characters and a number may be added to
make them unique. This fixes a problem where gnuplot would crash if the title was extremely long,
and also improves the general usability of Criterion.rs.
### Changed
+
- Changed timing model of `iter_with_large_setup` to exclude time spent dropping values returned
by the routine. Time measurements taken with 0.2.6 using these methods may differ from those taken
with 0.2.5.
@@ -230,6 +308,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
tree in the benchmark index. This is to accommodate the new per-value summary reports.
### Deprecated
+
- Deprecated the `--measure-only` command-line-argument in favor of `--profile-time`. This will be
removed in 0.3.0.
- External-program benchmarks are now deprecated. They will be removed in 0.3.0.
@@ -239,11 +318,13 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
may be breaking changes that are not listed here.
## [0.2.5] - 2018-08-27
+
### Fixed
+
- Fixed links from generated report files to documentation.
- Fixed formatting for very large percentage changes (>1000%)
- Sorted the benchmarks in the index report by name
-- Fixed case where benchmark ID with special characters would cause Criterion.rs to open the wrong
+- Fixed case where benchmark ID with special characters would cause Criterion.rs to open the wrong
file and log an error message.
- Fixed case where running `cargo clean; cargo bench -- <filter>` would cause Criterion.rs to log
an error message.
@@ -254,11 +335,14 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- Criterion.rs now honors the `CARGO_TARGET_DIR` environment variable.
### Added
+
- Criterion.rs will generate a chart showing the effects of changes in input (or input size) for all
benchmarks with numeric inputs or throughput, not just for those which compare multiple functions.
## [0.2.4] 2018-07-08
+
### Added
+
- Added a pair of flags, `--save-baseline` and `--baseline`, which change
how benchmark results are stored and compared. This is useful for
working against a fixed baseline(eg. comparing progress on an
@@ -279,7 +363,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
private implementation details.
### Fixed
-- The `sample_size` method on the `Criterion`, `Benchmark` and
+
+- The `sample_size` method on the `Criterion`, `Benchmark` and
`ParameterizedBenchmark` structs has been changed to panic if the sample size
is less than 2. Other parts of the code require this and will panic if the
sample size is 1, so this is not considered to be a breaking change.
@@ -288,13 +373,16 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
directory paths, to avoid generating invalid or unexpected paths.
## [0.2.3] - 2018-04-14
+
### Fixed
+
- Criterion.rs will now panic with a clear error message if the user attempts to run
a benchmark which doesn't call the `Bencher::iter` function or a related function,
rather than failing in an uncontrolled manner later.
- Fixed broken links in some more summary reports.
### Added
+
- Added a `--measure-only` argument which causes the benchmark executable to run the
warmup and measurement and then move on to the next benchmark without analyzing or
saving data. This is useful to prevent Criterion.rs' analysis code from appearing
@@ -303,12 +391,16 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
the other reports for easy navigation.
## [0.2.2] - 2018-03-25
+
### Fixed
+
- Fixed broken links in some summary reports.
- Work around apparent rustc bug in >= 1.24.0.
## [0.2.1] - 2018-02-24
+
### Added
+
- HTML reports are now a default Cargo feature. If you wish to disable HTML reports,
disable Criterion.rs' default features. Doing so will allow compatibility with
older Rust versions such as 1.20. If you wish to continue using HTML reports, you
@@ -317,14 +409,18 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
inputs.
### Changed
+
- The plots and HTML reports are now generated in a `report` folder.
### Fixed
+
- Underscores in benchmark names will no longer cause subscripted characters to
appear in generated plots.
## [0.2.0] - 2018-02-05
+
### Added
+
- Added `Criterion.bench` function, which accepts either a `Benchmark` or
`ParameterizedBenchmark`. These new structures allow for custom per-benchmark
configuration as well as more complex benchmark grouping (eg. comparing a Rust
@@ -337,6 +433,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- Added `--noplot` command line option to disable plot generation.
### Changed
+
- The builder methods on the Criterion struct now take and return self by value
for easier chaining. Functions which configure a Criterion structure will need
to be updated accordingly, or will need to be changed to work with the
@@ -350,16 +447,20 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- The generated plots are stored in `target/criterion` rather than `.criterion`.
### Removed
+
- The hidden `criterion::ConfidenceInterval` and`criterion::Estimate` types are
no longer publicly accessible.
- The `Criterion.summarize` function has been removed.
### Fixed
+
- Fixed the relative mean and median reports.
- Fixed panic while summarizing benchmarks.
## [0.1.2] - 2018-01-12
+
### Changed
+
- Criterion.rs is now stable-compatible!
- Criterion.rs now includes its own stable-compatible `black_box` function.
Some benchmarks may now be affected by dead-code-elimination where they
@@ -370,34 +471,40 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
and reduce noise.
### Added
+
- Running benchmarks with the variable "CRITERION_DEBUG" in the environment will
cause Criterion.rs to generate extra debug output and save the gnuplot scripts
alongside the generated plots.
### Fixed
+
- Don't panic on IO errors or gnuplot failures
- Fix generation of invalid gnuplot scripts when benchmarking over inputs and inputs include values <= 0.
- Bug where benchmarks would run one sample fewer than was configured.
### Removed
+
- Generated plots will no longer use log-scale.
## [0.1.1] - 2017-12-12
+
### Added
+
- A changelog file.
- Added a chapter to the book on how Criterion.rs collects and analyzes data.
- Added macro rules to generate a test harness for use with `cargo bench`.
Benchmarks defined without these macros should continue to work.
- New contribution guidelines
- Criterion.rs can selectively run benchmarks. See the Command-line page for
-more details
+ more details
## 0.1.0 - 2017-12-02
+
### Added
-- Initial release on Crates.io.
+- Initial release on Crates.io.
-[Unreleased]: https://github.com/bheisler/criterion.rs/compare/0.3.4...HEAD
+[Unreleased]: https://github.com/bheisler/criterion.rs/compare/0.4.0...HEAD
[0.1.1]: https://github.com/bheisler/criterion.rs/compare/0.1.0...0.1.1
[0.1.2]: https://github.com/bheisler/criterion.rs/compare/0.1.1...0.1.2
[0.2.0]: https://github.com/bheisler/criterion.rs/compare/0.1.2...0.2.0
@@ -416,4 +523,7 @@ more details
[0.3.1]: https://github.com/bheisler/criterion.rs/compare/0.3.0...0.3.1
[0.3.2]: https://github.com/bheisler/criterion.rs/compare/0.3.1...0.3.2
[0.3.3]: https://github.com/bheisler/criterion.rs/compare/0.3.2...0.3.3
-[0.3.4]: https://github.com/bheisler/criterion.rs/compare/0.3.3...0.3.4
\ No newline at end of file
+[0.3.4]: https://github.com/bheisler/criterion.rs/compare/0.3.3...0.3.4
+[0.3.5]: https://github.com/bheisler/criterion.rs/compare/0.3.4...0.3.5
+[0.3.6]: https://github.com/bheisler/criterion.rs/compare/0.3.5...0.3.6
+[0.4.0]: https://github.com/bheisler/criterion.rs/compare/0.3.6...0.4.0
\ No newline at end of file
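
The 0.4.0 entries in the changelog above remove the `Criterion::bench`, `bench_functions`, and `bench_function_over_inputs` entry points in favor of `BenchmarkGroup`, and add `Throughput::BytesDecimal`. Below is a minimal sketch of the replacement pattern, using only APIs that appear elsewhere in this change; the `transfer_sizes` group and `checksum` routine are illustrative and not part of this commit:

    use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};

    // Illustrative routine, not part of this change.
    fn checksum(data: &[u8]) -> u64 {
        data.iter().map(|&b| b as u64).sum()
    }

    fn transfer_sizes(c: &mut Criterion) {
        // BenchmarkGroup replaces the removed bench_functions/bench_function_over_inputs APIs.
        let mut group = c.benchmark_group("transfer_sizes");
        for size in [1024usize, 4096].iter() {
            // BytesDecimal (new in 0.4.0) reports bytes using decimal units (kB) instead of binary (KiB).
            group.throughput(Throughput::BytesDecimal(*size as u64));
            group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| {
                let buf = vec![0u8; size];
                b.iter(|| checksum(&buf));
            });
        }
        group.finish();
    }

    criterion_group!(benches, transfer_sizes);
    criterion_main!(benches);
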
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index be5026b..59ae026 100755
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -55,6 +55,15 @@ Some things that will increase the chance that your pull request is accepted:
* Clearly document public methods
* Write a good commit message
+## Branches
+
+* PRs with breaking changes are made against the unreleased branch. e.g. branch version-0.4
+* PRs without breaking changes are made against the master branch.
+
+If you're not sure which branch to use just start with master, as this can be changed during review.
+
+When it is time to release the unreleased branch, a PR is made from the unreleased branch to master. e.g. https://github.com/bheisler/criterion.rs/pull/496
+
## Github Labels
Criterion.<span></span>rs uses a simple set of labels to track issues. Most important are the
diff --git a/Cargo.toml b/Cargo.toml
index 26e1e10..b918ca5 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -3,28 +3,39 @@
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies
+# to registry (e.g., crates.io) dependencies.
#
-# If you believe there's an error in this file please file an
-# issue against the rust-lang/cargo repository. If you're
-# editing this file be aware that the upstream Cargo.toml
-# will likely look very different (and much more reasonable)
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
name = "criterion"
-version = "0.3.5"
-authors = ["Jorge Aparicio <japaricious@gmail.com>", "Brook Heisler <brookheisler@gmail.com>"]
+version = "0.4.0"
+authors = [
+ "Jorge Aparicio <japaricious@gmail.com>",
+ "Brook Heisler <brookheisler@gmail.com>",
+]
exclude = ["book/*"]
description = "Statistics-driven micro-benchmarking library"
homepage = "https://bheisler.github.io/criterion.rs/book/index.html"
readme = "README.md"
-keywords = ["criterion", "benchmark"]
+keywords = [
+ "criterion",
+ "benchmark",
+]
categories = ["development-tools::profiling"]
license = "Apache-2.0/MIT"
repository = "https://github.com/bheisler/criterion.rs"
+
[package.metadata.docs.rs]
-features = ["async_futures", "async_smol", "async_std", "async_tokio"]
+features = [
+ "async_futures",
+ "async_smol",
+ "async_std",
+ "async_tokio",
+]
[lib]
bench = false
@@ -32,25 +43,34 @@ bench = false
[[bench]]
name = "bench_main"
harness = false
+
+[dependencies.anes]
+version = "0.1.4"
+
[dependencies.async-std]
version = "1.9"
optional = true
[dependencies.atty]
-version = "0.2"
+version = "0.2.6"
[dependencies.cast]
-version = "0.2"
+version = "0.3"
+
+[dependencies.ciborium]
+version = "0.2.0"
[dependencies.clap]
-version = "2.33"
+version = "3.1"
+features = ["std"]
default-features = false
[dependencies.criterion-plot]
-version = "0.4.4"
+version = "0.5.0"
[dependencies.csv]
version = "1.1"
+optional = true
[dependencies.futures]
version = "0.3"
@@ -65,6 +85,7 @@ version = "1.4"
[dependencies.num-traits]
version = "0.2"
+features = ["std"]
default-features = false
[dependencies.oorandom]
@@ -72,23 +93,26 @@ version = "11.1"
[dependencies.plotters]
version = "^0.3.1"
-features = ["svg_backend", "area_series", "line_series"]
+features = [
+ "svg_backend",
+ "area_series",
+ "line_series",
+]
+optional = true
default-features = false
[dependencies.rayon]
version = "1.3"
+optional = true
[dependencies.regex]
-version = "1.3"
+version = "1.5"
features = ["std"]
default-features = false
[dependencies.serde]
version = "1.0"
-[dependencies.serde_cbor]
-version = "0.11"
-
[dependencies.serde_derive]
version = "1.0"
@@ -111,6 +135,7 @@ default-features = false
[dependencies.walkdir]
version = "2.3"
+
[dev-dependencies.approx]
version = "0.5.0"
@@ -131,15 +156,39 @@ version = "3.2.0"
[features]
async = ["futures"]
-async_futures = ["futures/executor", "async"]
-async_smol = ["smol", "async"]
-async_std = ["async-std", "async"]
-async_tokio = ["tokio", "async"]
+async_futures = [
+ "futures/executor",
+ "async",
+]
+async_smol = [
+ "smol",
+ "async",
+]
+async_std = [
+ "async-std",
+ "async",
+]
+async_tokio = [
+ "tokio",
+ "async",
+]
cargo_bench_support = []
-csv_output = []
-default = ["cargo_bench_support"]
+csv_output = ["csv"]
+default = [
+ "rayon",
+ "plotters",
+ "cargo_bench_support",
+]
html_reports = []
real_blackbox = []
-stable = ["async_futures", "async_smol", "async_tokio", "async_std"]
+stable = [
+ "csv_output",
+ "html_reports",
+ "async_futures",
+ "async_smol",
+ "async_tokio",
+ "async_std",
+]
+
[badges.maintenance]
status = "passively-maintained"
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index c0e33f4..c0adc3f 100755
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -3,8 +3,8 @@ authors = [
"Jorge Aparicio <japaricious@gmail.com>",
"Brook Heisler <brookheisler@gmail.com>",
]
-name = "criterion"
-version = "0.3.5"
+name = "criterion"
+version = "0.4.0"
edition = "2018"
description = "Statistics-driven micro-benchmarking library"
@@ -17,30 +17,36 @@ license = "Apache-2.0/MIT"
exclude = ["book/*"]
[dependencies]
+anes = "0.1.4"
lazy_static = "1.4"
-criterion-plot = { path = "plot", version = "0.4.4" }
+criterion-plot = { path = "plot", version = "0.5.0" }
itertools = "0.10"
serde = "1.0"
serde_json = "1.0"
serde_derive = "1.0"
-serde_cbor = "0.11"
-atty = "0.2"
-clap = { version = "2.33", default-features = false }
-csv = "1.1"
+ciborium = "0.2.0"
+atty = "0.2.6"
+clap = { version = "3.1", default-features = false, features = ["std"] }
walkdir = "2.3"
tinytemplate = "1.1"
-cast = "0.2"
-num-traits = { version = "0.2", default-features = false }
+cast = "0.3"
+num-traits = { version = "0.2", default-features = false, features = ["std"] }
oorandom = "11.1"
-rayon = "1.3"
-regex = { version = "1.3", default-features = false, features = ["std"] }
-futures = { version = "0.3", default_features = false, optional = true }
-smol = { version = "1.2", default-features = false, optional = true }
-tokio = { version = "1.0", default-features = false, features = ["rt"], optional = true }
-async-std = { version = "1.9", optional = true }
+regex = { version = "1.5", default-features = false, features = ["std"] }
+
+# Optional dependencies
+rayon = { version = "1.3", optional = true }
+csv = { version = "1.1", optional = true }
+futures = { version = "0.3", default_features = false, optional = true }
+smol = { version = "1.2", default-features = false, optional = true }
+tokio = { version = "1.0", default-features = false, features = [
+ "rt",
+], optional = true }
+async-std = { version = "1.9", optional = true }
[dependencies.plotters]
version = "^0.3.1"
+optional = true
default-features = false
features = ["svg_backend", "area_series", "line_series"]
@@ -55,8 +61,15 @@ futures = { version = "0.3", default_features = false, features = ["executor"
maintenance = { status = "passively-maintained" }
[features]
-stable = ["async_futures", "async_smol", "async_tokio", "async_std"]
-default = ["cargo_bench_support"]
+stable = [
+ "csv_output",
+ "html_reports",
+ "async_futures",
+ "async_smol",
+ "async_tokio",
+ "async_std",
+]
+default = ["rayon", "plotters", "cargo_bench_support"]
# Enable use of the nightly-only test::black_box function to discourage compiler optimizations.
real_blackbox = []
@@ -64,7 +77,7 @@ real_blackbox = []
# Enable async/await support
async = ["futures"]
-# These features enable built-in support for running async benchmarks on each different async
+# These features enable built-in support for running async benchmarks on each different async
# runtime.
async_futures = ["futures/executor", "async"]
async_smol = ["smol", "async"]
@@ -82,7 +95,7 @@ cargo_bench_support = []
# This feature _currently_ does nothing, but in 0.4.0 it will be
# required in order to have Criterion.rs generate CSV files. This feature is deprecated in favor of
# cargo-criterion's --message-format=json option.
-csv_output = []
+csv_output = ["csv"]
[workspace]
exclude = ["cargo-criterion"]
diff --git a/METADATA b/METADATA
index 7700018..f28230b 100644
--- a/METADATA
+++ b/METADATA
@@ -1,3 +1,7 @@
+# This project was upgraded with external_updater.
+# Usage: tools/external_updater/updater.sh update rust/crates/criterion
+# For more info, check https://cs.android.com/android/platform/superproject/+/master:tools/external_updater/README.md
+
name: "criterion"
description: "Statistics-driven micro-benchmarking library"
third_party {
@@ -7,13 +11,13 @@ third_party {
}
url {
type: ARCHIVE
- value: "https://static.crates.io/crates/criterion/criterion-0.3.5.crate"
+ value: "https://static.crates.io/crates/criterion/criterion-0.4.0.crate"
}
- version: "0.3.5"
+ version: "0.4.0"
license_type: NOTICE
last_upgrade_date {
- year: 2021
- month: 8
- day: 9
+ year: 2023
+ month: 2
+ day: 15
}
}
diff --git a/README.md b/README.md
index 0d12aa9..a4dfc09 100755
--- a/README.md
+++ b/README.md
@@ -117,9 +117,9 @@ For more details, see the [CONTRIBUTING.md file](https://github.com/bheisler/cri
### Compatibility Policy
Criterion.<span></span>rs supports the last three stable minor releases of Rust. At time of
-writing, this means Rust 1.50 or later. Older versions may work, but are not guaranteed.
+writing, this means Rust 1.59 or later. Older versions may work, but are not guaranteed.
-Currently, the oldest version of Rust believed to work is 1.46. Future versions of Criterion.<span></span>rs may
+Currently, the oldest version of Rust believed to work is 1.57. Future versions of Criterion.<span></span>rs may
break support for such old versions, and this will not be considered a breaking change. If you
require Criterion.<span></span>rs to work on old versions of Rust, you will need to stick to a
specific patch version of Criterion.<span></span>rs.
diff --git a/benches/benchmarks/compare_functions.rs b/benches/benchmarks/compare_functions.rs
index ce44180..d9af837 100755
--- a/benches/benchmarks/compare_functions.rs
+++ b/benches/benchmarks/compare_functions.rs
@@ -1,6 +1,4 @@
-#![allow(deprecated)]
-
-use criterion::{criterion_group, BenchmarkId, Criterion, Fun, ParameterizedBenchmark};
+use criterion::{criterion_group, BenchmarkId, Criterion};
fn fibonacci_slow(n: u64) -> u64 {
match n {
@@ -27,23 +25,10 @@ fn fibonacci_fast(n: u64) -> u64 {
}
fn compare_fibonaccis(c: &mut Criterion) {
- let fib_slow = Fun::new("Recursive", |b, i| b.iter(|| fibonacci_slow(*i)));
- let fib_fast = Fun::new("Iterative", |b, i| b.iter(|| fibonacci_fast(*i)));
-
- let functions = vec![fib_slow, fib_fast];
+ let mut group = c.benchmark_group("Fibonacci");
- c.bench_functions("Fibonacci", functions, 20);
-}
-fn compare_fibonaccis_builder(c: &mut Criterion) {
- c.bench(
- "Fibonacci2",
- ParameterizedBenchmark::new(
- "Recursive",
- |b, i| b.iter(|| fibonacci_slow(*i)),
- vec![20u64, 21u64],
- )
- .with_function("Iterative", |b, i| b.iter(|| fibonacci_fast(*i))),
- );
+ group.bench_with_input("Recursive", &20, |b, i| b.iter(|| fibonacci_slow(*i)));
+ group.bench_with_input("Iterative", &20, |b, i| b.iter(|| fibonacci_fast(*i)));
}
fn compare_fibonaccis_group(c: &mut Criterion) {
let mut group = c.benchmark_group("Fibonacci3");
@@ -58,28 +43,4 @@ fn compare_fibonaccis_group(c: &mut Criterion) {
group.finish()
}
-fn compare_looped(c: &mut Criterion) {
- use criterion::black_box;
-
- c.bench(
- "small",
- ParameterizedBenchmark::new("unlooped", |b, i| b.iter(|| i + 10), vec![10]).with_function(
- "looped",
- |b, i| {
- b.iter(|| {
- for _ in 0..10_000 {
- black_box(i + 10);
- }
- })
- },
- ),
- );
-}
-
-criterion_group!(
- fibonaccis,
- compare_fibonaccis,
- compare_fibonaccis_builder,
- compare_fibonaccis_group,
- compare_looped
-);
+criterion_group!(fibonaccis, compare_fibonaccis, compare_fibonaccis_group,);
diff --git a/benches/benchmarks/custom_measurement.rs b/benches/benchmarks/custom_measurement.rs
index c685f38..449f903 100755
--- a/benches/benchmarks/custom_measurement.rs
+++ b/benches/benchmarks/custom_measurement.rs
@@ -14,7 +14,7 @@ impl ValueFormatter for HalfSecFormatter {
fn format_throughput(&self, throughput: &Throughput, value: f64) -> String {
match *throughput {
- Throughput::Bytes(bytes) => {
+ Throughput::Bytes(bytes) | Throughput::BytesDecimal(bytes) => {
format!("{} b/s/2", (bytes as f64) / (value * 2f64 * 10f64.powi(-9)))
}
Throughput::Elements(elems) => format!(
@@ -39,7 +39,7 @@ impl ValueFormatter for HalfSecFormatter {
values: &mut [f64],
) -> &'static str {
match *throughput {
- Throughput::Bytes(bytes) => {
+ Throughput::Bytes(bytes) | Throughput::BytesDecimal(bytes) => {
for val in values {
*val = (bytes as f64) / (*val * 2f64 * 10f64.powi(-9))
}
diff --git a/benches/benchmarks/external_process.rs b/benches/benchmarks/external_process.rs
index c823df5..7667a53 100755
--- a/benches/benchmarks/external_process.rs
+++ b/benches/benchmarks/external_process.rs
@@ -14,7 +14,6 @@ fn create_command() -> Command {
command
}
-#[allow(deprecated)]
fn python_fibonacci(c: &mut Criterion) {
let has_python3 = Command::new("python3")
.arg("--version")
diff --git a/benches/benchmarks/iter_with_large_drop.rs b/benches/benchmarks/iter_with_large_drop.rs
index ee9a8e9..ee01de0 100755
--- a/benches/benchmarks/iter_with_large_drop.rs
+++ b/benches/benchmarks/iter_with_large_drop.rs
@@ -1,28 +1,22 @@
-#![allow(deprecated)]
-
-use criterion::{criterion_group, Benchmark, Criterion, Throughput};
+use criterion::{criterion_group, Criterion, Throughput};
use std::time::Duration;
const SIZE: usize = 1024 * 1024;
fn large_drop(c: &mut Criterion) {
- c.bench(
- "iter_with_large_drop",
- Benchmark::new("large_drop", |b| {
- let v: Vec<_> = (0..SIZE).map(|i| i as u8).collect();
- b.iter_with_large_drop(|| v.clone());
- })
- .throughput(Throughput::Bytes(SIZE as u64)),
- );
+ let mut group = c.benchmark_group("iter_with_large_drop");
+ group.throughput(Throughput::Bytes(SIZE as u64));
+ group.bench_function("large_drop", |b| {
+ let v: Vec<_> = (0..SIZE).map(|i| i as u8).collect();
+ b.iter_with_large_drop(|| v.clone());
+ });
}
fn small_drop(c: &mut Criterion) {
- c.bench(
- "iter_with_large_drop",
- Benchmark::new("small_drop", |b| {
- b.iter_with_large_drop(|| SIZE);
- }),
- );
+ let mut group = c.benchmark_group("iter_with_large_drop");
+ group.bench_function("small_drop", |b| {
+ b.iter_with_large_drop(|| SIZE);
+ });
}
fn short_warmup() -> Criterion {
diff --git a/benches/benchmarks/iter_with_large_setup.rs b/benches/benchmarks/iter_with_large_setup.rs
index 217d271..9ff2b9d 100755
--- a/benches/benchmarks/iter_with_large_setup.rs
+++ b/benches/benchmarks/iter_with_large_setup.rs
@@ -1,32 +1,25 @@
-#![allow(deprecated)]
-
-use criterion::{criterion_group, Benchmark, Criterion, Throughput};
+use criterion::{criterion_group, BatchSize, Criterion, Throughput};
use std::time::Duration;
const SIZE: usize = 1024 * 1024;
fn large_setup(c: &mut Criterion) {
- c.bench(
- "iter_with_large_setup",
- Benchmark::new("large_setup", |b| {
- // NOTE: iter_with_large_setup is deprecated. Use iter_batched instead.
- b.iter_with_large_setup(
- || (0..SIZE).map(|i| i as u8).collect::<Vec<_>>(),
- |v| v.clone(),
- )
- })
- .throughput(Throughput::Bytes(SIZE as u64)),
- );
+ let mut group = c.benchmark_group("iter_with_large_setup");
+ group.throughput(Throughput::Bytes(SIZE as u64));
+ group.bench_function("large_setup", |b| {
+ b.iter_batched(
+ || (0..SIZE).map(|i| i as u8).collect::<Vec<_>>(),
+ |v| v,
+ BatchSize::NumBatches(1),
+ )
+ });
}
fn small_setup(c: &mut Criterion) {
- c.bench(
- "iter_with_large_setup",
- Benchmark::new("small_setup", |b| {
- // NOTE: iter_with_large_setup is deprecated. Use iter_batched instead.
- b.iter_with_large_setup(|| SIZE, |size| size)
- }),
- );
+ let mut group = c.benchmark_group("iter_with_large_setup");
+ group.bench_function("small_setup", |b| {
+ b.iter_batched(|| SIZE, |size| size, BatchSize::NumBatches(1))
+ });
}
fn short_warmup() -> Criterion {
diff --git a/benches/benchmarks/iter_with_setup.rs b/benches/benchmarks/iter_with_setup.rs
index 0f87063..e65495c 100755
--- a/benches/benchmarks/iter_with_setup.rs
+++ b/benches/benchmarks/iter_with_setup.rs
@@ -4,10 +4,7 @@ const SIZE: usize = 1024 * 1024;
fn setup(c: &mut Criterion) {
c.bench_function("iter_with_setup", |b| {
- b.iter_with_setup(
- || (0..SIZE).map(|i| i as u8).collect::<Vec<_>>(),
- |v| v.clone(),
- )
+ b.iter_with_setup(|| (0..SIZE).map(|i| i as u8).collect::<Vec<_>>(), |v| v)
});
}
diff --git a/benches/benchmarks/measurement_overhead.rs b/benches/benchmarks/measurement_overhead.rs
index 15b243d..c424efb 100755
--- a/benches/benchmarks/measurement_overhead.rs
+++ b/benches/benchmarks/measurement_overhead.rs
@@ -5,7 +5,7 @@ fn some_benchmark(c: &mut Criterion) {
group.bench_function("iter", |b| b.iter(|| 1));
group.bench_function("iter_with_setup", |b| b.iter_with_setup(|| (), |_| 1));
group.bench_function("iter_with_large_setup", |b| {
- b.iter_with_large_setup(|| (), |_| 1)
+ b.iter_batched(|| (), |_| 1, BatchSize::NumBatches(1))
});
group.bench_function("iter_with_large_drop", |b| b.iter_with_large_drop(|| 1));
group.bench_function("iter_batched_small_input", |b| {
diff --git a/benches/benchmarks/with_inputs.rs b/benches/benchmarks/with_inputs.rs
index 8eaaf00..b0b12a8 100755
--- a/benches/benchmarks/with_inputs.rs
+++ b/benches/benchmarks/with_inputs.rs
@@ -13,6 +13,15 @@ fn from_elem(c: &mut Criterion) {
});
}
group.finish();
+
+ let mut group = c.benchmark_group("from_elem_decimal");
+ for size in [KB, 2 * KB].iter() {
+ group.throughput(Throughput::BytesDecimal(*size as u64));
+ group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| {
+ b.iter(|| iter::repeat(0u8).take(size).collect::<Vec<_>>());
+ });
+ }
+ group.finish();
}
criterion_group!(benches, from_elem);
diff --git a/cargo2android.json b/cargo2android.json
index bf78496..b735e27 100644
--- a/cargo2android.json
+++ b/cargo2android.json
@@ -1,4 +1,5 @@
{
+ "patch": "patches/Android.bp.diff",
"device": true,
"run": true
-}
\ No newline at end of file
+}
diff --git a/patches/Android.bp.diff b/patches/Android.bp.diff
new file mode 100644
index 0000000..c93cf1e
--- /dev/null
+++ b/patches/Android.bp.diff
@@ -0,0 +1,13 @@
+diff --git a/Android.bp b/Android.bp
+index 4356b43..699cf37 100644
+--- a/Android.bp
++++ b/Android.bp
+@@ -56,7 +56,7 @@ rust_library {
+ "libatty",
+ "libcast",
+ "libciborium",
+- "libclap",
++ "libclap_3.2.23",
+ "libcriterion_plot",
+ "libitertools",
+ "liblazy_static",
diff --git a/src/analysis/mod.rs b/src/analysis/mod.rs
index 5d84bef..23647c1 100755
--- a/src/analysis/mod.rs
+++ b/src/analysis/mod.rs
@@ -26,7 +26,7 @@ macro_rules! elapsed {
info!(
"{} took {}",
$msg,
- crate::format::time(crate::DurationExt::to_nanos(elapsed) as f64)
+ crate::format::time(elapsed.as_nanos() as f64)
);
out
@@ -47,7 +47,7 @@ pub(crate) fn common<M: Measurement, T: ?Sized>(
) {
criterion.report.benchmark_start(id, report_context);
- if let Baseline::Compare = criterion.baseline {
+ if let Baseline::CompareStrict = criterion.baseline {
if !base_dir_exists(
id,
&criterion.baseline_directory,
@@ -128,7 +128,7 @@ pub(crate) fn common<M: Measurement, T: ?Sized>(
.collect::<Vec<f64>>();
let avg_times = Sample::new(&avg_times);
- if criterion.connection.is_none() && criterion.load_baseline.is_none() {
+ if criterion.should_save_baseline() {
log_if_err!({
let mut new_dir = criterion.output_directory.clone();
new_dir.push(id.as_directory_name());
@@ -139,7 +139,7 @@ pub(crate) fn common<M: Measurement, T: ?Sized>(
let data = Data::new(&iters, &times);
let labeled_sample = tukey::classify(avg_times);
- if criterion.connection.is_none() {
+ if criterion.should_save_baseline() {
log_if_err!({
let mut tukey_file = criterion.output_directory.to_owned();
tukey_file.push(id.as_directory_name());
@@ -156,7 +156,7 @@ pub(crate) fn common<M: Measurement, T: ?Sized>(
distributions.slope = Some(distribution);
}
- if criterion.connection.is_none() && criterion.load_baseline.is_none() {
+ if criterion.should_save_baseline() {
log_if_err!({
let mut sample_file = criterion.output_directory.clone();
sample_file.push(id.as_directory_name());
@@ -237,7 +237,7 @@ pub(crate) fn common<M: Measurement, T: ?Sized>(
criterion.measurement.formatter(),
);
- if criterion.connection.is_none() && criterion.load_baseline.is_none() {
+ if criterion.should_save_baseline() {
log_if_err!({
let mut benchmark_file = criterion.output_directory.clone();
benchmark_file.push(id.as_directory_name());
@@ -365,5 +365,6 @@ fn copy_new_dir_to_base(id: &str, baseline: &str, output_directory: &Path) {
&new_dir.join("benchmark.json"),
&base_dir.join("benchmark.json")
));
+ #[cfg(feature = "csv_output")]
try_else_return!(fs::cp(&new_dir.join("raw.csv"), &base_dir.join("raw.csv")));
}
diff --git a/src/bencher.rs b/src/bencher.rs
index fa9ad3d..016aa28 100755
--- a/src/bencher.rs
+++ b/src/bencher.rs
@@ -189,15 +189,6 @@ impl<'a, M: Measurement> Bencher<'a, M> {
self.iter_batched(|| (), |_| routine(), BatchSize::SmallInput);
}
- #[doc(hidden)]
- pub fn iter_with_large_setup<I, O, S, R>(&mut self, setup: S, routine: R)
- where
- S: FnMut() -> I,
- R: FnMut(I) -> O,
- {
- self.iter_batched(setup, routine, BatchSize::NumBatches(1));
- }
-
/// Times a `routine` that requires some input by generating a batch of input, then timing the
/// iteration of the benchmark over the input. See [`BatchSize`](enum.BatchSize.html) for
/// details on choosing the batch size. Use this when the routine must consume its input.
@@ -380,9 +371,10 @@ impl<'a, M: Measurement> Bencher<'a, M> {
// Benchmarks must actually call one of the iter methods. This causes benchmarks to fail loudly
// if they don't.
pub(crate) fn assert_iterated(&mut self) {
- if !self.iterated {
- panic!("Benchmark function must call Bencher::iter or related method.");
- }
+ assert!(
+ self.iterated,
+ "Benchmark function must call Bencher::iter or related method."
+ );
self.iterated = false;
}
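
The `iter_batched` documentation retained in the hunk above describes the pattern that now replaces the removed `iter_with_large_setup` helper. A minimal sketch of a consuming `iter_batched` benchmark follows; the `sorted_vec` benchmark name and sort routine are illustrative, not part of this commit:

    use criterion::{criterion_group, criterion_main, BatchSize, Criterion};

    fn sort_benchmark(c: &mut Criterion) {
        c.bench_function("sorted_vec", |b| {
            b.iter_batched(
                // Setup runs outside the measurement: build a fresh, unsorted input per batch.
                || (0..10_000u32).rev().collect::<Vec<_>>(),
                // Routine consumes its input; only this part is timed, and the returned
                // value is dropped outside the measurement.
                |mut v| {
                    v.sort_unstable();
                    v
                },
                BatchSize::SmallInput,
            )
        });
    }

    criterion_group!(benches, sort_benchmark);
    criterion_main!(benches);
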
diff --git a/src/benchmark.rs b/src/benchmark.rs
index bc7f610..3a1cb00 100755
--- a/src/benchmark.rs
+++ b/src/benchmark.rs
@@ -1,14 +1,4 @@
-#![allow(deprecated)]
-
-use crate::analysis;
-use crate::connection::OutgoingMessage;
-use crate::measurement::{Measurement, WallTime};
-use crate::report::{BenchmarkId, Report, ReportContext};
-use crate::routine::{Function, Routine};
-use crate::{Bencher, Criterion, DurationExt, Mode, PlotConfiguration, SamplingMode, Throughput};
-use std::cell::RefCell;
-use std::fmt::Debug;
-use std::marker::Sized;
+use crate::{PlotConfiguration, SamplingMode};
use std::time::Duration;
// TODO: Move the benchmark config stuff to a separate module for easier use.
@@ -23,10 +13,11 @@ pub struct BenchmarkConfig {
pub significance_level: f64,
pub warm_up_time: Duration,
pub sampling_mode: SamplingMode,
+ pub quick_mode: bool,
}
/// Struct representing a partially-complete per-benchmark configuration.
-#[derive(Clone)]
+#[derive(Clone, Default)]
pub(crate) struct PartialBenchmarkConfig {
pub(crate) confidence_level: Option<f64>,
pub(crate) measurement_time: Option<Duration>,
@@ -36,25 +27,10 @@ pub(crate) struct PartialBenchmarkConfig {
pub(crate) significance_level: Option<f64>,
pub(crate) warm_up_time: Option<Duration>,
pub(crate) sampling_mode: Option<SamplingMode>,
+ pub(crate) quick_mode: Option<bool>,
pub(crate) plot_config: PlotConfiguration,
}
-impl Default for PartialBenchmarkConfig {
- fn default() -> Self {
- PartialBenchmarkConfig {
- confidence_level: None,
- measurement_time: None,
- noise_threshold: None,
- nresamples: None,
- sample_size: None,
- significance_level: None,
- warm_up_time: None,
- plot_config: PlotConfiguration::default(),
- sampling_mode: None,
- }
- }
-}
-
impl PartialBenchmarkConfig {
pub(crate) fn to_complete(&self, defaults: &BenchmarkConfig) -> BenchmarkConfig {
BenchmarkConfig {
@@ -68,548 +44,7 @@ impl PartialBenchmarkConfig {
.unwrap_or(defaults.significance_level),
warm_up_time: self.warm_up_time.unwrap_or(defaults.warm_up_time),
sampling_mode: self.sampling_mode.unwrap_or(defaults.sampling_mode),
- }
- }
-}
-
-pub(crate) struct NamedRoutine<T, M: Measurement = WallTime> {
- pub id: String,
- pub(crate) f: Box<RefCell<dyn Routine<M, T>>>,
-}
-
-/// Structure representing a benchmark (or group of benchmarks)
-/// which take one parameter.
-#[doc(hidden)]
-#[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
-pub struct ParameterizedBenchmark<T: Debug, M: Measurement = WallTime> {
- config: PartialBenchmarkConfig,
- values: Vec<T>,
- routines: Vec<NamedRoutine<T, M>>,
- throughput: Option<Box<dyn Fn(&T) -> Throughput>>,
-}
-
-/// Structure representing a benchmark (or group of benchmarks)
-/// which takes no parameters.
-#[doc(hidden)]
-#[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
-pub struct Benchmark<M: Measurement = WallTime> {
- config: PartialBenchmarkConfig,
- routines: Vec<NamedRoutine<(), M>>,
- throughput: Option<Throughput>,
-}
-
-/// Common trait for `Benchmark` and `ParameterizedBenchmark`. Not intended to be
-/// used outside of Criterion.rs.
-#[doc(hidden)]
-pub trait BenchmarkDefinition<M: Measurement = WallTime>: Sized {
- #[doc(hidden)]
- fn run(self, group_id: &str, c: &mut Criterion<M>);
-}
-
-macro_rules! benchmark_config {
- ($type:tt) => {
- /// Changes the size of the sample for this benchmark
- ///
- /// A bigger sample should yield more accurate results if paired with a sufficiently large
- /// measurement time.
- ///
- /// Sample size must be at least 10.
- ///
- /// # Panics
- ///
- /// Panics if n < 10.
- pub fn sample_size(mut self, n: usize) -> Self {
- assert!(n >= 10);
-
- self.config.sample_size = Some(n);
- self
- }
-
- /// Changes the warm up time for this benchmark
- ///
- /// # Panics
- ///
- /// Panics if the input duration is zero
- pub fn warm_up_time(mut self, dur: Duration) -> Self {
- assert!(dur.to_nanos() > 0);
-
- self.config.warm_up_time = Some(dur);
- self
- }
-
- /// Changes the target measurement time for this benchmark. Criterion will attempt
- /// to spend approximately this amount of time measuring the benchmark.
- /// With a longer time, the measurement will become more resilient to transitory peak loads
- /// caused by external programs.
- ///
- /// # Panics
- ///
- /// Panics if the input duration in zero
- pub fn measurement_time(mut self, dur: Duration) -> Self {
- assert!(dur.to_nanos() > 0);
-
- self.config.measurement_time = Some(dur);
- self
- }
-
- /// Changes the number of resamples for this benchmark
- ///
- /// Number of resamples to use for the
- /// [bootstrap](http://en.wikipedia.org/wiki/Bootstrapping_(statistics)#Case_resampling)
- ///
- /// A larger number of resamples reduces the random sampling errors, which are inherent to the
- /// bootstrap method, but also increases the analysis time.
- ///
- /// # Panics
- ///
- /// Panics if the number of resamples is set to zero
- pub fn nresamples(mut self, n: usize) -> Self {
- assert!(n > 0);
- if n <= 1000 {
- println!("\nWarning: It is not recommended to reduce nresamples below 1000.");
- }
-
- self.config.nresamples = Some(n);
- self
- }
-
- /// Changes the default noise threshold for this benchmark. The noise threshold
- /// is used to filter out small changes in performance, even if they are statistically
- /// significant. Sometimes benchmarking the same code twice will result in small but
- /// statistically significant differences solely because of noise. This provides a way to filter
- /// out some of these false positives at the cost of making it harder to detect small changes
- /// to the true performance of the benchmark.
- ///
- /// The default is 0.01, meaning that changes smaller than 1% will be ignored.
- ///
- /// # Panics
- ///
- /// Panics if the threshold is set to a negative value
- pub fn noise_threshold(mut self, threshold: f64) -> Self {
- assert!(threshold >= 0.0);
-
- self.config.noise_threshold = Some(threshold);
- self
- }
-
- /// Changes the default confidence level for this benchmark. The confidence
- /// level is the desired probability that the true runtime lies within the estimated
- /// [confidence interval](https://en.wikipedia.org/wiki/Confidence_interval). The default is
- /// 0.95, meaning that the confidence interval should capture the true value 95% of the time.
- ///
- /// # Panics
- ///
- /// Panics if the confidence level is set to a value outside the `(0, 1)` range
- pub fn confidence_level(mut self, cl: f64) -> Self {
- assert!(cl > 0.0 && cl < 1.0);
- if cl < 0.5 {
- println!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
- }
-
- self.config.confidence_level = Some(cl);
- self
- }
-
- /// Changes the default [significance level](https://en.wikipedia.org/wiki/Statistical_significance)
- /// for this benchmark. This is used to perform a
- /// [hypothesis test](https://en.wikipedia.org/wiki/Statistical_hypothesis_testing) to see if
- /// the measurements from this run are different from the measured performance of the last run.
- /// The significance level is the desired probability that two measurements of identical code
- /// will be considered 'different' due to noise in the measurements. The default value is 0.05,
- /// meaning that approximately 5% of identical benchmarks will register as different due to
- /// noise.
- ///
- /// This presents a trade-off. By setting the significance level closer to 0.0, you can increase
- /// the statistical robustness against noise, but it also weakens Criterion.rs' ability to
- /// detect small but real changes in the performance. By setting the significance level
- /// closer to 1.0, Criterion.rs will be more able to detect small true changes, but will also
- /// report more spurious differences.
- ///
- /// See also the noise threshold setting.
- ///
- /// # Panics
- ///
- /// Panics if the significance level is set to a value outside the `(0, 1)` range
- pub fn significance_level(mut self, sl: f64) -> Self {
- assert!(sl > 0.0 && sl < 1.0);
-
- self.config.significance_level = Some(sl);
- self
- }
-
- /// Changes the plot configuration for this benchmark.
- pub fn plot_config(mut self, new_config: PlotConfiguration) -> Self {
- self.config.plot_config = new_config;
- self
- }
-
- /// Changes the sampling mode for this benchmark.
- pub fn sampling_mode(mut self, new_mode: SamplingMode) -> Self {
- self.config.sampling_mode = Some(new_mode);
- self
- }
- };
-}
-
-impl<M> Benchmark<M>
-where
- M: Measurement + 'static,
-{
- benchmark_config!(Benchmark);
-
- /// Create a new benchmark group and adds the given function to it.
- ///
- /// # Example
- ///
- /// ```rust
- /// # #[macro_use] extern crate criterion;
- /// # use criterion::*;
- ///
- /// fn bench(c: &mut Criterion) {
- /// // One-time setup goes here
- /// c.bench(
- /// "my_group",
- /// Benchmark::new("my_function", |b| b.iter(|| {
- /// // Code to benchmark goes here
- /// })),
- /// );
- /// }
- ///
- /// criterion_group!(benches, bench);
- /// criterion_main!(benches);
- /// ```
- pub fn new<S, F>(id: S, f: F) -> Benchmark<M>
- where
- S: Into<String>,
- F: FnMut(&mut Bencher<'_, M>) + 'static,
- {
- Benchmark {
- config: PartialBenchmarkConfig::default(),
- routines: vec![],
- throughput: None,
- }
- .with_function(id, f)
- }
-
- /// Add a function to the benchmark group.
- pub fn with_function<S, F>(mut self, id: S, mut f: F) -> Benchmark<M>
- where
- S: Into<String>,
- F: FnMut(&mut Bencher<'_, M>) + 'static,
- {
- let routine = NamedRoutine {
- id: id.into(),
- f: Box::new(RefCell::new(Function::new(move |b, _| f(b)))),
- };
- self.routines.push(routine);
- self
- }
-
- /// Set the input size for this benchmark group. Used for reporting the
- /// throughput.
- pub fn throughput(mut self, throughput: Throughput) -> Benchmark<M> {
- self.throughput = Some(throughput);
- self
- }
-}
-
-impl<M: Measurement> BenchmarkDefinition<M> for Benchmark<M> {
- fn run(self, group_id: &str, c: &mut Criterion<M>) {
- let report_context = ReportContext {
- output_directory: c.output_directory.clone(),
- plot_config: self.config.plot_config.clone(),
- };
-
- let config = self.config.to_complete(&c.config);
- let num_routines = self.routines.len();
-
- let mut all_ids = vec![];
- let mut any_matched = false;
-
- if let Some(conn) = &c.connection {
- conn.send(&OutgoingMessage::BeginningBenchmarkGroup { group: group_id })
- .unwrap();
- }
-
- for routine in self.routines {
- let function_id = if num_routines == 1 && group_id == routine.id {
- None
- } else {
- Some(routine.id)
- };
-
- let mut id = BenchmarkId::new(
- group_id.to_owned(),
- function_id,
- None,
- self.throughput.clone(),
- );
-
- id.ensure_directory_name_unique(&c.all_directories);
- c.all_directories.insert(id.as_directory_name().to_owned());
- id.ensure_title_unique(&c.all_titles);
- c.all_titles.insert(id.as_title().to_owned());
-
- let do_run = c.filter_matches(id.id());
- any_matched |= do_run;
-
- execute_benchmark(
- do_run,
- &id,
- c,
- &config,
- &mut *routine.f.borrow_mut(),
- &report_context,
- &(),
- self.throughput.clone(),
- );
-
- all_ids.push(id);
- }
-
- if let Some(conn) = &c.connection {
- conn.send(&OutgoingMessage::FinishedBenchmarkGroup { group: group_id })
- .unwrap();
- conn.serve_value_formatter(c.measurement.formatter())
- .unwrap();
- }
-
- if all_ids.len() > 1 && any_matched && c.mode.is_benchmark() {
- c.report
- .summarize(&report_context, &all_ids, c.measurement.formatter());
- }
- if any_matched {
- c.report.group_separator();
- }
- }
-}
-
-impl<T, M> ParameterizedBenchmark<T, M>
-where
- T: Debug + 'static,
- M: Measurement + 'static,
-{
- benchmark_config!(ParameterizedBenchmark);
-
- pub(crate) fn with_functions(
- functions: Vec<NamedRoutine<T, M>>,
- parameters: Vec<T>,
- ) -> ParameterizedBenchmark<T, M> {
- ParameterizedBenchmark {
- config: PartialBenchmarkConfig::default(),
- values: parameters,
- routines: functions,
- throughput: None,
- }
- }
-
- /// Create a new parameterized benchmark group and adds the given function
- /// to it.
- /// The function under test must follow the setup - bench - teardown pattern:
- ///
- /// # Example
- ///
- /// ```rust
- /// # #[macro_use] extern crate criterion;
- /// # use criterion::*;
- ///
- /// fn bench(c: &mut Criterion) {
- /// let parameters = vec![1u64, 2u64, 3u64];
- ///
- /// // One-time setup goes here
- /// c.bench(
- /// "my_group",
- /// ParameterizedBenchmark::new(
- /// "my_function",
- /// |b, param| b.iter(|| {
- /// // Code to benchmark using param goes here
- /// }),
- /// parameters
- /// )
- /// );
- /// }
- ///
- /// criterion_group!(benches, bench);
- /// criterion_main!(benches);
- /// ```
- pub fn new<S, F, I>(id: S, f: F, parameters: I) -> ParameterizedBenchmark<T, M>
- where
- S: Into<String>,
- F: FnMut(&mut Bencher<'_, M>, &T) + 'static,
- I: IntoIterator<Item = T>,
- {
- ParameterizedBenchmark {
- config: PartialBenchmarkConfig::default(),
- values: parameters.into_iter().collect(),
- routines: vec![],
- throughput: None,
- }
- .with_function(id, f)
- }
-
- /// Add a function to the benchmark group.
- pub fn with_function<S, F>(mut self, id: S, f: F) -> ParameterizedBenchmark<T, M>
- where
- S: Into<String>,
- F: FnMut(&mut Bencher<'_, M>, &T) + 'static,
- {
- let routine = NamedRoutine {
- id: id.into(),
- f: Box::new(RefCell::new(Function::new(f))),
- };
- self.routines.push(routine);
- self
- }
-
- /// Use the given function to calculate the input size for a given input.
- pub fn throughput<F>(mut self, throughput: F) -> ParameterizedBenchmark<T, M>
- where
- F: Fn(&T) -> Throughput + 'static,
- {
- self.throughput = Some(Box::new(throughput));
- self
- }
-}
-impl<T, M> BenchmarkDefinition<M> for ParameterizedBenchmark<T, M>
-where
- T: Debug + 'static,
- M: Measurement + 'static,
-{
- fn run(self, group_id: &str, c: &mut Criterion<M>) {
- let report_context = ReportContext {
- output_directory: c.output_directory.clone(),
- plot_config: self.config.plot_config.clone(),
- };
-
- let config = self.config.to_complete(&c.config);
- let num_parameters = self.values.len();
- let num_routines = self.routines.len();
-
- let mut all_ids = vec![];
- let mut any_matched = false;
-
- if let Some(conn) = &c.connection {
- conn.send(&OutgoingMessage::BeginningBenchmarkGroup { group: group_id })
- .unwrap();
- }
-
- for routine in self.routines {
- for value in &self.values {
- let function_id = if num_routines == 1 && group_id == routine.id {
- None
- } else {
- Some(routine.id.clone())
- };
-
- let value_str = if num_parameters == 1 {
- None
- } else {
- Some(format!("{:?}", value))
- };
-
- let throughput = self.throughput.as_ref().map(|func| func(value));
- let mut id = BenchmarkId::new(
- group_id.to_owned(),
- function_id,
- value_str,
- throughput.clone(),
- );
-
- id.ensure_directory_name_unique(&c.all_directories);
- c.all_directories.insert(id.as_directory_name().to_owned());
- id.ensure_title_unique(&c.all_titles);
- c.all_titles.insert(id.as_title().to_owned());
-
- let do_run = c.filter_matches(id.id());
- any_matched |= do_run;
-
- execute_benchmark(
- do_run,
- &id,
- c,
- &config,
- &mut *routine.f.borrow_mut(),
- &report_context,
- value,
- throughput,
- );
-
- all_ids.push(id);
- }
- }
-
- if let Some(conn) = &c.connection {
- conn.send(&OutgoingMessage::FinishedBenchmarkGroup { group: group_id })
- .unwrap();
- conn.serve_value_formatter(c.measurement.formatter())
- .unwrap();
- }
-
- if all_ids.len() > 1 && any_matched && c.mode.is_benchmark() {
- c.report
- .summarize(&report_context, &all_ids, c.measurement.formatter());
- }
- if any_matched {
- c.report.group_separator();
- }
- }
-}
-
-#[cfg_attr(feature = "cargo-clippy", allow(clippy::too_many_arguments))]
-fn execute_benchmark<T, M>(
- do_run: bool,
- id: &BenchmarkId,
- c: &Criterion<M>,
- config: &BenchmarkConfig,
- routine: &mut dyn Routine<M, T>,
- report_context: &ReportContext,
- parameter: &T,
- throughput: Option<Throughput>,
-) where
- T: Debug,
- M: Measurement,
-{
- match c.mode {
- Mode::Benchmark => {
- if let Some(conn) = &c.connection {
- if do_run {
- conn.send(&OutgoingMessage::BeginningBenchmark { id: id.into() })
- .unwrap();
- } else {
- conn.send(&OutgoingMessage::SkippingBenchmark { id: id.into() })
- .unwrap();
- }
- }
-
- if do_run {
- analysis::common(
- id,
- routine,
- config,
- c,
- report_context,
- parameter,
- throughput,
- );
- }
- }
- Mode::List => {
- if do_run {
- println!("{}: bench", id);
- }
- }
- Mode::Test => {
- if do_run {
- // In test mode, run the benchmark exactly once, then exit.
- c.report.test_start(id, report_context);
- routine.test(&c.measurement, parameter);
- c.report.test_pass(id, report_context);
- }
- }
- Mode::Profile(duration) => {
- if do_run {
- routine.profile(&c.measurement, id, c, report_context, duration, parameter);
- }
+ quick_mode: self.quick_mode.unwrap_or(defaults.quick_mode),
}
}
}
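
The deletions above remove the long-deprecated `Benchmark` and `ParameterizedBenchmark` builders (and the `benchmark_config!` macro) from src/benchmark.rs; what survives is the config plumbing, now threading the new `quick_mode` field through `to_complete`. As a rough migration sketch — not part of this change, using only the `BenchmarkGroup` API that remains in 0.4 — the same compare-over-inputs pattern looks like this:

```rust
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};

fn bench(c: &mut Criterion) {
    // One benchmark group replaces ParameterizedBenchmark::new(..).with_function(..).
    let mut group = c.benchmark_group("my_group");
    group.sample_size(50); // per-group config, formerly the benchmark_config! methods
    for size in [1024usize, 2048, 4096] {
        group.bench_with_input(BenchmarkId::new("from_elem", size), &size, |b, &s| {
            b.iter(|| vec![0u8; s]);
        });
    }
    group.finish();
}

criterion_group!(benches, bench);
criterion_main!(benches);
```
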
diff --git a/src/benchmark_group.rs b/src/benchmark_group.rs
index 723d01e..5ca0ab3 100755
--- a/src/benchmark_group.rs
+++ b/src/benchmark_group.rs
@@ -6,7 +6,7 @@ use crate::report::BenchmarkId as InternalBenchmarkId;
use crate::report::Report;
use crate::report::ReportContext;
use crate::routine::{Function, Routine};
-use crate::{Bencher, Criterion, DurationExt, Mode, PlotConfiguration, SamplingMode, Throughput};
+use crate::{Bencher, Criterion, Mode, PlotConfiguration, SamplingMode, Throughput};
use std::time::Duration;
/// Structure used to group together a set of related benchmarks, along with custom configuration
@@ -107,7 +107,7 @@ impl<'a, M: Measurement> BenchmarkGroup<'a, M> {
///
/// Panics if the input duration is zero
pub fn warm_up_time(&mut self, dur: Duration) -> &mut Self {
- assert!(dur.to_nanos() > 0);
+ assert!(dur.as_nanos() > 0);
self.partial_config.warm_up_time = Some(dur);
self
@@ -125,7 +125,7 @@ impl<'a, M: Measurement> BenchmarkGroup<'a, M> {
///
/// Panics if the input duration is zero
pub fn measurement_time(&mut self, dur: Duration) -> &mut Self {
- assert!(dur.to_nanos() > 0);
+ assert!(dur.as_nanos() > 0);
self.partial_config.measurement_time = Some(dur);
self
@@ -145,7 +145,7 @@ impl<'a, M: Measurement> BenchmarkGroup<'a, M> {
pub fn nresamples(&mut self, n: usize) -> &mut Self {
assert!(n > 0);
if n <= 1000 {
- println!("\nWarning: It is not recommended to reduce nresamples below 1000.");
+ eprintln!("\nWarning: It is not recommended to reduce nresamples below 1000.");
}
self.partial_config.nresamples = Some(n);
@@ -182,7 +182,7 @@ impl<'a, M: Measurement> BenchmarkGroup<'a, M> {
pub fn confidence_level(&mut self, cl: f64) -> &mut Self {
assert!(cl > 0.0 && cl < 1.0);
if cl < 0.5 {
- println!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
+ eprintln!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
}
self.partial_config.confidence_level = Some(cl);
@@ -290,7 +290,8 @@ impl<'a, M: Measurement> BenchmarkGroup<'a, M> {
assert!(
!self.all_ids.contains(&id),
- "Benchmark IDs must be unique within a group."
+ "Benchmark IDs must be unique within a group. Encountered duplicated benchmark ID {}",
+ &id
);
id.ensure_directory_name_unique(&self.criterion.all_directories);
@@ -304,7 +305,7 @@ impl<'a, M: Measurement> BenchmarkGroup<'a, M> {
self.any_matched |= do_run;
let mut func = Function::new(f);
- match self.criterion.mode {
+ match &self.criterion.mode {
Mode::Benchmark => {
if let Some(conn) = &self.criterion.connection {
if do_run {
@@ -340,7 +341,7 @@ impl<'a, M: Measurement> BenchmarkGroup<'a, M> {
self.criterion.report.test_pass(&id, &report_context);
}
}
- Mode::Profile(duration) => {
+ &Mode::Profile(duration) => {
if do_run {
func.profile(
&self.criterion.measurement,
@@ -486,9 +487,10 @@ impl IntoBenchmarkId for BenchmarkId {
impl<S: Into<String>> IntoBenchmarkId for S {
fn into_benchmark_id(self) -> BenchmarkId {
let function_name = self.into();
- if function_name.is_empty() {
- panic!("Function name must not be empty.");
- }
+ assert!(
+ !function_name.is_empty(),
+ "Function name must not be empty."
+ );
BenchmarkId {
function_name: Some(function_name),
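
The benchmark_group.rs changes are mechanical: `DurationExt::to_nanos` becomes std's `Duration::as_nanos`, warnings move to stderr, and the duplicate-ID panic now names the offending ID. A small sketch (not from this diff) of a group exercising the duration-based setters these asserts guard:

```rust
use criterion::{black_box, Criterion};
use std::time::Duration;

fn configured_group(c: &mut Criterion) {
    let mut group = c.benchmark_group("configured");
    // Both setters still panic on a zero duration; the check now simply uses
    // Duration::as_nanos since the DurationExt helper is gone.
    group.warm_up_time(Duration::from_millis(500));
    group.measurement_time(Duration::from_secs(2));
    group.bench_function("noop", |b| b.iter(|| black_box(0)));
    group.finish();
}
```
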
diff --git a/src/connection.rs b/src/connection.rs
index 3dd7b64..84cb8fc 100755
--- a/src/connection.rs
+++ b/src/connection.rs
@@ -8,28 +8,39 @@ use std::net::TcpStream;
#[derive(Debug)]
pub enum MessageError {
- SerializationError(serde_cbor::Error),
- IoError(std::io::Error),
+ Deserialization(ciborium::de::Error<std::io::Error>),
+ Serialization(ciborium::ser::Error<std::io::Error>),
+ Io(std::io::Error),
}
-impl From<serde_cbor::Error> for MessageError {
- fn from(other: serde_cbor::Error) -> Self {
- MessageError::SerializationError(other)
+impl From<ciborium::de::Error<std::io::Error>> for MessageError {
+ fn from(other: ciborium::de::Error<std::io::Error>) -> Self {
+ MessageError::Deserialization(other)
+ }
+}
+impl From<ciborium::ser::Error<std::io::Error>> for MessageError {
+ fn from(other: ciborium::ser::Error<std::io::Error>) -> Self {
+ MessageError::Serialization(other)
}
}
impl From<std::io::Error> for MessageError {
fn from(other: std::io::Error) -> Self {
- MessageError::IoError(other)
+ MessageError::Io(other)
}
}
impl std::fmt::Display for MessageError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
- MessageError::SerializationError(error) => write!(
+ MessageError::Deserialization(error) => write!(
f,
- "Failed to serialize or deserialize message to Criterion.rs benchmark:\n{}",
+ "Failed to deserialize message to Criterion.rs benchmark:\n{}",
error
),
- MessageError::IoError(error) => write!(
+ MessageError::Serialization(error) => write!(
+ f,
+ "Failed to serialize message to Criterion.rs benchmark:\n{}",
+ error
+ ),
+ MessageError::Io(error) => write!(
f,
"Failed to read or write message to Criterion.rs benchmark:\n{}",
error
@@ -40,8 +51,9 @@ impl std::fmt::Display for MessageError {
impl std::error::Error for MessageError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
- MessageError::SerializationError(err) => Some(err),
- MessageError::IoError(err) => Some(err),
+ MessageError::Deserialization(err) => Some(err),
+ MessageError::Serialization(err) => Some(err),
+ MessageError::Io(err) => Some(err),
}
}
}
@@ -64,16 +76,19 @@ struct InnerConnection {
socket: TcpStream,
receive_buffer: Vec<u8>,
send_buffer: Vec<u8>,
- runner_version: [u8; 3],
+ // runner_version: [u8; 3],
}
impl InnerConnection {
pub fn new(mut socket: TcpStream) -> Result<Self, std::io::Error> {
// read the runner-hello
let mut hello_buf = [0u8; RUNNER_HELLO_SIZE];
socket.read_exact(&mut hello_buf)?;
- if &hello_buf[0..RUNNER_MAGIC_NUMBER.len()] != RUNNER_MAGIC_NUMBER.as_bytes() {
- panic!("Not connected to cargo-criterion.");
- }
+ assert_eq!(
+ &hello_buf[0..RUNNER_MAGIC_NUMBER.len()],
+ RUNNER_MAGIC_NUMBER.as_bytes(),
+ "Not connected to cargo-criterion."
+ );
+
let i = RUNNER_MAGIC_NUMBER.len();
let runner_version = [hello_buf[i], hello_buf[i + 1], hello_buf[i + 2]];
@@ -98,7 +113,7 @@ impl InnerConnection {
socket,
receive_buffer: vec![],
send_buffer: vec![],
- runner_version,
+ // runner_version,
})
}
@@ -109,13 +124,13 @@ impl InnerConnection {
let length = u32::from_be_bytes(length_buf);
self.receive_buffer.resize(length as usize, 0u8);
self.socket.read_exact(&mut self.receive_buffer)?;
- let value = serde_cbor::from_slice(&self.receive_buffer)?;
+ let value = ciborium::de::from_reader(&self.receive_buffer[..])?;
Ok(value)
}
pub fn send(&mut self, message: &OutgoingMessage) -> Result<(), MessageError> {
self.send_buffer.truncate(0);
- serde_cbor::to_writer(&mut self.send_buffer, message)?;
+ ciborium::ser::into_writer(message, &mut self.send_buffer)?;
let size = u32::try_from(self.send_buffer.len()).unwrap();
let length_buf = size.to_be_bytes();
self.socket.write_all(&length_buf)?;
@@ -355,7 +370,7 @@ impl From<&crate::benchmark::BenchmarkConfig> for BenchmarkConfig {
}
/// Currently not used; defined for forwards compatibility with cargo-criterion.
-#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum SamplingMethod {
Linear,
Flat,
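
connection.rs swaps serde_cbor for ciborium and splits the old `SerializationError` variant into separate `Serialization` and `Deserialization` errors. A minimal round-trip sketch of the two ciborium calls used above, with a hypothetical `Message` type standing in for the real `IncomingMessage`/`OutgoingMessage`:

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct Message {
    group: String,
}

fn roundtrip() -> Result<(), Box<dyn std::error::Error>> {
    let msg = Message { group: "my_group".to_owned() };

    // Serialize into a byte buffer, as InnerConnection::send does.
    let mut buf = Vec::new();
    ciborium::ser::into_writer(&msg, &mut buf)?;

    // Deserialize from a byte slice, as InnerConnection::recv does.
    let back: Message = ciborium::de::from_reader(buf.as_slice())?;
    assert_eq!(msg, back);
    Ok(())
}
```
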
diff --git a/src/error.rs b/src/error.rs
index 9b7eb17..459a716 100755
--- a/src/error.rs
+++ b/src/error.rs
@@ -1,3 +1,4 @@
+#[cfg(feature = "csv_output")]
use csv::Error as CsvError;
use serde_json::Error as SerdeError;
use std::error::Error as StdError;
@@ -21,6 +22,8 @@ pub enum Error {
path: PathBuf,
inner: SerdeError,
},
+ #[cfg(feature = "csv_output")]
+ /// This API requires the following crate features to be activated: csv_output
CsvError(CsvError),
}
impl fmt::Display for Error {
@@ -37,6 +40,7 @@ impl fmt::Display for Error {
"Failed to read or write file {:?} due to serialization error: {}",
path, inner
),
+ #[cfg(feature = "csv_output")]
Error::CsvError(inner) => write!(f, "CSV error: {}", inner),
}
}
@@ -47,6 +51,7 @@ impl StdError for Error {
Error::AccessError { .. } => "AccessError",
Error::CopyError { .. } => "CopyError",
Error::SerdeError { .. } => "SerdeError",
+ #[cfg(feature = "csv_output")]
Error::CsvError(_) => "CsvError",
}
}
@@ -56,10 +61,13 @@ impl StdError for Error {
Error::AccessError { inner, .. } => Some(inner),
Error::CopyError { inner, .. } => Some(inner),
Error::SerdeError { inner, .. } => Some(inner),
+ #[cfg(feature = "csv_output")]
Error::CsvError(inner) => Some(inner),
}
}
}
+
+#[cfg(feature = "csv_output")]
impl From<CsvError> for Error {
fn from(other: CsvError) -> Error {
Error::CsvError(other)
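
error.rs gates everything CSV-related behind the new `csv_output` cargo feature, so the `csv` crate is only linked when that feature is enabled. A generic sketch of the same gating pattern (illustrative crate-local names, not this file):

```rust
// The variant, every match arm that touches it, and the From impl all carry
// the same cfg attribute, so the dependency disappears entirely when the
// feature is off.
#[derive(Debug)]
pub enum Error {
    Io(std::io::Error),
    #[cfg(feature = "csv_output")]
    Csv(csv::Error),
}

#[cfg(feature = "csv_output")]
impl From<csv::Error> for Error {
    fn from(other: csv::Error) -> Error {
        Error::Csv(other)
    }
}
```
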
diff --git a/src/format.rs b/src/format.rs
index 984f0f5..53c4a4d 100755
--- a/src/format.rs
+++ b/src/format.rs
@@ -12,7 +12,7 @@ pub fn time(ns: f64) -> String {
} else if ns < 10f64.powi(3) {
format!("{:>6} ns", short(ns))
} else if ns < 10f64.powi(6) {
- format!("{:>6} us", short(ns / 1e3))
+ format!("{:>6} µs", short(ns / 1e3))
} else if ns < 10f64.powi(9) {
format!("{:>6} ms", short(ns / 1e6))
} else {
diff --git a/src/lib.rs b/src/lib.rs
index 98dcf1e..16e79cc 100755
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -27,13 +27,15 @@
)
)]
+#[cfg(all(feature = "rayon", target_arch = "wasm32"))]
+compile_error!("Rayon cannot be used when targeting wasi32. Try disabling default features.");
+
#[cfg(test)]
extern crate approx;
#[cfg(test)]
extern crate quickcheck;
-use clap::value_t;
use regex::Regex;
#[macro_use]
@@ -57,6 +59,7 @@ mod benchmark_group;
pub mod async_executor;
mod bencher;
mod connection;
+#[cfg(feature = "csv_output")]
mod csv_report;
mod error;
mod estimate;
@@ -76,9 +79,6 @@ use std::cell::RefCell;
use std::collections::HashSet;
use std::default::Default;
use std::env;
-use std::fmt;
-use std::iter::IntoIterator;
-use std::marker::PhantomData;
use std::net::TcpStream;
use std::path::{Path, PathBuf};
use std::process::Command;
@@ -88,40 +88,41 @@ use std::time::Duration;
use criterion_plot::{Version, VersionError};
use crate::benchmark::BenchmarkConfig;
-use crate::benchmark::NamedRoutine;
use crate::connection::Connection;
use crate::connection::OutgoingMessage;
-use crate::csv_report::FileCsvReport;
use crate::html::Html;
use crate::measurement::{Measurement, WallTime};
-use crate::plot::{Gnuplot, Plotter, PlottersBackend};
+#[cfg(feature = "plotters")]
+use crate::plot::PlottersBackend;
+use crate::plot::{Gnuplot, Plotter};
use crate::profiler::{ExternalProfiler, Profiler};
-use crate::report::{BencherReport, CliReport, Report, ReportContext, Reports};
-use crate::routine::Function;
+use crate::report::{BencherReport, CliReport, CliVerbosity, Report, ReportContext, Reports};
#[cfg(feature = "async")]
pub use crate::bencher::AsyncBencher;
pub use crate::bencher::Bencher;
-#[allow(deprecated)]
-pub use crate::benchmark::{Benchmark, BenchmarkDefinition, ParameterizedBenchmark};
pub use crate::benchmark_group::{BenchmarkGroup, BenchmarkId};
lazy_static! {
static ref DEBUG_ENABLED: bool = std::env::var_os("CRITERION_DEBUG").is_some();
static ref GNUPLOT_VERSION: Result<Version, VersionError> = criterion_plot::version();
static ref DEFAULT_PLOTTING_BACKEND: PlottingBackend = {
- match &*GNUPLOT_VERSION {
- Ok(_) => PlottingBackend::Gnuplot,
- Err(e) => {
- match e {
- VersionError::Exec(_) => println!("Gnuplot not found, using plotters backend"),
- e => println!(
- "Gnuplot not found or not usable, using plotters backend\n{}",
- e
- ),
- };
- PlottingBackend::Plotters
+ if cfg!(feature = "html_reports") {
+ match &*GNUPLOT_VERSION {
+ Ok(_) => PlottingBackend::Gnuplot,
+ Err(e) => {
+ match e {
+ VersionError::Exec(_) => eprintln!("Gnuplot not found, using plotters backend"),
+ e => eprintln!(
+ "Gnuplot not found or not usable, using plotters backend\n{}",
+ e
+ ),
+ };
+ PlottingBackend::Plotters
+ }
}
+ } else {
+ PlottingBackend::None
}
};
static ref CARGO_CRITERION_CONNECTION: Option<Mutex<Connection>> = {
@@ -177,36 +178,6 @@ pub fn black_box<T>(dummy: T) -> T {
}
}
-/// Representing a function to benchmark together with a name of that function.
-/// Used together with `bench_functions` to represent one out of multiple functions
-/// under benchmark.
-#[doc(hidden)]
-pub struct Fun<I: fmt::Debug, M: Measurement + 'static = WallTime> {
- f: NamedRoutine<I, M>,
- _phantom: PhantomData<M>,
-}
-
-impl<I, M: Measurement> Fun<I, M>
-where
- I: fmt::Debug + 'static,
-{
- /// Create a new `Fun` given a name and a closure
- pub fn new<F>(name: &str, f: F) -> Fun<I, M>
- where
- F: FnMut(&mut Bencher<'_, M>, &I) + 'static,
- {
- let routine = NamedRoutine {
- id: name.to_owned(),
- f: Box::new(RefCell::new(Function::new(f))),
- };
-
- Fun {
- f: routine,
- _phantom: PhantomData,
- }
- }
-}
-
/// Argument to [`Bencher::iter_batched`](struct.Bencher.html#method.iter_batched) and
/// [`Bencher::iter_batched_ref`](struct.Bencher.html#method.iter_batched_ref) which controls the
/// batch size.
@@ -296,12 +267,17 @@ impl BatchSize {
/// Baseline describes how the baseline_directory is handled.
#[derive(Debug, Clone, Copy)]
pub enum Baseline {
- /// Compare ensures a previous saved version of the baseline
- /// exists and runs comparison against that.
- Compare,
+ /// CompareLenient compares against a previous saved version of the baseline.
+ /// If a previous baseline does not exist, the benchmark is run as normal but no comparison occurs.
+ CompareLenient,
+ /// CompareStrict compares against a previous saved version of the baseline.
+ /// If a previous baseline does not exist, a panic occurs.
+ CompareStrict,
/// Save writes the benchmark results to the baseline directory,
/// overwriting any results that were previously there.
Save,
+ /// Discard benchmark results.
+ Discard,
}
/// Enum used to select the plotting backend.
@@ -313,12 +289,18 @@ pub enum PlottingBackend {
/// Plotting backend which uses the rust 'Plotters' library. This is the default if `gnuplot`
/// is not installed.
Plotters,
+ /// Null plotting backend which outputs nothing,
+ None,
}
impl PlottingBackend {
- fn create_plotter(&self) -> Box<dyn Plotter> {
+ fn create_plotter(&self) -> Option<Box<dyn Plotter>> {
match self {
- PlottingBackend::Gnuplot => Box::new(Gnuplot::default()),
- PlottingBackend::Plotters => Box::new(PlottersBackend::default()),
+ PlottingBackend::Gnuplot => Some(Box::new(Gnuplot::default())),
+ #[cfg(feature = "plotters")]
+ PlottingBackend::Plotters => Some(Box::new(PlottersBackend::default())),
+ #[cfg(not(feature = "plotters"))]
+ PlottingBackend::Plotters => panic!("Criterion was built without plotters support."),
+ PlottingBackend::None => None,
}
}
}
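
With `create_plotter` now returning an `Option`, plotting can be switched off entirely: the new `PlottingBackend::None` variant and the `plotters` feature gate make the report plotting machinery optional. A sketch (not from this diff) of opting out programmatically, equivalent in effect to `--noplot`:

```rust
use criterion::{Criterion, PlottingBackend};

fn no_plots() -> Criterion {
    // PlottingBackend::None disables plot generation without requiring gnuplot
    // or the plotters feature.
    Criterion::default().plotting_backend(PlottingBackend::None)
}
```
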
@@ -406,25 +388,24 @@ impl Default for Criterion {
fn default() -> Criterion {
let reports = Reports {
cli_enabled: true,
- cli: CliReport::new(false, false, false),
+ cli: CliReport::new(false, false, CliVerbosity::Normal),
bencher_enabled: false,
bencher: BencherReport,
- html_enabled: true,
- html: Html::new(DEFAULT_PLOTTING_BACKEND.create_plotter()),
- csv_enabled: true,
- csv: FileCsvReport,
+ html: DEFAULT_PLOTTING_BACKEND.create_plotter().map(Html::new),
+ csv_enabled: cfg!(feature = "csv_output"),
};
let mut criterion = Criterion {
config: BenchmarkConfig {
confidence_level: 0.95,
- measurement_time: Duration::new(5, 0),
+ measurement_time: Duration::from_secs(5),
noise_threshold: 0.01,
nresamples: 100_000,
sample_size: 100,
significance_level: 0.05,
- warm_up_time: Duration::new(3, 0),
+ warm_up_time: Duration::from_secs(3),
sampling_mode: SamplingMode::Auto,
+ quick_mode: false,
},
filter: None,
report: reports,
@@ -447,7 +428,7 @@ impl Default for Criterion {
criterion.report.cli_enabled = false;
criterion.report.bencher_enabled = false;
criterion.report.csv_enabled = false;
- criterion.report.html_enabled = false;
+ criterion.report.html = None;
}
criterion
}
@@ -475,6 +456,7 @@ impl<M: Measurement> Criterion<M> {
}
}
+ #[must_use]
/// Changes the internal profiler for benchmarks run with this runner. See
/// the Profiler trait for more details.
pub fn with_profiler<P: Profiler + 'static>(self, p: P) -> Criterion<M> {
@@ -484,21 +466,26 @@ impl<M: Measurement> Criterion<M> {
}
}
+ #[must_use]
/// Set the plotting backend. By default, Criterion will use gnuplot if available, or plotters
/// if not.
///
/// Panics if `backend` is `PlottingBackend::Gnuplot` and gnuplot is not available.
pub fn plotting_backend(mut self, backend: PlottingBackend) -> Criterion<M> {
if let PlottingBackend::Gnuplot = backend {
- if GNUPLOT_VERSION.is_err() {
- panic!("Gnuplot plotting backend was requested, but gnuplot is not available. To continue, either install Gnuplot or allow Criterion.rs to fall back to using plotters.");
- }
+ assert!(
+ !GNUPLOT_VERSION.is_err(),
+ "Gnuplot plotting backend was requested, but gnuplot is not available. \
+ To continue, either install Gnuplot or allow Criterion.rs to fall back \
+ to using plotters."
+ );
}
- self.report.html = Html::new(backend.create_plotter());
+ self.report.html = backend.create_plotter().map(Html::new);
self
}
+ #[must_use]
/// Changes the default size of the sample for benchmarks run with this runner.
///
/// A bigger sample should yield more accurate results if paired with a sufficiently large
@@ -516,18 +503,20 @@ impl<M: Measurement> Criterion<M> {
self
}
+ #[must_use]
/// Changes the default warm up time for benchmarks run with this runner.
///
/// # Panics
///
/// Panics if the input duration is zero
pub fn warm_up_time(mut self, dur: Duration) -> Criterion<M> {
- assert!(dur.to_nanos() > 0);
+ assert!(dur.as_nanos() > 0);
self.config.warm_up_time = dur;
self
}
+ #[must_use]
/// Changes the default measurement time for benchmarks run with this runner.
///
/// With a longer time, the measurement will become more resilient to transitory peak loads
@@ -539,12 +528,13 @@ impl<M: Measurement> Criterion<M> {
///
    /// Panics if the input duration is zero
pub fn measurement_time(mut self, dur: Duration) -> Criterion<M> {
- assert!(dur.to_nanos() > 0);
+ assert!(dur.as_nanos() > 0);
self.config.measurement_time = dur;
self
}
+ #[must_use]
/// Changes the default number of resamples for benchmarks run with this runner.
///
/// Number of resamples to use for the
@@ -559,13 +549,14 @@ impl<M: Measurement> Criterion<M> {
pub fn nresamples(mut self, n: usize) -> Criterion<M> {
assert!(n > 0);
if n <= 1000 {
- println!("\nWarning: It is not recommended to reduce nresamples below 1000.");
+ eprintln!("\nWarning: It is not recommended to reduce nresamples below 1000.");
}
self.config.nresamples = n;
self
}
+ #[must_use]
/// Changes the default noise threshold for benchmarks run with this runner. The noise threshold
/// is used to filter out small changes in performance, even if they are statistically
/// significant. Sometimes benchmarking the same code twice will result in small but
@@ -585,6 +576,7 @@ impl<M: Measurement> Criterion<M> {
self
}
+ #[must_use]
/// Changes the default confidence level for benchmarks run with this runner. The confidence
/// level is the desired probability that the true runtime lies within the estimated
/// [confidence interval](https://en.wikipedia.org/wiki/Confidence_interval). The default is
@@ -596,13 +588,14 @@ impl<M: Measurement> Criterion<M> {
pub fn confidence_level(mut self, cl: f64) -> Criterion<M> {
assert!(cl > 0.0 && cl < 1.0);
if cl < 0.5 {
- println!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
+ eprintln!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
}
self.config.confidence_level = cl;
self
}
+ #[must_use]
/// Changes the default [significance level](https://en.wikipedia.org/wiki/Statistical_significance)
/// for benchmarks run with this runner. This is used to perform a
/// [hypothesis test](https://en.wikipedia.org/wiki/Statistical_hypothesis_testing) to see if
@@ -630,32 +623,29 @@ impl<M: Measurement> Criterion<M> {
self
}
+ #[must_use]
/// Enables plotting
pub fn with_plots(mut self) -> Criterion<M> {
// If running under cargo-criterion then don't re-enable the reports; let it do the reporting.
- if self.connection.is_none() {
- self.report.html_enabled = true;
+ if self.connection.is_none() && self.report.html.is_none() {
+ let default_backend = DEFAULT_PLOTTING_BACKEND.create_plotter();
+ if let Some(backend) = default_backend {
+ self.report.html = Some(Html::new(backend));
+ } else {
+ panic!("Cannot find a default plotting backend!");
+ }
}
self
}
+ #[must_use]
/// Disables plotting
pub fn without_plots(mut self) -> Criterion<M> {
- self.report.html_enabled = false;
+ self.report.html = None;
self
}
- /// Return true if generation of the plots is possible.
- #[deprecated(
- since = "0.3.4",
- note = "No longer useful; since the plotters backend is available Criterion.rs can always generate plots"
- )]
- pub fn can_plot(&self) -> bool {
- // Trivially true now that we have plotters.
- // TODO: Deprecate and remove this.
- true
- }
-
+ #[must_use]
/// Names an explicit baseline and enables overwriting the previous results.
pub fn save_baseline(mut self, baseline: String) -> Criterion<M> {
self.baseline_directory = baseline;
@@ -663,13 +653,19 @@ impl<M: Measurement> Criterion<M> {
self
}
+ #[must_use]
/// Names an explicit baseline and disables overwriting the previous results.
- pub fn retain_baseline(mut self, baseline: String) -> Criterion<M> {
+ pub fn retain_baseline(mut self, baseline: String, strict: bool) -> Criterion<M> {
self.baseline_directory = baseline;
- self.baseline = Baseline::Compare;
+ self.baseline = if strict {
+ Baseline::CompareStrict
+ } else {
+ Baseline::CompareLenient
+ };
self
}
+ #[must_use]
/// Filters the benchmarks. Only benchmarks with names that contain the
/// given string will be executed.
pub fn with_filter<S: Into<String>>(mut self, filter: S) -> Criterion<M> {
@@ -685,6 +681,7 @@ impl<M: Measurement> Criterion<M> {
self
}
+ #[must_use]
/// Override whether the CLI output will be colored or not. Usually you would use the `--color`
    /// CLI argument, but this is available for programmatic use as well.
pub fn with_output_color(mut self, enabled: bool) -> Criterion<M> {
@@ -693,6 +690,7 @@ impl<M: Measurement> Criterion<M> {
}
/// Set the output directory (currently for testing only)
+ #[must_use]
#[doc(hidden)]
pub fn output_directory(mut self, path: &Path) -> Criterion<M> {
self.output_directory = path.to_owned();
@@ -701,6 +699,7 @@ impl<M: Measurement> Criterion<M> {
}
/// Set the profile time (currently for testing only)
+ #[must_use]
#[doc(hidden)]
pub fn profile_time(mut self, profile_time: Option<Duration>) -> Criterion<M> {
match profile_time {
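
Throughout lib.rs the builder methods on `Criterion` gain `#[must_use]`: each one consumes `self` and returns the updated runner, so dropping the return value silently discards the configuration. A quick sketch of the intended chaining style (the settings shown are arbitrary):

```rust
use criterion::Criterion;
use std::time::Duration;

fn configured() -> Criterion {
    // Each call returns the updated Criterion; #[must_use] now warns if the
    // result is accidentally ignored instead of chained or reassigned.
    Criterion::default()
        .sample_size(200)
        .measurement_time(Duration::from_secs(10))
        .confidence_level(0.99)
}
```
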
@@ -728,109 +727,131 @@ impl<M: Measurement> Criterion<M> {
/// Configure this criterion struct based on the command-line arguments to
/// this process.
+ #[must_use]
#[cfg_attr(feature = "cargo-clippy", allow(clippy::cognitive_complexity))]
pub fn configure_from_args(mut self) -> Criterion<M> {
- use clap::{App, Arg};
- let matches = App::new("Criterion Benchmark")
- .arg(Arg::with_name("FILTER")
+ use clap::{Arg, Command};
+ let matches = Command::new("Criterion Benchmark")
+ .arg(Arg::new("FILTER")
.help("Skip benchmarks whose names do not contain FILTER.")
.index(1))
- .arg(Arg::with_name("color")
- .short("c")
+ .arg(Arg::new("color")
+ .short('c')
.long("color")
.alias("colour")
.takes_value(true)
.possible_values(&["auto", "always", "never"])
.default_value("auto")
.help("Configure coloring of output. always = always colorize output, never = never colorize output, auto = colorize output if output is a tty and compiled for unix."))
- .arg(Arg::with_name("verbose")
- .short("v")
+ .arg(Arg::new("verbose")
+ .short('v')
.long("verbose")
.help("Print additional statistical information."))
- .arg(Arg::with_name("noplot")
- .short("n")
+ .arg(Arg::new("quiet")
+ .long("quiet")
+ .conflicts_with("verbose")
+ .help("Print only the benchmark results."))
+ .arg(Arg::new("noplot")
+ .short('n')
.long("noplot")
.help("Disable plot and HTML generation."))
- .arg(Arg::with_name("save-baseline")
- .short("s")
+ .arg(Arg::new("save-baseline")
+ .short('s')
.long("save-baseline")
.default_value("base")
.help("Save results under a named baseline."))
- .arg(Arg::with_name("baseline")
- .short("b")
+ .arg(Arg::new("discard-baseline")
+ .long("discard-baseline")
+ .conflicts_with_all(&["save-baseline", "baseline", "baseline-lenient"])
+ .help("Discard benchmark results."))
+ .arg(Arg::new("baseline")
+ .short('b')
.long("baseline")
.takes_value(true)
- .conflicts_with("save-baseline")
- .help("Compare to a named baseline."))
- .arg(Arg::with_name("list")
+ .conflicts_with_all(&["save-baseline", "baseline-lenient"])
+ .help("Compare to a named baseline. If any benchmarks do not have the specified baseline this command fails."))
+ .arg(Arg::new("baseline-lenient")
+ .long("baseline-lenient")
+ .takes_value(true)
+ .conflicts_with_all(&["save-baseline", "baseline"])
+ .help("Compare to a named baseline. If any benchmarks do not have the specified baseline then just those benchmarks are not compared against the baseline while every other benchmark is compared against the baseline."))
+ .arg(Arg::new("list")
.long("list")
.help("List all benchmarks")
.conflicts_with_all(&["test", "profile-time"]))
- .arg(Arg::with_name("profile-time")
+ .arg(Arg::new("profile-time")
.long("profile-time")
.takes_value(true)
.help("Iterate each benchmark for approximately the given number of seconds, doing no analysis and without storing the results. Useful for running the benchmarks in a profiler.")
.conflicts_with_all(&["test", "list"]))
- .arg(Arg::with_name("load-baseline")
+ .arg(Arg::new("load-baseline")
.long("load-baseline")
.takes_value(true)
.conflicts_with("profile-time")
.requires("baseline")
.help("Load a previous baseline instead of sampling new data."))
- .arg(Arg::with_name("sample-size")
+ .arg(Arg::new("sample-size")
.long("sample-size")
.takes_value(true)
- .help(&format!("Changes the default size of the sample for this run. [default: {}]", self.config.sample_size)))
- .arg(Arg::with_name("warm-up-time")
+ .help(&*format!("Changes the default size of the sample for this run. [default: {}]", self.config.sample_size)))
+ .arg(Arg::new("warm-up-time")
.long("warm-up-time")
.takes_value(true)
- .help(&format!("Changes the default warm up time for this run. [default: {}]", self.config.warm_up_time.as_secs())))
- .arg(Arg::with_name("measurement-time")
+ .help(&*format!("Changes the default warm up time for this run. [default: {}]", self.config.warm_up_time.as_secs())))
+ .arg(Arg::new("measurement-time")
.long("measurement-time")
.takes_value(true)
- .help(&format!("Changes the default measurement time for this run. [default: {}]", self.config.measurement_time.as_secs())))
- .arg(Arg::with_name("nresamples")
+ .help(&*format!("Changes the default measurement time for this run. [default: {}]", self.config.measurement_time.as_secs())))
+ .arg(Arg::new("nresamples")
.long("nresamples")
.takes_value(true)
- .help(&format!("Changes the default number of resamples for this run. [default: {}]", self.config.nresamples)))
- .arg(Arg::with_name("noise-threshold")
+ .help(&*format!("Changes the default number of resamples for this run. [default: {}]", self.config.nresamples)))
+ .arg(Arg::new("noise-threshold")
.long("noise-threshold")
.takes_value(true)
- .help(&format!("Changes the default noise threshold for this run. [default: {}]", self.config.noise_threshold)))
- .arg(Arg::with_name("confidence-level")
+ .help(&*format!("Changes the default noise threshold for this run. [default: {}]", self.config.noise_threshold)))
+ .arg(Arg::new("confidence-level")
.long("confidence-level")
.takes_value(true)
- .help(&format!("Changes the default confidence level for this run. [default: {}]", self.config.confidence_level)))
- .arg(Arg::with_name("significance-level")
+ .help(&*format!("Changes the default confidence level for this run. [default: {}]", self.config.confidence_level)))
+ .arg(Arg::new("significance-level")
.long("significance-level")
.takes_value(true)
- .help(&format!("Changes the default significance level for this run. [default: {}]", self.config.significance_level)))
- .arg(Arg::with_name("test")
- .hidden(true)
+ .help(&*format!("Changes the default significance level for this run. [default: {}]", self.config.significance_level)))
+ .arg(Arg::new("quick")
+ .long("quick")
+ .conflicts_with("sample-size")
+ .help(&*format!("Benchmark only until the significance level has been reached [default: {}]", self.config.quick_mode)))
+ .arg(Arg::new("test")
+ .hide(true)
.long("test")
.help("Run the benchmarks once, to verify that they execute successfully, but do not measure or report the results.")
.conflicts_with_all(&["list", "profile-time"]))
- .arg(Arg::with_name("bench")
- .hidden(true)
+ .arg(Arg::new("bench")
+ .hide(true)
.long("bench"))
- .arg(Arg::with_name("plotting-backend")
+ .arg(Arg::new("plotting-backend")
.long("plotting-backend")
.takes_value(true)
.possible_values(&["gnuplot", "plotters"])
.help("Set the plotting backend. By default, Criterion.rs will use the gnuplot backend if gnuplot is available, or the plotters backend if it isn't."))
- .arg(Arg::with_name("output-format")
+ .arg(Arg::new("output-format")
.long("output-format")
.takes_value(true)
.possible_values(&["criterion", "bencher"])
.default_value("criterion")
.help("Change the CLI output format. By default, Criterion.rs will use its own format. If output format is set to 'bencher', Criterion.rs will print output in a format that resembles the 'bencher' crate."))
- .arg(Arg::with_name("nocapture")
+ .arg(Arg::new("nocapture")
.long("nocapture")
+ .hide(true)
+ .help("Ignored, but added for compatibility with libtest."))
+ .arg(Arg::new("show-output")
+ .long("show-output")
.hidden(true)
.help("Ignored, but added for compatibility with libtest."))
- .arg(Arg::with_name("version")
+ .arg(Arg::new("version")
.hidden(true)
- .short("V")
+ .short('V')
.long("version"))
.after_help("
This executable is a Criterion.rs benchmark.
@@ -850,21 +871,21 @@ https://bheisler.github.io/criterion.rs/book/faq.html
if self.connection.is_some() {
if let Some(color) = matches.value_of("color") {
if color != "auto" {
- println!("Warning: --color will be ignored when running with cargo-criterion. Use `cargo criterion --color {} -- <args>` instead.", color);
+ eprintln!("Warning: --color will be ignored when running with cargo-criterion. Use `cargo criterion --color {} -- <args>` instead.", color);
}
}
if matches.is_present("verbose") {
- println!("Warning: --verbose will be ignored when running with cargo-criterion. Use `cargo criterion --output-format verbose -- <args>` instead.");
+ eprintln!("Warning: --verbose will be ignored when running with cargo-criterion. Use `cargo criterion --output-format verbose -- <args>` instead.");
}
if matches.is_present("noplot") {
- println!("Warning: --noplot will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend disabled -- <args>` instead.");
+ eprintln!("Warning: --noplot will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend disabled -- <args>` instead.");
}
if let Some(backend) = matches.value_of("plotting-backend") {
- println!("Warning: --plotting-backend will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend {} -- <args>` instead.", backend);
+ eprintln!("Warning: --plotting-backend will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend {} -- <args>` instead.", backend);
}
if let Some(format) = matches.value_of("output-format") {
if format != "criterion" {
- println!("Warning: --output-format will be ignored when running with cargo-criterion. Use `cargo criterion --output-format {} -- <args>` instead.", format);
+ eprintln!("Warning: --output-format will be ignored when running with cargo-criterion. Use `cargo criterion --output-format {} -- <args>` instead.", format);
}
}
@@ -875,7 +896,7 @@ https://bheisler.github.io/criterion.rs/book/faq.html
.unwrap_or(false)
|| matches.is_present("load-baseline")
{
- println!("Error: baselines are not supported when running with cargo-criterion.");
+ eprintln!("Error: baselines are not supported when running with cargo-criterion.");
std::process::exit(1);
}
}
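
The argument definitions in `configure_from_args` above are ported from clap 2 to clap 3: `App`, `Arg::with_name`, `.short("c")`, and `.hidden(true)` become `Command`, `Arg::new`, `.short('c')`, and `.hide(true)`, and the hunks that follow replace the `value_t!` macro with `value_of_t_or_exit`. A minimal standalone sketch of that clap 3 style, outside of Criterion (argument names chosen for illustration):

```rust
use clap::{Arg, Command};

fn main() {
    let matches = Command::new("Criterion Benchmark")
        .arg(
            Arg::new("quick")
                .long("quick")
                .help("Benchmark only until the significance level has been reached"),
        )
        .arg(Arg::new("bench").long("bench").hide(true))
        .get_matches_from(["bench", "--quick"]);

    // Flag parsing works as before; only the builder API names changed.
    assert!(matches.is_present("quick"));
}
```
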
@@ -893,17 +914,14 @@ https://bheisler.github.io/criterion.rs/book/faq.html
} else if matches.is_present("list") {
Mode::List
} else if matches.is_present("profile-time") {
- let num_seconds = value_t!(matches.value_of("profile-time"), u64).unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
+ let num_seconds = matches.value_of_t_or_exit("profile-time");
- if num_seconds < 1 {
- println!("Profile time must be at least one second.");
+ if num_seconds < 1.0 {
+ eprintln!("Profile time must be at least one second.");
std::process::exit(1);
}
- Mode::Profile(Duration::from_secs(num_seconds))
+ Mode::Profile(Duration::from_secs_f64(num_seconds))
} else {
Mode::Benchmark
};
@@ -927,16 +945,21 @@ https://bheisler.github.io/criterion.rs/book/faq.html
if matches.is_present("noplot") {
self = self.without_plots();
- } else {
- self = self.with_plots();
}
if let Some(dir) = matches.value_of("save-baseline") {
self.baseline = Baseline::Save;
self.baseline_directory = dir.to_owned()
}
+ if matches.is_present("discard-baseline") {
+ self.baseline = Baseline::Discard;
+ }
if let Some(dir) = matches.value_of("baseline") {
- self.baseline = Baseline::Compare;
+ self.baseline = Baseline::CompareStrict;
+ self.baseline_directory = dir.to_owned();
+ }
+ if let Some(dir) = matches.value_of("baseline-lenient") {
+ self.baseline = Baseline::CompareLenient;
self.baseline_directory = dir.to_owned();
}
@@ -945,7 +968,7 @@ https://bheisler.github.io/criterion.rs/book/faq.html
self.report.cli_enabled = false;
self.report.bencher_enabled = false;
self.report.csv_enabled = false;
- self.report.html_enabled = false;
+ self.report.html = None;
} else {
match matches.value_of("output-format") {
Some("bencher") => {
@@ -954,6 +977,13 @@ https://bheisler.github.io/criterion.rs/book/faq.html
}
_ => {
let verbose = matches.is_present("verbose");
+ let verbosity = if verbose {
+ CliVerbosity::Verbose
+ } else if matches.is_present("quiet") {
+ CliVerbosity::Quiet
+ } else {
+ CliVerbosity::Normal
+ };
let stdout_isatty = atty::is(atty::Stream::Stdout);
let mut enable_text_overwrite = stdout_isatty && !verbose && !debug_enabled();
let enable_text_coloring;
@@ -970,7 +1000,7 @@ https://bheisler.github.io/criterion.rs/book/faq.html
self.report.bencher_enabled = false;
self.report.cli_enabled = true;
self.report.cli =
- CliReport::new(enable_text_overwrite, enable_text_coloring, verbose);
+ CliReport::new(enable_text_overwrite, enable_text_coloring, verbosity);
}
};
}
@@ -980,92 +1010,78 @@ https://bheisler.github.io/criterion.rs/book/faq.html
}
if matches.is_present("sample-size") {
- let num_size = value_t!(matches.value_of("sample-size"), usize).unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
+ let num_size = matches.value_of_t_or_exit("sample-size");
assert!(num_size >= 10);
self.config.sample_size = num_size;
}
if matches.is_present("warm-up-time") {
- let num_seconds = value_t!(matches.value_of("warm-up-time"), u64).unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
+ let num_seconds = matches.value_of_t_or_exit("warm-up-time");
- let dur = std::time::Duration::new(num_seconds, 0);
- assert!(dur.to_nanos() > 0);
+ let dur = std::time::Duration::from_secs_f64(num_seconds);
+ assert!(dur.as_nanos() > 0);
self.config.warm_up_time = dur;
}
if matches.is_present("measurement-time") {
- let num_seconds =
- value_t!(matches.value_of("measurement-time"), u64).unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
+ let num_seconds = matches.value_of_t_or_exit("measurement-time");
- let dur = std::time::Duration::new(num_seconds, 0);
- assert!(dur.to_nanos() > 0);
+ let dur = std::time::Duration::from_secs_f64(num_seconds);
+ assert!(dur.as_nanos() > 0);
self.config.measurement_time = dur;
}
if matches.is_present("nresamples") {
- let num_resamples =
- value_t!(matches.value_of("nresamples"), usize).unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
+ let num_resamples = matches.value_of_t_or_exit("nresamples");
assert!(num_resamples > 0);
self.config.nresamples = num_resamples;
}
if matches.is_present("noise-threshold") {
- let num_noise_threshold = value_t!(matches.value_of("noise-threshold"), f64)
- .unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
+ let num_noise_threshold = matches.value_of_t_or_exit("noise-threshold");
assert!(num_noise_threshold > 0.0);
self.config.noise_threshold = num_noise_threshold;
}
if matches.is_present("confidence-level") {
- let num_confidence_level = value_t!(matches.value_of("confidence-level"), f64)
- .unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
+ let num_confidence_level = matches.value_of_t_or_exit("confidence-level");
assert!(num_confidence_level > 0.0 && num_confidence_level < 1.0);
self.config.confidence_level = num_confidence_level;
}
if matches.is_present("significance-level") {
- let num_significance_level = value_t!(matches.value_of("significance-level"), f64)
- .unwrap_or_else(|e| {
- println!("{}", e);
- std::process::exit(1)
- });
+ let num_significance_level = matches.value_of_t_or_exit("significance-level");
assert!(num_significance_level > 0.0 && num_significance_level < 1.0);
self.config.significance_level = num_significance_level;
}
+ if matches.is_present("quick") {
+ self.config.quick_mode = true;
+ }
+
self
}
fn filter_matches(&self, id: &str) -> bool {
- match self.filter {
- Some(ref regex) => regex.is_match(id),
+ match &self.filter {
+ Some(regex) => regex.is_match(id),
None => true,
}
}
+ /// Returns true iff we should save the benchmark results in
+ /// json files on the local disk.
+ fn should_save_baseline(&self) -> bool {
+ self.connection.is_none()
+ && self.load_baseline.is_none()
+ && !matches!(self.baseline, Baseline::Discard)
+ }
+
/// Return a benchmark group. All benchmarks performed using a benchmark group will be
/// grouped together in the final report.
///
@@ -1091,9 +1107,7 @@ https://bheisler.github.io/criterion.rs/book/faq.html
/// Panics if the group name is empty
pub fn benchmark_group<S: Into<String>>(&mut self, group_name: S) -> BenchmarkGroup<'_, M> {
let group_name = group_name.into();
- if group_name.is_empty() {
- panic!("Group name must not be empty.");
- }
+ assert!(!group_name.is_empty(), "Group name must not be empty.");
if let Some(conn) = &self.connection {
conn.send(&OutgoingMessage::BeginningBenchmarkGroup { group: &group_name })
@@ -1180,163 +1194,24 @@ where
);
self
}
-
- /// Benchmarks a function under various inputs
- ///
- /// This is a convenience method to execute several related benchmarks. Each benchmark will
- /// receive the id: `${id}/${input}`.
- ///
- /// # Example
- ///
- /// ```rust
- /// # #[macro_use] extern crate criterion;
- /// # use self::criterion::*;
- ///
- /// fn bench(c: &mut Criterion) {
- /// c.bench_function_over_inputs("from_elem",
- /// |b: &mut Bencher, size: &usize| {
- /// b.iter(|| vec![0u8; *size]);
- /// },
- /// vec![1024, 2048, 4096]
- /// );
- /// }
- ///
- /// criterion_group!(benches, bench);
- /// criterion_main!(benches);
- /// ```
- #[doc(hidden)]
- #[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
- #[allow(deprecated)]
- pub fn bench_function_over_inputs<I, F>(
- &mut self,
- id: &str,
- f: F,
- inputs: I,
- ) -> &mut Criterion<M>
- where
- I: IntoIterator,
- I::Item: fmt::Debug + 'static,
- F: FnMut(&mut Bencher<'_, M>, &I::Item) + 'static,
- {
- self.bench(id, ParameterizedBenchmark::new(id, f, inputs))
- }
-
- /// Benchmarks multiple functions
- ///
- /// All functions get the same input and are compared with the other implementations.
- /// Works similar to `bench_function`, but with multiple functions.
- ///
- /// # Example
- ///
- /// ``` rust
- /// # #[macro_use] extern crate criterion;
- /// # use self::criterion::*;
- /// # fn seq_fib(i: &u32) {}
- /// # fn par_fib(i: &u32) {}
- ///
- /// fn bench_seq_fib(b: &mut Bencher, i: &u32) {
- /// b.iter(|| {
- /// seq_fib(i);
- /// });
- /// }
- ///
- /// fn bench_par_fib(b: &mut Bencher, i: &u32) {
- /// b.iter(|| {
- /// par_fib(i);
- /// });
- /// }
- ///
- /// fn bench(c: &mut Criterion) {
- /// let sequential_fib = Fun::new("Sequential", bench_seq_fib);
- /// let parallel_fib = Fun::new("Parallel", bench_par_fib);
- /// let funs = vec![sequential_fib, parallel_fib];
- ///
- /// c.bench_functions("Fibonacci", funs, 14);
- /// }
- ///
- /// criterion_group!(benches, bench);
- /// criterion_main!(benches);
- /// ```
- #[doc(hidden)]
- #[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
- #[allow(deprecated)]
- pub fn bench_functions<I>(
- &mut self,
- id: &str,
- funs: Vec<Fun<I, M>>,
- input: I,
- ) -> &mut Criterion<M>
- where
- I: fmt::Debug + 'static,
- {
- let benchmark = ParameterizedBenchmark::with_functions(
- funs.into_iter().map(|fun| fun.f).collect(),
- vec![input],
- );
-
- self.bench(id, benchmark)
- }
-
- /// Executes the given benchmark. Use this variant to execute benchmarks
- /// with complex configuration. This can be used to compare multiple
- /// functions, execute benchmarks with custom configuration settings and
- /// more. See the Benchmark and ParameterizedBenchmark structs for more
- /// information.
- ///
- /// ```rust
- /// # #[macro_use] extern crate criterion;
- /// # use criterion::*;
- /// # fn routine_1() {}
- /// # fn routine_2() {}
- ///
- /// fn bench(c: &mut Criterion) {
- /// // Setup (construct data, allocate memory, etc)
- /// c.bench(
- /// "routines",
- /// Benchmark::new("routine_1", |b| b.iter(|| routine_1()))
- /// .with_function("routine_2", |b| b.iter(|| routine_2()))
- /// .sample_size(50)
- /// );
- /// }
- ///
- /// criterion_group!(benches, bench);
- /// criterion_main!(benches);
- /// ```
- #[doc(hidden)]
- #[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
- pub fn bench<B: BenchmarkDefinition<M>>(
- &mut self,
- group_id: &str,
- benchmark: B,
- ) -> &mut Criterion<M> {
- benchmark.run(group_id, self);
- self
- }
-}
-
-trait DurationExt {
- fn to_nanos(&self) -> u64;
-}
-
-const NANOS_PER_SEC: u64 = 1_000_000_000;
-
-impl DurationExt for Duration {
- fn to_nanos(&self) -> u64 {
- self.as_secs() * NANOS_PER_SEC + u64::from(self.subsec_nanos())
- }
}
/// Enum representing different ways of measuring the throughput of benchmarked code.
/// If the throughput setting is configured for a benchmark then the estimated throughput will
/// be reported as well as the time per iteration.
// TODO: Remove serialize/deserialize from the public API.
-#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum Throughput {
/// Measure throughput in terms of bytes/second. The value should be the number of bytes
/// processed by one iteration of the benchmarked code. Typically, this would be the length of
/// an input string or `&[u8]`.
Bytes(u64),
+ /// Equivalent to Bytes, but the value will be reported in terms of
+ /// kilobytes (1000 bytes) per second instead of kibibytes (1024 bytes) per
+    /// second, megabytes instead of mebibytes, and gigabytes instead of gibibytes.
+ BytesDecimal(u64),
+
/// Measure throughput in terms of elements/second. The value should be the number of elements
/// processed by one iteration of the benchmarked code. Typically, this would be the size of a
/// collection, but could also be the number of lines of input text or the number of values to
@@ -1358,7 +1233,7 @@ pub enum AxisScale {
/// or benchmark group.
///
/// ```rust
-/// use self::criterion::{Bencher, Criterion, Benchmark, PlotConfiguration, AxisScale};
+/// use self::criterion::{Bencher, Criterion, PlotConfiguration, AxisScale};
///
/// let plot_config = PlotConfiguration::default()
/// .summary_scale(AxisScale::Logarithmic);
@@ -1383,6 +1258,7 @@ impl Default for PlotConfiguration {
}
impl PlotConfiguration {
+ #[must_use]
/// Set the axis scale (linear or logarithmic) for the summary plots. Typically, you would
/// set this to logarithmic if benchmarking over a range of inputs which scale exponentially.
/// Defaults to linear.
@@ -1455,7 +1331,7 @@ impl ActualSamplingMode {
ActualSamplingMode::Linear => {
let n = sample_count;
let met = warmup_mean_execution_time;
- let m_ns = target_time.to_nanos();
+ let m_ns = target_time.as_nanos();
// Solve: [d + 2*d + 3*d + ... + n*d] * met = m_ns
let total_runs = n * (n + 1) / 2;
let d = ((m_ns as f64 / met / total_runs as f64).ceil() as u64).max(1);
@@ -1465,16 +1341,16 @@ impl ActualSamplingMode {
let recommended_sample_size =
ActualSamplingMode::recommend_linear_sample_size(m_ns as f64, met);
let actual_time = Duration::from_nanos(expected_ns as u64);
- print!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
+ eprint!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
n, target_time, actual_time);
if recommended_sample_size != n {
- println!(
+ eprintln!(
", enable flat sampling, or reduce sample count to {}.",
recommended_sample_size
);
} else {
- println!(" or enable flat sampling.");
+ eprintln!(" or enable flat sampling.");
}
}
@@ -1483,7 +1359,7 @@ impl ActualSamplingMode {
ActualSamplingMode::Flat => {
let n = sample_count;
let met = warmup_mean_execution_time;
- let m_ns = target_time.to_nanos() as f64;
+ let m_ns = target_time.as_nanos() as f64;
let time_per_sample = m_ns / (n as f64);
// This is pretty simplistic; we could do something smarter to fit into the allotted time.
let iterations_per_sample = ((time_per_sample / met).ceil() as u64).max(1);
@@ -1494,13 +1370,13 @@ impl ActualSamplingMode {
let recommended_sample_size =
ActualSamplingMode::recommend_flat_sample_size(m_ns, met);
let actual_time = Duration::from_nanos(expected_ns as u64);
- print!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
+ eprint!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
n, target_time, actual_time);
if recommended_sample_size != n {
- println!(", or reduce sample count to {}.", recommended_sample_size);
+ eprintln!(", or reduce sample count to {}.", recommended_sample_size);
} else {
- println!(".");
+ eprintln!(".");
}
}
@@ -1566,53 +1442,3 @@ pub fn runner(benches: &[&dyn Fn()]) {
}
Criterion::default().configure_from_args().final_summary();
}
-
-/// Print a warning informing users about upcoming changes to features
-#[cfg(not(feature = "html_reports"))]
-#[doc(hidden)]
-pub fn __warn_about_html_reports_feature() {
- if CARGO_CRITERION_CONNECTION.is_none() {
- println!(
- "WARNING: HTML report generation will become a non-default optional feature in Criterion.rs 0.4.0."
- );
- println!(
- "This feature is being moved to cargo-criterion \
- (https://github.com/bheisler/cargo-criterion) and will be optional in a future \
- version of Criterion.rs. To silence this warning, either switch to cargo-criterion or \
- enable the 'html_reports' feature in your Cargo.toml."
- );
- println!();
- }
-}
-
-/// Print a warning informing users about upcoming changes to features
-#[cfg(feature = "html_reports")]
-#[doc(hidden)]
-pub fn __warn_about_html_reports_feature() {
- // They have the feature enabled, so they're ready for the update.
-}
-
-/// Print a warning informing users about upcoming changes to features
-#[cfg(not(feature = "cargo_bench_support"))]
-#[doc(hidden)]
-pub fn __warn_about_cargo_bench_support_feature() {
- if CARGO_CRITERION_CONNECTION.is_none() {
- println!(
- "WARNING: In Criterion.rs 0.4.0, running criterion benchmarks outside of cargo-criterion will become a default optional feature."
- );
- println!(
- "The statistical analysis and reporting is being moved to cargo-criterion \
- (https://github.com/bheisler/cargo-criterion) and will be optional in a future \
- version of Criterion.rs. To silence this warning, either switch to cargo-criterion or \
- enable the 'cargo_bench_support' feature in your Cargo.toml."
- );
- println!();
- }
-}
-
-/// Print a warning informing users about upcoming changes to features
-#[cfg(feature = "cargo_bench_support")]
-#[doc(hidden)]
-pub fn __warn_about_cargo_bench_support_feature() {
- // They have the feature enabled, so they're ready for the update.
-}
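Editor's note: the lib.rs hunks above introduce Throughput::BytesDecimal. A minimal, hedged sketch of how it might be used with a benchmark group; the group/function names and workload are illustrative, only the BytesDecimal variant itself comes from this change.

    use criterion::{criterion_group, criterion_main, Criterion, Throughput};

    fn decimal_throughput(c: &mut Criterion) {
        let payload = vec![0u8; 1_000_000];
        let mut group = c.benchmark_group("checksum");
        // Rates are reported in KB/MB/GB per second (powers of 1000), not KiB/MiB/GiB.
        group.throughput(Throughput::BytesDecimal(payload.len() as u64));
        group.bench_function("sum", |b| {
            b.iter(|| payload.iter().map(|&x| x as u64).sum::<u64>())
        });
        group.finish();
    }

    criterion_group!(benches, decimal_throughput);
    criterion_main!(benches);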
diff --git a/src/macros.rs b/src/macros.rs
index 85d8c59..df7a44d 100755
--- a/src/macros.rs
+++ b/src/macros.rs
@@ -120,9 +120,6 @@ macro_rules! criterion_group {
macro_rules! criterion_main {
( $( $group:path ),+ $(,)* ) => {
fn main() {
- $crate::__warn_about_html_reports_feature();
- $crate::__warn_about_cargo_bench_support_feature();
-
$(
$group();
)+
diff --git a/src/macros_private.rs b/src/macros_private.rs
index 982602f..26203d1 100755
--- a/src/macros_private.rs
+++ b/src/macros_private.rs
@@ -16,7 +16,7 @@ macro_rules! log_if_err {
/// be passed as second parameter.
macro_rules! try_else_return {
($x:expr) => {
- try_else_return!($x, || {});
+ try_else_return!($x, || {})
};
($x:expr, $el:expr) => {
match $x {
@@ -33,7 +33,7 @@ macro_rules! try_else_return {
/// Print an error message to stdout. Format is the same as println! or format!
macro_rules! error {
($($arg:tt)*) => (
- println!("Criterion.rs ERROR: {}", &format!($($arg)*));
+ println!("Criterion.rs ERROR: {}", &format!($($arg)*))
)
}
@@ -41,7 +41,7 @@ macro_rules! error {
macro_rules! info {
($($arg:tt)*) => (
if $crate::debug_enabled() {
- println!("Criterion.rs DEBUG: {}", &format!($($arg)*));
+ println!("Criterion.rs DEBUG: {}", &format!($($arg)*))
}
)
}
diff --git a/src/measurement.rs b/src/measurement.rs
index e253670..6371975 100755
--- a/src/measurement.rs
+++ b/src/measurement.rs
@@ -4,7 +4,6 @@
//! measurement.
use crate::format::short;
-use crate::DurationExt;
use crate::Throughput;
use std::time::{Duration, Instant};
@@ -125,6 +124,31 @@ impl DurationFormatter {
unit
}
+ fn bytes_per_second_decimal(
+ &self,
+ bytes: f64,
+ typical: f64,
+ values: &mut [f64],
+ ) -> &'static str {
+ let bytes_per_second = bytes * (1e9 / typical);
+ let (denominator, unit) = if bytes_per_second < 1000.0 {
+ (1.0, " B/s")
+ } else if bytes_per_second < 1000.0 * 1000.0 {
+ (1000.0, "KB/s")
+ } else if bytes_per_second < 1000.0 * 1000.0 * 1000.0 {
+ (1000.0 * 1000.0, "MB/s")
+ } else {
+ (1000.0 * 1000.0 * 1000.0, "GB/s")
+ };
+
+ for val in values {
+ let bytes_per_second = bytes * (1e9 / *val);
+ *val = bytes_per_second / denominator;
+ }
+
+ unit
+ }
+
fn elements_per_second(&self, elems: f64, typical: f64, values: &mut [f64]) -> &'static str {
let elems_per_second = elems * (1e9 / typical);
let (denominator, unit) = if elems_per_second < 1000.0 {
@@ -154,6 +178,9 @@ impl ValueFormatter for DurationFormatter {
) -> &'static str {
match *throughput {
Throughput::Bytes(bytes) => self.bytes_per_second(bytes as f64, typical, values),
+ Throughput::BytesDecimal(bytes) => {
+ self.bytes_per_second_decimal(bytes as f64, typical, values)
+ }
Throughput::Elements(elems) => self.elements_per_second(elems as f64, typical, values),
}
}
@@ -164,7 +191,7 @@ impl ValueFormatter for DurationFormatter {
} else if ns < 10f64.powi(3) {
(10f64.powi(0), "ns")
} else if ns < 10f64.powi(6) {
- (10f64.powi(-3), "us")
+ (10f64.powi(-3), "µs")
} else if ns < 10f64.powi(9) {
(10f64.powi(-6), "ms")
} else {
@@ -204,7 +231,7 @@ impl Measurement for WallTime {
Duration::from_secs(0)
}
fn to_f64(&self, val: &Self::Value) -> f64 {
- val.to_nanos() as f64
+ val.as_nanos() as f64
}
fn formatter(&self) -> &dyn ValueFormatter {
&DurationFormatter
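Editor's note: a standalone, hedged sketch of the decimal unit selection that bytes_per_second_decimal above performs; the function and main() here are ours, not the crate's code.

    fn scale_decimal(bytes_per_second: f64) -> (f64, &'static str) {
        if bytes_per_second < 1e3 {
            (bytes_per_second, " B/s")
        } else if bytes_per_second < 1e6 {
            (bytes_per_second / 1e3, "KB/s")
        } else if bytes_per_second < 1e9 {
            (bytes_per_second / 1e6, "MB/s")
        } else {
            (bytes_per_second / 1e9, "GB/s")
        }
    }

    fn main() {
        // 1_500_000 bytes processed in 1_000_000 ns -> 1.5 GB/s in decimal units.
        let (value, unit) = scale_decimal(1_500_000.0 * (1e9 / 1_000_000.0));
        println!("{:.1} {}", value, unit);
    }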
diff --git a/src/plot/gnuplot_backend/mod.rs b/src/plot/gnuplot_backend/mod.rs
index 95e07ef..27cc48b 100755
--- a/src/plot/gnuplot_backend/mod.rs
+++ b/src/plot/gnuplot_backend/mod.rs
@@ -26,7 +26,7 @@ use super::{PlotContext, PlotData, Plotter};
use crate::format;
fn gnuplot_escape(string: &str) -> String {
- string.replace("_", "\\_").replace("'", "''")
+ string.replace('_', "\\_").replace('\'', "''")
}
static DEFAULT_FONT: &str = "Helvetica";
@@ -248,7 +248,7 @@ impl Plotter for Gnuplot {
info!(
"Waiting for {} gnuplot processes took {}",
child_count,
- format::time(crate::DurationExt::to_nanos(elapsed) as f64)
+ format::time(elapsed.as_nanos() as f64)
);
}
}
diff --git a/src/plot/mod.rs b/src/plot/mod.rs
index cb836a3..4bce394 100755
--- a/src/plot/mod.rs
+++ b/src/plot/mod.rs
@@ -1,7 +1,9 @@
mod gnuplot_backend;
+#[cfg(feature = "plotters")]
mod plotters_backend;
pub(crate) use gnuplot_backend::Gnuplot;
+#[cfg(feature = "plotters")]
pub(crate) use plotters_backend::PlottersBackend;
use crate::estimate::Statistic;
diff --git a/src/plot/plotters_backend/summary.rs b/src/plot/plotters_backend/summary.rs
index dad8a5b..a5a410d 100755
--- a/src/plot/plotters_backend/summary.rs
+++ b/src/plot/plotters_backend/summary.rs
@@ -31,9 +31,9 @@ pub fn line_comparison(
let (unit, series_data) = line_comparison_series_data(formatter, all_curves);
let x_range =
- plotters::data::fitting_range(series_data.iter().map(|(_, xs, _)| xs.iter()).flatten());
+ plotters::data::fitting_range(series_data.iter().flat_map(|(_, xs, _)| xs.iter()));
let y_range =
- plotters::data::fitting_range(series_data.iter().map(|(_, _, ys)| ys.iter()).flatten());
+ plotters::data::fitting_range(series_data.iter().flat_map(|(_, _, ys)| ys.iter()));
let root_area = SVGBackend::new(&path, SIZE)
.into_drawing_area()
.titled(&format!("{}: Comparison", title), (DEFAULT_FONT, 20))
@@ -97,7 +97,6 @@ fn draw_line_comarision_figure<XR: AsRangedCoord<Value = f64>, YR: AsRangedCoord
)
.unwrap();
if let Some(name) = name {
- let name: &str = &*name;
series.label(name).legend(move |(x, y)| {
Rectangle::new(
[(x, y - 5), (x + 20, y + 5)],
@@ -196,8 +195,7 @@ pub fn violin(
formatter.scale_values(max, xs);
});
- let mut x_range =
- plotters::data::fitting_range(kdes.iter().map(|(_, xs, _)| xs.iter()).flatten());
+ let mut x_range = plotters::data::fitting_range(kdes.iter().flat_map(|(_, xs, _)| xs.iter()));
x_range.start = 0.0;
let y_range = -0.5..all_curves.len() as f64 - 0.5;
diff --git a/src/report.rs b/src/report.rs
index 60a144a..9374c3e 100755
--- a/src/report.rs
+++ b/src/report.rs
@@ -1,5 +1,7 @@
+#[cfg(feature = "csv_output")]
+use crate::csv_report::FileCsvReport;
+use crate::stats::bivariate::regression::Slope;
use crate::stats::univariate::outliers::tukey::LabeledSample;
-use crate::{csv_report::FileCsvReport, stats::bivariate::regression::Slope};
use crate::{html::Html, stats::bivariate::Data};
use crate::estimate::{ChangeDistributions, ChangeEstimates, Distributions, Estimate, Estimates};
@@ -8,11 +10,11 @@ use crate::measurement::ValueFormatter;
use crate::stats::univariate::Sample;
use crate::stats::Distribution;
use crate::{PlotConfiguration, Throughput};
-use std::cell::Cell;
+use anes::{Attribute, ClearLine, Color, ResetAttributes, SetAttribute, SetForegroundColor};
use std::cmp;
use std::collections::HashSet;
use std::fmt;
-use std::io::stdout;
+use std::io::stderr;
use std::io::Write;
use std::path::{Path, PathBuf};
@@ -46,6 +48,7 @@ impl<'a> MeasurementData<'a> {
self.data.x()
}
+ #[cfg(feature = "csv_output")]
pub fn sample_times(&self) -> &Sample<f64> {
self.data.y()
}
@@ -58,7 +61,7 @@ pub enum ValueType {
Value,
}
-#[derive(Clone, Serialize, Deserialize, PartialEq)]
+#[derive(Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct BenchmarkId {
pub group_id: String,
pub function_id: Option<String>,
@@ -170,7 +173,9 @@ impl BenchmarkId {
pub fn as_number(&self) -> Option<f64> {
match self.throughput {
- Some(Throughput::Bytes(n)) | Some(Throughput::Elements(n)) => Some(n as f64),
+ Some(Throughput::Bytes(n))
+ | Some(Throughput::Elements(n))
+ | Some(Throughput::BytesDecimal(n)) => Some(n as f64),
None => self
.value_str
.as_ref()
@@ -181,6 +186,7 @@ impl BenchmarkId {
pub fn value_type(&self) -> Option<ValueType> {
match self.throughput {
Some(Throughput::Bytes(_)) => Some(ValueType::Bytes),
+ Some(Throughput::BytesDecimal(_)) => Some(ValueType::Bytes),
Some(Throughput::Elements(_)) => Some(ValueType::Elements),
None => self
.value_str
@@ -304,9 +310,7 @@ pub(crate) struct Reports {
pub(crate) bencher_enabled: bool,
pub(crate) bencher: BencherReport,
pub(crate) csv_enabled: bool,
- pub(crate) csv: FileCsvReport,
- pub(crate) html_enabled: bool,
- pub(crate) html: Html,
+ pub(crate) html: Option<Html>,
}
macro_rules! reports_impl {
(fn $name:ident(&self, $($argn:ident: $argt:ty),*)) => {
@@ -317,11 +321,12 @@ macro_rules! reports_impl {
if self.bencher_enabled {
self.bencher.$name($($argn),*);
}
+ #[cfg(feature = "csv_output")]
if self.csv_enabled {
- self.csv.$name($($argn),*);
+ FileCsvReport.$name($($argn),*);
}
- if self.html_enabled {
- self.html.$name($($argn),*);
+ if let Some(reporter) = &self.html {
+ reporter.$name($($argn),*);
}
}
};
@@ -363,35 +368,34 @@ impl Report for Reports {
reports_impl!(fn group_separator(&self, ));
}
+#[derive(Debug, Clone, Copy, Eq, PartialEq)]
+pub(crate) enum CliVerbosity {
+ Quiet,
+ Normal,
+ Verbose,
+}
+
pub(crate) struct CliReport {
pub enable_text_overwrite: bool,
pub enable_text_coloring: bool,
- pub verbose: bool,
-
- last_line_len: Cell<usize>,
+ pub verbosity: CliVerbosity,
}
impl CliReport {
pub fn new(
enable_text_overwrite: bool,
enable_text_coloring: bool,
- verbose: bool,
+ verbosity: CliVerbosity,
) -> CliReport {
CliReport {
enable_text_overwrite,
enable_text_coloring,
- verbose,
-
- last_line_len: Cell::new(0),
+ verbosity,
}
}
fn text_overwrite(&self) {
if self.enable_text_overwrite {
- print!("\r");
- for _ in 0..self.last_line_len.get() {
- print!(" ");
- }
- print!("\r");
+ eprint!("\r{}", ClearLine::All)
}
}
@@ -399,41 +403,36 @@ impl CliReport {
#[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))]
fn print_overwritable(&self, s: String) {
if self.enable_text_overwrite {
- self.last_line_len.set(s.len());
- print!("{}", s);
- stdout().flush().unwrap();
+ eprint!("{}", s);
+ stderr().flush().unwrap();
} else {
- println!("{}", s);
+ eprintln!("{}", s);
}
}
- fn green(&self, s: String) -> String {
+ fn with_color(&self, color: Color, s: &str) -> String {
if self.enable_text_coloring {
- format!("\x1B[32m{}\x1B[39m", s)
+ format!("{}{}{}", SetForegroundColor(color), s, ResetAttributes)
} else {
- s
+ String::from(s)
}
}
- fn yellow(&self, s: String) -> String {
- if self.enable_text_coloring {
- format!("\x1B[33m{}\x1B[39m", s)
- } else {
- s
- }
+ fn green(&self, s: &str) -> String {
+ self.with_color(Color::DarkGreen, s)
}
- fn red(&self, s: String) -> String {
- if self.enable_text_coloring {
- format!("\x1B[31m{}\x1B[39m", s)
- } else {
- s
- }
+ fn yellow(&self, s: &str) -> String {
+ self.with_color(Color::DarkYellow, s)
+ }
+
+ fn red(&self, s: &str) -> String {
+ self.with_color(Color::DarkRed, s)
}
fn bold(&self, s: String) -> String {
if self.enable_text_coloring {
- format!("\x1B[1m{}\x1B[22m", s)
+ format!("{}{}{}", SetAttribute(Attribute::Bold), s, ResetAttributes)
} else {
s
}
@@ -441,7 +440,7 @@ impl CliReport {
fn faint(&self, s: String) -> String {
if self.enable_text_coloring {
- format!("\x1B[2m{}\x1B[22m", s)
+ format!("{}{}{}", SetAttribute(Attribute::Faint), s, ResetAttributes)
} else {
s
}
@@ -460,7 +459,7 @@ impl CliReport {
println!(
"{}",
- self.yellow(format!(
+ self.yellow(&format!(
"Found {} outliers among {} measurements ({:.2}%)",
noutliers,
sample_size,
@@ -529,7 +528,7 @@ impl Report for CliReport {
iter_count: u64,
) {
self.text_overwrite();
- let iter_string = if self.verbose {
+ let iter_string = if matches!(self.verbosity, CliVerbosity::Verbose) {
format!("{} iterations", iter_count)
} else {
format::iter_count(iter_count)
@@ -559,14 +558,14 @@ impl Report for CliReport {
let mut id = id.as_title().to_owned();
if id.len() > 23 {
- println!("{}", self.green(id.clone()));
+ println!("{}", self.green(&id));
id.clear();
}
let id_len = id.len();
println!(
"{}{}time: [{} {} {}]",
- self.green(id),
+ self.green(&id),
" ".repeat(24 - id_len),
self.faint(
formatter.format_value(typical_estimate.confidence_interval.lower_bound)
@@ -594,98 +593,103 @@ impl Report for CliReport {
)
}
- if let Some(ref comp) = meas.comparison {
- let different_mean = comp.p_value < comp.significance_threshold;
- let mean_est = &comp.relative_estimates.mean;
- let point_estimate = mean_est.point_estimate;
- let mut point_estimate_str = format::change(point_estimate, true);
- // The change in throughput is related to the change in timing. Reducing the timing by
- // 50% increases the throughput by 100%.
- let to_thrpt_estimate = |ratio: f64| 1.0 / (1.0 + ratio) - 1.0;
- let mut thrpt_point_estimate_str =
- format::change(to_thrpt_estimate(point_estimate), true);
- let explanation_str: String;
-
- if !different_mean {
- explanation_str = "No change in performance detected.".to_owned();
- } else {
- let comparison = compare_to_threshold(mean_est, comp.noise_threshold);
- match comparison {
- ComparisonResult::Improved => {
- point_estimate_str = self.green(self.bold(point_estimate_str));
- thrpt_point_estimate_str = self.green(self.bold(thrpt_point_estimate_str));
- explanation_str =
- format!("Performance has {}.", self.green("improved".to_owned()));
- }
- ComparisonResult::Regressed => {
- point_estimate_str = self.red(self.bold(point_estimate_str));
- thrpt_point_estimate_str = self.red(self.bold(thrpt_point_estimate_str));
- explanation_str =
- format!("Performance has {}.", self.red("regressed".to_owned()));
- }
- ComparisonResult::NonSignificant => {
- explanation_str = "Change within noise threshold.".to_owned();
+ if !matches!(self.verbosity, CliVerbosity::Quiet) {
+ if let Some(ref comp) = meas.comparison {
+ let different_mean = comp.p_value < comp.significance_threshold;
+ let mean_est = &comp.relative_estimates.mean;
+ let point_estimate = mean_est.point_estimate;
+ let mut point_estimate_str = format::change(point_estimate, true);
+ // The change in throughput is related to the change in timing. Reducing the timing by
+ // 50% increases the throughput by 100%.
+ let to_thrpt_estimate = |ratio: f64| 1.0 / (1.0 + ratio) - 1.0;
+ let mut thrpt_point_estimate_str =
+ format::change(to_thrpt_estimate(point_estimate), true);
+ let explanation_str: String;
+
+ if !different_mean {
+ explanation_str = "No change in performance detected.".to_owned();
+ } else {
+ let comparison = compare_to_threshold(mean_est, comp.noise_threshold);
+ match comparison {
+ ComparisonResult::Improved => {
+ point_estimate_str = self.green(&self.bold(point_estimate_str));
+ thrpt_point_estimate_str =
+ self.green(&self.bold(thrpt_point_estimate_str));
+ explanation_str =
+ format!("Performance has {}.", self.green("improved"));
+ }
+ ComparisonResult::Regressed => {
+ point_estimate_str = self.red(&self.bold(point_estimate_str));
+ thrpt_point_estimate_str =
+ self.red(&self.bold(thrpt_point_estimate_str));
+ explanation_str = format!("Performance has {}.", self.red("regressed"));
+ }
+ ComparisonResult::NonSignificant => {
+ explanation_str = "Change within noise threshold.".to_owned();
+ }
}
}
- }
- if meas.throughput.is_some() {
- println!("{}change:", " ".repeat(17));
+ if meas.throughput.is_some() {
+ println!("{}change:", " ".repeat(17));
+
+ println!(
+ "{}time: [{} {} {}] (p = {:.2} {} {:.2})",
+ " ".repeat(24),
+ self.faint(format::change(
+ mean_est.confidence_interval.lower_bound,
+ true
+ )),
+ point_estimate_str,
+ self.faint(format::change(
+ mean_est.confidence_interval.upper_bound,
+ true
+ )),
+ comp.p_value,
+ if different_mean { "<" } else { ">" },
+ comp.significance_threshold
+ );
+ println!(
+ "{}thrpt: [{} {} {}]",
+ " ".repeat(24),
+ self.faint(format::change(
+ to_thrpt_estimate(mean_est.confidence_interval.upper_bound),
+ true
+ )),
+ thrpt_point_estimate_str,
+ self.faint(format::change(
+ to_thrpt_estimate(mean_est.confidence_interval.lower_bound),
+ true
+ )),
+ );
+ } else {
+ println!(
+ "{}change: [{} {} {}] (p = {:.2} {} {:.2})",
+ " ".repeat(24),
+ self.faint(format::change(
+ mean_est.confidence_interval.lower_bound,
+ true
+ )),
+ point_estimate_str,
+ self.faint(format::change(
+ mean_est.confidence_interval.upper_bound,
+ true
+ )),
+ comp.p_value,
+ if different_mean { "<" } else { ">" },
+ comp.significance_threshold
+ );
+ }
- println!(
- "{}time: [{} {} {}] (p = {:.2} {} {:.2})",
- " ".repeat(24),
- self.faint(format::change(
- mean_est.confidence_interval.lower_bound,
- true
- )),
- point_estimate_str,
- self.faint(format::change(
- mean_est.confidence_interval.upper_bound,
- true
- )),
- comp.p_value,
- if different_mean { "<" } else { ">" },
- comp.significance_threshold
- );
- println!(
- "{}thrpt: [{} {} {}]",
- " ".repeat(24),
- self.faint(format::change(
- to_thrpt_estimate(mean_est.confidence_interval.upper_bound),
- true
- )),
- thrpt_point_estimate_str,
- self.faint(format::change(
- to_thrpt_estimate(mean_est.confidence_interval.lower_bound),
- true
- )),
- );
- } else {
- println!(
- "{}change: [{} {} {}] (p = {:.2} {} {:.2})",
- " ".repeat(24),
- self.faint(format::change(
- mean_est.confidence_interval.lower_bound,
- true
- )),
- point_estimate_str,
- self.faint(format::change(
- mean_est.confidence_interval.upper_bound,
- true
- )),
- comp.p_value,
- if different_mean { "<" } else { ">" },
- comp.significance_threshold
- );
+ println!("{}{}", " ".repeat(24), explanation_str);
}
-
- println!("{}{}", " ".repeat(24), explanation_str);
}
- self.outliers(&meas.avg_times);
+ if !matches!(self.verbosity, CliVerbosity::Quiet) {
+ self.outliers(&meas.avg_times);
+ }
- if self.verbose {
+ if matches!(self.verbosity, CliVerbosity::Verbose) {
let format_short_estimate = |estimate: &Estimate| -> String {
format!(
"[{} {}]",
@@ -828,7 +832,7 @@ mod test {
assert_eq!("group/function/value_2", new_id.as_directory_name());
directories.insert(new_id.as_directory_name().to_owned());
- new_id = existing_id.clone();
+ new_id = existing_id;
new_id.ensure_directory_name_unique(&directories);
assert_eq!("group/function/value_3", new_id.as_directory_name());
directories.insert(new_id.as_directory_name().to_owned());
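Editor's note: the report.rs hunks replace hand-written ANSI escapes with the anes crate. A hedged, standalone mirror of the with_color helper introduced above (a sketch of the pattern, not the crate's exact code).

    use anes::{Color, ResetAttributes, SetForegroundColor};

    fn with_color(enable_text_coloring: bool, color: Color, s: &str) -> String {
        // Only wrap the string in escape sequences when coloring is enabled.
        if enable_text_coloring {
            format!("{}{}{}", SetForegroundColor(color), s, ResetAttributes)
        } else {
            String::from(s)
        }
    }

    fn main() {
        println!("Performance has {}.", with_color(true, Color::DarkGreen, "improved"));
        println!("Performance has {}.", with_color(false, Color::DarkRed, "regressed"));
    }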
diff --git a/src/routine.rs b/src/routine.rs
index 5831415..9567fb4 100755
--- a/src/routine.rs
+++ b/src/routine.rs
@@ -2,7 +2,7 @@ use crate::benchmark::BenchmarkConfig;
use crate::connection::OutgoingMessage;
use crate::measurement::Measurement;
use crate::report::{BenchmarkId, Report, ReportContext};
-use crate::{ActualSamplingMode, Bencher, Criterion, DurationExt};
+use crate::{black_box, ActualSamplingMode, Bencher, Criterion};
use std::marker::PhantomData;
use std::time::Duration;
@@ -34,7 +34,7 @@ pub(crate) trait Routine<M: Measurement, T: ?Sized> {
) {
criterion
.report
- .profile(id, report_context, time.to_nanos() as f64);
+ .profile(id, report_context, time.as_nanos() as f64);
let mut profile_path = report_context.output_directory.clone();
if (*crate::CARGO_CRITERION_CONNECTION).is_some() {
@@ -51,7 +51,7 @@ pub(crate) trait Routine<M: Measurement, T: ?Sized> {
.borrow_mut()
.start_profiling(id.id(), &profile_path);
- let time = time.to_nanos();
+ let time = time.as_nanos() as u64;
// TODO: Some profilers will show the two batches of iterations as
// being different code-paths even though they aren't really.
@@ -88,17 +88,58 @@ pub(crate) trait Routine<M: Measurement, T: ?Sized> {
report_context: &ReportContext,
parameter: &T,
) -> (ActualSamplingMode, Box<[f64]>, Box<[f64]>) {
+ if config.quick_mode {
+ let minimum_bench_duration = Duration::from_millis(100);
+ let maximum_bench_duration = config.measurement_time; // default: 5 seconds
+ let target_rel_stdev = config.significance_level; // default: 5%, 0.05
+
+ use std::time::Instant;
+ let time_start = Instant::now();
+
+ let sq = |val| val * val;
+ let mut n = 1;
+ let mut t_prev = *self.bench(measurement, &[n], parameter).first().unwrap();
+
+ // Early exit for extremely long running benchmarks:
+ if time_start.elapsed() > maximum_bench_duration {
+ let t_prev = 1_000_000f64;
+ let iters = vec![n as f64, n as f64].into_boxed_slice();
+ let elapsed = vec![t_prev, t_prev].into_boxed_slice();
+ return (ActualSamplingMode::Flat, iters, elapsed);
+ }
+
+ // Main data collection loop.
+ loop {
+ let t_now = *self
+ .bench(measurement, &[n * 2], parameter)
+ .first()
+ .unwrap();
+ let t = (t_prev + 2. * t_now) / 5.;
+ let stdev = (sq(t_prev - t) + sq(t_now - 2. * t)).sqrt();
+ // println!("Sample: {} {:.2}", n, stdev / t);
+ let elapsed = time_start.elapsed();
+ if (stdev < target_rel_stdev * t && elapsed > minimum_bench_duration)
+ || elapsed > maximum_bench_duration
+ {
+ let iters = vec![n as f64, (n * 2) as f64].into_boxed_slice();
+ let elapsed = vec![t_prev, t_now].into_boxed_slice();
+ return (ActualSamplingMode::Linear, iters, elapsed);
+ }
+ n *= 2;
+ t_prev = t_now;
+ }
+ }
let wu = config.warm_up_time;
- let m_ns = config.measurement_time.to_nanos();
+ let m_ns = config.measurement_time.as_nanos();
criterion
.report
- .warmup(id, report_context, wu.to_nanos() as f64);
+ .warmup(id, report_context, wu.as_nanos() as f64);
if let Some(conn) = &criterion.connection {
conn.send(&OutgoingMessage::Warmup {
id: id.into(),
- nanos: wu.to_nanos() as f64,
+ nanos: wu.as_nanos() as f64,
})
.unwrap();
}
@@ -206,7 +247,7 @@ where
.iter()
.map(|iters| {
b.iters = *iters;
- (*f)(&mut b, parameter);
+ (*f)(&mut b, black_box(parameter));
b.assert_iterated();
m.to_f64(&b.value)
})
@@ -226,14 +267,14 @@ where
let mut total_iters = 0;
let mut elapsed_time = Duration::from_millis(0);
loop {
- (*f)(&mut b, parameter);
+ (*f)(&mut b, black_box(parameter));
b.assert_iterated();
total_iters += b.iters;
elapsed_time += b.elapsed_time;
if elapsed_time > how_long {
- return (elapsed_time.to_nanos(), total_iters);
+ return (elapsed_time.as_nanos() as u64, total_iters);
}
b.iters = b.iters.wrapping_mul(2);
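Editor's note: a simplified, hedged sketch of the quick-mode loop added in routine.rs above: keep doubling the iteration count until two consecutive samples fit a linear model closely enough, or the time budget runs out. Here run(iters) stands in for one Routine::bench call and returns the total time for iters iterations in nanoseconds; the dummy workload in main() is ours.

    use std::time::{Duration, Instant};

    fn quick_estimate(mut run: impl FnMut(u64) -> f64, target_rel_stdev: f64) -> (u64, f64) {
        let minimum_bench_duration = Duration::from_millis(100);
        let maximum_bench_duration = Duration::from_secs(5);
        let start = Instant::now();
        let sq = |v: f64| v * v;

        let mut n: u64 = 1;
        let mut t_prev = run(n);
        loop {
            let t_now = run(n * 2);
            // Estimate the n-iteration time under a linear model and its crude spread.
            let t = (t_prev + 2.0 * t_now) / 5.0;
            let stdev = (sq(t_prev - t) + sq(t_now - 2.0 * t)).sqrt();
            let elapsed = start.elapsed();
            if (stdev < target_rel_stdev * t && elapsed > minimum_bench_duration)
                || elapsed > maximum_bench_duration
            {
                return (n * 2, t_now);
            }
            n *= 2;
            t_prev = t_now;
        }
    }

    fn main() {
        // Dummy workload that "takes" 200 ns per iteration.
        let run = |iters: u64| {
            let d = Duration::from_nanos(200 * iters);
            std::thread::sleep(d);
            d.as_nanos() as f64
        };
        let (iters, last_sample_ns) = quick_estimate(run, 0.05);
        println!("stopped at {} iterations, last sample {} ns", iters, last_sample_ns);
    }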
diff --git a/src/stats/bivariate/mod.rs b/src/stats/bivariate/mod.rs
index d1e8df7..2351c9e 100755
--- a/src/stats/bivariate/mod.rs
+++ b/src/stats/bivariate/mod.rs
@@ -8,6 +8,7 @@ use crate::stats::bivariate::resamples::Resamples;
use crate::stats::float::Float;
use crate::stats::tuple::{Tuple, TupledDistributionsBuilder};
use crate::stats::univariate::Sample;
+#[cfg(feature = "rayon")]
use rayon::iter::{IntoParallelIterator, ParallelIterator};
/// Bivariate `(X, Y)` data
@@ -72,27 +73,41 @@ where
T::Distributions: Send,
T::Builder: Send,
{
- (0..nresamples)
- .into_par_iter()
- .map_init(
- || Resamples::new(*self),
- |resamples, _| statistic(resamples.next()),
- )
- .fold(
- || T::Builder::new(0),
- |mut sub_distributions, sample| {
+ #[cfg(feature = "rayon")]
+ {
+ (0..nresamples)
+ .into_par_iter()
+ .map_init(
+ || Resamples::new(*self),
+ |resamples, _| statistic(resamples.next()),
+ )
+ .fold(
+ || T::Builder::new(0),
+ |mut sub_distributions, sample| {
+ sub_distributions.push(sample);
+ sub_distributions
+ },
+ )
+ .reduce(
+ || T::Builder::new(0),
+ |mut a, mut b| {
+ a.extend(&mut b);
+ a
+ },
+ )
+ .complete()
+ }
+ #[cfg(not(feature = "rayon"))]
+ {
+ let mut resamples = Resamples::new(*self);
+ (0..nresamples)
+ .map(|_| statistic(resamples.next()))
+ .fold(T::Builder::new(0), |mut sub_distributions, sample| {
sub_distributions.push(sample);
sub_distributions
- },
- )
- .reduce(
- || T::Builder::new(0),
- |mut a, mut b| {
- a.extend(&mut b);
- a
- },
- )
- .complete()
+ })
+ .complete()
+ }
}
/// Returns a view into the `X` data
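Editor's note: the stats hunks above repeat the same shape of change, gating rayon behind a cargo feature with a sequential fallback. A hedged, minimal sketch of that pattern on a toy computation; sum_of_squares is ours, not part of the crate.

    #[cfg(feature = "rayon")]
    use rayon::prelude::*;

    fn sum_of_squares(xs: &[f64]) -> f64 {
        // Parallel when the "rayon" feature is enabled, plain iterator otherwise.
        #[cfg(feature = "rayon")]
        {
            xs.par_iter().map(|x| x * x).sum()
        }
        #[cfg(not(feature = "rayon"))]
        {
            xs.iter().map(|x| x * x).sum()
        }
    }

    fn main() {
        println!("{}", sum_of_squares(&[1.0, 2.0, 3.0]));
    }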
diff --git a/src/stats/univariate/kde/mod.rs b/src/stats/univariate/kde/mod.rs
index 9b0836d..c54de55 100755
--- a/src/stats/univariate/kde/mod.rs
+++ b/src/stats/univariate/kde/mod.rs
@@ -5,6 +5,7 @@ pub mod kernel;
use self::kernel::Kernel;
use crate::stats::float::Float;
use crate::stats::univariate::Sample;
+#[cfg(feature = "rayon")]
use rayon::prelude::*;
/// Univariate kernel density estimator
@@ -42,8 +43,13 @@ where
///
 /// - Multithreaded
pub fn map(&self, xs: &[A]) -> Box<[A]> {
- xs.par_iter()
- .map(|&x| self.estimate(x))
+ #[cfg(feature = "rayon")]
+ let iter = xs.par_iter();
+
+ #[cfg(not(feature = "rayon"))]
+ let iter = xs.iter();
+
+ iter.map(|&x| self.estimate(x))
.collect::<Vec<_>>()
.into_boxed_slice()
}
diff --git a/src/stats/univariate/mixed.rs b/src/stats/univariate/mixed.rs
index 5c0a59f..d6b845d 100755
--- a/src/stats/univariate/mixed.rs
+++ b/src/stats/univariate/mixed.rs
@@ -4,6 +4,7 @@ use crate::stats::float::Float;
use crate::stats::tuple::{Tuple, TupledDistributionsBuilder};
use crate::stats::univariate::Resamples;
use crate::stats::univariate::Sample;
+#[cfg(feature = "rayon")]
use rayon::prelude::*;
/// Performs a *mixed* two-sample bootstrap
@@ -27,31 +28,51 @@ where
c.extend_from_slice(b);
let c = Sample::new(&c);
- (0..nresamples)
- .into_par_iter()
- .map_init(
- || Resamples::new(c),
- |resamples, _| {
+ #[cfg(feature = "rayon")]
+ {
+ (0..nresamples)
+ .into_par_iter()
+ .map_init(
+ || Resamples::new(c),
+ |resamples, _| {
+ let resample = resamples.next();
+ let a: &Sample<A> = Sample::new(&resample[..n_a]);
+ let b: &Sample<A> = Sample::new(&resample[n_a..]);
+
+ statistic(a, b)
+ },
+ )
+ .fold(
+ || T::Builder::new(0),
+ |mut sub_distributions, sample| {
+ sub_distributions.push(sample);
+ sub_distributions
+ },
+ )
+ .reduce(
+ || T::Builder::new(0),
+ |mut a, mut b| {
+ a.extend(&mut b);
+ a
+ },
+ )
+ .complete()
+ }
+ #[cfg(not(feature = "rayon"))]
+ {
+ let mut resamples = Resamples::new(c);
+ (0..nresamples)
+ .map(|_| {
let resample = resamples.next();
let a: &Sample<A> = Sample::new(&resample[..n_a]);
let b: &Sample<A> = Sample::new(&resample[n_a..]);
statistic(a, b)
- },
- )
- .fold(
- || T::Builder::new(0),
- |mut sub_distributions, sample| {
+ })
+ .fold(T::Builder::new(0), |mut sub_distributions, sample| {
sub_distributions.push(sample);
sub_distributions
- },
- )
- .reduce(
- || T::Builder::new(0),
- |mut a, mut b| {
- a.extend(&mut b);
- a
- },
- )
- .complete()
+ })
+ .complete()
+ }
}
diff --git a/src/stats/univariate/mod.rs b/src/stats/univariate/mod.rs
index 8dfb5f8..5b22127 100755
--- a/src/stats/univariate/mod.rs
+++ b/src/stats/univariate/mod.rs
@@ -11,6 +11,7 @@ pub mod outliers;
use crate::stats::float::Float;
use crate::stats::tuple::{Tuple, TupledDistributionsBuilder};
+#[cfg(feature = "rayon")]
use rayon::prelude::*;
use std::cmp;
@@ -42,11 +43,42 @@ where
let nresamples_sqrt = (nresamples as f64).sqrt().ceil() as usize;
let per_chunk = (nresamples + nresamples_sqrt - 1) / nresamples_sqrt;
- (0..nresamples_sqrt)
- .into_par_iter()
- .map_init(
- || (Resamples::new(a), Resamples::new(b)),
- |(a_resamples, b_resamples), i| {
+ #[cfg(feature = "rayon")]
+ {
+ (0..nresamples_sqrt)
+ .into_par_iter()
+ .map_init(
+ || (Resamples::new(a), Resamples::new(b)),
+ |(a_resamples, b_resamples), i| {
+ let start = i * per_chunk;
+ let end = cmp::min((i + 1) * per_chunk, nresamples);
+ let a_resample = a_resamples.next();
+
+ let mut sub_distributions: T::Builder =
+ TupledDistributionsBuilder::new(end - start);
+
+ for _ in start..end {
+ let b_resample = b_resamples.next();
+ sub_distributions.push(statistic(a_resample, b_resample));
+ }
+ sub_distributions
+ },
+ )
+ .reduce(
+ || T::Builder::new(0),
+ |mut a, mut b| {
+ a.extend(&mut b);
+ a
+ },
+ )
+ .complete()
+ }
+ #[cfg(not(feature = "rayon"))]
+ {
+ let mut a_resamples = Resamples::new(a);
+ let mut b_resamples = Resamples::new(b);
+ (0..nresamples_sqrt)
+ .map(|i| {
let start = i * per_chunk;
let end = cmp::min((i + 1) * per_chunk, nresamples);
let a_resample = a_resamples.next();
@@ -59,14 +91,11 @@ where
sub_distributions.push(statistic(a_resample, b_resample));
}
sub_distributions
- },
- )
- .reduce(
- || T::Builder::new(0),
- |mut a, mut b| {
+ })
+ .fold(T::Builder::new(0), |mut a, mut b| {
a.extend(&mut b);
a
- },
- )
- .complete()
+ })
+ .complete()
+ }
}
diff --git a/src/stats/univariate/percentiles.rs b/src/stats/univariate/percentiles.rs
index be6bcf3..39def18 100755
--- a/src/stats/univariate/percentiles.rs
+++ b/src/stats/univariate/percentiles.rs
@@ -54,27 +54,23 @@ where
/// Returns the interquartile range
pub fn iqr(&self) -> A {
- unsafe {
- let q1 = self.at_unchecked(A::cast(25));
- let q3 = self.at_unchecked(A::cast(75));
+ let q1 = self.at(A::cast(25));
+ let q3 = self.at(A::cast(75));
- q3 - q1
- }
+ q3 - q1
}
/// Returns the 50th percentile
pub fn median(&self) -> A {
- unsafe { self.at_unchecked(A::cast(50)) }
+ self.at(A::cast(50))
}
/// Returns the 25th, 50th and 75th percentiles
pub fn quartiles(&self) -> (A, A, A) {
- unsafe {
- (
- self.at_unchecked(A::cast(25)),
- self.at_unchecked(A::cast(50)),
- self.at_unchecked(A::cast(75)),
- )
- }
+ (
+ self.at(A::cast(25)),
+ self.at(A::cast(50)),
+ self.at(A::cast(75)),
+ )
}
}
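Editor's note: the percentiles hunk above swaps unsafe at_unchecked calls for the safe at(). A hedged sketch of linear-interpolation percentiles over a pre-sorted slice, the kind of lookup that iqr(), median() and quartiles() are built on (not the crate's exact code).

    fn percentile(sorted: &[f64], p: f64) -> f64 {
        assert!(!sorted.is_empty() && (0.0..=100.0).contains(&p));
        let rank = p / 100.0 * (sorted.len() - 1) as f64;
        let (lo, hi) = (rank.floor() as usize, rank.ceil() as usize);
        // Interpolate between the two surrounding order statistics.
        sorted[lo] + (sorted[hi] - sorted[lo]) * (rank - lo as f64)
    }

    fn main() {
        let xs = [1.0, 2.0, 3.0, 4.0, 5.0];
        let (q1, median, q3) = (percentile(&xs, 25.0), percentile(&xs, 50.0), percentile(&xs, 75.0));
        println!("q1 = {}, median = {}, q3 = {}, iqr = {}", q1, median, q3, q3 - q1);
    }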
diff --git a/src/stats/univariate/sample.rs b/src/stats/univariate/sample.rs
index 8f10db7..6fbb4fb 100755
--- a/src/stats/univariate/sample.rs
+++ b/src/stats/univariate/sample.rs
@@ -4,6 +4,7 @@ use crate::stats::float::Float;
use crate::stats::tuple::{Tuple, TupledDistributionsBuilder};
use crate::stats::univariate::Percentiles;
use crate::stats::univariate::Resamples;
+#[cfg(feature = "rayon")]
use rayon::prelude::*;
/// A collection of data points drawn from a population
@@ -12,6 +13,7 @@ use rayon::prelude::*;
///
/// - The sample contains at least 2 data points
/// - The sample contains no `NaN`s
+#[repr(transparent)]
pub struct Sample<A>([A]);
// TODO(rust-lang/rfcs#735) move this `impl` into a private percentiles module
@@ -127,7 +129,10 @@ where
}
let mut v = self.to_vec().into_boxed_slice();
+ #[cfg(feature = "rayon")]
v.par_sort_unstable_by(cmp);
+ #[cfg(not(feature = "rayon"))]
+ v.sort_unstable_by(cmp);
// NB :-1: to intra-crate privacy rules
unsafe { mem::transmute(v) }
@@ -206,27 +211,41 @@ where
T::Distributions: Send,
T::Builder: Send,
{
- (0..nresamples)
- .into_par_iter()
- .map_init(
- || Resamples::new(self),
- |resamples, _| statistic(resamples.next()),
- )
- .fold(
- || T::Builder::new(0),
- |mut sub_distributions, sample| {
+ #[cfg(feature = "rayon")]
+ {
+ (0..nresamples)
+ .into_par_iter()
+ .map_init(
+ || Resamples::new(self),
+ |resamples, _| statistic(resamples.next()),
+ )
+ .fold(
+ || T::Builder::new(0),
+ |mut sub_distributions, sample| {
+ sub_distributions.push(sample);
+ sub_distributions
+ },
+ )
+ .reduce(
+ || T::Builder::new(0),
+ |mut a, mut b| {
+ a.extend(&mut b);
+ a
+ },
+ )
+ .complete()
+ }
+ #[cfg(not(feature = "rayon"))]
+ {
+ let mut resamples = Resamples::new(self);
+ (0..nresamples)
+ .map(|_| statistic(resamples.next()))
+ .fold(T::Builder::new(0), |mut sub_distributions, sample| {
sub_distributions.push(sample);
sub_distributions
- },
- )
- .reduce(
- || T::Builder::new(0),
- |mut a, mut b| {
- a.extend(&mut b);
- a
- },
- )
- .complete()
+ })
+ .complete()
+ }
}
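Editor's note: the non-rayon fallback added to Sample::bootstrap above runs the resampling loop on a single thread. A hedged, self-contained sketch of that shape of work for a bootstrap of the sample mean; the toy xorshift RNG stands in for the crate's resampler and is ours.

    fn mean(xs: &[f64]) -> f64 {
        xs.iter().sum::<f64>() / xs.len() as f64
    }

    fn bootstrap_means(xs: &[f64], nresamples: usize, mut seed: u64) -> Vec<f64> {
        // Tiny xorshift64 generator used only to pick resample indices.
        let mut next = move || {
            seed ^= seed << 13;
            seed ^= seed >> 7;
            seed ^= seed << 17;
            seed
        };
        (0..nresamples)
            .map(|_| {
                let resample: Vec<f64> =
                    (0..xs.len()).map(|_| xs[next() as usize % xs.len()]).collect();
                mean(&resample)
            })
            .collect()
    }

    fn main() {
        let sample = [1.0, 2.0, 3.0, 4.0, 5.0];
        let dist = bootstrap_means(&sample, 1_000, 0x9E37_79B9_7F4A_7C15);
        println!("bootstrap mean of means: {:.3}", mean(&dist));
    }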
#[cfg(test)]
diff --git a/tests/criterion_tests.rs b/tests/criterion_tests.rs
index cca448e..8c3c81a 100755
--- a/tests/criterion_tests.rs
+++ b/tests/criterion_tests.rs
@@ -1,11 +1,10 @@
-#![allow(deprecated)]
-
use criterion;
use serde_json;
+#[cfg(feature = "plotters")]
+use criterion::SamplingMode;
use criterion::{
- criterion_group, criterion_main, profiler::Profiler, BatchSize, Benchmark, BenchmarkId,
- Criterion, Fun, ParameterizedBenchmark, SamplingMode, Throughput,
+ criterion_group, criterion_main, profiler::Profiler, BatchSize, BenchmarkId, Criterion,
};
use serde_json::value::Value;
use std::cell::{Cell, RefCell};
@@ -33,7 +32,6 @@ fn short_benchmark(dir: &TempDir) -> Criterion {
.warm_up_time(Duration::from_millis(250))
.measurement_time(Duration::from_millis(500))
.nresamples(2000)
- .with_plots()
}
#[derive(Clone)]
@@ -75,10 +73,12 @@ fn verify_json(dir: &PathBuf, path: &str) {
serde_json::from_reader::<File, Value>(f).unwrap();
}
+#[cfg(feature = "html_reports")]
fn verify_svg(dir: &PathBuf, path: &str) {
verify_file(dir, path);
}
+#[cfg(feature = "html_reports")]
fn verify_html(dir: &PathBuf, path: &str) {
verify_file(dir, path);
}
@@ -88,6 +88,7 @@ fn verify_stats(dir: &PathBuf, baseline: &str) {
verify_json(&dir, &format!("{}/sample.json", baseline));
verify_json(&dir, &format!("{}/tukey.json", baseline));
verify_json(&dir, &format!("{}/benchmark.json", baseline));
+ #[cfg(feature = "csv_output")]
verify_file(&dir, &format!("{}/raw.csv", baseline));
}
@@ -164,7 +165,7 @@ fn test_retain_baseline() {
let pre_modified = latest_modified(&dir.path().join("test_retain_baseline/some-baseline"));
short_benchmark(&dir)
- .retain_baseline("some-baseline".to_owned())
+ .retain_baseline("some-baseline".to_owned(), true)
.bench_function("test_retain_baseline", |b| b.iter(|| 10));
let post_modified = latest_modified(&dir.path().join("test_retain_baseline/some-baseline"));
@@ -174,11 +175,18 @@ fn test_retain_baseline() {
#[test]
#[should_panic(expected = "Baseline 'some-baseline' must exist before comparison is allowed")]
-fn test_compare_baseline() {
- // Initial benchmark to populate
+fn test_compare_baseline_strict_panics_when_missing_baseline() {
let dir = temp_dir();
short_benchmark(&dir)
- .retain_baseline("some-baseline".to_owned())
+ .retain_baseline("some-baseline".to_owned(), true)
+ .bench_function("test_compare_baseline", |b| b.iter(|| 10));
+}
+
+#[test]
+fn test_compare_baseline_lenient_when_missing_baseline() {
+ let dir = temp_dir();
+ short_benchmark(&dir)
+ .retain_baseline("some-baseline".to_owned(), false)
.bench_function("test_compare_baseline", |b| b.iter(|| 10));
}
@@ -251,27 +259,6 @@ fn test_bench_function() {
}
#[test]
-fn test_bench_functions() {
- let dir = temp_dir();
- let function_1 = Fun::new("times 10", |b, i| b.iter(|| *i * 10));
- let function_2 = Fun::new("times 20", |b, i| b.iter(|| *i * 20));
-
- let functions = vec![function_1, function_2];
-
- short_benchmark(&dir).bench_functions("test_bench_functions", functions, 20);
-}
-
-#[test]
-fn test_bench_function_over_inputs() {
- let dir = temp_dir();
- short_benchmark(&dir).bench_function_over_inputs(
- "test_bench_function_over_inputs",
- |b, i| b.iter(|| *i * 10),
- vec![100, 1000],
- );
-}
-
-#[test]
fn test_filtering() {
let dir = temp_dir();
let counter = Counter::default();
@@ -288,82 +275,62 @@ fn test_filtering() {
#[test]
fn test_timing_loops() {
let dir = temp_dir();
- short_benchmark(&dir).bench(
- "test_timing_loops",
- Benchmark::new("iter", |b| b.iter(|| 10))
- .with_function("iter_with_setup", |b| {
- b.iter_with_setup(|| vec![10], |v| v[0])
- })
- .with_function("iter_with_large_setup", |b| {
- b.iter_with_large_setup(|| vec![10], |v| v[0])
- })
- .with_function("iter_with_large_drop", |b| {
- b.iter_with_large_drop(|| vec![10; 100])
- })
- .with_function("iter_batched_small", |b| {
- b.iter_batched(|| vec![10], |v| v[0], BatchSize::SmallInput)
- })
- .with_function("iter_batched_large", |b| {
- b.iter_batched(|| vec![10], |v| v[0], BatchSize::LargeInput)
- })
- .with_function("iter_batched_per_iteration", |b| {
- b.iter_batched(|| vec![10], |v| v[0], BatchSize::PerIteration)
- })
- .with_function("iter_batched_one_batch", |b| {
- b.iter_batched(|| vec![10], |v| v[0], BatchSize::NumBatches(1))
- })
- .with_function("iter_batched_10_iterations", |b| {
- b.iter_batched(|| vec![10], |v| v[0], BatchSize::NumIterations(10))
- })
- .with_function("iter_batched_ref_small", |b| {
- b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::SmallInput)
- })
- .with_function("iter_batched_ref_large", |b| {
- b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::LargeInput)
- })
- .with_function("iter_batched_ref_per_iteration", |b| {
- b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::PerIteration)
- })
- .with_function("iter_batched_ref_one_batch", |b| {
- b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::NumBatches(1))
- })
- .with_function("iter_batched_ref_10_iterations", |b| {
- b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::NumIterations(10))
- }),
- );
-}
-
-#[test]
-fn test_throughput() {
- let dir = temp_dir();
- short_benchmark(&dir).bench(
- "test_throughput_bytes",
- Benchmark::new("strlen", |b| b.iter(|| "foo".len())).throughput(Throughput::Bytes(3)),
- );
- short_benchmark(&dir).bench(
- "test_throughput_elems",
- ParameterizedBenchmark::new(
- "veclen",
- |b, v| b.iter(|| v.len()),
- vec![vec![1], vec![1, 2, 3]],
- )
- .throughput(|v| Throughput::Elements(v.len() as u64)),
- );
+ let mut c = short_benchmark(&dir);
+ let mut group = c.benchmark_group("test_timing_loops");
+ group.bench_function("iter_with_setup", |b| {
+ b.iter_with_setup(|| vec![10], |v| v[0])
+ });
+ group.bench_function("iter_with_large_setup", |b| {
+ b.iter_batched(|| vec![10], |v| v[0], BatchSize::NumBatches(1))
+ });
+ group.bench_function("iter_with_large_drop", |b| {
+ b.iter_with_large_drop(|| vec![10; 100])
+ });
+ group.bench_function("iter_batched_small", |b| {
+ b.iter_batched(|| vec![10], |v| v[0], BatchSize::SmallInput)
+ });
+ group.bench_function("iter_batched_large", |b| {
+ b.iter_batched(|| vec![10], |v| v[0], BatchSize::LargeInput)
+ });
+ group.bench_function("iter_batched_per_iteration", |b| {
+ b.iter_batched(|| vec![10], |v| v[0], BatchSize::PerIteration)
+ });
+ group.bench_function("iter_batched_one_batch", |b| {
+ b.iter_batched(|| vec![10], |v| v[0], BatchSize::NumBatches(1))
+ });
+ group.bench_function("iter_batched_10_iterations", |b| {
+ b.iter_batched(|| vec![10], |v| v[0], BatchSize::NumIterations(10))
+ });
+ group.bench_function("iter_batched_ref_small", |b| {
+ b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::SmallInput)
+ });
+ group.bench_function("iter_batched_ref_large", |b| {
+ b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::LargeInput)
+ });
+ group.bench_function("iter_batched_ref_per_iteration", |b| {
+ b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::PerIteration)
+ });
+ group.bench_function("iter_batched_ref_one_batch", |b| {
+ b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::NumBatches(1))
+ });
+ group.bench_function("iter_batched_ref_10_iterations", |b| {
+ b.iter_batched_ref(|| vec![10], |v| v[0], BatchSize::NumIterations(10))
+ });
}
// Verify that all expected output files are present
+#[cfg(feature = "plotters")]
#[test]
fn test_output_files() {
let tempdir = temp_dir();
// Run benchmarks twice to produce comparisons
for _ in 0..2 {
- short_benchmark(&tempdir).bench(
- "test_output",
- Benchmark::new("output_1", |b| b.iter(|| 10))
- .with_function("output_2", |b| b.iter(|| 20))
- .with_function("output_\\/*\"?", |b| b.iter(|| 30))
- .sampling_mode(SamplingMode::Linear),
- );
+ let mut c = short_benchmark(&tempdir);
+ let mut group = c.benchmark_group("test_output");
+ group.sampling_mode(SamplingMode::Linear);
+ group.bench_function("output_1", |b| b.iter(|| 10));
+ group.bench_function("output_2", |b| b.iter(|| 20));
+ group.bench_function("output_\\/*\"?", |b| b.iter(|| 30));
}
// For each benchmark, assert that the expected files are present.
@@ -379,7 +346,8 @@ fn test_output_files() {
verify_stats(&dir, "base");
verify_json(&dir, "change/estimates.json");
- if short_benchmark(&tempdir).can_plot() {
+ #[cfg(feature = "html_reports")]
+ {
verify_svg(&dir, "report/MAD.svg");
verify_svg(&dir, "report/mean.svg");
verify_svg(&dir, "report/median.svg");
@@ -402,8 +370,9 @@ fn test_output_files() {
}
}
- // Check for overall report files
- if short_benchmark(&tempdir).can_plot() {
+ #[cfg(feature = "html_reports")]
+ {
+ // Check for overall report files
let dir = tempdir.path().join("test_output");
verify_svg(&dir, "report/violin.svg");
@@ -412,22 +381,24 @@ fn test_output_files() {
// Run the final summary process and check for the report that produces
short_benchmark(&tempdir).final_summary();
- if short_benchmark(&tempdir).can_plot() {
- let dir = tempdir.path().to_owned();
+ #[cfg(feature = "html_reports")]
+ {
+ let dir = tempdir.path().to_owned();
verify_html(&dir, "report/index.html");
}
}
+#[cfg(feature = "plotters")]
#[test]
fn test_output_files_flat_sampling() {
let tempdir = temp_dir();
// Run benchmark twice to produce comparisons
for _ in 0..2 {
- short_benchmark(&tempdir).bench(
- "test_output",
- Benchmark::new("output_flat", |b| b.iter(|| 10)).sampling_mode(SamplingMode::Flat),
- );
+ let mut c = short_benchmark(&tempdir);
+ let mut group = c.benchmark_group("test_output");
+ group.sampling_mode(SamplingMode::Flat);
+ group.bench_function("output_flat", |b| b.iter(|| 10));
}
let dir = tempdir.path().join("test_output/output_flat");
@@ -436,7 +407,8 @@ fn test_output_files_flat_sampling() {
verify_stats(&dir, "base");
verify_json(&dir, "change/estimates.json");
- if short_benchmark(&tempdir).can_plot() {
+ #[cfg(feature = "html_reports")]
+ {
verify_svg(&dir, "report/MAD.svg");
verify_svg(&dir, "report/mean.svg");
verify_svg(&dir, "report/median.svg");
@@ -462,7 +434,7 @@ fn test_output_files_flat_sampling() {
#[should_panic(expected = "Benchmark function must call Bencher::iter or related method.")]
fn test_bench_with_no_iteration_panics() {
let dir = temp_dir();
- short_benchmark(&dir).bench("test_no_iter", Benchmark::new("no_iter", |_b| {}));
+ short_benchmark(&dir).bench_function("no_iter", |_b| {});
}
#[test]