Diffstat (limited to 'test')
47 files changed, 2800 insertions, 486 deletions
diff --git a/test/AssemblyTests.cmake b/test/AssemblyTests.cmake
index 3d07858..c43c711 100644
--- a/test/AssemblyTests.cmake
+++ b/test/AssemblyTests.cmake
@@ -1,3 +1,23 @@
+set(CLANG_SUPPORTED_VERSION "5.0.0")
+set(GCC_SUPPORTED_VERSION "5.5.0")
+
+if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+  if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL ${CLANG_SUPPORTED_VERSION})
+    message (WARNING
+      "Unsupported Clang version " ${CMAKE_CXX_COMPILER_VERSION}
+      ". Expected is " ${CLANG_SUPPORTED_VERSION}
+      ". Assembly tests may be broken.")
+  endif()
+elseif(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
+  if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL ${GCC_SUPPORTED_VERSION})
+    message (WARNING
+      "Unsupported GCC version " ${CMAKE_CXX_COMPILER_VERSION}
+      ". Expected is " ${GCC_SUPPORTED_VERSION}
+      ". Assembly tests may be broken.")
+  endif()
+else()
+  message (WARNING "Unsupported compiler. Assembly tests may be broken.")
+endif()
 
 include(split_list)
 
@@ -23,6 +43,7 @@ string(TOUPPER "${CMAKE_CXX_COMPILER_ID}" ASM_TEST_COMPILER)
 macro(add_filecheck_test name)
   cmake_parse_arguments(ARG "" "" "CHECK_PREFIXES" ${ARGV})
   add_library(${name} OBJECT ${name}.cc)
+  target_link_libraries(${name} PRIVATE benchmark::benchmark)
   set_target_properties(${name} PROPERTIES COMPILE_FLAGS "-S ${ASM_TEST_FLAGS}")
   set(ASM_OUTPUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/${name}.s")
   add_custom_target(copy_${name} ALL
diff --git a/test/BUILD b/test/BUILD
--- a/test/BUILD
+++ b/test/BUILD
@@ -1,8 +1,18 @@
+load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
+
+platform(
+    name = "windows",
+    constraint_values = [
+        "@platforms//os:windows",
+    ],
+)
+
 TEST_COPTS = [
     "-pedantic",
     "-pedantic-errors",
     "-std=c++11",
     "-Wall",
+    "-Wconversion",
     "-Wextra",
     "-Wshadow",
     # "-Wshorten-64-to-32",
@@ -10,64 +20,108 @@ TEST_COPTS = [
     "-fstrict-aliasing",
 ]
 
-PER_SRC_COPTS = ({
-    "cxx03_test.cc": ["-std=c++03"],
-    # Some of the issues with DoNotOptimize only occur when optimization is enabled
+# Some of the issues with DoNotOptimize only occur when optimization is enabled
+PER_SRC_COPTS = {
     "donotoptimize_test.cc": ["-O3"],
-})
+}
 
-TEST_ARGS = ["--benchmark_min_time=0.01"]
+TEST_ARGS = ["--benchmark_min_time=0.01s"]
 
-PER_SRC_TEST_ARGS = ({
+PER_SRC_TEST_ARGS = {
     "user_counters_tabular_test.cc": ["--benchmark_counters_tabular=true"],
-})
-
-load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
+    "repetitions_test.cc": [" --benchmark_repetitions=3"],
+    "spec_arg_test.cc": ["--benchmark_filter=BM_NotChosen"],
+    "spec_arg_verbosity_test.cc": ["--v=42"],
+}
 
 cc_library(
     name = "output_test_helper",
     testonly = 1,
     srcs = ["output_test_helper.cc"],
     hdrs = ["output_test.h"],
-    copts = TEST_COPTS,
+    copts = select({
+        "//:windows": [],
+        "//conditions:default": TEST_COPTS,
+    }),
     deps = [
         "//:benchmark",
         "//:benchmark_internal_headers",
    ],
 )
 
+# Tests that use gtest. These rely on `gtest_main`.
+[
+    cc_test(
+        name = test_src[:-len(".cc")],
+        size = "small",
+        srcs = [test_src],
+        copts = select({
+            "//:windows": [],
+            "//conditions:default": TEST_COPTS,
+        }) + PER_SRC_COPTS.get(test_src, []),
+        deps = [
+            "//:benchmark",
+            "//:benchmark_internal_headers",
+            "@com_google_googletest//:gtest",
+            "@com_google_googletest//:gtest_main",
+        ],
+    )
+    for test_src in glob(["*_gtest.cc"])
+]
+
+# Tests that do not use gtest. These have their own `main` defined.
 [
     cc_test(
         name = test_src[:-len(".cc")],
         size = "small",
         srcs = [test_src],
         args = TEST_ARGS + PER_SRC_TEST_ARGS.get(test_src, []),
-        copts = TEST_COPTS + PER_SRC_COPTS.get(test_src, []),
+        copts = select({
+            "//:windows": [],
+            "//conditions:default": TEST_COPTS,
+        }) + PER_SRC_COPTS.get(test_src, []),
         deps = [
             ":output_test_helper",
             "//:benchmark",
             "//:benchmark_internal_headers",
-            "@com_google_googletest//:gtest",
-        ] + (
-            ["@com_google_googletest//:gtest_main"] if (test_src[-len("gtest.cc"):] == "gtest.cc") else []
-        ),
+        ],
         # FIXME: Add support for assembly tests to bazel.
         # See Issue #556
         # https://github.com/google/benchmark/issues/556
     )
     for test_src in glob(
-        ["*test.cc"],
+        ["*_test.cc"],
         exclude = [
             "*_assembly_test.cc",
+            "cxx03_test.cc",
             "link_main_test.cc",
         ],
     )
 ]
 
 cc_test(
+    name = "cxx03_test",
+    size = "small",
+    srcs = ["cxx03_test.cc"],
+    copts = TEST_COPTS + ["-std=c++03"],
+    target_compatible_with = select({
+        "//:windows": ["@platforms//:incompatible"],
+        "//conditions:default": [],
+    }),
+    deps = [
+        ":output_test_helper",
+        "//:benchmark",
+        "//:benchmark_internal_headers",
+    ],
+)
+
+cc_test(
     name = "link_main_test",
     size = "small",
     srcs = ["link_main_test.cc"],
-    copts = TEST_COPTS,
+    copts = select({
+        "//:windows": [],
+        "//conditions:default": TEST_COPTS,
+    }),
     deps = ["//:benchmark_main"],
 )
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index c1a3a3f..fd88131 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -1,5 +1,7 @@
 # Enable the tests
 
+set(THREADS_PREFER_PTHREAD_FLAG ON)
+
 find_package(Threads REQUIRED)
 include(CheckCXXCompilerFlag)
 
@@ -22,6 +24,10 @@ if( NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG" )
   endforeach()
 endif()
 
+if (NOT BUILD_SHARED_LIBS)
+  add_definitions(-DBENCHMARK_STATIC_DEFINE)
+endif()
+
 check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG)
 set(BENCHMARK_O3_FLAG "")
 if (BENCHMARK_HAS_O3_FLAG)
@@ -35,10 +41,14 @@ if (DEFINED BENCHMARK_CXX_LINKER_FLAGS)
 endif()
 
 add_library(output_test_helper STATIC output_test_helper.cc output_test.h)
+target_link_libraries(output_test_helper PRIVATE benchmark::benchmark)
 
 macro(compile_benchmark_test name)
   add_executable(${name} "${name}.cc")
   target_link_libraries(${name} benchmark::benchmark ${CMAKE_THREAD_LIBS_INIT})
+  if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "NVHPC")
+    target_compile_options( ${name} PRIVATE --diag_suppress partial_override )
+  endif()
 endmacro(compile_benchmark_test)
 
 macro(compile_benchmark_test_with_main name)
@@ -48,20 +58,35 @@ endmacro(compile_benchmark_test_with_main)
 
 macro(compile_output_test name)
   add_executable(${name} "${name}.cc" output_test.h)
-  target_link_libraries(${name} output_test_helper benchmark::benchmark
+  target_link_libraries(${name} output_test_helper benchmark::benchmark_main
                         ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
 endmacro(compile_output_test)
 
 # Demonstration executable
 compile_benchmark_test(benchmark_test)
-add_test(NAME benchmark COMMAND benchmark_test --benchmark_min_time=0.01)
+add_test(NAME benchmark COMMAND benchmark_test --benchmark_min_time=0.01s)
+
+compile_benchmark_test(spec_arg_test)
+add_test(NAME spec_arg COMMAND spec_arg_test --benchmark_filter=BM_NotChosen)
+
+compile_benchmark_test(spec_arg_verbosity_test)
+add_test(NAME spec_arg_verbosity COMMAND spec_arg_verbosity_test --v=42)
+
+compile_benchmark_test(benchmark_setup_teardown_test)
+add_test(NAME benchmark_setup_teardown COMMAND benchmark_setup_teardown_test)
 
 compile_benchmark_test(filter_test)
 macro(add_filter_test name filter expect)
-  add_test(NAME ${name} COMMAND filter_test --benchmark_min_time=0.01 --benchmark_filter=${filter} ${expect})
+  add_test(NAME ${name} COMMAND filter_test --benchmark_min_time=0.01s --benchmark_filter=${filter} ${expect})
   add_test(NAME ${name}_list_only COMMAND filter_test --benchmark_list_tests --benchmark_filter=${filter} ${expect})
 endmacro(add_filter_test)
 
+compile_benchmark_test(benchmark_min_time_flag_time_test)
+add_test(NAME min_time_flag_time COMMAND benchmark_min_time_flag_time_test)
+
+compile_benchmark_test(benchmark_min_time_flag_iters_test)
+add_test(NAME min_time_flag_iters COMMAND benchmark_min_time_flag_iters_test)
+
 add_filter_test(filter_simple "Foo" 3)
 add_filter_test(filter_simple_negative "-Foo" 2)
 add_filter_test(filter_suffix "BM_.*" 4)
@@ -82,72 +107,83 @@ add_filter_test(filter_regex_end ".*Ba$" 1)
 add_filter_test(filter_regex_end_negative "-.*Ba$" 4)
 
 compile_benchmark_test(options_test)
-add_test(NAME options_benchmarks COMMAND options_test --benchmark_min_time=0.01)
+add_test(NAME options_benchmarks COMMAND options_test --benchmark_min_time=0.01s)
 
 compile_benchmark_test(basic_test)
-add_test(NAME basic_benchmark COMMAND basic_test --benchmark_min_time=0.01)
+add_test(NAME basic_benchmark COMMAND basic_test --benchmark_min_time=0.01s)
+
+compile_output_test(repetitions_test)
+add_test(NAME repetitions_benchmark COMMAND repetitions_test --benchmark_min_time=0.01s --benchmark_repetitions=3)
 
 compile_benchmark_test(diagnostics_test)
-add_test(NAME diagnostics_test COMMAND diagnostics_test --benchmark_min_time=0.01)
+add_test(NAME diagnostics_test COMMAND diagnostics_test --benchmark_min_time=0.01s)
 
 compile_benchmark_test(skip_with_error_test)
-add_test(NAME skip_with_error_test COMMAND skip_with_error_test --benchmark_min_time=0.01)
+add_test(NAME skip_with_error_test COMMAND skip_with_error_test --benchmark_min_time=0.01s)
 
 compile_benchmark_test(donotoptimize_test)
+# Enable errors for deprecated declarations (DoNotOptimize(Tp const& value)).
+check_cxx_compiler_flag(-Werror=deprecated-declarations BENCHMARK_HAS_DEPRECATED_DECLARATIONS_FLAG)
+if (BENCHMARK_HAS_DEPRECATED_DECLARATIONS_FLAG)
+  target_compile_options (donotoptimize_test PRIVATE "-Werror=deprecated-declarations")
+endif()
 # Some of the issues with DoNotOptimize only occur when optimization is enabled
 check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG)
 if (BENCHMARK_HAS_O3_FLAG)
   set_target_properties(donotoptimize_test PROPERTIES COMPILE_FLAGS "-O3")
 endif()
-add_test(NAME donotoptimize_test COMMAND donotoptimize_test --benchmark_min_time=0.01)
+add_test(NAME donotoptimize_test COMMAND donotoptimize_test --benchmark_min_time=0.01s)
 
 compile_benchmark_test(fixture_test)
-add_test(NAME fixture_test COMMAND fixture_test --benchmark_min_time=0.01)
+add_test(NAME fixture_test COMMAND fixture_test --benchmark_min_time=0.01s)
 
 compile_benchmark_test(register_benchmark_test)
-add_test(NAME register_benchmark_test COMMAND register_benchmark_test --benchmark_min_time=0.01)
+add_test(NAME register_benchmark_test COMMAND register_benchmark_test --benchmark_min_time=0.01s)
 
 compile_benchmark_test(map_test)
-add_test(NAME map_test COMMAND map_test --benchmark_min_time=0.01)
+add_test(NAME map_test COMMAND map_test --benchmark_min_time=0.01s)
 
 compile_benchmark_test(multiple_ranges_test)
-add_test(NAME multiple_ranges_test COMMAND multiple_ranges_test --benchmark_min_time=0.01)
+add_test(NAME multiple_ranges_test COMMAND multiple_ranges_test --benchmark_min_time=0.01s)
 
 compile_benchmark_test(args_product_test)
-add_test(NAME args_product_test COMMAND args_product_test --benchmark_min_time=0.01)
+add_test(NAME args_product_test COMMAND args_product_test --benchmark_min_time=0.01s)
 
 compile_benchmark_test_with_main(link_main_test)
-add_test(NAME link_main_test COMMAND link_main_test --benchmark_min_time=0.01)
+add_test(NAME link_main_test COMMAND link_main_test --benchmark_min_time=0.01s)
 
 compile_output_test(reporter_output_test)
-add_test(NAME reporter_output_test COMMAND reporter_output_test --benchmark_min_time=0.01)
+add_test(NAME reporter_output_test COMMAND reporter_output_test --benchmark_min_time=0.01s)
 
 compile_output_test(templated_fixture_test)
-add_test(NAME templated_fixture_test COMMAND templated_fixture_test --benchmark_min_time=0.01)
+add_test(NAME templated_fixture_test COMMAND templated_fixture_test --benchmark_min_time=0.01s)
 
 compile_output_test(user_counters_test)
-add_test(NAME user_counters_test COMMAND user_counters_test --benchmark_min_time=0.01)
+add_test(NAME user_counters_test COMMAND user_counters_test --benchmark_min_time=0.01s)
+
+compile_output_test(perf_counters_test)
+add_test(NAME perf_counters_test COMMAND perf_counters_test --benchmark_min_time=0.01s --benchmark_perf_counters=CYCLES,BRANCHES)
 
 compile_output_test(internal_threading_test)
-add_test(NAME internal_threading_test COMMAND internal_threading_test --benchmark_min_time=0.01)
+add_test(NAME internal_threading_test COMMAND internal_threading_test --benchmark_min_time=0.01s)
 
 compile_output_test(report_aggregates_only_test)
-add_test(NAME report_aggregates_only_test COMMAND report_aggregates_only_test --benchmark_min_time=0.01)
+add_test(NAME report_aggregates_only_test COMMAND report_aggregates_only_test --benchmark_min_time=0.01s)
 
 compile_output_test(display_aggregates_only_test)
-add_test(NAME display_aggregates_only_test COMMAND display_aggregates_only_test --benchmark_min_time=0.01)
+add_test(NAME display_aggregates_only_test COMMAND display_aggregates_only_test --benchmark_min_time=0.01s)
 compile_output_test(user_counters_tabular_test)
-add_test(NAME user_counters_tabular_test COMMAND user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01)
+add_test(NAME user_counters_tabular_test COMMAND user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01s)
 
 compile_output_test(user_counters_thousands_test)
-add_test(NAME user_counters_thousands_test COMMAND user_counters_thousands_test --benchmark_min_time=0.01)
+add_test(NAME user_counters_thousands_test COMMAND user_counters_thousands_test --benchmark_min_time=0.01s)
 
 compile_output_test(memory_manager_test)
-add_test(NAME memory_manager_test COMMAND memory_manager_test --benchmark_min_time=0.01)
+add_test(NAME memory_manager_test COMMAND memory_manager_test --benchmark_min_time=0.01s)
 
-check_cxx_compiler_flag(-std=c++03 BENCHMARK_HAS_CXX03_FLAG)
-if (BENCHMARK_HAS_CXX03_FLAG)
+# MSVC does not allow to set the language standard to C++98/03.
+if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
   compile_benchmark_test(cxx03_test)
   set_target_properties(cxx03_test
     PROPERTIES
@@ -158,19 +194,25 @@ if (BENCHMARK_HAS_CXX03_FLAG)
   # causing the test to fail to compile. To prevent this we explicitly disable
   # the warning.
   check_cxx_compiler_flag(-Wno-odr BENCHMARK_HAS_WNO_ODR)
-  if (BENCHMARK_ENABLE_LTO AND BENCHMARK_HAS_WNO_ODR)
-    set_target_properties(cxx03_test
-      PROPERTIES
-      LINK_FLAGS "-Wno-odr")
+  check_cxx_compiler_flag(-Wno-lto-type-mismatch BENCHMARK_HAS_WNO_LTO_TYPE_MISMATCH)
+  # Cannot set_target_properties multiple times here because the warnings will
+  # be overwritten on each call
+  set (DISABLE_LTO_WARNINGS "")
+  if (BENCHMARK_HAS_WNO_ODR)
+    set(DISABLE_LTO_WARNINGS "${DISABLE_LTO_WARNINGS} -Wno-odr")
+  endif()
+  if (BENCHMARK_HAS_WNO_LTO_TYPE_MISMATCH)
+    set(DISABLE_LTO_WARNINGS "${DISABLE_LTO_WARNINGS} -Wno-lto-type-mismatch")
   endif()
-  add_test(NAME cxx03 COMMAND cxx03_test --benchmark_min_time=0.01)
+  set_target_properties(cxx03_test PROPERTIES LINK_FLAGS "${DISABLE_LTO_WARNINGS}")
+  add_test(NAME cxx03 COMMAND cxx03_test --benchmark_min_time=0.01s)
 endif()
 
 # Attempt to work around flaky test failures when running on Appveyor servers.
 if (DEFINED ENV{APPVEYOR})
-  set(COMPLEXITY_MIN_TIME "0.5")
+  set(COMPLEXITY_MIN_TIME "0.5s")
 else()
-  set(COMPLEXITY_MIN_TIME "0.01")
+  set(COMPLEXITY_MIN_TIME "0.01s")
 endif()
 compile_output_test(complexity_test)
 add_test(NAME complexity_benchmark COMMAND complexity_test --benchmark_min_time=${COMPLEXITY_MIN_TIME})
@@ -193,9 +235,13 @@ if (BENCHMARK_ENABLE_GTEST_TESTS)
 
   add_gtest(benchmark_gtest)
   add_gtest(benchmark_name_gtest)
+  add_gtest(benchmark_random_interleaving_gtest)
   add_gtest(commandlineflags_gtest)
   add_gtest(statistics_gtest)
   add_gtest(string_util_gtest)
+  add_gtest(perf_counters_gtest)
+  add_gtest(time_unit_gtest)
+  add_gtest(min_time_parse_gtest)
 endif(BENCHMARK_ENABLE_GTEST_TESTS)
 
 ###############################################################################
diff --git a/test/args_product_test.cc b/test/args_product_test.cc
index 8a859f8..63b8b71 100644
--- a/test/args_product_test.cc
+++ b/test/args_product_test.cc
@@ -1,10 +1,10 @@
-#include "benchmark/benchmark.h"
-
 #include <cassert>
 #include <iostream>
 #include <set>
 #include <vector>
 
+#include "benchmark/benchmark.h"
+
 class ArgsProductFixture : public ::benchmark::Fixture {
  public:
   ArgsProductFixture()
@@ -23,7 +23,7 @@ class ArgsProductFixture : public ::benchmark::Fixture {
                         {2, 15, 10, 9},
                         {4, 5, 6, 11}}) {}
 
-  void SetUp(const ::benchmark::State& state) {
+  void SetUp(const ::benchmark::State& state) override {
     std::vector<int64_t> ranges = {state.range(0), state.range(1),
                                    state.range(2), state.range(3)};
 
@@ -34,10 +34,10 @@ class ArgsProductFixture : public ::benchmark::Fixture {
 
   // NOTE: This is not TearDown as we want to check after _all_ runs are
   // complete.
-  virtual ~ArgsProductFixture() {
+  ~ArgsProductFixture() override {
     if (actualValues != expectedValues) {
       std::cout << "EXPECTED\n";
-      for (auto v : expectedValues) {
+      for (const auto& v : expectedValues) {
         std::cout << "{";
         for (int64_t iv : v) {
           std::cout << iv << ", ";
@@ -45,7 +45,7 @@ class ArgsProductFixture : public ::benchmark::Fixture {
         std::cout << "}\n";
       }
       std::cout << "ACTUAL\n";
-      for (auto v : actualValues) {
+      for (const auto& v : actualValues) {
        std::cout << "{";
         for (int64_t iv : v) {
           std::cout << iv << ", ";
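A note on the override changes in args_product_test.cc above: the fixture overrides benchmark::Fixture's SetUp hook and destructor, and the diff adds the explicit `override` specifiers. For reference, a minimal, self-contained sketch of the hook signatures involved (MyFixture and its members are illustrative, not part of the patch):

    #include <cstdint>
    #include <vector>

    #include "benchmark/benchmark.h"

    // Illustrative fixture; SetUp/TearDown override the benchmark::Fixture hooks.
    class MyFixture : public ::benchmark::Fixture {
     public:
      void SetUp(const ::benchmark::State& state) override {
        data_.assign(static_cast<size_t>(state.range(0)), 1);
      }

      void TearDown(const ::benchmark::State&) override { data_.clear(); }

      std::vector<int> data_;
    };

    BENCHMARK_DEFINE_F(MyFixture, Sum)(benchmark::State& state) {
      for (auto _ : state) {
        int64_t sum = 0;
        for (int v : data_) sum += v;
        benchmark::DoNotOptimize(sum);
      }
    }
    BENCHMARK_REGISTER_F(MyFixture, Sum)->Arg(1024);

    BENCHMARK_MAIN();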
diff --git a/test/basic_test.cc b/test/basic_test.cc
index 5f3dd1a..cba1b0f 100644
--- a/test/basic_test.cc
+++ b/test/basic_test.cc
@@ -5,7 +5,8 @@
 
 void BM_empty(benchmark::State& state) {
   for (auto _ : state) {
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
 }
 BENCHMARK(BM_empty);
@@ -13,7 +14,7 @@ BENCHMARK(BM_empty)->ThreadPerCpu();
 
 void BM_spin_empty(benchmark::State& state) {
   for (auto _ : state) {
-    for (int x = 0; x < state.range(0); ++x) {
+    for (auto x = 0; x < state.range(0); ++x) {
       benchmark::DoNotOptimize(x);
     }
   }
@@ -22,11 +23,11 @@ BASIC_BENCHMARK_TEST(BM_spin_empty);
 BASIC_BENCHMARK_TEST(BM_spin_empty)->ThreadPerCpu();
 
 void BM_spin_pause_before(benchmark::State& state) {
-  for (int i = 0; i < state.range(0); ++i) {
+  for (auto i = 0; i < state.range(0); ++i) {
     benchmark::DoNotOptimize(i);
   }
   for (auto _ : state) {
-    for (int i = 0; i < state.range(0); ++i) {
+    for (auto i = 0; i < state.range(0); ++i) {
       benchmark::DoNotOptimize(i);
     }
   }
@@ -37,11 +38,11 @@ BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu();
 void BM_spin_pause_during(benchmark::State& state) {
   for (auto _ : state) {
     state.PauseTiming();
-    for (int i = 0; i < state.range(0); ++i) {
+    for (auto i = 0; i < state.range(0); ++i) {
       benchmark::DoNotOptimize(i);
     }
     state.ResumeTiming();
-    for (int i = 0; i < state.range(0); ++i) {
+    for (auto i = 0; i < state.range(0); ++i) {
       benchmark::DoNotOptimize(i);
     }
   }
@@ -62,11 +63,11 @@ BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu();
 
 void BM_spin_pause_after(benchmark::State& state) {
   for (auto _ : state) {
-    for (int i = 0; i < state.range(0); ++i) {
+    for (auto i = 0; i < state.range(0); ++i) {
       benchmark::DoNotOptimize(i);
     }
   }
-  for (int i = 0; i < state.range(0); ++i) {
+  for (auto i = 0; i < state.range(0); ++i) {
     benchmark::DoNotOptimize(i);
   }
 }
@@ -74,15 +75,15 @@ BASIC_BENCHMARK_TEST(BM_spin_pause_after);
 BASIC_BENCHMARK_TEST(BM_spin_pause_after)->ThreadPerCpu();
 
 void BM_spin_pause_before_and_after(benchmark::State& state) {
-  for (int i = 0; i < state.range(0); ++i) {
+  for (auto i = 0; i < state.range(0); ++i) {
     benchmark::DoNotOptimize(i);
   }
   for (auto _ : state) {
-    for (int i = 0; i < state.range(0); ++i) {
+    for (auto i = 0; i < state.range(0); ++i) {
       benchmark::DoNotOptimize(i);
     }
   }
-  for (int i = 0; i < state.range(0); ++i) {
+  for (auto i = 0; i < state.range(0); ++i) {
     benchmark::DoNotOptimize(i);
   }
 }
@@ -96,7 +97,6 @@ void BM_empty_stop_start(benchmark::State& state) {
 BENCHMARK(BM_empty_stop_start);
 BENCHMARK(BM_empty_stop_start)->ThreadPerCpu();
 
-
 void BM_KeepRunning(benchmark::State& state) {
   benchmark::IterationCount iter_count = 0;
   assert(iter_count == state.iterations());
@@ -108,15 +108,30 @@ void BM_KeepRunning(benchmark::State& state) {
 BENCHMARK(BM_KeepRunning);
 
 void BM_KeepRunningBatch(benchmark::State& state) {
-  // Choose a prime batch size to avoid evenly dividing max_iterations.
-  const benchmark::IterationCount batch_size = 101;
+  // Choose a batch size >1000 to skip the typical runs with iteration
+  // targets of 10, 100 and 1000. If these are not actually skipped the
+  // bug would be detectable as consecutive runs with the same iteration
+  // count. Below we assert that this does not happen.
+  const benchmark::IterationCount batch_size = 1009;
+
+  static benchmark::IterationCount prior_iter_count = 0;
   benchmark::IterationCount iter_count = 0;
   while (state.KeepRunningBatch(batch_size)) {
     iter_count += batch_size;
   }
   assert(state.iterations() == iter_count);
+
+  // Verify that the iteration count always increases across runs (see
+  // comment above).
+  assert(iter_count == batch_size            // max_iterations == 1
+         || iter_count > prior_iter_count);  // max_iterations > batch_size
+  prior_iter_count = iter_count;
 }
-BENCHMARK(BM_KeepRunningBatch);
+// Register with a fixed repetition count to establish the invariant that
+// the iteration count should always change across runs. This overrides
+// the --benchmark_repetitions command line flag, which would otherwise
+// cause this test to fail if set > 1.
+BENCHMARK(BM_KeepRunningBatch)->Repetitions(1);
 
 void BM_RangedFor(benchmark::State& state) {
   benchmark::IterationCount iter_count = 0;
@@ -127,10 +142,39 @@ void BM_RangedFor(benchmark::State& state) {
 }
 BENCHMARK(BM_RangedFor);
 
+#ifdef BENCHMARK_HAS_CXX11
+template <typename T>
+void BM_OneTemplateFunc(benchmark::State& state) {
+  auto arg = state.range(0);
+  T sum = 0;
+  for (auto _ : state) {
+    sum += static_cast<T>(arg);
+  }
+}
+BENCHMARK(BM_OneTemplateFunc<int>)->Arg(1);
+BENCHMARK(BM_OneTemplateFunc<double>)->Arg(1);
+
+template <typename A, typename B>
+void BM_TwoTemplateFunc(benchmark::State& state) {
+  auto arg = state.range(0);
+  A sum = 0;
+  B prod = 1;
+  for (auto _ : state) {
+    sum += static_cast<A>(arg);
+    prod *= static_cast<B>(arg);
+  }
+}
+BENCHMARK(BM_TwoTemplateFunc<int, double>)->Arg(1);
+BENCHMARK(BM_TwoTemplateFunc<double, int>)->Arg(1);
+
+#endif  // BENCHMARK_HAS_CXX11
+
 // Ensure that StateIterator provides all the necessary typedefs required to
 // instantiate std::iterator_traits.
-static_assert(std::is_same<
-  typename std::iterator_traits<benchmark::State::StateIterator>::value_type,
-  typename benchmark::State::StateIterator::value_type>::value, "");
+static_assert(
+    std::is_same<typename std::iterator_traits<
+                     benchmark::State::StateIterator>::value_type,
+                 typename benchmark::State::StateIterator::value_type>::value,
+    "");
 
 BENCHMARK_MAIN();
diff --git a/test/benchmark_gtest.cc b/test/benchmark_gtest.cc
index 6dbf7a5..2c9e555 100644
--- a/test/benchmark_gtest.cc
+++ b/test/benchmark_gtest.cc
@@ -1,11 +1,15 @@
+#include <map>
+#include <string>
 #include <vector>
 
 #include "../src/benchmark_register.h"
+#include "benchmark/benchmark.h"
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 
 namespace benchmark {
 namespace internal {
+
 namespace {
 
 TEST(AddRangeTest, Simple) {
@@ -34,8 +38,9 @@ TEST(AddRangeTest, Advanced64) {
 
 TEST(AddRangeTest, FullRange8) {
   std::vector<int8_t> dst;
-  AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), 8);
-  EXPECT_THAT(dst, testing::ElementsAre(1, 8, 64, 127));
+  AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), int8_t{8});
+  EXPECT_THAT(
+      dst, testing::ElementsAre(int8_t{1}, int8_t{8}, int8_t{64}, int8_t{127}));
 }
 
 TEST(AddRangeTest, FullRange64) {
@@ -125,8 +130,38 @@ TEST(AddRangeTest, FullNegativeRange64) {
 
 TEST(AddRangeTest, Simple8) {
   std::vector<int8_t> dst;
-  AddRange<int8_t>(&dst, 1, 8, 2);
-  EXPECT_THAT(dst, testing::ElementsAre(1, 2, 4, 8));
+  AddRange<int8_t>(&dst, int8_t{1}, int8_t{8}, int8_t{2});
+  EXPECT_THAT(dst,
+              testing::ElementsAre(int8_t{1}, int8_t{2}, int8_t{4}, int8_t{8}));
+}
+
+TEST(AddCustomContext, Simple) {
+  std::map<std::string, std::string> *&global_context = GetGlobalContext();
+  EXPECT_THAT(global_context, nullptr);
+
+  AddCustomContext("foo", "bar");
+  AddCustomContext("baz", "qux");
+
+  EXPECT_THAT(*global_context,
+              testing::UnorderedElementsAre(testing::Pair("foo", "bar"),
+                                            testing::Pair("baz", "qux")));
+
+  delete global_context;
+  global_context = nullptr;
+}
+
+TEST(AddCustomContext, DuplicateKey) {
+  std::map<std::string, std::string> *&global_context = GetGlobalContext();
+  EXPECT_THAT(global_context, nullptr);
+
+  AddCustomContext("foo", "bar");
+  AddCustomContext("foo", "qux");
+
+  EXPECT_THAT(*global_context,
+              testing::UnorderedElementsAre(testing::Pair("foo", "bar")));
+
+  delete global_context;
+  global_context = nullptr;
+}
 
 }  // namespace
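The new AddCustomContext tests above reach the global context map through the internal GetGlobalContext() accessor; user code normally goes through the public benchmark::AddCustomContext() entry point instead. A minimal sketch of that usage (the key/value strings are invented for illustration):

    #include "benchmark/benchmark.h"

    static void BM_Noop(benchmark::State& state) {
      for (auto _ : state) {
      }
    }
    BENCHMARK(BM_Noop);

    int main(int argc, char** argv) {
      benchmark::Initialize(&argc, argv);
      // The pair is emitted with the benchmark context, e.g. in the JSON output.
      benchmark::AddCustomContext("build_id", "example-1234");
      benchmark::RunSpecifiedBenchmarks();
      benchmark::Shutdown();
      return 0;
    }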
diff --git a/test/benchmark_min_time_flag_iters_test.cc b/test/benchmark_min_time_flag_iters_test.cc
new file mode 100644
index 0000000..3de93a7
--- /dev/null
+++ b/test/benchmark_min_time_flag_iters_test.cc
@@ -0,0 +1,66 @@
+#include <cassert>
+#include <cstdlib>
+#include <cstring>
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include "benchmark/benchmark.h"
+
+// Tests that we can specify the number of iterations with
+// --benchmark_min_time=<NUM>x.
+namespace {
+
+class TestReporter : public benchmark::ConsoleReporter {
+ public:
+  virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE {
+    return ConsoleReporter::ReportContext(context);
+  };
+
+  virtual void ReportRuns(const std::vector<Run>& report) BENCHMARK_OVERRIDE {
+    assert(report.size() == 1);
+    iter_nums_.push_back(report[0].iterations);
+    ConsoleReporter::ReportRuns(report);
+  };
+
+  TestReporter() {}
+
+  virtual ~TestReporter() {}
+
+  const std::vector<benchmark::IterationCount>& GetIters() const {
+    return iter_nums_;
+  }
+
+ private:
+  std::vector<benchmark::IterationCount> iter_nums_;
+};
+
+}  // end namespace
+
+static void BM_MyBench(benchmark::State& state) {
+  for (auto s : state) {
+  }
+}
+BENCHMARK(BM_MyBench);
+
+int main(int argc, char** argv) {
+  // Make a fake argv and append the new --benchmark_min_time=<foo> to it.
+  int fake_argc = argc + 1;
+  const char** fake_argv = new const char*[static_cast<size_t>(fake_argc)];
+  for (int i = 0; i < argc; ++i) fake_argv[i] = argv[i];
+  fake_argv[argc] = "--benchmark_min_time=4x";
+
+  benchmark::Initialize(&fake_argc, const_cast<char**>(fake_argv));
+
+  TestReporter test_reporter;
+  const size_t returned_count =
+      benchmark::RunSpecifiedBenchmarks(&test_reporter, "BM_MyBench");
+  assert(returned_count == 1);
+
+  // Check the executed iters.
+  const std::vector<benchmark::IterationCount> iters = test_reporter.GetIters();
+  assert(!iters.empty() && iters[0] == 4);
+
+  delete[] fake_argv;
+  return 0;
+}
diff --git a/test/benchmark_min_time_flag_time_test.cc b/test/benchmark_min_time_flag_time_test.cc
new file mode 100644
index 0000000..04a82eb
--- /dev/null
+++ b/test/benchmark_min_time_flag_time_test.cc
@@ -0,0 +1,90 @@
+#include <cassert>
+#include <climits>
+#include <cmath>
+#include <cstdlib>
+#include <cstring>
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include "benchmark/benchmark.h"
+
+// Tests that we can specify the min time with
+// --benchmark_min_time=<NUM> (no suffix needed) OR
+// --benchmark_min_time=<NUM>s
+namespace {
+
+// This is from benchmark.h
+typedef int64_t IterationCount;
+
+class TestReporter : public benchmark::ConsoleReporter {
+ public:
+  virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE {
+    return ConsoleReporter::ReportContext(context);
+  };
+
+  virtual void ReportRuns(const std::vector<Run>& report) BENCHMARK_OVERRIDE {
+    assert(report.size() == 1);
+    ConsoleReporter::ReportRuns(report);
+  };
+
+  virtual void ReportRunsConfig(double min_time, bool /* has_explicit_iters */,
+                                IterationCount /* iters */) BENCHMARK_OVERRIDE {
+    min_times_.push_back(min_time);
+  }
+
+  TestReporter() {}
+
+  virtual ~TestReporter() {}
+
+  const std::vector<double>& GetMinTimes() const { return min_times_; }
+
+ private:
+  std::vector<double> min_times_;
+};
+
+bool AlmostEqual(double a, double b) {
+  return std::fabs(a - b) < std::numeric_limits<double>::epsilon();
+}
+
+void DoTestHelper(int* argc, const char** argv, double expected) {
+  benchmark::Initialize(argc, const_cast<char**>(argv));
+
+  TestReporter test_reporter;
+  const size_t returned_count =
+      benchmark::RunSpecifiedBenchmarks(&test_reporter, "BM_MyBench");
+  assert(returned_count == 1);
+
+  // Check the min_time
+  const std::vector<double>& min_times = test_reporter.GetMinTimes();
+  assert(!min_times.empty() && AlmostEqual(min_times[0], expected));
+}
+
+}  // end namespace
+
+static void BM_MyBench(benchmark::State& state) {
+  for (auto s : state) {
+  }
+}
+BENCHMARK(BM_MyBench);
+
+int main(int argc, char** argv) {
+  // Make a fake argv and append the new --benchmark_min_time=<foo> to it.
+  int fake_argc = argc + 1;
+  const char** fake_argv = new const char*[static_cast<size_t>(fake_argc)];
+
+  for (int i = 0; i < argc; ++i) fake_argv[i] = argv[i];
+
+  const char* no_suffix = "--benchmark_min_time=4";
+  const char* with_suffix = "--benchmark_min_time=4.0s";
+  double expected = 4.0;
+
+  fake_argv[argc] = no_suffix;
+  DoTestHelper(&fake_argc, fake_argv, expected);
+
+  fake_argv[argc] = with_suffix;
+  DoTestHelper(&fake_argc, fake_argv, expected);
+
+  delete[] fake_argv;
+  return 0;
+}
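Both new flag tests use the same technique: build a fake argv with one extra flag appended, then hand it to benchmark::Initialize(). Condensed to its core, the pattern looks roughly like this (flag value chosen arbitrarily; "4x" requests a fixed iteration count, while "4" or "4.0s" requests seconds):

    #include <vector>

    #include "benchmark/benchmark.h"

    static void BM_Example(benchmark::State& state) {
      for (auto _ : state) {
      }
    }
    BENCHMARK(BM_Example);

    int main(int argc, char** argv) {
      // Copy the real argv and append one synthetic flag before Initialize().
      std::vector<char*> args(argv, argv + argc);
      char extra_flag[] = "--benchmark_min_time=4x";  // arbitrary example value
      args.push_back(extra_flag);

      int fake_argc = static_cast<int>(args.size());
      benchmark::Initialize(&fake_argc, args.data());
      benchmark::RunSpecifiedBenchmarks();
      return 0;
    }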
diff --git a/test/benchmark_name_gtest.cc b/test/benchmark_name_gtest.cc
index afb401c..0a6746d 100644
--- a/test/benchmark_name_gtest.cc
+++ b/test/benchmark_name_gtest.cc
@@ -32,6 +32,14 @@ TEST(BenchmarkNameTest, MinTime) {
   EXPECT_EQ(name.str(), "function_name/some_args:3/4/min_time:3.4s");
 }
 
+TEST(BenchmarkNameTest, MinWarmUpTime) {
+  auto name = BenchmarkName();
+  name.function_name = "function_name";
+  name.args = "some_args:3/4";
+  name.min_warmup_time = "min_warmup_time:3.5s";
+  EXPECT_EQ(name.str(), "function_name/some_args:3/4/min_warmup_time:3.5s");
+}
+
 TEST(BenchmarkNameTest, Iterations) {
   auto name = BenchmarkName();
   name.function_name = "function_name";
diff --git a/test/benchmark_random_interleaving_gtest.cc b/test/benchmark_random_interleaving_gtest.cc
new file mode 100644
index 0000000..7f20867
--- /dev/null
+++ b/test/benchmark_random_interleaving_gtest.cc
@@ -0,0 +1,126 @@
+#include <queue>
+#include <string>
+#include <vector>
+
+#include "../src/commandlineflags.h"
+#include "../src/string_util.h"
+#include "benchmark/benchmark.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace benchmark {
+
+BM_DECLARE_bool(benchmark_enable_random_interleaving);
+BM_DECLARE_string(benchmark_filter);
+BM_DECLARE_int32(benchmark_repetitions);
+
+namespace internal {
+namespace {
+
+class EventQueue : public std::queue<std::string> {
+ public:
+  void Put(const std::string& event) { push(event); }
+
+  void Clear() {
+    while (!empty()) {
+      pop();
+    }
+  }
+
+  std::string Get() {
+    std::string event = front();
+    pop();
+    return event;
+  }
+};
+
+EventQueue* queue = new EventQueue();
+
+class NullReporter : public BenchmarkReporter {
+ public:
+  bool ReportContext(const Context& /*context*/) override { return true; }
+  void ReportRuns(const std::vector<Run>& /* report */) override {}
+};
+
+class BenchmarkTest : public testing::Test {
+ public:
+  static void SetupHook(int /* num_threads */) { queue->push("Setup"); }
+
+  static void TeardownHook(int /* num_threads */) { queue->push("Teardown"); }
+
+  void Execute(const std::string& pattern) {
+    queue->Clear();
+
+    std::unique_ptr<BenchmarkReporter> reporter(new NullReporter());
+    FLAGS_benchmark_filter = pattern;
+    RunSpecifiedBenchmarks(reporter.get());
+
+    queue->Put("DONE");  // End marker
+  }
+};
+
+void BM_Match1(benchmark::State& state) {
+  const int64_t arg = state.range(0);
+
+  for (auto _ : state) {
+  }
+  queue->Put(StrFormat("BM_Match1/%d", static_cast<int>(arg)));
+}
+BENCHMARK(BM_Match1)
+    ->Iterations(100)
+    ->Arg(1)
+    ->Arg(2)
+    ->Arg(3)
+    ->Range(10, 80)
+    ->Args({90})
+    ->Args({100});
+
+TEST_F(BenchmarkTest, Match1) {
+  Execute("BM_Match1");
+  ASSERT_EQ("BM_Match1/1", queue->Get());
+  ASSERT_EQ("BM_Match1/2", queue->Get());
+  ASSERT_EQ("BM_Match1/3", queue->Get());
+  ASSERT_EQ("BM_Match1/10", queue->Get());
+  ASSERT_EQ("BM_Match1/64", queue->Get());
+  ASSERT_EQ("BM_Match1/80", queue->Get());
+  ASSERT_EQ("BM_Match1/90", queue->Get());
+  ASSERT_EQ("BM_Match1/100", queue->Get());
+  ASSERT_EQ("DONE", queue->Get());
+}
+
+TEST_F(BenchmarkTest, Match1WithRepetition) {
+  FLAGS_benchmark_repetitions = 2;
+
+  Execute("BM_Match1/(64|80)");
+  ASSERT_EQ("BM_Match1/64", queue->Get());
+  ASSERT_EQ("BM_Match1/64", queue->Get());
+  ASSERT_EQ("BM_Match1/80", queue->Get());
+  ASSERT_EQ("BM_Match1/80", queue->Get());
+  ASSERT_EQ("DONE", queue->Get());
+}
+
+TEST_F(BenchmarkTest, Match1WithRandomInterleaving) {
+  FLAGS_benchmark_enable_random_interleaving = true;
+  FLAGS_benchmark_repetitions = 100;
+
+  std::map<std::string, int> element_count;
+  std::map<std::string, int> interleaving_count;
+  Execute("BM_Match1/(64|80)");
+  for (int i = 0; i < 100; ++i) {
+    std::vector<std::string> interleaving;
+    interleaving.push_back(queue->Get());
+    interleaving.push_back(queue->Get());
+    element_count[interleaving[0]]++;
+    element_count[interleaving[1]]++;
+    interleaving_count[StrFormat("%s,%s", interleaving[0].c_str(),
+                                 interleaving[1].c_str())]++;
+  }
+  EXPECT_EQ(element_count["BM_Match1/64"], 100) << "Unexpected repetitions.";
+  EXPECT_EQ(element_count["BM_Match1/80"], 100) << "Unexpected repetitions.";
+  EXPECT_GE(interleaving_count.size(), 2) << "Interleaving was not randomized.";
+  ASSERT_EQ("DONE", queue->Get());
+}
+
+}  // namespace
+}  // namespace internal
+}  // namespace benchmark
diff --git a/test/benchmark_setup_teardown_test.cc b/test/benchmark_setup_teardown_test.cc
new file mode 100644
index 0000000..6c3cc2e
--- /dev/null
+++ b/test/benchmark_setup_teardown_test.cc
@@ -0,0 +1,157 @@
+#include <atomic>
+#include <cassert>
+#include <cstdlib>
+#include <cstring>
+#include <iostream>
+#include <limits>
+#include <string>
+
+#include "benchmark/benchmark.h"
+
+// Test that Setup() and Teardown() are called exactly once
+// for each benchmark run (single-threaded).
+namespace singlethreaded {
+static int setup_call = 0;
+static int teardown_call = 0;
+}  // namespace singlethreaded
+static void DoSetup1(const benchmark::State& state) {
+  ++singlethreaded::setup_call;
+
+  // Setup/Teardown should never be called with any thread_idx != 0.
+  assert(state.thread_index() == 0);
+}
+
+static void DoTeardown1(const benchmark::State& state) {
+  ++singlethreaded::teardown_call;
+  assert(state.thread_index() == 0);
+}
+
+static void BM_with_setup(benchmark::State& state) {
+  for (auto s : state) {
+  }
+}
+BENCHMARK(BM_with_setup)
+    ->Arg(1)
+    ->Arg(3)
+    ->Arg(5)
+    ->Arg(7)
+    ->Iterations(100)
+    ->Setup(DoSetup1)
+    ->Teardown(DoTeardown1);
+
+// Test that Setup() and Teardown() are called once for each group of threads.
+namespace concurrent {
+static std::atomic<int> setup_call(0);
+static std::atomic<int> teardown_call(0);
+static std::atomic<int> func_call(0);
+}  // namespace concurrent
+
+static void DoSetup2(const benchmark::State& state) {
+  concurrent::setup_call.fetch_add(1, std::memory_order_acquire);
+  assert(state.thread_index() == 0);
+}
+
+static void DoTeardown2(const benchmark::State& state) {
+  concurrent::teardown_call.fetch_add(1, std::memory_order_acquire);
+  assert(state.thread_index() == 0);
+}
+
+static void BM_concurrent(benchmark::State& state) {
+  for (auto s : state) {
+  }
+  concurrent::func_call.fetch_add(1, std::memory_order_acquire);
+}
+
+BENCHMARK(BM_concurrent)
+    ->Setup(DoSetup2)
+    ->Teardown(DoTeardown2)
+    ->Iterations(100)
+    ->Threads(5)
+    ->Threads(10)
+    ->Threads(15);
+
+// Testing interaction with Fixture::Setup/Teardown
+namespace fixture_interaction {
+int setup = 0;
+int fixture_setup = 0;
+}  // namespace fixture_interaction
+
+#define FIXTURE_BECHMARK_NAME MyFixture
+
+class FIXTURE_BECHMARK_NAME : public ::benchmark::Fixture {
+ public:
+  void SetUp(const ::benchmark::State&) override {
+    fixture_interaction::fixture_setup++;
+  }
+
+  ~FIXTURE_BECHMARK_NAME() override {}
+};
+
+BENCHMARK_F(FIXTURE_BECHMARK_NAME, BM_WithFixture)(benchmark::State& st) {
+  for (auto _ : st) {
+  }
+}
+
+static void DoSetupWithFixture(const benchmark::State&) {
+  fixture_interaction::setup++;
+}
+
+BENCHMARK_REGISTER_F(FIXTURE_BECHMARK_NAME, BM_WithFixture)
+    ->Arg(1)
+    ->Arg(3)
+    ->Arg(5)
+    ->Arg(7)
+    ->Setup(DoSetupWithFixture)
+    ->Repetitions(1)
+    ->Iterations(100);
+
+// Testing repetitions.
+namespace repetitions {
+int setup = 0;
+}
+
+static void DoSetupWithRepetitions(const benchmark::State&) {
+  repetitions::setup++;
+}
+static void BM_WithRep(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+}
+
+BENCHMARK(BM_WithRep)
+    ->Arg(1)
+    ->Arg(3)
+    ->Arg(5)
+    ->Arg(7)
+    ->Setup(DoSetupWithRepetitions)
+    ->Iterations(100)
+    ->Repetitions(4);
+
+int main(int argc, char** argv) {
+  benchmark::Initialize(&argc, argv);
+
+  size_t ret = benchmark::RunSpecifiedBenchmarks(".");
+  assert(ret > 0);
+
+  // Setup/Teardown is called once for each arg group (1,3,5,7).
+  assert(singlethreaded::setup_call == 4);
+  assert(singlethreaded::teardown_call == 4);
+
+  // 3 groups of threads call this function (5, 10, 15).
+  assert(concurrent::setup_call.load(std::memory_order_relaxed) == 3);
+  assert(concurrent::teardown_call.load(std::memory_order_relaxed) == 3);
+  assert((5 + 10 + 15) ==
+         concurrent::func_call.load(std::memory_order_relaxed));
+
+  // Setup is called 4 times, once for each arg group (1,3,5,7)
+  assert(fixture_interaction::setup == 4);
+  // Fixture::Setup is called every time the bm routine is run.
+  // The exact number is not deterministic, so we just assert that
+  // it's more than setup.
+  assert(fixture_interaction::fixture_setup > fixture_interaction::setup);
+
+  // Setup is called once for each repetition * num_args = 4 * 4 = 16.
+  assert(repetitions::setup == 16);
+
+  return 0;
+}
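The setup/teardown test above exercises the Setup()/Teardown() registration hooks, which run once per benchmark run around the measured iterations (and once per thread group, with thread_index() == 0 inside the hooks). Reduced to a minimal sketch of the API shape (names are illustrative):

    #include "benchmark/benchmark.h"

    // Run once before / after each benchmark run, not per iteration.
    static void DoSetup(const benchmark::State&) { /* acquire resources */ }
    static void DoTeardown(const benchmark::State&) { /* release resources */ }

    static void BM_WithHooks(benchmark::State& state) {
      for (auto _ : state) {
      }
    }
    BENCHMARK(BM_WithHooks)->Setup(DoSetup)->Teardown(DoTeardown);

    BENCHMARK_MAIN();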
- benchmark::DoNotOptimize(test_vector->at(i) = 1); + benchmark::DoNotOptimize(test_vector->at(static_cast<size_t>(i)) = 1); } } - if (state.thread_index == 0) { + if (state.thread_index() == 0) { delete test_vector; } } @@ -214,7 +221,8 @@ BENCHMARK_CAPTURE(BM_with_args, string_and_pair_test, std::string("abc"), std::pair<int, double>(42, 3.8)); void BM_non_template_args(benchmark::State& state, int, double) { - while(state.KeepRunning()) {} + while (state.KeepRunning()) { + } } BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0); @@ -223,14 +231,14 @@ BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0); static void BM_DenseThreadRanges(benchmark::State& st) { switch (st.range(0)) { case 1: - assert(st.threads == 1 || st.threads == 2 || st.threads == 3); + assert(st.threads() == 1 || st.threads() == 2 || st.threads() == 3); break; case 2: - assert(st.threads == 1 || st.threads == 3 || st.threads == 4); + assert(st.threads() == 1 || st.threads() == 3 || st.threads() == 4); break; case 3: - assert(st.threads == 5 || st.threads == 8 || st.threads == 11 || - st.threads == 14); + assert(st.threads() == 5 || st.threads() == 8 || st.threads() == 11 || + st.threads() == 14); break; default: assert(false && "Invalid test case number"); @@ -242,4 +250,25 @@ BENCHMARK(BM_DenseThreadRanges)->Arg(1)->DenseThreadRange(1, 3); BENCHMARK(BM_DenseThreadRanges)->Arg(2)->DenseThreadRange(1, 4, 2); BENCHMARK(BM_DenseThreadRanges)->Arg(3)->DenseThreadRange(5, 14, 3); +static void BM_BenchmarkName(benchmark::State& state) { + for (auto _ : state) { + } + + // Check that the benchmark name is passed correctly to `state`. + assert("BM_BenchmarkName" == state.name()); +} +BENCHMARK(BM_BenchmarkName); + +// regression test for #1446 +template <typename type> +static void BM_templated_test(benchmark::State& state) { + for (auto _ : state) { + type created_string; + benchmark::DoNotOptimize(created_string); + } +} + +static auto BM_templated_test_double = BM_templated_test<std::complex<double>>; +BENCHMARK(BM_templated_test_double); + BENCHMARK_MAIN(); diff --git a/test/clobber_memory_assembly_test.cc b/test/clobber_memory_assembly_test.cc index f41911a..54e26cc 100644 --- a/test/clobber_memory_assembly_test.cc +++ b/test/clobber_memory_assembly_test.cc @@ -3,13 +3,13 @@ #ifdef __clang__ #pragma clang diagnostic ignored "-Wreturn-type" #endif +BENCHMARK_DISABLE_DEPRECATED_WARNING extern "C" { extern int ExternInt; extern int ExternInt2; extern int ExternInt3; - } // CHECK-LABEL: test_basic: diff --git a/test/commandlineflags_gtest.cc b/test/commandlineflags_gtest.cc index 656020f..8412008 100644 --- a/test/commandlineflags_gtest.cc +++ b/test/commandlineflags_gtest.cc @@ -2,6 +2,7 @@ #include "../src/commandlineflags.h" #include "../src/internal_macros.h" +#include "gmock/gmock.h" #include "gtest/gtest.h" namespace benchmark { @@ -19,9 +20,7 @@ int setenv(const char* name, const char* value, int overwrite) { return _putenv_s(name, value); } -int unsetenv(const char* name) { - return _putenv_s(name, ""); -} +int unsetenv(const char* name) { return _putenv_s(name, ""); } #endif // BENCHMARK_OS_WINDOWS @@ -197,5 +196,33 @@ TEST(StringFromEnv, Valid) { unsetenv("IN_ENV"); } +TEST(KvPairsFromEnv, Default) { + ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0); + EXPECT_THAT(KvPairsFromEnv("not_in_env", {{"foo", "bar"}}), + testing::ElementsAre(testing::Pair("foo", "bar"))); +} + +TEST(KvPairsFromEnv, MalformedReturnsDefault) { + ASSERT_EQ(setenv("IN_ENV", "foo", 1), 0); + EXPECT_THAT(KvPairsFromEnv("in_env", {{"foo", 
"bar"}}), + testing::ElementsAre(testing::Pair("foo", "bar"))); + unsetenv("IN_ENV"); +} + +TEST(KvPairsFromEnv, Single) { + ASSERT_EQ(setenv("IN_ENV", "foo=bar", 1), 0); + EXPECT_THAT(KvPairsFromEnv("in_env", {}), + testing::ElementsAre(testing::Pair("foo", "bar"))); + unsetenv("IN_ENV"); +} + +TEST(KvPairsFromEnv, Multiple) { + ASSERT_EQ(setenv("IN_ENV", "foo=bar,baz=qux", 1), 0); + EXPECT_THAT(KvPairsFromEnv("in_env", {}), + testing::UnorderedElementsAre(testing::Pair("foo", "bar"), + testing::Pair("baz", "qux"))); + unsetenv("IN_ENV"); +} + } // namespace } // namespace benchmark diff --git a/test/complexity_test.cc b/test/complexity_test.cc index 5681fdc..76891e0 100644 --- a/test/complexity_test.cc +++ b/test/complexity_test.cc @@ -4,6 +4,7 @@ #include <cmath> #include <cstdlib> #include <vector> + #include "benchmark/benchmark.h" #include "output_test.h" @@ -12,8 +13,10 @@ namespace { #define ADD_COMPLEXITY_CASES(...) \ int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__) -int AddComplexityTest(std::string test_name, std::string big_o_test_name, - std::string rms_test_name, std::string big_o) { +int AddComplexityTest(const std::string &test_name, + const std::string &big_o_test_name, + const std::string &rms_test_name, + const std::string &big_o, int family_index) { SetSubstitutions({{"%name", test_name}, {"%bigo_name", big_o_test_name}, {"%rms_name", rms_test_name}, @@ -25,25 +28,33 @@ int AddComplexityTest(std::string test_name, std::string big_o_test_name, {{"^%bigo_name %bigo_str %bigo_str[ ]*$"}, {"^%bigo_name", MR_Not}, // Assert we we didn't only matched a name. {"^%rms_name %rms %rms[ ]*$", MR_Next}}); - AddCases(TC_JSONOut, {{"\"name\": \"%bigo_name\",$"}, - {"\"run_name\": \"%name\",$", MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": %int,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"BigO\",$", MR_Next}, - {"\"cpu_coefficient\": %float,$", MR_Next}, - {"\"real_coefficient\": %float,$", MR_Next}, - {"\"big_o\": \"%bigo\",$", MR_Next}, - {"\"time_unit\": \"ns\"$", MR_Next}, - {"}", MR_Next}, - {"\"name\": \"%rms_name\",$"}, - {"\"run_name\": \"%name\",$", MR_Next}, - {"\"run_type\": \"aggregate\",$", MR_Next}, - {"\"repetitions\": %int,$", MR_Next}, - {"\"threads\": 1,$", MR_Next}, - {"\"aggregate_name\": \"RMS\",$", MR_Next}, - {"\"rms\": %float$", MR_Next}, - {"}", MR_Next}}); + AddCases( + TC_JSONOut, + {{"\"name\": \"%bigo_name\",$"}, + {"\"family_index\": " + std::to_string(family_index) + ",$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"%name\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": %int,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"BigO\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"cpu_coefficient\": %float,$", MR_Next}, + {"\"real_coefficient\": %float,$", MR_Next}, + {"\"big_o\": \"%bigo\",$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}, + {"\"name\": \"%rms_name\",$"}, + {"\"family_index\": " + std::to_string(family_index) + ",$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"%name\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": %int,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"RMS\",$", MR_Next}, + {"\"aggregate_unit\": \"percentage\",$", MR_Next}, + {"\"rms\": %float$", MR_Next}, + {"}", MR_Next}}); AddCases(TC_CSVOut, 
{{"^\"%bigo_name\",,%float,%float,%bigo,,,,,$"}, {"^\"%bigo_name\"", MR_Not}, {"^\"%rms_name\",,%float,%float,,,,,,$", MR_Next}}); @@ -56,10 +67,10 @@ int AddComplexityTest(std::string test_name, std::string big_o_test_name, // --------------------------- Testing BigO O(1) --------------------------- // // ========================================================================= // -void BM_Complexity_O1(benchmark::State& state) { +void BM_Complexity_O1(benchmark::State &state) { for (auto _ : state) { for (int i = 0; i < 1024; ++i) { - benchmark::DoNotOptimize(&i); + benchmark::DoNotOptimize(i); } } state.SetComplexityN(state.range(0)); @@ -82,15 +93,15 @@ const char *lambda_big_o_1 = "f\\(N\\)"; // Add enum tests ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, - enum_big_o_1); + enum_big_o_1, /*family_index=*/0); // Add auto enum tests ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, - auto_big_o_1); + auto_big_o_1, /*family_index=*/1); // Add lambda tests ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, - lambda_big_o_1); + lambda_big_o_1, /*family_index=*/2); // ========================================================================= // // --------------------------- Testing BigO O(N) --------------------------- // @@ -98,19 +109,20 @@ ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, std::vector<int> ConstructRandomVector(int64_t size) { std::vector<int> v; - v.reserve(static_cast<int>(size)); + v.reserve(static_cast<size_t>(size)); for (int i = 0; i < size; ++i) { v.push_back(static_cast<int>(std::rand() % size)); } return v; } -void BM_Complexity_O_N(benchmark::State& state) { +void BM_Complexity_O_N(benchmark::State &state) { auto v = ConstructRandomVector(state.range(0)); // Test worst case scenario (item not in vector) const int64_t item_not_in_vector = state.range(0) * 2; for (auto _ : state) { - benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector)); + auto it = std::find(v.begin(), v.end(), item_not_in_vector); + benchmark::DoNotOptimize(it); } state.SetComplexityN(state.range(0)); } @@ -137,17 +149,17 @@ const char *lambda_big_o_n = "f\\(N\\)"; // Add enum tests ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name, - enum_auto_big_o_n); + enum_auto_big_o_n, /*family_index=*/3); // Add lambda tests ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name, - lambda_big_o_n); + lambda_big_o_n, /*family_index=*/4); // ========================================================================= // // ------------------------- Testing BigO O(N*lgN) ------------------------- // // ========================================================================= // -static void BM_Complexity_O_N_log_N(benchmark::State& state) { +static void BM_Complexity_O_N_log_N(benchmark::State &state) { auto v = ConstructRandomVector(state.range(0)); for (auto _ : state) { std::sort(v.begin(), v.end()); @@ -163,7 +175,7 @@ BENCHMARK(BM_Complexity_O_N_log_N) ->RangeMultiplier(2) ->Range(1 << 10, 1 << 16) ->Complexity([](benchmark::IterationCount n) { - return kLog2E * n * log(static_cast<double>(n)); + return kLog2E * static_cast<double>(n) * log(static_cast<double>(n)); }); BENCHMARK(BM_Complexity_O_N_log_N) ->RangeMultiplier(2) @@ -178,20 +190,23 @@ const char *lambda_big_o_n_lg_n = "f\\(N\\)"; // Add enum tests ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name, - rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n); + rms_o_n_lg_n_test_name, 
enum_auto_big_o_n_lg_n, + /*family_index=*/6); // Add lambda tests ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name, - rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n); + rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n, + /*family_index=*/7); // ========================================================================= // // -------- Testing formatting of Complexity with captured args ------------ // // ========================================================================= // -void BM_ComplexityCaptureArgs(benchmark::State& state, int n) { +void BM_ComplexityCaptureArgs(benchmark::State &state, int n) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } state.SetComplexityN(n); } @@ -204,7 +219,7 @@ const std::string complexity_capture_name = "BM_ComplexityCaptureArgs/capture_test"; ADD_COMPLEXITY_CASES(complexity_capture_name, complexity_capture_name + "_BigO", - complexity_capture_name + "_RMS", "N"); + complexity_capture_name + "_RMS", "N", /*family_index=*/9); // ========================================================================= // // --------------------------- TEST CASES END ------------------------------ // diff --git a/test/cxx03_test.cc b/test/cxx03_test.cc index c4c9a52..9711c1b 100644 --- a/test/cxx03_test.cc +++ b/test/cxx03_test.cc @@ -44,8 +44,7 @@ BENCHMARK_TEMPLATE(BM_template1, long); BENCHMARK_TEMPLATE1(BM_template1, int); template <class T> -struct BM_Fixture : public ::benchmark::Fixture { -}; +struct BM_Fixture : public ::benchmark::Fixture {}; BENCHMARK_TEMPLATE_F(BM_Fixture, BM_template1, long)(benchmark::State& state) { BM_empty(state); @@ -55,8 +54,8 @@ BENCHMARK_TEMPLATE1_F(BM_Fixture, BM_template2, int)(benchmark::State& state) { } void BM_counters(benchmark::State& state) { - BM_empty(state); - state.counters["Foo"] = 2; + BM_empty(state); + state.counters["Foo"] = 2; } BENCHMARK(BM_counters); diff --git a/test/diagnostics_test.cc b/test/diagnostics_test.cc index dd64a33..0cd3edb 100644 --- a/test/diagnostics_test.cc +++ b/test/diagnostics_test.cc @@ -26,7 +26,8 @@ void TestHandler() { } void try_invalid_pause_resume(benchmark::State& state) { -#if !defined(TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS) && !defined(TEST_HAS_NO_EXCEPTIONS) +#if !defined(TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS) && \ + !defined(TEST_HAS_NO_EXCEPTIONS) try { state.PauseTiming(); std::abort(); @@ -48,7 +49,8 @@ void BM_diagnostic_test(benchmark::State& state) { if (called_once == false) try_invalid_pause_resume(state); for (auto _ : state) { - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } if (called_once == false) try_invalid_pause_resume(state); @@ -57,14 +59,14 @@ void BM_diagnostic_test(benchmark::State& state) { } BENCHMARK(BM_diagnostic_test); - void BM_diagnostic_test_keep_running(benchmark::State& state) { static bool called_once = false; if (called_once == false) try_invalid_pause_resume(state); - while(state.KeepRunning()) { - benchmark::DoNotOptimize(state.iterations()); + while (state.KeepRunning()) { + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } if (called_once == false) try_invalid_pause_resume(state); @@ -74,7 +76,16 @@ void BM_diagnostic_test_keep_running(benchmark::State& state) { BENCHMARK(BM_diagnostic_test_keep_running); int main(int argc, char* argv[]) { +#ifdef NDEBUG + // 
This test is exercising functionality for debug builds, which are not + // available in release builds. Skip the test if we are in that environment + // to avoid a test failure. + std::cout << "Diagnostic test disabled in release build" << std::endl; + (void)argc; + (void)argv; +#else benchmark::internal::GetAbortHandler() = &TestHandler; benchmark::Initialize(&argc, argv); benchmark::RunSpecifiedBenchmarks(); +#endif } diff --git a/test/display_aggregates_only_test.cc b/test/display_aggregates_only_test.cc index 3c36d3f..6ad65e7 100644 --- a/test/display_aggregates_only_test.cc +++ b/test/display_aggregates_only_test.cc @@ -19,21 +19,23 @@ BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->DisplayAggregatesOnly(); int main(int argc, char* argv[]) { const std::string output = GetFileReporterOutput(argc, argv); - if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 6 || + if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 7 || SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3\"") != 3 || SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 || SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") != 1 || SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") != - 1) { - std::cout << "Precondition mismatch. Expected to only find 6 " + 1 || + SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_cv\"") != 1) { + std::cout << "Precondition mismatch. Expected to only find 8 " "occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n" "\"name\": \"BM_SummaryRepeat/repeats:3\", " "\"name\": \"BM_SummaryRepeat/repeats:3\", " "\"name\": \"BM_SummaryRepeat/repeats:3\", " "\"name\": \"BM_SummaryRepeat/repeats:3_mean\", " "\"name\": \"BM_SummaryRepeat/repeats:3_median\", " - "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"\nThe entire " + "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\", " + "\"name\": \"BM_SummaryRepeat/repeats:3_cv\"\nThe entire " "output:\n"; std::cout << output; return 1; diff --git a/test/donotoptimize_assembly_test.cc b/test/donotoptimize_assembly_test.cc index d4b0bab..dc286f5 100644 --- a/test/donotoptimize_assembly_test.cc +++ b/test/donotoptimize_assembly_test.cc @@ -3,19 +3,23 @@ #ifdef __clang__ #pragma clang diagnostic ignored "-Wreturn-type" #endif +BENCHMARK_DISABLE_DEPRECATED_WARNING extern "C" { extern int ExternInt; extern int ExternInt2; extern int ExternInt3; +extern int BigArray[2049]; + +const int ConstBigArray[2049]{}; inline int Add42(int x) { return x + 42; } struct NotTriviallyCopyable { NotTriviallyCopyable(); explicit NotTriviallyCopyable(int x) : value(x) {} - NotTriviallyCopyable(NotTriviallyCopyable const&); + NotTriviallyCopyable(NotTriviallyCopyable const &); int value; }; @@ -24,7 +28,14 @@ struct Large { int data[2]; }; +struct ExtraLarge { + int arr[2049]; +}; } + +extern ExtraLarge ExtraLargeObj; +const ExtraLarge ConstExtraLargeObj{}; + // CHECK-LABEL: test_with_rvalue: extern "C" void test_with_rvalue() { benchmark::DoNotOptimize(Add42(0)); @@ -69,6 +80,22 @@ extern "C" void test_with_large_lvalue() { // CHECK: ret } +// CHECK-LABEL: test_with_extra_large_lvalue_with_op: +extern "C" void test_with_extra_large_lvalue_with_op() { + ExtraLargeObj.arr[16] = 42; + benchmark::DoNotOptimize(ExtraLargeObj); + // CHECK: movl $42, ExtraLargeObj+64(%rip) + // CHECK: ret +} + +// CHECK-LABEL: test_with_big_array_with_op +extern "C" void test_with_big_array_with_op() { + BigArray[16] = 42; + benchmark::DoNotOptimize(BigArray); + // CHECK: movl $42, BigArray+64(%rip) + // 
CHECK: ret +} + // CHECK-LABEL: test_with_non_trivial_lvalue: extern "C" void test_with_non_trivial_lvalue() { NotTriviallyCopyable NTC(ExternInt); @@ -97,6 +124,18 @@ extern "C" void test_with_large_const_lvalue() { // CHECK: ret } +// CHECK-LABEL: test_with_const_extra_large_obj: +extern "C" void test_with_const_extra_large_obj() { + benchmark::DoNotOptimize(ConstExtraLargeObj); + // CHECK: ret +} + +// CHECK-LABEL: test_with_const_big_array +extern "C" void test_with_const_big_array() { + benchmark::DoNotOptimize(ConstBigArray); + // CHECK: ret +} + // CHECK-LABEL: test_with_non_trivial_const_lvalue: extern "C" void test_with_non_trivial_const_lvalue() { const NotTriviallyCopyable Obj(ExternInt); @@ -118,8 +157,7 @@ extern "C" int test_div_by_two(int input) { // CHECK-LABEL: test_inc_integer: extern "C" int test_inc_integer() { int x = 0; - for (int i=0; i < 5; ++i) - benchmark::DoNotOptimize(++x); + for (int i = 0; i < 5; ++i) benchmark::DoNotOptimize(++x); // CHECK: movl $1, [[DEST:.*]] // CHECK: {{(addl \$1,|incl)}} [[DEST]] // CHECK: {{(addl \$1,|incl)}} [[DEST]] @@ -147,7 +185,7 @@ extern "C" void test_pointer_const_lvalue() { // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]]) // CHECK: ret int x = 42; - int * const xp = &x; + int *const xp = &x; benchmark::DoNotOptimize(xp); } diff --git a/test/donotoptimize_test.cc b/test/donotoptimize_test.cc index 2ce92d1..04ec938 100644 --- a/test/donotoptimize_test.cc +++ b/test/donotoptimize_test.cc @@ -1,33 +1,43 @@ -#include "benchmark/benchmark.h" - #include <cstdint> +#include "benchmark/benchmark.h" + namespace { #if defined(__GNUC__) -std::uint64_t double_up(const std::uint64_t x) __attribute__((const)); +std::int64_t double_up(const std::int64_t x) __attribute__((const)); #endif -std::uint64_t double_up(const std::uint64_t x) { return x * 2; } -} +std::int64_t double_up(const std::int64_t x) { return x * 2; } +} // namespace // Using DoNotOptimize on types like BitRef seem to cause a lot of problems // with the inline assembly on both GCC and Clang. struct BitRef { int index; - unsigned char &byte; + unsigned char& byte; -public: + public: static BitRef Make() { static unsigned char arr[2] = {}; BitRef b(1, arr[0]); return b; } -private: + + private: BitRef(int i, unsigned char& b) : index(i), byte(b) {} }; int main(int, char*[]) { // this test verifies compilation of DoNotOptimize() for some types + char buffer1[1] = ""; + benchmark::DoNotOptimize(buffer1); + + char buffer2[2] = ""; + benchmark::DoNotOptimize(buffer2); + + char buffer3[3] = ""; + benchmark::DoNotOptimize(buffer3); + char buffer8[8] = ""; benchmark::DoNotOptimize(buffer8); @@ -36,17 +46,24 @@ int main(int, char*[]) { char buffer1024[1024] = ""; benchmark::DoNotOptimize(buffer1024); - benchmark::DoNotOptimize(&buffer1024[0]); + char* bptr = &buffer1024[0]; + benchmark::DoNotOptimize(bptr); int x = 123; benchmark::DoNotOptimize(x); - benchmark::DoNotOptimize(&x); + int* xp = &x; + benchmark::DoNotOptimize(xp); benchmark::DoNotOptimize(x += 42); - benchmark::DoNotOptimize(double_up(x)); + std::int64_t y = double_up(x); + benchmark::DoNotOptimize(y); // These tests are to e - benchmark::DoNotOptimize(BitRef::Make()); BitRef lval = BitRef::Make(); benchmark::DoNotOptimize(lval); + +#ifdef BENCHMARK_HAS_CXX11 + // Check that accept rvalue. 
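  // (Gloss on the guard above, hedged: BENCHMARK_HAS_CXX11 is taken to mean
  // DoNotOptimize() also provides an rvalue-reference overload, so passing a
  // temporary such as BitRef::Make() compiles; pre-C++11 callers must first
  // bind the result to a named lvalue, as done with `lval` above.)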
+ benchmark::DoNotOptimize(BitRef::Make()); +#endif } diff --git a/test/filter_test.cc b/test/filter_test.cc index 0e27065..4c8b8ea 100644 --- a/test/filter_test.cc +++ b/test/filter_test.cc @@ -1,36 +1,40 @@ -#include "benchmark/benchmark.h" - +#include <algorithm> #include <cassert> #include <cmath> #include <cstdint> #include <cstdlib> - #include <iostream> #include <limits> #include <sstream> #include <string> +#include "benchmark/benchmark.h" + namespace { class TestReporter : public benchmark::ConsoleReporter { public: - virtual bool ReportContext(const Context& context) { + bool ReportContext(const Context& context) override { return ConsoleReporter::ReportContext(context); }; - virtual void ReportRuns(const std::vector<Run>& report) { + void ReportRuns(const std::vector<Run>& report) override { ++count_; + max_family_index_ = std::max(max_family_index_, report[0].family_index); ConsoleReporter::ReportRuns(report); }; - TestReporter() : count_(0) {} + TestReporter() : count_(0), max_family_index_(0) {} - virtual ~TestReporter() {} + ~TestReporter() override {} - size_t GetCount() const { return count_; } + int GetCount() const { return count_; } + + int64_t GetMaxFamilyIndex() const { return max_family_index_; } private: - mutable size_t count_; + mutable int count_; + mutable int64_t max_family_index_; }; } // end namespace @@ -65,7 +69,7 @@ static void BM_FooBa(benchmark::State& state) { } BENCHMARK(BM_FooBa); -int main(int argc, char **argv) { +int main(int argc, char** argv) { bool list_only = false; for (int i = 0; i < argc; ++i) list_only |= std::string(argv[i]).find("--benchmark_list_tests") != @@ -74,13 +78,13 @@ int main(int argc, char **argv) { benchmark::Initialize(&argc, argv); TestReporter test_reporter; - const size_t returned_count = - benchmark::RunSpecifiedBenchmarks(&test_reporter); + const int64_t returned_count = + static_cast<int64_t>(benchmark::RunSpecifiedBenchmarks(&test_reporter)); if (argc == 2) { // Make sure we ran all of the tests std::stringstream ss(argv[1]); - size_t expected_return; + int64_t expected_return; ss >> expected_return; if (returned_count != expected_return) { @@ -90,14 +94,23 @@ int main(int argc, char **argv) { return -1; } - const size_t expected_reports = list_only ? 0 : expected_return; - const size_t reports_count = test_reporter.GetCount(); + const int64_t expected_reports = list_only ? 0 : expected_return; + const int64_t reports_count = test_reporter.GetCount(); if (reports_count != expected_reports) { std::cerr << "ERROR: Expected " << expected_reports << " tests to be run but reported_count = " << reports_count << std::endl; return -1; } + + const int64_t max_family_index = test_reporter.GetMaxFamilyIndex(); + const int64_t num_families = reports_count == 0 ? 
0 : 1 + max_family_index; + if (num_families != expected_reports) { + std::cerr << "ERROR: Expected " << expected_reports + << " test families to be run but num_families = " + << num_families << std::endl; + return -1; + } } return 0; diff --git a/test/fixture_test.cc b/test/fixture_test.cc index a331c7d..d1093eb 100644 --- a/test/fixture_test.cc +++ b/test/fixture_test.cc @@ -1,33 +1,33 @@ -#include "benchmark/benchmark.h" - #include <cassert> #include <memory> +#include "benchmark/benchmark.h" + #define FIXTURE_BECHMARK_NAME MyFixture class FIXTURE_BECHMARK_NAME : public ::benchmark::Fixture { public: - void SetUp(const ::benchmark::State& state) { - if (state.thread_index == 0) { + void SetUp(const ::benchmark::State& state) override { + if (state.thread_index() == 0) { assert(data.get() == nullptr); data.reset(new int(42)); } } - void TearDown(const ::benchmark::State& state) { - if (state.thread_index == 0) { + void TearDown(const ::benchmark::State& state) override { + if (state.thread_index() == 0) { assert(data.get() != nullptr); data.reset(); } } - ~FIXTURE_BECHMARK_NAME() { assert(data == nullptr); } + ~FIXTURE_BECHMARK_NAME() override { assert(data == nullptr); } std::unique_ptr<int> data; }; -BENCHMARK_F(FIXTURE_BECHMARK_NAME, Foo)(benchmark::State &st) { +BENCHMARK_F(FIXTURE_BECHMARK_NAME, Foo)(benchmark::State& st) { assert(data.get() != nullptr); assert(*data == 42); for (auto _ : st) { @@ -35,7 +35,7 @@ BENCHMARK_F(FIXTURE_BECHMARK_NAME, Foo)(benchmark::State &st) { } BENCHMARK_DEFINE_F(FIXTURE_BECHMARK_NAME, Bar)(benchmark::State& st) { - if (st.thread_index == 0) { + if (st.thread_index() == 0) { assert(data.get() != nullptr); assert(*data == 42); } diff --git a/test/internal_threading_test.cc b/test/internal_threading_test.cc index 039d7c1..62b5b95 100644 --- a/test/internal_threading_test.cc +++ b/test/internal_threading_test.cc @@ -3,6 +3,7 @@ #include <chrono> #include <thread> + #include "../src/timers.h" #include "benchmark/benchmark.h" #include "output_test.h" diff --git a/test/link_main_test.cc b/test/link_main_test.cc index 241ad5c..e806500 100644 --- a/test/link_main_test.cc +++ b/test/link_main_test.cc @@ -2,7 +2,8 @@ void BM_empty(benchmark::State& state) { for (auto _ : state) { - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } } BENCHMARK(BM_empty); diff --git a/test/map_test.cc b/test/map_test.cc index dbf7982..0fdba7c 100644 --- a/test/map_test.cc +++ b/test/map_test.cc @@ -1,8 +1,8 @@ -#include "benchmark/benchmark.h" - #include <cstdlib> #include <map> +#include "benchmark/benchmark.h" + namespace { std::map<int, int> ConstructRandomMap(int size) { @@ -24,7 +24,8 @@ static void BM_MapLookup(benchmark::State& state) { m = ConstructRandomMap(size); state.ResumeTiming(); for (int i = 0; i < size; ++i) { - benchmark::DoNotOptimize(m.find(std::rand() % size)); + auto it = m.find(std::rand() % size); + benchmark::DoNotOptimize(it); } } state.SetItemsProcessed(state.iterations() * size); @@ -34,11 +35,11 @@ BENCHMARK(BM_MapLookup)->Range(1 << 3, 1 << 12); // Using fixtures. 
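// (Sketch of the fixture flow relied on below, hedged: SetUp() runs before
// timing starts and TearDown() after it stops, so ConstructRandomMap() stays
// outside the measured region. The define/register split typically reads:
//   BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) { ... }
//   BENCHMARK_REGISTER_F(MapFixture, Lookup)->Range(1 << 3, 1 << 12);
// where the Range values here simply mirror BM_MapLookup above.)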
class MapFixture : public ::benchmark::Fixture { public: - void SetUp(const ::benchmark::State& st) { + void SetUp(const ::benchmark::State& st) override { m = ConstructRandomMap(static_cast<int>(st.range(0))); } - void TearDown(const ::benchmark::State&) { m.clear(); } + void TearDown(const ::benchmark::State&) override { m.clear(); } std::map<int, int> m; }; @@ -47,7 +48,8 @@ BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) { const int size = static_cast<int>(state.range(0)); for (auto _ : state) { for (int i = 0; i < size; ++i) { - benchmark::DoNotOptimize(m.find(std::rand() % size)); + auto it = m.find(std::rand() % size); + benchmark::DoNotOptimize(it); } } state.SetItemsProcessed(state.iterations() * size); diff --git a/test/memory_manager_test.cc b/test/memory_manager_test.cc index 90bed16..d94bd51 100644 --- a/test/memory_manager_test.cc +++ b/test/memory_manager_test.cc @@ -5,25 +5,28 @@ #include "output_test.h" class TestMemoryManager : public benchmark::MemoryManager { - void Start() {} - void Stop(Result* result) { - result->num_allocs = 42; - result->max_bytes_used = 42000; + void Start() override {} + void Stop(Result& result) override { + result.num_allocs = 42; + result.max_bytes_used = 42000; } }; void BM_empty(benchmark::State& state) { for (auto _ : state) { - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } } BENCHMARK(BM_empty); ADD_CASES(TC_ConsoleOut, {{"^BM_empty %console_report$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_empty\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_empty\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"iterations\": %int,$", MR_Next}, diff --git a/test/min_time_parse_gtest.cc b/test/min_time_parse_gtest.cc new file mode 100644 index 0000000..e2bdf67 --- /dev/null +++ b/test/min_time_parse_gtest.cc @@ -0,0 +1,30 @@ +#include "../src/benchmark_runner.h" +#include "gtest/gtest.h" + +namespace { + +TEST(ParseMinTimeTest, InvalidInput) { +#if GTEST_HAS_DEATH_TEST + // Tests only runnable in debug mode (when BM_CHECK is enabled). 
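// (Hedged note on the double guard that follows: BM_CHECK is assumed to act
// like assert(), aborting with a message only when NDEBUG is not defined, and
// the library may also be built with TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS.
// Only when neither applies is there an abort for the death tests to observe,
// e.g.:
//   ASSERT_DEATH_IF_SUPPORTED(
//       { benchmark::internal::ParseBenchMinTime("abc"); },
//       "Malformed seconds value passed to --benchmark_min_time: `abc`");
// )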
+#ifndef NDEBUG +#ifndef TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS + ASSERT_DEATH_IF_SUPPORTED( + { benchmark::internal::ParseBenchMinTime("abc"); }, + "Malformed seconds value passed to --benchmark_min_time: `abc`"); + + ASSERT_DEATH_IF_SUPPORTED( + { benchmark::internal::ParseBenchMinTime("123ms"); }, + "Malformed seconds value passed to --benchmark_min_time: `123ms`"); + + ASSERT_DEATH_IF_SUPPORTED( + { benchmark::internal::ParseBenchMinTime("1z"); }, + "Malformed seconds value passed to --benchmark_min_time: `1z`"); + + ASSERT_DEATH_IF_SUPPORTED( + { benchmark::internal::ParseBenchMinTime("1hs"); }, + "Malformed seconds value passed to --benchmark_min_time: `1hs`"); +#endif +#endif +#endif +} +} // namespace diff --git a/test/multiple_ranges_test.cc b/test/multiple_ranges_test.cc index b25f40e..5300a96 100644 --- a/test/multiple_ranges_test.cc +++ b/test/multiple_ranges_test.cc @@ -1,10 +1,10 @@ -#include "benchmark/benchmark.h" - #include <cassert> #include <iostream> #include <set> #include <vector> +#include "benchmark/benchmark.h" + class MultipleRangesFixture : public ::benchmark::Fixture { public: MultipleRangesFixture() @@ -28,7 +28,7 @@ class MultipleRangesFixture : public ::benchmark::Fixture { {2, 7, 15}, {7, 6, 3}}) {} - void SetUp(const ::benchmark::State& state) { + void SetUp(const ::benchmark::State& state) override { std::vector<int64_t> ranges = {state.range(0), state.range(1), state.range(2)}; @@ -39,10 +39,10 @@ class MultipleRangesFixture : public ::benchmark::Fixture { // NOTE: This is not TearDown as we want to check after _all_ runs are // complete. - virtual ~MultipleRangesFixture() { + ~MultipleRangesFixture() override { if (actualValues != expectedValues) { std::cout << "EXPECTED\n"; - for (auto v : expectedValues) { + for (const auto& v : expectedValues) { std::cout << "{"; for (int64_t iv : v) { std::cout << iv << ", "; @@ -50,7 +50,7 @@ class MultipleRangesFixture : public ::benchmark::Fixture { std::cout << "}\n"; } std::cout << "ACTUAL\n"; - for (auto v : actualValues) { + for (const auto& v : actualValues) { std::cout << "{"; for (int64_t iv : v) { std::cout << iv << ", "; diff --git a/test/options_test.cc b/test/options_test.cc index 9f9a786..a1b209f 100644 --- a/test/options_test.cc +++ b/test/options_test.cc @@ -1,7 +1,8 @@ -#include "benchmark/benchmark.h" #include <chrono> #include <thread> +#include "benchmark/benchmark.h" + #if defined(NDEBUG) #undef NDEBUG #endif @@ -32,6 +33,8 @@ BENCHMARK(BM_basic)->DenseRange(10, 15); BENCHMARK(BM_basic)->Args({42, 42}); BENCHMARK(BM_basic)->Ranges({{64, 512}, {64, 512}}); BENCHMARK(BM_basic)->MinTime(0.7); +BENCHMARK(BM_basic)->MinWarmUpTime(0.8); +BENCHMARK(BM_basic)->MinTime(0.1)->MinWarmUpTime(0.2); BENCHMARK(BM_basic)->UseRealTime(); BENCHMARK(BM_basic)->ThreadRange(2, 4); BENCHMARK(BM_basic)->ThreadPerCpu(); @@ -64,12 +67,10 @@ void BM_explicit_iteration_count(benchmark::State& state) { // Test that the requested iteration count is respected. 
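  // (The 42 is pinned by the registration at the bottom of this file,
  // BENCHMARK(BM_explicit_iteration_count)->Iterations(42); Iterations(n)
  // bypasses the runner's min-time calibration and runs exactly n iterations,
  // which is why both asserts below can be exact.)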
assert(state.max_iterations == 42); - size_t actual_iterations = 0; - for (auto _ : state) - ++actual_iterations; + for (auto _ : state) { + } assert(state.iterations() == state.max_iterations); assert(state.iterations() == 42); - } BENCHMARK(BM_explicit_iteration_count)->Iterations(42); diff --git a/test/output_test.h b/test/output_test.h index 9385761..c08fe1d 100644 --- a/test/output_test.h +++ b/test/output_test.h @@ -85,7 +85,7 @@ std::string GetFileReporterOutput(int argc, char* argv[]); struct Results; typedef std::function<void(Results const&)> ResultsCheckFn; -size_t AddChecker(const char* bm_name_pattern, ResultsCheckFn fn); +size_t AddChecker(const std::string& bm_name_pattern, const ResultsCheckFn& fn); // Class holding the results of a benchmark. // It is passed in calls to checker functions. @@ -113,13 +113,11 @@ struct Results { return NumIterations() * GetTime(kRealTime); } // get the cpu_time duration of the benchmark in seconds - double DurationCPUTime() const { - return NumIterations() * GetTime(kCpuTime); - } + double DurationCPUTime() const { return NumIterations() * GetTime(kCpuTime); } // get the string for a result by name, or nullptr if the name // is not found - const std::string* Get(const char* entry_name) const { + const std::string* Get(const std::string& entry_name) const { auto it = values.find(entry_name); if (it == values.end()) return nullptr; return &it->second; @@ -128,12 +126,12 @@ struct Results { // get a result by name, parsed as a specific type. // NOTE: for counters, use GetCounterAs instead. template <class T> - T GetAs(const char* entry_name) const; + T GetAs(const std::string& entry_name) const; // counters are written as doubles, so they have to be read first // as a double, and only then converted to the asked type. template <class T> - T GetCounterAs(const char* entry_name) const { + T GetCounterAs(const std::string& entry_name) const { double dval = GetAs<double>(entry_name); T tval = static_cast<T>(dval); return tval; @@ -141,14 +139,14 @@ struct Results { }; template <class T> -T Results::GetAs(const char* entry_name) const { +T Results::GetAs(const std::string& entry_name) const { auto* sv = Get(entry_name); - CHECK(sv != nullptr && !sv->empty()); + BM_CHECK(sv != nullptr && !sv->empty()); std::stringstream ss; ss << *sv; T out; ss >> out; - CHECK(!ss.fail()); + BM_CHECK(!ss.fail()); return out; } @@ -158,8 +156,8 @@ T Results::GetAs(const char* entry_name) const { // clang-format off -#define _CHECK_RESULT_VALUE(entry, getfn, var_type, var_name, relationship, value) \ - CONCAT(CHECK_, relationship) \ +#define CHECK_RESULT_VALUE_IMPL(entry, getfn, var_type, var_name, relationship, value) \ + CONCAT(BM_CHECK_, relationship) \ (entry.getfn< var_type >(var_name), (value)) << "\n" \ << __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n" \ << __FILE__ << ":" << __LINE__ << ": " \ @@ -169,8 +167,8 @@ T Results::GetAs(const char* entry_name) const { // check with tolerance. eps_factor is the tolerance window, which is // interpreted relative to value (eg, 0.1 means 10% of value). 
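// Worked example of that window, with illustrative numbers: for value = 100
// and eps_factor = 0.1 the tolerance is 0.1 * 100 = 10, so a measured 109
// passes CHECK_FLOAT_RESULT_VALUE(e, "real_time", EQ, 100, 0.1) while a
// measured 111 fails it.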
-#define _CHECK_FLOAT_RESULT_VALUE(entry, getfn, var_type, var_name, relationship, value, eps_factor) \ - CONCAT(CHECK_FLOAT_, relationship) \ +#define CHECK_FLOAT_RESULT_VALUE_IMPL(entry, getfn, var_type, var_name, relationship, value, eps_factor) \ + CONCAT(BM_CHECK_FLOAT_, relationship) \ (entry.getfn< var_type >(var_name), (value), (eps_factor) * (value)) << "\n" \ << __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n" \ << __FILE__ << ":" << __LINE__ << ": " \ @@ -187,16 +185,16 @@ T Results::GetAs(const char* entry_name) const { << "%)" #define CHECK_RESULT_VALUE(entry, var_type, var_name, relationship, value) \ - _CHECK_RESULT_VALUE(entry, GetAs, var_type, var_name, relationship, value) + CHECK_RESULT_VALUE_IMPL(entry, GetAs, var_type, var_name, relationship, value) #define CHECK_COUNTER_VALUE(entry, var_type, var_name, relationship, value) \ - _CHECK_RESULT_VALUE(entry, GetCounterAs, var_type, var_name, relationship, value) + CHECK_RESULT_VALUE_IMPL(entry, GetCounterAs, var_type, var_name, relationship, value) #define CHECK_FLOAT_RESULT_VALUE(entry, var_name, relationship, value, eps_factor) \ - _CHECK_FLOAT_RESULT_VALUE(entry, GetAs, double, var_name, relationship, value, eps_factor) + CHECK_FLOAT_RESULT_VALUE_IMPL(entry, GetAs, double, var_name, relationship, value, eps_factor) #define CHECK_FLOAT_COUNTER_VALUE(entry, var_name, relationship, value, eps_factor) \ - _CHECK_FLOAT_RESULT_VALUE(entry, GetCounterAs, double, var_name, relationship, value, eps_factor) + CHECK_FLOAT_RESULT_VALUE_IMPL(entry, GetCounterAs, double, var_name, relationship, value, eps_factor) // clang-format on diff --git a/test/output_test_helper.cc b/test/output_test_helper.cc index 1aebc55..2567370 100644 --- a/test/output_test_helper.cc +++ b/test/output_test_helper.cc @@ -10,6 +10,7 @@ #include "../src/benchmark_api_internal.h" #include "../src/check.h" // NOTE: check.h is for internal use only! 
+#include "../src/log.h" // NOTE: log.h is for internal use only #include "../src/re.h" // NOTE: re.h is for internal use only #include "output_test.h" @@ -40,14 +41,17 @@ SubMap& GetSubstitutions() { // clang-format off static std::string safe_dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"; static std::string time_re = "([0-9]+[.])?[0-9]+"; + static std::string percentage_re = "[0-9]+[.][0-9]{2}"; static SubMap map = { {"%float", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"}, // human-readable float - {"%hrfloat", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?[kMGTPEZYmunpfazy]?"}, + {"%hrfloat", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?[kKMGTPEZYmunpfazy]?i?"}, + {"%percentage", percentage_re}, {"%int", "[ ]*[0-9]+"}, {" %s ", "[ ]+"}, {"%time", "[ ]*" + time_re + "[ ]+ns"}, {"%console_report", "[ ]*" + time_re + "[ ]+ns [ ]*" + time_re + "[ ]+ns [ ]*[0-9]+"}, + {"%console_percentage_report", "[ ]*" + percentage_re + "[ ]+% [ ]*" + percentage_re + "[ ]+% [ ]*[0-9]+"}, {"%console_us_report", "[ ]*" + time_re + "[ ]+us [ ]*" + time_re + "[ ]+us [ ]*[0-9]+"}, {"%console_ms_report", "[ ]*" + time_re + "[ ]+ms [ ]*" + time_re + "[ ]+ms [ ]*[0-9]+"}, {"%console_s_report", "[ ]*" + time_re + "[ ]+s [ ]*" + time_re + "[ ]+s [ ]*[0-9]+"}, @@ -94,27 +98,27 @@ void CheckCase(std::stringstream& remaining_output, TestCase const& TC, bool on_first = true; std::string line; while (remaining_output.eof() == false) { - CHECK(remaining_output.good()); + BM_CHECK(remaining_output.good()); std::getline(remaining_output, line); if (on_first) { first_line = line; on_first = false; } for (const auto& NC : not_checks) { - CHECK(!NC.regex->Match(line)) + BM_CHECK(!NC.regex->Match(line)) << "Unexpected match for line \"" << line << "\" for MR_Not regex \"" << NC.regex_str << "\"" << "\n actual regex string \"" << TC.substituted_regex << "\"" << "\n started matching near: " << first_line; } if (TC.regex->Match(line)) return; - CHECK(TC.match_rule != MR_Next) + BM_CHECK(TC.match_rule != MR_Next) << "Expected line \"" << line << "\" to match regex \"" << TC.regex_str << "\"" << "\n actual regex string \"" << TC.substituted_regex << "\"" << "\n started matching near: " << first_line; } - CHECK(remaining_output.eof() == false) + BM_CHECK(remaining_output.eof() == false) << "End of output reached before match for regex \"" << TC.regex_str << "\" was found" << "\n actual regex string \"" << TC.substituted_regex << "\"" @@ -137,14 +141,14 @@ void CheckCases(TestCaseList const& checks, std::stringstream& output) { class TestReporter : public benchmark::BenchmarkReporter { public: TestReporter(std::vector<benchmark::BenchmarkReporter*> reps) - : reporters_(reps) {} + : reporters_(std::move(reps)) {} - virtual bool ReportContext(const Context& context) { + bool ReportContext(const Context& context) override { bool last_ret = false; bool first = true; for (auto rep : reporters_) { bool new_ret = rep->ReportContext(context); - CHECK(first || new_ret == last_ret) + BM_CHECK(first || new_ret == last_ret) << "Reports return different values for ReportContext"; first = false; last_ret = new_ret; @@ -153,10 +157,10 @@ class TestReporter : public benchmark::BenchmarkReporter { return last_ret; } - void ReportRuns(const std::vector<Run>& report) { + void ReportRuns(const std::vector<Run>& report) override { for (auto rep : reporters_) rep->ReportRuns(report); } - void Finalize() { + void Finalize() override { for (auto rep : reporters_) rep->Finalize(); } @@ -179,7 +183,7 @@ class ResultsChecker { public: struct PatternAndFn : public TestCase { // reusing 
TestCase for its regexes PatternAndFn(const std::string& rx, ResultsCheckFn fn_) - : TestCase(rx), fn(fn_) {} + : TestCase(rx), fn(std::move(fn_)) {} ResultsCheckFn fn; }; @@ -187,7 +191,7 @@ class ResultsChecker { std::vector<Results> results; std::vector<std::string> field_names; - void Add(const std::string& entry_pattern, ResultsCheckFn fn); + void Add(const std::string& entry_pattern, const ResultsCheckFn& fn); void CheckResults(std::stringstream& output); @@ -206,7 +210,8 @@ ResultsChecker& GetResultsChecker() { } // add a results checker for a benchmark -void ResultsChecker::Add(const std::string& entry_pattern, ResultsCheckFn fn) { +void ResultsChecker::Add(const std::string& entry_pattern, + const ResultsCheckFn& fn) { check_patterns.emplace_back(entry_pattern, fn); } @@ -226,7 +231,7 @@ void ResultsChecker::CheckResults(std::stringstream& output) { std::string line; bool on_first = true; while (output.eof() == false) { - CHECK(output.good()); + BM_CHECK(output.good()); std::getline(output, line); if (on_first) { SetHeader_(line); // this is important @@ -237,18 +242,17 @@ void ResultsChecker::CheckResults(std::stringstream& output) { } // finally we can call the subscribed check functions for (const auto& p : check_patterns) { - VLOG(2) << "--------------------------------\n"; - VLOG(2) << "checking for benchmarks matching " << p.regex_str << "...\n"; + BM_VLOG(2) << "--------------------------------\n"; + BM_VLOG(2) << "checking for benchmarks matching " << p.regex_str << "...\n"; for (const auto& r : results) { if (!p.regex->Match(r.name)) { - VLOG(2) << p.regex_str << " is not matched by " << r.name << "\n"; + BM_VLOG(2) << p.regex_str << " is not matched by " << r.name << "\n"; continue; - } else { - VLOG(2) << p.regex_str << " is matched by " << r.name << "\n"; } - VLOG(1) << "Checking results of " << r.name << ": ... \n"; + BM_VLOG(2) << p.regex_str << " is matched by " << r.name << "\n"; + BM_VLOG(1) << "Checking results of " << r.name << ": ... 
\n"; p.fn(r); - VLOG(1) << "Checking results of " << r.name << ": OK.\n"; + BM_VLOG(1) << "Checking results of " << r.name << ": OK.\n"; } } } @@ -261,9 +265,9 @@ void ResultsChecker::SetHeader_(const std::string& csv_header) { // set the values for a benchmark void ResultsChecker::SetValues_(const std::string& entry_csv_line) { if (entry_csv_line.empty()) return; // some lines are empty - CHECK(!field_names.empty()); + BM_CHECK(!field_names.empty()); auto vals = SplitCsv_(entry_csv_line); - CHECK_EQ(vals.size(), field_names.size()); + BM_CHECK_EQ(vals.size(), field_names.size()); results.emplace_back(vals[0]); // vals[0] is the benchmark name auto& entry = results.back(); for (size_t i = 1, e = vals.size(); i < e; ++i) { @@ -278,7 +282,7 @@ std::vector<std::string> ResultsChecker::SplitCsv_(const std::string& line) { if (!field_names.empty()) out.reserve(field_names.size()); size_t prev = 0, pos = line.find_first_of(','), curr = pos; while (pos != line.npos) { - CHECK(curr > 0); + BM_CHECK(curr > 0); if (line[prev] == '"') ++prev; if (line[curr - 1] == '"') --curr; out.push_back(line.substr(prev, curr - prev)); @@ -295,7 +299,7 @@ std::vector<std::string> ResultsChecker::SplitCsv_(const std::string& line) { } // end namespace internal -size_t AddChecker(const char* bm_name, ResultsCheckFn fn) { +size_t AddChecker(const std::string& bm_name, const ResultsCheckFn& fn) { auto& rc = internal::GetResultsChecker(); rc.Add(bm_name, fn); return rc.results.size(); @@ -309,32 +313,32 @@ int Results::NumThreads() const { ss << name.substr(pos + 9, end); int num = 1; ss >> num; - CHECK(!ss.fail()); + BM_CHECK(!ss.fail()); return num; } -double Results::NumIterations() const { - return GetAs<double>("iterations"); -} +double Results::NumIterations() const { return GetAs<double>("iterations"); } double Results::GetTime(BenchmarkTime which) const { - CHECK(which == kCpuTime || which == kRealTime); + BM_CHECK(which == kCpuTime || which == kRealTime); const char* which_str = which == kCpuTime ? 
"cpu_time" : "real_time"; double val = GetAs<double>(which_str); auto unit = Get("time_unit"); - CHECK(unit); + BM_CHECK(unit); if (*unit == "ns") { return val * 1.e-9; - } else if (*unit == "us") { + } + if (*unit == "us") { return val * 1.e-6; - } else if (*unit == "ms") { + } + if (*unit == "ms") { return val * 1.e-3; - } else if (*unit == "s") { + } + if (*unit == "s") { return val; - } else { - CHECK(1 == 0) << "unknown time unit: " << *unit; - return 0; } + BM_CHECK(1 == 0) << "unknown time unit: " << *unit; + return 0; } // ========================================================================= // @@ -348,10 +352,10 @@ TestCase::TestCase(std::string re, int rule) regex(std::make_shared<benchmark::Regex>()) { std::string err_str; regex->Init(substituted_regex, &err_str); - CHECK(err_str.empty()) << "Could not construct regex \"" << substituted_regex - << "\"" - << "\n originally \"" << regex_str << "\"" - << "\n got error: " << err_str; + BM_CHECK(err_str.empty()) + << "Could not construct regex \"" << substituted_regex << "\"" + << "\n originally \"" << regex_str << "\"" + << "\n got error: " << err_str; } int AddCases(TestCaseID ID, std::initializer_list<TestCase> il) { @@ -380,10 +384,8 @@ int SetSubstitutions( // Disable deprecated warnings temporarily because we need to reference // CSVReporter but don't want to trigger -Werror=-Wdeprecated-declarations -#ifdef __GNUC__ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" -#endif +BENCHMARK_DISABLE_DEPRECATED_WARNING + void RunOutputTests(int argc, char* argv[]) { using internal::GetTestCaseList; benchmark::Initialize(&argc, argv); @@ -392,14 +394,14 @@ void RunOutputTests(int argc, char* argv[]) { benchmark::JSONReporter JR; benchmark::CSVReporter CSVR; struct ReporterTest { - const char* name; + std::string name; std::vector<TestCase>& output_cases; std::vector<TestCase>& error_cases; benchmark::BenchmarkReporter& reporter; std::stringstream out_stream; std::stringstream err_stream; - ReporterTest(const char* n, std::vector<TestCase>& out_tc, + ReporterTest(const std::string& n, std::vector<TestCase>& out_tc, std::vector<TestCase>& err_tc, benchmark::BenchmarkReporter& br) : name(n), output_cases(out_tc), error_cases(err_tc), reporter(br) { @@ -407,12 +409,12 @@ void RunOutputTests(int argc, char* argv[]) { reporter.SetErrorStream(&err_stream); } } TestCases[] = { - {"ConsoleReporter", GetTestCaseList(TC_ConsoleOut), + {std::string("ConsoleReporter"), GetTestCaseList(TC_ConsoleOut), GetTestCaseList(TC_ConsoleErr), CR}, - {"JSONReporter", GetTestCaseList(TC_JSONOut), GetTestCaseList(TC_JSONErr), - JR}, - {"CSVReporter", GetTestCaseList(TC_CSVOut), GetTestCaseList(TC_CSVErr), - CSVR}, + {std::string("JSONReporter"), GetTestCaseList(TC_JSONOut), + GetTestCaseList(TC_JSONErr), JR}, + {std::string("CSVReporter"), GetTestCaseList(TC_CSVOut), + GetTestCaseList(TC_CSVErr), CSVR}, }; // Create the test reporter and run the benchmarks. @@ -421,7 +423,8 @@ void RunOutputTests(int argc, char* argv[]) { benchmark::RunSpecifiedBenchmarks(&test_rep); for (auto& rep_test : TestCases) { - std::string msg = std::string("\nTesting ") + rep_test.name + " Output\n"; + std::string msg = + std::string("\nTesting ") + rep_test.name + std::string(" Output\n"); std::string banner(msg.size() - 1, '-'); std::cout << banner << msg << banner << "\n"; @@ -438,13 +441,11 @@ void RunOutputTests(int argc, char* argv[]) { // the checks to subscribees. 
auto& csv = TestCases[2]; // would use == but gcc spits a warning - CHECK(std::strcmp(csv.name, "CSVReporter") == 0); + BM_CHECK(csv.name == std::string("CSVReporter")); internal::GetResultsChecker().CheckResults(csv.out_stream); } -#ifdef __GNUC__ -#pragma GCC diagnostic pop -#endif +BENCHMARK_RESTORE_DEPRECATED_WARNING int SubstrCnt(const std::string& haystack, const std::string& pat) { if (pat.length() == 0) return 0; @@ -468,9 +469,8 @@ static char RandomHexChar() { static std::string GetRandomFileName() { std::string model = "test.%%%%%%"; - for (auto & ch : model) { - if (ch == '%') - ch = RandomHexChar(); + for (auto& ch : model) { + if (ch == '%') ch = RandomHexChar(); } return model; } @@ -487,8 +487,7 @@ static std::string GetTempFileName() { int retries = 3; while (--retries) { std::string name = GetRandomFileName(); - if (!FileExists(name)) - return name; + if (!FileExists(name)) return name; } std::cerr << "Failed to create unique temporary file name" << std::endl; std::abort(); diff --git a/test/perf_counters_gtest.cc b/test/perf_counters_gtest.cc new file mode 100644 index 0000000..54c7863 --- /dev/null +++ b/test/perf_counters_gtest.cc @@ -0,0 +1,307 @@ +#include <random> +#include <thread> + +#include "../src/perf_counters.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +#ifndef GTEST_SKIP +struct MsgHandler { + void operator=(std::ostream&) {} +}; +#define GTEST_SKIP() return MsgHandler() = std::cout +#endif + +using benchmark::internal::PerfCounters; +using benchmark::internal::PerfCountersMeasurement; +using benchmark::internal::PerfCounterValues; +using ::testing::AllOf; +using ::testing::Gt; +using ::testing::Lt; + +namespace { +const char kGenericPerfEvent1[] = "CYCLES"; +const char kGenericPerfEvent2[] = "INSTRUCTIONS"; + +TEST(PerfCountersTest, Init) { + EXPECT_EQ(PerfCounters::Initialize(), PerfCounters::kSupported); +} + +TEST(PerfCountersTest, OneCounter) { + if (!PerfCounters::kSupported) { + GTEST_SKIP() << "Performance counters not supported.\n"; + } + EXPECT_TRUE(PerfCounters::Initialize()); + EXPECT_EQ(PerfCounters::Create({kGenericPerfEvent1}).num_counters(), 1); +} + +TEST(PerfCountersTest, NegativeTest) { + if (!PerfCounters::kSupported) { + EXPECT_FALSE(PerfCounters::Initialize()); + return; + } + EXPECT_TRUE(PerfCounters::Initialize()); + // Sanity checks + // Create() will always create a valid object, even if passed no or + // wrong arguments as the new behavior is to warn and drop unsupported + // counters + EXPECT_EQ(PerfCounters::Create({}).num_counters(), 0); + EXPECT_EQ(PerfCounters::Create({""}).num_counters(), 0); + EXPECT_EQ(PerfCounters::Create({"not a counter name"}).num_counters(), 0); + { + // Try sneaking in a bad egg to see if it is filtered out. 
The + // number of counters has to be two, not zero + auto counter = + PerfCounters::Create({kGenericPerfEvent2, "", kGenericPerfEvent1}); + EXPECT_EQ(counter.num_counters(), 2); + EXPECT_EQ(counter.names(), std::vector<std::string>( + {kGenericPerfEvent2, kGenericPerfEvent1})); + } + { + // Try sneaking in an outrageous counter, like a fat finger mistake + auto counter = PerfCounters::Create( + {kGenericPerfEvent2, "not a counter name", kGenericPerfEvent1}); + EXPECT_EQ(counter.num_counters(), 2); + EXPECT_EQ(counter.names(), std::vector<std::string>( + {kGenericPerfEvent2, kGenericPerfEvent1})); + } + { + // Finally try a golden input - it should like both of them + EXPECT_EQ(PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent2}) + .num_counters(), + 2); + } + { + // Add a bad apple in the end of the chain to check the edges + auto counter = PerfCounters::Create( + {kGenericPerfEvent1, kGenericPerfEvent2, "bad event name"}); + EXPECT_EQ(counter.num_counters(), 2); + EXPECT_EQ(counter.names(), std::vector<std::string>( + {kGenericPerfEvent1, kGenericPerfEvent2})); + } +} + +TEST(PerfCountersTest, Read1Counter) { + if (!PerfCounters::kSupported) { + GTEST_SKIP() << "Test skipped because libpfm is not supported.\n"; + } + EXPECT_TRUE(PerfCounters::Initialize()); + auto counters = PerfCounters::Create({kGenericPerfEvent1}); + EXPECT_EQ(counters.num_counters(), 1); + PerfCounterValues values1(1); + EXPECT_TRUE(counters.Snapshot(&values1)); + EXPECT_GT(values1[0], 0); + PerfCounterValues values2(1); + EXPECT_TRUE(counters.Snapshot(&values2)); + EXPECT_GT(values2[0], 0); + EXPECT_GT(values2[0], values1[0]); +} + +TEST(PerfCountersTest, Read2Counters) { + if (!PerfCounters::kSupported) { + GTEST_SKIP() << "Test skipped because libpfm is not supported.\n"; + } + EXPECT_TRUE(PerfCounters::Initialize()); + auto counters = + PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent2}); + EXPECT_EQ(counters.num_counters(), 2); + PerfCounterValues values1(2); + EXPECT_TRUE(counters.Snapshot(&values1)); + EXPECT_GT(values1[0], 0); + EXPECT_GT(values1[1], 0); + PerfCounterValues values2(2); + EXPECT_TRUE(counters.Snapshot(&values2)); + EXPECT_GT(values2[0], 0); + EXPECT_GT(values2[1], 0); +} + +TEST(PerfCountersTest, ReopenExistingCounters) { + // This test works in recent and old Intel hardware, Pixel 3, and Pixel 6. + // However we cannot make assumptions beyond 2 HW counters due to Pixel 6. + if (!PerfCounters::kSupported) { + GTEST_SKIP() << "Test skipped because libpfm is not supported.\n"; + } + EXPECT_TRUE(PerfCounters::Initialize()); + std::vector<std::string> kMetrics({kGenericPerfEvent1}); + std::vector<PerfCounters> counters(2); + for (auto& counter : counters) { + counter = PerfCounters::Create(kMetrics); + } + PerfCounterValues values(1); + EXPECT_TRUE(counters[0].Snapshot(&values)); + EXPECT_TRUE(counters[1].Snapshot(&values)); +} + +TEST(PerfCountersTest, CreateExistingMeasurements) { + // The test works (i.e. causes read to fail) for the assumptions + // about hardware capabilities (i.e. small number (2) hardware + // counters) at this date, + // the same as previous test ReopenExistingCounters. + if (!PerfCounters::kSupported) { + GTEST_SKIP() << "Test skipped because libpfm is not supported.\n"; + } + EXPECT_TRUE(PerfCounters::Initialize()); + + // This means we will try 10 counters but we can only guarantee + // for sure at this time that only 3 will work. Perhaps in the future + // we could use libpfm to query for the hardware limits on this + // particular platform. 
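  // (Those limits are encoded in the constants just below: kMaxCounters = 10
  // attempts, kMinValidCounters = 2 guaranteed; max_counters then records how
  // many Start() calls actually succeeded before the hardware ran out.)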
+ const int kMaxCounters = 10; + const int kMinValidCounters = 2; + + // Let's use a ubiquitous counter that is guaranteed to work + // on all platforms + const std::vector<std::string> kMetrics{"cycles"}; + + // Cannot create a vector of actual objects because the + // copy constructor of PerfCounters is deleted - and so is + // implicitly deleted on PerfCountersMeasurement too + std::vector<std::unique_ptr<PerfCountersMeasurement>> + perf_counter_measurements; + + perf_counter_measurements.reserve(kMaxCounters); + for (int j = 0; j < kMaxCounters; ++j) { + perf_counter_measurements.emplace_back( + new PerfCountersMeasurement(kMetrics)); + } + + std::vector<std::pair<std::string, double>> measurements; + + // Start all counters together to see if they hold + size_t max_counters = kMaxCounters; + for (size_t i = 0; i < kMaxCounters; ++i) { + auto& counter(*perf_counter_measurements[i]); + EXPECT_EQ(counter.num_counters(), 1); + if (!counter.Start()) { + max_counters = i; + break; + }; + } + + ASSERT_GE(max_counters, kMinValidCounters); + + // Start all together + for (size_t i = 0; i < max_counters; ++i) { + auto& counter(*perf_counter_measurements[i]); + EXPECT_TRUE(counter.Stop(measurements) || (i >= kMinValidCounters)); + } + + // Start/stop individually + for (size_t i = 0; i < max_counters; ++i) { + auto& counter(*perf_counter_measurements[i]); + measurements.clear(); + counter.Start(); + EXPECT_TRUE(counter.Stop(measurements) || (i >= kMinValidCounters)); + } +} + +// We try to do some meaningful work here but the compiler +// insists in optimizing away our loop so we had to add a +// no-optimize macro. In case it fails, we added some entropy +// to this pool as well. + +BENCHMARK_DONT_OPTIMIZE size_t do_work() { + static std::mt19937 rd{std::random_device{}()}; + static std::uniform_int_distribution<size_t> mrand(0, 10); + const size_t kNumLoops = 1000000; + size_t sum = 0; + for (size_t j = 0; j < kNumLoops; ++j) { + sum += mrand(rd); + } + benchmark::DoNotOptimize(sum); + return sum; +} + +void measure(size_t threadcount, PerfCounterValues* before, + PerfCounterValues* after) { + BM_CHECK_NE(before, nullptr); + BM_CHECK_NE(after, nullptr); + std::vector<std::thread> threads(threadcount); + auto work = [&]() { BM_CHECK(do_work() > 1000); }; + + // We need to first set up the counters, then start the threads, so the + // threads would inherit the counters. But later, we need to first destroy + // the thread pool (so all the work finishes), then measure the counters. So + // the scopes overlap, and we need to explicitly control the scope of the + // threadpool. 
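  // In outline, the ordering implemented below (a sketch, not extra API):
  //   auto counters = PerfCounters::Create(...);  // 1. open the counters
  //   /* spawn threads */                         // 2. children inherit them
  //   counters.Snapshot(&before);                 // 3. first reading
  //   /* join threads */                          // 4. all work finished
  //   counters.Snapshot(&after);                  // 5. second reading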
+ auto counters = + PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent2}); + for (auto& t : threads) t = std::thread(work); + counters.Snapshot(before); + for (auto& t : threads) t.join(); + counters.Snapshot(after); +} + +TEST(PerfCountersTest, MultiThreaded) { + if (!PerfCounters::kSupported) { + GTEST_SKIP() << "Test skipped because libpfm is not supported."; + } + EXPECT_TRUE(PerfCounters::Initialize()); + PerfCounterValues before(2); + PerfCounterValues after(2); + + // Notice that this test will work even if we taskset it to a single CPU + // In this case the threads will run sequentially + // Start two threads and measure the number of combined cycles and + // instructions + measure(2, &before, &after); + std::vector<double> Elapsed2Threads{ + static_cast<double>(after[0] - before[0]), + static_cast<double>(after[1] - before[1])}; + + // Start four threads and measure the number of combined cycles and + // instructions + measure(4, &before, &after); + std::vector<double> Elapsed4Threads{ + static_cast<double>(after[0] - before[0]), + static_cast<double>(after[1] - before[1])}; + + // The following expectations fail (at least on a beefy workstation with lots + // of cpus) - it seems that in some circumstances the runtime of 4 threads + // can even be better than with 2. + // So instead of expecting 4 threads to be slower, let's just make sure they + // do not differ too much in general (one is not more than 10x than the + // other). + EXPECT_THAT(Elapsed4Threads[0] / Elapsed2Threads[0], AllOf(Gt(0.1), Lt(10))); + EXPECT_THAT(Elapsed4Threads[1] / Elapsed2Threads[1], AllOf(Gt(0.1), Lt(10))); +} + +TEST(PerfCountersTest, HardwareLimits) { + // The test works (i.e. causes read to fail) for the assumptions + // about hardware capabilities (i.e. small number (3-4) hardware + // counters) at this date, + // the same as previous test ReopenExistingCounters. + if (!PerfCounters::kSupported) { + GTEST_SKIP() << "Test skipped because libpfm is not supported.\n"; + } + EXPECT_TRUE(PerfCounters::Initialize()); + + // Taken from `perf list`, but focusses only on those HW events that actually + // were reported when running `sudo perf stat -a sleep 10`, intersected over + // several platforms. All HW events listed in the first command not reported + // in the second seem to not work. This is sad as we don't really get to test + // the grouping here (groups can contain up to 6 members)... 
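  // (Background, hedged: a perf_event "group" is a leader plus sibling events
  // that the kernel schedules onto the PMU as a unit; a group needing more
  // hardware counters than the PMU exposes never gets scheduled, and its
  // reads then fail; that is the failure mode this test probes.)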
+ std::vector<std::string> counter_names{ + "cycles", // leader + "instructions", // + "branch-misses", // + }; + + // In the off-chance that some of these values are not supported, + // we filter them out so the test will complete without failure + // albeit it might not actually test the grouping on that platform + std::vector<std::string> valid_names; + for (const std::string& name : counter_names) { + if (PerfCounters::IsCounterSupported(name)) { + valid_names.push_back(name); + } + } + PerfCountersMeasurement counter(valid_names); + + std::vector<std::pair<std::string, double>> measurements; + + counter.Start(); + EXPECT_TRUE(counter.Stop(measurements)); +} + +} // namespace diff --git a/test/perf_counters_test.cc b/test/perf_counters_test.cc new file mode 100644 index 0000000..b0a3ab0 --- /dev/null +++ b/test/perf_counters_test.cc @@ -0,0 +1,92 @@ +#include <cstdarg> +#undef NDEBUG + +#include "../src/commandlineflags.h" +#include "../src/perf_counters.h" +#include "benchmark/benchmark.h" +#include "output_test.h" + +namespace benchmark { + +BM_DECLARE_string(benchmark_perf_counters); + +} // namespace benchmark + +static void BM_Simple(benchmark::State& state) { + for (auto _ : state) { + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); + } +} +BENCHMARK(BM_Simple); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Simple\",$"}}); + +const int kIters = 1000000; + +void BM_WithoutPauseResume(benchmark::State& state) { + int n = 0; + + for (auto _ : state) { + for (auto i = 0; i < kIters; ++i) { + n = 1 - n; + benchmark::DoNotOptimize(n); + } + } +} + +BENCHMARK(BM_WithoutPauseResume); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_WithoutPauseResume\",$"}}); + +void BM_WithPauseResume(benchmark::State& state) { + int m = 0, n = 0; + + for (auto _ : state) { + for (auto i = 0; i < kIters; ++i) { + n = 1 - n; + benchmark::DoNotOptimize(n); + } + + state.PauseTiming(); + for (auto j = 0; j < kIters; ++j) { + m = 1 - m; + benchmark::DoNotOptimize(m); + } + state.ResumeTiming(); + } +} + +BENCHMARK(BM_WithPauseResume); + +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_WithPauseResume\",$"}}); + +static void CheckSimple(Results const& e) { + CHECK_COUNTER_VALUE(e, double, "CYCLES", GT, 0); +} + +double withoutPauseResumeInstrCount = 0.0; +double withPauseResumeInstrCount = 0.0; + +static void SaveInstrCountWithoutResume(Results const& e) { + withoutPauseResumeInstrCount = e.GetAs<double>("INSTRUCTIONS"); +} + +static void SaveInstrCountWithResume(Results const& e) { + withPauseResumeInstrCount = e.GetAs<double>("INSTRUCTIONS"); +} + +CHECK_BENCHMARK_RESULTS("BM_Simple", &CheckSimple); +CHECK_BENCHMARK_RESULTS("BM_WithoutPauseResume", &SaveInstrCountWithoutResume); +CHECK_BENCHMARK_RESULTS("BM_WithPauseResume", &SaveInstrCountWithResume); + +int main(int argc, char* argv[]) { + if (!benchmark::internal::PerfCounters::kSupported) { + return 0; + } + benchmark::FLAGS_benchmark_perf_counters = "CYCLES,INSTRUCTIONS"; + benchmark::internal::PerfCounters::Initialize(); + RunOutputTests(argc, argv); + + BM_CHECK_GT(withPauseResumeInstrCount, kIters); + BM_CHECK_GT(withoutPauseResumeInstrCount, kIters); + BM_CHECK_LT(withPauseResumeInstrCount, 1.5 * withoutPauseResumeInstrCount); +} diff --git a/test/register_benchmark_test.cc b/test/register_benchmark_test.cc index 3ac5b21..d69d144 100644 --- a/test/register_benchmark_test.cc +++ b/test/register_benchmark_test.cc @@ -10,7 +10,7 @@ namespace { class TestReporter : public benchmark::ConsoleReporter { public: - virtual void ReportRuns(const 
std::vector<Run>& report) { + void ReportRuns(const std::vector<Run>& report) override { all_runs_.insert(all_runs_.end(), begin(report), end(report)); ConsoleReporter::ReportRuns(report); } @@ -19,24 +19,24 @@ class TestReporter : public benchmark::ConsoleReporter { }; struct TestCase { - std::string name; - const char* label; + const std::string name; + const std::string label; // Note: not explicit as we rely on it being converted through ADD_CASES. - TestCase(const char* xname) : TestCase(xname, nullptr) {} - TestCase(const char* xname, const char* xlabel) + TestCase(const std::string& xname) : TestCase(xname, "") {} + TestCase(const std::string& xname, const std::string& xlabel) : name(xname), label(xlabel) {} typedef benchmark::BenchmarkReporter::Run Run; void CheckRun(Run const& run) const { // clang-format off - CHECK(name == run.benchmark_name()) << "expected " << name << " got " + BM_CHECK(name == run.benchmark_name()) << "expected " << name << " got " << run.benchmark_name(); - if (label) { - CHECK(run.report_label == label) << "expected " << label << " got " + if (!label.empty()) { + BM_CHECK(run.report_label == label) << "expected " << label << " got " << run.report_label; } else { - CHECK(run.report_label == ""); + BM_CHECK(run.report_label.empty()); } // clang-format on } @@ -45,7 +45,7 @@ struct TestCase { std::vector<TestCase> ExpectedResults; int AddCases(std::initializer_list<TestCase> const& v) { - for (auto N : v) { + for (const auto& N : v) { ExpectedResults.push_back(N); } return 0; @@ -96,6 +96,18 @@ ADD_CASES({"test1", "One"}, {"test2", "Two"}, {"test3", "Three"}); #endif // BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK //----------------------------------------------------------------------------// +// Test RegisterBenchmark with DISABLED_ benchmark +//----------------------------------------------------------------------------// +void DISABLED_BM_function(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(DISABLED_BM_function); +ReturnVal dummy3 = benchmark::RegisterBenchmark("DISABLED_BM_function_manual", + DISABLED_BM_function); +// No need to add cases because we don't expect them to run. 
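// (Hedged gloss: mirroring gtest's DISABLED_ prefix, a benchmark whose
// registered name starts with "DISABLED_" is skipped at run time, so neither
// BENCHMARK(DISABLED_BM_function) nor the manual
// RegisterBenchmark("DISABLED_BM_function_manual", ...) above should emit any
// report entries, hence no expected cases.)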
+ +//----------------------------------------------------------------------------// // Test RegisterBenchmark with different callable types //----------------------------------------------------------------------------// @@ -111,7 +123,7 @@ void TestRegistrationAtRuntime() { { CustomFixture fx; benchmark::RegisterBenchmark("custom_fixture", fx); - AddCases({"custom_fixture"}); + AddCases({std::string("custom_fixture")}); } #endif #ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK diff --git a/test/repetitions_test.cc b/test/repetitions_test.cc new file mode 100644 index 0000000..569777d --- /dev/null +++ b/test/repetitions_test.cc @@ -0,0 +1,214 @@ + +#include "benchmark/benchmark.h" +#include "output_test.h" + +// ========================================================================= // +// ------------------------ Testing Basic Output --------------------------- // +// ========================================================================= // + +static void BM_ExplicitRepetitions(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_ExplicitRepetitions)->Repetitions(2); + +ADD_CASES(TC_ConsoleOut, + {{"^BM_ExplicitRepetitions/repeats:2 %console_report$"}}); +ADD_CASES(TC_ConsoleOut, + {{"^BM_ExplicitRepetitions/repeats:2 %console_report$"}}); +ADD_CASES(TC_ConsoleOut, + {{"^BM_ExplicitRepetitions/repeats:2_mean %console_report$"}}); +ADD_CASES(TC_ConsoleOut, + {{"^BM_ExplicitRepetitions/repeats:2_median %console_report$"}}); +ADD_CASES(TC_ConsoleOut, + {{"^BM_ExplicitRepetitions/repeats:2_stddev %console_report$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_ExplicitRepetitions/repeats:2\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_ExplicitRepetitions/repeats:2\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_ExplicitRepetitions/repeats:2\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_ExplicitRepetitions/repeats:2\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"repetition_index\": 1,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_ExplicitRepetitions/repeats:2_mean\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_ExplicitRepetitions/repeats:2\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_ExplicitRepetitions/repeats:2_median\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": 
\"BM_ExplicitRepetitions/repeats:2\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_ExplicitRepetitions/repeats:2_stddev\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_ExplicitRepetitions/repeats:2\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_ExplicitRepetitions/repeats:2\",%csv_report$"}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_ExplicitRepetitions/repeats:2\",%csv_report$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_ExplicitRepetitions/repeats:2_mean\",%csv_report$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_ExplicitRepetitions/repeats:2_median\",%csv_report$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_ExplicitRepetitions/repeats:2_stddev\",%csv_report$"}}); + +// ========================================================================= // +// ------------------------ Testing Basic Output --------------------------- // +// ========================================================================= // + +static void BM_ImplicitRepetitions(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_ImplicitRepetitions); + +ADD_CASES(TC_ConsoleOut, {{"^BM_ImplicitRepetitions %console_report$"}}); +ADD_CASES(TC_ConsoleOut, {{"^BM_ImplicitRepetitions %console_report$"}}); +ADD_CASES(TC_ConsoleOut, {{"^BM_ImplicitRepetitions %console_report$"}}); +ADD_CASES(TC_ConsoleOut, {{"^BM_ImplicitRepetitions_mean %console_report$"}}); +ADD_CASES(TC_ConsoleOut, {{"^BM_ImplicitRepetitions_median %console_report$"}}); +ADD_CASES(TC_ConsoleOut, {{"^BM_ImplicitRepetitions_stddev %console_report$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_ImplicitRepetitions\",$"}, + {"\"family_index\": 1,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_ImplicitRepetitions\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_ImplicitRepetitions\",$"}, + {"\"family_index\": 1,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_ImplicitRepetitions\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 1,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_ImplicitRepetitions\",$"}, + {"\"family_index\": 1,$", 
MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_ImplicitRepetitions\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_ImplicitRepetitions_mean\",$"}, + {"\"family_index\": 1,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_ImplicitRepetitions\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_ImplicitRepetitions_median\",$"}, + {"\"family_index\": 1,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_ImplicitRepetitions\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_ImplicitRepetitions_stddev\",$"}, + {"\"family_index\": 1,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_ImplicitRepetitions\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_ImplicitRepetitions\",%csv_report$"}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_ImplicitRepetitions\",%csv_report$"}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_ImplicitRepetitions_mean\",%csv_report$"}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_ImplicitRepetitions_median\",%csv_report$"}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_ImplicitRepetitions_stddev\",%csv_report$"}}); + +// ========================================================================= // +// --------------------------- TEST CASES END ------------------------------ // +// ========================================================================= // + +int main(int argc, char* argv[]) { RunOutputTests(argc, argv); } diff --git a/test/report_aggregates_only_test.cc b/test/report_aggregates_only_test.cc index 9646b9b..47da503 100644 --- a/test/report_aggregates_only_test.cc +++ b/test/report_aggregates_only_test.cc @@ -19,17 +19,19 @@ BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly(); int main(int argc, char* argv[]) { const std::string output = GetFileReporterOutput(argc, argv); - if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 3 || + if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 4 || SubstrCnt(output, "\"name\": 
\"BM_SummaryRepeat/repeats:3_mean\"") != 1 || SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") != 1 || SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") != - 1) { - std::cout << "Precondition mismatch. Expected to only find three " + 1 || + SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_cv\"") != 1) { + std::cout << "Precondition mismatch. Expected to only find four " "occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n" "\"name\": \"BM_SummaryRepeat/repeats:3_mean\", " "\"name\": \"BM_SummaryRepeat/repeats:3_median\", " - "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"\nThe entire " + "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\", " + "\"name\": \"BM_SummaryRepeat/repeats:3_cv\"\nThe entire " "output:\n"; std::cout << output; return 1; diff --git a/test/reporter_output_test.cc b/test/reporter_output_test.cc index d24a57d..2eb545a 100644 --- a/test/reporter_output_test.cc +++ b/test/reporter_output_test.cc @@ -1,5 +1,6 @@ #undef NDEBUG +#include <numeric> #include <utility> #include "benchmark/benchmark.h" @@ -16,7 +17,7 @@ static int AddContextCases() { AddCases(TC_ConsoleErr, { {"^%int-%int-%intT%int:%int:%int[-+]%int:%int$", MR_Default}, - {"Running .*/reporter_output_test(\\.exe)?$", MR_Next}, + {"Running .*(/|\\\\)reporter_output_test(\\.exe)?$", MR_Next}, {"Run on \\(%int X %float MHz CPU s?\\)", MR_Next}, }); AddCases(TC_JSONOut, @@ -71,9 +72,11 @@ BENCHMARK(BM_basic); ADD_CASES(TC_ConsoleOut, {{"^BM_basic %console_report$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_basic\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_basic\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"iterations\": %int,$", MR_Next}, @@ -90,7 +93,8 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}}); void BM_bytes_per_second(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } state.SetBytesProcessed(1); } @@ -99,9 +103,11 @@ BENCHMARK(BM_bytes_per_second); ADD_CASES(TC_ConsoleOut, {{"^BM_bytes_per_second %console_report " "bytes_per_second=%float[kM]{0,1}/s$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_bytes_per_second\",$"}, + {"\"family_index\": 1,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_bytes_per_second\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"iterations\": %int,$", MR_Next}, @@ -119,7 +125,8 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}}); void BM_items_per_second(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } state.SetItemsProcessed(1); } @@ -128,9 +135,11 @@ BENCHMARK(BM_items_per_second); ADD_CASES(TC_ConsoleOut, {{"^BM_items_per_second %console_report " "items_per_second=%float[kM]{0,1}/s$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_items_per_second\",$"}, + {"\"family_index\": 2,$", 
MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_items_per_second\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"iterations\": %int,$", MR_Next}, @@ -154,9 +163,11 @@ BENCHMARK(BM_label); ADD_CASES(TC_ConsoleOut, {{"^BM_label %console_report some label$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_label\",$"}, + {"\"family_index\": 3,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_label\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"iterations\": %int,$", MR_Next}, @@ -181,9 +192,11 @@ BENCHMARK(BM_time_label_nanosecond)->Unit(benchmark::kNanosecond); ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_nanosecond %console_report$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_time_label_nanosecond\",$"}, + {"\"family_index\": 4,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_time_label_nanosecond\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"iterations\": %int,$", MR_Next}, @@ -202,9 +215,11 @@ BENCHMARK(BM_time_label_microsecond)->Unit(benchmark::kMicrosecond); ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_microsecond %console_us_report$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_time_label_microsecond\",$"}, + {"\"family_index\": 5,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_time_label_microsecond\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"iterations\": %int,$", MR_Next}, @@ -223,9 +238,11 @@ BENCHMARK(BM_time_label_millisecond)->Unit(benchmark::kMillisecond); ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_millisecond %console_ms_report$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_time_label_millisecond\",$"}, + {"\"family_index\": 6,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_time_label_millisecond\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"iterations\": %int,$", MR_Next}, @@ -243,9 +260,11 @@ BENCHMARK(BM_time_label_second)->Unit(benchmark::kSecond); ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_second %console_s_report$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_time_label_second\",$"}, + {"\"family_index\": 7,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_time_label_second\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"iterations\": %int,$", MR_Next}, @@ -267,9 +286,11 @@ void BM_error(benchmark::State& state) { BENCHMARK(BM_error); ADD_CASES(TC_ConsoleOut, {{"^BM_error[ ]+ERROR OCCURRED: 'message'$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_error\",$"}, + {"\"family_index\": 8,$", MR_Next}, + 
{"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_error\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"error_occurred\": true,$", MR_Next}, @@ -289,15 +310,17 @@ void BM_no_arg_name(benchmark::State& state) { BENCHMARK(BM_no_arg_name)->Arg(3); ADD_CASES(TC_ConsoleOut, {{"^BM_no_arg_name/3 %console_report$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_no_arg_name/3\",$"}, + {"\"family_index\": 9,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_no_arg_name/3\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_no_arg_name/3\",%csv_report$"}}); // ========================================================================= // -// ------------------------ Testing Arg Name Output ----------------------- // +// ------------------------ Testing Arg Name Output ------------------------ // // ========================================================================= // void BM_arg_name(benchmark::State& state) { @@ -307,9 +330,11 @@ void BM_arg_name(benchmark::State& state) { BENCHMARK(BM_arg_name)->ArgName("first")->Arg(3); ADD_CASES(TC_ConsoleOut, {{"^BM_arg_name/first:3 %console_report$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_arg_name/first:3\",$"}, + {"\"family_index\": 10,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_arg_name/first:3\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_arg_name/first:3\",%csv_report$"}}); @@ -327,14 +352,42 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_arg_names/first:2/5/third:4 %console_report$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_arg_names/first:2/5/third:4\",$"}, + {"\"family_index\": 11,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_arg_names/first:2/5/third:4\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_arg_names/first:2/5/third:4\",%csv_report$"}}); // ========================================================================= // +// ------------------------ Testing Name Output ---------------------------- // +// ========================================================================= // + +void BM_name(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_name)->Name("BM_custom_name"); + +ADD_CASES(TC_ConsoleOut, {{"^BM_custom_name %console_report$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_custom_name\",$"}, + {"\"family_index\": 12,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_custom_name\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); 
+ADD_CASES(TC_CSVOut, {{"^\"BM_custom_name\",%csv_report$"}}); + +// ========================================================================= // // ------------------------ Testing Big Args Output ------------------------ // // ========================================================================= // @@ -353,7 +406,8 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"}, void BM_Complexity_O1(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } state.SetComplexityN(state.range(0)); } @@ -381,37 +435,50 @@ ADD_CASES(TC_ConsoleOut, {"^BM_Repeat/repeats:2_median %console_time_only_report [ ]*2$"}, {"^BM_Repeat/repeats:2_stddev %console_time_only_report [ ]*2$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:2\",$"}, + {"\"family_index\": 15,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_Repeat/repeats:2\"", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, {"\"repetitions\": 2,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:2\",$"}, + {"\"family_index\": 15,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, {"\"repetitions\": 2,$", MR_Next}, {"\"repetition_index\": 1,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:2_mean\",$"}, + {"\"family_index\": 15,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next}, {"\"repetitions\": 2,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 2,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:2_median\",$"}, + {"\"family_index\": 15,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next}, {"\"repetitions\": 2,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 2,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:2_stddev\",$"}, + {"\"family_index\": 15,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next}, {"\"repetitions\": 2,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 2,$", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:2\",%csv_report$"}, {"^\"BM_Repeat/repeats:2\",%csv_report$"}, @@ -428,43 +495,58 @@ ADD_CASES(TC_ConsoleOut, {"^BM_Repeat/repeats:3_median %console_time_only_report [ ]*3$"}, {"^BM_Repeat/repeats:3_stddev %console_time_only_report [ ]*3$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:3\",$"}, + {"\"family_index\": 16,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, {"\"repetitions\": 3,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:3\",$"}, + 
{"\"family_index\": 16,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, {"\"repetitions\": 3,$", MR_Next}, {"\"repetition_index\": 1,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:3\",$"}, + {"\"family_index\": 16,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, {"\"repetitions\": 3,$", MR_Next}, {"\"repetition_index\": 2,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:3_mean\",$"}, + {"\"family_index\": 16,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next}, {"\"repetitions\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:3_median\",$"}, + {"\"family_index\": 16,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next}, {"\"repetitions\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:3_stddev\",$"}, + {"\"family_index\": 16,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next}, {"\"repetitions\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"}, {"^\"BM_Repeat/repeats:3\",%csv_report$"}, @@ -483,49 +565,66 @@ ADD_CASES(TC_ConsoleOut, {"^BM_Repeat/repeats:4_median %console_time_only_report [ ]*4$"}, {"^BM_Repeat/repeats:4_stddev %console_time_only_report [ ]*4$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:4\",$"}, + {"\"family_index\": 17,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, {"\"repetitions\": 4,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:4\",$"}, + {"\"family_index\": 17,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, {"\"repetitions\": 4,$", MR_Next}, {"\"repetition_index\": 1,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:4\",$"}, + {"\"family_index\": 17,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, {"\"repetitions\": 4,$", MR_Next}, {"\"repetition_index\": 2,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:4\",$"}, + {"\"family_index\": 17,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, {"\"repetitions\": 4,$", MR_Next}, {"\"repetition_index\": 3,$", MR_Next}, {"\"threads\": 
1,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:4_mean\",$"}, + {"\"family_index\": 17,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next}, {"\"repetitions\": 4,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 4,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:4_median\",$"}, + {"\"family_index\": 17,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next}, {"\"repetitions\": 4,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 4,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:4_stddev\",$"}, + {"\"family_index\": 17,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next}, {"\"repetitions\": 4,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 4,$", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:4\",%csv_report$"}, {"^\"BM_Repeat/repeats:4\",%csv_report$"}, @@ -544,6 +643,8 @@ void BM_RepeatOnce(benchmark::State& state) { BENCHMARK(BM_RepeatOnce)->Repetitions(1)->ReportAggregatesOnly(); ADD_CASES(TC_ConsoleOut, {{"^BM_RepeatOnce/repeats:1 %console_report$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_RepeatOnce/repeats:1\",$"}, + {"\"family_index\": 18,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_RepeatOnce/repeats:1\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, {"\"repetitions\": 1,$", MR_Next}, @@ -566,25 +667,34 @@ ADD_CASES( ADD_CASES(TC_JSONOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not}, {"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"}, + {"\"family_index\": 19,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next}, {"\"repetitions\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}, {"\"name\": \"BM_SummaryRepeat/repeats:3_median\",$"}, + {"\"family_index\": 19,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next}, {"\"repetitions\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}, {"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"}, + {"\"family_index\": 19,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next}, {"\"repetitions\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}}); ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not}, {"^\"BM_SummaryRepeat/repeats:3_mean\",%csv_report$"}, @@ -608,25 +718,34 @@ ADD_CASES( ADD_CASES(TC_JSONOut, {{".*BM_SummaryDisplay/repeats:2 ", 
MR_Not}, {"\"name\": \"BM_SummaryDisplay/repeats:2_mean\",$"}, + {"\"family_index\": 20,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next}, {"\"repetitions\": 2,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 2,$", MR_Next}, {"\"name\": \"BM_SummaryDisplay/repeats:2_median\",$"}, + {"\"family_index\": 20,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next}, {"\"repetitions\": 2,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 2,$", MR_Next}, {"\"name\": \"BM_SummaryDisplay/repeats:2_stddev\",$"}, + {"\"family_index\": 20,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next}, {"\"repetitions\": 2,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 2,$", MR_Next}}); ADD_CASES(TC_CSVOut, {{".*BM_SummaryDisplay/repeats:2 ", MR_Not}, @@ -654,27 +773,36 @@ ADD_CASES( ADD_CASES(TC_JSONOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not}, {"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"}, + {"\"family_index\": 21,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next}, {"\"repetitions\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}, {"\"time_unit\": \"us\",?$"}, {"\"name\": \"BM_RepeatTimeUnit/repeats:3_median\",$"}, + {"\"family_index\": 21,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next}, {"\"repetitions\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}, {"\"time_unit\": \"us\",?$"}, {"\"name\": \"BM_RepeatTimeUnit/repeats:3_stddev\",$"}, + {"\"family_index\": 21,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next}, {"\"repetitions\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}, {"\"time_unit\": \"us\",?$"}}); ADD_CASES(TC_CSVOut, @@ -722,6 +850,8 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_UserStats/iterations:5/repeats:3/manual_time [ " ADD_CASES( TC_JSONOut, {{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"}, + {"\"family_index\": 22,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, @@ -731,6 +861,8 @@ ADD_CASES( {"\"iterations\": 5,$", MR_Next}, {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"}, + 
{"\"family_index\": 22,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, @@ -740,6 +872,8 @@ ADD_CASES( {"\"iterations\": 5,$", MR_Next}, {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"}, + {"\"family_index\": 22,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, @@ -749,39 +883,51 @@ ADD_CASES( {"\"iterations\": 5,$", MR_Next}, {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",$"}, + {"\"family_index\": 22,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next}, {"\"repetitions\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}, {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_median\",$"}, + {"\"family_index\": 22,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next}, {"\"repetitions\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}, {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_stddev\",$"}, + {"\"family_index\": 22,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next}, {"\"repetitions\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}, {"\"real_time\": %float,$", MR_Next}, {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_\",$"}, + {"\"family_index\": 22,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next}, {"\"repetitions\": 3,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 3,$", MR_Next}, {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}}); ADD_CASES( @@ -797,6 +943,154 @@ ADD_CASES( {"^\"BM_UserStats/iterations:5/repeats:3/manual_time_\",%csv_report$"}}); // ========================================================================= // +// ------------- Testing relative standard deviation statistics ------------ // +// ========================================================================= // + +const auto UserPercentStatistics = [](const std::vector<double>&) { + return 1. 
/ 100.; +}; +void BM_UserPercentStats(benchmark::State& state) { + for (auto _ : state) { + state.SetIterationTime(150 / 10e8); + } +} +// clang-format off +BENCHMARK(BM_UserPercentStats) + ->Repetitions(3) + ->Iterations(5) + ->UseManualTime() + ->Unit(benchmark::TimeUnit::kNanosecond) + ->ComputeStatistics("", UserPercentStatistics, benchmark::StatisticUnit::kPercentage); +// clang-format on + +// check that UserPercent-provided stats is calculated, and is after the +// default-ones empty string as name is intentional, it would sort before +// anything else +ADD_CASES(TC_ConsoleOut, + {{"^BM_UserPercentStats/iterations:5/repeats:3/manual_time [ " + "]* 150 ns %time [ ]*5$"}, + {"^BM_UserPercentStats/iterations:5/repeats:3/manual_time [ " + "]* 150 ns %time [ ]*5$"}, + {"^BM_UserPercentStats/iterations:5/repeats:3/manual_time [ " + "]* 150 ns %time [ ]*5$"}, + {"^BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time_mean [ ]* 150 ns %time [ ]*3$"}, + {"^BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time_median [ ]* 150 ns %time [ ]*3$"}, + {"^BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time_stddev [ ]* 0.000 ns %time [ ]*3$"}, + {"^BM_UserPercentStats/iterations:5/repeats:3/manual_time_ " + "[ ]* 1.00 % [ ]* 1.00 %[ ]*3$"}}); +ADD_CASES( + TC_JSONOut, + {{"\"name\": \"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$"}, + {"\"family_index\": 23,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": 5,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": \"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$"}, + {"\"family_index\": 23,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 1,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": 5,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": \"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$"}, + {"\"family_index\": 23,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": 5,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time_mean\",$"}, + {"\"family_index\": 23,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time_median\",$"}, + {"\"family_index\": 23,$", MR_Next}, + 
{"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time_stddev\",$"}, + {"\"family_index\": 23,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time_\",$"}, + {"\"family_index\": 23,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"\",$", MR_Next}, + {"\"aggregate_unit\": \"percentage\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"real_time\": 1\\.(0)*e-(0)*2,$", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time\",%csv_report$"}, + {"^\"BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time\",%csv_report$"}, + {"^\"BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time\",%csv_report$"}, + {"^\"BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time_mean\",%csv_report$"}, + {"^\"BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time_median\",%csv_report$"}, + {"^\"BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time_stddev\",%csv_report$"}, + {"^\"BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time_\",%csv_report$"}}); + +// ========================================================================= // // ------------------------- Testing StrEscape JSON ------------------------ // // ========================================================================= // #if 0 // enable when csv testing code correctly handles multi-line fields @@ -807,9 +1101,11 @@ void BM_JSON_Format(benchmark::State& state) { } BENCHMARK(BM_JSON_Format); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_JSON_Format\",$"}, + {"\"family_index\": 23,$", MR_Next}, +{"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_JSON_Format\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"error_occurred\": true,$", MR_Next}, diff --git a/test/skip_with_error_test.cc b/test/skip_with_error_test.cc index 97a2e3c..b4c5e15 100644 --- a/test/skip_with_error_test.cc +++ b/test/skip_with_error_test.cc @@ -10,17 +10,17 @@ namespace { class TestReporter : public benchmark::ConsoleReporter { public: - virtual bool ReportContext(const Context& context) { + bool ReportContext(const Context& context) override { return ConsoleReporter::ReportContext(context); }; - virtual void ReportRuns(const std::vector<Run>& report) 
{ + void ReportRuns(const std::vector<Run>& report) override { all_runs_.insert(all_runs_.end(), begin(report), end(report)); ConsoleReporter::ReportRuns(report); } TestReporter() {} - virtual ~TestReporter() {} + ~TestReporter() override {} mutable std::vector<Run> all_runs_; }; @@ -33,21 +33,23 @@ struct TestCase { typedef benchmark::BenchmarkReporter::Run Run; void CheckRun(Run const& run) const { - CHECK(name == run.benchmark_name()) + BM_CHECK(name == run.benchmark_name()) << "expected " << name << " got " << run.benchmark_name(); - CHECK(error_occurred == run.error_occurred); - CHECK(error_message == run.error_message); + BM_CHECK_EQ(error_occurred, + benchmark::internal::SkippedWithError == run.skipped); + BM_CHECK(error_message == run.skip_message); if (error_occurred) { - // CHECK(run.iterations == 0); + // BM_CHECK(run.iterations == 0); } else { - CHECK(run.iterations != 0); + BM_CHECK(run.iterations != 0); } } }; std::vector<TestCase> ExpectedResults; -int AddCases(const char* base_name, std::initializer_list<TestCase> const& v) { +int AddCases(const std::string& base_name, + std::initializer_list<TestCase> const& v) { for (auto TC : v) { TC.name = base_name + TC.name; ExpectedResults.push_back(std::move(TC)); @@ -97,7 +99,7 @@ ADD_CASES("BM_error_before_running_range_for", {{"", true, "error message"}}); void BM_error_during_running(benchmark::State& state) { int first_iter = true; while (state.KeepRunning()) { - if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) { + if (state.range(0) == 1 && state.thread_index() <= (state.threads() / 2)) { assert(first_iter); first_iter = false; state.SkipWithError("error message"); @@ -119,12 +121,13 @@ ADD_CASES("BM_error_during_running", {{"/1/threads:1", true, "error message"}, void BM_error_during_running_ranged_for(benchmark::State& state) { assert(state.max_iterations > 3 && "test requires at least a few iterations"); - int first_iter = true; + bool first_iter = true; // NOTE: Users should not write the for loop explicitly. for (auto It = state.begin(), End = state.end(); It != End; ++It) { if (state.range(0) == 1) { assert(first_iter); first_iter = false; + (void)first_iter; state.SkipWithError("error message"); // Test the unfortunate but documented behavior that the ranged-for loop // doesn't automatically terminate when SkipWithError is set. 
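The hunk above pins down the documented footgun that SkipWithError() only marks the run as skipped; it does not terminate the ranged-for loop by itself. A minimal sketch of the usual guard pattern, assuming a hypothetical ResourceAvailable() predicate:

    static bool ResourceAvailable() { return false; }  // hypothetical stand-in

    static void BM_guarded(benchmark::State& state) {
      for (auto _ : state) {
        if (!ResourceAvailable()) {
          state.SkipWithError("resource unavailable");
          break;  // SkipWithError() does not end the loop on its own
        }
        // ... measured work ...
      }
    }
    BENCHMARK(BM_guarded);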
@@ -140,9 +143,10 @@ ADD_CASES("BM_error_during_running_ranged_for", void BM_error_after_running(benchmark::State& state) { for (auto _ : state) { - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } - if (state.thread_index <= (state.threads / 2)) + if (state.thread_index() <= (state.threads() / 2)) state.SkipWithError("error message"); } BENCHMARK(BM_error_after_running)->ThreadRange(1, 8); @@ -154,7 +158,7 @@ ADD_CASES("BM_error_after_running", {{"/threads:1", true, "error message"}, void BM_error_while_paused(benchmark::State& state) { bool first_iter = true; while (state.KeepRunning()) { - if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) { + if (state.range(0) == 1 && state.thread_index() <= (state.threads() / 2)) { assert(first_iter); first_iter = false; state.PauseTiming(); diff --git a/test/spec_arg_test.cc b/test/spec_arg_test.cc new file mode 100644 index 0000000..06aafbe --- /dev/null +++ b/test/spec_arg_test.cc @@ -0,0 +1,105 @@ +#include <algorithm> +#include <cassert> +#include <cstdint> +#include <cstdlib> +#include <cstring> +#include <iostream> +#include <limits> +#include <string> +#include <vector> + +#include "benchmark/benchmark.h" + +// Tests that we can override benchmark-spec value from FLAGS_benchmark_filter +// with argument to RunSpecifiedBenchmarks(...). + +namespace { + +class TestReporter : public benchmark::ConsoleReporter { + public: + bool ReportContext(const Context& context) override { + return ConsoleReporter::ReportContext(context); + }; + + void ReportRuns(const std::vector<Run>& report) override { + assert(report.size() == 1); + matched_functions.push_back(report[0].run_name.function_name); + ConsoleReporter::ReportRuns(report); + }; + + TestReporter() {} + + ~TestReporter() override {} + + const std::vector<std::string>& GetMatchedFunctions() const { + return matched_functions; + } + + private: + std::vector<std::string> matched_functions; +}; + +} // end namespace + +static void BM_NotChosen(benchmark::State& state) { + assert(false && "SHOULD NOT BE CALLED"); + for (auto _ : state) { + } +} +BENCHMARK(BM_NotChosen); + +static void BM_Chosen(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_Chosen); + +int main(int argc, char** argv) { + const std::string flag = "BM_NotChosen"; + + // Verify that argv specify --benchmark_filter=BM_NotChosen. + bool found = false; + for (int i = 0; i < argc; ++i) { + if (strcmp("--benchmark_filter=BM_NotChosen", argv[i]) == 0) { + found = true; + break; + } + } + assert(found); + + benchmark::Initialize(&argc, argv); + + // Check that the current flag value is reported accurately via the + // GetBenchmarkFilter() function. + if (flag != benchmark::GetBenchmarkFilter()) { + std::cerr + << "Seeing different value for flags. GetBenchmarkFilter() returns [" + << benchmark::GetBenchmarkFilter() << "] expected flag=[" << flag + << "]\n"; + return 1; + } + TestReporter test_reporter; + const char* const spec = "BM_Chosen"; + const size_t returned_count = + benchmark::RunSpecifiedBenchmarks(&test_reporter, spec); + assert(returned_count == 1); + const std::vector<std::string> matched_functions = + test_reporter.GetMatchedFunctions(); + assert(matched_functions.size() == 1); + if (strcmp(spec, matched_functions.front().c_str()) != 0) { + std::cerr << "Expected benchmark [" << spec << "] to run, but got [" + << matched_functions.front() << "]\n"; + return 2; + } + + // Test that SetBenchmarkFilter works. 
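// Sketch of the round-trip verified below, assuming only the public API:
// SetBenchmarkFilter() overwrites the --benchmark_filter flag value,
// GetBenchmarkFilter() reads it back, and a later RunSpecifiedBenchmarks()
// call would match against the new value.
//
//   benchmark::SetBenchmarkFilter("BM_Chosen");  // hypothetical value
//   assert(benchmark::GetBenchmarkFilter() == "BM_Chosen");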
+ const std::string golden_value = "golden_value"; + benchmark::SetBenchmarkFilter(golden_value); + std::string current_value = benchmark::GetBenchmarkFilter(); + if (golden_value != current_value) { + std::cerr << "Expected [" << golden_value + << "] for --benchmark_filter but got [" << current_value << "]\n"; + return 3; + } + return 0; +} diff --git a/test/spec_arg_verbosity_test.cc b/test/spec_arg_verbosity_test.cc new file mode 100644 index 0000000..8f8eb6d --- /dev/null +++ b/test/spec_arg_verbosity_test.cc @@ -0,0 +1,43 @@ +#include <string.h> + +#include <iostream> + +#include "benchmark/benchmark.h" + +// Tests that the user specified verbosity level can be get. +static void BM_Verbosity(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_Verbosity); + +int main(int argc, char** argv) { + const int32_t flagv = 42; + + // Verify that argv specify --v=42. + bool found = false; + for (int i = 0; i < argc; ++i) { + if (strcmp("--v=42", argv[i]) == 0) { + found = true; + break; + } + } + if (!found) { + std::cerr << "This test requires '--v=42' to be passed as a command-line " + << "argument.\n"; + return 1; + } + + benchmark::Initialize(&argc, argv); + + // Check that the current flag value is reported accurately via the + // GetBenchmarkVerbosity() function. + if (flagv != benchmark::GetBenchmarkVerbosity()) { + std::cerr + << "Seeing different value for flags. GetBenchmarkVerbosity() returns [" + << benchmark::GetBenchmarkVerbosity() << "] expected flag=[" << flagv + << "]\n"; + return 1; + } + return 0; +} diff --git a/test/statistics_gtest.cc b/test/statistics_gtest.cc index 3ddc72d..1de2d87 100644 --- a/test/statistics_gtest.cc +++ b/test/statistics_gtest.cc @@ -25,4 +25,11 @@ TEST(StatisticsTest, StdDev) { 1.151086443322134); } +TEST(StatisticsTest, CV) { + EXPECT_DOUBLE_EQ(benchmark::StatisticsCV({101, 101, 101, 101}), 0.0); + EXPECT_DOUBLE_EQ(benchmark::StatisticsCV({1, 2, 3}), 1. 
/ 2.); + EXPECT_DOUBLE_EQ(benchmark::StatisticsCV({2.5, 2.4, 3.3, 4.2, 5.1}), + 0.32888184094918121); +} + } // end namespace diff --git a/test/string_util_gtest.cc b/test/string_util_gtest.cc index 01bf155..67b4bc0 100644 --- a/test/string_util_gtest.cc +++ b/test/string_util_gtest.cc @@ -1,9 +1,12 @@ //===---------------------------------------------------------------------===// -// statistics_test - Unit tests for src/statistics.cc +// string_util_test - Unit tests for src/string_util.cc //===---------------------------------------------------------------------===// -#include "../src/string_util.h" +#include <tuple> + #include "../src/internal_macros.h" +#include "../src/string_util.h" +#include "gmock/gmock.h" #include "gtest/gtest.h" namespace { @@ -32,7 +35,8 @@ TEST(StringUtilTest, stoul) { #elif ULONG_MAX == 0xFFFFFFFFFFFFFFFFul { size_t pos = 0; - EXPECT_EQ(0xFFFFFFFFFFFFFFFFul, benchmark::stoul("18446744073709551615", &pos)); + EXPECT_EQ(0xFFFFFFFFFFFFFFFFul, + benchmark::stoul("18446744073709551615", &pos)); EXPECT_EQ(20ul, pos); } #endif @@ -63,91 +67,133 @@ TEST(StringUtilTest, stoul) { } #ifndef BENCHMARK_HAS_NO_EXCEPTIONS { - ASSERT_THROW(benchmark::stoul("this is a test"), std::invalid_argument); + ASSERT_THROW(std::ignore = benchmark::stoul("this is a test"), + std::invalid_argument); } #endif } -TEST(StringUtilTest, stoi) { - { - size_t pos = 0; - EXPECT_EQ(0, benchmark::stoi("0", &pos)); - EXPECT_EQ(1ul, pos); - } - { - size_t pos = 0; - EXPECT_EQ(-17, benchmark::stoi("-17", &pos)); - EXPECT_EQ(3ul, pos); - } - { - size_t pos = 0; - EXPECT_EQ(1357, benchmark::stoi("1357", &pos)); - EXPECT_EQ(4ul, pos); - } - { - size_t pos = 0; - EXPECT_EQ(10, benchmark::stoi("1010", &pos, 2)); - EXPECT_EQ(4ul, pos); - } - { - size_t pos = 0; - EXPECT_EQ(520, benchmark::stoi("1010", &pos, 8)); - EXPECT_EQ(4ul, pos); - } - { - size_t pos = 0; - EXPECT_EQ(1010, benchmark::stoi("1010", &pos, 10)); - EXPECT_EQ(4ul, pos); - } - { - size_t pos = 0; - EXPECT_EQ(4112, benchmark::stoi("1010", &pos, 16)); - EXPECT_EQ(4ul, pos); - } - { - size_t pos = 0; - EXPECT_EQ(0xBEEF, benchmark::stoi("BEEF", &pos, 16)); - EXPECT_EQ(4ul, pos); - } +TEST(StringUtilTest, stoi){{size_t pos = 0; +EXPECT_EQ(0, benchmark::stoi("0", &pos)); +EXPECT_EQ(1ul, pos); +} // namespace +{ + size_t pos = 0; + EXPECT_EQ(-17, benchmark::stoi("-17", &pos)); + EXPECT_EQ(3ul, pos); +} +{ + size_t pos = 0; + EXPECT_EQ(1357, benchmark::stoi("1357", &pos)); + EXPECT_EQ(4ul, pos); +} +{ + size_t pos = 0; + EXPECT_EQ(10, benchmark::stoi("1010", &pos, 2)); + EXPECT_EQ(4ul, pos); +} +{ + size_t pos = 0; + EXPECT_EQ(520, benchmark::stoi("1010", &pos, 8)); + EXPECT_EQ(4ul, pos); +} +{ + size_t pos = 0; + EXPECT_EQ(1010, benchmark::stoi("1010", &pos, 10)); + EXPECT_EQ(4ul, pos); +} +{ + size_t pos = 0; + EXPECT_EQ(4112, benchmark::stoi("1010", &pos, 16)); + EXPECT_EQ(4ul, pos); +} +{ + size_t pos = 0; + EXPECT_EQ(0xBEEF, benchmark::stoi("BEEF", &pos, 16)); + EXPECT_EQ(4ul, pos); +} #ifndef BENCHMARK_HAS_NO_EXCEPTIONS - { - ASSERT_THROW(benchmark::stoi("this is a test"), std::invalid_argument); - } +{ + ASSERT_THROW(std::ignore = benchmark::stoi("this is a test"), + std::invalid_argument); +} #endif } -TEST(StringUtilTest, stod) { - { - size_t pos = 0; - EXPECT_EQ(0.0, benchmark::stod("0", &pos)); - EXPECT_EQ(1ul, pos); - } - { - size_t pos = 0; - EXPECT_EQ(-84.0, benchmark::stod("-84", &pos)); - EXPECT_EQ(3ul, pos); - } - { - size_t pos = 0; - EXPECT_EQ(1234.0, benchmark::stod("1234", &pos)); - EXPECT_EQ(4ul, pos); - } - { - size_t pos = 0; 
- EXPECT_EQ(1.5, benchmark::stod("1.5", &pos)); - EXPECT_EQ(3ul, pos); - } - { - size_t pos = 0; - /* Note: exactly representable as double */ - EXPECT_EQ(-1.25e+9, benchmark::stod("-1.25e+9", &pos)); - EXPECT_EQ(8ul, pos); - } +TEST(StringUtilTest, stod){{size_t pos = 0; +EXPECT_EQ(0.0, benchmark::stod("0", &pos)); +EXPECT_EQ(1ul, pos); +} +{ + size_t pos = 0; + EXPECT_EQ(-84.0, benchmark::stod("-84", &pos)); + EXPECT_EQ(3ul, pos); +} +{ + size_t pos = 0; + EXPECT_EQ(1234.0, benchmark::stod("1234", &pos)); + EXPECT_EQ(4ul, pos); +} +{ + size_t pos = 0; + EXPECT_EQ(1.5, benchmark::stod("1.5", &pos)); + EXPECT_EQ(3ul, pos); +} +{ + size_t pos = 0; + /* Note: exactly representable as double */ + EXPECT_EQ(-1.25e+9, benchmark::stod("-1.25e+9", &pos)); + EXPECT_EQ(8ul, pos); +} #ifndef BENCHMARK_HAS_NO_EXCEPTIONS - { - ASSERT_THROW(benchmark::stod("this is a test"), std::invalid_argument); - } +{ + ASSERT_THROW(std::ignore = benchmark::stod("this is a test"), + std::invalid_argument); +} #endif } +TEST(StringUtilTest, StrSplit) { + EXPECT_EQ(benchmark::StrSplit("", ','), std::vector<std::string>{}); + EXPECT_EQ(benchmark::StrSplit("hello", ','), + std::vector<std::string>({"hello"})); + EXPECT_EQ(benchmark::StrSplit("hello,there,is,more", ','), + std::vector<std::string>({"hello", "there", "is", "more"})); +} + +using HumanReadableFixture = ::testing::TestWithParam< + std::tuple<double, benchmark::Counter::OneK, std::string>>; + +INSTANTIATE_TEST_SUITE_P( + HumanReadableTests, HumanReadableFixture, + ::testing::Values( + std::make_tuple(0.0, benchmark::Counter::kIs1024, "0"), + std::make_tuple(999.0, benchmark::Counter::kIs1024, "999"), + std::make_tuple(1000.0, benchmark::Counter::kIs1024, "1000"), + std::make_tuple(1024.0, benchmark::Counter::kIs1024, "1Ki"), + std::make_tuple(1000 * 1000.0, benchmark::Counter::kIs1024, + "976\\.56.Ki"), + std::make_tuple(1024 * 1024.0, benchmark::Counter::kIs1024, "1Mi"), + std::make_tuple(1000 * 1000 * 1000.0, benchmark::Counter::kIs1024, + "953\\.674Mi"), + std::make_tuple(1024 * 1024 * 1024.0, benchmark::Counter::kIs1024, + "1Gi"), + std::make_tuple(0.0, benchmark::Counter::kIs1000, "0"), + std::make_tuple(999.0, benchmark::Counter::kIs1000, "999"), + std::make_tuple(1000.0, benchmark::Counter::kIs1000, "1k"), + std::make_tuple(1024.0, benchmark::Counter::kIs1000, "1.024k"), + std::make_tuple(1000 * 1000.0, benchmark::Counter::kIs1000, "1M"), + std::make_tuple(1024 * 1024.0, benchmark::Counter::kIs1000, + "1\\.04858M"), + std::make_tuple(1000 * 1000 * 1000.0, benchmark::Counter::kIs1000, + "1G"), + std::make_tuple(1024 * 1024 * 1024.0, benchmark::Counter::kIs1000, + "1\\.07374G"))); + +TEST_P(HumanReadableFixture, HumanReadableNumber) { + std::string str = benchmark::HumanReadableNumber(std::get<0>(GetParam()), + std::get<1>(GetParam())); + ASSERT_THAT(str, ::testing::MatchesRegex(std::get<2>(GetParam()))); +} + } // end namespace diff --git a/test/templated_fixture_test.cc b/test/templated_fixture_test.cc index fe9865c..af239c3 100644 --- a/test/templated_fixture_test.cc +++ b/test/templated_fixture_test.cc @@ -1,9 +1,9 @@ -#include "benchmark/benchmark.h" - #include <cassert> #include <memory> +#include "benchmark/benchmark.h" + template <typename T> class MyFixture : public ::benchmark::Fixture { public: diff --git a/test/time_unit_gtest.cc b/test/time_unit_gtest.cc new file mode 100644 index 0000000..484ecbc --- /dev/null +++ b/test/time_unit_gtest.cc @@ -0,0 +1,37 @@ +#include "../include/benchmark/benchmark.h" +#include "gtest/gtest.h" + +namespace 
benchmark { +namespace internal { + +namespace { + +class DummyBenchmark : public Benchmark { + public: + DummyBenchmark() : Benchmark("dummy") {} + void Run(State&) override {} +}; + +TEST(DefaultTimeUnitTest, TimeUnitIsNotSet) { + DummyBenchmark benchmark; + EXPECT_EQ(benchmark.GetTimeUnit(), kNanosecond); +} + +TEST(DefaultTimeUnitTest, DefaultIsSet) { + DummyBenchmark benchmark; + EXPECT_EQ(benchmark.GetTimeUnit(), kNanosecond); + SetDefaultTimeUnit(kMillisecond); + EXPECT_EQ(benchmark.GetTimeUnit(), kMillisecond); +} + +TEST(DefaultTimeUnitTest, DefaultAndExplicitUnitIsSet) { + DummyBenchmark benchmark; + benchmark.Unit(kMillisecond); + SetDefaultTimeUnit(kMicrosecond); + + EXPECT_EQ(benchmark.GetTimeUnit(), kMillisecond); +} + +} // namespace +} // namespace internal +} // namespace benchmark diff --git a/test/user_counters_tabular_test.cc b/test/user_counters_tabular_test.cc index 18373c0..c98b769 100644 --- a/test/user_counters_tabular_test.cc +++ b/test/user_counters_tabular_test.cc @@ -7,19 +7,25 @@ // @todo: <jpmag> this checks the full output at once; the rule for // CounterSet1 was failing because it was not matching "^[-]+$". // @todo: <jpmag> check that the counters are vertically aligned. -ADD_CASES( - TC_ConsoleOut, - { - // keeping these lines long improves readability, so: - // clang-format off +ADD_CASES(TC_ConsoleOut, + { + // keeping these lines long improves readability, so: + // clang-format off {"^[-]+$", MR_Next}, {"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Bat %s Baz %s Foo %s Frob %s Lob$", MR_Next}, {"^[-]+$", MR_Next}, - {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/repeats:2/threads:1 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/repeats:2/threads:1 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/repeats:2/threads:1_mean %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/repeats:2/threads:1_median %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/repeats:2/threads:1_stddev %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/repeats:2/threads:1_cv %console_percentage_report [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*%$", MR_Next}, + {"^BM_Counters_Tabular/repeats:2/threads:2 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/repeats:2/threads:2 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ 
]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/repeats:2/threads:2_mean %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/repeats:2/threads:2_median %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/repeats:2/threads:2_stddev %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/repeats:2/threads:2_cv %console_percentage_report [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*%$", MR_Next}, {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next}, {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next}, {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next}, @@ -46,8 +52,8 @@ ADD_CASES( {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$"}, - // clang-format on - }); + // clang-format on + }); ADD_CASES(TC_CSVOut, {{"%csv_header," "\"Bar\",\"Bat\",\"Baz\",\"Foo\",\"Frob\",\"Lob\""}}); @@ -68,12 +74,15 @@ void BM_Counters_Tabular(benchmark::State& state) { {"Lob", {32, bm::Counter::kAvgThreads}}, }); } -BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 16); +BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 2)->Repetitions(2); ADD_CASES(TC_JSONOut, - {{"\"name\": \"BM_Counters_Tabular/threads:%int\",$"}, - {"\"run_name\": \"BM_Counters_Tabular/threads:%int\",$", MR_Next}, + {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$", + MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"iterations\": %int,$", MR_Next}, @@ -87,8 +96,260 @@ ADD_CASES(TC_JSONOut, {"\"Frob\": %float,$", MR_Next}, {"\"Lob\": %float$", MR_Next}, {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Tabular/threads:%int\",%csv_report," - "%float,%float,%float,%float,%float,%float$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"repetition_index\": 1,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": 
%float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_mean\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_median\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_stddev\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_cv\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"cv\",$", MR_Next}, + {"\"aggregate_unit\": \"percentage\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); + +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$"}, + 
{"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 1,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 2,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 1,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"repetition_index\": 1,$", MR_Next}, + {"\"threads\": 2,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2_median\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 1,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 2,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2_stddev\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 1,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 2,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2_cv\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 1,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + 
{"\"threads\": 2,$", MR_Next}, + {"\"aggregate_name\": \"cv\",$", MR_Next}, + {"\"aggregate_unit\": \"percentage\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_Tabular/repeats:2/threads:1\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_Tabular/repeats:2/threads:1\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_Tabular/repeats:2/threads:1_mean\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_Tabular/repeats:2/threads:1_median\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_Tabular/repeats:2/threads:1_stddev\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_Tabular/repeats:2/threads:1_cv\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_Tabular/repeats:2/threads:2\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_Tabular/repeats:2/threads:2\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_Tabular/repeats:2/threads:2_mean\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_Tabular/repeats:2/threads:2_median\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_Tabular/repeats:2/threads:2_stddev\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_Tabular/repeats:2/threads:2_cv\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); // VS2013 does not allow this function to be passed as a lambda argument // to CHECK_BENCHMARK_RESULTS() void CheckTabular(Results const& e) { @@ -99,7 +360,10 @@ void CheckTabular(Results const& e) { CHECK_COUNTER_VALUE(e, int, "Frob", EQ, 16); CHECK_COUNTER_VALUE(e, int, "Lob", EQ, 32); } -CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/threads:%int", &CheckTabular); +CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/repeats:2/threads:1$", + &CheckTabular); +CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/repeats:2/threads:2$", + &CheckTabular); // ========================================================================= // // -------------------- Tabular+Rate Counters Output ----------------------- // @@ -108,7 +372,8 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/threads:%int", &CheckTabular); void BM_CounterRates_Tabular(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } namespace bm = benchmark; state.counters.insert({ @@ -123,10 +388,12 @@ void BM_CounterRates_Tabular(benchmark::State& state) { BENCHMARK(BM_CounterRates_Tabular)->ThreadRange(1, 16); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"}, + 
{"\"family_index\": 1,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_CounterRates_Tabular/threads:%int\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"iterations\": %int,$", MR_Next}, @@ -174,9 +441,11 @@ void BM_CounterSet0_Tabular(benchmark::State& state) { BENCHMARK(BM_CounterSet0_Tabular)->ThreadRange(1, 16); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"}, + {"\"family_index\": 2,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_CounterSet0_Tabular/threads:%int\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"iterations\": %int,$", MR_Next}, @@ -212,9 +481,11 @@ void BM_CounterSet1_Tabular(benchmark::State& state) { BENCHMARK(BM_CounterSet1_Tabular)->ThreadRange(1, 16); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"}, + {"\"family_index\": 3,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_CounterSet1_Tabular/threads:%int\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"iterations\": %int,$", MR_Next}, @@ -254,9 +525,11 @@ void BM_CounterSet2_Tabular(benchmark::State& state) { BENCHMARK(BM_CounterSet2_Tabular)->ThreadRange(1, 16); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"}, + {"\"family_index\": 4,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_CounterSet2_Tabular/threads:%int\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"iterations\": %int,$", MR_Next}, diff --git a/test/user_counters_test.cc b/test/user_counters_test.cc index 5699f4f..4cd8ee3 100644 --- a/test/user_counters_test.cc +++ b/test/user_counters_test.cc @@ -26,15 +26,17 @@ void BM_Counters_Simple(benchmark::State& state) { for (auto _ : state) { } state.counters["foo"] = 1; - state.counters["bar"] = 2 * (double)state.iterations(); + state.counters["bar"] = 2 * static_cast<double>(state.iterations()); } BENCHMARK(BM_Counters_Simple); ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_Counters_Simple\",$", MR_Next}, {"\"run_type\": \"iteration\",$", MR_Next}, - {"\"repetitions\": 0,$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, {"\"repetition_index\": 0,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"iterations\": %int,$", MR_Next}, @@ -65,7 +67,8 @@ int num_calls1 = 0; void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) { for (auto _ : state) { // This test requires a non-zero CPU time to avoid divide-by-zero - benchmark::DoNotOptimize(state.iterations()); + auto iterations = state.iterations(); + benchmark::DoNotOptimize(iterations); } state.counters["foo"] = 1; state.counters["bar"] = ++num_calls1; @@ -78,9 +81,11 
@@ -78,9 +81,11 @@ ADD_CASES(TC_ConsoleOut,
           {{"^BM_Counters_WithBytesAndItemsPSec %console_report "
             "foo=%hrfloat items_per_second=%hrfloat/s$"}});
 ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"},
+                       {"\"family_index\": 1,$", MR_Next},
+                       {"\"per_family_instance_index\": 0,$", MR_Next},
                        {"\"run_name\": \"BM_Counters_WithBytesAndItemsPSec\",$", MR_Next},
                        {"\"run_type\": \"iteration\",$", MR_Next},
-                       {"\"repetitions\": 0,$", MR_Next},
+                       {"\"repetitions\": 1,$", MR_Next},
                        {"\"repetition_index\": 0,$", MR_Next},
                        {"\"threads\": 1,$", MR_Next},
                        {"\"iterations\": %int,$", MR_Next},
@@ -114,7 +119,8 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec",
 void BM_Counters_Rate(benchmark::State& state) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
   namespace bm = benchmark;
   state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate};
@@ -125,9 +131,11 @@ ADD_CASES(
     TC_ConsoleOut,
     {{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
 ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"},
+                       {"\"family_index\": 2,$", MR_Next},
+                       {"\"per_family_instance_index\": 0,$", MR_Next},
                        {"\"run_name\": \"BM_Counters_Rate\",$", MR_Next},
                        {"\"run_type\": \"iteration\",$", MR_Next},
-                       {"\"repetitions\": 0,$", MR_Next},
+                       {"\"repetitions\": 1,$", MR_Next},
                        {"\"repetition_index\": 0,$", MR_Next},
                        {"\"threads\": 1,$", MR_Next},
                        {"\"iterations\": %int,$", MR_Next},
@@ -155,7 +163,8 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate);
 void BM_Invert(benchmark::State& state) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
   namespace bm = benchmark;
   state.counters["foo"] = bm::Counter{0.0001, bm::Counter::kInvert};
@@ -165,9 +174,11 @@ BENCHMARK(BM_Invert);
 ADD_CASES(TC_ConsoleOut,
           {{"^BM_Invert %console_report bar=%hrfloatu foo=%hrfloatk$"}});
 ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Invert\",$"},
+                       {"\"family_index\": 3,$", MR_Next},
+                       {"\"per_family_instance_index\": 0,$", MR_Next},
                        {"\"run_name\": \"BM_Invert\",$", MR_Next},
                        {"\"run_type\": \"iteration\",$", MR_Next},
-                       {"\"repetitions\": 0,$", MR_Next},
+                       {"\"repetitions\": 1,$", MR_Next},
                        {"\"repetition_index\": 0,$", MR_Next},
                        {"\"threads\": 1,$", MR_Next},
                        {"\"iterations\": %int,$", MR_Next},
@@ -187,14 +198,14 @@ void CheckInvert(Results const& e) {
 CHECK_BENCHMARK_RESULTS("BM_Invert", &CheckInvert);

 // ========================================================================= //
-// ------------------------- InvertedRate Counters Output
-// -------------------------- //
+// --------------------- InvertedRate Counters Output ---------------------- //
 // ========================================================================= //

 void BM_Counters_InvertedRate(benchmark::State& state) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
   namespace bm = benchmark;
   state.counters["foo"] =
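
[editor's note] The hunks around here exercise three counter flavors: kIsRate divides the final value by elapsed time (the /s suffixes), kInvert reports 1/value (0.0001 surfaces as 10k, hence bar=%hrfloatu foo=%hrfloatk for BM_Invert), and combining the two yields seconds-per-unit (%hrfloats in BM_Counters_InvertedRate). A compact sketch against the benchmark::Counter API, illustrative names only:

    #include <benchmark/benchmark.h>
    namespace bm = benchmark;

    static void BM_FlagSketch(benchmark::State& state) {
      for (auto _ : state) {
        auto iterations = state.iterations();
        benchmark::DoNotOptimize(iterations);  // keep CPU time non-zero
      }
      // Reported as value / elapsed seconds:
      state.counters["rate"] = bm::Counter{1, bm::Counter::kIsRate};
      // Reported as 1 / value (0.0001 -> 10k):
      state.counters["period"] = bm::Counter{0.0001, bm::Counter::kInvert};
    }
    BENCHMARK(BM_FlagSketch);
    BENCHMARK_MAIN();
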
@@ -207,9 +218,11 @@ ADD_CASES(TC_ConsoleOut,
           {{"^BM_Counters_InvertedRate %console_report "
             "bar=%hrfloats foo=%hrfloats$"}});
 ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_InvertedRate\",$"},
+                       {"\"family_index\": 4,$", MR_Next},
+                       {"\"per_family_instance_index\": 0,$", MR_Next},
                        {"\"run_name\": \"BM_Counters_InvertedRate\",$", MR_Next},
                        {"\"run_type\": \"iteration\",$", MR_Next},
-                       {"\"repetitions\": 0,$", MR_Next},
+                       {"\"repetitions\": 1,$", MR_Next},
                        {"\"repetition_index\": 0,$", MR_Next},
                        {"\"threads\": 1,$", MR_Next},
                        {"\"iterations\": %int,$", MR_Next},
@@ -246,9 +259,11 @@ ADD_CASES(TC_ConsoleOut,
           {{"^BM_Counters_Threads/threads:%int %console_report "
             "bar=%hrfloat foo=%hrfloat$"}});
 ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"},
+                       {"\"family_index\": 5,$", MR_Next},
+                       {"\"per_family_instance_index\": 0,$", MR_Next},
                        {"\"run_name\": \"BM_Counters_Threads/threads:%int\",$", MR_Next},
                        {"\"run_type\": \"iteration\",$", MR_Next},
-                       {"\"repetitions\": 0,$", MR_Next},
+                       {"\"repetitions\": 1,$", MR_Next},
                        {"\"repetition_index\": 0,$", MR_Next},
                        {"\"threads\": 1,$", MR_Next},
                        {"\"iterations\": %int,$", MR_Next},
@@ -285,9 +300,11 @@ ADD_CASES(TC_ConsoleOut,
           {{"^BM_Counters_AvgThreads/threads:%int "
             "%console_report bar=%hrfloat foo=%hrfloat$"}});
 ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"},
+                       {"\"family_index\": 6,$", MR_Next},
+                       {"\"per_family_instance_index\": 0,$", MR_Next},
                        {"\"run_name\": \"BM_Counters_AvgThreads/threads:%int\",$", MR_Next},
                        {"\"run_type\": \"iteration\",$", MR_Next},
-                       {"\"repetitions\": 0,$", MR_Next},
+                       {"\"repetitions\": 1,$", MR_Next},
                        {"\"repetition_index\": 0,$", MR_Next},
                        {"\"threads\": 1,$", MR_Next},
                        {"\"iterations\": %int,$", MR_Next},
@@ -316,7 +333,8 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int",
 void BM_Counters_AvgThreadsRate(benchmark::State& state) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
   namespace bm = benchmark;
   state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreadsRate};
@@ -327,10 +345,12 @@ ADD_CASES(TC_ConsoleOut,
           {{"^BM_Counters_AvgThreadsRate/threads:%int "
             "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
 ADD_CASES(TC_JSONOut,
           {{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"},
+           {"\"family_index\": 7,$", MR_Next},
+           {"\"per_family_instance_index\": 0,$", MR_Next},
            {"\"run_name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$",
             MR_Next},
            {"\"run_type\": \"iteration\",$", MR_Next},
-           {"\"repetitions\": 0,$", MR_Next},
+           {"\"repetitions\": 1,$", MR_Next},
            {"\"repetition_index\": 0,$", MR_Next},
            {"\"threads\": 1,$", MR_Next},
            {"\"iterations\": %int,$", MR_Next},
@@ -367,9 +387,11 @@ ADD_CASES(TC_ConsoleOut,
           {{"^BM_Counters_IterationInvariant %console_report "
             "bar=%hrfloat foo=%hrfloat$"}});
 ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_IterationInvariant\",$"},
+                       {"\"family_index\": 8,$", MR_Next},
+                       {"\"per_family_instance_index\": 0,$", MR_Next},
                        {"\"run_name\": \"BM_Counters_IterationInvariant\",$", MR_Next},
                        {"\"run_type\": \"iteration\",$", MR_Next},
-                       {"\"repetitions\": 0,$", MR_Next},
+                       {"\"repetitions\": 1,$", MR_Next},
                        {"\"repetition_index\": 0,$", MR_Next},
                        {"\"threads\": 1,$", MR_Next},
                        {"\"iterations\": %int,$", MR_Next},
@@ -399,7 +421,8 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant",
 void BM_Counters_kIsIterationInvariantRate(benchmark::State& state) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
   namespace bm = benchmark;
   state.counters["foo"] =
@@ -412,10 +435,12 @@ ADD_CASES(TC_ConsoleOut,
           {{"^BM_Counters_kIsIterationInvariantRate "
             "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
 ADD_CASES(TC_JSONOut,
           {{"\"name\": \"BM_Counters_kIsIterationInvariantRate\",$"},
+           {"\"family_index\": 9,$", MR_Next},
+           {"\"per_family_instance_index\": 0,$", MR_Next},
            {"\"run_name\": \"BM_Counters_kIsIterationInvariantRate\",$",
             MR_Next},
            {"\"run_type\": \"iteration\",$", MR_Next},
-           {"\"repetitions\": 0,$", MR_Next},
+           {"\"repetitions\": 1,$", MR_Next},
            {"\"repetition_index\": 0,$", MR_Next},
            {"\"threads\": 1,$", MR_Next},
            {"\"iterations\": %int,$", MR_Next},
@@ -440,7 +465,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_kIsIterationInvariantRate",
                         &CheckIsIterationInvariantRate);

 // ========================================================================= //
-// ------------------- AvgIterations Counters Output ------------------ //
+// --------------------- AvgIterations Counters Output --------------------- //
 // ========================================================================= //

 void BM_Counters_AvgIterations(benchmark::State& state) {
@@ -455,9 +480,11 @@ ADD_CASES(TC_ConsoleOut,
           {{"^BM_Counters_AvgIterations %console_report "
             "bar=%hrfloat foo=%hrfloat$"}});
 ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgIterations\",$"},
+                       {"\"family_index\": 10,$", MR_Next},
+                       {"\"per_family_instance_index\": 0,$", MR_Next},
                        {"\"run_name\": \"BM_Counters_AvgIterations\",$", MR_Next},
                        {"\"run_type\": \"iteration\",$", MR_Next},
-                       {"\"repetitions\": 0,$", MR_Next},
+                       {"\"repetitions\": 1,$", MR_Next},
                        {"\"repetition_index\": 0,$", MR_Next},
                        {"\"threads\": 1,$", MR_Next},
                        {"\"iterations\": %int,$", MR_Next},
@@ -480,13 +507,14 @@ void CheckAvgIterations(Results const& e) {
 CHECK_BENCHMARK_RESULTS("BM_Counters_AvgIterations", &CheckAvgIterations);

 // ========================================================================= //
-// ----------------- AvgIterationsRate Counters Output ---------------- //
+// ------------------- AvgIterationsRate Counters Output ------------------- //
 // ========================================================================= //

 void BM_Counters_kAvgIterationsRate(benchmark::State& state) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
-    benchmark::DoNotOptimize(state.iterations());
+    auto iterations = state.iterations();
+    benchmark::DoNotOptimize(iterations);
   }
   namespace bm = benchmark;
   state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterationsRate};
@@ -498,9 +526,11 @@ ADD_CASES(TC_ConsoleOut,
           {{"^BM_Counters_kAvgIterationsRate "
             "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
 ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_kAvgIterationsRate\",$"},
+                       {"\"family_index\": 11,$", MR_Next},
+                       {"\"per_family_instance_index\": 0,$", MR_Next},
                        {"\"run_name\": \"BM_Counters_kAvgIterationsRate\",$", MR_Next},
                        {"\"run_type\": \"iteration\",$", MR_Next},
-                       {"\"repetitions\": 0,$", MR_Next},
+                       {"\"repetitions\": 1,$", MR_Next},
                        {"\"repetition_index\": 0,$", MR_Next},
                        {"\"threads\": 1,$", MR_Next},
                        {"\"iterations\": %int,$", MR_Next},
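
[editor's note] The user_counters_test.cc hunks above cover the per-iteration counter flavors before the diff moves on to the thousands test. Assumed semantics, matching the flag names: kIsIterationInvariant scales the value by the iteration count, kAvgIterations divides by it, and the *Rate variants additionally divide by elapsed time. Sketch with illustrative names:

    #include <benchmark/benchmark.h>
    namespace bm = benchmark;

    static void BM_IterationFlagSketch(benchmark::State& state) {
      for (auto _ : state) {
        auto iterations = state.iterations();
        benchmark::DoNotOptimize(iterations);
      }
      // value * iterations:
      state.counters["invariant"] =
          bm::Counter{1, bm::Counter::kIsIterationInvariant};
      // value * iterations / elapsed seconds:
      state.counters["invariant_rate"] =
          bm::Counter{1, bm::Counter::kIsIterationInvariantRate};
      // value / iterations:
      state.counters["avg"] = bm::Counter{1, bm::Counter::kAvgIterations};
    }
    BENCHMARK(BM_IterationFlagSketch);
    BENCHMARK_MAIN();
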
diff --git a/test/user_counters_thousands_test.cc b/test/user_counters_thousands_test.cc
index 21d8285..fc15383 100644
--- a/test/user_counters_thousands_test.cc
+++ b/test/user_counters_thousands_test.cc
@@ -16,13 +16,13 @@ void BM_Counters_Thousands(benchmark::State& state) {
       {"t0_1000000DefaultBase", bm::Counter(1000 * 1000,
                                             bm::Counter::kDefaults)},
       {"t1_1000000Base1000", bm::Counter(1000 * 1000, bm::Counter::kDefaults,
-                                         benchmark::Counter::OneK::kIs1000)},
+                                         bm::Counter::OneK::kIs1000)},
       {"t2_1000000Base1024", bm::Counter(1000 * 1000, bm::Counter::kDefaults,
-                                         benchmark::Counter::OneK::kIs1024)},
+                                         bm::Counter::OneK::kIs1024)},
       {"t3_1048576Base1000", bm::Counter(1024 * 1024, bm::Counter::kDefaults,
-                                         benchmark::Counter::OneK::kIs1000)},
+                                         bm::Counter::OneK::kIs1000)},
       {"t4_1048576Base1024", bm::Counter(1024 * 1024, bm::Counter::kDefaults,
-                                         benchmark::Counter::OneK::kIs1024)},
+                                         bm::Counter::OneK::kIs1024)},
   });
 }
 BENCHMARK(BM_Counters_Thousands)->Repetitions(2);
@@ -30,27 +30,29 @@ ADD_CASES(
     TC_ConsoleOut,
     {
         {"^BM_Counters_Thousands/repeats:2 %console_report "
-         "t0_1000000DefaultBase=1000k "
-         "t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k "
-         "t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"},
+         "t0_1000000DefaultBase=1M "
+         "t1_1000000Base1000=1M t2_1000000Base1024=976.56[23]Ki "
+         "t3_1048576Base1000=1.04858M t4_1048576Base1024=1Mi$"},
         {"^BM_Counters_Thousands/repeats:2 %console_report "
-         "t0_1000000DefaultBase=1000k "
-         "t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k "
-         "t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"},
+         "t0_1000000DefaultBase=1M "
+         "t1_1000000Base1000=1M t2_1000000Base1024=976.56[23]Ki "
+         "t3_1048576Base1000=1.04858M t4_1048576Base1024=1Mi$"},
         {"^BM_Counters_Thousands/repeats:2_mean %console_report "
-         "t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k "
-         "t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k "
-         "t4_1048576Base1024=1024k$"},
+         "t0_1000000DefaultBase=1M t1_1000000Base1000=1M "
+         "t2_1000000Base1024=976.56[23]Ki t3_1048576Base1000=1.04858M "
+         "t4_1048576Base1024=1Mi$"},
         {"^BM_Counters_Thousands/repeats:2_median %console_report "
-         "t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k "
-         "t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k "
-         "t4_1048576Base1024=1024k$"},
+         "t0_1000000DefaultBase=1M t1_1000000Base1000=1M "
+         "t2_1000000Base1024=976.56[23]Ki t3_1048576Base1000=1.04858M "
+         "t4_1048576Base1024=1Mi$"},
        {"^BM_Counters_Thousands/repeats:2_stddev %console_time_only_report [ "
         "]*2 t0_1000000DefaultBase=0 t1_1000000Base1000=0 "
         "t2_1000000Base1024=0 t3_1048576Base1000=0 t4_1048576Base1024=0$"},
     });
 ADD_CASES(TC_JSONOut,
           {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
+           {"\"family_index\": 0,$", MR_Next},
+           {"\"per_family_instance_index\": 0,$", MR_Next},
            {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
            {"\"run_type\": \"iteration\",$", MR_Next},
            {"\"repetitions\": 2,$", MR_Next},
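
[editor's note] The console expectations above change because counter output now distinguishes SI from IEC scaling instead of always printing a bare k: base-1000 counters use 1000-based suffixes (1M, 1.04858M) while base-1024 counters use 1024-based ones (Ki, Mi). The arithmetic behind the patterns: 1,000,000 / 1024 = 976.5625, printed as 976.562Ki or 976.563Ki depending on rounding (hence the 976.56[23]Ki regex); 1,048,576 / 1,000,000 = 1.048576, printed as 1.04858M; and 1,048,576 is exactly 1Mi. Sketch of picking the base via Counter's third constructor argument:

    #include <benchmark/benchmark.h>
    namespace bm = benchmark;

    static void BM_BaseSketch(benchmark::State& state) {
      for (auto _ : state) {
      }
      // 1e6 scaled by 1024 -> ~976.562Ki on the console:
      state.counters["b1024"] = bm::Counter(
          1000 * 1000, bm::Counter::kDefaults, bm::Counter::OneK::kIs1024);
      // 1024*1024 scaled by 1000 -> 1.04858M:
      state.counters["b1000"] = bm::Counter(
          1024 * 1024, bm::Counter::kDefaults, bm::Counter::OneK::kIs1000);
    }
    BENCHMARK(BM_BaseSketch);
    BENCHMARK_MAIN();
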
%float,$", MR_Next}, {"\"cpu_time\": %float,$", MR_Next}, @@ -102,11 +109,14 @@ ADD_CASES(TC_JSONOut, {"}", MR_Next}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Thousands/repeats:2_median\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next}, {"\"repetitions\": 2,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 2,$", MR_Next}, {"\"real_time\": %float,$", MR_Next}, {"\"cpu_time\": %float,$", MR_Next}, @@ -119,11 +129,14 @@ ADD_CASES(TC_JSONOut, {"}", MR_Next}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Thousands/repeats:2_stddev\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next}, {"\"run_type\": \"aggregate\",$", MR_Next}, {"\"repetitions\": 2,$", MR_Next}, {"\"threads\": 1,$", MR_Next}, {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, {"\"iterations\": 2,$", MR_Next}, {"\"real_time\": %float,$", MR_Next}, {"\"cpu_time\": %float,$", MR_Next}, |