author    Elliott Hughes <enh@google.com>  2024-05-01 18:54:24 +0000
committer Elliott Hughes <enh@google.com>  2024-05-01 18:54:24 +0000
commit    4bf40a4860e0d76a3deb396606105b617fcd89a8 (patch)
tree      f3d6ef225548876298c1729555a7cfe26e2cadd6 /test
parent    56734c9de21b723684c083ccf7fcb8dad46ca387 (diff)
parent    bc946b919cac6f25a199a526da571638cfde109f (diff)
download  google-benchmark-4bf40a4860e0d76a3deb396606105b617fcd89a8.tar.gz
Upgrade google-benchmark to bc946b919cac6f25a199a526da571638cfde109f
This project was upgraded with external_updater.
Usage: tools/external_updater/updater.sh update external/google-benchmark
For more info, check https://cs.android.com/android/platform/superproject/+/main:tools/external_updater/README.md

Test: TreeHugger
Change-Id: I99620b3dfaa6fe54a1a1ebe857e4affb449fc469
Diffstat (limited to 'test')
-rw-r--r--  test/BUILD                           5
-rw-r--r--  test/CMakeLists.txt                 89
-rw-r--r--  test/basic_test.cc                   2
-rw-r--r--  test/benchmark_gtest.cc              2
-rw-r--r--  test/benchmark_test.cc              26
-rw-r--r--  test/complexity_test.cc            160
-rw-r--r--  test/diagnostics_test.cc             4
-rw-r--r--  test/link_main_test.cc               2
-rw-r--r--  test/memory_manager_test.cc          2
-rw-r--r--  test/output_test_helper.cc           1
-rw-r--r--  test/perf_counters_gtest.cc          2
-rw-r--r--  test/perf_counters_test.cc           2
-rw-r--r--  test/reporter_output_test.cc        11
-rw-r--r--  test/skip_with_error_test.cc         2
-rw-r--r--  test/statistics_gtest.cc             4
-rw-r--r--  test/user_counters_tabular_test.cc   9
-rw-r--r--  test/user_counters_test.cc          14
17 files changed, 213 insertions(+), 124 deletions(-)
diff --git a/test/BUILD b/test/BUILD
index ea34fd4..b245fa7 100644
--- a/test/BUILD
+++ b/test/BUILD
@@ -18,6 +18,10 @@ TEST_COPTS = [
# "-Wshorten-64-to-32",
"-Wfloat-equal",
"-fstrict-aliasing",
+ ## assert() is used a lot in tests upstream and may be optimised out, leading to
+ ## unused-variable warnings.
+ "-Wno-unused-variable",
+ "-Werror=old-style-cast",
]
# Some of the issues with DoNotOptimize only occur when optimization is enabled
@@ -32,6 +36,7 @@ PER_SRC_TEST_ARGS = {
"repetitions_test.cc": [" --benchmark_repetitions=3"],
"spec_arg_test.cc": ["--benchmark_filter=BM_NotChosen"],
"spec_arg_verbosity_test.cc": ["--v=42"],
+ "complexity_test.cc": ["--benchmark_min_time=1000000x"],
}
cc_library(
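
For context, here is a minimal sketch (not from this patch; hypothetical function name) of the pattern the new -Wno-unused-variable flag works around: when tests are built with -DNDEBUG, assert() expands to nothing, and any variable that exists only to be asserted on becomes unused.

    // Hypothetical illustration, assuming an NDEBUG (release) build.
    #include <cassert>

    void Check() {
      int computed = 6 * 7;    // only consumed by the assert below
      assert(computed == 42);  // expands to nothing under -DNDEBUG,
                               // so `computed` triggers -Wunused-variable
    }
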
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index fd88131..1de175f 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -5,6 +5,8 @@ set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
include(CheckCXXCompilerFlag)
+add_cxx_compiler_flag(-Wno-unused-variable)
+
# NOTE: Some tests use `<cassert>` to perform the test. Therefore we must
# strip -DNDEBUG from the default CMake flags in DEBUG mode.
string(TOUPPER "${CMAKE_BUILD_TYPE}" uppercase_CMAKE_BUILD_TYPE)
@@ -62,30 +64,38 @@ macro(compile_output_test name)
${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
endmacro(compile_output_test)
+macro(benchmark_add_test)
+ add_test(${ARGV})
+ if(WIN32 AND BUILD_SHARED_LIBS)
+ cmake_parse_arguments(TEST "" "NAME" "" ${ARGN})
+ set_tests_properties(${TEST_NAME} PROPERTIES ENVIRONMENT_MODIFICATION "PATH=path_list_prepend:$<TARGET_FILE_DIR:benchmark::benchmark>")
+ endif()
+endmacro(benchmark_add_test)
+
# Demonstration executable
compile_benchmark_test(benchmark_test)
-add_test(NAME benchmark COMMAND benchmark_test --benchmark_min_time=0.01s)
+benchmark_add_test(NAME benchmark COMMAND benchmark_test --benchmark_min_time=0.01s)
compile_benchmark_test(spec_arg_test)
-add_test(NAME spec_arg COMMAND spec_arg_test --benchmark_filter=BM_NotChosen)
+benchmark_add_test(NAME spec_arg COMMAND spec_arg_test --benchmark_filter=BM_NotChosen)
compile_benchmark_test(spec_arg_verbosity_test)
-add_test(NAME spec_arg_verbosity COMMAND spec_arg_verbosity_test --v=42)
+benchmark_add_test(NAME spec_arg_verbosity COMMAND spec_arg_verbosity_test --v=42)
compile_benchmark_test(benchmark_setup_teardown_test)
-add_test(NAME benchmark_setup_teardown COMMAND benchmark_setup_teardown_test)
+benchmark_add_test(NAME benchmark_setup_teardown COMMAND benchmark_setup_teardown_test)
compile_benchmark_test(filter_test)
macro(add_filter_test name filter expect)
- add_test(NAME ${name} COMMAND filter_test --benchmark_min_time=0.01s --benchmark_filter=${filter} ${expect})
- add_test(NAME ${name}_list_only COMMAND filter_test --benchmark_list_tests --benchmark_filter=${filter} ${expect})
+ benchmark_add_test(NAME ${name} COMMAND filter_test --benchmark_min_time=0.01s --benchmark_filter=${filter} ${expect})
+ benchmark_add_test(NAME ${name}_list_only COMMAND filter_test --benchmark_list_tests --benchmark_filter=${filter} ${expect})
endmacro(add_filter_test)
compile_benchmark_test(benchmark_min_time_flag_time_test)
-add_test(NAME min_time_flag_time COMMAND benchmark_min_time_flag_time_test)
+benchmark_add_test(NAME min_time_flag_time COMMAND benchmark_min_time_flag_time_test)
compile_benchmark_test(benchmark_min_time_flag_iters_test)
-add_test(NAME min_time_flag_iters COMMAND benchmark_min_time_flag_iters_test)
+benchmark_add_test(NAME min_time_flag_iters COMMAND benchmark_min_time_flag_iters_test)
add_filter_test(filter_simple "Foo" 3)
add_filter_test(filter_simple_negative "-Foo" 2)
@@ -107,19 +117,19 @@ add_filter_test(filter_regex_end ".*Ba$" 1)
add_filter_test(filter_regex_end_negative "-.*Ba$" 4)
compile_benchmark_test(options_test)
-add_test(NAME options_benchmarks COMMAND options_test --benchmark_min_time=0.01s)
+benchmark_add_test(NAME options_benchmarks COMMAND options_test --benchmark_min_time=0.01s)
compile_benchmark_test(basic_test)
-add_test(NAME basic_benchmark COMMAND basic_test --benchmark_min_time=0.01s)
+benchmark_add_test(NAME basic_benchmark COMMAND basic_test --benchmark_min_time=0.01s)
compile_output_test(repetitions_test)
-add_test(NAME repetitions_benchmark COMMAND repetitions_test --benchmark_min_time=0.01s --benchmark_repetitions=3)
+benchmark_add_test(NAME repetitions_benchmark COMMAND repetitions_test --benchmark_min_time=0.01s --benchmark_repetitions=3)
compile_benchmark_test(diagnostics_test)
-add_test(NAME diagnostics_test COMMAND diagnostics_test --benchmark_min_time=0.01s)
+benchmark_add_test(NAME diagnostics_test COMMAND diagnostics_test --benchmark_min_time=0.01s)
compile_benchmark_test(skip_with_error_test)
-add_test(NAME skip_with_error_test COMMAND skip_with_error_test --benchmark_min_time=0.01s)
+benchmark_add_test(NAME skip_with_error_test COMMAND skip_with_error_test --benchmark_min_time=0.01s)
compile_benchmark_test(donotoptimize_test)
# Enable errors for deprecated deprecations (DoNotOptimize(Tp const& value)).
@@ -132,58 +142,58 @@ check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG)
if (BENCHMARK_HAS_O3_FLAG)
set_target_properties(donotoptimize_test PROPERTIES COMPILE_FLAGS "-O3")
endif()
-add_test(NAME donotoptimize_test COMMAND donotoptimize_test --benchmark_min_time=0.01s)
+benchmark_add_test(NAME donotoptimize_test COMMAND donotoptimize_test --benchmark_min_time=0.01s)
compile_benchmark_test(fixture_test)
-add_test(NAME fixture_test COMMAND fixture_test --benchmark_min_time=0.01s)
+benchmark_add_test(NAME fixture_test COMMAND fixture_test --benchmark_min_time=0.01s)
compile_benchmark_test(register_benchmark_test)
-add_test(NAME register_benchmark_test COMMAND register_benchmark_test --benchmark_min_time=0.01s)
+benchmark_add_test(NAME register_benchmark_test COMMAND register_benchmark_test --benchmark_min_time=0.01s)
compile_benchmark_test(map_test)
-add_test(NAME map_test COMMAND map_test --benchmark_min_time=0.01s)
+benchmark_add_test(NAME map_test COMMAND map_test --benchmark_min_time=0.01s)
compile_benchmark_test(multiple_ranges_test)
-add_test(NAME multiple_ranges_test COMMAND multiple_ranges_test --benchmark_min_time=0.01s)
+benchmark_add_test(NAME multiple_ranges_test COMMAND multiple_ranges_test --benchmark_min_time=0.01s)
compile_benchmark_test(args_product_test)
-add_test(NAME args_product_test COMMAND args_product_test --benchmark_min_time=0.01s)
+benchmark_add_test(NAME args_product_test COMMAND args_product_test --benchmark_min_time=0.01s)
compile_benchmark_test_with_main(link_main_test)
-add_test(NAME link_main_test COMMAND link_main_test --benchmark_min_time=0.01s)
+benchmark_add_test(NAME link_main_test COMMAND link_main_test --benchmark_min_time=0.01s)
compile_output_test(reporter_output_test)
-add_test(NAME reporter_output_test COMMAND reporter_output_test --benchmark_min_time=0.01s)
+benchmark_add_test(NAME reporter_output_test COMMAND reporter_output_test --benchmark_min_time=0.01s)
compile_output_test(templated_fixture_test)
-add_test(NAME templated_fixture_test COMMAND templated_fixture_test --benchmark_min_time=0.01s)
+benchmark_add_test(NAME templated_fixture_test COMMAND templated_fixture_test --benchmark_min_time=0.01s)
compile_output_test(user_counters_test)
-add_test(NAME user_counters_test COMMAND user_counters_test --benchmark_min_time=0.01s)
+benchmark_add_test(NAME user_counters_test COMMAND user_counters_test --benchmark_min_time=0.01s)
compile_output_test(perf_counters_test)
-add_test(NAME perf_counters_test COMMAND perf_counters_test --benchmark_min_time=0.01s --benchmark_perf_counters=CYCLES,BRANCHES)
+benchmark_add_test(NAME perf_counters_test COMMAND perf_counters_test --benchmark_min_time=0.01s --benchmark_perf_counters=CYCLES,INSTRUCTIONS)
compile_output_test(internal_threading_test)
-add_test(NAME internal_threading_test COMMAND internal_threading_test --benchmark_min_time=0.01s)
+benchmark_add_test(NAME internal_threading_test COMMAND internal_threading_test --benchmark_min_time=0.01s)
compile_output_test(report_aggregates_only_test)
-add_test(NAME report_aggregates_only_test COMMAND report_aggregates_only_test --benchmark_min_time=0.01s)
+benchmark_add_test(NAME report_aggregates_only_test COMMAND report_aggregates_only_test --benchmark_min_time=0.01s)
compile_output_test(display_aggregates_only_test)
-add_test(NAME display_aggregates_only_test COMMAND display_aggregates_only_test --benchmark_min_time=0.01s)
+benchmark_add_test(NAME display_aggregates_only_test COMMAND display_aggregates_only_test --benchmark_min_time=0.01s)
compile_output_test(user_counters_tabular_test)
-add_test(NAME user_counters_tabular_test COMMAND user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01s)
+benchmark_add_test(NAME user_counters_tabular_test COMMAND user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01s)
compile_output_test(user_counters_thousands_test)
-add_test(NAME user_counters_thousands_test COMMAND user_counters_thousands_test --benchmark_min_time=0.01s)
+benchmark_add_test(NAME user_counters_thousands_test COMMAND user_counters_thousands_test --benchmark_min_time=0.01s)
compile_output_test(memory_manager_test)
-add_test(NAME memory_manager_test COMMAND memory_manager_test --benchmark_min_time=0.01s)
+benchmark_add_test(NAME memory_manager_test COMMAND memory_manager_test --benchmark_min_time=0.01s)
# MSVC does not allow to set the language standard to C++98/03.
-if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+if(NOT (MSVC OR CMAKE_CXX_SIMULATE_ID STREQUAL "MSVC"))
compile_benchmark_test(cxx03_test)
set_target_properties(cxx03_test
PROPERTIES
@@ -205,17 +215,11 @@ if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
set(DISABLE_LTO_WARNINGS "${DISABLE_LTO_WARNINGS} -Wno-lto-type-mismatch")
endif()
set_target_properties(cxx03_test PROPERTIES LINK_FLAGS "${DISABLE_LTO_WARNINGS}")
- add_test(NAME cxx03 COMMAND cxx03_test --benchmark_min_time=0.01s)
+ benchmark_add_test(NAME cxx03 COMMAND cxx03_test --benchmark_min_time=0.01s)
endif()
-# Attempt to work around flaky test failures when running on Appveyor servers.
-if (DEFINED ENV{APPVEYOR})
- set(COMPLEXITY_MIN_TIME "0.5s")
-else()
- set(COMPLEXITY_MIN_TIME "0.01s")
-endif()
compile_output_test(complexity_test)
-add_test(NAME complexity_benchmark COMMAND complexity_test --benchmark_min_time=${COMPLEXITY_MIN_TIME})
+benchmark_add_test(NAME complexity_benchmark COMMAND complexity_test --benchmark_min_time=1000000x)
###############################################################################
# GoogleTest Unit Tests
@@ -230,7 +234,12 @@ if (BENCHMARK_ENABLE_GTEST_TESTS)
macro(add_gtest name)
compile_gtest(${name})
- add_test(NAME ${name} COMMAND ${name})
+ benchmark_add_test(NAME ${name} COMMAND ${name})
+ if(WIN32 AND BUILD_SHARED_LIBS)
+ set_tests_properties(${name} PROPERTIES
+ ENVIRONMENT_MODIFICATION "PATH=path_list_prepend:$<TARGET_FILE_DIR:benchmark::benchmark>;PATH=path_list_prepend:$<TARGET_FILE_DIR:gmock_main>"
+ )
+ endif()
endmacro()
add_gtest(benchmark_gtest)
diff --git a/test/basic_test.cc b/test/basic_test.cc
index cba1b0f..c25bec7 100644
--- a/test/basic_test.cc
+++ b/test/basic_test.cc
@@ -5,7 +5,7 @@
void BM_empty(benchmark::State& state) {
for (auto _ : state) {
- auto iterations = state.iterations();
+ auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
}
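
The same one-line change recurs throughout this patch. A standalone sketch of what it appears to buy (assuming nothing beyond the benchmark API already visible in the diff): squaring the iteration count through double guarantees a little real arithmetic per iteration, so the measured CPU time cannot collapse to zero.

    #include <benchmark/benchmark.h>

    static void BM_empty(benchmark::State& state) {
      for (auto _ : state) {
        // Two int-to-double conversions and a multiply: trivially cheap, but
        // real work, so per-iteration CPU time stays non-zero and downstream
        // rate/complexity math avoids divide-by-zero.
        auto iterations = double(state.iterations()) * double(state.iterations());
        benchmark::DoNotOptimize(iterations);
      }
    }
    BENCHMARK(BM_empty);
    BENCHMARK_MAIN();
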
diff --git a/test/benchmark_gtest.cc b/test/benchmark_gtest.cc
index 2c9e555..0aa2552 100644
--- a/test/benchmark_gtest.cc
+++ b/test/benchmark_gtest.cc
@@ -38,7 +38,7 @@ TEST(AddRangeTest, Advanced64) {
TEST(AddRangeTest, FullRange8) {
std::vector<int8_t> dst;
- AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), int8_t{8});
+ AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), 8);
EXPECT_THAT(
dst, testing::ElementsAre(int8_t{1}, int8_t{8}, int8_t{64}, int8_t{127}));
}
diff --git a/test/benchmark_test.cc b/test/benchmark_test.cc
index 94590d5..8b14017 100644
--- a/test/benchmark_test.cc
+++ b/test/benchmark_test.cc
@@ -16,6 +16,7 @@
#include <sstream>
#include <string>
#include <thread>
+#include <type_traits>
#include <utility>
#include <vector>
@@ -226,6 +227,31 @@ void BM_non_template_args(benchmark::State& state, int, double) {
}
BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0);
+template <class T, class U, class... ExtraArgs>
+void BM_template2_capture(benchmark::State& state, ExtraArgs&&... extra_args) {
+ static_assert(std::is_same<T, void>::value, "");
+ static_assert(std::is_same<U, char*>::value, "");
+ static_assert(std::is_same<ExtraArgs..., unsigned int>::value, "");
+ unsigned int dummy[sizeof...(ExtraArgs)] = {extra_args...};
+ assert(dummy[0] == 42);
+ for (auto _ : state) {
+ }
+}
+BENCHMARK_TEMPLATE2_CAPTURE(BM_template2_capture, void, char*, foo, 42U);
+BENCHMARK_CAPTURE((BM_template2_capture<void, char*>), foo, 42U);
+
+template <class T, class... ExtraArgs>
+void BM_template1_capture(benchmark::State& state, ExtraArgs&&... extra_args) {
+ static_assert(std::is_same<T, void>::value, "");
+ static_assert(std::is_same<ExtraArgs..., unsigned long>::value, "");
+ unsigned long dummy[sizeof...(ExtraArgs)] = {extra_args...};
+ assert(dummy[0] == 24);
+ for (auto _ : state) {
+ }
+}
+BENCHMARK_TEMPLATE1_CAPTURE(BM_template1_capture, void, foo, 24UL);
+BENCHMARK_CAPTURE(BM_template1_capture<void>, foo, 24UL);
+
#endif // BENCHMARK_HAS_CXX11
static void BM_DenseThreadRanges(benchmark::State& st) {
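
A side note on the parenthesized BENCHMARK_CAPTURE((BM_template2_capture<void, char*>), ...) form above: the preprocessor splits macro arguments on top-level commas, so the comma inside the template argument list has to be shielded. A minimal sketch of the same trick, with hypothetical names:

    #include <benchmark/benchmark.h>

    template <class T, class U>
    static void BM_pair(benchmark::State& state, int extra) {
      for (auto _ : state) {
        benchmark::DoNotOptimize(extra);
      }
    }
    // The outer parentheses keep the preprocessor from splitting
    // "BM_pair<int, long>" into two macro arguments at the inner comma;
    // the BENCHMARK_TEMPLATE*_CAPTURE macros exist to avoid this dance.
    BENCHMARK_CAPTURE((BM_pair<int, long>), shielded, 42);
    BENCHMARK_MAIN();
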
diff --git a/test/complexity_test.cc b/test/complexity_test.cc
index 76891e0..0729d15 100644
--- a/test/complexity_test.cc
+++ b/test/complexity_test.cc
@@ -69,35 +69,44 @@ int AddComplexityTest(const std::string &test_name,
void BM_Complexity_O1(benchmark::State &state) {
for (auto _ : state) {
- for (int i = 0; i < 1024; ++i) {
- benchmark::DoNotOptimize(i);
+ // This test requires a non-zero CPU time to avoid divide-by-zero
+ benchmark::DoNotOptimize(state.iterations());
+ double tmp = static_cast<double>(state.iterations());
+ benchmark::DoNotOptimize(tmp);
+ for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) {
+ benchmark::DoNotOptimize(state.iterations());
+ tmp *= static_cast<double>(state.iterations());
+ benchmark::DoNotOptimize(tmp);
}
+
+ // always 42ns per iteration
+ state.SetIterationTime(42 * 1e-9);
}
state.SetComplexityN(state.range(0));
}
-BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
-BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity();
BENCHMARK(BM_Complexity_O1)
->Range(1, 1 << 18)
+ ->UseManualTime()
+ ->Complexity(benchmark::o1);
+BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->UseManualTime()->Complexity();
+BENCHMARK(BM_Complexity_O1)
+ ->Range(1, 1 << 18)
+ ->UseManualTime()
->Complexity([](benchmark::IterationCount) { return 1.0; });
-const char *one_test_name = "BM_Complexity_O1";
-const char *big_o_1_test_name = "BM_Complexity_O1_BigO";
-const char *rms_o_1_test_name = "BM_Complexity_O1_RMS";
-const char *enum_big_o_1 = "\\([0-9]+\\)";
-// FIXME: Tolerate both '(1)' and 'lgN' as output when the complexity is auto
-// deduced.
-// See https://github.com/google/benchmark/issues/272
-const char *auto_big_o_1 = "(\\([0-9]+\\))|(lgN)";
+const char *one_test_name = "BM_Complexity_O1/manual_time";
+const char *big_o_1_test_name = "BM_Complexity_O1/manual_time_BigO";
+const char *rms_o_1_test_name = "BM_Complexity_O1/manual_time_RMS";
+const char *enum_auto_big_o_1 = "\\([0-9]+\\)";
const char *lambda_big_o_1 = "f\\(N\\)";
// Add enum tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
- enum_big_o_1, /*family_index=*/0);
+ enum_auto_big_o_1, /*family_index=*/0);
-// Add auto enum tests
+// Add auto tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
- auto_big_o_1, /*family_index=*/1);
+ enum_auto_big_o_1, /*family_index=*/1);
// Add lambda tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
@@ -107,43 +116,44 @@ ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
// --------------------------- Testing BigO O(N) --------------------------- //
// ========================================================================= //
-std::vector<int> ConstructRandomVector(int64_t size) {
- std::vector<int> v;
- v.reserve(static_cast<size_t>(size));
- for (int i = 0; i < size; ++i) {
- v.push_back(static_cast<int>(std::rand() % size));
- }
- return v;
-}
-
void BM_Complexity_O_N(benchmark::State &state) {
- auto v = ConstructRandomVector(state.range(0));
- // Test worst case scenario (item not in vector)
- const int64_t item_not_in_vector = state.range(0) * 2;
for (auto _ : state) {
- auto it = std::find(v.begin(), v.end(), item_not_in_vector);
- benchmark::DoNotOptimize(it);
+ // This test requires a non-zero CPU time to avoid divide-by-zero
+ benchmark::DoNotOptimize(state.iterations());
+ double tmp = static_cast<double>(state.iterations());
+ benchmark::DoNotOptimize(tmp);
+ for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) {
+ benchmark::DoNotOptimize(state.iterations());
+ tmp *= static_cast<double>(state.iterations());
+ benchmark::DoNotOptimize(tmp);
+ }
+
+ // 42ns per iteration per entry
+ state.SetIterationTime(static_cast<double>(state.range(0)) * 42 * 1e-9);
}
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
- ->Range(1 << 10, 1 << 16)
+ ->Range(1 << 10, 1 << 20)
+ ->UseManualTime()
->Complexity(benchmark::oN);
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
- ->Range(1 << 10, 1 << 16)
+ ->Range(1 << 10, 1 << 20)
+ ->UseManualTime()
+ ->Complexity();
+BENCHMARK(BM_Complexity_O_N)
+ ->RangeMultiplier(2)
+ ->Range(1 << 10, 1 << 20)
+ ->UseManualTime()
->Complexity([](benchmark::IterationCount n) -> double {
return static_cast<double>(n);
});
-BENCHMARK(BM_Complexity_O_N)
- ->RangeMultiplier(2)
- ->Range(1 << 10, 1 << 16)
- ->Complexity();
-const char *n_test_name = "BM_Complexity_O_N";
-const char *big_o_n_test_name = "BM_Complexity_O_N_BigO";
-const char *rms_o_n_test_name = "BM_Complexity_O_N_RMS";
+const char *n_test_name = "BM_Complexity_O_N/manual_time";
+const char *big_o_n_test_name = "BM_Complexity_O_N/manual_time_BigO";
+const char *rms_o_n_test_name = "BM_Complexity_O_N/manual_time_RMS";
const char *enum_auto_big_o_n = "N";
const char *lambda_big_o_n = "f\\(N\\)";
@@ -151,40 +161,57 @@ const char *lambda_big_o_n = "f\\(N\\)";
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
enum_auto_big_o_n, /*family_index=*/3);
+// Add auto tests
+ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
+ enum_auto_big_o_n, /*family_index=*/4);
+
// Add lambda tests
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
- lambda_big_o_n, /*family_index=*/4);
+ lambda_big_o_n, /*family_index=*/5);
// ========================================================================= //
-// ------------------------- Testing BigO O(N*lgN) ------------------------- //
+// ------------------------- Testing BigO O(NlgN) ------------------------- //
// ========================================================================= //
+static const double kLog2E = 1.44269504088896340736;
static void BM_Complexity_O_N_log_N(benchmark::State &state) {
- auto v = ConstructRandomVector(state.range(0));
for (auto _ : state) {
- std::sort(v.begin(), v.end());
+ // This test requires a non-zero CPU time to avoid divide-by-zero
+ benchmark::DoNotOptimize(state.iterations());
+ double tmp = static_cast<double>(state.iterations());
+ benchmark::DoNotOptimize(tmp);
+ for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) {
+ benchmark::DoNotOptimize(state.iterations());
+ tmp *= static_cast<double>(state.iterations());
+ benchmark::DoNotOptimize(tmp);
+ }
+
+ state.SetIterationTime(static_cast<double>(state.range(0)) * kLog2E *
+ std::log(state.range(0)) * 42 * 1e-9);
}
state.SetComplexityN(state.range(0));
}
-static const double kLog2E = 1.44269504088896340736;
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
- ->Range(1 << 10, 1 << 16)
+ ->Range(1 << 10, 1U << 24)
+ ->UseManualTime()
->Complexity(benchmark::oNLogN);
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
- ->Range(1 << 10, 1 << 16)
- ->Complexity([](benchmark::IterationCount n) {
- return kLog2E * static_cast<double>(n) * log(static_cast<double>(n));
- });
+ ->Range(1 << 10, 1U << 24)
+ ->UseManualTime()
+ ->Complexity();
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
- ->Range(1 << 10, 1 << 16)
- ->Complexity();
+ ->Range(1 << 10, 1U << 24)
+ ->UseManualTime()
+ ->Complexity([](benchmark::IterationCount n) {
+ return kLog2E * static_cast<double>(n) * std::log(static_cast<double>(n));
+ });
-const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N";
-const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO";
-const char *rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_RMS";
+const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N/manual_time";
+const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N/manual_time_BigO";
+const char *rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N/manual_time_RMS";
const char *enum_auto_big_o_n_lg_n = "NlgN";
const char *lambda_big_o_n_lg_n = "f\\(N\\)";
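
For reference, the kLog2E constant used in the manual timing above is just the change-of-base factor that turns the natural log into a base-2 log:

\[
N \log_2 N \;=\; N \cdot \frac{\ln N}{\ln 2} \;=\; \log_2(e)\, N \ln N \;\approx\; 1.442695\, N \ln N .
\]
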
@@ -193,11 +220,16 @@ ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n,
/*family_index=*/6);
-// Add lambda tests
+// NOTE: auto big-o is wrong.
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
- rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n,
+ rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n,
/*family_index=*/7);
+// Add lambda tests
+ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
+ rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n,
+ /*family_index=*/8);
+
// ========================================================================= //
// -------- Testing formatting of Complexity with captured args ------------ //
// ========================================================================= //
@@ -205,21 +237,31 @@ ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
void BM_ComplexityCaptureArgs(benchmark::State &state, int n) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
- auto iterations = state.iterations();
- benchmark::DoNotOptimize(iterations);
+ benchmark::DoNotOptimize(state.iterations());
+ double tmp = static_cast<double>(state.iterations());
+ benchmark::DoNotOptimize(tmp);
+ for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) {
+ benchmark::DoNotOptimize(state.iterations());
+ tmp *= static_cast<double>(state.iterations());
+ benchmark::DoNotOptimize(tmp);
+ }
+
+ state.SetIterationTime(static_cast<double>(state.range(0)) * 42 * 1e-9);
}
state.SetComplexityN(n);
}
BENCHMARK_CAPTURE(BM_ComplexityCaptureArgs, capture_test, 100)
+ ->UseManualTime()
->Complexity(benchmark::oN)
->Ranges({{1, 2}, {3, 4}});
const std::string complexity_capture_name =
- "BM_ComplexityCaptureArgs/capture_test";
+ "BM_ComplexityCaptureArgs/capture_test/manual_time";
ADD_COMPLEXITY_CASES(complexity_capture_name, complexity_capture_name + "_BigO",
- complexity_capture_name + "_RMS", "N", /*family_index=*/9);
+ complexity_capture_name + "_RMS", "N",
+ /*family_index=*/9);
// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
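
Taken together, the rewritten complexity tests all follow one pattern: fabricate a perfectly deterministic per-iteration time with SetIterationTime() and register the benchmark with UseManualTime(), so the fitted Big-O no longer depends on machine noise. A condensed sketch (hypothetical benchmark name; only APIs already visible in this diff):

    #include <benchmark/benchmark.h>

    static void BM_ManualLinear(benchmark::State& state) {
      for (auto _ : state) {
        // Report an exact, noise-free time: 42ns per element.
        state.SetIterationTime(static_cast<double>(state.range(0)) * 42e-9);
      }
      state.SetComplexityN(state.range(0));
    }
    // UseManualTime() makes the complexity fit consume the reported times,
    // so the deduced complexity is deterministically O(N).
    BENCHMARK(BM_ManualLinear)
        ->RangeMultiplier(2)
        ->Range(1 << 10, 1 << 20)
        ->UseManualTime()
        ->Complexity(benchmark::oN);
    BENCHMARK_MAIN();
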
diff --git a/test/diagnostics_test.cc b/test/diagnostics_test.cc
index 0cd3edb..7c68a98 100644
--- a/test/diagnostics_test.cc
+++ b/test/diagnostics_test.cc
@@ -49,7 +49,7 @@ void BM_diagnostic_test(benchmark::State& state) {
if (called_once == false) try_invalid_pause_resume(state);
for (auto _ : state) {
- auto iterations = state.iterations();
+ auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
@@ -65,7 +65,7 @@ void BM_diagnostic_test_keep_running(benchmark::State& state) {
if (called_once == false) try_invalid_pause_resume(state);
while (state.KeepRunning()) {
- auto iterations = state.iterations();
+ auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
diff --git a/test/link_main_test.cc b/test/link_main_test.cc
index e806500..131937e 100644
--- a/test/link_main_test.cc
+++ b/test/link_main_test.cc
@@ -2,7 +2,7 @@
void BM_empty(benchmark::State& state) {
for (auto _ : state) {
- auto iterations = state.iterations();
+ auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
}
diff --git a/test/memory_manager_test.cc b/test/memory_manager_test.cc
index d94bd51..4df674d 100644
--- a/test/memory_manager_test.cc
+++ b/test/memory_manager_test.cc
@@ -14,7 +14,7 @@ class TestMemoryManager : public benchmark::MemoryManager {
void BM_empty(benchmark::State& state) {
for (auto _ : state) {
- auto iterations = state.iterations();
+ auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
}
diff --git a/test/output_test_helper.cc b/test/output_test_helper.cc
index 2567370..265f28a 100644
--- a/test/output_test_helper.cc
+++ b/test/output_test_helper.cc
@@ -65,6 +65,7 @@ SubMap& GetSubstitutions() {
{"%csv_us_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",us,,,,,"},
{"%csv_ms_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ms,,,,,"},
{"%csv_s_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",s,,,,,"},
+ {"%csv_cv_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",,,,,,"},
{"%csv_bytes_report",
"[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns," + safe_dec_re + ",,,,"},
{"%csv_items_report",
diff --git a/test/perf_counters_gtest.cc b/test/perf_counters_gtest.cc
index 54c7863..2e63049 100644
--- a/test/perf_counters_gtest.cc
+++ b/test/perf_counters_gtest.cc
@@ -41,7 +41,7 @@ TEST(PerfCountersTest, NegativeTest) {
return;
}
EXPECT_TRUE(PerfCounters::Initialize());
- // Sanity checks
+ // Safety checks
// Create() will always create a valid object, even if passed no or
// wrong arguments as the new behavior is to warn and drop unsupported
// counters
diff --git a/test/perf_counters_test.cc b/test/perf_counters_test.cc
index b0a3ab0..3cc593e 100644
--- a/test/perf_counters_test.cc
+++ b/test/perf_counters_test.cc
@@ -14,7 +14,7 @@ BM_DECLARE_string(benchmark_perf_counters);
static void BM_Simple(benchmark::State& state) {
for (auto _ : state) {
- auto iterations = state.iterations();
+ auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
}
diff --git a/test/reporter_output_test.cc b/test/reporter_output_test.cc
index 2eb545a..7867165 100644
--- a/test/reporter_output_test.cc
+++ b/test/reporter_output_test.cc
@@ -55,6 +55,9 @@ static int AddContextCases() {
{{"Load Average: (%float, ){0,2}%float$", MR_Next}});
}
AddCases(TC_JSONOut, {{"\"load_avg\": \\[(%float,?){0,3}],$", MR_Next}});
+ AddCases(TC_JSONOut, {{"\"library_version\": \".*\",$", MR_Next}});
+ AddCases(TC_JSONOut, {{"\"library_build_type\": \".*\",$", MR_Next}});
+ AddCases(TC_JSONOut, {{"\"json_schema_version\": 1$", MR_Next}});
return 0;
}
int dummy_register = AddContextCases();
@@ -93,7 +96,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}});
void BM_bytes_per_second(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
- auto iterations = state.iterations();
+ auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
state.SetBytesProcessed(1);
@@ -125,7 +128,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}});
void BM_items_per_second(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
- auto iterations = state.iterations();
+ auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
state.SetItemsProcessed(1);
@@ -406,7 +409,7 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"},
void BM_Complexity_O1(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
- auto iterations = state.iterations();
+ auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
state.SetComplexityN(state.range(0));
@@ -1088,7 +1091,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_UserPercentStats/iterations:5/repeats:3/"
{"^\"BM_UserPercentStats/iterations:5/repeats:3/"
"manual_time_stddev\",%csv_report$"},
{"^\"BM_UserPercentStats/iterations:5/repeats:3/"
- "manual_time_\",%csv_report$"}});
+ "manual_time_\",%csv_cv_report$"}});
// ========================================================================= //
// ------------------------- Testing StrEscape JSON ------------------------ //
diff --git a/test/skip_with_error_test.cc b/test/skip_with_error_test.cc
index b4c5e15..2139a19 100644
--- a/test/skip_with_error_test.cc
+++ b/test/skip_with_error_test.cc
@@ -143,7 +143,7 @@ ADD_CASES("BM_error_during_running_ranged_for",
void BM_error_after_running(benchmark::State& state) {
for (auto _ : state) {
- auto iterations = state.iterations();
+ auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
if (state.thread_index() <= (state.threads() / 2))
diff --git a/test/statistics_gtest.cc b/test/statistics_gtest.cc
index 1de2d87..48c7726 100644
--- a/test/statistics_gtest.cc
+++ b/test/statistics_gtest.cc
@@ -28,8 +28,8 @@ TEST(StatisticsTest, StdDev) {
TEST(StatisticsTest, CV) {
EXPECT_DOUBLE_EQ(benchmark::StatisticsCV({101, 101, 101, 101}), 0.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsCV({1, 2, 3}), 1. / 2.);
- EXPECT_DOUBLE_EQ(benchmark::StatisticsCV({2.5, 2.4, 3.3, 4.2, 5.1}),
- 0.32888184094918121);
+ ASSERT_NEAR(benchmark::StatisticsCV({2.5, 2.4, 3.3, 4.2, 5.1}),
+ 0.32888184094918121, 1e-15);
}
} // end namespace
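
As a check on the 1. / 2. expectation above, the coefficient of variation for the sample {1, 2, 3} works out with the sample (Bessel-corrected) standard deviation:

\[
\bar{x} = \frac{1+2+3}{3} = 2, \qquad
s = \sqrt{\frac{(1-2)^2 + (2-2)^2 + (3-2)^2}{3-1}} = 1, \qquad
\mathrm{CV} = \frac{s}{\bar{x}} = \frac{1}{2}.
\]
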
diff --git a/test/user_counters_tabular_test.cc b/test/user_counters_tabular_test.cc
index c98b769..cfc1ab0 100644
--- a/test/user_counters_tabular_test.cc
+++ b/test/user_counters_tabular_test.cc
@@ -63,6 +63,9 @@ ADD_CASES(TC_CSVOut, {{"%csv_header,"
void BM_Counters_Tabular(benchmark::State& state) {
for (auto _ : state) {
+ // This test requires a non-zero CPU time to avoid divide-by-zero
+ auto iterations = double(state.iterations()) * double(state.iterations());
+ benchmark::DoNotOptimize(iterations);
}
namespace bm = benchmark;
state.counters.insert({
@@ -330,7 +333,7 @@ ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_Tabular/repeats:2/threads:1_stddev\",%csv_report,"
"%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
- {{"^\"BM_Counters_Tabular/repeats:2/threads:1_cv\",%csv_report,"
+ {{"^\"BM_Counters_Tabular/repeats:2/threads:1_cv\",%csv_cv_report,"
"%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_Tabular/repeats:2/threads:2\",%csv_report,"
@@ -348,7 +351,7 @@ ADD_CASES(TC_CSVOut,
{{"^\"BM_Counters_Tabular/repeats:2/threads:2_stddev\",%csv_report,"
"%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
- {{"^\"BM_Counters_Tabular/repeats:2/threads:2_cv\",%csv_report,"
+ {{"^\"BM_Counters_Tabular/repeats:2/threads:2_cv\",%csv_cv_report,"
"%float,%float,%float,%float,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
@@ -372,7 +375,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/repeats:2/threads:2$",
void BM_CounterRates_Tabular(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
- auto iterations = state.iterations();
+ auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
namespace bm = benchmark;
diff --git a/test/user_counters_test.cc b/test/user_counters_test.cc
index 4cd8ee3..22252ac 100644
--- a/test/user_counters_test.cc
+++ b/test/user_counters_test.cc
@@ -67,7 +67,7 @@ int num_calls1 = 0;
void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
- auto iterations = state.iterations();
+ auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
state.counters["foo"] = 1;
@@ -119,7 +119,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec",
void BM_Counters_Rate(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
- auto iterations = state.iterations();
+ auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
namespace bm = benchmark;
@@ -163,7 +163,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate);
void BM_Invert(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
- auto iterations = state.iterations();
+ auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
namespace bm = benchmark;
@@ -204,7 +204,7 @@ CHECK_BENCHMARK_RESULTS("BM_Invert", &CheckInvert);
void BM_Counters_InvertedRate(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
- auto iterations = state.iterations();
+ auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
namespace bm = benchmark;
@@ -333,7 +333,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int",
void BM_Counters_AvgThreadsRate(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
- auto iterations = state.iterations();
+ auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
namespace bm = benchmark;
@@ -421,7 +421,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant",
void BM_Counters_kIsIterationInvariantRate(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
- auto iterations = state.iterations();
+ auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
namespace bm = benchmark;
@@ -513,7 +513,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_AvgIterations", &CheckAvgIterations);
void BM_Counters_kAvgIterationsRate(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
- auto iterations = state.iterations();
+ auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
namespace bm = benchmark;