author    | Haibo Huang <hhb@google.com> | 2020-10-28 22:20:42 -0700
committer | Haibo Huang <hhb@google.com> | 2020-10-28 22:20:42 -0700
commit    | 846b7549b6b43c6947f53b1e94968558d5a36830 (patch)
tree      | 2179868c66684e02b344ce1c009994e0307e8d8b
parent    | ee19aadf35933890f9ea1d5bb361bc259a179df7 (diff)
parent    | dce3322a549650d18f50b5f1428a5942327ab6a5 (diff)
download  | google-benchmark-846b7549b6b43c6947f53b1e94968558d5a36830.tar.gz
Upgrade google-benchmark to dce3322a549650d18f50b5f1428a5942327ab6a5
Test: make
Change-Id: I07850ca99c79dd2225cb48821200a365b90e4d2c
-rw-r--r-- | .github/workflows/test_bindings.yml           |  24
-rw-r--r-- | AUTHORS                                       |   1
-rw-r--r-- | CONTRIBUTORS                                  |   2
-rw-r--r-- | METADATA                                      |   6
-rw-r--r-- | README.md                                     |   6
-rw-r--r-- | bindings/python/google_benchmark/__init__.py  | 103
-rw-r--r-- | bindings/python/google_benchmark/benchmark.cc | 154
-rw-r--r-- | bindings/python/google_benchmark/example.py   |  94
-rw-r--r-- | cmake/benchmark.pc.in                         |   4
-rw-r--r-- | setup.py                                      | 116
-rw-r--r-- | src/benchmark_register.h                      |   1
-rw-r--r-- | src/cycleclock.h                              |   7
-rw-r--r-- | src/internal_macros.h                         |  10
-rw-r--r-- | src/sysinfo.cc                                |  12
-rw-r--r-- | src/timers.cc                                 |   3
-rwxr-xr-x | tools/compare.py                              |  19
-rw-r--r-- | tools/gbench/report.py                        | 620
17 files changed, 964 insertions, 218 deletions
diff --git a/.github/workflows/test_bindings.yml b/.github/workflows/test_bindings.yml
new file mode 100644
index 0000000..273d7f9
--- /dev/null
+++ b/.github/workflows/test_bindings.yml
@@ -0,0 +1,24 @@
+name: test-bindings
+
+on:
+  push:
+    branches: [master]
+  pull_request:
+    branches: [master]
+
+jobs:
+  python_bindings:
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v2
+      - name: Set up Python
+        uses: actions/setup-python@v1
+        with:
+          python-version: 3.8
+      - name: Install benchmark
+        run:
+          python setup.py install
+      - name: Run example bindings
+        run:
+          python bindings/python/google_benchmark/example.py
diff --git a/AUTHORS b/AUTHORS
@@ -55,3 +55,4 @@ Stripe, Inc.
 Yixuan Qiu <yixuanq@gmail.com>
 Yusuke Suzuki <utatane.tea@gmail.com>
 Zbigniew Skowron <zbychs@gmail.com>
+Min-Yih Hsu <yihshyng223@gmail.com>
diff --git a/CONTRIBUTORS b/CONTRIBUTORS
index 6beed71..d2f6435 100644
--- a/CONTRIBUTORS
+++ b/CONTRIBUTORS
@@ -41,6 +41,7 @@ Eric Backus <eric_backus@alum.mit.edu>
 Eric Fiselier <eric@efcs.ca>
 Eugene Zhuk <eugene.zhuk@gmail.com>
 Evgeny Safronov <division494@gmail.com>
+Fanbo Meng <fanbo.meng@ibm.com>
 Federico Ficarelli <federico.ficarelli@gmail.com>
 Felix Homann <linuxaudio@showlabor.de>
 Geoffrey Martin-Noble <gcmn@google.com> <gmngeoffrey@gmail.com>
@@ -77,3 +78,4 @@ Tom Madams <tom.ej.madams@gmail.com> <tmadams@google.com>
 Yixuan Qiu <yixuanq@gmail.com>
 Yusuke Suzuki <utatane.tea@gmail.com>
 Zbigniew Skowron <zbychs@gmail.com>
+Min-Yih Hsu <yihshyng223@gmail.com>
diff --git a/METADATA b/METADATA
@@ -9,11 +9,11 @@ third_party {
     type: GIT
     value: "https://github.com/google/benchmark.git"
   }
-  version: "beb360d03e2a1a2673d9c2cf408c13b69fdb5627"
+  version: "dce3322a549650d18f50b5f1428a5942327ab6a5"
   license_type: NOTICE
   last_upgrade_date {
     year: 2020
-    month: 9
-    day: 9
+    month: 10
+    day: 28
   }
 }
diff --git a/README.md b/README.md
@@ -1,9 +1,13 @@
 # Benchmark
 
+[![build-and-test](https://github.com/google/benchmark/workflows/build-and-test/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Abuild-and-test)
+[![pylint](https://github.com/google/benchmark/workflows/pylint/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Apylint)
+[![test-bindings](https://github.com/google/benchmark/workflows/test-bindings/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Atest-bindings)
+
 [![Build Status](https://travis-ci.org/google/benchmark.svg?branch=master)](https://travis-ci.org/google/benchmark)
 [![Build status](https://ci.appveyor.com/api/projects/status/u0qsyp7t1tk7cpxs/branch/master?svg=true)](https://ci.appveyor.com/project/google/benchmark/branch/master)
 [![Coverage Status](https://coveralls.io/repos/google/benchmark/badge.svg)](https://coveralls.io/r/google/benchmark)
-[![slackin](https://slackin-iqtfqnpzxd.now.sh/badge.svg)](https://slackin-iqtfqnpzxd.now.sh/)
+
 A library to benchmark code snippets, similar to unit tests. Example:
diff --git a/bindings/python/google_benchmark/__init__.py b/bindings/python/google_benchmark/__init__.py
index 44531f9..787c423 100644
--- a/bindings/python/google_benchmark/__init__.py
+++ b/bindings/python/google_benchmark/__init__.py
@@ -29,22 +29,111 @@ Example usage:
 from absl import app
 from google_benchmark import _benchmark
+from google_benchmark._benchmark import (
+    Counter,
+    kNanosecond,
+    kMicrosecond,
+    kMillisecond,
+    oNone,
+    o1,
+    oN,
+    oNSquared,
+    oNCubed,
+    oLogN,
+    oNLogN,
+    oAuto,
+    oLambda,
+)
+
 __all__ = [
     "register",
     "main",
+    "Counter",
+    "kNanosecond",
+    "kMicrosecond",
+    "kMillisecond",
+    "oNone",
+    "o1",
+    "oN",
+    "oNSquared",
+    "oNCubed",
+    "oLogN",
+    "oNLogN",
+    "oAuto",
+    "oLambda",
 ]
 
-__version__ = "0.1.0"
+__version__ = "0.2.0"
+
+
+class __OptionMaker:
+    """A stateless class to collect benchmark options.
+
+    Collects all decorator calls like @option.range(start=0, limit=1<<5).
+    """
+
+    class Options:
+        """Pure data class to store option calls, along with the benchmarked function."""
+
+        def __init__(self, func):
+            self.func = func
+            self.builder_calls = []
+
+    @classmethod
+    def make(cls, func_or_options):
+        """Make Options from Options or the benchmarked function."""
+        if isinstance(func_or_options, cls.Options):
+            return func_or_options
+        return cls.Options(func_or_options)
+
+    def __getattr__(self, builder_name):
+        """Append an option call to the Options."""
+
+        # The function that gets returned on @option.range(start=0, limit=1<<5).
+        def __builder_method(*args, **kwargs):
+
+            # The decorator that gets called, either with the benchmarked function
+            # or the previous Options.
+            def __decorator(func_or_options):
+                options = self.make(func_or_options)
+                options.builder_calls.append((builder_name, args, kwargs))
+                # The decorator returns Options so it is not technically a decorator
+                # and needs a final call to @register.
+                return options
 
-def register(f=None, *, name=None):
-    if f is None:
+            return __decorator
+
+        return __builder_method
+
+
+# Alias for a nicer API.
+# We have to instantiate an object, even if stateless, to be able to use __getattr__
+# on option.range.
+option = __OptionMaker()
+
+
+def register(undefined=None, *, name=None):
+    """Register a function for benchmarking."""
+    if undefined is None:
+        # The decorator was called without parentheses, so we return a decorator.
         return lambda f: register(f, name=name)
+
+    # We have either the function to benchmark (simple case) or an instance of Options
+    # (@option._ case).
+    options = __OptionMaker.make(undefined)
+
     if name is None:
-        name = f.__name__
-    _benchmark.RegisterBenchmark(name, f)
-    return f
+        name = options.func.__name__
+
+    # We register the benchmark and replay all the @option._ calls onto the
+    # benchmark builder pattern.
+    benchmark = _benchmark.RegisterBenchmark(name, options.func)
+    for name, args, kwargs in options.builder_calls[::-1]:
+        getattr(benchmark, name)(*args, **kwargs)
+
+    # Return the benchmarked function, because the decorator does not modify it.
+    return options.func
 
 
 def _flags_parser(argv):
@@ -54,7 +143,7 @@ def _flags_parser(argv):
 
 def _run_benchmarks(argv):
     if len(argv) > 1:
-        raise app.UsageError('Too many command-line arguments.')
+        raise app.UsageError("Too many command-line arguments.")
     return _benchmark.RunSpecifiedBenchmarks()
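For orientation, the __OptionMaker plumbing above is easiest to read from the user's side: each @option._ decorator only records a builder call, and the final @register replays the recorded calls onto the C++ Benchmark builder. A minimal sketch, mirroring the example file added later in this commit (it assumes the built google_benchmark package is importable; sum_range is a made-up benchmark name):

    import google_benchmark as benchmark

    @benchmark.register                        # registers, then replays recorded options
    @benchmark.option.range_multiplier(2)      # recorded into Options.builder_calls
    @benchmark.option.range(1 << 10, 1 << 18)  # recorded first (decorators apply bottom-up)
    def sum_range(state):
        while state:
            sum(range(state.range(0)))

    if __name__ == "__main__":
        benchmark.main()

Because decorators apply bottom-up, builder_calls is replayed with [::-1] so the options take effect in the order they are written above the function.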
diff --git a/bindings/python/google_benchmark/benchmark.cc b/bindings/python/google_benchmark/benchmark.cc
index 374bf54..a733339 100644
--- a/bindings/python/google_benchmark/benchmark.cc
+++ b/bindings/python/google_benchmark/benchmark.cc
@@ -1,8 +1,17 @@
 // Benchmark for Python.
 
-#include "benchmark/benchmark.h"
+#include <map>
+#include <string>
+#include <vector>
+
+#include "pybind11/operators.h"
 #include "pybind11/pybind11.h"
 #include "pybind11/stl.h"
+#include "pybind11/stl_bind.h"
+
+#include "benchmark/benchmark.h"
+
+PYBIND11_MAKE_OPAQUE(benchmark::UserCounters);
 
 namespace {
 namespace py = ::pybind11;
@@ -28,21 +37,144 @@ std::vector<std::string> Initialize(const std::vector<std::string>& argv) {
   return remaining_argv;
 }
 
-void RegisterBenchmark(const char* name, py::function f) {
-  benchmark::RegisterBenchmark(name, [f](benchmark::State& state) {
-    f(&state);
-  });
+benchmark::internal::Benchmark* RegisterBenchmark(const char* name,
+                                                  py::function f) {
+  return benchmark::RegisterBenchmark(
+      name, [f](benchmark::State& state) { f(&state); });
 }
 
 PYBIND11_MODULE(_benchmark, m) {
+  using benchmark::TimeUnit;
+  py::enum_<TimeUnit>(m, "TimeUnit")
+      .value("kNanosecond", TimeUnit::kNanosecond)
+      .value("kMicrosecond", TimeUnit::kMicrosecond)
+      .value("kMillisecond", TimeUnit::kMillisecond)
+      .export_values();
+
+  using benchmark::BigO;
+  py::enum_<BigO>(m, "BigO")
+      .value("oNone", BigO::oNone)
+      .value("o1", BigO::o1)
+      .value("oN", BigO::oN)
+      .value("oNSquared", BigO::oNSquared)
+      .value("oNCubed", BigO::oNCubed)
+      .value("oLogN", BigO::oLogN)
+      .value("oNLogN", BigO::oNLogN)
+      .value("oAuto", BigO::oAuto)
+      .value("oLambda", BigO::oLambda)
+      .export_values();
+
+  using benchmark::internal::Benchmark;
+  py::class_<Benchmark>(m, "Benchmark")
+      // For methods returning a pointer to the current object, the reference
+      // return policy is used to ask pybind11 not to take ownership of the
+      // returned object and to avoid calling delete on it.
+      // https://pybind11.readthedocs.io/en/stable/advanced/functions.html#return-value-policies
+      //
+      // For methods taking a const std::vector<...>&, a copy is created
+      // because it is bound to a Python list.
+      // https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html
+      .def("unit", &Benchmark::Unit, py::return_value_policy::reference)
+      .def("arg", &Benchmark::Arg, py::return_value_policy::reference)
+      .def("args", &Benchmark::Args, py::return_value_policy::reference)
+      .def("range", &Benchmark::Range, py::return_value_policy::reference,
+           py::arg("start"), py::arg("limit"))
+      .def("dense_range", &Benchmark::DenseRange,
+           py::return_value_policy::reference, py::arg("start"),
+           py::arg("limit"), py::arg("step") = 1)
+      .def("ranges", &Benchmark::Ranges, py::return_value_policy::reference)
+      .def("args_product", &Benchmark::ArgsProduct,
+           py::return_value_policy::reference)
+      .def("arg_name", &Benchmark::ArgName, py::return_value_policy::reference)
+      .def("arg_names", &Benchmark::ArgNames,
+           py::return_value_policy::reference)
+      .def("range_pair", &Benchmark::RangePair,
+           py::return_value_policy::reference, py::arg("lo1"), py::arg("hi1"),
+           py::arg("lo2"), py::arg("hi2"))
+      .def("range_multiplier", &Benchmark::RangeMultiplier,
+           py::return_value_policy::reference)
+      .def("min_time", &Benchmark::MinTime, py::return_value_policy::reference)
+      .def("iterations", &Benchmark::Iterations,
+           py::return_value_policy::reference)
+      .def("repetitions", &Benchmark::Repetitions,
+           py::return_value_policy::reference)
+      .def("report_aggregates_only", &Benchmark::ReportAggregatesOnly,
+           py::return_value_policy::reference, py::arg("value") = true)
+      .def("display_aggregates_only", &Benchmark::DisplayAggregatesOnly,
+           py::return_value_policy::reference, py::arg("value") = true)
+      .def("measure_process_cpu_time", &Benchmark::MeasureProcessCPUTime,
+           py::return_value_policy::reference)
+      .def("use_real_time", &Benchmark::UseRealTime,
+           py::return_value_policy::reference)
+      .def("use_manual_time", &Benchmark::UseManualTime,
+           py::return_value_policy::reference)
+      .def(
+          "complexity",
+          (Benchmark * (Benchmark::*)(benchmark::BigO)) & Benchmark::Complexity,
+          py::return_value_policy::reference,
+          py::arg("complexity") = benchmark::oAuto);
+
+  using benchmark::Counter;
+  py::class_<Counter> py_counter(m, "Counter");
+
+  py::enum_<Counter::Flags>(py_counter, "Flags")
+      .value("kDefaults", Counter::Flags::kDefaults)
+      .value("kIsRate", Counter::Flags::kIsRate)
+      .value("kAvgThreads", Counter::Flags::kAvgThreads)
+      .value("kAvgThreadsRate", Counter::Flags::kAvgThreadsRate)
+      .value("kIsIterationInvariant", Counter::Flags::kIsIterationInvariant)
+      .value("kIsIterationInvariantRate",
+             Counter::Flags::kIsIterationInvariantRate)
+      .value("kAvgIterations", Counter::Flags::kAvgIterations)
+      .value("kAvgIterationsRate", Counter::Flags::kAvgIterationsRate)
+      .value("kInvert", Counter::Flags::kInvert)
+      .export_values()
+      .def(py::self | py::self);
+
+  py::enum_<Counter::OneK>(py_counter, "OneK")
+      .value("kIs1000", Counter::OneK::kIs1000)
+      .value("kIs1024", Counter::OneK::kIs1024)
+      .export_values();
+
+  py_counter
+      .def(py::init<double, Counter::Flags, Counter::OneK>(),
+           py::arg("value") = 0., py::arg("flags") = Counter::kDefaults,
+           py::arg("k") = Counter::kIs1000)
+      .def(py::init([](double value) { return Counter(value); }))
+      .def_readwrite("value", &Counter::value)
+      .def_readwrite("flags", &Counter::flags)
+      .def_readwrite("oneK", &Counter::oneK);
+  py::implicitly_convertible<py::float_, Counter>();
+  py::implicitly_convertible<py::int_, Counter>();
+
+  py::bind_map<benchmark::UserCounters>(m, "UserCounters");
+
+  using benchmark::State;
+  py::class_<State>(m, "State")
+      .def("__bool__", &State::KeepRunning)
+      .def_property_readonly("keep_running", &State::KeepRunning)
+      .def("pause_timing", &State::PauseTiming)
+      .def("resume_timing", &State::ResumeTiming)
+      .def("skip_with_error", &State::SkipWithError)
+      .def_property_readonly("error_occured", &State::error_occurred)
+      .def("set_iteration_time", &State::SetIterationTime)
+      .def_property("bytes_processed", &State::bytes_processed,
+                    &State::SetBytesProcessed)
+      .def_property("complexity_n", &State::complexity_length_n,
+                    &State::SetComplexityN)
+      .def_property("items_processed", &State::items_processed,
+                    &State::SetItemsProcessed)
+      .def("set_label", (void (State::*)(const char*)) & State::SetLabel)
+      .def("range", &State::range, py::arg("pos") = 0)
+      .def_property_readonly("iterations", &State::iterations)
+      .def_readwrite("counters", &State::counters)
+      .def_readonly("thread_index", &State::thread_index)
+      .def_readonly("threads", &State::threads);
+
   m.def("Initialize", Initialize);
-  m.def("RegisterBenchmark", RegisterBenchmark);
+  m.def("RegisterBenchmark", RegisterBenchmark,
+        py::return_value_policy::reference);
   m.def("RunSpecifiedBenchmarks",
         []() { benchmark::RunSpecifiedBenchmarks(); });
-
-  py::class_<benchmark::State>(m, "State")
-      .def("__bool__", &benchmark::State::KeepRunning)
-      .def_property_readonly("keep_running", &benchmark::State::KeepRunning)
-      .def("skip_with_error", &benchmark::State::SkipWithError);
 };
 
 }  // namespace
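Two details of these bindings are worth calling out: Counter flags gain a Python-side operator| through .def(py::self | py::self), and the two implicitly_convertible registrations let plain numbers stand in for Counter. A small illustrative sketch (assuming the built extension is importable):

    from google_benchmark import Counter

    # Flags combine with |, exactly like the C++ enum:
    flags = Counter.kIsRate | Counter.kInvert

    c = Counter(42.0, flags, Counter.kIs1024)
    print(c.value, c.oneK)  # fields are read-write via def_readwrite

    # Inside a benchmark body, a bare number also works, because pybind11
    # implicitly converts float/int to Counter:
    #     state.counters["items"] = 42.0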
.def_property_readonly("keep_running", &State::KeepRunning) + .def("pause_timing", &State::PauseTiming) + .def("resume_timing", &State::ResumeTiming) + .def("skip_with_error", &State::SkipWithError) + .def_property_readonly("error_occured", &State::error_occurred) + .def("set_iteration_time", &State::SetIterationTime) + .def_property("bytes_processed", &State::bytes_processed, + &State::SetBytesProcessed) + .def_property("complexity_n", &State::complexity_length_n, + &State::SetComplexityN) + .def_property("items_processed", &State::items_processed, + &State::SetItemsProcessed) + .def("set_label", (void (State::*)(const char*)) & State::SetLabel) + .def("range", &State::range, py::arg("pos") = 0) + .def_property_readonly("iterations", &State::iterations) + .def_readwrite("counters", &State::counters) + .def_readonly("thread_index", &State::thread_index) + .def_readonly("threads", &State::threads); + m.def("Initialize", Initialize); - m.def("RegisterBenchmark", RegisterBenchmark); + m.def("RegisterBenchmark", RegisterBenchmark, + py::return_value_policy::reference); m.def("RunSpecifiedBenchmarks", []() { benchmark::RunSpecifiedBenchmarks(); }); - - py::class_<benchmark::State>(m, "State") - .def("__bool__", &benchmark::State::KeepRunning) - .def_property_readonly("keep_running", &benchmark::State::KeepRunning) - .def("skip_with_error", &benchmark::State::SkipWithError); }; } // namespace diff --git a/bindings/python/google_benchmark/example.py b/bindings/python/google_benchmark/example.py index 0dead75..9134e8c 100644 --- a/bindings/python/google_benchmark/example.py +++ b/bindings/python/google_benchmark/example.py @@ -20,7 +20,11 @@ In the extracted directory, execute: python setup.py install """ +import random +import time + import google_benchmark as benchmark +from google_benchmark import Counter @benchmark.register @@ -34,15 +38,99 @@ def sum_million(state): while state: sum(range(1_000_000)) +@benchmark.register +def pause_timing(state): + """Pause timing every iteration.""" + while state: + # Construct a list of random ints every iteration without timing it + state.pause_timing() + random_list = [random.randint(0, 100) for _ in range(100)] + state.resume_timing() + # Time the in place sorting algorithm + random_list.sort() + @benchmark.register def skipped(state): if True: # Test some predicate here. - state.skip_with_error('some error') + state.skip_with_error("some error") return # NOTE: You must explicitly return, or benchmark will continue. - # Benchmark code would be here. + ... # Benchmark code would be here. + + +@benchmark.register +def manual_timing(state): + while state: + # Manually count Python CPU time + start = time.perf_counter() # perf_counter_ns() in Python 3.7+ + # Something to benchmark + time.sleep(0.01) + end = time.perf_counter() + state.set_iteration_time(end - start) + + +@benchmark.register +def custom_counters(state): + """Collect cutom metric using benchmark.Counter.""" + num_foo = 0.0 + while state: + # Benchmark some code here + pass + # Collect some custom metric named foo + num_foo += 0.13 + + # Automatic Counter from numbers. + state.counters["foo"] = num_foo + # Set a counter as a rate. + state.counters["foo_rate"] = Counter(num_foo, Counter.kIsRate) + # Set a counter as an inverse of rate. + state.counters["foo_inv_rate"] = Counter(num_foo, Counter.kIsRate | Counter.kInvert) + # Set a counter as a thread-average quantity. 
+ state.counters["foo_avg"] = Counter(num_foo, Counter.kAvgThreads) + # There's also a combined flag: + state.counters["foo_avg_rate"] = Counter(num_foo, Counter.kAvgThreadsRate) + + +@benchmark.register +@benchmark.option.measure_process_cpu_time() +@benchmark.option.use_real_time() +def with_options(state): + while state: + sum(range(1_000_000)) + + +@benchmark.register(name="sum_million_microseconds") +@benchmark.option.unit(benchmark.kMicrosecond) +def with_options(state): + while state: + sum(range(1_000_000)) + + +@benchmark.register +@benchmark.option.arg(100) +@benchmark.option.arg(1000) +def passing_argument(state): + while state: + sum(range(state.range(0))) + + +@benchmark.register +@benchmark.option.range(8, limit=8 << 10) +def using_range(state): + while state: + sum(range(state.range(0))) + + +@benchmark.register +@benchmark.option.range_multiplier(2) +@benchmark.option.range(1 << 10, 1 << 18) +@benchmark.option.complexity(benchmark.oN) +def computing_complexity(state): + while state: + sum(range(state.range(0))) + state.complexity_n = state.range(0) -if __name__ == '__main__': +if __name__ == "__main__": benchmark.main() diff --git a/cmake/benchmark.pc.in b/cmake/benchmark.pc.in index 43ca8f9..34beb01 100644 --- a/cmake/benchmark.pc.in +++ b/cmake/benchmark.pc.in @@ -1,7 +1,7 @@ prefix=@CMAKE_INSTALL_PREFIX@ exec_prefix=${prefix} -libdir=${prefix}/lib -includedir=${prefix}/include +libdir=${prefix}/@CMAKE_INSTALL_LIBDIR@ +includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@ Name: @PROJECT_NAME@ Description: Google microbenchmark framework @@ -12,29 +12,32 @@ from setuptools.command import build_ext HERE = os.path.dirname(os.path.abspath(__file__)) -IS_WINDOWS = sys.platform.startswith('win') +IS_WINDOWS = sys.platform.startswith("win") def _get_version(): """Parse the version string from __init__.py.""" - with open(os.path.join( - HERE, 'bindings', 'python', 'google_benchmark', '__init__.py')) as init_file: + with open( + os.path.join(HERE, "bindings", "python", "google_benchmark", "__init__.py") + ) as init_file: try: version_line = next( - line for line in init_file if line.startswith('__version__')) + line for line in init_file if line.startswith("__version__") + ) except StopIteration: - raise ValueError('__version__ not defined in __init__.py') + raise ValueError("__version__ not defined in __init__.py") else: namespace = {} exec(version_line, namespace) # pylint: disable=exec-used - return namespace['__version__'] + return namespace["__version__"] def _parse_requirements(path): with open(os.path.join(HERE, path)) as requirements: return [ - line.rstrip() for line in requirements - if not (line.isspace() or line.startswith('#')) + line.rstrip() + for line in requirements + if not (line.isspace() or line.startswith("#")) ] @@ -43,8 +46,9 @@ class BazelExtension(setuptools.Extension): def __init__(self, name, bazel_target): self.bazel_target = bazel_target - self.relpath, self.target_name = ( - posixpath.relpath(bazel_target, '//').split(':')) + self.relpath, self.target_name = posixpath.relpath(bazel_target, "//").split( + ":" + ) setuptools.Extension.__init__(self, name, sources=[]) @@ -58,71 +62,79 @@ class BuildBazelExtension(build_ext.build_ext): def bazel_build(self, ext): """Runs the bazel build to create the package.""" - with open('WORKSPACE', 'r') as workspace: + with open("WORKSPACE", "r") as workspace: workspace_contents = workspace.read() - with open('WORKSPACE', 'w') as workspace: - workspace.write(re.sub( - r'(?<=path = ").*(?=", # May be overwritten by 
diff --git a/setup.py b/setup.py
@@ -12,29 +12,32 @@ from setuptools.command import build_ext
 
 HERE = os.path.dirname(os.path.abspath(__file__))
-IS_WINDOWS = sys.platform.startswith('win')
+IS_WINDOWS = sys.platform.startswith("win")
 
 
 def _get_version():
     """Parse the version string from __init__.py."""
-    with open(os.path.join(
-        HERE, 'bindings', 'python', 'google_benchmark', '__init__.py')) as init_file:
+    with open(
+        os.path.join(HERE, "bindings", "python", "google_benchmark", "__init__.py")
+    ) as init_file:
         try:
             version_line = next(
-                line for line in init_file if line.startswith('__version__'))
+                line for line in init_file if line.startswith("__version__")
+            )
         except StopIteration:
-            raise ValueError('__version__ not defined in __init__.py')
+            raise ValueError("__version__ not defined in __init__.py")
         else:
             namespace = {}
             exec(version_line, namespace)  # pylint: disable=exec-used
-            return namespace['__version__']
+            return namespace["__version__"]
 
 
 def _parse_requirements(path):
     with open(os.path.join(HERE, path)) as requirements:
         return [
-            line.rstrip() for line in requirements
-            if not (line.isspace() or line.startswith('#'))
+            line.rstrip()
+            for line in requirements
+            if not (line.isspace() or line.startswith("#"))
         ]
 
 
@@ -43,8 +46,9 @@ class BazelExtension(setuptools.Extension):
 
     def __init__(self, name, bazel_target):
         self.bazel_target = bazel_target
-        self.relpath, self.target_name = (
-            posixpath.relpath(bazel_target, '//').split(':'))
+        self.relpath, self.target_name = posixpath.relpath(bazel_target, "//").split(
+            ":"
+        )
         setuptools.Extension.__init__(self, name, sources=[])
 
 
@@ -58,71 +62,79 @@ class BuildBazelExtension(build_ext.build_ext):
 
     def bazel_build(self, ext):
         """Runs the bazel build to create the package."""
-        with open('WORKSPACE', 'r') as workspace:
+        with open("WORKSPACE", "r") as workspace:
             workspace_contents = workspace.read()
 
-        with open('WORKSPACE', 'w') as workspace:
-            workspace.write(re.sub(
-                r'(?<=path = ").*(?=",  # May be overwritten by setup\.py\.)',
-                sysconfig.get_python_inc().replace(os.path.sep, posixpath.sep),
-                workspace_contents))
+        with open("WORKSPACE", "w") as workspace:
+            workspace.write(
+                re.sub(
+                    r'(?<=path = ").*(?=",  # May be overwritten by setup\.py\.)',
+                    sysconfig.get_python_inc().replace(os.path.sep, posixpath.sep),
+                    workspace_contents,
+                )
+            )
 
         if not os.path.exists(self.build_temp):
             os.makedirs(self.build_temp)
 
         bazel_argv = [
-            'bazel',
-            'build',
+            "bazel",
+            "build",
             ext.bazel_target,
-            '--symlink_prefix=' + os.path.join(self.build_temp, 'bazel-'),
-            '--compilation_mode=' + ('dbg' if self.debug else 'opt'),
+            "--symlink_prefix=" + os.path.join(self.build_temp, "bazel-"),
+            "--compilation_mode=" + ("dbg" if self.debug else "opt"),
         ]
 
         if IS_WINDOWS:
             # Link with python*.lib.
             for library_dir in self.library_dirs:
-                bazel_argv.append('--linkopt=/LIBPATH:' + library_dir)
+                bazel_argv.append("--linkopt=/LIBPATH:" + library_dir)
 
-            self.spawn(bazel_argv)
+        self.spawn(bazel_argv)
 
-            shared_lib_suffix = '.dll' if IS_WINDOWS else '.so'
-            ext_bazel_bin_path = os.path.join(
-                self.build_temp, 'bazel-bin',
-                ext.relpath, ext.target_name + shared_lib_suffix)
-            ext_dest_path = self.get_ext_fullpath(ext.name)
-            ext_dest_dir = os.path.dirname(ext_dest_path)
-            if not os.path.exists(ext_dest_dir):
-                os.makedirs(ext_dest_dir)
-            shutil.copyfile(ext_bazel_bin_path, ext_dest_path)
+        shared_lib_suffix = '.dll' if IS_WINDOWS else '.so'
+        ext_bazel_bin_path = os.path.join(
+            self.build_temp, 'bazel-bin',
+            ext.relpath, ext.target_name + shared_lib_suffix)
+
+        ext_dest_path = self.get_ext_fullpath(ext.name)
+        ext_dest_dir = os.path.dirname(ext_dest_path)
+        if not os.path.exists(ext_dest_dir):
+            os.makedirs(ext_dest_dir)
+        shutil.copyfile(ext_bazel_bin_path, ext_dest_path)
 
 
 setuptools.setup(
-    name='google_benchmark',
+    name="google_benchmark",
     version=_get_version(),
-    url='https://github.com/google/benchmark',
-    description='A library to benchmark code snippets.',
-    author='Google',
-    author_email='benchmark-py@google.com',
+    url="https://github.com/google/benchmark",
+    description="A library to benchmark code snippets.",
+    author="Google",
+    author_email="benchmark-py@google.com",
     # Contained modules and scripts.
-    package_dir={'': 'bindings/python'},
-    packages=setuptools.find_packages('bindings/python'),
-    install_requires=_parse_requirements('bindings/python/requirements.txt'),
+    package_dir={"": "bindings/python"},
+    packages=setuptools.find_packages("bindings/python"),
+    install_requires=_parse_requirements("bindings/python/requirements.txt"),
     cmdclass=dict(build_ext=BuildBazelExtension),
-    ext_modules=[BazelExtension(
-        'google_benchmark._benchmark', '//bindings/python/google_benchmark:_benchmark')],
+    ext_modules=[
+        BazelExtension(
+            "google_benchmark._benchmark",
+            "//bindings/python/google_benchmark:_benchmark",
+        )
+    ],
     zip_safe=False,
     # PyPI package information.
     classifiers=[
-        'Development Status :: 4 - Beta',
-        'Intended Audience :: Developers',
-        'Intended Audience :: Science/Research',
-        'License :: OSI Approved :: Apache Software License',
-        'Programming Language :: Python :: 3.6',
-        'Programming Language :: Python :: 3.7',
-        'Programming Language :: Python :: 3.8',
-        'Topic :: Software Development :: Testing',
-        'Topic :: System :: Benchmark',
+        "Development Status :: 4 - Beta",
+        "Intended Audience :: Developers",
+        "Intended Audience :: Science/Research",
+        "License :: OSI Approved :: Apache Software License",
+        "Programming Language :: Python :: 3.6",
+        "Programming Language :: Python :: 3.7",
+        "Programming Language :: Python :: 3.8",
+        "Topic :: Software Development :: Testing",
+        "Topic :: System :: Benchmark",
    ],
-    license='Apache 2.0',
-    keywords='benchmark',
+    license="Apache 2.0",
+    keywords="benchmark",
 )
diff --git a/src/benchmark_register.h b/src/benchmark_register.h
index 61377d7..204bf1d 100644
--- a/src/benchmark_register.h
+++ b/src/benchmark_register.h
@@ -1,6 +1,7 @@
 #ifndef BENCHMARK_REGISTER_H
 #define BENCHMARK_REGISTER_H
 
+#include <limits>
 #include <vector>
 
 #include "check.h"
diff --git a/src/cycleclock.h b/src/cycleclock.h
index 179c67c..77be7b9 100644
--- a/src/cycleclock.h
+++ b/src/cycleclock.h
@@ -161,7 +161,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
   struct timeval tv;
   gettimeofday(&tv, nullptr);
   return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
-#elif defined(__mips__)
+#elif defined(__mips__) || defined(__m68k__)
   // mips apparently only allows rdtsc for superusers, so we fall
   // back to gettimeofday.  It's possible clock_gettime would be better.
   struct timeval tv;
@@ -170,7 +170,12 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
 #elif defined(__s390__)  // Covers both s390 and s390x.
   // Return the CPU clock.
   uint64_t tsc;
+#if defined(BENCHMARK_OS_ZOS) && defined(COMPILER_IBMXL)
+  // z/OS XL compiler HLASM syntax.
+  asm(" stck %0" : "=m"(tsc) : : "cc");
+#else
   asm("stck %0" : "=Q"(tsc) : : "cc");
+#endif
   return tsc;
 #elif defined(__riscv)  // RISC-V
   // Use RDCYCLE (and RDCYCLEH on riscv32)
diff --git a/src/internal_macros.h b/src/internal_macros.h
index 6adf00d..91f367b 100644
--- a/src/internal_macros.h
+++ b/src/internal_macros.h
@@ -13,7 +13,11 @@
 #endif
 
 #if defined(__clang__)
-  #if !defined(COMPILER_CLANG)
+  #if defined(__ibmxl__)
+    #if !defined(COMPILER_IBMXL)
+      #define COMPILER_IBMXL
+    #endif
+  #elif !defined(COMPILER_CLANG)
     #define COMPILER_CLANG
   #endif
 #elif defined(_MSC_VER)
@@ -58,6 +62,8 @@
   #define BENCHMARK_OS_NETBSD 1
 #elif defined(__OpenBSD__)
   #define BENCHMARK_OS_OPENBSD 1
+#elif defined(__DragonFly__)
+  #define BENCHMARK_OS_DRAGONFLY 1
 #elif defined(__linux__)
   #define BENCHMARK_OS_LINUX 1
 #elif defined(__native_client__)
@@ -72,6 +78,8 @@
   #define BENCHMARK_OS_SOLARIS 1
 #elif defined(__QNX__)
   #define BENCHMARK_OS_QNX 1
+#elif defined(__MVS__)
+#define BENCHMARK_OS_ZOS 1
 #endif
 
 #if defined(__ANDROID__) && defined(__GLIBCXX__)
diff --git a/src/sysinfo.cc b/src/sysinfo.cc
index 8bab932..b30b4f8 100644
--- a/src/sysinfo.cc
+++ b/src/sysinfo.cc
@@ -29,7 +29,8 @@
 #include <sys/types.h>  // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
 #include <unistd.h>
 #if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX || \
-    defined BENCHMARK_OS_NETBSD || defined BENCHMARK_OS_OPENBSD
+    defined BENCHMARK_OS_NETBSD || defined BENCHMARK_OS_OPENBSD || \
+    defined BENCHMARK_OS_DRAGONFLY
 #define BENCHMARK_HAS_SYSCTL
 #include <sys/sysctl.h>
 #endif
@@ -607,6 +608,8 @@ double GetCPUCyclesPerSecond() {
       "machdep.tsc_freq";
 #elif defined BENCHMARK_OS_OPENBSD
       "hw.cpuspeed";
+#elif defined BENCHMARK_OS_DRAGONFLY
+      "hw.tsc_frequency";
 #else
       "hw.cpufrequency";
 #endif
@@ -671,9 +674,10 @@ double GetCPUCyclesPerSecond() {
 }
 
 std::vector<double> GetLoadAvg() {
-#if (defined BENCHMARK_OS_FREEBSD || defined(BENCHMARK_OS_LINUX) || \
-     defined BENCHMARK_OS_MACOSX || defined BENCHMARK_OS_NETBSD ||  \
-     defined BENCHMARK_OS_OPENBSD) && !defined(__ANDROID__)
+#if (defined BENCHMARK_OS_FREEBSD || defined(BENCHMARK_OS_LINUX) ||     \
+     defined BENCHMARK_OS_MACOSX || defined BENCHMARK_OS_NETBSD ||      \
+     defined BENCHMARK_OS_OPENBSD || defined BENCHMARK_OS_DRAGONFLY) && \
+    !defined(__ANDROID__)
   constexpr int kMaxSamples = 3;
   std::vector<double> res(kMaxSamples, 0.0);
   const int nelem = getloadavg(res.data(), kMaxSamples);
diff --git a/src/timers.cc b/src/timers.cc
index 4f76edd..1d3ab9a 100644
--- a/src/timers.cc
+++ b/src/timers.cc
@@ -28,7 +28,8 @@
 #include <sys/time.h>
 #include <sys/types.h>  // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
 #include <unistd.h>
-#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX
+#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_DRAGONFLY || \
+    defined BENCHMARK_OS_MACOSX
 #include <sys/sysctl.h>
 #endif
 #if defined(BENCHMARK_OS_MACOSX)
diff --git a/tools/compare.py b/tools/compare.py
index bd01be5..66eed93 100755
--- a/tools/compare.py
+++ b/tools/compare.py
@@ -7,6 +7,7 @@ compare.py - versatile benchmark output compare tool
 
 import argparse
 from argparse import ArgumentParser
+import json
 import sys
 import gbench
 from gbench import util, report
@@ -56,6 +57,12 @@ def create_parser():
         help="Do not use colors in the terminal output"
     )
 
+    parser.add_argument(
+        '-d',
+        '--dump_to_json',
+        dest='dump_to_json',
+        help="Additionally, dump benchmark comparison output to this file in JSON format.")
+
     utest = parser.add_argument_group()
     utest.add_argument(
         '--no-utest',
@@ -244,14 +251,20 @@ def main():
         json2 = gbench.report.filter_benchmark(
             json2_orig, filter_contender, replacement)
 
-    # Diff and output
-    output_lines = gbench.report.generate_difference_report(
-        json1, json2, args.display_aggregates_only,
+    diff_report = gbench.report.get_difference_report(
+        json1, json2, args.utest)
+    output_lines = gbench.report.print_difference_report(
+        diff_report,
+        args.display_aggregates_only,
         args.utest, args.utest_alpha, args.color)
     print(description)
    for ln in output_lines:
         print(ln)
 
+    # Optionally, diff and output to JSON
+    if args.dump_to_json is not None:
+        with open(args.dump_to_json, 'w') as f_json:
+            json.dump(diff_report, f_json)
 
 class TestParser(unittest.TestCase):
     def setUp(self):
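The new --dump_to_json flag serializes exactly the list returned by get_difference_report, so downstream tooling can consume the comparison without scraping the pretty-printed table. A minimal consumer sketch (comparison.json is a hypothetical output path passed via -d):

    import json

    with open("comparison.json") as f:  # produced by: compare.py ... -d comparison.json
        diff_report = json.load(f)

    for bench in diff_report:
        for m in bench["measurements"]:
            # 'time' and 'cpu' are relative changes computed by calculate_change()
            print(bench["name"], m["time"], m["cpu"], bench["time_unit"])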
+ """ + assert utest is True or utest is False + + diff_report = [] + partitions = partition_benchmarks(json1, json2) + for partition in partitions: + benchmark_name = partition[0][0]['name'] + time_unit = partition[0][0]['time_unit'] + measurements = [] + utest_results = {} + # Careful, we may have different repetition count. + for i in range(min(len(partition[0]), len(partition[1]))): + bn = partition[0][i] + other_bench = partition[1][i] + measurements.append({ + 'real_time': bn['real_time'], + 'cpu_time': bn['cpu_time'], + 'real_time_other': other_bench['real_time'], + 'cpu_time_other': other_bench['cpu_time'], + 'time': calculate_change(bn['real_time'], other_bench['real_time']), + 'cpu': calculate_change(bn['cpu_time'], other_bench['cpu_time']) + }) + + # After processing the whole partition, if requested, do the U test. + if utest: + timings_cpu = extract_field(partition, 'cpu_time') + timings_time = extract_field(partition, 'real_time') + have_optimal_repetitions, cpu_pvalue, time_pvalue = calc_utest(timings_cpu, timings_time) + if cpu_pvalue and time_pvalue: + utest_results = { + 'have_optimal_repetitions': have_optimal_repetitions, + 'cpu_pvalue': cpu_pvalue, + 'time_pvalue': time_pvalue, + 'nr_of_repetitions': len(timings_cpu[0]), + 'nr_of_repetitions_other': len(timings_cpu[1]) + } + + # Store only if we had any measurements for given benchmark. + # E.g. partition_benchmarks will filter out the benchmarks having + # time units which are not compatible with other time units in the + # benchmark suite. + if measurements: + run_type = partition[0][0]['run_type'] if 'run_type' in partition[0][0] else '' + aggregate_name = partition[0][0]['aggregate_name'] if run_type == 'aggregate' and 'aggregate_name' in partition[0][0] else '' + diff_report.append({ + 'name': benchmark_name, + 'measurements': measurements, + 'time_unit': time_unit, + 'run_type': run_type, + 'aggregate_name': aggregate_name, + 'utest': utest_results + }) + + return diff_report + + +def print_difference_report( + json_diff_report, + include_aggregates_only=False, utest=False, utest_alpha=0.05, use_color=True): @@ -219,14 +280,16 @@ def generate_difference_report( runs specified as 'json1' and 'json2'. """ assert utest is True or utest is False - first_col_width = find_longest_name(json1['benchmarks']) - def find_test(name): - for b in json2['benchmarks']: - if b['name'] == name: - return b - return None + def get_color(res): + if res > 0.05: + return BC_FAIL + elif res > -0.07: + return BC_WHITE + else: + return BC_CYAN + first_col_width = find_longest_name(json_diff_report) first_col_width = max( first_col_width, len('Benchmark')) @@ -235,50 +298,36 @@ def generate_difference_report( 'Benchmark', 12 + first_col_width) output_strs = [first_line, '-' * len(first_line)] - partitions = partition_benchmarks(json1, json2) - for partition in partitions: - # Careful, we may have different repetition count. - for i in range(min(len(partition[0]), len(partition[1]))): - bn = partition[0][i] - other_bench = partition[1][i] + fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}" + for benchmark in json_diff_report: + # *If* we were asked to only include aggregates, + # and if it is non-aggregate, then skip it. + if include_aggregates_only and 'run_type' in benchmark: + if benchmark['run_type'] != 'aggregate': + continue - # *If* we were asked to only display aggregates, - # and if it is non-aggregate, then skip it. 
+
+def print_difference_report(
+        json_diff_report,
+        include_aggregates_only=False,
         utest=False,
         utest_alpha=0.05,
         use_color=True):
@@ -219,14 +280,16 @@ def generate_difference_report(
     runs specified as 'json1' and 'json2'.
     """
     assert utest is True or utest is False
-    first_col_width = find_longest_name(json1['benchmarks'])
 
-    def find_test(name):
-        for b in json2['benchmarks']:
-            if b['name'] == name:
-                return b
-        return None
+    def get_color(res):
+        if res > 0.05:
+            return BC_FAIL
+        elif res > -0.07:
+            return BC_WHITE
+        else:
+            return BC_CYAN
 
+    first_col_width = find_longest_name(json_diff_report)
     first_col_width = max(
         first_col_width,
         len('Benchmark'))
@@ -235,50 +298,36 @@ def generate_difference_report(
         'Benchmark', 12 + first_col_width)
     output_strs = [first_line, '-' * len(first_line)]
 
-    partitions = partition_benchmarks(json1, json2)
-    for partition in partitions:
-        # Careful, we may have different repetition count.
-        for i in range(min(len(partition[0]), len(partition[1]))):
-            bn = partition[0][i]
-            other_bench = partition[1][i]
+    fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
+    for benchmark in json_diff_report:
+        # *If* we were asked to only include aggregates,
+        # and if it is non-aggregate, then skip it.
+        if include_aggregates_only and 'run_type' in benchmark:
+            if benchmark['run_type'] != 'aggregate':
+                continue
 
-            # *If* we were asked to only display aggregates,
-            # and if it is non-aggregate, then skip it.
-            if display_aggregates_only and 'run_type' in bn and 'run_type' in other_bench:
-                assert bn['run_type'] == other_bench['run_type']
-                if bn['run_type'] != 'aggregate':
-                    continue
-
-            fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
-
-            def get_color(res):
-                if res > 0.05:
-                    return BC_FAIL
-                elif res > -0.07:
-                    return BC_WHITE
-                else:
-                    return BC_CYAN
-
-            tres = calculate_change(bn['real_time'], other_bench['real_time'])
-            cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time'])
+        for measurement in benchmark['measurements']:
             output_strs += [color_format(use_color,
                                          fmt_str,
                                          BC_HEADER,
-                                         bn['name'],
+                                         benchmark['name'],
                                          first_col_width,
-                                         get_color(tres),
-                                         tres,
-                                         get_color(cpures),
-                                         cpures,
-                                         bn['real_time'],
-                                         other_bench['real_time'],
-                                         bn['cpu_time'],
-                                         other_bench['cpu_time'],
+                                         get_color(measurement['time']),
+                                         measurement['time'],
+                                         get_color(measurement['cpu']),
+                                         measurement['cpu'],
+                                         measurement['real_time'],
+                                         measurement['real_time_other'],
+                                         measurement['cpu_time'],
+                                         measurement['cpu_time_other'],
                                          endc=BC_ENDC)]
 
-        # After processing the whole partition, if requested, do the U test.
-        if utest:
-            output_strs += print_utest(partition,
+        # After processing the measurements, if requested and
+        # if applicable (e.g. a u-test exists for the given benchmark),
+        # print the U test.
+        if utest and benchmark['utest']:
+            output_strs += print_utest(benchmark['name'],
+                                       benchmark['utest'],
                                        utest_alpha=utest_alpha,
                                        first_col_width=first_col_width,
                                        use_color=use_color)
@@ -319,21 +368,26 @@ class TestGetUniqueBenchmarkNames(unittest.TestCase):
 
 
 class TestReportDifference(unittest.TestCase):
-    def load_results(self):
-        import json
-        testInputs = os.path.join(
-            os.path.dirname(
-                os.path.realpath(__file__)),
-            'Inputs')
-        testOutput1 = os.path.join(testInputs, 'test1_run1.json')
-        testOutput2 = os.path.join(testInputs, 'test1_run2.json')
-        with open(testOutput1, 'r') as f:
-            json1 = json.load(f)
-        with open(testOutput2, 'r') as f:
-            json2 = json.load(f)
-        return json1, json2
-
-    def test_basic(self):
+    @classmethod
+    def setUpClass(cls):
+        def load_results():
+            import json
+            testInputs = os.path.join(
+                os.path.dirname(
+                    os.path.realpath(__file__)),
+                'Inputs')
+            testOutput1 = os.path.join(testInputs, 'test1_run1.json')
+            testOutput2 = os.path.join(testInputs, 'test1_run2.json')
+            with open(testOutput1, 'r') as f:
+                json1 = json.load(f)
+            with open(testOutput2, 'r') as f:
+                json2 = json.load(f)
+            return json1, json2
+
+        json1, json2 = load_results()
+        cls.json_diff_report = get_difference_report(json1, json2)
+
+    def test_json_diff_report_pretty_printing(self):
         expect_lines = [
             ['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'],
             ['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'],
@@ -351,9 +405,8 @@ class TestReportDifference(unittest.TestCase):
             ['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'],
             ['BM_NotBadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'],
         ]
-        json1, json2 = self.load_results()
-        output_lines_with_header = generate_difference_report(
-            json1, json2, use_color=False)
+        output_lines_with_header = print_difference_report(
+            self.json_diff_report, use_color=False)
         output_lines = output_lines_with_header[2:]
         print("\n")
         print("\n".join(output_lines_with_header))
@@ -363,31 +416,118 @@ class TestReportDifference(unittest.TestCase):
             self.assertEqual(len(parts), 7)
             self.assertEqual(expect_lines[i], parts)
+    def test_json_diff_report_output(self):
+        expected_output = [
+            {
+                'name': 'BM_SameTimes',
+                'measurements': [{'time': 0.0000, 'cpu': 0.0000, 'real_time': 10, 'real_time_other': 10, 'cpu_time': 10, 'cpu_time_other': 10}],
+                'time_unit': 'ns',
+                'utest': {}
+            },
+            {
+                'name': 'BM_2xFaster',
+                'measurements': [{'time': -0.5000, 'cpu': -0.5000, 'real_time': 50, 'real_time_other': 25, 'cpu_time': 50, 'cpu_time_other': 25}],
+                'time_unit': 'ns',
+                'utest': {}
+            },
+            {
+                'name': 'BM_2xSlower',
+                'measurements': [{'time': 1.0000, 'cpu': 1.0000, 'real_time': 50, 'real_time_other': 100, 'cpu_time': 50, 'cpu_time_other': 100}],
+                'time_unit': 'ns',
+                'utest': {}
+            },
+            {
+                'name': 'BM_1PercentFaster',
+                'measurements': [{'time': -0.0100, 'cpu': -0.0100, 'real_time': 100, 'real_time_other': 98.9999999, 'cpu_time': 100, 'cpu_time_other': 98.9999999}],
+                'time_unit': 'ns',
+                'utest': {}
+            },
+            {
+                'name': 'BM_1PercentSlower',
+                'measurements': [{'time': 0.0100, 'cpu': 0.0100, 'real_time': 100, 'real_time_other': 101, 'cpu_time': 100, 'cpu_time_other': 101}],
+                'time_unit': 'ns',
+                'utest': {}
+            },
+            {
+                'name': 'BM_10PercentFaster',
+                'measurements': [{'time': -0.1000, 'cpu': -0.1000, 'real_time': 100, 'real_time_other': 90, 'cpu_time': 100, 'cpu_time_other': 90}],
+                'time_unit': 'ns',
+                'utest': {}
+            },
+            {
+                'name': 'BM_10PercentSlower',
+                'measurements': [{'time': 0.1000, 'cpu': 0.1000, 'real_time': 100, 'real_time_other': 110, 'cpu_time': 100, 'cpu_time_other': 110}],
+                'time_unit': 'ns',
+                'utest': {}
+            },
+            {
+                'name': 'BM_100xSlower',
+                'measurements': [{'time': 99.0000, 'cpu': 99.0000, 'real_time': 100, 'real_time_other': 10000, 'cpu_time': 100, 'cpu_time_other': 10000}],
+                'time_unit': 'ns',
+                'utest': {}
+            },
+            {
+                'name': 'BM_100xFaster',
+                'measurements': [{'time': -0.9900, 'cpu': -0.9900, 'real_time': 10000, 'real_time_other': 100, 'cpu_time': 10000, 'cpu_time_other': 100}],
+                'time_unit': 'ns',
+                'utest': {}
+            },
+            {
+                'name': 'BM_10PercentCPUToTime',
+                'measurements': [{'time': 0.1000, 'cpu': -0.1000, 'real_time': 100, 'real_time_other': 110, 'cpu_time': 100, 'cpu_time_other': 90}],
+                'time_unit': 'ns',
+                'utest': {}
+            },
+            {
+                'name': 'BM_ThirdFaster',
+                'measurements': [{'time': -0.3333, 'cpu': -0.3334, 'real_time': 100, 'real_time_other': 67, 'cpu_time': 100, 'cpu_time_other': 67}],
+                'time_unit': 'ns',
+                'utest': {}
+            },
+            {
+                'name': 'BM_NotBadTimeUnit',
+                'measurements': [{'time': -0.9000, 'cpu': 0.2000, 'real_time': 0.4, 'real_time_other': 0.04, 'cpu_time': 0.5, 'cpu_time_other': 0.6}],
+                'time_unit': 's',
+                'utest': {}
+            },
+        ]
+        self.assertEqual(len(self.json_diff_report), len(expected_output))
+        for out, expected in zip(
+                self.json_diff_report, expected_output):
+            self.assertEqual(out['name'], expected['name'])
+            self.assertEqual(out['time_unit'], expected['time_unit'])
+            assert_utest(self, out, expected)
+            assert_measurements(self, out, expected)
 
 
 class TestReportDifferenceBetweenFamilies(unittest.TestCase):
-    def load_result(self):
-        import json
-        testInputs = os.path.join(
-            os.path.dirname(
-                os.path.realpath(__file__)),
-            'Inputs')
-        testOutput = os.path.join(testInputs, 'test2_run.json')
-        with open(testOutput, 'r') as f:
-            json = json.load(f)
-        return json
+    @classmethod
+    def setUpClass(cls):
+        def load_result():
+            import json
+            testInputs = os.path.join(
+                os.path.dirname(
+                    os.path.realpath(__file__)),
+                'Inputs')
+            testOutput = os.path.join(testInputs, 'test2_run.json')
+            with open(testOutput, 'r') as f:
+                json = json.load(f)
+            return json
+
+        json = load_result()
+        json1 = filter_benchmark(json, "BM_Z.ro", ".")
+        json2 = filter_benchmark(json, "BM_O.e", ".")
+        cls.json_diff_report = get_difference_report(json1, json2)
 
-    def test_basic(self):
+    def test_json_diff_report_pretty_printing(self):
         expect_lines = [
             ['.', '-0.5000', '-0.5000', '10', '5', '10', '5'],
             ['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'],
             ['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'],
             ['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'],
         ]
-        json = self.load_result()
-        json1 = filter_benchmark(json, "BM_Z.ro", ".")
-        json2 = filter_benchmark(json, "BM_O.e", ".")
-        output_lines_with_header = generate_difference_report(
-            json1, json2, use_color=False)
+        output_lines_with_header = print_difference_report(
+            self.json_diff_report, use_color=False)
         output_lines = output_lines_with_header[2:]
         print("\n")
         print("\n".join(output_lines_with_header))
@@ -397,24 +537,64 @@ class TestReportDifferenceBetweenFamilies(unittest.TestCase):
             self.assertEqual(len(parts), 7)
             self.assertEqual(expect_lines[i], parts)
 
+    def test_json_diff_report(self):
+        expected_output = [
+            {
+                'name': u'.',
+                'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 10, 'real_time_other': 5, 'cpu_time': 10, 'cpu_time_other': 5}],
+                'time_unit': 'ns',
+                'utest': {}
+            },
+            {
+                'name': u'./4',
+                'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 40, 'real_time_other': 20, 'cpu_time': 40, 'cpu_time_other': 20}],
+                'time_unit': 'ns',
+                'utest': {},
+            },
+            {
+                'name': u'Prefix/.',
+                'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 20, 'real_time_other': 10, 'cpu_time': 20, 'cpu_time_other': 10}],
+                'time_unit': 'ns',
+                'utest': {}
+            },
+            {
+                'name': u'Prefix/./3',
+                'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 30, 'real_time_other': 15, 'cpu_time': 30, 'cpu_time_other': 15}],
+                'time_unit': 'ns',
+                'utest': {}
+            }
+        ]
+        self.assertEqual(len(self.json_diff_report), len(expected_output))
+        for out, expected in zip(
+                self.json_diff_report, expected_output):
+            self.assertEqual(out['name'], expected['name'])
+            self.assertEqual(out['time_unit'], expected['time_unit'])
+            assert_utest(self, out, expected)
+            assert_measurements(self, out, expected)
+
 
 class TestReportDifferenceWithUTest(unittest.TestCase):
-    def load_results(self):
-        import json
-        testInputs = os.path.join(
-            os.path.dirname(
-                os.path.realpath(__file__)),
-            'Inputs')
-        testOutput1 = os.path.join(testInputs, 'test3_run0.json')
-        testOutput2 = os.path.join(testInputs, 'test3_run1.json')
-        with open(testOutput1, 'r') as f:
-            json1 = json.load(f)
-        with open(testOutput2, 'r') as f:
-            json2 = json.load(f)
-        return json1, json2
-
-    def test_utest(self):
-        expect_lines = []
+    @classmethod
+    def setUpClass(cls):
+        def load_results():
+            import json
+            testInputs = os.path.join(
+                os.path.dirname(
+                    os.path.realpath(__file__)),
+                'Inputs')
+            testOutput1 = os.path.join(testInputs, 'test3_run0.json')
+            testOutput2 = os.path.join(testInputs, 'test3_run1.json')
+            with open(testOutput1, 'r') as f:
+                json1 = json.load(f)
+            with open(testOutput2, 'r') as f:
+                json2 = json.load(f)
+            return json1, json2
+
+        json1, json2 = load_results()
+        cls.json_diff_report = get_difference_report(
+            json1, json2, utest=True)
+
+    def test_json_diff_report_pretty_printing(self):
         expect_lines = [
             ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
             ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
@@ -453,9 +633,8 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
              'recommended.'],
             ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'],
         ]
-        json1, json2 = self.load_results()
-        output_lines_with_header = generate_difference_report(
-            json1, json2, utest=True, utest_alpha=0.05, use_color=False)
+        output_lines_with_header = print_difference_report(
+            self.json_diff_report, utest=True, utest_alpha=0.05, use_color=False)
         output_lines = output_lines_with_header[2:]
         print("\n")
         print("\n".join(output_lines_with_header))
@@ -464,25 +643,105 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
             parts = [x for x in output_lines[i].split(' ') if x]
             self.assertEqual(expect_lines[i], parts)
 
+    def test_json_diff_report(self):
+        expected_output = [
+            {
+                'name': u'BM_One',
+                'measurements': [{'time': -0.1, 'cpu': 0.1, 'real_time': 10, 'real_time_other': 9, 'cpu_time': 100, 'cpu_time_other': 110}],
+                'time_unit': 'ns',
+                'utest': {}
+            },
+            {
+                'name': u'BM_Two',
+                'measurements': [
+                    {'time': 0.1111111111111111, 'cpu': -0.011111111111111112, 'real_time': 9, 'real_time_other': 10, 'cpu_time': 90, 'cpu_time_other': 89},
+                    {'time': -0.125, 'cpu': -0.16279069767441862, 'real_time': 8, 'real_time_other': 7, 'cpu_time': 86, 'cpu_time_other': 72}
+                ],
+                'time_unit': 'ns',
+                'utest': {
+                    'have_optimal_repetitions': False, 'cpu_pvalue': 0.6985353583033387, 'time_pvalue': 0.6985353583033387
+                }
+            },
+            {
+                'name': u'short',
+                'measurements': [
+                    {'time': -0.125, 'cpu': -0.0625, 'real_time': 8, 'real_time_other': 7, 'cpu_time': 80, 'cpu_time_other': 75},
+                    {'time': -0.4325, 'cpu': -0.13506493506493514, 'real_time': 8, 'real_time_other': 4.54, 'cpu_time': 77, 'cpu_time_other': 66.6}
+                ],
+                'time_unit': 'ns',
+                'utest': {
+                    'have_optimal_repetitions': False, 'cpu_pvalue': 0.14891467317876572, 'time_pvalue': 0.7670968684102772
+                }
+            },
+            {
+                'name': u'medium',
+                'measurements': [{'time': -0.375, 'cpu': -0.3375, 'real_time': 8, 'real_time_other': 5, 'cpu_time': 80, 'cpu_time_other': 53}],
+                'time_unit': 'ns',
+                'utest': {}
+            }
+        ]
+        self.assertEqual(len(self.json_diff_report), len(expected_output))
+        for out, expected in zip(
+                self.json_diff_report, expected_output):
+            self.assertEqual(out['name'], expected['name'])
+            self.assertEqual(out['time_unit'], expected['time_unit'])
+            assert_utest(self, out, expected)
+            assert_measurements(self, out, expected)
+
 
 class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
         unittest.TestCase):
-    def load_results(self):
-        import json
-        testInputs = os.path.join(
-            os.path.dirname(
-                os.path.realpath(__file__)),
-            'Inputs')
-        testOutput1 = os.path.join(testInputs, 'test3_run0.json')
-        testOutput2 = os.path.join(testInputs, 'test3_run1.json')
-        with open(testOutput1, 'r') as f:
-            json1 = json.load(f)
-        with open(testOutput2, 'r') as f:
-            json2 = json.load(f)
-        return json1, json2
-
-    def test_utest(self):
-        expect_lines = []
+    @classmethod
+    def setUpClass(cls):
+        def load_results():
+            import json
+            testInputs = os.path.join(
+                os.path.dirname(
+                    os.path.realpath(__file__)),
+                'Inputs')
+            testOutput1 = os.path.join(testInputs, 'test3_run0.json')
+            testOutput2 = os.path.join(testInputs, 'test3_run1.json')
+            with open(testOutput1, 'r') as f:
+                json1 = json.load(f)
+            with open(testOutput2, 'r') as f:
+                json2 = json.load(f)
+            return json1, json2
+
+        json1, json2 = load_results()
+        cls.json_diff_report = get_difference_report(
+            json1, json2, utest=True)
+
+    def test_json_diff_report_pretty_printing(self):
         expect_lines = [
             ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
             ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
@@ -519,10 +778,10 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
             '9+',
             'repetitions',
             'recommended.'],
+            ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53']
         ]
-        json1, json2 = self.load_results()
-        output_lines_with_header = generate_difference_report(
-            json1, json2, display_aggregates_only=True,
+        output_lines_with_header = print_difference_report(
+            self.json_diff_report,
             utest=True, utest_alpha=0.05, use_color=False)
         output_lines = output_lines_with_header[2:]
         print("\n")
@@ -532,6 +791,109 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
             parts = [x for x in output_lines[i].split(' ') if x]
             self.assertEqual(expect_lines[i], parts)
 
+    def test_json_diff_report(self):
+        expected_output = [
+            {
+                'name': u'BM_One',
+                'measurements': [{'time': -0.1, 'cpu': 0.1, 'real_time': 10, 'real_time_other': 9, 'cpu_time': 100, 'cpu_time_other': 110}],
+                'time_unit': 'ns',
+                'utest': {}
+            },
+            {
+                'name': u'BM_Two',
+                'measurements': [
+                    {'time': 0.1111111111111111, 'cpu': -0.011111111111111112, 'real_time': 9, 'real_time_other': 10, 'cpu_time': 90, 'cpu_time_other': 89},
+                    {'time': -0.125, 'cpu': -0.16279069767441862, 'real_time': 8, 'real_time_other': 7, 'cpu_time': 86, 'cpu_time_other': 72}
+                ],
+                'time_unit': 'ns',
+                'utest': {
+                    'have_optimal_repetitions': False, 'cpu_pvalue': 0.6985353583033387, 'time_pvalue': 0.6985353583033387
+                }
+            },
+            {
+                'name': u'short',
+                'measurements': [
+                    {'time': -0.125, 'cpu': -0.0625, 'real_time': 8, 'real_time_other': 7, 'cpu_time': 80, 'cpu_time_other': 75},
+                    {'time': -0.4325, 'cpu': -0.13506493506493514, 'real_time': 8, 'real_time_other': 4.54, 'cpu_time': 77, 'cpu_time_other': 66.6}
+                ],
+                'time_unit': 'ns',
+                'utest': {
+                    'have_optimal_repetitions': False, 'cpu_pvalue': 0.14891467317876572, 'time_pvalue': 0.7670968684102772
+                }
+            },
+            {
+                'name': u'medium',
+                'measurements': [{'real_time_other': 5, 'cpu_time': 80, 'time': -0.375, 'real_time': 8, 'cpu_time_other': 53, 'cpu': -0.3375}],
+                'utest': {},
+                'time_unit': u'ns',
+                'aggregate_name': ''
+            }
+        ]
+        self.assertEqual(len(self.json_diff_report), len(expected_output))
+        for out, expected in zip(
+                self.json_diff_report, expected_output):
+            self.assertEqual(out['name'], expected['name'])
+            self.assertEqual(out['time_unit'], expected['time_unit'])
+            assert_utest(self, out, expected)
+            assert_measurements(self, out, expected)
+
+
+def assert_utest(unittest_instance, lhs, rhs):
+    if lhs['utest']:
+        unittest_instance.assertAlmostEqual(
+            lhs['utest']['cpu_pvalue'],
+            rhs['utest']['cpu_pvalue'])
+        unittest_instance.assertAlmostEqual(
+            lhs['utest']['time_pvalue'],
+            rhs['utest']['time_pvalue'])
+        unittest_instance.assertEqual(
+            lhs['utest']['have_optimal_repetitions'],
+            rhs['utest']['have_optimal_repetitions'])
+    else:
+        # lhs is empty. assert if rhs is not.
+        unittest_instance.assertEqual(lhs['utest'], rhs['utest'])
+
+
+def assert_measurements(unittest_instance, lhs, rhs):
+    for m1, m2 in zip(lhs['measurements'], rhs['measurements']):
+        unittest_instance.assertEqual(m1['real_time'], m2['real_time'])
+        unittest_instance.assertEqual(m1['cpu_time'], m2['cpu_time'])
+        # m1['time'] and m1['cpu'] hold values which are being calculated,
+        # and therefore we must use almost-equal pattern.
+        unittest_instance.assertAlmostEqual(m1['time'], m2['time'], places=4)
+        unittest_instance.assertAlmostEqual(m1['cpu'], m2['cpu'], places=4)
+
 
 if __name__ == '__main__':
     unittest.main()
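For reference, the p-values hard-coded in these tests come from the Mann-Whitney U test that calc_utest runs over the per-repetition timings of the two runs; report.py obtains it from SciPy. A standalone sketch of the same computation (assuming SciPy is installed; the sample timings are made up):

    from scipy.stats import mannwhitneyu

    baseline = [10.0, 9.8, 10.1]    # per-repetition timings, run 1
    contender = [12.0, 12.3, 11.9]  # per-repetition timings, run 2

    # print_utest colors a p-value >= utest_alpha (default 0.05) as a failure,
    # meaning no statistically significant difference between the two runs.
    pvalue = mannwhitneyu(baseline, contender, alternative='two-sided').pvalue
    print(pvalue)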