Diffstat (limited to 'bindings/python/google_benchmark')
-rw-r--r--  bindings/python/google_benchmark/BUILD         |  38
-rw-r--r--  bindings/python/google_benchmark/__init__.py   | 158
-rw-r--r--  bindings/python/google_benchmark/benchmark.cc  | 181
-rw-r--r--  bindings/python/google_benchmark/example.py    | 136
4 files changed, 513 insertions, 0 deletions
diff --git a/bindings/python/google_benchmark/BUILD b/bindings/python/google_benchmark/BUILD
new file mode 100644
index 0000000..3c1561f
--- /dev/null
+++ b/bindings/python/google_benchmark/BUILD
@@ -0,0 +1,38 @@
+load("//bindings/python:build_defs.bzl", "py_extension")
+
+py_library(
+ name = "google_benchmark",
+ srcs = ["__init__.py"],
+ visibility = ["//visibility:public"],
+ deps = [
+ ":_benchmark",
+ # pip; absl:app
+ ],
+)
+
+py_extension(
+ name = "_benchmark",
+ srcs = ["benchmark.cc"],
+ copts = [
+ "-fexceptions",
+ "-fno-strict-aliasing",
+ ],
+ features = ["-use_header_modules"],
+ deps = [
+ "//:benchmark",
+ "@pybind11",
+ "@python_headers",
+ ],
+)
+
+py_test(
+ name = "example",
+ srcs = ["example.py"],
+ python_version = "PY3",
+ srcs_version = "PY3",
+ visibility = ["//visibility:public"],
+ deps = [
+ ":google_benchmark",
+ ],
+)
+
diff --git a/bindings/python/google_benchmark/__init__.py b/bindings/python/google_benchmark/__init__.py
new file mode 100644
index 0000000..f31285e
--- /dev/null
+++ b/bindings/python/google_benchmark/__init__.py
@@ -0,0 +1,158 @@
+# Copyright 2020 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Python benchmarking utilities.
+
+Example usage:
+ import google_benchmark as benchmark
+
+ @benchmark.register
+ def my_benchmark(state):
+ ... # Code executed outside the `while` loop is not timed.
+
+ while state:
+ ... # Code executed within the `while` loop is timed.
+
+ if __name__ == '__main__':
+ benchmark.main()
+"""
+
+from absl import app
+from google_benchmark import _benchmark
+from google_benchmark._benchmark import (
+ Counter,
+ kNanosecond,
+ kMicrosecond,
+ kMillisecond,
+ kSecond,
+ oNone,
+ o1,
+ oN,
+ oNSquared,
+ oNCubed,
+ oLogN,
+ oNLogN,
+ oAuto,
+ oLambda,
+)
+
+
+__all__ = [
+ "register",
+ "main",
+ "Counter",
+ "kNanosecond",
+ "kMicrosecond",
+ "kMillisecond",
+ "kSecond",
+ "oNone",
+ "o1",
+ "oN",
+ "oNSquared",
+ "oNCubed",
+ "oLogN",
+ "oNLogN",
+ "oAuto",
+ "oLambda",
+]
+
+__version__ = "0.2.0"
+
+
+class __OptionMaker:
+ """A stateless class to collect benchmark options.
+
+ Collect all decorator calls like @option.range(start=0, limit=1<<5).
+ """
+
+ class Options:
+ """Pure data class storing the option calls, along with the benchmarked function."""
+
+ def __init__(self, func):
+ self.func = func
+ self.builder_calls = []
+
+ @classmethod
+ def make(cls, func_or_options):
+ """Make Options from Options or the benchmarked function."""
+ if isinstance(func_or_options, cls.Options):
+ return func_or_options
+ return cls.Options(func_or_options)
+
+ def __getattr__(self, builder_name):
+ """Append an option call to the Options."""
+
+ # The function that gets returned on @option.range(start=0, limit=1<<5).
+ def __builder_method(*args, **kwargs):
+
+ # The decorator that gets called, either with the benchmarked function
+ # or with the previous Options.
+ def __decorator(func_or_options):
+ options = self.make(func_or_options)
+ options.builder_calls.append((builder_name, args, kwargs))
+ # The decorator returns Options so it is not technically a decorator
+ # and needs a final call to @register.
+ return options
+
+ return __decorator
+
+ return __builder_method
+
+
+# Alias for a nicer API.
+# We have to instantiate an object, even if stateless, to be able to use __getattr__
+# on option.range.
+option = __OptionMaker()
+
+
+def register(undefined=None, *, name=None):
+ """Register a function for benchmarking."""
+ if undefined is None:
+ # The decorator is called without parentheses, so we return a decorator.
+ return lambda f: register(f, name=name)
+
+ # We have either the function to benchmark (simple case) or an instance of Options
+ # (@option._ case).
+ options = __OptionMaker.make(undefined)
+
+ if name is None:
+ name = options.func.__name__
+
+ # We register the benchmark and reproduce all the @option._ calls onto the
+ # benchmark builder pattern
+ benchmark = _benchmark.RegisterBenchmark(name, options.func)
+ for name, args, kwargs in options.builder_calls[::-1]:
+ getattr(benchmark, name)(*args, **kwargs)
+
+ # Return the benchmarked function because the decorator does not modify it.
+ return options.func
+
+
+def _flags_parser(argv):
+ argv = _benchmark.Initialize(argv)
+ return app.parse_flags_with_usage(argv)
+
+
+def _run_benchmarks(argv):
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
+ return _benchmark.RunSpecifiedBenchmarks()
+
+
+def main(argv=None):
+ return app.run(_run_benchmarks, argv=argv, flags_parser=_flags_parser)
+
+
+# Methods for use with custom main function.
+initialize = _benchmark.Initialize
+run_benchmarks = _benchmark.RunSpecifiedBenchmarks
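
A minimal usage sketch of the decorator API defined in __init__.py above. Only register, option, and main come from the module; the benchmark body, its name, and the chosen range are illustrative:

    import google_benchmark as benchmark

    @benchmark.register                        # must be the outermost decorator
    @benchmark.option.range(8, limit=8 << 10)  # recorded by __OptionMaker, replayed onto the C++ builder
    def sum_range(state):                      # hypothetical benchmark body
        while state:
            sum(range(state.range(0)))

    if __name__ == "__main__":
        benchmark.main()

Each @option._ decorator returns an Options record rather than the function itself, which is why @register must always be the last (outermost) decorator applied.
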
diff --git a/bindings/python/google_benchmark/benchmark.cc b/bindings/python/google_benchmark/benchmark.cc
new file mode 100644
index 0000000..d80816e
--- /dev/null
+++ b/bindings/python/google_benchmark/benchmark.cc
@@ -0,0 +1,181 @@
+// Benchmark for Python.
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "pybind11/operators.h"
+#include "pybind11/pybind11.h"
+#include "pybind11/stl.h"
+#include "pybind11/stl_bind.h"
+
+#include "benchmark/benchmark.h"
+
+PYBIND11_MAKE_OPAQUE(benchmark::UserCounters);
+
+namespace {
+namespace py = ::pybind11;
+
+std::vector<std::string> Initialize(const std::vector<std::string>& argv) {
+ // The `argv` pointers here become invalid when this function returns, but
+ // benchmark holds the pointer to `argv[0]`. We create a static copy of it
+ // so it persists, and replace the pointer below.
+ static std::string executable_name(argv[0]);
+ std::vector<char*> ptrs;
+ ptrs.reserve(argv.size());
+ for (auto& arg : argv) {
+ ptrs.push_back(const_cast<char*>(arg.c_str()));
+ }
+ ptrs[0] = const_cast<char*>(executable_name.c_str());
+ int argc = static_cast<int>(argv.size());
+ benchmark::Initialize(&argc, ptrs.data());
+ std::vector<std::string> remaining_argv;
+ remaining_argv.reserve(argc);
+ for (int i = 0; i < argc; ++i) {
+ remaining_argv.emplace_back(ptrs[i]);
+ }
+ return remaining_argv;
+}
+
+benchmark::internal::Benchmark* RegisterBenchmark(const char* name,
+ py::function f) {
+ return benchmark::RegisterBenchmark(
+ name, [f](benchmark::State& state) { f(&state); });
+}
+
+PYBIND11_MODULE(_benchmark, m) {
+ using benchmark::TimeUnit;
+ py::enum_<TimeUnit>(m, "TimeUnit")
+ .value("kNanosecond", TimeUnit::kNanosecond)
+ .value("kMicrosecond", TimeUnit::kMicrosecond)
+ .value("kMillisecond", TimeUnit::kMillisecond)
+ .value("kSecond", TimeUnit::kSecond)
+ .export_values();
+
+ using benchmark::BigO;
+ py::enum_<BigO>(m, "BigO")
+ .value("oNone", BigO::oNone)
+ .value("o1", BigO::o1)
+ .value("oN", BigO::oN)
+ .value("oNSquared", BigO::oNSquared)
+ .value("oNCubed", BigO::oNCubed)
+ .value("oLogN", BigO::oLogN)
+ .value("oNLogN", BigO::oNLogN)
+ .value("oAuto", BigO::oAuto)
+ .value("oLambda", BigO::oLambda)
+ .export_values();
+
+ using benchmark::internal::Benchmark;
+ py::class_<Benchmark>(m, "Benchmark")
+ // For methods returning a pointer to the current object, the reference
+ // return policy is used to ask pybind11 not to take ownership of the
+ // returned object and to avoid calling delete on it.
+ // https://pybind11.readthedocs.io/en/stable/advanced/functions.html#return-value-policies
+ //
+ // For methods taking a const std::vector<...>&, a copy is created
+ // because it is bound to a Python list.
+ // https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html
+ .def("unit", &Benchmark::Unit, py::return_value_policy::reference)
+ .def("arg", &Benchmark::Arg, py::return_value_policy::reference)
+ .def("args", &Benchmark::Args, py::return_value_policy::reference)
+ .def("range", &Benchmark::Range, py::return_value_policy::reference,
+ py::arg("start"), py::arg("limit"))
+ .def("dense_range", &Benchmark::DenseRange,
+ py::return_value_policy::reference, py::arg("start"),
+ py::arg("limit"), py::arg("step") = 1)
+ .def("ranges", &Benchmark::Ranges, py::return_value_policy::reference)
+ .def("args_product", &Benchmark::ArgsProduct,
+ py::return_value_policy::reference)
+ .def("arg_name", &Benchmark::ArgName, py::return_value_policy::reference)
+ .def("arg_names", &Benchmark::ArgNames,
+ py::return_value_policy::reference)
+ .def("range_pair", &Benchmark::RangePair,
+ py::return_value_policy::reference, py::arg("lo1"), py::arg("hi1"),
+ py::arg("lo2"), py::arg("hi2"))
+ .def("range_multiplier", &Benchmark::RangeMultiplier,
+ py::return_value_policy::reference)
+ .def("min_time", &Benchmark::MinTime, py::return_value_policy::reference)
+ .def("iterations", &Benchmark::Iterations,
+ py::return_value_policy::reference)
+ .def("repetitions", &Benchmark::Repetitions,
+ py::return_value_policy::reference)
+ .def("report_aggregates_only", &Benchmark::ReportAggregatesOnly,
+ py::return_value_policy::reference, py::arg("value") = true)
+ .def("display_aggregates_only", &Benchmark::DisplayAggregatesOnly,
+ py::return_value_policy::reference, py::arg("value") = true)
+ .def("measure_process_cpu_time", &Benchmark::MeasureProcessCPUTime,
+ py::return_value_policy::reference)
+ .def("use_real_time", &Benchmark::UseRealTime,
+ py::return_value_policy::reference)
+ .def("use_manual_time", &Benchmark::UseManualTime,
+ py::return_value_policy::reference)
+ .def(
+ "complexity",
+ (Benchmark * (Benchmark::*)(benchmark::BigO)) & Benchmark::Complexity,
+ py::return_value_policy::reference,
+ py::arg("complexity") = benchmark::oAuto);
+
+ using benchmark::Counter;
+ py::class_<Counter> py_counter(m, "Counter");
+
+ py::enum_<Counter::Flags>(py_counter, "Flags")
+ .value("kDefaults", Counter::Flags::kDefaults)
+ .value("kIsRate", Counter::Flags::kIsRate)
+ .value("kAvgThreads", Counter::Flags::kAvgThreads)
+ .value("kAvgThreadsRate", Counter::Flags::kAvgThreadsRate)
+ .value("kIsIterationInvariant", Counter::Flags::kIsIterationInvariant)
+ .value("kIsIterationInvariantRate",
+ Counter::Flags::kIsIterationInvariantRate)
+ .value("kAvgIterations", Counter::Flags::kAvgIterations)
+ .value("kAvgIterationsRate", Counter::Flags::kAvgIterationsRate)
+ .value("kInvert", Counter::Flags::kInvert)
+ .export_values()
+ .def(py::self | py::self);
+
+ py::enum_<Counter::OneK>(py_counter, "OneK")
+ .value("kIs1000", Counter::OneK::kIs1000)
+ .value("kIs1024", Counter::OneK::kIs1024)
+ .export_values();
+
+ py_counter
+ .def(py::init<double, Counter::Flags, Counter::OneK>(),
+ py::arg("value") = 0., py::arg("flags") = Counter::kDefaults,
+ py::arg("k") = Counter::kIs1000)
+ .def(py::init([](double value) { return Counter(value); }))
+ .def_readwrite("value", &Counter::value)
+ .def_readwrite("flags", &Counter::flags)
+ .def_readwrite("oneK", &Counter::oneK);
+ py::implicitly_convertible<py::float_, Counter>();
+ py::implicitly_convertible<py::int_, Counter>();
+
+ py::bind_map<benchmark::UserCounters>(m, "UserCounters");
+
+ using benchmark::State;
+ py::class_<State>(m, "State")
+ .def("__bool__", &State::KeepRunning)
+ .def_property_readonly("keep_running", &State::KeepRunning)
+ .def("pause_timing", &State::PauseTiming)
+ .def("resume_timing", &State::ResumeTiming)
+ .def("skip_with_error", &State::SkipWithError)
+ .def_property_readonly("error_occurred", &State::error_occurred)
+ .def("set_iteration_time", &State::SetIterationTime)
+ .def_property("bytes_processed", &State::bytes_processed,
+ &State::SetBytesProcessed)
+ .def_property("complexity_n", &State::complexity_length_n,
+ &State::SetComplexityN)
+ .def_property("items_processed", &State::items_processed,
+ &State::SetItemsProcessed)
+ .def("set_label", (void (State::*)(const char*)) & State::SetLabel)
+ .def("range", &State::range, py::arg("pos") = 0)
+ .def_property_readonly("iterations", &State::iterations)
+ .def_readwrite("counters", &State::counters)
+ .def_readonly("thread_index", &State::thread_index)
+ .def_readonly("threads", &State::threads);
+
+ m.def("Initialize", Initialize);
+ m.def("RegisterBenchmark", RegisterBenchmark,
+ py::return_value_policy::reference);
+ m.def("RunSpecifiedBenchmarks",
+ []() { benchmark::RunSpecifiedBenchmarks(); });
+};
+} // namespace
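
A rough sketch of driving the _benchmark extension above directly, mirroring what google_benchmark.register does. RegisterBenchmark, Initialize, RunSpecifiedBenchmarks, kMicrosecond, and the builder methods come from benchmark.cc; the fib_30 function and the chosen settings are made up for illustration:

    import sys
    from google_benchmark import _benchmark

    def fib_30(state):  # hypothetical benchmark body
        while state:
            a, b = 0, 1
            for _ in range(30):
                a, b = b, a + b

    # Builder methods return the same Benchmark object (reference return policy),
    # so calls can be chained.
    bench = _benchmark.RegisterBenchmark("fib_30", fib_30)
    bench.unit(_benchmark.kMicrosecond).repetitions(3)

    _benchmark.Initialize(sys.argv)      # consumes --benchmark_* flags, returns the rest
    _benchmark.RunSpecifiedBenchmarks()
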
diff --git a/bindings/python/google_benchmark/example.py b/bindings/python/google_benchmark/example.py
new file mode 100644
index 0000000..9134e8c
--- /dev/null
+++ b/bindings/python/google_benchmark/example.py
@@ -0,0 +1,136 @@
+# Copyright 2020 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Example of using the C++ Google Benchmark framework from Python.
+
+To run this example, you must first install the `google_benchmark` Python package.
+
+To install using `setup.py`, download and extract the `google_benchmark` source.
+In the extracted directory, execute:
+ python setup.py install
+"""
+
+import random
+import time
+
+import google_benchmark as benchmark
+from google_benchmark import Counter
+
+
+@benchmark.register
+def empty(state):
+ while state:
+ pass
+
+
+@benchmark.register
+def sum_million(state):
+ while state:
+ sum(range(1_000_000))
+
+@benchmark.register
+def pause_timing(state):
+ """Pause timing every iteration."""
+ while state:
+ # Construct a list of random ints every iteration without timing it
+ state.pause_timing()
+ random_list = [random.randint(0, 100) for _ in range(100)]
+ state.resume_timing()
+ # Time the in-place sorting algorithm.
+ random_list.sort()
+
+
+@benchmark.register
+def skipped(state):
+ if True: # Test some predicate here.
+ state.skip_with_error("some error")
+ return # NOTE: You must explicitly return, or the benchmark will continue.
+
+ ... # Benchmark code would be here.
+
+
+@benchmark.register
+def manual_timing(state):
+ while state:
+ # Manually measure the elapsed time of this iteration
+ start = time.perf_counter() # perf_counter_ns() in Python 3.7+
+ # Something to benchmark
+ time.sleep(0.01)
+ end = time.perf_counter()
+ state.set_iteration_time(end - start)
+
+
+@benchmark.register
+def custom_counters(state):
+ """Collect custom metrics using benchmark.Counter."""
+ num_foo = 0.0
+ while state:
+ # Benchmark some code here
+ pass
+ # Collect some custom metric named foo
+ num_foo += 0.13
+
+ # Automatic Counter from numbers.
+ state.counters["foo"] = num_foo
+ # Set a counter as a rate.
+ state.counters["foo_rate"] = Counter(num_foo, Counter.kIsRate)
+ # Set a counter as an inverse of rate.
+ state.counters["foo_inv_rate"] = Counter(num_foo, Counter.kIsRate | Counter.kInvert)
+ # Set a counter as a thread-average quantity.
+ state.counters["foo_avg"] = Counter(num_foo, Counter.kAvgThreads)
+ # There's also a combined flag:
+ state.counters["foo_avg_rate"] = Counter(num_foo, Counter.kAvgThreadsRate)
+
+
+@benchmark.register
+@benchmark.option.measure_process_cpu_time()
+@benchmark.option.use_real_time()
+def with_options(state):
+ while state:
+ sum(range(1_000_000))
+
+
+@benchmark.register(name="sum_million_microseconds")
+@benchmark.option.unit(benchmark.kMicrosecond)
+def with_options2(state):
+ while state:
+ sum(range(1_000_000))
+
+
+@benchmark.register
+@benchmark.option.arg(100)
+@benchmark.option.arg(1000)
+def passing_argument(state):
+ while state:
+ sum(range(state.range(0)))
+
+
+@benchmark.register
+@benchmark.option.range(8, limit=8 << 10)
+def using_range(state):
+ while state:
+ sum(range(state.range(0)))
+
+
+@benchmark.register
+@benchmark.option.range_multiplier(2)
+@benchmark.option.range(1 << 10, 1 << 18)
+@benchmark.option.complexity(benchmark.oN)
+def computing_complexity(state):
+ while state:
+ sum(range(state.range(0)))
+ state.complexity_n = state.range(0)
+
+
+if __name__ == "__main__":
+ benchmark.main()
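
As a usage note, flags understood by the underlying C++ library pass straight through: _flags_parser hands argv to _benchmark.Initialize() before absl parses whatever remains. A hypothetical variant of the __main__ block above that runs only one of the registered benchmarks:

    if __name__ == "__main__":
        # --benchmark_filter is a standard Google Benchmark flag; Initialize()
        # strips it before absl sees the remaining argv.
        benchmark.main(["example.py", "--benchmark_filter=sum_million"])
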