author    Antoine Prouvost <AntoinePrv@users.noreply.github.com>  2020-09-11 05:55:18 -0400
committer GitHub <noreply@github.com>  2020-09-11 10:55:18 +0100
commit    73d4d5e8d6d449fc8663765a42aa8aeeee844489 (patch)
tree      55f32578a2b7ac1ef422aa2d33d73f7fef7e8e20
parent    df9e2948fa7bfca1ddf530ae2b23a518ed55fab1 (diff)
download  google-benchmark-73d4d5e8d6d449fc8663765a42aa8aeeee844489.tar.gz
Bind benchmark builder to Python (#1040)
* Fix setup.py and reformat
* Bind benchmark
* Add benchmark option to Python
* Add Python examples for range, complexity, and thread
* Remove invalid multithreading in Python
* Bump Python bindings version to 0.2.0

Co-authored-by: Dominic Hamon <dominichamon@users.noreply.github.com>
-rw-r--r--  bindings/python/google_benchmark/__init__.py  101
-rw-r--r--  bindings/python/google_benchmark/benchmark.cc  90
-rw-r--r--  bindings/python/google_benchmark/example.py  42
-rw-r--r--  setup.py  96
4 files changed, 270 insertions, 59 deletions
diff --git a/bindings/python/google_benchmark/__init__.py b/bindings/python/google_benchmark/__init__.py
index 7ddbe2f..787c423 100644
--- a/bindings/python/google_benchmark/__init__.py
+++ b/bindings/python/google_benchmark/__init__.py
@@ -29,24 +29,111 @@ Example usage:
from absl import app
from google_benchmark import _benchmark
-from google_benchmark._benchmark import Counter
+from google_benchmark._benchmark import (
+ Counter,
+ kNanosecond,
+ kMicrosecond,
+ kMillisecond,
+ oNone,
+ o1,
+ oN,
+ oNSquared,
+ oNCubed,
+ oLogN,
+ oNLogN,
+ oAuto,
+ oLambda,
+)
+
__all__ = [
"register",
"main",
"Counter",
+ "kNanosecond",
+ "kMicrosecond",
+ "kMillisecond",
+ "oNone",
+ "o1",
+ "oN",
+ "oNSquared",
+ "oNCubed",
+ "oLogN",
+ "oNLogN",
+ "oAuto",
+ "oLambda",
]
-__version__ = "0.1.0"
+__version__ = "0.2.0"
+
+
+class __OptionMaker:
+ """A stateless class to collect benchmark options.
+
+ Collect all decorator calls like @option.range(start=0, limit=1<<5).
+ """
+
+ class Options:
+ """Pure data class to store options calls, along with the benchmarked function."""
+
+ def __init__(self, func):
+ self.func = func
+ self.builder_calls = []
+
+ @classmethod
+ def make(cls, func_or_options):
+ """Make Options from Options or the benchmarked function."""
+ if isinstance(func_or_options, cls.Options):
+ return func_or_options
+ return cls.Options(func_or_options)
+
+ def __getattr__(self, builder_name):
+ """Append option call in the Options."""
+
+ # The function that gets returned on @option.range(start=0, limit=1<<5).
+ def __builder_method(*args, **kwargs):
+ # The decorator that gets called, either with the benchmarked function
+ # or the previous Options.
+ def __decorator(func_or_options):
+ options = self.make(func_or_options)
+ options.builder_calls.append((builder_name, args, kwargs))
+ # The decorator returns Options, so it is not technically a decorator
+ # and needs a final call to @register.
+ return options
-def register(f=None, *, name=None):
- if f is None:
+ return __decorator
+
+ return __builder_method
+
+
+# Alias for nicer API.
+# We have to instantiate an object, even if stateless, to be able to use __getattr__
+# on option.range.
+option = __OptionMaker()
+
+
+def register(undefined=None, *, name=None):
+ """Register function for benchmarking."""
+ if undefined is None:
+ # Decorator is called without parentheses, so we return a decorator.
return lambda f: register(f, name=name)
+
+ # We have either the function to benchmark (simple case) or an instance of Options
+ # (@option._ case).
+ options = __OptionMaker.make(undefined)
+
if name is None:
- name = f.__name__
- _benchmark.RegisterBenchmark(name, f)
- return f
+ name = options.func.__name__
+
+ # We register the benchmark and replay all the @option._ calls onto the
+ # benchmark builder.
+ benchmark = _benchmark.RegisterBenchmark(name, options.func)
+ for name, args, kwargs in options.builder_calls[::-1]:
+ getattr(benchmark, name)(*args, **kwargs)
+
+ # Return the benchmarked function because the decorator does not modify it.
+ return options.func
def _flags_parser(argv):
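For context, the decorator API added above works by collecting every @option._ call into an Options object and replaying the recorded builder calls onto the Benchmark returned by _benchmark.RegisterBenchmark. A minimal usage sketch follows; the benchmark name and body are made up for illustration, and the commented-out lines show a rough low-level equivalent rather than anything in this patch:

    import google_benchmark as benchmark

    @benchmark.register
    @benchmark.option.range_multiplier(2)
    @benchmark.option.range(1 << 10, 1 << 18)
    def sum_range(state):
        while state:
            sum(range(state.range(0)))

    # Rough low-level equivalent of what register() does with the collected calls:
    #   bench = benchmark._benchmark.RegisterBenchmark("sum_range", sum_range)
    #   bench.range_multiplier(2)
    #   bench.range(start=1 << 10, limit=1 << 18)

    if __name__ == "__main__":
        benchmark.main()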
diff --git a/bindings/python/google_benchmark/benchmark.cc b/bindings/python/google_benchmark/benchmark.cc
index 4e8515f..a733339 100644
--- a/bindings/python/google_benchmark/benchmark.cc
+++ b/bindings/python/google_benchmark/benchmark.cc
@@ -1,7 +1,5 @@
// Benchmark for Python.
-#include "benchmark/benchmark.h"
-
#include <map>
#include <string>
#include <vector>
@@ -11,6 +9,8 @@
#include "pybind11/stl.h"
#include "pybind11/stl_bind.h"
+#include "benchmark/benchmark.h"
+
PYBIND11_MAKE_OPAQUE(benchmark::UserCounters);
namespace {
@@ -37,16 +37,82 @@ std::vector<std::string> Initialize(const std::vector<std::string>& argv) {
return remaining_argv;
}
-void RegisterBenchmark(const char* name, py::function f) {
- benchmark::RegisterBenchmark(name,
- [f](benchmark::State& state) { f(&state); });
+benchmark::internal::Benchmark* RegisterBenchmark(const char* name,
+ py::function f) {
+ return benchmark::RegisterBenchmark(
+ name, [f](benchmark::State& state) { f(&state); });
}
PYBIND11_MODULE(_benchmark, m) {
- m.def("Initialize", Initialize);
- m.def("RegisterBenchmark", RegisterBenchmark);
- m.def("RunSpecifiedBenchmarks",
- []() { benchmark::RunSpecifiedBenchmarks(); });
+ using benchmark::TimeUnit;
+ py::enum_<TimeUnit>(m, "TimeUnit")
+ .value("kNanosecond", TimeUnit::kNanosecond)
+ .value("kMicrosecond", TimeUnit::kMicrosecond)
+ .value("kMillisecond", TimeUnit::kMillisecond)
+ .export_values();
+
+ using benchmark::BigO;
+ py::enum_<BigO>(m, "BigO")
+ .value("oNone", BigO::oNone)
+ .value("o1", BigO::o1)
+ .value("oN", BigO::oN)
+ .value("oNSquared", BigO::oNSquared)
+ .value("oNCubed", BigO::oNCubed)
+ .value("oLogN", BigO::oLogN)
+ .value("oNLogN", BigO::oLogN)
+ .value("oAuto", BigO::oAuto)
+ .value("oLambda", BigO::oLambda)
+ .export_values();
+
+ using benchmark::internal::Benchmark;
+ py::class_<Benchmark>(m, "Benchmark")
+ // For methods returning a pointer to the current object, the reference
+ // return policy is used to ask pybind not to take ownership of the
+ // returned object and avoid calling delete on it.
+ // https://pybind11.readthedocs.io/en/stable/advanced/functions.html#return-value-policies
+ //
+ // For methods taking a const std::vector<...>&, a copy is created
+ // because it is bound to a Python list.
+ // https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html
+ .def("unit", &Benchmark::Unit, py::return_value_policy::reference)
+ .def("arg", &Benchmark::Arg, py::return_value_policy::reference)
+ .def("args", &Benchmark::Args, py::return_value_policy::reference)
+ .def("range", &Benchmark::Range, py::return_value_policy::reference,
+ py::arg("start"), py::arg("limit"))
+ .def("dense_range", &Benchmark::DenseRange,
+ py::return_value_policy::reference, py::arg("start"),
+ py::arg("limit"), py::arg("step") = 1)
+ .def("ranges", &Benchmark::Ranges, py::return_value_policy::reference)
+ .def("args_product", &Benchmark::ArgsProduct,
+ py::return_value_policy::reference)
+ .def("arg_name", &Benchmark::ArgName, py::return_value_policy::reference)
+ .def("arg_names", &Benchmark::ArgNames,
+ py::return_value_policy::reference)
+ .def("range_pair", &Benchmark::RangePair,
+ py::return_value_policy::reference, py::arg("lo1"), py::arg("hi1"),
+ py::arg("lo2"), py::arg("hi2"))
+ .def("range_multiplier", &Benchmark::RangeMultiplier,
+ py::return_value_policy::reference)
+ .def("min_time", &Benchmark::MinTime, py::return_value_policy::reference)
+ .def("iterations", &Benchmark::Iterations,
+ py::return_value_policy::reference)
+ .def("repetitions", &Benchmark::Repetitions,
+ py::return_value_policy::reference)
+ .def("report_aggregates_only", &Benchmark::ReportAggregatesOnly,
+ py::return_value_policy::reference, py::arg("value") = true)
+ .def("display_aggregates_only", &Benchmark::DisplayAggregatesOnly,
+ py::return_value_policy::reference, py::arg("value") = true)
+ .def("measure_process_cpu_time", &Benchmark::MeasureProcessCPUTime,
+ py::return_value_policy::reference)
+ .def("use_real_time", &Benchmark::UseRealTime,
+ py::return_value_policy::reference)
+ .def("use_manual_time", &Benchmark::UseManualTime,
+ py::return_value_policy::reference)
+ .def(
+ "complexity",
+ (Benchmark * (Benchmark::*)(benchmark::BigO)) & Benchmark::Complexity,
+ py::return_value_policy::reference,
+ py::arg("complexity") = benchmark::oAuto);
using benchmark::Counter;
py::class_<Counter> py_counter(m, "Counter");
@@ -104,5 +170,11 @@ PYBIND11_MODULE(_benchmark, m) {
.def_readwrite("counters", &State::counters)
.def_readonly("thread_index", &State::thread_index)
.def_readonly("threads", &State::threads);
+
+ m.def("Initialize", Initialize);
+ m.def("RegisterBenchmark", RegisterBenchmark,
+ py::return_value_policy::reference);
+ m.def("RunSpecifiedBenchmarks",
+ []() { benchmark::RunSpecifiedBenchmarks(); });
};
} // namespace
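Because each builder method is bound with return_value_policy::reference, the object handed back to Python is the same underlying Benchmark*, so calls can be chained much like the C++ builder API. A minimal sketch against the low-level module, under that assumption (function and benchmark names are made up for illustration):

    from google_benchmark import _benchmark

    def noop(state):
        while state:
            pass

    # Each call returns the same Benchmark object, so the builder can be chained.
    bench = _benchmark.RegisterBenchmark("noop_chained", noop)
    bench.unit(_benchmark.kMillisecond).iterations(1000).repetitions(3)

    if __name__ == "__main__":
        _benchmark.Initialize(["python"])
        _benchmark.RunSpecifiedBenchmarks()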
diff --git a/bindings/python/google_benchmark/example.py b/bindings/python/google_benchmark/example.py
index 9bb23c4..9134e8c 100644
--- a/bindings/python/google_benchmark/example.py
+++ b/bindings/python/google_benchmark/example.py
@@ -64,7 +64,7 @@ def manual_timing(state):
while state:
# Manually count Python CPU time
start = time.perf_counter() # perf_counter_ns() in Python 3.7+
- # Somehting to benchmark
+ # Something to benchmark
time.sleep(0.01)
end = time.perf_counter()
state.set_iteration_time(end - start)
@@ -92,5 +92,45 @@ def custom_counters(state):
state.counters["foo_avg_rate"] = Counter(num_foo, Counter.kAvgThreadsRate)
+@benchmark.register
+@benchmark.option.measure_process_cpu_time()
+@benchmark.option.use_real_time()
+def with_options(state):
+ while state:
+ sum(range(1_000_000))
+
+
+@benchmark.register(name="sum_million_microseconds")
+@benchmark.option.unit(benchmark.kMicrosecond)
+def with_options2(state):
+ while state:
+ sum(range(1_000_000))
+
+
+@benchmark.register
+@benchmark.option.arg(100)
+@benchmark.option.arg(1000)
+def passing_argument(state):
+ while state:
+ sum(range(state.range(0)))
+
+
+@benchmark.register
+@benchmark.option.range(8, limit=8 << 10)
+def using_range(state):
+ while state:
+ sum(range(state.range(0)))
+
+
+@benchmark.register
+@benchmark.option.range_multiplier(2)
+@benchmark.option.range(1 << 10, 1 << 18)
+@benchmark.option.complexity(benchmark.oN)
+def computing_complexity(state):
+ while state:
+ sum(range(state.range(0)))
+ state.complexity_n = state.range(0)
+
+
if __name__ == "__main__":
benchmark.main()
diff --git a/setup.py b/setup.py
index a63795c..5cdab10 100644
--- a/setup.py
+++ b/setup.py
@@ -12,29 +12,32 @@ from setuptools.command import build_ext
HERE = os.path.dirname(os.path.abspath(__file__))
-IS_WINDOWS = sys.platform.startswith('win')
+IS_WINDOWS = sys.platform.startswith("win")
def _get_version():
"""Parse the version string from __init__.py."""
- with open(os.path.join(
- HERE, 'bindings', 'python', 'google_benchmark', '__init__.py')) as init_file:
+ with open(
+ os.path.join(HERE, "bindings", "python", "google_benchmark", "__init__.py")
+ ) as init_file:
try:
version_line = next(
- line for line in init_file if line.startswith('__version__'))
+ line for line in init_file if line.startswith("__version__")
+ )
except StopIteration:
- raise ValueError('__version__ not defined in __init__.py')
+ raise ValueError("__version__ not defined in __init__.py")
else:
namespace = {}
exec(version_line, namespace) # pylint: disable=exec-used
- return namespace['__version__']
+ return namespace["__version__"]
def _parse_requirements(path):
with open(os.path.join(HERE, path)) as requirements:
return [
- line.rstrip() for line in requirements
- if not (line.isspace() or line.startswith('#'))
+ line.rstrip()
+ for line in requirements
+ if not (line.isspace() or line.startswith("#"))
]
@@ -43,8 +46,9 @@ class BazelExtension(setuptools.Extension):
def __init__(self, name, bazel_target):
self.bazel_target = bazel_target
- self.relpath, self.target_name = (
- posixpath.relpath(bazel_target, '//').split(':'))
+ self.relpath, self.target_name = posixpath.relpath(bazel_target, "//").split(
+ ":"
+ )
setuptools.Extension.__init__(self, name, sources=[])
@@ -58,30 +62,33 @@ class BuildBazelExtension(build_ext.build_ext):
def bazel_build(self, ext):
"""Runs the bazel build to create the package."""
- with open('WORKSPACE', 'r') as workspace:
+ with open("WORKSPACE", "r") as workspace:
workspace_contents = workspace.read()
- with open('WORKSPACE', 'w') as workspace:
- workspace.write(re.sub(
- r'(?<=path = ").*(?=", # May be overwritten by setup\.py\.)',
- sysconfig.get_python_inc().replace(os.path.sep, posixpath.sep),
- workspace_contents))
+ with open("WORKSPACE", "w") as workspace:
+ workspace.write(
+ re.sub(
+ r'(?<=path = ").*(?=", # May be overwritten by setup\.py\.)',
+ sysconfig.get_python_inc().replace(os.path.sep, posixpath.sep),
+ workspace_contents,
+ )
+ )
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
bazel_argv = [
- 'bazel',
- 'build',
+ "bazel",
+ "build",
ext.bazel_target,
- '--symlink_prefix=' + os.path.join(self.build_temp, 'bazel-'),
- '--compilation_mode=' + ('dbg' if self.debug else 'opt'),
+ "--symlink_prefix=" + os.path.join(self.build_temp, "bazel-"),
+ "--compilation_mode=" + ("dbg" if self.debug else "opt"),
]
if IS_WINDOWS:
# Link with python*.lib.
for library_dir in self.library_dirs:
- bazel_argv.append('--linkopt=/LIBPATH:' + library_dir)
+ bazel_argv.append("--linkopt=/LIBPATH:" + library_dir)
self.spawn(bazel_argv)
@@ -89,6 +96,7 @@ class BuildBazelExtension(build_ext.build_ext):
ext_bazel_bin_path = os.path.join(
self.build_temp, 'bazel-bin',
ext.relpath, ext.target_name + shared_lib_suffix)
+
ext_dest_path = self.get_ext_fullpath(ext.name)
ext_dest_dir = os.path.dirname(ext_dest_path)
if not os.path.exists(ext_dest_dir):
@@ -97,32 +105,36 @@ class BuildBazelExtension(build_ext.build_ext):
setuptools.setup(
- name='google_benchmark',
+ name="google_benchmark",
version=_get_version(),
- url='https://github.com/google/benchmark',
- description='A library to benchmark code snippets.',
- author='Google',
- author_email='benchmark-py@google.com',
+ url="https://github.com/google/benchmark",
+ description="A library to benchmark code snippets.",
+ author="Google",
+ author_email="benchmark-py@google.com",
# Contained modules and scripts.
- package_dir={'': 'bindings/python'},
- packages=setuptools.find_packages('bindings/python'),
- install_requires=_parse_requirements('bindings/python/requirements.txt'),
+ package_dir={"": "bindings/python"},
+ packages=setuptools.find_packages("bindings/python"),
+ install_requires=_parse_requirements("bindings/python/requirements.txt"),
cmdclass=dict(build_ext=BuildBazelExtension),
- ext_modules=[BazelExtension(
- 'google_benchmark._benchmark', '//bindings/python/google_benchmark:_benchmark')],
+ ext_modules=[
+ BazelExtension(
+ "google_benchmark._benchmark",
+ "//bindings/python/google_benchmark:_benchmark",
+ )
+ ],
zip_safe=False,
# PyPI package information.
classifiers=[
- 'Development Status :: 4 - Beta',
- 'Intended Audience :: Developers',
- 'Intended Audience :: Science/Research',
- 'License :: OSI Approved :: Apache Software License',
- 'Programming Language :: Python :: 3.6',
- 'Programming Language :: Python :: 3.7',
- 'Programming Language :: Python :: 3.8',
- 'Topic :: Software Development :: Testing',
- 'Topic :: System :: Benchmark',
+ "Development Status :: 4 - Beta",
+ "Intended Audience :: Developers",
+ "Intended Audience :: Science/Research",
+ "License :: OSI Approved :: Apache Software License",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Topic :: Software Development :: Testing",
+ "Topic :: System :: Benchmark",
],
- license='Apache 2.0',
- keywords='benchmark',
+ license="Apache 2.0",
+ keywords="benchmark",
)