author    Roman Lebedev <lebedev.ri@gmail.com>  2021-04-24 13:36:38 +0300
committer Roman Lebedev <lebedev.ri@gmail.com>  2021-04-24 13:37:13 +0300
commit    362c2ab9c6cc01948a1adbd88fb16d67f2cb880a (patch)
tree      9e1d6a1448ce0646d343c924735ea74ad620cd08 /tools
parent    c05843a9f622db08ad59804c190f98879b76beba (diff)
[tools] Don't forget to print UTest when printing aggregates only
This probably regressed in #1042.
Diffstat (limited to 'tools')
-rw-r--r-- tools/gbench/report.py | 83 ++++++++++++++++++++++++++++++++-----------
1 file changed, 63 insertions(+), 20 deletions(-)
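
The regression is a classic early-continue pitfall: when include_aggregates_only was set, the old loop skipped the whole iteration for non-aggregate entries, so the U-test output emitted after the measurement loop never ran either. The fix applies De Morgan to the skip condition and gates only the measurement printing under it, letting control fall through to the U-test block. Below is a minimal, self-contained sketch of the two control flows; render_buggy, render_fixed, and the utest/pvalue fields are simplified stand-ins, not the actual report.py API.

# Minimal sketch of the control-flow regression; simplified stand-in
# names, not the actual tools/gbench/report.py implementation.

def render_buggy(benchmarks, include_aggregates_only):
    out = []
    for b in benchmarks:
        if include_aggregates_only and 'run_type' in b:
            if b['run_type'] != 'aggregate':
                # BUG: 'continue' also skips the U-test line below.
                continue
        for m in b['measurements']:
            out.append('%s %s' % (b['name'], m))
        if b.get('utest'):
            out.append('%s_pvalue ...' % b['name'])
    return out

def render_fixed(benchmarks, include_aggregates_only):
    out = []
    for b in benchmarks:
        # Gate only the per-measurement lines (De Morgan of the old
        # skip condition); control always reaches the U-test output.
        if not include_aggregates_only or 'run_type' not in b or b['run_type'] == 'aggregate':
            for m in b['measurements']:
                out.append('%s %s' % (b['name'], m))
        if b.get('utest'):
            out.append('%s_pvalue ...' % b['name'])
    return out

if __name__ == '__main__':
    bench = [{'name': 'BM_Two', 'run_type': 'iteration',
              'measurements': ['+0.1111'], 'utest': True}]
    # Buggy version: the U-test line vanishes when only aggregates
    # are requested; fixed version still prints it.
    print(render_buggy(bench, include_aggregates_only=True))  # []
    print(render_fixed(bench, include_aggregates_only=True))  # ['BM_Two_pvalue ...']
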
diff --git a/tools/gbench/report.py b/tools/gbench/report.py
index bf29492..1b87df2 100644
--- a/tools/gbench/report.py
+++ b/tools/gbench/report.py
@@ -301,26 +301,23 @@ def print_difference_report(
fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
for benchmark in json_diff_report:
# *If* we were asked to only include aggregates,
- # and if it is non-aggregate, then skip it.
- if include_aggregates_only and 'run_type' in benchmark:
- if benchmark['run_type'] != 'aggregate':
- continue
-
- for measurement in benchmark['measurements']:
- output_strs += [color_format(use_color,
- fmt_str,
- BC_HEADER,
- benchmark['name'],
- first_col_width,
- get_color(measurement['time']),
- measurement['time'],
- get_color(measurement['cpu']),
- measurement['cpu'],
- measurement['real_time'],
- measurement['real_time_other'],
- measurement['cpu_time'],
- measurement['cpu_time_other'],
- endc=BC_ENDC)]
+ # and if it is non-aggregate, then don't print it.
+ if not include_aggregates_only or not 'run_type' in benchmark or benchmark['run_type'] == 'aggregate':
+ for measurement in benchmark['measurements']:
+ output_strs += [color_format(use_color,
+ fmt_str,
+ BC_HEADER,
+ benchmark['name'],
+ first_col_width,
+ get_color(measurement['time']),
+ measurement['time'],
+ get_color(measurement['cpu']),
+ measurement['cpu'],
+ measurement['real_time'],
+ measurement['real_time_other'],
+ measurement['cpu_time'],
+ measurement['cpu_time_other'],
+ endc=BC_ENDC)]
# After processing the measurements, if requested and
# if applicable (e.g. u-test exists for given benchmark),
@@ -643,6 +640,52 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(expect_lines[i], parts)
+ def test_json_diff_report_pretty_printing_aggregates_only(self):
+ expect_lines = [
+ ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
+ ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'],
+ ['BM_Two_pvalue',
+ '0.6985',
+ '0.6985',
+ 'U',
+ 'Test,',
+ 'Repetitions:',
+ '2',
+ 'vs',
+ '2.',
+ 'WARNING:',
+ 'Results',
+ 'unreliable!',
+ '9+',
+ 'repetitions',
+ 'recommended.'],
+ ['short_pvalue',
+ '0.7671',
+ '0.1489',
+ 'U',
+ 'Test,',
+ 'Repetitions:',
+ '2',
+ 'vs',
+ '3.',
+ 'WARNING:',
+ 'Results',
+ 'unreliable!',
+ '9+',
+ 'repetitions',
+ 'recommended.'],
+ ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'],
+ ]
+ output_lines_with_header = print_difference_report(
+ self.json_diff_report, include_aggregates_only=True, utest=True, utest_alpha=0.05, use_color=False)
+ output_lines = output_lines_with_header[2:]
+ print("\n")
+ print("\n".join(output_lines_with_header))
+ self.assertEqual(len(output_lines), len(expect_lines))
+ for i in range(0, len(output_lines)):
+ parts = [x for x in output_lines[i].split(' ') if x]
+ self.assertEqual(expect_lines[i], parts)
+
def test_json_diff_report(self):
expected_output = [
{