diff options
author | Luis Lozano <llozano@chromium.org> | 2015-11-03 15:54:31 -0800 |
---|---|---|
committer | chrome-bot <chrome-bot@chromium.org> | 2015-11-04 01:32:05 +0000 |
commit | 89d263c7cf9773129cbe8e8858ad21ea539a2ba0 (patch) | |
tree | 62d9f49ab20da1d77d9118b9572a7d9aad72484b | |
parent | 8447c1eae629d107acb3b6802ceb5a9dc1a6cfdb (diff) | |
download | toolchain-utils-89d263c7cf9773129cbe8e8858ad21ea539a2ba0.tar.gz |
Fixed bad key problem while generating the JSON report.
In some weird cases, Telemetry benchmarks incorrectly
return empty performance results. This was causing an invalid-key
error while generating the JSON report.
BUG=chromium:551105
TEST=Forced generation of empty results and verified we don't
hit the missing dictionary key issue.
Change-Id: I32803c573f1d268e010313292b99c0017a5dff92
Reviewed-on: https://chrome-internal-review.googlesource.com/237516
Commit-Ready: Luis Lozano <llozano@chromium.org>
Tested-by: Luis Lozano <llozano@chromium.org>
Reviewed-by: Caroline Tice <cmtice@google.com>
-rw-r--r-- | crosperf/results_organizer.py | 14 | ||||
-rw-r--r-- | crosperf/results_report.py | 15 |
2 files changed, 12 insertions, 17 deletions
diff --git a/crosperf/results_organizer.py b/crosperf/results_organizer.py index 5395d8e4..7dfbbdb5 100644 --- a/crosperf/results_organizer.py +++ b/crosperf/results_organizer.py @@ -5,6 +5,9 @@ # found in the LICENSE file. """Parse data from benchmark_runs for tabulator.""" + +from __future__ import print_function + import json import os import re @@ -68,8 +71,15 @@ class ResultOrganizer(object): for test_key in benchmark_run.result.keyvals: if not show_all_results and not test_key in summary_list: continue - result_value = benchmark_run.result.keyvals[test_key] - cur_dict[test_key] = result_value + cur_dict[test_key] = benchmark_run.result.keyvals[test_key] + # Occasionally Telemetry tests will not fail but they will not return a + # result, either. Look for those cases, and force them to be a fail. + # (This can happen if, for example, the test has been disabled.) + if len(cur_dict) == 1 and cur_dict['retval'] == 0: + cur_dict['retval'] = 1 + # TODO: This output should be sent via logger. + print("WARNING: Test '%s' appears to have succeeded but returned" + " no results." % benchmark_name, file=sys.stderr) if json_report and benchmark_run.machine: cur_dict['machine'] = benchmark_run.machine.name cur_dict['machine_checksum'] = benchmark_run.machine.checksum diff --git a/crosperf/results_report.py b/crosperf/results_report.py index 9e1e6baa..addc7b56 100644 --- a/crosperf/results_report.py +++ b/crosperf/results_report.py @@ -147,26 +147,11 @@ class ResultsReport(object): cell.header = True return [[cell]] - def _FixFalsePositiveTests(self, result, table_type): - # Occasionally Telemetry tests will not fail but they will not return a - # result, either. Look for those cases, and force them to be a fail. - # (This can happen if, for example, the test has been disabled.) 
- for k in result: - for run in result[k]: - run_dict = run[0] - if len(run_dict) != 1 or run_dict['retval'] != 0: - continue - run_dict['retval'] = 1 - if table_type == 'summary': - print ("WARNING: Test '%s' appears to have succeeded but returned" - " no results." % k) - def _GetTables(self, labels, benchmark_runs, columns, table_type): tables = [] ro = ResultOrganizer(benchmark_runs, labels, self.benchmarks) result = ro.result label_name = ro.labels - self._FixFalsePositiveTests(result, table_type) for item in result: runs = result[item] for benchmark in self.benchmarks: |