about summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--crosperf/results_organizer.py14
-rw-r--r--crosperf/results_report.py15
2 files changed, 12 insertions, 17 deletions
diff --git a/crosperf/results_organizer.py b/crosperf/results_organizer.py
index 5395d8e4..7dfbbdb5 100644
--- a/crosperf/results_organizer.py
+++ b/crosperf/results_organizer.py
@@ -5,6 +5,9 @@
# found in the LICENSE file.
"""Parse data from benchmark_runs for tabulator."""
+
+from __future__ import print_function
+
import json
import os
import re
@@ -68,8 +71,15 @@ class ResultOrganizer(object):
for test_key in benchmark_run.result.keyvals:
if not show_all_results and not test_key in summary_list:
continue
- result_value = benchmark_run.result.keyvals[test_key]
- cur_dict[test_key] = result_value
+ cur_dict[test_key] = benchmark_run.result.keyvals[test_key]
+ # Occasionally Telemetry tests will not fail but they will not return a
+ # result, either. Look for those cases, and force them to be a fail.
+ # (This can happen if, for example, the test has been disabled.)
+ if len(cur_dict) == 1 and cur_dict['retval'] == 0:
+ cur_dict['retval'] = 1
+ # TODO: This output should be sent via logger.
+ print("WARNING: Test '%s' appears to have succeeded but returned"
+ " no results." % benchmark_name, file=sys.stderr)
if json_report and benchmark_run.machine:
cur_dict['machine'] = benchmark_run.machine.name
cur_dict['machine_checksum'] = benchmark_run.machine.checksum
diff --git a/crosperf/results_report.py b/crosperf/results_report.py
index 9e1e6baa..addc7b56 100644
--- a/crosperf/results_report.py
+++ b/crosperf/results_report.py
@@ -147,26 +147,11 @@ class ResultsReport(object):
cell.header = True
return [[cell]]
- def _FixFalsePositiveTests(self, result, table_type):
- # Occasionally Telemetry tests will not fail but they will not return a
- # result, either. Look for those cases, and force them to be a fail.
- # (This can happen if, for example, the test has been disabled.)
- for k in result:
- for run in result[k]:
- run_dict = run[0]
- if len(run_dict) != 1 or run_dict['retval'] != 0:
- continue
- run_dict['retval'] = 1
- if table_type == 'summary':
- print ("WARNING: Test '%s' appears to have succeeded but returned"
- " no results." % k)
-
def _GetTables(self, labels, benchmark_runs, columns, table_type):
tables = []
ro = ResultOrganizer(benchmark_runs, labels, self.benchmarks)
result = ro.result
label_name = ro.labels
- self._FixFalsePositiveTests(result, table_type)
for item in result:
runs = result[item]
for benchmark in self.benchmarks: