path: root/crosperf/results_organizer.py
Diffstat (limited to 'crosperf/results_organizer.py')
-rw-r--r--  crosperf/results_organizer.py  28
1 file changed, 23 insertions, 5 deletions
diff --git a/crosperf/results_organizer.py b/crosperf/results_organizer.py
index 097c744d..bda0cc17 100644
--- a/crosperf/results_organizer.py
+++ b/crosperf/results_organizer.py
@@ -47,6 +47,17 @@ def _Repeat(func, times):
return [func() for _ in xrange(times)]
+def _DictWithReturnValues(retval, pass_fail):
+ """Create a new dictionary pre-populated with success/fail values."""
+ new_dict = {}
+ # Note: 0 is a valid retval; test to make sure it's not None.
+ if retval is not None:
+ new_dict['retval'] = retval
+ if pass_fail:
+ new_dict[''] = pass_fail
+ return new_dict
+
+
def _GetNonDupLabel(max_dup, runs):
"""Create new list for the runs of the same label.
@@ -61,15 +72,19 @@ def _GetNonDupLabel(max_dup, runs):
"""
new_runs = []
for run in runs:
+ run_retval = run.get('retval', None)
+ run_pass_fail = run.get('', None)
new_run = {}
- added_runs = _Repeat(dict, max_dup)
+ # pylint: disable=cell-var-from-loop
+ added_runs = _Repeat(
+ lambda: _DictWithReturnValues(run_retval, run_pass_fail), max_dup)
for key, value in run.iteritems():
match = _DUP_KEY_REGEX.match(key)
if not match:
new_run[key] = value
else:
new_key, index_str = match.groups()
- added_runs[int(index_str)-1][new_key] = str(value)
+ added_runs[int(index_str) - 1][new_key] = str(value)
new_runs.append(new_run)
new_runs += added_runs
return new_runs
@@ -135,6 +150,7 @@ def _MakeOrganizeResultOutline(benchmark_runs, labels):
result[name] = _Repeat(make_dicts, len(labels))
return result
+
def OrganizeResults(benchmark_runs, labels, benchmarks=None, json_report=False):
"""Create a dict from benchmark_runs.
@@ -180,10 +196,12 @@ def OrganizeResults(benchmark_runs, labels, benchmarks=None, json_report=False):
# (This can happen if, for example, the test has been disabled.)
if len(cur_dict) == 1 and cur_dict['retval'] == 0:
cur_dict['retval'] = 1
+ benchmark_run.result.keyvals['retval'] = 1
# TODO: This output should be sent via logger.
- print("WARNING: Test '%s' appears to have succeeded but returned"
- ' no results.' % benchmark.name,
- file=sys.stderr)
+ print(
+ "WARNING: Test '%s' appears to have succeeded but returned"
+ ' no results.' % benchmark.name,
+ file=sys.stderr)
if json_report and benchmark_run.machine:
cur_dict['machine'] = benchmark_run.machine.name
cur_dict['machine_checksum'] = benchmark_run.machine.checksum
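
For readers skimming the diff, here is a minimal standalone sketch of the behavior the new helper introduces. Only _DictWithReturnValues mirrors the committed code; the sample run dict, the max_dup value, and the inline list comprehension standing in for _Repeat are illustrative assumptions, not part of the change.

def _DictWithReturnValues(retval, pass_fail):
  """Create a new dictionary pre-populated with success/fail values."""
  new_dict = {}
  # Note: 0 is a valid retval; test to make sure it's not None.
  if retval is not None:
    new_dict['retval'] = retval
  if pass_fail:
    new_dict[''] = pass_fail
  return new_dict

# Illustrative run keyvals: a 'retval' of 0 (success), a pass/fail marker under
# the empty-string key, and duplicate-suffixed metrics of the kind that
# _GetNonDupLabel splits out into extra per-iteration dicts.
run = {'retval': 0, '': 'PASS', 'metric{1}': 1.5, 'metric{2}': 1.7}
max_dup = 2

# Before this change the extra per-duplicate dicts started out empty; now each
# one is seeded with the parent run's retval and pass/fail marker.
added_runs = [_DictWithReturnValues(run.get('retval'), run.get(''))
              for _ in range(max_dup)]
print(added_runs)
# -> [{'retval': 0, '': 'PASS'}, {'retval': 0, '': 'PASS'}]

The seeding matters because the duplicate dicts are later treated as runs in their own right; without a retval and pass/fail entry they would look like runs that produced no result at all.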