path: root/crosperf/results_organizer.py
Diffstat (limited to 'crosperf/results_organizer.py')
-rw-r--r--  crosperf/results_organizer.py  30
1 files changed, 6 insertions, 24 deletions
diff --git a/crosperf/results_organizer.py b/crosperf/results_organizer.py
index 4879caeb..bda0cc17 100644
--- a/crosperf/results_organizer.py
+++ b/crosperf/results_organizer.py
@@ -1,8 +1,6 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Parse data from benchmark_runs for tabulator."""
from __future__ import print_function
@@ -46,7 +44,7 @@ def _GetMaxDup(data):
def _Repeat(func, times):
"""Returns the result of running func() n times."""
- return [func() for _ in range(times)]
+ return [func() for _ in xrange(times)]
def _DictWithReturnValues(retval, pass_fail):
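The _Repeat helper touched in the hunk above calls the supplied callable once per iteration and collects the results, so every element of the returned list is an independent object. A minimal usage sketch (the lambda and the dict literal are illustrative only, and it assumes results_organizer is importable, e.g. when run from the crosperf directory):

    from results_organizer import _Repeat

    # Each call to the lambda produces a fresh dict, so mutating one
    # element of the returned list does not affect the others.
    runs = _Repeat(lambda: {'retval': 0}, 3)
    assert runs == [{'retval': 0}, {'retval': 0}, {'retval': 0}]
    runs[0]['retval'] = 1
    assert runs[1]['retval'] == 0
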
@@ -171,7 +169,6 @@ def OrganizeResults(benchmark_runs, labels, benchmarks=None, json_report=False):
label_names = [label.name for label in labels]
label_indices = {name: i for i, name in enumerate(label_names)}
summary_file = _ReadSummaryFile(sys.argv[0])
-
if benchmarks is None:
benchmarks = []
@@ -185,30 +182,15 @@ def OrganizeResults(benchmark_runs, labels, benchmarks=None, json_report=False):
show_all_results = json_report or benchmark.show_all_results
if not show_all_results:
- summary_list = summary_file.get(benchmark.name)
+ summary_list = summary_file.get(benchmark.test_name)
if summary_list:
- for key in benchmark_run.result.keyvals.keys():
- if any(
- key.startswith(added_key)
- for added_key in ['retval', 'cpufreq', 'cputemp']):
- summary_list.append(key)
+ summary_list.append('retval')
else:
# Did not find test_name in json file; show everything.
show_all_results = True
- if benchmark_run.result.cwp_dso:
- # If we are in cwp approximation mode, we only care about samples
- if 'samples' in benchmark_run.result.keyvals:
- cur_dict['samples'] = benchmark_run.result.keyvals['samples']
- cur_dict['retval'] = benchmark_run.result.keyvals['retval']
- for key, value in benchmark_run.result.keyvals.items():
- if any(
- key.startswith(cpustat_keyword)
- for cpustat_keyword in ['cpufreq', 'cputemp']):
- cur_dict[key] = value
- else:
- for test_key in benchmark_run.result.keyvals:
- if show_all_results or test_key in summary_list:
- cur_dict[test_key] = benchmark_run.result.keyvals[test_key]
+ for test_key in benchmark_run.result.keyvals:
+ if show_all_results or test_key in summary_list:
+ cur_dict[test_key] = benchmark_run.result.keyvals[test_key]
# Occasionally Telemetry tests will not fail but they will not return a
# result, either. Look for those cases, and force them to be a fail.
# (This can happen if, for example, the test has been disabled.)
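
After this change, the per-run keyval handling collapses to a single filtering loop: a key from benchmark_run.result.keyvals is copied into cur_dict only if show_all_results is set or the key appears in the summary list read by _ReadSummaryFile (with 'retval' always appended). A self-contained sketch of that filtering logic, using hypothetical keyvals and summary entries for illustration:

    # Hypothetical inputs; in crosperf these come from
    # benchmark_run.result.keyvals and the summary file.
    keyvals = {'retval': 0, 'page_load_time': 123, 'internal_debug_key': 7}
    summary_list = ['page_load_time', 'retval']
    show_all_results = False

    cur_dict = {}
    for test_key in keyvals:
        if show_all_results or test_key in summary_list:
            cur_dict[test_key] = keyvals[test_key]

    # cur_dict now holds only 'retval' and 'page_load_time';
    # 'internal_debug_key' is dropped unless show_all_results is True.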