author     cmtice <cmtice@google.com>  2014-04-02 14:11:39 -0700
committer  chrome-internal-fetch <chrome-internal-fetch@google.com>  2014-04-04 20:26:55 +0000
commit     44a44befd1f500b9a227ebfd849702efce83ef6a (patch)
tree       4f334cfad1a0433fed595729c0c57eeb2aed0864 /crosperf/results_organizer.py
parent     841f96b7f89dbe6f3e5bb013d211a6f799e4d3ab (diff)
download   toolchain-utils-44a44befd1f500b9a227ebfd849702efce83ef6a.tar.gz
Check for 'significant' results at report generation instead of caching.
The current implementation of the option that masks unimportant results discards the uninteresting results before caching the test results. After further thought, that was not the right design choice. Among other things, it can cause confusing results, such as those seen in issue 357346. We should always cache all the results and do the result filtering during report generation. This CL makes that change.

BUG=357346,357343
TEST=Ran crosperf tests with and without cache hits, and with and without entries in the json file. It all seems to work as expected.

Change-Id: I778e5614c73bf751ebaa2d4606af636275247c60
Reviewed-on: https://chrome-internal-review.googlesource.com/159108
Reviewed-by: Yunlian Jiang <yunlian@google.com>
Commit-Queue: Caroline Tice <cmtice@google.com>
Tested-by: Caroline Tice <cmtice@google.com>
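The idea in brief: every keyval from a run stays in the results cache, and the "important results only" filter is applied later, while the report dict is being assembled, using the per-test lists in default-telemetry-results.json. A rough standalone sketch of that filtering step (the helper names and keyvals below are illustrative only, not code from this CL):

import json
import os


def load_summary_fields(defaults_path, test_name):
  # Hypothetical helper: read the per-test list of "default" result fields.
  # An empty list means no defaults are known, so everything should be shown.
  if not os.path.exists(defaults_path):
    return []
  with open(defaults_path) as f:
    return json.load(f).get(test_name, [])


def filter_for_report(cached_keyvals, summary_fields, show_all=False):
  # All keyvals were cached; non-summary keys are dropped only here, at
  # report-generation time. "retval" is always kept.
  if show_all or not summary_fields:
    return dict(cached_keyvals)
  wanted = set(summary_fields) | set(["retval"])
  return dict((k, v) for k, v in cached_keyvals.items() if k in wanted)

For example, filter_for_report({"retval": 0, "warm.load_time": 3.2, "noise_metric": 1.0}, ["warm.load_time"]) keeps only "warm.load_time" and "retval" in the report, while the full keyval dict remains in the cache for later reports.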
Diffstat (limited to 'crosperf/results_organizer.py')
-rw-r--r--  crosperf/results_organizer.py  32
1 files changed, 32 insertions, 0 deletions
diff --git a/crosperf/results_organizer.py b/crosperf/results_organizer.py
index 6274a484..a771922f 100644
--- a/crosperf/results_organizer.py
+++ b/crosperf/results_organizer.py
@@ -5,8 +5,14 @@
 # found in the LICENSE file.
 
 """Parse data from benchmark_runs for tabulator."""
+import json
+import os
 import re
+import sys
 
+from utils import misc
+
+TELEMETRY_RESULT_DEFAULTS_FILE = "default-telemetry-results.json"
 
 class ResultOrganizer(object):
   """Create a dict from benchmark_runs.
@@ -39,6 +45,7 @@ class ResultOrganizer(object):
       self.labels.append(label.name)
     for benchmark_run in benchmark_runs:
       benchmark_name = benchmark_run.benchmark.name
+      show_all_results = benchmark_run.benchmark.show_all_results
       if benchmark_name not in self.result:
         self.result[benchmark_name] = []
         while len(self.result[benchmark_name]) < len(labels):
@@ -55,15 +62,40 @@ class ResultOrganizer(object):
       key_filter_on = (benchmark.key_results_only and
                        "PyAutoPerfTest" in benchmark.name + benchmark.test_name
                        and "perf." not in benchmark.test_args)
+      if not show_all_results:
+        summary_list = self._GetSummaryResults(benchmark.test_name)
+        if len(summary_list) > 0:
+          summary_list.append("retval")
+        else:
+          # Did not find test_name in json file; therefore show everything.
+          show_all_results = True
       for test_key in benchmark_run.result.keyvals:
         if (key_filter_on and
             not any([key for key in self.key_filter if key in test_key])
             ):
           continue
+        if not show_all_results and test_key not in summary_list:
+          continue
         result_value = benchmark_run.result.keyvals[test_key]
         cur_dict[test_key] = result_value
     self._DuplicatePass()
 
+  def _GetSummaryResults(self, test_name):
+    dirname, _ = misc.GetRoot(sys.argv[0])
+    fullname = os.path.join(dirname, TELEMETRY_RESULT_DEFAULTS_FILE)
+    if os.path.exists(fullname):
+      # Slurp the file into a dictionary. The keys in the dictionary are
+      # the benchmark names. The value for a key is a list containing the
+      # names of all the result fields that should be returned in a 'default'
+      # report.
+      with open(fullname) as defaults_file:
+        result_defaults = json.load(defaults_file)
+      # Check to see if the current benchmark test actually has an entry in
+      # the dictionary.
+      if test_name in result_defaults:
+        return result_defaults[test_name]
+    # No defaults file, or no entry for this test: return an empty list so
+    # the caller falls back to showing all results.
+    return []
+
   def _DuplicatePass(self):
     for bench, data in self.result.items():
       max_dup = self._GetMaxDup(data)
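For context, _GetSummaryResults expects default-telemetry-results.json (which ships alongside crosperf) to map a Telemetry test name to the list of result keys worth reporting by default; the caller then appends "retval" to that list. A small sketch of the lookup with made-up test and field names, not the real contents of the file:

# Hypothetical shape of default-telemetry-results.json; the test names and
# field names below are invented for illustration.
example_defaults = {
    "octane": ["Total.Score", "Richards.Score"],
    "page_cycler.morejs": ["warm_times.page_load_time"],
}


def summary_fields(result_defaults, test_name):
  # Mirrors the lookup in _GetSummaryResults after json.load(): a known test
  # returns its default field list; an unknown test returns [] so the caller
  # falls back to showing all results.
  return result_defaults.get(test_name, [])


print(summary_fields(example_defaults, "octane"))       # ['Total.Score', 'Richards.Score']
print(summary_fields(example_defaults, "speedometer"))  # []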