-rw-r--r--   crosperf/results_cache.py      | 96
-rw-r--r--   crosperf/results_organizer.py  | 32
-rw-r--r--   utils/tabulator.py             | 12
3 files changed, 65 insertions(+), 75 deletions(-)
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index 6239324f..43f0d1e8 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -27,7 +27,6 @@ RESULTS_FILE = "results.txt"
MACHINE_FILE = "machine.txt"
AUTOTEST_TARBALL = "autotest.tbz2"
PERF_RESULTS_FILE = "perf-results.txt"
-TELEMETRY_RESULT_DEFAULTS_FILE = "default-telemetry-results.json"
class Result(object):
  """ This class manages what exactly is stored inside the cache without knowing
@@ -94,81 +93,31 @@ class Result(object):
    return keyvals_dict, units_dict

-  def _GetTelemetryResultsKeyvals(self, keyvals_dict, units_dict):
+  def _AppendTelemetryUnits(self, keyvals_dict, units_dict):
    """
    keyvals_dict is the dictionary of key-value pairs that is used for
    generating Crosperf reports.

-    Telemetry tests return many values (fields) that are not of
-    interest, so we have created a json file that indicates, for each
-    Telemetry benchmark, what the default return fields of interest
-    are.
-
    units_dict is a dictionary of the units for the return values in
-    keyvals_dict. After looking for the keys in the keyvals_dict in
-    the json file of "interesting" default return fields, we append
-    the units to the name of the field, to make the report easier to
-    understand. We don't append the units to the results name earlier,
-    because the units are not part of the field names in the json file.
-
-    This function reads that file into a dictionary, and finds the
-    entry for the current benchmark (if it exists). The entry
-    contains a list of return fields to use in the report. For each
-    field in the default list, we look for the field in the input
-    keyvals_dict, and if we find it we copy the entry into our results
-    dictionary. We then return the results dictionary, which gets used
-    for actually generating the report.
-    """
-
-
-    # Check to see if telemetry_Crosperf succeeded; if not, there's no point
-    # in going further...
-
-    succeeded = False
-    if "telemetry_Crosperf" in keyvals_dict:
-      if keyvals_dict["telemetry_Crosperf"] == "PASS":
-        succeeded = True
+    keyvals_dict. For Telemetry tests we need to associate the units
+    with the return values so that we can include the units in the
+    reports. This function takes each value in keyvals_dict, finds the
+    corresponding unit in units_dict, and replaces the old value with a
+    list of the old value and its units. The ResultOrganizer class later
+    parses these lists when generating the reports.

-    if not succeeded:
-      return keyvals_dict
+    """

-    # Find the Crosperf directory, and look there for the telemetry
-    # results defaults file, if it exists.
    results_dict = {}
-    dirname, basename = misc.GetRoot(sys.argv[0])
-    fullname = os.path.join(dirname, TELEMETRY_RESULT_DEFAULTS_FILE)
-    if os.path.exists (fullname):
-      # Slurp the file into a dictionary. The keys in the dictionary are
-      # the benchmark names. The value for a key is a list containing the
-      # names of all the result fields that should be returned in a 'default'
-      # report.
-      result_defaults = json.load(open(fullname))
-      # Check to see if the current benchmark test actually has an entry in
-      # the dictionary.
-      if self.test_name and self.test_name in result_defaults:
-        result_list = result_defaults[self.test_name]
-        # We have the default results list. Make sure it's not empty...
-        if len(result_list) > 0:
-          # ...look for each default result in the dictionary of actual
-          # result fields returned. If found, add the field and its value
-          # to our final results dictionary.
-          for r in result_list:
-            if r in keyvals_dict:
-              val = keyvals_dict[r]
-              units = units_dict[r]
-              # Add the units to the key name, for the report.
-              newkey = r + " (" + units + ")"
-              results_dict[newkey] = val
-    if len(results_dict) == 0:
-      # We did not find/create any new entries. Therefore use the keyvals_dict
-      # that was passed in, but update the entry names to have the units.
-      for k in keyvals_dict:
-        val = keyvals_dict[k]
-        units = units_dict[k]
-        newkey = k + " (" + units + ")"
-        results_dict[newkey] = val
-    keyvals_dict = results_dict
-    return keyvals_dict
+    for k in keyvals_dict:
+      # We don't want these keys in our reports; they add no useful data.
+      if k == "" or k == "telemetry_Crosperf":
+        continue
+      val = keyvals_dict[k]
+      units = units_dict[k]
+      new_val = [val, units]
+      results_dict[k] = new_val
+    return results_dict

  def _GetKeyvals(self, show_all):
    results_in_chroot = os.path.join(self._chromeos_root,
@@ -197,12 +146,11 @@ class Result(object):
    # Check to see if there is a perf_measurements file and get the
    # data from it if so.
    keyvals_dict, units_dict = self._GetNewKeyvals(keyvals_dict)
-    if not show_all and self.suite == "telemetry_Crosperf":
-      # We're running telemetry tests and the user did not ask to
-      # see all the results, so get the default results, to be used
-      # for generating the report.
-      keyvals_dict = self._GetTelemetryResultsKeyvals(keyvals_dict,
-                                                      units_dict)
+    if self.suite == "telemetry_Crosperf":
+      # For telemetry_Crosperf results, append the units to the return
+      # values, for use in generating the reports.
+      keyvals_dict = self._AppendTelemetryUnits(keyvals_dict,
+                                                units_dict)
    return keyvals_dict

  def _GetResultsDir(self):
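
In effect, the new _AppendTelemetryUnits stops filtering keyvals against the
defaults file and instead pairs every value with its unit. A minimal
standalone sketch of that transformation; the keyval names, values, and units
below are invented for illustration:

def append_telemetry_units(keyvals_dict, units_dict):
  # Same shape as Result._AppendTelemetryUnits: drop the keys that carry
  # no report data, then replace each value with a [value, units] pair.
  results_dict = {}
  for k in keyvals_dict:
    if k in ("", "telemetry_Crosperf"):
      continue
    results_dict[k] = [keyvals_dict[k], units_dict[k]]
  return results_dict

# Invented example input:
keyvals = {"telemetry_Crosperf": "PASS", "page_load_time": "102.3"}
units = {"telemetry_Crosperf": "", "page_load_time": "ms"}
print(append_telemetry_units(keyvals, units))
# {'page_load_time': ['102.3', 'ms']}
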
diff --git a/crosperf/results_organizer.py b/crosperf/results_organizer.py
index 6274a484..a771922f 100644
--- a/crosperf/results_organizer.py
+++ b/crosperf/results_organizer.py
@@ -5,8 +5,14 @@
# found in the LICENSE file.
"""Parse data from benchmark_runs for tabulator."""
+import json
+import os
import re
+import sys
+from utils import misc
+
+TELEMETRY_RESULT_DEFAULTS_FILE = "default-telemetry-results.json"
class ResultOrganizer(object):
"""Create a dict from benchmark_runs.
@@ -39,6 +45,7 @@ class ResultOrganizer(object):
      self.labels.append(label.name)
    for benchmark_run in benchmark_runs:
      benchmark_name = benchmark_run.benchmark.name
+      show_all_results = benchmark_run.benchmark.show_all_results
      if benchmark_name not in self.result:
        self.result[benchmark_name] = []
        while len(self.result[benchmark_name]) < len(labels):
@@ -55,15 +62,40 @@ class ResultOrganizer(object):
      key_filter_on = (benchmark.key_results_only and
                       "PyAutoPerfTest" in benchmark.name + benchmark.test_name
                       and "perf." not in benchmark.test_args)
+      if not show_all_results:
+        summary_list = self._GetSummaryResults(benchmark.test_name)
+        if len(summary_list) > 0:
+          summary_list.append("retval")
+        else:
+          # Did not find test_name in the json file; therefore show everything.
+          show_all_results = True
      for test_key in benchmark_run.result.keyvals:
        if (key_filter_on and
            not any([key for key in self.key_filter if key in test_key])
           ):
          continue
+        if not show_all_results and test_key not in summary_list:
+          continue
        result_value = benchmark_run.result.keyvals[test_key]
        cur_dict[test_key] = result_value
    self._DuplicatePass()

+  def _GetSummaryResults(self, test_name):
+    dirname, _ = misc.GetRoot(sys.argv[0])
+    fullname = os.path.join(dirname, TELEMETRY_RESULT_DEFAULTS_FILE)
+    if os.path.exists(fullname):
+      # Slurp the file into a dictionary. The keys in the dictionary are
+      # the benchmark names. The value for a key is a list containing the
+      # names of all the result fields that should be returned in a 'default'
+      # report.
+      result_defaults = json.load(open(fullname))
+      # Check to see if the current benchmark test actually has an entry in
+      # the dictionary.
+      if test_name in result_defaults:
+        return result_defaults[test_name]
+    # No defaults file, or no entry for this test; show everything.
+    return []
+
  def _DuplicatePass(self):
    for bench, data in self.result.items():
      max_dup = self._GetMaxDup(data)
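
For context, _GetSummaryResults expects default-telemetry-results.json to map
each benchmark name to the list of result fields worth reporting by default.
A small sketch of that lookup; the benchmark and field names here are made up:

import json

# Hypothetical contents of default-telemetry-results.json:
DEFAULTS_JSON = '{"octane": ["Total.Score"], "sunspider": ["Total.Time"]}'

def get_summary_results(test_name):
  # Mirrors ResultOrganizer._GetSummaryResults: an unknown benchmark
  # yields [], which makes the caller set show_all_results = True.
  result_defaults = json.loads(DEFAULTS_JSON)
  return result_defaults.get(test_name, [])

print(get_summary_results("octane"))     # ['Total.Score']
print(get_summary_results("unknown"))    # []
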
diff --git a/utils/tabulator.py b/utils/tabulator.py
index b2cc7d94..3e2b4705 100644
--- a/utils/tabulator.py
+++ b/utils/tabulator.py
@@ -185,14 +185,24 @@ class TableGenerator(object):
    rows = 0
    for k in keys:
      row = [k]
+      unit = None
      for run_list in self._runs:
        v = []
        for run in run_list:
          if k in run:
-            v.append(run[k])
+            if isinstance(run[k], list):
+              val = run[k][0]
+              unit = run[k][1]
+            else:
+              val = run[k]
+            v.append(val)
          else:
            v.append(None)
        row.append(v)
+      # If we got a 'unit' value, append the unit name to the key name.
+      if unit:
+        keyname = row[0] + " (%s) " % unit
+        row[0] = keyname
      table.append(row)
      rows += 1
      if rows == number_of_rows:
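
The tabulator side then unpacks those [value, unit] pairs. A self-contained
sketch of the unpacking logic, with fabricated run data:

def build_row(key, runs):
  # Mirror of the TableGenerator change: accept either a bare value or
  # a [value, unit] pair, and fold the unit into the row label.
  row = [key]
  unit = None
  for run_list in runs:
    v = []
    for run in run_list:
      if key in run:
        if isinstance(run[key], list):
          val, unit = run[key]
        else:
          val = run[key]
        v.append(val)
      else:
        v.append(None)
    row.append(v)
  if unit:
    row[0] = "%s (%s) " % (row[0], unit)
  return row

# Fabricated runs: two labels, the first with two iterations.
runs = [[{"page_load_time": ["101", "ms"]}, {"page_load_time": ["99", "ms"]}],
        [{"page_load_time": ["103", "ms"]}]]
print(build_row("page_load_time", runs))
# ['page_load_time (ms) ', ['101', '99'], ['103']]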