-rw-r--r--  cros_utils/tabulator.py                    8
-rw-r--r--  crosperf/default-telemetry-results.json   17
-rw-r--r--  crosperf/results_cache.py                 59
3 files changed, 67 insertions(+), 17 deletions(-)
diff --git a/cros_utils/tabulator.py b/cros_utils/tabulator.py
index ed93de7a..e2f27bc4 100644
--- a/cros_utils/tabulator.py
+++ b/cros_utils/tabulator.py
@@ -2,6 +2,7 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+
"""Table generating, analyzing and printing functions.
This defines several classes that are used to generate, analyze and print
@@ -670,6 +671,13 @@ class KeyAwareComparisonResult(ComparisonResult):
"""Automatic key aware comparison."""
def _IsLowerBetter(self, key):
+ # Units in histograms should include directions
+ if 'smallerIsBetter' in key:
+ return True
+ if 'biggerIsBetter' in key:
+ return False
+
+ # For units in chartjson:
# TODO(llozano): Trying to guess direction by looking at the name of the
# test does not seem like a good idea. Test frameworks should provide this
# info explicitly. I believe Telemetry has this info. Need to find it out.
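
The added checks give histogram-style keys an explicit direction: the unit suffix baked into the metric key says whether lower or higher is better, and only chartjson keys still fall through to the name-based guessing described in the TODO above. A minimal standalone sketch of the resulting behavior (is_lower_better is a hypothetical free-function version of KeyAwareComparisonResult._IsLowerBetter, with the legacy fallback stubbed out):

def is_lower_better(key):
  # Histogram metric keys embed their direction in the unit suffix,
  # e.g. 'ms_smallerIsBetter' or 'unitless_biggerIsBetter'.
  if 'smallerIsBetter' in key:
    return True
  if 'biggerIsBetter' in key:
    return False
  # chartjson keys have no suffix; the real method keeps guessing from
  # the test name here. Default to 'higher is better' for this sketch.
  return False
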
diff --git a/crosperf/default-telemetry-results.json b/crosperf/default-telemetry-results.json
index 4f5ccf91..5352c161 100644
--- a/crosperf/default-telemetry-results.json
+++ b/crosperf/default-telemetry-results.json
@@ -68,15 +68,15 @@
"warm@@timeToOnload_avg__summary"
],
"speedometer": [
- "Total__Total",
- "Total__summary"
+ "RunsPerMinute",
+ "Total"
],
"speedometer2": [
- "RunsPerMinute__summary",
- "Total__summary"
+ "RunsPerMinute",
+ "Total"
],
"octane": [
- "Total__Score"
+ "Total.Score"
],
"jsgamebench": [
"Score__Score"
@@ -148,13 +148,12 @@
"warm@@timeToOnload_avg__summary"
],
"kraken": [
- "Total__Total",
- "Total__summary"
+ "Total"
],
"jetstream": [
- "Score__summary"
+ "Score"
],
"cros_ui_smoothness": [
- "ui_percentage_smooth__summary"
+ "ui_percentage_smooth"
]
}
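
The renamed defaults follow the metric names that appear in histogram output rather than the chartjson "<graph>__<trace>" keys such as "Total__summary". A purely illustrative example of the records the new parser in results_cache.py consumes, written as a Python literal and limited to the three fields it reads ('name', 'sampleValues', 'unit'); the concrete values are made up:

speedometer2_histograms = [
    {'name': 'RunsPerMinute', 'sampleValues': [88.4, 90.1],
     'unit': 'unitless_biggerIsBetter'},
    {'name': 'Total', 'sampleValues': [612.0, 598.5],
     'unit': 'ms_smallerIsBetter'},
]
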
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index bef78cb4..54569808 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -251,6 +251,8 @@ class Result(object):
return out
def GetResultsFile(self):
+ if self.suite == 'telemetry_Crosperf':
+ return self.FindFilesInResultsDir('-name histograms.json').splitlines()
return self.FindFilesInResultsDir('-name results-chart.json').splitlines()
def GetPerfDataFiles(self):
@@ -262,8 +264,12 @@ class Result(object):
def GetDataMeasurementsFiles(self):
result = self.FindFilesInResultsDir('-name perf_measurements').splitlines()
if not result:
- result = \
- self.FindFilesInResultsDir('-name results-chart.json').splitlines()
+ if self.suite == 'telemetry_Crosperf':
+ result = \
+ self.FindFilesInResultsDir('-name histograms.json').splitlines()
+ else:
+ result = \
+ self.FindFilesInResultsDir('-name results-chart.json').splitlines()
return result
def _CheckDebugPath(self, option, path):
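
Both GetResultsFile and the GetDataMeasurementsFiles fallback now branch on the suite, so telemetry_Crosperf runs look for histograms.json while every other suite keeps results-chart.json. A compact sketch of just that selection (hypothetical helper; FindFilesInResultsDir is what actually locates the file under the results directory):

def results_filename(suite):
  # telemetry_Crosperf emits Telemetry histograms; other suites still
  # produce the chartjson file.
  if suite == 'telemetry_Crosperf':
    return 'histograms.json'
  return 'results-chart.json'
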
@@ -367,16 +373,15 @@ class Result(object):
# Grab keyvals from the directory.
self.ProcessResults()
- def ProcessJsonResults(self):
+ def ProcessChartResults(self):
# Open and parse the json results file generated by telemetry/test_that.
if not self.results_file:
raise IOError('No results file found.')
filename = self.results_file[0]
if not filename.endswith('.json'):
raise IOError('Attempt to call json on non-json file: %s' % filename)
-
if not os.path.exists(filename):
- return {}
+ raise IOError('%s does not exist' % filename)
keyvals = {}
with open(filename, 'r') as f:
@@ -406,13 +411,51 @@ class Result(object):
keyvals[keyname] = new_value
return keyvals
+ def ProcessHistogramsResults(self):
+ # Open and parse the json results file generated by telemetry/test_that.
+ if not self.results_file:
+ raise IOError('No results file found.')
+ filename = self.results_file[0]
+ if not filename.endswith('.json'):
+ raise IOError('Attempt to call json on non-json file: %s' % filename)
+ if not os.path.exists(filename):
+ raise IOError('%s does not exist' % filename)
+
+ keyvals = {}
+ with open(filename) as f:
+ histograms = json.load(f)
+ for obj in histograms:
+ if 'name' not in obj or 'sampleValues' not in obj:
+ continue
+ metric_name = obj['name']
+ vals = obj['sampleValues']
+ if isinstance(vals, list):
+ result = float(sum(vals)) / len(vals)
+ else:
+ result = vals
+ unit = obj['unit']
+ if metric_name not in keyvals:
+ keyvals[metric_name] = [[result], unit]
+ else:
+ # in case the benchmark has multiple stories
+ keyvals[metric_name][0].append(result)
+ for metric_name in keyvals:
+ vals = keyvals[metric_name][0]
+ unit = keyvals[metric_name][1]
+ result = float(sum(vals)) / len(vals)
+ keyvals[metric_name] = [result, unit]
+ return keyvals
+
def ProcessResults(self, use_cache=False):
# Note that this function doesn't know anything about whether there is a
# cache hit or miss. It should process results agnostic of the cache hit
# state.
- if self.results_file and self.results_file[0].find(
- 'results-chart.json') != -1:
- self.keyvals = self.ProcessJsonResults()
+ if self.results_file and self.suite == 'telemetry_Crosperf' and \
+ 'histograms.json' in self.results_file[0]:
+ self.keyvals = self.ProcessHistogramsResults()
+ elif self.results_file and self.suite != 'telemetry_Crosperf' and \
+ 'results-chart.json' in self.results_file[0]:
+ self.keyvals = self.ProcessChartResults()
else:
if not use_cache:
print('\n ** WARNING **: Had to use deprecated output-method to '
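
The new ProcessHistogramsResults averages in two stages: each record's sampleValues list is collapsed to a mean, and records that share a metric name (one per story) are then averaged again into a single [value, unit] pair. A hedged standalone sketch of that aggregation (hypothetical helper name; the argument is the already-parsed histograms.json list):

def summarize_histograms(histograms):
  # Stage 1: one mean per record, grouped by metric name.
  keyvals = {}
  for obj in histograms:
    if 'name' not in obj or 'sampleValues' not in obj:
      continue
    vals = obj['sampleValues']
    mean = float(sum(vals)) / len(vals) if isinstance(vals, list) else vals
    keyvals.setdefault(obj['name'], ([], obj['unit']))[0].append(mean)
  # Stage 2: average the per-story means into a single [value, unit].
  return {name: [float(sum(means)) / len(means), unit]
          for name, (means, unit) in keyvals.items()}

For example, two Speedometer2 stories whose 'Total' records average to 612.0 and 598.5 would be summarized as [605.25, 'ms_smallerIsBetter'].
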