author    Jian Cai <jiancai@google.com>  2019-09-03 16:28:23 -0700
committer Jian Cai <jiancai@google.com>  2019-09-24 21:28:57 +0000
commit    4d72583ef73e8c3bf7a1ae9dd016ef7aeed09fed (patch)
tree      277eeea0b2b8202b7dea069a753926a7e4c0681f /crosperf
parent    cdd9e34ff16a0f48cc7baab31c88a660a123776f (diff)
download  toolchain-utils-4d72583ef73e8c3bf7a1ae9dd016ef7aeed09fed.tar.gz
crosperf: add per tag story summary when parsing histograms
Add support for collecting results based on story tags in histograms. This
allows us to remove the workaround for loading.desktop and deprecate
chartjson.

BUG=chromium:1000497
TEST=local tests.

Change-Id: I8249d00b3d2d5d761c8a3bc24ef2fd95a3e1115b
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/1783837
Tested-by: Jian Cai <jiancai@google.com>
Legacy-Commit-Queue: Commit Bot <commit-bot@chromium.org>
Commit-Queue: ChromeOS CL Exonerator Bot <chromiumos-cl-exonerator@appspot.gserviceaccount.com>
Reviewed-by: Zhizhou Yang <zhizhouy@google.com>
Reviewed-by: Tiancong Wang <tcwang@google.com>
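For orientation before the diff itself: the change indexes the GenericSet objects of a HistogramSet by guid, resolves each histogram's storyTags diagnostic through that index, and averages sample values both per metric and per metric-and-story-tag key. Below is a minimal, self-contained sketch of that aggregation, assuming a HistogramSet JSON file shaped like the fixture in the unit test further down; the function name parse_histogram_set is illustrative only, and the committed code (Result.ProcessHistogramsResults in results_cache.py) additionally raises RuntimeError on an unrecognized storyTags guid rather than skipping it.

import json


def parse_histogram_set(path):
  """Sketch: per-metric and per-story-tag averages from a HistogramSet."""
  with open(path) as f:
    histograms = json.load(f)

  # GenericSet objects hold lists of story tags, keyed by guid.
  tags_by_guid = {
      obj['guid']: obj['values']
      for obj in histograms
      if obj.get('type') == 'GenericSet'
  }

  keyvals = {}
  for obj in histograms:
    if 'name' not in obj or 'sampleValues' not in obj:
      continue
    vals = obj['sampleValues']
    value = float(sum(vals)) / len(vals) if isinstance(vals, list) else vals
    unit = obj['unit']
    # One summary key per metric, plus one per metric__story_tag.
    keys = [obj['name']]
    guid = obj.get('diagnostics', {}).get('storyTags')
    if guid in tags_by_guid:
      # (The committed code raises RuntimeError on an unknown guid instead.)
      keys += [obj['name'] + '__' + tag for tag in tags_by_guid[guid]]
    for key in keys:
      keyvals.setdefault(key, ([], unit))[0].append(value)

  # Average the collected samples for each key.
  return {k: [sum(v) / len(v), u] for k, (v, u) in keyvals.items()}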
Diffstat (limited to 'crosperf')
-rw-r--r--  crosperf/default-telemetry-results.json |   4
-rw-r--r--  crosperf/results_cache.py               |  53
-rwxr-xr-x  crosperf/results_cache_unittest.py      | 102
3 files changed, 135 insertions, 24 deletions
diff --git a/crosperf/default-telemetry-results.json b/crosperf/default-telemetry-results.json
index 2a38f58f..b5ea45a6 100644
--- a/crosperf/default-telemetry-results.json
+++ b/crosperf/default-telemetry-results.json
@@ -17,8 +17,8 @@
"percentage_smooth__summary"
],
"loading.desktop@@typical": [
- "cold@@timeToFirstContentfulPaint_avg__summary",
- "warm@@timeToFirstContentfulPaint_avg__summary"
+ "timeToFirstContentfulPaint__cache_temperature:cold",
+ "timeToFirstContentfulPaint__cache_temperature:warm"
],
"page_cycler_v2.intl_es_fr_pt-BR": [
"cold_times__page_load_time",
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index 4e7699da..3342e2b2 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -30,9 +30,6 @@ MACHINE_FILE = 'machine.txt'
 AUTOTEST_TARBALL = 'autotest.tbz2'
 PERF_RESULTS_FILE = 'perf-results.txt'
 CACHE_KEYS_FILE = 'cache_keys.txt'
-HISTOGRAMS_BLOCKLIST = {
-    'loading.desktop',
-}
 
 
 class Result(object):
@@ -257,8 +254,7 @@
     return out
 
   def GetResultsFile(self):
-    if self.suite == 'telemetry_Crosperf' and \
-       self.test_name not in HISTOGRAMS_BLOCKLIST:
+    if self.suite == 'telemetry_Crosperf':
       return self.FindFilesInResultsDir('-name histograms.json').splitlines()
     return self.FindFilesInResultsDir('-name results-chart.json').splitlines()
 
@@ -271,8 +267,7 @@
   def GetDataMeasurementsFiles(self):
     result = self.FindFilesInResultsDir('-name perf_measurements').splitlines()
     if not result:
-      if self.suite == 'telemetry_Crosperf' and \
-         self.test_name not in HISTOGRAMS_BLOCKLIST:
+      if self.suite == 'telemetry_Crosperf':
         result = \
             self.FindFilesInResultsDir('-name histograms.json').splitlines()
       else:
@@ -575,6 +570,12 @@
     keyvals = {}
     with open(filename) as f:
       histograms = json.load(f)
+      value_map = {}
+      # Gets generic set values.
+      for obj in histograms:
+        if 'type' in obj and obj['type'] == 'GenericSet':
+          value_map[obj['guid']] = obj['values']
+
       for obj in histograms:
         if 'name' not in obj or 'sampleValues' not in obj:
           continue
@@ -590,30 +591,40 @@
         else:
           result = vals
         unit = obj['unit']
-        if metric_name not in keyvals:
-          keyvals[metric_name] = [[result], unit]
+        diagnostics = obj['diagnostics']
+        # for summaries of benchmarks
+        key = metric_name
+        if key not in keyvals:
+          keyvals[key] = [[result], unit]
         else:
-          # in case the benchmark has multiple stories
-          keyvals[metric_name][0].append(result)
-    for metric_name in keyvals:
-      vals = keyvals[metric_name][0]
-      unit = keyvals[metric_name][1]
+          keyvals[key][0].append(result)
+        # TODO: do we need summaries of stories?
+        # for summaries of story tags
+        if 'storyTags' in diagnostics:
+          guid = diagnostics['storyTags']
+          if guid not in value_map:
+            raise RuntimeError('Unrecognized storyTags in %s ' % (obj))
+          for story_tag in value_map[guid]:
+            key = metric_name + '__' + story_tag
+            if key not in keyvals:
+              keyvals[key] = [[result], unit]
+            else:
+              keyvals[key][0].append(result)
+    # calculate summary
+    for key in keyvals:
+      vals = keyvals[key][0]
+      unit = keyvals[key][1]
       result = float(sum(vals)) / len(vals)
-      keyvals[metric_name] = [result, unit]
+      keyvals[key] = [result, unit]
     return keyvals
 
   def ProcessResults(self, use_cache=False):
     # Note that this function doesn't know anything about whether there is a
     # cache hit or miss. It should process results agnostic of the cache hit
     # state.
-    # FIXME: Properly parse histograms results of the tests in the blocklist
     if (self.results_file and self.suite == 'telemetry_Crosperf' and
-        'histograms.json' in self.results_file[0] and
-        self.test_name not in HISTOGRAMS_BLOCKLIST):
+        'histograms.json' in self.results_file[0]):
       self.keyvals = self.ProcessHistogramsResults()
-    elif (self.results_file and self.suite == 'telemetry_Crosperf' and
-          'histograms.json' in self.results_file[0]):
-      self.keyvals = self.ProcessChartResults()
     elif (self.results_file and self.suite != 'telemetry_Crosperf' and
           'results-chart.json' in self.results_file[0]):
       self.keyvals = self.ProcessChartResults()
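As a sanity check on the unit-test expectations added in the next file, the per-tag averages over the HISTOGRAMSET fixture's sample values work out as follows; this is plain arithmetic on numbers taken from the fixture, not code from the commit:

# Two samples carry the cache_temperature:cold tag.
print((1146.459 + 853.541) / 2)  # -> 1000.0
# Three samples carry the cache_temperature:warm tag.
print((1111.672 + 888.328 + 400.000) / 3)  # -> 800.0
# All five samples carry the 'typical' tag.
print((1111.672 + 1146.459 + 888.328 + 853.541 + 400.000) / 5)  # -> 880.0

These are exactly the values asserted for timeToFirstContentfulPaint__cache_temperature:cold, __cache_temperature:warm, and __typical in the test below.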
diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py
index a5d36383..461384f3 100755
--- a/crosperf/results_cache_unittest.py
+++ b/crosperf/results_cache_unittest.py
@@ -9,10 +9,11 @@
 from __future__ import print_function
 
-import mock
 import os
+import shutil
 import tempfile
 import unittest
+import mock
 
 import image_checksummer
 import machine_manager
@@ -246,6 +247,81 @@ CPUSTATS_DUPL_DATA = {
 
 TMP_DIR1 = '/tmp/tmpAbcXyz'
 
+HISTOGRAMSET = \
+"""
+[
+  {
+    "values": [
+      "cache_temperature_cold",
+      "typical",
+      "cache_temperature:cold"
+    ],
+    "guid": "db6d463b-7c07-4873-b839-db0652ccb97e",
+    "type": "GenericSet"
+  },
+  {
+    "values": [
+      "cache_temperature_warm",
+      "typical",
+      "cache_temperature:warm"
+    ],
+    "guid": "a270eb9d-3bb0-472a-951d-74ac3398b718",
+    "type": "GenericSet"
+  },
+  {
+    "sampleValues": [
+      1111.672
+    ],
+    "name": "timeToFirstContentfulPaint",
+    "diagnostics": {
+      "storyTags": "a270eb9d-3bb0-472a-951d-74ac3398b718"
+    },
+    "unit": "ms_smallerIsBetter"
+  },
+  {
+    "sampleValues": [
+      1146.459
+    ],
+    "name": "timeToFirstContentfulPaint",
+    "diagnostics": {
+      "storyTags": "db6d463b-7c07-4873-b839-db0652ccb97e"
+    },
+    "unit": "ms_smallerIsBetter"
+  },
+  {
+    "sampleValues": [
+      888.328
+    ],
+    "name": "timeToFirstContentfulPaint",
+    "diagnostics": {
+      "storyTags": "a270eb9d-3bb0-472a-951d-74ac3398b718"
+    },
+    "unit": "ms_smallerIsBetter"
+  },
+  {
+    "sampleValues": [
+      853.541
+    ],
+    "name": "timeToFirstContentfulPaint",
+    "diagnostics": {
+      "storyTags": "db6d463b-7c07-4873-b839-db0652ccb97e"
+    },
+    "unit": "ms_smallerIsBetter"
+  },
+  {
+    "sampleValues": [
+      400.000
+    ],
+    "name": "timeToFirstContentfulPaint",
+    "diagnostics": {
+      "storyTags": "a270eb9d-3bb0-472a-951d-74ac3398b718"
+    },
+    "unit": "ms_smallerIsBetter"
+  }
+
+]
+"""
+
 
 class MockResult(Result):
   """Mock result class."""
@@ -900,6 +976,7 @@ class ResultTest(unittest.TestCase):
     def FakeGetSamples():
       return 1
 
+    # Test 1
     self.callGatherPerfResults = False
 
     self.result.GetKeyvals = self.FakeGetKeyvals
@@ -911,11 +988,13 @@
     self.assertEqual(len(self.result.keyvals), 2)
     self.assertEqual(self.result.keyvals, {'Total': 10, 'retval': 0})
 
+    # Test 2
     self.result.retval = 1
     self.result.ProcessResults()
     self.assertEqual(len(self.result.keyvals), 2)
     self.assertEqual(self.result.keyvals, {'Total': 10, 'retval': 1})
 
+    # Test 3
     self.result.cwp_dso = 'chrome'
     self.result.retval = 0
     self.result.GetSamples = FakeGetSamples
@@ -927,6 +1006,27 @@
         'retval': 0
     })
 
+    # Test 4. Parse output of benchmarks with multiple stories in histogram
+    # format
+    self.result.suite = 'telemetry_Crosperf'
+    self.result.results_file = [tempfile.mkdtemp() + '/histograms.json']
+    with open(self.result.results_file[0], 'w') as f:
+      f.write(HISTOGRAMSET)
+    self.result.ProcessResults()
+    shutil.rmtree(os.path.dirname(self.result.results_file[0]))
+    # Verify the summary for the story is correct
+    self.assertEqual(self.result.keyvals['timeToFirstContentfulPaint__typical'],
+                     [880.000, u'ms_smallerIsBetter'])
+    # Verify the summary for a certain story tag is correct
+    self.assertEqual(
+        self.result
+        .keyvals['timeToFirstContentfulPaint__cache_temperature:cold'],
+        [1000.000, u'ms_smallerIsBetter'])
+    self.assertEqual(
+        self.result
+        .keyvals['timeToFirstContentfulPaint__cache_temperature:warm'],
+        [800.000, u'ms_smallerIsBetter'])
+
   @mock.patch.object(Result, 'ProcessCpustatsResults')
   @mock.patch.object(Result, 'ProcessTurbostatResults')
   def test_process_results_with_turbostat_log(self, mock_proc_turbo,