author     George Burgess IV <gbiv@google.com>      2016-06-29 12:24:48 -0700
committer  chrome-bot <chrome-bot@chromium.org>     2016-06-29 17:00:29 -0700
commit     2e9f8a097c095ca93052b368ffab4c850d4d3d0f (patch)
tree       82c853bd1c02e41f4d50376c0f402514ad6566d8 /crosperf
parent     7edbfa2e0ef52487ec1286886dcbc7d401528733 (diff)
download   toolchain-utils-2e9f8a097c095ca93052b368ffab4c850d4d3d0f.tar.gz
[crosperf] Add results_report tests, minor refactors.
This adds unit tests for results_report.JSONResultsReport and
results_report.ParseChromeosImage. The tests for the former aren't as
extensive as they could be, but I hope to add more in the future.

This also simplifies ParseChromeosImage, does a few refactors to
JSONResultsReport, and fixes all of `cros lint`'s complaints about
results_report.

BUG=None
TEST=./run_tests.sh

Change-Id: Ice4267a5ff2e10df6690595c6b80db1ee1977d50
Reviewed-on: https://chrome-internal-review.googlesource.com/267665
Commit-Ready: George Burgess <gbiv@google.com>
Tested-by: George Burgess <gbiv@google.com>
Reviewed-by: Caroline Tice <cmtice@google.com>
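For context, the contract that the new tests pin down for ParseChromeosImage
can be summarized in a short sketch. This is illustrative only: the paths are
the made-up ones from the test cases in this change, and the assertions mirror
results_report_unittest.py.

    # Illustrative sketch; inputs and expected outputs are taken from the
    # assertions in results_report_unittest.py, not from real images.
    from results_report import ParseChromeosImage

    local = ('/my/chroot/src/build/images/x86-generic/R01-1.0.date-time'
             '/chromiumos_test_image.bin')
    version, image = ParseChromeosImage(local)
    assert version == 'R01-1.0'  # trailing .date-time is stripped
    assert image == local        # not under /chroot/tmp, so left alone

    # Downloaded official images live under .../chroot/tmp/...; the
    # download prefix is chopped off to make the image name clearer.
    buildbot = ('/my/chroot/chroot/tmp/buildbot-build/R02-1.0.date-time'
                '/chromiumos_test_image.bin')
    version, image = ParseChromeosImage(buildbot)
    assert version == 'R02-1.0'
    assert image == ('/buildbot-build/R02-1.0.date-time'
                     '/chromiumos_test_image.bin')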
Diffstat (limited to 'crosperf')
-rw-r--r--  crosperf/results_report.py           |  79
-rwxr-xr-x  crosperf/results_report_unittest.py  | 230
2 files changed, 275 insertions(+), 34 deletions(-)
diff --git a/crosperf/results_report.py b/crosperf/results_report.py
index db211f1c..e29c0cab 100644
--- a/crosperf/results_report.py
+++ b/crosperf/results_report.py
@@ -5,6 +5,7 @@
from __future__ import print_function
import datetime
+import itertools
import json
import os
@@ -55,27 +56,25 @@ def ParseChromeosImage(chromeos_image):
version, image: The results of parsing the input string, as explained
above.
"""
- version = ''
- real_file = os.path.realpath(os.path.expanduser(chromeos_image))
- pieces = real_file.split('/')
# Find the Chromeos Version, e.g. R45-2345.0.0.....
# chromeos_image should have been something like:
# <path>/<board-trybot-release>/<chromeos-version>/chromiumos_test_image.bin"
- num_pieces = len(pieces)
- if pieces[num_pieces - 1] == 'chromiumos_test_image.bin':
- version = pieces[num_pieces - 2]
- # Find last '.' in the version and chop it off (removing the .datatime
- # piece from local builds).
- loc = version.rfind('.')
- version = version[:loc]
+ if chromeos_image.endswith('/chromiumos_test_image.bin'):
+ full_version = chromeos_image.split('/')[-2]
+ # Strip the date and time off of local builds (which have the format
+ # "R43-2345.0.0.date-and-time").
+ version, _ = os.path.splitext(full_version)
+ else:
+ version = ''
+
# Find the chromeos image. If it's somewhere in .../chroot/tmp/..., then
# it's an official image that got downloaded, so chop off the download path
# to make the official image name more clear.
- loc = real_file.find('/chroot/tmp')
- if loc != -1:
- loc += len('/chroot/tmp')
- real_file = real_file[loc:]
- image = real_file
+ official_image_path = '/chroot/tmp'
+ if official_image_path in chromeos_image:
+ image = chromeos_image.split(official_image_path, 1)[1]
+ else:
+ image = chromeos_image
return version, image
@@ -207,7 +206,7 @@ class ResultsReport(object):
table = tg.GetTable(max(self.PERF_ROWS, row_info[event]))
parsed_columns = self._ParseColumn(columns, ben.iterations)
tf = TableFormatter(table, parsed_columns)
- tf.GenerateCellTable()
+ tf.GenerateCellTable(table_type)
tf.AddColumnName()
tf.AddLabelName()
tf.AddHeader(str(event))
@@ -547,7 +546,13 @@ pre {
class JSONResultsReport(ResultsReport):
- """class to generate JASON report."""
+ """Class that generates JSON reports."""
+
+ @staticmethod
+ def _WriteResultsToFile(filename, results):
+ """Write the results as JSON to the given filename."""
+ with open(filename, 'w') as fp:
+ json.dump(results, fp, indent=2)
def __init__(self, experiment, date=None, time=None):
super(JSONResultsReport, self).__init__(experiment)
@@ -565,25 +570,29 @@ class JSONResultsReport(ResultsReport):
self.date = date
self.time = time
- def GetReport(self, results_dir):
+ def GetReport(self, results_dir, write_results=None):
+ if write_results is None:
+ write_results = JSONResultsReport._WriteResultsToFile
+
self.defaults.ReadDefaultsFile()
final_results = []
board = self.experiment.labels[0].board
+ compiler_string = 'gcc'
for test, test_results in self.ro.result.iteritems():
- for i, label in enumerate(self.ro.labels):
- label_results = test_results[i]
- for j, iter_results in enumerate(label_results):
- json_results = dict()
- json_results['date'] = self.date
- json_results['time'] = self.time
- json_results['board'] = board
- json_results['label'] = label
+ for label, label_results in itertools.izip(self.ro.labels, test_results):
+ for iter_results in label_results:
+ json_results = {
+ 'date': self.date,
+ 'time': self.time,
+ 'board': board,
+ 'label': label
+ }
common_checksum = ''
common_string = ''
- compiler_string = 'gcc'
for l in self.experiment.labels:
if l.name == label:
- ver, img = ParseChromeosImage(l.chromeos_image)
+ img_path = os.path.realpath(os.path.expanduser(l.chromeos_image))
+ ver, img = ParseChromeosImage(img_path)
json_results['chromeos_image'] = img
json_results['chromeos_version'] = ver
json_results['chrome_version'] = l.chrome_version
@@ -597,7 +606,10 @@ class JSONResultsReport(ResultsReport):
common_string = \
self.experiment.machine_manager.machine_checksum_string[l.name]
break
+ else:
+ raise RuntimeError("Label doesn't exist in label_results?")
json_results['test_name'] = test
+
if not iter_results or iter_results['retval'] != 0:
json_results['pass'] = False
else:
@@ -615,8 +627,8 @@ class JSONResultsReport(ResultsReport):
value.append(item)
json_results['overall_result'] = value
# Get detailed results.
- detail_results = dict()
- for k in iter_results.keys():
+ detail_results = {}
+ for k in iter_results:
if k != 'retval':
v = iter_results[k]
if type(v) == list:
@@ -629,9 +641,9 @@ class JSONResultsReport(ResultsReport):
detail_results[k] = [float(d) for d in v]
else:
json_results[k] = v
- if 'machine_checksum' not in json_results.keys():
+ if 'machine_checksum' not in json_results:
json_results['machine_checksum'] = common_checksum
- if 'machine_string' not in json_results.keys():
+ if 'machine_string' not in json_results:
json_results['machine_string'] = common_string
json_results['detailed_results'] = detail_results
final_results.append(json_results)
@@ -639,5 +651,4 @@ class JSONResultsReport(ResultsReport):
filename = 'report_%s_%s_%s.%s.json' % (
board, self.date, self.time.replace(':', '.'), compiler_string)
fullname = os.path.join(results_dir, filename)
- with open(fullname, 'w') as fp:
- json.dump(final_results, fp, indent=2)
+ write_results(fullname, final_results)
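The write_results hook added above is what makes the new unit tests below
possible: GetReport still builds the filename and the result list, but a test
can inject a callback to capture both instead of touching disk. A minimal
sketch of the pattern, assuming 'experiment' is a fully constructed Experiment
(the unittest builds one via ExperimentFactory):

    # Hypothetical usage of the write_results injection point.
    from results_report import JSONResultsReport

    captured = {}
    def grab_results(filename, results):
        # Called in place of JSONResultsReport._WriteResultsToFile.
        captured['filename'] = filename
        captured['results'] = results

    report = JSONResultsReport(experiment, date='1/1/2001', time='01:02:03')
    report.GetReport('/does/not/exist', write_results=grab_results)
    # Nothing was written to disk; 'captured' now holds the JSON report.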
diff --git a/crosperf/results_report_unittest.py b/crosperf/results_report_unittest.py
new file mode 100755
index 00000000..39dcf9e8
--- /dev/null
+++ b/crosperf/results_report_unittest.py
@@ -0,0 +1,230 @@
+#!/usr/bin/python2
+#
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unittest for the results reporter."""
+
+from __future__ import division
+from __future__ import print_function
+
+from StringIO import StringIO
+
+import os
+import test_flag
+import unittest
+
+from benchmark_run import MockBenchmarkRun
+from cros_utils import logger
+from experiment_factory import ExperimentFactory
+from experiment_file import ExperimentFile
+from machine_manager import MockMachineManager
+from results_cache import MockResult
+from results_report import JSONResultsReport
+from results_report import ParseChromeosImage
+
+
+class FreeFunctionsTest(unittest.TestCase):
+ """Tests for any free functions in results_report."""
+
+ def testParseChromeosImage(self):
+ # N.B. the cases with blank versions aren't explicitly supported by
+ # ParseChromeosImage. I'm not sure if they need to be supported, but the
+ # goal of this was to capture existing functionality as much as possible.
+ base_case = '/my/chroot/src/build/images/x86-generic/R01-1.0.date-time' \
+ '/chromiumos_test_image.bin'
+ self.assertEqual(ParseChromeosImage(base_case), ('R01-1.0', base_case))
+
+ dir_base_case = os.path.dirname(base_case)
+ self.assertEqual(ParseChromeosImage(dir_base_case), ('', dir_base_case))
+
+ buildbot_case = '/my/chroot/chroot/tmp/buildbot-build/R02-1.0.date-time' \
+ '/chromiumos_test_image.bin'
+ buildbot_img = buildbot_case.split('/chroot/tmp')[1]
+
+ self.assertEqual(ParseChromeosImage(buildbot_case),
+ ('R02-1.0', buildbot_img))
+ self.assertEqual(ParseChromeosImage(os.path.dirname(buildbot_case)),
+ ('', os.path.dirname(buildbot_img)))
+
+ # Ensure we don't act completely insanely given a few mildly insane paths.
+ fun_case = '/chromiumos_test_image.bin'
+ self.assertEqual(ParseChromeosImage(fun_case), ('', fun_case))
+
+ fun_case2 = 'chromiumos_test_image.bin'
+ self.assertEqual(ParseChromeosImage(fun_case2), ('', fun_case2))
+
+
+# There are many ways for this to be done better, but the linter complains
+# about all of them (that I can think of, at least).
+_fake_path_number = [0]
+def FakePath(ext):
+ """Makes a unique path that shouldn't exist on the host system.
+
+ Each call returns a different path, so if said path finds its way into an
+ error message, it may be easier to track it to its source.
+ """
+ _fake_path_number[0] += 1
+ prefix = '/tmp/should/not/exist/%d/' % (_fake_path_number[0], )
+ return os.path.join(prefix, ext)
+
+
+def MakeMockExperiment(compiler='gcc'):
+ """Mocks an experiment using the given compiler."""
+ mock_experiment_file = StringIO("""
+ board: x86-alex
+ remote: 127.0.0.1
+ perf_args: record -a -e cycles
+ benchmark: PageCycler {
+ iterations: 3
+ }
+
+ image1 {
+ chromeos_image: %s
+ }
+
+ image2 {
+ remote: 127.0.0.2
+ chromeos_image: %s
+ }
+ """ % (FakePath('cros_image1.bin'), FakePath('cros_image2.bin')))
+ efile = ExperimentFile(mock_experiment_file)
+ experiment = ExperimentFactory().GetExperiment(efile,
+ FakePath('working_directory'),
+ FakePath('log_dir'))
+ for label in experiment.labels:
+ label.compiler = compiler
+ return experiment
+
+
+class JSONResultsReportTest(unittest.TestCase):
+ """Tests JSONResultsReport."""
+ REQUIRED_REPORT_KEYS = ('date', 'time', 'board', 'label', 'chromeos_image',
+ 'chromeos_version', 'chrome_version', 'compiler',
+ 'test_name', 'pass')
+
+ # JSONResultsReport.GetReport was initially made to write to disk; unless we
+ # refactor it, testing is... a bit awkward.
+ def _GetResultsFor(self, experiment, results_dir, date=None, time=None):
+ """Gets a JSON report, given an experiment and results_dir.
+
+ Returns [filename, result_as_python_datastructures].
+ """
+ # Linters complain if this isn't populated with precisely two things.
+ test_results = [None, None]
+ def grab_results(filename, results):
+ test_results[0] = filename
+ test_results[1] = results
+ report = JSONResultsReport(experiment, date=date, time=time)
+ report.GetReport(results_dir, write_results=grab_results)
+ self.assertNotIn(None, test_results)
+ return test_results
+
+ def testJSONReportOutputFileNameInfo(self):
+ date, time = '1/1/2001', '01:02:03'
+ results_dir = FakePath('results')
+ experiment = MakeMockExperiment(compiler='gcc')
+ board = experiment.labels[0].board
+ out_path, _ = self._GetResultsFor(experiment, results_dir, date, time)
+
+ self.assertTrue(out_path.startswith(results_dir))
+ self.assertTrue(out_path.endswith('.json'))
+ out_file = out_path[len(results_dir):]
+
+ # This should replace : in time with something else, since : is a path sep.
+ # At the moment, it's '.'.
+ self.assertIn(time.replace(':', '.'), out_file)
+ self.assertIn(date, out_file)
+ self.assertIn(board, out_file)
+ self.assertIn('gcc', out_file)
+
+ out_path, _ = self._GetResultsFor(MakeMockExperiment(compiler='llvm'),
+ results_dir, date, time)
+ self.assertIn('llvm', out_path)
+
+ # Comments say that if *any* compiler used was LLVM, then LLVM must be in
+ # the file name, instead of gcc.
+ experiment = MakeMockExperiment(compiler='gcc')
+ experiment.labels[len(experiment.labels)//2].compiler = 'llvm'
+ out_path, _ = self._GetResultsFor(experiment, results_dir, date, time)
+ self.assertIn('llvm', out_path)
+
+ def _CheckRequiredKeys(self, test_output):
+ for output in test_output:
+ for key in JSONResultsReportTest.REQUIRED_REPORT_KEYS:
+ self.assertIn(key, output)
+
+ def testAllFailedJSONReportOutput(self):
+ _, results = self._GetResultsFor(MakeMockExperiment(), FakePath('results'))
+ self._CheckRequiredKeys(results)
+ # Nothing succeeded; we don't send anything more than what's required.
+ for result in results:
+ self.assertItemsEqual(result.iterkeys(), self.REQUIRED_REPORT_KEYS)
+
+ @staticmethod
+ def _InjectSuccesses(experiment, how_many, keyvals, for_benchmark=0,
+ label=None):
+ if label is None:
+ # Pick an arbitrary label
+ label = experiment.benchmark_runs[0].label
+ bench = experiment.benchmarks[for_benchmark]
+ num_configs = len(experiment.benchmarks) * len(experiment.labels)
+ num_runs = len(experiment.benchmark_runs) // num_configs
+
+ # TODO(gbiv): Centralize the mocking of these, maybe? (It's also done in
+ # benchmark_run_unittest)
+ cache_conditions = []
+ log_level = 'average'
+ share_cache = ''
+ locks_dir = ''
+ log = logger.GetLogger()
+ machine_manager = MockMachineManager(FakePath('chromeos_root'), 0,
+ log_level, locks_dir)
+ machine_manager.AddMachine('testing_machine')
+ machine = next(m for m in machine_manager.GetMachines()
+ if m.name == 'testing_machine')
+
+ def MakeSuccessfulRun(n):
+ run = MockBenchmarkRun('mock_success%d' % (n, ), bench, label,
+ 1 + n + num_runs, cache_conditions,
+ machine_manager, log, log_level, share_cache)
+ mock_result = MockResult(log, label, log_level, machine)
+ mock_result.keyvals = keyvals
+ run.result = mock_result
+ return run
+
+ experiment.benchmark_runs.extend(MakeSuccessfulRun(n)
+ for n in xrange(how_many))
+ return experiment
+
+ def testJSONReportOutputWithSuccesses(self):
+ success_keyvals = {
+ 'retval': 0,
+ 'a_float': '2.3',
+ 'many_floats': [['1.0', '2.0'], ['3.0']],
+ 'machine': "i'm a pirate"
+ }
+
+ # 2 is arbitrary.
+ num_passes = 2
+ # copy success_keyvals so we can catch something trying to mutate it.
+ experiment = self._InjectSuccesses(MakeMockExperiment(), num_passes,
+ dict(success_keyvals))
+ _, results = self._GetResultsFor(experiment, FakePath('results'))
+ self._CheckRequiredKeys(results)
+ non_failures = [r for r in results if r['pass']]
+ self.assertEqual(num_passes, len(non_failures))
+
+ # TODO(gbiv): ...Is the 3.0 *actually* meant to be dropped?
+ expected_detailed = {'a_float': 2.3, 'many_floats': [1.0, 2.0]}
+ for pass_ in non_failures:
+ self.assertIn('detailed_results', pass_)
+ self.assertDictEqual(expected_detailed, pass_['detailed_results'])
+ self.assertIn('machine', pass_)
+ self.assertEqual(success_keyvals['machine'], pass_['machine'])
+
+
+if __name__ == '__main__':
+ test_flag.SetTestMode(True)
+ unittest.main()
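Since the new file is executable (-rwxr-xr-x, python2 shebang) and ends in a
standard unittest.main() call, running it directly from the crosperf directory
should also work, in addition to the TEST=./run_tests.sh invocation from the
commit message:

    $ ./results_report_unittest.py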