aboutsummaryrefslogtreecommitdiff
path: root/crosperf/results_report_unittest.py
diff options
context:
space:
mode:
authorDenis Nikitin <denik@google.com>2019-10-10 22:31:23 -0700
committerDenis Nikitin <denik@chromium.org>2019-10-16 05:29:14 +0000
commitf94608f33f26b8502fd7c26d0df1e71295d75510 (patch)
treec6247e2fdd306a5408ea2e1897421f81075ea106 /crosperf/results_report_unittest.py
parentbf7ee87429d2c9730349ebc22f904b5a3fac7107 (diff)
downloadtoolchain-utils-f94608f33f26b8502fd7c26d0df1e71295d75510.tar.gz
crosperf: Update top stats and cooldown report
Redirect top statistics from benchmark runs into a separate file topstats.log under results_dir directory. Fix "highest 5" usages to show highest usages of a command (instead of a process) per snapshot. Improve mechanism of calculation chrome high CPU load when benchmark is running. Add Cooldown wait time into email report. Fix minor cros lint warnings to unblock repo upload. BUG=chromium:966514 TEST=unittests and HW tests on eve passed. Change-Id: I3999efd554cb5a3b27a2ce3fddb2f20714b434fd Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/1856818 Tested-by: Denis Nikitin <denik@chromium.org> Reviewed-by: George Burgess <gbiv@chromium.org>
Diffstat (limited to 'crosperf/results_report_unittest.py')
-rwxr-xr-x  crosperf/results_report_unittest.py  33
1 file changed, 23 insertions(+), 10 deletions(-)
diff --git a/crosperf/results_report_unittest.py b/crosperf/results_report_unittest.py
index 61e2a7c2..ae51fda6 100755
--- a/crosperf/results_report_unittest.py
+++ b/crosperf/results_report_unittest.py
@@ -10,14 +10,14 @@
from __future__ import division
from __future__ import print_function
-from StringIO import StringIO
-
import collections
-import mock
+import io
import os
-import test_flag
import unittest
+import mock
+import test_flag
+
from benchmark_run import MockBenchmarkRun
from cros_utils import logger
from experiment_factory import ExperimentFactory
@@ -83,7 +83,7 @@ def FakePath(ext):
def MakeMockExperiment(compiler='gcc'):
"""Mocks an experiment using the given compiler."""
- mock_experiment_file = StringIO("""
+ mock_experiment_file = io.BytesIO("""
board: x86-alex
remote: 127.0.0.1
perf_args: record -a -e cycles
@@ -146,6 +146,12 @@ def _InjectSuccesses(experiment, how_many, keyvals, for_benchmark=0,
return experiment
+def _InjectCooldownTime(experiment, cooldown_time):
+ """Inject cooldown wait time in every benchmark run."""
+ for br in experiment.benchmark_runs:
+ br.suite_runner.cooldown_wait_time = cooldown_time
+
+
class TextResultsReportTest(unittest.TestCase):
"""Tests that the output of a text report contains the things we pass in.
@@ -158,11 +164,18 @@ class TextResultsReportTest(unittest.TestCase):
success_keyvals = {'retval': 0, 'machine': 'some bot', 'a_float': 3.96}
experiment = _InjectSuccesses(MakeMockExperiment(), num_success,
success_keyvals)
- text_report = TextResultsReport.FromExperiment(experiment, email=email) \
- .GetReport()
+ # Set 120 sec cooldown time for every benchmark run.
+ cooldown_time = 120
+ _InjectCooldownTime(experiment, cooldown_time)
+ text_report = TextResultsReport.FromExperiment(
+ experiment, email=email).GetReport()
self.assertIn(str(success_keyvals['a_float']), text_report)
self.assertIn(success_keyvals['machine'], text_report)
self.assertIn(MockCrosMachine.CPUINFO_STRING, text_report)
+ self.assertIn('Cooldown wait time', text_report)
+ self.assertIn(
+ '%d min' % (len(experiment.benchmark_runs) * cooldown_time // 60),
+ text_report)
return text_report
def testOutput(self):
@@ -229,7 +242,7 @@ class HTMLResultsReportTest(unittest.TestCase):
_InjectSuccesses(MakeMockExperiment(), num_success, success_keyvals))
self.assertNotIn('no result', output.summary_table)
- #self.assertIn(success_keyvals['machine'], output.summary_table)
+ # self.assertIn(success_keyvals['machine'], output.summary_table)
self.assertIn('a_float', output.summary_table)
self.assertIn(str(success_keyvals['a_float']), output.summary_table)
self.assertIn('a_float', output.full_table)
@@ -418,7 +431,7 @@ class PerfReportParserTest(unittest.TestCase):
}
report_cycles = report['cycles']
self.assertEqual(len(report_cycles), 214)
- for k, v in known_cycles_percentages.iteritems():
+ for k, v in known_cycles_percentages.items():
self.assertIn(k, report_cycles)
self.assertEqual(v, report_cycles[k])
@@ -430,7 +443,7 @@ class PerfReportParserTest(unittest.TestCase):
}
report_instructions = report['instructions']
self.assertEqual(len(report_instructions), 492)
- for k, v in known_instrunctions_percentages.iteritems():
+ for k, v in known_instrunctions_percentages.items():
self.assertIn(k, report_instructions)
self.assertEqual(v, report_instructions[k])