author     Denis Nikitin <denik@google.com>    2019-10-10 22:31:23 -0700
committer  Denis Nikitin <denik@chromium.org>  2019-10-16 05:29:14 +0000
commit     f94608f33f26b8502fd7c26d0df1e71295d75510 (patch)
tree       c6247e2fdd306a5408ea2e1897421f81075ea106
parent     bf7ee87429d2c9730349ebc22f904b5a3fac7107 (diff)
download   toolchain-utils-f94608f33f26b8502fd7c26d0df1e71295d75510.tar.gz
crosperf: Update top stats and cooldown report
Redirect top statistics from benchmark runs into a separate file,
topstats.log, under the results_dir directory.
Fix "highest 5" usages to show the highest usages of a command
(instead of a process) per snapshot.
Improve the mechanism for detecting high chrome CPU load while a
benchmark is running.
Add cooldown wait time to the email report.
Fix minor cros lint warnings to unblock repo upload.

BUG=chromium:966514
TEST=unittests and HW tests on eve passed.

Change-Id: I3999efd554cb5a3b27a2ce3fddb2f20714b434fd
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/1856818
Tested-by: Denis Nikitin <denik@chromium.org>
Reviewed-by: George Burgess <gbiv@chromium.org>
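The core of the "highest 5" fix is visible in the results_cache.py hunk
further down: CPU usage is first summed per command within each top
snapshot, and a snapshot only contributes to the statistics when the
combined "chrome" load exceeds a high-load threshold. A minimal standalone
sketch of that aggregation (not the patched crosperf code itself; snapshot
entries are assumed to be dicts with 'cmd' and 'cpu_use' keys, as in the
patch):

    import collections
    import heapq

    CHROME_HIGH_CPU_LOAD = 90  # same threshold value as in the patch

    def top_cmds(snapshots):
      """Aggregate CPU usage per command over high-load top snapshots."""
      high_load_snapshots = 0
      total_cpu_use = collections.defaultdict(float)    # total CPU% per command
      per_snapshot_use = collections.defaultdict(list)  # per-snapshot CPU% per command

      for processes in snapshots:
        # Sum CPU usage per command within this snapshot, so several processes
        # of the same command (e.g. multiple chrome processes) count as one.
        per_cmd = collections.defaultdict(float)
        for proc in processes:
          per_cmd[proc['cmd']] += proc['cpu_use']
        # Only count snapshots where the combined chrome load exceeds the
        # threshold, i.e. the benchmark is most likely running.
        if per_cmd.get('chrome', 0.0) > CHROME_HIGH_CPU_LOAD:
          high_load_snapshots += 1
          for cmd, cpu in per_cmd.items():
            total_cpu_use[cmd] += cpu
            heapq.heappush(per_snapshot_use[cmd], round(cpu, 1))

      # Commands ordered by descending total CPU usage.
      return [{
          'cmd': cmd,
          'cpu_avg': usage / high_load_snapshots,
          'count': len(per_snapshot_use[cmd]),
          'top5': heapq.nlargest(5, per_snapshot_use[cmd]),
      } for cmd, usage in sorted(
          total_cpu_use.items(), key=lambda x: x[1], reverse=True)]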
-rw-r--r--crosperf/benchmark_run.py21
-rwxr-xr-xcrosperf/benchmark_run_unittest.py85
-rw-r--r--crosperf/experiment_runner.py15
-rwxr-xr-xcrosperf/experiment_runner_unittest.py24
-rw-r--r--crosperf/results_cache.py79
-rwxr-xr-xcrosperf/results_cache_unittest.py79
-rw-r--r--crosperf/results_report.py13
-rwxr-xr-xcrosperf/results_report_unittest.py33
-rw-r--r--crosperf/schedv2.py6
9 files changed, 203 insertions, 152 deletions
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
index 0147599d..6512b8ea 100644
--- a/crosperf/benchmark_run.py
+++ b/crosperf/benchmark_run.py
@@ -91,25 +91,6 @@ class BenchmarkRun(threading.Thread):
self.cache_hit = (self.result is not None)
self.cache_has_been_read = True
- def PrintTop5Cmds(self, topcmds):
- """Print top 5 commands into log."""
-
- self._logger.LogOutput('%s' % str(self))
- self._logger.LogOutput('Top 5 commands with highest CPU usage:')
- # Header.
- print_line = '%20s %9s %6s %s' % ('COMMAND', 'AVG CPU%', 'COUNT',
- 'HIGHEST 5')
- self._logger.LogOutput(print_line)
- self._logger.LogOutput('-' * 50)
- if topcmds:
- for topcmd in topcmds[:5]:
- print_line = '%20s %9.2f %6s %s' % (topcmd['cmd'], topcmd['cpu_avg'],
- topcmd['count'], topcmd['top5'])
- self._logger.LogOutput(print_line)
- else:
- self._logger.LogOutput('[NO DATA FROM THE TOP LOG]')
- self._logger.LogOutput('-' * 50)
-
def run(self):
try:
if not self.cache_has_been_read:
@@ -138,8 +119,6 @@ class BenchmarkRun(threading.Thread):
self.cache.machine = self.machine
self.result = self.RunTest(self.machine)
# TODO(denik): Add Top5 report into html.
- if self.result:
- self.PrintTop5Cmds(self.result.GetTopCmds())
self.cache.remote = self.machine.name
self.label.chrome_version = self.machine_manager.GetChromeVersion(
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
index 98bb96cd..51b287cf 100755
--- a/crosperf/benchmark_run_unittest.py
+++ b/crosperf/benchmark_run_unittest.py
@@ -206,7 +206,6 @@ class BenchmarkRunTest(unittest.TestCase):
br.ReadCache = FakeReadCache
br.RunTest = FakeRunTest
br.AcquireMachine = FakeAcquireMachine
- br.PrintTop5Cmds = mock.Mock()
# First test: No cache hit, all goes well.
ResetTestValues()
@@ -219,7 +218,6 @@ class BenchmarkRunTest(unittest.TestCase):
])
self.assertEqual(len(self.log_error), 0)
self.assertEqual(self.status, ['WAITING', 'SUCCEEDED'])
- br.PrintTop5Cmds.assert_called_once()
# Second test: No cached result found; test run was "terminated" for some
# reason.
@@ -436,89 +434,6 @@ class BenchmarkRunTest(unittest.TestCase):
br.SetCacheConditions(self.test_cache_conditions)
self.assertEqual(br.cache_conditions, self.test_cache_conditions)
- def test_print_top5_cmds(self):
- """Test print of top5 commands."""
- topcmds = [
- {
- 'cmd': 'chrome',
- 'cpu_avg': 119.753453465,
- 'count': 4,
- 'top5': [122.8, 107.9, 17.8, 1.0],
- },
- {
- 'cmd': 'irq/230-cros-ec',
- 'cpu_avg': 10.000000000000001,
- 'count': 1000,
- 'top5': [0.5, 0.4, 0.3, 0.2, 0.1],
- },
- {
- 'cmd': 'powerd',
- 'cpu_avg': 2.0,
- 'count': 2,
- 'top5': [3.0, 1.0]
- },
- {
- 'cmd': 'cmd1',
- 'cpu_avg': 1.0,
- 'count': 1,
- 'top5': [1.0],
- },
- {
- 'cmd': 'cmd2',
- 'cpu_avg': 1.0,
- 'count': 1,
- 'top5': [1.0],
- },
- {
- 'cmd': 'not_for_print',
- 'cpu_avg': 1.0,
- 'count': 1,
- 'top5': [1.0],
- },
- ]
- mock_logger = mock.Mock()
- br = benchmark_run.BenchmarkRun(
- 'test_run', self.test_benchmark, self.test_label, 1,
- self.test_cache_conditions, self.mock_machine_manager, mock_logger,
- 'average', '', {})
- br.PrintTop5Cmds(topcmds)
- # pylint: disable=line-too-long
- self.assertEqual(mock_logger.LogOutput.call_args_list, [
- mock.call('BenchmarkRun[name="test_run"]'),
- mock.call('Top 5 commands with highest CPU usage:'),
- mock.call(' COMMAND AVG CPU% COUNT HIGHEST 5'),
- mock.call('-' * 50),
- mock.call(
- ' chrome 119.75 4 [122.8, 107.9, 17.8, 1.0]'
- ),
- mock.call(
- ' irq/230-cros-ec 10.00 1000 [0.5, 0.4, 0.3, 0.2, 0.1]'
- ),
- mock.call(' powerd 2.00 2 [3.0, 1.0]'),
- mock.call(' cmd1 1.00 1 [1.0]'),
- mock.call(' cmd2 1.00 1 [1.0]'),
- mock.call('-' * 50),
- ])
- # pylint: enable=line-too-long
-
- def test_print_top5_calls_no_data(self):
- """Test print of top5 with no data."""
- topcmds = []
- mock_logger = mock.Mock()
- br = benchmark_run.BenchmarkRun(
- 'test_run', self.test_benchmark, self.test_label, 1,
- self.test_cache_conditions, self.mock_machine_manager, mock_logger,
- 'average', '', {})
- br.PrintTop5Cmds(topcmds)
- self.assertEqual(mock_logger.LogOutput.call_args_list, [
- mock.call('BenchmarkRun[name="test_run"]'),
- mock.call('Top 5 commands with highest CPU usage:'),
- mock.call(' COMMAND AVG CPU% COUNT HIGHEST 5'),
- mock.call('-' * 50),
- mock.call('[NO DATA FROM THE TOP LOG]'),
- mock.call('-' * 50),
- ])
-
if __name__ == '__main__':
unittest.main()
diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py
index 1e2c3142..cb6e9785 100644
--- a/crosperf/experiment_runner.py
+++ b/crosperf/experiment_runner.py
@@ -282,11 +282,24 @@ class ExperimentRunner(object):
self.l.LogOutput('Storing results of each benchmark run.')
for benchmark_run in experiment.benchmark_runs:
if benchmark_run.result:
- benchmark_run_name = filter(str.isalnum, benchmark_run.name)
+ benchmark_run_name = ''.join(
+ ch for ch in benchmark_run.name if ch.isalnum())
benchmark_run_path = os.path.join(results_directory, benchmark_run_name)
benchmark_run.result.CopyResultsTo(benchmark_run_path)
benchmark_run.result.CleanUp(benchmark_run.benchmark.rm_chroot_tmp)
+ topstats_file = os.path.join(results_directory, 'topstats.log')
+ self.l.LogOutput('Storing top5 statistics of each benchmark run into %s.' %
+ topstats_file)
+ with open(topstats_file, 'w') as top_fd:
+ for benchmark_run in experiment.benchmark_runs:
+ if benchmark_run.result:
+ # Header with benchmark run name.
+ top_fd.write('%s\n' % str(benchmark_run))
+ # Formatted string with top statistics.
+ top_fd.write(benchmark_run.result.FormatStringTop5())
+ top_fd.write('\n\n')
+
def Run(self):
try:
self._Run(self._experiment)
diff --git a/crosperf/experiment_runner_unittest.py b/crosperf/experiment_runner_unittest.py
index caac4265..2ec11ccd 100755
--- a/crosperf/experiment_runner_unittest.py
+++ b/crosperf/experiment_runner_unittest.py
@@ -407,8 +407,11 @@ class ExperimentRunnerTest(unittest.TestCase):
@mock.patch.object(TextResultsReport, 'FromExperiment')
@mock.patch.object(Result, 'CopyResultsTo')
@mock.patch.object(Result, 'CleanUp')
- def test_store_results(self, mock_cleanup, mock_copy, _mock_text_report,
- mock_report, mock_writefile, mock_mkdir, mock_rmdir):
+ @mock.patch.object(Result, 'FormatStringTop5')
+ @mock.patch('__builtin__.open', new_callable=mock.mock_open)
+ def test_store_results(self, mock_open, mock_top5, mock_cleanup, mock_copy,
+ _mock_text_report, mock_report, mock_writefile,
+ mock_mkdir, mock_rmdir):
self.mock_logger.Reset()
self.exp.results_directory = '/usr/local/crosperf-results'
@@ -434,6 +437,8 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(mock_mkdir.call_count, 0)
self.assertEqual(mock_rmdir.call_count, 0)
self.assertEqual(self.mock_logger.LogOutputCount, 0)
+ self.assertEqual(mock_open.call_count, 0)
+ self.assertEqual(mock_top5.call_count, 0)
# Test 2. _terminated is false; everything works properly.
fake_result = Result(self.mock_logger, self.exp.labels[0], 'average',
@@ -458,13 +463,24 @@ class ExperimentRunnerTest(unittest.TestCase):
mock_mkdir.assert_called_with('/usr/local/crosperf-results')
self.assertEqual(mock_rmdir.call_count, 1)
mock_rmdir.assert_called_with('/usr/local/crosperf-results')
- self.assertEqual(self.mock_logger.LogOutputCount, 4)
+ self.assertEqual(self.mock_logger.LogOutputCount, 5)
self.assertEqual(self.mock_logger.output_msgs, [
'Storing experiment file in /usr/local/crosperf-results.',
'Storing results report in /usr/local/crosperf-results.',
'Storing email message body in /usr/local/crosperf-results.',
- 'Storing results of each benchmark run.'
+ 'Storing results of each benchmark run.',
+ 'Storing top5 statistics of each benchmark run into'
+ ' /usr/local/crosperf-results/topstats.log.',
])
+ self.assertEqual(mock_open.call_count, 1)
+ # Check write to a topstats.log file.
+ mock_open.assert_called_with('/usr/local/crosperf-results/topstats.log',
+ 'w')
+ mock_open().write.assert_called()
+
+ # Check top5 calls with no arguments.
+ top5calls = [mock.call()] * 6
+ self.assertEqual(mock_top5.call_args_list, top5calls)
if __name__ == '__main__':
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index 135c7687..977e3e22 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -8,6 +8,7 @@
from __future__ import division
from __future__ import print_function
+import collections
import glob
import hashlib
import heapq
@@ -73,6 +74,27 @@ class Result(object):
"""Get the list of top commands consuming CPU on the machine."""
return self.top_cmds
+ def FormatStringTop5(self):
+ """Get formatted top5 string.
+
+ Get the formatted string with top5 commands consuming CPU on DUT machine.
+ """
+ format_list = [
+ 'Top 5 commands with highest CPU usage:',
+ # Header.
+ '%20s %9s %6s %s' % ('COMMAND', 'AVG CPU%', 'COUNT', 'HIGHEST 5'),
+ '-' * 50,
+ ]
+ if self.top_cmds:
+ for topcmd in self.top_cmds[:5]:
+ print_line = '%20s %9.2f %6s %s' % (topcmd['cmd'], topcmd['cpu_avg'],
+ topcmd['count'], topcmd['top5'])
+ format_list.append(print_line)
+ else:
+ format_list.append('[NO DATA FROM THE TOP LOG]')
+ format_list.append('-' * 50)
+ return '\n'.join(format_list)
+
def CopyFilesTo(self, dest_dir, files_to_copy):
file_index = 0
for file_to_copy in files_to_copy:
@@ -572,45 +594,54 @@ class Result(object):
if snapshot:
snapshots.append(snapshot)
- # Threshold of CPU usage when Chrome is busy, i.e. benchmark is running.
- # FIXME(denik): 70 is just a guess and needs empirical evidence.
- # (It does not need to be configurable.)
- chrome_high_cpu_load = 70
+ # Define threshold of CPU usage when Chrome is busy, i.e. benchmark is
+ # running.
+ # Ideally it would be 100%, but that is hardly reachable with only 1 core.
+ # Statistics on DUTs with 2-6 cores show that chrome loads of 100%, 95% and
+ # 90% each occur in 72-74% of all top log snapshots.
+ # Lowering the load threshold further shifts the share of "high load"
+ # snapshots, which might include snapshots when no benchmark is running.
+ # On a 1-core DUT, 90% chrome CPU load occurs in 55% of snapshots, 95% in
+ # 33% and 100% in 2%.
+ CHROME_HIGH_CPU_LOAD = 90
# Number of snapshots where chrome is heavily used.
- active_snapshots = 0
+ high_load_snapshots = 0
# Total CPU use per process in ALL active snapshots.
- cmd_total_cpu_use = {}
+ cmd_total_cpu_use = collections.defaultdict(float)
# Top CPU usages per command.
- cmd_top5_cpu_use = {}
+ cmd_top5_cpu_use = collections.defaultdict(list)
# List of Top Commands to be returned.
topcmds = []
for snapshot_processes in snapshots:
- if any(chrome_proc['cpu_use'] > chrome_high_cpu_load
- for chrome_proc in snapshot_processes
- if chrome_proc['cmd'] == 'chrome'):
- # This is a snapshot where at least one chrome command
- # has CPU usage above the threshold.
- active_snapshots += 1
- for process in snapshot_processes:
- cmd = process['cmd']
- cpu_use = process['cpu_use']
-
+ # CPU usage per command in one snapshot.
+ cmd_cpu_use_per_snapshot = collections.defaultdict(float)
+ for process in snapshot_processes:
+ cmd = process['cmd']
+ cpu_use = process['cpu_use']
+
+ # Collect CPU usage per command.
+ cmd_cpu_use_per_snapshot[cmd] += cpu_use
+
+ if cmd_cpu_use_per_snapshot.setdefault('chrome',
+ 0.0) > CHROME_HIGH_CPU_LOAD:
+ # Combined CPU usage of "chrome" command exceeds "High load" threshold
+ # which means DUT is busy running a benchmark.
+ high_load_snapshots += 1
+ for cmd, cpu_use in cmd_cpu_use_per_snapshot.items():
# Update total CPU usage.
- total_cpu_use = cmd_total_cpu_use.setdefault(cmd, 0.0)
- cmd_total_cpu_use[cmd] = total_cpu_use + cpu_use
+ cmd_total_cpu_use[cmd] += cpu_use
- # Add cpu_use into command top cpu usages, sorted in descending
- # order.
- top5_list = cmd_top5_cpu_use.setdefault(cmd, [])
- heapq.heappush(top5_list, cpu_use)
+ # Add cpu_use into command top cpu usages, sorted in descending order.
+ heapq.heappush(cmd_top5_cpu_use[cmd], round(cpu_use, 1))
for consumer, usage in sorted(
cmd_total_cpu_use.items(), key=lambda x: x[1], reverse=True):
# Iterate through commands by descending order of total CPU usage.
topcmd = {
'cmd': consumer,
- 'cpu_avg': usage / active_snapshots,
+ 'cpu_avg': usage / high_load_snapshots,
'count': len(cmd_top5_cpu_use[consumer]),
'top5': heapq.nlargest(5, cmd_top5_cpu_use[consumer]),
}
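For reference, FormatStringTop5() above right-justifies each row into
fixed-width columns via '%20s %9.2f %6s %s' (the header uses
'%20s %9s %6s %s' and a 50-dash separator). A small standalone illustration,
using values taken from the unit test data below:

    row = '%20s %9.2f %6s %s' % ('chrome', 119.753453465, 44444,
                                 [222.8, 217.9, 217.8, 191.0, 189.9])
    # COMMAND is padded to 20 chars, AVG CPU% to 9 with two decimals,
    # COUNT to 6, and the HIGHEST 5 list is appended as-is:
    # '              chrome    119.75  44444 [222.8, 217.9, 217.8, 191.0, 189.9]'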
diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py
index 28ce599c..7ce04221 100755
--- a/crosperf/results_cache_unittest.py
+++ b/crosperf/results_cache_unittest.py
@@ -226,8 +226,8 @@ TOP_DATA = [
{
'cmd': 'chrome',
'cpu_avg': 124.75,
- 'count': 4,
- 'top5': [122.8, 107.9, 17.8, 1.0],
+ 'count': 2,
+ 'top5': [125.7, 123.8],
},
{
'cmd': 'irq/cros-ec',
@@ -470,7 +470,7 @@ class ResultTest(unittest.TestCase):
mock_copyfiles.return_value = 0
- #test 1. dest_dir exists; CopyFiles returns 0.
+ # test 1. dest_dir exists; CopyFiles returns 0.
mock_isdir.return_value = True
self.result.CopyFilesTo(dest_dir, files)
self.assertEqual(mock_runcmd.call_count, 0)
@@ -484,7 +484,7 @@ class ResultTest(unittest.TestCase):
mock_runcmd.reset_mock()
mock_copyfiles.reset_mock()
- #test 2. dest_dir does not exist; CopyFiles returns 0.
+ # test 2. dest_dir does not exist; CopyFiles returns 0.
mock_isdir.return_value = False
self.result.CopyFilesTo(dest_dir, files)
self.assertEqual(mock_runcmd.call_count, 3)
@@ -495,7 +495,7 @@ class ResultTest(unittest.TestCase):
mock_runcmd.call_args_list[2])
self.assertEqual(mock_runcmd.call_args_list[0][0], ('mkdir -p /tmp/test',))
- #test 3. CopyFiles returns 1 (fails).
+ # test 3. CopyFiles returns 1 (fails).
mock_copyfiles.return_value = 1
self.assertRaises(Exception, self.result.CopyFilesTo, dest_dir, files)
@@ -990,6 +990,75 @@ class ResultTest(unittest.TestCase):
mo.assert_has_calls(calls)
self.assertEqual(topcalls, [])
+ def test_format_string_top5_cmds(self):
+ """Test formatted string with top5 commands."""
+ self.result.top_cmds = [
+ {
+ 'cmd': 'chrome',
+ 'cpu_avg': 119.753453465,
+ 'count': 44444,
+ 'top5': [222.8, 217.9, 217.8, 191.0, 189.9],
+ },
+ {
+ 'cmd': 'irq/230-cros-ec',
+ 'cpu_avg': 10.000000000000001,
+ 'count': 1000,
+ 'top5': [11.5, 11.4, 11.3, 11.2, 11.1],
+ },
+ {
+ 'cmd': 'powerd',
+ 'cpu_avg': 2.0,
+ 'count': 2,
+ 'top5': [3.0, 1.0]
+ },
+ {
+ 'cmd': 'cmd1',
+ 'cpu_avg': 1.0,
+ 'count': 1,
+ 'top5': [1.0],
+ },
+ {
+ 'cmd': 'cmd2',
+ 'cpu_avg': 1.0,
+ 'count': 1,
+ 'top5': [1.0],
+ },
+ {
+ 'cmd': 'not_for_print',
+ 'cpu_avg': 1.0,
+ 'count': 1,
+ 'top5': [1.0],
+ },
+ ]
+ form_str = self.result.FormatStringTop5()
+ self.assertEqual(
+ form_str, '\n'.join([
+ 'Top 5 commands with highest CPU usage:',
+ ' COMMAND AVG CPU% COUNT HIGHEST 5',
+ '-' * 50,
+ ' chrome 119.75 44444 '
+ '[222.8, 217.9, 217.8, 191.0, 189.9]',
+ ' irq/230-cros-ec 10.00 1000 '
+ '[11.5, 11.4, 11.3, 11.2, 11.1]',
+ ' powerd 2.00 2 [3.0, 1.0]',
+ ' cmd1 1.00 1 [1.0]',
+ ' cmd2 1.00 1 [1.0]',
+ '-' * 50,
+ ]))
+
+ def test_format_string_top5_calls_no_data(self):
+ """Test formatted string of top5 with no data."""
+ self.result.top_cmds = []
+ form_str = self.result.FormatStringTop5()
+ self.assertEqual(
+ form_str, '\n'.join([
+ 'Top 5 commands with highest CPU usage:',
+ ' COMMAND AVG CPU% COUNT HIGHEST 5',
+ '-' * 50,
+ '[NO DATA FROM THE TOP LOG]',
+ '-' * 50,
+ ]))
+
@mock.patch.object(misc, 'GetInsideChrootPath')
@mock.patch.object(command_executer.CommandExecuter, 'ChrootRunCommand')
def test_generate_perf_report_files(self, mock_chrootruncmd, mock_getpath):
diff --git a/crosperf/results_report.py b/crosperf/results_report.py
index 5f49872b..edbdd4d7 100644
--- a/crosperf/results_report.py
+++ b/crosperf/results_report.py
@@ -371,6 +371,11 @@ class TextResultsReport(ResultsReport):
cell_table = TableFormatter(table, columns).GetCellTable('status')
return [cell_table]
+ def _GetTotalWaitCooldownTime(self):
+ """Get cooldown wait time in seconds from experiment benchmark runs."""
+ return sum(br.suite_runner.GetCooldownWaitTime()
+ for br in self.experiment.benchmark_runs)
+
def GetReport(self):
"""Generate the report for email and console."""
output_type = 'EMAIL' if self.email else 'CONSOLE'
@@ -406,6 +411,9 @@ class TextResultsReport(ResultsReport):
cpu_info = experiment.machine_manager.GetAllCPUInfo(experiment.labels)
sections.append(self._MakeSection('CPUInfo', cpu_info))
+ waittime_str = '%d min' % (self._GetTotalWaitCooldownTime() // 60)
+ sections.append(self._MakeSection('Cooldown wait time', waittime_str))
+
return '\n'.join(sections)
@@ -511,8 +519,11 @@ def ParseStandardPerfReport(report_data):
"""
# This function fails silently on its own if it's handed a string (as opposed to a
# list of lines). So, auto-split if we do happen to get a string.
- if isinstance(report_data, basestring):
+ if isinstance(report_data, str):
report_data = report_data.splitlines()
+ # When switching to python3 catch the case when bytes are passed.
+ elif isinstance(report_data, bytes):
+ raise TypeError()
# Samples: N{K,M,G} of event 'event-name'
samples_regex = re.compile(r"#\s+Samples: \d+\S? of event '([^']+)'")
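The cooldown section added above is a straight sum over benchmark runs.
A minimal sketch of the computation, assuming each run's
suite_runner.GetCooldownWaitTime() returns the accumulated wait time in
seconds (as in the patch):

    def cooldown_wait_time_minutes(benchmark_runs):
      # Sum the per-run cooldown wait time (seconds) and report whole
      # minutes, matching the '%d min' formatting of the report section.
      total_seconds = sum(br.suite_runner.GetCooldownWaitTime()
                          for br in benchmark_runs)
      return '%d min' % (total_seconds // 60)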
diff --git a/crosperf/results_report_unittest.py b/crosperf/results_report_unittest.py
index 61e2a7c2..ae51fda6 100755
--- a/crosperf/results_report_unittest.py
+++ b/crosperf/results_report_unittest.py
@@ -10,14 +10,14 @@
from __future__ import division
from __future__ import print_function
-from StringIO import StringIO
-
import collections
-import mock
+import io
import os
-import test_flag
import unittest
+import mock
+import test_flag
+
from benchmark_run import MockBenchmarkRun
from cros_utils import logger
from experiment_factory import ExperimentFactory
@@ -83,7 +83,7 @@ def FakePath(ext):
def MakeMockExperiment(compiler='gcc'):
"""Mocks an experiment using the given compiler."""
- mock_experiment_file = StringIO("""
+ mock_experiment_file = io.BytesIO("""
board: x86-alex
remote: 127.0.0.1
perf_args: record -a -e cycles
@@ -146,6 +146,12 @@ def _InjectSuccesses(experiment, how_many, keyvals, for_benchmark=0,
return experiment
+def _InjectCooldownTime(experiment, cooldown_time):
+ """Inject cooldown wait time in every benchmark run."""
+ for br in experiment.benchmark_runs:
+ br.suite_runner.cooldown_wait_time = cooldown_time
+
+
class TextResultsReportTest(unittest.TestCase):
"""Tests that the output of a text report contains the things we pass in.
@@ -158,11 +164,18 @@ class TextResultsReportTest(unittest.TestCase):
success_keyvals = {'retval': 0, 'machine': 'some bot', 'a_float': 3.96}
experiment = _InjectSuccesses(MakeMockExperiment(), num_success,
success_keyvals)
- text_report = TextResultsReport.FromExperiment(experiment, email=email) \
- .GetReport()
+ # Set 120 sec cooldown time for every benchmark run.
+ cooldown_time = 120
+ _InjectCooldownTime(experiment, cooldown_time)
+ text_report = TextResultsReport.FromExperiment(
+ experiment, email=email).GetReport()
self.assertIn(str(success_keyvals['a_float']), text_report)
self.assertIn(success_keyvals['machine'], text_report)
self.assertIn(MockCrosMachine.CPUINFO_STRING, text_report)
+ self.assertIn('Cooldown wait time', text_report)
+ self.assertIn(
+ '%d min' % (len(experiment.benchmark_runs) * cooldown_time // 60),
+ text_report)
return text_report
def testOutput(self):
@@ -229,7 +242,7 @@ class HTMLResultsReportTest(unittest.TestCase):
_InjectSuccesses(MakeMockExperiment(), num_success, success_keyvals))
self.assertNotIn('no result', output.summary_table)
- #self.assertIn(success_keyvals['machine'], output.summary_table)
+ # self.assertIn(success_keyvals['machine'], output.summary_table)
self.assertIn('a_float', output.summary_table)
self.assertIn(str(success_keyvals['a_float']), output.summary_table)
self.assertIn('a_float', output.full_table)
@@ -418,7 +431,7 @@ class PerfReportParserTest(unittest.TestCase):
}
report_cycles = report['cycles']
self.assertEqual(len(report_cycles), 214)
- for k, v in known_cycles_percentages.iteritems():
+ for k, v in known_cycles_percentages.items():
self.assertIn(k, report_cycles)
self.assertEqual(v, report_cycles[k])
@@ -430,7 +443,7 @@ class PerfReportParserTest(unittest.TestCase):
}
report_instructions = report['instructions']
self.assertEqual(len(report_instructions), 492)
- for k, v in known_instrunctions_percentages.iteritems():
+ for k, v in known_instrunctions_percentages.items():
self.assertIn(k, report_instructions)
self.assertEqual(v, report_instructions[k])
diff --git a/crosperf/schedv2.py b/crosperf/schedv2.py
index 0cc8f746..768d29d8 100644
--- a/crosperf/schedv2.py
+++ b/crosperf/schedv2.py
@@ -243,10 +243,14 @@ class DutWorker(Thread):
if self._kerncmd_update_needed(intel_pstate):
self._update_kerncmd_intel_pstate(intel_pstate)
+ # When calculating cooldown wait time we assume that suite_runner is
+ # never reused so we can sum up the values across all benchmark_runs.
+ # If implementation changes causing the assert below to fail the
+ # calculation should be adjusted accordingly.
+ assert br.suite_runner.GetCooldownWaitTime() == 0
# Execute the br.
self._execute_benchmark_run(br)
total_waittime += br.suite_runner.GetCooldownWaitTime()
- br.suite_runner.ResetCooldownWaitTime()
finally:
self._logger.LogOutput(
'Total wait time for cooldown: %d min' % (total_waittime // 60))
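The schedv2.py change above relies on there being one suite_runner per
benchmark run: the counter is read after each run instead of being reset, so
a shared runner would be double-counted. A compact sketch of that accounting,
with execute_benchmark_run standing in for self._execute_benchmark_run:

    total_waittime = 0
    for br in benchmark_runs:
      # Each suite_runner starts with a zero cooldown counter; the assert in
      # the patch guards the assumption that runners are never reused.
      assert br.suite_runner.GetCooldownWaitTime() == 0
      execute_benchmark_run(br)  # placeholder for self._execute_benchmark_run(br)
      total_waittime += br.suite_runner.GetCooldownWaitTime()
    print('Total wait time for cooldown: %d min' % (total_waittime // 60))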