author     Denis Nikitin <denik@google.com>      2019-11-04 07:18:15 -0800
committer  Denis Nikitin <denik@chromium.org>    2019-11-05 06:27:52 +0000
commit     fe8a44aa39ab93632e976b4aa5d00013601cdbc2 (patch)
tree       d9323e74ee377436d1680df69f4f080be4f35ed0 /crosperf
parent     b1fc678ca17b224bfd7af33f96ab6b55eec47be0 (diff)
crosperf: Improve cooldown functionality
"Restart UI" procedure is changed. DUT configuration including cooldown in SuiteRunner is now performed while UI is off. Start UI after DUT configuration is done. The change showed decrease of cooldown waiting time by a factor of 10. Added Total experiment duration time in the report along with total cooldown wait time. Cooldown wait time is now printed per machine (with parallel execution on multiple machines single cooldown time could be misleading). To collect data per machine cooldown_time attribute is moved from SuiteRunner to CrosMachine class. BUG=chromium:966514 TEST=unittests and HW tests on kevin64 passed. Change-Id: Ib3e145cd986ca7242d97b0aeeb7795ba066753fc Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/1897236 Tested-by: Denis Nikitin <denik@chromium.org> Reviewed-by: George Burgess <gbiv@chromium.org>
Diffstat (limited to 'crosperf')
-rw-r--r--  crosperf/benchmark_run.py             11
-rwxr-xr-x  crosperf/benchmark_run_unittest.py     4
-rw-r--r--  crosperf/machine_manager.py            8
-rwxr-xr-x  crosperf/machine_manager_unittest.py   9
-rw-r--r--  crosperf/results_report.py            52
-rwxr-xr-x  crosperf/results_report_unittest.py   43
-rw-r--r--  crosperf/schedv2.py                   10
-rw-r--r--  crosperf/suite_runner.py              47
-rwxr-xr-x  crosperf/suite_runner_unittest.py     43
9 files changed, 132 insertions(+), 95 deletions(-)
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
index 6512b8ea..a7c3b7d1 100644
--- a/crosperf/benchmark_run.py
+++ b/crosperf/benchmark_run.py
@@ -118,7 +118,6 @@ class BenchmarkRun(threading.Thread):
self.machine = self.AcquireMachine()
self.cache.machine = self.machine
self.result = self.RunTest(self.machine)
- # TODO(denik): Add Top5 report into html.
self.cache.remote = self.machine.name
self.label.chrome_version = self.machine_manager.GetChromeVersion(
@@ -220,9 +219,8 @@ class BenchmarkRun(threading.Thread):
else:
self.machine_manager.ImageMachine(machine, self.label)
self.timeline.Record(STATUS_RUNNING)
- retval, out, err = self.suite_runner.Run(machine.name, self.label,
- self.benchmark, self.test_args,
- self.profiler_args)
+ retval, out, err = self.suite_runner.Run(
+ machine, self.label, self.benchmark, self.test_args, self.profiler_args)
self.run_completed = True
return Result.CreateFromRun(self._logger, self.log_level, self.label,
self.machine, out, err, retval,
@@ -270,9 +268,8 @@ class MockBenchmarkRun(BenchmarkRun):
self.timeline.Record(STATUS_IMAGING)
self.machine_manager.ImageMachine(machine, self.label)
self.timeline.Record(STATUS_RUNNING)
- [retval, out,
- err] = self.suite_runner.Run(machine.name, self.label, self.benchmark,
- self.test_args, self.profiler_args)
+ [retval, out, err] = self.suite_runner.Run(
+ machine, self.label, self.benchmark, self.test_args, self.profiler_args)
self.run_completed = True
rr = MockResult('logger', self.label, self.log_level, machine)
rr.out = out
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
index 51b287cf..4daa77bb 100755
--- a/crosperf/benchmark_run_unittest.py
+++ b/crosperf/benchmark_run_unittest.py
@@ -410,8 +410,8 @@ class BenchmarkRunTest(unittest.TestCase):
br.machine_manager.ImageMachine.assert_called_with(mock_machine,
self.test_label)
self.assertEqual(mock_runner.call_count, 1)
- mock_runner.assert_called_with(mock_machine.name, br.label, br.benchmark,
- '', br.profiler_args)
+ mock_runner.assert_called_with(mock_machine, br.label, br.benchmark, '',
+ br.profiler_args)
self.assertEqual(mock_result.call_count, 1)
mock_result.assert_called_with(
diff --git a/crosperf/machine_manager.py b/crosperf/machine_manager.py
index ea3d105a..2cdaca3f 100644
--- a/crosperf/machine_manager.py
+++ b/crosperf/machine_manager.py
@@ -62,6 +62,7 @@ class CrosMachine(object):
self.checksum_string = None
self.meminfo = None
self.phys_kbytes = None
+ self.cooldown_wait_time = 0
self.ce = cmd_exec or command_executer.GetCommandExecuter(
log_level=self.log_level)
self.SetUpChecksumInfo()
@@ -85,6 +86,12 @@ class CrosMachine(object):
return False
return True
+ def AddCooldownWaitTime(self, wait_time):
+ self.cooldown_wait_time += wait_time
+
+ def GetCooldownWaitTime(self):
+ return self.cooldown_wait_time
+
def _ParseMemoryInfo(self):
line = self.meminfo.splitlines()[0]
usable_kbytes = int(line.split()[1])
@@ -627,6 +634,7 @@ power management:
self.machine_checksum = self._GetMD5Checksum(self.checksum_string)
self.log_level = log_level
self.label = None
+ self.cooldown_wait_time = 0
self.ce = command_executer.GetCommandExecuter(log_level=self.log_level)
self._GetCPUInfo()
diff --git a/crosperf/machine_manager_unittest.py b/crosperf/machine_manager_unittest.py
index 0f64a714..984251ca 100755
--- a/crosperf/machine_manager_unittest.py
+++ b/crosperf/machine_manager_unittest.py
@@ -840,6 +840,15 @@ class CrosMachineTest(unittest.TestCase):
mock_run_cmd.return_value = [0, 'invalid hardware config', '']
self.assertRaises(cm._GetMachineID)
+ def test_add_cooldown_waittime(self):
+ cm = machine_manager.CrosMachine('1.2.3.4.cros', '/usr/local/chromeos',
+ 'average')
+ self.assertEqual(cm.GetCooldownWaitTime(), 0)
+ cm.AddCooldownWaitTime(250)
+ self.assertEqual(cm.GetCooldownWaitTime(), 250)
+ cm.AddCooldownWaitTime(1)
+ self.assertEqual(cm.GetCooldownWaitTime(), 251)
+
if __name__ == '__main__':
unittest.main()
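The accumulator moved onto CrosMachine is deliberately trivial; what matters
is that every benchmark run on a DUT now adds into one per-machine counter,
regardless of which suite runner performed it. A minimal usage sketch
mirroring the test above (values are illustrative):

cm = machine_manager.CrosMachine('1.2.3.4.cros', '/usr/local/chromeos',
                                 'average')
cm.AddCooldownWaitTime(250)  # seconds waited before one benchmark run
cm.AddCooldownWaitTime(1)    # a later run on the same DUT accumulates
assert cm.GetCooldownWaitTime() == 251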
diff --git a/crosperf/results_report.py b/crosperf/results_report.py
index edbdd4d7..22719027 100644
--- a/crosperf/results_report.py
+++ b/crosperf/results_report.py
@@ -12,6 +12,7 @@ import itertools
import json
import os
import re
+import time
from cros_utils.tabulator import AmeanResult
from cros_utils.tabulator import Cell
@@ -371,10 +372,16 @@ class TextResultsReport(ResultsReport):
cell_table = TableFormatter(table, columns).GetCellTable('status')
return [cell_table]
- def _GetTotalWaitCooldownTime(self):
- """Get cooldown wait time in seconds from experiment benchmark runs."""
- return sum(br.suite_runner.GetCooldownWaitTime()
- for br in self.experiment.benchmark_runs)
+ def GetTotalWaitCooldownTime(self):
+ """Get cooldown wait time in seconds from experiment benchmark runs.
+
+ Returns:
+ Dictionary {'dut': int(wait_time_in_seconds)}
+ """
+ waittime_dict = {}
+ for dut in self.experiment.machine_manager.GetMachines():
+ waittime_dict[dut.name] = dut.GetCooldownWaitTime()
+ return waittime_dict
def GetReport(self):
"""Generate the report for email and console."""
@@ -411,8 +418,20 @@ class TextResultsReport(ResultsReport):
cpu_info = experiment.machine_manager.GetAllCPUInfo(experiment.labels)
sections.append(self._MakeSection('CPUInfo', cpu_info))
- waittime_str = '%d min' % (self._GetTotalWaitCooldownTime() // 60)
- sections.append(self._MakeSection('Cooldown wait time', waittime_str))
+ totaltime = (
+ time.time() - experiment.start_time) if experiment.start_time else 0
+ totaltime_str = 'Total experiment time:\n%d min' % (totaltime // 60)
+ cooldown_waittime_list = ['Cooldown wait time:']
# When running an experiment on multiple DUTs, cooldown wait time may vary
# across devices. Its combined total may even exceed the total experiment
# time, which looks odd but is expected with parallel execution.
# For that reason, print cooldown time per DUT.
+ for dut, waittime in sorted(self.GetTotalWaitCooldownTime().items()):
+ cooldown_waittime_list.append('DUT %s: %d min' % (dut, waittime // 60))
+ cooldown_waittime_str = '\n'.join(cooldown_waittime_list)
+ sections.append(
+ self._MakeSection('Duration', '\n\n'.join(
+ [totaltime_str, cooldown_waittime_str])))
return '\n'.join(sections)
@@ -667,8 +686,8 @@ class JSONResultsReport(ResultsReport):
def __init__(self,
benchmark_results,
- date=None,
- time=None,
+ benchmark_date=None,
+ benchmark_time=None,
experiment=None,
json_args=None):
"""Construct a JSONResultsReport.
@@ -689,19 +708,22 @@ class JSONResultsReport(ResultsReport):
self.json_args = json_args
self.experiment = experiment
- if not date:
+ if not benchmark_date:
timestamp = datetime.datetime.strftime(datetime.datetime.now(),
'%Y-%m-%d %H:%M:%S')
- date, time = timestamp.split(' ')
- self.date = date
- self.time = time
+ benchmark_date, benchmark_time = timestamp.split(' ')
+ self.date = benchmark_date
+ self.time = benchmark_time
@staticmethod
- def FromExperiment(experiment, date=None, time=None, json_args=None):
+ def FromExperiment(experiment,
+ benchmark_date=None,
+ benchmark_time=None,
+ json_args=None):
benchmark_results = BenchmarkResults.FromExperiment(
experiment, for_json_report=True)
- return JSONResultsReport(benchmark_results, date, time, experiment,
- json_args)
+ return JSONResultsReport(benchmark_results, benchmark_date, benchmark_time,
+ experiment, json_args)
def GetReportObjectIgnoringExperiment(self):
"""Gets the JSON report object specifically for the output data.
diff --git a/crosperf/results_report_unittest.py b/crosperf/results_report_unittest.py
index ae51fda6..e6438d6f 100755
--- a/crosperf/results_report_unittest.py
+++ b/crosperf/results_report_unittest.py
@@ -146,12 +146,6 @@ def _InjectSuccesses(experiment, how_many, keyvals, for_benchmark=0,
return experiment
-def _InjectCooldownTime(experiment, cooldown_time):
- """Inject cooldown wait time in every benchmark run."""
- for br in experiment.benchmark_runs:
- br.suite_runner.cooldown_wait_time = cooldown_time
-
-
class TextResultsReportTest(unittest.TestCase):
"""Tests that the output of a text report contains the things we pass in.
@@ -159,33 +153,48 @@ class TextResultsReportTest(unittest.TestCase):
things are displayed. It just cares that they're present.
"""
- def _checkReport(self, email):
+ def _checkReport(self, mock_getcooldown, email):
num_success = 2
success_keyvals = {'retval': 0, 'machine': 'some bot', 'a_float': 3.96}
experiment = _InjectSuccesses(MakeMockExperiment(), num_success,
success_keyvals)
- # Set 120 sec cooldown time for every benchmark run.
- cooldown_time = 120
- _InjectCooldownTime(experiment, cooldown_time)
+ SECONDS_IN_MIN = 60
+ mock_getcooldown.return_value = {
+ experiment.remote[0]: 12 * SECONDS_IN_MIN,
+ experiment.remote[1]: 8 * SECONDS_IN_MIN
+ }
+
text_report = TextResultsReport.FromExperiment(
experiment, email=email).GetReport()
self.assertIn(str(success_keyvals['a_float']), text_report)
self.assertIn(success_keyvals['machine'], text_report)
self.assertIn(MockCrosMachine.CPUINFO_STRING, text_report)
- self.assertIn('Cooldown wait time', text_report)
- self.assertIn(
- '%d min' % (len(experiment.benchmark_runs) * cooldown_time // 60),
- text_report)
+ self.assertIn('\nDuration\n', text_report)
+ self.assertIn('Total experiment time:\n', text_report)
+ self.assertIn('Cooldown wait time:\n', text_report)
+ self.assertIn('DUT %s: %d min' % (experiment.remote[0], 12), text_report)
+ self.assertIn('DUT %s: %d min' % (experiment.remote[1], 8), text_report)
return text_report
- def testOutput(self):
- email_report = self._checkReport(email=True)
- text_report = self._checkReport(email=False)
+ @mock.patch.object(TextResultsReport, 'GetTotalWaitCooldownTime')
+ def testOutput(self, mock_getcooldown):
+ email_report = self._checkReport(mock_getcooldown, email=True)
+ text_report = self._checkReport(mock_getcooldown, email=False)
# Ensure that the reports are somehow different. Otherwise, having the
# distinction is useless.
self.assertNotEqual(email_report, text_report)
+ def test_get_totalwait_cooldowntime(self):
+ experiment = MakeMockExperiment()
+ cros_machines = experiment.machine_manager.GetMachines()
+ cros_machines[0].AddCooldownWaitTime(120)
+ cros_machines[1].AddCooldownWaitTime(240)
+ text_results = TextResultsReport.FromExperiment(experiment, email=False)
+ total = text_results.GetTotalWaitCooldownTime()
+ self.assertEqual(total[experiment.remote[0]], 120)
+ self.assertEqual(total[experiment.remote[1]], 240)
+
class HTMLResultsReportTest(unittest.TestCase):
"""Tests that the output of a HTML report contains the things we pass in.
diff --git a/crosperf/schedv2.py b/crosperf/schedv2.py
index 768d29d8..dea3cdd3 100644
--- a/crosperf/schedv2.py
+++ b/crosperf/schedv2.py
@@ -205,8 +205,6 @@ class DutWorker(Thread):
intel_pstate = self._sched.get_experiment().intel_pstate
# Firstly, handle benchmarkruns that have cache hit.
br = self._sched.get_cached_benchmark_run()
- # Total wait time for cooling down.
- total_waittime = 0
while br:
try:
self._stat_annotation = 'finishing cached {}'.format(br)
@@ -243,17 +241,9 @@ class DutWorker(Thread):
if self._kerncmd_update_needed(intel_pstate):
self._update_kerncmd_intel_pstate(intel_pstate)
- # When calculating cooldown wait time we assume that suite_runner is
- # never reused so we can sum up the values across all benchmark_runs.
- # If implementation changes causing the assert below to fail the
- # calculation should be adjusted accordingly.
- assert br.suite_runner.GetCooldownWaitTime() == 0
# Execute the br.
self._execute_benchmark_run(br)
- total_waittime += br.suite_runner.GetCooldownWaitTime()
finally:
- self._logger.LogOutput(
- 'Total wait time for cooldown: %d min' % (total_waittime // 60))
self._stat_annotation = 'finished'
# Thread finishes. Notify scheduler that I'm done.
self._sched.dut_worker_finished(self)
diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
index 01e7114f..fbacf17a 100644
--- a/crosperf/suite_runner.py
+++ b/crosperf/suite_runner.py
@@ -68,13 +68,6 @@ class SuiteRunner(object):
self._ct = cmd_term or command_executer.CommandTerminator()
self.enable_aslr = enable_aslr
self.dut_config = dut_config
- self.cooldown_wait_time = 0
-
- def ResetCooldownWaitTime(self):
- self.cooldown_wait_time = 0
-
- def GetCooldownWaitTime(self):
- return self.cooldown_wait_time
def DutWrapper(self, machine_name, chromeos_root):
"""Wrap DUT parameters inside.
@@ -102,10 +95,11 @@ class SuiteRunner(object):
return RunCommandOnDut
- def Run(self, machine, label, benchmark, test_args, profiler_args):
+ def Run(self, cros_machine, label, benchmark, test_args, profiler_args):
+ machine_name = cros_machine.name
if not label.skylab:
# Initialize command executer on DUT.
- run_on_dut = self.DutWrapper(machine, label.chromeos_root)
+ run_on_dut = self.DutWrapper(machine_name, label.chromeos_root)
for i in range(0, benchmark.retries + 1):
if label.skylab:
# TODO: need to migrate DisableASLR and PinGovernorExecutionFrequencies
@@ -115,6 +109,11 @@ class SuiteRunner(object):
# it before running.
ret_tup = self.Skylab_Run(label, benchmark, test_args, profiler_args)
else:
+ # Stop UI before configuring the DUT.
+ # This speeds up setup (cooldown wait time drops roughly 10x)
+ # and helps reset Chrome state left over from the previous test.
+ self.StopUI(run_on_dut)
+
# Unless the user turns on ASLR in the flag, we first disable ASLR
# before running the benchmarks
if not self.enable_aslr:
@@ -141,7 +140,8 @@ class SuiteRunner(object):
# on scarlet and kevin64.
# We might have to consider reducing freq manually to the min
# if it helps to reduce waiting time.
- self.cooldown_wait_time += self.WaitCooldown(run_on_dut)
+ wait_time = self.WaitCooldown(run_on_dut)
+ cros_machine.AddCooldownWaitTime(wait_time)
# Setup CPU governor for the benchmark run.
# It overwrites the previous governor settings.
@@ -160,13 +160,16 @@ class SuiteRunner(object):
# But it may change in the future and then we have to recover the
# settings.
+ # DUT setup is done. Start a fresh new shiny UI.
+ self.StartUI(run_on_dut)
+
if benchmark.suite == 'telemetry_Crosperf':
self.DecreaseWaitTime(run_on_dut)
- ret_tup = self.Telemetry_Crosperf_Run(machine, label, benchmark,
+ ret_tup = self.Telemetry_Crosperf_Run(machine_name, label, benchmark,
test_args, profiler_args)
else:
- ret_tup = self.Test_That_Run(machine, label, benchmark, test_args,
- profiler_args)
+ ret_tup = self.Test_That_Run(machine_name, label, benchmark,
+ test_args, profiler_args)
if ret_tup[0] != 0:
self.logger.LogOutput('benchmark %s failed. Retries left: %s' %
@@ -183,11 +186,9 @@ class SuiteRunner(object):
def DisableASLR(self, run_on_dut):
disable_aslr = ('set -e && '
- 'stop ui; '
'if [[ -e /proc/sys/kernel/randomize_va_space ]]; then '
' echo 0 > /proc/sys/kernel/randomize_va_space; '
- 'fi; '
- 'start ui ')
+ 'fi')
if self.log_level == 'average':
self.logger.LogOutput('Disable ASLR.')
run_on_dut(disable_aslr)
@@ -449,10 +450,11 @@ class SuiteRunner(object):
sed_command = 'sed -i "s/_TTI_WAIT_TIME = 10/_TTI_WAIT_TIME = 2/g" '
run_on_dut(sed_command + FILE)
- def RestartUI(self, machine_name, chromeos_root):
- command = 'stop ui; sleep 5; start ui'
- self._ce.CrosRunCommand(
- command, machine=machine_name, chromeos_root=chromeos_root)
+ def StopUI(self, run_on_dut):
+ run_on_dut('stop ui')
+
+ def StartUI(self, run_on_dut):
+ run_on_dut('start ui')
def Test_That_Run(self, machine, label, benchmark, test_args, profiler_args):
"""Run the test_that test.."""
@@ -467,11 +469,6 @@ class SuiteRunner(object):
self._ce.CrosRunCommand(
command, machine=machine, chromeos_root=label.chromeos_root)
- # We do this because some tests leave the machine in weird states.
- # Rebooting between iterations has proven to help with this.
- # But the beep is anoying, we will try restart ui.
- self.RestartUI(machine, label.chromeos_root)
-
autotest_dir = AUTOTEST_DIR
if label.autotest_path != '':
autotest_dir = label.autotest_path
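StopUI and StartUI take the run_on_dut callable produced by DutWrapper
rather than a machine name, so they carry no CommandExecuter state of their
own. A simplified sketch of the closure DutWrapper returns (the real method
also logs failures and honors ignore_status):

def DutWrapper(self, machine_name, chromeos_root):
  """Return a closure that runs a shell command on the DUT (simplified)."""
  def RunCommandOnDut(command, ignore_status=False):
    # Returns (retval, stdout, stderr) from running `command` on the DUT.
    return self._ce.CrosRunCommandWOutput(
        command, machine=machine_name, chromeos_root=chromeos_root)
  return RunCommandOnDut

Because StopUI and StartUI only invoke this injected runner, the tests below
can pass a bare mock.Mock(return_value=(0, '', '')) and assert on the exact
command string.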
diff --git a/crosperf/suite_runner_unittest.py b/crosperf/suite_runner_unittest.py
index 8b336eda..bf8bc317 100755
--- a/crosperf/suite_runner_unittest.py
+++ b/crosperf/suite_runner_unittest.py
@@ -22,6 +22,7 @@ from benchmark import Benchmark
from cros_utils import command_executer
from cros_utils import logger
+from machine_manager import MockCrosMachine
BIG_LITTLE_CPUINFO = """processor : 0
model name : ARMv8 Processor rev 4 (v8l)
@@ -218,13 +219,15 @@ class SuiteRunnerTest(unittest.TestCase):
self.runner.dut_config['governor'] = 'fake_governor'
self.runner.dut_config['cpu_freq_pct'] = 65
machine = 'fake_machine'
+ cros_machine = MockCrosMachine(machine, self.mock_label.chromeos_root,
+ self.mock_logger)
test_args = ''
profiler_args = ''
reset()
self.mock_label.skylab = True
- self.runner.Run(machine, self.mock_label, self.telemetry_bench, test_args,
- profiler_args)
+ self.runner.Run(cros_machine, self.mock_label, self.telemetry_bench,
+ test_args, profiler_args)
self.assertFalse(self.call_disable_aslr)
self.assertTrue(self.call_skylab_run)
self.assertFalse(self.call_test_that_run)
@@ -241,8 +244,8 @@ class SuiteRunnerTest(unittest.TestCase):
self.mock_label.skylab = False
reset()
- self.runner.Run(machine, self.mock_label, self.test_that_bench, test_args,
- profiler_args)
+ self.runner.Run(cros_machine, self.mock_label, self.test_that_bench,
+ test_args, profiler_args)
self.assertTrue(self.call_disable_aslr)
self.assertTrue(self.call_test_that_run)
self.assertFalse(self.call_telemetry_crosperf_run)
@@ -260,8 +263,8 @@ class SuiteRunnerTest(unittest.TestCase):
self.runner.WaitCooldown.assert_not_called()
reset()
- self.runner.Run(machine, self.mock_label, self.telemetry_crosperf_bench,
- test_args, profiler_args)
+ self.runner.Run(cros_machine, self.mock_label,
+ self.telemetry_crosperf_bench, test_args, profiler_args)
self.assertTrue(self.call_disable_aslr)
self.assertFalse(self.call_test_that_run)
self.assertTrue(self.call_telemetry_crosperf_run)
@@ -291,7 +294,10 @@ class SuiteRunnerTest(unittest.TestCase):
self.runner.dut_config['governor'] = 'fake_governor'
self.runner.dut_config['cpu_freq_pct'] = 75
- self.runner.Run('fake_machine', self.mock_label,
+ machine = 'fake_machine'
+ cros_machine = MockCrosMachine(machine, self.mock_label.chromeos_root,
+ self.mock_logger)
+ self.runner.Run(cros_machine, self.mock_label,
self.telemetry_crosperf_bench, '', '')
self.runner.WaitCooldown.assert_called_once_with(FakeRunner)
@@ -356,11 +362,9 @@ class SuiteRunnerTest(unittest.TestCase):
self.runner.DisableASLR(run_on_dut)
# pyformat: disable
set_cpu_cmd = ('set -e && '
- 'stop ui; '
'if [[ -e /proc/sys/kernel/randomize_va_space ]]; then '
' echo 0 > /proc/sys/kernel/randomize_va_space; '
- 'fi; '
- 'start ui ')
+ 'fi')
run_on_dut.assert_called_once_with(set_cpu_cmd)
def test_set_cpu_governor(self):
@@ -799,14 +803,15 @@ class SuiteRunnerTest(unittest.TestCase):
mock_sleep.assert_not_called()
self.assertEqual(wait_time, 0)
- @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
- def test_restart_ui(self, mock_cros_runcmd):
- self.mock_cmd_exec.CrosRunCommand = mock_cros_runcmd
- self.runner.RestartUI('lumpy1.cros', '/tmp/chromeos')
- mock_cros_runcmd.assert_called_once_with(
- 'stop ui; sleep 5; start ui',
- chromeos_root='/tmp/chromeos',
- machine='lumpy1.cros')
+ def test_stop_ui(self):
+ dut_runner = mock.Mock(return_value=(0, '', ''))
+ self.runner.StopUI(dut_runner)
+ dut_runner.assert_called_once_with('stop ui')
+
+ def test_start_ui(self):
+ dut_runner = mock.Mock(return_value=(0, '', ''))
+ self.runner.StartUI(dut_runner)
+ dut_runner.assert_called_once_with('start ui')
@mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
@mock.patch.object(command_executer.CommandExecuter,
@@ -834,7 +839,7 @@ class SuiteRunnerTest(unittest.TestCase):
self.mock_cmd_exec.CrosRunCommand = mock_cros_runcmd
res = self.runner.Test_That_Run('lumpy1.cros', self.mock_label,
self.test_that_bench, '--iterations=2', '')
- self.assertEqual(mock_cros_runcmd.call_count, 2)
+ self.assertEqual(mock_cros_runcmd.call_count, 1)
self.assertEqual(mock_chroot_runcmd.call_count, 1)
self.assertEqual(res, 0)
self.assertEqual(mock_cros_runcmd.call_args_list[0][0],