Diffstat (limited to 'crosperf')
 -rw-r--r--  crosperf/benchmark_run.py             |  10
 -rwxr-xr-x  crosperf/benchmark_run_unittest.py    |   8
 -rw-r--r--  crosperf/experiment_factory.py        |   2
 -rw-r--r--  crosperf/mock_instance.py             |   4
 -rw-r--r--  crosperf/settings_factory.py          |   4
 -rwxr-xr-x  crosperf/settings_factory_unittest.py |   2
 -rw-r--r--  crosperf/suite_runner.py              | 206
 -rwxr-xr-x  crosperf/suite_runner_unittest.py     | 213
8 files changed, 157 insertions, 292 deletions
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
index 74d461a6..b5912c11 100644
--- a/crosperf/benchmark_run.py
+++ b/crosperf/benchmark_run.py
@@ -178,12 +178,10 @@ class BenchmarkRun(threading.Thread):
     return machine
 
   def GetExtraAutotestArgs(self):
-    if self.benchmark.perf_args and self.benchmark.suite == 'telemetry':
-      self._logger.LogError('Telemetry does not support profiler.')
-      self.benchmark.perf_args = ''
-
-    if self.benchmark.perf_args and self.benchmark.suite == 'test_that':
-      self._logger.LogError('test_that does not support profiler.')
+    if (self.benchmark.perf_args and
+        self.benchmark.suite != 'telemetry_Crosperf'):
+      self._logger.LogError(
+          'Non-telemetry benchmark does not support profiler.')
       self.benchmark.perf_args = ''
 
     if self.benchmark.perf_args:
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
index 4cadc35c..ab863004 100755
--- a/crosperf/benchmark_run_unittest.py
+++ b/crosperf/benchmark_run_unittest.py
@@ -365,16 +365,12 @@ class BenchmarkRunTest(unittest.TestCase):
         '--profiler=custom_perf --profiler_args=\'perf_options="record -a -e '
         'cycles"\'')
 
-    self.test_benchmark.suite = 'telemetry'
-    result = br.GetExtraAutotestArgs()
-    self.assertEqual(result, '')
-    self.assertEqual(self.err_msg, 'Telemetry does not support profiler.')
-
     self.test_benchmark.perf_args = 'record -e cycles'
     self.test_benchmark.suite = 'test_that'
     result = br.GetExtraAutotestArgs()
     self.assertEqual(result, '')
-    self.assertEqual(self.err_msg, 'test_that does not support profiler.')
+    self.assertEqual(self.err_msg,
+                     'Non-telemetry benchmark does not support profiler.')
 
     self.test_benchmark.perf_args = 'junk args'
     self.test_benchmark.suite = 'telemetry_Crosperf'
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index 7d1689cc..4527db5f 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -296,7 +296,7 @@ class ExperimentFactory(object):
             iterations,
             rm_chroot_tmp,
             perf_args,
-            '',
+            'crosperf_Wrapper',  # Use client wrapper in Autotest
             show_all_results,
             retries,
             run_local=False,
diff --git a/crosperf/mock_instance.py b/crosperf/mock_instance.py
index 4271d8fd..842d6343 100644
--- a/crosperf/mock_instance.py
+++ b/crosperf/mock_instance.py
@@ -46,10 +46,10 @@ label2 = MockLabel(
     chrome_src=None)
 
 benchmark1 = Benchmark('benchmark1', 'autotest_name_1', 'autotest_args', 2, '',
-                       perf_args, '', '')
+                       perf_args, 'telemetry_Crosperf', '')
 
 benchmark2 = Benchmark('benchmark2', 'autotest_name_2', 'autotest_args', 2, '',
-                       perf_args, '', '')
+                       perf_args, 'telemetry_Crosperf', '')
 
 keyval = {}
 keyval[0] = {
diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py
index 1993a6c3..20ab2ad2 100644
--- a/crosperf/settings_factory.py
+++ b/crosperf/settings_factory.py
@@ -40,7 +40,9 @@ class BenchmarkSettings(Settings):
                 'times to get a stable result.'))
     self.AddField(
         TextField(
-            'suite', default='', description='The type of the benchmark.'))
+            'suite',
+            default='test_that',
+            description='The type of the benchmark.'))
     self.AddField(
         IntegerField(
             'retries',
diff --git a/crosperf/settings_factory_unittest.py b/crosperf/settings_factory_unittest.py
index 12a87ae2..a6339034 100755
--- a/crosperf/settings_factory_unittest.py
+++ b/crosperf/settings_factory_unittest.py
@@ -24,7 +24,7 @@ class BenchmarkSettingsTest(unittest.TestCase):
     self.assertEqual(res.GetField('test_name'), '')
     self.assertEqual(res.GetField('test_args'), '')
     self.assertEqual(res.GetField('iterations'), 0)
-    self.assertEqual(res.GetField('suite'), '')
+    self.assertEqual(res.GetField('suite'), 'test_that')
 
 
 class LabelSettingsTest(unittest.TestCase):
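With the settings_factory.py change above, a benchmark section that never sets 'suite' now resolves to 'test_that' instead of the empty string. A minimal sketch mirroring the updated unit test, assuming BenchmarkSettings is constructed directly the way settings_factory_unittest.py does:

import settings_factory

# Unassigned fields fall back to their declared defaults, so a freshly
# constructed benchmark settings object now reports the test_that suite.
res = settings_factory.BenchmarkSettings('benchmark')
assert res.GetField('suite') == 'test_that'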
diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
index 71ca7e7f..79ace20d 100644
--- a/crosperf/suite_runner.py
+++ b/crosperf/suite_runner.py
@@ -15,7 +15,6 @@ import shlex
 import time
 
 from cros_utils import command_executer
-from cros_utils.device_setup_utils import DutWrapper
 
 TEST_THAT_PATH = '/usr/bin/test_that'
 # TODO: Need to check whether Skylab is installed and set up correctly.
@@ -75,34 +74,12 @@ class SuiteRunner(object):
 
   def Run(self, cros_machine, label, benchmark, test_args, profiler_args):
     machine_name = cros_machine.name
-    if benchmark.suite != 'telemetry_Crosperf':
-      # Initialize command executer on DUT for test_that runs.
-      run_on_dut = DutWrapper(
-          label.chromeos_root,
-          machine_name,
-          logger=self.logger,
-          log_level=self.log_level,
-          ce=self._ce,
-          dut_config=self.dut_config)
     for i in range(0, benchmark.retries + 1):
-      # TODO: For telemetry_Crosperf run, device setup has been moved into
-      # server test script; for client runs, need to figure out wrapper to do
-      # it before running, now it is still setup here.
-      if benchmark.suite != 'telemetry_Crosperf':
-        wait_time = run_on_dut.SetupDevice()
-        # This is for accumulating wait time for test_that runs only,
-        # for telemetry_Cropserf runs, please refer to result_cache.
-        cros_machine.AddCooldownWaitTime(wait_time)
-
       if label.skylab:
         ret_tup = self.Skylab_Run(label, benchmark, test_args, profiler_args)
-      elif benchmark.suite == 'telemetry_Crosperf':
-        ret_tup = self.Telemetry_Crosperf_Run(machine_name, label, benchmark,
-                                              test_args, profiler_args)
       else:
         ret_tup = self.Test_That_Run(machine_name, label, benchmark,
                                      test_args, profiler_args)
-
       if ret_tup[0] != 0:
         self.logger.LogOutput('benchmark %s failed. Retries left: %s' %
                               (benchmark.name, benchmark.retries - i))
@@ -116,43 +93,94 @@ class SuiteRunner(object):
         break
     return ret_tup
 
+  def RemoveTelemetryTempFile(self, machine, chromeos_root):
+    filename = 'telemetry@%s' % machine
+    fullname = os.path.join(chromeos_root, 'chroot', 'tmp', filename)
+    if os.path.exists(fullname):
+      os.remove(fullname)
+
+  def GenTestArgs(self, benchmark, test_args, profiler_args):
+    args_list = []
+
+    if benchmark.suite != 'telemetry_Crosperf' and profiler_args:
+      self.logger.LogFatal('Tests other than telemetry_Crosperf do not '
+                           'support profiler.')
+
+    if test_args:
+      # Strip double quotes off args (so we can wrap them in single
+      # quotes, to pass through to Telemetry).
+      if test_args[0] == '"' and test_args[-1] == '"':
+        test_args = test_args[1:-1]
+      args_list.append("test_args='%s'" % test_args)
+
+    args_list.append(GetDutConfigArgs(self.dut_config))
+
+    if not (benchmark.suite == 'telemetry_Crosperf' or
+            benchmark.suite == 'crosperf_Wrapper'):
+      self.logger.LogWarning('Please make sure the server test has stage for '
+                             'device setup.\n')
+    else:
+      args_list.append('test=%s' % benchmark.test_name)
+      if benchmark.suite == 'telemetry_Crosperf':
+        args_list.append('run_local=%s' % benchmark.run_local)
+        args_list.append(GetProfilerArgs(profiler_args))
+
+    return args_list
+
   def Test_That_Run(self, machine, label, benchmark, test_args, profiler_args):
     """Run the test_that test.."""
-    options = []
-    if label.board:
-      options.append('--board=%s' % label.board)
-    if test_args:
-      options.append(test_args)
-    if profiler_args:
-      self.logger.LogFatal('test_that does not support profiler.')
+
+    # Remove existing test_that results
     command = 'rm -rf /usr/local/autotest/results/*'
     self._ce.CrosRunCommand(
         command, machine=machine, chromeos_root=label.chromeos_root)
 
-    autotest_dir = AUTOTEST_DIR
-    if label.autotest_path != '':
-      autotest_dir = label.autotest_path
+    if benchmark.suite == 'telemetry_Crosperf':
+      if not os.path.isdir(label.chrome_src):
+        self.logger.LogFatal('Cannot find chrome src dir to '
+                             'run telemetry: %s' % label.chrome_src)
+      # Check for and remove temporary file that may have been left by
+      # previous telemetry runs (and which might prevent this run from
+      # working).
+      self.RemoveTelemetryTempFile(machine, label.chromeos_root)
+
+    # --autotest_dir specifies which autotest directory to use.
+    autotest_dir_arg = '--autotest_dir=%s' % (
+        label.autotest_path if label.autotest_path else AUTOTEST_DIR)
+
+    # --fast avoids unnecessary copies of syslogs.
+    fast_arg = '--fast'
+    board_arg = '--board=%s' % label.board
+
+    args_list = self.GenTestArgs(benchmark, test_args, profiler_args)
+    args_arg = '--args=%s' % pipes.quote(' '.join(args_list))
+
+    command = ' '.join([
+        TEST_THAT_PATH, autotest_dir_arg, fast_arg, board_arg, args_arg,
+        machine, benchmark.suite if
+        (benchmark.suite == 'telemetry_Crosperf' or
+         benchmark.suite == 'crosperf_Wrapper') else benchmark.test_name
+    ])
 
-    autotest_dir_arg = '--autotest_dir %s' % autotest_dir
-    # For non-telemetry tests, specify an autotest directory only if the
-    # specified directory is different from default (crosbug.com/679001).
-    if autotest_dir == AUTOTEST_DIR:
-      autotest_dir_arg = ''
+    # Use --no-ns-pid so that cros_sdk does not create a different
+    # process namespace and we can kill process created easily by their
+    # process group.
+    chrome_root_options = ('--no-ns-pid '
+                           '--chrome_root={} --chrome_root_mount={} '
+                           'FEATURES="-usersandbox" '
+                           'CHROME_ROOT={}'.format(label.chrome_src,
+                                                   CHROME_MOUNT_DIR,
+                                                   CHROME_MOUNT_DIR))
 
-    command = (('%s %s --fast '
-                '%s %s %s') % (TEST_THAT_PATH, autotest_dir_arg,
-                               ' '.join(options), machine, benchmark.test_name))
     if self.log_level != 'verbose':
       self.logger.LogOutput('Running test.')
       self.logger.LogOutput('CMD: %s' % command)
 
-    # Use --no-ns-pid so that cros_sdk does not create a different
-    # process namespace and we can kill process created easily by
-    # their process group.
+
     return self._ce.ChrootRunCommandWOutput(
         label.chromeos_root,
         command,
         command_terminator=self._ct,
-        cros_sdk_options='--no-ns-pid')
+        cros_sdk_options=chrome_root_options)
 
   def DownloadResult(self, label, task_id):
     gsutil_cmd = os.path.join(label.chromeos_root, GS_UTIL)
@@ -200,26 +228,9 @@ class SuiteRunner(object):
     # TODO: now only put toolchain pool here, user need to be able to specify
     # which pool to use. Need to request feature to not use this option at all.
     options.append('-pool=toolchain')
-    test_args_list = []
-    if benchmark.suite == 'telemetry_Crosperf':
-      if test_args:
-        # Strip double quotes off args (so we can wrap them in single
-        # quotes, to pass through to Telemetry).
-        if test_args[0] == '"' and test_args[-1] == '"':
-          test_args_list.append(test_args[1:-1])
-      if profiler_args:
-        test_args_list.append(GetProfilerArgs(profiler_args))
-      if self.dut_config:
-        test_args_list.append(GetDutConfigArgs(self.dut_config))
-      test_args_list.append('run_local=%s' % benchmark.run_local)
-      test_args_list.append('test=%s' % benchmark.test_name)
-    else:
-      if profiler_args:
-        self.logger.LogFatal('Client tests do not support profiler.')
-      if test_args:
-        test_args_list.append(test_args)
-    if test_args_list:
-      options.append('-test-args=%s' % pipes.quote(' '.join(test_args_list)))
+
+    args_list = self.GenTestArgs(benchmark, test_args, profiler_args)
+    options.append('-test-args=%s' % pipes.quote(' '.join(args_list)))
 
     dimensions = []
     for dut in label.remote:
@@ -227,7 +238,9 @@ class SuiteRunner(object):
 
     command = (('%s create-test %s %s %s') % \
               (SKYLAB_PATH, ' '.join(dimensions), ' '.join(options),
-               'telemetry_Crosperf' if benchmark.suite == 'telemetry_Crosperf'
+               benchmark.suite if
+               (benchmark.suite == 'telemetry_Crosperf' or
+                benchmark.suite == 'crosperf_Wrapper')
                else benchmark.test_name))
 
     if self.log_level != 'verbose':
@@ -279,67 +292,6 @@ class SuiteRunner(object):
       return (ret_tup[0], result_dir, ret_tup[2])
     return ret_tup
 
-  def RemoveTelemetryTempFile(self, machine, chromeos_root):
-    filename = 'telemetry@%s' % machine
-    fullname = os.path.join(chromeos_root, 'chroot', 'tmp', filename)
-    if os.path.exists(fullname):
-      os.remove(fullname)
-
-  def Telemetry_Crosperf_Run(self, machine, label, benchmark, test_args,
-                             profiler_args):
-    if not os.path.isdir(label.chrome_src):
-      self.logger.LogFatal('Cannot find chrome src dir to'
-                           ' run telemetry: %s' % label.chrome_src)
-
-    # Check for and remove temporary file that may have been left by
-    # previous telemetry runs (and which might prevent this run from
-    # working).
-    self.RemoveTelemetryTempFile(machine, label.chromeos_root)
-
-    # For telemetry runs, we can use the autotest copy from the source
-    # location. No need to have one under /build/<board>.
-    autotest_dir_arg = '--autotest_dir %s' % AUTOTEST_DIR
-    if label.autotest_path != '':
-      autotest_dir_arg = '--autotest_dir %s' % label.autotest_path
-
-    profiler_args = GetProfilerArgs(profiler_args)
-    dut_config_args = GetDutConfigArgs(self.dut_config)
-    # --fast avoids unnecessary copies of syslogs.
-    fast_arg = '--fast'
-    args_string = ''
-    if test_args:
-      # Strip double quotes off args (so we can wrap them in single
-      # quotes, to pass through to Telemetry).
-      if test_args[0] == '"' and test_args[-1] == '"':
-        test_args = test_args[1:-1]
-      args_string = "test_args='%s'" % test_args
-
-    args = '{} run_local={} test={} {} {}'.format(
-        args_string, benchmark.run_local, benchmark.test_name, dut_config_args,
-        profiler_args)
-
-    cmd = ('{} {} {} --board={} --args={} {} telemetry_Crosperf'.format(
-        TEST_THAT_PATH, autotest_dir_arg, fast_arg, label.board,
-        pipes.quote(args), machine))
-
-    # Use --no-ns-pid so that cros_sdk does not create a different
-    # process namespace and we can kill process created easily by their
-    # process group.
-    chrome_root_options = ('--no-ns-pid '
-                           '--chrome_root={} --chrome_root_mount={} '
-                           'FEATURES="-usersandbox" '
-                           'CHROME_ROOT={}'.format(label.chrome_src,
-                                                   CHROME_MOUNT_DIR,
-                                                   CHROME_MOUNT_DIR))
-    if self.log_level != 'verbose':
-      self.logger.LogOutput('Running test.')
-      self.logger.LogOutput('CMD: %s' % cmd)
-    return self._ce.ChrootRunCommandWOutput(
-        label.chromeos_root,
-        cmd,
-        command_terminator=self._ct,
-        cros_sdk_options=chrome_root_options)
-
   def CommandTerminator(self):
     return self._ct
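For context on the --args assembly in Test_That_Run above: the list built by GenTestArgs is joined into one string and passed through pipes.quote, so test_that receives it as a single shell word with the embedded single quotes escaped. A standalone sketch, not part of the patch; the sample args_list mirrors the crosperf_Wrapper case in test_gen_test_args below:

import pipes  # pipes.quote is the same helper as shlex.quote

# Argument list as GenTestArgs would emit it for a crosperf_Wrapper run.
args_list = ["test_args='--iterations=2'", "dut_config='{}'", 'test=webgl']

# Each embedded single quote becomes the '"'"' shell idiom, so the whole
# list survives the chroot shell as one --args value.
print('--args=%s' % pipes.quote(' '.join(args_list)))
# --args='test_args='"'"'--iterations=2'"'"' dut_config='"'"'{}'"'"' test=webgl'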
diff --git a/crosperf/suite_runner_unittest.py b/crosperf/suite_runner_unittest.py
index 7f3a7bc4..73fcb45b 100755
--- a/crosperf/suite_runner_unittest.py
+++ b/crosperf/suite_runner_unittest.py
@@ -10,8 +10,6 @@ from __future__ import print_function
 
 import json
-import os.path
-import time
 import unittest
 import unittest.mock as mock
 
@@ -23,7 +21,6 @@
 from benchmark import Benchmark
 from cros_utils import command_executer
 from cros_utils import logger
-from cros_utils.device_setup_utils import DutWrapper
 
 from machine_manager import MockCrosMachine
 
@@ -49,23 +46,21 @@ class SuiteRunnerTest(unittest.TestCase):
       'telemetry_Crosperf',  # suite
       True)  # show_all_results
 
-  test_that_bench = Benchmark(
+  crosperf_wrapper_bench = Benchmark(
       'b2_test',  # name
-      'octane',  # test_name
+      'webgl',  # test_name
       '',  # test_args
       3,  # iterations
       False,  # rm_chroot_tmp
-      'record -e cycles')  # perf_args
+      '',  # perf_args
+      'crosperf_Wrapper')  # suite
 
   def __init__(self, *args, **kwargs):
     super(SuiteRunnerTest, self).__init__(*args, **kwargs)
-    self.call_test_that_run = False
     self.skylab_run_args = []
     self.test_that_args = []
-    self.telemetry_run_args = []
-    self.telemetry_crosperf_args = []
     self.call_skylab_run = False
-    self.call_telemetry_crosperf_run = False
+    self.call_test_that_run = False
 
   def setUp(self):
     self.runner = suite_runner.SuiteRunner(
@@ -79,30 +74,30 @@ class SuiteRunnerTest(unittest.TestCase):
     res = suite_runner.GetProfilerArgs(input_str)
     self.assertEqual(res, output_str)
 
+  def test_get_dut_config_args(self):
+    dut_config = {'enable_aslr': False, 'top_interval': 1.0}
+    output_str = ('dut_config=' + "'" + '{"enable_aslr": '
+                  'false, "top_interval": 1.0}' + "'" + '')
+    res = suite_runner.GetDutConfigArgs(dut_config)
+    self.assertEqual(res, output_str)
+
   def test_run(self):
 
     def reset():
+      self.test_that_args = []
+      self.skylab_run_args = []
       self.call_test_that_run = False
       self.call_skylab_run = False
-      self.call_telemetry_crosperf_run = False
-      self.skylab_run_args = []
-      self.test_that_args = []
-      self.telemetry_run_args = []
-      self.telemetry_crosperf_args = []
 
     def FakeSkylabRun(test_label, benchmark, test_args, profiler_args):
      self.skylab_run_args = [test_label, benchmark, test_args, profiler_args]
      self.call_skylab_run = True
      return 'Ran FakeSkylabRun'
 
-    def FakeTelemetryCrosperfRun(machine, test_label, benchmark, test_args,
-                                 profiler_args):
-      self.telemetry_crosperf_args = [
-          machine, test_label, benchmark, test_args, profiler_args
-      ]
-      self.call_telemetry_crosperf_run = True
-      return 'Ran FakeTelemetryCrosperfRun'
-
     def FakeTestThatRun(machine, test_label, benchmark, test_args,
                         profiler_args):
       self.test_that_args = [
@@ -111,16 +106,8 @@ class SuiteRunnerTest(unittest.TestCase):
       self.call_test_that_run = True
       return 'Ran FakeTestThatRun'
 
-    def FakeRunner(command, ignore_status=False):
-      # pylint fix for unused variable.
-      del command, ignore_status
-      return 0, '', ''
-
     self.runner.Skylab_Run = FakeSkylabRun
-    self.runner.Telemetry_Crosperf_Run = FakeTelemetryCrosperfRun
     self.runner.Test_That_Run = FakeTestThatRun
-    DutWrapper.SetupDevice = mock.Mock(return_value=0)
-    DutWrapper.RunCommandOnDut = mock.Mock(return_value=FakeRunner)
     self.runner.dut_config['enable_aslr'] = False
     self.runner.dut_config['cooldown_time'] = 0
@@ -133,36 +120,63 @@ class SuiteRunnerTest(unittest.TestCase):
     test_args = ''
     profiler_args = ''
 
-    reset()
+    # Test skylab run for telemetry_Crosperf and crosperf_Wrapper benchmarks.
     self.mock_label.skylab = True
-    self.runner.Run(cros_machine, self.mock_label, self.test_that_bench,
+    reset()
+    self.runner.Run(cros_machine, self.mock_label, self.crosperf_wrapper_bench,
                     test_args, profiler_args)
     self.assertTrue(self.call_skylab_run)
     self.assertFalse(self.call_test_that_run)
-    self.assertFalse(self.call_telemetry_crosperf_run)
     self.assertEqual(self.skylab_run_args,
-                     [self.mock_label, self.test_that_bench, '', ''])
-    DutWrapper.SetupDevice.assert_called_once()
-    self.mock_label.skylab = False
+                     [self.mock_label, self.crosperf_wrapper_bench, '', ''])
+
+    reset()
+    self.runner.Run(cros_machine, self.mock_label,
+                    self.telemetry_crosperf_bench, test_args, profiler_args)
+    self.assertTrue(self.call_skylab_run)
+    self.assertFalse(self.call_test_that_run)
+    self.assertEqual(self.skylab_run_args,
+                     [self.mock_label, self.telemetry_crosperf_bench, '', ''])
 
+    # Test test_that run for telemetry_Crosperf and crosperf_Wrapper benchmarks.
+    self.mock_label.skylab = False
     reset()
-    self.runner.Run(cros_machine, self.mock_label, self.test_that_bench,
+    self.runner.Run(cros_machine, self.mock_label, self.crosperf_wrapper_bench,
                     test_args, profiler_args)
     self.assertTrue(self.call_test_that_run)
-    self.assertFalse(self.call_telemetry_crosperf_run)
+    self.assertFalse(self.call_skylab_run)
     self.assertEqual(
         self.test_that_args,
-        ['fake_machine', self.mock_label, self.test_that_bench, '', ''])
+        ['fake_machine', self.mock_label, self.crosperf_wrapper_bench, '', ''])
 
     reset()
     self.runner.Run(cros_machine, self.mock_label,
                     self.telemetry_crosperf_bench, test_args, profiler_args)
-    self.assertFalse(self.call_test_that_run)
-    self.assertTrue(self.call_telemetry_crosperf_run)
-    self.assertEqual(self.telemetry_crosperf_args, [
+    self.assertTrue(self.call_test_that_run)
+    self.assertFalse(self.call_skylab_run)
+    self.assertEqual(self.test_that_args, [
         'fake_machine', self.mock_label, self.telemetry_crosperf_bench, '', ''
     ])
 
+  def test_gen_test_args(self):
+    test_args = '--iterations=2'
+    perf_args = 'record -a -e cycles'
+
+    # Test crosperf_Wrapper benchmarks arg list generation
+    args_list = ["test_args='--iterations=2'", "dut_config='{}'", 'test=webgl']
+    res = self.runner.GenTestArgs(self.crosperf_wrapper_bench, test_args, '')
+    self.assertCountEqual(res, args_list)
+
+    # Test telemetry_Crosperf benchmarks arg list generation
+    args_list = [
+        "test_args='--iterations=2'", "dut_config='{}'", 'test=octane',
+        'run_local=False'
+    ]
+    args_list.append(suite_runner.GetProfilerArgs(perf_args))
+    res = self.runner.GenTestArgs(self.telemetry_crosperf_bench, test_args,
+                                  perf_args)
+    self.assertCountEqual(res, args_list)
+
   @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
   @mock.patch.object(command_executer.CommandExecuter,
                      'ChrootRunCommandWOutput')
@@ -176,10 +190,12 @@ class SuiteRunnerTest(unittest.TestCase):
     self.real_logger.LogMsg = FakeLogMsg
     self.runner.logger = self.real_logger
 
+    # Test crosperf_Wrapper benchmarks cannot take perf_args
     raised_exception = False
     try:
       self.runner.Test_That_Run('lumpy1.cros', self.mock_label,
-                                self.test_that_bench, '', 'record -a -e cycles')
+                                self.crosperf_wrapper_bench, '',
+                                'record -a -e cycles')
     except SystemExit:
       raised_exception = True
     self.assertTrue(raised_exception)
@@ -188,7 +204,8 @@ class SuiteRunnerTest(unittest.TestCase):
     self.mock_cmd_exec.ChrootRunCommandWOutput = mock_chroot_runcmd
     self.mock_cmd_exec.CrosRunCommand = mock_cros_runcmd
     res = self.runner.Test_That_Run('lumpy1.cros', self.mock_label,
-                                    self.test_that_bench, '--iterations=2', '')
+                                    self.crosperf_wrapper_bench,
+                                    '--iterations=2', '')
     self.assertEqual(mock_cros_runcmd.call_count, 1)
     self.assertEqual(mock_chroot_runcmd.call_count, 1)
     self.assertEqual(res, 0)
@@ -197,48 +214,9 @@ class SuiteRunnerTest(unittest.TestCase):
     args_list = mock_chroot_runcmd.call_args_list[0][0]
     args_dict = mock_chroot_runcmd.call_args_list[0][1]
     self.assertEqual(len(args_list), 2)
-    self.assertEqual(args_list[0], '/tmp/chromeos')
-    self.assertEqual(args_list[1], ('/usr/bin/test_that '
-                                    '--fast --board=lumpy '
-                                    '--iterations=2 lumpy1.cros octane'))
     self.assertEqual(args_dict['command_terminator'], self.mock_cmd_term)
     self.real_logger.LogMsg = save_log_msg
 
-  @mock.patch.object(os.path, 'isdir')
-  @mock.patch.object(command_executer.CommandExecuter,
-                     'ChrootRunCommandWOutput')
-  def test_telemetry_crosperf_run(self, mock_chroot_runcmd, mock_isdir):
-
-    mock_isdir.return_value = True
-    mock_chroot_runcmd.return_value = 0
-    self.mock_cmd_exec.ChrootRunCommandWOutput = mock_chroot_runcmd
-    profiler_args = ("--profiler=custom_perf --profiler_args='perf_options"
-                     '="record -a -e cycles,instructions"\'')
-    self.runner.dut_config['turbostat'] = True
-    self.runner.dut_config['top_interval'] = 3
-    res = self.runner.Telemetry_Crosperf_Run('lumpy1.cros', self.mock_label,
-                                             self.telemetry_crosperf_bench, '',
-                                             profiler_args)
-    self.assertEqual(res, 0)
-    self.assertEqual(mock_chroot_runcmd.call_count, 1)
-    args_list = mock_chroot_runcmd.call_args_list[0][0]
-    args_dict = mock_chroot_runcmd.call_args_list[0][1]
-    self.assertEqual(args_list[0], '/tmp/chromeos')
-    self.assertEqual(
-        args_list[1],
-        ('/usr/bin/test_that --autotest_dir '
-         '/mnt/host/source/src/third_party/autotest/files --fast '
-         "--board=lumpy --args=' run_local=False test=octane "
-         'dut_config=\'"\'"\'{"turbostat": true, "top_interval": 3}\'"\'"\' '
-         'profiler=custom_perf profiler_args=\'"\'"\'record -a -e '
-         'cycles,instructions\'"\'"\'\' lumpy1.cros telemetry_Crosperf'))
-    self.assertEqual(args_dict['cros_sdk_options'],
-                     ('--no-ns-pid --chrome_root= '
-                      '--chrome_root_mount=/tmp/chrome_root '
-                      'FEATURES="-usersandbox" CHROME_ROOT=/tmp/chrome_root'))
-    self.assertEqual(args_dict['command_terminator'], self.mock_cmd_term)
-    self.assertEqual(len(args_dict), 2)
-
   @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
   @mock.patch.object(json, 'loads')
   def test_skylab_run_client(self, mock_json_loads, mock_runcmd):
@@ -265,82 +243,21 @@ class SuiteRunnerTest(unittest.TestCase):
     self.mock_label.skylab = True
     self.runner.DownloadResult = FakeDownloadResult
-    res = self.runner.Skylab_Run(self.mock_label, self.test_that_bench, '', '')
+    res = self.runner.Skylab_Run(self.mock_label, self.crosperf_wrapper_bench,
+                                 '', '')
     ret_tup = (0, '\nResults placed in tmp/swarming-12345\n', '')
     self.assertEqual(res, ret_tup)
     self.assertEqual(mock_runcmd.call_count, 2)
 
     args_list = mock_runcmd.call_args_list[0][0]
     args_dict = mock_runcmd.call_args_list[0][1]
-    self.assertEqual(args_list[0],
-                     ('/usr/local/bin/skylab create-test '
-                      '-dim dut_name:lumpy1 -dim dut_name:lumpy.cros2 '
-                      '-board=lumpy -image=build '
-                      '-pool=toolchain octane'))
+    self.assertEqual(len(args_list), 1)
     self.assertEqual(args_dict['command_terminator'], self.mock_cmd_term)
 
     args_list = mock_runcmd.call_args_list[1][0]
     self.assertEqual(args_list[0], ('skylab wait-task 12345'))
     self.assertEqual(args_dict['command_terminator'], self.mock_cmd_term)
 
-  @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
-  @mock.patch.object(json, 'loads')
-  def test_skylab_run_telemetry_Crosperf(self, mock_json_loads, mock_runcmd):
-
-    def FakeDownloadResult(l, task_id):
-      if l and task_id:
-        self.assertEqual(task_id, '12345')
-        return None
-
-    mock_runcmd.return_value = (
-        0,
-        'Created Swarming task https://swarming/task/b12345',
-        '',
-    )
-    self.mock_cmd_exec.RunCommandWOutput = mock_runcmd
-
-    mock_json_loads.return_value = {
-        'child-results': [{
-            'success': True,
-            'task-run-url': 'https://swarming/task?id=12345'
-        }]
-    }
-    self.mock_json.loads = mock_json_loads
-
-    self.mock_label.skylab = True
-    self.runner.DownloadResult = FakeDownloadResult
-    self.runner.Skylab_Run(self.mock_label, self.telemetry_crosperf_bench, '',
-                           '')
-    args_list = mock_runcmd.call_args_list[0][0]
-    self.assertEqual(
-        args_list[0],
-        ('/usr/local/bin/skylab create-test '
-         '-dim dut_name:lumpy1 -dim dut_name:lumpy.cros2 '
-         '-board=lumpy -image=build '
-         "-pool=toolchain -test-args='run_local=False test=octane' "
-         'telemetry_Crosperf'))
-
-  @mock.patch.object(time, 'sleep')
-  @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
-  def test_download_result(self, mock_runcmd, mock_sleep):
-    mock_runcmd.return_value = 0
-    mock_sleep.return_value = 0
-    self.mock_cmd_exec.RunCommand = mock_runcmd
-
-    self.runner.DownloadResult(self.mock_label, '12345')
-
-    self.assertEqual(mock_runcmd.call_count, 2)
-    cmd = mock_runcmd.call_args_list[0][0][0]
-    self.assertEqual(cmd,
-                     ('/tmp/chromeos/src/chromium/depot_tools/gsutil.py ls '
-                      'gs://chromeos-autotest-results/swarming-12345/'
-                      'autoserv_test'))
-    cmd = mock_runcmd.call_args_list[1][0][0]
-    self.assertEqual(cmd,
-                     ('/tmp/chromeos/src/chromium/depot_tools/gsutil.py -mq '
-                      'cp -r gs://chromeos-autotest-results/swarming-12345 '
-                      '/tmp/chromeos/chroot/tmp'))
-
 
 if __name__ == '__main__':
   unittest.main()
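For reference, test_get_dut_config_args above pins down the expected dut_config serialization. A minimal sketch of a helper with that behavior; the real GetDutConfigArgs lives in suite_runner.py and may differ in detail:

import json

def get_dut_config_args(dut_config):
  # json.dumps renders Python's False as JSON false, yielding the quoted
  # blob that the wrapper test parses back on the DUT side.
  return "dut_config='%s'" % json.dumps(dut_config)

print(get_dut_config_args({'enable_aslr': False, 'top_interval': 1.0}))
# dut_config='{"enable_aslr": false, "top_interval": 1.0}'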