diff options
author | Zhizhou Yang <zhizhouy@google.com> | 2019-12-11 16:32:34 -0800 |
---|---|---|
committer | Commit Bot <commit-bot@chromium.org> | 2019-12-14 00:51:09 +0000 |
commit | bbdc566bd8906748c4e330da8be8f53dae4149bf (patch) | |
tree | 386900ff3a21b20267030f675688fb3e47f66780 | |
parent | 2a0bffb6e6eb3ed4caa2f449483e5846e1f72554 (diff) | |
download | toolchain-utils-bbdc566bd8906748c4e330da8be8f53dae4149bf.tar.gz |
crosperf: fix skylab telemetry_Crosperf tests launching command
The current command generated by suite_runner for telemetry_Crosperf runs
is not correct, and this patch fixes it. Also added a unittest to
verify it in the future.
It is unfortunate that we currently still cannot run tests with args
successfully in skylab, but according to the args passed to skylab, this
change should work.
TEST=Checked command line to launch skylab create-test.
BUG=None
Change-Id: I05f0f4c60e04d19e2a2637192023829d4dd9f48b
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/1963355
Reviewed-by: George Burgess <gbiv@chromium.org>
Commit-Queue: Zhizhou Yang <zhizhouy@google.com>
Tested-by: Zhizhou Yang <zhizhouy@google.com>
Auto-Submit: Zhizhou Yang <zhizhouy@google.com>
-rw-r--r-- | crosperf/suite_runner.py | 47 | ||||
-rwxr-xr-x | crosperf/suite_runner_unittest.py | 50 |
2 files changed, 69 insertions, 28 deletions
diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py index eebd7682..579e30b2 100644 --- a/crosperf/suite_runner.py +++ b/crosperf/suite_runner.py @@ -118,11 +118,11 @@ class SuiteRunner(object): def Test_That_Run(self, machine, label, benchmark, test_args, profiler_args): """Run the test_that test..""" - options = '' + options = [] if label.board: - options += ' --board=%s' % label.board + options.append('--board=%s' % label.board) if test_args: - options += ' %s' % test_args + options.append(test_args) if profiler_args: self.logger.LogFatal('test_that does not support profiler.') command = 'rm -rf /usr/local/autotest/results/*' @@ -140,8 +140,8 @@ class SuiteRunner(object): autotest_dir_arg = '' command = (('%s %s --fast ' - '%s %s %s') % (TEST_THAT_PATH, autotest_dir_arg, options, - machine, benchmark.test_name)) + '%s %s %s') % (TEST_THAT_PATH, autotest_dir_arg, + ' '.join(options), machine, benchmark.test_name)) if self.log_level != 'verbose': self.logger.LogOutput('Running test.') self.logger.LogOutput('CMD: %s' % command) @@ -192,40 +192,43 @@ class SuiteRunner(object): def Skylab_Run(self, label, benchmark, test_args, profiler_args): """Run the test via skylab..""" - options = '' + options = [] if label.board: - options += '-board=%s' % label.board + options.append('-board=%s' % label.board) if label.build: - options += ' -image=%s' % label.build + options.append('-image=%s' % label.build) # TODO: now only put toolchain pool here, user need to be able to specify # which pool to use. Need to request feature to not use this option at all. - options += ' -pool=toolchain' + options.append('-pool=toolchain') + test_args_list = [] if benchmark.suite == 'telemetry_Crosperf': if test_args: # Strip double quotes off args (so we can wrap them in single # quotes, to pass through to Telemetry). 
if test_args[0] == '"' and test_args[-1] == '"': - test_args = test_args[1:-1] + test_args_list.append(test_args[1:-1]) if profiler_args: - test_args += GetProfilerArgs(profiler_args) + test_args_list.append(GetProfilerArgs(profiler_args)) if self.dut_config: - test_args += GetDutConfigArgs(self.dut_config) - test_args += ' run_local={} test={}'.format( - benchmark.run_local, - benchmark.test_name, - ) + test_args_list.append(GetDutConfigArgs(self.dut_config)) + test_args_list.append('run_local=%s' % benchmark.run_local) + test_args_list.append('test=%s' % benchmark.test_name) else: if profiler_args: self.logger.LogFatal('Client tests do not support profiler.') - if test_args: - options += ' -test-args=%s' % pipes.quote(test_args) + if test_args: + test_args_list.append(test_args) + if test_args_list: + options.append('-test-args=%s' % pipes.quote(' '.join(test_args_list))) - dimensions = '' + dimensions = [] for dut in label.remote: - dimensions += ' -dim dut_name:%s' % dut.rstrip('.cros') + dimensions.append('-dim dut_name:%s' % dut.rstrip('.cros')) - command = (('%s create-test%s %s %s') % \ - (SKYLAB_PATH, dimensions, options, benchmark.test_name)) + command = (('%s create-test %s %s %s') % \ + (SKYLAB_PATH, ' '.join(dimensions), ' '.join(options), + 'telemetry_Crosperf' if benchmark.suite == 'telemetry_Crosperf' + else benchmark.test_name)) if self.log_level != 'verbose': self.logger.LogOutput('Starting skylab test.') diff --git a/crosperf/suite_runner_unittest.py b/crosperf/suite_runner_unittest.py index 3523f14a..413822a2 100755 --- a/crosperf/suite_runner_unittest.py +++ b/crosperf/suite_runner_unittest.py @@ -199,7 +199,7 @@ class SuiteRunnerTest(unittest.TestCase): self.assertEqual(len(args_list), 2) self.assertEqual(args_list[0], '/tmp/chromeos') self.assertEqual(args_list[1], ('/usr/bin/test_that ' - '--fast --board=lumpy ' + '--fast --board=lumpy ' '--iterations=2 lumpy1.cros octane')) self.assertEqual(args_dict['command_terminator'], 
self.mock_cmd_term) self.real_logger.LogMsg = save_log_msg @@ -241,17 +241,18 @@ class SuiteRunnerTest(unittest.TestCase): @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput') @mock.patch.object(json, 'loads') - def test_skylab_run(self, mock_json_loads, mock_runcmd): + def test_skylab_run_client(self, mock_json_loads, mock_runcmd): def FakeDownloadResult(l, task_id): if l and task_id: self.assertEqual(task_id, '12345') return 0 - mock_runcmd.return_value = \ - (0, - 'Created Swarming task https://swarming/task/b12345', - '') + mock_runcmd.return_value = ( + 0, + 'Created Swarming task https://swarming/task/b12345', + '', + ) self.mock_cmd_exec.RunCommandWOutput = mock_runcmd mock_json_loads.return_value = { @@ -282,6 +283,43 @@ class SuiteRunnerTest(unittest.TestCase): self.assertEqual(args_list[0], ('skylab wait-task 12345')) self.assertEqual(args_dict['command_terminator'], self.mock_cmd_term) + @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput') + @mock.patch.object(json, 'loads') + def test_skylab_run_telemetry_Crosperf(self, mock_json_loads, mock_runcmd): + + def FakeDownloadResult(l, task_id): + if l and task_id: + self.assertEqual(task_id, '12345') + return None + + mock_runcmd.return_value = ( + 0, + 'Created Swarming task https://swarming/task/b12345', + '', + ) + self.mock_cmd_exec.RunCommandWOutput = mock_runcmd + + mock_json_loads.return_value = { + 'child-results': [{ + 'success': True, + 'task-run-url': 'https://swarming/task?id=12345' + }] + } + self.mock_json.loads = mock_json_loads + + self.mock_label.skylab = True + self.runner.DownloadResult = FakeDownloadResult + self.runner.Skylab_Run(self.mock_label, self.telemetry_crosperf_bench, '', + '') + args_list = mock_runcmd.call_args_list[0][0] + self.assertEqual( + args_list[0], + ('/usr/local/bin/skylab create-test ' + '-dim dut_name:lumpy1 -dim dut_name:lumpy.cros2 ' + '-board=lumpy -image=build ' + "-pool=toolchain -test-args='run_local=False 
test=octane' " + 'telemetry_Crosperf')) + @mock.patch.object(time, 'sleep') @mock.patch.object(command_executer.CommandExecuter, 'RunCommand') def test_download_result(self, mock_runcmd, mock_sleep): |