author | Zhizhou Yang <zhizhouy@google.com> | 2019-12-02 16:41:48 -0800
committer | Zhizhou Yang <zhizhouy@google.com> | 2019-12-04 17:39:56 +0000
commit | 3089224e44d5d687ced2e96c7d090dfb36d7cc26 (patch)
tree | ee9738b4cd2e56a1639ae3f10fa7caca7323913c /crosperf
parent | 455d84697c1ed30dfa5064a2d024811c9c6892ac (diff)
download | toolchain-utils-3089224e44d5d687ced2e96c7d090dfb36d7cc26.tar.gz
crosperf: move enable_aslr and turbostat option into dut_config
This patch moves enable_aslr and turbostat into the dut_config dictionary,
since both are device setup options; keeping them together makes the code
cleaner and easier to migrate to telemetry_Crosperf.
TEST=Passed unittests.
BUG=chromium:1020655
Change-Id: I29a649e76591e206efc19bb2b8a8df9aead16575
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/1947903
Reviewed-by: Denis Nikitin <denik@chromium.org>
Commit-Queue: Zhizhou Yang <zhizhouy@google.com>
Tested-by: Zhizhou Yang <zhizhouy@google.com>
Auto-Submit: Zhizhou Yang <zhizhouy@google.com>
Diffstat (limited to 'crosperf')
-rw-r--r-- | crosperf/benchmark.py | 12
-rw-r--r-- | crosperf/benchmark_run.py | 18
-rwxr-xr-x | crosperf/benchmark_run_unittest.py | 2
-rwxr-xr-x | crosperf/benchmark_unittest.py | 2
-rw-r--r-- | crosperf/experiment.py | 14
-rw-r--r-- | crosperf/experiment_factory.py | 122
-rwxr-xr-x | crosperf/experiment_factory_unittest.py | 6
-rwxr-xr-x | crosperf/machine_manager_unittest.py | 6
-rw-r--r-- | crosperf/suite_runner.py | 11
-rwxr-xr-x | crosperf/suite_runner_unittest.py | 6
10 files changed, 67 insertions, 132 deletions
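
To see the shape of the change before reading the patch: device-setup options that previously traveled as standalone parameters (`enable_aslr` on SuiteRunner/BenchmarkRun, `turbostat` on Benchmark) now ride in the single `dut_config` dictionary. A minimal sketch of the pattern follows — the key names come from the `experiment_factory.py` hunk in the patch below, while the example values are illustrative, not crosperf defaults:

```python
# Sketch only: mirrors the dut_config consolidation this patch performs.
# Key names are taken from experiment_factory.py below; the values here
# are made-up examples, not crosperf's real defaults.
dut_config = {
    'enable_aslr': False,  # was a SuiteRunner/BenchmarkRun keyword argument
    'turbostat': True,     # was a Benchmark constructor argument
    'intel_pstate': '',    # the remaining keys existed before this patch
    'cooldown_time': 0,
    'cooldown_temp': 40,
    'governor': 'performance',
    'cpu_usage': 'all',
    'cpu_freq_pct': 95,
    'top_interval': 1,
}

# Consumers now read the dictionary instead of holding private copies,
# as SuiteRunner.Run does for ASLR:
if not dut_config['enable_aslr']:
    print('would call run_on_dut.DisableASLR() here')
```
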
```diff
diff --git a/crosperf/benchmark.py b/crosperf/benchmark.py
index 5c11e27e..0413b593 100644
--- a/crosperf/benchmark.py
+++ b/crosperf/benchmark.py
@@ -9,8 +9,8 @@ from __future__ import division
 from __future__ import print_function
 
 import math
-#FIXME(denik): Fix the import in chroot.
-#pylint: disable=import-error
+# FIXME(denik): Fix the import in chroot.
+# pylint: disable=import-error
 from scipy import stats
 
 # See crbug.com/673558 for how these are estimated.
@@ -65,12 +65,11 @@ class Benchmark(object):
                retries=0,
                run_local=False,
                cwp_dso='',
-               weight=0,
-               turbostat=True):
+               weight=0):
     self.name = name
-    #For telemetry, this is the benchmark name.
+    # For telemetry, this is the benchmark name.
     self.test_name = test_name
-    #For telemetry, this is the data.
+    # For telemetry, this is the data.
     self.test_args = test_args
     self.iterations = iterations if iterations > 0 else _samples(name)
     self.perf_args = perf_args
@@ -86,4 +85,3 @@ class Benchmark(object):
     self.run_local = run_local
     self.cwp_dso = cwp_dso
     self.weight = weight
-    self.turbostat = turbostat
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
index a7c3b7d1..f17de1be 100644
--- a/crosperf/benchmark_run.py
+++ b/crosperf/benchmark_run.py
@@ -31,18 +31,9 @@ STATUS_PENDING = 'PENDING'
 class BenchmarkRun(threading.Thread):
   """The benchmarkrun class."""
 
-  def __init__(self,
-               name,
-               benchmark,
-               label,
-               iteration,
-               cache_conditions,
-               machine_manager,
-               logger_to_use,
-               log_level,
-               share_cache,
-               dut_config,
-               enable_aslr=False):
+  def __init__(self, name, benchmark, label, iteration, cache_conditions,
+               machine_manager, logger_to_use, log_level, share_cache,
+               dut_config):
     threading.Thread.__init__(self)
     self.name = name
     self._logger = logger_to_use
@@ -55,8 +46,7 @@ class BenchmarkRun(threading.Thread):
     self.retval = None
     self.run_completed = False
     self.machine_manager = machine_manager
-    self.suite_runner = SuiteRunner(
-        dut_config, self._logger, self.log_level, enable_aslr=enable_aslr)
+    self.suite_runner = SuiteRunner(dut_config, self._logger, self.log_level)
     self.machine = None
     self.cache_conditions = cache_conditions
     self.runs_complete = 0
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
index 4daa77bb..5696c107 100755
--- a/crosperf/benchmark_run_unittest.py
+++ b/crosperf/benchmark_run_unittest.py
@@ -119,7 +119,7 @@ class BenchmarkRunTest(unittest.TestCase):
     args_list = [
         'self', 'name', 'benchmark', 'label', 'iteration', 'cache_conditions',
         'machine_manager', 'logger_to_use', 'log_level', 'share_cache',
-        'dut_config', 'enable_aslr'
+        'dut_config'
     ]
     arg_spec = inspect.getargspec(benchmark_run.BenchmarkRun.__init__)
     self.assertEqual(len(arg_spec.args), len(args_list))
diff --git a/crosperf/benchmark_unittest.py b/crosperf/benchmark_unittest.py
index 6c0c22f6..63c0a1c0 100755
--- a/crosperf/benchmark_unittest.py
+++ b/crosperf/benchmark_unittest.py
@@ -60,7 +60,7 @@ class BenchmarkTestCase(unittest.TestCase):
     args_list = [
         'self', 'name', 'test_name', 'test_args', 'iterations', 'rm_chroot_tmp',
         'perf_args', 'suite', 'show_all_results', 'retries', 'run_local',
-        'cwp_dso', 'weight', 'turbostat'
+        'cwp_dso', 'weight'
     ]
     arg_spec = inspect.getargspec(Benchmark.__init__)
     self.assertEqual(len(arg_spec.args), len(args_list))
diff --git a/crosperf/experiment.py b/crosperf/experiment.py
index 1d87b6e4..d6d9ee4c 100644
--- a/crosperf/experiment.py
+++ b/crosperf/experiment.py
@@ -28,8 +28,8 @@ class Experiment(object):
   def __init__(self, name, remote, working_directory, chromeos_root,
                cache_conditions, labels, benchmarks, experiment_file,
                email_to, acquire_timeout, log_dir, log_level, share_cache,
-               results_directory, locks_directory, cwp_dso, enable_aslr,
-               ignore_min_max, skylab, dut_config):
+               results_directory, locks_directory, cwp_dso, ignore_min_max,
+               skylab, dut_config):
     self.name = name
     self.working_directory = working_directory
     self.remote = remote
@@ -54,7 +54,6 @@ class Experiment(object):
     self.locked_machines = []
     self.lock_mgr = None
     self.cwp_dso = cwp_dso
-    self.enable_aslr = enable_aslr
     self.ignore_min_max = ignore_min_max
     self.skylab = skylab
     self.l = logger.GetLogger(log_dir)
@@ -124,11 +123,10 @@ class Experiment(object):
           logger_to_use = logger.Logger(self.log_dir, 'run.%s' % (full_name),
                                         True)
           benchmark_runs.append(
-              benchmark_run.BenchmarkRun(benchmark_run_name, benchmark, label,
-                                         iteration, self.cache_conditions,
-                                         self.machine_manager, logger_to_use,
-                                         self.log_level, self.share_cache,
-                                         dut_config, self.enable_aslr))
+              benchmark_run.BenchmarkRun(
+                  benchmark_run_name, benchmark, label, iteration,
+                  self.cache_conditions, self.machine_manager, logger_to_use,
+                  self.log_level, self.share_cache, dut_config))
 
     return benchmark_runs
 
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index 1758f0c7..7d1689cc 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -70,7 +70,7 @@ telemetry_crosbolt_perf_tests = [
     'speedometer2',
     'jetstream',
     'loading.desktop',
-    #'rendering.desktop',
+    # 'rendering.desktop',
 ]
 
 crosbolt_perf_tests = [
@@ -82,7 +82,7 @@ crosbolt_perf_tests = [
 #     'cheets_PerfBootServer',
 #     'cheets_CandyCrushTest',
 #     'cheets_LinpackTest',
-#]
+# ]
 
 dso_list = [
     'all',
@@ -101,14 +101,12 @@ class ExperimentFactory(object):
 
   def AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
                          iterations, rm_chroot_tmp, perf_args, suite,
-                         show_all_results, retries, run_local, cwp_dso, weight,
-                         turbostat):
+                         show_all_results, retries, run_local, cwp_dso, weight):
     """Add all the tests in a set to the benchmarks list."""
     for test_name in benchmark_list:
-      telemetry_benchmark = Benchmark(test_name, test_name, test_args,
-                                      iterations, rm_chroot_tmp, perf_args,
-                                      suite, show_all_results, retries,
-                                      run_local, cwp_dso, weight, turbostat)
+      telemetry_benchmark = Benchmark(
+          test_name, test_name, test_args, iterations, rm_chroot_tmp, perf_args,
+          suite, show_all_results, retries, run_local, cwp_dso, weight)
       benchmarks.append(telemetry_benchmark)
 
   def GetExperiment(self, experiment_file, working_directory, log_dir):
@@ -166,16 +164,16 @@ class ExperimentFactory(object):
     cwp_dso = global_settings.GetField('cwp_dso')
     if cwp_dso and not cwp_dso in dso_list:
       raise RuntimeError('The DSO specified is not supported')
-    enable_aslr = global_settings.GetField('enable_aslr')
     ignore_min_max = global_settings.GetField('ignore_min_max')
-    turbostat_opt = global_settings.GetField('turbostat')
     dut_config = {
+        'enable_aslr': global_settings.GetField('enable_aslr'),
         'intel_pstate': global_settings.GetField('intel_pstate'),
         'cooldown_time': global_settings.GetField('cooldown_time'),
         'cooldown_temp': global_settings.GetField('cooldown_temp'),
         'governor': global_settings.GetField('governor'),
         'cpu_usage': global_settings.GetField('cpu_usage'),
         'cpu_freq_pct': global_settings.GetField('cpu_freq_pct'),
+        'turbostat': global_settings.GetField('turbostat'),
         'top_interval': global_settings.GetField('top_interval'),
     }
 
@@ -257,50 +255,20 @@ class ExperimentFactory(object):
 
       if suite == 'telemetry_Crosperf':
         if test_name == 'all_perfv2':
-          self.AppendBenchmarkSet(
-              benchmarks,
-              telemetry_perfv2_tests,
-              test_args,
-              iterations,
-              rm_chroot_tmp,
-              perf_args,
-              suite,
-              show_all_results,
-              retries,
-              run_local,
-              cwp_dso,
-              weight,
-              turbostat=turbostat_opt)
+          self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests, test_args,
+                                  iterations, rm_chroot_tmp, perf_args, suite,
+                                  show_all_results, retries, run_local, cwp_dso,
+                                  weight)
         elif test_name == 'all_pagecyclers':
-          self.AppendBenchmarkSet(
-              benchmarks,
-              telemetry_pagecycler_tests,
-              test_args,
-              iterations,
-              rm_chroot_tmp,
-              perf_args,
-              suite,
-              show_all_results,
-              retries,
-              run_local,
-              cwp_dso,
-              weight,
-              turbostat=turbostat_opt)
+          self.AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
+                                  test_args, iterations, rm_chroot_tmp,
+                                  perf_args, suite, show_all_results, retries,
+                                  run_local, cwp_dso, weight)
         elif test_name == 'all_crosbolt_perf':
           self.AppendBenchmarkSet(
-              benchmarks,
-              telemetry_crosbolt_perf_tests,
-              test_args,
-              iterations,
-              rm_chroot_tmp,
-              perf_args,
-              'telemetry_Crosperf',
-              show_all_results,
-              retries,
-              run_local,
-              cwp_dso,
-              weight,
-              turbostat=turbostat_opt)
+              benchmarks, telemetry_crosbolt_perf_tests, test_args, iterations,
+              rm_chroot_tmp, perf_args, 'telemetry_Crosperf', show_all_results,
+              retries, run_local, cwp_dso, weight)
           self.AppendBenchmarkSet(
               benchmarks,
               crosbolt_perf_tests,
@@ -313,23 +281,12 @@ class ExperimentFactory(object):
               retries,
               run_local=False,
               cwp_dso=cwp_dso,
-              weight=weight,
-              turbostat=turbostat_opt)
+              weight=weight)
         elif test_name == 'all_toolchain_perf':
-          self.AppendBenchmarkSet(
-              benchmarks,
-              telemetry_toolchain_perf_tests,
-              test_args,
-              iterations,
-              rm_chroot_tmp,
-              perf_args,
-              suite,
-              show_all_results,
-              retries,
-              run_local,
-              cwp_dso,
-              weight,
-              turbostat=turbostat_opt)
+          self.AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
+                                  test_args, iterations, rm_chroot_tmp,
+                                  perf_args, suite, show_all_results, retries,
+                                  run_local, cwp_dso, weight)
           # Add non-telemetry toolchain-perf benchmarks:
           benchmarks.append(
               Benchmark(
@@ -344,28 +301,17 @@ class ExperimentFactory(object):
                   retries,
                   run_local=False,
                   cwp_dso=cwp_dso,
-                  weight=weight,
-                  turbostat=turbostat_opt))
+                  weight=weight))
         elif test_name == 'all_toolchain_perf_old':
           self.AppendBenchmarkSet(
-              benchmarks,
-              telemetry_toolchain_old_perf_tests,
-              test_args,
-              iterations,
-              rm_chroot_tmp,
-              perf_args,
-              suite,
-              show_all_results,
-              retries,
-              run_local,
-              cwp_dso,
-              weight,
-              turbostat=turbostat_opt)
+              benchmarks, telemetry_toolchain_old_perf_tests, test_args,
+              iterations, rm_chroot_tmp, perf_args, suite, show_all_results,
+              retries, run_local, cwp_dso, weight)
         else:
           benchmark = Benchmark(benchmark_name, test_name, test_args,
                                 iterations, rm_chroot_tmp, perf_args, suite,
                                 show_all_results, retries, run_local, cwp_dso,
-                                weight, turbostat_opt)
+                                weight)
           benchmarks.append(benchmark)
       else:
         if test_name == 'all_graphics_perf':
@@ -381,8 +327,7 @@ class ExperimentFactory(object):
               retries,
               run_local=False,
               cwp_dso=cwp_dso,
-              weight=weight,
-              turbostat=turbostat_opt)
+              weight=weight)
         else:
           # Add the single benchmark.
           benchmark = Benchmark(
@@ -397,8 +342,7 @@ class ExperimentFactory(object):
               retries,
               run_local=False,
               cwp_dso=cwp_dso,
-              weight=weight,
-              turbostat=turbostat_opt)
+              weight=weight)
           benchmarks.append(benchmark)
 
     if not benchmarks:
@@ -477,8 +421,8 @@ class ExperimentFactory(object):
                             chromeos_root, cache_conditions, labels, benchmarks,
                             experiment_file.Canonicalize(), email,
                             acquire_timeout, log_dir, log_level, share_cache,
-                            results_dir, locks_dir, cwp_dso, enable_aslr,
-                            ignore_min_max, skylab, dut_config)
+                            results_dir, locks_dir, cwp_dso, ignore_min_max,
+                            skylab, dut_config)
 
     return experiment
 
diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py
index a0a3d8d4..69e34fcc 100755
--- a/crosperf/experiment_factory_unittest.py
+++ b/crosperf/experiment_factory_unittest.py
@@ -243,7 +243,7 @@ class ExperimentFactoryTest(unittest.TestCase):
     bench_list = []
     ef.AppendBenchmarkSet(bench_list, experiment_factory.telemetry_perfv2_tests,
                           '', 1, False, '', 'telemetry_Crosperf', False, 0,
-                          False, '', 0, False)
+                          False, '', 0)
     self.assertEqual(
         len(bench_list), len(experiment_factory.telemetry_perfv2_tests))
     self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
@@ -251,7 +251,7 @@ class ExperimentFactoryTest(unittest.TestCase):
     bench_list = []
     ef.AppendBenchmarkSet(
         bench_list, experiment_factory.telemetry_pagecycler_tests, '', 1, False,
-        '', 'telemetry_Crosperf', False, 0, False, '', 0, False)
+        '', 'telemetry_Crosperf', False, 0, False, '', 0)
     self.assertEqual(
         len(bench_list), len(experiment_factory.telemetry_pagecycler_tests))
     self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
@@ -259,7 +259,7 @@ class ExperimentFactoryTest(unittest.TestCase):
     bench_list = []
     ef.AppendBenchmarkSet(
         bench_list, experiment_factory.telemetry_toolchain_perf_tests, '', 1,
-        False, '', 'telemetry_Crosperf', False, 0, False, '', 0, False)
+        False, '', 'telemetry_Crosperf', False, 0, False, '', 0)
     self.assertEqual(
         len(bench_list), len(experiment_factory.telemetry_toolchain_perf_tests))
     self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
diff --git a/crosperf/machine_manager_unittest.py b/crosperf/machine_manager_unittest.py
index 984251ca..f6a77bb8 100755
--- a/crosperf/machine_manager_unittest.py
+++ b/crosperf/machine_manager_unittest.py
@@ -12,9 +12,9 @@ from __future__ import print_function
 import os.path
 import time
 import hashlib
+import unittest
 
 import mock
-import unittest
 
 import label
 import machine_manager
@@ -184,7 +184,7 @@ class MachineManagerTest(unittest.TestCase):
     self.assertEqual(mock_run_cmd.call_count, 0)
     self.assertEqual(mock_run_croscmd.call_count, 0)
 
-    #Test 2: label.image_type == "trybot"
+    # Test 2: label.image_type == "trybot"
     ResetValues()
     LABEL_LUMPY.image_type = 'trybot'
     mock_run_cmd.return_value = 0
@@ -456,7 +456,7 @@ class MachineManagerTest(unittest.TestCase):
         suite='telemetry_Crosperf')  # suite
     test_run = MockBenchmarkRun('test run', bench, LABEL_LUMPY, 1, [], self.mm,
-                                mock_logger, 'verbose', '', {}, False)
+                                mock_logger, 'verbose', '', {})
 
     self.mm._machines = [
         self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3, self.mock_daisy1,
diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
index b5649e86..418358e0 100644
--- a/crosperf/suite_runner.py
+++ b/crosperf/suite_runner.py
@@ -60,8 +60,7 @@ class SuiteRunner(object):
                logger_to_use=None,
                log_level='verbose',
                cmd_exec=None,
-               cmd_term=None,
-               enable_aslr=False):
+               cmd_term=None):
     self.logger = logger_to_use
     self.log_level = log_level
     self._ce = cmd_exec or command_executer.GetCommandExecuter(
@@ -69,7 +68,6 @@ class SuiteRunner(object):
     # DUT command executer.
     # Will be initialized and used within Run.
     self._ct = cmd_term or command_executer.CommandTerminator()
-    self.enable_aslr = enable_aslr
     self.dut_config = dut_config
 
   def Run(self, cros_machine, label, benchmark, test_args, profiler_args):
@@ -134,7 +132,7 @@ class SuiteRunner(object):
     with self.PauseUI(run_on_dut):
       # Unless the user turns on ASLR in the flag, we first disable ASLR
      # before running the benchmarks
-      if not self.enable_aslr:
+      if not self.dut_config['enable_aslr']:
         run_on_dut.DisableASLR()
 
       # CPU usage setup comes first where we enable/disable cores.
@@ -370,11 +368,12 @@ class SuiteRunner(object):
       args_string = "test_args='%s'" % test_args
 
     top_interval = self.dut_config['top_interval']
+    turbostat = self.dut_config['turbostat']
     cmd = ('{} {} {} --board={} --args="{} run_local={} test={} '
            'turbostat={} top_interval={} {}" {} telemetry_Crosperf'.format(
                TEST_THAT_PATH, autotest_dir_arg, fast_arg, label.board,
-               args_string, benchmark.run_local, benchmark.test_name,
-               benchmark.turbostat, top_interval, profiler_args, machine))
+               args_string, benchmark.run_local, benchmark.test_name, turbostat,
+               top_interval, profiler_args, machine))
 
     # Use --no-ns-pid so that cros_sdk does not create a different
     # process namespace and we can kill process created easily by their
diff --git a/crosperf/suite_runner_unittest.py b/crosperf/suite_runner_unittest.py
index 5f377d7c..c2e33226 100755
--- a/crosperf/suite_runner_unittest.py
+++ b/crosperf/suite_runner_unittest.py
@@ -132,6 +132,7 @@ class SuiteRunnerTest(unittest.TestCase):
     self.runner.SetupDevice = mock.Mock()
     DutWrapper.RunCommandOnDut = mock.Mock(return_value=FakeRunner)
 
+    self.runner.dut_config['enable_aslr'] = False
     self.runner.dut_config['cooldown_time'] = 0
     self.runner.dut_config['governor'] = 'fake_governor'
     self.runner.dut_config['cpu_freq_pct'] = 65
@@ -186,6 +187,8 @@ class SuiteRunnerTest(unittest.TestCase):
     mock_run_on_dut.RunCommandOnDut = mock.Mock(return_value=FakeRunner)
     mock_run_on_dut.WaitCooldown = mock.Mock(return_value=0)
     mock_run_on_dut.GetCpuOnline = mock.Mock(return_value={0: 1, 1: 1, 2: 0})
+
+    self.runner.dut_config['enable_aslr'] = False
     self.runner.dut_config['cooldown_time'] = 0
     self.runner.dut_config['governor'] = 'fake_governor'
     self.runner.dut_config['cpu_freq_pct'] = 65
@@ -218,6 +221,7 @@ class SuiteRunnerTest(unittest.TestCase):
     mock_run_on_dut.WaitCooldown = mock.Mock(return_value=0)
     mock_run_on_dut.GetCpuOnline = mock.Mock(return_value={0: 0, 1: 1})
 
+    self.runner.dut_config['enable_aslr'] = False
     self.runner.dut_config['cooldown_time'] = 10
     self.runner.dut_config['governor'] = 'fake_governor'
     self.runner.dut_config['cpu_freq_pct'] = 75
@@ -246,6 +250,7 @@ class SuiteRunnerTest(unittest.TestCase):
                                          self.mock_logger)
     mock_run_on_dut.SetupCpuUsage = mock.Mock(side_effect=RuntimeError())
 
+    self.runner.dut_config['enable_aslr'] = False
     with self.assertRaises(RuntimeError):
       self.runner.SetupDevice(mock_run_on_dut, cros_machine)
 
@@ -313,6 +318,7 @@ class SuiteRunnerTest(unittest.TestCase):
     self.mock_cmd_exec.ChrootRunCommandWOutput = mock_chroot_runcmd
     profiler_args = ("--profiler=custom_perf --profiler_args='perf_options"
                      '="record -a -e cycles,instructions"\'')
+    self.runner.dut_config['turbostat'] = True
    self.runner.dut_config['top_interval'] = 3
     res = self.runner.Telemetry_Crosperf_Run('lumpy1.cros', self.mock_label,
                                              self.telemetry_crosperf_bench, '',
```
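
For completeness, a sketch of the consumer-side effect in `Telemetry_Crosperf_Run`: `turbostat` now comes from `dut_config` rather than from the `Benchmark` object. The format string below matches the `suite_runner.py` hunk above; every concrete value bound into it (the test_that path, board, test name, and so on) is an illustrative stand-in, not real crosperf state:

```python
# Sketch: how the test_that command is assembled after this patch.
# The format string is from suite_runner.py above; the bound values are
# hypothetical stand-ins for label/benchmark state.
dut_config = {'turbostat': True, 'top_interval': 3}

top_interval = dut_config['top_interval']
turbostat = dut_config['turbostat']
cmd = ('{} {} {} --board={} --args="{} run_local={} test={} '
       'turbostat={} top_interval={} {}" {} telemetry_Crosperf'.format(
           '/usr/bin/test_that',  # stand-in for TEST_THAT_PATH
           '',                    # autotest_dir_arg (empty when unused)
           '--fast',              # fast_arg
           'lumpy',               # label.board
           "test_args=''",        # args_string
           False,                 # benchmark.run_local
           'octane',              # benchmark.test_name
           turbostat, top_interval,
           '',                    # profiler_args
           'lumpy1.cros'))        # machine
print(cmd)
```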