diff options
author | Denis Nikitin <denik@google.com> | 2019-08-30 09:10:39 -0700 |
---|---|---|
committer | Denis Nikitin <denik@chromium.org> | 2019-09-10 05:34:30 +0000 |
commit | 9d114045ddf617b67fd7af5aaccd0b5dcd4282ea (patch) | |
tree | 741ebc440d0ade50048ea78513830f84ed35875a /crosperf/benchmark_run_unittest.py | |
parent | 0f8dae198451cfc73eb6c509cbddbe9c159ea45f (diff) | |
download | toolchain-utils-9d114045ddf617b67fd7af5aaccd0b5dcd4282ea.tar.gz |
crosperf: Add dut config arguments to experiment
In global settings added optional arguments:
"cooldown_time" - wait time prior to running a benchmark (default: 0),
"cooldown_temp" - temperature threshold for waiting (default: 40),
"governor" - CPU governor (default: performance),
"cpu_usage" - Restrict CPU usage to specific configurations (default: all).
"turbostat" argument is moved from benchmark to global settings.
Current CL does not apply configurations.
Instead it just propagates parameters to SuiteRunner class
through "dut_config" dictionary.
BUG=chromium:966514
TEST=Unittest and local HW tests passed.
Change-Id: I1b2a65883e5176fdde49c9858ebe62a097df89cb
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/1778515
Tested-by: Denis Nikitin <denik@chromium.org>
Reviewed-by: Manoj Gupta <manojgupta@chromium.org>
Legacy-Commit-Queue: Commit Bot <commit-bot@chromium.org>
Diffstat (limited to 'crosperf/benchmark_run_unittest.py')
-rwxr-xr-x | crosperf/benchmark_run_unittest.py | 25 |
1 file changed, 16 insertions, 9 deletions
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py index 47d44dcf..4c2a7e60 100755 --- a/crosperf/benchmark_run_unittest.py +++ b/crosperf/benchmark_run_unittest.py @@ -100,8 +100,15 @@ class BenchmarkRunTest(unittest.TestCase): False, # rm_chroot_tmp '', # perf_args suite='telemetry_Crosperf') # suite + dut_conf = { + 'cooldown_time': 5, + 'cooldown_temp': 45, + 'governor': 'powersave', + 'cpu_usage': 'big_only', + } b = benchmark_run.MockBenchmarkRun('test run', bench, my_label, 1, [], m, - logger.GetLogger(), logging_level, '') + logger.GetLogger(), logging_level, '', + dut_conf) b.cache = MockResultsCache() b.suite_runner = MockSuiteRunner() b.start() @@ -111,7 +118,7 @@ class BenchmarkRunTest(unittest.TestCase): args_list = [ 'self', 'name', 'benchmark', 'label', 'iteration', 'cache_conditions', 'machine_manager', 'logger_to_use', 'log_level', 'share_cache', - 'enable_aslr' + 'dut_config', 'enable_aslr' ] arg_spec = inspect.getargspec(benchmark_run.BenchmarkRun.__init__) self.assertEqual(len(arg_spec.args), len(args_list)) @@ -129,7 +136,7 @@ class BenchmarkRunTest(unittest.TestCase): br = benchmark_run.BenchmarkRun( 'test_run', self.test_benchmark, self.test_label, 1, self.test_cache_conditions, self.mock_machine_manager, self.mock_logger, - 'average', '') + 'average', '', {}) def MockLogOutput(msg, print_to_console=False): 'Helper function for test_run.' 
@@ -269,7 +276,7 @@ class BenchmarkRunTest(unittest.TestCase): br = benchmark_run.BenchmarkRun( 'test_run', self.test_benchmark, self.test_label, 1, self.test_cache_conditions, self.mock_machine_manager, self.mock_logger, - 'average', '') + 'average', '', {}) def GetLastEventPassed(): 'Helper function for test_terminate_pass' @@ -296,7 +303,7 @@ class BenchmarkRunTest(unittest.TestCase): br = benchmark_run.BenchmarkRun( 'test_run', self.test_benchmark, self.test_label, 1, self.test_cache_conditions, self.mock_machine_manager, self.mock_logger, - 'average', '') + 'average', '', {}) def GetLastEventFailed(): 'Helper function for test_terminate_fail' @@ -323,7 +330,7 @@ class BenchmarkRunTest(unittest.TestCase): br = benchmark_run.BenchmarkRun( 'test_run', self.test_benchmark, self.test_label, 1, self.test_cache_conditions, self.mock_machine_manager, self.mock_logger, - 'average', '') + 'average', '', {}) br.terminated = True self.assertRaises(Exception, br.AcquireMachine) @@ -340,7 +347,7 @@ class BenchmarkRunTest(unittest.TestCase): br = benchmark_run.BenchmarkRun( 'test_run', self.test_benchmark, self.test_label, 1, self.test_cache_conditions, self.mock_machine_manager, self.mock_logger, - 'average', '') + 'average', '', {}) def MockLogError(err_msg): 'Helper function for test_get_extra_autotest_args' @@ -379,7 +386,7 @@ class BenchmarkRunTest(unittest.TestCase): br = benchmark_run.BenchmarkRun( 'test_run', self.test_benchmark, self.test_label, 1, self.test_cache_conditions, self.mock_machine_manager, self.mock_logger, - 'average', '') + 'average', '', {}) self.status = [] @@ -414,7 +421,7 @@ class BenchmarkRunTest(unittest.TestCase): br = benchmark_run.BenchmarkRun( 'test_run', self.test_benchmark, self.test_label, 1, self.test_cache_conditions, self.mock_machine_manager, self.mock_logger, - 'average', '') + 'average', '', {}) phony_cache_conditions = [123, 456, True, False] |