author      Denis Nikitin <denik@google.com>            2019-07-30 11:55:01 -0700
committer   Sean Abraham <seanabraham@chromium.org>     2019-08-02 13:55:44 +0000
commit      80522905c156d3ccb51451eaefda5773e905ffaa (patch)
tree        04df4e180f14224a89608f169167682462c42d9e /crosperf
parent      265c29673103ec2b03a3cd81d065537a53122187 (diff)
download    toolchain-utils-80522905c156d3ccb51451eaefda5773e905ffaa.tar.gz
crosperf: Add "intel_pstate" and "turbostat" args
Add the "intel_pstate" option to global settings.
  Type: string. Values: active, passive, no_hwp, '' (equivalent to active). Default: ''.
Add the "turbostat" option to benchmark settings.
  Type: boolean. Default: True.

BUG=chromium:966514
TEST=tested on samus, eve, rammus

Change-Id: I1533c6d6586aab5acf3665bce68920a0ab3092bc
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/1726635
Legacy-Commit-Queue: Commit Bot <commit-bot@chromium.org>
Commit-Queue: Sean Abraham <seanabraham@chromium.org>
Reviewed-by: Sean Abraham <seanabraham@chromium.org>
Tested-by: Sean Abraham <seanabraham@chromium.org>
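For illustration, a minimal experiment-file sketch showing where the two new options would go. Only "intel_pstate" and "turbostat" come from this commit; the board, remote address, image path, and the surrounding layout are placeholders assumed from the usual crosperf experiment-file format, not taken from this change.

  # Hypothetical experiment file (placeholders except the two new options).
  board: samus
  remote: 192.168.0.2            # placeholder DUT address
  intel_pstate: passive          # new global option: active (default), passive, no_hwp

  benchmark: octane {
    suite: telemetry_Crosperf
    iterations: 3
    turbostat: False             # new benchmark option; defaults to True
  }

  test_image {
    chromeos_image: /path/to/chromiumos_test_image.bin   # placeholder image path
  }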
Diffstat (limited to 'crosperf')
-rw-r--r--   crosperf/benchmark.py                      4
-rwxr-xr-x   crosperf/benchmark_unittest.py             2
-rwxr-xr-x   crosperf/crosperf_unittest.py              2
-rw-r--r--   crosperf/experiment_factory.py            35
-rwxr-xr-x   crosperf/experiment_factory_unittest.py    6
-rw-r--r--   crosperf/settings_factory.py              13
-rwxr-xr-x   crosperf/settings_factory_unittest.py     10
-rw-r--r--   crosperf/suite_runner.py                   4
-rwxr-xr-x   crosperf/suite_runner_unittest.py          5
9 files changed, 54 insertions, 27 deletions
diff --git a/crosperf/benchmark.py b/crosperf/benchmark.py
index 6869bacb..e7c3b19a 100644
--- a/crosperf/benchmark.py
+++ b/crosperf/benchmark.py
@@ -59,7 +59,8 @@ class Benchmark(object):
retries=0,
run_local=False,
cwp_dso='',
- weight=0):
+ weight=0,
+ turbostat=True):
self.name = name
#For telemetry, this is the benchmark name.
self.test_name = test_name
@@ -79,3 +80,4 @@ class Benchmark(object):
self.run_local = run_local
self.cwp_dso = cwp_dso
self.weight = weight
+ self.turbostat = turbostat
diff --git a/crosperf/benchmark_unittest.py b/crosperf/benchmark_unittest.py
index 551bd2d3..4041e850 100755
--- a/crosperf/benchmark_unittest.py
+++ b/crosperf/benchmark_unittest.py
@@ -56,7 +56,7 @@ class BenchmarkTestCase(unittest.TestCase):
args_list = [
'self', 'name', 'test_name', 'test_args', 'iterations', 'rm_chroot_tmp',
'perf_args', 'suite', 'show_all_results', 'retries', 'run_local',
- 'cwp_dso', 'weight'
+ 'cwp_dso', 'weight', 'turbostat'
]
arg_spec = inspect.getargspec(Benchmark.__init__)
self.assertEqual(len(arg_spec.args), len(args_list))
diff --git a/crosperf/crosperf_unittest.py b/crosperf/crosperf_unittest.py
index 64676d77..c89c59f1 100755
--- a/crosperf/crosperf_unittest.py
+++ b/crosperf/crosperf_unittest.py
@@ -59,7 +59,7 @@ class CrosperfTest(unittest.TestCase):
settings = crosperf.ConvertOptionsToSettings(options)
self.assertIsNotNone(settings)
self.assertIsInstance(settings, settings_factory.GlobalSettings)
- self.assertEqual(len(settings.fields), 29)
+ self.assertEqual(len(settings.fields), 30)
self.assertTrue(settings.GetField('rerun'))
argv = ['crosperf/crosperf.py', 'temp.exp']
options, _ = parser.parse_known_args(argv)
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index b889a129..6e75644b 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -104,12 +104,12 @@ class ExperimentFactory(object):
def AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results, retries, run_local):
+ show_all_results, retries, run_local, turbostat):
"""Add all the tests in a set to the benchmarks list."""
for test_name in benchmark_list:
telemetry_benchmark = Benchmark(
test_name, test_name, test_args, iterations, rm_chroot_tmp, perf_args,
- suite, show_all_results, retries, run_local)
+ suite, show_all_results, retries, run_local, turbostat)
benchmarks.append(telemetry_benchmark)
def GetExperiment(self, experiment_file, working_directory, log_dir):
@@ -159,6 +159,7 @@ class ExperimentFactory(object):
raise RuntimeError('The DSO specified is not supported')
enable_aslr = global_settings.GetField('enable_aslr')
ignore_min_max = global_settings.GetField('ignore_min_max')
+ intel_pstate = global_settings.GetField('intel_pstate')
# Default cache hit conditions. The image checksum in the cache and the
# computed checksum of the image must match. Also a cache file must exist.
@@ -235,22 +236,25 @@ class ExperimentFactory(object):
elif cwp_dso:
raise RuntimeError('With DSO specified, each benchmark should have a '
'weight')
+ turbostat_opt = benchmark_settings.GetField('turbostat')
if suite == 'telemetry_Crosperf':
if test_name == 'all_perfv2':
self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests, test_args,
iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results, retries, run_local)
+ show_all_results, retries, run_local,
+ turbostat=turbostat_opt)
elif test_name == 'all_pagecyclers':
self.AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
test_args, iterations, rm_chroot_tmp,
perf_args, suite, show_all_results, retries,
- run_local)
+ run_local, turbostat=turbostat_opt)
elif test_name == 'all_crosbolt_perf':
self.AppendBenchmarkSet(benchmarks, telemetry_crosbolt_perf_tests,
test_args, iterations, rm_chroot_tmp,
perf_args, 'telemetry_Crosperf',
- show_all_results, retries, run_local)
+ show_all_results, retries, run_local,
+ turbostat=turbostat_opt)
self.AppendBenchmarkSet(
benchmarks,
crosbolt_perf_tests,
@@ -261,12 +265,13 @@ class ExperimentFactory(object):
'',
show_all_results,
retries,
- run_local=False)
+ run_local=False,
+ turbostat=turbostat_opt)
elif test_name == 'all_toolchain_perf':
self.AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
test_args, iterations, rm_chroot_tmp,
perf_args, suite, show_all_results, retries,
- run_local)
+ run_local, turbostat=turbostat_opt)
# Add non-telemetry toolchain-perf benchmarks:
benchmarks.append(
Benchmark(
@@ -279,17 +284,19 @@ class ExperimentFactory(object):
'',
show_all_results,
retries,
- run_local=False))
+ run_local=False,
+ turbostat=turbostat_opt))
elif test_name == 'all_toolchain_perf_old':
self.AppendBenchmarkSet(benchmarks,
telemetry_toolchain_old_perf_tests, test_args,
iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results, retries, run_local)
+ show_all_results, retries, run_local,
+ turbostat=turbostat_opt)
else:
benchmark = Benchmark(benchmark_name, test_name, test_args,
iterations, rm_chroot_tmp, perf_args, suite,
show_all_results, retries, run_local, cwp_dso,
- weight)
+ weight, turbostat_opt)
benchmarks.append(benchmark)
else:
if test_name == 'all_graphics_perf':
@@ -303,7 +310,8 @@ class ExperimentFactory(object):
'',
show_all_results,
retries,
- run_local=False)
+ run_local=False,
+ turbostat=turbostat_opt)
else:
# Add the single benchmark.
benchmark = Benchmark(
@@ -316,7 +324,8 @@ class ExperimentFactory(object):
suite,
show_all_results,
retries,
- run_local=False)
+ run_local=False,
+ turbostat=turbostat_opt)
benchmarks.append(benchmark)
if not benchmarks:
@@ -396,7 +405,7 @@ class ExperimentFactory(object):
experiment_file.Canonicalize(), email,
acquire_timeout, log_dir, log_level, share_cache,
results_dir, locks_dir, cwp_dso, enable_aslr,
- ignore_min_max, skylab)
+ ignore_min_max, skylab, intel_pstate)
return experiment
diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py
index 26fbf8a8..d701d47b 100755
--- a/crosperf/experiment_factory_unittest.py
+++ b/crosperf/experiment_factory_unittest.py
@@ -239,7 +239,7 @@ class ExperimentFactoryTest(unittest.TestCase):
bench_list = []
ef.AppendBenchmarkSet(bench_list, experiment_factory.telemetry_perfv2_tests,
'', 1, False, '', 'telemetry_Crosperf', False, 0,
- False)
+ False, False)
self.assertEqual(
len(bench_list), len(experiment_factory.telemetry_perfv2_tests))
self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
@@ -247,7 +247,7 @@ class ExperimentFactoryTest(unittest.TestCase):
bench_list = []
ef.AppendBenchmarkSet(bench_list,
experiment_factory.telemetry_pagecycler_tests, '', 1,
- False, '', 'telemetry_Crosperf', False, 0, False)
+ False, '', 'telemetry_Crosperf', False, 0, False, False)
self.assertEqual(
len(bench_list), len(experiment_factory.telemetry_pagecycler_tests))
self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
@@ -255,7 +255,7 @@ class ExperimentFactoryTest(unittest.TestCase):
bench_list = []
ef.AppendBenchmarkSet(bench_list,
experiment_factory.telemetry_toolchain_perf_tests, '',
- 1, False, '', 'telemetry_Crosperf', False, 0, False)
+ 1, False, '', 'telemetry_Crosperf', False, 0, False, False)
self.assertEqual(
len(bench_list), len(experiment_factory.telemetry_toolchain_perf_tests))
self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py
index a4607963..31ea87e6 100644
--- a/crosperf/settings_factory.py
+++ b/crosperf/settings_factory.py
@@ -59,6 +59,12 @@ class BenchmarkSettings(Settings):
'weight',
default=0.0,
description='Weight of the benchmark for CWP approximation'))
+ self.AddField(
+ BooleanField(
+ 'turbostat',
+ description='During benchmark run turbostat process in background',
+ required=False,
+ default=True))
class LabelSettings(Settings):
@@ -329,6 +335,13 @@ class GlobalSettings(Settings):
'ignore min and max values to reduce noise.',
required=False,
default=False))
+ self.AddField(
+ TextField(
+ 'intel_pstate',
+ description='Intel Pstate mode.\n'
+ 'Supported modes: active (default), passive, no_hwp.',
+ required=False,
+ default=''))
class SettingsFactory(object):
diff --git a/crosperf/settings_factory_unittest.py b/crosperf/settings_factory_unittest.py
index d3c9391a..cf3db353 100755
--- a/crosperf/settings_factory_unittest.py
+++ b/crosperf/settings_factory_unittest.py
@@ -20,11 +20,12 @@ class BenchmarkSettingsTest(unittest.TestCase):
def test_init(self):
res = settings_factory.BenchmarkSettings('b_settings')
self.assertIsNotNone(res)
- self.assertEqual(len(res.fields), 7)
+ self.assertEqual(len(res.fields), 8)
self.assertEqual(res.GetField('test_name'), '')
self.assertEqual(res.GetField('test_args'), '')
self.assertEqual(res.GetField('iterations'), 0)
self.assertEqual(res.GetField('suite'), '')
+ self.assertEqual(res.GetField('turbostat'), True)
class LabelSettingsTest(unittest.TestCase):
@@ -50,7 +51,7 @@ class GlobalSettingsTest(unittest.TestCase):
def test_init(self):
res = settings_factory.GlobalSettings('g_settings')
self.assertIsNotNone(res)
- self.assertEqual(len(res.fields), 29)
+ self.assertEqual(len(res.fields), 30)
self.assertEqual(res.GetField('name'), '')
self.assertEqual(res.GetField('board'), '')
self.assertEqual(res.GetField('skylab'), False)
@@ -76,6 +77,7 @@ class GlobalSettingsTest(unittest.TestCase):
self.assertEqual(res.GetField('cwp_dso'), '')
self.assertEqual(res.GetField('enable_aslr'), False)
self.assertEqual(res.GetField('ignore_min_max'), False)
+ self.assertEqual(res.GetField('intel_pstate'), '')
class SettingsFactoryTest(unittest.TestCase):
@@ -93,12 +95,12 @@ class SettingsFactoryTest(unittest.TestCase):
b_settings = settings_factory.SettingsFactory().GetSettings(
'benchmark', 'benchmark')
self.assertIsInstance(b_settings, settings_factory.BenchmarkSettings)
- self.assertEqual(len(b_settings.fields), 7)
+ self.assertEqual(len(b_settings.fields), 8)
g_settings = settings_factory.SettingsFactory().GetSettings(
'global', 'global')
self.assertIsInstance(g_settings, settings_factory.GlobalSettings)
- self.assertEqual(len(g_settings.fields), 29)
+ self.assertEqual(len(g_settings.fields), 30)
if __name__ == '__main__':
diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
index f61a90bf..5a32f92b 100644
--- a/crosperf/suite_runner.py
+++ b/crosperf/suite_runner.py
@@ -366,10 +366,10 @@ class SuiteRunner(object):
args_string = "test_args='%s'" % test_args
cmd = ('{} {} {} --board={} --args="{} run_local={} test={} '
- '{}" {} telemetry_Crosperf'.format(
+ 'turbostat={} {}" {} telemetry_Crosperf'.format(
TEST_THAT_PATH, autotest_dir_arg, fast_arg, label.board,
args_string, benchmark.run_local, benchmark.test_name,
- profiler_args, machine))
+ benchmark.turbostat, profiler_args, machine))
# Use --no-ns-pid so that cros_sdk does not create a different
# process namespace and we can kill process created easily by their
diff --git a/crosperf/suite_runner_unittest.py b/crosperf/suite_runner_unittest.py
index 2fcb45ac..5f1d3360 100755
--- a/crosperf/suite_runner_unittest.py
+++ b/crosperf/suite_runner_unittest.py
@@ -294,8 +294,9 @@ class SuiteRunnerTest(unittest.TestCase):
('/usr/bin/test_that --autotest_dir '
'~/trunk/src/third_party/autotest/files --fast '
'--board=lumpy --args=" run_local=False test=octane '
- 'profiler=custom_perf profiler_args=\'record -a -e '
- 'cycles,instructions\'" lumpy1.cros telemetry_Crosperf'))
+ 'turbostat=True profiler=custom_perf '
+ 'profiler_args=\'record -a -e cycles,instructions\'" '
+ 'lumpy1.cros telemetry_Crosperf'))
self.assertEqual(args_dict['cros_sdk_options'],
('--no-ns-pid --chrome_root= '
'--chrome_root_mount=/tmp/chrome_root '