author     Denis Nikitin <denik@google.com>    2019-08-30 09:10:39 -0700
committer  Denis Nikitin <denik@chromium.org>  2019-09-10 05:34:30 +0000
commit     9d114045ddf617b67fd7af5aaccd0b5dcd4282ea (patch)
tree       741ebc440d0ade50048ea78513830f84ed35875a /crosperf
parent     0f8dae198451cfc73eb6c509cbddbe9c159ea45f (diff)
download   toolchain-utils-9d114045ddf617b67fd7af5aaccd0b5dcd4282ea.tar.gz
crosperf: Add dut config arguments to experiment
Added optional arguments to global settings:
  "cooldown_time" - wait time prior to running a benchmark (default: 0),
  "cooldown_temp" - temperature threshold for waiting (default: 40),
  "governor" - CPU governor (default: performance),
  "cpu_usage" - restrict CPU usage to specific configurations (default: all).
The "turbostat" argument is moved from benchmark to global settings.

This CL does not apply the configurations; it only propagates the
parameters to the SuiteRunner class through the "dut_config" dictionary.

BUG=chromium:966514
TEST=Unittests and local HW tests passed.

Change-Id: I1b2a65883e5176fdde49c9858ebe62a097df89cb
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/1778515
Tested-by: Denis Nikitin <denik@chromium.org>
Reviewed-by: Manoj Gupta <manojgupta@chromium.org>
Legacy-Commit-Queue: Commit Bot <commit-bot@chromium.org>
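The new plumbing is small enough to sketch. Below is a minimal, self-contained Python sketch of how the dictionary built in experiment_factory.py is threaded down to SuiteRunner, which merely stores it. The names make_dut_config and SuiteRunnerSketch are hypothetical stand-ins, not the real crosperf classes.

    def make_dut_config(get_field):
        # Mirrors the dict built in ExperimentFactory.GetExperiment() below.
        return {
            'intel_pstate': get_field('intel_pstate'),    # '', 'active', 'passive', 'no_hwp'
            'cooldown_time': get_field('cooldown_time'),  # minutes; 0 disables cooldown
            'cooldown_temp': get_field('cooldown_temp'),  # Celsius threshold
            'governor': get_field('governor'),            # e.g. 'performance'
            'cpu_usage': get_field('cpu_usage'),          # 'all', 'big_only', ...
        }

    class SuiteRunnerSketch(object):
        # Stand-in for SuiteRunner: in this CL the config is stored, not applied.
        def __init__(self, dut_config):
            self.dut_config = dut_config

    defaults = {'intel_pstate': '', 'cooldown_time': 0, 'cooldown_temp': 40,
                'governor': 'performance', 'cpu_usage': 'all'}
    runner = SuiteRunnerSketch(make_dut_config(defaults.get))
    print(runner.dut_config)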
Diffstat (limited to 'crosperf')
-rw-r--r--  crosperf/benchmark_run.py                 |  4
-rwxr-xr-x  crosperf/benchmark_run_unittest.py        | 25
-rwxr-xr-x  crosperf/crosperf_unittest.py             |  2
-rw-r--r--  crosperf/experiment.py                    | 17
-rw-r--r--  crosperf/experiment_factory.py            | 12
-rwxr-xr-x  crosperf/experiment_file_unittest.py      | 81
-rw-r--r--  crosperf/experiment_files/dut_config.exp  | 56
-rwxr-xr-x  crosperf/machine_manager_unittest.py      |  2
-rwxr-xr-x  crosperf/results_organizer_unittest.py    | 17
-rwxr-xr-x  crosperf/results_report_unittest.py       |  3
-rw-r--r--  crosperf/settings_factory.py              | 68
-rwxr-xr-x  crosperf/settings_factory_unittest.py     | 14
-rw-r--r--  crosperf/suite_runner.py                  |  2
-rwxr-xr-x  crosperf/suite_runner_unittest.py         |  2
14 files changed, 260 insertions, 45 deletions
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
index f5fe3fda..3cd4479b 100644
--- a/crosperf/benchmark_run.py
+++ b/crosperf/benchmark_run.py
@@ -2,6 +2,7 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+
"""Module of benchmark runs."""
from __future__ import print_function
@@ -40,6 +41,7 @@ class BenchmarkRun(threading.Thread):
logger_to_use,
log_level,
share_cache,
+ dut_config,
enable_aslr=False):
threading.Thread.__init__(self)
self.name = name
@@ -54,7 +56,7 @@ class BenchmarkRun(threading.Thread):
self.run_completed = False
self.machine_manager = machine_manager
self.suite_runner = SuiteRunner(
- self._logger, self.log_level, enable_aslr=enable_aslr)
+ dut_config, self._logger, self.log_level, enable_aslr=enable_aslr)
self.machine = None
self.cache_conditions = cache_conditions
self.runs_complete = 0
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
index 47d44dcf..4c2a7e60 100755
--- a/crosperf/benchmark_run_unittest.py
+++ b/crosperf/benchmark_run_unittest.py
@@ -100,8 +100,15 @@ class BenchmarkRunTest(unittest.TestCase):
False, # rm_chroot_tmp
'', # perf_args
suite='telemetry_Crosperf') # suite
+ dut_conf = {
+ 'cooldown_time': 5,
+ 'cooldown_temp': 45,
+ 'governor': 'powersave',
+ 'cpu_usage': 'big_only',
+ }
b = benchmark_run.MockBenchmarkRun('test run', bench, my_label, 1, [], m,
- logger.GetLogger(), logging_level, '')
+ logger.GetLogger(), logging_level, '',
+ dut_conf)
b.cache = MockResultsCache()
b.suite_runner = MockSuiteRunner()
b.start()
@@ -111,7 +118,7 @@ class BenchmarkRunTest(unittest.TestCase):
args_list = [
'self', 'name', 'benchmark', 'label', 'iteration', 'cache_conditions',
'machine_manager', 'logger_to_use', 'log_level', 'share_cache',
- 'enable_aslr'
+ 'dut_config', 'enable_aslr'
]
arg_spec = inspect.getargspec(benchmark_run.BenchmarkRun.__init__)
self.assertEqual(len(arg_spec.args), len(args_list))
@@ -129,7 +136,7 @@ class BenchmarkRunTest(unittest.TestCase):
br = benchmark_run.BenchmarkRun(
'test_run', self.test_benchmark, self.test_label, 1,
self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ 'average', '', {})
def MockLogOutput(msg, print_to_console=False):
'Helper function for test_run.'
@@ -269,7 +276,7 @@ class BenchmarkRunTest(unittest.TestCase):
br = benchmark_run.BenchmarkRun(
'test_run', self.test_benchmark, self.test_label, 1,
self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ 'average', '', {})
def GetLastEventPassed():
'Helper function for test_terminate_pass'
@@ -296,7 +303,7 @@ class BenchmarkRunTest(unittest.TestCase):
br = benchmark_run.BenchmarkRun(
'test_run', self.test_benchmark, self.test_label, 1,
self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ 'average', '', {})
def GetLastEventFailed():
'Helper function for test_terminate_fail'
@@ -323,7 +330,7 @@ class BenchmarkRunTest(unittest.TestCase):
br = benchmark_run.BenchmarkRun(
'test_run', self.test_benchmark, self.test_label, 1,
self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ 'average', '', {})
br.terminated = True
self.assertRaises(Exception, br.AcquireMachine)
@@ -340,7 +347,7 @@ class BenchmarkRunTest(unittest.TestCase):
br = benchmark_run.BenchmarkRun(
'test_run', self.test_benchmark, self.test_label, 1,
self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ 'average', '', {})
def MockLogError(err_msg):
'Helper function for test_get_extra_autotest_args'
@@ -379,7 +386,7 @@ class BenchmarkRunTest(unittest.TestCase):
br = benchmark_run.BenchmarkRun(
'test_run', self.test_benchmark, self.test_label, 1,
self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ 'average', '', {})
self.status = []
@@ -414,7 +421,7 @@ class BenchmarkRunTest(unittest.TestCase):
br = benchmark_run.BenchmarkRun(
'test_run', self.test_benchmark, self.test_label, 1,
self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ 'average', '', {})
phony_cache_conditions = [123, 456, True, False]
diff --git a/crosperf/crosperf_unittest.py b/crosperf/crosperf_unittest.py
index 65a587a5..8d5569e0 100755
--- a/crosperf/crosperf_unittest.py
+++ b/crosperf/crosperf_unittest.py
@@ -67,7 +67,7 @@ class CrosperfTest(unittest.TestCase):
settings = crosperf.ConvertOptionsToSettings(options)
self.assertIsNotNone(settings)
self.assertIsInstance(settings, settings_factory.GlobalSettings)
- self.assertEqual(len(settings.fields), 30)
+ self.assertEqual(len(settings.fields), 35)
self.assertTrue(settings.GetField('rerun'))
argv = ['crosperf/crosperf.py', 'temp.exp']
options, _ = parser.parse_known_args(argv)
diff --git a/crosperf/experiment.py b/crosperf/experiment.py
index ff193425..d0f5c133 100644
--- a/crosperf/experiment.py
+++ b/crosperf/experiment.py
@@ -30,7 +30,7 @@ class Experiment(object):
cache_conditions, labels, benchmarks, experiment_file, email_to,
acquire_timeout, log_dir, log_level, share_cache,
results_directory, locks_directory, cwp_dso, enable_aslr,
- ignore_min_max, skylab, intel_pstate=''):
+ ignore_min_max, skylab, dut_config):
self.name = name
self.working_directory = working_directory
self.remote = remote
@@ -61,7 +61,7 @@ class Experiment(object):
self.ignore_min_max = ignore_min_max
self.skylab = skylab
self.l = logger.GetLogger(log_dir)
- self.intel_pstate = intel_pstate
+ self.intel_pstate = dut_config['intel_pstate']
if not self.benchmarks:
raise RuntimeError('No benchmarks specified')
@@ -115,7 +115,7 @@ class Experiment(object):
self.machine_manager.ComputeCommonCheckSumString(label)
self.start_time = None
- self.benchmark_runs = self._GenerateBenchmarkRuns()
+ self.benchmark_runs = self._GenerateBenchmarkRuns(dut_config)
self._schedv2 = None
self._internal_counter_lock = Lock()
@@ -126,7 +126,7 @@ class Experiment(object):
def schedv2(self):
return self._schedv2
- def _GenerateBenchmarkRuns(self):
+ def _GenerateBenchmarkRuns(self, dut_config):
"""Generate benchmark runs from labels and benchmark defintions."""
benchmark_runs = []
for label in self.labels:
@@ -139,10 +139,11 @@ class Experiment(object):
logger_to_use = logger.Logger(self.log_dir, 'run.%s' % (full_name),
True)
benchmark_runs.append(
- benchmark_run.BenchmarkRun(
- benchmark_run_name, benchmark, label, iteration,
- self.cache_conditions, self.machine_manager, logger_to_use,
- self.log_level, self.share_cache, self.enable_aslr))
+ benchmark_run.BenchmarkRun(benchmark_run_name, benchmark, label,
+ iteration, self.cache_conditions,
+ self.machine_manager, logger_to_use,
+ self.log_level, self.share_cache,
+ dut_config, self.enable_aslr))
return benchmark_runs
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index 30514ed1..28755c80 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -158,7 +158,14 @@ class ExperimentFactory(object):
raise RuntimeError('The DSO specified is not supported')
enable_aslr = global_settings.GetField('enable_aslr')
ignore_min_max = global_settings.GetField('ignore_min_max')
- intel_pstate = global_settings.GetField('intel_pstate')
+ turbostat_opt = global_settings.GetField('turbostat')
+ dut_config = {
+ 'intel_pstate': global_settings.GetField('intel_pstate'),
+ 'cooldown_time': global_settings.GetField('cooldown_time'),
+ 'cooldown_temp': global_settings.GetField('cooldown_temp'),
+ 'governor': global_settings.GetField('governor'),
+ 'cpu_usage': global_settings.GetField('cpu_usage'),
+ }
# Default cache hit conditions. The image checksum in the cache and the
# computed checksum of the image must match. Also a cache file must exist.
@@ -235,7 +242,6 @@ class ExperimentFactory(object):
elif cwp_dso:
raise RuntimeError('With DSO specified, each benchmark should have a '
'weight')
- turbostat_opt = benchmark_settings.GetField('turbostat')
if suite == 'telemetry_Crosperf':
if test_name == 'all_perfv2':
@@ -460,7 +466,7 @@ class ExperimentFactory(object):
experiment_file.Canonicalize(), email,
acquire_timeout, log_dir, log_level, share_cache,
results_dir, locks_dir, cwp_dso, enable_aslr,
- ignore_min_max, skylab, intel_pstate)
+ ignore_min_max, skylab, dut_config)
return experiment
diff --git a/crosperf/experiment_file_unittest.py b/crosperf/experiment_file_unittest.py
index a5658bfb..861e2549 100755
--- a/crosperf/experiment_file_unittest.py
+++ b/crosperf/experiment_file_unittest.py
@@ -3,6 +3,7 @@
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+
"""The unittest of experiment_file."""
from __future__ import print_function
import StringIO
@@ -83,6 +84,55 @@ EXPERIMENT_FILE_4 = """
}
"""
+DUT_CONFIG_EXPERIMENT_FILE_GOOD = """
+ board: kevin64
+ remote: chromeos-kevin.cros
+ turbostat: False
+ intel_pstate: no_hwp
+ cooldown_temp: 38
+ cooldown_time: 5
+ governor: powersave
+ cpu_usage: exclusive_cores
+
+ benchmark: speedometer {
+ iterations: 3
+ suite: telemetry_Crosperf
+ }
+
+ image1 {
+ chromeos_image:/usr/local/google/cros_image1.bin
+ }
+ """
+
+DUT_CONFIG_EXPERIMENT_FILE_BAD_GOV = """
+ board: kevin64
+ remote: chromeos-kevin.cros
+ intel_pstate: active
+ governor: misspelled_governor
+
+ benchmark: speedometer2 {
+ iterations: 3
+ suite: telemetry_Crosperf
+ }
+ """
+
+DUT_CONFIG_EXPERIMENT_FILE_BAD_CPUUSE = """
+ board: kevin64
+ remote: chromeos-kevin.cros
+ turbostat: False
+ governor: ondemand
+ cpu_usage: unknown
+
+ benchmark: speedometer2 {
+ iterations: 3
+ suite: telemetry_Crosperf
+ }
+
+ image1 {
+ chromeos_image:/usr/local/google/cros_image1.bin
+ }
+ """
+
OUTPUT_FILE = """board: x86-alex
remote: chromeos-alex3
perf_args: record -a -e cycles
@@ -159,6 +209,37 @@ class ExperimentFileTest(unittest.TestCase):
res = experiment_file.Canonicalize()
self.assertEqual(res, OUTPUT_FILE)
+ def testLoadDutConfigExperimentFile_Good(self):
+ input_file = StringIO.StringIO(DUT_CONFIG_EXPERIMENT_FILE_GOOD)
+ experiment_file = ExperimentFile(input_file)
+ global_settings = experiment_file.GetGlobalSettings()
+ self.assertEqual(global_settings.GetField('turbostat'), False)
+ self.assertEqual(global_settings.GetField('intel_pstate'), 'no_hwp')
+ self.assertEqual(global_settings.GetField('governor'), 'powersave')
+ self.assertEqual(global_settings.GetField('cpu_usage'), 'exclusive_cores')
+ self.assertEqual(global_settings.GetField('cooldown_time'), 5)
+ self.assertEqual(global_settings.GetField('cooldown_temp'), 38)
+
+ def testLoadDutConfigExperimentFile_WrongGovernor(self):
+ input_file = StringIO.StringIO(DUT_CONFIG_EXPERIMENT_FILE_BAD_GOV)
+ with self.assertRaises(RuntimeError) as msg:
+ ExperimentFile(input_file)
+ self.assertRegexpMatches(
+ str(msg.exception), 'governor: misspelled_governor')
+ self.assertRegexpMatches(
+ str(msg.exception), "Invalid enum value for field 'governor'."
+ r' Must be one of \(performance, powersave, userspace, ondemand,'
+ r' conservative, schedutils, sched, interactive\)')
+
+ def testLoadDutConfigExperimentFile_WrongCpuUsage(self):
+ input_file = StringIO.StringIO(DUT_CONFIG_EXPERIMENT_FILE_BAD_CPUUSE)
+ with self.assertRaises(RuntimeError) as msg:
+ ExperimentFile(input_file)
+ self.assertRegexpMatches(str(msg.exception), 'cpu_usage: unknown')
+ self.assertRegexpMatches(
+ str(msg.exception), "Invalid enum value for field 'cpu_usage'."
+ r' Must be one of \(all, big_only, little_only, exclusive_cores\)')
+
if __name__ == '__main__':
unittest.main()
diff --git a/crosperf/experiment_files/dut_config.exp b/crosperf/experiment_files/dut_config.exp
new file mode 100644
index 00000000..f96242ff
--- /dev/null
+++ b/crosperf/experiment_files/dut_config.exp
@@ -0,0 +1,56 @@
+# This experiment template shows how to run Telemetry tests (using autotest)
+# with explicitly specified DUT configurations.
+#
+# You should replace all the placeholders, marked by angle-brackets,
+# with the appropriate actual values.
+
+name: dut_config_telemetry_crosperf_example
+board: <your-board-goes-here>
+
+# Note: You can specify multiple remotes to run your tests in parallel on
+# multiple machines, e.g. "remote: test-machine-1.com test-machine-2.com
+# test-machine-3.com"
+remote: <your-remote-goes-here>
+
+# DUT configuration parameters. All are optional.
+#
+# Run turbostat process in background. Default: True.
+turbostat: <True|False>
+# One of Intel Pstate modes defined in kernel command line:
+# active, passive, no_hwp.
+intel_pstate: <active|passive|no_hwp>
+# Wait until the CPU cools down to the specified temperature
+# in Celsius or the cooldown_time timeout expires
+# (whichever happens first). Default: 40.
+cooldown_temp: <temperature-threshold-for-cooldown>
+# Timeout in minutes for the CPU to cool down
+# to the cooldown_temp temperature. A zero value disables cooldown.
+# Default: 0.
+cooldown_time: <time-to-cooldown-in-minutes>
+# CPU governor.
+# See: https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt
+# for available values (they might differ for ARM and Intel).
+governor: <one-of-scaling_available_governors-values>
+# Restrict CPU usage to predefined "models":
+# all, big_only, little_only, exclusive_cores.
+cpu_usage: <usage-model>
+
+# The example below will run Telemetry toolchain performance benchmarks.
+# The exact list of benchmarks that will be run can be seen in
+# crosperf/experiment_factory.py
+benchmark: all_toolchain_perf {
+ suite: telemetry_Crosperf
+ run_local: False
+ iterations: 1
+}
+
+# NOTE: You must specify at least one image; you may specify more than one.
+# Replace <path-to-your-chroot> and <board> below.
+vanilla_image {
+ chromeos_image:<path-to-your-chroot>/src/build/images/<board>/vanilla-image/chromiumos_test_image.bin
+}
+
+# Replace the chromeos image below with the actual path to your test image.
+test_image {
+ chromeos_image:<path-to-your-chroot>/src/build/images/<board>/test-image/chromiumos_test_image.bin
+}
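The cooldown_temp/cooldown_time interplay described in the template above (wait until the temperature threshold is reached or the timeout expires, whichever happens first) is not implemented by this CL. A hedged sketch of one plausible wait loop, with read_cpu_temp as a hypothetical temperature probe:

    import time

    def wait_for_cooldown(read_cpu_temp, cooldown_temp=40, cooldown_time=0):
        # Wait until the CPU is at or below cooldown_temp Celsius, or until
        # cooldown_time minutes elapse, whichever happens first. A zero
        # cooldown_time disables the wait, matching the documented default.
        deadline = time.time() + cooldown_time * 60
        while cooldown_time and time.time() < deadline:
            if read_cpu_temp() <= cooldown_temp:
                break
            time.sleep(10)  # poll interval; arbitrary choice for this sketch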
diff --git a/crosperf/machine_manager_unittest.py b/crosperf/machine_manager_unittest.py
index 324ee07c..0f64a714 100755
--- a/crosperf/machine_manager_unittest.py
+++ b/crosperf/machine_manager_unittest.py
@@ -456,7 +456,7 @@ class MachineManagerTest(unittest.TestCase):
suite='telemetry_Crosperf') # suite
test_run = MockBenchmarkRun('test run', bench, LABEL_LUMPY, 1, [], self.mm,
- mock_logger, 'verbose', '', False)
+ mock_logger, 'verbose', '', {}, False)
self.mm._machines = [
self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3, self.mock_daisy1,
diff --git a/crosperf/results_organizer_unittest.py b/crosperf/results_organizer_unittest.py
index 2e6edf31..39a8cce6 100755
--- a/crosperf/results_organizer_unittest.py
+++ b/crosperf/results_organizer_unittest.py
@@ -4,6 +4,7 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+
"""Testing of ResultsOrganizer
We create some labels, benchmark_runs and then create a ResultsOrganizer,
@@ -139,21 +140,21 @@ class ResultOrganizerTest(unittest.TestCase):
benchmarks = [mock_instance.benchmark1, mock_instance.benchmark2]
benchmark_runs = [None] * 8
benchmark_runs[0] = BenchmarkRun('b1', benchmarks[0], labels[0], 1, '', '',
- '', 'average', '')
+ '', 'average', '', {})
benchmark_runs[1] = BenchmarkRun('b2', benchmarks[0], labels[0], 2, '', '',
- '', 'average', '')
+ '', 'average', '', {})
benchmark_runs[2] = BenchmarkRun('b3', benchmarks[0], labels[1], 1, '', '',
- '', 'average', '')
+ '', 'average', '', {})
benchmark_runs[3] = BenchmarkRun('b4', benchmarks[0], labels[1], 2, '', '',
- '', 'average', '')
+ '', 'average', '', {})
benchmark_runs[4] = BenchmarkRun('b5', benchmarks[1], labels[0], 1, '', '',
- '', 'average', '')
+ '', 'average', '', {})
benchmark_runs[5] = BenchmarkRun('b6', benchmarks[1], labels[0], 2, '', '',
- '', 'average', '')
+ '', 'average', '', {})
benchmark_runs[6] = BenchmarkRun('b7', benchmarks[1], labels[1], 1, '', '',
- '', 'average', '')
+ '', 'average', '', {})
benchmark_runs[7] = BenchmarkRun('b8', benchmarks[1], labels[1], 2, '', '',
- '', 'average', '')
+ '', 'average', '', {})
i = 0
for b in benchmark_runs:
diff --git a/crosperf/results_report_unittest.py b/crosperf/results_report_unittest.py
index f2fc9f60..61e2a7c2 100755
--- a/crosperf/results_report_unittest.py
+++ b/crosperf/results_report_unittest.py
@@ -4,6 +4,7 @@
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+
"""Unittest for the results reporter."""
from __future__ import division
@@ -134,7 +135,7 @@ def _InjectSuccesses(experiment, how_many, keyvals, for_benchmark=0,
def MakeSuccessfulRun(n):
run = MockBenchmarkRun('mock_success%d' % (n,), bench, label,
1 + n + num_runs, cache_conditions,
- machine_manager, log, log_level, share_cache)
+ machine_manager, log, log_level, share_cache, {})
mock_result = MockResult(log, label, log_level, machine)
mock_result.keyvals = keyvals
run.result = mock_result
diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py
index 31ea87e6..b96ad342 100644
--- a/crosperf/settings_factory.py
+++ b/crosperf/settings_factory.py
@@ -8,6 +8,7 @@
from __future__ import print_function
from field import BooleanField
+from field import EnumField
from field import FloatField
from field import IntegerField
from field import ListField
@@ -59,12 +60,6 @@ class BenchmarkSettings(Settings):
'weight',
default=0.0,
description='Weight of the benchmark for CWP approximation'))
- self.AddField(
- BooleanField(
- 'turbostat',
- description='During benchmark run turbostat process in background',
- required=False,
- default=True))
class LabelSettings(Settings):
@@ -339,9 +334,68 @@ class GlobalSettings(Settings):
TextField(
'intel_pstate',
description='Intel Pstate mode.\n'
- 'Supported modes: active (default), passive, no_hwp.',
+ 'Supported modes: passive, no_hwp.\n'
+ 'By default the kernel works in active HWP mode if HWP is'
+ " supported by the CPU. This corresponds to the default intel_pstate=''",
required=False,
default=''))
+ self.AddField(
+ BooleanField(
+ 'turbostat',
+ description='Run turbostat process in the background'
+ ' of a benchmark',
+ required=False,
+ default=True))
+ self.AddField(
+ IntegerField(
+ 'cooldown_temp',
+ required=False,
+ default=40,
+ description='Wait until the CPU temperature goes down below the'
+ ' specified temperature in Celsius'
+ ' prior to starting a benchmark.'))
+ self.AddField(
+ IntegerField(
+ 'cooldown_time',
+ required=False,
+ default=0,
+ description='Wait the specified time in minutes to allow'
+ ' the CPU to cool down. A zero value disables cooldown.'))
+ self.AddField(
+ EnumField(
+ 'governor',
+ options=[
+ 'performance',
+ 'powersave',
+ 'userspace',
+ 'ondemand',
+ 'conservative',
+ 'schedutils',
+ 'sched',
+ 'interactive',
+ ],
+ default='performance',
+ required=False,
+ description='Set up the CPU governor for all cores.\n'
+ 'For more details refer to:\n'
+ 'https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt'))
+ self.AddField(
+ EnumField(
+ 'cpu_usage',
+ options=[
+ 'all',
+ 'big_only',
+ 'little_only',
+ 'exclusive_cores',
+ ],
+ default='all',
+ required=False,
+ description='Restrict CPU usage to decrease CPU interference.\n'
+ 'all - no restrictions;\n'
+ 'big_only, little_only - enable only big/little cores,'
+ ' applicable only on ARM;\n'
+ 'exclusive_cores - (for future use)'
+ ' isolate cores for the exclusive use of benchmark processes.'))
class SettingsFactory(object):
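The governor and cpu_usage fields above rely on EnumField, whose implementation (field.py) is outside this diff. A minimal sketch of the membership check implied by the unittest error messages earlier in this commit; EnumFieldSketch is a hypothetical stand-in, not the real class:

    class EnumFieldSketch(object):
        # Hypothetical stand-in for field.EnumField; only the validation shown
        # by the 'Invalid enum value' unittest assertions is modeled here.
        def __init__(self, name, options, default, required=False, description=''):
            self.name = name
            self.options = options
            self.value = default
            self.description = description

        def Set(self, value):
            if value not in self.options:
                raise RuntimeError(
                    "Invalid enum value for field '%s'. Must be one of (%s)" %
                    (self.name, ', '.join(self.options)))
            self.value = value

    governor = EnumFieldSketch(
        'governor',
        options=['performance', 'powersave', 'userspace', 'ondemand',
                 'conservative', 'schedutils', 'sched', 'interactive'],
        default='performance')
    governor.Set('powersave')             # accepted
    # governor.Set('misspelled_governor') # would raise, as the unittest expects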
diff --git a/crosperf/settings_factory_unittest.py b/crosperf/settings_factory_unittest.py
index cf3db353..6dfb0bff 100755
--- a/crosperf/settings_factory_unittest.py
+++ b/crosperf/settings_factory_unittest.py
@@ -20,12 +20,11 @@ class BenchmarkSettingsTest(unittest.TestCase):
def test_init(self):
res = settings_factory.BenchmarkSettings('b_settings')
self.assertIsNotNone(res)
- self.assertEqual(len(res.fields), 8)
+ self.assertEqual(len(res.fields), 7)
self.assertEqual(res.GetField('test_name'), '')
self.assertEqual(res.GetField('test_args'), '')
self.assertEqual(res.GetField('iterations'), 0)
self.assertEqual(res.GetField('suite'), '')
- self.assertEqual(res.GetField('turbostat'), True)
class LabelSettingsTest(unittest.TestCase):
@@ -51,7 +50,7 @@ class GlobalSettingsTest(unittest.TestCase):
def test_init(self):
res = settings_factory.GlobalSettings('g_settings')
self.assertIsNotNone(res)
- self.assertEqual(len(res.fields), 30)
+ self.assertEqual(len(res.fields), 35)
self.assertEqual(res.GetField('name'), '')
self.assertEqual(res.GetField('board'), '')
self.assertEqual(res.GetField('skylab'), False)
@@ -78,6 +77,11 @@ class GlobalSettingsTest(unittest.TestCase):
self.assertEqual(res.GetField('enable_aslr'), False)
self.assertEqual(res.GetField('ignore_min_max'), False)
self.assertEqual(res.GetField('intel_pstate'), '')
+ self.assertEqual(res.GetField('turbostat'), True)
+ self.assertEqual(res.GetField('cooldown_time'), 0)
+ self.assertEqual(res.GetField('cooldown_temp'), 40)
+ self.assertEqual(res.GetField('governor'), 'performance')
+ self.assertEqual(res.GetField('cpu_usage'), 'all')
class SettingsFactoryTest(unittest.TestCase):
@@ -95,12 +99,12 @@ class SettingsFactoryTest(unittest.TestCase):
b_settings = settings_factory.SettingsFactory().GetSettings(
'benchmark', 'benchmark')
self.assertIsInstance(b_settings, settings_factory.BenchmarkSettings)
- self.assertEqual(len(b_settings.fields), 8)
+ self.assertEqual(len(b_settings.fields), 7)
g_settings = settings_factory.SettingsFactory().GetSettings(
'global', 'global')
self.assertIsInstance(g_settings, settings_factory.GlobalSettings)
- self.assertEqual(len(g_settings.fields), 30)
+ self.assertEqual(len(g_settings.fields), 35)
if __name__ == '__main__':
diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
index 6e24c28c..2831b9bd 100644
--- a/crosperf/suite_runner.py
+++ b/crosperf/suite_runner.py
@@ -51,6 +51,7 @@ class SuiteRunner(object):
"""This defines the interface from crosperf to test script."""
def __init__(self,
+ dut_config,
logger_to_use=None,
log_level='verbose',
cmd_exec=None,
@@ -62,6 +63,7 @@ class SuiteRunner(object):
self.logger, log_level=self.log_level)
self._ct = cmd_term or command_executer.CommandTerminator()
self.enable_aslr = enable_aslr
+ self.dut_config = dut_config
def Run(self, machine, label, benchmark, test_args, profiler_args):
for i in range(0, benchmark.retries + 1):
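As the commit message notes, SuiteRunner only stores dut_config here; applying it is left to later CLs. As one hedged illustration of what applying the governor might eventually involve, the standard cpufreq sysfs interface can be driven with a shell one-liner. The helper name and the command-executer usage below are assumptions, not code from this CL:

    def governor_command(governor):
        # Hypothetical follow-up, NOT part of this CL: a shell command a later
        # change might run on the DUT to apply dut_config['governor'] to every
        # core via the standard cpufreq sysfs files.
        return ('for f in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor; '
                'do echo %s > $f; done' % governor)

    # A later CL could execute this through the runner's command executer,
    # passing self.dut_config['governor'] for the remote machine.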
diff --git a/crosperf/suite_runner_unittest.py b/crosperf/suite_runner_unittest.py
index 287eec65..4af6f4e9 100755
--- a/crosperf/suite_runner_unittest.py
+++ b/crosperf/suite_runner_unittest.py
@@ -79,7 +79,7 @@ class SuiteRunnerTest(unittest.TestCase):
def setUp(self):
self.runner = suite_runner.SuiteRunner(
- self.mock_logger, 'verbose', self.mock_cmd_exec, self.mock_cmd_term)
+ {}, self.mock_logger, 'verbose', self.mock_cmd_exec, self.mock_cmd_term)
def test_get_profiler_args(self):
input_str = ('--profiler=custom_perf --profiler_args=\'perf_options'