author     Jordan R Abrahams <ajordanr@google.com>  2021-10-09 03:34:49 +0000
committer  Commit Bot <commit-bot@chromium.org>     2021-10-11 07:29:50 +0000
commit     a7a19342ac9fe044cbf45a3aa618defca9db413f (patch)
tree       f6051da037280ebe193e3ef6d55dbebe33054860
parent     c5e12fbac037054444b2c07d708745c377d68328 (diff)
download   toolchain-utils-a7a19342ac9fe044cbf45a3aa618defca9db413f.tar.gz
Auto-format the crosperf files
Currently, these crosperf files do not abide by the pep8 standard
that CrOS requires. This applies the necessary formatting to get
past the commit hook.

BUG=None
TEST=CQ (formatting)

Change-Id: I501fd37f8a1d20fd0b987587eb52eb582f18c1a1
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/3215085
Tested-by: Jordan R Abrahams <ajordanr@google.com>
Reviewed-by: Manoj Gupta <manojgupta@chromium.org>
Commit-Queue: Jordan R Abrahams <ajordanr@google.com>
-rwxr-xr-x  crosperf/crosperf_unittest.py    13
-rw-r--r--  crosperf/experiment_factory.py  140
-rw-r--r--  crosperf/experiment_runner.py    36
-rw-r--r--  crosperf/settings_factory.py    522
4 files changed, 340 insertions, 371 deletions
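
For context, the reformatting in every hunk below is the same mechanical change: call arguments move from a hanging indent onto lines aligned with the opening parenthesis, which is the layout the CrOS pre-upload formatter emits for these files. A minimal runnable sketch of the before/after, modeled on the crosperf_unittest.py hunk (illustrative only, not an excerpt from the commit):

import argparse

# Hanging-indent layout (before): the argument list starts on its own
# line, indented one level past the call.
old_style = argparse.ArgumentParser()
old_style.add_argument(
    '-l',
    '--log_dir',
    dest='log_dir',
    default='',
    help='The log_dir, default is under <crosperf_logs>/logs')

# Parenthesis-aligned layout (after): arguments line up with the opening
# parenthesis, matching what the formatter produced in these files.
new_style = argparse.ArgumentParser()
new_style.add_argument('-l',
                       '--log_dir',
                       dest='log_dir',
                       default='',
                       help='The log_dir, default is under '
                       '<crosperf_logs>/logs')

Both layouts are valid Python; the commit hook enforces the aligned form here, so the change is formatting-only and does not alter behavior.
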
diff --git a/crosperf/crosperf_unittest.py b/crosperf/crosperf_unittest.py
index 07728e7d..774159ff 100755
--- a/crosperf/crosperf_unittest.py
+++ b/crosperf/crosperf_unittest.py
@@ -55,13 +55,12 @@ class CrosperfTest(unittest.TestCase):
def testConvertOptionsToSettings(self):
parser = argparse.ArgumentParser()
- parser.add_argument(
- '-l',
- '--log_dir',
- dest='log_dir',
- default='',
- help='The log_dir, default is under '
- '<crosperf_logs>/logs')
+ parser.add_argument('-l',
+ '--log_dir',
+ dest='log_dir',
+ default='',
+ help='The log_dir, default is under '
+ '<crosperf_logs>/logs')
crosperf.SetupParserOptions(parser)
argv = ['crosperf/crosperf.py', 'temp.exp', '--rerun=True']
options, _ = parser.parse_known_args(argv)
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index 19b373a9..a9594a20 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -101,7 +101,8 @@ class ExperimentFactory(object):
def AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results, retries, run_local, cwp_dso, weight):
+ show_all_results, retries, run_local, cwp_dso,
+ weight):
"""Add all the tests in a set to the benchmarks list."""
for test_name in benchmark_list:
telemetry_benchmark = Benchmark(test_name, test_name, test_args,
@@ -258,10 +259,10 @@ class ExperimentFactory(object):
if suite == 'telemetry_Crosperf':
if test_name == 'all_perfv2':
- self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests, test_args,
- iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results, retries, run_local, cwp_dso,
- weight)
+ self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests,
+ test_args, iterations, rm_chroot_tmp,
+ perf_args, suite, show_all_results, retries,
+ run_local, cwp_dso, weight)
elif test_name == 'all_pagecyclers':
self.AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
test_args, iterations, rm_chroot_tmp,
@@ -271,21 +272,20 @@ class ExperimentFactory(object):
self.AppendBenchmarkSet(benchmarks, telemetry_crosbolt_perf_tests,
test_args, iterations, rm_chroot_tmp,
perf_args, 'telemetry_Crosperf',
- show_all_results, retries, run_local, cwp_dso,
- weight)
- self.AppendBenchmarkSet(
- benchmarks,
- crosbolt_perf_tests,
- '',
- iterations,
- rm_chroot_tmp,
- perf_args,
- '',
- show_all_results,
- retries,
- run_local=False,
- cwp_dso=cwp_dso,
- weight=weight)
+ show_all_results, retries, run_local,
+ cwp_dso, weight)
+ self.AppendBenchmarkSet(benchmarks,
+ crosbolt_perf_tests,
+ '',
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ '',
+ show_all_results,
+ retries,
+ run_local=False,
+ cwp_dso=cwp_dso,
+ weight=weight)
elif test_name == 'all_toolchain_perf':
self.AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
test_args, iterations, rm_chroot_tmp,
@@ -325,10 +325,10 @@ class ExperimentFactory(object):
# weight=weight))
elif test_name == 'all_toolchain_perf_old':
self.AppendBenchmarkSet(benchmarks,
- telemetry_toolchain_old_perf_tests, test_args,
- iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results, retries, run_local, cwp_dso,
- weight)
+ telemetry_toolchain_old_perf_tests,
+ test_args, iterations, rm_chroot_tmp,
+ perf_args, suite, show_all_results, retries,
+ run_local, cwp_dso, weight)
else:
benchmark = Benchmark(benchmark_name, test_name, test_args,
iterations, rm_chroot_tmp, perf_args, suite,
@@ -337,34 +337,32 @@ class ExperimentFactory(object):
benchmarks.append(benchmark)
else:
if test_name == 'all_graphics_perf':
- self.AppendBenchmarkSet(
- benchmarks,
- graphics_perf_tests,
- '',
- iterations,
- rm_chroot_tmp,
- perf_args,
- '',
- show_all_results,
- retries,
- run_local=False,
- cwp_dso=cwp_dso,
- weight=weight)
+ self.AppendBenchmarkSet(benchmarks,
+ graphics_perf_tests,
+ '',
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ '',
+ show_all_results,
+ retries,
+ run_local=False,
+ cwp_dso=cwp_dso,
+ weight=weight)
else:
# Add the single benchmark.
- benchmark = Benchmark(
- benchmark_name,
- test_name,
- test_args,
- iterations,
- rm_chroot_tmp,
- perf_args,
- suite,
- show_all_results,
- retries,
- run_local=False,
- cwp_dso=cwp_dso,
- weight=weight)
+ benchmark = Benchmark(benchmark_name,
+ test_name,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local=False,
+ cwp_dso=cwp_dso,
+ weight=weight)
benchmarks.append(benchmark)
if not benchmarks:
@@ -411,8 +409,8 @@ class ExperimentFactory(object):
# TODO(yunlian): We should consolidate code in machine_manager.py
# to derermine whether we are running from within google or not
- if ('corp.google.com' in socket.gethostname() and not my_remote and
- not crosfleet):
+ if ('corp.google.com' in socket.gethostname() and not my_remote
+ and not crosfleet):
my_remote = self.GetDefaultRemotes(board)
if global_settings.GetField('same_machine') and len(my_remote) > 1:
raise RuntimeError('Only one remote is allowed when same_machine '
@@ -423,8 +421,8 @@ class ExperimentFactory(object):
# pylint: disable=too-many-function-args
label = MockLabel(label_name, build, image, autotest_path, debug_path,
chromeos_root, board, my_remote, image_args,
- cache_dir, cache_only, log_level, compiler, crosfleet,
- chrome_src)
+ cache_dir, cache_only, log_level, compiler,
+ crosfleet, chrome_src)
else:
label = Label(label_name, build, image, autotest_path, debug_path,
chromeos_root, board, my_remote, image_args, cache_dir,
@@ -440,19 +438,33 @@ class ExperimentFactory(object):
if crosfleet:
for remote in all_remote:
self.CheckRemotesInCrosfleet(remote)
- experiment = Experiment(experiment_name, all_remote, working_directory,
- chromeos_root, cache_conditions, labels, benchmarks,
- experiment_file.Canonicalize(), email,
- acquire_timeout, log_dir, log_level, share_cache,
- results_dir, compress_results, locks_dir, cwp_dso,
- ignore_min_max, crosfleet, dut_config,
+ experiment = Experiment(experiment_name,
+ all_remote,
+ working_directory,
+ chromeos_root,
+ cache_conditions,
+ labels,
+ benchmarks,
+ experiment_file.Canonicalize(),
+ email,
+ acquire_timeout,
+ log_dir,
+ log_level,
+ share_cache,
+ results_dir,
+ compress_results,
+ locks_dir,
+ cwp_dso,
+ ignore_min_max,
+ crosfleet,
+ dut_config,
no_lock=no_lock)
return experiment
def GetDefaultRemotes(self, board):
- default_remotes_file = os.path.join(
- os.path.dirname(__file__), 'default_remotes')
+ default_remotes_file = os.path.join(os.path.dirname(__file__),
+ 'default_remotes')
try:
with open(default_remotes_file) as f:
for line in f:
@@ -482,8 +494,8 @@ class ExperimentFactory(object):
l = logger.GetLogger()
l.LogOutput('Crosfleet tool not installed, trying to install it.')
ce = command_executer.GetCommandExecuter(l, log_level=log_level)
- setup_lab_tools = os.path.join(chromeos_root, 'chromeos-admin', 'lab-tools',
- 'setup_lab_tools')
+ setup_lab_tools = os.path.join(chromeos_root, 'chromeos-admin',
+ 'lab-tools', 'setup_lab_tools')
cmd = '%s' % setup_lab_tools
status = ce.RunCommand(cmd)
if status != 0:
diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py
index ca846154..6daef780 100644
--- a/crosperf/experiment_runner.py
+++ b/crosperf/experiment_runner.py
@@ -160,8 +160,8 @@ class ExperimentRunner(object):
cache.Init(br.label.chromeos_image, br.label.chromeos_root,
br.benchmark.test_name, br.iteration, br.test_args,
br.profiler_args, br.machine_manager, br.machine,
- br.label.board, br.cache_conditions, br.logger(), br.log_level,
- br.label, br.share_cache, br.benchmark.suite,
+ br.label.board, br.cache_conditions, br.logger(),
+ br.log_level, br.label, br.share_cache, br.benchmark.suite,
br.benchmark.show_all_results, br.benchmark.run_local,
br.benchmark.cwp_dso)
cache_dir = cache.GetCacheDirForWrite()
@@ -236,8 +236,8 @@ class ExperimentRunner(object):
if not benchmark_run.cache_hit:
send_mail = True
break
- if (not send_mail and not experiment.email_to or
- config.GetConfig('no_email')):
+ if (not send_mail and not experiment.email_to
+ or config.GetConfig('no_email')):
return
label_names = []
@@ -245,7 +245,8 @@ class ExperimentRunner(object):
label_names.append(label.name)
subject = '%s: %s' % (experiment.name, ' vs. '.join(label_names))
- text_report = TextResultsReport.FromExperiment(experiment, True).GetReport()
+ text_report = TextResultsReport.FromExperiment(experiment,
+ True).GetReport()
text_report += ('\nResults are stored in %s.\n' %
experiment.results_directory)
text_report = "<pre style='font-size: 13px'>%s</pre>" % text_report
@@ -253,12 +254,11 @@ class ExperimentRunner(object):
attachment = EmailSender.Attachment('report.html', html_report)
email_to = experiment.email_to or []
email_to.append(getpass.getuser())
- EmailSender().SendEmail(
- email_to,
- subject,
- text_report,
- attachments=[attachment],
- msg_type='html')
+ EmailSender().SendEmail(email_to,
+ subject,
+ text_report,
+ attachments=[attachment],
+ msg_type='html')
def _StoreResults(self, experiment):
if self._terminated:
@@ -300,9 +300,10 @@ class ExperimentRunner(object):
self.l.LogOutput('Storing results of each benchmark run.')
for benchmark_run in experiment.benchmark_runs:
if benchmark_run.result:
- benchmark_run_name = ''.join(
- ch for ch in benchmark_run.name if ch.isalnum())
- benchmark_run_path = os.path.join(results_directory, benchmark_run_name)
+ benchmark_run_name = ''.join(ch for ch in benchmark_run.name
+ if ch.isalnum())
+ benchmark_run_path = os.path.join(results_directory,
+ benchmark_run_name)
if experiment.compress_results:
benchmark_run.result.CompressResultsTo(benchmark_run_path)
else:
@@ -313,15 +314,16 @@ class ExperimentRunner(object):
results_table_path = os.path.join(results_directory, 'results.html')
report = HTMLResultsReport.FromExperiment(experiment).GetReport()
if self.json_report:
- json_report = JSONResultsReport.FromExperiment(
- experiment, json_args={'indent': 2})
+ json_report = JSONResultsReport.FromExperiment(experiment,
+ json_args={'indent': 2})
_WriteJSONReportToFile(experiment, results_directory, json_report)
FileUtils().WriteFile(results_table_path, report)
self.l.LogOutput('Storing email message body in %s.' % results_directory)
msg_file_path = os.path.join(results_directory, 'msg_body.html')
- text_report = TextResultsReport.FromExperiment(experiment, True).GetReport()
+ text_report = TextResultsReport.FromExperiment(experiment,
+ True).GetReport()
text_report += ('\nResults are stored in %s.\n' %
experiment.results_directory)
msg_body = "<pre style='font-size: 13px'>%s</pre>" % text_report
diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py
index a38f24f9..78834c63 100644
--- a/crosperf/settings_factory.py
+++ b/crosperf/settings_factory.py
@@ -22,14 +22,13 @@ class BenchmarkSettings(Settings):
def __init__(self, name):
super(BenchmarkSettings, self).__init__(name, 'benchmark')
self.AddField(
- TextField(
- 'test_name',
- description='The name of the test to run. '
- 'Defaults to the name of the benchmark.'))
+ TextField('test_name',
+ description='The name of the test to run. '
+ 'Defaults to the name of the benchmark.'))
self.AddField(
- TextField(
- 'test_args', description='Arguments to be passed to the '
- 'test.'))
+ TextField('test_args',
+ description='Arguments to be passed to the '
+ 'test.'))
self.AddField(
IntegerField(
'iterations',
@@ -39,24 +38,21 @@ class BenchmarkSettings(Settings):
'If not set, will run each benchmark test the optimum number of '
'times to get a stable result.'))
self.AddField(
- TextField(
- 'suite',
- default='test_that',
- description='The type of the benchmark.'))
+ TextField('suite',
+ default='test_that',
+ description='The type of the benchmark.'))
self.AddField(
- IntegerField(
- 'retries',
- default=0,
- description='Number of times to retry a '
- 'benchmark run.'))
+ IntegerField('retries',
+ default=0,
+ description='Number of times to retry a '
+ 'benchmark run.'))
self.AddField(
- BooleanField(
- 'run_local',
- description='Run benchmark harness on the DUT. '
- 'Currently only compatible with the suite: '
- 'telemetry_Crosperf.',
- required=False,
- default=True))
+ BooleanField('run_local',
+ description='Run benchmark harness on the DUT. '
+ 'Currently only compatible with the suite: '
+ 'telemetry_Crosperf.',
+ required=False,
+ default=True))
self.AddField(
FloatField(
'weight',
@@ -70,12 +66,11 @@ class LabelSettings(Settings):
def __init__(self, name):
super(LabelSettings, self).__init__(name, 'label')
self.AddField(
- TextField(
- 'chromeos_image',
- required=False,
- description='The path to the image to run tests '
- 'on, for local/custom-built images. See the '
- "'build' option for official or trybot images."))
+ TextField('chromeos_image',
+ required=False,
+ description='The path to the image to run tests '
+ 'on, for local/custom-built images. See the '
+ "'build' option for official or trybot images."))
self.AddField(
TextField(
'autotest_path',
@@ -90,53 +85,46 @@ class LabelSettings(Settings):
description='Debug info directory relative to chroot which has '
'symbols and vmlinux that can be used by perf tool.'))
self.AddField(
- TextField(
- 'chromeos_root',
- description='The path to a chromeos checkout which '
- 'contains a src/scripts directory. Defaults to '
- 'the chromeos checkout which contains the '
- 'chromeos_image.'))
- self.AddField(
- ListField(
- 'remote',
- description='A comma-separated list of IPs of chromeos'
- 'devices to run experiments on.'))
- self.AddField(
- TextField(
- 'image_args',
- required=False,
- default='',
- description='Extra arguments to pass to '
- 'image_chromeos.py.'))
- self.AddField(
- TextField(
- 'cache_dir',
- default='',
- description='The cache dir for this image.'))
- self.AddField(
- TextField(
- 'compiler',
- default='gcc',
- description='The compiler used to build the '
- 'ChromeOS image (gcc or llvm).'))
- self.AddField(
- TextField(
- 'chrome_src',
- description='The path to the source of chrome. '
- 'This is used to run telemetry benchmarks. '
- 'The default one is the src inside chroot.',
- required=False,
- default=''))
- self.AddField(
- TextField(
- 'build',
- description='The xbuddy specification for an '
- 'official or trybot image to use for tests. '
- "'/remote' is assumed, and the board is given "
- "elsewhere, so omit the '/remote/<board>/' xbuddy "
- 'prefix.',
- required=False,
- default=''))
+ TextField('chromeos_root',
+ description='The path to a chromeos checkout which '
+ 'contains a src/scripts directory. Defaults to '
+ 'the chromeos checkout which contains the '
+ 'chromeos_image.'))
+ self.AddField(
+ ListField('remote',
+ description='A comma-separated list of IPs of chromeos'
+ 'devices to run experiments on.'))
+ self.AddField(
+ TextField('image_args',
+ required=False,
+ default='',
+ description='Extra arguments to pass to '
+ 'image_chromeos.py.'))
+ self.AddField(
+ TextField('cache_dir',
+ default='',
+ description='The cache dir for this image.'))
+ self.AddField(
+ TextField('compiler',
+ default='gcc',
+ description='The compiler used to build the '
+ 'ChromeOS image (gcc or llvm).'))
+ self.AddField(
+ TextField('chrome_src',
+ description='The path to the source of chrome. '
+ 'This is used to run telemetry benchmarks. '
+ 'The default one is the src inside chroot.',
+ required=False,
+ default=''))
+ self.AddField(
+ TextField('build',
+ description='The xbuddy specification for an '
+ 'official or trybot image to use for tests. '
+ "'/remote' is assumed, and the board is given "
+ "elsewhere, so omit the '/remote/<board>/' xbuddy "
+ 'prefix.',
+ required=False,
+ default=''))
class GlobalSettings(Settings):
@@ -145,67 +133,56 @@ class GlobalSettings(Settings):
def __init__(self, name):
super(GlobalSettings, self).__init__(name, 'global')
self.AddField(
- TextField(
- 'name',
- description='The name of the experiment. Just an '
- 'identifier.'))
- self.AddField(
- TextField(
- 'board',
- description='The target board for running '
- 'experiments on, e.g. x86-alex.'))
- self.AddField(
- BooleanField(
- 'crosfleet',
- description='Whether to run experiments via crosfleet.',
- default=False))
- self.AddField(
- ListField(
- 'remote',
- description='A comma-separated list of IPs of '
- 'chromeos devices to run experiments on.'))
- self.AddField(
- BooleanField(
- 'rerun_if_failed',
- description='Whether to re-run failed test runs '
- 'or not.',
- default=False))
- self.AddField(
- BooleanField(
- 'rm_chroot_tmp',
- default=False,
- description='Whether to remove the test_that '
- 'result in the chroot.'))
- self.AddField(
- ListField(
- 'email',
- description='Space-separated list of email '
- 'addresses to send email to.'))
- self.AddField(
- BooleanField(
- 'rerun',
- description='Whether to ignore the cache and '
- 'for tests to be re-run.',
- default=False))
- self.AddField(
- BooleanField(
- 'same_specs',
- default=True,
- description='Ensure cached runs are run on the '
- 'same kind of devices which are specified as a '
- 'remote.'))
- self.AddField(
- BooleanField(
- 'same_machine',
- default=False,
- description='Ensure cached runs are run on the '
- 'same remote.'))
- self.AddField(
- BooleanField(
- 'use_file_locks',
- default=False,
- description='DEPRECATED: Whether to use the file locks '
- 'or AFE server lock mechanism.'))
+ TextField('name',
+ description='The name of the experiment. Just an '
+ 'identifier.'))
+ self.AddField(
+ TextField('board',
+ description='The target board for running '
+ 'experiments on, e.g. x86-alex.'))
+ self.AddField(
+ BooleanField('crosfleet',
+ description='Whether to run experiments via crosfleet.',
+ default=False))
+ self.AddField(
+ ListField('remote',
+ description='A comma-separated list of IPs of '
+ 'chromeos devices to run experiments on.'))
+ self.AddField(
+ BooleanField('rerun_if_failed',
+ description='Whether to re-run failed test runs '
+ 'or not.',
+ default=False))
+ self.AddField(
+ BooleanField('rm_chroot_tmp',
+ default=False,
+ description='Whether to remove the test_that '
+ 'result in the chroot.'))
+ self.AddField(
+ ListField('email',
+ description='Space-separated list of email '
+ 'addresses to send email to.'))
+ self.AddField(
+ BooleanField('rerun',
+ description='Whether to ignore the cache and '
+ 'for tests to be re-run.',
+ default=False))
+ self.AddField(
+ BooleanField('same_specs',
+ default=True,
+ description='Ensure cached runs are run on the '
+ 'same kind of devices which are specified as a '
+ 'remote.'))
+ self.AddField(
+ BooleanField('same_machine',
+ default=False,
+ description='Ensure cached runs are run on the '
+ 'same remote.'))
+ self.AddField(
+ BooleanField('use_file_locks',
+ default=False,
+ description='DEPRECATED: Whether to use the file locks '
+ 'or AFE server lock mechanism.'))
self.AddField(
IntegerField(
'iterations',
@@ -215,79 +192,68 @@ class GlobalSettings(Settings):
'If not set, will run each benchmark test the optimum number of '
'times to get a stable result.'))
self.AddField(
- TextField(
- 'chromeos_root',
- description='The path to a chromeos checkout which '
- 'contains a src/scripts directory. Defaults to '
- 'the chromeos checkout which contains the '
- 'chromeos_image.'))
- self.AddField(
- TextField(
- 'logging_level',
- default='average',
- description='The level of logging desired. '
- "Options are 'quiet', 'average', and 'verbose'."))
- self.AddField(
- IntegerField(
- 'acquire_timeout',
- default=0,
- description='Number of seconds to wait for '
- 'machine before exit if all the machines in '
- 'the experiment file are busy. Default is 0.'))
- self.AddField(
- TextField(
- 'perf_args',
- default='',
- description='The optional profile command. It '
- 'enables perf commands to record perforamance '
- 'related counters. It must start with perf '
- 'command record or stat followed by arguments.'))
- self.AddField(
- BooleanField(
- 'download_debug',
- default=True,
- description='Download compressed debug symbols alongwith '
- 'image. This can provide more info matching symbols for'
- 'profiles, but takes larger space. By default, download'
- 'it only when perf_args is specified.'))
- self.AddField(
- TextField(
- 'cache_dir',
- default='',
- description='The abs path of cache dir. '
- 'Default is /home/$(whoami)/cros_scratch.'))
- self.AddField(
- BooleanField(
- 'cache_only',
- default=False,
- description='Whether to use only cached '
- 'results (do not rerun failed tests).'))
- self.AddField(
- BooleanField(
- 'no_email',
- default=False,
- description='Whether to disable the email to '
- 'user after crosperf finishes.'))
- self.AddField(
- BooleanField(
- 'json_report',
- default=False,
- description='Whether to generate a json version '
- 'of the report, for archiving.'))
- self.AddField(
- BooleanField(
- 'show_all_results',
- default=False,
- description='When running Telemetry tests, '
- 'whether to all the results, instead of just '
- 'the default (summary) results.'))
- self.AddField(
- TextField(
- 'share_cache',
- default='',
- description='Path to alternate cache whose data '
- 'you want to use. It accepts multiple directories '
- 'separated by a ",".'))
+ TextField('chromeos_root',
+ description='The path to a chromeos checkout which '
+ 'contains a src/scripts directory. Defaults to '
+ 'the chromeos checkout which contains the '
+ 'chromeos_image.'))
+ self.AddField(
+ TextField('logging_level',
+ default='average',
+ description='The level of logging desired. '
+ "Options are 'quiet', 'average', and 'verbose'."))
+ self.AddField(
+ IntegerField('acquire_timeout',
+ default=0,
+ description='Number of seconds to wait for '
+ 'machine before exit if all the machines in '
+ 'the experiment file are busy. Default is 0.'))
+ self.AddField(
+ TextField('perf_args',
+ default='',
+ description='The optional profile command. It '
+ 'enables perf commands to record perforamance '
+ 'related counters. It must start with perf '
+ 'command record or stat followed by arguments.'))
+ self.AddField(
+ BooleanField('download_debug',
+ default=True,
+ description='Download compressed debug symbols alongwith '
+ 'image. This can provide more info matching symbols for'
+ 'profiles, but takes larger space. By default, download'
+ 'it only when perf_args is specified.'))
+ self.AddField(
+ TextField('cache_dir',
+ default='',
+ description='The abs path of cache dir. '
+ 'Default is /home/$(whoami)/cros_scratch.'))
+ self.AddField(
+ BooleanField('cache_only',
+ default=False,
+ description='Whether to use only cached '
+ 'results (do not rerun failed tests).'))
+ self.AddField(
+ BooleanField('no_email',
+ default=False,
+ description='Whether to disable the email to '
+ 'user after crosperf finishes.'))
+ self.AddField(
+ BooleanField('json_report',
+ default=False,
+ description='Whether to generate a json version '
+ 'of the report, for archiving.'))
+ self.AddField(
+ BooleanField('show_all_results',
+ default=False,
+ description='When running Telemetry tests, '
+ 'whether to all the results, instead of just '
+ 'the default (summary) results.'))
+ self.AddField(
+ TextField('share_cache',
+ default='',
+ description='Path to alternate cache whose data '
+ 'you want to use. It accepts multiple directories '
+ 'separated by a ",".'))
self.AddField(
TextField('results_dir', default='', description='The results dir.'))
self.AddField(
@@ -297,55 +263,49 @@ class GlobalSettings(Settings):
description='Whether to compress all test results other than '
'reports into a tarball to save disk space.'))
self.AddField(
- TextField(
- 'locks_dir',
- default='',
- description='An alternate directory to use for '
- 'storing/checking machine file locks for local machines. '
- 'By default the file locks directory is '
- '/google/data/rw/users/mo/mobiletc-prebuild/locks.\n'
- 'WARNING: If you use your own locks directory, '
- 'there is no guarantee that someone else might not '
- 'hold a lock on the same machine in a different '
- 'locks directory.'))
- self.AddField(
- TextField(
- 'chrome_src',
- description='The path to the source of chrome. '
- 'This is used to run telemetry benchmarks. '
- 'The default one is the src inside chroot.',
- required=False,
- default=''))
- self.AddField(
- IntegerField(
- 'retries',
- default=0,
- description='Number of times to retry a '
- 'benchmark run.'))
- self.AddField(
- TextField(
- 'cwp_dso',
- description='The DSO type that we want to use for '
- 'CWP approximation. This is used to run telemetry '
- 'benchmarks. Valid DSO types can be found from dso_list '
- 'in experiment_factory.py. The default value is set to '
- 'be empty.',
- required=False,
- default=''))
- self.AddField(
- BooleanField(
- 'enable_aslr',
- description='Enable ASLR on the machine to run the '
- 'benchmarks. ASLR is disabled by default',
- required=False,
- default=False))
- self.AddField(
- BooleanField(
- 'ignore_min_max',
- description='When doing math for the raw results, '
- 'ignore min and max values to reduce noise.',
- required=False,
- default=False))
+ TextField('locks_dir',
+ default='',
+ description='An alternate directory to use for '
+ 'storing/checking machine file locks for local machines. '
+ 'By default the file locks directory is '
+ '/google/data/rw/users/mo/mobiletc-prebuild/locks.\n'
+ 'WARNING: If you use your own locks directory, '
+ 'there is no guarantee that someone else might not '
+ 'hold a lock on the same machine in a different '
+ 'locks directory.'))
+ self.AddField(
+ TextField('chrome_src',
+ description='The path to the source of chrome. '
+ 'This is used to run telemetry benchmarks. '
+ 'The default one is the src inside chroot.',
+ required=False,
+ default=''))
+ self.AddField(
+ IntegerField('retries',
+ default=0,
+ description='Number of times to retry a '
+ 'benchmark run.'))
+ self.AddField(
+ TextField('cwp_dso',
+ description='The DSO type that we want to use for '
+ 'CWP approximation. This is used to run telemetry '
+ 'benchmarks. Valid DSO types can be found from dso_list '
+ 'in experiment_factory.py. The default value is set to '
+ 'be empty.',
+ required=False,
+ default=''))
+ self.AddField(
+ BooleanField('enable_aslr',
+ description='Enable ASLR on the machine to run the '
+ 'benchmarks. ASLR is disabled by default',
+ required=False,
+ default=False))
+ self.AddField(
+ BooleanField('ignore_min_max',
+ description='When doing math for the raw results, '
+ 'ignore min and max values to reduce noise.',
+ required=False,
+ default=False))
self.AddField(
TextField(
'intel_pstate',
@@ -356,12 +316,11 @@ class GlobalSettings(Settings):
required=False,
default='no_hwp'))
self.AddField(
- BooleanField(
- 'turbostat',
- description='Run turbostat process in the background'
- ' of a benchmark. Enabled by default.',
- required=False,
- default=True))
+ BooleanField('turbostat',
+ description='Run turbostat process in the background'
+ ' of a benchmark. Enabled by default.',
+ required=False,
+ default=True))
self.AddField(
FloatField(
'top_interval',
@@ -377,22 +336,20 @@ class GlobalSettings(Settings):
required=False,
default=1))
self.AddField(
- IntegerField(
- 'cooldown_temp',
- required=False,
- default=40,
- description='Wait until CPU temperature goes down below'
- ' specified temperature in Celsius'
- ' prior starting a benchmark. '
- 'By default the value is set to 40 degrees.'))
- self.AddField(
- IntegerField(
- 'cooldown_time',
- required=False,
- default=10,
- description='Wait specified time in minutes allowing'
- ' CPU to cool down. Zero value disables cooldown. '
- 'The default value is 10 minutes.'))
+ IntegerField('cooldown_temp',
+ required=False,
+ default=40,
+ description='Wait until CPU temperature goes down below'
+ ' specified temperature in Celsius'
+ ' prior starting a benchmark. '
+ 'By default the value is set to 40 degrees.'))
+ self.AddField(
+ IntegerField('cooldown_time',
+ required=False,
+ default=10,
+ description='Wait specified time in minutes allowing'
+ ' CPU to cool down. Zero value disables cooldown. '
+ 'The default value is 10 minutes.'))
self.AddField(
EnumField(
'governor',
@@ -444,8 +401,7 @@ class GlobalSettings(Settings):
'no_lock',
default=False,
description='Do not attempt to lock the DUT.'
- ' Useful when lock is held externally, say with crosfleet.'
- ))
+ ' Useful when lock is held externally, say with crosfleet.'))
class SettingsFactory(object):