author     Luis Lozano <llozano@chromium.org>  2015-12-15 13:49:30 -0800
committer  Luis Lozano <llozano@chromium.org>  2015-12-16 17:36:06 +0000
commit     f2a3ef46f75d2196a93d3ed27f4d1fcf22b54fbe (patch)
tree       185d243c7eed7c7a0db6f0e640746cadc1479ea9 /crosperf/experiment_factory.py
parent     2a66f70fef907c1cb15229cb58e5129cb620ac98 (diff)
download   toolchain-utils-f2a3ef46f75d2196a93d3ed27f4d1fcf22b54fbe.tar.gz
Run pyformat on all the toolchain-utils files.
This gets rid of a lot of lint issues. Ran by doing this:

for f in *.py; do echo -n "$f " ; if [ -x $f ]; then pyformat -i --remove_trailing_comma --yapf --force_quote_type=double $f ; else pyformat -i --remove_shebang --remove_trailing_comma --yapf --force_quote_type=double $f ; fi ; done

BUG=chromium:567921
TEST=Ran simple crosperf run.

Change-Id: I59778835fdaa5f706d2e1765924389f9e97433d1
Reviewed-on: https://chrome-internal-review.googlesource.com/242031
Reviewed-by: Luis Lozano <llozano@chromium.org>
Commit-Queue: Luis Lozano <llozano@chromium.org>
Tested-by: Luis Lozano <llozano@chromium.org>
Reviewed-by: Yunlian Jiang <yunlian@google.com>
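For readability, the same one-liner expanded with comments; the per-flag behavior is inferred from the flag names, so treat this as a sketch rather than documented pyformat semantics:

    for f in *.py; do
      echo -n "$f "    # print each file name as it is processed
      if [ -x $f ]; then
        # Executable scripts: format in place, keeping the shebang line.
        pyformat -i --remove_trailing_comma --yapf --force_quote_type=double $f
      else
        # Non-executable modules: additionally strip any stray shebang.
        pyformat -i --remove_shebang --remove_trailing_comma --yapf \
            --force_quote_type=double $f
      fi
    done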
Diffstat (limited to 'crosperf/experiment_factory.py')
-rw-r--r--  crosperf/experiment_factory.py  186
1 file changed, 94 insertions(+), 92 deletions(-)
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index bd31d78f..24508c9d 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""A module to generate experiments."""
from __future__ import print_function
@@ -29,8 +28,7 @@ telemetry_perfv2_tests = ['dromaeo.domcoreattr',
'kraken',
'octane',
'robohornet_pro',
- 'sunspider',
- ]
+ 'sunspider']
telemetry_pagecycler_tests = ['page_cycler.intl_ar_fa_he',
'page_cycler.intl_es_fr_pt-BR',
@@ -41,8 +39,7 @@ telemetry_pagecycler_tests = ['page_cycler.intl_ar_fa_he',
'page_cycler.moz',
'page_cycler.netsim.top_10',
'page_cycler.tough_layout_cases',
- 'page_cycler.typical_25',
- ]
+ 'page_cycler.typical_25']
telemetry_toolchain_old_perf_tests = ['dromaeo.domcoremodify',
'page_cycler.intl_es_fr_pt-BR',
@@ -53,8 +50,7 @@ telemetry_toolchain_old_perf_tests = ['dromaeo.domcoremodify',
'page_cycler.typical_25',
'robohornet_pro',
'spaceport',
- 'tab_switching.top_10',
- ]
+ 'tab_switching.top_10']
telemetry_toolchain_perf_tests = ['octane',
'kraken',
'speedometer',
@@ -62,8 +58,8 @@ telemetry_toolchain_perf_tests = ['octane',
'dromaeo.domcoremodify',
'smoothness.tough_webgl_cases',
'page_cycler.typical_25',
- 'media.tough_video_cases',
- ]
+ 'media.tough_video_cases']
+
class ExperimentFactory(object):
"""Factory class for building an Experiment, given an ExperimentFile as input.
@@ -78,19 +74,17 @@ class ExperimentFactory(object):
show_all_results, retries, run_local):
"""Add all the tests in a set to the benchmarks list."""
for test_name in benchmark_list:
- telemetry_benchmark = Benchmark(test_name, test_name, test_args,
- iterations, rm_chroot_tmp, perf_args,
- suite, show_all_results, retries,
- run_local)
+ telemetry_benchmark = Benchmark(
+ test_name, test_name, test_args, iterations, rm_chroot_tmp, perf_args,
+ suite, show_all_results, retries, run_local)
benchmarks.append(telemetry_benchmark)
-
def GetExperiment(self, experiment_file, working_directory, log_dir):
"""Construct an experiment from an experiment file."""
global_settings = experiment_file.GetGlobalSettings()
- experiment_name = global_settings.GetField("name")
- board = global_settings.GetField("board")
- remote = global_settings.GetField("remote")
+ experiment_name = global_settings.GetField('name')
+ board = global_settings.GetField('board')
+ remote = global_settings.GetField('remote')
# This is used to remove the ",' from the remote if user
# add them to the remote string.
new_remote = []
@@ -99,56 +93,56 @@ class ExperimentFactory(object):
c = re.sub('["\']', '', i)
new_remote.append(c)
remote = new_remote
- chromeos_root = global_settings.GetField("chromeos_root")
- rm_chroot_tmp = global_settings.GetField("rm_chroot_tmp")
- perf_args = global_settings.GetField("perf_args")
- acquire_timeout = global_settings.GetField("acquire_timeout")
- cache_dir = global_settings.GetField("cache_dir")
- cache_only = global_settings.GetField("cache_only")
- config.AddConfig("no_email", global_settings.GetField("no_email"))
- share_cache = global_settings.GetField("share_cache")
- results_dir = global_settings.GetField("results_dir")
- use_file_locks = global_settings.GetField("use_file_locks")
- locks_dir = global_settings.GetField("locks_dir")
+ chromeos_root = global_settings.GetField('chromeos_root')
+ rm_chroot_tmp = global_settings.GetField('rm_chroot_tmp')
+ perf_args = global_settings.GetField('perf_args')
+ acquire_timeout = global_settings.GetField('acquire_timeout')
+ cache_dir = global_settings.GetField('cache_dir')
+ cache_only = global_settings.GetField('cache_only')
+ config.AddConfig('no_email', global_settings.GetField('no_email'))
+ share_cache = global_settings.GetField('share_cache')
+ results_dir = global_settings.GetField('results_dir')
+ use_file_locks = global_settings.GetField('use_file_locks')
+ locks_dir = global_settings.GetField('locks_dir')
# If we pass a blank locks_dir to the Experiment, it will use the AFE server
# lock mechanism. So if the user specified use_file_locks, but did not
# specify a locks dir, set the locks dir to the default locks dir in
# file_lock_machine.
if use_file_locks and not locks_dir:
locks_dir = file_lock_machine.Machine.LOCKS_DIR
- chrome_src = global_settings.GetField("chrome_src")
- show_all_results = global_settings.GetField("show_all_results")
- log_level = global_settings.GetField("logging_level")
- if log_level not in ("quiet", "average", "verbose"):
- log_level = "verbose"
+ chrome_src = global_settings.GetField('chrome_src')
+ show_all_results = global_settings.GetField('show_all_results')
+ log_level = global_settings.GetField('logging_level')
+ if log_level not in ('quiet', 'average', 'verbose'):
+ log_level = 'verbose'
# Default cache hit conditions. The image checksum in the cache and the
# computed checksum of the image must match. Also a cache file must exist.
cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
CacheConditions.CHECKSUMS_MATCH]
- if global_settings.GetField("rerun_if_failed"):
+ if global_settings.GetField('rerun_if_failed'):
cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
- if global_settings.GetField("rerun"):
+ if global_settings.GetField('rerun'):
cache_conditions.append(CacheConditions.FALSE)
- if global_settings.GetField("same_machine"):
+ if global_settings.GetField('same_machine'):
cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
- if global_settings.GetField("same_specs"):
+ if global_settings.GetField('same_specs'):
cache_conditions.append(CacheConditions.MACHINES_MATCH)
# Construct benchmarks.
# Some fields are common with global settings. The values are
# inherited and/or merged with the global settings values.
benchmarks = []
- all_benchmark_settings = experiment_file.GetSettings("benchmark")
+ all_benchmark_settings = experiment_file.GetSettings('benchmark')
for benchmark_settings in all_benchmark_settings:
benchmark_name = benchmark_settings.name
- test_name = benchmark_settings.GetField("test_name")
+ test_name = benchmark_settings.GetField('test_name')
if not test_name:
test_name = benchmark_name
- test_args = benchmark_settings.GetField("test_args")
- iterations = benchmark_settings.GetField("iterations")
- suite = benchmark_settings.GetField("suite")
- retries = benchmark_settings.GetField("retries")
- run_local = benchmark_settings.GetField("run_local")
+ test_args = benchmark_settings.GetField('test_args')
+ iterations = benchmark_settings.GetField('iterations')
+ suite = benchmark_settings.GetField('suite')
+ retries = benchmark_settings.GetField('retries')
+ run_local = benchmark_settings.GetField('run_local')
if suite == 'telemetry_Crosperf':
if test_name == 'all_perfv2':
@@ -168,71 +162,81 @@ class ExperimentFactory(object):
run_local)
# Add non-telemetry toolchain-perf benchmarks:
benchmarks.append(Benchmark('graphics_WebGLAquarium',
- 'graphics_WebGLAquarium', '', iterations,
- rm_chroot_tmp, perf_args, '',
- show_all_results, retries,
+ 'graphics_WebGLAquarium',
+ '',
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ '',
+ show_all_results,
+ retries,
run_local=False))
elif test_name == 'all_toolchain_perf_old':
- self._AppendBenchmarkSet(benchmarks,
- telemetry_toolchain_old_perf_tests,
- test_args, iterations, rm_chroot_tmp,
- perf_args, suite, show_all_results, retries,
- run_local)
+ self._AppendBenchmarkSet(
+ benchmarks, telemetry_toolchain_old_perf_tests, test_args,
+ iterations, rm_chroot_tmp, perf_args, suite, show_all_results,
+ retries, run_local)
else:
- benchmark = Benchmark(test_name, test_name, test_args,
- iterations, rm_chroot_tmp, perf_args, suite,
+ benchmark = Benchmark(test_name, test_name, test_args, iterations,
+ rm_chroot_tmp, perf_args, suite,
show_all_results, retries, run_local)
benchmarks.append(benchmark)
else:
# Add the single benchmark.
- benchmark = Benchmark(benchmark_name, test_name, test_args,
- iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results, retries, run_local=False)
+ benchmark = Benchmark(benchmark_name,
+ test_name,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local=False)
benchmarks.append(benchmark)
if not benchmarks:
- raise RuntimeError("No benchmarks specified")
+ raise RuntimeError('No benchmarks specified')
# Construct labels.
# Some fields are common with global settings. The values are
# inherited and/or merged with the global settings values.
labels = []
- all_label_settings = experiment_file.GetSettings("label")
+ all_label_settings = experiment_file.GetSettings('label')
all_remote = list(remote)
for label_settings in all_label_settings:
label_name = label_settings.name
- image = label_settings.GetField("chromeos_image")
- chromeos_root = label_settings.GetField("chromeos_root")
- my_remote = label_settings.GetField("remote")
- compiler = label_settings.GetField("compiler")
+ image = label_settings.GetField('chromeos_image')
+ chromeos_root = label_settings.GetField('chromeos_root')
+ my_remote = label_settings.GetField('remote')
+ compiler = label_settings.GetField('compiler')
new_remote = []
if my_remote:
for i in my_remote:
c = re.sub('["\']', '', i)
new_remote.append(c)
my_remote = new_remote
- if image == "":
- build = label_settings.GetField("build")
+ if image == '':
+ build = label_settings.GetField('build')
if len(build) == 0:
raise RuntimeError("Can not have empty 'build' field!")
image = label_settings.GetXbuddyPath(build, board, chromeos_root,
log_level)
- cache_dir = label_settings.GetField("cache_dir")
- chrome_src = label_settings.GetField("chrome_src")
+ cache_dir = label_settings.GetField('cache_dir')
+ chrome_src = label_settings.GetField('chrome_src')
- # TODO(yunlian): We should consolidate code in machine_manager.py
- # to derermine whether we are running from within google or not
- if ("corp.google.com" in socket.gethostname() and
- (not my_remote
- or my_remote == remote
- and global_settings.GetField("board") != board)):
+ # TODO(yunlian): We should consolidate code in machine_manager.py
+ # to determine whether we are running from within Google or not
+ if ('corp.google.com' in socket.gethostname() and
+ (not my_remote or my_remote == remote and
+ global_settings.GetField('board') != board)):
my_remote = self.GetDefaultRemotes(board)
- if global_settings.GetField("same_machine") and len(my_remote) > 1:
- raise RuntimeError("Only one remote is allowed when same_machine "
- "is turned on")
+ if global_settings.GetField('same_machine') and len(my_remote) > 1:
+ raise RuntimeError('Only one remote is allowed when same_machine '
+ 'is turned on')
all_remote += my_remote
- image_args = label_settings.GetField("image_args")
+ image_args = label_settings.GetField('image_args')
if test_flag.GetTestMode():
# pylint: disable=too-many-function-args
label = MockLabel(label_name, image, chromeos_root, board, my_remote,
@@ -245,37 +249,35 @@ class ExperimentFactory(object):
labels.append(label)
if not labels:
- raise RuntimeError("No labels specified")
+ raise RuntimeError('No labels specified')
- email = global_settings.GetField("email")
+ email = global_settings.GetField('email')
all_remote += list(set(my_remote))
all_remote = list(set(all_remote))
- experiment = Experiment(experiment_name, all_remote,
- working_directory, chromeos_root,
- cache_conditions, labels, benchmarks,
- experiment_file.Canonicalize(),
- email, acquire_timeout, log_dir, log_level,
- share_cache,
+ experiment = Experiment(experiment_name, all_remote, working_directory,
+ chromeos_root, cache_conditions, labels, benchmarks,
+ experiment_file.Canonicalize(), email,
+ acquire_timeout, log_dir, log_level, share_cache,
results_dir, locks_dir)
return experiment
def GetDefaultRemotes(self, board):
- default_remotes_file = os.path.join(os.path.dirname(__file__),
- "default_remotes")
+ default_remotes_file = os.path.join(
+ os.path.dirname(__file__), 'default_remotes')
try:
with open(default_remotes_file) as f:
for line in f:
- key, v = line.split(":")
+ key, v = line.split(':')
if key.strip() == board:
- remotes = v.strip().split(" ")
+ remotes = v.strip().split(' ')
if remotes:
return remotes
else:
- raise RuntimeError("There is no remote for {0}".format(board))
+ raise RuntimeError('There is no remote for {0}'.format(board))
except IOError:
# TODO: rethrow instead of throwing different exception.
- raise RuntimeError("IOError while reading file {0}"
+ raise RuntimeError('IOError while reading file {0}'
.format(default_remotes_file))
else:
- raise RuntimeError("There is not remote for {0}".format(board))
+ raise RuntimeError('There is no remote for {0}'.format(board))
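As context for the GetDefaultRemotes hunk above: it reads a plain-text default_remotes file that maps a board name to a space-separated list of machines, one board per line. A minimal sketch of the format and parsing this implies, with hypothetical board names and hostnames:

    # Hypothetical default_remotes contents, one 'board: machines' entry per line:
    #   lumpy: chromeos-lumpy1.example.com chromeos-lumpy2.example.com
    #   parrot: chromeos-parrot1.example.com

    def get_default_remotes(path, board):
      """Mirror of GetDefaultRemotes: look up the machine list for a board."""
      with open(path) as f:
        for line in f:
          key, value = line.split(':')       # board name : machine list
          if key.strip() == board:
            return value.strip().split(' ')  # machines are space-separated
      raise RuntimeError('There is no remote for {0}'.format(board))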