author     Caroline Tice <cmtice@google.com>  2016-10-27 12:45:15 -0700
committer  chrome-bot <chrome-bot@chromium.org>  2016-10-27 14:29:10 -0700
commit     e82513b0aec27bf5d3ca51789edc48dde5ee439b (patch)
tree       734e4480397cf7b658f45ce2ae7de7b377a2431f /crosperf
parent     aee96b71ad4ffde231ca5bf9c0509d15b0e9b753 (diff)
download   toolchain-utils-e82513b0aec27bf5d3ca51789edc48dde5ee439b.tar.gz
[toolchain-utils] Update page_cycler in nightly tests.
Currently page_cycler_v2.typical_25 tries to run too many times, causing
some of the nightly builders to take nearly 24 hours to complete (they
take about 6 hours without page_cycler). Also, page_cycler_v2.typical_25
cannot run locally on some boards (such as daisy). This CL removes
page_cycler_v2.typical_25 from the all_toolchain_perf set and adds it
separately to the nightly tests, with only 2 iterations, no retries, and
forcing it not to run locally on the DUT.

BUG=chromium:660087
TEST=Tested the new page cycler settings in a crosperf run.

Change-Id: Ia68abce4e8ee29a671e58db13f269e531d31736e
Reviewed-on: https://chrome-internal-review.googlesource.com/300855
Commit-Ready: Caroline Tice <cmtice@google.com>
Tested-by: Caroline Tice <cmtice@google.com>
Reviewed-by: Yunlian Jiang <yunlian@google.com>
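For reference, the settings described above (2 iterations, no retries, not run locally on the DUT) would map onto a Benchmark entry roughly as sketched below. This is a minimal, hypothetical sketch using the Benchmark constructor call pattern visible in experiment_factory.py in this diff; the actual nightly-test wiring is not part of this change, the import path is assumed, and rm_chroot_tmp, perf_args, show_all_results, and benchmarks stand in for values from the enclosing GetExperiment scope.

    # Hypothetical sketch, not part of this CL: adding page_cycler_v2.typical_25
    # with the reduced settings described in the commit message.
    from benchmark import Benchmark  # assumed import path within crosperf

    page_cycler_benchmark = Benchmark(
        'page_cycler_v2.typical_25',  # benchmark name
        'page_cycler_v2.typical_25',  # telemetry test name
        '',                           # test_args
        2,                            # iterations: only 2 per this CL
        rm_chroot_tmp,                # from GetExperiment scope
        perf_args,                    # from GetExperiment scope
        'telemetry_Crosperf',         # suite
        show_all_results,             # from GetExperiment scope
        0,                            # retries: none
        run_local=False)              # do not run locally on the DUT
    benchmarks.append(page_cycler_benchmark)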
Diffstat (limited to 'crosperf')
-rw-r--r--  crosperf/experiment_factory.py | 138
1 file changed, 70 insertions(+), 68 deletions(-)
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index fac9a9a8..a6807285 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -21,43 +21,41 @@ import file_lock_machine
# specified sets. Here we define sets of tests that users may want
# to run together.
-telemetry_perfv2_tests = ['dromaeo.domcoreattr',
- 'dromaeo.domcoremodify',
- 'dromaeo.domcorequery',
- 'dromaeo.domcoretraverse',
- 'kraken',
- 'octane',
- 'robohornet_pro',
- 'sunspider']
+telemetry_perfv2_tests = [
+ 'dromaeo.domcoreattr', 'dromaeo.domcoremodify', 'dromaeo.domcorequery',
+ 'dromaeo.domcoretraverse', 'kraken', 'octane', 'robohornet_pro', 'sunspider'
+]
-telemetry_pagecycler_tests = ['page_cycler_v2.intl_ar_fa_he',
- 'page_cycler_v2.intl_es_fr_pt-BR',
- 'page_cycler_v2.intl_hi_ru',
- 'page_cycler_v2.intl_ja_zh',
- 'page_cycler_v2.intl_ko_th_vi',
-# 'page_cycler_v2.morejs',
-# 'page_cycler_v2.moz',
-# 'page_cycler_v2.netsim.top_10',
- 'page_cycler_v2.tough_layout_cases',
- 'page_cycler_v2.typical_25']
+telemetry_pagecycler_tests = [
+ 'page_cycler_v2.intl_ar_fa_he',
+ 'page_cycler_v2.intl_es_fr_pt-BR',
+ 'page_cycler_v2.intl_hi_ru',
+ 'page_cycler_v2.intl_ja_zh',
+ 'page_cycler_v2.intl_ko_th_vi',
+ # 'page_cycler_v2.morejs',
+ # 'page_cycler_v2.moz',
+ # 'page_cycler_v2.netsim.top_10',
+ 'page_cycler_v2.tough_layout_cases',
+ 'page_cycler_v2.typical_25'
+]
-telemetry_toolchain_old_perf_tests = ['dromaeo.domcoremodify',
- 'page_cycler_v2.intl_es_fr_pt-BR',
- 'page_cycler_v2.intl_hi_ru',
- 'page_cycler_v2.intl_ja_zh',
- 'page_cycler_v2.intl_ko_th_vi',
- 'page_cycler_v2.netsim.top_10',
- 'page_cycler_v2.typical_25',
- 'robohornet_pro',
- 'spaceport',
- 'tab_switching.top_10']
-telemetry_toolchain_perf_tests = ['octane',
- 'kraken',
- 'speedometer',
- 'dromaeo.domcoreattr',
- 'dromaeo.domcoremodify',
- 'smoothness.tough_webgl_cases',
- 'page_cycler_v2.typical_25']
+telemetry_toolchain_old_perf_tests = [
+ 'dromaeo.domcoremodify', 'page_cycler_v2.intl_es_fr_pt-BR',
+ 'page_cycler_v2.intl_hi_ru', 'page_cycler_v2.intl_ja_zh',
+ 'page_cycler_v2.intl_ko_th_vi', 'page_cycler_v2.netsim.top_10',
+ 'page_cycler_v2.typical_25', 'robohornet_pro', 'spaceport',
+ 'tab_switching.top_10'
+]
+telemetry_toolchain_perf_tests = [
+ 'octane',
+ 'kraken',
+ 'speedometer',
+ 'dromaeo.domcoreattr',
+ 'dromaeo.domcoremodify',
+ 'smoothness.tough_webgl_cases',
+]
+
+# 'page_cycler_v2.typical_25']
class ExperimentFactory(object):
@@ -73,9 +71,10 @@ class ExperimentFactory(object):
show_all_results, retries, run_local):
"""Add all the tests in a set to the benchmarks list."""
for test_name in benchmark_list:
- telemetry_benchmark = Benchmark(
- test_name, test_name, test_args, iterations, rm_chroot_tmp, perf_args,
- suite, show_all_results, retries, run_local)
+ telemetry_benchmark = Benchmark(test_name, test_name, test_args,
+ iterations, rm_chroot_tmp, perf_args,
+ suite, show_all_results, retries,
+ run_local)
benchmarks.append(telemetry_benchmark)
def GetExperiment(self, experiment_file, working_directory, log_dir):
@@ -116,8 +115,9 @@ class ExperimentFactory(object):
log_level = 'verbose'
# Default cache hit conditions. The image checksum in the cache and the
# computed checksum of the image must match. Also a cache file must exist.
- cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
- CacheConditions.CHECKSUMS_MATCH]
+ cache_conditions = [
+ CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH
+ ]
if global_settings.GetField('rerun_if_failed'):
cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
if global_settings.GetField('rerun'):
@@ -145,10 +145,9 @@ class ExperimentFactory(object):
if suite == 'telemetry_Crosperf':
if test_name == 'all_perfv2':
- self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests,
- test_args, iterations, rm_chroot_tmp,
- perf_args, suite, show_all_results, retries,
- run_local)
+ self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests, test_args,
+ iterations, rm_chroot_tmp, perf_args, suite,
+ show_all_results, retries, run_local)
elif test_name == 'all_pagecyclers':
self.AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
test_args, iterations, rm_chroot_tmp,
@@ -160,21 +159,23 @@ class ExperimentFactory(object):
perf_args, suite, show_all_results, retries,
run_local)
# Add non-telemetry toolchain-perf benchmarks:
- benchmarks.append(Benchmark('graphics_WebGLAquarium',
- 'graphics_WebGLAquarium',
- '',
- iterations,
- rm_chroot_tmp,
- perf_args,
- '',
- show_all_results,
- retries,
- run_local=False))
+ benchmarks.append(
+ Benchmark(
+ 'graphics_WebGLAquarium',
+ 'graphics_WebGLAquarium',
+ '',
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ '',
+ show_all_results,
+ retries,
+ run_local=False))
elif test_name == 'all_toolchain_perf_old':
- self.AppendBenchmarkSet(
- benchmarks, telemetry_toolchain_old_perf_tests, test_args,
- iterations, rm_chroot_tmp, perf_args, suite, show_all_results,
- retries, run_local)
+ self.AppendBenchmarkSet(benchmarks,
+ telemetry_toolchain_old_perf_tests, test_args,
+ iterations, rm_chroot_tmp, perf_args, suite,
+ show_all_results, retries, run_local)
else:
benchmark = Benchmark(test_name, test_name, test_args, iterations,
rm_chroot_tmp, perf_args, suite,
@@ -182,16 +183,17 @@ class ExperimentFactory(object):
benchmarks.append(benchmark)
else:
# Add the single benchmark.
- benchmark = Benchmark(benchmark_name,
- test_name,
- test_args,
- iterations,
- rm_chroot_tmp,
- perf_args,
- suite,
- show_all_results,
- retries,
- run_local=False)
+ benchmark = Benchmark(
+ benchmark_name,
+ test_name,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local=False)
benchmarks.append(benchmark)
if not benchmarks: