path: root/crosperf
author     Luis Lozano <llozano@chromium.org>  2015-05-19 12:22:37 -0700
committer  ChromeOS Commit Bot <chromeos-commit-bot@chromium.org>  2015-05-28 23:02:21 +0000
commit     df76222bf1cbdbc42bb41934fb960bac8694eca4 (patch)
tree       3bcefbfb23af029d6512a220c584cce2da987bac /crosperf
parent     7cdb11a25795a7215d59aa49c7b8619780951afa (diff)
download   toolchain-utils-df76222bf1cbdbc42bb41934fb960bac8694eca4.tar.gz
Added benchmark retry support.
We often get spurious failures from the benchmark runs. This CL adds
support for a "retries" field that will automatically retry a failed
benchmark run up to the number of times specified.

BUG=None
TEST=Tested by hand.

Change-Id: Ib881c40d766e67a7daf855ce9137d3e3d631f12e
Reviewed-on: https://chrome-internal-review.googlesource.com/217525
Reviewed-by: Luis Lozano <llozano@chromium.org>
Tested-by: Luis Lozano <llozano@chromium.org>
Commit-Queue: Luis Lozano <llozano@chromium.org>
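For context, a benchmark section in a crosperf experiment file could opt into the new behavior roughly as follows. This is only an assumed sketch of the experiment-file layout: the field names (suite, iterations, retries) come from the settings added in this change, while the benchmark name and values are made up.

    benchmark: octane {
      suite: telemetry_Crosperf
      iterations: 3
      retries: 2    # re-run up to twice if the run reports a failure
    }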
Diffstat (limited to 'crosperf')
-rw-r--r--  crosperf/benchmark.py            3
-rw-r--r--  crosperf/experiment_factory.py  17
-rw-r--r--  crosperf/settings_factory.py    11
-rw-r--r--  crosperf/suite_runner.py        35
4 files changed, 45 insertions(+), 21 deletions(-)
diff --git a/crosperf/benchmark.py b/crosperf/benchmark.py
index 93462fa4..506a825d 100644
--- a/crosperf/benchmark.py
+++ b/crosperf/benchmark.py
@@ -17,7 +17,7 @@ class Benchmark(object):
def __init__(self, name, test_name, test_args, iterations,
rm_chroot_tmp, perf_args, suite="",
- show_all_results=False):
+ show_all_results=False, retries=0):
self.name = name
#For telemetry, this is the benchmark name.
self.test_name = test_name
@@ -29,5 +29,6 @@ class Benchmark(object):
self.iteration_adjusted = False
self.suite = suite
self.show_all_results = show_all_results
+ self.retries = retries
if self.suite == "telemetry":
self.show_all_results = True
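A hedged illustration of the widened constructor (parameter names are taken from the signature above; the benchmark name, values, and import path are hypothetical):

    # Illustrative only: constructing a Benchmark with an explicit retry budget.
    from benchmark import Benchmark  # assumed import, as used elsewhere in crosperf

    bench = Benchmark("octane", "octane", test_args="", iterations=3,
                      rm_chroot_tmp=False, perf_args="",
                      suite="telemetry_Crosperf", show_all_results=False,
                      retries=2)
    # bench.retries is later consulted by SuiteRunner.Run() to decide how many
    # times a failed run is re-attempted.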
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index eb4aa7ea..85322bf5 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -81,12 +81,12 @@ class ExperimentFactory(object):
def _AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results):
+ show_all_results, retries):
"""Add all the tests in a set to the benchmarks list."""
for test_name in benchmark_list:
telemetry_benchmark = Benchmark (test_name, test_name, test_args,
iterations, rm_chroot_tmp, perf_args,
- suite, show_all_results)
+ suite, show_all_results, retries)
benchmarks.append(telemetry_benchmark)
@@ -132,6 +132,8 @@ class ExperimentFactory(object):
cache_conditions.append(CacheConditions.MACHINES_MATCH)
# Construct benchmarks.
+ # Some fields are common with global settings. The values are
+ # inherited and/or merged with the global settings values.
benchmarks = []
all_benchmark_settings = experiment_file.GetSettings("benchmark")
for benchmark_settings in all_benchmark_settings:
@@ -142,24 +144,25 @@ class ExperimentFactory(object):
test_args = benchmark_settings.GetField("test_args")
iterations = benchmark_settings.GetField("iterations")
suite = benchmark_settings.GetField("suite")
+ retries = benchmark_settings.GetField("retries")
if suite == 'telemetry_Crosperf':
if test_name == 'all_perfv2':
self._AppendBenchmarkSet (benchmarks, telemetry_perfv2_tests,
test_args, iterations, rm_chroot_tmp,
- perf_args, suite, show_all_results)
+ perf_args, suite, show_all_results, retries)
elif test_name == 'all_pagecyclers':
self._AppendBenchmarkSet (benchmarks, telemetry_pagecycler_tests,
test_args, iterations, rm_chroot_tmp,
- perf_args, suite, show_all_results)
+ perf_args, suite, show_all_results, retries)
elif test_name == 'all_toolchain_perf':
self._AppendBenchmarkSet (benchmarks, telemetry_toolchain_perf_tests,
test_args, iterations, rm_chroot_tmp,
- perf_args, suite, show_all_results)
+ perf_args, suite, show_all_results, retries)
else:
benchmark = Benchmark(test_name, test_name, test_args,
iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results)
+ show_all_results, retries)
benchmarks.append(benchmark)
else:
# Add the single benchmark.
@@ -169,6 +172,8 @@ class ExperimentFactory(object):
benchmarks.append(benchmark)
# Construct labels.
+ # Some fields are common with global settings. The values are
+ # inherited and/or merged with the global settings values.
labels = []
all_label_settings = experiment_file.GetSettings("label")
all_remote = list(remote)
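Condensed, the new plumbing in ExperimentFactory amounts to the following. The names are taken from the hunks above; this is a simplified sketch of the per-benchmark path, not the full GetExperiment logic:

    # Sketch: read the per-benchmark value and thread it into each Benchmark.
    retries = benchmark_settings.GetField("retries")
    benchmark = Benchmark(test_name, test_name, test_args, iterations,
                          rm_chroot_tmp, perf_args, suite,
                          show_all_results, retries)
    benchmarks.append(benchmark)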
diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py
index 835748a3..9def4398 100644
--- a/crosperf/settings_factory.py
+++ b/crosperf/settings_factory.py
@@ -30,6 +30,9 @@ class BenchmarkSettings(Settings):
"test."))
self.AddField(TextField("suite", default="",
description="The type of the benchmark"))
+ self.AddField(IntegerField("retries", default=0,
+ description="Number of times to retry a "
+ "benchmark run."))
class LabelSettings(Settings):
@@ -130,9 +133,9 @@ class GlobalSettings(Settings):
"whether to all the results, instead of just "
"the default (summary) results."))
self.AddField(TextField("share_cache", default="",
- description="Path to alternat cache whose data "
+ description="Path to alternate cache whose data "
"you want to use. It accepts multiples directories"
- " se[arated bu a \",\""))
+ " separated by a \",\""))
self.AddField(TextField("results_dir", default="",
description="The results dir"))
default_locks_dir = lock_machine.Machine.LOCKS_DIR
@@ -148,7 +151,9 @@ class GlobalSettings(Settings):
"This is used to run telemetry benchmarks. "
"The default one is the src inside chroot.",
required=False, default=""))
-
+ self.AddField(IntegerField("retries", default=0,
+ description="Number of times to retry a "
+ "benchmark run."))
class SettingsFactory(object):
"""Factory class for building different types of Settings objects.
diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
index 84dc7fd6..a1332273 100644
--- a/crosperf/suite_runner.py
+++ b/crosperf/suite_runner.py
@@ -52,15 +52,28 @@ class SuiteRunner(object):
self._ct = cmd_term or command_executer.CommandTerminator()
def Run(self, machine, label, benchmark, test_args, profiler_args):
- self.PinGovernorExecutionFrequencies(machine, label.chromeos_root)
- if benchmark.suite == "telemetry":
- return self.Telemetry_Run(machine, label, benchmark, profiler_args)
- elif benchmark.suite == "telemetry_Crosperf":
- return self.Telemetry_Crosperf_Run(machine, label, benchmark,
- test_args, profiler_args)
- else:
- return self.Test_That_Run(machine, label, benchmark, test_args,
- profiler_args)
+ for i in range(0, benchmark.retries + 1):
+ self.PinGovernorExecutionFrequencies(machine, label.chromeos_root)
+ if benchmark.suite == "telemetry":
+ ret_tup = self.Telemetry_Run(machine, label, benchmark, profiler_args)
+ elif benchmark.suite == "telemetry_Crosperf":
+ ret_tup = self.Telemetry_Crosperf_Run(machine, label, benchmark,
+ test_args, profiler_args)
+ else:
+ ret_tup = self.Test_That_Run(machine, label, benchmark, test_args,
+ profiler_args)
+ if ret_tup[0] != 0:
+ self._logger.LogOutput("benchmark %s failed. Retries left: %s"
+ % (benchmark.name, benchmark.retries - i))
+ elif i > 0:
+ self._logger.LogOutput("benchmark %s succeded after %s retries"
+ % (benchmark.name, i))
+ break
+ else:
+ self._logger.LogOutput("benchmark %s succeded on first try"
+ % benchmark.name)
+ break
+ return ret_tup
def GetHighestStaticFrequency(self, machine_name, chromeos_root):
""" Gets the highest static frequency for the specified machine
@@ -239,6 +252,6 @@ class MockSuiteRunner(object):
def Run(self, *_args):
if self._true:
- return ["", "", 0]
+ return [0, "", ""]
else:
- return ["", "", 0]
+ return [0, "", ""]
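Stripped of the suite dispatch, the retry behavior added to SuiteRunner.Run() reduces to the pattern below. This standalone sketch uses assumed names (run_with_retries, run_once, log); it is not the crosperf code, but it shows the accounting: a benchmark with retries=N is attempted at most N+1 times, and the tuple from the last attempt is returned.

    # Standalone sketch of the retry pattern. run_once stands in for the
    # suite-specific call (Telemetry_Run, Telemetry_Crosperf_Run or
    # Test_That_Run) and is assumed to return a (retval, out, err) tuple.
    def run_with_retries(run_once, retries, name, log=print):
      ret_tup = (1, "", "")
      for i in range(retries + 1):
        ret_tup = run_once()
        if ret_tup[0] != 0:
          log("benchmark %s failed. Retries left: %s" % (name, retries - i))
        else:
          if i > 0:
            log("benchmark %s succeeded after %s retries" % (name, i))
          else:
            log("benchmark %s succeeded on first try" % name)
          break
      return ret_tup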