author     Manoj Gupta <manojgupta@google.com>  2023-03-13 19:48:07 +0000
committer  Chromeos LUCI <chromeos-scoped@luci-project-accounts.iam.gserviceaccount.com>  2023-03-14 02:45:43 +0000
commit     869e979a9b163b195d30d6dc3b2c3e646490b9cc
tree       ca335c410dc3647664f9d46e2593b19a19aa46cf
parent     986da277421a7a0da3c0dbd6817c8a05b07fda6c
crosperf: Error out when all iterations of a benchmark fail
Instead of marking a failure when any iteration fails, return an error
only when all iterations fail, i.e. if a single iteration of a benchmark
passes, crosperf does not exit with an error.

BUG=b:273290665
TEST=unit tests

Change-Id: I02d81c8d1933759f875161c76b31d7ad13892709
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/4334919
Reviewed-by: George Burgess <gbiv@chromium.org>
Reviewed-by: Denis Nikitin <denik@chromium.org>
Tested-by: Manoj Gupta <manojgupta@chromium.org>
Commit-Queue: Manoj Gupta <manojgupta@chromium.org>
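In effect, the change tracks, per label and per benchmark, whether any iteration passed: the run errors out with ALL_FAILED only when nothing passed, succeeds when every benchmark passed at least once on every label, and reports HAS_FAILURE otherwise. A minimal, self-contained sketch of that aggregation follows; the summarize() helper and the (label, benchmark, retval) tuples are hypothetical stand-ins for crosperf's BenchmarkRun objects and status constants, assuming retval == 0 means a passing iteration.

# Minimal sketch of the new pass-tracking logic; the names below are
# illustrative, not crosperf's actual API. Assumes retval == 0 means the
# iteration passed.
def summarize(runs):
    """runs: iterable of (label, benchmark, retval) tuples."""
    benchmarks_passes = {}  # label -> {benchmark: passed at least once}
    all_failed = True
    for label, benchmark, retval in runs:
        passes = benchmarks_passes.setdefault(label, {})
        passes.setdefault(benchmark, False)
        if retval == 0:
            all_failed = False
            passes[benchmark] = True
    if all_failed:
        return "ALL_FAILED"
    # Succeed only if every benchmark passed at least once on every label.
    has_passes = all(
        all(per_label.values()) for per_label in benchmarks_passes.values()
    )
    return "SUCCEEDED" if has_passes else "HAS_FAILURE"


# A failing iteration no longer fails the run as long as another one passes.
print(summarize([("image1", "octane", 1), ("image1", "octane", 0)]))  # SUCCEEDED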
-rw-r--r--  crosperf/experiment_runner.py  26
1 file changed, 17 insertions, 9 deletions
diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py
index cb518843..c41459a1 100644
--- a/crosperf/experiment_runner.py
+++ b/crosperf/experiment_runner.py
@@ -298,7 +298,6 @@ class ExperimentRunner(object):
         experiment_file_path = os.path.join(results_directory, "experiment.exp")
         FileUtils().WriteFile(experiment_file_path, experiment.experiment_file)
 
-        has_failure = False
         all_failed = True
 
         topstats_file = os.path.join(results_directory, "topstats.log")
@@ -306,17 +305,21 @@
             "Storing top statistics of each benchmark run into %s."
             % topstats_file
         )
+        # Track if any iteration for a given benchmark has passed for each
+        # label.
+        benchmarks_passes = {}
         with open(topstats_file, "w") as top_fd:
             for benchmark_run in experiment.benchmark_runs:
+                benchmarks_passes.setdefault(
+                    benchmark_run.label.name,
+                    {benchmark_run.benchmark.name: False},
+                )
                 if benchmark_run.result:
-                    # FIXME: Pylint has a bug suggesting the following change, which
-                    # should be fixed in pylint 2.0. Resolve this after pylint >= 2.0.
-                    # Bug: https://github.com/PyCQA/pylint/issues/1984
-                    # pylint: disable=simplifiable-if-statement
-                    if benchmark_run.result.retval:
-                        has_failure = True
-                    else:
+                    if not benchmark_run.result.retval:
                         all_failed = False
+                        benchmarks_passes[benchmark_run.label.name][
+                            benchmark_run.benchmark.name
+                        ] = True
                     # Header with benchmark run name.
                     top_fd.write("%s\n" % str(benchmark_run))
                     # Formatted string with top statistics.
@@ -325,6 +328,11 @@
         if all_failed:
             return self.ALL_FAILED
 
+        # Set has_passes if at least one iteration of all benchmarks has passed
+        # for every label.
+        has_passes = True
+        for benchmarks in benchmarks_passes.values():
+            has_passes = has_passes and all(benchmarks.values())
         self.l.LogOutput("Storing results of each benchmark run.")
         for benchmark_run in experiment.benchmark_runs:
@@ -369,7 +377,7 @@
         msg_body = "<pre style='font-size: 13px'>%s</pre>" % text_report
         FileUtils().WriteFile(msg_file_path, msg_body)
 
-        return self.SUCCEEDED if not has_failure else self.HAS_FAILURE
+        return self.SUCCEEDED if has_passes else self.HAS_FAILURE
 
     def Run(self):
         try:
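The TEST= line above refers to crosperf's own unit tests. As a purely illustrative check of the behavior change, the assertions below exercise the summarize() sketch shown earlier and cover the three possible outcomes.

# Illustrative checks against the summarize() sketch above, not crosperf's
# actual unit tests.
assert summarize([("img", "octane", 1)]) == "ALL_FAILED"  # nothing passed
assert summarize(
    [("img", "octane", 0), ("img", "speedometer", 1)]
) == "HAS_FAILURE"  # speedometer never passed
assert summarize(
    [("img", "octane", 1), ("img", "octane", 0)]
) == "SUCCEEDED"  # one iteration of the only benchmark passed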