author     zhizhouy <zhizhouy@google.com>        2020-03-24 17:11:45 -0700
committer  Commit Bot <commit-bot@chromium.org>  2020-03-26 03:46:38 +0000
commit     7ee1e5dc3841f8e521b45607bd07567fb9f7863f (patch)
tree       bdc0f9f87e7b208634f6db9747a2aab432d2bd56 /crosperf
parent     d511f2bc35d35f203ab500d93d57c328b50529e5 (diff)
download   toolchain-utils-7ee1e5dc3841f8e521b45607bd07567fb9f7863f.tar.gz
crosperf: raise error at exit when benchmarks fail to run
Crosperf always returns 0, whether benchmarks fail or the experiment is
interrupted in the middle. As a result, the exit status does not tell us
whether a run succeeded, which makes nightly test failures hard to find.
This patch changes the behavior of the crosperf return value:

1) Crosperf will not generate any report or send email if it is terminated
   or all benchmarks fail. It raises a RuntimeError at the end stating that
   all benchmarks failed.

2) Crosperf will generate a report if some benchmarks (but not all) fail,
   and will raise a RuntimeError stating that benchmarks partially failed.

3) Crosperf will also copy the results json files to the local results
   directory for further information.

BUG=chromium:1063703
TEST=Passed all unittests, tested with different failure situations.

Change-Id: I998bad51cd7301b9451645d22e8734963bc01aed
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/2119231
Reviewed-by: Caroline Tice <cmtice@chromium.org>
Commit-Queue: Zhizhou Yang <zhizhouy@google.com>
Tested-by: Zhizhou Yang <zhizhouy@google.com>
Auto-Submit: Zhizhou Yang <zhizhouy@google.com>
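With this change an unhandled RuntimeError from RunCrosperf surfaces as a
non-zero crosperf exit status, so callers can finally branch on the return
code. Below is a minimal sketch of such a caller (not part of this patch);
the command line and error handling are illustrative assumptions only.

# Hypothetical nightly-test wrapper; not part of this patch.
import subprocess
import sys

def run_crosperf(experiment_file):
  # Placeholder invocation; adjust flags/paths to the real setup.
  proc = subprocess.run(['crosperf', experiment_file])
  if proc.returncode != 0:
    # After this patch a partially or fully failed run lands here
    # instead of silently returning 0.
    print('crosperf reported failures (exit status %d)' % proc.returncode,
          file=sys.stderr)
  return proc.returncode

if __name__ == '__main__':
  sys.exit(run_crosperf(sys.argv[1]))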
Diffstat (limited to 'crosperf')
-rwxr-xr-x  crosperf/crosperf.py                    |  9
-rw-r--r--  crosperf/experiment_runner.py           | 41
-rwxr-xr-x  crosperf/experiment_runner_unittest.py  |  2
-rw-r--r--  crosperf/results_cache.py               |  5
-rwxr-xr-x  crosperf/results_cache_unittest.py      | 12
5 files changed, 50 insertions(+), 19 deletions(-)
diff --git a/crosperf/crosperf.py b/crosperf/crosperf.py
index ec07e7c7..f195b13a 100755
--- a/crosperf/crosperf.py
+++ b/crosperf/crosperf.py
@@ -27,6 +27,9 @@ from cros_utils import logger
 import test_flag
 
+HAS_FAILURE = 1
+ALL_FAILED = 2
+
 
 def SetupParserOptions(parser):
   """Add all options to the parser."""
@@ -128,7 +131,11 @@ def RunCrosperf(argv):
   runner = ExperimentRunner(
       experiment, json_report, using_schedv2=(not options.noschedv2))
-  runner.Run()
+  ret = runner.Run()
+  if ret == HAS_FAILURE:
+    raise RuntimeError('One or more benchmarks failed.')
+  if ret == ALL_FAILED:
+    raise RuntimeError('All benchmarks failed to run.')
 
 
 def Main(argv):
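The raised RuntimeError only changes the exit status because an uncaught
exception makes the Python interpreter terminate with status 1 (after
printing a traceback); the patch relies on Main letting the exception from
RunCrosperf propagate. The mechanism can be demonstrated in isolation:

# Demonstration (not part of the patch): an uncaught exception terminates the
# process with a non-zero exit status, which is what nightly tests can check.
import subprocess
import sys

child_code = "raise RuntimeError('All benchmarks failed to run.')"
proc = subprocess.run([sys.executable, '-c', child_code])
print('child exit status:', proc.returncode)  # 1, not 0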
diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py
index 39e3f863..3e91e09c 100644
--- a/crosperf/experiment_runner.py
+++ b/crosperf/experiment_runner.py
@@ -49,6 +49,10 @@ class ExperimentRunner(object):
   STATUS_TIME_DELAY = 30
   THREAD_MONITOR_DELAY = 2
 
+  SUCCEEDED = 0
+  HAS_FAILURE = 1
+  ALL_FAILED = 2
+
   def __init__(self,
                experiment,
                json_report,
@@ -258,7 +262,8 @@ class ExperimentRunner(object):
 
   def _StoreResults(self, experiment):
     if self._terminated:
-      return
+      return self.ALL_FAILED
+
     results_directory = experiment.results_directory
     FileUtils().RmDir(results_directory)
     FileUtils().MkDirP(results_directory)
@@ -266,6 +271,24 @@ class ExperimentRunner(object):
     experiment_file_path = os.path.join(results_directory, 'experiment.exp')
     FileUtils().WriteFile(experiment_file_path, experiment.experiment_file)
 
+    has_failure = False
+    all_failed = True
+    self.l.LogOutput('Storing results of each benchmark run.')
+    for benchmark_run in experiment.benchmark_runs:
+      if benchmark_run.result:
+        if benchmark_run.result.retval:
+          has_failure = True
+        else:
+          all_failed = False
+        benchmark_run_name = ''.join(
+            ch for ch in benchmark_run.name if ch.isalnum())
+        benchmark_run_path = os.path.join(results_directory, benchmark_run_name)
+        benchmark_run.result.CopyResultsTo(benchmark_run_path)
+        benchmark_run.result.CleanUp(benchmark_run.benchmark.rm_chroot_tmp)
+
+    if all_failed:
+      return self.ALL_FAILED
+
     self.l.LogOutput('Storing results report in %s.' % results_directory)
     results_table_path = os.path.join(results_directory, 'results.html')
     report = HTMLResultsReport.FromExperiment(experiment).GetReport()
@@ -284,15 +307,6 @@ class ExperimentRunner(object):
     msg_body = "<pre style='font-size: 13px'>%s</pre>" % text_report
     FileUtils().WriteFile(msg_file_path, msg_body)
 
-    self.l.LogOutput('Storing results of each benchmark run.')
-    for benchmark_run in experiment.benchmark_runs:
-      if benchmark_run.result:
-        benchmark_run_name = ''.join(
-            ch for ch in benchmark_run.name if ch.isalnum())
-        benchmark_run_path = os.path.join(results_directory, benchmark_run_name)
-        benchmark_run.result.CopyResultsTo(benchmark_run_path)
-        benchmark_run.result.CleanUp(benchmark_run.benchmark.rm_chroot_tmp)
-
     topstats_file = os.path.join(results_directory, 'topstats.log')
     self.l.LogOutput('Storing top5 statistics of each benchmark run into %s.' %
                      topstats_file)
@@ -305,15 +319,18 @@ class ExperimentRunner(object):
           top_fd.write(benchmark_run.result.FormatStringTop5())
           top_fd.write('\n\n')
 
+    return self.SUCCEEDED if not has_failure else self.HAS_FAILURE
+
   def Run(self):
     try:
       self._Run(self._experiment)
     finally:
      # Always print the report at the end of the run.
       self._PrintTable(self._experiment)
-      if not self._terminated:
-        self._StoreResults(self._experiment)
+      ret = self._StoreResults(self._experiment)
+      if ret != self.ALL_FAILED:
         self._Email(self._experiment)
+      return ret
 
 
 class MockExperimentRunner(ExperimentRunner):
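The heart of the experiment_runner.py change is how _StoreResults classifies
a run from the per-benchmark retval fields before deciding whether to write
reports and send email. The helper below restates that classification in
isolation so it is easy to eyeball; the function and argument names are
illustrative, not part of the patch.

# Standalone restatement of the classification added to _StoreResults.
# The constants mirror ExperimentRunner.SUCCEEDED/HAS_FAILURE/ALL_FAILED.
SUCCEEDED, HAS_FAILURE, ALL_FAILED = 0, 1, 2

def classify_run(retvals, terminated=False):
  """retvals: retval of each benchmark run that produced a result (0 == ok)."""
  if terminated or not retvals:
    # A terminated run, or one with no results at all, is a total failure.
    return ALL_FAILED
  if all(r != 0 for r in retvals):
    return ALL_FAILED
  return HAS_FAILURE if any(r != 0 for r in retvals) else SUCCEEDED

# Expected behavior:
assert classify_run([0, 0]) == SUCCEEDED    # report + email
assert classify_run([0, 1]) == HAS_FAILURE  # report + email, then RuntimeError
assert classify_run([1, 2]) == ALL_FAILED   # no report, RuntimeError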
diff --git a/crosperf/experiment_runner_unittest.py b/crosperf/experiment_runner_unittest.py
index 4905975c..fb584244 100755
--- a/crosperf/experiment_runner_unittest.py
+++ b/crosperf/experiment_runner_unittest.py
@@ -467,9 +467,9 @@ class ExperimentRunnerTest(unittest.TestCase):
     self.assertEqual(self.mock_logger.LogOutputCount, 5)
     self.assertEqual(self.mock_logger.output_msgs, [
         'Storing experiment file in /usr/local/crosperf-results.',
+        'Storing results of each benchmark run.',
         'Storing results report in /usr/local/crosperf-results.',
         'Storing email message body in /usr/local/crosperf-results.',
-        'Storing results of each benchmark run.',
         'Storing top5 statistics of each benchmark run into'
         ' /usr/local/crosperf-results/topstats.log.',
     ])
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index 98062194..a7fd4169 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -109,10 +109,11 @@ class Result(object):
         raise IOError('Could not copy results file: %s' % file_to_copy)
 
   def CopyResultsTo(self, dest_dir):
+    self.CopyFilesTo(dest_dir, self.results_file)
     self.CopyFilesTo(dest_dir, self.perf_data_files)
     self.CopyFilesTo(dest_dir, self.perf_report_files)
-    if self.perf_data_files or self.perf_report_files:
-      self._logger.LogOutput('Perf results files stored in %s.' % dest_dir)
+    if self.results_file or self.perf_data_files or self.perf_report_files:
+      self._logger.LogOutput('Results files stored in %s.' % dest_dir)
 
   def GetNewKeyvals(self, keyvals_dict):
     # Initialize 'units' dictionary.
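Point (3) of the commit message is implemented here: CopyResultsTo now also
copies the results json files (self.results_file), so they land next to the
perf data under each benchmark run's local results directory. A hedged sketch
of reading them back afterwards; the glob pattern and directory layout are
assumptions based on the unit-test file names, not guaranteed by this patch.

# Sketch: collect whatever result.json.* files were copied into a benchmark
# run's local results directory. Names/layout are illustrative assumptions.
import glob
import json
import os

def load_copied_results(benchmark_run_path):
  results = []
  for path in sorted(glob.glob(os.path.join(benchmark_run_path,
                                            'result*.json*'))):
    with open(path) as fd:
      results.append(json.load(fd))
  return results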
diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py
index 1e7f04a1..ed6ff95b 100755
--- a/crosperf/results_cache_unittest.py
+++ b/crosperf/results_cache_unittest.py
@@ -501,6 +501,9 @@ class ResultTest(unittest.TestCase):
 
   @mock.patch.object(Result, 'CopyFilesTo')
   def test_copy_results_to(self, mockCopyFilesTo):
+    results_file = [
+        '/tmp/result.json.0', '/tmp/result.json.1', '/tmp/result.json.2'
+    ]
     perf_data_files = [
         '/tmp/perf.data.0', '/tmp/perf.data.1', '/tmp/perf.data.2'
     ]
@@ -508,16 +511,19 @@ class ResultTest(unittest.TestCase):
         '/tmp/perf.report.0', '/tmp/perf.report.1', '/tmp/perf.report.2'
     ]
 
+    self.result.results_file = results_file
     self.result.perf_data_files = perf_data_files
     self.result.perf_report_files = perf_report_files
     self.result.CopyFilesTo = mockCopyFilesTo
     self.result.CopyResultsTo('/tmp/results/')
-    self.assertEqual(mockCopyFilesTo.call_count, 2)
-    self.assertEqual(len(mockCopyFilesTo.call_args_list), 2)
+    self.assertEqual(mockCopyFilesTo.call_count, 3)
+    self.assertEqual(len(mockCopyFilesTo.call_args_list), 3)
     self.assertEqual(mockCopyFilesTo.call_args_list[0][0],
-                     ('/tmp/results/', perf_data_files))
+                     ('/tmp/results/', results_file))
     self.assertEqual(mockCopyFilesTo.call_args_list[1][0],
+                     ('/tmp/results/', perf_data_files))
+    self.assertEqual(mockCopyFilesTo.call_args_list[2][0],
                      ('/tmp/results/', perf_report_files))
 
   def test_get_new_keyvals(self):