Diffstat (limited to 'crosperf')
-rw-r--r--  crosperf/benchmark.py | 37
-rw-r--r--  crosperf/benchmark_run.py | 26
-rwxr-xr-x  crosperf/benchmark_run_unittest.py | 76
-rwxr-xr-x  crosperf/benchmark_unittest.py | 54
-rw-r--r--  crosperf/compare_machines.py | 9
-rwxr-xr-x  crosperf/crosperf_unittest.py | 13
-rw-r--r--  crosperf/download_images.py | 32
-rwxr-xr-x  crosperf/download_images_unittest.py | 4
-rw-r--r--  crosperf/experiment.py | 9
-rw-r--r--  crosperf/experiment_factory.py | 61
-rwxr-xr-x  crosperf/experiment_factory_unittest.py | 12
-rw-r--r--  crosperf/experiment_file.py | 11
-rwxr-xr-x  crosperf/experiment_file_unittest.py | 2
-rw-r--r--  crosperf/experiment_runner.py | 41
-rwxr-xr-x  crosperf/experiment_runner_unittest.py | 144
-rw-r--r--  crosperf/experiment_status.py | 8
-rw-r--r--  crosperf/field.py | 4
-rwxr-xr-x  crosperf/flag_test_unittest.py | 2
-rwxr-xr-x  crosperf/generate_report.py | 91
-rwxr-xr-x  crosperf/generate_report_unittest.py | 62
-rw-r--r--  crosperf/image_checksummer.py | 8
-rw-r--r--  crosperf/machine_image_manager.py | 42
-rwxr-xr-x  crosperf/machine_image_manager_unittest.py | 114
-rw-r--r--  crosperf/machine_manager.py | 80
-rwxr-xr-x  crosperf/machine_manager_unittest.py | 24
-rw-r--r--  crosperf/results_cache.py | 22
-rwxr-xr-x  crosperf/results_cache_unittest.py | 39
-rw-r--r--  crosperf/results_organizer.py | 28
-rwxr-xr-x  crosperf/results_organizer_unittest.py | 161
-rw-r--r--  crosperf/results_report.py | 141
-rw-r--r--  crosperf/results_report_templates.py | 33
-rwxr-xr-x  crosperf/results_report_unittest.py | 84
-rw-r--r--  crosperf/schedv2.py | 79
-rwxr-xr-x  crosperf/schedv2_unittest.py | 49
-rw-r--r--  crosperf/settings_factory.py | 23
-rwxr-xr-x  crosperf/settings_factory_unittest.py | 20
-rwxr-xr-x  crosperf/settings_unittest.py | 26
-rw-r--r--  crosperf/suite_runner.py | 52
-rwxr-xr-x  crosperf/suite_runner_unittest.py | 52
39 files changed, 1000 insertions, 775 deletions
diff --git a/crosperf/benchmark.py b/crosperf/benchmark.py
index a2a34bca..bbb1cdfc 100644
--- a/crosperf/benchmark.py
+++ b/crosperf/benchmark.py
@@ -1,9 +1,40 @@
-
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Define a type that wraps a Benchmark instance."""
+from __future__ import print_function
+
+import math
+from scipy import stats
+
+# See crbug.com/673558 for how these are estimated.
+_estimated_stddev = {
+ 'octane': 0.015,
+ 'kraken': 0.019,
+ 'speedometer': 0.007,
+ 'dromaeo.domcoreattr': 0.023,
+ 'dromaeo.domcoremodify': 0.011,
+ 'smoothness.tough_webgl_cases': 0.025,
+ 'graphics_WebGLAquarium': 0.008,
+ 'page_cycler_v2.typical_25': 0.021,
+}
+
+
+# Get #samples needed to guarantee a given confidence interval, assuming the
+# samples follow normal distribution.
+def _samples(b):
+ # TODO: Make this an option
+ # CI = (0.9, 0.02), i.e., 90% chance that |sample mean - true mean| < 2%.
+ p = 0.9
+ e = 0.02
+ if b not in _estimated_stddev:
+ return 1
+ d = _estimated_stddev[b]
+ # Get at least 2 samples so as to calculate standard deviation, which is
+ # needed in T-test for p-value.
+ n = int(math.ceil((stats.norm.isf((1 - p) / 2) * d / e)**2))
+ return n if n > 1 else 2
+
class Benchmark(object):
"""Class representing a benchmark to be run.
@@ -31,7 +62,7 @@ class Benchmark(object):
self.test_name = test_name
#For telemetry, this is the data.
self.test_args = test_args
- self.iterations = iterations
+ self.iterations = iterations if iterations > 0 else _samples(name)
self.perf_args = perf_args
self.rm_chroot_tmp = rm_chroot_tmp
self.iteration_adjusted = False
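
For context, the hunk above replaces a fixed iteration count with a sample-size estimate driven by per-benchmark relative stddev. The snippet below is a minimal standalone sketch of that calculation, not part of the patch itself: it reuses the same formula and a subset of the stddev estimates from _estimated_stddev, and it assumes scipy is installed (the new benchmark.py code imports scipy.stats as well).

from __future__ import print_function

import math

from scipy import stats

# Relative standard deviations copied from the _estimated_stddev table in the
# hunk above (see crbug.com/673558); only a subset is reproduced here.
_EXAMPLE_STDDEV = {
    'octane': 0.015,
    'speedometer': 0.007,
    'page_cycler_v2.typical_25': 0.021,
}


def samples_needed(relative_stddev, p=0.9, e=0.02):
  """Iterations needed so that P(|sample mean - true mean| < e) >= p.

  Mirrors _samples() above: z is the two-sided normal quantile for p, and
  n = ceil((z * stddev / e) ** 2), floored at 2 so a standard deviation can
  still be computed for the t-test.
  """
  z = stats.norm.isf((1 - p) / 2)
  n = int(math.ceil((z * relative_stddev / e)**2))
  return max(n, 2)


if __name__ == '__main__':
  for bench, stddev in sorted(_EXAMPLE_STDDEV.items()):
    print('%s: %d iterations' % (bench, samples_needed(stddev)))

With p = 0.9 and e = 0.02, octane (stddev 0.015) comes out to 2 iterations and page_cycler_v2.typical_25 (0.021) to 3, which is what the patched Benchmark constructor would pick whenever the iterations field is not set to a positive value.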
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
index e53187e2..bba71a36 100644
--- a/crosperf/benchmark_run.py
+++ b/crosperf/benchmark_run.py
@@ -1,8 +1,6 @@
-
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Module of benchmark runs."""
from __future__ import print_function
@@ -30,6 +28,7 @@ STATUS_PENDING = 'PENDING'
class BenchmarkRun(threading.Thread):
"""The benchmarkrun class."""
+
def __init__(self, name, benchmark, label, iteration, cache_conditions,
machine_manager, logger_to_use, log_level, share_cache):
threading.Thread.__init__(self)
@@ -53,8 +52,8 @@ class BenchmarkRun(threading.Thread):
self.test_args = benchmark.test_args
self.cache = None
self.profiler_args = self.GetExtraAutotestArgs()
- self._ce = command_executer.GetCommandExecuter(self._logger,
- log_level=self.log_level)
+ self._ce = command_executer.GetCommandExecuter(
+ self._logger, log_level=self.log_level)
self.timeline = timeline.Timeline()
self.timeline.Record(STATUS_PENDING)
self.share_cache = share_cache
@@ -96,8 +95,7 @@ class BenchmarkRun(threading.Thread):
err = 'No cache hit.'
self.result = Result.CreateFromRun(
self._logger, self.log_level, self.label, self.machine, output, err,
- retval, self.benchmark.test_name,
- self.benchmark.suite)
+ retval, self.benchmark.test_name, self.benchmark.suite)
else:
self._logger.LogOutput('%s: No cache hit.' % self.name)
@@ -141,8 +139,8 @@ class BenchmarkRun(threading.Thread):
pass
elif self.machine:
if not self.machine.IsReachable():
- self._logger.LogOutput('Machine %s is not reachable, removing it.' %
- self.machine.name)
+ self._logger.LogOutput(
+ 'Machine %s is not reachable, removing it.' % self.machine.name)
self.machine_manager.RemoveMachine(self.machine.name)
self._logger.LogOutput('Releasing machine: %s' % self.machine.name)
self.machine_manager.ReleaseMachine(self.machine)
@@ -190,8 +188,10 @@ class BenchmarkRun(threading.Thread):
perf_args = ' '.join(perf_args_list)
if not perf_args_list[0] in ['record', 'stat']:
raise SyntaxError('perf_args must start with either record or stat')
- extra_test_args = ['--profiler=custom_perf',
- ("--profiler_args='perf_options=\"%s\"'" % perf_args)]
+ extra_test_args = [
+ '--profiler=custom_perf',
+ ("--profiler_args='perf_options=\"%s\"'" % perf_args)
+ ]
return ' '.join(extra_test_args)
else:
return ''
@@ -254,9 +254,9 @@ class MockBenchmarkRun(BenchmarkRun):
self.timeline.Record(STATUS_IMAGING)
self.machine_manager.ImageMachine(machine, self.label)
self.timeline.Record(STATUS_RUNNING)
- [retval, out, err] = self.suite_runner.Run(machine.name, self.label,
- self.benchmark, self.test_args,
- self.profiler_args)
+ [retval, out,
+ err] = self.suite_runner.Run(machine.name, self.label, self.benchmark,
+ self.test_args, self.profiler_args)
self.run_completed = True
rr = MockResult('logger', self.label, self.log_level, machine)
rr.out = out
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
index 9af66a33..74757ac2 100755
--- a/crosperf/benchmark_run_unittest.py
+++ b/crosperf/benchmark_run_unittest.py
@@ -117,11 +117,10 @@ class BenchmarkRunTest(unittest.TestCase):
pass
def test_run(self):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
def MockLogOutput(msg, print_to_console=False):
'Helper function for test_run.'
@@ -258,11 +257,10 @@ class BenchmarkRunTest(unittest.TestCase):
self.assertEqual(self.status, ['FAILED'])
def test_terminate_pass(self):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
def GetLastEventPassed():
'Helper function for test_terminate_pass'
@@ -286,11 +284,10 @@ class BenchmarkRunTest(unittest.TestCase):
self.assertEqual(self.status, benchmark_run.STATUS_FAILED)
def test_terminate_fail(self):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
def GetLastEventFailed():
'Helper function for test_terminate_fail'
@@ -314,11 +311,10 @@ class BenchmarkRunTest(unittest.TestCase):
self.assertEqual(self.status, benchmark_run.STATUS_SUCCEEDED)
def test_acquire_machine(self):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
br.terminated = True
self.assertRaises(Exception, br.AcquireMachine)
@@ -332,11 +328,10 @@ class BenchmarkRunTest(unittest.TestCase):
self.assertEqual(machine.name, 'chromeos1-row3-rack5-host7.cros')
def test_get_extra_autotest_args(self):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
def MockLogError(err_msg):
'Helper function for test_get_extra_autotest_args'
@@ -372,11 +367,10 @@ class BenchmarkRunTest(unittest.TestCase):
@mock.patch.object(SuiteRunner, 'Run')
@mock.patch.object(Result, 'CreateFromRun')
def test_run_test(self, mock_result, mock_runner):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
self.status = []
@@ -391,9 +385,9 @@ class BenchmarkRunTest(unittest.TestCase):
br.RunTest(mock_machine)
self.assertTrue(br.run_completed)
- self.assertEqual(
- self.status,
- [benchmark_run.STATUS_IMAGING, benchmark_run.STATUS_RUNNING])
+ self.assertEqual(self.status, [
+ benchmark_run.STATUS_IMAGING, benchmark_run.STATUS_RUNNING
+ ])
self.assertEqual(br.machine_manager.ImageMachine.call_count, 1)
br.machine_manager.ImageMachine.assert_called_with(mock_machine,
@@ -403,17 +397,15 @@ class BenchmarkRunTest(unittest.TestCase):
'', br.profiler_args)
self.assertEqual(mock_result.call_count, 1)
- mock_result.assert_called_with(self.mock_logger, 'average', self.test_label,
- None, "{'Score':100}", '', 0,
- 'page_cycler.netsim.top_10',
- 'telemetry_Crosperf')
+ mock_result.assert_called_with(
+ self.mock_logger, 'average', self.test_label, None, "{'Score':100}", '',
+ 0, 'page_cycler.netsim.top_10', 'telemetry_Crosperf')
def test_set_cache_conditions(self):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
phony_cache_conditions = [123, 456, True, False]
diff --git a/crosperf/benchmark_unittest.py b/crosperf/benchmark_unittest.py
index 320ede65..24c364c0 100755
--- a/crosperf/benchmark_unittest.py
+++ b/crosperf/benchmark_unittest.py
@@ -16,43 +16,47 @@ class BenchmarkTestCase(unittest.TestCase):
def test_benchmark(self):
# Test creating a benchmark with all the fields filled out.
- b1 = Benchmark('b1_test', # name
- 'octane', # test_name
- '', # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- 'record -e cycles', # perf_args
- 'telemetry_Crosperf', # suite
- True) # show_all_results
+ b1 = Benchmark(
+ 'b1_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles', # perf_args
+ 'telemetry_Crosperf', # suite
+ True) # show_all_results
self.assertTrue(b1.suite, 'telemetry_Crosperf')
# Test creating a benchmark field with default fields left out.
- b2 = Benchmark('b2_test', # name
- 'octane', # test_name
- '', # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- 'record -e cycles') # perf_args
+ b2 = Benchmark(
+ 'b2_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles') # perf_args
self.assertEqual(b2.suite, '')
self.assertFalse(b2.show_all_results)
# Test explicitly creating 'suite=Telemetry' and 'show_all_results=False"
# and see what happens.
- b3 = Benchmark('b3_test', # name
- 'octane', # test_name
- '', # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- 'record -e cycles', # perf_args
- 'telemetry', # suite
- False) # show_all_results
+ b3 = Benchmark(
+ 'b3_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles', # perf_args
+ 'telemetry', # suite
+ False) # show_all_results
self.assertTrue(b3.show_all_results)
# Check to see if the args to Benchmark have changed since the last time
# this test was updated.
- args_list = ['self', 'name', 'test_name', 'test_args', 'iterations',
- 'rm_chroot_tmp', 'perf_args', 'suite', 'show_all_results',
- 'retries', 'run_local']
+ args_list = [
+ 'self', 'name', 'test_name', 'test_args', 'iterations', 'rm_chroot_tmp',
+ 'perf_args', 'suite', 'show_all_results', 'retries', 'run_local'
+ ]
arg_spec = inspect.getargspec(Benchmark.__init__)
self.assertEqual(len(arg_spec.args), len(args_list))
for arg in args_list:
diff --git a/crosperf/compare_machines.py b/crosperf/compare_machines.py
index 0a61eeb9..34513a87 100644
--- a/crosperf/compare_machines.py
+++ b/crosperf/compare_machines.py
@@ -22,10 +22,11 @@ def PrintUsage(msg):
def Main(argv):
parser = argparse.ArgumentParser()
- parser.add_argument('--chromeos_root',
- default='/path/to/chromeos',
- dest='chromeos_root',
- help='ChromeOS root checkout directory')
+ parser.add_argument(
+ '--chromeos_root',
+ default='/path/to/chromeos',
+ dest='chromeos_root',
+ help='ChromeOS root checkout directory')
parser.add_argument('remotes', nargs=argparse.REMAINDER)
options = parser.parse_args(argv)
diff --git a/crosperf/crosperf_unittest.py b/crosperf/crosperf_unittest.py
index 4a468967..b361f15b 100755
--- a/crosperf/crosperf_unittest.py
+++ b/crosperf/crosperf_unittest.py
@@ -42,12 +42,13 @@ class CrosperfTest(unittest.TestCase):
def test_convert_options_to_settings(self):
parser = argparse.ArgumentParser()
- parser.add_argument('-l',
- '--log_dir',
- dest='log_dir',
- default='',
- help='The log_dir, default is under '
- '<crosperf_logs>/logs')
+ parser.add_argument(
+ '-l',
+ '--log_dir',
+ dest='log_dir',
+ default='',
+ help='The log_dir, default is under '
+ '<crosperf_logs>/logs')
crosperf.SetupParserOptions(parser)
argv = ['crosperf/crosperf.py', 'temp.exp', '--rerun=True']
options, _ = parser.parse_known_args(argv)
diff --git a/crosperf/download_images.py b/crosperf/download_images.py
index 8ceaa874..ad0a812b 100644
--- a/crosperf/download_images.py
+++ b/crosperf/download_images.py
@@ -56,8 +56,8 @@ class ImageDownloader(object):
# image name.
command = ('cd ~/trunk/src/third_party/toolchain-utils/crosperf; '
"python translate_xbuddy.py '%s'" % xbuddy_label)
- _, build_id_tuple_str, _ = self._ce.ChrootRunCommandWOutput(chromeos_root,
- command)
+ _, build_id_tuple_str, _ = self._ce.ChrootRunCommandWOutput(
+ chromeos_root, command)
if not build_id_tuple_str:
raise MissingImage("Unable to find image for '%s'" % xbuddy_label)
@@ -143,8 +143,8 @@ class ImageDownloader(object):
cmd = '%s ls %s' % (gsutil_cmd, gs_package_name)
status = self._ce.RunCommand(cmd)
if status != 0:
- raise MissingFile('Cannot find autotest package file: %s.' %
- package_file_name)
+ raise MissingFile(
+ 'Cannot find autotest package file: %s.' % package_file_name)
if self.log_level == 'average':
self._logger.LogOutput('Preparing to download %s package to local '
@@ -171,8 +171,8 @@ class ImageDownloader(object):
package_file_name, uncompress_cmd):
# Uncompress file
download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id)
- command = ('cd %s ; %s %s' %
- (download_path, uncompress_cmd, package_file_name))
+ command = ('cd %s ; %s %s' % (download_path, uncompress_cmd,
+ package_file_name))
if self.log_level != 'verbose':
self._logger.LogOutput('CMD: %s' % command)
@@ -193,8 +193,8 @@ class ImageDownloader(object):
def VerifyAutotestFilesExist(self, chromeos_root, build_id, package_file):
# Quickly verify if the files are there
status = 0
- gs_package_name = ('gs://chromeos-image-archive/%s/%s' %
- (build_id, package_file))
+ gs_package_name = ('gs://chromeos-image-archive/%s/%s' % (build_id,
+ package_file))
gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
if not test_flag.GetTestMode():
cmd = '%s ls %s' % (gsutil_cmd, gs_package_name)
@@ -227,9 +227,9 @@ class ImageDownloader(object):
autotest_packages_name)
if status != 0:
default_autotest_dir = '~/trunk/src/third_party/autotest/files'
- print('(Warning: Could not find autotest packages .)\n'
- '(Warning: Defaulting autotest path to %s .' %
- default_autotest_dir)
+ print(
+ '(Warning: Could not find autotest packages .)\n'
+ '(Warning: Defaulting autotest path to %s .' % default_autotest_dir)
return default_autotest_dir
# Files exist on server, download and uncompress them
@@ -242,12 +242,10 @@ class ImageDownloader(object):
self.UncompressSingleAutotestFile(chromeos_root, build_id,
autotest_packages_name, 'tar -xvf ')
- self.UncompressSingleAutotestFile(chromeos_root, build_id,
- autotest_server_package_name,
- 'tar -jxvf ')
- self.UncompressSingleAutotestFile(chromeos_root, build_id,
- autotest_control_files_name,
- 'tar -xvf ')
+ self.UncompressSingleAutotestFile(
+ chromeos_root, build_id, autotest_server_package_name, 'tar -jxvf ')
+ self.UncompressSingleAutotestFile(
+ chromeos_root, build_id, autotest_control_files_name, 'tar -xvf ')
# Rename created autotest directory to autotest_files
command = ('cd %s ; mv autotest autotest_files' % download_path)
if self.log_level != 'verbose':
diff --git a/crosperf/download_images_unittest.py b/crosperf/download_images_unittest.py
index 7a4f3850..349a2dbb 100755
--- a/crosperf/download_images_unittest.py
+++ b/crosperf/download_images_unittest.py
@@ -126,8 +126,8 @@ class ImageDownloaderTestcast(unittest.TestCase):
# 2nd arg must be exception handler
except_handler_string = 'RunCommandExceptionHandler.HandleException'
self.assertTrue(
- except_handler_string in
- repr(mock_cmd_exec.RunCommand.call_args_list[0][1]))
+ except_handler_string in repr(
+ mock_cmd_exec.RunCommand.call_args_list[0][1]))
# Call 2, should have 2 arguments
self.assertEqual(len(mock_cmd_exec.RunCommand.call_args_list[1]), 2)
diff --git a/crosperf/experiment.py b/crosperf/experiment.py
index dbcde213..987318a5 100644
--- a/crosperf/experiment.py
+++ b/crosperf/experiment.py
@@ -126,10 +126,11 @@ class Experiment(object):
full_name = '%s_%s_%s' % (label.name, benchmark.name, iteration)
logger_to_use = logger.Logger(self.log_dir, 'run.%s' % (full_name),
True)
- benchmark_runs.append(benchmark_run.BenchmarkRun(
- benchmark_run_name, benchmark, label, iteration,
- self.cache_conditions, self.machine_manager, logger_to_use,
- self.log_level, self.share_cache))
+ benchmark_runs.append(
+ benchmark_run.BenchmarkRun(benchmark_run_name, benchmark, label,
+ iteration, self.cache_conditions,
+ self.machine_manager, logger_to_use,
+ self.log_level, self.share_cache))
return benchmark_runs
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index 2278015b..9d58048e 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -78,11 +78,13 @@ crosbolt_perf_tests = [
'power_Resume',
'video_PlaybackPerf.h264',
'build_RootFilesystemSize',
+]
+
# 'cheets_AntutuTest',
# 'cheets_PerfBootServer',
# 'cheets_CandyCrushTest',
# 'cheets_LinpackTest',
-]
+#]
class ExperimentFactory(object):
@@ -98,10 +100,9 @@ class ExperimentFactory(object):
show_all_results, retries, run_local):
"""Add all the tests in a set to the benchmarks list."""
for test_name in benchmark_list:
- telemetry_benchmark = Benchmark(test_name, test_name, test_args,
- iterations, rm_chroot_tmp, perf_args,
- suite, show_all_results, retries,
- run_local)
+ telemetry_benchmark = Benchmark(
+ test_name, test_name, test_args, iterations, rm_chroot_tmp, perf_args,
+ suite, show_all_results, retries, run_local)
benchmarks.append(telemetry_benchmark)
def GetExperiment(self, experiment_file, working_directory, log_dir):
@@ -210,20 +211,33 @@ class ExperimentFactory(object):
benchmarks.append(benchmark)
else:
if test_name == 'all_graphics_perf':
- self.AppendBenchmarkSet(benchmarks,
- graphics_perf_tests, '',
- iterations, rm_chroot_tmp, perf_args, '',
- show_all_results, retries, run_local=False)
+ self.AppendBenchmarkSet(
+ benchmarks,
+ graphics_perf_tests,
+ '',
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ '',
+ show_all_results,
+ retries,
+ run_local=False)
elif test_name == 'all_crosbolt_perf':
- self.AppendBenchmarkSet(benchmarks,
- telemetry_crosbolt_perf_tests, test_args,
- iterations, rm_chroot_tmp, perf_args,
- 'telemetry_Crosperf', show_all_results,
- retries, run_local)
- self.AppendBenchmarkSet(benchmarks,
- crosbolt_perf_tests, '',
- iterations, rm_chroot_tmp, perf_args, '',
- show_all_results, retries, run_local=False)
+ self.AppendBenchmarkSet(benchmarks, telemetry_crosbolt_perf_tests,
+ test_args, iterations, rm_chroot_tmp,
+ perf_args, 'telemetry_Crosperf',
+ show_all_results, retries, run_local)
+ self.AppendBenchmarkSet(
+ benchmarks,
+ crosbolt_perf_tests,
+ '',
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ '',
+ show_all_results,
+ retries,
+ run_local=False)
else:
# Add the single benchmark.
benchmark = Benchmark(
@@ -265,11 +279,8 @@ class ExperimentFactory(object):
build = label_settings.GetField('build')
if len(build) == 0:
raise RuntimeError("Can not have empty 'build' field!")
- image, autotest_path = label_settings.GetXbuddyPath(build,
- autotest_path,
- board,
- chromeos_root,
- log_level)
+ image, autotest_path = label_settings.GetXbuddyPath(
+ build, autotest_path, board, chromeos_root, log_level)
cache_dir = label_settings.GetField('cache_dir')
chrome_src = label_settings.GetField('chrome_src')
@@ -277,8 +288,8 @@ class ExperimentFactory(object):
# TODO(yunlian): We should consolidate code in machine_manager.py
# to derermine whether we are running from within google or not
if ('corp.google.com' in socket.gethostname() and
- (not my_remote or my_remote == remote and
- global_settings.GetField('board') != board)):
+ (not my_remote or
+ my_remote == remote and global_settings.GetField('board') != board)):
my_remote = self.GetDefaultRemotes(board)
if global_settings.GetField('same_machine') and len(my_remote) > 1:
raise RuntimeError('Only one remote is allowed when same_machine '
diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py
index 02bfd0a1..44090e5c 100755
--- a/crosperf/experiment_factory_unittest.py
+++ b/crosperf/experiment_factory_unittest.py
@@ -175,9 +175,9 @@ class ExperimentFactoryTest(unittest.TestCase):
test_flag.SetTestMode(True)
label_settings.SetField('remote', 'chromeos1.cros chromeos2.cros')
exp = ef.GetExperiment(mock_experiment_file, '', '')
- self.assertEqual(
- exp.remote,
- ['chromeos1.cros', 'chromeos2.cros', '123.45.67.89', '123.45.76.80'])
+ self.assertEqual(exp.remote, [
+ 'chromeos1.cros', 'chromeos2.cros', '123.45.67.89', '123.45.76.80'
+ ])
# Third test: Automatic fixing of bad logging_level param:
global_settings.SetField('logging_level', 'really loud!')
@@ -213,9 +213,9 @@ class ExperimentFactoryTest(unittest.TestCase):
self.assertEqual(len(exp.labels), 2)
self.assertEqual(exp.labels[1].chromeos_image, 'fake_image_path')
self.assertEqual(exp.labels[1].autotest_path, 'fake_autotest_path')
- self.assertEqual(
- exp.remote,
- ['fake_chromeos_machine1.cros', 'fake_chromeos_machine2.cros'])
+ self.assertEqual(exp.remote, [
+ 'fake_chromeos_machine1.cros', 'fake_chromeos_machine2.cros'
+ ])
def test_get_default_remotes(self):
board_list = [
diff --git a/crosperf/experiment_file.py b/crosperf/experiment_file.py
index 016e9d86..57eb52dc 100644
--- a/crosperf/experiment_file.py
+++ b/crosperf/experiment_file.py
@@ -114,8 +114,8 @@ class ExperimentFile(object):
elif ExperimentFile._OPEN_SETTINGS_RE.match(line):
new_settings = self._ParseSettings(reader)
if new_settings.name in settings_names:
- raise SyntaxError("Duplicate settings name: '%s'." %
- new_settings.name)
+ raise SyntaxError(
+ "Duplicate settings name: '%s'." % new_settings.name)
settings_names[new_settings.name] = True
self.all_settings.append(new_settings)
elif ExperimentFile._FIELD_VALUE_RE.match(line):
@@ -160,11 +160,8 @@ class ExperimentFile(object):
autotest_path = ''
if autotest_field.assigned:
autotest_path = autotest_field.GetString()
- image_path, autotest_path = settings.GetXbuddyPath(value,
- autotest_path,
- board,
- chromeos_root,
- 'quiet')
+ image_path, autotest_path = settings.GetXbuddyPath(
+ value, autotest_path, board, chromeos_root, 'quiet')
res += '\t#actual_image: %s\n' % image_path
if not autotest_field.assigned:
res += '\t#actual_autotest_path: %s\n' % autotest_path
diff --git a/crosperf/experiment_file_unittest.py b/crosperf/experiment_file_unittest.py
index ed1f176c..d4a02107 100755
--- a/crosperf/experiment_file_unittest.py
+++ b/crosperf/experiment_file_unittest.py
@@ -3,7 +3,6 @@
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""The unittest of experiment_file."""
from __future__ import print_function
import StringIO
@@ -87,6 +86,7 @@ label: image2 {
class ExperimentFileTest(unittest.TestCase):
"""The main class for Experiment File test."""
+
def testLoadExperimentFile1(self):
input_file = StringIO.StringIO(EXPERIMENT_FILE_1)
experiment_file = ExperimentFile(input_file)
diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py
index b30c8bd5..b583743b 100644
--- a/crosperf/experiment_runner.py
+++ b/crosperf/experiment_runner.py
@@ -26,14 +26,15 @@ from results_report import TextResultsReport
from results_report import JSONResultsReport
from schedv2 import Schedv2
+
def _WriteJSONReportToFile(experiment, results_dir, json_report):
"""Writes a JSON report to a file in results_dir."""
has_llvm = any('llvm' in l.compiler for l in experiment.labels)
compiler_string = 'llvm' if has_llvm else 'gcc'
board = experiment.labels[0].board
- filename = 'report_%s_%s_%s.%s.json' % (
- board, json_report.date, json_report.time.replace(':', '.'),
- compiler_string)
+ filename = 'report_%s_%s_%s.%s.json' % (board, json_report.date,
+ json_report.time.replace(':', '.'),
+ compiler_string)
fullname = os.path.join(results_dir, filename)
report_text = json_report.GetReport()
with open(fullname, 'w') as out_file:
@@ -151,9 +152,10 @@ class ExperimentRunner(object):
cache.Init(br.label.chromeos_image, br.label.chromeos_root,
br.benchmark.test_name, br.iteration, br.test_args,
br.profiler_args, br.machine_manager, br.machine,
- br.label.board, br.cache_conditions, br._logger, br.log_level,
- br.label, br.share_cache, br.benchmark.suite,
- br.benchmark.show_all_results, br.benchmark.run_local)
+ br.label.board, br.cache_conditions,
+ br.logger(), br.log_level, br.label, br.share_cache,
+ br.benchmark.suite, br.benchmark.show_all_results,
+ br.benchmark.run_local)
cache_dir = cache.GetCacheDirForWrite()
if os.path.exists(cache_dir):
self.l.LogOutput('Removing cache dir: %s' % cache_dir)
@@ -229,18 +231,19 @@ class ExperimentRunner(object):
subject = '%s: %s' % (experiment.name, ' vs. '.join(label_names))
text_report = TextResultsReport.FromExperiment(experiment, True).GetReport()
- text_report += ('\nResults are stored in %s.\n' %
- experiment.results_directory)
+ text_report += (
+ '\nResults are stored in %s.\n' % experiment.results_directory)
text_report = "<pre style='font-size: 13px'>%s</pre>" % text_report
html_report = HTMLResultsReport.FromExperiment(experiment).GetReport()
attachment = EmailSender.Attachment('report.html', html_report)
email_to = experiment.email_to or []
email_to.append(getpass.getuser())
- EmailSender().SendEmail(email_to,
- subject,
- text_report,
- attachments=[attachment],
- msg_type='html')
+ EmailSender().SendEmail(
+ email_to,
+ subject,
+ text_report,
+ attachments=[attachment],
+ msg_type='html')
def _StoreResults(self, experiment):
if self._terminated:
@@ -256,8 +259,8 @@ class ExperimentRunner(object):
results_table_path = os.path.join(results_directory, 'results.html')
report = HTMLResultsReport.FromExperiment(experiment).GetReport()
if self.json_report:
- json_report = JSONResultsReport.FromExperiment(experiment,
- json_args={'indent': 2})
+ json_report = JSONResultsReport.FromExperiment(
+ experiment, json_args={'indent': 2})
_WriteJSONReportToFile(experiment, results_directory, json_report)
FileUtils().WriteFile(results_table_path, report)
@@ -265,8 +268,8 @@ class ExperimentRunner(object):
self.l.LogOutput('Storing email message body in %s.' % results_directory)
msg_file_path = os.path.join(results_directory, 'msg_body.html')
text_report = TextResultsReport.FromExperiment(experiment, True).GetReport()
- text_report += ('\nResults are stored in %s.\n' %
- experiment.results_directory)
+ text_report += (
+ '\nResults are stored in %s.\n' % experiment.results_directory)
msg_body = "<pre style='font-size: 13px'>%s</pre>" % text_report
FileUtils().WriteFile(msg_file_path, msg_body)
@@ -296,8 +299,8 @@ class MockExperimentRunner(ExperimentRunner):
super(MockExperimentRunner, self).__init__(experiment, json_report)
def _Run(self, experiment):
- self.l.LogOutput("Would run the following experiment: '%s'." %
- experiment.name)
+ self.l.LogOutput(
+ "Would run the following experiment: '%s'." % experiment.name)
def _PrintTable(self, experiment):
self.l.LogOutput('Would print the experiment table.')
diff --git a/crosperf/experiment_runner_unittest.py b/crosperf/experiment_runner_unittest.py
index 38ac3874..4809894f 100755
--- a/crosperf/experiment_runner_unittest.py
+++ b/crosperf/experiment_runner_unittest.py
@@ -106,9 +106,8 @@ class ExperimentRunnerTest(unittest.TestCase):
def make_fake_experiment(self):
test_flag.SetTestMode(True)
experiment_file = ExperimentFile(StringIO.StringIO(EXPERIMENT_FILE_1))
- experiment = ExperimentFactory().GetExperiment(experiment_file,
- working_directory='',
- log_dir='')
+ experiment = ExperimentFactory().GetExperiment(
+ experiment_file, working_directory='', log_dir='')
return experiment
@mock.patch.object(machine_manager.MachineManager, 'AddMachine')
@@ -120,20 +119,22 @@ class ExperimentRunnerTest(unittest.TestCase):
self.exp = self.make_fake_experiment()
def test_init(self):
- er = experiment_runner.ExperimentRunner(self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
self.assertFalse(er._terminated)
self.assertEqual(er.STATUS_TIME_DELAY, 10)
self.exp.log_level = 'verbose'
- er = experiment_runner.ExperimentRunner(self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
self.assertEqual(er.STATUS_TIME_DELAY, 30)
@mock.patch.object(experiment_status.ExperimentStatus, 'GetStatusString')
@@ -164,11 +165,12 @@ class ExperimentRunnerTest(unittest.TestCase):
# Test 1: log_level == "quiet"
self.exp.log_level = 'quiet'
- er = experiment_runner.ExperimentRunner(self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
er.STATUS_TIME_DELAY = 2
mock_status_string.return_value = 'Fake status string'
er._Run(self.exp)
@@ -180,9 +182,10 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(self.mock_logger.dot_count, 2)
self.assertEqual(mock_progress_string.call_count, 0)
self.assertEqual(mock_status_string.call_count, 2)
- self.assertEqual(self.mock_logger.output_msgs,
- ['==============================', 'Fake status string',
- '=============================='])
+ self.assertEqual(self.mock_logger.output_msgs, [
+ '==============================', 'Fake status string',
+ '=============================='
+ ])
self.assertEqual(len(self.mock_logger.error_msgs), 0)
# Test 2: log_level == "average"
@@ -190,11 +193,12 @@ class ExperimentRunnerTest(unittest.TestCase):
reset()
self.exp.log_level = 'average'
mock_status_string.call_count = 0
- er = experiment_runner.ExperimentRunner(self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
er.STATUS_TIME_DELAY = 2
mock_status_string.return_value = 'Fake status string'
er._Run(self.exp)
@@ -206,9 +210,10 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(self.mock_logger.dot_count, 2)
self.assertEqual(mock_progress_string.call_count, 0)
self.assertEqual(mock_status_string.call_count, 2)
- self.assertEqual(self.mock_logger.output_msgs,
- ['==============================', 'Fake status string',
- '=============================='])
+ self.assertEqual(self.mock_logger.output_msgs, [
+ '==============================', 'Fake status string',
+ '=============================='
+ ])
self.assertEqual(len(self.mock_logger.error_msgs), 0)
# Test 3: log_level == "verbose"
@@ -216,11 +221,12 @@ class ExperimentRunnerTest(unittest.TestCase):
reset()
self.exp.log_level = 'verbose'
mock_status_string.call_count = 0
- er = experiment_runner.ExperimentRunner(self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
er.STATUS_TIME_DELAY = 2
mock_status_string.return_value = 'Fake status string'
mock_progress_string.return_value = 'Fake progress string'
@@ -233,22 +239,24 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(self.mock_logger.dot_count, 0)
self.assertEqual(mock_progress_string.call_count, 2)
self.assertEqual(mock_status_string.call_count, 2)
- self.assertEqual(self.mock_logger.output_msgs,
- ['==============================', 'Fake progress string',
- 'Fake status string', '==============================',
- '==============================', 'Fake progress string',
- 'Fake status string', '=============================='])
+ self.assertEqual(self.mock_logger.output_msgs, [
+ '==============================', 'Fake progress string',
+ 'Fake status string', '==============================',
+ '==============================', 'Fake progress string',
+ 'Fake status string', '=============================='
+ ])
self.assertEqual(len(self.mock_logger.error_msgs), 0)
@mock.patch.object(TextResultsReport, 'GetReport')
def test_print_table(self, mock_report):
self.mock_logger.Reset()
mock_report.return_value = 'This is a fake experiment report.'
- er = experiment_runner.ExperimentRunner(self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
er._PrintTable(self.exp)
self.assertEqual(mock_report.call_count, 1)
self.assertEqual(self.mock_logger.output_msgs,
@@ -269,11 +277,12 @@ class ExperimentRunnerTest(unittest.TestCase):
self.mock_logger.Reset()
config.AddConfig('no_email', True)
self.exp.email_to = ['jane.doe@google.com']
- er = experiment_runner.ExperimentRunner(self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
# Test 1. Config:no_email; exp.email_to set ==> no email sent
er._Email(self.exp)
self.assertEqual(mock_getuser.call_count, 0)
@@ -295,8 +304,8 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(mock_html_report.call_count, 1)
self.assertEqual(len(mock_emailer.call_args), 2)
self.assertEqual(mock_emailer.call_args[0],
- (['jane.doe@google.com', 'john.smith@google.com'],
- ': image1 vs. image2',
+ (['jane.doe@google.com',
+ 'john.smith@google.com'], ': image1 vs. image2',
"<pre style='font-size: 13px'>This is a fake text "
'report.\nResults are stored in _results.\n</pre>'))
self.assertTrue(type(mock_emailer.call_args[1]) is dict)
@@ -325,8 +334,10 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(mock_html_report.call_count, 1)
self.assertEqual(len(mock_emailer.call_args), 2)
self.assertEqual(mock_emailer.call_args[0],
- (['jane.doe@google.com', 'john.smith@google.com',
- 'john.smith@google.com'], ': image1 vs. image2',
+ ([
+ 'jane.doe@google.com', 'john.smith@google.com',
+ 'john.smith@google.com'
+ ], ': image1 vs. image2',
"<pre style='font-size: 13px'>This is a fake text "
'report.\nResults are stored in _results.\n</pre>'))
self.assertTrue(type(mock_emailer.call_args[1]) is dict)
@@ -393,15 +404,16 @@ class ExperimentRunnerTest(unittest.TestCase):
self.mock_logger.Reset()
self.exp.results_directory = '/usr/local/crosperf-results'
bench_run = self.exp.benchmark_runs[5]
- bench_path = '/usr/local/crosperf-results/' + filter(str.isalnum,
- bench_run.name)
+ bench_path = '/usr/local/crosperf-results/' + filter(
+ str.isalnum, bench_run.name)
self.assertEqual(len(self.exp.benchmark_runs), 6)
- er = experiment_runner.ExperimentRunner(self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
# Test 1. Make sure nothing is done if _terminated is true.
er._terminated = True
@@ -438,12 +450,12 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(mock_rmdir.call_count, 1)
mock_rmdir.called_with('/usr/local/crosperf-results')
self.assertEqual(self.mock_logger.LogOutputCount, 4)
- self.assertEqual(
- self.mock_logger.output_msgs,
- ['Storing experiment file in /usr/local/crosperf-results.',
- 'Storing results report in /usr/local/crosperf-results.',
- 'Storing email message body in /usr/local/crosperf-results.',
- 'Storing results of each benchmark run.'])
+ self.assertEqual(self.mock_logger.output_msgs, [
+ 'Storing experiment file in /usr/local/crosperf-results.',
+ 'Storing results report in /usr/local/crosperf-results.',
+ 'Storing email message body in /usr/local/crosperf-results.',
+ 'Storing results of each benchmark run.'
+ ])
if __name__ == '__main__':
diff --git a/crosperf/experiment_status.py b/crosperf/experiment_status.py
index 627db99e..c6610433 100644
--- a/crosperf/experiment_status.py
+++ b/crosperf/experiment_status.py
@@ -80,8 +80,8 @@ class ExperimentStatus(object):
strings.append('Current time: %s Elapsed: %s ETA: %s' %
(datetime.datetime.now(),
datetime.timedelta(seconds=int(elapsed_time)), eta))
- strings.append(self._GetProgressBar(self.experiment.num_complete,
- self.num_total))
+ strings.append(
+ self._GetProgressBar(self.experiment.num_complete, self.num_total))
return '\n'.join(strings)
def GetStatusString(self):
@@ -107,8 +107,8 @@ class ExperimentStatus(object):
self.experiment.machine_manager.AsString())
elif self.experiment.schedv2():
# In schedv2 mode, we always print out thread status.
- thread_status = thread_status_format.format(self.experiment.schedv2(
- ).threads_status_as_string())
+ thread_status = thread_status_format.format(
+ self.experiment.schedv2().threads_status_as_string())
result = '{}{}'.format(thread_status, '\n'.join(status_strings))
diff --git a/crosperf/field.py b/crosperf/field.py
index bc92e2cc..6821d4d3 100644
--- a/crosperf/field.py
+++ b/crosperf/field.py
@@ -68,8 +68,8 @@ class BooleanField(Field):
return True
elif value.lower() == 'false':
return False
- raise TypeError("Invalid value for '%s'. Must be true or false." %
- self.name)
+ raise TypeError(
+ "Invalid value for '%s'. Must be true or false." % self.name)
class IntegerField(Field):
diff --git a/crosperf/flag_test_unittest.py b/crosperf/flag_test_unittest.py
index 9f2a7136..0e743274 100755
--- a/crosperf/flag_test_unittest.py
+++ b/crosperf/flag_test_unittest.py
@@ -1,7 +1,6 @@
#!/usr/bin/env python2
#
# Copyright 2014 Google Inc. All Rights Reserved.
-
"""The unittest of flags."""
from __future__ import print_function
@@ -12,6 +11,7 @@ import unittest
class FlagTestCase(unittest.TestCase):
"""The unittest class."""
+
def test_test_flag(self):
# Verify that test_flag.is_test exists, that it is a list,
# and that it contains 1 element.
diff --git a/crosperf/generate_report.py b/crosperf/generate_report.py
index e0add994..fd7a2cf7 100755
--- a/crosperf/generate_report.py
+++ b/crosperf/generate_report.py
@@ -3,7 +3,6 @@
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Given a specially-formatted JSON object, generates results report(s).
The JSON object should look like:
@@ -62,10 +61,12 @@ from results_report import TextResultsReport
def CountBenchmarks(benchmark_runs):
"""Counts the number of iterations for each benchmark in benchmark_runs."""
+
# Example input for benchmark_runs:
# {"bench": [[run1, run2, run3], [run1, run2, run3, run4]]}
def _MaxLen(results):
return 0 if not results else max(len(r) for r in results)
+
return [(name, _MaxLen(results))
for name, results in benchmark_runs.iteritems()]
@@ -121,8 +122,8 @@ def CutResultsInPlace(results, max_keys=50, complain_on_update=True):
len(retained_keys) != len(removable_keys)
if actually_updated and complain_on_update:
- print("Warning: Some benchmark keyvals have been truncated.",
- file=sys.stderr)
+ print(
+ 'Warning: Some benchmark keyvals have been truncated.', file=sys.stderr)
return results
@@ -144,7 +145,7 @@ def _ConvertToASCII(obj):
def _PositiveInt(s):
i = int(s)
if i < 0:
- raise argparse.ArgumentTypeError('%d is not a positive integer.' % (i, ))
+ raise argparse.ArgumentTypeError('%d is not a positive integer.' % (i,))
return i
@@ -182,13 +183,13 @@ def WriteFile(output_prefix, extension, get_contents, overwrite, verbose):
"""
if output_prefix == '-':
if verbose:
- print('Writing %s report to stdout' % (extension, ), file=sys.stderr)
+ print('Writing %s report to stdout' % (extension,), file=sys.stderr)
sys.stdout.write(get_contents())
return
file_name = '%s.%s' % (output_prefix, extension)
if not overwrite and os.path.exists(file_name):
- raise IOError('Refusing to write %s -- it already exists' % (file_name, ))
+ raise IOError('Refusing to write %s -- it already exists' % (file_name,))
with open(file_name, 'w') as out_file:
if verbose:
@@ -200,7 +201,7 @@ def RunActions(actions, benchmark_results, output_prefix, overwrite, verbose):
"""Runs `actions`, returning True if all succeeded."""
failed = False
- report_ctor = None # Make the linter happy
+ report_ctor = None # Make the linter happy
for report_ctor, extension in actions:
try:
get_contents = lambda: report_ctor(benchmark_results).GetReport()
@@ -225,27 +226,49 @@ def _NoPerfReport(_label_name, _benchmark_name, _benchmark_iteration):
def _ParseArgs(argv):
parser = argparse.ArgumentParser(description='Turns JSON into results '
'report(s).')
- parser.add_argument('-v', '--verbose', action='store_true',
- help='Be a tiny bit more verbose.')
- parser.add_argument('-f', '--force', action='store_true',
- help='Overwrite existing results files.')
- parser.add_argument('-o', '--output', default='report', type=str,
- help='Prefix of the output filename (default: report). '
- '- means stdout.')
- parser.add_argument('-i', '--input', required=True, type=str,
- help='Where to read the JSON from. - means stdin.')
- parser.add_argument('-l', '--statistic-limit', default=0, type=_PositiveInt,
- help='The maximum number of benchmark statistics to '
- 'display from a single run. 0 implies unlimited.')
- parser.add_argument('--json', action='store_true',
- help='Output a JSON report.')
- parser.add_argument('--text', action='store_true',
- help='Output a text report.')
- parser.add_argument('--email', action='store_true',
- help='Output a text report suitable for email.')
- parser.add_argument('--html', action='store_true',
- help='Output an HTML report (this is the default if no '
- 'other output format is specified).')
+ parser.add_argument(
+ '-v',
+ '--verbose',
+ action='store_true',
+ help='Be a tiny bit more verbose.')
+ parser.add_argument(
+ '-f',
+ '--force',
+ action='store_true',
+ help='Overwrite existing results files.')
+ parser.add_argument(
+ '-o',
+ '--output',
+ default='report',
+ type=str,
+ help='Prefix of the output filename (default: report). '
+ '- means stdout.')
+ parser.add_argument(
+ '-i',
+ '--input',
+ required=True,
+ type=str,
+ help='Where to read the JSON from. - means stdin.')
+ parser.add_argument(
+ '-l',
+ '--statistic-limit',
+ default=0,
+ type=_PositiveInt,
+ help='The maximum number of benchmark statistics to '
+ 'display from a single run. 0 implies unlimited.')
+ parser.add_argument(
+ '--json', action='store_true', help='Output a JSON report.')
+ parser.add_argument(
+ '--text', action='store_true', help='Output a text report.')
+ parser.add_argument(
+ '--email',
+ action='store_true',
+ help='Output a text report suitable for email.')
+ parser.add_argument(
+ '--html',
+ action='store_true',
+ help='Output an HTML report (this is the default if no '
+ 'other output format is specified).')
return parser.parse_args(argv)
@@ -263,13 +286,13 @@ def Main(argv):
benches = CountBenchmarks(results)
# In crosperf, a label is essentially a platform+configuration. So, a name of
# a label and a name of a platform are equivalent for our purposes.
- bench_results = BenchmarkResults(label_names=platform_names,
- benchmark_names_and_iterations=benches,
- run_keyvals=results,
- read_perf_report=_NoPerfReport)
+ bench_results = BenchmarkResults(
+ label_names=platform_names,
+ benchmark_names_and_iterations=benches,
+ run_keyvals=results,
+ read_perf_report=_NoPerfReport)
actions = _AccumulateActions(args)
- ok = RunActions(actions, bench_results, args.output, args.force,
- args.verbose)
+ ok = RunActions(actions, bench_results, args.output, args.force, args.verbose)
return 0 if ok else 1
diff --git a/crosperf/generate_report_unittest.py b/crosperf/generate_report_unittest.py
index a5d00635..bbb0c0ae 100755
--- a/crosperf/generate_report_unittest.py
+++ b/crosperf/generate_report_unittest.py
@@ -19,8 +19,10 @@ import unittest
import generate_report
import results_report
+
class _ContextualStringIO(StringIO):
"""StringIO that can be used in `with` statements."""
+
def __init__(self, *args):
StringIO.__init__(self, *args)
@@ -33,6 +35,7 @@ class _ContextualStringIO(StringIO):
class GenerateReportTests(unittest.TestCase):
"""Tests for generate_report.py."""
+
def testCountBenchmarks(self):
runs = {
'foo': [[{}, {}, {}], [{}, {}, {}, {}]],
@@ -45,16 +48,33 @@ class GenerateReportTests(unittest.TestCase):
def testCutResultsInPlace(self):
bench_data = {
- 'foo': [[{'a': 1, 'b': 2, 'c': 3}, {'a': 3, 'b': 2.5, 'c': 1}]],
- 'bar': [[{'d': 11, 'e': 12, 'f': 13}]],
- 'baz': [[{'g': 12, 'h': 13}]],
- 'qux': [[{'i': 11}]],
+ 'foo': [[{
+ 'a': 1,
+ 'b': 2,
+ 'c': 3
+ }, {
+ 'a': 3,
+ 'b': 2.5,
+ 'c': 1
+ }]],
+ 'bar': [[{
+ 'd': 11,
+ 'e': 12,
+ 'f': 13
+ }]],
+ 'baz': [[{
+ 'g': 12,
+ 'h': 13
+ }]],
+ 'qux': [[{
+ 'i': 11
+ }]],
}
original_bench_data = copy.deepcopy(bench_data)
max_keys = 2
- results = generate_report.CutResultsInPlace(bench_data, max_keys=max_keys,
- complain_on_update=False)
+ results = generate_report.CutResultsInPlace(
+ bench_data, max_keys=max_keys, complain_on_update=False)
# Cuts should be in-place.
self.assertIs(results, bench_data)
self.assertItemsEqual(original_bench_data.keys(), bench_data.keys())
@@ -68,15 +88,21 @@ class GenerateReportTests(unittest.TestCase):
# sub_keyvals must be a subset of original_keyvals
self.assertDictContainsSubset(sub_keyvals, original_keyvals)
-
def testCutResultsInPlaceLeavesRetval(self):
bench_data = {
- 'foo': [[{'retval': 0, 'a': 1}]],
- 'bar': [[{'retval': 1}]],
- 'baz': [[{'RETVAL': 1}]],
+ 'foo': [[{
+ 'retval': 0,
+ 'a': 1
+ }]],
+ 'bar': [[{
+ 'retval': 1
+ }]],
+ 'baz': [[{
+ 'RETVAL': 1
+ }]],
}
- results = generate_report.CutResultsInPlace(bench_data, max_keys=0,
- complain_on_update=False)
+ results = generate_report.CutResultsInPlace(
+ bench_data, max_keys=0, complain_on_update=False)
# Just reach into results assuming we know it otherwise outputs things
# sanely. If it doesn't, testCutResultsInPlace should give an indication as
# to what, exactly, is broken.
@@ -121,12 +147,12 @@ class GenerateReportTests(unittest.TestCase):
# We only mock print_exc so we don't have exception info printed to stdout.
@mock.patch('generate_report.WriteFile', side_effect=ValueError('Oh noo'))
@mock.patch('traceback.print_exc')
- def testRunActionsRunsAllActionsRegardlessOfExceptions(self, mock_print_exc,
- mock_write_file):
+ def testRunActionsRunsAllActionsRegardlessOfExceptions(
+ self, mock_print_exc, mock_write_file):
actions = [(None, 'json'), (None, 'html'), (None, 'text'), (None, 'email')]
output_prefix = '-'
- ok = generate_report.RunActions(actions, {}, output_prefix, overwrite=False,
- verbose=False)
+ ok = generate_report.RunActions(
+ actions, {}, output_prefix, overwrite=False, verbose=False)
self.assertFalse(ok)
self.assertEqual(mock_write_file.call_count, len(actions))
self.assertEqual(mock_print_exc.call_count, len(actions))
@@ -135,8 +161,8 @@ class GenerateReportTests(unittest.TestCase):
def testRunActionsReturnsTrueIfAllActionsSucceed(self, mock_write_file):
actions = [(None, 'json'), (None, 'html'), (None, 'text')]
output_prefix = '-'
- ok = generate_report.RunActions(actions, {}, output_prefix, overwrite=False,
- verbose=False)
+ ok = generate_report.RunActions(
+ actions, {}, output_prefix, overwrite=False, verbose=False)
self.assertEqual(mock_write_file.call_count, len(actions))
self.assertTrue(ok)
diff --git a/crosperf/image_checksummer.py b/crosperf/image_checksummer.py
index e330084e..f5862e4d 100644
--- a/crosperf/image_checksummer.py
+++ b/crosperf/image_checksummer.py
@@ -25,8 +25,8 @@ class ImageChecksummer(object):
def Checksum(self):
with self._lock:
if not self._checksum:
- logger.GetLogger().LogOutput("Acquiring checksum for '%s'." %
- self.label.name)
+ logger.GetLogger().LogOutput(
+ "Acquiring checksum for '%s'." % self.label.name)
self._checksum = None
if self.label.image_type != 'local':
raise RuntimeError('Called Checksum on non-local image!')
@@ -48,8 +48,8 @@ class ImageChecksummer(object):
def __new__(cls, *args, **kwargs):
with cls._lock:
if not cls._instance:
- cls._instance = super(ImageChecksummer, cls).__new__(cls, *args,
- **kwargs)
+ cls._instance = super(ImageChecksummer, cls).__new__(
+ cls, *args, **kwargs)
return cls._instance
def Checksum(self, label, log_level):
diff --git a/crosperf/machine_image_manager.py b/crosperf/machine_image_manager.py
index 3cc464bb..2ad750d3 100644
--- a/crosperf/machine_image_manager.py
+++ b/crosperf/machine_image_manager.py
@@ -1,10 +1,9 @@
-
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""MachineImageManager allocates images to duts."""
+
class MachineImageManager(object):
"""Management of allocating images to duts.
@@ -132,8 +131,7 @@ class MachineImageManager(object):
* Special / common case to handle seperately
We have only 1 dut or if we have only 1 label, that's simple enough.
-
- """
+ """
def __init__(self, labels, duts):
self.labels_ = labels
@@ -158,13 +156,13 @@ class MachineImageManager(object):
def compute_initial_allocation(self):
"""Compute the initial label-dut allocation.
- This method finds the most efficient way that every label gets imaged at
- least once.
+ This method finds the most efficient way that every label gets imaged at
+ least once.
- Returns:
- False, only if not all labels could be imaged to a certain machine,
- otherwise True.
- """
+ Returns:
+ False, only if not all labels could be imaged to a certain machine,
+ otherwise True.
+ """
if self.n_duts_ == 1:
for i, v in self.matrix_vertical_generator(0):
@@ -196,15 +194,15 @@ class MachineImageManager(object):
def allocate(self, dut, schedv2=None):
"""Allocate a label for dut.
- Args:
- dut: the dut that asks for a new image.
- schedv2: the scheduling instance, we need the benchmark run
- information with schedv2 for a better allocation.
+ Args:
+ dut: the dut that asks for a new image.
+ schedv2: the scheduling instance, we need the benchmark run
+ information with schedv2 for a better allocation.
- Returns:
- a label to image onto the dut or None if no more available images for
- the dut.
- """
+ Returns:
+ a label to image onto the dut or None if no more available images for
+ the dut.
+ """
j = self.dut_name_ordinal_[dut.name]
# 'can_' prefix means candidate label's.
can_reimage_number = 999
@@ -270,16 +268,16 @@ class MachineImageManager(object):
def matrix_vertical_generator(self, col):
"""Iterate matrix vertically at column 'col'.
- Yield row number i and value at matrix_[i][col].
- """
+ Yield row number i and value at matrix_[i][col].
+ """
for i, _ in enumerate(self.labels_):
yield i, self.matrix_[i][col]
def matrix_horizontal_generator(self, row):
"""Iterate matrix horizontally at row 'row'.
- Yield col number j and value at matrix_[row][j].
- """
+ Yield col number j and value at matrix_[row][j].
+ """
for j, _ in enumerate(self.duts_):
yield j, self.matrix_[row][j]
diff --git a/crosperf/machine_image_manager_unittest.py b/crosperf/machine_image_manager_unittest.py
index fe41dc09..02afaa06 100755
--- a/crosperf/machine_image_manager_unittest.py
+++ b/crosperf/machine_image_manager_unittest.py
@@ -1,7 +1,6 @@
#!/usr/bin/env python2
# Copyright 2015 Google Inc. All Rights Reserved.
-
"""Unit tests for the MachineImageManager class."""
from __future__ import print_function
@@ -23,14 +22,14 @@ class MockLabel(object):
"""Provide hash function for label.
This is required because Label object is used inside a dict as key.
- """
+ """
return hash(self.name)
def __eq__(self, other):
"""Provide eq function for label.
This is required because Label object is used inside a dict as key.
- """
+ """
return isinstance(other, MockLabel) and other.name == self.name
@@ -52,6 +51,7 @@ class MachineImageManagerTester(unittest.TestCase):
return duts
def print_matrix(self, matrix):
+ # pylint: disable=expression-not-assigned
for r in matrix:
for v in r:
print('{} '.format('.' if v == ' ' else v)),
@@ -97,53 +97,63 @@ class MachineImageManagerTester(unittest.TestCase):
self.assertTrue(mim.matrix_ == [['Y', 'Y', 'Y']])
def test_case1(self):
- labels = [MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']),
- MockLabel('l3', ['m1'])]
+ labels = [
+ MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']), MockLabel(
+ 'l3', ['m1'])
+ ]
duts = [MockDut('m1'), MockDut('m2'), MockDut('m3')]
mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.matrix_ == [[' ', ' ', 'X'], ['X', ' ', ' '], [' ', 'X',
- 'X']])
+ self.assertTrue(mim.matrix_ == [[' ', ' ', 'X'], ['X', ' ', ' '],
+ [' ', 'X', 'X']])
mim.compute_initial_allocation()
- self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X',
- 'X']])
+ self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'],
+ ['Y', 'X', 'X']])
def test_case2(self):
- labels = [MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']),
- MockLabel('l3', ['m1'])]
+ labels = [
+ MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']), MockLabel(
+ 'l3', ['m1'])
+ ]
duts = [MockDut('m1'), MockDut('m2'), MockDut('m3')]
mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.matrix_ == [[' ', ' ', 'X'], ['X', ' ', ' '], [' ', 'X',
- 'X']])
+ self.assertTrue(mim.matrix_ == [[' ', ' ', 'X'], ['X', ' ', ' '],
+ [' ', 'X', 'X']])
mim.compute_initial_allocation()
- self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X',
- 'X']])
+ self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'],
+ ['Y', 'X', 'X']])
def test_case3(self):
- labels = [MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']),
- MockLabel('l3', ['m1'])]
+ labels = [
+ MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']), MockLabel(
+ 'l3', ['m1'])
+ ]
duts = [MockDut('m1', labels[0]), MockDut('m2'), MockDut('m3')]
mim = MachineImageManager(labels, duts)
mim.compute_initial_allocation()
- self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X',
- 'X']])
+ self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'],
+ ['Y', 'X', 'X']])
def test_case4(self):
- labels = [MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']),
- MockLabel('l3', ['m1'])]
+ labels = [
+ MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']), MockLabel(
+ 'l3', ['m1'])
+ ]
duts = [MockDut('m1'), MockDut('m2', labels[0]), MockDut('m3')]
mim = MachineImageManager(labels, duts)
mim.compute_initial_allocation()
- self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X',
- 'X']])
+ self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'],
+ ['Y', 'X', 'X']])
def test_case5(self):
- labels = [MockLabel('l1', ['m3']), MockLabel('l2', ['m3']),
- MockLabel('l3', ['m1'])]
+ labels = [
+ MockLabel('l1', ['m3']), MockLabel('l2', ['m3']), MockLabel(
+ 'l3', ['m1'])
+ ]
duts = self.gen_duts_by_name('m1', 'm2', 'm3')
mim = MachineImageManager(labels, duts)
self.assertTrue(mim.compute_initial_allocation())
- self.assertTrue(mim.matrix_ == [['X', 'X', 'Y'], ['X', 'X', 'Y'], ['Y', 'X',
- 'X']])
+ self.assertTrue(mim.matrix_ == [['X', 'X', 'Y'], ['X', 'X', 'Y'],
+ ['Y', 'X', 'X']])
def test_2x2_with_allocation(self):
labels = [MockLabel('l0'), MockLabel('l1')]
@@ -193,29 +203,37 @@ class MachineImageManagerTester(unittest.TestCase):
self.assertTrue(mim.compute_initial_allocation())
def test_10x10_fully_random(self):
- inp = ['X . . . X X . X X .', 'X X . X . X . X X .',
- 'X X X . . X . X . X', 'X . X X . . X X . X',
- 'X X X X . . . X . .', 'X X . X . X . . X .',
- '. X . X . X X X . .', '. X . X X . X X . .',
- 'X X . . . X X X . .', '. X X X X . . . . X']
- output = ['X Y . . X X . X X .', 'X X Y X . X . X X .',
- 'X X X Y . X . X . X', 'X . X X Y . X X . X',
- 'X X X X . Y . X . .', 'X X . X . X Y . X .',
- 'Y X . X . X X X . .', '. X . X X . X X Y .',
- 'X X . . . X X X . Y', '. X X X X . . Y . X']
+ inp = [
+ 'X . . . X X . X X .', 'X X . X . X . X X .',
+ 'X X X . . X . X . X', 'X . X X . . X X . X',
+ 'X X X X . . . X . .', 'X X . X . X . . X .',
+ '. X . X . X X X . .', '. X . X X . X X . .',
+ 'X X . . . X X X . .', '. X X X X . . . . X'
+ ]
+ output = [
+ 'X Y . . X X . X X .', 'X X Y X . X . X X .',
+ 'X X X Y . X . X . X', 'X . X X Y . X X . X',
+ 'X X X X . Y . X . .', 'X X . X . X Y . X .',
+ 'Y X . X . X X X . .', '. X . X X . X X Y .',
+ 'X X . . . X X X . Y', '. X X X X . . Y . X'
+ ]
self.pattern_based_test(inp, output)
def test_10x10_fully_random2(self):
- inp = ['X . X . . X . X X X', 'X X X X X X . . X .',
- 'X . X X X X X . . X', 'X X X . X . X X . .',
- '. X . X . X X X X X', 'X X X X X X X . . X',
- 'X . X X X X X . . X', 'X X X . X X X X . .',
- 'X X X . . . X X X X', '. X X . X X X . X X']
- output = ['X . X Y . X . X X X', 'X X X X X X Y . X .',
- 'X Y X X X X X . . X', 'X X X . X Y X X . .',
- '. X Y X . X X X X X', 'X X X X X X X Y . X',
- 'X . X X X X X . Y X', 'X X X . X X X X . Y',
- 'X X X . Y . X X X X', 'Y X X . X X X . X X']
+ inp = [
+ 'X . X . . X . X X X', 'X X X X X X . . X .',
+ 'X . X X X X X . . X', 'X X X . X . X X . .',
+ '. X . X . X X X X X', 'X X X X X X X . . X',
+ 'X . X X X X X . . X', 'X X X . X X X X . .',
+ 'X X X . . . X X X X', '. X X . X X X . X X'
+ ]
+ output = [
+ 'X . X Y . X . X X X', 'X X X X X X Y . X .',
+ 'X Y X X X X X . . X', 'X X X . X Y X X . .',
+ '. X Y X . X X X X X', 'X X X X X X X Y . X',
+ 'X . X X X X X . Y X', 'X X X . X X X X . Y',
+ 'X X X . Y . X X X X', 'Y X X . X X X . X X'
+ ]
self.pattern_based_test(inp, output)
def test_3x4_with_allocation(self):
@@ -273,7 +291,7 @@ class MachineImageManagerTester(unittest.TestCase):
l1 Y X X
l2 Y X X
- """
+ """
inp = ['. X X', '. X X', '. X X']
output = ['Y X X', 'Y X X', 'Y X X']
diff --git a/crosperf/machine_manager.py b/crosperf/machine_manager.py
index 2fdf141b..b9dda148 100644
--- a/crosperf/machine_manager.py
+++ b/crosperf/machine_manager.py
@@ -78,9 +78,8 @@ class CrosMachine(object):
def IsReachable(self):
command = 'ls'
- ret = self.ce.CrosRunCommand(command,
- machine=self.name,
- chromeos_root=self.chromeos_root)
+ ret = self.ce.CrosRunCommand(
+ command, machine=self.name, chromeos_root=self.chromeos_root)
if ret:
return False
return True
@@ -121,9 +120,7 @@ class CrosMachine(object):
#meminfo, the assert does not catch it either
command = 'cat /proc/meminfo'
ret, self.meminfo, _ = self.ce.CrosRunCommandWOutput(
- command,
- machine=self.name,
- chromeos_root=self.chromeos_root)
+ command, machine=self.name, chromeos_root=self.chromeos_root)
assert ret == 0, 'Could not get meminfo from machine: %s' % self.name
if ret == 0:
self._ParseMemoryInfo()
@@ -131,9 +128,7 @@ class CrosMachine(object):
def _GetCPUInfo(self):
command = 'cat /proc/cpuinfo'
ret, self.cpuinfo, _ = self.ce.CrosRunCommandWOutput(
- command,
- machine=self.name,
- chromeos_root=self.chromeos_root)
+ command, machine=self.name, chromeos_root=self.chromeos_root)
assert ret == 0, 'Could not get cpuinfo from machine: %s' % self.name
def _ComputeMachineChecksumString(self):
@@ -153,9 +148,7 @@ class CrosMachine(object):
def _GetMachineID(self):
command = 'dump_vpd_log --full --stdout'
_, if_out, _ = self.ce.CrosRunCommandWOutput(
- command,
- machine=self.name,
- chromeos_root=self.chromeos_root)
+ command, machine=self.name, chromeos_root=self.chromeos_root)
b = if_out.splitlines()
a = [l for l in b if 'Product' in l]
if len(a):
@@ -163,9 +156,7 @@ class CrosMachine(object):
return
command = 'ifconfig'
_, if_out, _ = self.ce.CrosRunCommandWOutput(
- command,
- machine=self.name,
- chromeos_root=self.chromeos_root)
+ command, machine=self.name, chromeos_root=self.chromeos_root)
b = if_out.splitlines()
a = [l for l in b if 'HWaddr' in l]
if len(a):
@@ -222,8 +213,8 @@ class MachineManager(object):
self.logger = lgr or logger.GetLogger()
if self.locks_dir and not os.path.isdir(self.locks_dir):
- raise MissingLocksDirectory('Cannot access locks directory: %s' %
- self.locks_dir)
+ raise MissingLocksDirectory(
+ 'Cannot access locks directory: %s' % self.locks_dir)
self._initialized_machines = []
self.chromeos_root = chromeos_root
@@ -242,12 +233,10 @@ class MachineManager(object):
cmd = '/opt/google/chrome/chrome --version'
ret, version, _ = self.ce.CrosRunCommandWOutput(
- cmd,
- machine=machine.name,
- chromeos_root=self.chromeos_root)
+ cmd, machine=machine.name, chromeos_root=self.chromeos_root)
if ret != 0:
- raise CrosCommandError("Couldn't get Chrome version from %s." %
- machine.name)
+ raise CrosCommandError(
+ "Couldn't get Chrome version from %s." % machine.name)
if ret != 0:
version = ''
@@ -261,11 +250,13 @@ class MachineManager(object):
chromeos_root = label.chromeos_root
if not chromeos_root:
chromeos_root = self.chromeos_root
- image_chromeos_args = [image_chromeos.__file__, '--no_lock',
- '--chromeos_root=%s' % chromeos_root,
- '--image=%s' % label.chromeos_image,
- '--image_args=%s' % label.image_args, '--remote=%s' %
- machine.name, '--logging_level=%s' % self.log_level]
+ image_chromeos_args = [
+ image_chromeos.__file__, '--no_lock',
+ '--chromeos_root=%s' % chromeos_root,
+ '--image=%s' % label.chromeos_image,
+ '--image_args=%s' % label.image_args, '--remote=%s' % machine.name,
+ '--logging_level=%s' % self.log_level
+ ]
if label.board:
image_chromeos_args.append('--board=%s' % label.board)
@@ -287,9 +278,8 @@ class MachineManager(object):
cmd = 'reboot && exit'
if self.log_level != 'verbose':
self.logger.LogOutput('reboot & exit.')
- self.ce.CrosRunCommand(cmd,
- machine=machine.name,
- chromeos_root=self.chromeos_root)
+ self.ce.CrosRunCommand(
+ cmd, machine=machine.name, chromeos_root=self.chromeos_root)
time.sleep(60)
if self.log_level != 'verbose':
self.logger.LogOutput('Pushing image onto machine.')
@@ -349,8 +339,8 @@ class MachineManager(object):
locked = True
if self.locks_dir:
locked = file_lock_machine.Machine(cros_machine.name,
- self.locks_dir).Lock(True,
- sys.argv[0])
+ self.locks_dir).Lock(
+ True, sys.argv[0])
if locked:
self._machines.append(cros_machine)
command = 'cat %s' % CHECKSUM_FILE
@@ -371,8 +361,8 @@ class MachineManager(object):
if self.log_level != 'verbose':
self.logger.LogOutput('Setting up remote access to %s' % machine_name)
- self.logger.LogOutput('Checking machine characteristics for %s' %
- machine_name)
+ self.logger.LogOutput(
+ 'Checking machine characteristics for %s' % machine_name)
cm = CrosMachine(machine_name, self.chromeos_root, self.log_level)
if cm.machine_checksum:
self._all_machines.append(cm)
@@ -411,17 +401,19 @@ class MachineManager(object):
self.acquire_timeout -= sleep_time
if self.acquire_timeout < 0:
- self.logger.LogFatal('Could not acquire any of the '
- "following machines: '%s'" %
- ', '.join(machine.name for machine in machines))
+ self.logger.LogFatal(
+ 'Could not acquire any of the '
+ "following machines: '%s'" % ', '.join(machine.name
+ for machine in machines))
### for m in self._machines:
### if (m.locked and time.time() - m.released_time < 10 and
### m.checksum == image_checksum):
### return None
- unlocked_machines = [machine
- for machine in self.GetAvailableMachines(label)
- if not machine.locked]
+ unlocked_machines = [
+ machine for machine in self.GetAvailableMachines(label)
+ if not machine.locked
+ ]
for m in unlocked_machines:
if image_checksum and m.checksum == image_checksum:
m.locked = True
@@ -651,8 +643,8 @@ class MockMachineManager(MachineManager):
"""Mock machine manager class."""
def __init__(self, chromeos_root, acquire_timeout, log_level, locks_dir):
- super(MockMachineManager, self).__init__(
- chromeos_root, acquire_timeout, log_level, locks_dir)
+ super(MockMachineManager, self).__init__(chromeos_root, acquire_timeout,
+ log_level, locks_dir)
def _TryToLockMachine(self, cros_machine):
self._machines.append(cros_machine)
@@ -663,8 +655,8 @@ class MockMachineManager(MachineManager):
for m in self._all_machines:
assert m.name != machine_name, 'Tried to double-add %s' % machine_name
cm = MockCrosMachine(machine_name, self.chromeos_root, self.log_level)
- assert cm.machine_checksum, ('Could not find checksum for machine %s' %
- machine_name)
+ assert cm.machine_checksum, (
+ 'Could not find checksum for machine %s' % machine_name)
# In Original MachineManager, the test is 'if cm.machine_checksum:' - if a
# machine is unreachable, then its machine_checksum is None. Here we
# cannot do this, because machine_checksum is always faked, so we directly
diff --git a/crosperf/machine_manager_unittest.py b/crosperf/machine_manager_unittest.py
index 8652f171..b267d698 100755
--- a/crosperf/machine_manager_unittest.py
+++ b/crosperf/machine_manager_unittest.py
@@ -41,22 +41,21 @@ class MyMachineManager(machine_manager.MachineManager):
assert m.name != machine_name, 'Tried to double-add %s' % machine_name
cm = machine_manager.MockCrosMachine(machine_name, self.chromeos_root,
'average')
- assert cm.machine_checksum, ('Could not find checksum for machine %s' %
- machine_name)
+ assert cm.machine_checksum, (
+ 'Could not find checksum for machine %s' % machine_name)
self._all_machines.append(cm)
CHROMEOS_ROOT = '/tmp/chromeos-root'
MACHINE_NAMES = ['lumpy1', 'lumpy2', 'lumpy3', 'daisy1', 'daisy2']
-LABEL_LUMPY = label.MockLabel('lumpy', 'lumpy_chromeos_image', 'autotest_dir',
- CHROMEOS_ROOT, 'lumpy',
- ['lumpy1', 'lumpy2', 'lumpy3', 'lumpy4'], '', '',
- False, 'average,'
- 'gcc', None)
+LABEL_LUMPY = label.MockLabel(
+ 'lumpy', 'lumpy_chromeos_image', 'autotest_dir', CHROMEOS_ROOT, 'lumpy',
+ ['lumpy1', 'lumpy2', 'lumpy3', 'lumpy4'], '', '', False, 'average,'
+ 'gcc', None)
LABEL_MIX = label.MockLabel('mix', 'chromeos_image', 'autotest_dir',
CHROMEOS_ROOT, 'mix',
- ['daisy1', 'daisy2', 'lumpy3', 'lumpy4'], '', '',
- False, 'average', 'gcc', None)
+ ['daisy1', 'daisy2', 'lumpy3',
+ 'lumpy4'], '', '', False, 'average', 'gcc', None)
class MachineManagerTest(unittest.TestCase):
@@ -85,10 +84,9 @@ class MachineManagerTest(unittest.TestCase):
def setUp(self, mock_isdir):
mock_isdir.return_value = True
- self.mm = machine_manager.MachineManager('/usr/local/chromeos', 0,
- 'average', None,
- self.mock_cmd_exec,
- self.mock_logger)
+ self.mm = machine_manager.MachineManager(
+ '/usr/local/chromeos', 0, 'average', None, self.mock_cmd_exec,
+ self.mock_logger)
self.mock_lumpy1.name = 'lumpy1'
self.mock_lumpy2.name = 'lumpy2'
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index 29e118e8..04e6590b 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -12,7 +12,6 @@ import pickle
import re
import tempfile
import json
-import sys
from cros_utils import command_executer
from cros_utils import misc
@@ -67,9 +66,8 @@ class Result(object):
if not os.path.isdir(dest_dir):
command = 'mkdir -p %s' % dest_dir
self.ce.RunCommand(command)
- dest_file = os.path.join(dest_dir,
- ('%s.%s' % (os.path.basename(file_to_copy),
- file_index)))
+ dest_file = os.path.join(
+ dest_dir, ('%s.%s' % (os.path.basename(file_to_copy), file_index)))
ret = self.ce.CopyFiles(file_to_copy, dest_file, recursive=False)
if ret:
raise IOError('Could not copy results file: %s' % file_to_copy)
@@ -230,10 +228,10 @@ class Result(object):
perf_data_file)
perf_report_file = '%s.report' % perf_data_file
if os.path.exists(perf_report_file):
- raise RuntimeError('Perf report file already exists: %s' %
- perf_report_file)
- chroot_perf_report_file = misc.GetInsideChrootPath(self.chromeos_root,
- perf_report_file)
+ raise RuntimeError(
+ 'Perf report file already exists: %s' % perf_report_file)
+ chroot_perf_report_file = misc.GetInsideChrootPath(
+ self.chromeos_root, perf_report_file)
perf_path = os.path.join(self.chromeos_root, 'chroot', 'usr/bin/perf')
perf_file = '/usr/sbin/perf'
@@ -366,8 +364,8 @@ class Result(object):
self.retval = pickle.load(f)
# Untar the tarball to a temporary directory
- self.temp_dir = tempfile.mkdtemp(
- dir=os.path.join(self.chromeos_root, 'chroot', 'tmp'))
+ self.temp_dir = tempfile.mkdtemp(dir=os.path.join(self.chromeos_root,
+ 'chroot', 'tmp'))
command = ('cd %s && tar xf %s' %
(self.temp_dir, os.path.join(cache_dir, AUTOTEST_TARBALL)))
@@ -439,8 +437,8 @@ class Result(object):
if ret:
command = 'rm -rf {0}'.format(temp_dir)
self.ce.RunCommand(command)
- raise RuntimeError('Could not move dir %s to dir %s' %
- (temp_dir, cache_dir))
+ raise RuntimeError('Could not move dir %s to dir %s' % (temp_dir,
+ cache_dir))
@classmethod
def CreateFromRun(cls,
diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py
index 9e97c9b1..a2480d21 100755
--- a/crosperf/results_cache_unittest.py
+++ b/crosperf/results_cache_unittest.py
@@ -268,10 +268,10 @@ class ResultTest(unittest.TestCase):
self.result.CopyResultsTo('/tmp/results/')
self.assertEqual(mockCopyFilesTo.call_count, 2)
self.assertEqual(len(mockCopyFilesTo.call_args_list), 2)
- self.assertEqual(mockCopyFilesTo.call_args_list[0][0],
- ('/tmp/results/', perf_data_files))
- self.assertEqual(mockCopyFilesTo.call_args_list[1][0],
- ('/tmp/results/', perf_report_files))
+ self.assertEqual(mockCopyFilesTo.call_args_list[0][0], ('/tmp/results/',
+ perf_data_files))
+ self.assertEqual(mockCopyFilesTo.call_args_list[1][0], ('/tmp/results/',
+ perf_report_files))
def test_get_new_keyvals(self):
kv_dict = {}
@@ -436,8 +436,10 @@ class ResultTest(unittest.TestCase):
self.assertEqual(mock_runcmd.call_args_list[0][0],
('cp -r /tmp/test_that_resultsNmq/* %s' % TMP_DIR1,))
self.assertEqual(mock_chrootruncmd.call_count, 1)
- self.assertEqual(mock_chrootruncmd.call_args_list[0][0], (
- '/tmp', ('python generate_test_report --no-color --csv %s') % TMP_DIR1))
+ self.assertEqual(
+ mock_chrootruncmd.call_args_list[0][0],
+ ('/tmp',
+ ('python generate_test_report --no-color --csv %s') % TMP_DIR1))
self.assertEqual(mock_getpath.call_count, 1)
self.assertEqual(mock_mkdtemp.call_count, 1)
self.assertEqual(res, {'Total': [10, 'score'], 'first_time': [680, 'ms']})
@@ -899,9 +901,8 @@ class TelemetryResultTest(unittest.TestCase):
self.mock_label = MockLabel('mock_label', 'chromeos_image', 'autotest_dir',
'/tmp', 'lumpy', 'remote', 'image_args',
'cache_dir', 'average', 'gcc', None)
- self.mock_machine = machine_manager.MockCrosMachine('falco.cros',
- '/tmp/chromeos',
- 'average')
+ self.mock_machine = machine_manager.MockCrosMachine(
+ 'falco.cros', '/tmp/chromeos', 'average')
def test_populate_from_run(self):
@@ -979,12 +980,10 @@ class ResultsCacheTest(unittest.TestCase):
def FakeGetMachines(label):
if label:
pass
- m1 = machine_manager.MockCrosMachine('lumpy1.cros',
- self.results_cache.chromeos_root,
- 'average')
- m2 = machine_manager.MockCrosMachine('lumpy2.cros',
- self.results_cache.chromeos_root,
- 'average')
+ m1 = machine_manager.MockCrosMachine(
+ 'lumpy1.cros', self.results_cache.chromeos_root, 'average')
+ m2 = machine_manager.MockCrosMachine(
+ 'lumpy2.cros', self.results_cache.chromeos_root, 'average')
return [m1, m2]
mock_checksum.return_value = 'FakeImageChecksumabc123'
@@ -1026,12 +1025,10 @@ class ResultsCacheTest(unittest.TestCase):
def FakeGetMachines(label):
if label:
pass
- m1 = machine_manager.MockCrosMachine('lumpy1.cros',
- self.results_cache.chromeos_root,
- 'average')
- m2 = machine_manager.MockCrosMachine('lumpy2.cros',
- self.results_cache.chromeos_root,
- 'average')
+ m1 = machine_manager.MockCrosMachine(
+ 'lumpy1.cros', self.results_cache.chromeos_root, 'average')
+ m2 = machine_manager.MockCrosMachine(
+ 'lumpy2.cros', self.results_cache.chromeos_root, 'average')
return [m1, m2]
mock_checksum.return_value = 'FakeImageChecksumabc123'
diff --git a/crosperf/results_organizer.py b/crosperf/results_organizer.py
index 097c744d..bda0cc17 100644
--- a/crosperf/results_organizer.py
+++ b/crosperf/results_organizer.py
@@ -47,6 +47,17 @@ def _Repeat(func, times):
return [func() for _ in xrange(times)]
+def _DictWithReturnValues(retval, pass_fail):
+ """Create a new dictionary pre-populated with success/fail values."""
+ new_dict = {}
+ # Note: 0 is a valid retval; test to make sure it's not None.
+ if retval is not None:
+ new_dict['retval'] = retval
+ if pass_fail:
+ new_dict[''] = pass_fail
+ return new_dict
+
+
def _GetNonDupLabel(max_dup, runs):
"""Create new list for the runs of the same label.
@@ -61,15 +72,19 @@ def _GetNonDupLabel(max_dup, runs):
"""
new_runs = []
for run in runs:
+ run_retval = run.get('retval', None)
+ run_pass_fail = run.get('', None)
new_run = {}
- added_runs = _Repeat(dict, max_dup)
+ # pylint: disable=cell-var-from-loop
+ added_runs = _Repeat(
+ lambda: _DictWithReturnValues(run_retval, run_pass_fail), max_dup)
for key, value in run.iteritems():
match = _DUP_KEY_REGEX.match(key)
if not match:
new_run[key] = value
else:
new_key, index_str = match.groups()
- added_runs[int(index_str)-1][new_key] = str(value)
+ added_runs[int(index_str) - 1][new_key] = str(value)
new_runs.append(new_run)
new_runs += added_runs
return new_runs
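
The hunk above changes _GetNonDupLabel so that every per-iteration dict split out of a duplicated key starts from the parent run's retval/pass-fail values instead of an empty dict. A simplified, self-contained sketch of that expansion; the ' {N}' key suffix used here is only an assumed stand-in for whatever _DUP_KEY_REGEX actually matches:

import re

# Assumed duplicate-key pattern, e.g. 'total {2}' -> ('total', '2').
_DUP_KEY_REGEX = re.compile(r'(\w+)\s*{(\d+)}')

def _dict_with_return_values(retval, pass_fail):
  new_dict = {}
  if retval is not None:  # 0 is a valid retval
    new_dict['retval'] = retval
  if pass_fail:
    new_dict[''] = pass_fail
  return new_dict

def expand(run, max_dup):
  base = {}
  added = [_dict_with_return_values(run.get('retval'), run.get(''))
           for _ in range(max_dup)]
  for key, value in run.items():
    match = _DUP_KEY_REGEX.match(key)
    if not match:
      base[key] = value
    else:
      new_key, index_str = match.groups()
      added[int(index_str) - 1][new_key] = str(value)
  return [base] + added

run = {'retval': 0, '': 'PASS', 'total': '7', 'total {1}': '8'}
print(expand(run, 1))
# [{'retval': 0, '': 'PASS', 'total': '7'},
#  {'retval': 0, '': 'PASS', 'total': '8'}]
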
@@ -135,6 +150,7 @@ def _MakeOrganizeResultOutline(benchmark_runs, labels):
result[name] = _Repeat(make_dicts, len(labels))
return result
+
def OrganizeResults(benchmark_runs, labels, benchmarks=None, json_report=False):
"""Create a dict from benchmark_runs.
@@ -180,10 +196,12 @@ def OrganizeResults(benchmark_runs, labels, benchmarks=None, json_report=False):
# (This can happen if, for example, the test has been disabled.)
if len(cur_dict) == 1 and cur_dict['retval'] == 0:
cur_dict['retval'] = 1
+ benchmark_run.result.keyvals['retval'] = 1
# TODO: This output should be sent via logger.
- print("WARNING: Test '%s' appears to have succeeded but returned"
- ' no results.' % benchmark.name,
- file=sys.stderr)
+ print(
+ "WARNING: Test '%s' appears to have succeeded but returned"
+ ' no results.' % benchmark.name,
+ file=sys.stderr)
if json_report and benchmark_run.machine:
cur_dict['machine'] = benchmark_run.machine.name
cur_dict['machine_checksum'] = benchmark_run.machine.checksum
diff --git a/crosperf/results_organizer_unittest.py b/crosperf/results_organizer_unittest.py
index ccf02973..e7657373 100755
--- a/crosperf/results_organizer_unittest.py
+++ b/crosperf/results_organizer_unittest.py
@@ -3,12 +3,11 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Testing of ResultsOrganizer
We create some labels, benchmark_runs and then create a ResultsOrganizer,
after that, we compare the result of ResultOrganizer.
- """
+"""
from __future__ import print_function
@@ -20,55 +19,115 @@ from results_organizer import OrganizeResults
import mock_instance
-result = {'benchmark1': [[{'': 'PASS',
- 'bool': 'True',
- 'milliseconds_1': '1',
- 'milliseconds_2': '8',
- 'milliseconds_3': '9.2',
- 'ms_1': '2.1',
- 'total': '5'}, {'test': '2'}, {'test': '4'},
- {'': 'PASS',
- 'bool': 'FALSE',
- 'milliseconds_1': '3',
- 'milliseconds_2': '5',
- 'ms_1': '2.2',
- 'total': '6'}, {'test': '3'}, {'test': '4'}],
- [{'': 'PASS',
- 'bool': 'FALSE',
- 'milliseconds_4': '30',
- 'milliseconds_5': '50',
- 'ms_1': '2.23',
- 'total': '6'}, {'test': '5'}, {'test': '4'},
- {'': 'PASS',
- 'bool': 'FALSE',
- 'milliseconds_1': '3',
- 'milliseconds_6': '7',
- 'ms_1': '2.3',
- 'total': '7'}, {'test': '2'}, {'test': '6'}]],
- 'benchmark2': [[{'': 'PASS',
- 'bool': 'TRUE',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2.3',
- 'total': '7'}, {'test': '2'}, {'test': '6'},
- {'': 'PASS',
- 'bool': 'TRUE',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2.2',
- 'total': '7'}, {'test': '2'}, {'test': '2'}],
- [{'': 'PASS',
- 'bool': 'TRUE',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2',
- 'total': '7'}, {'test': '2'}, {'test': '4'},
- {'': 'PASS',
- 'bool': 'TRUE',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '1',
- 'total': '7'}, {'test': '1'}, {'test': '6'}]]}
+result = {
+ 'benchmark1': [[{
+ '': 'PASS',
+ 'bool': 'True',
+ 'milliseconds_1': '1',
+ 'milliseconds_2': '8',
+ 'milliseconds_3': '9.2',
+ 'ms_1': '2.1',
+ 'total': '5'
+ }, {
+ '': 'PASS',
+ 'test': '2'
+ }, {
+ '': 'PASS',
+ 'test': '4'
+ }, {
+ '': 'PASS',
+ 'bool': 'FALSE',
+ 'milliseconds_1': '3',
+ 'milliseconds_2': '5',
+ 'ms_1': '2.2',
+ 'total': '6'
+ }, {
+ '': 'PASS',
+ 'test': '3'
+ }, {
+ '': 'PASS',
+ 'test': '4'
+ }], [{
+ '': 'PASS',
+ 'bool': 'FALSE',
+ 'milliseconds_4': '30',
+ 'milliseconds_5': '50',
+ 'ms_1': '2.23',
+ 'total': '6'
+ }, {
+ '': 'PASS',
+ 'test': '5'
+ }, {
+ '': 'PASS',
+ 'test': '4'
+ }, {
+ '': 'PASS',
+ 'bool': 'FALSE',
+ 'milliseconds_1': '3',
+ 'milliseconds_6': '7',
+ 'ms_1': '2.3',
+ 'total': '7'
+ }, {
+ '': 'PASS',
+ 'test': '2'
+ }, {
+ '': 'PASS',
+ 'test': '6'
+ }]],
+ 'benchmark2': [[{
+ '': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2.3',
+ 'total': '7'
+ }, {
+ '': 'PASS',
+ 'test': '2'
+ }, {
+ '': 'PASS',
+ 'test': '6'
+ }, {
+ '': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2.2',
+ 'total': '7'
+ }, {
+ '': 'PASS',
+ 'test': '2'
+ }, {
+ '': 'PASS',
+ 'test': '2'
+ }], [{
+ '': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2',
+ 'total': '7'
+ }, {
+ '': 'PASS',
+ 'test': '2'
+ }, {
+ '': 'PASS',
+ 'test': '4'
+ }, {
+ '': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '1',
+ 'total': '7'
+ }, {
+ '': 'PASS',
+ 'test': '1'
+ }, {
+ '': 'PASS',
+ 'test': '6'
+ }]]
+}
class ResultOrganizerTest(unittest.TestCase):
diff --git a/crosperf/results_report.py b/crosperf/results_report.py
index 7a465349..fac044fb 100644
--- a/crosperf/results_report.py
+++ b/crosperf/results_report.py
@@ -92,9 +92,13 @@ def _AppendUntilLengthIs(gen, the_list, target_len):
def _FilterPerfReport(event_threshold, report):
"""Filters out entries with `< event_threshold` percent in a perf report."""
+
def filter_dict(m):
- return {fn_name: pct for fn_name, pct in m.iteritems()
- if pct >= event_threshold}
+ return {
+ fn_name: pct
+ for fn_name, pct in m.iteritems() if pct >= event_threshold
+ }
+
return {event: filter_dict(m) for event, m in report.iteritems()}
@@ -109,8 +113,11 @@ class _PerfTable(object):
percentage of time spent in function_name).
"""
- def __init__(self, benchmark_names_and_iterations, label_names,
- read_perf_report, event_threshold=None):
+ def __init__(self,
+ benchmark_names_and_iterations,
+ label_names,
+ read_perf_report,
+ event_threshold=None):
"""Constructor.
read_perf_report is a function that takes a label name, benchmark name, and
@@ -143,8 +150,8 @@ class _PerfTable(object):
def _GetResultsTableHeader(ben_name, iterations):
- benchmark_info = ('Benchmark: {0}; Iterations: {1}'
- .format(ben_name, iterations))
+ benchmark_info = ('Benchmark: {0}; Iterations: {1}'.format(
+ ben_name, iterations))
cell = Cell()
cell.string_value = benchmark_info
cell.header = True
@@ -157,8 +164,9 @@ def _ParseColumn(columns, iteration):
if column.result.__class__.__name__ != 'RawResult':
new_column.append(column)
else:
- new_column.extend(Column(LiteralResult(i), Format(), str(i + 1))
- for i in xrange(iteration))
+ new_column.extend(
+ Column(LiteralResult(i), Format(), str(i + 1))
+ for i in xrange(iteration))
return new_column
@@ -199,9 +207,10 @@ def _GetPerfTables(benchmark_results, columns, table_type):
benchmark_data = p_table.perf_data[benchmark]
table = []
for event in benchmark_data:
- tg = TableGenerator(benchmark_data[event],
- benchmark_results.label_names,
- sort=TableGenerator.SORT_BY_VALUES_DESC)
+ tg = TableGenerator(
+ benchmark_data[event],
+ benchmark_results.label_names,
+ sort=TableGenerator.SORT_BY_VALUES_DESC)
table = tg.GetTable(ResultsReport.PERF_ROWS)
parsed_columns = _ParseColumn(columns, iterations)
tf = TableFormatter(table, parsed_columns)
@@ -227,22 +236,24 @@ class ResultsReport(object):
return get_tables(self.benchmark_results, columns, table_type)
def GetFullTables(self, perf=False):
- columns = [Column(RawResult(), Format()),
- Column(MinResult(), Format()),
- Column(MaxResult(), Format()),
- Column(AmeanResult(), Format()),
- Column(StdResult(), Format(), 'StdDev'),
- Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
- Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
- Column(PValueResult(), PValueFormat(), 'p-value')]
+ columns = [
+ Column(RawResult(), Format()), Column(MinResult(), Format()), Column(
+ MaxResult(), Format()), Column(AmeanResult(), Format()), Column(
+ StdResult(), Format(), 'StdDev'),
+ Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'), Column(
+ GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'), Column(
+ PValueResult(), PValueFormat(), 'p-value')
+ ]
return self._GetTablesWithColumns(columns, 'full', perf)
def GetSummaryTables(self, perf=False):
- columns = [Column(AmeanResult(), Format()),
- Column(StdResult(), Format(), 'StdDev'),
- Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
- Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
- Column(PValueResult(), PValueFormat(), 'p-value')]
+ columns = [
+ Column(AmeanResult(), Format()), Column(StdResult(), Format(),
+ 'StdDev'),
+ Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'), Column(
+ GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'), Column(
+ PValueResult(), PValueFormat(), 'p-value')
+ ]
return self._GetTablesWithColumns(columns, 'summary', perf)
@@ -299,12 +310,16 @@ class TextResultsReport(ResultsReport):
def GetStatusTable(self):
"""Generate the status table by the tabulator."""
table = [['', '']]
- columns = [Column(LiteralResult(iteration=0), Format(), 'Status'),
- Column(LiteralResult(iteration=1), Format(), 'Failing Reason')]
+ columns = [
+ Column(LiteralResult(iteration=0), Format(), 'Status'), Column(
+ LiteralResult(iteration=1), Format(), 'Failing Reason')
+ ]
for benchmark_run in self.experiment.benchmark_runs:
- status = [benchmark_run.name, [benchmark_run.timeline.GetLastEvent(),
- benchmark_run.failure_reason]]
+ status = [
+ benchmark_run.name,
+ [benchmark_run.timeline.GetLastEvent(), benchmark_run.failure_reason]
+ ]
table.append(status)
cell_table = TableFormatter(table, columns).GetCellTable('status')
return [cell_table]
@@ -316,7 +331,7 @@ class TextResultsReport(ResultsReport):
sections = []
if experiment is not None:
- title_contents = "Results report for '%s'" % (experiment.name, )
+ title_contents = "Results report for '%s'" % (experiment.name,)
else:
title_contents = 'Results report'
sections.append(self._MakeTitle(title_contents))
@@ -348,8 +363,10 @@ def _GetHTMLCharts(label_names, test_results):
# Fun fact: label_names is actually *entirely* useless as a param, since we
# never add headers. We still need to pass it anyway.
table = TableGenerator(runs, label_names).GetTable()
- columns = [Column(AmeanResult(), Format()), Column(MinResult(), Format()),
- Column(MaxResult(), Format())]
+ columns = [
+ Column(AmeanResult(), Format()), Column(MinResult(), Format()), Column(
+ MaxResult(), Format())
+ ]
tf = TableFormatter(table, columns)
data_table = tf.GetCellTable('full', headers=False)
@@ -365,10 +382,10 @@ def _GetHTMLCharts(label_names, test_results):
chart.AddSeries('Max', 'line', 'black')
cur_index = 1
for label in label_names:
- chart.AddRow([label,
- cur_row_data[cur_index].value,
- cur_row_data[cur_index + 1].value,
- cur_row_data[cur_index + 2].value])
+ chart.AddRow([
+ label, cur_row_data[cur_index].value,
+ cur_row_data[cur_index + 1].value, cur_row_data[cur_index + 2].value
+ ])
if isinstance(cur_row_data[cur_index].value, str):
chart = None
break
@@ -387,8 +404,8 @@ class HTMLResultsReport(ResultsReport):
@staticmethod
def FromExperiment(experiment):
- return HTMLResultsReport(BenchmarkResults.FromExperiment(experiment),
- experiment=experiment)
+ return HTMLResultsReport(
+ BenchmarkResults.FromExperiment(experiment), experiment=experiment)
def GetReport(self):
label_names = self.benchmark_results.label_names
@@ -404,13 +421,14 @@ class HTMLResultsReport(ResultsReport):
if self.experiment is not None:
experiment_file = self.experiment.experiment_file
# Use kwargs for sanity, and so that testing is a bit easier.
- return templates.GenerateHTMLPage(perf_table=perf_table,
- chart_js=chart_javascript,
- summary_table=summary_table,
- print_table=_PrintTable,
- chart_divs=chart_divs,
- full_table=full_table,
- experiment_file=experiment_file)
+ return templates.GenerateHTMLPage(
+ perf_table=perf_table,
+ chart_js=chart_javascript,
+ summary_table=summary_table,
+ print_table=_PrintTable,
+ chart_divs=chart_divs,
+ full_table=full_table,
+ experiment_file=experiment_file)
def ParseStandardPerfReport(report_data):
@@ -446,12 +464,12 @@ def ParseStandardPerfReport(report_data):
#
# Note that we're looking at stripped lines, so there is no space at the
# start.
- perf_regex = re.compile(r'^(\d+(?:.\d*)?)%' # N.NN%
- r'\s*\d+' # samples count (ignored)
- r'\s*\S+' # command (ignored)
- r'\s*\S+' # shared_object (ignored)
- r'\s*\[.\]' # location (ignored)
- r'\s*(\S.+)' # function
+ perf_regex = re.compile(r'^(\d+(?:.\d*)?)%' # N.NN%
+ r'\s*\d+' # samples count (ignored)
+ r'\s*\S+' # command (ignored)
+ r'\s*\S+' # shared_object (ignored)
+ r'\s*\[.\]' # location (ignored)
+ r'\s*(\S.+)' # function
)
stripped_lines = (l.strip() for l in report_data)
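
A quick illustration of what this regular expression extracts from one stripped report line; the sample line below is made up, since real `perf report` output varies by version:

import re

perf_regex = re.compile(r'^(\d+(?:.\d*)?)%'  # N.NN%
                        r'\s*\d+'            # samples count (ignored)
                        r'\s*\S+'            # command (ignored)
                        r'\s*\S+'            # shared_object (ignored)
                        r'\s*\[.\]'          # location (ignored)
                        r'\s*(\S.+)'         # function
                       )

line = '12.34%  1234  chrome  libc.so  [.] memcpy'
match = perf_regex.match(line)
print(match.group(1), match.group(2))  # 12.34 memcpy
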
@@ -511,17 +529,23 @@ def _ReadExperimentPerfReport(results_directory, label_name, benchmark_name,
# Split out so that testing (specifically: mocking) is easier
def _ExperimentToKeyvals(experiment, for_json_report):
"""Converts an experiment to keyvals."""
- return OrganizeResults(experiment.benchmark_runs, experiment.labels,
- json_report=for_json_report)
+ return OrganizeResults(
+ experiment.benchmark_runs, experiment.labels, json_report=for_json_report)
class BenchmarkResults(object):
"""The minimum set of fields that any ResultsReport will take."""
- def __init__(self, label_names, benchmark_names_and_iterations, run_keyvals,
+
+ def __init__(self,
+ label_names,
+ benchmark_names_and_iterations,
+ run_keyvals,
read_perf_report=None):
if read_perf_report is None:
+
def _NoPerfReport(*_args, **_kwargs):
return {}
+
read_perf_report = _NoPerfReport
self.label_names = label_names
@@ -557,10 +581,15 @@ def _Unlist(l):
"""If l is a list, extracts the first element of l. Otherwise, returns l."""
return l[0] if isinstance(l, list) else l
+
class JSONResultsReport(ResultsReport):
"""Class that generates JSON reports for experiments."""
- def __init__(self, benchmark_results, date=None, time=None, experiment=None,
+ def __init__(self,
+ benchmark_results,
+ date=None,
+ time=None,
+ experiment=None,
json_args=None):
"""Construct a JSONResultsReport.
@@ -589,8 +618,8 @@ class JSONResultsReport(ResultsReport):
@staticmethod
def FromExperiment(experiment, date=None, time=None, json_args=None):
- benchmark_results = BenchmarkResults.FromExperiment(experiment,
- for_json_report=True)
+ benchmark_results = BenchmarkResults.FromExperiment(
+ experiment, for_json_report=True)
return JSONResultsReport(benchmark_results, date, time, experiment,
json_args)
diff --git a/crosperf/results_report_templates.py b/crosperf/results_report_templates.py
index 827649fd..15ce5827 100644
--- a/crosperf/results_report_templates.py
+++ b/crosperf/results_report_templates.py
@@ -14,6 +14,7 @@ _TabMenuTemplate = Template("""
<a href="javascript:switchTab('$table_name', 'tsv')">TSV</a>
</div>""")
+
def _GetTabMenuHTML(table_name):
# N.B. cgi.escape does some very basic HTML escaping. Nothing more.
escaped = cgi.escape(table_name, quote=True)
@@ -28,10 +29,11 @@ _ExperimentFileHTML = """
</div>
"""
+
def _GetExperimentFileHTML(experiment_file_text):
if not experiment_file_text:
return ''
- return _ExperimentFileHTML % (cgi.escape(experiment_file_text), )
+ return _ExperimentFileHTML % (cgi.escape(experiment_file_text),)
_ResultsSectionHTML = Template("""
@@ -46,16 +48,17 @@ _ResultsSectionHTML = Template("""
</div>
""")
+
def _GetResultsSectionHTML(print_table, table_name, data):
first_word = table_name.strip().split()[0]
short_name = first_word.lower()
- return _ResultsSectionHTML.substitute(sect_name=table_name,
- html_table=print_table(data, 'HTML'),
- text_table=print_table(data, 'PLAIN'),
- tsv_table=print_table(data, 'TSV'),
- tab_menu=_GetTabMenuHTML(short_name),
- short_name=short_name)
-
+ return _ResultsSectionHTML.substitute(
+ sect_name=table_name,
+ html_table=print_table(data, 'HTML'),
+ text_table=print_table(data, 'PLAIN'),
+ tsv_table=print_table(data, 'TSV'),
+ tab_menu=_GetTabMenuHTML(short_name),
+ short_name=short_name)
_MainHTML = Template("""
@@ -166,6 +169,7 @@ _MainHTML = Template("""
</html>
""")
+
# It's a bit ugly that we take some HTML things, and some non-HTML things, but I
# need to balance prettiness with time spent making things pretty.
def GenerateHTMLPage(perf_table, chart_js, summary_table, print_table,
@@ -189,8 +193,11 @@ def GenerateHTMLPage(perf_table, chart_js, summary_table, print_table,
full_table_html = _GetResultsSectionHTML(print_table, 'Full Table',
full_table)
experiment_file_html = _GetExperimentFileHTML(experiment_file)
- return _MainHTML.substitute(perf_init=perf_init, chart_js=chart_js,
- summary_table=summary_table_html,
- perf_html=perf_html, chart_divs=chart_divs,
- full_table=full_table_html,
- experiment_file=experiment_file_html)
+ return _MainHTML.substitute(
+ perf_init=perf_init,
+ chart_js=chart_js,
+ summary_table=summary_table_html,
+ perf_html=perf_html,
+ chart_divs=chart_divs,
+ full_table=full_table_html,
+ experiment_file=experiment_file_html)
diff --git a/crosperf/results_report_unittest.py b/crosperf/results_report_unittest.py
index ed5c74fa..2a23aa78 100755
--- a/crosperf/results_report_unittest.py
+++ b/crosperf/results_report_unittest.py
@@ -3,7 +3,6 @@
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Unittest for the results reporter."""
from __future__ import division
@@ -50,10 +49,11 @@ class FreeFunctionsTest(unittest.TestCase):
'/chromiumos_test_image.bin'
buildbot_img = buildbot_case.split('/chroot/tmp')[1]
- self.assertEqual(ParseChromeosImage(buildbot_case),
- ('R02-1.0', buildbot_img))
- self.assertEqual(ParseChromeosImage(os.path.dirname(buildbot_case)),
- ('', os.path.dirname(buildbot_img)))
+ self.assertEqual(
+ ParseChromeosImage(buildbot_case), ('R02-1.0', buildbot_img))
+ self.assertEqual(
+ ParseChromeosImage(os.path.dirname(buildbot_case)),
+ ('', os.path.dirname(buildbot_img)))
# Ensure we don't act completely insanely given a few mildly insane paths.
fun_case = '/chromiumos_test_image.bin'
@@ -66,6 +66,8 @@ class FreeFunctionsTest(unittest.TestCase):
# There are many ways for this to be done better, but the linter complains
# about all of them (that I can think of, at least).
_fake_path_number = [0]
+
+
def FakePath(ext):
"""Makes a unique path that shouldn't exist on the host system.
@@ -73,7 +75,7 @@ def FakePath(ext):
error message, it may be easier to track it to its source.
"""
_fake_path_number[0] += 1
- prefix = '/tmp/should/not/exist/%d/' % (_fake_path_number[0], )
+ prefix = '/tmp/should/not/exist/%d/' % (_fake_path_number[0],)
return os.path.join(prefix, ext)
@@ -121,14 +123,15 @@ def _InjectSuccesses(experiment, how_many, keyvals, for_benchmark=0,
share_cache = ''
locks_dir = ''
log = logger.GetLogger()
- machine_manager = MockMachineManager(FakePath('chromeos_root'), 0,
- log_level, locks_dir)
+ machine_manager = MockMachineManager(
+ FakePath('chromeos_root'), 0, log_level, locks_dir)
machine_manager.AddMachine('testing_machine')
machine = next(m for m in machine_manager.GetMachines()
if m.name == 'testing_machine')
for label in experiment.labels:
+
def MakeSuccessfulRun(n):
- run = MockBenchmarkRun('mock_success%d' % (n, ), bench, label,
+ run = MockBenchmarkRun('mock_success%d' % (n,), bench, label,
1 + n + num_runs, cache_conditions,
machine_manager, log, log_level, share_cache)
mock_result = MockResult(log, label, log_level, machine)
@@ -136,8 +139,8 @@ def _InjectSuccesses(experiment, how_many, keyvals, for_benchmark=0,
run.result = mock_result
return run
- experiment.benchmark_runs.extend(MakeSuccessfulRun(n)
- for n in xrange(how_many))
+ experiment.benchmark_runs.extend(
+ MakeSuccessfulRun(n) for n in xrange(how_many))
return experiment
@@ -160,7 +163,6 @@ class TextResultsReportTest(unittest.TestCase):
self.assertIn(MockCrosMachine.CPUINFO_STRING, text_report)
return text_report
-
def testOutput(self):
email_report = self._checkReport(email=True)
text_report = self._checkReport(email=False)
@@ -177,12 +179,10 @@ class HTMLResultsReportTest(unittest.TestCase):
things are displayed. It just cares that they're present.
"""
- _TestOutput = collections.namedtuple('TestOutput', ['summary_table',
- 'perf_html',
- 'chart_js',
- 'charts',
- 'full_table',
- 'experiment_file'])
+ _TestOutput = collections.namedtuple('TestOutput', [
+ 'summary_table', 'perf_html', 'chart_js', 'charts', 'full_table',
+ 'experiment_file'
+ ])
@staticmethod
def _GetTestOutput(perf_table, chart_js, summary_table, print_table,
@@ -192,12 +192,13 @@ class HTMLResultsReportTest(unittest.TestCase):
summary_table = print_table(summary_table, 'HTML')
perf_html = print_table(perf_table, 'HTML')
full_table = print_table(full_table, 'HTML')
- return HTMLResultsReportTest._TestOutput(summary_table=summary_table,
- perf_html=perf_html,
- chart_js=chart_js,
- charts=chart_divs,
- full_table=full_table,
- experiment_file=experiment_file)
+ return HTMLResultsReportTest._TestOutput(
+ summary_table=summary_table,
+ perf_html=perf_html,
+ chart_js=chart_js,
+ charts=chart_divs,
+ full_table=full_table,
+ experiment_file=experiment_file)
def _GetOutput(self, experiment=None, benchmark_results=None):
with mock.patch('results_report_templates.GenerateHTMLPage') as standin:
@@ -222,8 +223,8 @@ class HTMLResultsReportTest(unittest.TestCase):
def testSuccessfulOutput(self):
num_success = 2
success_keyvals = {'retval': 0, 'a_float': 3.96}
- output = self._GetOutput(_InjectSuccesses(MakeMockExperiment(), num_success,
- success_keyvals))
+ output = self._GetOutput(
+ _InjectSuccesses(MakeMockExperiment(), num_success, success_keyvals))
self.assertNotIn('no result', output.summary_table)
#self.assertIn(success_keyvals['machine'], output.summary_table)
@@ -321,8 +322,17 @@ class JSONResultsReportTest(unittest.TestCase):
benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2),
('bench3', 1), ('bench4', 0)]
benchmark_keyvals = {
- 'bench1': [[{'retval': 1, 'foo': 2.0}]],
- 'bench2': [[{'retval': 1, 'foo': 4.0}, {'retval': -1, 'bar': 999}]],
+ 'bench1': [[{
+ 'retval': 1,
+ 'foo': 2.0
+ }]],
+ 'bench2': [[{
+ 'retval': 1,
+ 'foo': 4.0
+ }, {
+ 'retval': -1,
+ 'bar': 999
+ }]],
# lack of retval is considered a failure.
'bench3': [[{}]],
'bench4': [[]]
@@ -341,8 +351,8 @@ class JSONResultsReportTest(unittest.TestCase):
benchmark_keyvals = {'bench1': [[{'retval': 0, 'foo': 2.0}]]}
bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
benchmark_keyvals)
- reporter = JSONResultsReport(bench_results,
- json_args={'separators': separators})
+ reporter = JSONResultsReport(
+ bench_results, json_args={'separators': separators})
result_str = reporter.GetReport()
self.assertIn(separators[0], result_str)
self.assertIn(separators[1], result_str)
@@ -351,8 +361,17 @@ class JSONResultsReportTest(unittest.TestCase):
labels = ['label1']
benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2)]
benchmark_keyvals = {
- 'bench1': [[{'retval': 0, 'foo': 2.0}]],
- 'bench2': [[{'retval': 0, 'foo': 4.0}, {'retval': 0, 'bar': 999}]]
+ 'bench1': [[{
+ 'retval': 0,
+ 'foo': 2.0
+ }]],
+ 'bench2': [[{
+ 'retval': 0,
+ 'foo': 4.0
+ }, {
+ 'retval': 0,
+ 'bar': 999
+ }]]
}
bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
benchmark_keyvals)
@@ -374,6 +393,7 @@ class JSONResultsReportTest(unittest.TestCase):
class PerfReportParserTest(unittest.TestCase):
"""Tests for the perf report parser in results_report."""
+
@staticmethod
def _ReadRealPerfReport():
my_dir = os.path.dirname(os.path.realpath(__file__))
diff --git a/crosperf/schedv2.py b/crosperf/schedv2.py
index 90fe83a3..e661f307 100644
--- a/crosperf/schedv2.py
+++ b/crosperf/schedv2.py
@@ -3,7 +3,6 @@
# found in the LICENSE file.
"""Module to optimize the scheduling of benchmark_run tasks."""
-
from __future__ import print_function
import sys
@@ -48,7 +47,7 @@ class DutWorker(Thread):
"""Do the "run-test->(optionally reimage)->run-test" chore.
Note - 'br' below means 'benchmark_run'.
- """
+ """
# Firstly, handle benchmarkruns that have cache hit.
br = self._sched.get_cached_benchmark_run()
@@ -93,12 +92,12 @@ class DutWorker(Thread):
def _reimage(self, label):
"""Reimage image to label.
- Args:
- label: the label to remimage onto dut.
+ Args:
+      label: the label to reimage onto the dut.
- Returns:
- 0 if successful, otherwise 1.
- """
+ Returns:
+ 0 if successful, otherwise 1.
+ """
# Termination could happen anywhere, check it.
if self._terminated:
@@ -111,8 +110,7 @@ class DutWorker(Thread):
# Note, only 1 reimage at any given time, this is guaranteed in
# ImageMachine, so no sync needed below.
retval = self._sched.get_experiment().machine_manager.ImageMachine(
- self._dut,
- label)
+ self._dut, label)
if retval:
return 1
@@ -126,7 +124,7 @@ class DutWorker(Thread):
"""Execute a single benchmark_run.
Note - this function never throws exceptions.
- """
+ """
# Termination could happen anywhere, check it.
if self._terminated:
@@ -152,7 +150,7 @@ class DutWorker(Thread):
If such match is found, we just skip doing reimage and jump to execute
some benchmark_runs.
- """
+ """
checksum_file = '/usr/local/osimage_checksum_file'
try:
@@ -166,8 +164,8 @@ class DutWorker(Thread):
checksum = checksum.strip()
for l in self._sched.get_labels():
if l.checksum == checksum:
- self._logger.LogOutput("Dut '{}' is pre-installed with '{}'".format(
- self._dut.name, l))
+ self._logger.LogOutput(
+ "Dut '{}' is pre-installed with '{}'".format(self._dut.name, l))
self._dut.label = l
return
except RuntimeError:
@@ -196,7 +194,7 @@ class BenchmarkRunCacheReader(Thread):
On creation, each instance of this class is given a br_list, which is a
subset of experiment._benchmark_runs.
- """
+ """
def __init__(self, schedv2, br_list):
super(BenchmarkRunCacheReader, self).__init__()
@@ -272,7 +270,7 @@ class Schedv2(object):
We do this by firstly creating a few threads, and then assign each
thread a segment of all brs. Each thread will check cache status for
each br and put those with cache into '_cached_br_list'.
- """
+ """
self._cached_br_list = []
n_benchmarkruns = len(self._experiment.benchmark_runs)
@@ -287,16 +285,16 @@ class Schedv2(object):
# a thread. Note, we use (x+3)/4 to mimic math.ceil(x/4).
n_threads = max(2, min(20, (n_benchmarkruns + 3) / 4))
self._logger.LogOutput(('Starting {} threads to read cache status for '
- '{} benchmark runs ...').format(n_threads,
- n_benchmarkruns))
+ '{} benchmark runs ...').format(
+ n_threads, n_benchmarkruns))
benchmarkruns_per_thread = (n_benchmarkruns + n_threads - 1) / n_threads
benchmarkrun_segments = []
for i in range(n_threads - 1):
start = i * benchmarkruns_per_thread
end = (i + 1) * benchmarkruns_per_thread
benchmarkrun_segments.append(self._experiment.benchmark_runs[start:end])
- benchmarkrun_segments.append(self._experiment.benchmark_runs[
- (n_threads - 1) * benchmarkruns_per_thread:])
+ benchmarkrun_segments.append(self._experiment.benchmark_runs[(
+ n_threads - 1) * benchmarkruns_per_thread:])
# Assert: aggregation of benchmarkrun_segments equals to benchmark_runs.
assert sum(len(x) for x in benchmarkrun_segments) == n_benchmarkruns
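
A worked example of the thread/segment arithmetic above (integer division, as in the Python 2 code; // is used so the result also holds under Python 3). The segmentation here is a simplified but size-equivalent version of the loop-plus-tail construction in the hunk:

def ceil_div4(x):
  return (x + 3) // 4  # mimics math.ceil(x / 4.0), as the comment above notes

n_benchmarkruns = 18
n_threads = max(2, min(20, ceil_div4(n_benchmarkruns)))          # -> 5
per_thread = (n_benchmarkruns + n_threads - 1) // n_threads      # -> 4
segments = [range(i * per_thread, min((i + 1) * per_thread, n_benchmarkruns))
            for i in range(n_threads)]
print(n_threads, [len(s) for s in segments])  # 5 [4, 4, 4, 4, 2]
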
@@ -314,9 +312,8 @@ class Schedv2(object):
x.join()
# Summarize.
- self._logger.LogOutput(
- 'Total {} cache hit out of {} benchmark_runs.'.format(
- len(self._cached_br_list), n_benchmarkruns))
+ self._logger.LogOutput('Total {} cache hit out of {} benchmark_runs.'.
+ format(len(self._cached_br_list), n_benchmarkruns))
def get_cached_run_list(self):
return self._cached_br_list
@@ -338,9 +335,9 @@ class Schedv2(object):
def get_cached_benchmark_run(self):
"""Get a benchmark_run with 'cache hit'.
- Returns:
- The benchmark that has cache hit, if any. Otherwise none.
- """
+ Returns:
+ The benchmark that has cache hit, if any. Otherwise none.
+ """
with self.lock_on('_cached_br_list'):
if self._cached_br_list:
@@ -350,14 +347,14 @@ class Schedv2(object):
def get_benchmark_run(self, dut):
"""Get a benchmark_run (br) object for a certain dut.
- Args:
- dut: the dut for which a br is returned.
+ Args:
+ dut: the dut for which a br is returned.
- Returns:
- A br with its label matching that of the dut. If no such br could be
- found, return None (this usually means a reimage is required for the
- dut).
- """
+ Returns:
+ A br with its label matching that of the dut. If no such br could be
+ found, return None (this usually means a reimage is required for the
+ dut).
+ """
# If terminated, stop providing any br.
if self._terminated:
@@ -384,12 +381,12 @@ class Schedv2(object):
The dut_worker calling this method is responsible for reimage the dut to
this label.
- Args:
- dut: the new label that is to be reimaged onto the dut.
+ Args:
+      dut: the dut onto which the allocated label will be reimaged.
- Returns:
- The label or None.
- """
+ Returns:
+ The label or None.
+ """
if self._terminated:
return None
@@ -399,9 +396,9 @@ class Schedv2(object):
def dut_worker_finished(self, dut_worker):
"""Notify schedv2 that the dut_worker thread finished.
- Args:
- dut_worker: the thread that is about to end.
- """
+ Args:
+ dut_worker: the thread that is about to end.
+ """
self._logger.LogOutput('{} finished.'.format(dut_worker))
with self._workers_lock:
@@ -418,7 +415,7 @@ class Schedv2(object):
"""Mark flag so we stop providing br/reimages.
Also terminate each DutWorker, so they refuse to execute br or reimage.
- """
+ """
self._terminated = True
for dut_worker in self._active_workers:
diff --git a/crosperf/schedv2_unittest.py b/crosperf/schedv2_unittest.py
index be0fde4b..250968dc 100755
--- a/crosperf/schedv2_unittest.py
+++ b/crosperf/schedv2_unittest.py
@@ -72,11 +72,10 @@ class Schedv2Test(unittest.TestCase):
"""Create fake experiment from string.
Note - we mock out BenchmarkRun in this step.
- """
+ """
experiment_file = ExperimentFile(StringIO.StringIO(expstr))
- experiment = ExperimentFactory().GetExperiment(experiment_file,
- working_directory='',
- log_dir='')
+ experiment = ExperimentFactory().GetExperiment(
+ experiment_file, working_directory='', log_dir='')
return experiment
def test_remote(self):
@@ -99,8 +98,8 @@ class Schedv2Test(unittest.TestCase):
return (cm.name != 'chromeos-daisy3.cros' and
cm.name != 'chromeos-daisy5.cros')
- with mock.patch('machine_manager.MockCrosMachine.IsReachable',
- new=MockIsReachable):
+ with mock.patch(
+ 'machine_manager.MockCrosMachine.IsReachable', new=MockIsReachable):
self.exp = self._make_fake_experiment(EXPERIMENT_FILE_1)
self.assertIn('chromeos-daisy1.cros', self.exp.remote)
self.assertIn('chromeos-daisy2.cros', self.exp.remote)
@@ -119,8 +118,8 @@ class Schedv2Test(unittest.TestCase):
def test_BenchmarkRunCacheReader_1(self, reader):
"""Test benchmarkrun set is split into 5 segments."""
- self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
- kraken_iterations=9))
+ self.exp = self._make_fake_experiment(
+ EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=9))
my_schedv2 = Schedv2(self.exp)
self.assertFalse(my_schedv2.is_complete())
# We have 9 * 2 == 18 brs, we use 5 threads, each reading 4, 4, 4,
@@ -141,8 +140,8 @@ class Schedv2Test(unittest.TestCase):
def test_BenchmarkRunCacheReader_2(self, reader):
"""Test benchmarkrun set is split into 4 segments."""
- self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
- kraken_iterations=8))
+ self.exp = self._make_fake_experiment(
+ EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=8))
my_schedv2 = Schedv2(self.exp)
self.assertFalse(my_schedv2.is_complete())
# We have 8 * 2 == 16 brs, we use 4 threads, each reading 4 brs.
@@ -156,8 +155,8 @@ class Schedv2Test(unittest.TestCase):
def test_BenchmarkRunCacheReader_3(self, reader):
"""Test benchmarkrun set is split into 2 segments."""
- self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
- kraken_iterations=3))
+ self.exp = self._make_fake_experiment(
+ EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=3))
my_schedv2 = Schedv2(self.exp)
self.assertFalse(my_schedv2.is_complete())
# We have 3 * 2 == 6 brs, we use 2 threads.
@@ -169,8 +168,8 @@ class Schedv2Test(unittest.TestCase):
def test_BenchmarkRunCacheReader_4(self, reader):
"""Test benchmarkrun set is not splitted."""
- self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
- kraken_iterations=1))
+ self.exp = self._make_fake_experiment(
+ EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=1))
my_schedv2 = Schedv2(self.exp)
self.assertFalse(my_schedv2.is_complete())
# We have 1 * 2 == 2 br, so only 1 instance.
@@ -183,18 +182,17 @@ class Schedv2Test(unittest.TestCase):
def MockReadCache(br):
br.cache_hit = (br.label.name == 'image2')
- with mock.patch('benchmark_run.MockBenchmarkRun.ReadCache',
- new=MockReadCache):
+ with mock.patch(
+ 'benchmark_run.MockBenchmarkRun.ReadCache', new=MockReadCache):
# We have 2 * 30 brs, half of which are put into _cached_br_list.
- self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
- kraken_iterations=30))
+ self.exp = self._make_fake_experiment(
+ EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=30))
my_schedv2 = Schedv2(self.exp)
self.assertEquals(len(my_schedv2.get_cached_run_list()), 30)
# The non-cache-hit brs are put into Schedv2._label_brl_map.
self.assertEquals(
reduce(lambda a, x: a + len(x[1]),
- my_schedv2.get_label_map().iteritems(),
- 0), 30)
+ my_schedv2.get_label_map().iteritems(), 0), 30)
def test_nocachehit(self):
"""Test no cache-hit."""
@@ -202,18 +200,17 @@ class Schedv2Test(unittest.TestCase):
def MockReadCache(br):
br.cache_hit = False
- with mock.patch('benchmark_run.MockBenchmarkRun.ReadCache',
- new=MockReadCache):
+ with mock.patch(
+ 'benchmark_run.MockBenchmarkRun.ReadCache', new=MockReadCache):
# We have 2 * 30 brs, none of which are put into _cached_br_list.
- self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
- kraken_iterations=30))
+ self.exp = self._make_fake_experiment(
+ EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=30))
my_schedv2 = Schedv2(self.exp)
self.assertEquals(len(my_schedv2.get_cached_run_list()), 0)
# The non-cache-hit brs are put into Schedv2._label_brl_map.
self.assertEquals(
reduce(lambda a, x: a + len(x[1]),
- my_schedv2.get_label_map().iteritems(),
- 0), 60)
+ my_schedv2.get_label_map().iteritems(), 0), 60)
if __name__ == '__main__':
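
The reduce(...) assertions above just total the benchmark runs across the per-label lists in Schedv2's label map; an equivalent, arguably clearer spelling (dict contents are made up):

label_map = {'image1': ['br1', 'br2'], 'image2': ['br3']}
total = sum(len(brs) for brs in label_map.values())
print(total)  # 3
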
diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py
index e42d82a9..efbb534f 100644
--- a/crosperf/settings_factory.py
+++ b/crosperf/settings_factory.py
@@ -29,9 +29,11 @@ class BenchmarkSettings(Settings):
self.AddField(
IntegerField(
'iterations',
- default=1,
- description='Number of iterations to run the '
- 'test.'))
+ required=False,
+ default=0,
+ description='Number of iterations to run the test. '
+ 'If not set, will run each benchmark test the optimum number of '
+ 'times to get a stable result.'))
self.AddField(
TextField(
'suite', default='', description='The type of the benchmark.'))
@@ -68,8 +70,8 @@ class LabelSettings(Settings):
'autotest_path',
required=False,
description='Autotest directory path relative to chroot which '
- 'has autotest files for the image to run tests requiring autotest files'
- ))
+ 'has autotest files for the image to run tests requiring autotest '
+ 'files.'))
self.AddField(
TextField(
'chromeos_root',
@@ -186,9 +188,11 @@ class GlobalSettings(Settings):
self.AddField(
IntegerField(
'iterations',
- default=1,
- description='Number of iterations to run all '
- 'tests.'))
+ required=False,
+ default=0,
+ description='Number of iterations to run all tests. '
+ 'If not set, will run each benchmark test the optimum number of '
+ 'times to get a stable result.'))
self.AddField(
TextField(
'chromeos_root',
@@ -256,8 +260,7 @@ class GlobalSettings(Settings):
'you want to use. It accepts multiple directories '
'separated by a ",".'))
self.AddField(
- TextField(
- 'results_dir', default='', description='The results dir.'))
+ TextField('results_dir', default='', description='The results dir.'))
self.AddField(
TextField(
'locks_dir',
diff --git a/crosperf/settings_factory_unittest.py b/crosperf/settings_factory_unittest.py
index 127bfd3c..1ff6a133 100755
--- a/crosperf/settings_factory_unittest.py
+++ b/crosperf/settings_factory_unittest.py
@@ -1,6 +1,8 @@
#!/usr/bin/env python2
#
-# Copyright 2014 Google Inc. All Rights Reserved.
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
"""Unittest for crosperf."""
from __future__ import print_function
@@ -19,7 +21,7 @@ class BenchmarkSettingsTest(unittest.TestCase):
self.assertEqual(len(res.fields), 6)
self.assertEqual(res.GetField('test_name'), '')
self.assertEqual(res.GetField('test_args'), '')
- self.assertEqual(res.GetField('iterations'), 1)
+ self.assertEqual(res.GetField('iterations'), 0)
self.assertEqual(res.GetField('suite'), '')
@@ -56,7 +58,7 @@ class GlobalSettingsTest(unittest.TestCase):
self.assertEqual(res.GetField('rerun'), False)
self.assertEqual(res.GetField('same_specs'), True)
self.assertEqual(res.GetField('same_machine'), False)
- self.assertEqual(res.GetField('iterations'), 1)
+ self.assertEqual(res.GetField('iterations'), 0)
self.assertEqual(res.GetField('chromeos_root'), '')
self.assertEqual(res.GetField('logging_level'), 'average')
self.assertEqual(res.GetField('acquire_timeout'), 0)
@@ -77,18 +79,18 @@ class SettingsFactoryTest(unittest.TestCase):
self.assertRaises(Exception, settings_factory.SettingsFactory.GetSettings,
'global', 'bad_type')
- l_settings = settings_factory.SettingsFactory().GetSettings('label',
- 'label')
+ l_settings = settings_factory.SettingsFactory().GetSettings(
+ 'label', 'label')
self.assertIsInstance(l_settings, settings_factory.LabelSettings)
self.assertEqual(len(l_settings.fields), 9)
- b_settings = settings_factory.SettingsFactory().GetSettings('benchmark',
- 'benchmark')
+ b_settings = settings_factory.SettingsFactory().GetSettings(
+ 'benchmark', 'benchmark')
self.assertIsInstance(b_settings, settings_factory.BenchmarkSettings)
self.assertEqual(len(b_settings.fields), 6)
- g_settings = settings_factory.SettingsFactory().GetSettings('global',
- 'global')
+ g_settings = settings_factory.SettingsFactory().GetSettings(
+ 'global', 'global')
self.assertIsInstance(g_settings, settings_factory.GlobalSettings)
self.assertEqual(len(g_settings.fields), 25)
diff --git a/crosperf/settings_unittest.py b/crosperf/settings_unittest.py
index f1062f0d..fea55c05 100755
--- a/crosperf/settings_unittest.py
+++ b/crosperf/settings_unittest.py
@@ -48,14 +48,12 @@ class TestSettings(unittest.TestCase):
'run the test.'))
self.assertEqual(len(self.settings.fields), 1)
# Adding the same field twice raises an exception.
- self.assertRaises(
- Exception,
- self.settings.AddField, (IntegerField(
- 'iterations',
- default=1,
- required=False,
- description='Number of iterations to run '
- 'the test.')))
+ self.assertRaises(Exception, self.settings.AddField, (IntegerField(
+ 'iterations',
+ default=1,
+ required=False,
+ description='Number of iterations to run '
+ 'the test.')))
res = self.settings.fields['iterations']
self.assertIsInstance(res, IntegerField)
self.assertEqual(res.Get(), 1)
@@ -116,10 +114,10 @@ class TestSettings(unittest.TestCase):
self.assertEqual(res, 5)
def test_inherit(self):
- parent_settings = settings_factory.SettingsFactory().GetSettings('global',
- 'global')
- label_settings = settings_factory.SettingsFactory().GetSettings('label',
- 'label')
+ parent_settings = settings_factory.SettingsFactory().GetSettings(
+ 'global', 'global')
+ label_settings = settings_factory.SettingsFactory().GetSettings(
+ 'label', 'label')
self.assertEqual(parent_settings.GetField('chromeos_root'), '')
self.assertEqual(label_settings.GetField('chromeos_root'), '')
self.assertIsNone(label_settings.parent)
@@ -140,8 +138,8 @@ class TestSettings(unittest.TestCase):
'list of email addresses to send '
'email to.'))
- global_settings = settings_factory.SettingsFactory().GetSettings('global',
- 'global')
+ global_settings = settings_factory.SettingsFactory().GetSettings(
+ 'global', 'global')
global_settings.SetField('email', 'john.doe@google.com', append=True)
global_settings.SetField('email', 'jane.smith@google.com', append=True)
diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
index 678113a7..bd27f282 100644
--- a/crosperf/suite_runner.py
+++ b/crosperf/suite_runner.py
@@ -78,8 +78,8 @@ class SuiteRunner(object):
(benchmark.name, i))
break
else:
- self.logger.LogOutput('benchmark %s succeded on first try' %
- benchmark.name)
+ self.logger.LogOutput(
+              'benchmark %s succeeded on first try' % benchmark.name)
break
return ret_tup
@@ -88,32 +88,37 @@ class SuiteRunner(object):
# pyformat: disable
set_cpu_freq = (
'set -e && '
+ # Disable Turbo in Intel pstate driver
+ 'if [[ -e /sys/devices/system/cpu/intel_pstate/no_turbo ]]; then '
+ 'echo -n 1 > /sys/devices/system/cpu/intel_pstate/no_turbo; fi; '
+ # Set governor to performance for each cpu
'for f in /sys/devices/system/cpu/cpu*/cpufreq; do '
'cd $f; '
- 'val=0; '
- 'if [[ -e scaling_available_frequencies ]]; then '
- # pylint: disable=line-too-long
- ' val=`cat scaling_available_frequencies | tr " " "\\n" | sort -n -b -r`; '
- 'else '
- ' val=`cat scaling_max_freq | tr " " "\\n" | sort -n -b -r`; fi ; '
- 'set -- $val; '
- 'highest=$1; '
- 'if [[ $# -gt 1 ]]; then '
- ' case $highest in *1000) highest=$2;; esac; '
- 'fi ;'
- 'echo $highest > scaling_max_freq; '
- 'echo $highest > scaling_min_freq; '
'echo performance > scaling_governor; '
+      # Uncomment the rest of the lines to enable frequency setting by crosperf
+ #'val=0; '
+ #'if [[ -e scaling_available_frequencies ]]; then '
+ # pylint: disable=line-too-long
+ #' val=`cat scaling_available_frequencies | tr " " "\\n" | sort -n -b -r`; '
+ #'else '
+ #' val=`cat scaling_max_freq | tr " " "\\n" | sort -n -b -r`; fi ; '
+ #'set -- $val; '
+ #'highest=$1; '
+ #'if [[ $# -gt 1 ]]; then '
+ #' case $highest in *1000) highest=$2;; esac; '
+ #'fi ;'
+ #'echo $highest > scaling_max_freq; '
+ #'echo $highest > scaling_min_freq; '
'done'
)
# pyformat: enable
if self.log_level == 'average':
- self.logger.LogOutput('Pinning governor execution frequencies for %s' %
- machine_name)
+ self.logger.LogOutput(
+ 'Pinning governor execution frequencies for %s' % machine_name)
ret = self._ce.CrosRunCommand(
set_cpu_freq, machine=machine_name, chromeos_root=chromeos_root)
- self.logger.LogFatalIf(ret, 'Could not pin frequencies on machine: %s' %
- machine_name)
+ self.logger.LogFatalIf(
+ ret, 'Could not pin frequencies on machine: %s' % machine_name)
def DecreaseWaitTime(self, machine_name, chromeos_root):
"""Change the ten seconds wait time for pagecycler to two seconds."""
@@ -218,11 +223,10 @@ class SuiteRunner(object):
args_string = "test_args='%s'" % test_args
cmd = ('{} {} {} --board={} --args="{} run_local={} test={} '
- '{}" {} telemetry_Crosperf'.format(TEST_THAT_PATH, autotest_dir_arg,
- fast_arg, label.board,
- args_string, benchmark.run_local,
- benchmark.test_name,
- profiler_args, machine))
+ '{}" {} telemetry_Crosperf'.format(
+ TEST_THAT_PATH, autotest_dir_arg, fast_arg, label.board,
+ args_string, benchmark.run_local, benchmark.test_name,
+ profiler_args, machine))
# Use --no-ns-pid so that cros_sdk does not create a different
# process namespace and we can kill process created easily by their
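The reflowed format string above assembles the test_that invocation for telemetry_Crosperf. For illustration, this is how the placeholders line up; every concrete value below is a hypothetical stand-in, not captured crosperf output:

    cmd = ('{} {} {} --board={} --args="{} run_local={} test={} '
           '{}" {} telemetry_Crosperf'.format(
               '/usr/bin/test_that',         # TEST_THAT_PATH (assumed value)
               '--autotest_dir ~/autotest',  # autotest_dir_arg (hypothetical)
               '--fast',                     # fast_arg (hypothetical)
               'lumpy',                      # label.board
               "test_args=''",               # args_string
               False,                        # benchmark.run_local
               'octane',                     # benchmark.test_name
               '',                           # profiler_args
               'lumpy1.cros'))               # machine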
diff --git a/crosperf/suite_runner_unittest.py b/crosperf/suite_runner_unittest.py
index fd8de661..78bdfbdf 100755
--- a/crosperf/suite_runner_unittest.py
+++ b/crosperf/suite_runner_unittest.py
@@ -28,10 +28,9 @@ class SuiteRunnerTest(unittest.TestCase):
mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
mock_cmd_term = mock.Mock(spec=command_executer.CommandTerminator)
mock_logger = mock.Mock(spec=logger.Logger)
- mock_label = label.MockLabel('lumpy', 'lumpy_chromeos_image', '',
- '/tmp/chromeos', 'lumpy',
- ['lumpy1.cros', 'lumpy.cros2'], '', '', False,
- 'average', 'gcc', '')
+ mock_label = label.MockLabel(
+ 'lumpy', 'lumpy_chromeos_image', '', '/tmp/chromeos', 'lumpy',
+ ['lumpy1.cros', 'lumpy.cros2'], '', '', False, 'average', 'gcc', '')
telemetry_crosperf_bench = Benchmark(
'b1_test', # name
'octane', # test_name
@@ -72,9 +71,8 @@ class SuiteRunnerTest(unittest.TestCase):
self.call_telemetry_run = False
def setUp(self):
- self.runner = suite_runner.SuiteRunner(self.mock_logger, 'verbose',
- self.mock_cmd_exec,
- self.mock_cmd_term)
+ self.runner = suite_runner.SuiteRunner(
+ self.mock_logger, 'verbose', self.mock_cmd_exec, self.mock_cmd_term)
def test_get_profiler_args(self):
input_str = ('--profiler=custom_perf --profiler_args=\'perf_options'
@@ -136,9 +134,9 @@ class SuiteRunnerTest(unittest.TestCase):
self.assertTrue(self.call_telemetry_run)
self.assertFalse(self.call_test_that_run)
self.assertFalse(self.call_telemetry_crosperf_run)
- self.assertEqual(
- self.telemetry_run_args,
- ['fake_machine', self.mock_label, self.telemetry_bench, ''])
+ self.assertEqual(self.telemetry_run_args, [
+ 'fake_machine', self.mock_label, self.telemetry_bench, ''
+ ])
reset()
self.runner.Run(machine, self.mock_label, self.test_that_bench, test_args,
@@ -147,9 +145,9 @@ class SuiteRunnerTest(unittest.TestCase):
self.assertFalse(self.call_telemetry_run)
self.assertTrue(self.call_test_that_run)
self.assertFalse(self.call_telemetry_crosperf_run)
- self.assertEqual(
- self.test_that_args,
- ['fake_machine', self.mock_label, self.test_that_bench, '', ''])
+ self.assertEqual(self.test_that_args, [
+ 'fake_machine', self.mock_label, self.test_that_bench, '', ''
+ ])
reset()
self.runner.Run(machine, self.mock_label, self.telemetry_crosperf_bench,
@@ -171,21 +169,12 @@ class SuiteRunnerTest(unittest.TestCase):
# pyformat: disable
set_cpu_cmd = (
'set -e && '
+ # Disable Turbo in Intel pstate driver
+ 'if [[ -e /sys/devices/system/cpu/intel_pstate/no_turbo ]]; then '
+ 'echo -n 1 > /sys/devices/system/cpu/intel_pstate/no_turbo; fi; '
+ # Set governor to performance for each cpu
'for f in /sys/devices/system/cpu/cpu*/cpufreq; do '
'cd $f; '
- 'val=0; '
- 'if [[ -e scaling_available_frequencies ]]; then '
- # pylint: disable=line-too-long
- ' val=`cat scaling_available_frequencies | tr " " "\\n" | sort -n -b -r`; '
- 'else '
- ' val=`cat scaling_max_freq | tr " " "\\n" | sort -n -b -r`; fi ; '
- 'set -- $val; '
- 'highest=$1; '
- 'if [[ $# -gt 1 ]]; then '
- ' case $highest in *1000) highest=$2;; esac; '
- 'fi ;'
- 'echo $highest > scaling_max_freq; '
- 'echo $highest > scaling_min_freq; '
'echo performance > scaling_governor; '
'done'
)
@@ -338,11 +327,12 @@ class SuiteRunnerTest(unittest.TestCase):
self.telemetry_bench, '')
self.assertEqual(res, 0)
self.assertEqual(mock_runcmd.call_count, 1)
- self.assertEqual(mock_runcmd.call_args_list[0][0], (
- ('cd src/tools/perf && ./run_measurement '
- '--browser=cros-chrome --output-format=csv '
- '--remote=lumpy1.cros --identity /tmp/chromeos/src/scripts'
- '/mod_for_test_scripts/ssh_keys/testing_rsa octane '),))
+ self.assertEqual(
+ mock_runcmd.call_args_list[0][0],
+ (('cd src/tools/perf && ./run_measurement '
+ '--browser=cros-chrome --output-format=csv '
+ '--remote=lumpy1.cros --identity /tmp/chromeos/src/scripts'
+ '/mod_for_test_scripts/ssh_keys/testing_rsa octane '),))
self.real_logger.LogMsg = save_log_msg