author     Manoj Gupta <manojgupta@google.com>    2016-11-01 12:56:25 -0700
committer  chrome-bot <chrome-bot@chromium.org>   2016-11-01 15:50:24 -0700
commit     c39917fe61858e884d06656122cf88a8c66fd825 (patch)
tree       4281d50331476710319707d3d15d613690cea646
parent     d768dc1c6dbd2cdc8f2503574e88ad88316e24f2 (diff)
download   toolchain-utils-c39917fe61858e884d06656122cf88a8c66fd825.tar.gz
Update formatting of files before committing changes for autotest.

No functional change.

BUG: chromium:647429
TEST: crosperf unit tests

Change-Id: I827f9ec4d00a0d8c1ae8a9930d7832289e694dc8
Reviewed-on: https://chrome-internal-review.googlesource.com/301457
Commit-Ready: Manoj Gupta <manojgupta@google.com>
Tested-by: Manoj Gupta <manojgupta@google.com>
Reviewed-by: Luis Lozano <llozano@chromium.org>
-rwxr-xr-x  crosperf/benchmark_run_unittest.py    201
-rw-r--r--  crosperf/download_images.py            16
-rwxr-xr-x  crosperf/download_images_unittest.py    8
-rw-r--r--  crosperf/experiment_file.py             5
-rwxr-xr-x  crosperf/machine_manager_unittest.py   64
-rw-r--r--  crosperf/mock_instance.py             164
-rwxr-xr-x  crosperf/results_cache_unittest.py    445
-rw-r--r--  crosperf/settings_factory.py          419
-rw-r--r--  crosperf/suite_runner.py               69
-rwxr-xr-x  crosperf/suite_runner_unittest.py      98

10 files changed, 807 insertions, 682 deletions
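
The hunks below are whitespace-only reflows of the kind YAPF produces. As a
minimal sketch of reproducing one (an assumption: the commit message does not
name the formatter, so YAPF and its predefined 'chromium' style are
hypothetical here; the CLI equivalent would be
yapf -i --style=chromium crosperf/*.py):

# Hedged sketch: assumes YAPF's Python API and its 'chromium' style preset;
# neither is confirmed by the commit itself.
from yapf.yapflib.yapf_api import FormatCode

SRC = ("keyval[0] = {'': 'PASS',\n"
       "             'milliseconds_1': '1'}\n")

# FormatCode returns (formatted_source, changed). A pure reflow leaves the
# parsed AST identical, which is what 'No functional change' asserts.
formatted, changed = FormatCode(SRC, style_config='chromium')
print(formatted)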
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
index f0815ec0..b37594dc 100755
--- a/crosperf/benchmark_run_unittest.py
+++ b/crosperf/benchmark_run_unittest.py
@@ -37,54 +37,59 @@ class BenchmarkRunTest(unittest.TestCase):
self.log_error = []
self.log_output = []
self.err_msg = None
- self.test_benchmark = Benchmark('page_cycler.netsim.top_10', # name
- 'page_cycler.netsim.top_10', # test_name
- '', # test_args
- 1, # iterations
- False, # rm_chroot_tmp
- '', # perf_args
- suite='telemetry_Crosperf') # suite
-
- self.test_label = MockLabel('test1',
- 'image1',
- '/tmp/test_benchmark_run',
- 'x86-alex',
- 'chromeos2-row1-rack4-host9.cros',
- image_args='',
- cache_dir='',
- cache_only=False,
- log_level='average',
- compiler='gcc')
-
- self.test_cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
- CacheConditions.CHECKSUMS_MATCH]
+ self.test_benchmark = Benchmark(
+ 'page_cycler.netsim.top_10', # name
+ 'page_cycler.netsim.top_10', # test_name
+ '', # test_args
+ 1, # iterations
+ False, # rm_chroot_tmp
+ '', # perf_args
+ suite='telemetry_Crosperf') # suite
+
+ self.test_label = MockLabel(
+ 'test1',
+ 'image1',
+ '/tmp/test_benchmark_run',
+ 'x86-alex',
+ 'chromeos2-row1-rack4-host9.cros',
+ image_args='',
+ cache_dir='',
+ cache_only=False,
+ log_level='average',
+ compiler='gcc')
+
+ self.test_cache_conditions = [
+ CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH
+ ]
self.mock_logger = logger.GetLogger(log_dir='', mock=True)
self.mock_machine_manager = mock.Mock(spec=MachineManager)
def testDryRun(self):
- my_label = MockLabel('test1',
- 'image1',
- '/tmp/test_benchmark_run',
- 'x86-alex',
- 'chromeos2-row1-rack4-host9.cros',
- image_args='',
- cache_dir='',
- cache_only=False,
- log_level='average',
- compiler='gcc')
+ my_label = MockLabel(
+ 'test1',
+ 'image1',
+ '/tmp/test_benchmark_run',
+ 'x86-alex',
+ 'chromeos2-row1-rack4-host9.cros',
+ image_args='',
+ cache_dir='',
+ cache_only=False,
+ log_level='average',
+ compiler='gcc')
logging_level = 'average'
m = MockMachineManager('/tmp/chromeos_root', 0, logging_level, '')
m.AddMachine('chromeos2-row1-rack4-host9.cros')
- bench = Benchmark('page_cycler.netsim.top_10', # name
- 'page_cycler.netsim.top_10', # test_name
- '', # test_args
- 1, # iterations
- False, # rm_chroot_tmp
- '', # perf_args
- suite='telemetry_Crosperf') # suite
+ bench = Benchmark(
+ 'page_cycler.netsim.top_10', # name
+ 'page_cycler.netsim.top_10', # test_name
+ '', # test_args
+ 1, # iterations
+ False, # rm_chroot_tmp
+ '', # perf_args
+ suite='telemetry_Crosperf') # suite
b = benchmark_run.MockBenchmarkRun('test run', bench, my_label, 1, [], m,
logger.GetLogger(), logging_level, '')
b.cache = MockResultsCache()
@@ -93,9 +98,10 @@ class BenchmarkRunTest(unittest.TestCase):
# Make sure the arguments to BenchmarkRun.__init__ have not changed
# since the last time this test was updated:
- args_list = ['self', 'name', 'benchmark', 'label', 'iteration',
- 'cache_conditions', 'machine_manager', 'logger_to_use',
- 'log_level', 'share_cache']
+ args_list = [
+ 'self', 'name', 'benchmark', 'label', 'iteration', 'cache_conditions',
+ 'machine_manager', 'logger_to_use', 'log_level', 'share_cache'
+ ]
arg_spec = inspect.getargspec(benchmark_run.BenchmarkRun.__init__)
self.assertEqual(len(arg_spec.args), len(args_list))
self.assertEqual(arg_spec.args, args_list)
@@ -109,10 +115,11 @@ class BenchmarkRunTest(unittest.TestCase):
pass
def test_run(self):
- br = benchmark_run.BenchmarkRun(
- 'test_run', self.test_benchmark, self.test_label, 1,
- self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '')
def MockLogOutput(msg, print_to_console=False):
'Helper function for test_run.'
@@ -186,10 +193,11 @@ class BenchmarkRunTest(unittest.TestCase):
ResetTestValues()
br.run()
self.assertTrue(self.called_ReadCache)
- self.assertEqual(self.log_output,
- ['test_run: No cache hit.',
- 'Releasing machine: chromeos1-row3-rack5-host7.cros',
- 'Released machine: chromeos1-row3-rack5-host7.cros'])
+ self.assertEqual(self.log_output, [
+ 'test_run: No cache hit.',
+ 'Releasing machine: chromeos1-row3-rack5-host7.cros',
+ 'Released machine: chromeos1-row3-rack5-host7.cros'
+ ])
self.assertEqual(len(self.log_error), 0)
self.assertEqual(self.status, ['WAITING', 'SUCCEEDED'])
@@ -199,10 +207,11 @@ class BenchmarkRunTest(unittest.TestCase):
br.terminated = True
br.run()
self.assertTrue(self.called_ReadCache)
- self.assertEqual(self.log_output,
- ['test_run: No cache hit.',
- 'Releasing machine: chromeos1-row3-rack5-host7.cros',
- 'Released machine: chromeos1-row3-rack5-host7.cros'])
+ self.assertEqual(self.log_output, [
+ 'test_run: No cache hit.',
+ 'Releasing machine: chromeos1-row3-rack5-host7.cros',
+ 'Released machine: chromeos1-row3-rack5-host7.cros'
+ ])
self.assertEqual(len(self.log_error), 0)
self.assertEqual(self.status, ['WAITING'])
@@ -212,10 +221,11 @@ class BenchmarkRunTest(unittest.TestCase):
br.RunTest = FakeRunTestFail
br.run()
self.assertTrue(self.called_ReadCache)
- self.assertEqual(self.log_output,
- ['test_run: No cache hit.',
- 'Releasing machine: chromeos1-row3-rack5-host7.cros',
- 'Released machine: chromeos1-row3-rack5-host7.cros'])
+ self.assertEqual(self.log_output, [
+ 'test_run: No cache hit.',
+ 'Releasing machine: chromeos1-row3-rack5-host7.cros',
+ 'Released machine: chromeos1-row3-rack5-host7.cros'
+ ])
self.assertEqual(len(self.log_error), 0)
self.assertEqual(self.status, ['WAITING', 'FAILED'])
@@ -225,10 +235,11 @@ class BenchmarkRunTest(unittest.TestCase):
br.ReadCache = FakeReadCacheSucceed
br.run()
self.assertTrue(self.called_ReadCache)
- self.assertEqual(self.log_output,
- ['test_run: Cache hit.', 'result.out stuff',
- 'Releasing machine: chromeos1-row3-rack5-host7.cros',
- 'Released machine: chromeos1-row3-rack5-host7.cros'])
+ self.assertEqual(self.log_output, [
+ 'test_run: Cache hit.', 'result.out stuff',
+ 'Releasing machine: chromeos1-row3-rack5-host7.cros',
+ 'Released machine: chromeos1-row3-rack5-host7.cros'
+ ])
self.assertEqual(self.log_error, ['result.err stuff'])
self.assertEqual(self.status, ['SUCCEEDED'])
@@ -240,15 +251,16 @@ class BenchmarkRunTest(unittest.TestCase):
br.run()
self.assertEqual(self.log_error, [
"Benchmark run: 'test_run' failed: This is an exception test; it is "
- "supposed to happen"
+ 'supposed to happen'
])
self.assertEqual(self.status, ['FAILED'])
def test_terminate_pass(self):
- br = benchmark_run.BenchmarkRun(
- 'test_run', self.test_benchmark, self.test_label, 1,
- self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '')
def GetLastEventPassed():
'Helper function for test_terminate_pass'
@@ -272,10 +284,11 @@ class BenchmarkRunTest(unittest.TestCase):
self.assertEqual(self.status, benchmark_run.STATUS_FAILED)
def test_terminate_fail(self):
- br = benchmark_run.BenchmarkRun(
- 'test_run', self.test_benchmark, self.test_label, 1,
- self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '')
def GetLastEventFailed():
'Helper function for test_terminate_fail'
@@ -299,10 +312,11 @@ class BenchmarkRunTest(unittest.TestCase):
self.assertEqual(self.status, benchmark_run.STATUS_SUCCEEDED)
def test_acquire_machine(self):
- br = benchmark_run.BenchmarkRun(
- 'test_run', self.test_benchmark, self.test_label, 1,
- self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '')
br.terminated = True
self.assertRaises(Exception, br.AcquireMachine)
@@ -316,10 +330,11 @@ class BenchmarkRunTest(unittest.TestCase):
self.assertEqual(machine.name, 'chromeos1-row3-rack5-host7.cros')
def test_get_extra_autotest_args(self):
- br = benchmark_run.BenchmarkRun(
- 'test_run', self.test_benchmark, self.test_label, 1,
- self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '')
def MockLogError(err_msg):
'Helper function for test_get_extra_autotest_args'
@@ -355,10 +370,11 @@ class BenchmarkRunTest(unittest.TestCase):
@mock.patch.object(SuiteRunner, 'Run')
@mock.patch.object(Result, 'CreateFromRun')
def test_run_test(self, mock_result, mock_runner):
- br = benchmark_run.BenchmarkRun(
- 'test_run', self.test_benchmark, self.test_label, 1,
- self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '')
self.status = []
@@ -373,8 +389,9 @@ class BenchmarkRunTest(unittest.TestCase):
br.RunTest(mock_machine)
self.assertTrue(br.run_completed)
- self.assertEqual(self.status, [benchmark_run.STATUS_IMAGING,
- benchmark_run.STATUS_RUNNING])
+ self.assertEqual(
+ self.status,
+ [benchmark_run.STATUS_IMAGING, benchmark_run.STATUS_RUNNING])
self.assertEqual(br.machine_manager.ImageMachine.call_count, 1)
br.machine_manager.ImageMachine.assert_called_with(mock_machine,
@@ -384,15 +401,17 @@ class BenchmarkRunTest(unittest.TestCase):
'', br.profiler_args)
self.assertEqual(mock_result.call_count, 1)
- mock_result.assert_called_with(
- self.mock_logger, 'average', self.test_label, None, "{'Score':100}", '',
- 0, 'page_cycler.netsim.top_10', 'telemetry_Crosperf')
+ mock_result.assert_called_with(self.mock_logger, 'average', self.test_label,
+ None, "{'Score':100}", '', 0,
+ 'page_cycler.netsim.top_10',
+ 'telemetry_Crosperf')
def test_set_cache_conditions(self):
- br = benchmark_run.BenchmarkRun(
- 'test_run', self.test_benchmark, self.test_label, 1,
- self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '')
phony_cache_conditions = [123, 456, True, False]
diff --git a/crosperf/download_images.py b/crosperf/download_images.py
index da0b4e37..c07d82d0 100644
--- a/crosperf/download_images.py
+++ b/crosperf/download_images.py
@@ -1,7 +1,6 @@
# Copyright (c) 2014-2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Download images from Cloud Storage."""
from __future__ import print_function
@@ -25,16 +24,15 @@ class ImageDownloader(object):
self._logger = logger_to_use
self.log_level = log_level
self._ce = cmd_exec or command_executer.GetCommandExecuter(
- self._logger,
- log_level=self.log_level)
+ self._logger, log_level=self.log_level)
def GetBuildID(self, chromeos_root, xbuddy_label):
# Get the translation of the xbuddy_label into the real Google Storage
# image name.
command = ('cd ~/trunk/src/third_party/toolchain-utils/crosperf; '
"python translate_xbuddy.py '%s'" % xbuddy_label)
- _, build_id_tuple_str, _ = self._ce.ChrootRunCommandWOutput(
- chromeos_root, command)
+ _, build_id_tuple_str, _ = self._ce.ChrootRunCommandWOutput(chromeos_root,
+ command)
if not build_id_tuple_str:
raise MissingImage("Unable to find image for '%s'" % xbuddy_label)
@@ -65,15 +63,15 @@ class ImageDownloader(object):
downloaded_image_name = os.path.join(download_path,
'chromiumos_test_image.tar.xz')
if status != 0 or not os.path.exists(downloaded_image_name):
- raise MissingImage('Cannot download image: %s.'
- % downloaded_image_name)
+ raise MissingImage('Cannot download image: %s.' % downloaded_image_name)
return image_path
def UncompressImage(self, chromeos_root, build_id):
# Check to see if the file has already been uncompressed, etc.
- if os.path.exists(os.path.join(chromeos_root, 'chroot/tmp', build_id,
- 'chromiumos_test_image.bin')):
+ if os.path.exists(
+ os.path.join(chromeos_root, 'chroot/tmp', build_id,
+ 'chromiumos_test_image.bin')):
return
# Uncompress and untar the downloaded image.
diff --git a/crosperf/download_images_unittest.py b/crosperf/download_images_unittest.py
index 912673c7..06ea5bb7 100755
--- a/crosperf/download_images_unittest.py
+++ b/crosperf/download_images_unittest.py
@@ -38,8 +38,8 @@ class ImageDownloaderTestcast(unittest.TestCase):
image_path = ('gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz'
% test_build_id)
- downloader = download_images.ImageDownloader(logger_to_use=MOCK_LOGGER,
- cmd_exec=mock_cmd_exec)
+ downloader = download_images.ImageDownloader(
+ logger_to_use=MOCK_LOGGER, cmd_exec=mock_cmd_exec)
# Set os.path.exists to always return False and run downloader
mock_path_exists.return_value = False
@@ -97,8 +97,8 @@ class ImageDownloaderTestcast(unittest.TestCase):
test_chroot = '/usr/local/home/chromeos'
test_build_id = 'lumpy-release/R36-5814.0.0'
- downloader = download_images.ImageDownloader(logger_to_use=MOCK_LOGGER,
- cmd_exec=mock_cmd_exec)
+ downloader = download_images.ImageDownloader(
+ logger_to_use=MOCK_LOGGER, cmd_exec=mock_cmd_exec)
# Set os.path.exists to always return False and run uncompress.
mock_path_exists.return_value = False
diff --git a/crosperf/experiment_file.py b/crosperf/experiment_file.py
index 87c6f203..fe22bec8 100644
--- a/crosperf/experiment_file.py
+++ b/crosperf/experiment_file.py
@@ -1,15 +1,14 @@
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""The experiment file module. It manages the input file of crosperf."""
-
from __future__ import print_function
import os.path
import re
from settings_factory import SettingsFactory
+
class ExperimentFile(object):
"""Class for parsing the experiment file format.
@@ -116,7 +115,7 @@ class ExperimentFile(object):
new_settings = self._ParseSettings(reader)
if new_settings.name in settings_names:
raise SyntaxError("Duplicate settings name: '%s'." %
- new_settings.name)
+ new_settings.name)
settings_names[new_settings.name] = True
self.all_settings.append(new_settings)
elif ExperimentFile._FIELD_VALUE_RE.match(line):
diff --git a/crosperf/machine_manager_unittest.py b/crosperf/machine_manager_unittest.py
index 2002baa8..b632336b 100755
--- a/crosperf/machine_manager_unittest.py
+++ b/crosperf/machine_manager_unittest.py
@@ -49,8 +49,9 @@ class MyMachineManager(machine_manager.MachineManager):
CHROMEOS_ROOT = '/tmp/chromeos-root'
MACHINE_NAMES = ['lumpy1', 'lumpy2', 'lumpy3', 'daisy1', 'daisy2']
LABEL_LUMPY = label.MockLabel('lumpy', 'lumpy_chromeos_image', CHROMEOS_ROOT,
- 'lumpy', ['lumpy1', 'lumpy2', 'lumpy3', 'lumpy4'],
- '', '', False, 'average,'
+ 'lumpy',
+ ['lumpy1', 'lumpy2', 'lumpy3', 'lumpy4'], '', '',
+ False, 'average,'
'gcc', None)
LABEL_MIX = label.MockLabel('mix', 'chromeos_image', CHROMEOS_ROOT, 'mix',
['daisy1', 'daisy2', 'lumpy3', 'lumpy4'], '', '',
@@ -83,9 +84,10 @@ class MachineManagerTest(unittest.TestCase):
def setUp(self, mock_isdir):
mock_isdir.return_value = True
- self.mm = machine_manager.MachineManager(
- '/usr/local/chromeos', 0, 'average', None, self.mock_cmd_exec,
- self.mock_logger)
+ self.mm = machine_manager.MachineManager('/usr/local/chromeos', 0,
+ 'average', None,
+ self.mock_cmd_exec,
+ self.mock_logger)
self.mock_lumpy1.name = 'lumpy1'
self.mock_lumpy2.name = 'lumpy2'
@@ -204,11 +206,11 @@ class MachineManagerTest(unittest.TestCase):
self.assertEqual(image_call_args[0], 'python')
self.assertEqual(image_call_args[1].split('/')[-1], 'image_chromeos.pyc')
image_call_args = image_call_args[2:]
- self.assertEqual(image_call_args,
- ['--chromeos_root=/tmp/chromeos-root',
- '--image=lumpy_chromeos_image', '--image_args=',
- '--remote=lumpy1', '--logging_level=average',
- '--board=lumpy'])
+ self.assertEqual(image_call_args, [
+ '--chromeos_root=/tmp/chromeos-root', '--image=lumpy_chromeos_image',
+ '--image_args=', '--remote=lumpy1', '--logging_level=average',
+ '--board=lumpy'
+ ])
self.assertEqual(mock_run_croscmd.call_args[0][0], 'reboot && exit')
# Test 4: Everything works properly. Trybot image type.
@@ -343,8 +345,9 @@ class MachineManagerTest(unittest.TestCase):
self.assertEqual(m, self.mock_lumpy1)
self.assertTrue(self.mock_lumpy1.locked)
self.assertEqual(mock_md5.call_count, 0)
- self.assertEqual(self.msgs, ['Tried to lock lumpy1', 'Tried to lock lumpy2',
- 'Tried to lock lumpy3'])
+ self.assertEqual(self.msgs, [
+ 'Tried to lock lumpy1', 'Tried to lock lumpy2', 'Tried to lock lumpy3'
+ ])
# Test the second return statement (machine is unlocked, has no checksum)
save_locked = self.mock_lumpy1.locked
@@ -380,24 +383,24 @@ class MachineManagerTest(unittest.TestCase):
self.assertEqual(machine_list, self.mm._all_machines)
machine_list = self.mm.GetAvailableMachines(LABEL_MIX)
- self.assertEqual(machine_list, [self.mock_daisy1, self.mock_daisy2,
- self.mock_lumpy3])
+ self.assertEqual(machine_list,
+ [self.mock_daisy1, self.mock_daisy2, self.mock_lumpy3])
machine_list = self.mm.GetAvailableMachines(LABEL_LUMPY)
- self.assertEqual(machine_list, [self.mock_lumpy1, self.mock_lumpy2,
- self.mock_lumpy3])
+ self.assertEqual(machine_list,
+ [self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3])
def test_get_machines(self):
machine_list = self.mm.GetMachines()
self.assertEqual(machine_list, self.mm._all_machines)
machine_list = self.mm.GetMachines(LABEL_MIX)
- self.assertEqual(machine_list, [self.mock_daisy1, self.mock_daisy2,
- self.mock_lumpy3])
+ self.assertEqual(machine_list,
+ [self.mock_daisy1, self.mock_daisy2, self.mock_lumpy3])
machine_list = self.mm.GetMachines(LABEL_LUMPY)
- self.assertEqual(machine_list, [self.mock_lumpy1, self.mock_lumpy2,
- self.mock_lumpy3])
+ self.assertEqual(machine_list,
+ [self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3])
def test_release_machines(self):
@@ -442,19 +445,22 @@ class MachineManagerTest(unittest.TestCase):
mock_logger = mock.Mock(spec=logger.Logger)
- bench = Benchmark('page_cycler_v2.netsim.top_10', # name
- 'page_cycler_v2.netsim.top_10', # test_name
- '', # test_args
- 1, # iterations
- False, # rm_chroot_tmp
- '', # perf_args
- suite='telemetry_Crosperf') # suite
+ bench = Benchmark(
+ 'page_cycler_v2.netsim.top_10', # name
+ 'page_cycler_v2.netsim.top_10', # test_name
+ '', # test_args
+ 1, # iterations
+ False, # rm_chroot_tmp
+ '', # perf_args
+ suite='telemetry_Crosperf') # suite
test_run = MockBenchmarkRun('test run', bench, LABEL_LUMPY, 1, [], self.mm,
mock_logger, 'verbose', '')
- self.mm._machines = [self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3,
- self.mock_daisy1, self.mock_daisy2]
+ self.mm._machines = [
+ self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3, self.mock_daisy1,
+ self.mock_daisy2
+ ]
self.mock_lumpy1.test_run = test_run
self.mock_lumpy2.test_run = test_run
diff --git a/crosperf/mock_instance.py b/crosperf/mock_instance.py
index 8d427413..6092a981 100644
--- a/crosperf/mock_instance.py
+++ b/crosperf/mock_instance.py
@@ -40,84 +40,102 @@ benchmark2 = Benchmark('benchmark2', 'autotest_name_2', 'autotest_args', 2, '',
perf_args, '', '')
keyval = {}
-keyval[0] = {'': 'PASS',
- 'milliseconds_1': '1',
- 'milliseconds_2': '8',
- 'milliseconds_3': '9.2',
- 'test{1}': '2',
- 'test{2}': '4',
- 'ms_1': '2.1',
- 'total': '5',
- 'bool': 'True'}
+keyval[0] = {
+ '': 'PASS',
+ 'milliseconds_1': '1',
+ 'milliseconds_2': '8',
+ 'milliseconds_3': '9.2',
+ 'test{1}': '2',
+ 'test{2}': '4',
+ 'ms_1': '2.1',
+ 'total': '5',
+ 'bool': 'True'
+}
-keyval[1] = {'': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_2': '5',
- 'ms_1': '2.2',
- 'total': '6',
- 'test{1}': '3',
- 'test{2}': '4',
- 'bool': 'FALSE'}
+keyval[1] = {
+ '': 'PASS',
+ 'milliseconds_1': '3',
+ 'milliseconds_2': '5',
+ 'ms_1': '2.2',
+ 'total': '6',
+ 'test{1}': '3',
+ 'test{2}': '4',
+ 'bool': 'FALSE'
+}
-keyval[2] = {'': 'PASS',
- 'milliseconds_4': '30',
- 'milliseconds_5': '50',
- 'ms_1': '2.23',
- 'total': '6',
- 'test{1}': '5',
- 'test{2}': '4',
- 'bool': 'FALSE'}
+keyval[2] = {
+ '': 'PASS',
+ 'milliseconds_4': '30',
+ 'milliseconds_5': '50',
+ 'ms_1': '2.23',
+ 'total': '6',
+ 'test{1}': '5',
+ 'test{2}': '4',
+ 'bool': 'FALSE'
+}
-keyval[3] = {'': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_6': '7',
- 'ms_1': '2.3',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '6',
- 'bool': 'FALSE'}
+keyval[3] = {
+ '': 'PASS',
+ 'milliseconds_1': '3',
+ 'milliseconds_6': '7',
+ 'ms_1': '2.3',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '6',
+ 'bool': 'FALSE'
+}
-keyval[4] = {'': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2.3',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '6',
- 'bool': 'TRUE'}
+keyval[4] = {
+ '': 'PASS',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2.3',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '6',
+ 'bool': 'TRUE'
+}
-keyval[5] = {'': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2.2',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '2',
- 'bool': 'TRUE'}
+keyval[5] = {
+ '': 'PASS',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2.2',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '2',
+ 'bool': 'TRUE'
+}
-keyval[6] = {'': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '4',
- 'bool': 'TRUE'}
+keyval[6] = {
+ '': 'PASS',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '4',
+ 'bool': 'TRUE'
+}
-keyval[7] = {'': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '1',
- 'total': '7',
- 'test{1}': '1',
- 'test{2}': '6',
- 'bool': 'TRUE'}
+keyval[7] = {
+ '': 'PASS',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '1',
+ 'total': '7',
+ 'test{1}': '1',
+ 'test{2}': '6',
+ 'bool': 'TRUE'
+}
-keyval[8] = {'': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '3.3',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '8',
- 'bool': 'TRUE'}
+keyval[8] = {
+ '': 'PASS',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '3.3',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '8',
+ 'bool': 'TRUE'
+}
diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py
index 5a824b14..0d0dc990 100755
--- a/crosperf/results_cache_unittest.py
+++ b/crosperf/results_cache_unittest.py
@@ -3,7 +3,6 @@
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Module of result cache unittest."""
from __future__ import print_function
@@ -123,42 +122,44 @@ INFO : Test results:
INFO : Elapsed time: 0m18s
"""
-keyvals = {'': 'PASS',
- 'b_stdio_putcgetc__0_': '0.100005711667',
- 'b_string_strstr___azbycxdwevfugthsirjqkplomn__': '0.0133123556667',
- 'b_malloc_thread_local__0_': '0.01138439',
- 'b_string_strlen__0_': '0.044893587',
- 'b_malloc_sparse__0_': '0.015053784',
- 'b_string_memset__0_': '0.00275405066667',
- 'platform_LibCBench': 'PASS',
- 'b_pthread_uselesslock__0_': '0.0294113346667',
- 'b_string_strchr__0_': '0.00456903',
- 'b_pthread_create_serial1__0_': '0.0291785246667',
- 'b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac__':
- '0.118360778',
- 'b_string_strstr___aaaaaaaaaaaaaacccccccccccc__': '0.0135694476667',
- 'b_pthread_createjoin_serial1__0_': '0.031907936',
- 'b_malloc_thread_stress__0_': '0.0367894733333',
- 'b_regex_search____a_b_c__d_b__': '0.00165455066667',
- 'b_malloc_bubble__0_': '0.015066374',
- 'b_malloc_big2__0_': '0.002951359',
- 'b_stdio_putcgetc_unlocked__0_': '0.0371443833333',
- 'b_pthread_createjoin_serial2__0_': '0.043485347',
- 'b_regex_search___a_25_b__': '0.0496191923333',
- 'b_utf8_bigbuf__0_': '0.0473772253333',
- 'b_malloc_big1__0_': '0.00375231466667',
- 'b_regex_compile____a_b_c__d_b__': '0.00529833933333',
- 'b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaac__': '0.068957325',
- 'b_malloc_tiny2__0_': '0.000581407333333',
- 'b_utf8_onebyone__0_': '0.130938538333',
- 'b_malloc_tiny1__0_': '0.000768474333333',
- 'b_string_strstr___abcdefghijklmnopqrstuvwxyz__': '0.0134553343333'}
+keyvals = {
+ '': 'PASS',
+ 'b_stdio_putcgetc__0_': '0.100005711667',
+ 'b_string_strstr___azbycxdwevfugthsirjqkplomn__': '0.0133123556667',
+ 'b_malloc_thread_local__0_': '0.01138439',
+ 'b_string_strlen__0_': '0.044893587',
+ 'b_malloc_sparse__0_': '0.015053784',
+ 'b_string_memset__0_': '0.00275405066667',
+ 'platform_LibCBench': 'PASS',
+ 'b_pthread_uselesslock__0_': '0.0294113346667',
+ 'b_string_strchr__0_': '0.00456903',
+ 'b_pthread_create_serial1__0_': '0.0291785246667',
+ 'b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac__': '0.118360778',
+ 'b_string_strstr___aaaaaaaaaaaaaacccccccccccc__': '0.0135694476667',
+ 'b_pthread_createjoin_serial1__0_': '0.031907936',
+ 'b_malloc_thread_stress__0_': '0.0367894733333',
+ 'b_regex_search____a_b_c__d_b__': '0.00165455066667',
+ 'b_malloc_bubble__0_': '0.015066374',
+ 'b_malloc_big2__0_': '0.002951359',
+ 'b_stdio_putcgetc_unlocked__0_': '0.0371443833333',
+ 'b_pthread_createjoin_serial2__0_': '0.043485347',
+ 'b_regex_search___a_25_b__': '0.0496191923333',
+ 'b_utf8_bigbuf__0_': '0.0473772253333',
+ 'b_malloc_big1__0_': '0.00375231466667',
+ 'b_regex_compile____a_b_c__d_b__': '0.00529833933333',
+ 'b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaac__': '0.068957325',
+ 'b_malloc_tiny2__0_': '0.000581407333333',
+ 'b_utf8_onebyone__0_': '0.130938538333',
+ 'b_malloc_tiny1__0_': '0.000768474333333',
+ 'b_string_strstr___abcdefghijklmnopqrstuvwxyz__': '0.0134553343333'
+}
TMP_DIR1 = '/tmp/tmpAbcXyz'
class MockResult(Result):
"""Mock result class."""
+
def __init__(self, mylogger, label, logging_level, machine):
super(MockResult, self).__init__(mylogger, label, logging_level, machine)
@@ -253,10 +254,12 @@ class ResultTest(unittest.TestCase):
@mock.patch.object(Result, 'CopyFilesTo')
def test_copy_results_to(self, mockCopyFilesTo):
- perf_data_files = ['/tmp/perf.data.0', '/tmp/perf.data.1',
- '/tmp/perf.data.2']
- perf_report_files = ['/tmp/perf.report.0', '/tmp/perf.report.1',
- '/tmp/perf.report.2']
+ perf_data_files = [
+ '/tmp/perf.data.0', '/tmp/perf.data.1', '/tmp/perf.data.2'
+ ]
+ perf_report_files = [
+ '/tmp/perf.report.0', '/tmp/perf.report.1', '/tmp/perf.report.2'
+ ]
self.result.perf_data_files = perf_data_files
self.result.perf_report_files = perf_report_files
@@ -279,107 +282,114 @@ class ResultTest(unittest.TestCase):
self.result.GetDataMeasurementsFiles = FakeGetDataMeasurementsFiles
kv_dict2, udict = self.result.GetNewKeyvals(kv_dict)
- self.assertEqual(kv_dict2,
- {u'Box2D__Box2D': 4775,
- u'Mandreel__Mandreel': 6620,
- u'Gameboy__Gameboy': 9901,
- u'Crypto__Crypto': 8737,
- u'telemetry_page_measurement_results__num_errored': 0,
- u'telemetry_page_measurement_results__num_failed': 0,
- u'PdfJS__PdfJS': 6455,
- u'Total__Score': 7918,
- u'EarleyBoyer__EarleyBoyer': 14340,
- u'MandreelLatency__MandreelLatency': 5188,
- u'CodeLoad__CodeLoad': 6271,
- u'DeltaBlue__DeltaBlue': 14401,
- u'Typescript__Typescript': 9815,
- u'SplayLatency__SplayLatency': 7653,
- u'zlib__zlib': 16094,
- u'Richards__Richards': 10358,
- u'RegExp__RegExp': 1765,
- u'NavierStokes__NavierStokes': 9815,
- u'Splay__Splay': 4425,
- u'RayTrace__RayTrace': 16600})
- self.assertEqual(
- udict, {u'Box2D__Box2D': u'score',
- u'Mandreel__Mandreel': u'score',
- u'Gameboy__Gameboy': u'score',
- u'Crypto__Crypto': u'score',
- u'telemetry_page_measurement_results__num_errored': u'count',
- u'telemetry_page_measurement_results__num_failed': u'count',
- u'PdfJS__PdfJS': u'score',
- u'Total__Score': u'score',
- u'EarleyBoyer__EarleyBoyer': u'score',
- u'MandreelLatency__MandreelLatency': u'score',
- u'CodeLoad__CodeLoad': u'score',
- u'DeltaBlue__DeltaBlue': u'score',
- u'Typescript__Typescript': u'score',
- u'SplayLatency__SplayLatency': u'score',
- u'zlib__zlib': u'score',
- u'Richards__Richards': u'score',
- u'RegExp__RegExp': u'score',
- u'NavierStokes__NavierStokes': u'score',
- u'Splay__Splay': u'score',
- u'RayTrace__RayTrace': u'score'})
+ self.assertEqual(kv_dict2, {
+ u'Box2D__Box2D': 4775,
+ u'Mandreel__Mandreel': 6620,
+ u'Gameboy__Gameboy': 9901,
+ u'Crypto__Crypto': 8737,
+ u'telemetry_page_measurement_results__num_errored': 0,
+ u'telemetry_page_measurement_results__num_failed': 0,
+ u'PdfJS__PdfJS': 6455,
+ u'Total__Score': 7918,
+ u'EarleyBoyer__EarleyBoyer': 14340,
+ u'MandreelLatency__MandreelLatency': 5188,
+ u'CodeLoad__CodeLoad': 6271,
+ u'DeltaBlue__DeltaBlue': 14401,
+ u'Typescript__Typescript': 9815,
+ u'SplayLatency__SplayLatency': 7653,
+ u'zlib__zlib': 16094,
+ u'Richards__Richards': 10358,
+ u'RegExp__RegExp': 1765,
+ u'NavierStokes__NavierStokes': 9815,
+ u'Splay__Splay': 4425,
+ u'RayTrace__RayTrace': 16600
+ })
+ self.assertEqual(udict, {
+ u'Box2D__Box2D': u'score',
+ u'Mandreel__Mandreel': u'score',
+ u'Gameboy__Gameboy': u'score',
+ u'Crypto__Crypto': u'score',
+ u'telemetry_page_measurement_results__num_errored': u'count',
+ u'telemetry_page_measurement_results__num_failed': u'count',
+ u'PdfJS__PdfJS': u'score',
+ u'Total__Score': u'score',
+ u'EarleyBoyer__EarleyBoyer': u'score',
+ u'MandreelLatency__MandreelLatency': u'score',
+ u'CodeLoad__CodeLoad': u'score',
+ u'DeltaBlue__DeltaBlue': u'score',
+ u'Typescript__Typescript': u'score',
+ u'SplayLatency__SplayLatency': u'score',
+ u'zlib__zlib': u'score',
+ u'Richards__Richards': u'score',
+ u'RegExp__RegExp': u'score',
+ u'NavierStokes__NavierStokes': u'score',
+ u'Splay__Splay': u'score',
+ u'RayTrace__RayTrace': u'score'
+ })
def test_append_telemetry_units(self):
- kv_dict = {u'Box2D__Box2D': 4775,
- u'Mandreel__Mandreel': 6620,
- u'Gameboy__Gameboy': 9901,
- u'Crypto__Crypto': 8737,
- u'PdfJS__PdfJS': 6455,
- u'Total__Score': 7918,
- u'EarleyBoyer__EarleyBoyer': 14340,
- u'MandreelLatency__MandreelLatency': 5188,
- u'CodeLoad__CodeLoad': 6271,
- u'DeltaBlue__DeltaBlue': 14401,
- u'Typescript__Typescript': 9815,
- u'SplayLatency__SplayLatency': 7653,
- u'zlib__zlib': 16094,
- u'Richards__Richards': 10358,
- u'RegExp__RegExp': 1765,
- u'NavierStokes__NavierStokes': 9815,
- u'Splay__Splay': 4425,
- u'RayTrace__RayTrace': 16600}
- units_dict = {u'Box2D__Box2D': u'score',
- u'Mandreel__Mandreel': u'score',
- u'Gameboy__Gameboy': u'score',
- u'Crypto__Crypto': u'score',
- u'PdfJS__PdfJS': u'score',
- u'Total__Score': u'score',
- u'EarleyBoyer__EarleyBoyer': u'score',
- u'MandreelLatency__MandreelLatency': u'score',
- u'CodeLoad__CodeLoad': u'score',
- u'DeltaBlue__DeltaBlue': u'score',
- u'Typescript__Typescript': u'score',
- u'SplayLatency__SplayLatency': u'score',
- u'zlib__zlib': u'score',
- u'Richards__Richards': u'score',
- u'RegExp__RegExp': u'score',
- u'NavierStokes__NavierStokes': u'score',
- u'Splay__Splay': u'score',
- u'RayTrace__RayTrace': u'score'}
+ kv_dict = {
+ u'Box2D__Box2D': 4775,
+ u'Mandreel__Mandreel': 6620,
+ u'Gameboy__Gameboy': 9901,
+ u'Crypto__Crypto': 8737,
+ u'PdfJS__PdfJS': 6455,
+ u'Total__Score': 7918,
+ u'EarleyBoyer__EarleyBoyer': 14340,
+ u'MandreelLatency__MandreelLatency': 5188,
+ u'CodeLoad__CodeLoad': 6271,
+ u'DeltaBlue__DeltaBlue': 14401,
+ u'Typescript__Typescript': 9815,
+ u'SplayLatency__SplayLatency': 7653,
+ u'zlib__zlib': 16094,
+ u'Richards__Richards': 10358,
+ u'RegExp__RegExp': 1765,
+ u'NavierStokes__NavierStokes': 9815,
+ u'Splay__Splay': 4425,
+ u'RayTrace__RayTrace': 16600
+ }
+ units_dict = {
+ u'Box2D__Box2D': u'score',
+ u'Mandreel__Mandreel': u'score',
+ u'Gameboy__Gameboy': u'score',
+ u'Crypto__Crypto': u'score',
+ u'PdfJS__PdfJS': u'score',
+ u'Total__Score': u'score',
+ u'EarleyBoyer__EarleyBoyer': u'score',
+ u'MandreelLatency__MandreelLatency': u'score',
+ u'CodeLoad__CodeLoad': u'score',
+ u'DeltaBlue__DeltaBlue': u'score',
+ u'Typescript__Typescript': u'score',
+ u'SplayLatency__SplayLatency': u'score',
+ u'zlib__zlib': u'score',
+ u'Richards__Richards': u'score',
+ u'RegExp__RegExp': u'score',
+ u'NavierStokes__NavierStokes': u'score',
+ u'Splay__Splay': u'score',
+ u'RayTrace__RayTrace': u'score'
+ }
results_dict = self.result.AppendTelemetryUnits(kv_dict, units_dict)
- self.assertEqual(results_dict,
- {u'Box2D__Box2D': [4775, u'score'],
- u'Splay__Splay': [4425, u'score'],
- u'Gameboy__Gameboy': [9901, u'score'],
- u'Crypto__Crypto': [8737, u'score'],
- u'PdfJS__PdfJS': [6455, u'score'],
- u'Total__Score': [7918, u'score'],
- u'EarleyBoyer__EarleyBoyer': [14340, u'score'],
- u'MandreelLatency__MandreelLatency': [5188, u'score'],
- u'DeltaBlue__DeltaBlue': [14401, u'score'],
- u'SplayLatency__SplayLatency': [7653, u'score'],
- u'Mandreel__Mandreel': [6620, u'score'],
- u'Richards__Richards': [10358, u'score'],
- u'zlib__zlib': [16094, u'score'],
- u'CodeLoad__CodeLoad': [6271, u'score'],
- u'Typescript__Typescript': [9815, u'score'],
- u'RegExp__RegExp': [1765, u'score'],
- u'RayTrace__RayTrace': [16600, u'score'],
- u'NavierStokes__NavierStokes': [9815, u'score']})
+ self.assertEqual(results_dict, {
+ u'Box2D__Box2D': [4775, u'score'],
+ u'Splay__Splay': [4425, u'score'],
+ u'Gameboy__Gameboy': [9901, u'score'],
+ u'Crypto__Crypto': [8737, u'score'],
+ u'PdfJS__PdfJS': [6455, u'score'],
+ u'Total__Score': [7918, u'score'],
+ u'EarleyBoyer__EarleyBoyer': [14340, u'score'],
+ u'MandreelLatency__MandreelLatency': [5188, u'score'],
+ u'DeltaBlue__DeltaBlue': [14401, u'score'],
+ u'SplayLatency__SplayLatency': [7653, u'score'],
+ u'Mandreel__Mandreel': [6620, u'score'],
+ u'Richards__Richards': [10358, u'score'],
+ u'zlib__zlib': [16094, u'score'],
+ u'CodeLoad__CodeLoad': [6271, u'score'],
+ u'Typescript__Typescript': [9815, u'score'],
+ u'RegExp__RegExp': [1765, u'score'],
+ u'RayTrace__RayTrace': [16600, u'score'],
+ u'NavierStokes__NavierStokes': [9815, u'score']
+ })
@mock.patch.object(misc, 'GetInsideChrootPath')
@mock.patch.object(tempfile, 'mkdtemp')
@@ -408,9 +418,9 @@ class ResultTest(unittest.TestCase):
return return_kvdict, return_udict
mock_mkdtemp.return_value = TMP_DIR1
- mock_chrootruncmd.return_value = ['',
- ('%s,PASS\n%s/telemetry_Crosperf,PASS\n')
- % (TMP_DIR1, TMP_DIR1), '']
+ mock_chrootruncmd.return_value = [
+ '', ('%s,PASS\n%s/telemetry_Crosperf,PASS\n') % (TMP_DIR1, TMP_DIR1), ''
+ ]
mock_getpath.return_value = TMP_DIR1
self.result.ce.ChrootRunCommandWOutput = mock_chrootruncmd
self.result.ce.RunCommand = mock_runcmd
@@ -434,9 +444,10 @@ class ResultTest(unittest.TestCase):
# Test 2. self.temp_dir
reset()
- mock_chrootruncmd.return_value = ['',
- ('/tmp/tmpJCajRG,PASS\n/tmp/tmpJCajRG/'
- 'telemetry_Crosperf,PASS\n'), '']
+ mock_chrootruncmd.return_value = [
+ '', ('/tmp/tmpJCajRG,PASS\n/tmp/tmpJCajRG/'
+ 'telemetry_Crosperf,PASS\n'), ''
+ ]
mock_getpath.return_value = '/tmp/tmpJCajRG'
self.result.temp_dir = '/tmp/tmpJCajRG'
res = self.result.GetKeyvals()
@@ -579,8 +590,7 @@ class ResultTest(unittest.TestCase):
self.result.GeneratePerfReportFiles = FakeGetPerfReportFiles
self.result.ProcessResults = FakeProcessResults
- self.result.PopulateFromRun(OUTPUT, '', 0, 'test',
- 'telemetry_Crosperf')
+ self.result.PopulateFromRun(OUTPUT, '', 0, 'test', 'telemetry_Crosperf')
self.assertTrue(self.callGetResultsDir)
self.assertTrue(self.callGetResultsFile)
self.assertTrue(self.callGetPerfDataFiles)
@@ -607,8 +617,7 @@ class ResultTest(unittest.TestCase):
self.result.ProcessResults()
self.assertTrue(self.callGatherPerfResults)
self.assertEqual(len(self.result.keyvals), 2)
- self.assertEqual(self.result.keyvals, {'Total': 10,
- 'retval': 0})
+ self.assertEqual(self.result.keyvals, {'Total': 10, 'retval': 0})
self.result.retval = 1
self.result.ProcessResults()
@@ -630,9 +639,10 @@ class ResultTest(unittest.TestCase):
cache_dir = os.path.join(current_path, 'test_cache/test_input')
self.result.ce = command_executer.GetCommandExecuter(log_level='average')
self.result.ce.ChrootRunCommandWOutput = mock_runchrootcmd
- mock_runchrootcmd.return_value = ['',
- ('%s,PASS\n%s/\telemetry_Crosperf,PASS\n')
- % (TMP_DIR1, TMP_DIR1), '']
+ mock_runchrootcmd.return_value = [
+ '', ('%s,PASS\n%s/\telemetry_Crosperf,PASS\n') % (TMP_DIR1, TMP_DIR1),
+ ''
+ ]
mock_getpath.return_value = TMP_DIR1
self.tmpdir = tempfile.mkdtemp()
save_real_mkdtemp = tempfile.mkdtemp
@@ -640,39 +650,39 @@ class ResultTest(unittest.TestCase):
self.result.PopulateFromCacheDir(cache_dir, 'sunspider',
'telemetry_Crosperf')
- self.assertEqual(
- self.result.keyvals,
- {u'Total__Total': [444.0, u'ms'],
- u'regexp-dna__regexp-dna': [16.2, u'ms'],
- u'telemetry_page_measurement_results__num_failed': [0, u'count'],
- u'telemetry_page_measurement_results__num_errored': [0, u'count'],
- u'string-fasta__string-fasta': [23.2, u'ms'],
- u'crypto-sha1__crypto-sha1': [11.6, u'ms'],
- u'bitops-3bit-bits-in-byte__bitops-3bit-bits-in-byte': [3.2, u'ms'],
- u'access-nsieve__access-nsieve': [7.9, u'ms'],
- u'bitops-nsieve-bits__bitops-nsieve-bits': [9.4, u'ms'],
- u'string-validate-input__string-validate-input': [19.3, u'ms'],
- u'3d-raytrace__3d-raytrace': [24.7, u'ms'],
- u'3d-cube__3d-cube': [28.0, u'ms'],
- u'string-unpack-code__string-unpack-code': [46.7, u'ms'],
- u'date-format-tofte__date-format-tofte': [26.3, u'ms'],
- u'math-partial-sums__math-partial-sums': [22.0, u'ms'],
- '\telemetry_Crosperf': ['PASS', ''],
- u'crypto-aes__crypto-aes': [15.2, u'ms'],
- u'bitops-bitwise-and__bitops-bitwise-and': [8.4, u'ms'],
- u'crypto-md5__crypto-md5': [10.5, u'ms'],
- u'string-tagcloud__string-tagcloud': [52.8, u'ms'],
- u'access-nbody__access-nbody': [8.5, u'ms'],
- 'retval': 0,
- u'math-spectral-norm__math-spectral-norm': [6.6, u'ms'],
- u'math-cordic__math-cordic': [8.7, u'ms'],
- u'access-binary-trees__access-binary-trees': [4.5, u'ms'],
- u'controlflow-recursive__controlflow-recursive': [4.4, u'ms'],
- u'access-fannkuch__access-fannkuch': [17.8, u'ms'],
- u'string-base64__string-base64': [16.0, u'ms'],
- u'date-format-xparb__date-format-xparb': [20.9, u'ms'],
- u'3d-morph__3d-morph': [22.1, u'ms'],
- u'bitops-bits-in-byte__bitops-bits-in-byte': [9.1, u'ms']})
+ self.assertEqual(self.result.keyvals, {
+ u'Total__Total': [444.0, u'ms'],
+ u'regexp-dna__regexp-dna': [16.2, u'ms'],
+ u'telemetry_page_measurement_results__num_failed': [0, u'count'],
+ u'telemetry_page_measurement_results__num_errored': [0, u'count'],
+ u'string-fasta__string-fasta': [23.2, u'ms'],
+ u'crypto-sha1__crypto-sha1': [11.6, u'ms'],
+ u'bitops-3bit-bits-in-byte__bitops-3bit-bits-in-byte': [3.2, u'ms'],
+ u'access-nsieve__access-nsieve': [7.9, u'ms'],
+ u'bitops-nsieve-bits__bitops-nsieve-bits': [9.4, u'ms'],
+ u'string-validate-input__string-validate-input': [19.3, u'ms'],
+ u'3d-raytrace__3d-raytrace': [24.7, u'ms'],
+ u'3d-cube__3d-cube': [28.0, u'ms'],
+ u'string-unpack-code__string-unpack-code': [46.7, u'ms'],
+ u'date-format-tofte__date-format-tofte': [26.3, u'ms'],
+ u'math-partial-sums__math-partial-sums': [22.0, u'ms'],
+ '\telemetry_Crosperf': ['PASS', ''],
+ u'crypto-aes__crypto-aes': [15.2, u'ms'],
+ u'bitops-bitwise-and__bitops-bitwise-and': [8.4, u'ms'],
+ u'crypto-md5__crypto-md5': [10.5, u'ms'],
+ u'string-tagcloud__string-tagcloud': [52.8, u'ms'],
+ u'access-nbody__access-nbody': [8.5, u'ms'],
+ 'retval': 0,
+ u'math-spectral-norm__math-spectral-norm': [6.6, u'ms'],
+ u'math-cordic__math-cordic': [8.7, u'ms'],
+ u'access-binary-trees__access-binary-trees': [4.5, u'ms'],
+ u'controlflow-recursive__controlflow-recursive': [4.4, u'ms'],
+ u'access-fannkuch__access-fannkuch': [17.8, u'ms'],
+ u'string-base64__string-base64': [16.0, u'ms'],
+ u'date-format-xparb__date-format-xparb': [20.9, u'ms'],
+ u'3d-morph__3d-morph': [22.1, u'ms'],
+ u'bitops-bits-in-byte__bitops-bits-in-byte': [9.1, u'ms']
+ })
# Clean up after test.
tempfile.mkdtemp = save_real_mkdtemp
@@ -694,8 +704,7 @@ class ResultTest(unittest.TestCase):
self.assertEqual(mock_runcmd.call_count, 2)
self.assertEqual(mock_runcmd.call_args_list[0][0],
('rm -rf test_results_dir',))
- self.assertEqual(mock_runcmd.call_args_list[1][0],
- ('rm -rf testtemp_dir',))
+ self.assertEqual(mock_runcmd.call_args_list[1][0], ('rm -rf testtemp_dir',))
# Test 2. Same, except that the results_dir name does not contain
# 'test_that_results_'
@@ -710,8 +719,7 @@ class ResultTest(unittest.TestCase):
self.assertEqual(mock_runcmd.call_count, 2)
self.assertEqual(mock_runcmd.call_args_list[0][0],
('rm -rf /tmp/tmp_AbcXyz',))
- self.assertEqual(mock_runcmd.call_args_list[1][0],
- ('rm -rf testtemp_dir',))
+ self.assertEqual(mock_runcmd.call_args_list[1][0], ('rm -rf testtemp_dir',))
# Test 3. mock_getroot returns nothing; 'rm_chroot_tmp' is False.
mock_getroot.reset_mock()
@@ -719,8 +727,7 @@ class ResultTest(unittest.TestCase):
self.result.CleanUp(False)
self.assertEqual(mock_getroot.call_count, 0)
self.assertEqual(mock_runcmd.call_count, 1)
- self.assertEqual(mock_runcmd.call_args_list[0][0],
- ('rm -rf testtemp_dir',))
+ self.assertEqual(mock_runcmd.call_args_list[0][0], ('rm -rf testtemp_dir',))
# Test 4. 'rm_chroot_tmp' is True, but result_dir & temp_dir are None.
mock_getroot.reset_mock()
@@ -868,7 +875,8 @@ TELEMETRY_RESULT_KEYVALS = {
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'bitops-3bit-bits-in-byte (ms)':
'3.5',
- 'retval': 0,
+ 'retval':
+ 0,
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'string-unpack-code (ms)':
'45.0'
@@ -888,9 +896,9 @@ class TelemetryResultTest(unittest.TestCase):
self.result = None
self.mock_logger = mock.Mock(spec=logger.Logger)
self.mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
- self.mock_label = MockLabel('mock_label', 'chromeos_image', '/tmp',
- 'lumpy', 'remote', 'image_args', 'cache_dir',
- 'average', 'gcc', None)
+ self.mock_label = MockLabel('mock_label', 'chromeos_image', '/tmp', 'lumpy',
+ 'remote', 'image_args', 'cache_dir', 'average',
+ 'gcc', None)
self.mock_machine = machine_manager.MockCrosMachine('falco.cros',
'/tmp/chromeos',
'average')
@@ -946,24 +954,24 @@ class ResultsCacheTest(unittest.TestCase):
'average', '')
mock_mm.machine_checksum_string['mock_label'] = 'fake_machine_checksum123'
- self.results_cache.Init(self.mock_label.chromeos_image,
- self.mock_label.chromeos_root,
- 'sunspider',
- 1, # benchmark_run.iteration,
- '', # benchmark_run.test_args,
- '', # benchmark_run.profiler_args,
- mock_mm,
- mock_machine,
- self.mock_label.board,
- [CacheConditions.CACHE_FILE_EXISTS,
- CacheConditions.CHECKSUMS_MATCH],
- self.mock_logger,
- 'average',
- self.mock_label,
- '', # benchmark_run.share_cache
- 'telemetry_Crosperf',
- True, # benchmark_run.show_all_results
- False) # benchmark_run.run_local
+ self.results_cache.Init(
+ self.mock_label.chromeos_image,
+ self.mock_label.chromeos_root,
+ 'sunspider',
+ 1, # benchmark_run.iteration,
+ '', # benchmark_run.test_args,
+ '', # benchmark_run.profiler_args,
+ mock_mm,
+ mock_machine,
+ self.mock_label.board,
+ [CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH],
+ self.mock_logger,
+ 'average',
+ self.mock_label,
+ '', # benchmark_run.share_cache
+ 'telemetry_Crosperf',
+ True, # benchmark_run.show_all_results
+ False) # benchmark_run.run_local
@mock.patch.object(image_checksummer.ImageChecksummer, 'Checksum')
def test_get_cache_dir_for_write(self, mock_checksum):
@@ -971,10 +979,12 @@ class ResultsCacheTest(unittest.TestCase):
def FakeGetMachines(label):
if label:
pass
- m1 = machine_manager.MockCrosMachine(
- 'lumpy1.cros', self.results_cache.chromeos_root, 'average')
- m2 = machine_manager.MockCrosMachine(
- 'lumpy2.cros', self.results_cache.chromeos_root, 'average')
+ m1 = machine_manager.MockCrosMachine('lumpy1.cros',
+ self.results_cache.chromeos_root,
+ 'average')
+ m2 = machine_manager.MockCrosMachine('lumpy2.cros',
+ self.results_cache.chromeos_root,
+ 'average')
return [m1, m2]
mock_checksum.return_value = 'FakeImageChecksumabc123'
@@ -1016,10 +1026,12 @@ class ResultsCacheTest(unittest.TestCase):
def FakeGetMachines(label):
if label:
pass
- m1 = machine_manager.MockCrosMachine(
- 'lumpy1.cros', self.results_cache.chromeos_root, 'average')
- m2 = machine_manager.MockCrosMachine(
- 'lumpy2.cros', self.results_cache.chromeos_root, 'average')
+ m1 = machine_manager.MockCrosMachine('lumpy1.cros',
+ self.results_cache.chromeos_root,
+ 'average')
+ m2 = machine_manager.MockCrosMachine('lumpy2.cros',
+ self.results_cache.chromeos_root,
+ 'average')
return [m1, m2]
mock_checksum.return_value = 'FakeImageChecksumabc123'
@@ -1104,8 +1116,9 @@ class ResultsCacheTest(unittest.TestCase):
self.results_cache.GetCacheDirForWrite = FakeGetCacheDirForWrite
mock_isdir.return_value = True
- save_cc = [CacheConditions.CACHE_FILE_EXISTS,
- CacheConditions.CHECKSUMS_MATCH]
+ save_cc = [
+ CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH
+ ]
self.results_cache.cache_conditions.append(CacheConditions.FALSE)
# Test 1. CacheCondition.FALSE, which means do not read from the cache.
diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py
index 372c7ff3..3bda6139 100644
--- a/crosperf/settings_factory.py
+++ b/crosperf/settings_factory.py
@@ -1,4 +1,3 @@
-
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -18,29 +17,38 @@ class BenchmarkSettings(Settings):
def __init__(self, name):
super(BenchmarkSettings, self).__init__(name, 'benchmark')
- self.AddField(TextField('test_name',
- description='The name of the test to run. '
- 'Defaults to the name of the benchmark.'))
- self.AddField(TextField('test_args',
- description='Arguments to be passed to the '
- 'test.'))
- self.AddField(IntegerField('iterations',
- default=1,
- description='Number of iterations to run the '
- 'test.'))
- self.AddField(TextField('suite',
- default='',
- description='The type of the benchmark.'))
- self.AddField(IntegerField('retries',
- default=0,
- description='Number of times to retry a '
- 'benchmark run.'))
- self.AddField(BooleanField('run_local',
- description='Run benchmark harness on the DUT. '
- 'Currently only compatible with the suite: '
- 'telemetry_Crosperf.',
- required=False,
- default=True))
+ self.AddField(
+ TextField(
+ 'test_name',
+ description='The name of the test to run. '
+ 'Defaults to the name of the benchmark.'))
+ self.AddField(
+ TextField(
+ 'test_args', description='Arguments to be passed to the '
+ 'test.'))
+ self.AddField(
+ IntegerField(
+ 'iterations',
+ default=1,
+ description='Number of iterations to run the '
+ 'test.'))
+ self.AddField(
+ TextField(
+ 'suite', default='', description='The type of the benchmark.'))
+ self.AddField(
+ IntegerField(
+ 'retries',
+ default=0,
+ description='Number of times to retry a '
+ 'benchmark run.'))
+ self.AddField(
+ BooleanField(
+ 'run_local',
+ description='Run benchmark harness on the DUT. '
+ 'Currently only compatible with the suite: '
+ 'telemetry_Crosperf.',
+ required=False,
+ default=True))
class LabelSettings(Settings):
@@ -48,46 +56,61 @@ class LabelSettings(Settings):
def __init__(self, name):
super(LabelSettings, self).__init__(name, 'label')
- self.AddField(TextField('chromeos_image',
- required=False,
- description='The path to the image to run tests '
- 'on, for local/custom-built images. See the '
- "'build' option for official or trybot images."))
- self.AddField(TextField('chromeos_root',
- description='The path to a chromeos checkout which '
- 'contains a src/scripts directory. Defaults to '
- 'the chromeos checkout which contains the '
- 'chromeos_image.'))
- self.AddField(
- ListField('remote',
- description='A comma-separated list of IPs of chromeos '
- 'devices to run experiments on.'))
- self.AddField(TextField('image_args',
- required=False,
- default='',
- description='Extra arguments to pass to '
- 'image_chromeos.py.'))
- self.AddField(TextField('cache_dir',
- default='',
- description='The cache dir for this image.'))
- self.AddField(TextField('compiler',
- default='gcc',
- description='The compiler used to build the '
- 'ChromeOS image (gcc or llvm).'))
- self.AddField(TextField('chrome_src',
- description='The path to the source of chrome. '
- 'This is used to run telemetry benchmarks. '
- 'The default one is the src inside chroot.',
- required=False,
- default=''))
- self.AddField(TextField('build',
- description='The xbuddy specification for an '
- 'official or trybot image to use for tests. '
- "'/remote' is assumed, and the board is given "
- "elsewhere, so omit the '/remote/<board>/' xbuddy "
- 'prefix.',
- required=False,
- default=''))
+ self.AddField(
+ TextField(
+ 'chromeos_image',
+ required=False,
+ description='The path to the image to run tests '
+ 'on, for local/custom-built images. See the '
+ "'build' option for official or trybot images."))
+ self.AddField(
+ TextField(
+ 'chromeos_root',
+ description='The path to a chromeos checkout which '
+ 'contains a src/scripts directory. Defaults to '
+ 'the chromeos checkout which contains the '
+ 'chromeos_image.'))
+ self.AddField(
+ ListField(
+ 'remote',
+ description='A comma-separated list of IPs of chromeos '
+ 'devices to run experiments on.'))
+ self.AddField(
+ TextField(
+ 'image_args',
+ required=False,
+ default='',
+ description='Extra arguments to pass to '
+ 'image_chromeos.py.'))
+ self.AddField(
+ TextField(
+ 'cache_dir',
+ default='',
+ description='The cache dir for this image.'))
+ self.AddField(
+ TextField(
+ 'compiler',
+ default='gcc',
+ description='The compiler used to build the '
+ 'ChromeOS image (gcc or llvm).'))
+ self.AddField(
+ TextField(
+ 'chrome_src',
+ description='The path to the source of chrome. '
+ 'This is used to run telemetry benchmarks. '
+ 'The default one is the src inside chroot.',
+ required=False,
+ default=''))
+ self.AddField(
+ TextField(
+ 'build',
+ description='The xbuddy specification for an '
+ 'official or trybot image to use for tests. '
+ "'/remote' is assumed, and the board is given "
+ "elsewhere, so omit the '/remote/<board>/' xbuddy "
+ 'prefix.',
+ required=False,
+ default=''))
class GlobalSettings(Settings):
@@ -95,116 +118,164 @@ class GlobalSettings(Settings):
def __init__(self, name):
super(GlobalSettings, self).__init__(name, 'global')
- self.AddField(TextField('name',
- description='The name of the experiment. Just an '
- 'identifier.'))
- self.AddField(TextField('board',
- description='The target board for running '
- 'experiments on, e.g. x86-alex.'))
- self.AddField(ListField('remote',
- description="A comma-separated list of IPs of "
- 'chromeos devices to run experiments on.'))
- self.AddField(BooleanField('rerun_if_failed',
- description='Whether to re-run failed test runs '
- 'or not.',
- default=False))
- self.AddField(BooleanField('rm_chroot_tmp',
- default=False,
- description='Whether to remove the test_that '
- 'result in the chroot.'))
- self.AddField(ListField('email',
- description='Space-separated list of email '
- 'addresses to send email to.'))
- self.AddField(BooleanField('rerun',
- description='Whether to ignore the cache and '
- 'for tests to be re-run.',
- default=False))
- self.AddField(BooleanField('same_specs',
- default=True,
- description='Ensure cached runs are run on the '
- 'same kind of devices which are specified as a '
- 'remote.'))
- self.AddField(BooleanField('same_machine',
- default=False,
- description='Ensure cached runs are run on the '
- 'same remote.'))
- self.AddField(BooleanField('use_file_locks',
- default=False,
- description='Whether to use the file locks '
- 'mechanism (deprecated) instead of the AFE '
- 'server lock mechanism.'))
- self.AddField(IntegerField('iterations',
- default=1,
- description='Number of iterations to run all '
- 'tests.'))
- self.AddField(TextField('chromeos_root',
- description='The path to a chromeos checkout which '
- 'contains a src/scripts directory. Defaults to '
- 'the chromeos checkout which contains the '
- 'chromeos_image.'))
- self.AddField(TextField('logging_level',
- default='average',
- description='The level of logging desired. '
- "Options are 'quiet', 'average', and 'verbose'."))
- self.AddField(IntegerField('acquire_timeout',
- default=0,
- description='Number of seconds to wait for '
- 'machine before exit if all the machines in '
- 'the experiment file are busy. Default is 0.'))
- self.AddField(TextField('perf_args',
- default='',
- description='The optional profile command. It '
- 'enables perf commands to record performance '
- 'related counters. It must start with perf '
- 'command record or stat followed by arguments.'))
- self.AddField(TextField('cache_dir',
- default='',
- description='The abs path of cache dir. '
- 'Default is /home/$(whoami)/cros_scratch.'))
- self.AddField(BooleanField('cache_only',
- default=False,
- description='Whether to use only cached '
- 'results (do not rerun failed tests).'))
- self.AddField(BooleanField('no_email',
- default=False,
- description='Whether to disable the email to '
- 'user after crosperf finishes.'))
- self.AddField(BooleanField('json_report',
- default=False,
- description='Whether to generate a json version '
- 'of the report, for archiving.'))
- self.AddField(BooleanField('show_all_results',
- default=False,
- description='When running Telemetry tests, '
- 'whether to show all the results, instead of just '
- 'the default (summary) results.'))
- self.AddField(TextField('share_cache',
- default='',
- description='Path to alternate cache whose data '
- 'you want to use. It accepts multiple directories '
- 'separated by a ",".'))
- self.AddField(TextField('results_dir',
- default='',
- description='The results dir.'))
- self.AddField(TextField('locks_dir',
- default='',
- description='An alternate directory to use for '
- 'storing/checking machine locks. Using this field '
- 'automatically sets use_file_locks to True.\n'
- 'WARNING: If you use your own locks directory, '
- 'there is no guarantee that someone else might not '
- 'hold a lock on the same machine in a different '
- 'locks directory.'))
- self.AddField(TextField('chrome_src',
- description='The path to the source of chrome. '
- 'This is used to run telemetry benchmarks. '
- 'The default one is the src inside chroot.',
- required=False,
- default=''))
- self.AddField(IntegerField('retries',
- default=0,
- description='Number of times to retry a '
- 'benchmark run.'))
+ self.AddField(
+ TextField(
+ 'name',
+ description='The name of the experiment. Just an '
+ 'identifier.'))
+ self.AddField(
+ TextField(
+ 'board',
+ description='The target board for running '
+ 'experiments on, e.g. x86-alex.'))
+ self.AddField(
+ ListField(
+ 'remote',
+ description='A comma-separated list of IPs of '
+ 'chromeos devices to run experiments on.'))
+ self.AddField(
+ BooleanField(
+ 'rerun_if_failed',
+ description='Whether to re-run failed test runs '
+ 'or not.',
+ default=False))
+ self.AddField(
+ BooleanField(
+ 'rm_chroot_tmp',
+ default=False,
+ description='Whether to remove the test_that '
+ 'result in the chroot.'))
+ self.AddField(
+ ListField(
+ 'email',
+ description='Space-separated list of email '
+ 'addresses to send email to.'))
+ self.AddField(
+ BooleanField(
+ 'rerun',
+ description='Whether to ignore the cache and '
+ 'force tests to be re-run.',
+ default=False))
+ self.AddField(
+ BooleanField(
+ 'same_specs',
+ default=True,
+ description='Ensure cached runs are run on the '
+ 'same kind of devices as those specified in the '
+ 'remote field.'))
+ self.AddField(
+ BooleanField(
+ 'same_machine',
+ default=False,
+ description='Ensure cached runs are run on the '
+ 'same remote.'))
+ self.AddField(
+ BooleanField(
+ 'use_file_locks',
+ default=False,
+ description='Whether to use the file locks '
+ 'mechanism (deprecated) instead of the AFE '
+ 'server lock mechanism.'))
+ self.AddField(
+ IntegerField(
+ 'iterations',
+ default=1,
+ description='Number of iterations to run all '
+ 'tests.'))
+ self.AddField(
+ TextField(
+ 'chromeos_root',
+ description='The path to a chromeos checkout which '
+ 'contains a src/scripts directory. Defaults to '
+ 'the chromeos checkout which contains the '
+ 'chromeos_image.'))
+ self.AddField(
+ TextField(
+ 'logging_level',
+ default='average',
+ description='The level of logging desired. '
+ "Options are 'quiet', 'average', and 'verbose'."))
+ self.AddField(
+ IntegerField(
+ 'acquire_timeout',
+ default=0,
+ description='Number of seconds to wait for a '
+ 'machine before exiting if all the machines in '
+ 'the experiment file are busy. Default is 0.'))
+ self.AddField(
+ TextField(
+ 'perf_args',
+ default='',
+ description='The optional profile command. It '
+ 'enables perf commands to record performance '
+ 'related counters. It must start with perf '
+ 'command record or stat followed by arguments.'))
+ self.AddField(
+ TextField(
+ 'cache_dir',
+ default='',
+ description='The absolute path of the cache dir. '
+ 'Default is /home/$(whoami)/cros_scratch.'))
+ self.AddField(
+ BooleanField(
+ 'cache_only',
+ default=False,
+ description='Whether to use only cached '
+ 'results (do not rerun failed tests).'))
+ self.AddField(
+ BooleanField(
+ 'no_email',
+ default=False,
+ description='Whether to disable the email to '
+ 'the user after crosperf finishes.'))
+ self.AddField(
+ BooleanField(
+ 'json_report',
+ default=False,
+ description='Whether to generate a json version '
+ 'of the report, for archiving.'))
+ self.AddField(
+ BooleanField(
+ 'show_all_results',
+ default=False,
+ description='When running Telemetry tests, '
+ 'whether to show all the results, instead of just '
+ 'the default (summary) results.'))
+ self.AddField(
+ TextField(
+ 'share_cache',
+ default='',
+ description='Path to alternate cache whose data '
+ 'you want to use. It accepts multiple directories '
+ 'separated by a ",".'))
+ self.AddField(
+ TextField(
+ 'results_dir', default='', description='The results dir.'))
+ self.AddField(
+ TextField(
+ 'locks_dir',
+ default='',
+ description='An alternate directory to use for '
+ 'storing/checking machine locks. Using this field '
+ 'automatically sets use_file_locks to True.\n'
+ 'WARNING: If you use your own locks directory, '
+ 'there is no guarantee that someone else does not '
+ 'hold a lock on the same machine in a different '
+ 'locks directory.'))
+ self.AddField(
+ TextField(
+ 'chrome_src',
+ description='The path to the source of chrome. '
+ 'This is used to run telemetry benchmarks. '
+ 'The default one is the src inside chroot.',
+ required=False,
+ default=''))
+ self.AddField(
+ IntegerField(
+ 'retries',
+ default=0,
+ description='Number of times to retry a '
+ 'benchmark run.'))
class SettingsFactory(object):
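
Editor's note: each AddField call above registers a typed, self-describing option. A minimal sketch of how such a declarative field could be modeled; the Field base class and its _Parse hook here are hypothetical illustrations of the pattern, not the actual crosperf field implementation.

class Field(object):
  """Hypothetical sketch of a declarative settings field (not crosperf's)."""

  def __init__(self, name, required=False, default=None, description=''):
    self.name = name
    self.required = required
    self.default = default
    self.description = description
    self.value = default

  def Set(self, raw_value):
    # Each subclass decides how to parse the raw text from the
    # experiment file.
    self.value = self._Parse(raw_value)

  def _Parse(self, raw_value):
    return raw_value


class BooleanField(Field):

  def _Parse(self, raw_value):
    # Accept the common textual spellings of a boolean.
    return str(raw_value).lower() in ('true', '1', 'yes')


# Usage mirroring the AddField calls above:
cache_only = BooleanField(
    'cache_only',
    default=False,
    description='Whether to use only cached results.')
cache_only.Set('true')
assert cache_only.value is True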
diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
index df81ff2d..9824a085 100644
--- a/crosperf/suite_runner.py
+++ b/crosperf/suite_runner.py
@@ -53,8 +53,7 @@ class SuiteRunner(object):
self._logger = logger_to_use
self.log_level = log_level
self._ce = cmd_exec or command_executer.GetCommandExecuter(
- self._logger,
- log_level=self.log_level)
+ self._logger, log_level=self.log_level)
self._ct = cmd_term or command_executer.CommandTerminator()
def Run(self, machine, label, benchmark, test_args, profiler_args):
@@ -91,9 +90,7 @@ class SuiteRunner(object):
' cat scaling_max_freq ; '
'fi')
ret, freqs_str, _ = self._ce.CrosRunCommandWOutput(
- get_avail_freqs,
- machine=machine_name,
- chromeos_root=chromeos_root)
+ get_avail_freqs, machine=machine_name, chromeos_root=chromeos_root)
self._logger.LogFatalIf(ret, 'Could not get available frequencies '
'from machine: %s' % machine_name)
freqs = freqs_str.split()
@@ -116,45 +113,44 @@ class SuiteRunner(object):
highest_freq = self.GetHighestStaticFrequency(machine_name, chromeos_root)
BASH_FOR = 'for f in {list}; do {body}; done'
CPUFREQ_DIRS = '/sys/devices/system/cpu/cpu*/cpufreq/'
- change_max_freq = BASH_FOR.format(list=CPUFREQ_DIRS + 'scaling_max_freq',
- body='echo %s > $f' % highest_freq)
- change_min_freq = BASH_FOR.format(list=CPUFREQ_DIRS + 'scaling_min_freq',
- body='echo %s > $f' % highest_freq)
- change_perf_gov = BASH_FOR.format(list=CPUFREQ_DIRS + 'scaling_governor',
- body='echo performance > $f')
+ change_max_freq = BASH_FOR.format(
+ list=CPUFREQ_DIRS + 'scaling_max_freq',
+ body='echo %s > $f' % highest_freq)
+ change_min_freq = BASH_FOR.format(
+ list=CPUFREQ_DIRS + 'scaling_min_freq',
+ body='echo %s > $f' % highest_freq)
+ change_perf_gov = BASH_FOR.format(
+ list=CPUFREQ_DIRS + 'scaling_governor', body='echo performance > $f')
if self.log_level == 'average':
self._logger.LogOutput('Pinning governor execution frequencies for %s' %
machine_name)
- ret = self._ce.CrosRunCommand(' && '.join((
- 'set -e ', change_max_freq, change_min_freq, change_perf_gov)),
- machine=machine_name,
- chromeos_root=chromeos_root)
+ ret = self._ce.CrosRunCommand(
+ ' && '.join(('set -e ', change_max_freq, change_min_freq,
+ change_perf_gov)),
+ machine=machine_name,
+ chromeos_root=chromeos_root)
self._logger.LogFatalIf(ret, 'Could not pin frequencies on machine: %s' %
machine_name)
def DecreaseWaitTime(self, machine_name, chromeos_root):
"""Change the ten seconds wait time for pagecycler to two seconds."""
FILE = '/usr/local/telemetry/src/tools/perf/page_sets/page_cycler_story.py'
- ret = self._ce.CrosRunCommand('ls ' + FILE,
- machine=machine_name,
- chromeos_root=chromeos_root)
+ ret = self._ce.CrosRunCommand(
+ 'ls ' + FILE, machine=machine_name, chromeos_root=chromeos_root)
self._logger.LogFatalIf(ret, 'Could not find {} on machine: {}'.format(
FILE, machine_name))
if not ret:
sed_command = 'sed -i "s/_TTI_WAIT_TIME = 10/_TTI_WAIT_TIME = 2/g" '
- ret = self._ce.CrosRunCommand(sed_command + FILE,
- machine=machine_name,
- chromeos_root=chromeos_root)
+ ret = self._ce.CrosRunCommand(
+ sed_command + FILE, machine=machine_name, chromeos_root=chromeos_root)
self._logger.LogFatalIf(ret, 'Could not modify {} on machine: {}'.format(
FILE, machine_name))
-
def RebootMachine(self, machine_name, chromeos_root):
command = 'reboot && exit'
- self._ce.CrosRunCommand(command,
- machine=machine_name,
- chromeos_root=chromeos_root)
+ self._ce.CrosRunCommand(
+ command, machine=machine_name, chromeos_root=chromeos_root)
time.sleep(60)
# Whenever we reboot the machine, we need to restore the governor settings.
self.PinGovernorExecutionFrequencies(machine_name, chromeos_root)
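
Editor's note: the PinGovernorExecutionFrequencies hunk above builds one shell command from a for-loop template and joins the pieces with ' && ' behind 'set -e'. A standalone sketch of that composition follows; crosperf sends the resulting string to the device via CrosRunCommand rather than running it locally, and the frequency value is only illustrative.

BASH_FOR = 'for f in {list}; do {body}; done'
CPUFREQ_DIRS = '/sys/devices/system/cpu/cpu*/cpufreq/'


def BuildPinCommand(highest_freq):
  """Compose the frequency-pinning command the way the hunk above does."""
  change_max_freq = BASH_FOR.format(
      list=CPUFREQ_DIRS + 'scaling_max_freq',
      body='echo %s > $f' % highest_freq)
  change_min_freq = BASH_FOR.format(
      list=CPUFREQ_DIRS + 'scaling_min_freq',
      body='echo %s > $f' % highest_freq)
  change_perf_gov = BASH_FOR.format(
      list=CPUFREQ_DIRS + 'scaling_governor', body='echo performance > $f')
  # 'set -e' makes the joined command stop at the first failure.
  return ' && '.join(
      ('set -e ', change_max_freq, change_min_freq, change_perf_gov))


print(BuildPinCommand('1666000'))  # 1666000 is the value the unit test uses.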
@@ -169,9 +165,8 @@ class SuiteRunner(object):
if profiler_args:
self._logger.LogFatal('test_that does not support profiler.')
command = 'rm -rf /usr/local/autotest/results/*'
- self._ce.CrosRunCommand(command,
- machine=machine,
- chromeos_root=label.chromeos_root)
+ self._ce.CrosRunCommand(
+ command, machine=machine, chromeos_root=label.chromeos_root)
# We do this because some tests leave the machine in weird states.
# Rebooting between iterations has proven to help with this.
@@ -186,10 +181,11 @@ class SuiteRunner(object):
# Use --no-ns-pid so that cros_sdk does not create a different
# process namespace and we can kill process created easily by
# their process group.
- return self._ce.ChrootRunCommandWOutput(label.chromeos_root,
- command,
- command_terminator=self._ct,
- cros_sdk_options='--no-ns-pid')
+ return self._ce.ChrootRunCommandWOutput(
+ label.chromeos_root,
+ command,
+ command_terminator=self._ct,
+ cros_sdk_options='--no-ns-pid')
def RemoveTelemetryTempFile(self, machine, chromeos_root):
filename = 'telemetry@%s' % machine
@@ -227,10 +223,11 @@ class SuiteRunner(object):
args_string = "test_args='%s'" % test_args
cmd = ('{} {} {} --board={} --args="{} run_local={} test={} '
- '{}" {} telemetry_Crosperf'.format(
- TEST_THAT_PATH, autotest_dir_arg, fast_arg, label.board,
- args_string, benchmark.run_local, benchmark.test_name,
- profiler_args, machine))
+ '{}" {} telemetry_Crosperf'.format(TEST_THAT_PATH, autotest_dir_arg,
+ fast_arg, label.board,
+ args_string, benchmark.run_local,
+ benchmark.test_name,
+ profiler_args, machine))
# Use --no-ns-pid so that cros_sdk does not create a different
# process namespace and we can kill process created easily by their
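
Editor's note: the telemetry_Crosperf invocation assembled in the last hunk reduces to a single .format call over a fixed template. A sketch of that assembly with placeholder inputs; the TEST_THAT_PATH value below is an assumption, since this diff does not show the constant's definition.

TEST_THAT_PATH = '/usr/bin/test_that'  # Placeholder; not shown in this diff.


def BuildTestThatCommand(board, machine, test_args, run_local, test_name,
                         profiler_args, autotest_dir_arg='', fast_arg=''):
  """Mirror the command template used in the hunk above."""
  args_string = "test_args='%s'" % test_args
  return ('{} {} {} --board={} --args="{} run_local={} test={} '
          '{}" {} telemetry_Crosperf'.format(TEST_THAT_PATH, autotest_dir_arg,
                                             fast_arg, board, args_string,
                                             run_local, test_name,
                                             profiler_args, machine))


print(BuildTestThatCommand('lumpy', 'lumpy1.cros', '', False, 'octane', ''))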
diff --git a/crosperf/suite_runner_unittest.py b/crosperf/suite_runner_unittest.py
index 176ef5cf..0b184934 100755
--- a/crosperf/suite_runner_unittest.py
+++ b/crosperf/suite_runner_unittest.py
@@ -31,30 +31,33 @@ class SuiteRunnerTest(unittest.TestCase):
mock_label = label.MockLabel('lumpy', 'lumpy_chromeos_image', '/tmp/chromeos',
'lumpy', ['lumpy1.cros', 'lumpy.cros2'], '', '',
False, 'average', 'gcc', '')
- telemetry_crosperf_bench = Benchmark('b1_test', # name
- 'octane', # test_name
- '', # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- 'record -e cycles', # perf_args
- 'telemetry_Crosperf', # suite
- True) # show_all_results
-
- test_that_bench = Benchmark('b2_test', # name
- 'octane', # test_name
- '', # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- 'record -e cycles') # perf_args
-
- telemetry_bench = Benchmark('b3_test', # name
- 'octane', # test_name
- '', # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- 'record -e cycles', # perf_args
- 'telemetry', # suite
- False) # show_all_results
+ telemetry_crosperf_bench = Benchmark(
+ 'b1_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles', # perf_args
+ 'telemetry_Crosperf', # suite
+ True) # show_all_results
+
+ test_that_bench = Benchmark(
+ 'b2_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles') # perf_args
+
+ telemetry_bench = Benchmark(
+ 'b3_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles', # perf_args
+ 'telemetry', # suite
+ False) # show_all_results
def __init__(self, *args, **kwargs):
super(SuiteRunnerTest, self).__init__(*args, **kwargs)
@@ -68,8 +71,9 @@ class SuiteRunnerTest(unittest.TestCase):
self.call_telemetry_run = False
def setUp(self):
- self.runner = suite_runner.SuiteRunner(
- self.mock_logger, 'verbose', self.mock_cmd_exec, self.mock_cmd_term)
+ self.runner = suite_runner.SuiteRunner(self.mock_logger, 'verbose',
+ self.mock_cmd_exec,
+ self.mock_cmd_term)
def test_get_profiler_args(self):
input_str = ('--profiler=custom_perf --profiler_args=\'perf_options'
@@ -102,16 +106,17 @@ class SuiteRunnerTest(unittest.TestCase):
def FakeTelemetryCrosperfRun(machine, test_label, benchmark, test_args,
profiler_args):
- self.telemetry_crosperf_args = [machine, test_label, benchmark, test_args,
- profiler_args]
+ self.telemetry_crosperf_args = [
+ machine, test_label, benchmark, test_args, profiler_args
+ ]
self.call_telemetry_crosperf_run = True
return 'Ran FakeTelemetryCrosperfRun'
- def FakeTestThatRun(machine, test_label, benchmark,
- test_args, profiler_args):
- self.test_that_args = [machine, test_label, benchmark,
- test_args, profiler_args
- ]
+ def FakeTestThatRun(machine, test_label, benchmark, test_args,
+ profiler_args):
+ self.test_that_args = [
+ machine, test_label, benchmark, test_args, profiler_args
+ ]
self.call_test_that_run = True
return 'Ran FakeTestThatRun'
@@ -124,8 +129,8 @@ class SuiteRunnerTest(unittest.TestCase):
test_args = ''
profiler_args = ''
reset()
- self.runner.Run(machine, self.mock_label, self.telemetry_bench,
- test_args, profiler_args)
+ self.runner.Run(machine, self.mock_label, self.telemetry_bench, test_args,
+ profiler_args)
self.assertTrue(self.call_pin_governor)
self.assertTrue(self.call_telemetry_run)
self.assertFalse(self.call_test_that_run)
@@ -135,26 +140,26 @@ class SuiteRunnerTest(unittest.TestCase):
['fake_machine', self.mock_label, self.telemetry_bench, ''])
reset()
- self.runner.Run(machine, self.mock_label, self.test_that_bench,
- test_args, profiler_args)
+ self.runner.Run(machine, self.mock_label, self.test_that_bench, test_args,
+ profiler_args)
self.assertTrue(self.call_pin_governor)
self.assertFalse(self.call_telemetry_run)
self.assertTrue(self.call_test_that_run)
self.assertFalse(self.call_telemetry_crosperf_run)
- self.assertEqual(self.test_that_args, ['fake_machine', self.mock_label,
- self.test_that_bench, '', ''])
+ self.assertEqual(
+ self.test_that_args,
+ ['fake_machine', self.mock_label, self.test_that_bench, '', ''])
reset()
- self.runner.Run(machine, self.mock_label,
- self.telemetry_crosperf_bench, test_args,
- profiler_args)
+ self.runner.Run(machine, self.mock_label, self.telemetry_crosperf_bench,
+ test_args, profiler_args)
self.assertTrue(self.call_pin_governor)
self.assertFalse(self.call_telemetry_run)
self.assertFalse(self.call_test_that_run)
self.assertTrue(self.call_telemetry_crosperf_run)
- self.assertEqual(self.telemetry_crosperf_args,
- ['fake_machine', self.mock_label,
- self.telemetry_crosperf_bench, '', ''])
+ self.assertEqual(self.telemetry_crosperf_args, [
+ 'fake_machine', self.mock_label, self.telemetry_crosperf_bench, '', ''
+ ])
@mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
def test_get_highest_static_frequency(self, mock_cros_runcmd):
@@ -192,8 +197,7 @@ class SuiteRunnerTest(unittest.TestCase):
'/sys/devices/system/cpu/cpu*/cpufreq/scaling_min_freq; do echo '
'1666000 > $f; done && for f in '
'/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor; do echo '
- 'performance > $f; done',
- ))
+ 'performance > $f; done',))
@mock.patch.object(time, 'sleep')
@mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
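
Editor's note: the unittest hunks above stub out command execution and time.sleep with mock.patch.object. A minimal self-contained example of that decorator style; the test class here is invented for illustration only.

import time
import unittest

import mock  # The tests above use the standalone mock package.


class PatchStyleExample(unittest.TestCase):
  """Illustrates the @mock.patch.object decorator used in the diff above."""

  @mock.patch.object(time, 'sleep')
  def test_sleep_is_stubbed(self, mock_sleep):
    time.sleep(60)  # Returns immediately; the real sleep is replaced.
    mock_sleep.assert_called_once_with(60)


if __name__ == '__main__':
  unittest.main()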