Diffstat (limited to 'crosperf')
-rw-r--r--  crosperf/README.md (renamed from crosperf/experiment_files/README.md)  8
-rwxr-xr-x  crosperf/benchmark_run_unittest.py  74
-rwxr-xr-x  crosperf/crosperf_autolock.py  281
-rwxr-xr-x  crosperf/crosperf_unittest.py  15
-rw-r--r--  crosperf/default_remotes  17
-rw-r--r--  crosperf/download_images.py  34
-rw-r--r--  crosperf/experiment.py  20
-rw-r--r--  crosperf/experiment_factory.py  193
-rwxr-xr-x  crosperf/experiment_factory_unittest.py  59
-rw-r--r--  crosperf/experiment_runner.py  56
-rw-r--r--  crosperf/label.py  8
-rw-r--r--  crosperf/mock_instance.py  4
-rw-r--r--  crosperf/results_cache.py  145
-rwxr-xr-x  crosperf/results_cache_unittest.py  264
-rw-r--r--  crosperf/schedv2.py  10
-rw-r--r--  crosperf/settings_factory.py  525
-rwxr-xr-x  crosperf/settings_factory_unittest.py  6
-rw-r--r--  crosperf/suite_runner.py  28
-rwxr-xr-x  crosperf/suite_runner_unittest.py  55
-rw-r--r--  crosperf/test_cache/compare_output/results.pickle (renamed from crosperf/test_cache/compare_output/results.txt)  bin 8124 -> 8081 bytes
-rw-r--r--  crosperf/test_cache/test_input/results.pickle (renamed from crosperf/test_cache/test_input/results.txt)  0
-rw-r--r--  crosperf/test_cache/test_puretelemetry_input/results.pickle (renamed from crosperf/test_cache/test_puretelemetry_input/results.txt)  0
22 files changed, 1078 insertions, 724 deletions
diff --git a/crosperf/experiment_files/README.md b/crosperf/README.md
index 8c1fe200..18601b67 100644
--- a/crosperf/experiment_files/README.md
+++ b/crosperf/README.md
@@ -1,4 +1,4 @@
-# Experiment files
+# experiment_files
To use these experiment files, replace the board, remote and images
placeholders and run crosperf on them.
@@ -37,3 +37,9 @@ benchmark: page_cycler_v2.morejs {
iterations: 1
}
```
+
+# default_remotes
+
+This is the list of machines allocated for the toolchain team.
+This should be kept in sync with:
+https://chromeos-swarming.appspot.com/botlist?c=id&c=task&c=label-board&c=label-pool&c=os&c=status&d=asc&f=label-pool%3Atoolchain&k=label-pool&s=id
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
index ab863004..9d815b80 100755
--- a/crosperf/benchmark_run_unittest.py
+++ b/crosperf/benchmark_run_unittest.py
@@ -61,7 +61,7 @@ class BenchmarkRunTest(unittest.TestCase):
cache_only=False,
log_level='average',
compiler='gcc',
- skylab=False)
+ crosfleet=False)
self.test_cache_conditions = [
CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH
@@ -86,7 +86,7 @@ class BenchmarkRunTest(unittest.TestCase):
cache_only=False,
log_level='average',
compiler='gcc',
- skylab=False)
+ crosfleet=False)
logging_level = 'average'
m = MockMachineManager('/tmp/chromeos_root', 0, logging_level, '')
@@ -133,10 +133,11 @@ class BenchmarkRunTest(unittest.TestCase):
pass
def test_run(self):
- br = benchmark_run.BenchmarkRun(
- 'test_run', self.test_benchmark, self.test_label, 1,
- self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '', {})
def MockLogOutput(msg, print_to_console=False):
"""Helper function for test_run."""
@@ -273,10 +274,11 @@ class BenchmarkRunTest(unittest.TestCase):
self.assertEqual(self.status, ['FAILED'])
def test_terminate_pass(self):
- br = benchmark_run.BenchmarkRun(
- 'test_run', self.test_benchmark, self.test_label, 1,
- self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '', {})
def GetLastEventPassed():
"""Helper function for test_terminate_pass"""
@@ -300,10 +302,11 @@ class BenchmarkRunTest(unittest.TestCase):
self.assertEqual(self.status, benchmark_run.STATUS_FAILED)
def test_terminate_fail(self):
- br = benchmark_run.BenchmarkRun(
- 'test_run', self.test_benchmark, self.test_label, 1,
- self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '', {})
def GetLastEventFailed():
"""Helper function for test_terminate_fail"""
@@ -327,10 +330,11 @@ class BenchmarkRunTest(unittest.TestCase):
self.assertEqual(self.status, benchmark_run.STATUS_SUCCEEDED)
def test_acquire_machine(self):
- br = benchmark_run.BenchmarkRun(
- 'test_run', self.test_benchmark, self.test_label, 1,
- self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '', {})
br.terminated = True
self.assertRaises(Exception, br.AcquireMachine)
@@ -344,10 +348,11 @@ class BenchmarkRunTest(unittest.TestCase):
self.assertEqual(machine.name, 'chromeos1-row3-rack5-host7.cros')
def test_get_extra_autotest_args(self):
- br = benchmark_run.BenchmarkRun(
- 'test_run', self.test_benchmark, self.test_label, 1,
- self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '', {})
def MockLogError(err_msg):
"""Helper function for test_get_extra_autotest_args"""
@@ -379,10 +384,11 @@ class BenchmarkRunTest(unittest.TestCase):
@mock.patch.object(SuiteRunner, 'Run')
@mock.patch.object(Result, 'CreateFromRun')
def test_run_test(self, mock_result, mock_runner):
- br = benchmark_run.BenchmarkRun(
- 'test_run', self.test_benchmark, self.test_label, 1,
- self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '', {})
self.status = []
@@ -409,15 +415,17 @@ class BenchmarkRunTest(unittest.TestCase):
br.profiler_args)
self.assertEqual(mock_result.call_count, 1)
- mock_result.assert_called_with(
- self.mock_logger, 'average', self.test_label, None, "{'Score':100}", '',
- 0, 'page_cycler.netsim.top_10', 'telemetry_Crosperf', '')
+ mock_result.assert_called_with(self.mock_logger, 'average', self.test_label,
+ None, "{'Score':100}", '', 0,
+ 'page_cycler.netsim.top_10',
+ 'telemetry_Crosperf', '')
def test_set_cache_conditions(self):
- br = benchmark_run.BenchmarkRun(
- 'test_run', self.test_benchmark, self.test_label, 1,
- self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '', {})
phony_cache_conditions = [123, 456, True, False]
diff --git a/crosperf/crosperf_autolock.py b/crosperf/crosperf_autolock.py
new file mode 100755
index 00000000..b593fa9c
--- /dev/null
+++ b/crosperf/crosperf_autolock.py
@@ -0,0 +1,281 @@
+#!/usr/bin/env python3
+
+# Copyright 2021 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Wrapper script to automatically lock devices for crosperf."""
+
+import os
+import sys
+import argparse
+import subprocess
+import contextlib
+import json
+from typing import Optional, Any
+import dataclasses
+
+# Have to do sys.path hackery because crosperf relies on PYTHONPATH
+# modifications.
+PARENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+sys.path.append(PARENT_DIR)
+
+
+def main(sys_args: list[str]) -> Optional[str]:
+ """Run crosperf_autolock. Returns error msg or None"""
+ args, leftover_args = parse_args(sys_args)
+ fleet_params = [
+ CrosfleetParams(board=args.board,
+ pool=args.pool,
+ lease_time=args.lease_time)
+ for _ in range(args.num_leases)
+ ]
+ if not fleet_params:
+ return ('No board names identified. If you want to use'
+ ' a known host, just use crosperf directly.')
+ try:
+ _run_crosperf(fleet_params, args.dut_lock_timeout, leftover_args)
+ except BoardLockError as e:
+ _eprint('ERROR:', e)
+ _eprint('May need to login to crosfleet? Run "crosfleet login"')
+ _eprint('The leases may also be successful later on. '
+ 'Check with "crosfleet dut leases"')
+ return 'crosperf_autolock failed'
+ except BoardReleaseError as e:
+ _eprint('ERROR:', e)
+ _eprint('May need to re-run "crosfleet dut abandon"')
+ return 'crosperf_autolock failed'
+ return None
+
+
+def parse_args(args: list[str]) -> tuple[Any, list]:
+ """Parse the CLI arguments."""
+ parser = argparse.ArgumentParser(
+ 'crosperf_autolock',
+ description='Wrapper around crosperf'
+ ' to autolock DUTs from crosfleet.',
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ parser.add_argument('--board',
+ type=str,
+ help='Space or comma separated list of boards to lock',
+ required=True,
+ default=argparse.SUPPRESS)
+ parser.add_argument('--num-leases',
+ type=int,
+ help='Number of boards to lock.',
+ metavar='NUM',
+ default=1)
+ parser.add_argument('--pool',
+ type=str,
+ help='Pool to pull from.',
+ default='DUT_POOL_QUOTA')
+ parser.add_argument('--dut-lock-timeout',
+ type=float,
+ metavar='SEC',
+ help='Number of seconds we want to try to lease a board'
+ ' from crosfleet. This option does NOT change the'
+ ' lease length.',
+ default=600)
+ parser.add_argument('--lease-time',
+ type=int,
+ metavar='MIN',
+ help='Number of minutes to lock the board. Max is 1440.',
+ default=1440)
+ parser.epilog = (
+ 'For more detailed flags, you have to read the args taken by the'
+ ' crosperf executable. Args are passed transparently to crosperf.')
+ return parser.parse_known_args(args)
+
+
+class BoardLockError(Exception):
+ """Error to indicate failure to lock a board."""
+
+ def __init__(self, msg: str):
+ self.msg = 'BoardLockError: ' + msg
+ super().__init__(self.msg)
+
+
+class BoardReleaseError(Exception):
+ """Error to indicate failure to release a board."""
+
+ def __init__(self, msg: str):
+ self.msg = 'BoardReleaseError: ' + msg
+ super().__init__(self.msg)
+
+
+@dataclasses.dataclass(frozen=True)
+class CrosfleetParams:
+ """Dataclass to hold all crosfleet parameterizations."""
+ board: str
+ pool: str
+ lease_time: int
+
+
+def _eprint(*msg, **kwargs):
+ print(*msg, file=sys.stderr, **kwargs)
+
+
+def _run_crosperf(crosfleet_params: list[CrosfleetParams], lock_timeout: float,
+ leftover_args: list[str]):
+ """Autolock devices and run crosperf with leftover arguments.
+
+ Raises:
+ BoardLockError: When board was unable to be locked.
+ BoardReleaseError: When board was unable to be released.
+ """
+ if not crosfleet_params:
+ raise ValueError('No crosfleet params given; cannot call crosfleet.')
+
+ # We'll assume all the boards are the same type, which seems to be the case
+ # in experiments that actually get used.
+ passed_board_arg = crosfleet_params[0].board
+ with contextlib.ExitStack() as stack:
+ dut_hostnames = []
+ for param in crosfleet_params:
+ print(
+ f'Sent lock request for {param.board} for {param.lease_time} minutes'
+ '\nIf this fails, you may need to run "crosfleet dut abandon <...>"')
+ # May raise BoardLockError, abandoning previous DUTs.
+ dut_hostname = stack.enter_context(
+ crosfleet_machine_ctx(
+ param.board,
+ param.lease_time,
+ lock_timeout,
+ {'label-pool': param.pool},
+ ))
+ if dut_hostname:
+ print(f'Locked {param.board} machine: {dut_hostname}')
+ dut_hostnames.append(dut_hostname)
+
+ # We import crosperf late, because this import is extremely slow.
+ # We don't want the user to wait several seconds just to get
+ # help info.
+ import crosperf
+ for dut_hostname in dut_hostnames:
+ crosperf.Main([
+ sys.argv[0],
+ '--no_lock',
+ 'True',
+ '--remote',
+ dut_hostname,
+ '--board',
+ passed_board_arg,
+ ] + leftover_args)
+
+
+@contextlib.contextmanager
+def crosfleet_machine_ctx(board: str,
+ lease_minutes: int,
+ lock_timeout: float,
+ dims: dict[str, Any],
+ abandon_timeout: float = 120.0) -> Any:
+ """Acquire dut from crosfleet, and release once it leaves the context.
+
+ Args:
+ board: Board type to lease.
+ lease_minutes: Length of lease, in minutes.
+ lock_timeout: How long to wait for a lock until quitting.
+ dims: Dictionary of dimension arguments to pass to crosfleet's '-dims'
+ abandon_timeout (optional): How long to wait for releasing until quitting.
+
+ Yields:
+ A string representing the crosfleet DUT hostname.
+
+ Raises:
+ BoardLockError: When board was unable to be locked.
+ BoardReleaseError: When board was unable to be released.
+ """
+ # This lock may raise an exception, but if it does, we can't release
+ # the DUT anyways as we won't have the dut_hostname.
+ dut_hostname = crosfleet_autolock(board, lease_minutes, dims, lock_timeout)
+ try:
+ yield dut_hostname
+ finally:
+ if dut_hostname:
+ crosfleet_release(dut_hostname, abandon_timeout)
+
+
+def crosfleet_autolock(board: str, lease_minutes: int, dims: dict[str, Any],
+ timeout_sec: float) -> str:
+ """Lock a device using crosfleet, paramaterized by the board type.
+
+ Args:
+ board: Board of the DUT we want to lock.
+ lease_minutes: Number of minutes we're trying to lease the DUT for.
+ dims: Dictionary of dimension arguments to pass to crosfleet's '-dims'
+ timeout_sec: Number of seconds to try to lease the DUT.
+
+ Returns:
+ The hostname of the board, or empty string if it couldn't be parsed.
+
+ Raises:
+ BoardLockError: When board was unable to be locked.
+ """
+ crosfleet_cmd_args = [
+ 'crosfleet',
+ 'dut',
+ 'lease',
+ '-json',
+ '-reason="crosperf autolock"',
+ f'-board={board}',
+ f'-minutes={lease_minutes}',
+ ]
+ if dims:
+ dims_arg = ','.join('{}={}'.format(k, v) for k, v in dims.items())
+ crosfleet_cmd_args.extend(['-dims', f'{dims_arg}'])
+
+ try:
+ output = subprocess.check_output(crosfleet_cmd_args,
+ timeout=timeout_sec,
+ encoding='utf-8')
+ except subprocess.CalledProcessError as e:
+ raise BoardLockError(
+ f'crosfleet dut lease failed with exit code: {e.returncode}')
+ except subprocess.TimeoutExpired as e:
+ raise BoardLockError(f'crosfleet dut lease timed out after {timeout_sec}s;'
+ ' please abandon the dut manually.')
+
+ try:
+ json_obj = json.loads(output)
+ dut_hostname = json_obj['DUT']['Hostname']
+ if not isinstance(dut_hostname, str):
+ raise TypeError('dut_hostname was not a string')
+ except (json.JSONDecodeError, IndexError, KeyError, TypeError) as e:
+ raise BoardLockError(
+ f'crosfleet dut lease output was parsed incorrectly: {e!r};'
+ f' observed output was {output}')
+ return _maybe_append_suffix(dut_hostname)
+
+
+def crosfleet_release(dut_hostname: str, timeout_sec: float = 120.0):
+ """Release a crosfleet device.
+
+ Consider using the context-managed crosfleet_machine_ctx instead.
+
+ Args:
+ dut_hostname: Name of the device we want to release.
+ timeout_sec: Number of seconds to try to release the DUT. Default is 120s.
+
+ Raises:
+ BoardReleaseError: Potentially failed to abandon the lease.
+ """
+ crosfleet_cmd_args = [
+ 'crosfleet',
+ 'dut',
+ 'abandon',
+ dut_hostname,
+ ]
+ exit_code = subprocess.call(crosfleet_cmd_args, timeout=timeout_sec)
+ if exit_code != 0:
+ raise BoardReleaseError(
+ f'"crosfleet dut abandon" had exit code {exit_code}')
+
+
+def _maybe_append_suffix(hostname: str) -> str:
+ if hostname.endswith('.cros') or '.cros.' in hostname:
+ return hostname
+ return hostname + '.cros'
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
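For illustration, here is a minimal sketch of driving the context manager above directly. It assumes the crosfleet CLI is installed and you have already run "crosfleet login"; the board, lease length, and pool are placeholder values (the real script wires these up through `_run_crosperf`):

```python
# Minimal usage sketch for crosfleet_machine_ctx; board/pool values are
# placeholders, not a recommendation.
from crosperf_autolock import BoardLockError, crosfleet_machine_ctx

try:
  with crosfleet_machine_ctx(board='bob',
                             lease_minutes=60,
                             lock_timeout=600.0,
                             dims={'label-pool': 'DUT_POOL_QUOTA'}) as dut:
    # The lease is held for the lifetime of this block and abandoned
    # ("crosfleet dut abandon") on exit, even if an exception is raised.
    print(f'Leased DUT: {dut}')
except BoardLockError as e:
  print(e.msg)
```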
diff --git a/crosperf/crosperf_unittest.py b/crosperf/crosperf_unittest.py
index 9c7d52a1..774159ff 100755
--- a/crosperf/crosperf_unittest.py
+++ b/crosperf/crosperf_unittest.py
@@ -55,20 +55,19 @@ class CrosperfTest(unittest.TestCase):
def testConvertOptionsToSettings(self):
parser = argparse.ArgumentParser()
- parser.add_argument(
- '-l',
- '--log_dir',
- dest='log_dir',
- default='',
- help='The log_dir, default is under '
- '<crosperf_logs>/logs')
+ parser.add_argument('-l',
+ '--log_dir',
+ dest='log_dir',
+ default='',
+ help='The log_dir, default is under '
+ '<crosperf_logs>/logs')
crosperf.SetupParserOptions(parser)
argv = ['crosperf/crosperf.py', 'temp.exp', '--rerun=True']
options, _ = parser.parse_known_args(argv)
settings = crosperf.ConvertOptionsToSettings(options)
self.assertIsNotNone(settings)
self.assertIsInstance(settings, settings_factory.GlobalSettings)
- self.assertEqual(len(settings.fields), 39)
+ self.assertEqual(len(settings.fields), 40)
self.assertTrue(settings.GetField('rerun'))
argv = ['crosperf/crosperf.py', 'temp.exp']
options, _ = parser.parse_known_args(argv)
diff --git a/crosperf/default_remotes b/crosperf/default_remotes
index f23fe21b..faecb833 100644
--- a/crosperf/default_remotes
+++ b/crosperf/default_remotes
@@ -1,9 +1,8 @@
-bob : chromeos2-row10-rack9-host1.cros chromeos2-row10-rack9-host3.cros
-coral : chromeos2-row9-rack9-host9.cros chromeos2-row9-rack9-host11.cros chromeos2-row9-rack9-host13.cros
-elm : chromeos2-row10-rack9-host19.cros chromeos2-row10-rack9-host21.cros
-chell : chromeos2-row9-rack9-host1.cros chromeos2-row9-rack9-host3.cros
-kefka : chromeos2-row9-rack9-host21.cros chromeos2-row10-rack9-host13.cros
-lulu : chromeos2-row9-rack9-host5.cros chromeos2-row9-rack9-host7.cros
-nautilus : chromeos2-row10-rack9-host9.cros chromeos2-row10-rack9-host11.cros
-snappy : chromeos2-row10-rack9-host5.cros chromeos2-row10-rack9-host7.cros
-veyron_tiger : chromeos2-row9-rack9-host17.cros chromeos2-row9-rack9-host19.cros
+bob : chromeos6-row4-rack13-host6.cros
+chell : chromeos2-row1-rack10-host2.cros chromeos2-row1-rack10-host4.cros
+coral : chromeos6-row5-rack6-host1.cros chromeos6-row5-rack6-host3.cros chromeos6-row5-rack6-host5.cros
+elm : chromeos6-row14-rack15-host21.cros
+kefka : chromeos6-row6-rack22-host2.cros chromeos6-row6-rack22-host3.cros chromeos6-row11-rack22-host7.cros
+nautilus : chromeos6-row5-rack10-host1.cros chromeos6-row5-rack10-host3.cros
+snappy : chromeos6-row3-rack20-host1.cros chromeos6-row3-rack20-host3.cros
+veyron_tiger : chromeos6-row3-rack7-host1.cros
diff --git a/crosperf/download_images.py b/crosperf/download_images.py
index 9bd4a8bf..8e1bad11 100644
--- a/crosperf/download_images.py
+++ b/crosperf/download_images.py
@@ -137,8 +137,8 @@ class ImageDownloader(object):
def DownloadSingleFile(self, chromeos_root, build_id, package_file_name):
# Verify if package files exist
status = 0
- gs_package_name = (
- 'gs://chromeos-image-archive/%s/%s' % (build_id, package_file_name))
+ gs_package_name = ('gs://chromeos-image-archive/%s/%s' %
+ (build_id, package_file_name))
gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
if not test_flag.GetTestMode():
cmd = '%s ls %s' % (gsutil_cmd, gs_package_name)
@@ -171,8 +171,8 @@ class ImageDownloader(object):
uncompress_cmd):
# Uncompress file
download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id)
- command = (
- 'cd %s ; %s %s' % (download_path, uncompress_cmd, package_file_name))
+ command = ('cd %s ; %s %s' %
+ (download_path, uncompress_cmd, package_file_name))
if self.log_level != 'verbose':
self._logger.LogOutput('CMD: %s' % command)
@@ -193,8 +193,8 @@ class ImageDownloader(object):
def VerifyFileExists(self, chromeos_root, build_id, package_file):
# Quickly verify if the files are there
status = 0
- gs_package_name = (
- 'gs://chromeos-image-archive/%s/%s' % (build_id, package_file))
+ gs_package_name = ('gs://chromeos-image-archive/%s/%s' %
+ (build_id, package_file))
gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
if not test_flag.GetTestMode():
cmd = '%s ls %s' % (gsutil_cmd, gs_package_name)
@@ -227,9 +227,9 @@ class ImageDownloader(object):
autotest_packages_name)
if status != 0:
default_autotest_dir = '/mnt/host/source/src/third_party/autotest/files'
- print(
- '(Warning: Could not find autotest packages .)\n'
- '(Warning: Defaulting autotest path to %s .' % default_autotest_dir)
+ print('(Warning: Could not find autotest packages .)\n'
+ '(Warning: Defaulting autotest path to %s .' %
+ default_autotest_dir)
return default_autotest_dir
# Files exist on server, download and uncompress them
@@ -281,22 +281,26 @@ class ImageDownloader(object):
self.UncompressSingleFile(chromeos_root, build_id, debug_archive_name,
'tar -xf ')
- # Rename created autotest directory to autotest_files
- command = ('cd %s ; mv debug debug_files' % download_path)
+ # Extract and move debug files into the proper location.
+ debug_dir = 'debug_files/usr/lib'
+ command = ('cd %s ; mkdir -p %s; mv debug %s' %
+ (download_path, debug_dir, debug_dir))
if self.log_level != 'verbose':
self._logger.LogOutput('CMD: %s' % command)
- print('(Moving downloaded debug files to debug_files)')
+ print('Moving downloaded debug files to %s' % debug_dir)
retval = self._ce.RunCommand(command)
if retval != 0:
- raise MissingFile('Could not create directory debug_files')
+ raise MissingFile('Could not create directory %s' %
+ os.path.join(debug_dir, 'debug'))
return debug_rel_path
def Run(self, chromeos_root, xbuddy_label, autotest_path, debug_path,
download_debug):
build_id = self.GetBuildID(chromeos_root, xbuddy_label)
- image_name = ('gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz'
- % build_id)
+ image_name = (
+ 'gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz' %
+ build_id)
# Verify that image exists for build_id, before attempting to
# download it.
diff --git a/crosperf/experiment.py b/crosperf/experiment.py
index 6e2efd45..e919f6ee 100644
--- a/crosperf/experiment.py
+++ b/crosperf/experiment.py
@@ -29,7 +29,7 @@ class Experiment(object):
cache_conditions, labels, benchmarks, experiment_file, email_to,
acquire_timeout, log_dir, log_level, share_cache,
results_directory, compress_results, locks_directory, cwp_dso,
- ignore_min_max, skylab, dut_config):
+ ignore_min_max, crosfleet, dut_config, no_lock: bool):
self.name = name
self.working_directory = working_directory
self.remote = remote
@@ -56,14 +56,15 @@ class Experiment(object):
self.lock_mgr = None
self.cwp_dso = cwp_dso
self.ignore_min_max = ignore_min_max
- self.skylab = skylab
+ self.crosfleet = crosfleet
+ self.no_lock = no_lock
self.l = logger.GetLogger(log_dir)
if not self.benchmarks:
raise RuntimeError('No benchmarks specified')
if not self.labels:
raise RuntimeError('No labels specified')
- if not remote and not self.skylab:
+ if not remote and not self.crosfleet:
raise RuntimeError('No remote hosts specified')
# We need one chromeos_root to run the benchmarks in, but it doesn't
@@ -123,10 +124,11 @@ class Experiment(object):
logger_to_use = logger.Logger(self.log_dir, 'run.%s' % (full_name),
True)
benchmark_runs.append(
- benchmark_run.BenchmarkRun(
- benchmark_run_name, benchmark, label, iteration,
- self.cache_conditions, self.machine_manager, logger_to_use,
- self.log_level, self.share_cache, dut_config))
+ benchmark_run.BenchmarkRun(benchmark_run_name, benchmark, label,
+ iteration, self.cache_conditions,
+ self.machine_manager, logger_to_use,
+ self.log_level, self.share_cache,
+ dut_config))
return benchmark_runs
@@ -223,6 +225,6 @@ class Experiment(object):
m for m in self.locked_machines if m not in unlocked_machines
]
if failed_machines:
- raise RuntimeError(
- 'These machines are not unlocked correctly: %s' % failed_machines)
+ raise RuntimeError('These machines are not unlocked correctly: %s' %
+ failed_machines)
self.lock_mgr = None
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index 332f0357..a9594a20 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -101,12 +101,14 @@ class ExperimentFactory(object):
def AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results, retries, run_local, cwp_dso, weight):
+ show_all_results, retries, run_local, cwp_dso,
+ weight):
"""Add all the tests in a set to the benchmarks list."""
for test_name in benchmark_list:
- telemetry_benchmark = Benchmark(
- test_name, test_name, test_args, iterations, rm_chroot_tmp, perf_args,
- suite, show_all_results, retries, run_local, cwp_dso, weight)
+ telemetry_benchmark = Benchmark(test_name, test_name, test_args,
+ iterations, rm_chroot_tmp, perf_args,
+ suite, show_all_results, retries,
+ run_local, cwp_dso, weight)
benchmarks.append(telemetry_benchmark)
def GetExperiment(self, experiment_file, working_directory, log_dir):
@@ -119,9 +121,10 @@ class ExperimentFactory(object):
if log_level not in ('quiet', 'average', 'verbose'):
log_level = 'verbose'
- skylab = global_settings.GetField('skylab')
- # Check whether skylab tool is installed correctly for skylab mode.
- if skylab and not self.CheckSkylabTool(chromeos_root, log_level):
+ crosfleet = global_settings.GetField('crosfleet')
+ no_lock = bool(global_settings.GetField('no_lock'))
+ # Check whether crosfleet tool is installed correctly for crosfleet mode.
+ if crosfleet and not self.CheckCrosfleetTool(chromeos_root, log_level):
sys.exit(0)
remote = global_settings.GetField('remote')
@@ -256,33 +259,33 @@ class ExperimentFactory(object):
if suite == 'telemetry_Crosperf':
if test_name == 'all_perfv2':
- self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests, test_args,
- iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results, retries, run_local, cwp_dso,
- weight)
+ self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests,
+ test_args, iterations, rm_chroot_tmp,
+ perf_args, suite, show_all_results, retries,
+ run_local, cwp_dso, weight)
elif test_name == 'all_pagecyclers':
self.AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
test_args, iterations, rm_chroot_tmp,
perf_args, suite, show_all_results, retries,
run_local, cwp_dso, weight)
elif test_name == 'all_crosbolt_perf':
- self.AppendBenchmarkSet(
- benchmarks, telemetry_crosbolt_perf_tests, test_args, iterations,
- rm_chroot_tmp, perf_args, 'telemetry_Crosperf', show_all_results,
- retries, run_local, cwp_dso, weight)
- self.AppendBenchmarkSet(
- benchmarks,
- crosbolt_perf_tests,
- '',
- iterations,
- rm_chroot_tmp,
- perf_args,
- '',
- show_all_results,
- retries,
- run_local=False,
- cwp_dso=cwp_dso,
- weight=weight)
+ self.AppendBenchmarkSet(benchmarks, telemetry_crosbolt_perf_tests,
+ test_args, iterations, rm_chroot_tmp,
+ perf_args, 'telemetry_Crosperf',
+ show_all_results, retries, run_local,
+ cwp_dso, weight)
+ self.AppendBenchmarkSet(benchmarks,
+ crosbolt_perf_tests,
+ '',
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ '',
+ show_all_results,
+ retries,
+ run_local=False,
+ cwp_dso=cwp_dso,
+ weight=weight)
elif test_name == 'all_toolchain_perf':
self.AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
test_args, iterations, rm_chroot_tmp,
@@ -321,10 +324,11 @@ class ExperimentFactory(object):
# cwp_dso=cwp_dso,
# weight=weight))
elif test_name == 'all_toolchain_perf_old':
- self.AppendBenchmarkSet(
- benchmarks, telemetry_toolchain_old_perf_tests, test_args,
- iterations, rm_chroot_tmp, perf_args, suite, show_all_results,
- retries, run_local, cwp_dso, weight)
+ self.AppendBenchmarkSet(benchmarks,
+ telemetry_toolchain_old_perf_tests,
+ test_args, iterations, rm_chroot_tmp,
+ perf_args, suite, show_all_results, retries,
+ run_local, cwp_dso, weight)
else:
benchmark = Benchmark(benchmark_name, test_name, test_args,
iterations, rm_chroot_tmp, perf_args, suite,
@@ -333,34 +337,32 @@ class ExperimentFactory(object):
benchmarks.append(benchmark)
else:
if test_name == 'all_graphics_perf':
- self.AppendBenchmarkSet(
- benchmarks,
- graphics_perf_tests,
- '',
- iterations,
- rm_chroot_tmp,
- perf_args,
- '',
- show_all_results,
- retries,
- run_local=False,
- cwp_dso=cwp_dso,
- weight=weight)
+ self.AppendBenchmarkSet(benchmarks,
+ graphics_perf_tests,
+ '',
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ '',
+ show_all_results,
+ retries,
+ run_local=False,
+ cwp_dso=cwp_dso,
+ weight=weight)
else:
# Add the single benchmark.
- benchmark = Benchmark(
- benchmark_name,
- test_name,
- test_args,
- iterations,
- rm_chroot_tmp,
- perf_args,
- suite,
- show_all_results,
- retries,
- run_local=False,
- cwp_dso=cwp_dso,
- weight=weight)
+ benchmark = Benchmark(benchmark_name,
+ test_name,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local=False,
+ cwp_dso=cwp_dso,
+ weight=weight)
benchmarks.append(benchmark)
if not benchmarks:
@@ -389,8 +391,9 @@ class ExperimentFactory(object):
my_remote = new_remote
if image:
- if skylab:
- raise RuntimeError('In skylab mode, local image should not be used.')
+ if crosfleet:
+ raise RuntimeError(
+ 'In crosfleet mode, local image should not be used.')
if build:
raise RuntimeError('Image path and build are provided at the same '
'time, please use only one of them.')
@@ -406,8 +409,8 @@ class ExperimentFactory(object):
# TODO(yunlian): We should consolidate code in machine_manager.py
# to determine whether we are running from within Google or not
- if ('corp.google.com' in socket.gethostname() and not my_remote and
- not skylab):
+ if ('corp.google.com' in socket.gethostname() and not my_remote
+ and not crosfleet):
my_remote = self.GetDefaultRemotes(board)
if global_settings.GetField('same_machine') and len(my_remote) > 1:
raise RuntimeError('Only one remote is allowed when same_machine '
@@ -418,12 +421,12 @@ class ExperimentFactory(object):
# pylint: disable=too-many-function-args
label = MockLabel(label_name, build, image, autotest_path, debug_path,
chromeos_root, board, my_remote, image_args,
- cache_dir, cache_only, log_level, compiler, skylab,
- chrome_src)
+ cache_dir, cache_only, log_level, compiler,
+ crosfleet, chrome_src)
else:
label = Label(label_name, build, image, autotest_path, debug_path,
chromeos_root, board, my_remote, image_args, cache_dir,
- cache_only, log_level, compiler, skylab, chrome_src)
+ cache_only, log_level, compiler, crosfleet, chrome_src)
labels.append(label)
if not labels:
@@ -432,21 +435,36 @@ class ExperimentFactory(object):
email = global_settings.GetField('email')
all_remote += list(set(my_remote))
all_remote = list(set(all_remote))
- if skylab:
+ if crosfleet:
for remote in all_remote:
- self.CheckRemotesInSkylab(remote)
- experiment = Experiment(experiment_name, all_remote, working_directory,
- chromeos_root, cache_conditions, labels, benchmarks,
- experiment_file.Canonicalize(), email,
- acquire_timeout, log_dir, log_level, share_cache,
- results_dir, compress_results, locks_dir, cwp_dso,
- ignore_min_max, skylab, dut_config)
+ self.CheckRemotesInCrosfleet(remote)
+ experiment = Experiment(experiment_name,
+ all_remote,
+ working_directory,
+ chromeos_root,
+ cache_conditions,
+ labels,
+ benchmarks,
+ experiment_file.Canonicalize(),
+ email,
+ acquire_timeout,
+ log_dir,
+ log_level,
+ share_cache,
+ results_dir,
+ compress_results,
+ locks_dir,
+ cwp_dso,
+ ignore_min_max,
+ crosfleet,
+ dut_config,
+ no_lock=no_lock)
return experiment
def GetDefaultRemotes(self, board):
- default_remotes_file = os.path.join(
- os.path.dirname(__file__), 'default_remotes')
+ default_remotes_file = os.path.join(os.path.dirname(__file__),
+ 'default_remotes')
try:
with open(default_remotes_file) as f:
for line in f:
@@ -464,26 +482,27 @@ class ExperimentFactory(object):
else:
raise RuntimeError('There is no remote for {0}'.format(board))
- def CheckRemotesInSkylab(self, remote):
+ def CheckRemotesInCrosfleet(self, remote):
# TODO: (AI:zhizhouy) need to check whether a remote is a local or lab
# machine. If not lab machine, raise an error.
pass
- def CheckSkylabTool(self, chromeos_root, log_level):
- SKYLAB_PATH = '/usr/local/bin/skylab'
- if os.path.exists(SKYLAB_PATH):
+ def CheckCrosfleetTool(self, chromeos_root, log_level):
+ CROSFLEET_PATH = 'crosfleet'
+ if os.path.exists(CROSFLEET_PATH):
return True
l = logger.GetLogger()
- l.LogOutput('Skylab tool not installed, trying to install it.')
+ l.LogOutput('Crosfleet tool not installed, trying to install it.')
ce = command_executer.GetCommandExecuter(l, log_level=log_level)
- setup_lab_tools = os.path.join(chromeos_root, 'chromeos-admin', 'lab-tools',
- 'setup_lab_tools')
+ setup_lab_tools = os.path.join(chromeos_root, 'chromeos-admin',
+ 'lab-tools', 'setup_lab_tools')
cmd = '%s' % setup_lab_tools
status = ce.RunCommand(cmd)
if status != 0:
- raise RuntimeError('Skylab tool not installed correctly, please try to '
- 'manually install it from %s' % setup_lab_tools)
- l.LogOutput('Skylab is installed at %s, please login before first use. '
- 'Login by running "skylab login" and follow instructions.' %
- SKYLAB_PATH)
+ raise RuntimeError(
+ 'Crosfleet tool not installed correctly, please try to '
+ 'manually install it from %s' % setup_lab_tools)
+ l.LogOutput('Crosfleet is installed at %s, please login before first use. '
+ 'Login by running "crosfleet login" and follow instructions.' %
+ CROSFLEET_PATH)
return False
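A side note on `CheckCrosfleetTool` above: `os.path.exists('crosfleet')` only tests for a file named `crosfleet` relative to the current working directory, not on `$PATH`. A PATH-aware probe (a sketch of an alternative, not what this patch does) could use `shutil.which`:

```python
import shutil

# shutil.which() searches $PATH the way a shell would; it returns the
# resolved path to the binary, or None when crosfleet is not installed.
crosfleet_path = shutil.which('crosfleet')
if crosfleet_path:
  print('crosfleet found at', crosfleet_path)
else:
  print('crosfleet is not on $PATH')
```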
diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py
index 3528eb1f..9637c108 100755
--- a/crosperf/experiment_factory_unittest.py
+++ b/crosperf/experiment_factory_unittest.py
@@ -79,14 +79,14 @@ EXPERIMENT_FILE_2 = """
class ExperimentFactoryTest(unittest.TestCase):
"""Class for running experiment factory unittests."""
-
def setUp(self):
self.append_benchmark_call_args = []
def testLoadExperimentFile1(self):
experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_1))
- exp = ExperimentFactory().GetExperiment(
- experiment_file, working_directory='', log_dir='')
+ exp = ExperimentFactory().GetExperiment(experiment_file,
+ working_directory='',
+ log_dir='')
self.assertEqual(exp.remote, ['chromeos-alex3'])
self.assertEqual(len(exp.benchmarks), 2)
@@ -104,8 +104,9 @@ class ExperimentFactoryTest(unittest.TestCase):
def testLoadExperimentFile2CWP(self):
experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_2))
- exp = ExperimentFactory().GetExperiment(
- experiment_file, working_directory='', log_dir='')
+ exp = ExperimentFactory().GetExperiment(experiment_file,
+ working_directory='',
+ log_dir='')
self.assertEqual(exp.cwp_dso, 'kallsyms')
self.assertEqual(len(exp.benchmarks), 2)
self.assertEqual(exp.benchmarks[0].weight, 0.8)
@@ -240,11 +241,12 @@ class ExperimentFactoryTest(unittest.TestCase):
ef = ExperimentFactory()
bench_list = []
- ef.AppendBenchmarkSet(bench_list, experiment_factory.telemetry_perfv2_tests,
- '', 1, False, '', 'telemetry_Crosperf', False, 0,
- False, '', 0)
- self.assertEqual(
- len(bench_list), len(experiment_factory.telemetry_perfv2_tests))
+ ef.AppendBenchmarkSet(bench_list,
+ experiment_factory.telemetry_perfv2_tests, '', 1,
+ False, '', 'telemetry_Crosperf', False, 0, False, '',
+ 0)
+ self.assertEqual(len(bench_list),
+ len(experiment_factory.telemetry_perfv2_tests))
self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
bench_list = []
@@ -252,17 +254,17 @@ class ExperimentFactoryTest(unittest.TestCase):
experiment_factory.telemetry_pagecycler_tests, '', 1,
False, '', 'telemetry_Crosperf', False, 0, False, '',
0)
- self.assertEqual(
- len(bench_list), len(experiment_factory.telemetry_pagecycler_tests))
+ self.assertEqual(len(bench_list),
+ len(experiment_factory.telemetry_pagecycler_tests))
self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
bench_list = []
ef.AppendBenchmarkSet(bench_list,
- experiment_factory.telemetry_toolchain_perf_tests, '',
- 1, False, '', 'telemetry_Crosperf', False, 0, False,
- '', 0)
- self.assertEqual(
- len(bench_list), len(experiment_factory.telemetry_toolchain_perf_tests))
+ experiment_factory.telemetry_toolchain_perf_tests,
+ '', 1, False, '', 'telemetry_Crosperf', False, 0,
+ False, '', 0)
+ self.assertEqual(len(bench_list),
+ len(experiment_factory.telemetry_toolchain_perf_tests))
self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
@mock.patch.object(socket, 'gethostname')
@@ -370,7 +372,8 @@ class ExperimentFactoryTest(unittest.TestCase):
global_settings.SetField('same_machine', 'true')
global_settings.SetField('same_specs', 'true')
- self.assertRaises(Exception, ef.GetExperiment, mock_experiment_file, '', '')
+ self.assertRaises(Exception, ef.GetExperiment, mock_experiment_file, '',
+ '')
label_settings.SetField('remote', '')
global_settings.SetField('remote', '123.45.67.89')
exp = ef.GetExperiment(mock_experiment_file, '', '')
@@ -399,46 +402,42 @@ class ExperimentFactoryTest(unittest.TestCase):
def test_get_default_remotes(self):
board_list = [
- 'elm', 'bob', 'chell', 'kefka', 'lulu', 'nautilus', 'snappy',
+ 'bob', 'chell', 'coral', 'elm', 'kefka', 'nautilus', 'snappy',
'veyron_tiger'
]
ef = ExperimentFactory()
self.assertRaises(Exception, ef.GetDefaultRemotes, 'bad-board')
- # Verify that we have entries for every board, and that we get at least
- # two machines for each board.
+ # Verify that we have entries for every board
for b in board_list:
remotes = ef.GetDefaultRemotes(b)
- if b == 'daisy':
- self.assertEqual(len(remotes), 1)
- else:
- self.assertGreaterEqual(len(remotes), 2)
+ self.assertGreaterEqual(len(remotes), 1)
@mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
@mock.patch.object(os.path, 'exists')
- def test_check_skylab_tool(self, mock_exists, mock_runcmd):
+ def test_check_crosfleet_tool(self, mock_exists, mock_runcmd):
ef = ExperimentFactory()
chromeos_root = '/tmp/chromeos'
log_level = 'average'
mock_exists.return_value = True
- ret = ef.CheckSkylabTool(chromeos_root, log_level)
+ ret = ef.CheckCrosfleetTool(chromeos_root, log_level)
self.assertTrue(ret)
mock_exists.return_value = False
mock_runcmd.return_value = 1
with self.assertRaises(RuntimeError) as err:
- ef.CheckSkylabTool(chromeos_root, log_level)
+ ef.CheckCrosfleetTool(chromeos_root, log_level)
self.assertEqual(mock_runcmd.call_count, 1)
self.assertEqual(
- str(err.exception), 'Skylab tool not installed '
+ str(err.exception), 'Crosfleet tool not installed '
'correctly, please try to manually install it from '
'/tmp/chromeos/chromeos-admin/lab-tools/setup_lab_tools')
mock_runcmd.return_value = 0
mock_runcmd.call_count = 0
- ret = ef.CheckSkylabTool(chromeos_root, log_level)
+ ret = ef.CheckCrosfleetTool(chromeos_root, log_level)
self.assertEqual(mock_runcmd.call_count, 1)
self.assertFalse(ret)
diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py
index 8ba85a4c..6daef780 100644
--- a/crosperf/experiment_runner.py
+++ b/crosperf/experiment_runner.py
@@ -107,15 +107,15 @@ class ExperimentRunner(object):
"""Get where is the machine from.
Returns:
- The location of the machine: local or skylab
+ The location of the machine: local or crosfleet
"""
# We assume that lab machines always start with chromeos*, and local
# machines are IP addresses.
if 'chromeos' in machine:
- if lock_mgr.CheckMachineInSkylab(machine):
- return 'skylab'
+ if lock_mgr.CheckMachineInCrosfleet(machine):
+ return 'crosfleet'
else:
- raise RuntimeError('Lab machine not in Skylab.')
+ raise RuntimeError('Lab machine not in Crosfleet.')
return 'local'
def _LockAllMachines(self, experiment):
@@ -125,7 +125,7 @@ class ExperimentRunner(object):
in three different modes automatically, to prevent any other crosperf runs
from being able to update/use the machines while this experiment is
running:
- - Skylab machines: Use skylab lease-dut mechanism to lease
+ - Crosfleet machines: Use crosfleet lease-dut mechanism to lease
- Local machines: Use file lock mechanism to lock
"""
if test_flag.GetTestMode():
@@ -143,8 +143,8 @@ class ExperimentRunner(object):
machine_type = self._GetMachineType(experiment.lock_mgr, m)
if machine_type == 'local':
experiment.lock_mgr.AddMachineToLocal(m)
- elif machine_type == 'skylab':
- experiment.lock_mgr.AddMachineToSkylab(m)
+ elif machine_type == 'crosfleet':
+ experiment.lock_mgr.AddMachineToCrosfleet(m)
machine_states = experiment.lock_mgr.GetMachineStates('lock')
experiment.lock_mgr.CheckMachineLocks(machine_states, 'lock')
self.locked_machines = experiment.lock_mgr.UpdateMachines(True)
@@ -160,8 +160,8 @@ class ExperimentRunner(object):
cache.Init(br.label.chromeos_image, br.label.chromeos_root,
br.benchmark.test_name, br.iteration, br.test_args,
br.profiler_args, br.machine_manager, br.machine,
- br.label.board, br.cache_conditions, br.logger(), br.log_level,
- br.label, br.share_cache, br.benchmark.suite,
+ br.label.board, br.cache_conditions, br.logger(),
+ br.log_level, br.label, br.share_cache, br.benchmark.suite,
br.benchmark.show_all_results, br.benchmark.run_local,
br.benchmark.cwp_dso)
cache_dir = cache.GetCacheDirForWrite()
@@ -171,12 +171,12 @@ class ExperimentRunner(object):
def _Run(self, experiment):
try:
- # We should not lease machines if tests are launched via `skylab
- # create-test`. This is because leasing DUT in skylab will create a
+ # We should not lease machines if tests are launched via `crosfleet
+ # create-test`. This is because leasing DUT in crosfleet will create a
# no-op task on the DUT and new test created will be hanging there.
# TODO(zhizhouy): Need to check whether machine is ready or not before
# assigning a test to it.
- if not experiment.skylab:
+ if not experiment.no_lock and not experiment.crosfleet:
self._LockAllMachines(experiment)
# Calculate all checksums of available/locked machines, to ensure same
# label has same machines for testing
@@ -236,8 +236,8 @@ class ExperimentRunner(object):
if not benchmark_run.cache_hit:
send_mail = True
break
- if (not send_mail and not experiment.email_to or
- config.GetConfig('no_email')):
+ if (not send_mail and not experiment.email_to
+ or config.GetConfig('no_email')):
return
label_names = []
@@ -245,7 +245,8 @@ class ExperimentRunner(object):
label_names.append(label.name)
subject = '%s: %s' % (experiment.name, ' vs. '.join(label_names))
- text_report = TextResultsReport.FromExperiment(experiment, True).GetReport()
+ text_report = TextResultsReport.FromExperiment(experiment,
+ True).GetReport()
text_report += ('\nResults are stored in %s.\n' %
experiment.results_directory)
text_report = "<pre style='font-size: 13px'>%s</pre>" % text_report
@@ -253,12 +254,11 @@ class ExperimentRunner(object):
attachment = EmailSender.Attachment('report.html', html_report)
email_to = experiment.email_to or []
email_to.append(getpass.getuser())
- EmailSender().SendEmail(
- email_to,
- subject,
- text_report,
- attachments=[attachment],
- msg_type='html')
+ EmailSender().SendEmail(email_to,
+ subject,
+ text_report,
+ attachments=[attachment],
+ msg_type='html')
def _StoreResults(self, experiment):
if self._terminated:
@@ -300,9 +300,10 @@ class ExperimentRunner(object):
self.l.LogOutput('Storing results of each benchmark run.')
for benchmark_run in experiment.benchmark_runs:
if benchmark_run.result:
- benchmark_run_name = ''.join(
- ch for ch in benchmark_run.name if ch.isalnum())
- benchmark_run_path = os.path.join(results_directory, benchmark_run_name)
+ benchmark_run_name = ''.join(ch for ch in benchmark_run.name
+ if ch.isalnum())
+ benchmark_run_path = os.path.join(results_directory,
+ benchmark_run_name)
if experiment.compress_results:
benchmark_run.result.CompressResultsTo(benchmark_run_path)
else:
@@ -313,15 +314,16 @@ class ExperimentRunner(object):
results_table_path = os.path.join(results_directory, 'results.html')
report = HTMLResultsReport.FromExperiment(experiment).GetReport()
if self.json_report:
- json_report = JSONResultsReport.FromExperiment(
- experiment, json_args={'indent': 2})
+ json_report = JSONResultsReport.FromExperiment(experiment,
+ json_args={'indent': 2})
_WriteJSONReportToFile(experiment, results_directory, json_report)
FileUtils().WriteFile(results_table_path, report)
self.l.LogOutput('Storing email message body in %s.' % results_directory)
msg_file_path = os.path.join(results_directory, 'msg_body.html')
- text_report = TextResultsReport.FromExperiment(experiment, True).GetReport()
+ text_report = TextResultsReport.FromExperiment(experiment,
+ True).GetReport()
text_report += ('\nResults are stored in %s.\n' %
experiment.results_directory)
msg_body = "<pre style='font-size: 13px'>%s</pre>" % text_report
diff --git a/crosperf/label.py b/crosperf/label.py
index a55d663c..30bf5f8c 100644
--- a/crosperf/label.py
+++ b/crosperf/label.py
@@ -32,7 +32,7 @@ class Label(object):
cache_only,
log_level,
compiler,
- skylab=False,
+ crosfleet=False,
chrome_src=None):
self.image_type = self._GetImageType(chromeos_image)
@@ -55,7 +55,7 @@ class Label(object):
self.log_level = log_level
self.chrome_version = ''
self.compiler = compiler
- self.skylab = skylab
+ self.crosfleet = crosfleet
if not chromeos_root:
if self.image_type == 'local':
@@ -153,7 +153,7 @@ class MockLabel(object):
cache_only,
log_level,
compiler,
- skylab=False,
+ crosfleet=False,
chrome_src=None):
self.name = name
self.build = build
@@ -174,7 +174,7 @@ class MockLabel(object):
self.checksum = ''
self.log_level = log_level
self.compiler = compiler
- self.skylab = skylab
+ self.crosfleet = crosfleet
self.chrome_version = 'Fake Chrome Version 50'
def _GetImageType(self, chromeos_image):
diff --git a/crosperf/mock_instance.py b/crosperf/mock_instance.py
index 842d6343..f44ed87c 100644
--- a/crosperf/mock_instance.py
+++ b/crosperf/mock_instance.py
@@ -25,7 +25,7 @@ label1 = MockLabel(
cache_only=False,
log_level='average',
compiler='gcc',
- skylab=False,
+ crosfleet=False,
chrome_src=None)
label2 = MockLabel(
@@ -42,7 +42,7 @@ label2 = MockLabel(
cache_only=False,
log_level='average',
compiler='gcc',
- skylab=False,
+ crosfleet=False,
chrome_src=None)
benchmark1 = Benchmark('benchmark1', 'autotest_name_1', 'autotest_args', 2, '',
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index c5c85942..5525858c 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -27,7 +27,7 @@ import results_report
import test_flag
SCRATCH_DIR = os.path.expanduser('~/cros_scratch')
-RESULTS_FILE = 'results.txt'
+RESULTS_FILE = 'results.pickle'
MACHINE_FILE = 'machine.txt'
AUTOTEST_TARBALL = 'autotest.tbz2'
RESULTS_TARBALL = 'results.tbz2'
@@ -129,6 +129,7 @@ class Result(object):
ret = self.ce.CopyFiles(file_to_copy, dest_file, recursive=False)
if ret:
raise IOError('Could not copy results file: %s' % file_to_copy)
+ file_index += 1
def CopyResultsTo(self, dest_dir):
self.CopyFilesTo(dest_dir, self.results_file)
@@ -196,9 +197,9 @@ class Result(object):
keyvals_dict[key] = result_dict['value']
elif 'values' in result_dict:
values = result_dict['values']
- if ('type' in result_dict and
- result_dict['type'] == 'list_of_scalar_values' and values and
- values != 'null'):
+ if ('type' in result_dict
+ and result_dict['type'] == 'list_of_scalar_values' and values
+ and values != 'null'):
keyvals_dict[key] = sum(values) / float(len(values))
else:
keyvals_dict[key] = values
@@ -244,13 +245,14 @@ class Result(object):
results_in_chroot = os.path.join(self.chromeos_root, 'chroot', 'tmp')
if not self.temp_dir:
self.temp_dir = tempfile.mkdtemp(dir=results_in_chroot)
- command = 'cp -r {0}/* {1}'.format(self.results_dir, self.temp_dir)
+ command = f'cp -r {self.results_dir}/* {self.temp_dir}'
self.ce.RunCommand(command, print_to_console=False)
command = ('./generate_test_report --no-color --csv %s' %
(os.path.join('/tmp', os.path.basename(self.temp_dir))))
- _, out, _ = self.ce.ChrootRunCommandWOutput(
- self.chromeos_root, command, print_to_console=False)
+ _, out, _ = self.ce.ChrootRunCommandWOutput(self.chromeos_root,
+ command,
+ print_to_console=False)
keyvals_dict = {}
tmp_dir_in_chroot = misc.GetInsideChrootPath(self.chromeos_root,
self.temp_dir)
@@ -272,7 +274,7 @@ class Result(object):
return keyvals_dict
def GetSamples(self):
- samples = 0
+ actual_samples = 0
for perf_data_file in self.perf_data_files:
chroot_perf_data_file = misc.GetInsideChrootPath(self.chromeos_root,
perf_data_file)
@@ -303,17 +305,53 @@ class Result(object):
# Each line looks like this:
# 45.42% 237210 chrome
# And we want the second number which is the sample count.
- sample = 0
+ samples = 0
try:
for line in result.split('\n'):
attr = line.split()
if len(attr) == 3 and '%' in attr[0]:
- sample += int(attr[1])
+ samples += int(attr[1])
except:
raise RuntimeError('Cannot parse perf dso result')
- samples += sample
- return [samples, u'samples']
+ actual_samples += samples
+
+ # Remove idle cycles from the accumulated sample count.
+ perf_report_file = f'{perf_data_file}.report'
+ if not os.path.exists(perf_report_file):
+ raise RuntimeError(f'Missing perf report file: {perf_report_file}')
+
+ idle_functions = {
+ '[kernel.kallsyms]':
+ ('intel_idle', 'arch_cpu_idle', 'intel_idle', 'cpu_startup_entry',
+ 'default_idle', 'cpu_idle_loop', 'do_idle'),
+ }
+ idle_samples = 0
+
+ with open(perf_report_file) as f:
+ try:
+ for line in f:
+ line = line.strip()
+ if not line or line[0] == '#':
+ continue
+ # Each line has the following fields,
+ # pylint: disable=line-too-long
+ # Overhead Samples Command Shared Object Symbol
+ # pylint: disable=line-too-long
+ # 1.48% 60 swapper [kernel.kallsyms] [k] intel_idle
+ # pylint: disable=line-too-long
+ # 0.00% 1 shill libshill-net.so [.] std::__1::vector<unsigned char, std::__1::allocator<unsigned char> >::vector<unsigned char const*>
+ _, samples, _, dso, _, function = line.split(None, 5)
+
+ if dso in idle_functions and function in idle_functions[dso]:
+ if self.log_level != 'verbose':
+ self._logger.LogOutput('Removing %s samples from %s in %s' %
+ (samples, function, dso))
+ idle_samples += int(samples)
+ except:
+ raise RuntimeError('Cannot parse perf report')
+ actual_samples -= idle_samples
+ return [actual_samples, u'samples']
def GetResultsDir(self):
if self.suite == 'tast':
@@ -350,11 +388,11 @@ class Result(object):
result = self.FindFilesInResultsDir('-name perf_measurements').splitlines()
if not result:
if self.suite == 'telemetry_Crosperf':
- result = \
- self.FindFilesInResultsDir('-name histograms.json').splitlines()
+ result = (
+ self.FindFilesInResultsDir('-name histograms.json').splitlines())
else:
- result = \
- self.FindFilesInResultsDir('-name results-chart.json').splitlines()
+ result = (self.FindFilesInResultsDir(
+ '-name results-chart.json').splitlines())
return result
def GetTurbostatFile(self):
@@ -412,7 +450,8 @@ class Result(object):
if debug_path:
symfs = '--symfs ' + debug_path
- vmlinux = '--vmlinux ' + os.path.join(debug_path, 'boot', 'vmlinux')
+ vmlinux = '--vmlinux ' + os.path.join(debug_path, 'usr', 'lib',
+ 'debug', 'boot', 'vmlinux')
kallsyms = ''
print('** WARNING **: --kallsyms option not applied, no System.map-* '
'for downloaded image.')
@@ -508,9 +547,9 @@ class Result(object):
values = value_dict['values']
if not values:
continue
- if ('type' in value_dict and
- value_dict['type'] == 'list_of_scalar_values' and
- values != 'null'):
+ if ('type' in value_dict
+ and value_dict['type'] == 'list_of_scalar_values'
+ and values != 'null'):
result = sum(values) / float(len(values))
else:
result = values
@@ -708,8 +747,9 @@ class Result(object):
# order.
heapq.heappush(cmd_top5_cpu_use[cmd_with_pid], round(cpu_use, 1))
- for consumer, usage in sorted(
- cmd_total_cpu_use.items(), key=lambda x: x[1], reverse=True):
+ for consumer, usage in sorted(cmd_total_cpu_use.items(),
+ key=lambda x: x[1],
+ reverse=True):
# Iterate through commands by descending order of total CPU usage.
topcmd = {
'cmd': consumer,
@@ -875,7 +915,8 @@ class Result(object):
self.chromeos_root, path_str)
if status:
# Error of reading a perf.data profile is fatal.
- raise PerfDataReadError(f'Failed to read perf.data profile: {path_str}')
+ raise PerfDataReadError(
+ f'Failed to read perf.data profile: {path_str}')
# Pattern to search a line with "perf record" command line:
# # cmdline : /usr/bin/perf record -e instructions -p 123"
@@ -900,7 +941,8 @@ class Result(object):
break
else:
# cmdline wasn't found in the header. It's a fatal error.
- raise PerfDataReadError(f'Perf command line is not found in {path_str}')
+ raise PerfDataReadError(
+ f'Perf command line is not found in {path_str}')
return pids
def VerifyPerfDataPID(self):
@@ -938,11 +980,11 @@ class Result(object):
# Note that this function doesn't know anything about whether there is a
# cache hit or miss. It should process results agnostic of the cache hit
# state.
- if (self.results_file and self.suite == 'telemetry_Crosperf' and
- 'histograms.json' in self.results_file[0]):
+ if (self.results_file and self.suite == 'telemetry_Crosperf'
+ and 'histograms.json' in self.results_file[0]):
self.keyvals = self.ProcessHistogramsResults()
- elif (self.results_file and self.suite != 'telemetry_Crosperf' and
- 'results-chart.json' in self.results_file[0]):
+ elif (self.results_file and self.suite != 'telemetry_Crosperf'
+ and 'results-chart.json' in self.results_file[0]):
self.keyvals = self.ProcessChartResults()
else:
if not use_cache:
@@ -1096,15 +1138,16 @@ class Result(object):
f.write(machine_manager.machine_checksum_string[self.label.name])
if os.path.exists(cache_dir):
- command = 'rm -rf {0}'.format(cache_dir)
+ command = f'rm -rf {cache_dir}'
self.ce.RunCommand(command)
- command = 'mkdir -p {0} && '.format(os.path.dirname(cache_dir))
- command += 'chmod g+x {0} && '.format(temp_dir)
- command += 'mv {0} {1}'.format(temp_dir, cache_dir)
+ parent_dir = os.path.dirname(cache_dir)
+ command = f'mkdir -p {parent_dir} && '
+ command += f'chmod g+x {temp_dir} && '
+ command += f'mv {temp_dir} {cache_dir}'
ret = self.ce.RunCommand(command)
if ret:
- command = 'rm -rf {0}'.format(temp_dir)
+ command = f'rm -rf {temp_dir}'
self.ce.RunCommand(command)
raise RuntimeError('Could not move dir %s to dir %s' %
(temp_dir, cache_dir))
@@ -1203,8 +1246,8 @@ class TelemetryResult(Result):
self.err = pickle.load(f)
self.retval = pickle.load(f)
- self.chrome_version = \
- super(TelemetryResult, self).GetChromeVersionFromCache(cache_dir)
+ self.chrome_version = (super(TelemetryResult,
+ self).GetChromeVersionFromCache(cache_dir))
self.ProcessResults()
@@ -1266,10 +1309,10 @@ class ResultsCache(object):
self.run_local = None
self.cwp_dso = None
- def Init(self, chromeos_image, chromeos_root, test_name, iteration, test_args,
- profiler_args, machine_manager, machine, board, cache_conditions,
- logger_to_use, log_level, label, share_cache, suite,
- show_all_results, run_local, cwp_dso):
+ def Init(self, chromeos_image, chromeos_root, test_name, iteration,
+ test_args, profiler_args, machine_manager, machine, board,
+ cache_conditions, logger_to_use, log_level, label, share_cache,
+ suite, show_all_results, run_local, cwp_dso):
self.chromeos_image = chromeos_image
self.chromeos_root = chromeos_root
self.test_name = test_name
@@ -1281,8 +1324,8 @@ class ResultsCache(object):
self.machine_manager = machine_manager
self.machine = machine
self._logger = logger_to_use
- self.ce = command_executer.GetCommandExecuter(
- self._logger, log_level=log_level)
+ self.ce = command_executer.GetCommandExecuter(self._logger,
+ log_level=log_level)
self.label = label
self.share_cache = share_cache
self.suite = suite
@@ -1368,15 +1411,16 @@ class ResultsCache(object):
temp_test_args = '%s %s %s' % (self.test_args, self.profiler_args,
self.run_local)
- test_args_checksum = hashlib.md5(temp_test_args.encode('utf-8')).hexdigest()
+ test_args_checksum = hashlib.md5(
+ temp_test_args.encode('utf-8')).hexdigest()
return (image_path_checksum, self.test_name, str(self.iteration),
- test_args_checksum, checksum, machine_checksum, machine_id_checksum,
- str(self.CACHE_VERSION))
+ test_args_checksum, checksum, machine_checksum,
+ machine_id_checksum, str(self.CACHE_VERSION))
def ReadResult(self):
if CacheConditions.FALSE in self.cache_conditions:
cache_dir = self.GetCacheDirForWrite()
- command = 'rm -rf %s' % (cache_dir,)
+ command = 'rm -rf %s' % (cache_dir, )
self.ce.RunCommand(command)
return None
cache_dir = self.GetCacheDirForRead()
@@ -1389,14 +1433,15 @@ class ResultsCache(object):
if self.log_level == 'verbose':
self._logger.LogOutput('Trying to read from cache dir: %s' % cache_dir)
- result = Result.CreateFromCacheHit(self._logger, self.log_level, self.label,
- self.machine, cache_dir, self.test_name,
- self.suite, self.cwp_dso)
+ result = Result.CreateFromCacheHit(self._logger, self.log_level,
+ self.label, self.machine, cache_dir,
+ self.test_name, self.suite,
+ self.cwp_dso)
if not result:
return None
- if (result.retval == 0 or
- CacheConditions.RUN_SUCCEEDED not in self.cache_conditions):
+ if (result.retval == 0
+ or CacheConditions.RUN_SUCCEEDED not in self.cache_conditions):
return result
return None
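The idle-cycle subtraction added to `GetSamples` above relies on splitting each perf-report line into at most six fields, so the final symbol field survives intact even when a demangled C++ name contains spaces. A standalone sketch of that parse, under the same column assumptions stated in the code's comments:

```python
# Sketch of the perf-report parse used by GetSamples above; assumes the
# whitespace-separated layout:
#   Overhead  Samples  Command  Shared Object  Symbol
IDLE_FUNCTIONS = {
    '[kernel.kallsyms]': ('intel_idle', 'arch_cpu_idle', 'cpu_startup_entry',
                          'default_idle', 'cpu_idle_loop', 'do_idle'),
}


def count_idle_samples(report_lines):
  idle = 0
  for line in report_lines:
    line = line.strip()
    if not line or line.startswith('#'):
      continue  # Skip blank lines and perf's '#' comment header.
    parts = line.split(None, 5)  # maxsplit=5 keeps spaces in the symbol.
    if len(parts) != 6:
      continue
    _, samples, _, dso, _, function = parts
    if function in IDLE_FUNCTIONS.get(dso, ()):
      idle += int(samples)
  return idle
```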
diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py
index 91ceed22..d6953eed 100755
--- a/crosperf/results_cache_unittest.py
+++ b/crosperf/results_cache_unittest.py
@@ -9,7 +9,9 @@
from __future__ import print_function
+import io
import os
+import pickle
import shutil
import tempfile
import unittest
@@ -30,6 +32,8 @@ from cros_utils import command_executer
from cros_utils import logger
from cros_utils import misc
+# The following hardcoded string has blocked words replaced, and thus
+# is not representative of a true crosperf output.
# pylint: disable=line-too-long
OUTPUT = """CMD (True): ./test_that.sh\
--remote=172.17.128.241 --board=lumpy LibCBench
@@ -41,13 +45,13 @@ INFO : Running the following control files 1 times:
INFO : * 'client/site_tests/platform_LibCBench/control'
INFO : Running client test client/site_tests/platform_LibCBench/control
-./server/autoserv -m 172.17.128.241 --ssh-port 22 -c client/site_tests/platform_LibCBench/control -r /tmp/test_that.PO1234567/platform_LibCBench --test-retry=0 --args
+./server/autoserv -m 172.17.128.241 --ssh-port 22 -c client/site_tests/platform_LibCBench/control -r /tmp/test_that.PO1234567/platform_LibCBench --test-retry=0 --args
ERROR:root:import statsd failed, no stats will be reported.
14:20:22 INFO | Results placed in /tmp/test_that.PO1234567/platform_LibCBench
14:20:22 INFO | Processing control file
-14:20:23 INFO | Starting master ssh connection '/usr/bin/ssh -a -x -N -o ControlMaster=yes -o ControlPath=/tmp/_autotmp_VIIP67ssh-master/socket -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=yes -o ConnectTimeout=30 -o ServerAliveInterval=180 -o ServerAliveCountMax=3 -o ConnectionAttempts=4 -o Protocol=2 -l root -p 22 172.17.128.241'
+14:20:23 INFO | Starting main ssh connection '/usr/bin/ssh -a -x -N -o ControlMain=yes -o ControlPath=/tmp/_autotmp_VIIP67ssh-main/socket -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=yes -o ConnectTimeout=30 -o ServerAliveInterval=180 -o ServerAliveCountMax=3 -o ConnectionAttempts=4 -o Protocol=2 -l root -p 22 172.17.128.241'
14:20:23 ERROR| [stderr] Warning: Permanently added '172.17.128.241' (RSA) to the list of known hosts.
-14:20:23 INFO | INFO ---- ---- kernel=3.8.11 localtime=May 22 14:20:23 timestamp=1369257623
+14:20:23 INFO | INFO\t----\t----\tkernel=3.8.11\tlocaltime=May 22 14:20:23\ttimestamp=1369257623
14:20:23 INFO | Installing autotest on 172.17.128.241
14:20:23 INFO | Using installation dir /usr/local/autotest
14:20:23 WARNI| No job_repo_url for <remote host: 172.17.128.241>
@@ -58,11 +62,11 @@ ERROR:root:import statsd failed, no stats will be reported.
14:20:24 INFO | Entered autotestd_monitor.
14:20:24 INFO | Finished launching tail subprocesses.
14:20:24 INFO | Finished waiting on autotestd to start.
-14:20:26 INFO | START ---- ---- timestamp=1369257625 localtime=May 22 14:20:25
-14:20:26 INFO | START platform_LibCBench platform_LibCBench timestamp=1369257625 localtime=May 22 14:20:25
-14:20:30 INFO | GOOD platform_LibCBench platform_LibCBench timestamp=1369257630 localtime=May 22 14:20:30 completed successfully
-14:20:30 INFO | END GOOD platform_LibCBench platform_LibCBench timestamp=1369257630 localtime=May 22 14:20:30
-14:20:31 INFO | END GOOD ---- ---- timestamp=1369257630 localtime=May 22 14:20:30
+14:20:26 INFO | START\t----\t----\ttimestamp=1369257625\tlocaltime=May 22 14:20:25
+14:20:26 INFO | \tSTART\tplatform_LibCBench\tplatform_LibCBench\ttimestamp=1369257625\tlocaltime=May 22 14:20:25
+14:20:30 INFO | \t\tGOOD\tplatform_LibCBench\tplatform_LibCBench\ttimestamp=1369257630\tlocaltime=May 22 14:20:30\tcompleted successfully
+14:20:30 INFO | \tEND GOOD\tplatform_LibCBench\tplatform_LibCBench\ttimestamp=1369257630\tlocaltime=May 22 14:20:30
+14:20:31 INFO | END GOOD\t----\t----\ttimestamp=1369257630\tlocaltime=May 22 14:20:30
14:20:31 INFO | Got lock of exit_code_file.
14:20:31 INFO | Released lock of exit_code_file and closed it.
OUTPUT: ==============================
@@ -71,14 +75,14 @@ Done: 0% [ ]
OUTPUT: Thread Status:
RUNNING: 1 ('ttt: LibCBench (1)' 0:01:21)
Machine Status:
-Machine Thread Lock Status Checksum
+Machine Thread Lock Status Checksum
172.17.128.241 ttt: LibCBench (1) True RUNNING 3ba9f2ecbb222f20887daea5583d86ba
OUTPUT: ==============================
14:20:33 INFO | Killing child processes.
14:20:33 INFO | Client complete
14:20:33 INFO | Finished processing control file
-14:20:33 INFO | Starting master ssh connection '/usr/bin/ssh -a -x -N -o ControlMaster=yes -o ControlPath=/tmp/_autotmp_aVJUgmssh-master/socket -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=yes -o ConnectTimeout=30 -o ServerAliveInterval=180 -o ServerAliveCountMax=3 -o ConnectionAttempts=4 -o Protocol=2 -l root -p 22 172.17.128.241'
+14:20:33 INFO | Starting main ssh connection '/usr/bin/ssh -a -x -N -o ControlMain=yes -o ControlPath=/tmp/_autotmp_aVJUgmssh-main/socket -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=yes -o ConnectTimeout=30 -o ServerAliveInterval=180 -o ServerAliveCountMax=3 -o ConnectionAttempts=4 -o Protocol=2 -l root -p 22 172.17.128.241'
14:20:33 ERROR| [stderr] Warning: Permanently added '172.17.128.241' (RSA) to the list of known hosts.
INFO : Test results:
@@ -115,7 +119,7 @@ platform_LibCBench/platform_LibCBench b_utf8_onebyone__0_
-------------------------------------------------------------------
Total PASS: 2/2 (100%)
-INFO : Elapsed time: 0m16s
+INFO : Elapsed time: 0m16s
"""
error = """
@@ -176,7 +180,7 @@ PERF_DATA_HEADER = """
# total memory : 5911496 kB
# cmdline : /usr/bin/perf record -e instructions -p {pid}
# event : name = instructions, , id = ( 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193 ), type = 8, size = 112
-# event : name = dummy:u, , id = ( 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204 ), type = 1, size = 112, config = 0x9
+# event : name = placeholder:u, , id = ( 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204 ), type = 1, size = 112, config = 0x9
# CPU_TOPOLOGY info available, use -I to display
# pmu mappings: software = 1, uprobe = 6, cs_etm = 8, breakpoint = 5, tracepoint = 2, armv8_pmuv3 = 7
# contains AUX area data (e.g. instruction trace)
@@ -188,8 +192,8 @@ PERF_DATA_HEADER = """
#
"""
-TURBOSTAT_LOG_OUTPUT = \
-"""CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
+TURBOSTAT_LOG_OUTPUT = (
+ """CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
- 329 12.13 2723 2393 10975 77
0 336 12.41 2715 2393 6328 77
2 323 11.86 2731 2393 4647 69
@@ -217,7 +221,7 @@ CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
- 843 29.83 2832 2393 28161 47
0 827 29.35 2826 2393 16093 47
2 858 30.31 2838 2393 12068 46
-"""
+""")
TURBOSTAT_DATA = {
'cpufreq': {
'all': [2723, 2884, 2927, 2937, 2932, 2933, 2832]
@@ -227,8 +231,7 @@ TURBOSTAT_DATA = {
},
}
-TOP_LOG = \
-"""
+TOP_LOG = ("""
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
4102 chronos 12 -8 3454472 238300 118188 R 41.8 6.1 0:08.37 chrome
4204 chronos 12 -8 2492716 205728 179016 S 11.8 5.3 0:03.89 chrome
@@ -250,7 +253,7 @@ TOP_LOG = \
5713 chronos 20 0 5178652 103120 50372 S 17.8 2.6 0:01.13 chrome
7 root 20 0 0 0 0 S 1.0 0.0 0:00.73 rcu_preempt
855 root 20 0 0 0 0 S 1.0 0.0 0:00.01 kworker/4:2
-"""
+""")
TOP_DATA = [
{
'cmd': 'chrome-5745',
@@ -301,8 +304,7 @@ TOP_DATA = [
'top5_cpu_use': [1.0],
},
]
-TOP_OUTPUT = \
-""" COMMAND AVG CPU% SEEN HIGHEST 5
+TOP_OUTPUT = (""" COMMAND AVG CPU% SEEN HIGHEST 5
chrome 128.250000 6 [122.8, 107.9, 17.8, 5.0, 2.0]
irq/230-cros-ec 1.000000 1 [2.0]
sshd 0.500000 1 [1.0]
@@ -310,10 +312,9 @@ TOP_OUTPUT = \
spi5 0.500000 1 [1.0]
rcu_preempt 0.500000 1 [1.0]
kworker/4:2 0.500000 1 [1.0]
-"""
+""")
-CPUSTATS_UNIQ_OUTPUT = \
-"""
+CPUSTATS_UNIQ_OUTPUT = ("""
/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_cur_freq 1512000
/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq 1512000
/sys/devices/system/cpu/cpu3/cpufreq/cpuinfo_cur_freq 2016000
@@ -326,7 +327,7 @@ big-cpu 51234
soc-thermal 45456
little-cpu 42555
big-cpu 61724
-"""
+""")
CPUSTATS_UNIQ_DATA = {
'cpufreq': {
'cpu0': [1512, 1500],
@@ -339,8 +340,7 @@ CPUSTATS_UNIQ_DATA = {
'big-cpu': [51.2, 61.7]
}
}
-CPUSTATS_DUPL_OUTPUT = \
-"""
+CPUSTATS_DUPL_OUTPUT = ("""
/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_cur_freq 1512000
/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq 1512000
/sys/devices/system/cpu/cpu2/cpufreq/cpuinfo_cur_freq 1512000
@@ -353,7 +353,7 @@ CPUSTATS_DUPL_OUTPUT = \
/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq 1614000
/sys/devices/system/cpu/cpu2/cpufreq/cpuinfo_cur_freq 1614000
/sys/devices/system/cpu/cpu3/cpufreq/cpuinfo_cur_freq 1982000
-"""
+""")
CPUSTATS_DUPL_DATA = {
'cpufreq': {
'cpu0': [1512, 1500, 1614],
@@ -363,8 +363,7 @@ CPUSTATS_DUPL_DATA = {
TMP_DIR1 = '/tmp/tmpAbcXyz'
-HISTOGRAMSET = \
-"""
+HISTOGRAMSET = ("""
[
{
"values": [
@@ -436,14 +435,13 @@ HISTOGRAMSET = \
}
]
-"""
+""")
# pylint: enable=line-too-long
class MockResult(Result):
"""Mock result class."""
-
def __init__(self, mylogger, label, logging_level, machine):
super(MockResult, self).__init__(mylogger, label, logging_level, machine)
@@ -459,7 +457,6 @@ class MockResult(Result):
class ResultTest(unittest.TestCase):
"""Result test class."""
-
def __init__(self, *args, **kwargs):
super(ResultTest, self).__init__(*args, **kwargs)
self.callFakeProcessResults = False
@@ -488,8 +485,8 @@ class ResultTest(unittest.TestCase):
def testCreateFromRun(self):
result = MockResult.CreateFromRun(logger.GetLogger(), 'average',
- self.mock_label, 'remote1', OUTPUT, error,
- 0, True)
+ self.mock_label, 'remote1', OUTPUT,
+ error, 0, True)
self.assertEqual(result.keyvals, keyvals)
self.assertEqual(result.chroot_results_dir,
'/tmp/test_that.PO1234567/platform_LibCBench')
@@ -523,8 +520,8 @@ class ResultTest(unittest.TestCase):
second_args = mock_copyfiles.call_args_list[1][0]
third_args = mock_copyfiles.call_args_list[2][0]
self.assertEqual(first_args, ('src_file_1', '/tmp/test/src_file_1.0'))
- self.assertEqual(second_args, ('src_file_2', '/tmp/test/src_file_2.0'))
- self.assertEqual(third_args, ('src_file_3', '/tmp/test/src_file_3.0'))
+ self.assertEqual(second_args, ('src_file_2', '/tmp/test/src_file_2.1'))
+ self.assertEqual(third_args, ('src_file_3', '/tmp/test/src_file_3.2'))
mock_runcmd.reset_mock()
mock_copyfiles.reset_mock()
@@ -537,7 +534,8 @@ class ResultTest(unittest.TestCase):
mock_runcmd.call_args_list[1])
self.assertEqual(mock_runcmd.call_args_list[0],
mock_runcmd.call_args_list[2])
- self.assertEqual(mock_runcmd.call_args_list[0][0], ('mkdir -p /tmp/test',))
+ self.assertEqual(mock_runcmd.call_args_list[0][0],
+ ('mkdir -p /tmp/test', ))
# test 3. CopyFiles returns 1 (fails).
mock_copyfiles.return_value = 1
@@ -719,7 +717,8 @@ class ResultTest(unittest.TestCase):
mock_mkdtemp.return_value = TMP_DIR1
mock_chrootruncmd.return_value = [
- '', ('%s,PASS\n%s/telemetry_Crosperf,PASS\n') % (TMP_DIR1, TMP_DIR1), ''
+ '', ('%s,PASS\n%s/telemetry_Crosperf,PASS\n') % (TMP_DIR1, TMP_DIR1),
+ ''
]
mock_getpath.return_value = TMP_DIR1
self.result.ce.ChrootRunCommandWOutput = mock_chrootruncmd
@@ -734,7 +733,7 @@ class ResultTest(unittest.TestCase):
self.assertEqual(self.kv_dict, {'': 'PASS', 'telemetry_Crosperf': 'PASS'})
self.assertEqual(mock_runcmd.call_count, 1)
self.assertEqual(mock_runcmd.call_args_list[0][0],
- ('cp -r /tmp/test_that_resultsNmq/* %s' % TMP_DIR1,))
+ ('cp -r /tmp/test_that_resultsNmq/* %s' % TMP_DIR1, ))
self.assertEqual(mock_chrootruncmd.call_count, 1)
self.assertEqual(
mock_chrootruncmd.call_args_list[0][0],
@@ -773,15 +772,26 @@ class ResultTest(unittest.TestCase):
@mock.patch.object(misc, 'GetInsideChrootPath')
@mock.patch.object(command_executer.CommandExecuter,
'ChrootRunCommandWOutput')
- def test_get_samples(self, mock_chrootruncmd, mock_getpath):
- fake_file = '/usr/chromeos/chroot/tmp/results/fake_file'
+ @mock.patch.object(os.path, 'exists')
+ def test_get_samples(self, mock_exists, mock_get_total_samples,
+ mock_getpath):
self.result.perf_data_files = ['/tmp/results/perf.data']
self.result.board = 'samus'
- mock_getpath.return_value = fake_file
- self.result.ce.ChrootRunCommandWOutput = mock_chrootruncmd
- mock_chrootruncmd.return_value = ['', '45.42% 237210 chrome ', '']
- samples = self.result.GetSamples()
- self.assertEqual(samples, [237210, u'samples'])
+ mock_getpath.return_value = '/usr/chromeos/chroot/tmp/results/perf.data'
+ mock_get_total_samples.return_value = [
+ '', '45.42% 237210 chrome ', ''
+ ]
+ mock_exists.return_value = True
+
+ # mock_open does not seem to support iteration.
+ # pylint: disable=line-too-long
+ content = """1.63% 66 dav1d-tile chrome [.] decode_coefs
+ 1.48% 60 swapper [kernel.kallsyms] [k] intel_idle
+ 1.16% 47 dav1d-tile chrome [.] decode_sb"""
+
+ with mock.patch('builtins.open', return_value=io.StringIO(content)):
+ samples = self.result.GetSamples()
+ self.assertEqual(samples, [237210 - 60, u'samples'])
def test_get_results_dir(self):
@@ -805,7 +815,7 @@ class ResultTest(unittest.TestCase):
res = self.result.FindFilesInResultsDir('-name perf.data')
self.assertEqual(mock_runcmd.call_count, 1)
self.assertEqual(mock_runcmd.call_args_list[0][0],
- ('find /tmp/test_results -name perf.data',))
+ ('find /tmp/test_results -name perf.data', ))
self.assertEqual(res, '/tmp/test_results/perf.data')
mock_runcmd.reset_mock()
@@ -821,7 +831,8 @@ class ResultTest(unittest.TestCase):
self.result.FindFilesInResultsDir = mock_findfiles
res = self.result.GetPerfDataFiles()
self.assertEqual(res, ['line1', 'line1'])
- self.assertEqual(mock_findfiles.call_args_list[0][0], ('-name perf.data',))
+ self.assertEqual(mock_findfiles.call_args_list[0][0],
+ ('-name perf.data', ))
def test_get_perf_report_files(self):
self.args = None
@@ -952,16 +963,18 @@ class ResultTest(unittest.TestCase):
"""Verify perf PID which is present in TOP_DATA."""
self.result.top_cmds = TOP_DATA
# pid is present in TOP_DATA.
- with mock.patch.object(
- Result, 'ReadPidFromPerfData', return_value=['5713']):
+ with mock.patch.object(Result,
+ 'ReadPidFromPerfData',
+ return_value=['5713']):
self.result.VerifyPerfDataPID()
def test_verify_perf_data_pid_fail(self):
"""Test perf PID missing in top raises the error."""
self.result.top_cmds = TOP_DATA
# pid is not in the list of top processes.
- with mock.patch.object(
- Result, 'ReadPidFromPerfData', return_value=['9999']):
+ with mock.patch.object(Result,
+ 'ReadPidFromPerfData',
+ return_value=['9999']):
with self.assertRaises(PidVerificationError):
self.result.VerifyPerfDataPID()
@@ -970,7 +983,9 @@ class ResultTest(unittest.TestCase):
def test_read_pid_from_perf_data_ok(self, mock_runcmd):
"""Test perf header parser, normal flow."""
self.result.ce.ChrootRunCommandWOutput = mock_runcmd
- self.result.perf_data_files = ['/tmp/chromeos/chroot/tmp/results/perf.data']
+ self.result.perf_data_files = [
+ '/tmp/chromeos/chroot/tmp/results/perf.data'
+ ]
exp_pid = '12345'
mock_runcmd.return_value = (0, PERF_DATA_HEADER.format(pid=exp_pid), '')
pids = self.result.ReadPidFromPerfData()
@@ -1001,7 +1016,9 @@ class ResultTest(unittest.TestCase):
def test_read_pid_from_perf_data_no_pid(self, mock_runcmd):
"""Test perf.data without PID."""
self.result.ce.ChrootRunCommandWOutput = mock_runcmd
- self.result.perf_data_files = ['/tmp/chromeos/chroot/tmp/results/perf.data']
+ self.result.perf_data_files = [
+ '/tmp/chromeos/chroot/tmp/results/perf.data'
+ ]
cmd_line = '# cmdline : /usr/bin/perf record -e instructions'
mock_runcmd.return_value = (0, cmd_line, '')
pids = self.result.ReadPidFromPerfData()
@@ -1013,7 +1030,9 @@ class ResultTest(unittest.TestCase):
def test_read_pid_from_perf_data_system_wide(self, mock_runcmd):
"""Test reading from system-wide profile with PID."""
self.result.ce.ChrootRunCommandWOutput = mock_runcmd
- self.result.perf_data_files = ['/tmp/chromeos/chroot/tmp/results/perf.data']
+ self.result.perf_data_files = [
+ '/tmp/chromeos/chroot/tmp/results/perf.data'
+ ]
# There is '-p <pid>' in command line but it's still system-wide: '-a'.
cmd_line = '# cmdline : /usr/bin/perf record -e instructions -a -p 1234'
mock_runcmd.return_value = (0, cmd_line, '')
@@ -1026,7 +1045,9 @@ class ResultTest(unittest.TestCase):
def test_read_pid_from_perf_data_read_fail(self, mock_runcmd):
"""Failure to read perf.data raises the error."""
self.result.ce.ChrootRunCommandWOutput = mock_runcmd
- self.result.perf_data_files = ['/tmp/chromeos/chroot/tmp/results/perf.data']
+ self.result.perf_data_files = [
+ '/tmp/chromeos/chroot/tmp/results/perf.data'
+ ]
# Error status of the profile read.
mock_runcmd.return_value = (1, '', '')
with self.assertRaises(PerfDataReadError):
@@ -1037,7 +1058,9 @@ class ResultTest(unittest.TestCase):
def test_read_pid_from_perf_data_fail(self, mock_runcmd):
"""Failure to find cmdline in perf.data header raises the error."""
self.result.ce.ChrootRunCommandWOutput = mock_runcmd
- self.result.perf_data_files = ['/tmp/chromeos/chroot/tmp/results/perf.data']
+ self.result.perf_data_files = [
+ '/tmp/chromeos/chroot/tmp/results/perf.data'
+ ]
# Empty output.
mock_runcmd.return_value = (0, '', '')
with self.assertRaises(PerfDataReadError):
@@ -1262,12 +1285,11 @@ class ResultTest(unittest.TestCase):
self.assertEqual(mock_chrootruncmd.call_args_list[0][0],
(self.result.chromeos_root,
('/usr/sbin/perf report -n --symfs /tmp/debug '
- '--vmlinux /tmp/debug/boot/vmlinux '
+ '--vmlinux /tmp/debug/usr/lib/debug/boot/vmlinux '
'-i %s --stdio > %s') % (fake_file, fake_file)))
@mock.patch.object(misc, 'GetOutsideChrootPath')
def test_populate_from_run(self, mock_getpath):
-
def FakeGetResultsDir():
self.callGetResultsDir = True
return '/tmp/results_dir'
@@ -1355,7 +1377,6 @@ class ResultTest(unittest.TestCase):
return {'Total': 10}
def test_process_results(self):
-
def FakeGatherPerfResults():
self.callGatherPerfResults = True
@@ -1401,16 +1422,17 @@ class ResultTest(unittest.TestCase):
self.result.ProcessResults()
shutil.rmtree(os.path.dirname(self.result.results_file[0]))
# Verify the summary for the story is correct
- self.assertEqual(self.result.keyvals['timeToFirstContentfulPaint__typical'],
- [880.000, u'ms_smallerIsBetter'])
+ self.assertEqual(
+ self.result.keyvals['timeToFirstContentfulPaint__typical'],
+ [880.000, u'ms_smallerIsBetter'])
    # Verify the summary for a certain story tag is correct
self.assertEqual(
- self.result
- .keyvals['timeToFirstContentfulPaint__cache_temperature:cold'],
+ self.result.
+ keyvals['timeToFirstContentfulPaint__cache_temperature:cold'],
[1000.000, u'ms_smallerIsBetter'])
self.assertEqual(
- self.result
- .keyvals['timeToFirstContentfulPaint__cache_temperature:warm'],
+ self.result.
+ keyvals['timeToFirstContentfulPaint__cache_temperature:warm'],
[800.000, u'ms_smallerIsBetter'])
@mock.patch.object(Result, 'ProcessCpustatsResults')
@@ -1566,7 +1588,8 @@ class ResultTest(unittest.TestCase):
u'telemetry_page_measurement_results__num_errored': [0, u'count'],
u'string-fasta__string-fasta': [23.2, u'ms'],
u'crypto-sha1__crypto-sha1': [11.6, u'ms'],
- u'bitops-3bit-bits-in-byte__bitops-3bit-bits-in-byte': [3.2, u'ms'],
+ u'bitops-3bit-bits-in-byte__bitops-3bit-bits-in-byte':
+ [3.2, u'ms'],
u'access-nsieve__access-nsieve': [7.9, u'ms'],
u'bitops-nsieve-bits__bitops-nsieve-bits': [9.4, u'ms'],
u'string-validate-input__string-validate-input': [19.3, u'ms'],
@@ -1604,7 +1627,8 @@ class ResultTest(unittest.TestCase):
u'telemetry_page_measurement_results__num_errored': [0, u'count'],
u'string-fasta__string-fasta': [23.2, u'ms'],
u'crypto-sha1__crypto-sha1': [11.6, u'ms'],
- u'bitops-3bit-bits-in-byte__bitops-3bit-bits-in-byte': [3.2, u'ms'],
+ u'bitops-3bit-bits-in-byte__bitops-3bit-bits-in-byte':
+ [3.2, u'ms'],
u'access-nsieve__access-nsieve': [7.9, u'ms'],
u'bitops-nsieve-bits__bitops-nsieve-bits': [9.4, u'ms'],
u'string-validate-input__string-validate-input': [19.3, u'ms'],
@@ -1651,8 +1675,9 @@ class ResultTest(unittest.TestCase):
self.assertEqual(mock_getroot.call_count, 1)
self.assertEqual(mock_runcmd.call_count, 2)
self.assertEqual(mock_runcmd.call_args_list[0][0],
- ('rm -rf test_results_dir',))
- self.assertEqual(mock_runcmd.call_args_list[1][0], ('rm -rf testtemp_dir',))
+ ('rm -rf test_results_dir', ))
+ self.assertEqual(mock_runcmd.call_args_list[1][0],
+ ('rm -rf testtemp_dir', ))
    # Test 2. Same, except that the results_dir name does not contain
# 'test_that_results_'
@@ -1666,8 +1691,9 @@ class ResultTest(unittest.TestCase):
self.assertEqual(mock_getroot.call_count, 1)
self.assertEqual(mock_runcmd.call_count, 2)
self.assertEqual(mock_runcmd.call_args_list[0][0],
- ('rm -rf /tmp/tmp_AbcXyz',))
- self.assertEqual(mock_runcmd.call_args_list[1][0], ('rm -rf testtemp_dir',))
+ ('rm -rf /tmp/tmp_AbcXyz', ))
+ self.assertEqual(mock_runcmd.call_args_list[1][0],
+ ('rm -rf testtemp_dir', ))
# Test 3. mock_getroot returns nothing; 'rm_chroot_tmp' is False.
mock_getroot.reset_mock()
@@ -1675,7 +1701,8 @@ class ResultTest(unittest.TestCase):
self.result.CleanUp(False)
self.assertEqual(mock_getroot.call_count, 0)
self.assertEqual(mock_runcmd.call_count, 1)
- self.assertEqual(mock_runcmd.call_args_list[0][0], ('rm -rf testtemp_dir',))
+ self.assertEqual(mock_runcmd.call_args_list[0][0],
+ ('rm -rf testtemp_dir', ))
# Test 4. 'rm_chroot_tmp' is True, but result_dir & temp_dir are None.
mock_getroot.reset_mock()
@@ -1689,7 +1716,6 @@ class ResultTest(unittest.TestCase):
@mock.patch.object(misc, 'GetInsideChrootPath')
@mock.patch.object(command_executer.CommandExecuter, 'ChrootRunCommand')
def test_store_to_cache_dir(self, mock_chrootruncmd, mock_getpath):
-
def FakeMkdtemp(directory=''):
if directory:
pass
@@ -1724,7 +1750,7 @@ class ResultTest(unittest.TestCase):
base_dir = os.path.join(os.getcwd(), 'test_cache/compare_output')
self.assertTrue(os.path.exists(os.path.join(test_dir, 'autotest.tbz2')))
self.assertTrue(os.path.exists(os.path.join(test_dir, 'machine.txt')))
- self.assertTrue(os.path.exists(os.path.join(test_dir, 'results.txt')))
+ self.assertTrue(os.path.exists(os.path.join(test_dir, 'results.pickle')))
f1 = os.path.join(test_dir, 'machine.txt')
f2 = os.path.join(base_dir, 'machine.txt')
@@ -1732,11 +1758,13 @@ class ResultTest(unittest.TestCase):
[_, out, _] = self.result.ce.RunCommandWOutput(cmd)
self.assertEqual(len(out), 0)
- f1 = os.path.join(test_dir, 'results.txt')
- f2 = os.path.join(base_dir, 'results.txt')
- cmd = 'diff %s %s' % (f1, f2)
- [_, out, _] = self.result.ce.RunCommandWOutput(cmd)
- self.assertEqual(len(out), 0)
+ f1 = os.path.join(test_dir, 'results.pickle')
+ f2 = os.path.join(base_dir, 'results.pickle')
+ with open(f1, 'rb') as f:
+ f1_obj = pickle.load(f)
+ with open(f2, 'rb') as f:
+ f2_obj = pickle.load(f)
+ self.assertEqual(f1_obj, f2_obj)
# Clean up after test.
tempfile.mkdtemp = save_real_mkdtemp
@@ -1747,87 +1775,87 @@ class ResultTest(unittest.TestCase):
TELEMETRY_RESULT_KEYVALS = {
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'math-cordic (ms)':
- '11.4',
+ '11.4',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'access-nbody (ms)':
- '6.9',
+ '6.9',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'access-fannkuch (ms)':
- '26.3',
+ '26.3',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'math-spectral-norm (ms)':
- '6.3',
+ '6.3',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'bitops-nsieve-bits (ms)':
- '9.3',
+ '9.3',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'math-partial-sums (ms)':
- '32.8',
+ '32.8',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'regexp-dna (ms)':
- '16.1',
+ '16.1',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'3d-cube (ms)':
- '42.7',
+ '42.7',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'crypto-md5 (ms)':
- '10.8',
+ '10.8',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'crypto-sha1 (ms)':
- '12.4',
+ '12.4',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'string-tagcloud (ms)':
- '47.2',
+ '47.2',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'string-fasta (ms)':
- '36.3',
+ '36.3',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'access-binary-trees (ms)':
- '7.3',
+ '7.3',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'date-format-xparb (ms)':
- '138.1',
+ '138.1',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'crypto-aes (ms)':
- '19.2',
+ '19.2',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'Total (ms)':
- '656.5',
+ '656.5',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'string-base64 (ms)':
- '17.5',
+ '17.5',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'string-validate-input (ms)':
- '24.8',
+ '24.8',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'3d-raytrace (ms)':
- '28.7',
+ '28.7',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'controlflow-recursive (ms)':
- '5.3',
+ '5.3',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'bitops-bits-in-byte (ms)':
- '9.8',
+ '9.8',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'3d-morph (ms)':
- '50.2',
+ '50.2',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'bitops-bitwise-and (ms)':
- '8.8',
+ '8.8',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'access-nsieve (ms)':
- '8.6',
+ '8.6',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'date-format-tofte (ms)':
- '31.2',
+ '31.2',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'bitops-3bit-bits-in-byte (ms)':
- '3.5',
+ '3.5',
'retval':
- 0,
+ 0,
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'string-unpack-code (ms)':
- '45.0'
+ '45.0'
}
PURE_TELEMETRY_OUTPUT = """
@@ -1837,7 +1865,6 @@ page_name,3d-cube (ms),3d-morph (ms),3d-raytrace (ms),Total (ms),access-binary-t
class TelemetryResultTest(unittest.TestCase):
"""Telemetry result test."""
-
def __init__(self, *args, **kwargs):
super(TelemetryResultTest, self).__init__(*args, **kwargs)
self.callFakeProcessResults = False
@@ -1848,12 +1875,10 @@ class TelemetryResultTest(unittest.TestCase):
'autotest_dir', 'debug_dir', '/tmp', 'lumpy',
'remote', 'image_args', 'cache_dir', 'average',
'gcc', False, None)
- self.mock_machine = machine_manager.MockCrosMachine('falco.cros',
- '/tmp/chromeos',
- 'average')
+ self.mock_machine = machine_manager.MockCrosMachine(
+ 'falco.cros', '/tmp/chromeos', 'average')
def test_populate_from_run(self):
-
def FakeProcessResults():
self.callFakeProcessResults = True
@@ -1884,7 +1909,6 @@ class TelemetryResultTest(unittest.TestCase):
class ResultsCacheTest(unittest.TestCase):
"""Resultcache test class."""
-
def __init__(self, *args, **kwargs):
super(ResultsCacheTest, self).__init__(*args, **kwargs)
self.fakeCacheReturnResult = None
@@ -1926,7 +1950,6 @@ class ResultsCacheTest(unittest.TestCase):
@mock.patch.object(image_checksummer.ImageChecksummer, 'Checksum')
def test_get_cache_dir_for_write(self, mock_checksum):
-
def FakeGetMachines(label):
if label:
pass
@@ -1940,8 +1963,8 @@ class ResultsCacheTest(unittest.TestCase):
mock_checksum.return_value = 'FakeImageChecksumabc123'
self.results_cache.machine_manager.GetMachines = FakeGetMachines
- self.results_cache.machine_manager.machine_checksum['mock_label'] = \
- 'FakeMachineChecksumabc987'
+ self.results_cache.machine_manager.machine_checksum['mock_label'] = (
+ 'FakeMachineChecksumabc987')
# Based on the label, benchmark and machines, get the directory in which
# to store the cache information for this test run.
result_path = self.results_cache.GetCacheDirForWrite()
@@ -1987,8 +2010,8 @@ class ResultsCacheTest(unittest.TestCase):
mock_checksum.return_value = 'FakeImageChecksumabc123'
self.results_cache.machine_manager.GetMachines = FakeGetMachines
- self.results_cache.machine_manager.machine_checksum['mock_label'] = \
- 'FakeMachineChecksumabc987'
+ self.results_cache.machine_manager.machine_checksum['mock_label'] = (
+ 'FakeMachineChecksumabc987')
# Test 1. Generating cache name for reading (not writing).
key_list = self.results_cache.GetCacheKeyList(True)
@@ -2035,7 +2058,8 @@ class ResultsCacheTest(unittest.TestCase):
# Test 5. Generating cache name for writing, with local image type, and
# specifying that the image path must match the cached image path.
self.results_cache.label.image_type = 'local'
- self.results_cache.cache_conditions.append(CacheConditions.IMAGE_PATH_MATCH)
+ self.results_cache.cache_conditions.append(
+ CacheConditions.IMAGE_PATH_MATCH)
key_list = self.results_cache.GetCacheKeyList(False)
self.assertEqual(key_list[0], '54524606abaae4fdf7b02f49f7ae7127')
self.assertEqual(key_list[3], 'fda29412ceccb72977516c4785d08e2c')
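
Note: the cache tests above now compare deserialized objects instead of
byte-diffing results files, since pickle output is not byte-stable across
Python versions. A minimal sketch of the comparison helper (not part of the
change itself):

    import pickle

    def pickles_equal(path_a, path_b):
        # Load both pickled objects and compare values, not raw bytes.
        with open(path_a, 'rb') as f:
            obj_a = pickle.load(f)
        with open(path_b, 'rb') as f:
            obj_b = pickle.load(f)
        return obj_a == obj_b
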
diff --git a/crosperf/schedv2.py b/crosperf/schedv2.py
index 68e1e5b8..49c6344d 100644
--- a/crosperf/schedv2.py
+++ b/crosperf/schedv2.py
@@ -108,8 +108,8 @@ class DutWorker(Thread):
if self._terminated:
return 1
- if self._sched.get_experiment().skylab:
- self._logger.LogOutput('Skylab mode, do not image before testing.')
+ if self._sched.get_experiment().crosfleet:
+ self._logger.LogOutput('Crosfleet mode, do not image before testing.')
self._dut.label = label
return 0
@@ -295,9 +295,9 @@ class Schedv2(object):
# Split benchmarkruns set into segments. Each segment will be handled by
# a thread. Note, we use (x+3)/4 to mimic math.ceil(x/4).
n_threads = max(2, min(20, (n_benchmarkruns + 3) // 4))
- self._logger.LogOutput(('Starting {} threads to read cache status for '
- '{} benchmark runs ...').format(
- n_threads, n_benchmarkruns))
+ self._logger.LogOutput(
+ ('Starting {} threads to read cache status for '
+ '{} benchmark runs ...').format(n_threads, n_benchmarkruns))
benchmarkruns_per_thread = (n_benchmarkruns + n_threads - 1) // n_threads
benchmarkrun_segments = []
for i in range(n_threads - 1):
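
Note: the reader-thread sizing above clamps to the range [2, 20] and uses
(x + 3) // 4 as an integer ceil(x / 4). A standalone sketch of the arithmetic:

    def plan_cache_readers(n_benchmarkruns):
        # One thread per ~4 benchmark runs, clamped to between 2 and 20.
        n_threads = max(2, min(20, (n_benchmarkruns + 3) // 4))
        # Ceiling division again, so every run lands in some segment.
        per_thread = (n_benchmarkruns + n_threads - 1) // n_threads
        return n_threads, per_thread

    # e.g. plan_cache_readers(10) == (3, 4): three threads, at most 4 runs each.
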
diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py
index 7033a3e8..78834c63 100644
--- a/crosperf/settings_factory.py
+++ b/crosperf/settings_factory.py
@@ -22,14 +22,13 @@ class BenchmarkSettings(Settings):
def __init__(self, name):
super(BenchmarkSettings, self).__init__(name, 'benchmark')
self.AddField(
- TextField(
- 'test_name',
- description='The name of the test to run. '
- 'Defaults to the name of the benchmark.'))
+ TextField('test_name',
+ description='The name of the test to run. '
+ 'Defaults to the name of the benchmark.'))
self.AddField(
- TextField(
- 'test_args', description='Arguments to be passed to the '
- 'test.'))
+ TextField('test_args',
+ description='Arguments to be passed to the '
+ 'test.'))
self.AddField(
IntegerField(
'iterations',
@@ -39,24 +38,21 @@ class BenchmarkSettings(Settings):
'If not set, will run each benchmark test the optimum number of '
'times to get a stable result.'))
self.AddField(
- TextField(
- 'suite',
- default='test_that',
- description='The type of the benchmark.'))
+ TextField('suite',
+ default='test_that',
+ description='The type of the benchmark.'))
self.AddField(
- IntegerField(
- 'retries',
- default=0,
- description='Number of times to retry a '
- 'benchmark run.'))
+ IntegerField('retries',
+ default=0,
+ description='Number of times to retry a '
+ 'benchmark run.'))
self.AddField(
- BooleanField(
- 'run_local',
- description='Run benchmark harness on the DUT. '
- 'Currently only compatible with the suite: '
- 'telemetry_Crosperf.',
- required=False,
- default=True))
+ BooleanField('run_local',
+ description='Run benchmark harness on the DUT. '
+ 'Currently only compatible with the suite: '
+ 'telemetry_Crosperf.',
+ required=False,
+ default=True))
self.AddField(
FloatField(
'weight',
@@ -70,12 +66,11 @@ class LabelSettings(Settings):
def __init__(self, name):
super(LabelSettings, self).__init__(name, 'label')
self.AddField(
- TextField(
- 'chromeos_image',
- required=False,
- description='The path to the image to run tests '
- 'on, for local/custom-built images. See the '
- "'build' option for official or trybot images."))
+ TextField('chromeos_image',
+ required=False,
+ description='The path to the image to run tests '
+ 'on, for local/custom-built images. See the '
+ "'build' option for official or trybot images."))
self.AddField(
TextField(
'autotest_path',
@@ -90,53 +85,46 @@ class LabelSettings(Settings):
description='Debug info directory relative to chroot which has '
'symbols and vmlinux that can be used by perf tool.'))
self.AddField(
- TextField(
- 'chromeos_root',
- description='The path to a chromeos checkout which '
- 'contains a src/scripts directory. Defaults to '
- 'the chromeos checkout which contains the '
- 'chromeos_image.'))
- self.AddField(
- ListField(
- 'remote',
- description='A comma-separated list of IPs of chromeos'
- 'devices to run experiments on.'))
- self.AddField(
- TextField(
- 'image_args',
- required=False,
- default='',
- description='Extra arguments to pass to '
- 'image_chromeos.py.'))
- self.AddField(
- TextField(
- 'cache_dir',
- default='',
- description='The cache dir for this image.'))
- self.AddField(
- TextField(
- 'compiler',
- default='gcc',
- description='The compiler used to build the '
- 'ChromeOS image (gcc or llvm).'))
- self.AddField(
- TextField(
- 'chrome_src',
- description='The path to the source of chrome. '
- 'This is used to run telemetry benchmarks. '
- 'The default one is the src inside chroot.',
- required=False,
- default=''))
- self.AddField(
- TextField(
- 'build',
- description='The xbuddy specification for an '
- 'official or trybot image to use for tests. '
- "'/remote' is assumed, and the board is given "
- "elsewhere, so omit the '/remote/<board>/' xbuddy "
- 'prefix.',
- required=False,
- default=''))
+ TextField('chromeos_root',
+ description='The path to a chromeos checkout which '
+ 'contains a src/scripts directory. Defaults to '
+ 'the chromeos checkout which contains the '
+ 'chromeos_image.'))
+ self.AddField(
+ ListField('remote',
+                  description='A comma-separated list of IPs of chromeos '
+                  'devices to run experiments on.'))
+ self.AddField(
+ TextField('image_args',
+ required=False,
+ default='',
+ description='Extra arguments to pass to '
+ 'image_chromeos.py.'))
+ self.AddField(
+ TextField('cache_dir',
+ default='',
+ description='The cache dir for this image.'))
+ self.AddField(
+ TextField('compiler',
+ default='gcc',
+ description='The compiler used to build the '
+ 'ChromeOS image (gcc or llvm).'))
+ self.AddField(
+ TextField('chrome_src',
+ description='The path to the source of chrome. '
+ 'This is used to run telemetry benchmarks. '
+ 'The default one is the src inside chroot.',
+ required=False,
+ default=''))
+ self.AddField(
+ TextField('build',
+ description='The xbuddy specification for an '
+ 'official or trybot image to use for tests. '
+ "'/remote' is assumed, and the board is given "
+ "elsewhere, so omit the '/remote/<board>/' xbuddy "
+ 'prefix.',
+ required=False,
+ default=''))
class GlobalSettings(Settings):
@@ -145,67 +133,56 @@ class GlobalSettings(Settings):
def __init__(self, name):
super(GlobalSettings, self).__init__(name, 'global')
self.AddField(
- TextField(
- 'name',
- description='The name of the experiment. Just an '
- 'identifier.'))
- self.AddField(
- TextField(
- 'board',
- description='The target board for running '
- 'experiments on, e.g. x86-alex.'))
- self.AddField(
- BooleanField(
- 'skylab',
- description='Whether to run experiments via skylab.',
- default=False))
- self.AddField(
- ListField(
- 'remote',
- description='A comma-separated list of IPs of '
- 'chromeos devices to run experiments on.'))
- self.AddField(
- BooleanField(
- 'rerun_if_failed',
- description='Whether to re-run failed test runs '
- 'or not.',
- default=False))
- self.AddField(
- BooleanField(
- 'rm_chroot_tmp',
- default=False,
- description='Whether to remove the test_that '
- 'result in the chroot.'))
- self.AddField(
- ListField(
- 'email',
- description='Space-separated list of email '
- 'addresses to send email to.'))
- self.AddField(
- BooleanField(
- 'rerun',
- description='Whether to ignore the cache and '
- 'for tests to be re-run.',
- default=False))
- self.AddField(
- BooleanField(
- 'same_specs',
- default=True,
- description='Ensure cached runs are run on the '
- 'same kind of devices which are specified as a '
- 'remote.'))
- self.AddField(
- BooleanField(
- 'same_machine',
- default=False,
- description='Ensure cached runs are run on the '
- 'same remote.'))
- self.AddField(
- BooleanField(
- 'use_file_locks',
- default=False,
- description='DEPRECATED: Whether to use the file locks '
- 'or AFE server lock mechanism.'))
+ TextField('name',
+ description='The name of the experiment. Just an '
+ 'identifier.'))
+ self.AddField(
+ TextField('board',
+ description='The target board for running '
+ 'experiments on, e.g. x86-alex.'))
+ self.AddField(
+ BooleanField('crosfleet',
+ description='Whether to run experiments via crosfleet.',
+ default=False))
+ self.AddField(
+ ListField('remote',
+ description='A comma-separated list of IPs of '
+ 'chromeos devices to run experiments on.'))
+ self.AddField(
+ BooleanField('rerun_if_failed',
+ description='Whether to re-run failed test runs '
+ 'or not.',
+ default=False))
+ self.AddField(
+ BooleanField('rm_chroot_tmp',
+ default=False,
+ description='Whether to remove the test_that '
+ 'result in the chroot.'))
+ self.AddField(
+ ListField('email',
+ description='Space-separated list of email '
+ 'addresses to send email to.'))
+ self.AddField(
+ BooleanField('rerun',
+ description='Whether to ignore the cache and '
+                     'force tests to be re-run.',
+ default=False))
+ self.AddField(
+ BooleanField('same_specs',
+ default=True,
+ description='Ensure cached runs are run on the '
+ 'same kind of devices which are specified as a '
+ 'remote.'))
+ self.AddField(
+ BooleanField('same_machine',
+ default=False,
+ description='Ensure cached runs are run on the '
+ 'same remote.'))
+ self.AddField(
+ BooleanField('use_file_locks',
+ default=False,
+ description='DEPRECATED: Whether to use the file locks '
+ 'or AFE server lock mechanism.'))
self.AddField(
IntegerField(
'iterations',
@@ -215,79 +192,68 @@ class GlobalSettings(Settings):
'If not set, will run each benchmark test the optimum number of '
'times to get a stable result.'))
self.AddField(
- TextField(
- 'chromeos_root',
- description='The path to a chromeos checkout which '
- 'contains a src/scripts directory. Defaults to '
- 'the chromeos checkout which contains the '
- 'chromeos_image.'))
- self.AddField(
- TextField(
- 'logging_level',
- default='average',
- description='The level of logging desired. '
- "Options are 'quiet', 'average', and 'verbose'."))
- self.AddField(
- IntegerField(
- 'acquire_timeout',
- default=0,
- description='Number of seconds to wait for '
- 'machine before exit if all the machines in '
- 'the experiment file are busy. Default is 0.'))
- self.AddField(
- TextField(
- 'perf_args',
- default='',
- description='The optional profile command. It '
- 'enables perf commands to record perforamance '
- 'related counters. It must start with perf '
- 'command record or stat followed by arguments.'))
- self.AddField(
- BooleanField(
- 'download_debug',
- default=True,
- description='Download compressed debug symbols alongwith '
- 'image. This can provide more info matching symbols for'
- 'profiles, but takes larger space. By default, download'
- 'it only when perf_args is specified.'))
- self.AddField(
- TextField(
- 'cache_dir',
- default='',
- description='The abs path of cache dir. '
- 'Default is /home/$(whoami)/cros_scratch.'))
- self.AddField(
- BooleanField(
- 'cache_only',
- default=False,
- description='Whether to use only cached '
- 'results (do not rerun failed tests).'))
- self.AddField(
- BooleanField(
- 'no_email',
- default=False,
- description='Whether to disable the email to '
- 'user after crosperf finishes.'))
- self.AddField(
- BooleanField(
- 'json_report',
- default=False,
- description='Whether to generate a json version '
- 'of the report, for archiving.'))
- self.AddField(
- BooleanField(
- 'show_all_results',
- default=False,
- description='When running Telemetry tests, '
- 'whether to all the results, instead of just '
- 'the default (summary) results.'))
- self.AddField(
- TextField(
- 'share_cache',
- default='',
- description='Path to alternate cache whose data '
- 'you want to use. It accepts multiple directories '
- 'separated by a ",".'))
+ TextField('chromeos_root',
+ description='The path to a chromeos checkout which '
+ 'contains a src/scripts directory. Defaults to '
+ 'the chromeos checkout which contains the '
+ 'chromeos_image.'))
+ self.AddField(
+ TextField('logging_level',
+ default='average',
+ description='The level of logging desired. '
+ "Options are 'quiet', 'average', and 'verbose'."))
+ self.AddField(
+ IntegerField('acquire_timeout',
+ default=0,
+                     description='Number of seconds to wait for a '
+                     'machine before exiting if all the machines in '
+ 'the experiment file are busy. Default is 0.'))
+ self.AddField(
+ TextField('perf_args',
+ default='',
+ description='The optional profile command. It '
+                  'enables perf commands to record performance '
+ 'related counters. It must start with perf '
+ 'command record or stat followed by arguments.'))
+ self.AddField(
+ BooleanField('download_debug',
+ default=True,
+                     description='Download compressed debug symbols along with '
+                     'the image. This can provide more info for matching symbols '
+                     'in profiles, but takes more space. By default, download '
+ 'it only when perf_args is specified.'))
+ self.AddField(
+ TextField('cache_dir',
+ default='',
+ description='The abs path of cache dir. '
+ 'Default is /home/$(whoami)/cros_scratch.'))
+ self.AddField(
+ BooleanField('cache_only',
+ default=False,
+ description='Whether to use only cached '
+ 'results (do not rerun failed tests).'))
+ self.AddField(
+ BooleanField('no_email',
+ default=False,
+                     description='Whether to disable the email to the '
+ 'user after crosperf finishes.'))
+ self.AddField(
+ BooleanField('json_report',
+ default=False,
+ description='Whether to generate a json version '
+ 'of the report, for archiving.'))
+ self.AddField(
+ BooleanField('show_all_results',
+ default=False,
+ description='When running Telemetry tests, '
+                     'whether to show all the results, instead of just '
+ 'the default (summary) results.'))
+ self.AddField(
+ TextField('share_cache',
+ default='',
+ description='Path to alternate cache whose data '
+ 'you want to use. It accepts multiple directories '
+ 'separated by a ",".'))
self.AddField(
TextField('results_dir', default='', description='The results dir.'))
self.AddField(
@@ -297,55 +263,49 @@ class GlobalSettings(Settings):
description='Whether to compress all test results other than '
'reports into a tarball to save disk space.'))
self.AddField(
- TextField(
- 'locks_dir',
- default='',
- description='An alternate directory to use for '
- 'storing/checking machine file locks for local machines. '
- 'By default the file locks directory is '
- '/google/data/rw/users/mo/mobiletc-prebuild/locks.\n'
- 'WARNING: If you use your own locks directory, '
- 'there is no guarantee that someone else might not '
- 'hold a lock on the same machine in a different '
- 'locks directory.'))
- self.AddField(
- TextField(
- 'chrome_src',
- description='The path to the source of chrome. '
- 'This is used to run telemetry benchmarks. '
- 'The default one is the src inside chroot.',
- required=False,
- default=''))
- self.AddField(
- IntegerField(
- 'retries',
- default=0,
- description='Number of times to retry a '
- 'benchmark run.'))
- self.AddField(
- TextField(
- 'cwp_dso',
- description='The DSO type that we want to use for '
- 'CWP approximation. This is used to run telemetry '
- 'benchmarks. Valid DSO types can be found from dso_list '
- 'in experiment_factory.py. The default value is set to '
- 'be empty.',
- required=False,
- default=''))
- self.AddField(
- BooleanField(
- 'enable_aslr',
- description='Enable ASLR on the machine to run the '
- 'benchmarks. ASLR is disabled by default',
- required=False,
- default=False))
- self.AddField(
- BooleanField(
- 'ignore_min_max',
- description='When doing math for the raw results, '
- 'ignore min and max values to reduce noise.',
- required=False,
- default=False))
+ TextField('locks_dir',
+ default='',
+ description='An alternate directory to use for '
+ 'storing/checking machine file locks for local machines. '
+ 'By default the file locks directory is '
+ '/google/data/rw/users/mo/mobiletc-prebuild/locks.\n'
+ 'WARNING: If you use your own locks directory, '
+ 'there is no guarantee that someone else might not '
+ 'hold a lock on the same machine in a different '
+ 'locks directory.'))
+ self.AddField(
+ TextField('chrome_src',
+ description='The path to the source of chrome. '
+ 'This is used to run telemetry benchmarks. '
+ 'The default one is the src inside chroot.',
+ required=False,
+ default=''))
+ self.AddField(
+ IntegerField('retries',
+ default=0,
+ description='Number of times to retry a '
+ 'benchmark run.'))
+ self.AddField(
+ TextField('cwp_dso',
+ description='The DSO type that we want to use for '
+ 'CWP approximation. This is used to run telemetry '
+ 'benchmarks. Valid DSO types can be found from dso_list '
+ 'in experiment_factory.py. The default value is set to '
+ 'be empty.',
+ required=False,
+ default=''))
+ self.AddField(
+ BooleanField('enable_aslr',
+ description='Enable ASLR on the machine to run the '
+                     'benchmarks. ASLR is disabled by default.',
+ required=False,
+ default=False))
+ self.AddField(
+ BooleanField('ignore_min_max',
+ description='When doing math for the raw results, '
+ 'ignore min and max values to reduce noise.',
+ required=False,
+ default=False))
self.AddField(
TextField(
'intel_pstate',
@@ -356,12 +316,11 @@ class GlobalSettings(Settings):
required=False,
default='no_hwp'))
self.AddField(
- BooleanField(
- 'turbostat',
- description='Run turbostat process in the background'
- ' of a benchmark. Enabled by default.',
- required=False,
- default=True))
+ BooleanField('turbostat',
+ description='Run turbostat process in the background'
+ ' of a benchmark. Enabled by default.',
+ required=False,
+ default=True))
self.AddField(
FloatField(
'top_interval',
@@ -377,22 +336,20 @@ class GlobalSettings(Settings):
required=False,
default=1))
self.AddField(
- IntegerField(
- 'cooldown_temp',
- required=False,
- default=40,
- description='Wait until CPU temperature goes down below'
- ' specified temperature in Celsius'
- ' prior starting a benchmark. '
- 'By default the value is set to 40 degrees.'))
- self.AddField(
- IntegerField(
- 'cooldown_time',
- required=False,
- default=10,
- description='Wait specified time in minutes allowing'
- ' CPU to cool down. Zero value disables cooldown. '
- 'The default value is 10 minutes.'))
+ IntegerField('cooldown_temp',
+ required=False,
+ default=40,
+                     description='Wait until the CPU temperature goes below'
+                     ' the specified temperature in Celsius'
+                     ' prior to starting a benchmark. '
+ 'By default the value is set to 40 degrees.'))
+ self.AddField(
+ IntegerField('cooldown_time',
+ required=False,
+ default=10,
+                     description='Wait the specified time in minutes allowing'
+                     ' the CPU to cool down. A zero value disables cooldown. '
+ 'The default value is 10 minutes.'))
self.AddField(
EnumField(
'governor',
@@ -439,6 +396,12 @@ class GlobalSettings(Settings):
' or equal to a percent of max_freq. '
'CPU frequency is reduced to 95%% by default to reduce thermal '
'throttling.'))
+ self.AddField(
+ BooleanField(
+ 'no_lock',
+ default=False,
+ description='Do not attempt to lock the DUT.'
+            ' Useful when the lock is held externally, e.g. by crosfleet.'))
class SettingsFactory(object):
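
Note: the new no_lock field uses the same AddField/GetField machinery as the
rest of this file. A minimal stand-in for those mechanics (the real classes
live in settings.py and field.py):

    class BooleanField(object):
        # Stand-in mirroring the constructor arguments used above.
        def __init__(self, name, default=False, description='', required=False):
            self.name = name
            self.default = default

    class Settings(object):
        def __init__(self):
            self.fields = {}

        def AddField(self, field):
            self.fields[field.name] = field

        def GetField(self, name):
            # The real implementation returns the parsed or default value.
            return self.fields[name].default

    s = Settings()
    s.AddField(BooleanField('no_lock', default=False,
                            description='Do not attempt to lock the DUT.'))
    assert s.GetField('no_lock') is False
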
diff --git a/crosperf/settings_factory_unittest.py b/crosperf/settings_factory_unittest.py
index bc107110..8277e870 100755
--- a/crosperf/settings_factory_unittest.py
+++ b/crosperf/settings_factory_unittest.py
@@ -50,10 +50,10 @@ class GlobalSettingsTest(unittest.TestCase):
def test_init(self):
res = settings_factory.GlobalSettings('g_settings')
self.assertIsNotNone(res)
- self.assertEqual(len(res.fields), 39)
+ self.assertEqual(len(res.fields), 40)
self.assertEqual(res.GetField('name'), '')
self.assertEqual(res.GetField('board'), '')
- self.assertEqual(res.GetField('skylab'), False)
+ self.assertEqual(res.GetField('crosfleet'), False)
self.assertEqual(res.GetField('remote'), None)
self.assertEqual(res.GetField('rerun_if_failed'), False)
self.assertEqual(res.GetField('rm_chroot_tmp'), False)
@@ -108,7 +108,7 @@ class SettingsFactoryTest(unittest.TestCase):
g_settings = settings_factory.SettingsFactory().GetSettings(
'global', 'global')
self.assertIsInstance(g_settings, settings_factory.GlobalSettings)
- self.assertEqual(len(g_settings.fields), 39)
+ self.assertEqual(len(g_settings.fields), 40)
if __name__ == '__main__':
diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
index 17e1ad73..6bd4ff39 100644
--- a/crosperf/suite_runner.py
+++ b/crosperf/suite_runner.py
@@ -18,7 +18,7 @@ from cros_utils import command_executer
TEST_THAT_PATH = '/usr/bin/test_that'
TAST_PATH = '/usr/bin/tast'
-SKYLAB_PATH = '/usr/local/bin/skylab'
+CROSFLEET_PATH = 'crosfleet'
GS_UTIL = 'src/chromium/depot_tools/gsutil.py'
AUTOTEST_DIR = '/mnt/host/source/src/third_party/autotest/files'
CHROME_MOUNT_DIR = '/tmp/chrome_root'
@@ -75,8 +75,8 @@ class SuiteRunner(object):
def Run(self, cros_machine, label, benchmark, test_args, profiler_args):
machine_name = cros_machine.name
for i in range(0, benchmark.retries + 1):
- if label.skylab:
- ret_tup = self.Skylab_Run(label, benchmark, test_args, profiler_args)
+ if label.crosfleet:
+ ret_tup = self.Crosfleet_Run(label, benchmark, test_args, profiler_args)
else:
if benchmark.suite == 'tast':
ret_tup = self.Tast_Run(machine_name, label, benchmark)
@@ -87,12 +87,12 @@ class SuiteRunner(object):
self.logger.LogOutput('benchmark %s failed. Retries left: %s' %
(benchmark.name, benchmark.retries - i))
elif i > 0:
- self.logger.LogOutput(
- 'benchmark %s succeded after %s retries' % (benchmark.name, i))
+        self.logger.LogOutput('benchmark %s succeeded after %s retries' %
+ (benchmark.name, i))
break
else:
- self.logger.LogOutput(
- 'benchmark %s succeded on first try' % benchmark.name)
+      self.logger.LogOutput('benchmark %s succeeded on first try' %
+ benchmark.name)
break
return ret_tup
@@ -238,8 +238,8 @@ class SuiteRunner(object):
self.logger.LogOutput('Result downloaded for task %s' % task_id)
return status
- def Skylab_Run(self, label, benchmark, test_args, profiler_args):
- """Run the test via skylab.."""
+ def Crosfleet_Run(self, label, benchmark, test_args, profiler_args):
+ """Run the test via crosfleet.."""
options = []
if label.board:
options.append('-board=%s' % label.board)
@@ -257,19 +257,19 @@ class SuiteRunner(object):
dimensions.append('-dim dut_name:%s' % dut.rstrip('.cros'))
command = (('%s create-test %s %s %s') % \
- (SKYLAB_PATH, ' '.join(dimensions), ' '.join(options),
+ (CROSFLEET_PATH, ' '.join(dimensions), ' '.join(options),
benchmark.suite if
(benchmark.suite == 'telemetry_Crosperf' or
benchmark.suite == 'crosperf_Wrapper')
else benchmark.test_name))
if self.log_level != 'verbose':
- self.logger.LogOutput('Starting skylab test.')
+ self.logger.LogOutput('Starting crosfleet test.')
self.logger.LogOutput('CMD: %s' % command)
ret_tup = self._ce.RunCommandWOutput(command, command_terminator=self._ct)
if ret_tup[0] != 0:
- self.logger.LogOutput('Skylab test not created successfully.')
+ self.logger.LogOutput('Crosfleet test not created successfully.')
return ret_tup
# Std output of the command will look like:
@@ -278,9 +278,9 @@ class SuiteRunner(object):
# number in the very end of the link address.
task_id = ret_tup[1].strip().split('b')[-1]
- command = ('skylab wait-task %s' % task_id)
+ command = ('crosfleet wait-task %s' % task_id)
if self.log_level != 'verbose':
- self.logger.LogOutput('Waiting for skylab test to finish.')
+ self.logger.LogOutput('Waiting for crosfleet test to finish.')
self.logger.LogOutput('CMD: %s' % command)
ret_tup = self._ce.RunCommandWOutput(command, command_terminator=self._ct)
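
Note: the task-id extraction above relies on the crosfleet task link ending in
'b' followed by the task number. An illustration with a made-up URL:

    # split('b')[-1] keeps whatever follows the last 'b' in the output,
    # i.e. the trailing task number of the link.
    stdout = 'Created request at https://ci.chromium.org/p/chromeos/b8876236414'
    task_id = stdout.strip().split('b')[-1]
    assert task_id == '8876236414'
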
diff --git a/crosperf/suite_runner_unittest.py b/crosperf/suite_runner_unittest.py
index 86e1ef19..c1eacb32 100755
--- a/crosperf/suite_runner_unittest.py
+++ b/crosperf/suite_runner_unittest.py
@@ -64,16 +64,17 @@ class SuiteRunnerTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(SuiteRunnerTest, self).__init__(*args, **kwargs)
- self.skylab_run_args = []
+ self.crosfleet_run_args = []
self.test_that_args = []
self.tast_args = []
- self.call_skylab_run = False
+ self.call_crosfleet_run = False
self.call_test_that_run = False
self.call_tast_run = False
def setUp(self):
- self.runner = suite_runner.SuiteRunner(
- {}, self.mock_logger, 'verbose', self.mock_cmd_exec, self.mock_cmd_term)
+ self.runner = suite_runner.SuiteRunner({}, self.mock_logger, 'verbose',
+ self.mock_cmd_exec,
+ self.mock_cmd_term)
def test_get_profiler_args(self):
input_str = ("--profiler=custom_perf --profiler_args='perf_options"
@@ -98,16 +99,18 @@ class SuiteRunnerTest(unittest.TestCase):
def reset():
self.test_that_args = []
- self.skylab_run_args = []
+ self.crosfleet_run_args = []
self.tast_args = []
self.call_test_that_run = False
- self.call_skylab_run = False
+ self.call_crosfleet_run = False
self.call_tast_run = False
- def FakeSkylabRun(test_label, benchmark, test_args, profiler_args):
- self.skylab_run_args = [test_label, benchmark, test_args, profiler_args]
- self.call_skylab_run = True
- return 'Ran FakeSkylabRun'
+ def FakeCrosfleetRun(test_label, benchmark, test_args, profiler_args):
+ self.crosfleet_run_args = [
+ test_label, benchmark, test_args, profiler_args
+ ]
+ self.call_crosfleet_run = True
+ return 'Ran FakeCrosfleetRun'
def FakeTestThatRun(machine, test_label, benchmark, test_args,
profiler_args):
@@ -122,7 +125,7 @@ class SuiteRunnerTest(unittest.TestCase):
self.call_tast_run = True
return 'Ran FakeTastRun'
- self.runner.Skylab_Run = FakeSkylabRun
+ self.runner.Crosfleet_Run = FakeCrosfleetRun
self.runner.Test_That_Run = FakeTestThatRun
self.runner.Tast_Run = FakeTastRun
@@ -137,31 +140,31 @@ class SuiteRunnerTest(unittest.TestCase):
test_args = ''
profiler_args = ''
- # Test skylab run for telemetry_Crosperf and crosperf_Wrapper benchmarks.
- self.mock_label.skylab = True
+ # Test crosfleet run for telemetry_Crosperf and crosperf_Wrapper benchmarks.
+ self.mock_label.crosfleet = True
reset()
self.runner.Run(cros_machine, self.mock_label, self.crosperf_wrapper_bench,
test_args, profiler_args)
- self.assertTrue(self.call_skylab_run)
+ self.assertTrue(self.call_crosfleet_run)
self.assertFalse(self.call_test_that_run)
- self.assertEqual(self.skylab_run_args,
+ self.assertEqual(self.crosfleet_run_args,
[self.mock_label, self.crosperf_wrapper_bench, '', ''])
reset()
self.runner.Run(cros_machine, self.mock_label,
self.telemetry_crosperf_bench, test_args, profiler_args)
- self.assertTrue(self.call_skylab_run)
+ self.assertTrue(self.call_crosfleet_run)
self.assertFalse(self.call_test_that_run)
- self.assertEqual(self.skylab_run_args,
+ self.assertEqual(self.crosfleet_run_args,
[self.mock_label, self.telemetry_crosperf_bench, '', ''])
# Test test_that run for telemetry_Crosperf and crosperf_Wrapper benchmarks.
- self.mock_label.skylab = False
+ self.mock_label.crosfleet = False
reset()
self.runner.Run(cros_machine, self.mock_label, self.crosperf_wrapper_bench,
test_args, profiler_args)
self.assertTrue(self.call_test_that_run)
- self.assertFalse(self.call_skylab_run)
+ self.assertFalse(self.call_crosfleet_run)
self.assertEqual(
self.test_that_args,
['fake_machine', self.mock_label, self.crosperf_wrapper_bench, '', ''])
@@ -170,7 +173,7 @@ class SuiteRunnerTest(unittest.TestCase):
self.runner.Run(cros_machine, self.mock_label,
self.telemetry_crosperf_bench, test_args, profiler_args)
self.assertTrue(self.call_test_that_run)
- self.assertFalse(self.call_skylab_run)
+ self.assertFalse(self.call_crosfleet_run)
self.assertEqual(self.test_that_args, [
'fake_machine', self.mock_label, self.telemetry_crosperf_bench, '', ''
])
@@ -180,7 +183,7 @@ class SuiteRunnerTest(unittest.TestCase):
self.runner.Run(cros_machine, self.mock_label, self.tast_bench, '', '')
self.assertTrue(self.call_tast_run)
self.assertFalse(self.call_test_that_run)
- self.assertFalse(self.call_skylab_run)
+ self.assertFalse(self.call_crosfleet_run)
self.assertEqual(self.tast_args,
['fake_machine', self.mock_label, self.tast_bench])
@@ -257,7 +260,7 @@ class SuiteRunnerTest(unittest.TestCase):
@mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
@mock.patch.object(json, 'loads')
- def test_skylab_run_client(self, mock_json_loads, mock_runcmd):
+ def test_crosfleet_run_client(self, mock_json_loads, mock_runcmd):
def FakeDownloadResult(l, task_id):
if l and task_id:
@@ -279,10 +282,10 @@ class SuiteRunnerTest(unittest.TestCase):
}
self.mock_json.loads = mock_json_loads
- self.mock_label.skylab = True
+ self.mock_label.crosfleet = True
self.runner.DownloadResult = FakeDownloadResult
- res = self.runner.Skylab_Run(self.mock_label, self.crosperf_wrapper_bench,
- '', '')
+ res = self.runner.Crosfleet_Run(self.mock_label,
+ self.crosperf_wrapper_bench, '', '')
ret_tup = (0, '\nResults placed in tmp/swarming-12345\n', '')
self.assertEqual(res, ret_tup)
self.assertEqual(mock_runcmd.call_count, 2)
@@ -293,7 +296,7 @@ class SuiteRunnerTest(unittest.TestCase):
self.assertEqual(args_dict['command_terminator'], self.mock_cmd_term)
args_list = mock_runcmd.call_args_list[1][0]
- self.assertEqual(args_list[0], ('skylab wait-task 12345'))
+ self.assertEqual(args_list[0], ('crosfleet wait-task 12345'))
self.assertEqual(args_dict['command_terminator'], self.mock_cmd_term)
diff --git a/crosperf/test_cache/compare_output/results.txt b/crosperf/test_cache/compare_output/results.pickle
index 592e7161..587863c5 100644
--- a/crosperf/test_cache/compare_output/results.txt
+++ b/crosperf/test_cache/compare_output/results.pickle
Binary files differ
diff --git a/crosperf/test_cache/test_input/results.txt b/crosperf/test_cache/test_input/results.pickle
index 33ba6ab7..33ba6ab7 100644
--- a/crosperf/test_cache/test_input/results.txt
+++ b/crosperf/test_cache/test_input/results.pickle
diff --git a/crosperf/test_cache/test_puretelemetry_input/results.txt b/crosperf/test_cache/test_puretelemetry_input/results.pickle
index 497d1cf3..497d1cf3 100644
--- a/crosperf/test_cache/test_puretelemetry_input/results.txt
+++ b/crosperf/test_cache/test_puretelemetry_input/results.pickle