Diffstat (limited to 'crosperf')
-rwxr-xr-x  crosperf/crosperf_autolock.py              281
-rwxr-xr-x  crosperf/crosperf_unittest.py               15
-rw-r--r--  crosperf/default_remotes                    15
-rw-r--r--  crosperf/experiment.py                       3
-rw-r--r--  crosperf/experiment_factory.py             142
-rwxr-xr-x  crosperf/experiment_factory_unittest.py     41
-rw-r--r--  crosperf/experiment_runner.py               38
-rw-r--r--  crosperf/results_cache.py                   99
-rwxr-xr-x  crosperf/results_cache_unittest.py         196
-rw-r--r--  crosperf/settings_factory.py               525
-rwxr-xr-x  crosperf/settings_factory_unittest.py        4
-rw-r--r--  crosperf/test_cache/compare_output/results.pickle (renamed from crosperf/test_cache/compare_output/results.txt)  bin 8124 -> 8081 bytes
-rw-r--r--  crosperf/test_cache/test_input/results.pickle (renamed from crosperf/test_cache/test_input/results.txt)  0
-rw-r--r--  crosperf/test_cache/test_puretelemetry_input/results.pickle (renamed from crosperf/test_cache/test_puretelemetry_input/results.txt)  0
14 files changed, 823 insertions, 536 deletions
diff --git a/crosperf/crosperf_autolock.py b/crosperf/crosperf_autolock.py
new file mode 100755
index 00000000..b593fa9c
--- /dev/null
+++ b/crosperf/crosperf_autolock.py
@@ -0,0 +1,281 @@
+#!/usr/bin/env python3
+
+# Copyright 2021 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Wrapper script to automatically lock devices for crosperf."""
+
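+# Example invocation (illustrative; the experiment file and any crosperf
+# flags are passed through to crosperf unchanged):
+#   ./crosperf_autolock.py --board=coral --num-leases=2 experiment.exp
+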
+import os
+import sys
+import argparse
+import subprocess
+import contextlib
+import json
+from typing import Optional, Any
+import dataclasses
+
+# Have to do sys.path hackery because crosperf relies on PYTHONPATH
+# modifications.
+PARENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+sys.path.append(PARENT_DIR)
+
+
+def main(sys_args: list[str]) -> Optional[str]:
+ """Run crosperf_autolock. Returns error msg or None"""
+ args, leftover_args = parse_args(sys_args)
+ fleet_params = [
+ CrosfleetParams(board=args.board,
+ pool=args.pool,
+ lease_time=args.lease_time)
+ for _ in range(args.num_leases)
+ ]
+ if not fleet_params:
+ return ('No board names identified. If you want to use'
+ ' a known host, just use crosperf directly.')
+ try:
+ _run_crosperf(fleet_params, args.dut_lock_timeout, leftover_args)
+ except BoardLockError as e:
+ _eprint('ERROR:', e)
+    _eprint('You may need to log in to crosfleet; run "crosfleet login".')
+ _eprint('The leases may also be successful later on. '
+ 'Check with "crosfleet dut leases"')
+ return 'crosperf_autolock failed'
+ except BoardReleaseError as e:
+ _eprint('ERROR:', e)
+    _eprint('You may need to re-run "crosfleet dut abandon".')
+ return 'crosperf_autolock failed'
+ return None
+
+
+def parse_args(args: list[str]) -> tuple[Any, list]:
+ """Parse the CLI arguments."""
+ parser = argparse.ArgumentParser(
+ 'crosperf_autolock',
+ description='Wrapper around crosperf'
+ ' to autolock DUTs from crosfleet.',
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ parser.add_argument('--board',
+ type=str,
+                      help='Space- or comma-separated list of boards to lock.',
+ required=True,
+ default=argparse.SUPPRESS)
+ parser.add_argument('--num-leases',
+ type=int,
+ help='Number of boards to lock.',
+ metavar='NUM',
+ default=1)
+ parser.add_argument('--pool',
+ type=str,
+ help='Pool to pull from.',
+ default='DUT_POOL_QUOTA')
+ parser.add_argument('--dut-lock-timeout',
+ type=float,
+ metavar='SEC',
+ help='Number of seconds we want to try to lease a board'
+ ' from crosfleet. This option does NOT change the'
+ ' lease length.',
+ default=600)
+ parser.add_argument('--lease-time',
+ type=int,
+ metavar='MIN',
+ help='Number of minutes to lock the board. Max is 1440.',
+ default=1440)
+  parser.epilog = (
+      'For more detailed flags, see the arguments accepted by the crosperf'
+      ' executable; unrecognized args are passed through to crosperf.')
+ return parser.parse_known_args(args)
+
+
+class BoardLockError(Exception):
+ """Error to indicate failure to lock a board."""
+
+ def __init__(self, msg: str):
+ self.msg = 'BoardLockError: ' + msg
+ super().__init__(self.msg)
+
+
+class BoardReleaseError(Exception):
+ """Error to indicate failure to release a board."""
+
+ def __init__(self, msg: str):
+ self.msg = 'BoardReleaseError: ' + msg
+ super().__init__(self.msg)
+
+
+@dataclasses.dataclass(frozen=True)
+class CrosfleetParams:
+ """Dataclass to hold all crosfleet parameterizations."""
+ board: str
+ pool: str
+ lease_time: int
+
+
+def _eprint(*msg, **kwargs):
+ print(*msg, file=sys.stderr, **kwargs)
+
+
+def _run_crosperf(crosfleet_params: list[CrosfleetParams], lock_timeout: float,
+ leftover_args: list[str]):
+ """Autolock devices and run crosperf with leftover arguments.
+
+ Raises:
+ BoardLockError: When board was unable to be locked.
+ BoardReleaseError: When board was unable to be released.
+ """
+ if not crosfleet_params:
+ raise ValueError('No crosfleet params given; cannot call crosfleet.')
+
+ # We'll assume all the boards are the same type, which seems to be the case
+ # in experiments that actually get used.
+ passed_board_arg = crosfleet_params[0].board
+ with contextlib.ExitStack() as stack:
+ dut_hostnames = []
+ for param in crosfleet_params:
+ print(
+ f'Sent lock request for {param.board} for {param.lease_time} minutes'
+ '\nIf this fails, you may need to run "crosfleet dut abandon <...>"')
+ # May raise BoardLockError, abandoning previous DUTs.
+ dut_hostname = stack.enter_context(
+ crosfleet_machine_ctx(
+ param.board,
+ param.lease_time,
+ lock_timeout,
+ {'label-pool': param.pool},
+ ))
+ if dut_hostname:
+ print(f'Locked {param.board} machine: {dut_hostname}')
+ dut_hostnames.append(dut_hostname)
+
+ # We import crosperf late, because this import is extremely slow.
+ # We don't want the user to wait several seconds just to get
+ # help info.
+ import crosperf
+ for dut_hostname in dut_hostnames:
+ crosperf.Main([
+ sys.argv[0],
+ '--no_lock',
+ 'True',
+ '--remote',
+ dut_hostname,
+ '--board',
+ passed_board_arg,
+ ] + leftover_args)
+
+
+@contextlib.contextmanager
+def crosfleet_machine_ctx(board: str,
+ lease_minutes: int,
+ lock_timeout: float,
+ dims: dict[str, Any],
+ abandon_timeout: float = 120.0) -> Any:
+ """Acquire dut from crosfleet, and release once it leaves the context.
+
+ Args:
+ board: Board type to lease.
+ lease_minutes: Length of lease, in minutes.
+ lock_timeout: How long to wait for a lock until quitting.
+    dims: Dictionary of dimension arguments to pass to crosfleet's '-dims'.
+ abandon_timeout (optional): How long to wait for releasing until quitting.
+
+ Yields:
+ A string representing the crosfleet DUT hostname.
+
+ Raises:
+ BoardLockError: When board was unable to be locked.
+ BoardReleaseError: When board was unable to be released.
+ """
+ # This lock may raise an exception, but if it does, we can't release
+  # the DUT anyway, as we won't have the dut_hostname.
+ dut_hostname = crosfleet_autolock(board, lease_minutes, dims, lock_timeout)
+ try:
+ yield dut_hostname
+ finally:
+ if dut_hostname:
+ crosfleet_release(dut_hostname, abandon_timeout)
+
+
+def crosfleet_autolock(board: str, lease_minutes: int, dims: dict[str, Any],
+ timeout_sec: float) -> str:
+ """Lock a device using crosfleet, paramaterized by the board type.
+
+ Args:
+ board: Board of the DUT we want to lock.
+ lease_minutes: Number of minutes we're trying to lease the DUT for.
+    dims: Dictionary of dimension arguments to pass to crosfleet's '-dims'.
+    timeout_sec: Number of seconds to try to lease the DUT.
+
+ Returns:
+ The hostname of the board, or empty string if it couldn't be parsed.
+
+ Raises:
+ BoardLockError: When board was unable to be locked.
+ """
+ crosfleet_cmd_args = [
+ 'crosfleet',
+ 'dut',
+ 'lease',
+ '-json',
+ '-reason="crosperf autolock"',
+ f'-board={board}',
+ f'-minutes={lease_minutes}',
+ ]
+ if dims:
+ dims_arg = ','.join('{}={}'.format(k, v) for k, v in dims.items())
+ crosfleet_cmd_args.extend(['-dims', f'{dims_arg}'])
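+    # For example, the default {'label-pool': 'DUT_POOL_QUOTA'} becomes
+    # the argument pair ['-dims', 'label-pool=DUT_POOL_QUOTA'].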
+
+ try:
+ output = subprocess.check_output(crosfleet_cmd_args,
+ timeout=timeout_sec,
+ encoding='utf-8')
+  except subprocess.CalledProcessError as e:
+    raise BoardLockError(
+        f'crosfleet dut lease failed with exit code: {e.returncode}') from e
+  except subprocess.TimeoutExpired as e:
+    raise BoardLockError(
+        f'crosfleet dut lease timed out after {timeout_sec}s;'
+        ' please abandon the dut manually.') from e
+
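+  # The lease command prints JSON on stdout; the shape assumed here,
+  # based on the keys accessed below, looks like:
+  #   {"DUT": {"Hostname": "chromeos6-row4-rack13-host6"}}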
+ try:
+ json_obj = json.loads(output)
+ dut_hostname = json_obj['DUT']['Hostname']
+ if not isinstance(dut_hostname, str):
+ raise TypeError('dut_hostname was not a string')
+  except (json.JSONDecodeError, IndexError, KeyError, TypeError) as e:
+    raise BoardLockError(
+        f'crosfleet dut lease output was parsed incorrectly: {e!r};'
+        f' observed output was {output}') from e
+ return _maybe_append_suffix(dut_hostname)
+
+
+def crosfleet_release(dut_hostname: str, timeout_sec: float = 120.0):
+ """Release a crosfleet device.
+
+  Consider using the context manager crosfleet_machine_ctx instead.
+
+ Args:
+ dut_hostname: Name of the device we want to release.
+ timeout_sec: Number of seconds to try to release the DUT. Default is 120s.
+
+ Raises:
+ BoardReleaseError: Potentially failed to abandon the lease.
+ """
+ crosfleet_cmd_args = [
+ 'crosfleet',
+ 'dut',
+ 'abandon',
+ dut_hostname,
+ ]
+ exit_code = subprocess.call(crosfleet_cmd_args, timeout=timeout_sec)
+ if exit_code != 0:
+ raise BoardReleaseError(
+ f'"crosfleet dut abandon" had exit code {exit_code}')
+
+
+def _maybe_append_suffix(hostname: str) -> str:
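+  """Append a '.cros' suffix to the hostname if it is missing."""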
+ if hostname.endswith('.cros') or '.cros.' in hostname:
+ return hostname
+ return hostname + '.cros'
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/crosperf/crosperf_unittest.py b/crosperf/crosperf_unittest.py
index 9c7d52a1..774159ff 100755
--- a/crosperf/crosperf_unittest.py
+++ b/crosperf/crosperf_unittest.py
@@ -55,20 +55,19 @@ class CrosperfTest(unittest.TestCase):
def testConvertOptionsToSettings(self):
parser = argparse.ArgumentParser()
- parser.add_argument(
- '-l',
- '--log_dir',
- dest='log_dir',
- default='',
- help='The log_dir, default is under '
- '<crosperf_logs>/logs')
+ parser.add_argument('-l',
+ '--log_dir',
+ dest='log_dir',
+ default='',
+ help='The log_dir, default is under '
+ '<crosperf_logs>/logs')
crosperf.SetupParserOptions(parser)
argv = ['crosperf/crosperf.py', 'temp.exp', '--rerun=True']
options, _ = parser.parse_known_args(argv)
settings = crosperf.ConvertOptionsToSettings(options)
self.assertIsNotNone(settings)
self.assertIsInstance(settings, settings_factory.GlobalSettings)
- self.assertEqual(len(settings.fields), 39)
+ self.assertEqual(len(settings.fields), 40)
self.assertTrue(settings.GetField('rerun'))
argv = ['crosperf/crosperf.py', 'temp.exp']
options, _ = parser.parse_known_args(argv)
diff --git a/crosperf/default_remotes b/crosperf/default_remotes
index 45110752..faecb833 100644
--- a/crosperf/default_remotes
+++ b/crosperf/default_remotes
@@ -1,9 +1,8 @@
-bob : chromeos2-row10-rack9-host3.cros chromeos6-row3-rack13-host15.cros
-chell : chromeos2-row9-rack9-host1.cros chromeos2-row9-rack9-host3.cros
-coral : chromeos2-row9-rack9-host9.cros chromeos2-row9-rack9-host11.cros chromeos2-row9-rack9-host13.cros
+bob : chromeos6-row4-rack13-host6.cros
+chell : chromeos2-row1-rack10-host2.cros chromeos2-row1-rack10-host4.cros
+coral : chromeos6-row5-rack6-host1.cros chromeos6-row5-rack6-host3.cros chromeos6-row5-rack6-host5.cros
elm : chromeos6-row14-rack15-host21.cros
-kefka : chromeos6-row6-rack22-host2.cros chromeos6-row6-rack22-host3.cros
-lulu : chromeos2-row9-rack9-host5.cros chromeos2-row9-rack9-host7.cros
-nautilus : chromeos2-row10-rack9-host9.cros chromeos2-row10-rack9-host11.cros
-snappy : chromeos2-row10-rack9-host5.cros chromeos2-row10-rack9-host7.cros
-veyron_tiger : chromeos2-row9-rack9-host17.cros
+kefka : chromeos6-row6-rack22-host2.cros chromeos6-row6-rack22-host3.cros chromeos6-row11-rack22-host7.cros
+nautilus : chromeos6-row5-rack10-host1.cros chromeos6-row5-rack10-host3.cros
+snappy : chromeos6-row3-rack20-host1.cros chromeos6-row3-rack20-host3.cros
+veyron_tiger : chromeos6-row3-rack7-host1.cros
diff --git a/crosperf/experiment.py b/crosperf/experiment.py
index 854d7f77..e919f6ee 100644
--- a/crosperf/experiment.py
+++ b/crosperf/experiment.py
@@ -29,7 +29,7 @@ class Experiment(object):
cache_conditions, labels, benchmarks, experiment_file, email_to,
acquire_timeout, log_dir, log_level, share_cache,
results_directory, compress_results, locks_directory, cwp_dso,
- ignore_min_max, crosfleet, dut_config):
+ ignore_min_max, crosfleet, dut_config, no_lock: bool):
self.name = name
self.working_directory = working_directory
self.remote = remote
@@ -57,6 +57,7 @@ class Experiment(object):
self.cwp_dso = cwp_dso
self.ignore_min_max = ignore_min_max
self.crosfleet = crosfleet
+ self.no_lock = no_lock
self.l = logger.GetLogger(log_dir)
if not self.benchmarks:
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index 73928756..a9594a20 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -101,7 +101,8 @@ class ExperimentFactory(object):
def AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results, retries, run_local, cwp_dso, weight):
+ show_all_results, retries, run_local, cwp_dso,
+ weight):
"""Add all the tests in a set to the benchmarks list."""
for test_name in benchmark_list:
telemetry_benchmark = Benchmark(test_name, test_name, test_args,
@@ -121,6 +122,7 @@ class ExperimentFactory(object):
log_level = 'verbose'
crosfleet = global_settings.GetField('crosfleet')
+ no_lock = bool(global_settings.GetField('no_lock'))
# Check whether crosfleet tool is installed correctly for crosfleet mode.
if crosfleet and not self.CheckCrosfleetTool(chromeos_root, log_level):
sys.exit(0)
@@ -257,10 +259,10 @@ class ExperimentFactory(object):
if suite == 'telemetry_Crosperf':
if test_name == 'all_perfv2':
- self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests, test_args,
- iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results, retries, run_local, cwp_dso,
- weight)
+ self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests,
+ test_args, iterations, rm_chroot_tmp,
+ perf_args, suite, show_all_results, retries,
+ run_local, cwp_dso, weight)
elif test_name == 'all_pagecyclers':
self.AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
test_args, iterations, rm_chroot_tmp,
@@ -270,21 +272,20 @@ class ExperimentFactory(object):
self.AppendBenchmarkSet(benchmarks, telemetry_crosbolt_perf_tests,
test_args, iterations, rm_chroot_tmp,
perf_args, 'telemetry_Crosperf',
- show_all_results, retries, run_local, cwp_dso,
- weight)
- self.AppendBenchmarkSet(
- benchmarks,
- crosbolt_perf_tests,
- '',
- iterations,
- rm_chroot_tmp,
- perf_args,
- '',
- show_all_results,
- retries,
- run_local=False,
- cwp_dso=cwp_dso,
- weight=weight)
+ show_all_results, retries, run_local,
+ cwp_dso, weight)
+ self.AppendBenchmarkSet(benchmarks,
+ crosbolt_perf_tests,
+ '',
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ '',
+ show_all_results,
+ retries,
+ run_local=False,
+ cwp_dso=cwp_dso,
+ weight=weight)
elif test_name == 'all_toolchain_perf':
self.AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
test_args, iterations, rm_chroot_tmp,
@@ -324,10 +325,10 @@ class ExperimentFactory(object):
# weight=weight))
elif test_name == 'all_toolchain_perf_old':
self.AppendBenchmarkSet(benchmarks,
- telemetry_toolchain_old_perf_tests, test_args,
- iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results, retries, run_local, cwp_dso,
- weight)
+ telemetry_toolchain_old_perf_tests,
+ test_args, iterations, rm_chroot_tmp,
+ perf_args, suite, show_all_results, retries,
+ run_local, cwp_dso, weight)
else:
benchmark = Benchmark(benchmark_name, test_name, test_args,
iterations, rm_chroot_tmp, perf_args, suite,
@@ -336,34 +337,32 @@ class ExperimentFactory(object):
benchmarks.append(benchmark)
else:
if test_name == 'all_graphics_perf':
- self.AppendBenchmarkSet(
- benchmarks,
- graphics_perf_tests,
- '',
- iterations,
- rm_chroot_tmp,
- perf_args,
- '',
- show_all_results,
- retries,
- run_local=False,
- cwp_dso=cwp_dso,
- weight=weight)
+ self.AppendBenchmarkSet(benchmarks,
+ graphics_perf_tests,
+ '',
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ '',
+ show_all_results,
+ retries,
+ run_local=False,
+ cwp_dso=cwp_dso,
+ weight=weight)
else:
# Add the single benchmark.
- benchmark = Benchmark(
- benchmark_name,
- test_name,
- test_args,
- iterations,
- rm_chroot_tmp,
- perf_args,
- suite,
- show_all_results,
- retries,
- run_local=False,
- cwp_dso=cwp_dso,
- weight=weight)
+ benchmark = Benchmark(benchmark_name,
+ test_name,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local=False,
+ cwp_dso=cwp_dso,
+ weight=weight)
benchmarks.append(benchmark)
if not benchmarks:
@@ -410,8 +409,8 @@ class ExperimentFactory(object):
# TODO(yunlian): We should consolidate code in machine_manager.py
# to determine whether we are running from within google or not
- if ('corp.google.com' in socket.gethostname() and not my_remote and
- not crosfleet):
+ if ('corp.google.com' in socket.gethostname() and not my_remote
+ and not crosfleet):
my_remote = self.GetDefaultRemotes(board)
if global_settings.GetField('same_machine') and len(my_remote) > 1:
raise RuntimeError('Only one remote is allowed when same_machine '
@@ -422,8 +421,8 @@ class ExperimentFactory(object):
# pylint: disable=too-many-function-args
label = MockLabel(label_name, build, image, autotest_path, debug_path,
chromeos_root, board, my_remote, image_args,
- cache_dir, cache_only, log_level, compiler, crosfleet,
- chrome_src)
+ cache_dir, cache_only, log_level, compiler,
+ crosfleet, chrome_src)
else:
label = Label(label_name, build, image, autotest_path, debug_path,
chromeos_root, board, my_remote, image_args, cache_dir,
@@ -439,18 +438,33 @@ class ExperimentFactory(object):
if crosfleet:
for remote in all_remote:
self.CheckRemotesInCrosfleet(remote)
- experiment = Experiment(experiment_name, all_remote, working_directory,
- chromeos_root, cache_conditions, labels, benchmarks,
- experiment_file.Canonicalize(), email,
- acquire_timeout, log_dir, log_level, share_cache,
- results_dir, compress_results, locks_dir, cwp_dso,
- ignore_min_max, crosfleet, dut_config)
+ experiment = Experiment(experiment_name,
+ all_remote,
+ working_directory,
+ chromeos_root,
+ cache_conditions,
+ labels,
+ benchmarks,
+ experiment_file.Canonicalize(),
+ email,
+ acquire_timeout,
+ log_dir,
+ log_level,
+ share_cache,
+ results_dir,
+ compress_results,
+ locks_dir,
+ cwp_dso,
+ ignore_min_max,
+ crosfleet,
+ dut_config,
+ no_lock=no_lock)
return experiment
def GetDefaultRemotes(self, board):
- default_remotes_file = os.path.join(
- os.path.dirname(__file__), 'default_remotes')
+ default_remotes_file = os.path.join(os.path.dirname(__file__),
+ 'default_remotes')
try:
with open(default_remotes_file) as f:
for line in f:
@@ -480,8 +494,8 @@ class ExperimentFactory(object):
l = logger.GetLogger()
l.LogOutput('Crosfleet tool not installed, trying to install it.')
ce = command_executer.GetCommandExecuter(l, log_level=log_level)
- setup_lab_tools = os.path.join(chromeos_root, 'chromeos-admin', 'lab-tools',
- 'setup_lab_tools')
+ setup_lab_tools = os.path.join(chromeos_root, 'chromeos-admin',
+ 'lab-tools', 'setup_lab_tools')
cmd = '%s' % setup_lab_tools
status = ce.RunCommand(cmd)
if status != 0:
diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py
index 78cf780c..9637c108 100755
--- a/crosperf/experiment_factory_unittest.py
+++ b/crosperf/experiment_factory_unittest.py
@@ -79,14 +79,14 @@ EXPERIMENT_FILE_2 = """
class ExperimentFactoryTest(unittest.TestCase):
"""Class for running experiment factory unittests."""
-
def setUp(self):
self.append_benchmark_call_args = []
def testLoadExperimentFile1(self):
experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_1))
- exp = ExperimentFactory().GetExperiment(
- experiment_file, working_directory='', log_dir='')
+ exp = ExperimentFactory().GetExperiment(experiment_file,
+ working_directory='',
+ log_dir='')
self.assertEqual(exp.remote, ['chromeos-alex3'])
self.assertEqual(len(exp.benchmarks), 2)
@@ -104,8 +104,9 @@ class ExperimentFactoryTest(unittest.TestCase):
def testLoadExperimentFile2CWP(self):
experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_2))
- exp = ExperimentFactory().GetExperiment(
- experiment_file, working_directory='', log_dir='')
+ exp = ExperimentFactory().GetExperiment(experiment_file,
+ working_directory='',
+ log_dir='')
self.assertEqual(exp.cwp_dso, 'kallsyms')
self.assertEqual(len(exp.benchmarks), 2)
self.assertEqual(exp.benchmarks[0].weight, 0.8)
@@ -240,11 +241,12 @@ class ExperimentFactoryTest(unittest.TestCase):
ef = ExperimentFactory()
bench_list = []
- ef.AppendBenchmarkSet(bench_list, experiment_factory.telemetry_perfv2_tests,
- '', 1, False, '', 'telemetry_Crosperf', False, 0,
- False, '', 0)
- self.assertEqual(
- len(bench_list), len(experiment_factory.telemetry_perfv2_tests))
+ ef.AppendBenchmarkSet(bench_list,
+ experiment_factory.telemetry_perfv2_tests, '', 1,
+ False, '', 'telemetry_Crosperf', False, 0, False, '',
+ 0)
+ self.assertEqual(len(bench_list),
+ len(experiment_factory.telemetry_perfv2_tests))
self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
bench_list = []
@@ -252,17 +254,17 @@ class ExperimentFactoryTest(unittest.TestCase):
experiment_factory.telemetry_pagecycler_tests, '', 1,
False, '', 'telemetry_Crosperf', False, 0, False, '',
0)
- self.assertEqual(
- len(bench_list), len(experiment_factory.telemetry_pagecycler_tests))
+ self.assertEqual(len(bench_list),
+ len(experiment_factory.telemetry_pagecycler_tests))
self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
bench_list = []
ef.AppendBenchmarkSet(bench_list,
- experiment_factory.telemetry_toolchain_perf_tests, '',
- 1, False, '', 'telemetry_Crosperf', False, 0, False,
- '', 0)
- self.assertEqual(
- len(bench_list), len(experiment_factory.telemetry_toolchain_perf_tests))
+ experiment_factory.telemetry_toolchain_perf_tests,
+ '', 1, False, '', 'telemetry_Crosperf', False, 0,
+ False, '', 0)
+ self.assertEqual(len(bench_list),
+ len(experiment_factory.telemetry_toolchain_perf_tests))
self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
@mock.patch.object(socket, 'gethostname')
@@ -370,7 +372,8 @@ class ExperimentFactoryTest(unittest.TestCase):
global_settings.SetField('same_machine', 'true')
global_settings.SetField('same_specs', 'true')
- self.assertRaises(Exception, ef.GetExperiment, mock_experiment_file, '', '')
+ self.assertRaises(Exception, ef.GetExperiment, mock_experiment_file, '',
+ '')
label_settings.SetField('remote', '')
global_settings.SetField('remote', '123.45.67.89')
exp = ef.GetExperiment(mock_experiment_file, '', '')
@@ -399,7 +402,7 @@ class ExperimentFactoryTest(unittest.TestCase):
def test_get_default_remotes(self):
board_list = [
- 'bob', 'chell', 'coral', 'elm', 'kefka', 'lulu', 'nautilus', 'snappy',
+ 'bob', 'chell', 'coral', 'elm', 'kefka', 'nautilus', 'snappy',
'veyron_tiger'
]
diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py
index 49aff425..6daef780 100644
--- a/crosperf/experiment_runner.py
+++ b/crosperf/experiment_runner.py
@@ -160,8 +160,8 @@ class ExperimentRunner(object):
cache.Init(br.label.chromeos_image, br.label.chromeos_root,
br.benchmark.test_name, br.iteration, br.test_args,
br.profiler_args, br.machine_manager, br.machine,
- br.label.board, br.cache_conditions, br.logger(), br.log_level,
- br.label, br.share_cache, br.benchmark.suite,
+ br.label.board, br.cache_conditions, br.logger(),
+ br.log_level, br.label, br.share_cache, br.benchmark.suite,
br.benchmark.show_all_results, br.benchmark.run_local,
br.benchmark.cwp_dso)
cache_dir = cache.GetCacheDirForWrite()
@@ -176,7 +176,7 @@ class ExperimentRunner(object):
# no-op task on the DUT and new test created will be hanging there.
# TODO(zhizhouy): Need to check whether machine is ready or not before
# assigning a test to it.
- if not experiment.crosfleet:
+ if not experiment.no_lock and not experiment.crosfleet:
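+      # no_lock is passed by crosperf_autolock, whose DUTs are already
+      # leased through crosfleet and need no locking here.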
self._LockAllMachines(experiment)
# Calculate all checksums of available/locked machines, to ensure same
# label has same machines for testing
@@ -236,8 +236,8 @@ class ExperimentRunner(object):
if not benchmark_run.cache_hit:
send_mail = True
break
- if (not send_mail and not experiment.email_to or
- config.GetConfig('no_email')):
+ if (not send_mail and not experiment.email_to
+ or config.GetConfig('no_email')):
return
label_names = []
@@ -245,7 +245,8 @@ class ExperimentRunner(object):
label_names.append(label.name)
subject = '%s: %s' % (experiment.name, ' vs. '.join(label_names))
- text_report = TextResultsReport.FromExperiment(experiment, True).GetReport()
+ text_report = TextResultsReport.FromExperiment(experiment,
+ True).GetReport()
text_report += ('\nResults are stored in %s.\n' %
experiment.results_directory)
text_report = "<pre style='font-size: 13px'>%s</pre>" % text_report
@@ -253,12 +254,11 @@ class ExperimentRunner(object):
attachment = EmailSender.Attachment('report.html', html_report)
email_to = experiment.email_to or []
email_to.append(getpass.getuser())
- EmailSender().SendEmail(
- email_to,
- subject,
- text_report,
- attachments=[attachment],
- msg_type='html')
+ EmailSender().SendEmail(email_to,
+ subject,
+ text_report,
+ attachments=[attachment],
+ msg_type='html')
def _StoreResults(self, experiment):
if self._terminated:
@@ -300,9 +300,10 @@ class ExperimentRunner(object):
self.l.LogOutput('Storing results of each benchmark run.')
for benchmark_run in experiment.benchmark_runs:
if benchmark_run.result:
- benchmark_run_name = ''.join(
- ch for ch in benchmark_run.name if ch.isalnum())
- benchmark_run_path = os.path.join(results_directory, benchmark_run_name)
+ benchmark_run_name = ''.join(ch for ch in benchmark_run.name
+ if ch.isalnum())
+ benchmark_run_path = os.path.join(results_directory,
+ benchmark_run_name)
if experiment.compress_results:
benchmark_run.result.CompressResultsTo(benchmark_run_path)
else:
@@ -313,15 +314,16 @@ class ExperimentRunner(object):
results_table_path = os.path.join(results_directory, 'results.html')
report = HTMLResultsReport.FromExperiment(experiment).GetReport()
if self.json_report:
- json_report = JSONResultsReport.FromExperiment(
- experiment, json_args={'indent': 2})
+ json_report = JSONResultsReport.FromExperiment(experiment,
+ json_args={'indent': 2})
_WriteJSONReportToFile(experiment, results_directory, json_report)
FileUtils().WriteFile(results_table_path, report)
self.l.LogOutput('Storing email message body in %s.' % results_directory)
msg_file_path = os.path.join(results_directory, 'msg_body.html')
- text_report = TextResultsReport.FromExperiment(experiment, True).GetReport()
+ text_report = TextResultsReport.FromExperiment(experiment,
+ True).GetReport()
text_report += ('\nResults are stored in %s.\n' %
experiment.results_directory)
msg_body = "<pre style='font-size: 13px'>%s</pre>" % text_report
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index 87e30ecc..5525858c 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -27,7 +27,7 @@ import results_report
import test_flag
SCRATCH_DIR = os.path.expanduser('~/cros_scratch')
-RESULTS_FILE = 'results.txt'
+RESULTS_FILE = 'results.pickle'
MACHINE_FILE = 'machine.txt'
AUTOTEST_TARBALL = 'autotest.tbz2'
RESULTS_TARBALL = 'results.tbz2'
@@ -197,9 +197,9 @@ class Result(object):
keyvals_dict[key] = result_dict['value']
elif 'values' in result_dict:
values = result_dict['values']
- if ('type' in result_dict and
- result_dict['type'] == 'list_of_scalar_values' and values and
- values != 'null'):
+ if ('type' in result_dict
+ and result_dict['type'] == 'list_of_scalar_values' and values
+ and values != 'null'):
keyvals_dict[key] = sum(values) / float(len(values))
else:
keyvals_dict[key] = values
@@ -245,13 +245,14 @@ class Result(object):
results_in_chroot = os.path.join(self.chromeos_root, 'chroot', 'tmp')
if not self.temp_dir:
self.temp_dir = tempfile.mkdtemp(dir=results_in_chroot)
- command = 'cp -r {0}/* {1}'.format(self.results_dir, self.temp_dir)
+ command = f'cp -r {self.results_dir}/* {self.temp_dir}'
self.ce.RunCommand(command, print_to_console=False)
command = ('./generate_test_report --no-color --csv %s' %
(os.path.join('/tmp', os.path.basename(self.temp_dir))))
- _, out, _ = self.ce.ChrootRunCommandWOutput(
- self.chromeos_root, command, print_to_console=False)
+ _, out, _ = self.ce.ChrootRunCommandWOutput(self.chromeos_root,
+ command,
+ print_to_console=False)
keyvals_dict = {}
tmp_dir_in_chroot = misc.GetInsideChrootPath(self.chromeos_root,
self.temp_dir)
@@ -322,8 +323,8 @@ class Result(object):
idle_functions = {
'[kernel.kallsyms]':
- ('intel_idle', 'arch_cpu_idle', 'intel_idle', 'cpu_startup_entry',
- 'default_idle', 'cpu_idle_loop', 'do_idle'),
+ ('intel_idle', 'arch_cpu_idle', 'intel_idle', 'cpu_startup_entry',
+ 'default_idle', 'cpu_idle_loop', 'do_idle'),
}
idle_samples = 0
@@ -390,8 +391,8 @@ class Result(object):
result = (
self.FindFilesInResultsDir('-name histograms.json').splitlines())
else:
- result = (
- self.FindFilesInResultsDir('-name results-chart.json').splitlines())
+ result = (self.FindFilesInResultsDir(
+ '-name results-chart.json').splitlines())
return result
def GetTurbostatFile(self):
@@ -449,8 +450,8 @@ class Result(object):
if debug_path:
symfs = '--symfs ' + debug_path
- vmlinux = '--vmlinux ' + os.path.join(debug_path, 'usr', 'lib', 'debug',
- 'boot', 'vmlinux')
+ vmlinux = '--vmlinux ' + os.path.join(debug_path, 'usr', 'lib',
+ 'debug', 'boot', 'vmlinux')
kallsyms = ''
print('** WARNING **: --kallsyms option not applied, no System.map-* '
'for downloaded image.')
@@ -546,9 +547,9 @@ class Result(object):
values = value_dict['values']
if not values:
continue
- if ('type' in value_dict and
- value_dict['type'] == 'list_of_scalar_values' and
- values != 'null'):
+ if ('type' in value_dict
+ and value_dict['type'] == 'list_of_scalar_values'
+ and values != 'null'):
result = sum(values) / float(len(values))
else:
result = values
@@ -746,8 +747,9 @@ class Result(object):
# order.
heapq.heappush(cmd_top5_cpu_use[cmd_with_pid], round(cpu_use, 1))
- for consumer, usage in sorted(
- cmd_total_cpu_use.items(), key=lambda x: x[1], reverse=True):
+ for consumer, usage in sorted(cmd_total_cpu_use.items(),
+ key=lambda x: x[1],
+ reverse=True):
# Iterate through commands by descending order of total CPU usage.
topcmd = {
'cmd': consumer,
@@ -913,7 +915,8 @@ class Result(object):
self.chromeos_root, path_str)
if status:
# Error of reading a perf.data profile is fatal.
- raise PerfDataReadError(f'Failed to read perf.data profile: {path_str}')
+ raise PerfDataReadError(
+ f'Failed to read perf.data profile: {path_str}')
# Pattern to search a line with "perf record" command line:
# # cmdline : /usr/bin/perf record -e instructions -p 123"
@@ -938,7 +941,8 @@ class Result(object):
break
else:
# cmdline wasn't found in the header. It's a fatal error.
- raise PerfDataReadError(f'Perf command line is not found in {path_str}')
+ raise PerfDataReadError(
+ f'Perf command line is not found in {path_str}')
return pids
def VerifyPerfDataPID(self):
@@ -976,11 +980,11 @@ class Result(object):
# Note that this function doesn't know anything about whether there is a
# cache hit or miss. It should process results agnostic of the cache hit
# state.
- if (self.results_file and self.suite == 'telemetry_Crosperf' and
- 'histograms.json' in self.results_file[0]):
+ if (self.results_file and self.suite == 'telemetry_Crosperf'
+ and 'histograms.json' in self.results_file[0]):
self.keyvals = self.ProcessHistogramsResults()
- elif (self.results_file and self.suite != 'telemetry_Crosperf' and
- 'results-chart.json' in self.results_file[0]):
+ elif (self.results_file and self.suite != 'telemetry_Crosperf'
+ and 'results-chart.json' in self.results_file[0]):
self.keyvals = self.ProcessChartResults()
else:
if not use_cache:
@@ -1134,15 +1138,16 @@ class Result(object):
f.write(machine_manager.machine_checksum_string[self.label.name])
if os.path.exists(cache_dir):
- command = 'rm -rf {0}'.format(cache_dir)
+ command = f'rm -rf {cache_dir}'
self.ce.RunCommand(command)
- command = 'mkdir -p {0} && '.format(os.path.dirname(cache_dir))
- command += 'chmod g+x {0} && '.format(temp_dir)
- command += 'mv {0} {1}'.format(temp_dir, cache_dir)
+ parent_dir = os.path.dirname(cache_dir)
+ command = f'mkdir -p {parent_dir} && '
+ command += f'chmod g+x {temp_dir} && '
+ command += f'mv {temp_dir} {cache_dir}'
ret = self.ce.RunCommand(command)
if ret:
- command = 'rm -rf {0}'.format(temp_dir)
+ command = f'rm -rf {temp_dir}'
self.ce.RunCommand(command)
raise RuntimeError('Could not move dir %s to dir %s' %
(temp_dir, cache_dir))
@@ -1241,8 +1246,8 @@ class TelemetryResult(Result):
self.err = pickle.load(f)
self.retval = pickle.load(f)
- self.chrome_version = (
- super(TelemetryResult, self).GetChromeVersionFromCache(cache_dir))
+ self.chrome_version = (super(TelemetryResult,
+ self).GetChromeVersionFromCache(cache_dir))
self.ProcessResults()
@@ -1304,10 +1309,10 @@ class ResultsCache(object):
self.run_local = None
self.cwp_dso = None
- def Init(self, chromeos_image, chromeos_root, test_name, iteration, test_args,
- profiler_args, machine_manager, machine, board, cache_conditions,
- logger_to_use, log_level, label, share_cache, suite,
- show_all_results, run_local, cwp_dso):
+ def Init(self, chromeos_image, chromeos_root, test_name, iteration,
+ test_args, profiler_args, machine_manager, machine, board,
+ cache_conditions, logger_to_use, log_level, label, share_cache,
+ suite, show_all_results, run_local, cwp_dso):
self.chromeos_image = chromeos_image
self.chromeos_root = chromeos_root
self.test_name = test_name
@@ -1319,8 +1324,8 @@ class ResultsCache(object):
self.machine_manager = machine_manager
self.machine = machine
self._logger = logger_to_use
- self.ce = command_executer.GetCommandExecuter(
- self._logger, log_level=log_level)
+ self.ce = command_executer.GetCommandExecuter(self._logger,
+ log_level=log_level)
self.label = label
self.share_cache = share_cache
self.suite = suite
@@ -1406,15 +1411,16 @@ class ResultsCache(object):
temp_test_args = '%s %s %s' % (self.test_args, self.profiler_args,
self.run_local)
- test_args_checksum = hashlib.md5(temp_test_args.encode('utf-8')).hexdigest()
+ test_args_checksum = hashlib.md5(
+ temp_test_args.encode('utf-8')).hexdigest()
return (image_path_checksum, self.test_name, str(self.iteration),
- test_args_checksum, checksum, machine_checksum, machine_id_checksum,
- str(self.CACHE_VERSION))
+ test_args_checksum, checksum, machine_checksum,
+ machine_id_checksum, str(self.CACHE_VERSION))
def ReadResult(self):
if CacheConditions.FALSE in self.cache_conditions:
cache_dir = self.GetCacheDirForWrite()
- command = 'rm -rf %s' % (cache_dir,)
+ command = 'rm -rf %s' % (cache_dir, )
self.ce.RunCommand(command)
return None
cache_dir = self.GetCacheDirForRead()
@@ -1427,14 +1433,15 @@ class ResultsCache(object):
if self.log_level == 'verbose':
self._logger.LogOutput('Trying to read from cache dir: %s' % cache_dir)
- result = Result.CreateFromCacheHit(self._logger, self.log_level, self.label,
- self.machine, cache_dir, self.test_name,
- self.suite, self.cwp_dso)
+ result = Result.CreateFromCacheHit(self._logger, self.log_level,
+ self.label, self.machine, cache_dir,
+ self.test_name, self.suite,
+ self.cwp_dso)
if not result:
return None
- if (result.retval == 0 or
- CacheConditions.RUN_SUCCEEDED not in self.cache_conditions):
+ if (result.retval == 0
+ or CacheConditions.RUN_SUCCEEDED not in self.cache_conditions):
return result
return None
diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py
index df3a35e9..d6953eed 100755
--- a/crosperf/results_cache_unittest.py
+++ b/crosperf/results_cache_unittest.py
@@ -11,6 +11,7 @@ from __future__ import print_function
import io
import os
+import pickle
import shutil
import tempfile
import unittest
@@ -31,6 +32,8 @@ from cros_utils import command_executer
from cros_utils import logger
from cros_utils import misc
+# The following hardcoded string has blocked words replaced, and thus
+# is not representative of a true crosperf output.
# pylint: disable=line-too-long
OUTPUT = """CMD (True): ./test_that.sh\
--remote=172.17.128.241 --board=lumpy LibCBench
@@ -42,13 +45,13 @@ INFO : Running the following control files 1 times:
INFO : * 'client/site_tests/platform_LibCBench/control'
INFO : Running client test client/site_tests/platform_LibCBench/control
-./server/autoserv -m 172.17.128.241 --ssh-port 22 -c client/site_tests/platform_LibCBench/control -r /tmp/test_that.PO1234567/platform_LibCBench --test-retry=0 --args
+./server/autoserv -m 172.17.128.241 --ssh-port 22 -c client/site_tests/platform_LibCBench/control -r /tmp/test_that.PO1234567/platform_LibCBench --test-retry=0 --args
ERROR:root:import statsd failed, no stats will be reported.
14:20:22 INFO | Results placed in /tmp/test_that.PO1234567/platform_LibCBench
14:20:22 INFO | Processing control file
-14:20:23 INFO | Starting master ssh connection '/usr/bin/ssh -a -x -N -o ControlMaster=yes -o ControlPath=/tmp/_autotmp_VIIP67ssh-master/socket -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=yes -o ConnectTimeout=30 -o ServerAliveInterval=180 -o ServerAliveCountMax=3 -o ConnectionAttempts=4 -o Protocol=2 -l root -p 22 172.17.128.241'
+14:20:23 INFO | Starting main ssh connection '/usr/bin/ssh -a -x -N -o ControlMain=yes -o ControlPath=/tmp/_autotmp_VIIP67ssh-main/socket -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=yes -o ConnectTimeout=30 -o ServerAliveInterval=180 -o ServerAliveCountMax=3 -o ConnectionAttempts=4 -o Protocol=2 -l root -p 22 172.17.128.241'
14:20:23 ERROR| [stderr] Warning: Permanently added '172.17.128.241' (RSA) to the list of known hosts.
-14:20:23 INFO | INFO ---- ---- kernel=3.8.11 localtime=May 22 14:20:23 timestamp=1369257623
+14:20:23 INFO | INFO\t----\t----\tkernel=3.8.11\tlocaltime=May 22 14:20:23\ttimestamp=1369257623
14:20:23 INFO | Installing autotest on 172.17.128.241
14:20:23 INFO | Using installation dir /usr/local/autotest
14:20:23 WARNI| No job_repo_url for <remote host: 172.17.128.241>
@@ -59,11 +62,11 @@ ERROR:root:import statsd failed, no stats will be reported.
14:20:24 INFO | Entered autotestd_monitor.
14:20:24 INFO | Finished launching tail subprocesses.
14:20:24 INFO | Finished waiting on autotestd to start.
-14:20:26 INFO | START ---- ---- timestamp=1369257625 localtime=May 22 14:20:25
-14:20:26 INFO | START platform_LibCBench platform_LibCBench timestamp=1369257625 localtime=May 22 14:20:25
-14:20:30 INFO | GOOD platform_LibCBench platform_LibCBench timestamp=1369257630 localtime=May 22 14:20:30 completed successfully
-14:20:30 INFO | END GOOD platform_LibCBench platform_LibCBench timestamp=1369257630 localtime=May 22 14:20:30
-14:20:31 INFO | END GOOD ---- ---- timestamp=1369257630 localtime=May 22 14:20:30
+14:20:26 INFO | START\t----\t----\ttimestamp=1369257625\tlocaltime=May 22 14:20:25
+14:20:26 INFO | \tSTART\tplatform_LibCBench\tplatform_LibCBench\ttimestamp=1369257625\tlocaltime=May 22 14:20:25
+14:20:30 INFO | \t\tGOOD\tplatform_LibCBench\tplatform_LibCBench\ttimestamp=1369257630\tlocaltime=May 22 14:20:30\tcompleted successfully
+14:20:30 INFO | \tEND GOOD\tplatform_LibCBench\tplatform_LibCBench\ttimestamp=1369257630\tlocaltime=May 22 14:20:30
+14:20:31 INFO | END GOOD\t----\t----\ttimestamp=1369257630\tlocaltime=May 22 14:20:30
14:20:31 INFO | Got lock of exit_code_file.
14:20:31 INFO | Released lock of exit_code_file and closed it.
OUTPUT: ==============================
@@ -72,14 +75,14 @@ Done: 0% [ ]
OUTPUT: Thread Status:
RUNNING: 1 ('ttt: LibCBench (1)' 0:01:21)
Machine Status:
-Machine Thread Lock Status Checksum
+Machine Thread Lock Status Checksum
172.17.128.241 ttt: LibCBench (1) True RUNNING 3ba9f2ecbb222f20887daea5583d86ba
OUTPUT: ==============================
14:20:33 INFO | Killing child processes.
14:20:33 INFO | Client complete
14:20:33 INFO | Finished processing control file
-14:20:33 INFO | Starting master ssh connection '/usr/bin/ssh -a -x -N -o ControlMaster=yes -o ControlPath=/tmp/_autotmp_aVJUgmssh-master/socket -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=yes -o ConnectTimeout=30 -o ServerAliveInterval=180 -o ServerAliveCountMax=3 -o ConnectionAttempts=4 -o Protocol=2 -l root -p 22 172.17.128.241'
+14:20:33 INFO | Starting main ssh connection '/usr/bin/ssh -a -x -N -o ControlMain=yes -o ControlPath=/tmp/_autotmp_aVJUgmssh-main/socket -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=yes -o ConnectTimeout=30 -o ServerAliveInterval=180 -o ServerAliveCountMax=3 -o ConnectionAttempts=4 -o Protocol=2 -l root -p 22 172.17.128.241'
14:20:33 ERROR| [stderr] Warning: Permanently added '172.17.128.241' (RSA) to the list of known hosts.
INFO : Test results:
@@ -116,7 +119,7 @@ platform_LibCBench/platform_LibCBench b_utf8_onebyone__0_
-------------------------------------------------------------------
Total PASS: 2/2 (100%)
-INFO : Elapsed time: 0m16s
+INFO : Elapsed time: 0m16s
"""
error = """
@@ -177,7 +180,7 @@ PERF_DATA_HEADER = """
# total memory : 5911496 kB
# cmdline : /usr/bin/perf record -e instructions -p {pid}
# event : name = instructions, , id = ( 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193 ), type = 8, size = 112
-# event : name = dummy:u, , id = ( 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204 ), type = 1, size = 112, config = 0x9
+# event : name = placeholder:u, , id = ( 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204 ), type = 1, size = 112, config = 0x9
# CPU_TOPOLOGY info available, use -I to display
# pmu mappings: software = 1, uprobe = 6, cs_etm = 8, breakpoint = 5, tracepoint = 2, armv8_pmuv3 = 7
# contains AUX area data (e.g. instruction trace)
@@ -439,7 +442,6 @@ HISTOGRAMSET = ("""
class MockResult(Result):
"""Mock result class."""
-
def __init__(self, mylogger, label, logging_level, machine):
super(MockResult, self).__init__(mylogger, label, logging_level, machine)
@@ -455,7 +457,6 @@ class MockResult(Result):
class ResultTest(unittest.TestCase):
"""Result test class."""
-
def __init__(self, *args, **kwargs):
super(ResultTest, self).__init__(*args, **kwargs)
self.callFakeProcessResults = False
@@ -484,8 +485,8 @@ class ResultTest(unittest.TestCase):
def testCreateFromRun(self):
result = MockResult.CreateFromRun(logger.GetLogger(), 'average',
- self.mock_label, 'remote1', OUTPUT, error,
- 0, True)
+ self.mock_label, 'remote1', OUTPUT,
+ error, 0, True)
self.assertEqual(result.keyvals, keyvals)
self.assertEqual(result.chroot_results_dir,
'/tmp/test_that.PO1234567/platform_LibCBench')
@@ -533,7 +534,8 @@ class ResultTest(unittest.TestCase):
mock_runcmd.call_args_list[1])
self.assertEqual(mock_runcmd.call_args_list[0],
mock_runcmd.call_args_list[2])
- self.assertEqual(mock_runcmd.call_args_list[0][0], ('mkdir -p /tmp/test',))
+ self.assertEqual(mock_runcmd.call_args_list[0][0],
+ ('mkdir -p /tmp/test', ))
# test 3. CopyFiles returns 1 (fails).
mock_copyfiles.return_value = 1
@@ -715,7 +717,8 @@ class ResultTest(unittest.TestCase):
mock_mkdtemp.return_value = TMP_DIR1
mock_chrootruncmd.return_value = [
- '', ('%s,PASS\n%s/telemetry_Crosperf,PASS\n') % (TMP_DIR1, TMP_DIR1), ''
+ '', ('%s,PASS\n%s/telemetry_Crosperf,PASS\n') % (TMP_DIR1, TMP_DIR1),
+ ''
]
mock_getpath.return_value = TMP_DIR1
self.result.ce.ChrootRunCommandWOutput = mock_chrootruncmd
@@ -730,7 +733,7 @@ class ResultTest(unittest.TestCase):
self.assertEqual(self.kv_dict, {'': 'PASS', 'telemetry_Crosperf': 'PASS'})
self.assertEqual(mock_runcmd.call_count, 1)
self.assertEqual(mock_runcmd.call_args_list[0][0],
- ('cp -r /tmp/test_that_resultsNmq/* %s' % TMP_DIR1,))
+ ('cp -r /tmp/test_that_resultsNmq/* %s' % TMP_DIR1, ))
self.assertEqual(mock_chrootruncmd.call_count, 1)
self.assertEqual(
mock_chrootruncmd.call_args_list[0][0],
@@ -770,7 +773,8 @@ class ResultTest(unittest.TestCase):
@mock.patch.object(command_executer.CommandExecuter,
'ChrootRunCommandWOutput')
@mock.patch.object(os.path, 'exists')
- def test_get_samples(self, mock_exists, mock_get_total_samples, mock_getpath):
+ def test_get_samples(self, mock_exists, mock_get_total_samples,
+ mock_getpath):
self.result.perf_data_files = ['/tmp/results/perf.data']
self.result.board = 'samus'
mock_getpath.return_value = '/usr/chromeos/chroot/tmp/results/perf.data'
@@ -811,7 +815,7 @@ class ResultTest(unittest.TestCase):
res = self.result.FindFilesInResultsDir('-name perf.data')
self.assertEqual(mock_runcmd.call_count, 1)
self.assertEqual(mock_runcmd.call_args_list[0][0],
- ('find /tmp/test_results -name perf.data',))
+ ('find /tmp/test_results -name perf.data', ))
self.assertEqual(res, '/tmp/test_results/perf.data')
mock_runcmd.reset_mock()
@@ -827,7 +831,8 @@ class ResultTest(unittest.TestCase):
self.result.FindFilesInResultsDir = mock_findfiles
res = self.result.GetPerfDataFiles()
self.assertEqual(res, ['line1', 'line1'])
- self.assertEqual(mock_findfiles.call_args_list[0][0], ('-name perf.data',))
+ self.assertEqual(mock_findfiles.call_args_list[0][0],
+ ('-name perf.data', ))
def test_get_perf_report_files(self):
self.args = None
@@ -958,16 +963,18 @@ class ResultTest(unittest.TestCase):
"""Verify perf PID which is present in TOP_DATA."""
self.result.top_cmds = TOP_DATA
# pid is present in TOP_DATA.
- with mock.patch.object(
- Result, 'ReadPidFromPerfData', return_value=['5713']):
+ with mock.patch.object(Result,
+ 'ReadPidFromPerfData',
+ return_value=['5713']):
self.result.VerifyPerfDataPID()
def test_verify_perf_data_pid_fail(self):
"""Test perf PID missing in top raises the error."""
self.result.top_cmds = TOP_DATA
# pid is not in the list of top processes.
- with mock.patch.object(
- Result, 'ReadPidFromPerfData', return_value=['9999']):
+ with mock.patch.object(Result,
+ 'ReadPidFromPerfData',
+ return_value=['9999']):
with self.assertRaises(PidVerificationError):
self.result.VerifyPerfDataPID()
@@ -976,7 +983,9 @@ class ResultTest(unittest.TestCase):
def test_read_pid_from_perf_data_ok(self, mock_runcmd):
"""Test perf header parser, normal flow."""
self.result.ce.ChrootRunCommandWOutput = mock_runcmd
- self.result.perf_data_files = ['/tmp/chromeos/chroot/tmp/results/perf.data']
+ self.result.perf_data_files = [
+ '/tmp/chromeos/chroot/tmp/results/perf.data'
+ ]
exp_pid = '12345'
mock_runcmd.return_value = (0, PERF_DATA_HEADER.format(pid=exp_pid), '')
pids = self.result.ReadPidFromPerfData()
@@ -1007,7 +1016,9 @@ class ResultTest(unittest.TestCase):
def test_read_pid_from_perf_data_no_pid(self, mock_runcmd):
"""Test perf.data without PID."""
self.result.ce.ChrootRunCommandWOutput = mock_runcmd
- self.result.perf_data_files = ['/tmp/chromeos/chroot/tmp/results/perf.data']
+ self.result.perf_data_files = [
+ '/tmp/chromeos/chroot/tmp/results/perf.data'
+ ]
cmd_line = '# cmdline : /usr/bin/perf record -e instructions'
mock_runcmd.return_value = (0, cmd_line, '')
pids = self.result.ReadPidFromPerfData()
@@ -1019,7 +1030,9 @@ class ResultTest(unittest.TestCase):
def test_read_pid_from_perf_data_system_wide(self, mock_runcmd):
"""Test reading from system-wide profile with PID."""
self.result.ce.ChrootRunCommandWOutput = mock_runcmd
- self.result.perf_data_files = ['/tmp/chromeos/chroot/tmp/results/perf.data']
+ self.result.perf_data_files = [
+ '/tmp/chromeos/chroot/tmp/results/perf.data'
+ ]
# There is '-p <pid>' in command line but it's still system-wide: '-a'.
cmd_line = '# cmdline : /usr/bin/perf record -e instructions -a -p 1234'
mock_runcmd.return_value = (0, cmd_line, '')
@@ -1032,7 +1045,9 @@ class ResultTest(unittest.TestCase):
def test_read_pid_from_perf_data_read_fail(self, mock_runcmd):
"""Failure to read perf.data raises the error."""
self.result.ce.ChrootRunCommandWOutput = mock_runcmd
- self.result.perf_data_files = ['/tmp/chromeos/chroot/tmp/results/perf.data']
+ self.result.perf_data_files = [
+ '/tmp/chromeos/chroot/tmp/results/perf.data'
+ ]
# Error status of the profile read.
mock_runcmd.return_value = (1, '', '')
with self.assertRaises(PerfDataReadError):
@@ -1043,7 +1058,9 @@ class ResultTest(unittest.TestCase):
def test_read_pid_from_perf_data_fail(self, mock_runcmd):
"""Failure to find cmdline in perf.data header raises the error."""
self.result.ce.ChrootRunCommandWOutput = mock_runcmd
- self.result.perf_data_files = ['/tmp/chromeos/chroot/tmp/results/perf.data']
+ self.result.perf_data_files = [
+ '/tmp/chromeos/chroot/tmp/results/perf.data'
+ ]
# Empty output.
mock_runcmd.return_value = (0, '', '')
with self.assertRaises(PerfDataReadError):
@@ -1273,7 +1290,6 @@ class ResultTest(unittest.TestCase):
@mock.patch.object(misc, 'GetOutsideChrootPath')
def test_populate_from_run(self, mock_getpath):
-
def FakeGetResultsDir():
self.callGetResultsDir = True
return '/tmp/results_dir'
@@ -1361,7 +1377,6 @@ class ResultTest(unittest.TestCase):
return {'Total': 10}
def test_process_results(self):
-
def FakeGatherPerfResults():
self.callGatherPerfResults = True
@@ -1407,16 +1422,17 @@ class ResultTest(unittest.TestCase):
self.result.ProcessResults()
shutil.rmtree(os.path.dirname(self.result.results_file[0]))
# Verify the summary for the story is correct
- self.assertEqual(self.result.keyvals['timeToFirstContentfulPaint__typical'],
- [880.000, u'ms_smallerIsBetter'])
+ self.assertEqual(
+ self.result.keyvals['timeToFirstContentfulPaint__typical'],
+ [880.000, u'ms_smallerIsBetter'])
# Verify the summary for a certain story tag is correct
self.assertEqual(
- self.result
- .keyvals['timeToFirstContentfulPaint__cache_temperature:cold'],
+ self.result.
+ keyvals['timeToFirstContentfulPaint__cache_temperature:cold'],
[1000.000, u'ms_smallerIsBetter'])
self.assertEqual(
- self.result
- .keyvals['timeToFirstContentfulPaint__cache_temperature:warm'],
+ self.result.
+ keyvals['timeToFirstContentfulPaint__cache_temperature:warm'],
[800.000, u'ms_smallerIsBetter'])
@mock.patch.object(Result, 'ProcessCpustatsResults')
@@ -1572,7 +1588,8 @@ class ResultTest(unittest.TestCase):
u'telemetry_page_measurement_results__num_errored': [0, u'count'],
u'string-fasta__string-fasta': [23.2, u'ms'],
u'crypto-sha1__crypto-sha1': [11.6, u'ms'],
- u'bitops-3bit-bits-in-byte__bitops-3bit-bits-in-byte': [3.2, u'ms'],
+ u'bitops-3bit-bits-in-byte__bitops-3bit-bits-in-byte':
+ [3.2, u'ms'],
u'access-nsieve__access-nsieve': [7.9, u'ms'],
u'bitops-nsieve-bits__bitops-nsieve-bits': [9.4, u'ms'],
u'string-validate-input__string-validate-input': [19.3, u'ms'],
@@ -1610,7 +1627,8 @@ class ResultTest(unittest.TestCase):
u'telemetry_page_measurement_results__num_errored': [0, u'count'],
u'string-fasta__string-fasta': [23.2, u'ms'],
u'crypto-sha1__crypto-sha1': [11.6, u'ms'],
- u'bitops-3bit-bits-in-byte__bitops-3bit-bits-in-byte': [3.2, u'ms'],
+ u'bitops-3bit-bits-in-byte__bitops-3bit-bits-in-byte':
+ [3.2, u'ms'],
u'access-nsieve__access-nsieve': [7.9, u'ms'],
u'bitops-nsieve-bits__bitops-nsieve-bits': [9.4, u'ms'],
u'string-validate-input__string-validate-input': [19.3, u'ms'],
@@ -1657,8 +1675,9 @@ class ResultTest(unittest.TestCase):
self.assertEqual(mock_getroot.call_count, 1)
self.assertEqual(mock_runcmd.call_count, 2)
self.assertEqual(mock_runcmd.call_args_list[0][0],
- ('rm -rf test_results_dir',))
- self.assertEqual(mock_runcmd.call_args_list[1][0], ('rm -rf testtemp_dir',))
+ ('rm -rf test_results_dir', ))
+ self.assertEqual(mock_runcmd.call_args_list[1][0],
+ ('rm -rf testtemp_dir', ))
# Test 2. Same, except that the results_dir name does not contain
# 'test_that_results_'
@@ -1672,8 +1691,9 @@ class ResultTest(unittest.TestCase):
self.assertEqual(mock_getroot.call_count, 1)
self.assertEqual(mock_runcmd.call_count, 2)
self.assertEqual(mock_runcmd.call_args_list[0][0],
- ('rm -rf /tmp/tmp_AbcXyz',))
- self.assertEqual(mock_runcmd.call_args_list[1][0], ('rm -rf testtemp_dir',))
+ ('rm -rf /tmp/tmp_AbcXyz', ))
+ self.assertEqual(mock_runcmd.call_args_list[1][0],
+ ('rm -rf testtemp_dir', ))
# Test 3. mock_getroot returns nothing; 'rm_chroot_tmp' is False.
mock_getroot.reset_mock()
@@ -1681,7 +1701,8 @@ class ResultTest(unittest.TestCase):
self.result.CleanUp(False)
self.assertEqual(mock_getroot.call_count, 0)
self.assertEqual(mock_runcmd.call_count, 1)
- self.assertEqual(mock_runcmd.call_args_list[0][0], ('rm -rf testtemp_dir',))
+ self.assertEqual(mock_runcmd.call_args_list[0][0],
+ ('rm -rf testtemp_dir', ))
# Test 4. 'rm_chroot_tmp' is True, but result_dir & temp_dir are None.
mock_getroot.reset_mock()
@@ -1695,7 +1716,6 @@ class ResultTest(unittest.TestCase):
@mock.patch.object(misc, 'GetInsideChrootPath')
@mock.patch.object(command_executer.CommandExecuter, 'ChrootRunCommand')
def test_store_to_cache_dir(self, mock_chrootruncmd, mock_getpath):
-
def FakeMkdtemp(directory=''):
if directory:
pass
@@ -1730,7 +1750,7 @@ class ResultTest(unittest.TestCase):
base_dir = os.path.join(os.getcwd(), 'test_cache/compare_output')
self.assertTrue(os.path.exists(os.path.join(test_dir, 'autotest.tbz2')))
self.assertTrue(os.path.exists(os.path.join(test_dir, 'machine.txt')))
- self.assertTrue(os.path.exists(os.path.join(test_dir, 'results.txt')))
+ self.assertTrue(os.path.exists(os.path.join(test_dir, 'results.pickle')))
f1 = os.path.join(test_dir, 'machine.txt')
f2 = os.path.join(base_dir, 'machine.txt')
@@ -1738,11 +1758,13 @@ class ResultTest(unittest.TestCase):
[_, out, _] = self.result.ce.RunCommandWOutput(cmd)
self.assertEqual(len(out), 0)
- f1 = os.path.join(test_dir, 'results.txt')
- f2 = os.path.join(base_dir, 'results.txt')
- cmd = 'diff %s %s' % (f1, f2)
- [_, out, _] = self.result.ce.RunCommandWOutput(cmd)
- self.assertEqual(len(out), 0)
+ f1 = os.path.join(test_dir, 'results.pickle')
+ f2 = os.path.join(base_dir, 'results.pickle')
+ with open(f1, 'rb') as f:
+ f1_obj = pickle.load(f)
+ with open(f2, 'rb') as f:
+ f2_obj = pickle.load(f)
+ self.assertEqual(f1_obj, f2_obj)
# Clean up after test.
tempfile.mkdtemp = save_real_mkdtemp
@@ -1753,87 +1775,87 @@ class ResultTest(unittest.TestCase):
TELEMETRY_RESULT_KEYVALS = {
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'math-cordic (ms)':
- '11.4',
+ '11.4',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'access-nbody (ms)':
- '6.9',
+ '6.9',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'access-fannkuch (ms)':
- '26.3',
+ '26.3',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'math-spectral-norm (ms)':
- '6.3',
+ '6.3',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'bitops-nsieve-bits (ms)':
- '9.3',
+ '9.3',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'math-partial-sums (ms)':
- '32.8',
+ '32.8',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'regexp-dna (ms)':
- '16.1',
+ '16.1',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'3d-cube (ms)':
- '42.7',
+ '42.7',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'crypto-md5 (ms)':
- '10.8',
+ '10.8',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'crypto-sha1 (ms)':
- '12.4',
+ '12.4',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'string-tagcloud (ms)':
- '47.2',
+ '47.2',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'string-fasta (ms)':
- '36.3',
+ '36.3',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'access-binary-trees (ms)':
- '7.3',
+ '7.3',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'date-format-xparb (ms)':
- '138.1',
+ '138.1',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'crypto-aes (ms)':
- '19.2',
+ '19.2',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'Total (ms)':
- '656.5',
+ '656.5',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'string-base64 (ms)':
- '17.5',
+ '17.5',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'string-validate-input (ms)':
- '24.8',
+ '24.8',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'3d-raytrace (ms)':
- '28.7',
+ '28.7',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'controlflow-recursive (ms)':
- '5.3',
+ '5.3',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'bitops-bits-in-byte (ms)':
- '9.8',
+ '9.8',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'3d-morph (ms)':
- '50.2',
+ '50.2',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'bitops-bitwise-and (ms)':
- '8.8',
+ '8.8',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'access-nsieve (ms)':
- '8.6',
+ '8.6',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'date-format-tofte (ms)':
- '31.2',
+ '31.2',
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'bitops-3bit-bits-in-byte (ms)':
- '3.5',
+ '3.5',
'retval':
- 0,
+ 0,
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
'string-unpack-code (ms)':
- '45.0'
+ '45.0'
}
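The keyvals above are keyed by '<page url> <metric> (unit)' and hold
string values (plus an integer 'retval'); for illustration, a lookup of
one entry defined above:

    total_ms = float(TELEMETRY_RESULT_KEYVALS[
        'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
        'Total (ms)'])
    assert total_ms == 656.5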
PURE_TELEMETRY_OUTPUT = """
@@ -1843,7 +1865,6 @@ page_name,3d-cube (ms),3d-morph (ms),3d-raytrace (ms),Total (ms),access-binary-t
class TelemetryResultTest(unittest.TestCase):
"""Telemetry result test."""
-
def __init__(self, *args, **kwargs):
super(TelemetryResultTest, self).__init__(*args, **kwargs)
self.callFakeProcessResults = False
@@ -1854,12 +1875,10 @@ class TelemetryResultTest(unittest.TestCase):
'autotest_dir', 'debug_dir', '/tmp', 'lumpy',
'remote', 'image_args', 'cache_dir', 'average',
'gcc', False, None)
- self.mock_machine = machine_manager.MockCrosMachine('falco.cros',
- '/tmp/chromeos',
- 'average')
+ self.mock_machine = machine_manager.MockCrosMachine(
+ 'falco.cros', '/tmp/chromeos', 'average')
def test_populate_from_run(self):
-
def FakeProcessResults():
self.callFakeProcessResults = True
@@ -1890,7 +1909,6 @@ class TelemetryResultTest(unittest.TestCase):
class ResultsCacheTest(unittest.TestCase):
"""Resultcache test class."""
-
def __init__(self, *args, **kwargs):
super(ResultsCacheTest, self).__init__(*args, **kwargs)
self.fakeCacheReturnResult = None
@@ -1932,7 +1950,6 @@ class ResultsCacheTest(unittest.TestCase):
@mock.patch.object(image_checksummer.ImageChecksummer, 'Checksum')
def test_get_cache_dir_for_write(self, mock_checksum):
-
def FakeGetMachines(label):
if label:
pass
@@ -2041,7 +2058,8 @@ class ResultsCacheTest(unittest.TestCase):
# Test 5. Generating cache name for writing, with local image type, and
# specifying that the image path must match the cached image path.
self.results_cache.label.image_type = 'local'
- self.results_cache.cache_conditions.append(CacheConditions.IMAGE_PATH_MATCH)
+ self.results_cache.cache_conditions.append(
+ CacheConditions.IMAGE_PATH_MATCH)
key_list = self.results_cache.GetCacheKeyList(False)
self.assertEqual(key_list[0], '54524606abaae4fdf7b02f49f7ae7127')
self.assertEqual(key_list[3], 'fda29412ceccb72977516c4785d08e2c')
diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py
index 34326b68..78834c63 100644
--- a/crosperf/settings_factory.py
+++ b/crosperf/settings_factory.py
@@ -22,14 +22,13 @@ class BenchmarkSettings(Settings):
def __init__(self, name):
super(BenchmarkSettings, self).__init__(name, 'benchmark')
self.AddField(
- TextField(
- 'test_name',
- description='The name of the test to run. '
- 'Defaults to the name of the benchmark.'))
+ TextField('test_name',
+ description='The name of the test to run. '
+ 'Defaults to the name of the benchmark.'))
self.AddField(
- TextField(
- 'test_args', description='Arguments to be passed to the '
- 'test.'))
+ TextField('test_args',
+ description='Arguments to be passed to the '
+ 'test.'))
self.AddField(
IntegerField(
'iterations',
@@ -39,24 +38,21 @@ class BenchmarkSettings(Settings):
'If not set, will run each benchmark test the optimum number of '
'times to get a stable result.'))
self.AddField(
- TextField(
- 'suite',
- default='test_that',
- description='The type of the benchmark.'))
+ TextField('suite',
+ default='test_that',
+ description='The type of the benchmark.'))
self.AddField(
- IntegerField(
- 'retries',
- default=0,
- description='Number of times to retry a '
- 'benchmark run.'))
+ IntegerField('retries',
+ default=0,
+ description='Number of times to retry a '
+ 'benchmark run.'))
self.AddField(
- BooleanField(
- 'run_local',
- description='Run benchmark harness on the DUT. '
- 'Currently only compatible with the suite: '
- 'telemetry_Crosperf.',
- required=False,
- default=True))
+ BooleanField('run_local',
+ description='Run benchmark harness on the DUT. '
+ 'Currently only compatible with the suite: '
+ 'telemetry_Crosperf.',
+ required=False,
+ default=True))
self.AddField(
FloatField(
'weight',
@@ -70,12 +66,11 @@ class LabelSettings(Settings):
def __init__(self, name):
super(LabelSettings, self).__init__(name, 'label')
self.AddField(
- TextField(
- 'chromeos_image',
- required=False,
- description='The path to the image to run tests '
- 'on, for local/custom-built images. See the '
- "'build' option for official or trybot images."))
+ TextField('chromeos_image',
+ required=False,
+ description='The path to the image to run tests '
+ 'on, for local/custom-built images. See the '
+ "'build' option for official or trybot images."))
self.AddField(
TextField(
'autotest_path',
@@ -90,53 +85,46 @@ class LabelSettings(Settings):
description='Debug info directory relative to chroot which has '
'symbols and vmlinux that can be used by perf tool.'))
self.AddField(
- TextField(
- 'chromeos_root',
- description='The path to a chromeos checkout which '
- 'contains a src/scripts directory. Defaults to '
- 'the chromeos checkout which contains the '
- 'chromeos_image.'))
- self.AddField(
- ListField(
- 'remote',
- description='A comma-separated list of IPs of chromeos'
- 'devices to run experiments on.'))
- self.AddField(
- TextField(
- 'image_args',
- required=False,
- default='',
- description='Extra arguments to pass to '
- 'image_chromeos.py.'))
- self.AddField(
- TextField(
- 'cache_dir',
- default='',
- description='The cache dir for this image.'))
- self.AddField(
- TextField(
- 'compiler',
- default='gcc',
- description='The compiler used to build the '
- 'ChromeOS image (gcc or llvm).'))
- self.AddField(
- TextField(
- 'chrome_src',
- description='The path to the source of chrome. '
- 'This is used to run telemetry benchmarks. '
- 'The default one is the src inside chroot.',
- required=False,
- default=''))
- self.AddField(
- TextField(
- 'build',
- description='The xbuddy specification for an '
- 'official or trybot image to use for tests. '
- "'/remote' is assumed, and the board is given "
- "elsewhere, so omit the '/remote/<board>/' xbuddy "
- 'prefix.',
- required=False,
- default=''))
+ TextField('chromeos_root',
+ description='The path to a chromeos checkout which '
+ 'contains a src/scripts directory. Defaults to '
+ 'the chromeos checkout which contains the '
+ 'chromeos_image.'))
+ self.AddField(
+ ListField('remote',
+                    description='A comma-separated list of IPs of chromeos '
+ 'devices to run experiments on.'))
+ self.AddField(
+ TextField('image_args',
+ required=False,
+ default='',
+ description='Extra arguments to pass to '
+ 'image_chromeos.py.'))
+ self.AddField(
+ TextField('cache_dir',
+ default='',
+ description='The cache dir for this image.'))
+ self.AddField(
+ TextField('compiler',
+ default='gcc',
+ description='The compiler used to build the '
+ 'ChromeOS image (gcc or llvm).'))
+ self.AddField(
+ TextField('chrome_src',
+ description='The path to the source of chrome. '
+ 'This is used to run telemetry benchmarks. '
+ 'The default one is the src inside chroot.',
+ required=False,
+ default=''))
+ self.AddField(
+ TextField('build',
+ description='The xbuddy specification for an '
+ 'official or trybot image to use for tests. '
+ "'/remote' is assumed, and the board is given "
+ "elsewhere, so omit the '/remote/<board>/' xbuddy "
+ 'prefix.',
+ required=False,
+ default=''))
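All of the settings_factory.py hunks in this file reflow the same
registration pattern: a Settings subclass declares typed fields through
AddField. A minimal sketch of that pattern, assuming the flat crosperf
imports used elsewhere in the tree (ExampleSettings itself is
hypothetical):

    from field import BooleanField, TextField
    from settings import Settings

    class ExampleSettings(Settings):
      """Hypothetical settings group showing the AddField pattern."""

      def __init__(self, name):
        super(ExampleSettings, self).__init__(name, 'example')
        self.AddField(
            TextField('cache_dir',
                      default='',
                      description='The cache dir for this image.'))
        self.AddField(
            BooleanField('rerun',
                         default=False,
                         description='Whether to ignore the cache.'))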
class GlobalSettings(Settings):
@@ -145,67 +133,56 @@ class GlobalSettings(Settings):
def __init__(self, name):
super(GlobalSettings, self).__init__(name, 'global')
self.AddField(
- TextField(
- 'name',
- description='The name of the experiment. Just an '
- 'identifier.'))
- self.AddField(
- TextField(
- 'board',
- description='The target board for running '
- 'experiments on, e.g. x86-alex.'))
- self.AddField(
- BooleanField(
- 'crosfleet',
- description='Whether to run experiments via crosfleet.',
- default=False))
- self.AddField(
- ListField(
- 'remote',
- description='A comma-separated list of IPs of '
- 'chromeos devices to run experiments on.'))
- self.AddField(
- BooleanField(
- 'rerun_if_failed',
- description='Whether to re-run failed test runs '
- 'or not.',
- default=False))
- self.AddField(
- BooleanField(
- 'rm_chroot_tmp',
- default=False,
- description='Whether to remove the test_that '
- 'result in the chroot.'))
- self.AddField(
- ListField(
- 'email',
- description='Space-separated list of email '
- 'addresses to send email to.'))
- self.AddField(
- BooleanField(
- 'rerun',
- description='Whether to ignore the cache and '
- 'for tests to be re-run.',
- default=False))
- self.AddField(
- BooleanField(
- 'same_specs',
- default=True,
- description='Ensure cached runs are run on the '
- 'same kind of devices which are specified as a '
- 'remote.'))
- self.AddField(
- BooleanField(
- 'same_machine',
- default=False,
- description='Ensure cached runs are run on the '
- 'same remote.'))
- self.AddField(
- BooleanField(
- 'use_file_locks',
- default=False,
- description='DEPRECATED: Whether to use the file locks '
- 'or AFE server lock mechanism.'))
+ TextField('name',
+ description='The name of the experiment. Just an '
+ 'identifier.'))
+ self.AddField(
+ TextField('board',
+ description='The target board for running '
+ 'experiments on, e.g. x86-alex.'))
+ self.AddField(
+ BooleanField('crosfleet',
+ description='Whether to run experiments via crosfleet.',
+ default=False))
+ self.AddField(
+ ListField('remote',
+ description='A comma-separated list of IPs of '
+ 'chromeos devices to run experiments on.'))
+ self.AddField(
+ BooleanField('rerun_if_failed',
+ description='Whether to re-run failed test runs '
+ 'or not.',
+ default=False))
+ self.AddField(
+ BooleanField('rm_chroot_tmp',
+ default=False,
+ description='Whether to remove the test_that '
+ 'result in the chroot.'))
+ self.AddField(
+ ListField('email',
+ description='Space-separated list of email '
+ 'addresses to send email to.'))
+ self.AddField(
+ BooleanField('rerun',
+ description='Whether to ignore the cache and '
+                       'force tests to be re-run.',
+ default=False))
+ self.AddField(
+ BooleanField('same_specs',
+ default=True,
+ description='Ensure cached runs are run on the '
+ 'same kind of devices which are specified as a '
+ 'remote.'))
+ self.AddField(
+ BooleanField('same_machine',
+ default=False,
+ description='Ensure cached runs are run on the '
+ 'same remote.'))
+ self.AddField(
+ BooleanField('use_file_locks',
+ default=False,
+ description='DEPRECATED: Whether to use the file locks '
+ 'or AFE server lock mechanism.'))
self.AddField(
IntegerField(
'iterations',
@@ -215,79 +192,68 @@ class GlobalSettings(Settings):
'If not set, will run each benchmark test the optimum number of '
'times to get a stable result.'))
self.AddField(
- TextField(
- 'chromeos_root',
- description='The path to a chromeos checkout which '
- 'contains a src/scripts directory. Defaults to '
- 'the chromeos checkout which contains the '
- 'chromeos_image.'))
- self.AddField(
- TextField(
- 'logging_level',
- default='average',
- description='The level of logging desired. '
- "Options are 'quiet', 'average', and 'verbose'."))
- self.AddField(
- IntegerField(
- 'acquire_timeout',
- default=0,
- description='Number of seconds to wait for '
- 'machine before exit if all the machines in '
- 'the experiment file are busy. Default is 0.'))
- self.AddField(
- TextField(
- 'perf_args',
- default='',
- description='The optional profile command. It '
- 'enables perf commands to record perforamance '
- 'related counters. It must start with perf '
- 'command record or stat followed by arguments.'))
- self.AddField(
- BooleanField(
- 'download_debug',
- default=True,
- description='Download compressed debug symbols alongwith '
- 'image. This can provide more info matching symbols for'
- 'profiles, but takes larger space. By default, download'
- 'it only when perf_args is specified.'))
- self.AddField(
- TextField(
- 'cache_dir',
- default='',
- description='The abs path of cache dir. '
- 'Default is /home/$(whoami)/cros_scratch.'))
- self.AddField(
- BooleanField(
- 'cache_only',
- default=False,
- description='Whether to use only cached '
- 'results (do not rerun failed tests).'))
- self.AddField(
- BooleanField(
- 'no_email',
- default=False,
- description='Whether to disable the email to '
- 'user after crosperf finishes.'))
- self.AddField(
- BooleanField(
- 'json_report',
- default=False,
- description='Whether to generate a json version '
- 'of the report, for archiving.'))
- self.AddField(
- BooleanField(
- 'show_all_results',
- default=False,
- description='When running Telemetry tests, '
- 'whether to all the results, instead of just '
- 'the default (summary) results.'))
- self.AddField(
- TextField(
- 'share_cache',
- default='',
- description='Path to alternate cache whose data '
- 'you want to use. It accepts multiple directories '
- 'separated by a ",".'))
+ TextField('chromeos_root',
+ description='The path to a chromeos checkout which '
+ 'contains a src/scripts directory. Defaults to '
+ 'the chromeos checkout which contains the '
+ 'chromeos_image.'))
+ self.AddField(
+ TextField('logging_level',
+ default='average',
+ description='The level of logging desired. '
+ "Options are 'quiet', 'average', and 'verbose'."))
+ self.AddField(
+ IntegerField('acquire_timeout',
+ default=0,
+ description='Number of seconds to wait for '
+                       'a machine before exiting if all the machines in '
+ 'the experiment file are busy. Default is 0.'))
+ self.AddField(
+ TextField('perf_args',
+ default='',
+ description='The optional profile command. It '
+                    'enables perf commands to record performance '
+ 'related counters. It must start with perf '
+ 'command record or stat followed by arguments.'))
+ self.AddField(
+ BooleanField('download_debug',
+ default=True,
+                       description='Download compressed debug symbols along '
+                       'with the image. This can provide more info for '
+                       'matching symbols in profiles, but takes more space. By '
+                       'default, download them only when perf_args is specified.'))
+ self.AddField(
+ TextField('cache_dir',
+ default='',
+ description='The abs path of cache dir. '
+ 'Default is /home/$(whoami)/cros_scratch.'))
+ self.AddField(
+ BooleanField('cache_only',
+ default=False,
+ description='Whether to use only cached '
+ 'results (do not rerun failed tests).'))
+ self.AddField(
+ BooleanField('no_email',
+ default=False,
+ description='Whether to disable the email to '
+ 'user after crosperf finishes.'))
+ self.AddField(
+ BooleanField('json_report',
+ default=False,
+ description='Whether to generate a json version '
+ 'of the report, for archiving.'))
+ self.AddField(
+ BooleanField('show_all_results',
+ default=False,
+ description='When running Telemetry tests, '
+                       'whether to show all the results, instead of just '
+ 'the default (summary) results.'))
+ self.AddField(
+ TextField('share_cache',
+ default='',
+ description='Path to alternate cache whose data '
+ 'you want to use. It accepts multiple directories '
+ 'separated by a ",".'))
self.AddField(
TextField('results_dir', default='', description='The results dir.'))
self.AddField(
@@ -297,55 +263,49 @@ class GlobalSettings(Settings):
description='Whether to compress all test results other than '
'reports into a tarball to save disk space.'))
self.AddField(
- TextField(
- 'locks_dir',
- default='',
- description='An alternate directory to use for '
- 'storing/checking machine file locks for local machines. '
- 'By default the file locks directory is '
- '/google/data/rw/users/mo/mobiletc-prebuild/locks.\n'
- 'WARNING: If you use your own locks directory, '
- 'there is no guarantee that someone else might not '
- 'hold a lock on the same machine in a different '
- 'locks directory.'))
- self.AddField(
- TextField(
- 'chrome_src',
- description='The path to the source of chrome. '
- 'This is used to run telemetry benchmarks. '
- 'The default one is the src inside chroot.',
- required=False,
- default=''))
- self.AddField(
- IntegerField(
- 'retries',
- default=0,
- description='Number of times to retry a '
- 'benchmark run.'))
- self.AddField(
- TextField(
- 'cwp_dso',
- description='The DSO type that we want to use for '
- 'CWP approximation. This is used to run telemetry '
- 'benchmarks. Valid DSO types can be found from dso_list '
- 'in experiment_factory.py. The default value is set to '
- 'be empty.',
- required=False,
- default=''))
- self.AddField(
- BooleanField(
- 'enable_aslr',
- description='Enable ASLR on the machine to run the '
- 'benchmarks. ASLR is disabled by default',
- required=False,
- default=False))
- self.AddField(
- BooleanField(
- 'ignore_min_max',
- description='When doing math for the raw results, '
- 'ignore min and max values to reduce noise.',
- required=False,
- default=False))
+ TextField('locks_dir',
+ default='',
+ description='An alternate directory to use for '
+ 'storing/checking machine file locks for local machines. '
+ 'By default the file locks directory is '
+ '/google/data/rw/users/mo/mobiletc-prebuild/locks.\n'
+ 'WARNING: If you use your own locks directory, '
+                    'there is no guarantee that someone else is not '
+                    'holding a lock on the same machine in a different '
+ 'locks directory.'))
+ self.AddField(
+ TextField('chrome_src',
+ description='The path to the source of chrome. '
+ 'This is used to run telemetry benchmarks. '
+ 'The default one is the src inside chroot.',
+ required=False,
+ default=''))
+ self.AddField(
+ IntegerField('retries',
+ default=0,
+ description='Number of times to retry a '
+ 'benchmark run.'))
+ self.AddField(
+ TextField('cwp_dso',
+ description='The DSO type that we want to use for '
+ 'CWP approximation. This is used to run telemetry '
+ 'benchmarks. Valid DSO types can be found from dso_list '
+ 'in experiment_factory.py. The default value is set to '
+ 'be empty.',
+ required=False,
+ default=''))
+ self.AddField(
+ BooleanField('enable_aslr',
+ description='Enable ASLR on the machine to run the '
+                       'benchmarks. ASLR is disabled by default.',
+ required=False,
+ default=False))
+ self.AddField(
+ BooleanField('ignore_min_max',
+ description='When doing math for the raw results, '
+ 'ignore min and max values to reduce noise.',
+ required=False,
+ default=False))
self.AddField(
TextField(
'intel_pstate',
@@ -356,12 +316,11 @@ class GlobalSettings(Settings):
required=False,
default='no_hwp'))
self.AddField(
- BooleanField(
- 'turbostat',
- description='Run turbostat process in the background'
- ' of a benchmark. Enabled by default.',
- required=False,
- default=True))
+ BooleanField('turbostat',
+ description='Run turbostat process in the background'
+ ' of a benchmark. Enabled by default.',
+ required=False,
+ default=True))
self.AddField(
FloatField(
'top_interval',
@@ -377,22 +336,20 @@ class GlobalSettings(Settings):
required=False,
default=1))
self.AddField(
- IntegerField(
- 'cooldown_temp',
- required=False,
- default=40,
- description='Wait until CPU temperature goes down below'
- ' specified temperature in Celsius'
- ' prior starting a benchmark. '
- 'By default the value is set to 40 degrees.'))
- self.AddField(
- IntegerField(
- 'cooldown_time',
- required=False,
- default=10,
- description='Wait specified time in minutes allowing'
- ' CPU to cool down. Zero value disables cooldown. '
- 'The default value is 10 minutes.'))
+ IntegerField('cooldown_temp',
+ required=False,
+ default=40,
+                       description='Wait until CPU temperature goes down below'
+                       ' the specified temperature in Celsius'
+                       ' prior to starting a benchmark. '
+ 'By default the value is set to 40 degrees.'))
+ self.AddField(
+ IntegerField('cooldown_time',
+ required=False,
+ default=10,
+                       description='Wait the specified time in minutes to allow'
+                       ' the CPU to cool down. A zero value disables cooldown. '
+ 'The default value is 10 minutes.'))
self.AddField(
EnumField(
'governor',
@@ -439,6 +396,12 @@ class GlobalSettings(Settings):
' or equal to a percent of max_freq. '
'CPU frequency is reduced to 95%% by default to reduce thermal '
'throttling.'))
+ self.AddField(
+ BooleanField(
+ 'no_lock',
+ default=False,
+ description='Do not attempt to lock the DUT.'
+              ' Useful when the lock is held externally, e.g. with crosfleet.'))
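The no_lock field is the one behavioral addition in this file. Like the
other global fields exercised in the unit test below, an unset field
reads back as its declared default, so a hedged usage sketch:

    import settings_factory

    g_settings = settings_factory.GlobalSettings('g_settings')
    # no_lock was registered with default=False above, and GetField
    # returns the declared default for a field that was never set.
    assert not g_settings.GetField('no_lock')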
class SettingsFactory(object):
diff --git a/crosperf/settings_factory_unittest.py b/crosperf/settings_factory_unittest.py
index 035da8d7..8277e870 100755
--- a/crosperf/settings_factory_unittest.py
+++ b/crosperf/settings_factory_unittest.py
@@ -50,7 +50,7 @@ class GlobalSettingsTest(unittest.TestCase):
def test_init(self):
res = settings_factory.GlobalSettings('g_settings')
self.assertIsNotNone(res)
- self.assertEqual(len(res.fields), 39)
+ self.assertEqual(len(res.fields), 40)
self.assertEqual(res.GetField('name'), '')
self.assertEqual(res.GetField('board'), '')
self.assertEqual(res.GetField('crosfleet'), False)
@@ -108,7 +108,7 @@ class SettingsFactoryTest(unittest.TestCase):
g_settings = settings_factory.SettingsFactory().GetSettings(
'global', 'global')
self.assertIsInstance(g_settings, settings_factory.GlobalSettings)
- self.assertEqual(len(g_settings.fields), 39)
+ self.assertEqual(len(g_settings.fields), 40)
if __name__ == '__main__':
diff --git a/crosperf/test_cache/compare_output/results.txt b/crosperf/test_cache/compare_output/results.pickle
index 592e7161..587863c5 100644
--- a/crosperf/test_cache/compare_output/results.txt
+++ b/crosperf/test_cache/compare_output/results.pickle
Binary files differ
diff --git a/crosperf/test_cache/test_input/results.txt b/crosperf/test_cache/test_input/results.pickle
index 33ba6ab7..33ba6ab7 100644
--- a/crosperf/test_cache/test_input/results.txt
+++ b/crosperf/test_cache/test_input/results.pickle
diff --git a/crosperf/test_cache/test_puretelemetry_input/results.txt b/crosperf/test_cache/test_puretelemetry_input/results.pickle
index 497d1cf3..497d1cf3 100644
--- a/crosperf/test_cache/test_puretelemetry_input/results.txt
+++ b/crosperf/test_cache/test_puretelemetry_input/results.pickle
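The three renames above track the cache format change: cached results
are now stored as pickles rather than text, so an entry is read back
with pickle.load, e.g. for one of the renamed test fixtures:

    import pickle

    with open('crosperf/test_cache/test_input/results.pickle', 'rb') as f:
      cached_result = pickle.load(f)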