aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorCaroline Tice <cmtice@google.com>2017-04-06 17:16:05 -0700
committerchrome-bot <chrome-bot@chromium.org>2017-04-08 19:40:20 -0700
commitf6ef4395fe1896ba68c80e52cb24763b0fcfe7f8 (patch)
tree7612123711db98f0746e56b66368d16b388192bd
parent8c18be1425c8a4ecfc059a7c637fc93f33edab1f (diff)
downloadtoolchain-utils-f6ef4395fe1896ba68c80e52cb24763b0fcfe7f8.tar.gz
[toolchain-utils] Fix remaining lint errors in toolchain-utils.
In addition to fixing the lint errors, this also fixes the Python formatting issues (ran tc_pyformat on nearly all the files). BUG=chromium:570450 TEST=Ran all crosperf & bisect tool unit tests. Ran afe_lock_machine.py (check machine status) Ran full crosperf test (octane, speedometer, BootPerf) on alex. Change-Id: Ic86f9192801ac67769f3de30f1c5f0d203ce0831 Reviewed-on: https://chromium-review.googlesource.com/471886 Commit-Ready: Caroline Tice <cmtice@chromium.org> Tested-by: Caroline Tice <cmtice@chromium.org> Reviewed-by: Manoj Gupta <manojgupta@chromium.org>
-rwxr-xr-xafe_lock_machine.py25
-rwxr-xr-xbinary_search_tool/binary_search_perforce.py22
-rwxr-xr-xbinary_search_tool/binary_search_state.py6
-rwxr-xr-xbinary_search_tool/bisect.py8
-rwxr-xr-xbinary_search_tool/compiler_wrapper.py2
-rwxr-xr-xbinary_search_tool/cros_pkg/create_cleanup_script.py6
-rwxr-xr-xbinary_search_tool/run_bisect_test.py21
-rwxr-xr-xbinary_search_tool/sysroot_wrapper/testing_test.py8
-rwxr-xr-xbinary_search_tool/test/binary_search_tool_tester.py132
-rwxr-xr-xbinary_search_tool/test/common.py2
-rwxr-xr-xbinary_search_tool/test/gen_init_list.py2
-rwxr-xr-xbinary_search_tool/test/gen_obj.py6
-rwxr-xr-xbinary_search_tool/test/is_good.py2
-rwxr-xr-xbinary_search_tool/test/is_good_noinc_prune.py2
-rwxr-xr-xbinary_search_tool/test/switch_tmp.py2
-rwxr-xr-xbinary_search_tool/test/switch_to_bad.py2
-rwxr-xr-xbinary_search_tool/test/switch_to_bad_noinc_prune.py2
-rwxr-xr-xbinary_search_tool/test/switch_to_bad_set_file.py2
-rwxr-xr-xbinary_search_tool/test/switch_to_good.py2
-rwxr-xr-xbinary_search_tool/test/switch_to_good_noinc_prune.py2
-rwxr-xr-xbinary_search_tool/test/switch_to_good_set_file.py2
-rwxr-xr-xbinary_search_tool/test/test_setup.py2
-rwxr-xr-xbinary_search_tool/test/test_setup_bad.py2
-rwxr-xr-xbuild_chrome_browser.py168
-rwxr-xr-xbuild_chromeos.py167
-rwxr-xr-xbuild_tc.py196
-rwxr-xr-xbuild_tool.py319
-rwxr-xr-xchromiumos_image_diff.py114
-rwxr-xr-xcommand_executer_timeout_test.py2
-rwxr-xr-xcros_login.py37
-rw-r--r--cros_utils/misc.py78
-rw-r--r--cros_utils/tabulator.py143
-rw-r--r--crosperf/benchmark.py27
-rw-r--r--crosperf/benchmark_run.py26
-rwxr-xr-xcrosperf/benchmark_run_unittest.py76
-rwxr-xr-xcrosperf/benchmark_unittest.py54
-rw-r--r--crosperf/compare_machines.py9
-rwxr-xr-xcrosperf/crosperf_unittest.py13
-rw-r--r--crosperf/download_images.py32
-rwxr-xr-xcrosperf/download_images_unittest.py4
-rw-r--r--crosperf/experiment.py9
-rw-r--r--crosperf/experiment_factory.py61
-rwxr-xr-xcrosperf/experiment_factory_unittest.py12
-rw-r--r--crosperf/experiment_file.py11
-rwxr-xr-xcrosperf/experiment_file_unittest.py2
-rw-r--r--crosperf/experiment_runner.py41
-rwxr-xr-xcrosperf/experiment_runner_unittest.py144
-rw-r--r--crosperf/experiment_status.py8
-rw-r--r--crosperf/field.py4
-rwxr-xr-xcrosperf/flag_test_unittest.py2
-rwxr-xr-xcrosperf/generate_report.py91
-rwxr-xr-xcrosperf/generate_report_unittest.py62
-rw-r--r--crosperf/image_checksummer.py8
-rw-r--r--crosperf/machine_image_manager.py42
-rwxr-xr-xcrosperf/machine_image_manager_unittest.py114
-rw-r--r--crosperf/machine_manager.py80
-rwxr-xr-xcrosperf/machine_manager_unittest.py24
-rw-r--r--crosperf/results_cache.py22
-rwxr-xr-xcrosperf/results_cache_unittest.py39
-rw-r--r--crosperf/results_organizer.py10
-rwxr-xr-xcrosperf/results_organizer_unittest.py145
-rw-r--r--crosperf/results_report.py141
-rw-r--r--crosperf/results_report_templates.py33
-rwxr-xr-xcrosperf/results_report_unittest.py84
-rw-r--r--crosperf/schedv2.py79
-rwxr-xr-xcrosperf/schedv2_unittest.py49
-rw-r--r--crosperf/settings_factory.py7
-rwxr-xr-xcrosperf/settings_factory_unittest.py13
-rwxr-xr-xcrosperf/settings_unittest.py26
-rw-r--r--crosperf/suite_runner.py13
-rwxr-xr-xcrosperf/suite_runner_unittest.py11
-rwxr-xr-xfile_lock_machine.py101
-rwxr-xr-xgenerate-waterfall-reports.py40
-rwxr-xr-xget_common_image_version.py13
-rwxr-xr-xheat_map.py44
-rwxr-xr-ximage_chromeos.py144
-rwxr-xr-xproduce_output.py6
-rwxr-xr-xremote_gcc_build.py132
-rwxr-xr-xremote_kill_test.py17
-rwxr-xr-xremote_test.py96
-rwxr-xr-xrepo_to_repo.py82
-rwxr-xr-xrun_tests.py4
-rwxr-xr-xsetup_chromeos.py86
-rwxr-xr-xtc_enter_chroot.py116
-rwxr-xr-xtest_gcc_dejagnu.py101
-rwxr-xr-xtest_gdb_dejagnu.py75
-rwxr-xr-xtest_toolchains.py3
-rwxr-xr-xupdate_telemetry_defaults.py7
-rwxr-xr-xverify_compiler.py21
-rwxr-xr-xweekly_report.py33
90 files changed, 2258 insertions, 1977 deletions
diff --git a/afe_lock_machine.py b/afe_lock_machine.py
index 125ac971..370579db 100755
--- a/afe_lock_machine.py
+++ b/afe_lock_machine.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
#
# Copyright 2015 Google INc. All Rights Reserved.
"""This module controls locking and unlocking of test machines."""
@@ -97,6 +97,7 @@ class AFELockManager(object):
machines that are not in the ChromeOS HW lab.
local: A Boolean indicating whether or not to use/allow a local AFE
server to be used (see local_server argument).
+ use_local: Use the local server instead of the official one.
log: If not None, this is the logger object to be used for writing out
informational output messages. It is expected to be an instance of
Logger class from cros_utils/logger.py.
@@ -272,9 +273,9 @@ class AFELockManager(object):
for m in self.machines:
for cros_name in [m, m + '.cros']:
if cros_name in self.toolchain_lab_machines:
- raise UpdateNonLocalMachine('Machine %s is already in the ChromeOS HW'
- 'Lab. Cannot add it to local server.' %
- cros_name)
+ raise UpdateNonLocalMachine(
+ 'Machine %s is already in the ChromeOS HW'
+ 'Lab. Cannot add it to local server.' % cros_name)
host_info = self.local_afe.get_hosts(hostname=m)
if host_info:
raise DuplicateAdd('Machine %s is already on the local server.' % m)
@@ -380,9 +381,10 @@ class AFELockManager(object):
afe_server = self.local_afe
try:
- afe_server.run('modify_hosts',
- host_filter_data={'hostname__in': [m]},
- update_data=kwargs)
+ afe_server.run(
+ 'modify_hosts',
+ host_filter_data={'hostname__in': [m]},
+ update_data=kwargs)
except Exception as e:
traceback.print_exc()
raise LockingError('Unable to %s machine %s. %s' % (action, m, str(e)))
@@ -426,8 +428,9 @@ class AFELockManager(object):
if machine.find('.cros') == -1:
cros_machine = cros_machine + '.cros'
- self.machines = [m for m in self.machines
- if m != cros_machine and m != machine]
+ self.machines = [
+ m for m in self.machines if m != cros_machine and m != machine
+ ]
def CheckMachineLocks(self, machine_states, cmd):
"""Check that every machine in requested list is in the proper state.
@@ -456,8 +459,8 @@ class AFELockManager(object):
'else (%s).' % (k, state['locked_by']))
elif cmd == 'lock':
if state['locked']:
- self.logger.LogWarning('Attempt to lock already locked machine (%s)' %
- k)
+ self.logger.LogWarning(
+ 'Attempt to lock already locked machine (%s)' % k)
self._InternalRemoveMachine(k)
def HasAFEServer(self, local):
diff --git a/binary_search_tool/binary_search_perforce.py b/binary_search_tool/binary_search_perforce.py
index 7ac2fba6..aaa09eef 100755
--- a/binary_search_tool/binary_search_perforce.py
+++ b/binary_search_tool/binary_search_perforce.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
"""Module of binary serch for perforce."""
from __future__ import print_function
@@ -368,8 +368,8 @@ class P4GCCBinarySearcher(P4BinarySearcher):
self.CleanupCLs()
# Change the revision of only the gcc part of the toolchain.
command = ('cd %s/gcctools/google_vendor_src_branch/gcc '
- '&& g4 revert ...; g4 sync @%s' %
- (self.checkout_dir, current_revision))
+ '&& g4 revert ...; g4 sync @%s' % (self.checkout_dir,
+ current_revision))
self.current_ce.RunCommand(command)
self.HandleBrokenCLs(current_revision)
@@ -402,11 +402,13 @@ def Main(argv):
'-s', '--script', dest='script', help='Script to run for every version.')
options = parser.parse_args(argv)
# First get all revisions
- p4_paths = ['//depot2/gcctools/google_vendor_src_branch/gcc/gcc-4.4.3/...',
- '//depot2/gcctools/google_vendor_src_branch/binutils/'
- 'binutils-2.20.1-mobile/...',
- '//depot2/gcctools/google_vendor_src_branch/'
- 'binutils/binutils-20100303/...']
+ p4_paths = [
+ '//depot2/gcctools/google_vendor_src_branch/gcc/gcc-4.4.3/...',
+ '//depot2/gcctools/google_vendor_src_branch/binutils/'
+ 'binutils-2.20.1-mobile/...',
+ '//depot2/gcctools/google_vendor_src_branch/'
+ 'binutils/binutils-20100303/...'
+ ]
p4gccbs = P4GCCBinarySearcher('perforce2:2666', p4_paths, '')
# Main loop:
@@ -425,8 +427,8 @@ def Main(argv):
ce = command_executer.GetCommandExecuter()
command = '%s %s' % (script, p4gccbs.checkout_dir)
status = ce.RunCommand(command)
- message = ('Revision: %s produced: %d status\n' %
- (current_revision, status))
+ message = ('Revision: %s produced: %d status\n' % (current_revision,
+ status))
logger.GetLogger().LogOutput(message, print_to_console=verbose)
terminated = p4gccbs.SetStatus(status)
num_tries -= 1
diff --git a/binary_search_tool/binary_search_state.py b/binary_search_tool/binary_search_state.py
index a10e90b9..19065252 100755
--- a/binary_search_tool/binary_search_state.py
+++ b/binary_search_tool/binary_search_state.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
"""The binary search wrapper."""
from __future__ import print_function
@@ -559,8 +559,8 @@ def Run(get_initial_items,
try:
bss.DoSearch()
bss.RemoveState()
- logger.GetLogger().LogOutput('Total execution time: %s' %
- bss.ElapsedTimeString())
+ logger.GetLogger().LogOutput(
+ 'Total execution time: %s' % bss.ElapsedTimeString())
except Error as e:
logger.GetLogger().LogError(e)
return 1
diff --git a/binary_search_tool/bisect.py b/binary_search_tool/bisect.py
index d5a8b710..c7dd5238 100755
--- a/binary_search_tool/bisect.py
+++ b/binary_search_tool/bisect.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
"""The unified package/object bisecting tool."""
from __future__ import print_function
@@ -193,9 +193,9 @@ class BisectObject(Bisector):
if options.dir:
os.environ['BISECT_DIR'] = options.dir
self.options.dir = os.environ.get('BISECT_DIR', '/tmp/sysroot_bisect')
- self.setup_cmd = ('%s %s %s %s' % (self.sysroot_wrapper_setup,
- self.options.board, self.options.remote,
- self.options.package))
+ self.setup_cmd = ('%s %s %s %s' %
+ (self.sysroot_wrapper_setup, self.options.board,
+ self.options.remote, self.options.package))
self.ArgOverride(self.default_kwargs, overrides)
diff --git a/binary_search_tool/compiler_wrapper.py b/binary_search_tool/compiler_wrapper.py
index 3d6403a2..a6d189b5 100755
--- a/binary_search_tool/compiler_wrapper.py
+++ b/binary_search_tool/compiler_wrapper.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
"""Prototype compiler wrapper.
Only tested with: gcc, g++, clang, clang++
diff --git a/binary_search_tool/cros_pkg/create_cleanup_script.py b/binary_search_tool/cros_pkg/create_cleanup_script.py
index 32a1f160..ed4eab61 100755
--- a/binary_search_tool/cros_pkg/create_cleanup_script.py
+++ b/binary_search_tool/cros_pkg/create_cleanup_script.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
#
# Copyright 2015 Google Inc. All Rights Reserved
"""The script to generate a cleanup script after setup.sh.
@@ -90,8 +90,8 @@ def Main(argv):
if options.renamed_tree:
# Old build tree existed and was a real tree, so it got
# renamed. Move the renamed tree back to the original tree.
- out_file.write('sudo mv /build/%s.save /build/%s\n' %
- (options.board, options.board))
+ out_file.write('sudo mv /build/%s.save /build/%s\n' % (options.board,
+ options.board))
else:
# Old tree existed and was already a soft link. Re-create the
# original soft link.
diff --git a/binary_search_tool/run_bisect_test.py b/binary_search_tool/run_bisect_test.py
index c2776cce..53acc805 100755
--- a/binary_search_tool/run_bisect_test.py
+++ b/binary_search_tool/run_bisect_test.py
@@ -1,4 +1,5 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
+"""Run full bisection test."""
from __future__ import print_function
@@ -89,7 +90,7 @@ Is your compiler wrapper properly set up? [Y/n]
inp = sys.stdin.readline()
inp = inp.strip()
inp = inp.lower()
- return (not inp or inp == 'y' or inp == 'yes')
+ return not inp or inp == 'y' or inp == 'yes'
def Main(argv):
@@ -121,13 +122,13 @@ def Main(argv):
if not bisect_dir:
bisect_dir = DEFAULT_BISECT_DIR
- retval = populate_good_files(cwd, ce, bisect_dir)
- if retval != 0:
- return retval
+ retv = populate_good_files(cwd, ce, bisect_dir)
+ if retv != 0:
+ return retv
- retval = populate_bad_files(cwd, ce, bisect_dir)
- if retval != 0:
- return retval
+ retv = populate_bad_files(cwd, ce, bisect_dir)
+ if retv != 0:
+ return retv
# Set up good/bad work soft links
cmd = ('rm -f %s/%s/good-objects; ln -s %s/good %s/%s/good-objects' %
@@ -146,8 +147,8 @@ def Main(argv):
print('Error executing: %s; exiting now.' % cmd)
return status
- retval = run_main_bisection_test(cwd, ce)
- return retval
+ retv = run_main_bisection_test(cwd, ce)
+ return retv
if __name__ == '__main__':
diff --git a/binary_search_tool/sysroot_wrapper/testing_test.py b/binary_search_tool/sysroot_wrapper/testing_test.py
index 2f7bc4c3..a0d6ca10 100755
--- a/binary_search_tool/sysroot_wrapper/testing_test.py
+++ b/binary_search_tool/sysroot_wrapper/testing_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
"""Test for sysroot_wrapper bisector.
All files in bad_files will be determined to be bad. This test was made for
@@ -15,9 +15,9 @@ import os
base_path = ('/var/cache/chromeos-chrome/chrome-src-internal/src/out_daisy/'
'Release/obj/')
bad_files = [
- os.path.join(base_path, 'base/base.cpu.o'),
- os.path.join(base_path, 'base/base.version.o'),
- os.path.join(base_path, 'apps/apps.launcher.o')
+ os.path.join(base_path, 'base/base.cpu.o'), os.path.join(
+ base_path, 'base/base.version.o'), os.path.join(base_path,
+ 'apps/apps.launcher.o')
]
bisect_dir = os.environ.get('BISECT_DIR', '/tmp/sysroot_bisect')
diff --git a/binary_search_tool/test/binary_search_tool_tester.py b/binary_search_tool/test/binary_search_tool_tester.py
index 775c1715..e733d9c4 100755
--- a/binary_search_tool/test/binary_search_tool_tester.py
+++ b/binary_search_tool/test/binary_search_tool_tester.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
# Copyright 2012 Google Inc. All Rights Reserved.
"""Tests for bisecting tool."""
@@ -66,12 +66,13 @@ class BisectTest(unittest.TestCase):
return 0
def Run(self):
- return binary_search_state.Run(get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good.py',
- switch_to_bad='./switch_to_bad.py',
- test_script='./is_good.py',
- prune=True,
- file_args=True)
+ return binary_search_state.Run(
+ get_initial_items='./gen_init_list.py',
+ switch_to_good='./switch_to_good.py',
+ switch_to_bad='./switch_to_bad.py',
+ test_script='./is_good.py',
+ prune=True,
+ file_args=True)
def PostRun(self):
CleanObj()
@@ -127,26 +128,31 @@ class BisectingUtilsTest(unittest.TestCase):
except OSError:
pass
- cleanup_list = ['./is_setup', binary_search_state.STATE_FILE,
- 'noinc_prune_bad', 'noinc_prune_good']
+ cleanup_list = [
+ './is_setup', binary_search_state.STATE_FILE, 'noinc_prune_bad',
+ 'noinc_prune_good'
+ ]
for f in cleanup_list:
if os.path.exists(f):
os.remove(f)
def runTest(self):
- ret = binary_search_state.Run(get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good.py',
- switch_to_bad='./switch_to_bad.py',
- test_script='./is_good.py',
- prune=True,
- file_args=True)
+ ret = binary_search_state.Run(
+ get_initial_items='./gen_init_list.py',
+ switch_to_good='./switch_to_good.py',
+ switch_to_bad='./switch_to_bad.py',
+ test_script='./is_good.py',
+ prune=True,
+ file_args=True)
self.assertEquals(ret, 0)
self.check_output()
def test_arg_parse(self):
- args = ['--get_initial_items', './gen_init_list.py', '--switch_to_good',
- './switch_to_good.py', '--switch_to_bad', './switch_to_bad.py',
- '--test_script', './is_good.py', '--prune', '--file_args']
+ args = [
+ '--get_initial_items', './gen_init_list.py', '--switch_to_good',
+ './switch_to_good.py', '--switch_to_bad', './switch_to_bad.py',
+ '--test_script', './is_good.py', '--prune', '--file_args'
+ ]
ret = binary_search_state.Main(args)
self.assertEquals(ret, 0)
self.check_output()
@@ -154,32 +160,35 @@ class BisectingUtilsTest(unittest.TestCase):
def test_test_setup_script(self):
os.remove('./is_setup')
with self.assertRaises(AssertionError):
- ret = binary_search_state.Run(get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good.py',
- switch_to_bad='./switch_to_bad.py',
- test_script='./is_good.py',
- prune=True,
- file_args=True)
-
- ret = binary_search_state.Run(get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good.py',
- switch_to_bad='./switch_to_bad.py',
- test_script='./is_good.py',
- test_setup_script='./test_setup.py',
- prune=True,
- file_args=True)
+ ret = binary_search_state.Run(
+ get_initial_items='./gen_init_list.py',
+ switch_to_good='./switch_to_good.py',
+ switch_to_bad='./switch_to_bad.py',
+ test_script='./is_good.py',
+ prune=True,
+ file_args=True)
+
+ ret = binary_search_state.Run(
+ get_initial_items='./gen_init_list.py',
+ switch_to_good='./switch_to_good.py',
+ switch_to_bad='./switch_to_bad.py',
+ test_script='./is_good.py',
+ test_setup_script='./test_setup.py',
+ prune=True,
+ file_args=True)
self.assertEquals(ret, 0)
self.check_output()
def test_bad_test_setup_script(self):
with self.assertRaises(AssertionError):
- binary_search_state.Run(get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good.py',
- switch_to_bad='./switch_to_bad.py',
- test_script='./is_good.py',
- test_setup_script='./test_setup_bad.py',
- prune=True,
- file_args=True)
+ binary_search_state.Run(
+ get_initial_items='./gen_init_list.py',
+ switch_to_good='./switch_to_good.py',
+ switch_to_bad='./switch_to_bad.py',
+ test_script='./is_good.py',
+ test_setup_script='./test_setup_bad.py',
+ prune=True,
+ file_args=True)
def test_bad_save_state(self):
state_file = binary_search_state.STATE_FILE
@@ -294,13 +303,14 @@ class BisectingUtilsTest(unittest.TestCase):
self.assertEquals(bad_objs[found_obj], 1)
def test_set_file(self):
- binary_search_state.Run(get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good_set_file.py',
- switch_to_bad='./switch_to_bad_set_file.py',
- test_script='./is_good.py',
- prune=True,
- file_args=True,
- verify=True)
+ binary_search_state.Run(
+ get_initial_items='./gen_init_list.py',
+ switch_to_good='./switch_to_good_set_file.py',
+ switch_to_bad='./switch_to_bad_set_file.py',
+ test_script='./is_good.py',
+ prune=True,
+ file_args=True,
+ verify=True)
self.check_output()
def test_noincremental_prune(self):
@@ -343,13 +353,14 @@ class BisectStressTest(unittest.TestCase):
def test_every_obj_bad(self):
amt = 25
gen_obj.Main(['--obj_num', str(amt), '--bad_obj_num', str(amt)])
- ret = binary_search_state.Run(get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good.py',
- switch_to_bad='./switch_to_bad.py',
- test_script='./is_good.py',
- prune=True,
- file_args=True,
- verify=False)
+ ret = binary_search_state.Run(
+ get_initial_items='./gen_init_list.py',
+ switch_to_good='./switch_to_good.py',
+ switch_to_bad='./switch_to_bad.py',
+ test_script='./is_good.py',
+ prune=True,
+ file_args=True,
+ verify=False)
self.assertEquals(ret, 0)
self.check_output()
@@ -360,13 +371,14 @@ class BisectStressTest(unittest.TestCase):
obj_list[i] = '1'
obj_list = ','.join(obj_list)
gen_obj.Main(['--obj_list', obj_list])
- ret = binary_search_state.Run(get_initial_items='./gen_init_list.py',
- switch_to_good='./switch_to_good.py',
- switch_to_bad='./switch_to_bad.py',
- test_setup_script='./test_setup.py',
- test_script='./is_good.py',
- prune=True,
- file_args=True)
+ ret = binary_search_state.Run(
+ get_initial_items='./gen_init_list.py',
+ switch_to_good='./switch_to_good.py',
+ switch_to_bad='./switch_to_bad.py',
+ test_setup_script='./test_setup.py',
+ test_script='./is_good.py',
+ prune=True,
+ file_args=True)
self.assertEquals(ret, 0)
self.check_output()
diff --git a/binary_search_tool/test/common.py b/binary_search_tool/test/common.py
index baac9434..5c3ff538 100755
--- a/binary_search_tool/test/common.py
+++ b/binary_search_tool/test/common.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
"""Common utility functions."""
DEFAULT_OBJECT_NUMBER = 1238
diff --git a/binary_search_tool/test/gen_init_list.py b/binary_search_tool/test/gen_init_list.py
index 4a79a1b1..002fc352 100755
--- a/binary_search_tool/test/gen_init_list.py
+++ b/binary_search_tool/test/gen_init_list.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
"""Prints out index for every object file, starting from 0."""
from __future__ import print_function
diff --git a/binary_search_tool/test/gen_obj.py b/binary_search_tool/test/gen_obj.py
index 265729d2..d17e93f5 100755
--- a/binary_search_tool/test/gen_obj.py
+++ b/binary_search_tool/test/gen_obj.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
"""Script to generate a list of object files.
0 represents a good object file.
@@ -86,8 +86,8 @@ def Main(argv):
obj_num = len(obj_list)
bad_obj_num = obj_list.count('1')
- print('Generated {0} object files, with {1} bad ones.'.format(obj_num,
- bad_obj_num))
+ print('Generated {0} object files, with {1} bad ones.'.format(
+ obj_num, bad_obj_num))
return 0
diff --git a/binary_search_tool/test/is_good.py b/binary_search_tool/test/is_good.py
index bfe9cc32..a0be4a08 100755
--- a/binary_search_tool/test/is_good.py
+++ b/binary_search_tool/test/is_good.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
"""Check to see if the working set produces a good executable."""
from __future__ import print_function
diff --git a/binary_search_tool/test/is_good_noinc_prune.py b/binary_search_tool/test/is_good_noinc_prune.py
index 5aafd6c2..a900bd32 100755
--- a/binary_search_tool/test/is_good_noinc_prune.py
+++ b/binary_search_tool/test/is_good_noinc_prune.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
"""Check to see if the working set produces a good executable.
This test script is made for the noincremental-prune test. This makes sure
diff --git a/binary_search_tool/test/switch_tmp.py b/binary_search_tool/test/switch_tmp.py
index 165004ed..51b7110e 100755
--- a/binary_search_tool/test/switch_tmp.py
+++ b/binary_search_tool/test/switch_tmp.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
"""Change portions of the object files to good.
This file is a test switch script. Used only for the test test_tmp_cleanup.
diff --git a/binary_search_tool/test/switch_to_bad.py b/binary_search_tool/test/switch_to_bad.py
index b8602421..a1b6bd59 100755
--- a/binary_search_tool/test/switch_to_bad.py
+++ b/binary_search_tool/test/switch_to_bad.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
"""Switch part of the objects file in working set to (possible) bad ones."""
from __future__ import print_function
diff --git a/binary_search_tool/test/switch_to_bad_noinc_prune.py b/binary_search_tool/test/switch_to_bad_noinc_prune.py
index 87bf1584..db76acad 100755
--- a/binary_search_tool/test/switch_to_bad_noinc_prune.py
+++ b/binary_search_tool/test/switch_to_bad_noinc_prune.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
"""Switch part of the objects file in working set to (possible) bad ones.
The "portion" is defined by the file (which is passed as the only argument to
diff --git a/binary_search_tool/test/switch_to_bad_set_file.py b/binary_search_tool/test/switch_to_bad_set_file.py
index f535fdfd..edf226d3 100755
--- a/binary_search_tool/test/switch_to_bad_set_file.py
+++ b/binary_search_tool/test/switch_to_bad_set_file.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
"""Switch part of the objects file in working set to (possible) bad ones.
This script is meant to be specifically used with the set_file test. This uses
diff --git a/binary_search_tool/test/switch_to_good.py b/binary_search_tool/test/switch_to_good.py
index 68e9633f..59a118c1 100755
--- a/binary_search_tool/test/switch_to_good.py
+++ b/binary_search_tool/test/switch_to_good.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
"""Change portions of the object files to good.
The "portion" is defined by the file (which is passed as the only argument to
diff --git a/binary_search_tool/test/switch_to_good_noinc_prune.py b/binary_search_tool/test/switch_to_good_noinc_prune.py
index c5e78e45..00488a74 100755
--- a/binary_search_tool/test/switch_to_good_noinc_prune.py
+++ b/binary_search_tool/test/switch_to_good_noinc_prune.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
"""Change portions of the object files to good.
The "portion" is defined by the file (which is passed as the only argument to
diff --git a/binary_search_tool/test/switch_to_good_set_file.py b/binary_search_tool/test/switch_to_good_set_file.py
index 83777af0..b5e521f9 100755
--- a/binary_search_tool/test/switch_to_good_set_file.py
+++ b/binary_search_tool/test/switch_to_good_set_file.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
"""Change portions of the object files to good.
The "portion" is defined by the file (which is passed as the only argument to
diff --git a/binary_search_tool/test/test_setup.py b/binary_search_tool/test/test_setup.py
index 3fb5a23c..0d6a410e 100755
--- a/binary_search_tool/test/test_setup.py
+++ b/binary_search_tool/test/test_setup.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
"""Emulate running of test setup script, is_good.py should fail without this."""
from __future__ import print_function
diff --git a/binary_search_tool/test/test_setup_bad.py b/binary_search_tool/test/test_setup_bad.py
index 8d72763e..d715f57a 100755
--- a/binary_search_tool/test/test_setup_bad.py
+++ b/binary_search_tool/test/test_setup_bad.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
"""Emulate test setup that fails (i.e. failed flash to device)"""
from __future__ import print_function
diff --git a/build_chrome_browser.py b/build_chrome_browser.py
index e0bbac51..c3b78870 100755
--- a/build_chrome_browser.py
+++ b/build_chrome_browser.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Script to checkout the ChromeOS source.
@@ -32,71 +32,81 @@ def Main(argv):
cmd_executer = command_executer.GetCommandExecuter()
parser = argparse.ArgumentParser()
- parser.add_argument('--chromeos_root',
- dest='chromeos_root',
- help='Target directory for ChromeOS installation.')
+ parser.add_argument(
+ '--chromeos_root',
+ dest='chromeos_root',
+ help='Target directory for ChromeOS installation.')
parser.add_argument('--version', dest='version')
- parser.add_argument('--clean',
- dest='clean',
- default=False,
- action='store_true',
- help=('Clean the /var/cache/chromeos-chrome/'
- 'chrome-src/src/out_$board dir'))
- parser.add_argument('--env',
- dest='env',
- default='',
- help='Use the following env')
- parser.add_argument('--ebuild_version',
- dest='ebuild_version',
- help='Use this ebuild instead of the default one.')
- parser.add_argument('--cflags',
- dest='cflags',
- default='',
- help='CFLAGS for the ChromeOS packages')
- parser.add_argument('--cxxflags',
- dest='cxxflags',
- default='',
- help='CXXFLAGS for the ChromeOS packages')
- parser.add_argument('--ldflags',
- dest='ldflags',
- default='',
- help='LDFLAGS for the ChromeOS packages')
- parser.add_argument('--board',
- dest='board',
- help='ChromeOS target board, e.g. x86-generic')
- parser.add_argument('--no_build_image',
- dest='no_build_image',
- default=False,
- action='store_true',
- help=('Skip build image after building browser.'
- 'Defaults to False.'))
- parser.add_argument('--label',
- dest='label',
- help='Optional label to apply to the ChromeOS image.')
- parser.add_argument('--build_image_args',
- default='',
- dest='build_image_args',
- help='Optional arguments to build_image.')
- parser.add_argument('--cros_workon',
- dest='cros_workon',
- help='Build using external source tree.')
- parser.add_argument('--dev',
- dest='dev',
- default=False,
- action='store_true',
- help=('Build a dev (eg. writable/large) image. '
- 'Defaults to False.'))
- parser.add_argument('--debug',
- dest='debug',
- default=False,
- action='store_true',
- help=('Build chrome browser using debug mode. '
- 'This option implies --dev. Defaults to false.'))
- parser.add_argument('--verbose',
- dest='verbose',
- default=False,
- action='store_true',
- help='Build with verbose information.')
+ parser.add_argument(
+ '--clean',
+ dest='clean',
+ default=False,
+ action='store_true',
+ help=('Clean the /var/cache/chromeos-chrome/'
+ 'chrome-src/src/out_$board dir'))
+ parser.add_argument(
+ '--env', dest='env', default='', help='Use the following env')
+ parser.add_argument(
+ '--ebuild_version',
+ dest='ebuild_version',
+ help='Use this ebuild instead of the default one.')
+ parser.add_argument(
+ '--cflags',
+ dest='cflags',
+ default='',
+ help='CFLAGS for the ChromeOS packages')
+ parser.add_argument(
+ '--cxxflags',
+ dest='cxxflags',
+ default='',
+ help='CXXFLAGS for the ChromeOS packages')
+ parser.add_argument(
+ '--ldflags',
+ dest='ldflags',
+ default='',
+ help='LDFLAGS for the ChromeOS packages')
+ parser.add_argument(
+ '--board', dest='board', help='ChromeOS target board, e.g. x86-generic')
+ parser.add_argument(
+ '--no_build_image',
+ dest='no_build_image',
+ default=False,
+ action='store_true',
+ help=('Skip build image after building browser.'
+ 'Defaults to False.'))
+ parser.add_argument(
+ '--label',
+ dest='label',
+ help='Optional label to apply to the ChromeOS image.')
+ parser.add_argument(
+ '--build_image_args',
+ default='',
+ dest='build_image_args',
+ help='Optional arguments to build_image.')
+ parser.add_argument(
+ '--cros_workon',
+ dest='cros_workon',
+ help='Build using external source tree.')
+ parser.add_argument(
+ '--dev',
+ dest='dev',
+ default=False,
+ action='store_true',
+ help=('Build a dev (eg. writable/large) image. '
+ 'Defaults to False.'))
+ parser.add_argument(
+ '--debug',
+ dest='debug',
+ default=False,
+ action='store_true',
+ help=('Build chrome browser using debug mode. '
+ 'This option implies --dev. Defaults to false.'))
+ parser.add_argument(
+ '--verbose',
+ dest='verbose',
+ default=False,
+ action='store_true',
+ help='Build with verbose information.')
options = parser.parse_args(argv)
@@ -130,8 +140,8 @@ def Main(argv):
ebuild_version = 'chromeos-chrome'
if options.cros_workon and not (
- os.path.isdir(options.cros_workon) and os.path.exists(os.path.join(
- options.cros_workon, 'src/chromeos/BUILD.gn'))):
+ os.path.isdir(options.cros_workon) and os.path.exists(
+ os.path.join(options.cros_workon, 'src/chromeos/BUILD.gn'))):
Usage(parser, '--cros_workon must be a valid chromium browser checkout.')
if options.verbose:
@@ -179,9 +189,10 @@ def Main(argv):
if options.cros_workon:
cros_sdk_options = '--chrome_root={0}'.format(options.cros_workon)
- ret = cmd_executer.ChrootRunCommand(options.chromeos_root,
- emerge_browser_command,
- cros_sdk_options=cros_sdk_options)
+ ret = cmd_executer.ChrootRunCommand(
+ options.chromeos_root,
+ emerge_browser_command,
+ cros_sdk_options=cros_sdk_options)
logger.GetLogger().LogFatalIf(ret, 'build_packages failed')
@@ -197,13 +208,12 @@ def Main(argv):
return ret
# Finally build the image
- ret = cmd_executer.ChrootRunCommand(
- options.chromeos_root,
- '{0} {1} {2} {3}'.format(unmask_env,
- options.env,
- misc.GetBuildImageCommand(options.board,
- dev=options.dev),
- options.build_image_args))
+ ret = cmd_executer.ChrootRunCommand(options.chromeos_root,
+ '{0} {1} {2} {3}'.format(
+ unmask_env, options.env,
+ misc.GetBuildImageCommand(
+ options.board, dev=options.dev),
+ options.build_image_args))
logger.GetLogger().LogFatalIf(ret, 'build_image failed')
@@ -226,8 +236,8 @@ def Main(argv):
options.label)
ret = cmd_executer.RunCommand(command)
- logger.GetLogger().LogFatalIf(ret, 'Failed to apply symlink label %s' %
- options.label)
+ logger.GetLogger().LogFatalIf(
+ ret, 'Failed to apply symlink label %s' % options.label)
return ret
diff --git a/build_chromeos.py b/build_chromeos.py
index cb68fd00..6b4f4dcd 100755
--- a/build_chromeos.py
+++ b/build_chromeos.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Script to checkout the ChromeOS source.
@@ -35,73 +35,81 @@ def Main(argv):
cmd_executer = command_executer.GetCommandExecuter()
parser = argparse.ArgumentParser()
- parser.add_argument('--chromeos_root',
- dest='chromeos_root',
- help='Target directory for ChromeOS installation.')
- parser.add_argument('--clobber_chroot',
- dest='clobber_chroot',
- action='store_true',
- help='Delete the chroot and start fresh',
- default=False)
- parser.add_argument('--clobber_board',
- dest='clobber_board',
- action='store_true',
- help='Delete the board and start fresh',
- default=False)
- parser.add_argument('--rebuild',
- dest='rebuild',
- action='store_true',
- help='Rebuild all board packages except the toolchain.',
- default=False)
- parser.add_argument('--cflags',
- dest='cflags',
- default='',
- help='CFLAGS for the ChromeOS packages')
- parser.add_argument('--cxxflags',
- dest='cxxflags',
- default='',
- help='CXXFLAGS for the ChromeOS packages')
- parser.add_argument('--ldflags',
- dest='ldflags',
- default='',
- help='LDFLAGS for the ChromeOS packages')
- parser.add_argument('--board',
- dest='board',
- help='ChromeOS target board, e.g. x86-generic')
- parser.add_argument('--package',
- dest='package',
- help='The package needs to be built')
- parser.add_argument('--label',
- dest='label',
- help='Optional label symlink to point to build dir.')
- parser.add_argument('--dev',
- dest='dev',
- default=False,
- action='store_true',
- help=('Make the final image in dev mode (eg writable, '
- 'more space on image). Defaults to False.'))
- parser.add_argument('--debug',
- dest='debug',
- default=False,
- action='store_true',
- help=("Optional. Build chrome browser with \"-g -O0\". "
- "Notice, this also turns on \'--dev\'. "
- 'Defaults to False.'))
- parser.add_argument('--env',
- dest='env',
- default='',
- help='Env to pass to build_packages.')
- parser.add_argument('--vanilla',
- dest='vanilla',
- default=False,
- action='store_true',
- help='Use default ChromeOS toolchain.')
- parser.add_argument('--vanilla_image',
- dest='vanilla_image',
- default=False,
- action='store_true',
- help=('Use prebuild packages for building the image. '
- 'It also implies the --vanilla option is set.'))
+ parser.add_argument(
+ '--chromeos_root',
+ dest='chromeos_root',
+ help='Target directory for ChromeOS installation.')
+ parser.add_argument(
+ '--clobber_chroot',
+ dest='clobber_chroot',
+ action='store_true',
+ help='Delete the chroot and start fresh',
+ default=False)
+ parser.add_argument(
+ '--clobber_board',
+ dest='clobber_board',
+ action='store_true',
+ help='Delete the board and start fresh',
+ default=False)
+ parser.add_argument(
+ '--rebuild',
+ dest='rebuild',
+ action='store_true',
+ help='Rebuild all board packages except the toolchain.',
+ default=False)
+ parser.add_argument(
+ '--cflags',
+ dest='cflags',
+ default='',
+ help='CFLAGS for the ChromeOS packages')
+ parser.add_argument(
+ '--cxxflags',
+ dest='cxxflags',
+ default='',
+ help='CXXFLAGS for the ChromeOS packages')
+ parser.add_argument(
+ '--ldflags',
+ dest='ldflags',
+ default='',
+ help='LDFLAGS for the ChromeOS packages')
+ parser.add_argument(
+ '--board', dest='board', help='ChromeOS target board, e.g. x86-generic')
+ parser.add_argument(
+ '--package', dest='package', help='The package needs to be built')
+ parser.add_argument(
+ '--label',
+ dest='label',
+ help='Optional label symlink to point to build dir.')
+ parser.add_argument(
+ '--dev',
+ dest='dev',
+ default=False,
+ action='store_true',
+ help=('Make the final image in dev mode (eg writable, '
+ 'more space on image). Defaults to False.'))
+ parser.add_argument(
+ '--debug',
+ dest='debug',
+ default=False,
+ action='store_true',
+ help=("Optional. Build chrome browser with \"-g -O0\". "
+ "Notice, this also turns on \'--dev\'. "
+ 'Defaults to False.'))
+ parser.add_argument(
+ '--env', dest='env', default='', help='Env to pass to build_packages.')
+ parser.add_argument(
+ '--vanilla',
+ dest='vanilla',
+ default=False,
+ action='store_true',
+ help='Use default ChromeOS toolchain.')
+ parser.add_argument(
+ '--vanilla_image',
+ dest='vanilla_image',
+ default=False,
+ action='store_true',
+ help=('Use prebuild packages for building the image. '
+ 'It also implies the --vanilla option is set.'))
options = parser.parse_args(argv[1:])
@@ -137,9 +145,7 @@ def Main(argv):
build_packages_env, {'USE': 'chrome_internal afdo_use'})
build_packages_command = misc.GetBuildPackagesCommand(
- board=options.board,
- usepkg=options.vanilla_image,
- debug=options.debug)
+ board=options.board, usepkg=options.vanilla_image, debug=options.debug)
if options.package:
build_packages_command += ' {0}'.format(options.package)
@@ -147,9 +153,10 @@ def Main(argv):
build_image_command = misc.GetBuildImageCommand(options.board, options.dev)
if options.vanilla or options.vanilla_image:
- command = misc.GetSetupBoardCommand(options.board,
- usepkg=options.vanilla_image,
- force=options.clobber_board)
+ command = misc.GetSetupBoardCommand(
+ options.board,
+ usepkg=options.vanilla_image,
+ force=options.clobber_board)
command += '; ' + build_packages_env + ' ' + build_packages_command
command += '&& ' + build_packages_env + ' ' + build_image_command
ret = cmd_executer.ChrootRunCommand(options.chromeos_root, command)
@@ -237,10 +244,10 @@ def Main(argv):
"LDFLAGS=\"$(portageq-%s envvar LDFLAGS) %s\" "
'CHROME_ORIGIN=SERVER_SOURCE '
'%s '
- '%s --skip_chroot_upgrade' % (options.board, options.cflags,
- options.board, options.cxxflags,
- options.board, options.ldflags,
- build_packages_env, build_packages_command))
+ '%s --skip_chroot_upgrade' %
+ (options.board, options.cflags, options.board, options.cxxflags,
+ options.board, options.ldflags, build_packages_env,
+ build_packages_command))
logger.GetLogger().LogFatalIf(ret, 'build_packages failed')
if options.package:
@@ -269,8 +276,8 @@ def Main(argv):
os.path.dirname(real_image_dir_path), options.label))
ret = cmd_executer.RunCommand(command)
- logger.GetLogger().LogFatalIf(ret, 'Failed to apply symlink label %s' %
- options.label)
+ logger.GetLogger().LogFatalIf(
+ ret, 'Failed to apply symlink label %s' % options.label)
return ret
diff --git a/build_tc.py b/build_tc.py
index 55fc5b70..4f022d29 100755
--- a/build_tc.py
+++ b/build_tc.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
#
# Copyright 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -44,9 +44,9 @@ class ToolchainPart(object):
self._chromeos_root)
self.tag = '%s-%s' % (name, self._ctarget)
self._ce = command_executer.GetCommandExecuter()
- self._mask_file = os.path.join(self._chromeos_root, 'chroot',
- 'etc/portage/package.mask/cross-%s' %
- self._ctarget)
+ self._mask_file = os.path.join(
+ self._chromeos_root, 'chroot',
+ 'etc/portage/package.mask/cross-%s' % self._ctarget)
self._new_mask_file = None
self._chroot_source_path = os.path.join(constants.MOUNTED_TOOLCHAIN_ROOT,
@@ -59,8 +59,8 @@ class ToolchainPart(object):
cross_symlink = os.path.join(self._chromeos_root, 'chroot',
'usr/local/bin/emerge-%s' % self._board)
if not os.path.exists(cross_symlink):
- command = ('%s/setup_board --board=%s' %
- (misc.CHROMEOS_SCRIPTS_DIR, self._board))
+ command = ('%s/setup_board --board=%s' % (misc.CHROMEOS_SCRIPTS_DIR,
+ self._board))
self._ce.ChrootRunCommand(self._chromeos_root, command)
def Build(self):
@@ -114,9 +114,9 @@ class ToolchainPart(object):
mount_statuses = [mp.DoMount() == 0 for mp in mount_points]
if not all(mount_statuses):
- mounted = [mp
- for mp, status in zip(mount_points, mount_statuses)
- if status]
+ mounted = [
+ mp for mp, status in zip(mount_points, mount_statuses) if status
+ ]
unmount_statuses = [mp.UnMount() == 0 for mp in mounted]
assert all(unmount_statuses), 'Could not unmount all mount points!'
@@ -149,8 +149,8 @@ class ToolchainPart(object):
if self._name == 'gcc' and not self._gcc_enable_ccache:
env['USE'] += ' -wrapper_ccache'
- env['%s_SOURCE_PATH' % self._name.upper()] = (
- os.path.join('/', self._chroot_source_path))
+ env['%s_SOURCE_PATH' % self._name.upper()] = (os.path.join(
+ '/', self._chroot_source_path))
env['ACCEPT_KEYWORDS'] = '~*'
env_string = ' '.join(["%s=\"%s\"" % var for var in env.items()])
command = 'emerge =cross-%s/%s-9999' % (self._ctarget, self._name)
@@ -159,8 +159,8 @@ class ToolchainPart(object):
if rv != 0:
return rv
if self._name == 'gcc':
- command = ('sudo cp -r /usr/lib/gcc/%s %s' %
- (self._ctarget, self._gcc_libs_dest))
+ command = ('sudo cp -r /usr/lib/gcc/%s %s' % (self._ctarget,
+ self._gcc_libs_dest))
rv = self._ce.ChrootRunCommand(self._chromeos_root, command)
return rv
@@ -181,83 +181,99 @@ def Main(argv):
"""The main function."""
# Common initializations
parser = argparse.ArgumentParser()
- parser.add_argument('-c',
- '--chromeos_root',
- dest='chromeos_root',
- default='../../',
- help=('ChromeOS root checkout directory'
- ' uses ../.. if none given.'))
- parser.add_argument('-g',
- '--gcc_dir',
- dest='gcc_dir',
- help='The directory where gcc resides.')
- parser.add_argument('--binutils_dir',
- dest='binutils_dir',
- help='The directory where binutils resides.')
- parser.add_argument('-x',
- '--gdb_dir',
- dest='gdb_dir',
- help='The directory where gdb resides.')
- parser.add_argument('-b',
- '--board',
- dest='board',
- default='x86-alex',
- help='The target board.')
- parser.add_argument('-n',
- '--noincremental',
- dest='noincremental',
- default=False,
- action='store_true',
- help='Use FEATURES=keepwork to do incremental builds.')
- parser.add_argument('--cflags',
- dest='cflags',
- default='',
- help='Build a compiler with specified CFLAGS')
- parser.add_argument('--cxxflags',
- dest='cxxflags',
- default='',
- help='Build a compiler with specified CXXFLAGS')
- parser.add_argument('--cflags_for_target',
- dest='cflags_for_target',
- default='',
- help='Build the target libraries with specified flags')
- parser.add_argument('--cxxflags_for_target',
- dest='cxxflags_for_target',
- default='',
- help='Build the target libraries with specified flags')
- parser.add_argument('--ldflags',
- dest='ldflags',
- default='',
- help='Build a compiler with specified LDFLAGS')
- parser.add_argument('-d',
- '--debug',
- dest='debug',
- default=False,
- action='store_true',
- help='Build a compiler with -g3 -O0 appended to both'
- ' CFLAGS and CXXFLAGS.')
- parser.add_argument('-m',
- '--mount_only',
- dest='mount_only',
- default=False,
- action='store_true',
- help='Just mount the tool directories.')
- parser.add_argument('-u',
- '--unmount_only',
- dest='unmount_only',
- default=False,
- action='store_true',
- help='Just unmount the tool directories.')
- parser.add_argument('--extra_use_flags',
- dest='extra_use_flags',
- default='',
- help='Extra flag for USE, to be passed to the ebuild. '
- "('multislot' and 'mounted_<tool>' are always passed.)")
- parser.add_argument('--gcc_enable_ccache',
- dest='gcc_enable_ccache',
- default=False,
- action='store_true',
- help='Enable ccache for the gcc invocations')
+ parser.add_argument(
+ '-c',
+ '--chromeos_root',
+ dest='chromeos_root',
+ default='../../',
+ help=('ChromeOS root checkout directory'
+ ' uses ../.. if none given.'))
+ parser.add_argument(
+ '-g',
+ '--gcc_dir',
+ dest='gcc_dir',
+ help='The directory where gcc resides.')
+ parser.add_argument(
+ '--binutils_dir',
+ dest='binutils_dir',
+ help='The directory where binutils resides.')
+ parser.add_argument(
+ '-x',
+ '--gdb_dir',
+ dest='gdb_dir',
+ help='The directory where gdb resides.')
+ parser.add_argument(
+ '-b',
+ '--board',
+ dest='board',
+ default='x86-alex',
+ help='The target board.')
+ parser.add_argument(
+ '-n',
+ '--noincremental',
+ dest='noincremental',
+ default=False,
+ action='store_true',
+ help='Use FEATURES=keepwork to do incremental builds.')
+ parser.add_argument(
+ '--cflags',
+ dest='cflags',
+ default='',
+ help='Build a compiler with specified CFLAGS')
+ parser.add_argument(
+ '--cxxflags',
+ dest='cxxflags',
+ default='',
+ help='Build a compiler with specified CXXFLAGS')
+ parser.add_argument(
+ '--cflags_for_target',
+ dest='cflags_for_target',
+ default='',
+ help='Build the target libraries with specified flags')
+ parser.add_argument(
+ '--cxxflags_for_target',
+ dest='cxxflags_for_target',
+ default='',
+ help='Build the target libraries with specified flags')
+ parser.add_argument(
+ '--ldflags',
+ dest='ldflags',
+ default='',
+ help='Build a compiler with specified LDFLAGS')
+ parser.add_argument(
+ '-d',
+ '--debug',
+ dest='debug',
+ default=False,
+ action='store_true',
+ help='Build a compiler with -g3 -O0 appended to both'
+ ' CFLAGS and CXXFLAGS.')
+ parser.add_argument(
+ '-m',
+ '--mount_only',
+ dest='mount_only',
+ default=False,
+ action='store_true',
+ help='Just mount the tool directories.')
+ parser.add_argument(
+ '-u',
+ '--unmount_only',
+ dest='unmount_only',
+ default=False,
+ action='store_true',
+ help='Just unmount the tool directories.')
+ parser.add_argument(
+ '--extra_use_flags',
+ dest='extra_use_flags',
+ default='',
+ help='Extra flag for USE, to be passed to the ebuild. '
+ "('multislot' and 'mounted_<tool>' are always passed.)")
+ parser.add_argument(
+ '--gcc_enable_ccache',
+ dest='gcc_enable_ccache',
+ default=False,
+ action='store_true',
+ help='Enable ccache for the gcc invocations')
options = parser.parse_args(argv)
diff --git a/build_tool.py b/build_tool.py
index 1df46952..3bd357c0 100755
--- a/build_tool.py
+++ b/build_tool.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
"""Script to bootstrap the chroot using new toolchain.
This script allows you to build/install a customized version of gcc/binutils,
@@ -34,7 +34,6 @@ import os
import re
import sys
-
from cros_utils import command_executer
from cros_utils import logger
from cros_utils import misc
@@ -92,14 +91,14 @@ class Bootstrapper(object):
def SubmitToLocalBranch(self):
"""Copy source code to the chromium source tree and submit it locally."""
if self._gcc_dir:
- if not self.SubmitToolToLocalBranch(tool_name='gcc',
- tool_dir=self._gcc_dir):
+ if not self.SubmitToolToLocalBranch(
+ tool_name='gcc', tool_dir=self._gcc_dir):
return False
self._gcc_branch = TEMP_BRANCH_NAME
if self._binutils_dir:
- if not self.SubmitToolToLocalBranch(tool_name='binutils',
- tool_dir=self._binutils_dir):
+ if not self.SubmitToolToLocalBranch(
+ tool_name='binutils', tool_dir=self._binutils_dir):
return False
self._binutils_branch = TEMP_BRANCH_NAME
@@ -122,8 +121,8 @@ class Bootstrapper(object):
# 0. Test to see if git tree is free of local changes.
if not misc.IsGitTreeClean(chrome_tool_dir):
- self._logger.LogError('Git repository "{0}" not clean, aborted.'.format(
- chrome_tool_dir))
+ self._logger.LogError(
+ 'Git repository "{0}" not clean, aborted.'.format(chrome_tool_dir))
return False
# 1. Checkout/create a (new) branch for testing.
@@ -135,9 +134,8 @@ class Bootstrapper(object):
return False
if self.IsTreeSame(tool_dir, chrome_tool_dir):
- self._logger.LogOutput(
- '"{0}" and "{1}" are the same, sync skipped.'.format(tool_dir,
- chrome_tool_dir))
+ self._logger.LogOutput('"{0}" and "{1}" are the same, sync skipped.'.
+ format(tool_dir, chrome_tool_dir))
return True
# 2. Sync sources from user provided tool dir to chromiumos tool git.
@@ -163,17 +161,15 @@ class Bootstrapper(object):
cmd = 'cd {0} && git log -1 --pretty=oneline'.format(tool_dir)
tool_dir_extra_info = None
ret, tool_dir_extra_info, _ = self._ce.RunCommandWOutput(
- cmd,
- print_to_console=False)
+ cmd, print_to_console=False)
commit_message = 'Synced with tool source tree at - "{0}".'.format(tool_dir)
if not ret:
commit_message += '\nGit log for {0}:\n{1}'.format(
tool_dir, tool_dir_extra_info.strip())
if chrome_tool_repo.CommitLocally(commit_message):
- self._logger.LogError(
- 'Commit to local branch "{0}" failed, aborted.'.format(
- TEMP_BRANCH_NAME))
+ self._logger.LogError('Commit to local branch "{0}" failed, aborted.'.
+ format(TEMP_BRANCH_NAME))
return False
return True
@@ -219,12 +215,12 @@ class Bootstrapper(object):
command = ('cd "{0}" && git cat-file -p {1} '
'| grep -E "^tree [a-f0-9]+$" '
'| cut -d" " -f2').format(chrome_tool_dir, tool_branch)
- ret, stdout, _ = self._ce.RunCommandWOutput(command,
- print_to_console=False)
+ ret, stdout, _ = self._ce.RunCommandWOutput(
+ command, print_to_console=False)
# Pipe operation always has a zero return value. So need to check if
# stdout is valid.
- if not ret and stdout and re.match('[0-9a-h]{40}', stdout.strip(),
- re.IGNORECASE):
+ if not ret and stdout and re.match('[0-9a-h]{40}',
+ stdout.strip(), re.IGNORECASE):
tool_branch_tree = stdout.strip()
self._logger.LogOutput('Find tree for {0} branch "{1}" - "{2}"'.format(
tool_name, tool_branch, tool_branch_tree))
@@ -270,8 +266,8 @@ class Bootstrapper(object):
"""
# To get the active gcc ebuild file, we need a workable chroot first.
- if not os.path.exists(os.path.join(
- self._chromeos_root, 'chroot')) and self._ce.RunCommand(
+ if not os.path.exists(
+ os.path.join(self._chromeos_root, 'chroot')) and self._ce.RunCommand(
'cd "{0}" && cros_sdk --create'.format(self._chromeos_root)):
self._logger.LogError(('Failed to install a initial chroot, aborted.\n'
'If previous bootstrap failed, do a '
@@ -284,12 +280,12 @@ class Bootstrapper(object):
'equery w sys-devel/{0}'.format(tool_name),
print_to_console=True)
if rv:
- self._logger.LogError(('Failed to execute inside chroot '
- '"equery w sys-devel/{0}", aborted.').format(
- tool_name))
+ self._logger.LogError(
+ ('Failed to execute inside chroot '
+ '"equery w sys-devel/{0}", aborted.').format(tool_name))
return (False, None, None)
- m = re.match(r'^.*/({0}/(.*\.ebuild))$'.format(EBUILD_PATH_PATTERN.format(
- tool_name)), stdout)
+ m = re.match(r'^.*/({0}/(.*\.ebuild))$'.format(
+ EBUILD_PATH_PATTERN.format(tool_name)), stdout)
if not m:
self._logger.LogError(
('Failed to find {0} ebuild file, aborted. '
@@ -324,7 +320,6 @@ class Bootstrapper(object):
tooltree = self._binutils_branch_tree
toolebuild = self._binutils_ebuild_file
-
assert tool
# An example for the following variables would be:
@@ -336,10 +331,8 @@ class Bootstrapper(object):
if not toolgithash:
return False
toolcomponents = 'toolchain/{}'.format(tool)
- return self.InplaceModifyToolEbuildFile(toolcomponents,
- toolgithash,
- tooltree,
- toolebuild)
+ return self.InplaceModifyToolEbuildFile(toolcomponents, toolgithash,
+ tooltree, toolebuild)
@staticmethod
def ResetToolEbuildFile(chromeos_root, tool_name):
@@ -357,8 +350,8 @@ class Bootstrapper(object):
path=('sys-devel/{0}/{0}-*.ebuild'.format(tool_name)),
staged=False)
if rv:
- cmd = 'cd {0} && git checkout --'.format(os.path.join(
- chromeos_root, CHROMIUMOS_OVERLAY_PATH))
+ cmd = 'cd {0} && git checkout --'.format(
+ os.path.join(chromeos_root, CHROMIUMOS_OVERLAY_PATH))
for g in rv:
cmd += ' ' + g
rv = command_executer.GetCommandExecuter().RunCommand(cmd)
@@ -401,12 +394,8 @@ class Bootstrapper(object):
repo, print_to_console=True))
return repo
-
- def InplaceModifyToolEbuildFile(self,
- tool_components,
- tool_branch_githash,
- tool_branch_tree,
- tool_ebuild_file):
+ def InplaceModifyToolEbuildFile(self, tool_components, tool_branch_githash,
+ tool_branch_tree, tool_ebuild_file):
"""Using sed to fill properly values into the ebuild file.
Args:
@@ -433,10 +422,8 @@ class Bootstrapper(object):
' # The following line is modified by script.\' '
'-e \'s!^CROS_WORKON_TREE=".*"$!CROS_WORKON_TREE="{3}"!\' '
'{4}').format('/home/{}/ndk-root'.format(os.environ['USER']),
- tool_components,
- tool_branch_githash,
- tool_branch_tree,
- tool_ebuild_file)
+ tool_components, tool_branch_githash,
+ tool_branch_tree, tool_ebuild_file)
rv = self._ce.RunCommand(command)
if rv:
self._logger.LogError(
@@ -477,12 +464,11 @@ class Bootstrapper(object):
True if operation succeeds.
"""
- chroot_ndk_root = os.path.join(self._chromeos_root, 'chroot',
- 'home', os.environ['USER'],
- 'ndk-root')
+ chroot_ndk_root = os.path.join(self._chromeos_root, 'chroot', 'home',
+ os.environ['USER'], 'ndk-root')
self._ce.RunCommand('mkdir -p {}'.format(chroot_ndk_root))
- if self._ce.RunCommand('sudo mount --bind {} {}'.format(
- self._ndk_dir, chroot_ndk_root)):
+ if self._ce.RunCommand(
+ 'sudo mount --bind {} {}'.format(self._ndk_dir, chroot_ndk_root)):
self._logger.LogError('Failed to mount ndk dir into chroot')
return False
@@ -509,25 +495,24 @@ class Bootstrapper(object):
target_built.add(target)
command = 'sudo emerge cross-{0}/{1}'.format(target, tool_name)
- rv = self._ce.ChrootRunCommand(self._chromeos_root,
- command,
- print_to_console=True)
+ rv = self._ce.ChrootRunCommand(
+ self._chromeos_root, command, print_to_console=True)
if rv:
- self._logger.LogError('Build {0} failed for {1}, aborted.'.format(
- tool_name, board))
+ self._logger.LogError(
+ 'Build {0} failed for {1}, aborted.'.format(tool_name, board))
failed.append(board)
else:
- self._logger.LogOutput('Successfully built {0} for board {1}.'.format(
- tool_name, board))
+ self._logger.LogOutput(
+ 'Successfully built {0} for board {1}.'.format(tool_name, board))
finally:
# Make sure we un-mount ndk-root before we leave here, regardless of the
# build result of the tool. Otherwise we may inadvertently delete ndk-root
# dir, which is not part of the chroot and could be disastrous.
if chroot_ndk_root:
if self._ce.RunCommand('sudo umount {}'.format(chroot_ndk_root)):
- self._logger.LogWarning(('Failed to umount "{}", please check '
- 'before deleting chroot.').format(
- chroot_ndk_root))
+ self._logger.LogWarning(
+ ('Failed to umount "{}", please check '
+ 'before deleting chroot.').format(chroot_ndk_root))
# Clean up soft links created during build.
self._ce.RunCommand('cd {}/toolchain/{} && git clean -df'.format(
@@ -556,8 +541,8 @@ class Bootstrapper(object):
self._chromeos_root, logfile)
rv = self._ce.RunCommand(command, print_to_console=True)
if rv:
- self._logger.LogError('Bootstrapping failed, log file - "{0}"\n'.format(
- logfile))
+ self._logger.LogError(
+ 'Bootstrapping failed, log file - "{0}"\n'.format(logfile))
return False
self._logger.LogOutput('Bootstrap succeeded.')
@@ -642,87 +627,99 @@ class Bootstrapper(object):
def Main(argv):
parser = argparse.ArgumentParser()
- parser.add_argument('-c',
- '--chromeos_root',
- dest='chromeos_root',
- help=('Optional. ChromeOs root dir. '
- 'When not specified, chromeos root will be deduced'
- ' from current working directory.'))
- parser.add_argument('--ndk_dir',
- dest='ndk_dir',
- help=('Topmost android ndk dir, required. '
- 'Do not need to include the "toolchain/*" part.'))
- parser.add_argument('--gcc_branch',
- dest='gcc_branch',
- help=('The branch to test against. '
- 'This branch must be a local branch '
- 'inside "src/third_party/gcc". '
- 'Notice, this must not be used with "--gcc_dir".'))
- parser.add_argument('--binutils_branch',
- dest='binutils_branch',
- help=('The branch to test against binutils. '
- 'This branch must be a local branch '
- 'inside "src/third_party/binutils". '
- 'Notice, this must not be used with '
- '"--binutils_dir".'))
- parser.add_argument('-g',
- '--gcc_dir',
- dest='gcc_dir',
- help=('Use a local gcc tree to do bootstrapping. '
- 'Notice, this must not be used with '
- '"--gcc_branch".'))
- parser.add_argument('--binutils_dir',
- dest='binutils_dir',
- help=('Use a local binutils tree to do bootstrapping. '
- 'Notice, this must not be used with '
- '"--binutils_branch".'))
- parser.add_argument('--fixperm',
- dest='fixperm',
- default=False,
- action='store_true',
- help=('Fix the (notorious) permission error '
- 'while trying to bootstrap the chroot. '
- 'Note this takes an extra 10-15 minutes '
- 'and is only needed once per chromiumos tree.'))
- parser.add_argument('--setup_tool_ebuild_file_only',
- dest='setup_tool_ebuild_file_only',
- default=False,
- action='store_true',
- help=('Setup gcc and/or binutils ebuild file '
- 'to pick up the branch (--gcc/binutils_branch) or '
- 'use gcc and/or binutils source '
- '(--gcc/binutils_dir) and exit. Keep chroot as is.'
- ' This should not be used with '
- '--gcc/binutils_dir/branch options.'))
- parser.add_argument('--reset_tool_ebuild_file',
- dest='reset_tool_ebuild_file',
- default=False,
- action='store_true',
- help=('Reset the modification that is done by this '
- 'script. Note, when this script is running, it '
- 'will modify the active gcc/binutils ebuild file. '
- 'Use this option to reset (what this script has '
- 'done) and exit. This should not be used with -- '
- 'gcc/binutils_dir/branch options.'))
- parser.add_argument('--board',
- dest='board',
- default=None,
- help=('Only build toolchain for specific board(s). '
- 'Use "host" to build for host. '
- 'Use "," to seperate multiple boards. '
- 'This does not perform a chroot bootstrap.'))
- parser.add_argument('--bootstrap',
- dest='bootstrap',
- default=False,
- action='store_true',
- help=('Performs a chroot bootstrap. '
- 'Note, this will *destroy* your current chroot.'))
- parser.add_argument('--disable-2nd-bootstrap',
- dest='disable_2nd_bootstrap',
- default=False,
- action='store_true',
- help=('Disable a second bootstrap '
- '(build of amd64-host stage).'))
+ parser.add_argument(
+ '-c',
+ '--chromeos_root',
+ dest='chromeos_root',
+ help=('Optional. ChromeOs root dir. '
+ 'When not specified, chromeos root will be deduced'
+ ' from current working directory.'))
+ parser.add_argument(
+ '--ndk_dir',
+ dest='ndk_dir',
+ help=('Topmost android ndk dir, required. '
+ 'Do not need to include the "toolchain/*" part.'))
+ parser.add_argument(
+ '--gcc_branch',
+ dest='gcc_branch',
+ help=('The branch to test against. '
+ 'This branch must be a local branch '
+ 'inside "src/third_party/gcc". '
+ 'Notice, this must not be used with "--gcc_dir".'))
+ parser.add_argument(
+ '--binutils_branch',
+ dest='binutils_branch',
+ help=('The branch to test against binutils. '
+ 'This branch must be a local branch '
+ 'inside "src/third_party/binutils". '
+ 'Notice, this must not be used with '
+ '"--binutils_dir".'))
+ parser.add_argument(
+ '-g',
+ '--gcc_dir',
+ dest='gcc_dir',
+ help=('Use a local gcc tree to do bootstrapping. '
+ 'Notice, this must not be used with '
+ '"--gcc_branch".'))
+ parser.add_argument(
+ '--binutils_dir',
+ dest='binutils_dir',
+ help=('Use a local binutils tree to do bootstrapping. '
+ 'Notice, this must not be used with '
+ '"--binutils_branch".'))
+ parser.add_argument(
+ '--fixperm',
+ dest='fixperm',
+ default=False,
+ action='store_true',
+ help=('Fix the (notorious) permission error '
+ 'while trying to bootstrap the chroot. '
+ 'Note this takes an extra 10-15 minutes '
+ 'and is only needed once per chromiumos tree.'))
+ parser.add_argument(
+ '--setup_tool_ebuild_file_only',
+ dest='setup_tool_ebuild_file_only',
+ default=False,
+ action='store_true',
+ help=('Setup gcc and/or binutils ebuild file '
+ 'to pick up the branch (--gcc/binutils_branch) or '
+ 'use gcc and/or binutils source '
+ '(--gcc/binutils_dir) and exit. Keep chroot as is.'
+ ' This should not be used with '
+ '--gcc/binutils_dir/branch options.'))
+ parser.add_argument(
+ '--reset_tool_ebuild_file',
+ dest='reset_tool_ebuild_file',
+ default=False,
+ action='store_true',
+ help=('Reset the modification that is done by this '
+ 'script. Note, when this script is running, it '
+ 'will modify the active gcc/binutils ebuild file. '
+ 'Use this option to reset (what this script has '
+ 'done) and exit. This should not be used with -- '
+ 'gcc/binutils_dir/branch options.'))
+ parser.add_argument(
+ '--board',
+ dest='board',
+ default=None,
+ help=('Only build toolchain for specific board(s). '
+ 'Use "host" to build for host. '
+ 'Use "," to seperate multiple boards. '
+ 'This does not perform a chroot bootstrap.'))
+ parser.add_argument(
+ '--bootstrap',
+ dest='bootstrap',
+ default=False,
+ action='store_true',
+ help=('Performs a chroot bootstrap. '
+ 'Note, this will *destroy* your current chroot.'))
+ parser.add_argument(
+ '--disable-2nd-bootstrap',
+ dest='disable_2nd_bootstrap',
+ default=False,
+ action='store_true',
+ help=('Disable a second bootstrap '
+ '(build of amd64-host stage).'))
options = parser.parse_args(argv)
# Trying to deduce chromeos root from current directory.
@@ -740,12 +737,12 @@ def Main(argv):
parser.error('Missing or failing to deduce mandatory option "--chromeos".')
return 1
- options.chromeos_root = os.path.abspath(os.path.expanduser(
- options.chromeos_root))
+ options.chromeos_root = os.path.abspath(
+ os.path.expanduser(options.chromeos_root))
if not os.path.isdir(options.chromeos_root):
- logger.GetLogger().LogError('"{0}" does not exist.'.format(
- options.chromeos_root))
+ logger.GetLogger().LogError(
+ '"{0}" does not exist.'.format(options.chromeos_root))
return 1
options.ndk_dir = os.path.expanduser(options.ndk_dir)
@@ -755,8 +752,8 @@ def Main(argv):
# Some tolerance regarding user input. We only need the ndk_root part, do not
# include toolchain/(gcc|binutils)/ part in this option.
- options.ndk_dir = re.sub(
- '/toolchain(/gcc|/binutils)?/?$', '', options.ndk_dir)
+ options.ndk_dir = re.sub('/toolchain(/gcc|/binutils)?/?$', '',
+ options.ndk_dir)
if not (os.path.isdir(options.ndk_dir) and
os.path.isdir(os.path.join(options.ndk_dir, 'toolchain'))):
@@ -766,11 +763,11 @@ def Main(argv):
if options.fixperm:
# Fix perm error before continuing.
- cmd = (
- r'sudo find "{0}" \( -name ".cache" -type d -prune \) -o '
- r'\( -name "chroot" -type d -prune \) -o '
- r'\( -type f -exec chmod a+r {{}} \; \) -o '
- r'\( -type d -exec chmod a+rx {{}} \; \)').format(options.chromeos_root)
+ cmd = (r'sudo find "{0}" \( -name ".cache" -type d -prune \) -o '
+ r'\( -name "chroot" -type d -prune \) -o '
+ r'\( -type f -exec chmod a+r {{}} \; \) -o '
+ r'\( -type d -exec chmod a+rx {{}} \; \)'
+ ).format(options.chromeos_root)
logger.GetLogger().LogOutput(
'Fixing perm issues for chromeos root, this might take some time.')
command_executer.GetCommandExecuter().RunCommand(cmd)
@@ -792,8 +789,8 @@ def Main(argv):
if options.gcc_dir:
options.gcc_dir = os.path.abspath(os.path.expanduser(options.gcc_dir))
if not os.path.isdir(options.gcc_dir):
- logger.GetLogger().LogError('"{0}" does not exist.'.format(
- options.gcc_dir))
+ logger.GetLogger().LogError(
+ '"{0}" does not exist.'.format(options.gcc_dir))
return 1
if options.gcc_branch and options.gcc_dir:
@@ -801,11 +798,11 @@ def Main(argv):
return 1
if options.binutils_dir:
- options.binutils_dir = os.path.abspath(os.path.expanduser(
- options.binutils_dir))
+ options.binutils_dir = os.path.abspath(
+ os.path.expanduser(options.binutils_dir))
if not os.path.isdir(options.binutils_dir):
- logger.GetLogger().LogError('"{0}" does not exist.'.format(
- options.binutils_dir))
+ logger.GetLogger().LogError(
+ '"{0}" does not exist.'.format(options.binutils_dir))
return 1
if options.binutils_branch and options.binutils_dir:
@@ -813,8 +810,8 @@ def Main(argv):
'"--binutils_branch" can be specified.')
return 1
- if (not (options.binutils_branch or options.binutils_dir or options.gcc_branch
- or options.gcc_dir)):
+ if (not (options.binutils_branch or options.binutils_dir or
+ options.gcc_branch or options.gcc_dir)):
parser.error(('At least one of "--gcc_dir", "--gcc_branch", '
'"--binutils_dir" and "--binutils_branch" must '
'be specified.'))
diff --git a/chromiumos_image_diff.py b/chromiumos_image_diff.py
index 68791ac5..82e4e17d 100755
--- a/chromiumos_image_diff.py
+++ b/chromiumos_image_diff.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
"""Diff 2 chromiumos images by comparing each elf file.
The script diffs every *ELF* files by dissembling every *executable*
@@ -53,8 +53,8 @@ class CrosImage(object):
self.stateful = '/tmp/{0}.stateful'.format(mount_basename)
self.unmount_script = '/tmp/{0}.unmount.sh'.format(mount_basename)
else:
- self.rootfs = tempfile.mkdtemp(suffix='.rootfs',
- prefix='chromiumos_image_diff')
+ self.rootfs = tempfile.mkdtemp(
+ suffix='.rootfs', prefix='chromiumos_image_diff')
## rootfs is like /tmp/tmpxyz012.rootfs.
match = re.match(r'^(.*)\.rootfs$', self.rootfs)
basename = match.group(1)
@@ -78,14 +78,15 @@ class CrosImage(object):
def CreateUnmountScript(self):
command = ('sudo umount {r}/usr/local {r}/usr/share/oem '
'{r}/var {r}/mnt/stateful_partition {r}; sudo umount {s} ; '
- 'rmdir {r} ; rmdir {s}\n').format(r=self.rootfs, s=self.stateful)
+ 'rmdir {r} ; rmdir {s}\n').format(
+ r=self.rootfs, s=self.stateful)
f = open(self.unmount_script, 'w')
f.write(command)
f.close()
- self._ce.RunCommand('chmod +x {}'.format(self.unmount_script),
- print_to_console=False)
- self.logger.LogOutput('Created an unmount script - "{0}"'.format(
- self.unmount_script))
+ self._ce.RunCommand(
+ 'chmod +x {}'.format(self.unmount_script), print_to_console=False)
+ self.logger.LogOutput(
+ 'Created an unmount script - "{0}"'.format(self.unmount_script))
def UnmountImage(self):
"""Unmount the image and delete mount point."""
@@ -114,11 +115,12 @@ class CrosImage(object):
Always true
"""
- self.logger.LogOutput('Finding all elf files in "{0}" ...'.format(
- self.rootfs))
+ self.logger.LogOutput(
+ 'Finding all elf files in "{0}" ...'.format(self.rootfs))
# Note '\;' must be prefixed by 'r'.
command = ('find "{0}" -type f -exec '
- 'bash -c \'file -b "{{}}" | grep -q "ELF"\'' r' \; '
+ 'bash -c \'file -b "{{}}" | grep -q "ELF"\''
+ r' \; '
r'-exec echo "{{}}" \;').format(self.rootfs)
self.logger.LogCmd(command)
_, out, _ = self._ce.RunCommandWOutput(command, print_to_console=False)
@@ -142,8 +144,8 @@ class ImageComparator(object):
if self.tempf1 and self.tempf2:
command_executer.GetCommandExecuter().RunCommand(
'rm {0} {1}'.format(self.tempf1, self.tempf2))
- logger.GetLogger('Removed "{0}" and "{1}".'.format(
- self.tempf1, self.tempf2))
+ logger.GetLogger(
+ 'Removed "{0}" and "{1}".'.format(self.tempf1, self.tempf2))
def CheckElfFileSetEquality(self):
"""Checking whether images have exactly number of elf files."""
@@ -183,8 +185,8 @@ class ImageComparator(object):
match_count = 0
i1 = self.images[0]
i2 = self.images[1]
- self.logger.LogOutput('Start comparing {0} elf file by file ...'.format(
- len(i1.elf_files)))
+ self.logger.LogOutput(
+ 'Start comparing {0} elf file by file ...'.format(len(i1.elf_files)))
## Note - i1.elf_files and i2.elf_files have exactly the same entries here.
## Create 2 temp files to be used for all disassembed files.
@@ -205,35 +207,41 @@ class ImageComparator(object):
'Error: We\'re comparing the SAME file - {0}'.format(f1))
continue
- command = ('objdump -d "{f1}" > {tempf1} ; '
- 'objdump -d "{f2}" > {tempf2} ; '
- # Remove path string inside the dissemble
- 'sed -i \'s!{rootfs1}!!g\' {tempf1} ; '
- 'sed -i \'s!{rootfs2}!!g\' {tempf2} ; '
- 'diff {tempf1} {tempf2} 1>/dev/null 2>&1').format(
- f1=full_path1, f2=full_path2,
- rootfs1=i1.rootfs, rootfs2=i2.rootfs,
- tempf1=self.tempf1, tempf2=self.tempf2)
+ command = (
+ 'objdump -d "{f1}" > {tempf1} ; '
+ 'objdump -d "{f2}" > {tempf2} ; '
+ # Remove path string inside the dissemble
+ 'sed -i \'s!{rootfs1}!!g\' {tempf1} ; '
+ 'sed -i \'s!{rootfs2}!!g\' {tempf2} ; '
+ 'diff {tempf1} {tempf2} 1>/dev/null 2>&1').format(
+ f1=full_path1,
+ f2=full_path2,
+ rootfs1=i1.rootfs,
+ rootfs2=i2.rootfs,
+ tempf1=self.tempf1,
+ tempf2=self.tempf2)
ret = cmde.RunCommand(command, print_to_console=False)
if ret != 0:
- self.logger.LogOutput('*** Not match - "{0}" "{1}"'.format(
- full_path1, full_path2))
+ self.logger.LogOutput(
+ '*** Not match - "{0}" "{1}"'.format(full_path1, full_path2))
mismatch_list.append(f1)
if self.diff_file:
- command = (
- 'echo "Diffs of disassemble of \"{f1}\" and \"{f2}\"" '
- '>> {diff_file} ; diff {tempf1} {tempf2} '
- '>> {diff_file}').format(
- f1=full_path1, f2=full_path2, diff_file=self.diff_file,
- tempf1=self.tempf1, tempf2=self.tempf2)
+ command = ('echo "Diffs of disassemble of \"{f1}\" and \"{f2}\"" '
+ '>> {diff_file} ; diff {tempf1} {tempf2} '
+ '>> {diff_file}').format(
+ f1=full_path1,
+ f2=full_path2,
+ diff_file=self.diff_file,
+ tempf1=self.tempf1,
+ tempf2=self.tempf2)
cmde.RunCommand(command, print_to_console=False)
else:
match_count += 1
## End of comparing every elf files.
if not mismatch_list:
- self.logger.LogOutput('** COOL, ALL {0} BINARIES MATCHED!! **'.format(
- match_count))
+ self.logger.LogOutput(
+ '** COOL, ALL {0} BINARIES MATCHED!! **'.format(match_count))
return True
mismatch_str = 'Found {0} mismatch:\n'.format(len(mismatch_list))
@@ -252,24 +260,44 @@ def Main(argv):
parser = argparse.ArgumentParser()
parser.add_argument(
- '--no_unmount', action='store_true', dest='no_unmount', default=False,
+ '--no_unmount',
+ action='store_true',
+ dest='no_unmount',
+ default=False,
help='Do not unmount after finish, this is useful for debugging.')
parser.add_argument(
- '--chromeos_root', dest='chromeos_root', default=None, action='store',
+ '--chromeos_root',
+ dest='chromeos_root',
+ default=None,
+ action='store',
help=('[Optional] Specify a chromeos tree instead of '
'deducing it from image path so that we can compare '
'2 images that are downloaded.'))
parser.add_argument(
- '--mount_basename', dest='mount_basename', default=None, action='store',
+ '--mount_basename',
+ dest='mount_basename',
+ default=None,
+ action='store',
help=('Specify a meaningful name for the mount point. With this being '
'set, the mount points would be "/tmp/mount_basename.x.rootfs" '
' and "/tmp/mount_basename.x.stateful". (x is 1 or 2).'))
- parser.add_argument('--diff_file', dest='diff_file', default=None,
- help='Dumping all the diffs (if any) to the diff file')
- parser.add_argument('--image1', dest='image1', default=None,
- required=True, help=('Image 1 file name.'))
- parser.add_argument('--image2', dest='image2', default=None,
- required=True, help=('Image 2 file name.'))
+ parser.add_argument(
+ '--diff_file',
+ dest='diff_file',
+ default=None,
+ help='Dumping all the diffs (if any) to the diff file')
+ parser.add_argument(
+ '--image1',
+ dest='image1',
+ default=None,
+ required=True,
+ help=('Image 1 file name.'))
+ parser.add_argument(
+ '--image2',
+ dest='image2',
+ default=None,
+ required=True,
+ help=('Image 2 file name.'))
options = parser.parse_args(argv[1:])
if options.mount_basename and options.mount_basename.find('/') >= 0:
diff --git a/command_executer_timeout_test.py b/command_executer_timeout_test.py
index ba0207ef..26f39334 100755
--- a/command_executer_timeout_test.py
+++ b/command_executer_timeout_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Timeout test for command_executer."""
diff --git a/cros_login.py b/cros_login.py
index 32dfcb70..06ff8ff0 100755
--- a/cros_login.py
+++ b/cros_login.py
@@ -1,9 +1,8 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
#
# Copyright 2010~2015 Google Inc. All Rights Reserved.
-"""Script to get past the login screen of ChromeOS.
+"""Script to get past the login screen of ChromeOS."""
-"""
from __future__ import print_function
import argparse
@@ -82,15 +81,17 @@ def RestartUI(remote, chromeos_root, login=True):
with open(host_login_script, 'w') as f:
f.write(full_login_script_contents)
- ce.CopyFiles(host_login_script,
- device_login_script,
- dest_machine=remote,
- chromeos_root=chromeos_root,
- recursive=False,
- dest_cros=True)
- ret = ce.CrosRunCommand('python %s' % device_login_script,
- chromeos_root=chromeos_root,
- machine=remote)
+ ce.CopyFiles(
+ host_login_script,
+ device_login_script,
+ dest_machine=remote,
+ chromeos_root=chromeos_root,
+ recursive=False,
+ dest_cros=True)
+ ret = ce.CrosRunCommand(
+ 'python %s' % device_login_script,
+ chromeos_root=chromeos_root,
+ machine=remote)
if os.path.exists(host_login_script):
os.remove(host_login_script)
return ret
@@ -99,14 +100,10 @@ def RestartUI(remote, chromeos_root, login=True):
def Main(argv):
"""The main function."""
parser = argparse.ArgumentParser()
- parser.add_argument('-r',
- '--remote',
- dest='remote',
- help='The remote ChromeOS box.')
- parser.add_argument('-c',
- '--chromeos_root',
- dest='chromeos_root',
- help='The ChromeOS root.')
+ parser.add_argument(
+ '-r', '--remote', dest='remote', help='The remote ChromeOS box.')
+ parser.add_argument(
+ '-c', '--chromeos_root', dest='chromeos_root', help='The ChromeOS root.')
options = parser.parse_args(argv)
diff --git a/cros_utils/misc.py b/cros_utils/misc.py
index 6c7d2909..939ed66b 100644
--- a/cros_utils/misc.py
+++ b/cros_utils/misc.py
@@ -66,7 +66,11 @@ def UnitToNumber(unit_num, base=1000):
def GetFilenameFromString(string):
- return ApplySubs(string, (r'/', '__'), (r'\s', '_'), (r'[\\$="?^]', ''),)
+ return ApplySubs(
+ string,
+ (r'/', '__'),
+ (r'\s', '_'),
+ (r'[\\$="?^]', ''),)
def GetRoot(scr_name):
@@ -143,16 +147,16 @@ def GetBuildPackagesCommand(board, usepkg=False, debug=False):
withdebug_flag = '--nowithdebug'
return ('%s/build_packages %s --withdev --withtest --withautotest '
'--skip_toolchain_update %s --board=%s '
- '--accept_licenses=@CHROMEOS' %
- (CHROMEOS_SCRIPTS_DIR, usepkg_flag, withdebug_flag, board))
+ '--accept_licenses=@CHROMEOS' % (CHROMEOS_SCRIPTS_DIR, usepkg_flag,
+ withdebug_flag, board))
def GetBuildImageCommand(board, dev=False):
dev_args = ''
if dev:
dev_args = '--noenable_rootfs_verification --disk_layout=2gb-rootfs'
- return ('%s/build_image --board=%s %s test' %
- (CHROMEOS_SCRIPTS_DIR, board, dev_args))
+ return ('%s/build_image --board=%s %s test' % (CHROMEOS_SCRIPTS_DIR, board,
+ dev_args))
def GetSetupBoardCommand(board,
@@ -179,8 +183,8 @@ def GetSetupBoardCommand(board,
options.append('--accept_licenses=@CHROMEOS')
- return ('%s/setup_board --board=%s %s' %
- (CHROMEOS_SCRIPTS_DIR, board, ' '.join(options)))
+ return ('%s/setup_board --board=%s %s' % (CHROMEOS_SCRIPTS_DIR, board,
+ ' '.join(options)))
def CanonicalizePath(path):
@@ -192,8 +196,8 @@ def CanonicalizePath(path):
def GetCtargetFromBoard(board, chromeos_root):
"""Get Ctarget from board."""
base_board = board.split('_')[0]
- command = ('source %s; get_ctarget_from_board %s' %
- (TOOLCHAIN_UTILS_PATH, base_board))
+ command = ('source %s; get_ctarget_from_board %s' % (TOOLCHAIN_UTILS_PATH,
+ base_board))
ce = command_executer.GetCommandExecuter()
ret, out, _ = ce.ChrootRunCommandWOutput(chromeos_root, command)
if ret != 0:
@@ -206,8 +210,8 @@ def GetCtargetFromBoard(board, chromeos_root):
def GetArchFromBoard(board, chromeos_root):
"""Get Arch from board."""
base_board = board.split('_')[0]
- command = ('source %s; get_board_arch %s' %
- (TOOLCHAIN_UTILS_PATH, base_board))
+ command = ('source %s; get_board_arch %s' % (TOOLCHAIN_UTILS_PATH,
+ base_board))
ce = command_executer.GetCommandExecuter()
ret, out, _ = ce.ChrootRunCommandWOutput(chromeos_root, command)
if ret != 0:
@@ -318,16 +322,14 @@ def HasGitStagedChanges(git_dir):
command = 'cd {0} && git diff --quiet --cached --exit-code HEAD'.format(
git_dir)
return command_executer.GetCommandExecuter().RunCommand(
- command,
- print_to_console=False)
+ command, print_to_console=False)
def HasGitUnstagedChanges(git_dir):
"""Return True if git repository has un-staged changes."""
command = 'cd {0} && git diff --quiet --exit-code HEAD'.format(git_dir)
return command_executer.GetCommandExecuter().RunCommand(
- command,
- print_to_console=False)
+ command, print_to_console=False)
def HasGitUntrackedChanges(git_dir):
@@ -335,8 +337,7 @@ def HasGitUntrackedChanges(git_dir):
command = ('cd {0} && test -z '
'$(git ls-files --exclude-standard --others)').format(git_dir)
return command_executer.GetCommandExecuter().RunCommand(
- command,
- print_to_console=False)
+ command, print_to_console=False)
def GitGetCommitHash(git_dir, commit_symbolic_name):
@@ -357,8 +358,7 @@ def GitGetCommitHash(git_dir, commit_symbolic_name):
command = ('cd {0} && git log -n 1 --pretty="format:%H" {1}').format(
git_dir, commit_symbolic_name)
rv, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
- command,
- print_to_console=False)
+ command, print_to_console=False)
if rv == 0:
return out.strip()
return None
@@ -402,8 +402,7 @@ def GetGitChangesAsList(git_dir, path=None, staged=False):
if path:
command += ' -- ' + path
_, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
- command,
- print_to_console=False)
+ command, print_to_console=False)
rv = []
for line in out.splitlines():
rv.append(line)
@@ -411,8 +410,8 @@ def GetGitChangesAsList(git_dir, path=None, staged=False):
def IsChromeOsTree(chromeos_root):
- return (os.path.isdir(os.path.join(chromeos_root,
- 'src/third_party/chromiumos-overlay')) and
+ return (os.path.isdir(
+ os.path.join(chromeos_root, 'src/third_party/chromiumos-overlay')) and
os.path.isdir(os.path.join(chromeos_root, 'manifest')))
@@ -436,25 +435,22 @@ def DeleteChromeOsTree(chromeos_root, dry_run=False):
print(cmd0)
else:
if command_executer.GetCommandExecuter().RunCommand(
- cmd0,
- print_to_console=True) != 0:
+ cmd0, print_to_console=True) != 0:
return False
cmd1 = ('export CHROMEOSDIRNAME="$(dirname $(cd {0} && pwd))" && '
'export CHROMEOSBASENAME="$(basename $(cd {0} && pwd))" && '
- 'cd $CHROMEOSDIRNAME && sudo rm -fr $CHROMEOSBASENAME').format(
- chromeos_root)
+ 'cd $CHROMEOSDIRNAME && sudo rm -fr $CHROMEOSBASENAME'
+ ).format(chromeos_root)
if dry_run:
print(cmd1)
return True
return command_executer.GetCommandExecuter().RunCommand(
- cmd1,
- print_to_console=True) == 0
+ cmd1, print_to_console=True) == 0
-def ApplyGerritPatches(chromeos_root,
- gerrit_patch_string,
+def ApplyGerritPatches(chromeos_root, gerrit_patch_string,
branch='cros/master'):
"""Apply gerrit patches on a chromeos tree.
@@ -491,8 +487,8 @@ def ApplyGerritPatches(chromeos_root,
pi_str = '{project}:{ref}'.format(project=pi.project, ref=pi.ref)
try:
project_git_path = project_checkout.GetPath(absolute=True)
- logger.GetLogger().LogOutput('Applying patch "{0}" in "{1}" ...'.format(
- pi_str, project_git_path))
+ logger.GetLogger().LogOutput(
+ 'Applying patch "{0}" in "{1}" ...'.format(pi_str, project_git_path))
pi.Apply(project_git_path, branch, trivial=False)
except Exception:
traceback.print_exc(file=sys.stdout)
@@ -521,8 +517,8 @@ def BooleanPrompt(prompt='Do you want to continue?',
true_value, false_value = true_value.lower(), false_value.lower()
true_text, false_text = true_value, false_value
if true_value == false_value:
- raise ValueError('true_value and false_value must differ: got %r' %
- true_value)
+ raise ValueError(
+ 'true_value and false_value must differ: got %r' % true_value)
if default:
true_text = true_text[0].upper() + true_text[1:]
@@ -556,14 +552,16 @@ def BooleanPrompt(prompt='Do you want to continue?',
elif false_value.startswith(response):
return False
+
+# pylint: disable=unused-argument
def rgb2short(r, g, b):
- """ Converts RGB values to xterm-256 color. """
+ """Converts RGB values to xterm-256 color."""
- redcolor = [255, 124, 160, 196, 9 ]
- greencolor = [255, 118, 82, 46, 10 ]
+ redcolor = [255, 124, 160, 196, 9]
+ greencolor = [255, 118, 82, 46, 10]
if g == 0:
- return redcolor[r/52]
+ return redcolor[r / 52]
if r == 0:
- return greencolor[g/52]
+ return greencolor[g / 52]
return 4
diff --git a/cros_utils/tabulator.py b/cros_utils/tabulator.py
index 98f126bc..6936d35f 100644
--- a/cros_utils/tabulator.py
+++ b/cros_utils/tabulator.py
@@ -57,7 +57,6 @@ table:
cell_table = tf.GetCellTable()
tp = TablePrinter(cell_table, out_to)
print tp.Print()
-
"""
from __future__ import print_function
@@ -464,12 +463,13 @@ class KeyAwareComparisonResult(ComparisonResult):
# --texture_upload_count--texture_upload_count--count (high is good)
# --total_deferred_image_decode_count--count (low is good)
# --total_tiles_analyzed--total_tiles_analyzed--count (high is good)
- lower_is_better_keys = ['milliseconds', 'ms_', 'seconds_', 'KB', 'rdbytes',
- 'wrbytes', 'dropped_percent', '(ms)', '(seconds)',
- '--ms', '--average_num_missing_tiles',
- '--experimental_jank', '--experimental_mean_frame',
- '--experimental_median_frame_time',
- '--total_deferred_image_decode_count', '--seconds']
+ lower_is_better_keys = [
+ 'milliseconds', 'ms_', 'seconds_', 'KB', 'rdbytes', 'wrbytes',
+ 'dropped_percent', '(ms)', '(seconds)', '--ms',
+ '--average_num_missing_tiles', '--experimental_jank',
+ '--experimental_mean_frame', '--experimental_median_frame_time',
+ '--total_deferred_image_decode_count', '--seconds'
+ ]
return any([l in key for l in lower_is_better_keys])
@@ -608,12 +608,13 @@ class PValueFormat(Format):
def _ComputeFloat(self, cell):
cell.string_value = '%0.2f' % float(cell.value)
if float(cell.value) < 0.05:
- cell.bgcolor = self._GetColor(cell.value,
- Color(255, 255, 0, 0),
- Color(255, 255, 255, 0),
- Color(255, 255, 255, 0),
- mid_value=0.05,
- power=1)
+ cell.bgcolor = self._GetColor(
+ cell.value,
+ Color(255, 255, 0, 0),
+ Color(255, 255, 255, 0),
+ Color(255, 255, 255, 0),
+ mid_value=0.05,
+ power=1)
class StorageFormat(Format):
@@ -647,12 +648,13 @@ class CoeffVarFormat(Format):
def _ComputeFloat(self, cell):
cell.string_value = '%1.1f%%' % (float(cell.value) * 100)
- cell.color = self._GetColor(cell.value,
- Color(0, 255, 0, 0),
- Color(0, 0, 0, 0),
- Color(255, 0, 0, 0),
- mid_value=0.02,
- power=1)
+ cell.color = self._GetColor(
+ cell.value,
+ Color(0, 255, 0, 0),
+ Color(0, 0, 0, 0),
+ Color(255, 0, 0, 0),
+ mid_value=0.02,
+ power=1)
class PercentFormat(Format):
@@ -664,7 +666,8 @@ class PercentFormat(Format):
def _ComputeFloat(self, cell):
cell.string_value = '%+1.1f%%' % ((float(cell.value) - 1) * 100)
- cell.color = self._GetColor(cell.value, Color(255, 0, 0, 0),
+ cell.color = self._GetColor(cell.value,
+ Color(255, 0, 0, 0),
Color(0, 0, 0, 0), Color(0, 255, 0, 0))
@@ -677,7 +680,8 @@ class RatioFormat(Format):
def _ComputeFloat(self, cell):
cell.string_value = '%+1.1f%%' % ((cell.value - 1) * 100)
- cell.color = self._GetColor(cell.value, Color(255, 0, 0, 0),
+ cell.color = self._GetColor(cell.value,
+ Color(255, 0, 0, 0),
Color(0, 0, 0, 0), Color(0, 255, 0, 0))
@@ -693,7 +697,8 @@ class ColorBoxFormat(Format):
def _ComputeFloat(self, cell):
cell.string_value = '--'
- bgcolor = self._GetColor(cell.value, Color(255, 0, 0, 0),
+ bgcolor = self._GetColor(cell.value,
+ Color(255, 0, 0, 0),
Color(255, 255, 255, 0), Color(0, 255, 0, 0))
cell.bgcolor = bgcolor
cell.color = bgcolor
@@ -889,8 +894,8 @@ class TableFormatter(object):
def AddLabelName(self):
"""Put label on the top of the table."""
top_header = []
- base_colspan = len([c for c in self._columns if not c.result.NeedsBaseline()
- ])
+ base_colspan = len(
+ [c for c in self._columns if not c.result.NeedsBaseline()])
compare_colspan = len(self._columns)
# Find the row with the key 'retval', if it exists. This
# will be used to calculate the number of iterations that passed and
@@ -1179,14 +1184,17 @@ def GetComplexTable(runs, labels, out_to=TablePrinter.CONSOLE):
"""
tg = TableGenerator(runs, labels, TableGenerator.SORT_BY_VALUES_DESC)
table = tg.GetTable()
- columns = [Column(LiteralResult(), Format(), 'Literal'),
- Column(AmeanResult(), Format()), Column(StdResult(), Format()),
- Column(CoeffVarResult(), CoeffVarFormat()),
- Column(NonEmptyCountResult(), Format()),
- Column(AmeanRatioResult(), PercentFormat()),
- Column(AmeanRatioResult(), RatioFormat()),
- Column(GmeanRatioResult(), RatioFormat()),
- Column(PValueResult(), PValueFormat())]
+ columns = [
+ Column(LiteralResult(), Format(), 'Literal'), Column(
+ AmeanResult(), Format()), Column(StdResult(), Format()), Column(
+ CoeffVarResult(), CoeffVarFormat()), Column(
+ NonEmptyCountResult(), Format()),
+ Column(AmeanRatioResult(), PercentFormat()), Column(
+ AmeanRatioResult(), RatioFormat()), Column(GmeanRatioResult(),
+ RatioFormat()), Column(
+ PValueResult(),
+ PValueFormat())
+ ]
tf = TableFormatter(table, columns)
cell_table = tf.GetCellTable()
tp = TablePrinter(cell_table, out_to)
@@ -1195,38 +1203,55 @@ def GetComplexTable(runs, labels, out_to=TablePrinter.CONSOLE):
if __name__ == '__main__':
# Run a few small tests here.
- runs = [[{'k1': '10',
- 'k2': '12',
- 'k5': '40',
- 'k6': '40',
- 'ms_1': '20',
- 'k7': 'FAIL',
- 'k8': 'PASS',
- 'k9': 'PASS',
- 'k10': '0'}, {'k1': '13',
- 'k2': '14',
- 'k3': '15',
- 'ms_1': '10',
- 'k8': 'PASS',
- 'k9': 'FAIL',
- 'k10': '0'}], [{'k1': '50',
- 'k2': '51',
- 'k3': '52',
- 'k4': '53',
- 'k5': '35',
- 'k6': '45',
- 'ms_1': '200',
- 'ms_2': '20',
- 'k7': 'FAIL',
- 'k8': 'PASS',
- 'k9': 'PASS'}]]
+ runs = [[{
+ 'k1': '10',
+ 'k2': '12',
+ 'k5': '40',
+ 'k6': '40',
+ 'ms_1': '20',
+ 'k7': 'FAIL',
+ 'k8': 'PASS',
+ 'k9': 'PASS',
+ 'k10': '0'
+ }, {
+ 'k1': '13',
+ 'k2': '14',
+ 'k3': '15',
+ 'ms_1': '10',
+ 'k8': 'PASS',
+ 'k9': 'FAIL',
+ 'k10': '0'
+ }], [{
+ 'k1': '50',
+ 'k2': '51',
+ 'k3': '52',
+ 'k4': '53',
+ 'k5': '35',
+ 'k6': '45',
+ 'ms_1': '200',
+ 'ms_2': '20',
+ 'k7': 'FAIL',
+ 'k8': 'PASS',
+ 'k9': 'PASS'
+ }]]
labels = ['vanilla', 'modified']
t = GetComplexTable(runs, labels, TablePrinter.CONSOLE)
print(t)
email = GetComplexTable(runs, labels, TablePrinter.EMAIL)
- runs = [[{'k1': '1'}, {'k1': '1.1'}, {'k1': '1.2'}],
- [{'k1': '5'}, {'k1': '5.1'}, {'k1': '5.2'}]]
+ runs = [[{
+ 'k1': '1'
+ }, {
+ 'k1': '1.1'
+ }, {
+ 'k1': '1.2'
+ }], [{
+ 'k1': '5'
+ }, {
+ 'k1': '5.1'
+ }, {
+ 'k1': '5.2'
+ }]]
t = GetComplexTable(runs, labels, TablePrinter.CONSOLE)
print(t)
diff --git a/crosperf/benchmark.py b/crosperf/benchmark.py
index 3f0a842a..bbb1cdfc 100644
--- a/crosperf/benchmark.py
+++ b/crosperf/benchmark.py
@@ -1,9 +1,8 @@
-
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Define a type that wraps a Benchmark instance."""
+from __future__ import print_function
import math
from scipy import stats
@@ -20,20 +19,22 @@ _estimated_stddev = {
'page_cycler_v2.typical_25': 0.021,
}
+
# Get #samples needed to guarantee a given confidence interval, assuming the
# samples follow normal distribution.
def _samples(b):
- # TODO: Make this an option
- # CI = (0.9, 0.02), i.e., 90% chance that |sample mean - true mean| < 2%.
- p = 0.9
- e = 0.02
- if b not in _estimated_stddev:
- return 1
- d = _estimated_stddev[b]
- # Get at least 2 samples so as to calculate standard deviation, which is
- # needed in T-test for p-value.
- n = int(math.ceil((stats.norm.isf((1 - p) / 2) * d / e) ** 2))
- return n if n > 1 else 2
+ # TODO: Make this an option
+ # CI = (0.9, 0.02), i.e., 90% chance that |sample mean - true mean| < 2%.
+ p = 0.9
+ e = 0.02
+ if b not in _estimated_stddev:
+ return 1
+ d = _estimated_stddev[b]
+ # Get at least 2 samples so as to calculate standard deviation, which is
+ # needed in T-test for p-value.
+ n = int(math.ceil((stats.norm.isf((1 - p) / 2) * d / e)**2))
+ return n if n > 1 else 2
+
class Benchmark(object):
"""Class representing a benchmark to be run.
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
index e53187e2..bba71a36 100644
--- a/crosperf/benchmark_run.py
+++ b/crosperf/benchmark_run.py
@@ -1,8 +1,6 @@
-
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Module of benchmark runs."""
from __future__ import print_function
@@ -30,6 +28,7 @@ STATUS_PENDING = 'PENDING'
class BenchmarkRun(threading.Thread):
"""The benchmarkrun class."""
+
def __init__(self, name, benchmark, label, iteration, cache_conditions,
machine_manager, logger_to_use, log_level, share_cache):
threading.Thread.__init__(self)
@@ -53,8 +52,8 @@ class BenchmarkRun(threading.Thread):
self.test_args = benchmark.test_args
self.cache = None
self.profiler_args = self.GetExtraAutotestArgs()
- self._ce = command_executer.GetCommandExecuter(self._logger,
- log_level=self.log_level)
+ self._ce = command_executer.GetCommandExecuter(
+ self._logger, log_level=self.log_level)
self.timeline = timeline.Timeline()
self.timeline.Record(STATUS_PENDING)
self.share_cache = share_cache
@@ -96,8 +95,7 @@ class BenchmarkRun(threading.Thread):
err = 'No cache hit.'
self.result = Result.CreateFromRun(
self._logger, self.log_level, self.label, self.machine, output, err,
- retval, self.benchmark.test_name,
- self.benchmark.suite)
+ retval, self.benchmark.test_name, self.benchmark.suite)
else:
self._logger.LogOutput('%s: No cache hit.' % self.name)
@@ -141,8 +139,8 @@ class BenchmarkRun(threading.Thread):
pass
elif self.machine:
if not self.machine.IsReachable():
- self._logger.LogOutput('Machine %s is not reachable, removing it.' %
- self.machine.name)
+ self._logger.LogOutput(
+ 'Machine %s is not reachable, removing it.' % self.machine.name)
self.machine_manager.RemoveMachine(self.machine.name)
self._logger.LogOutput('Releasing machine: %s' % self.machine.name)
self.machine_manager.ReleaseMachine(self.machine)
@@ -190,8 +188,10 @@ class BenchmarkRun(threading.Thread):
perf_args = ' '.join(perf_args_list)
if not perf_args_list[0] in ['record', 'stat']:
raise SyntaxError('perf_args must start with either record or stat')
- extra_test_args = ['--profiler=custom_perf',
- ("--profiler_args='perf_options=\"%s\"'" % perf_args)]
+ extra_test_args = [
+ '--profiler=custom_perf',
+ ("--profiler_args='perf_options=\"%s\"'" % perf_args)
+ ]
return ' '.join(extra_test_args)
else:
return ''
@@ -254,9 +254,9 @@ class MockBenchmarkRun(BenchmarkRun):
self.timeline.Record(STATUS_IMAGING)
self.machine_manager.ImageMachine(machine, self.label)
self.timeline.Record(STATUS_RUNNING)
- [retval, out, err] = self.suite_runner.Run(machine.name, self.label,
- self.benchmark, self.test_args,
- self.profiler_args)
+ [retval, out,
+ err] = self.suite_runner.Run(machine.name, self.label, self.benchmark,
+ self.test_args, self.profiler_args)
self.run_completed = True
rr = MockResult('logger', self.label, self.log_level, machine)
rr.out = out
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
index 9af66a33..74757ac2 100755
--- a/crosperf/benchmark_run_unittest.py
+++ b/crosperf/benchmark_run_unittest.py
@@ -117,11 +117,10 @@ class BenchmarkRunTest(unittest.TestCase):
pass
def test_run(self):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
def MockLogOutput(msg, print_to_console=False):
'Helper function for test_run.'
@@ -258,11 +257,10 @@ class BenchmarkRunTest(unittest.TestCase):
self.assertEqual(self.status, ['FAILED'])
def test_terminate_pass(self):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
def GetLastEventPassed():
'Helper function for test_terminate_pass'
@@ -286,11 +284,10 @@ class BenchmarkRunTest(unittest.TestCase):
self.assertEqual(self.status, benchmark_run.STATUS_FAILED)
def test_terminate_fail(self):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
def GetLastEventFailed():
'Helper function for test_terminate_fail'
@@ -314,11 +311,10 @@ class BenchmarkRunTest(unittest.TestCase):
self.assertEqual(self.status, benchmark_run.STATUS_SUCCEEDED)
def test_acquire_machine(self):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
br.terminated = True
self.assertRaises(Exception, br.AcquireMachine)
@@ -332,11 +328,10 @@ class BenchmarkRunTest(unittest.TestCase):
self.assertEqual(machine.name, 'chromeos1-row3-rack5-host7.cros')
def test_get_extra_autotest_args(self):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
def MockLogError(err_msg):
'Helper function for test_get_extra_autotest_args'
@@ -372,11 +367,10 @@ class BenchmarkRunTest(unittest.TestCase):
@mock.patch.object(SuiteRunner, 'Run')
@mock.patch.object(Result, 'CreateFromRun')
def test_run_test(self, mock_result, mock_runner):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
self.status = []
@@ -391,9 +385,9 @@ class BenchmarkRunTest(unittest.TestCase):
br.RunTest(mock_machine)
self.assertTrue(br.run_completed)
- self.assertEqual(
- self.status,
- [benchmark_run.STATUS_IMAGING, benchmark_run.STATUS_RUNNING])
+ self.assertEqual(self.status, [
+ benchmark_run.STATUS_IMAGING, benchmark_run.STATUS_RUNNING
+ ])
self.assertEqual(br.machine_manager.ImageMachine.call_count, 1)
br.machine_manager.ImageMachine.assert_called_with(mock_machine,
@@ -403,17 +397,15 @@ class BenchmarkRunTest(unittest.TestCase):
'', br.profiler_args)
self.assertEqual(mock_result.call_count, 1)
- mock_result.assert_called_with(self.mock_logger, 'average', self.test_label,
- None, "{'Score':100}", '', 0,
- 'page_cycler.netsim.top_10',
- 'telemetry_Crosperf')
+ mock_result.assert_called_with(
+ self.mock_logger, 'average', self.test_label, None, "{'Score':100}", '',
+ 0, 'page_cycler.netsim.top_10', 'telemetry_Crosperf')
def test_set_cache_conditions(self):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '')
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
phony_cache_conditions = [123, 456, True, False]
diff --git a/crosperf/benchmark_unittest.py b/crosperf/benchmark_unittest.py
index 320ede65..24c364c0 100755
--- a/crosperf/benchmark_unittest.py
+++ b/crosperf/benchmark_unittest.py
@@ -16,43 +16,47 @@ class BenchmarkTestCase(unittest.TestCase):
def test_benchmark(self):
# Test creating a benchmark with all the fields filled out.
- b1 = Benchmark('b1_test', # name
- 'octane', # test_name
- '', # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- 'record -e cycles', # perf_args
- 'telemetry_Crosperf', # suite
- True) # show_all_results
+ b1 = Benchmark(
+ 'b1_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles', # perf_args
+ 'telemetry_Crosperf', # suite
+ True) # show_all_results
self.assertTrue(b1.suite, 'telemetry_Crosperf')
# Test creating a benchmark field with default fields left out.
- b2 = Benchmark('b2_test', # name
- 'octane', # test_name
- '', # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- 'record -e cycles') # perf_args
+ b2 = Benchmark(
+ 'b2_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles') # perf_args
self.assertEqual(b2.suite, '')
self.assertFalse(b2.show_all_results)
# Test explicitly creating 'suite=Telemetry' and 'show_all_results=False"
# and see what happens.
- b3 = Benchmark('b3_test', # name
- 'octane', # test_name
- '', # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- 'record -e cycles', # perf_args
- 'telemetry', # suite
- False) # show_all_results
+ b3 = Benchmark(
+ 'b3_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles', # perf_args
+ 'telemetry', # suite
+ False) # show_all_results
self.assertTrue(b3.show_all_results)
# Check to see if the args to Benchmark have changed since the last time
# this test was updated.
- args_list = ['self', 'name', 'test_name', 'test_args', 'iterations',
- 'rm_chroot_tmp', 'perf_args', 'suite', 'show_all_results',
- 'retries', 'run_local']
+ args_list = [
+ 'self', 'name', 'test_name', 'test_args', 'iterations', 'rm_chroot_tmp',
+ 'perf_args', 'suite', 'show_all_results', 'retries', 'run_local'
+ ]
arg_spec = inspect.getargspec(Benchmark.__init__)
self.assertEqual(len(arg_spec.args), len(args_list))
for arg in args_list:
diff --git a/crosperf/compare_machines.py b/crosperf/compare_machines.py
index 0a61eeb9..34513a87 100644
--- a/crosperf/compare_machines.py
+++ b/crosperf/compare_machines.py
@@ -22,10 +22,11 @@ def PrintUsage(msg):
def Main(argv):
parser = argparse.ArgumentParser()
- parser.add_argument('--chromeos_root',
- default='/path/to/chromeos',
- dest='chromeos_root',
- help='ChromeOS root checkout directory')
+ parser.add_argument(
+ '--chromeos_root',
+ default='/path/to/chromeos',
+ dest='chromeos_root',
+ help='ChromeOS root checkout directory')
parser.add_argument('remotes', nargs=argparse.REMAINDER)
options = parser.parse_args(argv)
diff --git a/crosperf/crosperf_unittest.py b/crosperf/crosperf_unittest.py
index 4a468967..b361f15b 100755
--- a/crosperf/crosperf_unittest.py
+++ b/crosperf/crosperf_unittest.py
@@ -42,12 +42,13 @@ class CrosperfTest(unittest.TestCase):
def test_convert_options_to_settings(self):
parser = argparse.ArgumentParser()
- parser.add_argument('-l',
- '--log_dir',
- dest='log_dir',
- default='',
- help='The log_dir, default is under '
- '<crosperf_logs>/logs')
+ parser.add_argument(
+ '-l',
+ '--log_dir',
+ dest='log_dir',
+ default='',
+ help='The log_dir, default is under '
+ '<crosperf_logs>/logs')
crosperf.SetupParserOptions(parser)
argv = ['crosperf/crosperf.py', 'temp.exp', '--rerun=True']
options, _ = parser.parse_known_args(argv)
diff --git a/crosperf/download_images.py b/crosperf/download_images.py
index 8ceaa874..ad0a812b 100644
--- a/crosperf/download_images.py
+++ b/crosperf/download_images.py
@@ -56,8 +56,8 @@ class ImageDownloader(object):
# image name.
command = ('cd ~/trunk/src/third_party/toolchain-utils/crosperf; '
"python translate_xbuddy.py '%s'" % xbuddy_label)
- _, build_id_tuple_str, _ = self._ce.ChrootRunCommandWOutput(chromeos_root,
- command)
+ _, build_id_tuple_str, _ = self._ce.ChrootRunCommandWOutput(
+ chromeos_root, command)
if not build_id_tuple_str:
raise MissingImage("Unable to find image for '%s'" % xbuddy_label)
@@ -143,8 +143,8 @@ class ImageDownloader(object):
cmd = '%s ls %s' % (gsutil_cmd, gs_package_name)
status = self._ce.RunCommand(cmd)
if status != 0:
- raise MissingFile('Cannot find autotest package file: %s.' %
- package_file_name)
+ raise MissingFile(
+ 'Cannot find autotest package file: %s.' % package_file_name)
if self.log_level == 'average':
self._logger.LogOutput('Preparing to download %s package to local '
@@ -171,8 +171,8 @@ class ImageDownloader(object):
package_file_name, uncompress_cmd):
# Uncompress file
download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id)
- command = ('cd %s ; %s %s' %
- (download_path, uncompress_cmd, package_file_name))
+ command = ('cd %s ; %s %s' % (download_path, uncompress_cmd,
+ package_file_name))
if self.log_level != 'verbose':
self._logger.LogOutput('CMD: %s' % command)
@@ -193,8 +193,8 @@ class ImageDownloader(object):
def VerifyAutotestFilesExist(self, chromeos_root, build_id, package_file):
# Quickly verify if the files are there
status = 0
- gs_package_name = ('gs://chromeos-image-archive/%s/%s' %
- (build_id, package_file))
+ gs_package_name = ('gs://chromeos-image-archive/%s/%s' % (build_id,
+ package_file))
gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
if not test_flag.GetTestMode():
cmd = '%s ls %s' % (gsutil_cmd, gs_package_name)
@@ -227,9 +227,9 @@ class ImageDownloader(object):
autotest_packages_name)
if status != 0:
default_autotest_dir = '~/trunk/src/third_party/autotest/files'
- print('(Warning: Could not find autotest packages .)\n'
- '(Warning: Defaulting autotest path to %s .' %
- default_autotest_dir)
+ print(
+ '(Warning: Could not find autotest packages .)\n'
+ '(Warning: Defaulting autotest path to %s .' % default_autotest_dir)
return default_autotest_dir
# Files exist on server, download and uncompress them
@@ -242,12 +242,10 @@ class ImageDownloader(object):
self.UncompressSingleAutotestFile(chromeos_root, build_id,
autotest_packages_name, 'tar -xvf ')
- self.UncompressSingleAutotestFile(chromeos_root, build_id,
- autotest_server_package_name,
- 'tar -jxvf ')
- self.UncompressSingleAutotestFile(chromeos_root, build_id,
- autotest_control_files_name,
- 'tar -xvf ')
+ self.UncompressSingleAutotestFile(
+ chromeos_root, build_id, autotest_server_package_name, 'tar -jxvf ')
+ self.UncompressSingleAutotestFile(
+ chromeos_root, build_id, autotest_control_files_name, 'tar -xvf ')
# Rename created autotest directory to autotest_files
command = ('cd %s ; mv autotest autotest_files' % download_path)
if self.log_level != 'verbose':
diff --git a/crosperf/download_images_unittest.py b/crosperf/download_images_unittest.py
index 7a4f3850..349a2dbb 100755
--- a/crosperf/download_images_unittest.py
+++ b/crosperf/download_images_unittest.py
@@ -126,8 +126,8 @@ class ImageDownloaderTestcast(unittest.TestCase):
# 2nd arg must be exception handler
except_handler_string = 'RunCommandExceptionHandler.HandleException'
self.assertTrue(
- except_handler_string in
- repr(mock_cmd_exec.RunCommand.call_args_list[0][1]))
+ except_handler_string in repr(
+ mock_cmd_exec.RunCommand.call_args_list[0][1]))
# Call 2, should have 2 arguments
self.assertEqual(len(mock_cmd_exec.RunCommand.call_args_list[1]), 2)
diff --git a/crosperf/experiment.py b/crosperf/experiment.py
index dbcde213..987318a5 100644
--- a/crosperf/experiment.py
+++ b/crosperf/experiment.py
@@ -126,10 +126,11 @@ class Experiment(object):
full_name = '%s_%s_%s' % (label.name, benchmark.name, iteration)
logger_to_use = logger.Logger(self.log_dir, 'run.%s' % (full_name),
True)
- benchmark_runs.append(benchmark_run.BenchmarkRun(
- benchmark_run_name, benchmark, label, iteration,
- self.cache_conditions, self.machine_manager, logger_to_use,
- self.log_level, self.share_cache))
+ benchmark_runs.append(
+ benchmark_run.BenchmarkRun(benchmark_run_name, benchmark, label,
+ iteration, self.cache_conditions,
+ self.machine_manager, logger_to_use,
+ self.log_level, self.share_cache))
return benchmark_runs
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index 2278015b..9d58048e 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -78,11 +78,13 @@ crosbolt_perf_tests = [
'power_Resume',
'video_PlaybackPerf.h264',
'build_RootFilesystemSize',
+]
+
# 'cheets_AntutuTest',
# 'cheets_PerfBootServer',
# 'cheets_CandyCrushTest',
# 'cheets_LinpackTest',
-]
+#]
class ExperimentFactory(object):
@@ -98,10 +100,9 @@ class ExperimentFactory(object):
show_all_results, retries, run_local):
"""Add all the tests in a set to the benchmarks list."""
for test_name in benchmark_list:
- telemetry_benchmark = Benchmark(test_name, test_name, test_args,
- iterations, rm_chroot_tmp, perf_args,
- suite, show_all_results, retries,
- run_local)
+ telemetry_benchmark = Benchmark(
+ test_name, test_name, test_args, iterations, rm_chroot_tmp, perf_args,
+ suite, show_all_results, retries, run_local)
benchmarks.append(telemetry_benchmark)
def GetExperiment(self, experiment_file, working_directory, log_dir):
@@ -210,20 +211,33 @@ class ExperimentFactory(object):
benchmarks.append(benchmark)
else:
if test_name == 'all_graphics_perf':
- self.AppendBenchmarkSet(benchmarks,
- graphics_perf_tests, '',
- iterations, rm_chroot_tmp, perf_args, '',
- show_all_results, retries, run_local=False)
+ self.AppendBenchmarkSet(
+ benchmarks,
+ graphics_perf_tests,
+ '',
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ '',
+ show_all_results,
+ retries,
+ run_local=False)
elif test_name == 'all_crosbolt_perf':
- self.AppendBenchmarkSet(benchmarks,
- telemetry_crosbolt_perf_tests, test_args,
- iterations, rm_chroot_tmp, perf_args,
- 'telemetry_Crosperf', show_all_results,
- retries, run_local)
- self.AppendBenchmarkSet(benchmarks,
- crosbolt_perf_tests, '',
- iterations, rm_chroot_tmp, perf_args, '',
- show_all_results, retries, run_local=False)
+ self.AppendBenchmarkSet(benchmarks, telemetry_crosbolt_perf_tests,
+ test_args, iterations, rm_chroot_tmp,
+ perf_args, 'telemetry_Crosperf',
+ show_all_results, retries, run_local)
+ self.AppendBenchmarkSet(
+ benchmarks,
+ crosbolt_perf_tests,
+ '',
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ '',
+ show_all_results,
+ retries,
+ run_local=False)
else:
# Add the single benchmark.
benchmark = Benchmark(
@@ -265,11 +279,8 @@ class ExperimentFactory(object):
build = label_settings.GetField('build')
if len(build) == 0:
raise RuntimeError("Can not have empty 'build' field!")
- image, autotest_path = label_settings.GetXbuddyPath(build,
- autotest_path,
- board,
- chromeos_root,
- log_level)
+ image, autotest_path = label_settings.GetXbuddyPath(
+ build, autotest_path, board, chromeos_root, log_level)
cache_dir = label_settings.GetField('cache_dir')
chrome_src = label_settings.GetField('chrome_src')
@@ -277,8 +288,8 @@ class ExperimentFactory(object):
# TODO(yunlian): We should consolidate code in machine_manager.py
# to derermine whether we are running from within google or not
if ('corp.google.com' in socket.gethostname() and
- (not my_remote or my_remote == remote and
- global_settings.GetField('board') != board)):
+ (not my_remote or
+ my_remote == remote and global_settings.GetField('board') != board)):
my_remote = self.GetDefaultRemotes(board)
if global_settings.GetField('same_machine') and len(my_remote) > 1:
raise RuntimeError('Only one remote is allowed when same_machine '
diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py
index 02bfd0a1..44090e5c 100755
--- a/crosperf/experiment_factory_unittest.py
+++ b/crosperf/experiment_factory_unittest.py
@@ -175,9 +175,9 @@ class ExperimentFactoryTest(unittest.TestCase):
test_flag.SetTestMode(True)
label_settings.SetField('remote', 'chromeos1.cros chromeos2.cros')
exp = ef.GetExperiment(mock_experiment_file, '', '')
- self.assertEqual(
- exp.remote,
- ['chromeos1.cros', 'chromeos2.cros', '123.45.67.89', '123.45.76.80'])
+ self.assertEqual(exp.remote, [
+ 'chromeos1.cros', 'chromeos2.cros', '123.45.67.89', '123.45.76.80'
+ ])
# Third test: Automatic fixing of bad logging_level param:
global_settings.SetField('logging_level', 'really loud!')
@@ -213,9 +213,9 @@ class ExperimentFactoryTest(unittest.TestCase):
self.assertEqual(len(exp.labels), 2)
self.assertEqual(exp.labels[1].chromeos_image, 'fake_image_path')
self.assertEqual(exp.labels[1].autotest_path, 'fake_autotest_path')
- self.assertEqual(
- exp.remote,
- ['fake_chromeos_machine1.cros', 'fake_chromeos_machine2.cros'])
+ self.assertEqual(exp.remote, [
+ 'fake_chromeos_machine1.cros', 'fake_chromeos_machine2.cros'
+ ])
def test_get_default_remotes(self):
board_list = [
diff --git a/crosperf/experiment_file.py b/crosperf/experiment_file.py
index 016e9d86..57eb52dc 100644
--- a/crosperf/experiment_file.py
+++ b/crosperf/experiment_file.py
@@ -114,8 +114,8 @@ class ExperimentFile(object):
elif ExperimentFile._OPEN_SETTINGS_RE.match(line):
new_settings = self._ParseSettings(reader)
if new_settings.name in settings_names:
- raise SyntaxError("Duplicate settings name: '%s'." %
- new_settings.name)
+ raise SyntaxError(
+ "Duplicate settings name: '%s'." % new_settings.name)
settings_names[new_settings.name] = True
self.all_settings.append(new_settings)
elif ExperimentFile._FIELD_VALUE_RE.match(line):
@@ -160,11 +160,8 @@ class ExperimentFile(object):
autotest_path = ''
if autotest_field.assigned:
autotest_path = autotest_field.GetString()
- image_path, autotest_path = settings.GetXbuddyPath(value,
- autotest_path,
- board,
- chromeos_root,
- 'quiet')
+ image_path, autotest_path = settings.GetXbuddyPath(
+ value, autotest_path, board, chromeos_root, 'quiet')
res += '\t#actual_image: %s\n' % image_path
if not autotest_field.assigned:
res += '\t#actual_autotest_path: %s\n' % autotest_path
diff --git a/crosperf/experiment_file_unittest.py b/crosperf/experiment_file_unittest.py
index ed1f176c..d4a02107 100755
--- a/crosperf/experiment_file_unittest.py
+++ b/crosperf/experiment_file_unittest.py
@@ -3,7 +3,6 @@
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""The unittest of experiment_file."""
from __future__ import print_function
import StringIO
@@ -87,6 +86,7 @@ label: image2 {
class ExperimentFileTest(unittest.TestCase):
"""The main class for Experiment File test."""
+
def testLoadExperimentFile1(self):
input_file = StringIO.StringIO(EXPERIMENT_FILE_1)
experiment_file = ExperimentFile(input_file)
diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py
index b30c8bd5..b583743b 100644
--- a/crosperf/experiment_runner.py
+++ b/crosperf/experiment_runner.py
@@ -26,14 +26,15 @@ from results_report import TextResultsReport
from results_report import JSONResultsReport
from schedv2 import Schedv2
+
def _WriteJSONReportToFile(experiment, results_dir, json_report):
"""Writes a JSON report to a file in results_dir."""
has_llvm = any('llvm' in l.compiler for l in experiment.labels)
compiler_string = 'llvm' if has_llvm else 'gcc'
board = experiment.labels[0].board
- filename = 'report_%s_%s_%s.%s.json' % (
- board, json_report.date, json_report.time.replace(':', '.'),
- compiler_string)
+ filename = 'report_%s_%s_%s.%s.json' % (board, json_report.date,
+ json_report.time.replace(':', '.'),
+ compiler_string)
fullname = os.path.join(results_dir, filename)
report_text = json_report.GetReport()
with open(fullname, 'w') as out_file:
@@ -151,9 +152,10 @@ class ExperimentRunner(object):
cache.Init(br.label.chromeos_image, br.label.chromeos_root,
br.benchmark.test_name, br.iteration, br.test_args,
br.profiler_args, br.machine_manager, br.machine,
- br.label.board, br.cache_conditions, br._logger, br.log_level,
- br.label, br.share_cache, br.benchmark.suite,
- br.benchmark.show_all_results, br.benchmark.run_local)
+ br.label.board, br.cache_conditions,
+ br.logger(), br.log_level, br.label, br.share_cache,
+ br.benchmark.suite, br.benchmark.show_all_results,
+ br.benchmark.run_local)
cache_dir = cache.GetCacheDirForWrite()
if os.path.exists(cache_dir):
self.l.LogOutput('Removing cache dir: %s' % cache_dir)
@@ -229,18 +231,19 @@ class ExperimentRunner(object):
subject = '%s: %s' % (experiment.name, ' vs. '.join(label_names))
text_report = TextResultsReport.FromExperiment(experiment, True).GetReport()
- text_report += ('\nResults are stored in %s.\n' %
- experiment.results_directory)
+ text_report += (
+ '\nResults are stored in %s.\n' % experiment.results_directory)
text_report = "<pre style='font-size: 13px'>%s</pre>" % text_report
html_report = HTMLResultsReport.FromExperiment(experiment).GetReport()
attachment = EmailSender.Attachment('report.html', html_report)
email_to = experiment.email_to or []
email_to.append(getpass.getuser())
- EmailSender().SendEmail(email_to,
- subject,
- text_report,
- attachments=[attachment],
- msg_type='html')
+ EmailSender().SendEmail(
+ email_to,
+ subject,
+ text_report,
+ attachments=[attachment],
+ msg_type='html')
def _StoreResults(self, experiment):
if self._terminated:
@@ -256,8 +259,8 @@ class ExperimentRunner(object):
results_table_path = os.path.join(results_directory, 'results.html')
report = HTMLResultsReport.FromExperiment(experiment).GetReport()
if self.json_report:
- json_report = JSONResultsReport.FromExperiment(experiment,
- json_args={'indent': 2})
+ json_report = JSONResultsReport.FromExperiment(
+ experiment, json_args={'indent': 2})
_WriteJSONReportToFile(experiment, results_directory, json_report)
FileUtils().WriteFile(results_table_path, report)
@@ -265,8 +268,8 @@ class ExperimentRunner(object):
self.l.LogOutput('Storing email message body in %s.' % results_directory)
msg_file_path = os.path.join(results_directory, 'msg_body.html')
text_report = TextResultsReport.FromExperiment(experiment, True).GetReport()
- text_report += ('\nResults are stored in %s.\n' %
- experiment.results_directory)
+ text_report += (
+ '\nResults are stored in %s.\n' % experiment.results_directory)
msg_body = "<pre style='font-size: 13px'>%s</pre>" % text_report
FileUtils().WriteFile(msg_file_path, msg_body)
@@ -296,8 +299,8 @@ class MockExperimentRunner(ExperimentRunner):
super(MockExperimentRunner, self).__init__(experiment, json_report)
def _Run(self, experiment):
- self.l.LogOutput("Would run the following experiment: '%s'." %
- experiment.name)
+ self.l.LogOutput(
+ "Would run the following experiment: '%s'." % experiment.name)
def _PrintTable(self, experiment):
self.l.LogOutput('Would print the experiment table.')
diff --git a/crosperf/experiment_runner_unittest.py b/crosperf/experiment_runner_unittest.py
index 38ac3874..4809894f 100755
--- a/crosperf/experiment_runner_unittest.py
+++ b/crosperf/experiment_runner_unittest.py
@@ -106,9 +106,8 @@ class ExperimentRunnerTest(unittest.TestCase):
def make_fake_experiment(self):
test_flag.SetTestMode(True)
experiment_file = ExperimentFile(StringIO.StringIO(EXPERIMENT_FILE_1))
- experiment = ExperimentFactory().GetExperiment(experiment_file,
- working_directory='',
- log_dir='')
+ experiment = ExperimentFactory().GetExperiment(
+ experiment_file, working_directory='', log_dir='')
return experiment
@mock.patch.object(machine_manager.MachineManager, 'AddMachine')
@@ -120,20 +119,22 @@ class ExperimentRunnerTest(unittest.TestCase):
self.exp = self.make_fake_experiment()
def test_init(self):
- er = experiment_runner.ExperimentRunner(self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
self.assertFalse(er._terminated)
self.assertEqual(er.STATUS_TIME_DELAY, 10)
self.exp.log_level = 'verbose'
- er = experiment_runner.ExperimentRunner(self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
self.assertEqual(er.STATUS_TIME_DELAY, 30)
@mock.patch.object(experiment_status.ExperimentStatus, 'GetStatusString')
@@ -164,11 +165,12 @@ class ExperimentRunnerTest(unittest.TestCase):
# Test 1: log_level == "quiet"
self.exp.log_level = 'quiet'
- er = experiment_runner.ExperimentRunner(self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
er.STATUS_TIME_DELAY = 2
mock_status_string.return_value = 'Fake status string'
er._Run(self.exp)
@@ -180,9 +182,10 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(self.mock_logger.dot_count, 2)
self.assertEqual(mock_progress_string.call_count, 0)
self.assertEqual(mock_status_string.call_count, 2)
- self.assertEqual(self.mock_logger.output_msgs,
- ['==============================', 'Fake status string',
- '=============================='])
+ self.assertEqual(self.mock_logger.output_msgs, [
+ '==============================', 'Fake status string',
+ '=============================='
+ ])
self.assertEqual(len(self.mock_logger.error_msgs), 0)
# Test 2: log_level == "average"
@@ -190,11 +193,12 @@ class ExperimentRunnerTest(unittest.TestCase):
reset()
self.exp.log_level = 'average'
mock_status_string.call_count = 0
- er = experiment_runner.ExperimentRunner(self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
er.STATUS_TIME_DELAY = 2
mock_status_string.return_value = 'Fake status string'
er._Run(self.exp)
@@ -206,9 +210,10 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(self.mock_logger.dot_count, 2)
self.assertEqual(mock_progress_string.call_count, 0)
self.assertEqual(mock_status_string.call_count, 2)
- self.assertEqual(self.mock_logger.output_msgs,
- ['==============================', 'Fake status string',
- '=============================='])
+ self.assertEqual(self.mock_logger.output_msgs, [
+ '==============================', 'Fake status string',
+ '=============================='
+ ])
self.assertEqual(len(self.mock_logger.error_msgs), 0)
# Test 3: log_level == "verbose"
@@ -216,11 +221,12 @@ class ExperimentRunnerTest(unittest.TestCase):
reset()
self.exp.log_level = 'verbose'
mock_status_string.call_count = 0
- er = experiment_runner.ExperimentRunner(self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
er.STATUS_TIME_DELAY = 2
mock_status_string.return_value = 'Fake status string'
mock_progress_string.return_value = 'Fake progress string'
@@ -233,22 +239,24 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(self.mock_logger.dot_count, 0)
self.assertEqual(mock_progress_string.call_count, 2)
self.assertEqual(mock_status_string.call_count, 2)
- self.assertEqual(self.mock_logger.output_msgs,
- ['==============================', 'Fake progress string',
- 'Fake status string', '==============================',
- '==============================', 'Fake progress string',
- 'Fake status string', '=============================='])
+ self.assertEqual(self.mock_logger.output_msgs, [
+ '==============================', 'Fake progress string',
+ 'Fake status string', '==============================',
+ '==============================', 'Fake progress string',
+ 'Fake status string', '=============================='
+ ])
self.assertEqual(len(self.mock_logger.error_msgs), 0)
@mock.patch.object(TextResultsReport, 'GetReport')
def test_print_table(self, mock_report):
self.mock_logger.Reset()
mock_report.return_value = 'This is a fake experiment report.'
- er = experiment_runner.ExperimentRunner(self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
er._PrintTable(self.exp)
self.assertEqual(mock_report.call_count, 1)
self.assertEqual(self.mock_logger.output_msgs,
@@ -269,11 +277,12 @@ class ExperimentRunnerTest(unittest.TestCase):
self.mock_logger.Reset()
config.AddConfig('no_email', True)
self.exp.email_to = ['jane.doe@google.com']
- er = experiment_runner.ExperimentRunner(self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
# Test 1. Config:no_email; exp.email_to set ==> no email sent
er._Email(self.exp)
self.assertEqual(mock_getuser.call_count, 0)
@@ -295,8 +304,8 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(mock_html_report.call_count, 1)
self.assertEqual(len(mock_emailer.call_args), 2)
self.assertEqual(mock_emailer.call_args[0],
- (['jane.doe@google.com', 'john.smith@google.com'],
- ': image1 vs. image2',
+ (['jane.doe@google.com',
+ 'john.smith@google.com'], ': image1 vs. image2',
"<pre style='font-size: 13px'>This is a fake text "
'report.\nResults are stored in _results.\n</pre>'))
self.assertTrue(type(mock_emailer.call_args[1]) is dict)
@@ -325,8 +334,10 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(mock_html_report.call_count, 1)
self.assertEqual(len(mock_emailer.call_args), 2)
self.assertEqual(mock_emailer.call_args[0],
- (['jane.doe@google.com', 'john.smith@google.com',
- 'john.smith@google.com'], ': image1 vs. image2',
+ ([
+ 'jane.doe@google.com', 'john.smith@google.com',
+ 'john.smith@google.com'
+ ], ': image1 vs. image2',
"<pre style='font-size: 13px'>This is a fake text "
'report.\nResults are stored in _results.\n</pre>'))
self.assertTrue(type(mock_emailer.call_args[1]) is dict)
@@ -393,15 +404,16 @@ class ExperimentRunnerTest(unittest.TestCase):
self.mock_logger.Reset()
self.exp.results_directory = '/usr/local/crosperf-results'
bench_run = self.exp.benchmark_runs[5]
- bench_path = '/usr/local/crosperf-results/' + filter(str.isalnum,
- bench_run.name)
+ bench_path = '/usr/local/crosperf-results/' + filter(
+ str.isalnum, bench_run.name)
self.assertEqual(len(self.exp.benchmark_runs), 6)
- er = experiment_runner.ExperimentRunner(self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
# Test 1. Make sure nothing is done if _terminated is true.
er._terminated = True
@@ -438,12 +450,12 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(mock_rmdir.call_count, 1)
mock_rmdir.called_with('/usr/local/crosperf-results')
self.assertEqual(self.mock_logger.LogOutputCount, 4)
- self.assertEqual(
- self.mock_logger.output_msgs,
- ['Storing experiment file in /usr/local/crosperf-results.',
- 'Storing results report in /usr/local/crosperf-results.',
- 'Storing email message body in /usr/local/crosperf-results.',
- 'Storing results of each benchmark run.'])
+ self.assertEqual(self.mock_logger.output_msgs, [
+ 'Storing experiment file in /usr/local/crosperf-results.',
+ 'Storing results report in /usr/local/crosperf-results.',
+ 'Storing email message body in /usr/local/crosperf-results.',
+ 'Storing results of each benchmark run.'
+ ])
if __name__ == '__main__':
diff --git a/crosperf/experiment_status.py b/crosperf/experiment_status.py
index 627db99e..c6610433 100644
--- a/crosperf/experiment_status.py
+++ b/crosperf/experiment_status.py
@@ -80,8 +80,8 @@ class ExperimentStatus(object):
strings.append('Current time: %s Elapsed: %s ETA: %s' %
(datetime.datetime.now(),
datetime.timedelta(seconds=int(elapsed_time)), eta))
- strings.append(self._GetProgressBar(self.experiment.num_complete,
- self.num_total))
+ strings.append(
+ self._GetProgressBar(self.experiment.num_complete, self.num_total))
return '\n'.join(strings)
def GetStatusString(self):
@@ -107,8 +107,8 @@ class ExperimentStatus(object):
self.experiment.machine_manager.AsString())
elif self.experiment.schedv2():
# In schedv2 mode, we always print out thread status.
- thread_status = thread_status_format.format(self.experiment.schedv2(
- ).threads_status_as_string())
+ thread_status = thread_status_format.format(
+ self.experiment.schedv2().threads_status_as_string())
result = '{}{}'.format(thread_status, '\n'.join(status_strings))
diff --git a/crosperf/field.py b/crosperf/field.py
index bc92e2cc..6821d4d3 100644
--- a/crosperf/field.py
+++ b/crosperf/field.py
@@ -68,8 +68,8 @@ class BooleanField(Field):
return True
elif value.lower() == 'false':
return False
- raise TypeError("Invalid value for '%s'. Must be true or false." %
- self.name)
+ raise TypeError(
+ "Invalid value for '%s'. Must be true or false." % self.name)
class IntegerField(Field):
diff --git a/crosperf/flag_test_unittest.py b/crosperf/flag_test_unittest.py
index 9f2a7136..0e743274 100755
--- a/crosperf/flag_test_unittest.py
+++ b/crosperf/flag_test_unittest.py
@@ -1,7 +1,6 @@
#!/usr/bin/env python2
#
# Copyright 2014 Google Inc. All Rights Reserved.
-
"""The unittest of flags."""
from __future__ import print_function
@@ -12,6 +11,7 @@ import unittest
class FlagTestCase(unittest.TestCase):
"""The unittest class."""
+
def test_test_flag(self):
# Verify that test_flag.is_test exists, that it is a list,
# and that it contains 1 element.
diff --git a/crosperf/generate_report.py b/crosperf/generate_report.py
index e0add994..fd7a2cf7 100755
--- a/crosperf/generate_report.py
+++ b/crosperf/generate_report.py
@@ -3,7 +3,6 @@
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Given a specially-formatted JSON object, generates results report(s).
The JSON object should look like:
@@ -62,10 +61,12 @@ from results_report import TextResultsReport
def CountBenchmarks(benchmark_runs):
"""Counts the number of iterations for each benchmark in benchmark_runs."""
+
# Example input for benchmark_runs:
# {"bench": [[run1, run2, run3], [run1, run2, run3, run4]]}
def _MaxLen(results):
return 0 if not results else max(len(r) for r in results)
+
return [(name, _MaxLen(results))
for name, results in benchmark_runs.iteritems()]
@@ -121,8 +122,8 @@ def CutResultsInPlace(results, max_keys=50, complain_on_update=True):
len(retained_keys) != len(removable_keys)
if actually_updated and complain_on_update:
- print("Warning: Some benchmark keyvals have been truncated.",
- file=sys.stderr)
+ print(
+ 'Warning: Some benchmark keyvals have been truncated.', file=sys.stderr)
return results
@@ -144,7 +145,7 @@ def _ConvertToASCII(obj):
def _PositiveInt(s):
i = int(s)
if i < 0:
- raise argparse.ArgumentTypeError('%d is not a positive integer.' % (i, ))
+ raise argparse.ArgumentTypeError('%d is not a positive integer.' % (i,))
return i
@@ -182,13 +183,13 @@ def WriteFile(output_prefix, extension, get_contents, overwrite, verbose):
"""
if output_prefix == '-':
if verbose:
- print('Writing %s report to stdout' % (extension, ), file=sys.stderr)
+ print('Writing %s report to stdout' % (extension,), file=sys.stderr)
sys.stdout.write(get_contents())
return
file_name = '%s.%s' % (output_prefix, extension)
if not overwrite and os.path.exists(file_name):
- raise IOError('Refusing to write %s -- it already exists' % (file_name, ))
+ raise IOError('Refusing to write %s -- it already exists' % (file_name,))
with open(file_name, 'w') as out_file:
if verbose:
@@ -200,7 +201,7 @@ def RunActions(actions, benchmark_results, output_prefix, overwrite, verbose):
"""Runs `actions`, returning True if all succeeded."""
failed = False
- report_ctor = None # Make the linter happy
+ report_ctor = None # Make the linter happy
for report_ctor, extension in actions:
try:
get_contents = lambda: report_ctor(benchmark_results).GetReport()
@@ -225,27 +226,49 @@ def _NoPerfReport(_label_name, _benchmark_name, _benchmark_iteration):
def _ParseArgs(argv):
parser = argparse.ArgumentParser(description='Turns JSON into results '
'report(s).')
- parser.add_argument('-v', '--verbose', action='store_true',
- help='Be a tiny bit more verbose.')
- parser.add_argument('-f', '--force', action='store_true',
- help='Overwrite existing results files.')
- parser.add_argument('-o', '--output', default='report', type=str,
- help='Prefix of the output filename (default: report). '
- '- means stdout.')
- parser.add_argument('-i', '--input', required=True, type=str,
- help='Where to read the JSON from. - means stdin.')
- parser.add_argument('-l', '--statistic-limit', default=0, type=_PositiveInt,
- help='The maximum number of benchmark statistics to '
- 'display from a single run. 0 implies unlimited.')
- parser.add_argument('--json', action='store_true',
- help='Output a JSON report.')
- parser.add_argument('--text', action='store_true',
- help='Output a text report.')
- parser.add_argument('--email', action='store_true',
- help='Output a text report suitable for email.')
- parser.add_argument('--html', action='store_true',
- help='Output an HTML report (this is the default if no '
- 'other output format is specified).')
+ parser.add_argument(
+ '-v',
+ '--verbose',
+ action='store_true',
+ help='Be a tiny bit more verbose.')
+ parser.add_argument(
+ '-f',
+ '--force',
+ action='store_true',
+ help='Overwrite existing results files.')
+ parser.add_argument(
+ '-o',
+ '--output',
+ default='report',
+ type=str,
+ help='Prefix of the output filename (default: report). '
+ '- means stdout.')
+ parser.add_argument(
+ '-i',
+ '--input',
+ required=True,
+ type=str,
+ help='Where to read the JSON from. - means stdin.')
+ parser.add_argument(
+ '-l',
+ '--statistic-limit',
+ default=0,
+ type=_PositiveInt,
+ help='The maximum number of benchmark statistics to '
+ 'display from a single run. 0 implies unlimited.')
+ parser.add_argument(
+ '--json', action='store_true', help='Output a JSON report.')
+ parser.add_argument(
+ '--text', action='store_true', help='Output a text report.')
+ parser.add_argument(
+ '--email',
+ action='store_true',
+ help='Output a text report suitable for email.')
+ parser.add_argument(
+ '--html',
+ action='store_true',
+ help='Output an HTML report (this is the default if no '
+ 'other output format is specified).')
return parser.parse_args(argv)
@@ -263,13 +286,13 @@ def Main(argv):
benches = CountBenchmarks(results)
# In crosperf, a label is essentially a platform+configuration. So, a name of
# a label and a name of a platform are equivalent for our purposes.
- bench_results = BenchmarkResults(label_names=platform_names,
- benchmark_names_and_iterations=benches,
- run_keyvals=results,
- read_perf_report=_NoPerfReport)
+ bench_results = BenchmarkResults(
+ label_names=platform_names,
+ benchmark_names_and_iterations=benches,
+ run_keyvals=results,
+ read_perf_report=_NoPerfReport)
actions = _AccumulateActions(args)
- ok = RunActions(actions, bench_results, args.output, args.force,
- args.verbose)
+ ok = RunActions(actions, bench_results, args.output, args.force, args.verbose)
return 0 if ok else 1
diff --git a/crosperf/generate_report_unittest.py b/crosperf/generate_report_unittest.py
index a5d00635..bbb0c0ae 100755
--- a/crosperf/generate_report_unittest.py
+++ b/crosperf/generate_report_unittest.py
@@ -19,8 +19,10 @@ import unittest
import generate_report
import results_report
+
class _ContextualStringIO(StringIO):
"""StringIO that can be used in `with` statements."""
+
def __init__(self, *args):
StringIO.__init__(self, *args)
@@ -33,6 +35,7 @@ class _ContextualStringIO(StringIO):
class GenerateReportTests(unittest.TestCase):
"""Tests for generate_report.py."""
+
def testCountBenchmarks(self):
runs = {
'foo': [[{}, {}, {}], [{}, {}, {}, {}]],
@@ -45,16 +48,33 @@ class GenerateReportTests(unittest.TestCase):
def testCutResultsInPlace(self):
bench_data = {
- 'foo': [[{'a': 1, 'b': 2, 'c': 3}, {'a': 3, 'b': 2.5, 'c': 1}]],
- 'bar': [[{'d': 11, 'e': 12, 'f': 13}]],
- 'baz': [[{'g': 12, 'h': 13}]],
- 'qux': [[{'i': 11}]],
+ 'foo': [[{
+ 'a': 1,
+ 'b': 2,
+ 'c': 3
+ }, {
+ 'a': 3,
+ 'b': 2.5,
+ 'c': 1
+ }]],
+ 'bar': [[{
+ 'd': 11,
+ 'e': 12,
+ 'f': 13
+ }]],
+ 'baz': [[{
+ 'g': 12,
+ 'h': 13
+ }]],
+ 'qux': [[{
+ 'i': 11
+ }]],
}
original_bench_data = copy.deepcopy(bench_data)
max_keys = 2
- results = generate_report.CutResultsInPlace(bench_data, max_keys=max_keys,
- complain_on_update=False)
+ results = generate_report.CutResultsInPlace(
+ bench_data, max_keys=max_keys, complain_on_update=False)
# Cuts should be in-place.
self.assertIs(results, bench_data)
self.assertItemsEqual(original_bench_data.keys(), bench_data.keys())
@@ -68,15 +88,21 @@ class GenerateReportTests(unittest.TestCase):
# sub_keyvals must be a subset of original_keyvals
self.assertDictContainsSubset(sub_keyvals, original_keyvals)
-
def testCutResultsInPlaceLeavesRetval(self):
bench_data = {
- 'foo': [[{'retval': 0, 'a': 1}]],
- 'bar': [[{'retval': 1}]],
- 'baz': [[{'RETVAL': 1}]],
+ 'foo': [[{
+ 'retval': 0,
+ 'a': 1
+ }]],
+ 'bar': [[{
+ 'retval': 1
+ }]],
+ 'baz': [[{
+ 'RETVAL': 1
+ }]],
}
- results = generate_report.CutResultsInPlace(bench_data, max_keys=0,
- complain_on_update=False)
+ results = generate_report.CutResultsInPlace(
+ bench_data, max_keys=0, complain_on_update=False)
# Just reach into results assuming we know it otherwise outputs things
# sanely. If it doesn't, testCutResultsInPlace should give an indication as
# to what, exactly, is broken.
@@ -121,12 +147,12 @@ class GenerateReportTests(unittest.TestCase):
# We only mock print_exc so we don't have exception info printed to stdout.
@mock.patch('generate_report.WriteFile', side_effect=ValueError('Oh noo'))
@mock.patch('traceback.print_exc')
- def testRunActionsRunsAllActionsRegardlessOfExceptions(self, mock_print_exc,
- mock_write_file):
+ def testRunActionsRunsAllActionsRegardlessOfExceptions(
+ self, mock_print_exc, mock_write_file):
actions = [(None, 'json'), (None, 'html'), (None, 'text'), (None, 'email')]
output_prefix = '-'
- ok = generate_report.RunActions(actions, {}, output_prefix, overwrite=False,
- verbose=False)
+ ok = generate_report.RunActions(
+ actions, {}, output_prefix, overwrite=False, verbose=False)
self.assertFalse(ok)
self.assertEqual(mock_write_file.call_count, len(actions))
self.assertEqual(mock_print_exc.call_count, len(actions))
@@ -135,8 +161,8 @@ class GenerateReportTests(unittest.TestCase):
def testRunActionsReturnsTrueIfAllActionsSucceed(self, mock_write_file):
actions = [(None, 'json'), (None, 'html'), (None, 'text')]
output_prefix = '-'
- ok = generate_report.RunActions(actions, {}, output_prefix, overwrite=False,
- verbose=False)
+ ok = generate_report.RunActions(
+ actions, {}, output_prefix, overwrite=False, verbose=False)
self.assertEqual(mock_write_file.call_count, len(actions))
self.assertTrue(ok)
diff --git a/crosperf/image_checksummer.py b/crosperf/image_checksummer.py
index e330084e..f5862e4d 100644
--- a/crosperf/image_checksummer.py
+++ b/crosperf/image_checksummer.py
@@ -25,8 +25,8 @@ class ImageChecksummer(object):
def Checksum(self):
with self._lock:
if not self._checksum:
- logger.GetLogger().LogOutput("Acquiring checksum for '%s'." %
- self.label.name)
+ logger.GetLogger().LogOutput(
+ "Acquiring checksum for '%s'." % self.label.name)
self._checksum = None
if self.label.image_type != 'local':
raise RuntimeError('Called Checksum on non-local image!')
@@ -48,8 +48,8 @@ class ImageChecksummer(object):
def __new__(cls, *args, **kwargs):
with cls._lock:
if not cls._instance:
- cls._instance = super(ImageChecksummer, cls).__new__(cls, *args,
- **kwargs)
+ cls._instance = super(ImageChecksummer, cls).__new__(
+ cls, *args, **kwargs)
return cls._instance
def Checksum(self, label, log_level):
diff --git a/crosperf/machine_image_manager.py b/crosperf/machine_image_manager.py
index 3cc464bb..2ad750d3 100644
--- a/crosperf/machine_image_manager.py
+++ b/crosperf/machine_image_manager.py
@@ -1,10 +1,9 @@
-
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""MachineImageManager allocates images to duts."""
+
class MachineImageManager(object):
"""Management of allocating images to duts.
@@ -132,8 +131,7 @@ class MachineImageManager(object):
* Special / common case to handle seperately
We have only 1 dut or if we have only 1 label, that's simple enough.
-
- """
+ """
def __init__(self, labels, duts):
self.labels_ = labels
@@ -158,13 +156,13 @@ class MachineImageManager(object):
def compute_initial_allocation(self):
"""Compute the initial label-dut allocation.
- This method finds the most efficient way that every label gets imaged at
- least once.
+ This method finds the most efficient way that every label gets imaged at
+ least once.
- Returns:
- False, only if not all labels could be imaged to a certain machine,
- otherwise True.
- """
+ Returns:
+ False, only if not all labels could be imaged to a certain machine,
+ otherwise True.
+ """
if self.n_duts_ == 1:
for i, v in self.matrix_vertical_generator(0):
@@ -196,15 +194,15 @@ class MachineImageManager(object):
def allocate(self, dut, schedv2=None):
"""Allocate a label for dut.
- Args:
- dut: the dut that asks for a new image.
- schedv2: the scheduling instance, we need the benchmark run
- information with schedv2 for a better allocation.
+ Args:
+ dut: the dut that asks for a new image.
+ schedv2: the scheduling instance, we need the benchmark run
+ information with schedv2 for a better allocation.
- Returns:
- a label to image onto the dut or None if no more available images for
- the dut.
- """
+ Returns:
+ a label to image onto the dut or None if no more available images for
+ the dut.
+ """
j = self.dut_name_ordinal_[dut.name]
# 'can_' prefix means candidate label's.
can_reimage_number = 999
@@ -270,16 +268,16 @@ class MachineImageManager(object):
def matrix_vertical_generator(self, col):
"""Iterate matrix vertically at column 'col'.
- Yield row number i and value at matrix_[i][col].
- """
+ Yield row number i and value at matrix_[i][col].
+ """
for i, _ in enumerate(self.labels_):
yield i, self.matrix_[i][col]
def matrix_horizontal_generator(self, row):
"""Iterate matrix horizontally at row 'row'.
- Yield col number j and value at matrix_[row][j].
- """
+ Yield col number j and value at matrix_[row][j].
+ """
for j, _ in enumerate(self.duts_):
yield j, self.matrix_[row][j]
diff --git a/crosperf/machine_image_manager_unittest.py b/crosperf/machine_image_manager_unittest.py
index fe41dc09..02afaa06 100755
--- a/crosperf/machine_image_manager_unittest.py
+++ b/crosperf/machine_image_manager_unittest.py
@@ -1,7 +1,6 @@
#!/usr/bin/env python2
# Copyright 2015 Google Inc. All Rights Reserved.
-
"""Unit tests for the MachineImageManager class."""
from __future__ import print_function
@@ -23,14 +22,14 @@ class MockLabel(object):
"""Provide hash function for label.
This is required because Label object is used inside a dict as key.
- """
+ """
return hash(self.name)
def __eq__(self, other):
"""Provide eq function for label.
This is required because Label object is used inside a dict as key.
- """
+ """
return isinstance(other, MockLabel) and other.name == self.name
@@ -52,6 +51,7 @@ class MachineImageManagerTester(unittest.TestCase):
return duts
def print_matrix(self, matrix):
+ # pylint: disable=expression-not-assigned
for r in matrix:
for v in r:
print('{} '.format('.' if v == ' ' else v)),
@@ -97,53 +97,63 @@ class MachineImageManagerTester(unittest.TestCase):
self.assertTrue(mim.matrix_ == [['Y', 'Y', 'Y']])
def test_case1(self):
- labels = [MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']),
- MockLabel('l3', ['m1'])]
+ labels = [
+ MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']), MockLabel(
+ 'l3', ['m1'])
+ ]
duts = [MockDut('m1'), MockDut('m2'), MockDut('m3')]
mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.matrix_ == [[' ', ' ', 'X'], ['X', ' ', ' '], [' ', 'X',
- 'X']])
+ self.assertTrue(mim.matrix_ == [[' ', ' ', 'X'], ['X', ' ', ' '],
+ [' ', 'X', 'X']])
mim.compute_initial_allocation()
- self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X',
- 'X']])
+ self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'],
+ ['Y', 'X', 'X']])
def test_case2(self):
- labels = [MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']),
- MockLabel('l3', ['m1'])]
+ labels = [
+ MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']), MockLabel(
+ 'l3', ['m1'])
+ ]
duts = [MockDut('m1'), MockDut('m2'), MockDut('m3')]
mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.matrix_ == [[' ', ' ', 'X'], ['X', ' ', ' '], [' ', 'X',
- 'X']])
+ self.assertTrue(mim.matrix_ == [[' ', ' ', 'X'], ['X', ' ', ' '],
+ [' ', 'X', 'X']])
mim.compute_initial_allocation()
- self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X',
- 'X']])
+ self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'],
+ ['Y', 'X', 'X']])
def test_case3(self):
- labels = [MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']),
- MockLabel('l3', ['m1'])]
+ labels = [
+ MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']), MockLabel(
+ 'l3', ['m1'])
+ ]
duts = [MockDut('m1', labels[0]), MockDut('m2'), MockDut('m3')]
mim = MachineImageManager(labels, duts)
mim.compute_initial_allocation()
- self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X',
- 'X']])
+ self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'],
+ ['Y', 'X', 'X']])
def test_case4(self):
- labels = [MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']),
- MockLabel('l3', ['m1'])]
+ labels = [
+ MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']), MockLabel(
+ 'l3', ['m1'])
+ ]
duts = [MockDut('m1'), MockDut('m2', labels[0]), MockDut('m3')]
mim = MachineImageManager(labels, duts)
mim.compute_initial_allocation()
- self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X',
- 'X']])
+ self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'],
+ ['Y', 'X', 'X']])
def test_case5(self):
- labels = [MockLabel('l1', ['m3']), MockLabel('l2', ['m3']),
- MockLabel('l3', ['m1'])]
+ labels = [
+ MockLabel('l1', ['m3']), MockLabel('l2', ['m3']), MockLabel(
+ 'l3', ['m1'])
+ ]
duts = self.gen_duts_by_name('m1', 'm2', 'm3')
mim = MachineImageManager(labels, duts)
self.assertTrue(mim.compute_initial_allocation())
- self.assertTrue(mim.matrix_ == [['X', 'X', 'Y'], ['X', 'X', 'Y'], ['Y', 'X',
- 'X']])
+ self.assertTrue(mim.matrix_ == [['X', 'X', 'Y'], ['X', 'X', 'Y'],
+ ['Y', 'X', 'X']])
def test_2x2_with_allocation(self):
labels = [MockLabel('l0'), MockLabel('l1')]
@@ -193,29 +203,37 @@ class MachineImageManagerTester(unittest.TestCase):
self.assertTrue(mim.compute_initial_allocation())
def test_10x10_fully_random(self):
- inp = ['X . . . X X . X X .', 'X X . X . X . X X .',
- 'X X X . . X . X . X', 'X . X X . . X X . X',
- 'X X X X . . . X . .', 'X X . X . X . . X .',
- '. X . X . X X X . .', '. X . X X . X X . .',
- 'X X . . . X X X . .', '. X X X X . . . . X']
- output = ['X Y . . X X . X X .', 'X X Y X . X . X X .',
- 'X X X Y . X . X . X', 'X . X X Y . X X . X',
- 'X X X X . Y . X . .', 'X X . X . X Y . X .',
- 'Y X . X . X X X . .', '. X . X X . X X Y .',
- 'X X . . . X X X . Y', '. X X X X . . Y . X']
+ inp = [
+ 'X . . . X X . X X .', 'X X . X . X . X X .',
+ 'X X X . . X . X . X', 'X . X X . . X X . X',
+ 'X X X X . . . X . .', 'X X . X . X . . X .',
+ '. X . X . X X X . .', '. X . X X . X X . .',
+ 'X X . . . X X X . .', '. X X X X . . . . X'
+ ]
+ output = [
+ 'X Y . . X X . X X .', 'X X Y X . X . X X .',
+ 'X X X Y . X . X . X', 'X . X X Y . X X . X',
+ 'X X X X . Y . X . .', 'X X . X . X Y . X .',
+ 'Y X . X . X X X . .', '. X . X X . X X Y .',
+ 'X X . . . X X X . Y', '. X X X X . . Y . X'
+ ]
self.pattern_based_test(inp, output)
def test_10x10_fully_random2(self):
- inp = ['X . X . . X . X X X', 'X X X X X X . . X .',
- 'X . X X X X X . . X', 'X X X . X . X X . .',
- '. X . X . X X X X X', 'X X X X X X X . . X',
- 'X . X X X X X . . X', 'X X X . X X X X . .',
- 'X X X . . . X X X X', '. X X . X X X . X X']
- output = ['X . X Y . X . X X X', 'X X X X X X Y . X .',
- 'X Y X X X X X . . X', 'X X X . X Y X X . .',
- '. X Y X . X X X X X', 'X X X X X X X Y . X',
- 'X . X X X X X . Y X', 'X X X . X X X X . Y',
- 'X X X . Y . X X X X', 'Y X X . X X X . X X']
+ inp = [
+ 'X . X . . X . X X X', 'X X X X X X . . X .',
+ 'X . X X X X X . . X', 'X X X . X . X X . .',
+ '. X . X . X X X X X', 'X X X X X X X . . X',
+ 'X . X X X X X . . X', 'X X X . X X X X . .',
+ 'X X X . . . X X X X', '. X X . X X X . X X'
+ ]
+ output = [
+ 'X . X Y . X . X X X', 'X X X X X X Y . X .',
+ 'X Y X X X X X . . X', 'X X X . X Y X X . .',
+ '. X Y X . X X X X X', 'X X X X X X X Y . X',
+ 'X . X X X X X . Y X', 'X X X . X X X X . Y',
+ 'X X X . Y . X X X X', 'Y X X . X X X . X X'
+ ]
self.pattern_based_test(inp, output)
def test_3x4_with_allocation(self):
@@ -273,7 +291,7 @@ class MachineImageManagerTester(unittest.TestCase):
l1 Y X X
l2 Y X X
- """
+ """
inp = ['. X X', '. X X', '. X X']
output = ['Y X X', 'Y X X', 'Y X X']
diff --git a/crosperf/machine_manager.py b/crosperf/machine_manager.py
index 2fdf141b..b9dda148 100644
--- a/crosperf/machine_manager.py
+++ b/crosperf/machine_manager.py
@@ -78,9 +78,8 @@ class CrosMachine(object):
def IsReachable(self):
command = 'ls'
- ret = self.ce.CrosRunCommand(command,
- machine=self.name,
- chromeos_root=self.chromeos_root)
+ ret = self.ce.CrosRunCommand(
+ command, machine=self.name, chromeos_root=self.chromeos_root)
if ret:
return False
return True
@@ -121,9 +120,7 @@ class CrosMachine(object):
#meminfo, the assert does not catch it either
command = 'cat /proc/meminfo'
ret, self.meminfo, _ = self.ce.CrosRunCommandWOutput(
- command,
- machine=self.name,
- chromeos_root=self.chromeos_root)
+ command, machine=self.name, chromeos_root=self.chromeos_root)
assert ret == 0, 'Could not get meminfo from machine: %s' % self.name
if ret == 0:
self._ParseMemoryInfo()
@@ -131,9 +128,7 @@ class CrosMachine(object):
def _GetCPUInfo(self):
command = 'cat /proc/cpuinfo'
ret, self.cpuinfo, _ = self.ce.CrosRunCommandWOutput(
- command,
- machine=self.name,
- chromeos_root=self.chromeos_root)
+ command, machine=self.name, chromeos_root=self.chromeos_root)
assert ret == 0, 'Could not get cpuinfo from machine: %s' % self.name
def _ComputeMachineChecksumString(self):
@@ -153,9 +148,7 @@ class CrosMachine(object):
def _GetMachineID(self):
command = 'dump_vpd_log --full --stdout'
_, if_out, _ = self.ce.CrosRunCommandWOutput(
- command,
- machine=self.name,
- chromeos_root=self.chromeos_root)
+ command, machine=self.name, chromeos_root=self.chromeos_root)
b = if_out.splitlines()
a = [l for l in b if 'Product' in l]
if len(a):
@@ -163,9 +156,7 @@ class CrosMachine(object):
return
command = 'ifconfig'
_, if_out, _ = self.ce.CrosRunCommandWOutput(
- command,
- machine=self.name,
- chromeos_root=self.chromeos_root)
+ command, machine=self.name, chromeos_root=self.chromeos_root)
b = if_out.splitlines()
a = [l for l in b if 'HWaddr' in l]
if len(a):
@@ -222,8 +213,8 @@ class MachineManager(object):
self.logger = lgr or logger.GetLogger()
if self.locks_dir and not os.path.isdir(self.locks_dir):
- raise MissingLocksDirectory('Cannot access locks directory: %s' %
- self.locks_dir)
+ raise MissingLocksDirectory(
+ 'Cannot access locks directory: %s' % self.locks_dir)
self._initialized_machines = []
self.chromeos_root = chromeos_root
@@ -242,12 +233,10 @@ class MachineManager(object):
cmd = '/opt/google/chrome/chrome --version'
ret, version, _ = self.ce.CrosRunCommandWOutput(
- cmd,
- machine=machine.name,
- chromeos_root=self.chromeos_root)
+ cmd, machine=machine.name, chromeos_root=self.chromeos_root)
if ret != 0:
- raise CrosCommandError("Couldn't get Chrome version from %s." %
- machine.name)
+ raise CrosCommandError(
+ "Couldn't get Chrome version from %s." % machine.name)
if ret != 0:
version = ''
@@ -261,11 +250,13 @@ class MachineManager(object):
chromeos_root = label.chromeos_root
if not chromeos_root:
chromeos_root = self.chromeos_root
- image_chromeos_args = [image_chromeos.__file__, '--no_lock',
- '--chromeos_root=%s' % chromeos_root,
- '--image=%s' % label.chromeos_image,
- '--image_args=%s' % label.image_args, '--remote=%s' %
- machine.name, '--logging_level=%s' % self.log_level]
+ image_chromeos_args = [
+ image_chromeos.__file__, '--no_lock',
+ '--chromeos_root=%s' % chromeos_root,
+ '--image=%s' % label.chromeos_image,
+ '--image_args=%s' % label.image_args, '--remote=%s' % machine.name,
+ '--logging_level=%s' % self.log_level
+ ]
if label.board:
image_chromeos_args.append('--board=%s' % label.board)
@@ -287,9 +278,8 @@ class MachineManager(object):
cmd = 'reboot && exit'
if self.log_level != 'verbose':
self.logger.LogOutput('reboot & exit.')
- self.ce.CrosRunCommand(cmd,
- machine=machine.name,
- chromeos_root=self.chromeos_root)
+ self.ce.CrosRunCommand(
+ cmd, machine=machine.name, chromeos_root=self.chromeos_root)
time.sleep(60)
if self.log_level != 'verbose':
self.logger.LogOutput('Pushing image onto machine.')
@@ -349,8 +339,8 @@ class MachineManager(object):
locked = True
if self.locks_dir:
locked = file_lock_machine.Machine(cros_machine.name,
- self.locks_dir).Lock(True,
- sys.argv[0])
+ self.locks_dir).Lock(
+ True, sys.argv[0])
if locked:
self._machines.append(cros_machine)
command = 'cat %s' % CHECKSUM_FILE
@@ -371,8 +361,8 @@ class MachineManager(object):
if self.log_level != 'verbose':
self.logger.LogOutput('Setting up remote access to %s' % machine_name)
- self.logger.LogOutput('Checking machine characteristics for %s' %
- machine_name)
+ self.logger.LogOutput(
+ 'Checking machine characteristics for %s' % machine_name)
cm = CrosMachine(machine_name, self.chromeos_root, self.log_level)
if cm.machine_checksum:
self._all_machines.append(cm)
@@ -411,17 +401,19 @@ class MachineManager(object):
self.acquire_timeout -= sleep_time
if self.acquire_timeout < 0:
- self.logger.LogFatal('Could not acquire any of the '
- "following machines: '%s'" %
- ', '.join(machine.name for machine in machines))
+ self.logger.LogFatal(
+ 'Could not acquire any of the '
+ "following machines: '%s'" % ', '.join(machine.name
+ for machine in machines))
### for m in self._machines:
### if (m.locked and time.time() - m.released_time < 10 and
### m.checksum == image_checksum):
### return None
- unlocked_machines = [machine
- for machine in self.GetAvailableMachines(label)
- if not machine.locked]
+ unlocked_machines = [
+ machine for machine in self.GetAvailableMachines(label)
+ if not machine.locked
+ ]
for m in unlocked_machines:
if image_checksum and m.checksum == image_checksum:
m.locked = True
@@ -651,8 +643,8 @@ class MockMachineManager(MachineManager):
"""Mock machine manager class."""
def __init__(self, chromeos_root, acquire_timeout, log_level, locks_dir):
- super(MockMachineManager, self).__init__(
- chromeos_root, acquire_timeout, log_level, locks_dir)
+ super(MockMachineManager, self).__init__(chromeos_root, acquire_timeout,
+ log_level, locks_dir)
def _TryToLockMachine(self, cros_machine):
self._machines.append(cros_machine)
@@ -663,8 +655,8 @@ class MockMachineManager(MachineManager):
for m in self._all_machines:
assert m.name != machine_name, 'Tried to double-add %s' % machine_name
cm = MockCrosMachine(machine_name, self.chromeos_root, self.log_level)
- assert cm.machine_checksum, ('Could not find checksum for machine %s' %
- machine_name)
+ assert cm.machine_checksum, (
+ 'Could not find checksum for machine %s' % machine_name)
# In Original MachineManager, the test is 'if cm.machine_checksum:' - if a
# machine is unreachable, then its machine_checksum is None. Here we
# cannot do this, because machine_checksum is always faked, so we directly
diff --git a/crosperf/machine_manager_unittest.py b/crosperf/machine_manager_unittest.py
index 8652f171..b267d698 100755
--- a/crosperf/machine_manager_unittest.py
+++ b/crosperf/machine_manager_unittest.py
@@ -41,22 +41,21 @@ class MyMachineManager(machine_manager.MachineManager):
assert m.name != machine_name, 'Tried to double-add %s' % machine_name
cm = machine_manager.MockCrosMachine(machine_name, self.chromeos_root,
'average')
- assert cm.machine_checksum, ('Could not find checksum for machine %s' %
- machine_name)
+ assert cm.machine_checksum, (
+ 'Could not find checksum for machine %s' % machine_name)
self._all_machines.append(cm)
CHROMEOS_ROOT = '/tmp/chromeos-root'
MACHINE_NAMES = ['lumpy1', 'lumpy2', 'lumpy3', 'daisy1', 'daisy2']
-LABEL_LUMPY = label.MockLabel('lumpy', 'lumpy_chromeos_image', 'autotest_dir',
- CHROMEOS_ROOT, 'lumpy',
- ['lumpy1', 'lumpy2', 'lumpy3', 'lumpy4'], '', '',
- False, 'average,'
- 'gcc', None)
+LABEL_LUMPY = label.MockLabel(
+ 'lumpy', 'lumpy_chromeos_image', 'autotest_dir', CHROMEOS_ROOT, 'lumpy',
+ ['lumpy1', 'lumpy2', 'lumpy3', 'lumpy4'], '', '', False, 'average,'
+ 'gcc', None)
LABEL_MIX = label.MockLabel('mix', 'chromeos_image', 'autotest_dir',
CHROMEOS_ROOT, 'mix',
- ['daisy1', 'daisy2', 'lumpy3', 'lumpy4'], '', '',
- False, 'average', 'gcc', None)
+ ['daisy1', 'daisy2', 'lumpy3',
+ 'lumpy4'], '', '', False, 'average', 'gcc', None)
class MachineManagerTest(unittest.TestCase):
@@ -85,10 +84,9 @@ class MachineManagerTest(unittest.TestCase):
def setUp(self, mock_isdir):
mock_isdir.return_value = True
- self.mm = machine_manager.MachineManager('/usr/local/chromeos', 0,
- 'average', None,
- self.mock_cmd_exec,
- self.mock_logger)
+ self.mm = machine_manager.MachineManager(
+ '/usr/local/chromeos', 0, 'average', None, self.mock_cmd_exec,
+ self.mock_logger)
self.mock_lumpy1.name = 'lumpy1'
self.mock_lumpy2.name = 'lumpy2'
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index 29e118e8..04e6590b 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -12,7 +12,6 @@ import pickle
import re
import tempfile
import json
-import sys
from cros_utils import command_executer
from cros_utils import misc
@@ -67,9 +66,8 @@ class Result(object):
if not os.path.isdir(dest_dir):
command = 'mkdir -p %s' % dest_dir
self.ce.RunCommand(command)
- dest_file = os.path.join(dest_dir,
- ('%s.%s' % (os.path.basename(file_to_copy),
- file_index)))
+ dest_file = os.path.join(
+ dest_dir, ('%s.%s' % (os.path.basename(file_to_copy), file_index)))
ret = self.ce.CopyFiles(file_to_copy, dest_file, recursive=False)
if ret:
raise IOError('Could not copy results file: %s' % file_to_copy)
@@ -230,10 +228,10 @@ class Result(object):
perf_data_file)
perf_report_file = '%s.report' % perf_data_file
if os.path.exists(perf_report_file):
- raise RuntimeError('Perf report file already exists: %s' %
- perf_report_file)
- chroot_perf_report_file = misc.GetInsideChrootPath(self.chromeos_root,
- perf_report_file)
+ raise RuntimeError(
+ 'Perf report file already exists: %s' % perf_report_file)
+ chroot_perf_report_file = misc.GetInsideChrootPath(
+ self.chromeos_root, perf_report_file)
perf_path = os.path.join(self.chromeos_root, 'chroot', 'usr/bin/perf')
perf_file = '/usr/sbin/perf'
@@ -366,8 +364,8 @@ class Result(object):
self.retval = pickle.load(f)
# Untar the tarball to a temporary directory
- self.temp_dir = tempfile.mkdtemp(
- dir=os.path.join(self.chromeos_root, 'chroot', 'tmp'))
+ self.temp_dir = tempfile.mkdtemp(dir=os.path.join(self.chromeos_root,
+ 'chroot', 'tmp'))
command = ('cd %s && tar xf %s' %
(self.temp_dir, os.path.join(cache_dir, AUTOTEST_TARBALL)))
@@ -439,8 +437,8 @@ class Result(object):
if ret:
command = 'rm -rf {0}'.format(temp_dir)
self.ce.RunCommand(command)
- raise RuntimeError('Could not move dir %s to dir %s' %
- (temp_dir, cache_dir))
+ raise RuntimeError('Could not move dir %s to dir %s' % (temp_dir,
+ cache_dir))
@classmethod
def CreateFromRun(cls,
diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py
index 9e97c9b1..a2480d21 100755
--- a/crosperf/results_cache_unittest.py
+++ b/crosperf/results_cache_unittest.py
@@ -268,10 +268,10 @@ class ResultTest(unittest.TestCase):
self.result.CopyResultsTo('/tmp/results/')
self.assertEqual(mockCopyFilesTo.call_count, 2)
self.assertEqual(len(mockCopyFilesTo.call_args_list), 2)
- self.assertEqual(mockCopyFilesTo.call_args_list[0][0],
- ('/tmp/results/', perf_data_files))
- self.assertEqual(mockCopyFilesTo.call_args_list[1][0],
- ('/tmp/results/', perf_report_files))
+ self.assertEqual(mockCopyFilesTo.call_args_list[0][0], ('/tmp/results/',
+ perf_data_files))
+ self.assertEqual(mockCopyFilesTo.call_args_list[1][0], ('/tmp/results/',
+ perf_report_files))
def test_get_new_keyvals(self):
kv_dict = {}
@@ -436,8 +436,10 @@ class ResultTest(unittest.TestCase):
self.assertEqual(mock_runcmd.call_args_list[0][0],
('cp -r /tmp/test_that_resultsNmq/* %s' % TMP_DIR1,))
self.assertEqual(mock_chrootruncmd.call_count, 1)
- self.assertEqual(mock_chrootruncmd.call_args_list[0][0], (
- '/tmp', ('python generate_test_report --no-color --csv %s') % TMP_DIR1))
+ self.assertEqual(
+ mock_chrootruncmd.call_args_list[0][0],
+ ('/tmp',
+ ('python generate_test_report --no-color --csv %s') % TMP_DIR1))
self.assertEqual(mock_getpath.call_count, 1)
self.assertEqual(mock_mkdtemp.call_count, 1)
self.assertEqual(res, {'Total': [10, 'score'], 'first_time': [680, 'ms']})
@@ -899,9 +901,8 @@ class TelemetryResultTest(unittest.TestCase):
self.mock_label = MockLabel('mock_label', 'chromeos_image', 'autotest_dir',
'/tmp', 'lumpy', 'remote', 'image_args',
'cache_dir', 'average', 'gcc', None)
- self.mock_machine = machine_manager.MockCrosMachine('falco.cros',
- '/tmp/chromeos',
- 'average')
+ self.mock_machine = machine_manager.MockCrosMachine(
+ 'falco.cros', '/tmp/chromeos', 'average')
def test_populate_from_run(self):
@@ -979,12 +980,10 @@ class ResultsCacheTest(unittest.TestCase):
def FakeGetMachines(label):
if label:
pass
- m1 = machine_manager.MockCrosMachine('lumpy1.cros',
- self.results_cache.chromeos_root,
- 'average')
- m2 = machine_manager.MockCrosMachine('lumpy2.cros',
- self.results_cache.chromeos_root,
- 'average')
+ m1 = machine_manager.MockCrosMachine(
+ 'lumpy1.cros', self.results_cache.chromeos_root, 'average')
+ m2 = machine_manager.MockCrosMachine(
+ 'lumpy2.cros', self.results_cache.chromeos_root, 'average')
return [m1, m2]
mock_checksum.return_value = 'FakeImageChecksumabc123'
@@ -1026,12 +1025,10 @@ class ResultsCacheTest(unittest.TestCase):
def FakeGetMachines(label):
if label:
pass
- m1 = machine_manager.MockCrosMachine('lumpy1.cros',
- self.results_cache.chromeos_root,
- 'average')
- m2 = machine_manager.MockCrosMachine('lumpy2.cros',
- self.results_cache.chromeos_root,
- 'average')
+ m1 = machine_manager.MockCrosMachine(
+ 'lumpy1.cros', self.results_cache.chromeos_root, 'average')
+ m2 = machine_manager.MockCrosMachine(
+ 'lumpy2.cros', self.results_cache.chromeos_root, 'average')
return [m1, m2]
mock_checksum.return_value = 'FakeImageChecksumabc123'
diff --git a/crosperf/results_organizer.py b/crosperf/results_organizer.py
index 097c744d..62972027 100644
--- a/crosperf/results_organizer.py
+++ b/crosperf/results_organizer.py
@@ -69,7 +69,7 @@ def _GetNonDupLabel(max_dup, runs):
new_run[key] = value
else:
new_key, index_str = match.groups()
- added_runs[int(index_str)-1][new_key] = str(value)
+ added_runs[int(index_str) - 1][new_key] = str(value)
new_runs.append(new_run)
new_runs += added_runs
return new_runs
@@ -135,6 +135,7 @@ def _MakeOrganizeResultOutline(benchmark_runs, labels):
result[name] = _Repeat(make_dicts, len(labels))
return result
+
def OrganizeResults(benchmark_runs, labels, benchmarks=None, json_report=False):
"""Create a dict from benchmark_runs.
@@ -181,9 +182,10 @@ def OrganizeResults(benchmark_runs, labels, benchmarks=None, json_report=False):
if len(cur_dict) == 1 and cur_dict['retval'] == 0:
cur_dict['retval'] = 1
# TODO: This output should be sent via logger.
- print("WARNING: Test '%s' appears to have succeeded but returned"
- ' no results.' % benchmark.name,
- file=sys.stderr)
+ print(
+ "WARNING: Test '%s' appears to have succeeded but returned"
+ ' no results.' % benchmark.name,
+ file=sys.stderr)
if json_report and benchmark_run.machine:
cur_dict['machine'] = benchmark_run.machine.name
cur_dict['machine_checksum'] = benchmark_run.machine.checksum
diff --git a/crosperf/results_organizer_unittest.py b/crosperf/results_organizer_unittest.py
index ccf02973..25783a65 100755
--- a/crosperf/results_organizer_unittest.py
+++ b/crosperf/results_organizer_unittest.py
@@ -3,12 +3,11 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Testing of ResultsOrganizer
We create some labels, benchmark_runs and then create a ResultsOrganizer,
after that, we compare the result of ResultOrganizer.
- """
+"""
from __future__ import print_function
@@ -20,55 +19,99 @@ from results_organizer import OrganizeResults
import mock_instance
-result = {'benchmark1': [[{'': 'PASS',
- 'bool': 'True',
- 'milliseconds_1': '1',
- 'milliseconds_2': '8',
- 'milliseconds_3': '9.2',
- 'ms_1': '2.1',
- 'total': '5'}, {'test': '2'}, {'test': '4'},
- {'': 'PASS',
- 'bool': 'FALSE',
- 'milliseconds_1': '3',
- 'milliseconds_2': '5',
- 'ms_1': '2.2',
- 'total': '6'}, {'test': '3'}, {'test': '4'}],
- [{'': 'PASS',
- 'bool': 'FALSE',
- 'milliseconds_4': '30',
- 'milliseconds_5': '50',
- 'ms_1': '2.23',
- 'total': '6'}, {'test': '5'}, {'test': '4'},
- {'': 'PASS',
- 'bool': 'FALSE',
- 'milliseconds_1': '3',
- 'milliseconds_6': '7',
- 'ms_1': '2.3',
- 'total': '7'}, {'test': '2'}, {'test': '6'}]],
- 'benchmark2': [[{'': 'PASS',
- 'bool': 'TRUE',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2.3',
- 'total': '7'}, {'test': '2'}, {'test': '6'},
- {'': 'PASS',
- 'bool': 'TRUE',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2.2',
- 'total': '7'}, {'test': '2'}, {'test': '2'}],
- [{'': 'PASS',
- 'bool': 'TRUE',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2',
- 'total': '7'}, {'test': '2'}, {'test': '4'},
- {'': 'PASS',
- 'bool': 'TRUE',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '1',
- 'total': '7'}, {'test': '1'}, {'test': '6'}]]}
+result = {
+ 'benchmark1': [[{
+ '': 'PASS',
+ 'bool': 'True',
+ 'milliseconds_1': '1',
+ 'milliseconds_2': '8',
+ 'milliseconds_3': '9.2',
+ 'ms_1': '2.1',
+ 'total': '5'
+ }, {
+ 'test': '2'
+ }, {
+ 'test': '4'
+ }, {
+ '': 'PASS',
+ 'bool': 'FALSE',
+ 'milliseconds_1': '3',
+ 'milliseconds_2': '5',
+ 'ms_1': '2.2',
+ 'total': '6'
+ }, {
+ 'test': '3'
+ }, {
+ 'test': '4'
+ }], [{
+ '': 'PASS',
+ 'bool': 'FALSE',
+ 'milliseconds_4': '30',
+ 'milliseconds_5': '50',
+ 'ms_1': '2.23',
+ 'total': '6'
+ }, {
+ 'test': '5'
+ }, {
+ 'test': '4'
+ }, {
+ '': 'PASS',
+ 'bool': 'FALSE',
+ 'milliseconds_1': '3',
+ 'milliseconds_6': '7',
+ 'ms_1': '2.3',
+ 'total': '7'
+ }, {
+ 'test': '2'
+ }, {
+ 'test': '6'
+ }]],
+ 'benchmark2': [[{
+ '': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2.3',
+ 'total': '7'
+ }, {
+ 'test': '2'
+ }, {
+ 'test': '6'
+ }, {
+ '': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2.2',
+ 'total': '7'
+ }, {
+ 'test': '2'
+ }, {
+ 'test': '2'
+ }], [{
+ '': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2',
+ 'total': '7'
+ }, {
+ 'test': '2'
+ }, {
+ 'test': '4'
+ }, {
+ '': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '1',
+ 'total': '7'
+ }, {
+ 'test': '1'
+ }, {
+ 'test': '6'
+ }]]
+}
class ResultOrganizerTest(unittest.TestCase):
diff --git a/crosperf/results_report.py b/crosperf/results_report.py
index 7a465349..fac044fb 100644
--- a/crosperf/results_report.py
+++ b/crosperf/results_report.py
@@ -92,9 +92,13 @@ def _AppendUntilLengthIs(gen, the_list, target_len):
def _FilterPerfReport(event_threshold, report):
"""Filters out entries with `< event_threshold` percent in a perf report."""
+
def filter_dict(m):
- return {fn_name: pct for fn_name, pct in m.iteritems()
- if pct >= event_threshold}
+ return {
+ fn_name: pct
+ for fn_name, pct in m.iteritems() if pct >= event_threshold
+ }
+
return {event: filter_dict(m) for event, m in report.iteritems()}
@@ -109,8 +113,11 @@ class _PerfTable(object):
percentage of time spent in function_name).
"""
- def __init__(self, benchmark_names_and_iterations, label_names,
- read_perf_report, event_threshold=None):
+ def __init__(self,
+ benchmark_names_and_iterations,
+ label_names,
+ read_perf_report,
+ event_threshold=None):
"""Constructor.
read_perf_report is a function that takes a label name, benchmark name, and
@@ -143,8 +150,8 @@ class _PerfTable(object):
def _GetResultsTableHeader(ben_name, iterations):
- benchmark_info = ('Benchmark: {0}; Iterations: {1}'
- .format(ben_name, iterations))
+ benchmark_info = ('Benchmark: {0}; Iterations: {1}'.format(
+ ben_name, iterations))
cell = Cell()
cell.string_value = benchmark_info
cell.header = True
@@ -157,8 +164,9 @@ def _ParseColumn(columns, iteration):
if column.result.__class__.__name__ != 'RawResult':
new_column.append(column)
else:
- new_column.extend(Column(LiteralResult(i), Format(), str(i + 1))
- for i in xrange(iteration))
+ new_column.extend(
+ Column(LiteralResult(i), Format(), str(i + 1))
+ for i in xrange(iteration))
return new_column
@@ -199,9 +207,10 @@ def _GetPerfTables(benchmark_results, columns, table_type):
benchmark_data = p_table.perf_data[benchmark]
table = []
for event in benchmark_data:
- tg = TableGenerator(benchmark_data[event],
- benchmark_results.label_names,
- sort=TableGenerator.SORT_BY_VALUES_DESC)
+ tg = TableGenerator(
+ benchmark_data[event],
+ benchmark_results.label_names,
+ sort=TableGenerator.SORT_BY_VALUES_DESC)
table = tg.GetTable(ResultsReport.PERF_ROWS)
parsed_columns = _ParseColumn(columns, iterations)
tf = TableFormatter(table, parsed_columns)
@@ -227,22 +236,24 @@ class ResultsReport(object):
return get_tables(self.benchmark_results, columns, table_type)
def GetFullTables(self, perf=False):
- columns = [Column(RawResult(), Format()),
- Column(MinResult(), Format()),
- Column(MaxResult(), Format()),
- Column(AmeanResult(), Format()),
- Column(StdResult(), Format(), 'StdDev'),
- Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
- Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
- Column(PValueResult(), PValueFormat(), 'p-value')]
+ columns = [
+ Column(RawResult(), Format()), Column(MinResult(), Format()), Column(
+ MaxResult(), Format()), Column(AmeanResult(), Format()), Column(
+ StdResult(), Format(), 'StdDev'),
+ Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'), Column(
+ GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'), Column(
+ PValueResult(), PValueFormat(), 'p-value')
+ ]
return self._GetTablesWithColumns(columns, 'full', perf)
def GetSummaryTables(self, perf=False):
- columns = [Column(AmeanResult(), Format()),
- Column(StdResult(), Format(), 'StdDev'),
- Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
- Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
- Column(PValueResult(), PValueFormat(), 'p-value')]
+ columns = [
+ Column(AmeanResult(), Format()), Column(StdResult(), Format(),
+ 'StdDev'),
+ Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'), Column(
+ GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'), Column(
+ PValueResult(), PValueFormat(), 'p-value')
+ ]
return self._GetTablesWithColumns(columns, 'summary', perf)
@@ -299,12 +310,16 @@ class TextResultsReport(ResultsReport):
def GetStatusTable(self):
"""Generate the status table by the tabulator."""
table = [['', '']]
- columns = [Column(LiteralResult(iteration=0), Format(), 'Status'),
- Column(LiteralResult(iteration=1), Format(), 'Failing Reason')]
+ columns = [
+ Column(LiteralResult(iteration=0), Format(), 'Status'), Column(
+ LiteralResult(iteration=1), Format(), 'Failing Reason')
+ ]
for benchmark_run in self.experiment.benchmark_runs:
- status = [benchmark_run.name, [benchmark_run.timeline.GetLastEvent(),
- benchmark_run.failure_reason]]
+ status = [
+ benchmark_run.name,
+ [benchmark_run.timeline.GetLastEvent(), benchmark_run.failure_reason]
+ ]
table.append(status)
cell_table = TableFormatter(table, columns).GetCellTable('status')
return [cell_table]
@@ -316,7 +331,7 @@ class TextResultsReport(ResultsReport):
sections = []
if experiment is not None:
- title_contents = "Results report for '%s'" % (experiment.name, )
+ title_contents = "Results report for '%s'" % (experiment.name,)
else:
title_contents = 'Results report'
sections.append(self._MakeTitle(title_contents))
@@ -348,8 +363,10 @@ def _GetHTMLCharts(label_names, test_results):
# Fun fact: label_names is actually *entirely* useless as a param, since we
# never add headers. We still need to pass it anyway.
table = TableGenerator(runs, label_names).GetTable()
- columns = [Column(AmeanResult(), Format()), Column(MinResult(), Format()),
- Column(MaxResult(), Format())]
+ columns = [
+ Column(AmeanResult(), Format()), Column(MinResult(), Format()), Column(
+ MaxResult(), Format())
+ ]
tf = TableFormatter(table, columns)
data_table = tf.GetCellTable('full', headers=False)
@@ -365,10 +382,10 @@ def _GetHTMLCharts(label_names, test_results):
chart.AddSeries('Max', 'line', 'black')
cur_index = 1
for label in label_names:
- chart.AddRow([label,
- cur_row_data[cur_index].value,
- cur_row_data[cur_index + 1].value,
- cur_row_data[cur_index + 2].value])
+ chart.AddRow([
+ label, cur_row_data[cur_index].value,
+ cur_row_data[cur_index + 1].value, cur_row_data[cur_index + 2].value
+ ])
if isinstance(cur_row_data[cur_index].value, str):
chart = None
break
@@ -387,8 +404,8 @@ class HTMLResultsReport(ResultsReport):
@staticmethod
def FromExperiment(experiment):
- return HTMLResultsReport(BenchmarkResults.FromExperiment(experiment),
- experiment=experiment)
+ return HTMLResultsReport(
+ BenchmarkResults.FromExperiment(experiment), experiment=experiment)
def GetReport(self):
label_names = self.benchmark_results.label_names
@@ -404,13 +421,14 @@ class HTMLResultsReport(ResultsReport):
if self.experiment is not None:
experiment_file = self.experiment.experiment_file
# Use kwargs for sanity, and so that testing is a bit easier.
- return templates.GenerateHTMLPage(perf_table=perf_table,
- chart_js=chart_javascript,
- summary_table=summary_table,
- print_table=_PrintTable,
- chart_divs=chart_divs,
- full_table=full_table,
- experiment_file=experiment_file)
+ return templates.GenerateHTMLPage(
+ perf_table=perf_table,
+ chart_js=chart_javascript,
+ summary_table=summary_table,
+ print_table=_PrintTable,
+ chart_divs=chart_divs,
+ full_table=full_table,
+ experiment_file=experiment_file)
def ParseStandardPerfReport(report_data):
@@ -446,12 +464,12 @@ def ParseStandardPerfReport(report_data):
#
# Note that we're looking at stripped lines, so there is no space at the
# start.
- perf_regex = re.compile(r'^(\d+(?:.\d*)?)%' # N.NN%
- r'\s*\d+' # samples count (ignored)
- r'\s*\S+' # command (ignored)
- r'\s*\S+' # shared_object (ignored)
- r'\s*\[.\]' # location (ignored)
- r'\s*(\S.+)' # function
+ perf_regex = re.compile(r'^(\d+(?:.\d*)?)%' # N.NN%
+ r'\s*\d+' # samples count (ignored)
+ r'\s*\S+' # command (ignored)
+ r'\s*\S+' # shared_object (ignored)
+ r'\s*\[.\]' # location (ignored)
+ r'\s*(\S.+)' # function
)
stripped_lines = (l.strip() for l in report_data)
@@ -511,17 +529,23 @@ def _ReadExperimentPerfReport(results_directory, label_name, benchmark_name,
# Split out so that testing (specifically: mocking) is easier
def _ExperimentToKeyvals(experiment, for_json_report):
"""Converts an experiment to keyvals."""
- return OrganizeResults(experiment.benchmark_runs, experiment.labels,
- json_report=for_json_report)
+ return OrganizeResults(
+ experiment.benchmark_runs, experiment.labels, json_report=for_json_report)
class BenchmarkResults(object):
"""The minimum set of fields that any ResultsReport will take."""
- def __init__(self, label_names, benchmark_names_and_iterations, run_keyvals,
+
+ def __init__(self,
+ label_names,
+ benchmark_names_and_iterations,
+ run_keyvals,
read_perf_report=None):
if read_perf_report is None:
+
def _NoPerfReport(*_args, **_kwargs):
return {}
+
read_perf_report = _NoPerfReport
self.label_names = label_names
@@ -557,10 +581,15 @@ def _Unlist(l):
"""If l is a list, extracts the first element of l. Otherwise, returns l."""
return l[0] if isinstance(l, list) else l
+
class JSONResultsReport(ResultsReport):
"""Class that generates JSON reports for experiments."""
- def __init__(self, benchmark_results, date=None, time=None, experiment=None,
+ def __init__(self,
+ benchmark_results,
+ date=None,
+ time=None,
+ experiment=None,
json_args=None):
"""Construct a JSONResultsReport.
@@ -589,8 +618,8 @@ class JSONResultsReport(ResultsReport):
@staticmethod
def FromExperiment(experiment, date=None, time=None, json_args=None):
- benchmark_results = BenchmarkResults.FromExperiment(experiment,
- for_json_report=True)
+ benchmark_results = BenchmarkResults.FromExperiment(
+ experiment, for_json_report=True)
return JSONResultsReport(benchmark_results, date, time, experiment,
json_args)
diff --git a/crosperf/results_report_templates.py b/crosperf/results_report_templates.py
index 827649fd..15ce5827 100644
--- a/crosperf/results_report_templates.py
+++ b/crosperf/results_report_templates.py
@@ -14,6 +14,7 @@ _TabMenuTemplate = Template("""
<a href="javascript:switchTab('$table_name', 'tsv')">TSV</a>
</div>""")
+
def _GetTabMenuHTML(table_name):
# N.B. cgi.escape does some very basic HTML escaping. Nothing more.
escaped = cgi.escape(table_name, quote=True)
@@ -28,10 +29,11 @@ _ExperimentFileHTML = """
</div>
"""
+
def _GetExperimentFileHTML(experiment_file_text):
if not experiment_file_text:
return ''
- return _ExperimentFileHTML % (cgi.escape(experiment_file_text), )
+ return _ExperimentFileHTML % (cgi.escape(experiment_file_text),)
_ResultsSectionHTML = Template("""
@@ -46,16 +48,17 @@ _ResultsSectionHTML = Template("""
</div>
""")
+
def _GetResultsSectionHTML(print_table, table_name, data):
first_word = table_name.strip().split()[0]
short_name = first_word.lower()
- return _ResultsSectionHTML.substitute(sect_name=table_name,
- html_table=print_table(data, 'HTML'),
- text_table=print_table(data, 'PLAIN'),
- tsv_table=print_table(data, 'TSV'),
- tab_menu=_GetTabMenuHTML(short_name),
- short_name=short_name)
-
+ return _ResultsSectionHTML.substitute(
+ sect_name=table_name,
+ html_table=print_table(data, 'HTML'),
+ text_table=print_table(data, 'PLAIN'),
+ tsv_table=print_table(data, 'TSV'),
+ tab_menu=_GetTabMenuHTML(short_name),
+ short_name=short_name)
_MainHTML = Template("""
@@ -166,6 +169,7 @@ _MainHTML = Template("""
</html>
""")
+
# It's a bit ugly that we take some HTML things, and some non-HTML things, but I
# need to balance prettiness with time spent making things pretty.
def GenerateHTMLPage(perf_table, chart_js, summary_table, print_table,
@@ -189,8 +193,11 @@ def GenerateHTMLPage(perf_table, chart_js, summary_table, print_table,
full_table_html = _GetResultsSectionHTML(print_table, 'Full Table',
full_table)
experiment_file_html = _GetExperimentFileHTML(experiment_file)
- return _MainHTML.substitute(perf_init=perf_init, chart_js=chart_js,
- summary_table=summary_table_html,
- perf_html=perf_html, chart_divs=chart_divs,
- full_table=full_table_html,
- experiment_file=experiment_file_html)
+ return _MainHTML.substitute(
+ perf_init=perf_init,
+ chart_js=chart_js,
+ summary_table=summary_table_html,
+ perf_html=perf_html,
+ chart_divs=chart_divs,
+ full_table=full_table_html,
+ experiment_file=experiment_file_html)
diff --git a/crosperf/results_report_unittest.py b/crosperf/results_report_unittest.py
index ed5c74fa..2a23aa78 100755
--- a/crosperf/results_report_unittest.py
+++ b/crosperf/results_report_unittest.py
@@ -3,7 +3,6 @@
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Unittest for the results reporter."""
from __future__ import division
@@ -50,10 +49,11 @@ class FreeFunctionsTest(unittest.TestCase):
'/chromiumos_test_image.bin'
buildbot_img = buildbot_case.split('/chroot/tmp')[1]
- self.assertEqual(ParseChromeosImage(buildbot_case),
- ('R02-1.0', buildbot_img))
- self.assertEqual(ParseChromeosImage(os.path.dirname(buildbot_case)),
- ('', os.path.dirname(buildbot_img)))
+ self.assertEqual(
+ ParseChromeosImage(buildbot_case), ('R02-1.0', buildbot_img))
+ self.assertEqual(
+ ParseChromeosImage(os.path.dirname(buildbot_case)),
+ ('', os.path.dirname(buildbot_img)))
# Ensure we don't act completely insanely given a few mildly insane paths.
fun_case = '/chromiumos_test_image.bin'
@@ -66,6 +66,8 @@ class FreeFunctionsTest(unittest.TestCase):
# There are many ways for this to be done better, but the linter complains
# about all of them (that I can think of, at least).
_fake_path_number = [0]
+
+
def FakePath(ext):
"""Makes a unique path that shouldn't exist on the host system.
@@ -73,7 +75,7 @@ def FakePath(ext):
error message, it may be easier to track it to its source.
"""
_fake_path_number[0] += 1
- prefix = '/tmp/should/not/exist/%d/' % (_fake_path_number[0], )
+ prefix = '/tmp/should/not/exist/%d/' % (_fake_path_number[0],)
return os.path.join(prefix, ext)
@@ -121,14 +123,15 @@ def _InjectSuccesses(experiment, how_many, keyvals, for_benchmark=0,
share_cache = ''
locks_dir = ''
log = logger.GetLogger()
- machine_manager = MockMachineManager(FakePath('chromeos_root'), 0,
- log_level, locks_dir)
+ machine_manager = MockMachineManager(
+ FakePath('chromeos_root'), 0, log_level, locks_dir)
machine_manager.AddMachine('testing_machine')
machine = next(m for m in machine_manager.GetMachines()
if m.name == 'testing_machine')
for label in experiment.labels:
+
def MakeSuccessfulRun(n):
- run = MockBenchmarkRun('mock_success%d' % (n, ), bench, label,
+ run = MockBenchmarkRun('mock_success%d' % (n,), bench, label,
1 + n + num_runs, cache_conditions,
machine_manager, log, log_level, share_cache)
mock_result = MockResult(log, label, log_level, machine)
@@ -136,8 +139,8 @@ def _InjectSuccesses(experiment, how_many, keyvals, for_benchmark=0,
run.result = mock_result
return run
- experiment.benchmark_runs.extend(MakeSuccessfulRun(n)
- for n in xrange(how_many))
+ experiment.benchmark_runs.extend(
+ MakeSuccessfulRun(n) for n in xrange(how_many))
return experiment
@@ -160,7 +163,6 @@ class TextResultsReportTest(unittest.TestCase):
self.assertIn(MockCrosMachine.CPUINFO_STRING, text_report)
return text_report
-
def testOutput(self):
email_report = self._checkReport(email=True)
text_report = self._checkReport(email=False)
@@ -177,12 +179,10 @@ class HTMLResultsReportTest(unittest.TestCase):
things are displayed. It just cares that they're present.
"""
- _TestOutput = collections.namedtuple('TestOutput', ['summary_table',
- 'perf_html',
- 'chart_js',
- 'charts',
- 'full_table',
- 'experiment_file'])
+ _TestOutput = collections.namedtuple('TestOutput', [
+ 'summary_table', 'perf_html', 'chart_js', 'charts', 'full_table',
+ 'experiment_file'
+ ])
@staticmethod
def _GetTestOutput(perf_table, chart_js, summary_table, print_table,
@@ -192,12 +192,13 @@ class HTMLResultsReportTest(unittest.TestCase):
summary_table = print_table(summary_table, 'HTML')
perf_html = print_table(perf_table, 'HTML')
full_table = print_table(full_table, 'HTML')
- return HTMLResultsReportTest._TestOutput(summary_table=summary_table,
- perf_html=perf_html,
- chart_js=chart_js,
- charts=chart_divs,
- full_table=full_table,
- experiment_file=experiment_file)
+ return HTMLResultsReportTest._TestOutput(
+ summary_table=summary_table,
+ perf_html=perf_html,
+ chart_js=chart_js,
+ charts=chart_divs,
+ full_table=full_table,
+ experiment_file=experiment_file)
def _GetOutput(self, experiment=None, benchmark_results=None):
with mock.patch('results_report_templates.GenerateHTMLPage') as standin:
@@ -222,8 +223,8 @@ class HTMLResultsReportTest(unittest.TestCase):
def testSuccessfulOutput(self):
num_success = 2
success_keyvals = {'retval': 0, 'a_float': 3.96}
- output = self._GetOutput(_InjectSuccesses(MakeMockExperiment(), num_success,
- success_keyvals))
+ output = self._GetOutput(
+ _InjectSuccesses(MakeMockExperiment(), num_success, success_keyvals))
self.assertNotIn('no result', output.summary_table)
#self.assertIn(success_keyvals['machine'], output.summary_table)
@@ -321,8 +322,17 @@ class JSONResultsReportTest(unittest.TestCase):
benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2),
('bench3', 1), ('bench4', 0)]
benchmark_keyvals = {
- 'bench1': [[{'retval': 1, 'foo': 2.0}]],
- 'bench2': [[{'retval': 1, 'foo': 4.0}, {'retval': -1, 'bar': 999}]],
+ 'bench1': [[{
+ 'retval': 1,
+ 'foo': 2.0
+ }]],
+ 'bench2': [[{
+ 'retval': 1,
+ 'foo': 4.0
+ }, {
+ 'retval': -1,
+ 'bar': 999
+ }]],
# lack of retval is considered a failure.
'bench3': [[{}]],
'bench4': [[]]
@@ -341,8 +351,8 @@ class JSONResultsReportTest(unittest.TestCase):
benchmark_keyvals = {'bench1': [[{'retval': 0, 'foo': 2.0}]]}
bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
benchmark_keyvals)
- reporter = JSONResultsReport(bench_results,
- json_args={'separators': separators})
+ reporter = JSONResultsReport(
+ bench_results, json_args={'separators': separators})
result_str = reporter.GetReport()
self.assertIn(separators[0], result_str)
self.assertIn(separators[1], result_str)
@@ -351,8 +361,17 @@ class JSONResultsReportTest(unittest.TestCase):
labels = ['label1']
benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2)]
benchmark_keyvals = {
- 'bench1': [[{'retval': 0, 'foo': 2.0}]],
- 'bench2': [[{'retval': 0, 'foo': 4.0}, {'retval': 0, 'bar': 999}]]
+ 'bench1': [[{
+ 'retval': 0,
+ 'foo': 2.0
+ }]],
+ 'bench2': [[{
+ 'retval': 0,
+ 'foo': 4.0
+ }, {
+ 'retval': 0,
+ 'bar': 999
+ }]]
}
bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
benchmark_keyvals)
@@ -374,6 +393,7 @@ class JSONResultsReportTest(unittest.TestCase):
class PerfReportParserTest(unittest.TestCase):
"""Tests for the perf report parser in results_report."""
+
@staticmethod
def _ReadRealPerfReport():
my_dir = os.path.dirname(os.path.realpath(__file__))
diff --git a/crosperf/schedv2.py b/crosperf/schedv2.py
index 90fe83a3..e661f307 100644
--- a/crosperf/schedv2.py
+++ b/crosperf/schedv2.py
@@ -3,7 +3,6 @@
# found in the LICENSE file.
"""Module to optimize the scheduling of benchmark_run tasks."""
-
from __future__ import print_function
import sys
@@ -48,7 +47,7 @@ class DutWorker(Thread):
"""Do the "run-test->(optionally reimage)->run-test" chore.
Note - 'br' below means 'benchmark_run'.
- """
+ """
# Firstly, handle benchmarkruns that have cache hit.
br = self._sched.get_cached_benchmark_run()
@@ -93,12 +92,12 @@ class DutWorker(Thread):
def _reimage(self, label):
"""Reimage image to label.
- Args:
- label: the label to remimage onto dut.
+ Args:
+ label: the label to remimage onto dut.
- Returns:
- 0 if successful, otherwise 1.
- """
+ Returns:
+ 0 if successful, otherwise 1.
+ """
# Termination could happen anywhere, check it.
if self._terminated:
@@ -111,8 +110,7 @@ class DutWorker(Thread):
# Note, only 1 reimage at any given time, this is guaranteed in
# ImageMachine, so no sync needed below.
retval = self._sched.get_experiment().machine_manager.ImageMachine(
- self._dut,
- label)
+ self._dut, label)
if retval:
return 1
@@ -126,7 +124,7 @@ class DutWorker(Thread):
"""Execute a single benchmark_run.
Note - this function never throws exceptions.
- """
+ """
# Termination could happen anywhere, check it.
if self._terminated:
@@ -152,7 +150,7 @@ class DutWorker(Thread):
If such match is found, we just skip doing reimage and jump to execute
some benchmark_runs.
- """
+ """
checksum_file = '/usr/local/osimage_checksum_file'
try:
@@ -166,8 +164,8 @@ class DutWorker(Thread):
checksum = checksum.strip()
for l in self._sched.get_labels():
if l.checksum == checksum:
- self._logger.LogOutput("Dut '{}' is pre-installed with '{}'".format(
- self._dut.name, l))
+ self._logger.LogOutput(
+ "Dut '{}' is pre-installed with '{}'".format(self._dut.name, l))
self._dut.label = l
return
except RuntimeError:
@@ -196,7 +194,7 @@ class BenchmarkRunCacheReader(Thread):
On creation, each instance of this class is given a br_list, which is a
subset of experiment._benchmark_runs.
- """
+ """
def __init__(self, schedv2, br_list):
super(BenchmarkRunCacheReader, self).__init__()
@@ -272,7 +270,7 @@ class Schedv2(object):
We do this by firstly creating a few threads, and then assign each
thread a segment of all brs. Each thread will check cache status for
each br and put those with cache into '_cached_br_list'.
- """
+ """
self._cached_br_list = []
n_benchmarkruns = len(self._experiment.benchmark_runs)
@@ -287,16 +285,16 @@ class Schedv2(object):
# a thread. Note, we use (x+3)/4 to mimic math.ceil(x/4).
n_threads = max(2, min(20, (n_benchmarkruns + 3) / 4))
self._logger.LogOutput(('Starting {} threads to read cache status for '
- '{} benchmark runs ...').format(n_threads,
- n_benchmarkruns))
+ '{} benchmark runs ...').format(
+ n_threads, n_benchmarkruns))
benchmarkruns_per_thread = (n_benchmarkruns + n_threads - 1) / n_threads
benchmarkrun_segments = []
for i in range(n_threads - 1):
start = i * benchmarkruns_per_thread
end = (i + 1) * benchmarkruns_per_thread
benchmarkrun_segments.append(self._experiment.benchmark_runs[start:end])
- benchmarkrun_segments.append(self._experiment.benchmark_runs[
- (n_threads - 1) * benchmarkruns_per_thread:])
+ benchmarkrun_segments.append(self._experiment.benchmark_runs[(
+ n_threads - 1) * benchmarkruns_per_thread:])
# Assert: aggregation of benchmarkrun_segments equals to benchmark_runs.
assert sum(len(x) for x in benchmarkrun_segments) == n_benchmarkruns
@@ -314,9 +312,8 @@ class Schedv2(object):
x.join()
# Summarize.
- self._logger.LogOutput(
- 'Total {} cache hit out of {} benchmark_runs.'.format(
- len(self._cached_br_list), n_benchmarkruns))
+ self._logger.LogOutput('Total {} cache hit out of {} benchmark_runs.'.
+ format(len(self._cached_br_list), n_benchmarkruns))
def get_cached_run_list(self):
return self._cached_br_list
@@ -338,9 +335,9 @@ class Schedv2(object):
def get_cached_benchmark_run(self):
"""Get a benchmark_run with 'cache hit'.
- Returns:
- The benchmark that has cache hit, if any. Otherwise none.
- """
+ Returns:
+ The benchmark that has cache hit, if any. Otherwise none.
+ """
with self.lock_on('_cached_br_list'):
if self._cached_br_list:
@@ -350,14 +347,14 @@ class Schedv2(object):
def get_benchmark_run(self, dut):
"""Get a benchmark_run (br) object for a certain dut.
- Args:
- dut: the dut for which a br is returned.
+ Args:
+ dut: the dut for which a br is returned.
- Returns:
- A br with its label matching that of the dut. If no such br could be
- found, return None (this usually means a reimage is required for the
- dut).
- """
+ Returns:
+ A br with its label matching that of the dut. If no such br could be
+ found, return None (this usually means a reimage is required for the
+ dut).
+ """
# If terminated, stop providing any br.
if self._terminated:
@@ -384,12 +381,12 @@ class Schedv2(object):
The dut_worker calling this method is responsible for reimage the dut to
this label.
- Args:
- dut: the new label that is to be reimaged onto the dut.
+ Args:
+ dut: the new label that is to be reimaged onto the dut.
- Returns:
- The label or None.
- """
+ Returns:
+ The label or None.
+ """
if self._terminated:
return None
@@ -399,9 +396,9 @@ class Schedv2(object):
def dut_worker_finished(self, dut_worker):
"""Notify schedv2 that the dut_worker thread finished.
- Args:
- dut_worker: the thread that is about to end.
- """
+ Args:
+ dut_worker: the thread that is about to end.
+ """
self._logger.LogOutput('{} finished.'.format(dut_worker))
with self._workers_lock:
@@ -418,7 +415,7 @@ class Schedv2(object):
"""Mark flag so we stop providing br/reimages.
Also terminate each DutWorker, so they refuse to execute br or reimage.
- """
+ """
self._terminated = True
for dut_worker in self._active_workers:
diff --git a/crosperf/schedv2_unittest.py b/crosperf/schedv2_unittest.py
index be0fde4b..250968dc 100755
--- a/crosperf/schedv2_unittest.py
+++ b/crosperf/schedv2_unittest.py
@@ -72,11 +72,10 @@ class Schedv2Test(unittest.TestCase):
"""Create fake experiment from string.
Note - we mock out BenchmarkRun in this step.
- """
+ """
experiment_file = ExperimentFile(StringIO.StringIO(expstr))
- experiment = ExperimentFactory().GetExperiment(experiment_file,
- working_directory='',
- log_dir='')
+ experiment = ExperimentFactory().GetExperiment(
+ experiment_file, working_directory='', log_dir='')
return experiment
def test_remote(self):
@@ -99,8 +98,8 @@ class Schedv2Test(unittest.TestCase):
return (cm.name != 'chromeos-daisy3.cros' and
cm.name != 'chromeos-daisy5.cros')
- with mock.patch('machine_manager.MockCrosMachine.IsReachable',
- new=MockIsReachable):
+ with mock.patch(
+ 'machine_manager.MockCrosMachine.IsReachable', new=MockIsReachable):
self.exp = self._make_fake_experiment(EXPERIMENT_FILE_1)
self.assertIn('chromeos-daisy1.cros', self.exp.remote)
self.assertIn('chromeos-daisy2.cros', self.exp.remote)
@@ -119,8 +118,8 @@ class Schedv2Test(unittest.TestCase):
def test_BenchmarkRunCacheReader_1(self, reader):
"""Test benchmarkrun set is split into 5 segments."""
- self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
- kraken_iterations=9))
+ self.exp = self._make_fake_experiment(
+ EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=9))
my_schedv2 = Schedv2(self.exp)
self.assertFalse(my_schedv2.is_complete())
# We have 9 * 2 == 18 brs, we use 5 threads, each reading 4, 4, 4,
@@ -141,8 +140,8 @@ class Schedv2Test(unittest.TestCase):
def test_BenchmarkRunCacheReader_2(self, reader):
"""Test benchmarkrun set is split into 4 segments."""
- self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
- kraken_iterations=8))
+ self.exp = self._make_fake_experiment(
+ EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=8))
my_schedv2 = Schedv2(self.exp)
self.assertFalse(my_schedv2.is_complete())
# We have 8 * 2 == 16 brs, we use 4 threads, each reading 4 brs.
@@ -156,8 +155,8 @@ class Schedv2Test(unittest.TestCase):
def test_BenchmarkRunCacheReader_3(self, reader):
"""Test benchmarkrun set is split into 2 segments."""
- self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
- kraken_iterations=3))
+ self.exp = self._make_fake_experiment(
+ EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=3))
my_schedv2 = Schedv2(self.exp)
self.assertFalse(my_schedv2.is_complete())
# We have 3 * 2 == 6 brs, we use 2 threads.
@@ -169,8 +168,8 @@ class Schedv2Test(unittest.TestCase):
def test_BenchmarkRunCacheReader_4(self, reader):
"""Test benchmarkrun set is not splitted."""
- self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
- kraken_iterations=1))
+ self.exp = self._make_fake_experiment(
+ EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=1))
my_schedv2 = Schedv2(self.exp)
self.assertFalse(my_schedv2.is_complete())
# We have 1 * 2 == 2 br, so only 1 instance.
@@ -183,18 +182,17 @@ class Schedv2Test(unittest.TestCase):
def MockReadCache(br):
br.cache_hit = (br.label.name == 'image2')
- with mock.patch('benchmark_run.MockBenchmarkRun.ReadCache',
- new=MockReadCache):
+ with mock.patch(
+ 'benchmark_run.MockBenchmarkRun.ReadCache', new=MockReadCache):
# We have 2 * 30 brs, half of which are put into _cached_br_list.
- self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
- kraken_iterations=30))
+ self.exp = self._make_fake_experiment(
+ EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=30))
my_schedv2 = Schedv2(self.exp)
self.assertEquals(len(my_schedv2.get_cached_run_list()), 30)
# The non-cache-hit brs are put into Schedv2._label_brl_map.
self.assertEquals(
reduce(lambda a, x: a + len(x[1]),
- my_schedv2.get_label_map().iteritems(),
- 0), 30)
+ my_schedv2.get_label_map().iteritems(), 0), 30)
def test_nocachehit(self):
"""Test no cache-hit."""
@@ -202,18 +200,17 @@ class Schedv2Test(unittest.TestCase):
def MockReadCache(br):
br.cache_hit = False
- with mock.patch('benchmark_run.MockBenchmarkRun.ReadCache',
- new=MockReadCache):
+ with mock.patch(
+ 'benchmark_run.MockBenchmarkRun.ReadCache', new=MockReadCache):
# We have 2 * 30 brs, none of which are put into _cached_br_list.
- self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
- kraken_iterations=30))
+ self.exp = self._make_fake_experiment(
+ EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=30))
my_schedv2 = Schedv2(self.exp)
self.assertEquals(len(my_schedv2.get_cached_run_list()), 0)
# The non-cache-hit brs are put into Schedv2._label_brl_map.
self.assertEquals(
reduce(lambda a, x: a + len(x[1]),
- my_schedv2.get_label_map().iteritems(),
- 0), 60)
+ my_schedv2.get_label_map().iteritems(), 0), 60)
if __name__ == '__main__':
diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py
index 05e3fbbb..efbb534f 100644
--- a/crosperf/settings_factory.py
+++ b/crosperf/settings_factory.py
@@ -70,8 +70,8 @@ class LabelSettings(Settings):
'autotest_path',
required=False,
description='Autotest directory path relative to chroot which '
- 'has autotest files for the image to run tests requiring autotest files'
- ))
+ 'has autotest files for the image to run tests requiring autotest '
+ 'files.'))
self.AddField(
TextField(
'chromeos_root',
@@ -260,8 +260,7 @@ class GlobalSettings(Settings):
'you want to use. It accepts multiple directories '
'separated by a ",".'))
self.AddField(
- TextField(
- 'results_dir', default='', description='The results dir.'))
+ TextField('results_dir', default='', description='The results dir.'))
self.AddField(
TextField(
'locks_dir',
diff --git a/crosperf/settings_factory_unittest.py b/crosperf/settings_factory_unittest.py
index b4726d34..1ff6a133 100755
--- a/crosperf/settings_factory_unittest.py
+++ b/crosperf/settings_factory_unittest.py
@@ -3,7 +3,6 @@
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Unittest for crosperf."""
from __future__ import print_function
@@ -80,18 +79,18 @@ class SettingsFactoryTest(unittest.TestCase):
self.assertRaises(Exception, settings_factory.SettingsFactory.GetSettings,
'global', 'bad_type')
- l_settings = settings_factory.SettingsFactory().GetSettings('label',
- 'label')
+ l_settings = settings_factory.SettingsFactory().GetSettings(
+ 'label', 'label')
self.assertIsInstance(l_settings, settings_factory.LabelSettings)
self.assertEqual(len(l_settings.fields), 9)
- b_settings = settings_factory.SettingsFactory().GetSettings('benchmark',
- 'benchmark')
+ b_settings = settings_factory.SettingsFactory().GetSettings(
+ 'benchmark', 'benchmark')
self.assertIsInstance(b_settings, settings_factory.BenchmarkSettings)
self.assertEqual(len(b_settings.fields), 6)
- g_settings = settings_factory.SettingsFactory().GetSettings('global',
- 'global')
+ g_settings = settings_factory.SettingsFactory().GetSettings(
+ 'global', 'global')
self.assertIsInstance(g_settings, settings_factory.GlobalSettings)
self.assertEqual(len(g_settings.fields), 25)
diff --git a/crosperf/settings_unittest.py b/crosperf/settings_unittest.py
index f1062f0d..fea55c05 100755
--- a/crosperf/settings_unittest.py
+++ b/crosperf/settings_unittest.py
@@ -48,14 +48,12 @@ class TestSettings(unittest.TestCase):
'run the test.'))
self.assertEqual(len(self.settings.fields), 1)
# Adding the same field twice raises an exception.
- self.assertRaises(
- Exception,
- self.settings.AddField, (IntegerField(
- 'iterations',
- default=1,
- required=False,
- description='Number of iterations to run '
- 'the test.')))
+ self.assertRaises(Exception, self.settings.AddField, (IntegerField(
+ 'iterations',
+ default=1,
+ required=False,
+ description='Number of iterations to run '
+ 'the test.')))
res = self.settings.fields['iterations']
self.assertIsInstance(res, IntegerField)
self.assertEqual(res.Get(), 1)
@@ -116,10 +114,10 @@ class TestSettings(unittest.TestCase):
self.assertEqual(res, 5)
def test_inherit(self):
- parent_settings = settings_factory.SettingsFactory().GetSettings('global',
- 'global')
- label_settings = settings_factory.SettingsFactory().GetSettings('label',
- 'label')
+ parent_settings = settings_factory.SettingsFactory().GetSettings(
+ 'global', 'global')
+ label_settings = settings_factory.SettingsFactory().GetSettings(
+ 'label', 'label')
self.assertEqual(parent_settings.GetField('chromeos_root'), '')
self.assertEqual(label_settings.GetField('chromeos_root'), '')
self.assertIsNone(label_settings.parent)
@@ -140,8 +138,8 @@ class TestSettings(unittest.TestCase):
'list of email addresses to send '
'email to.'))
- global_settings = settings_factory.SettingsFactory().GetSettings('global',
- 'global')
+ global_settings = settings_factory.SettingsFactory().GetSettings(
+ 'global', 'global')
global_settings.SetField('email', 'john.doe@google.com', append=True)
global_settings.SetField('email', 'jane.smith@google.com', append=True)
diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
index 66d8109f..bd27f282 100644
--- a/crosperf/suite_runner.py
+++ b/crosperf/suite_runner.py
@@ -78,8 +78,8 @@ class SuiteRunner(object):
(benchmark.name, i))
break
else:
- self.logger.LogOutput('benchmark %s succeded on first try' %
- benchmark.name)
+ self.logger.LogOutput(
+ 'benchmark %s succeded on first try' % benchmark.name)
break
return ret_tup
@@ -98,6 +98,7 @@ class SuiteRunner(object):
# Uncomment rest of lines to enable setting frequency by crosperf
#'val=0; '
#'if [[ -e scaling_available_frequencies ]]; then '
+ # pylint: disable=line-too-long
#' val=`cat scaling_available_frequencies | tr " " "\\n" | sort -n -b -r`; '
#'else '
#' val=`cat scaling_max_freq | tr " " "\\n" | sort -n -b -r`; fi ; '
@@ -112,12 +113,12 @@ class SuiteRunner(object):
)
# pyformat: enable
if self.log_level == 'average':
- self.logger.LogOutput('Pinning governor execution frequencies for %s' %
- machine_name)
+ self.logger.LogOutput(
+ 'Pinning governor execution frequencies for %s' % machine_name)
ret = self._ce.CrosRunCommand(
set_cpu_freq, machine=machine_name, chromeos_root=chromeos_root)
- self.logger.LogFatalIf(ret, 'Could not pin frequencies on machine: %s' %
- machine_name)
+ self.logger.LogFatalIf(
+ ret, 'Could not pin frequencies on machine: %s' % machine_name)
def DecreaseWaitTime(self, machine_name, chromeos_root):
"""Change the ten seconds wait time for pagecycler to two seconds."""
diff --git a/crosperf/suite_runner_unittest.py b/crosperf/suite_runner_unittest.py
index 4b87f9c7..78bdfbdf 100755
--- a/crosperf/suite_runner_unittest.py
+++ b/crosperf/suite_runner_unittest.py
@@ -327,11 +327,12 @@ class SuiteRunnerTest(unittest.TestCase):
self.telemetry_bench, '')
self.assertEqual(res, 0)
self.assertEqual(mock_runcmd.call_count, 1)
- self.assertEqual(mock_runcmd.call_args_list[0][0], (
- ('cd src/tools/perf && ./run_measurement '
- '--browser=cros-chrome --output-format=csv '
- '--remote=lumpy1.cros --identity /tmp/chromeos/src/scripts'
- '/mod_for_test_scripts/ssh_keys/testing_rsa octane '),))
+ self.assertEqual(
+ mock_runcmd.call_args_list[0][0],
+ (('cd src/tools/perf && ./run_measurement '
+ '--browser=cros-chrome --output-format=csv '
+ '--remote=lumpy1.cros --identity /tmp/chromeos/src/scripts'
+ '/mod_for_test_scripts/ssh_keys/testing_rsa octane '),))
self.real_logger.LogMsg = save_log_msg
diff --git a/file_lock_machine.py b/file_lock_machine.py
index 9b1d3367..8493b082 100755
--- a/file_lock_machine.py
+++ b/file_lock_machine.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Script to lock/unlock machines."""
@@ -79,9 +79,11 @@ class LockDescription(object):
return self.counter or self.exclusive
def __str__(self):
- return ' '.join(['Owner: %s' % self.owner, 'Exclusive: %s' % self.exclusive,
- 'Counter: %s' % self.counter, 'Time: %s' % self.time,
- 'Reason: %s' % self.reason, 'Auto: %s' % self.auto])
+ return ' '.join([
+ 'Owner: %s' % self.owner, 'Exclusive: %s' % self.exclusive,
+ 'Counter: %s' % self.counter, 'Time: %s' % self.time,
+ 'Reason: %s' % self.reason, 'Auto: %s' % self.auto
+ ])
class FileLock(object):
@@ -120,9 +122,8 @@ class FileLock(object):
(os.path.basename(file_lock.getFilePath),
file_lock.getDescription().owner,
file_lock.getDescription().exclusive,
- file_lock.getDescription().counter,
- elapsed_time, file_lock.getDescription().reason,
- file_lock.getDescription().auto))
+ file_lock.getDescription().counter, elapsed_time,
+ file_lock.getDescription().reason, file_lock.getDescription().auto))
table = '\n'.join(lock_strings)
return '\n'.join([header, table])
@@ -199,8 +200,8 @@ class Lock(object):
with FileLock(self._lock_file) as lock:
if lock.exclusive:
self._logger.LogError(
- 'Exclusive lock already acquired by %s. Reason: %s' %
- (lock.owner, lock.reason))
+ 'Exclusive lock already acquired by %s. Reason: %s' % (lock.owner,
+ lock.reason))
return False
if exclusive:
@@ -245,9 +246,10 @@ class Lock(object):
lock.owner = ''
if self._auto:
- del_list = [i
- for i in FileLock.FILE_OPS
- if i.name == FileCheckName(self._lock_file)]
+ del_list = [
+ i for i in FileLock.FILE_OPS
+ if i.name == FileCheckName(self._lock_file)
+ ]
for i in del_list:
FileLock.FILE_OPS.remove(i)
for f in del_list:
@@ -287,8 +289,7 @@ class Machine(object):
if locked or not timeout >= 0:
break
print('Lock not acquired for {0}, wait {1} seconds ...'.format(
- self._name,
- sleep))
+ self._name, sleep))
time.sleep(sleep)
timeout -= sleep
return locked
@@ -302,41 +303,43 @@ def Main(argv):
"""The main function."""
parser = argparse.ArgumentParser()
- parser.add_argument('-r',
- '--reason',
- dest='reason',
- default='',
- help='The lock reason.')
- parser.add_argument('-u',
- '--unlock',
- dest='unlock',
- action='store_true',
- default=False,
- help='Use this to unlock.')
- parser.add_argument('-l',
- '--list_locks',
- dest='list_locks',
- action='store_true',
- default=False,
- help='Use this to list locks.')
- parser.add_argument('-f',
- '--ignore_ownership',
- dest='ignore_ownership',
- action='store_true',
- default=False,
- help="Use this to force unlock on a lock you don't own.")
- parser.add_argument('-s',
- '--shared',
- dest='shared',
- action='store_true',
- default=False,
- help='Use this for a shared (non-exclusive) lock.')
- parser.add_argument('-d',
- '--dir',
- dest='locks_dir',
- action='store',
- default=Machine.LOCKS_DIR,
- help='Use this to set different locks_dir')
+ parser.add_argument(
+ '-r', '--reason', dest='reason', default='', help='The lock reason.')
+ parser.add_argument(
+ '-u',
+ '--unlock',
+ dest='unlock',
+ action='store_true',
+ default=False,
+ help='Use this to unlock.')
+ parser.add_argument(
+ '-l',
+ '--list_locks',
+ dest='list_locks',
+ action='store_true',
+ default=False,
+ help='Use this to list locks.')
+ parser.add_argument(
+ '-f',
+ '--ignore_ownership',
+ dest='ignore_ownership',
+ action='store_true',
+ default=False,
+ help="Use this to force unlock on a lock you don't own.")
+ parser.add_argument(
+ '-s',
+ '--shared',
+ dest='shared',
+ action='store_true',
+ default=False,
+ help='Use this for a shared (non-exclusive) lock.')
+ parser.add_argument(
+ '-d',
+ '--dir',
+ dest='locks_dir',
+ action='store',
+ default=Machine.LOCKS_DIR,
+ help='Use this to set different locks_dir')
parser.add_argument('args', nargs='*', help='Machine arg.')
options = parser.parse_args(argv)
diff --git a/generate-waterfall-reports.py b/generate-waterfall-reports.py
index ed8e3696..9fbb5637 100755
--- a/generate-waterfall-reports.py
+++ b/generate-waterfall-reports.py
@@ -32,13 +32,10 @@ import time
from cros_utils import command_executer
# All the test suites whose data we might want for the reports.
-TESTS = (
- ('bvt-inline', 'HWTest'),
- ('bvt-cq', 'HWTest'),
- ('toolchain-tests', 'HWTest'),
- ('security', 'HWTest'),
- ('kernel_daily_regression', 'HWTest'),
- ('kernel_daily_benchmarks', 'HWTest'),)
+TESTS = (('bvt-inline', 'HWTest'), ('bvt-cq', 'HWTest'),
+ ('toolchain-tests', 'HWTest'), ('security', 'HWTest'),
+ ('kernel_daily_regression', 'HWTest'), ('kernel_daily_benchmarks',
+ 'HWTest'),)
# The main waterfall builders, IN THE ORDER IN WHICH WE WANT THEM
# LISTED IN THE REPORT.
@@ -127,8 +124,8 @@ def PruneOldFailures(failure_dict, int_date):
def GetBuildID(build_bot, date):
"""Get the build id for a build_bot at a given date."""
- day = '{day:02d}'.format(day=date%100)
- mon = MONTHS[date/100%100]
+ day = '{day:02d}'.format(day=date % 100)
+ mon = MONTHS[date / 100 % 100]
date_string = mon + ' ' + day
if build_bot in WATERFALL_BUILDERS:
url = 'https://uberchromegw.corp.google.com/i/chromeos/' + \
@@ -136,7 +133,7 @@ def GetBuildID(build_bot, date):
if build_bot in ROTATING_BUILDERS:
url = 'https://uberchromegw.corp.google.com/i/chromiumos.tryserver/' + \
'builders/%s?numbuilds=200' % build_bot
- command = 'sso_client %s' %url
+ command = 'sso_client %s' % url
retval = 1
retry_time = 3
while retval and retry_time:
@@ -275,15 +272,14 @@ def GenerateWaterfallReport(report_dict, fail_dict, waterfall_type, date,
' %6s %6s %6s %6s\n' %
(inline_color, cq_color, toolchain_color,
security_color, regression_color, bench_color))
- out_file.write('%25s %3s %s %s %s %s %s %s\n' % (builder, status,
- inline, cq,
- toolchain, security,
- regression, bench))
+ out_file.write('%25s %3s %s %s %s %s %s %s\n' %
+ (builder, status, inline, cq, toolchain, security,
+ regression, bench))
else:
out_file.write(' %6s %6s'
- ' %6s %6s\n' % (inline_color, cq_color,
- toolchain_color,
- security_color))
+ ' %6s %6s\n' %
+ (inline_color, cq_color, toolchain_color,
+ security_color))
out_file.write('%25s %3s %s %s %s %s\n' % (builder, status, inline,
cq, toolchain, security))
else:
@@ -372,8 +368,9 @@ def UpdateReport(report_dict, builder, test, report_date, build_link,
build_dict['date'] = report_date
if 'board' in build_dict and build_dict['board'] != board:
- raise RuntimeError('Error: Two different boards (%s,%s) in one build (%s)!'
- % (board, build_dict['board'], build_link))
+ raise RuntimeError(
+ 'Error: Two different boards (%s,%s) in one build (%s)!' %
+ (board, build_dict['board'], build_link))
build_dict['board'] = board
color_key = '%s-color' % test
@@ -819,9 +816,8 @@ def Main(argv):
EmailReport(main_report, 'Main', format_date(int_date))
shutil.copy(main_report, ARCHIVE_DIR)
if rotating_report_dict and not main_only and not failures_report:
- rotating_report = GenerateWaterfallReport(rotating_report_dict,
- failure_dict, 'rotating',
- int_date, omit_failures)
+ rotating_report = GenerateWaterfallReport(
+ rotating_report_dict, failure_dict, 'rotating', int_date, omit_failures)
EmailReport(rotating_report, 'Rotating', format_date(int_date))
shutil.copy(rotating_report, ARCHIVE_DIR)
diff --git a/get_common_image_version.py b/get_common_image_version.py
index da36b98f..4bb6949f 100755
--- a/get_common_image_version.py
+++ b/get_common_image_version.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
#
# Copyright 2013 Google Inc. All Rights Reserved.
"""Script to find list of common images (first beta releases) in Chromeos.
@@ -63,11 +63,12 @@ def Main(argv):
"""Get ChromeOS first betas list from history URL."""
parser = argparse.ArgumentParser()
- parser.add_argument('--serialize',
- dest='serialize',
- default=None,
- help='Save list of common images into the specified '
- 'file.')
+ parser.add_argument(
+ '--serialize',
+ dest='serialize',
+ default=None,
+ help='Save list of common images into the specified '
+ 'file.')
options = parser.parse_args(argv)
try:
diff --git a/heat_map.py b/heat_map.py
index ae234b51..39e3f8fd 100755
--- a/heat_map.py
+++ b/heat_map.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -15,6 +15,7 @@ from sets import Set
from cros_utils import command_executer
+
def IsARepoRoot(directory):
"""Returns True if directory is the root of a repo checkout."""
return os.path.exists(os.path.join(directory, '.repo'))
@@ -37,8 +38,8 @@ class HeatMapProducer(object):
self.perf_report = ''
def copyFileToChroot(self):
- self.tempDir = tempfile.mkdtemp(
- prefix=os.path.join(self.chromeos_root, 'src/'))
+ self.tempDir = tempfile.mkdtemp(prefix=os.path.join(self.chromeos_root,
+ 'src/'))
self.temp_perf = os.path.join(self.tempDir, 'perf.data')
shutil.copy2(self.perf_data, self.temp_perf)
self.temp_perf_inchroot = os.path.join('~/trunk/src',
@@ -101,24 +102,25 @@ def main(argv):
"""
parser = argparse.ArgumentParser()
- parser.add_argument('--chromeos_root',
- dest='chromeos_root',
- required=True,
- help='ChromeOS root to use for generate heatmaps.')
- parser.add_argument('--perf_data',
- dest='perf_data',
- required=True,
- help='The raw perf data.')
- parser.add_argument('--binary',
- dest='binary',
- required=False,
- help='The name of the binary.',
- default='chrome')
- parser.add_argument('--page_size',
- dest='page_size',
- required=False,
- help='The page size for heat maps.',
- default=4096)
+ parser.add_argument(
+ '--chromeos_root',
+ dest='chromeos_root',
+ required=True,
+ help='ChromeOS root to use for generate heatmaps.')
+ parser.add_argument(
+ '--perf_data', dest='perf_data', required=True, help='The raw perf data.')
+ parser.add_argument(
+ '--binary',
+ dest='binary',
+ required=False,
+ help='The name of the binary.',
+ default='chrome')
+ parser.add_argument(
+ '--page_size',
+ dest='page_size',
+ required=False,
+ help='The page size for heat maps.',
+ default=4096)
options = parser.parse_args(argv)
if not IsARepoRoot(options.chromeos_root):
diff --git a/image_chromeos.py b/image_chromeos.py
index d95434a7..0ea6d390 100755
--- a/image_chromeos.py
+++ b/image_chromeos.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
#
# Copyright 2011 Google Inc. All Rights Reserved.
"""Script to image a ChromeOS device.
@@ -41,9 +41,8 @@ def CheckForCrosFlash(chromeos_root, remote, log_level):
# Check to see if remote machine has cherrypy, ctypes
command = "python -c 'import cherrypy, ctypes'"
- ret = cmd_executer.CrosRunCommand(command,
- chromeos_root=chromeos_root,
- machine=remote)
+ ret = cmd_executer.CrosRunCommand(
+ command, chromeos_root=chromeos_root, machine=remote)
logger.GetLogger().LogFatalIf(
ret == 255, 'Failed ssh to %s (for checking cherrypy)' % remote)
logger.GetLogger().LogFatalIf(
@@ -55,37 +54,39 @@ def DoImage(argv):
"""Image ChromeOS."""
parser = argparse.ArgumentParser()
- parser.add_argument('-c',
- '--chromeos_root',
- dest='chromeos_root',
- help='Target directory for ChromeOS installation.')
+ parser.add_argument(
+ '-c',
+ '--chromeos_root',
+ dest='chromeos_root',
+ help='Target directory for ChromeOS installation.')
parser.add_argument('-r', '--remote', dest='remote', help='Target device.')
parser.add_argument('-i', '--image', dest='image', help='Image binary file.')
- parser.add_argument('-b',
- '--board',
- dest='board',
- help='Target board override.')
- parser.add_argument('-f',
- '--force',
- dest='force',
- action='store_true',
- default=False,
- help='Force an image even if it is non-test.')
- parser.add_argument('-n',
- '--no_lock',
- dest='no_lock',
- default=False,
- action='store_true',
- help='Do not attempt to lock remote before imaging. '
- 'This option should only be used in cases where the '
- 'exclusive lock has already been acquired (e.g. in '
- 'a script that calls this one).')
- parser.add_argument('-l',
- '--logging_level',
- dest='log_level',
- default='verbose',
- help='Amount of logging to be used. Valid levels are '
- "'quiet', 'average', and 'verbose'.")
+ parser.add_argument(
+ '-b', '--board', dest='board', help='Target board override.')
+ parser.add_argument(
+ '-f',
+ '--force',
+ dest='force',
+ action='store_true',
+ default=False,
+ help='Force an image even if it is non-test.')
+ parser.add_argument(
+ '-n',
+ '--no_lock',
+ dest='no_lock',
+ default=False,
+ action='store_true',
+ help='Do not attempt to lock remote before imaging. '
+ 'This option should only be used in cases where the '
+ 'exclusive lock has already been acquired (e.g. in '
+ 'a script that calls this one).')
+ parser.add_argument(
+ '-l',
+ '--logging_level',
+ dest='log_level',
+ default='verbose',
+ help='Amount of logging to be used. Valid levels are '
+ "'quiet', 'average', and 'verbose'.")
parser.add_argument('-a', '--image_args', dest='image_args')
options = parser.parse_args(argv[1:])
@@ -148,9 +149,7 @@ def DoImage(argv):
command = 'cat ' + checksum_file
ret, device_checksum, _ = cmd_executer.CrosRunCommandWOutput(
- command,
- chromeos_root=options.chromeos_root,
- machine=options.remote)
+ command, chromeos_root=options.chromeos_root, machine=options.remote)
device_checksum = device_checksum.strip()
image_checksum = str(image_checksum)
@@ -159,9 +158,8 @@ def DoImage(argv):
l.LogOutput('Device checksum: ' + device_checksum)
if image_checksum != device_checksum:
- [found, located_image] = LocateOrCopyImage(options.chromeos_root,
- image,
- board=board)
+ [found, located_image] = LocateOrCopyImage(
+ options.chromeos_root, image, board=board)
reimage = True
l.LogOutput('Checksums do not match. Re-imaging...')
@@ -180,9 +178,8 @@ def DoImage(argv):
if reimage:
# If the device has /tmp mounted as noexec, image_to_live.sh can fail.
command = 'mount -o remount,rw,exec /tmp'
- cmd_executer.CrosRunCommand(command,
- chromeos_root=options.chromeos_root,
- machine=options.remote)
+ cmd_executer.CrosRunCommand(
+ command, chromeos_root=options.chromeos_root, machine=options.remote)
real_src_dir = os.path.join(
os.path.realpath(options.chromeos_root), 'src')
@@ -202,8 +199,10 @@ def DoImage(argv):
# Check to see if cros flash will work for the remote machine.
CheckForCrosFlash(options.chromeos_root, options.remote, log_level)
- cros_flash_args = ['cros', 'flash', '--board=%s' % board,
- '--clobber-stateful', options.remote]
+ cros_flash_args = [
+ 'cros', 'flash', '--board=%s' % board, '--clobber-stateful',
+ options.remote
+ ]
if local_image:
cros_flash_args.append(chroot_image)
else:
@@ -220,9 +219,8 @@ def DoImage(argv):
while True:
if log_level == 'quiet':
l.LogOutput('CMD : %s' % command)
- ret = cmd_executer.ChrootRunCommand(options.chromeos_root,
- command,
- command_timeout=1800)
+ ret = cmd_executer.ChrootRunCommand(
+ options.chromeos_root, command, command_timeout=1800)
if ret == 0 or retries >= 2:
break
retries += 1
@@ -255,17 +253,15 @@ def DoImage(argv):
if log_level == 'average':
l.LogOutput('Verifying image.')
command = 'echo %s > %s && chmod -w %s' % (image_checksum,
- checksum_file,
- checksum_file)
+ checksum_file, checksum_file)
ret = cmd_executer.CrosRunCommand(
command,
chromeos_root=options.chromeos_root,
machine=options.remote)
logger.GetLogger().LogFatalIf(ret, 'Writing checksum failed.')
- successfully_imaged = VerifyChromeChecksum(options.chromeos_root,
- image, options.remote,
- log_level)
+ successfully_imaged = VerifyChromeChecksum(options.chromeos_root, image,
+ options.remote, log_level)
logger.GetLogger().LogFatalIf(not successfully_imaged,
'Image verification failed!')
TryRemountPartitionAsRW(options.chromeos_root, options.remote,
@@ -297,8 +293,7 @@ def LocateOrCopyImage(chromeos_root, image, board=None):
images_list = glob.glob(images_glob)
for potential_image in images_list:
if filecmp.cmp(potential_image, image):
- l.LogOutput('Found matching image %s in chromeos_root.' %
- potential_image)
+ l.LogOutput('Found matching image %s in chromeos_root.' % potential_image)
return [True, potential_image]
# We did not find an image. Copy it in the src dir and return the copied
# file.
@@ -321,9 +316,9 @@ def GetImageMountCommand(chromeos_root, image, rootfs_mp, stateful_mp):
'./mount_gpt_image.sh --from=%s --image=%s'
' --safe --read_only'
' --rootfs_mountpt=%s'
- ' --stateful_mountpt=%s' % (chromeos_root, image_dir,
- image_file, rootfs_mp,
- stateful_mp))
+ ' --stateful_mountpt=%s' %
+ (chromeos_root, image_dir, image_file, rootfs_mp,
+ stateful_mp))
return mount_command
@@ -351,12 +346,8 @@ def IsImageModdedForTest(chromeos_root, image, log_level):
lsb_release_file = os.path.join(rootfs_mp, 'etc/lsb-release')
lsb_release_contents = open(lsb_release_file).read()
is_test_image = re.search('test', lsb_release_contents, re.IGNORECASE)
- MountImage(chromeos_root,
- image,
- rootfs_mp,
- stateful_mp,
- log_level,
- unmount=True)
+ MountImage(
+ chromeos_root, image, rootfs_mp, stateful_mp, log_level, unmount=True)
return is_test_image
@@ -365,20 +356,14 @@ def VerifyChromeChecksum(chromeos_root, image, remote, log_level):
rootfs_mp = tempfile.mkdtemp()
stateful_mp = tempfile.mkdtemp()
MountImage(chromeos_root, image, rootfs_mp, stateful_mp, log_level)
- image_chrome_checksum = FileUtils().Md5File('%s/opt/google/chrome/chrome' %
- rootfs_mp,
- log_level=log_level)
- MountImage(chromeos_root,
- image,
- rootfs_mp,
- stateful_mp,
- log_level,
- unmount=True)
+ image_chrome_checksum = FileUtils().Md5File(
+ '%s/opt/google/chrome/chrome' % rootfs_mp, log_level=log_level)
+ MountImage(
+ chromeos_root, image, rootfs_mp, stateful_mp, log_level, unmount=True)
command = 'md5sum /opt/google/chrome/chrome'
- [_, o, _] = cmd_executer.CrosRunCommandWOutput(command,
- chromeos_root=chromeos_root,
- machine=remote)
+ [_, o, _] = cmd_executer.CrosRunCommandWOutput(
+ command, chromeos_root=chromeos_root, machine=remote)
device_chrome_checksum = o.split()[0]
if image_chrome_checksum.strip() == device_chrome_checksum.strip():
return True
@@ -415,12 +400,11 @@ def EnsureMachineUp(chromeos_root, remote, log_level):
while True:
current_time = time.time()
if current_time - start_time > timeout:
- l.LogError('Timeout of %ss reached. Machine still not up. Aborting.' %
- timeout)
+ l.LogError(
+ 'Timeout of %ss reached. Machine still not up. Aborting.' % timeout)
return False
- ret = cmd_executer.CrosRunCommand(command,
- chromeos_root=chromeos_root,
- machine=remote)
+ ret = cmd_executer.CrosRunCommand(
+ command, chromeos_root=chromeos_root, machine=remote)
if not ret:
return True
diff --git a/produce_output.py b/produce_output.py
index 30deea8c..46512c49 100755
--- a/produce_output.py
+++ b/produce_output.py
@@ -1,9 +1,7 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
#
# Copyright 2010 Google Inc. All Rights Reserved.
-"""This simulates a real job by producing a lot of output.
-
-"""
+"""This simulates a real job by producing a lot of output."""
from __future__ import print_function
diff --git a/remote_gcc_build.py b/remote_gcc_build.py
index 52cedfbc..edd0d2b6 100755
--- a/remote_gcc_build.py
+++ b/remote_gcc_build.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -28,6 +28,7 @@ SLEEP_TIME = 600
# pylint: disable=anomalous-backslash-in-string
+
def GetPatchNum(output):
lines = output.splitlines()
line = [l for l in lines if 'googlesource' in l][0]
@@ -137,16 +138,18 @@ def DownloadImage(target, index, dest, version):
print(str(rversion))
# ls_cmd = ("gsutil ls gs://chromeos-image-archive/trybot-{0}/{1}-b{2}"
# .format(target, rversion, index))
- ls_cmd = ('gsutil ls gs://chromeos-image-archive/trybot-{0}/*-b{2}'
- .format(target, index))
+ ls_cmd = ('gsutil ls gs://chromeos-image-archive/trybot-{0}/*-b{2}'.format(
+ target, index))
download_cmd = ('$(which gsutil) cp {0} {1}'.format('{0}', dest))
ce = command_executer.GetCommandExecuter()
_, out, _ = ce.RunCommandWOutput(ls_cmd, print_to_console=True)
lines = out.splitlines()
- download_files = ['autotest.tar', 'chromeos-chrome', 'chromiumos_test_image',
- 'debug.tgz', 'sysroot_chromeos-base_chromeos-chrome.tar.xz']
+ download_files = [
+ 'autotest.tar', 'chromeos-chrome', 'chromiumos_test_image', 'debug.tgz',
+ 'sysroot_chromeos-base_chromeos-chrome.tar.xz'
+ ]
for line in lines:
if any([e in line for e in download_files]):
cmd = download_cmd.format(line)
@@ -163,8 +166,8 @@ def UnpackImage(dest):
'tar xjf {1} -C {0} &&'
'tar xzf {0}/debug.tgz -C {0}/usr/lib/ &&'
'tar xf {0}/autotest.tar -C {0}/usr/local/ &&'
- 'tar xJf {0}/chromiumos_test_image.tar.xz -C {0}'
- .format(dest, chrome_tbz2))
+ 'tar xJf {0}/chromiumos_test_image.tar.xz -C {0}'.format(
+ dest, chrome_tbz2))
ce = command_executer.GetCommandExecuter()
return ce.RunCommand(commands)
@@ -220,8 +223,8 @@ def UploadPatch(source):
"""Up load patch to gerrit, return patch number."""
commands = ('git add -A . &&'
"git commit -m 'test' -m 'BUG=None' -m 'TEST=None' "
- "-m 'hostname={0}' -m 'source={1}'"
- .format(socket.gethostname(), source))
+ "-m 'hostname={0}' -m 'source={1}'".format(
+ socket.gethostname(), source))
ce = command_executer.GetCommandExecuter()
ce.RunCommand(commands)
@@ -335,59 +338,64 @@ def Main(argv):
"""The main function."""
# Common initializations
parser = argparse.ArgumentParser()
- parser.add_argument('-c',
- '--chromeos_root',
- required=True,
- dest='chromeos_root',
- help='The chromeos_root')
- parser.add_argument('-g',
- '--gcc_dir',
- default='',
- dest='gcc_dir',
- help='The gcc dir')
- parser.add_argument('-t',
- '--target',
- required=True,
- dest='target',
- help=('The target to be build, the list is at'
- ' $(chromeos_root)/chromite/buildbot/cbuildbot'
- ' --list -all'))
+ parser.add_argument(
+ '-c',
+ '--chromeos_root',
+ required=True,
+ dest='chromeos_root',
+ help='The chromeos_root')
+ parser.add_argument(
+ '-g', '--gcc_dir', default='', dest='gcc_dir', help='The gcc dir')
+ parser.add_argument(
+ '-t',
+ '--target',
+ required=True,
+ dest='target',
+ help=('The target to be build, the list is at'
+ ' $(chromeos_root)/chromite/buildbot/cbuildbot'
+ ' --list -all'))
parser.add_argument('-l', '--local', action='store_true')
- parser.add_argument('-d',
- '--dest_dir',
- dest='dest_dir',
- help=('The dir to build the whole chromeos if'
- ' --local is set'))
- parser.add_argument('--chrome_version',
- dest='chrome_version',
- default='',
- help='The chrome version to use. '
- 'Default it will use the latest one.')
- parser.add_argument('--chromeos_version',
- dest='chromeos_version',
- default='',
- help=('The chromeos version to use.'
- '(1) A release version in the format: '
- "'\d+\.\d+\.\d+\.\d+.*'"
- "(2) 'latest_lkgm' for the latest lkgm version"))
- parser.add_argument('-r',
- '--replace_sysroot',
- action='store_true',
- help=('Whether or not to replace the build/$board dir'
- 'under the chroot of chromeos_root and copy '
- 'the image to src/build/image/$board/latest.'
- ' Default is False'))
- parser.add_argument('-b',
- '--branch',
- dest='branch',
- default='',
- help=('The branch to run trybot, default is None'))
- parser.add_argument('-p',
- '--patch',
- dest='patch',
- default='',
- help=('The patches to be applied, the patches numbers '
- "be seperated by ','"))
+ parser.add_argument(
+ '-d',
+ '--dest_dir',
+ dest='dest_dir',
+ help=('The dir to build the whole chromeos if'
+ ' --local is set'))
+ parser.add_argument(
+ '--chrome_version',
+ dest='chrome_version',
+ default='',
+ help='The chrome version to use. '
+ 'Default it will use the latest one.')
+ parser.add_argument(
+ '--chromeos_version',
+ dest='chromeos_version',
+ default='',
+ help=('The chromeos version to use.'
+ '(1) A release version in the format: '
+ "'\d+\.\d+\.\d+\.\d+.*'"
+ "(2) 'latest_lkgm' for the latest lkgm version"))
+ parser.add_argument(
+ '-r',
+ '--replace_sysroot',
+ action='store_true',
+ help=('Whether or not to replace the build/$board dir'
+ 'under the chroot of chromeos_root and copy '
+ 'the image to src/build/image/$board/latest.'
+ ' Default is False'))
+ parser.add_argument(
+ '-b',
+ '--branch',
+ dest='branch',
+ default='',
+ help=('The branch to run trybot, default is None'))
+ parser.add_argument(
+ '-p',
+ '--patch',
+ dest='patch',
+ default='',
+ help=('The patches to be applied, the patches numbers '
+ "be seperated by ','"))
script_dir = os.path.dirname(os.path.realpath(__file__))
@@ -400,7 +408,7 @@ def Main(argv):
chromeos_root = misc.CanonicalizePath(args.chromeos_root)
if args.chromeos_version and args.branch:
raise RuntimeError('You can not set chromeos_version and branch at the '
- 'same time.')
+ 'same time.')
manifests = None
if args.branch:
diff --git a/remote_kill_test.py b/remote_kill_test.py
index 71a66907..e0f29d0c 100755
--- a/remote_kill_test.py
+++ b/remote_kill_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Script to wrap test_that script.
@@ -26,14 +26,13 @@ def Usage(parser, message):
def Main(argv):
parser = argparse.ArgumentParser()
- parser.add_argument('-c',
- '--chromeos_root',
- dest='chromeos_root',
- help='ChromeOS root checkout directory')
- parser.add_argument('-r',
- '--remote',
- dest='remote',
- help='Remote chromeos device.')
+ parser.add_argument(
+ '-c',
+ '--chromeos_root',
+ dest='chromeos_root',
+ help='ChromeOS root checkout directory')
+ parser.add_argument(
+ '-r', '--remote', dest='remote', help='Remote chromeos device.')
_ = parser.parse_args(argv)
ce = command_executer.GetCommandExecuter()
diff --git a/remote_test.py b/remote_test.py
index 82f54ed5..62598d5a 100755
--- a/remote_test.py
+++ b/remote_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Script to wrap test_that script.
@@ -26,14 +26,13 @@ def Usage(parser, message):
def Main(argv):
parser = argparse.ArgumentParser()
- parser.add_argument('-c',
- '--chromeos_root',
- dest='chromeos_root',
- help='ChromeOS root checkout directory')
- parser.add_argument('-r',
- '--remote',
- dest='remote',
- help='Remote chromeos device.')
+ parser.add_argument(
+ '-c',
+ '--chromeos_root',
+ dest='chromeos_root',
+ help='ChromeOS root checkout directory')
+ parser.add_argument(
+ '-r', '--remote', dest='remote', help='Remote chromeos device.')
options = parser.parse_args(argv)
if options.chromeos_root is None:
Usage(parser, 'chromeos_root must be given')
@@ -45,50 +44,55 @@ def Main(argv):
command = 'ls -lt /'
ce = command_executer.GetCommandExecuter()
- ce.CrosRunCommand(command,
- chromeos_root=options.chromeos_root,
- machine=options.remote)
+ ce.CrosRunCommand(
+ command, chromeos_root=options.chromeos_root, machine=options.remote)
version_dir_path, script_name = misc.GetRoot(sys.argv[0])
version_dir = misc.GetRoot(version_dir_path)[1]
# Tests to copy directories and files to the chromeos box.
- ce.CopyFiles(version_dir_path,
- '/tmp/' + version_dir,
- dest_machine=options.remote,
- dest_cros=True,
- chromeos_root=options.chromeos_root)
- ce.CopyFiles(version_dir_path,
- '/tmp/' + version_dir + '1',
- dest_machine=options.remote,
- dest_cros=True,
- chromeos_root=options.chromeos_root)
- ce.CopyFiles(sys.argv[0],
- '/tmp/' + script_name,
- recursive=False,
- dest_machine=options.remote,
- dest_cros=True,
- chromeos_root=options.chromeos_root)
- ce.CopyFiles(sys.argv[0],
- '/tmp/' + script_name + '1',
- recursive=False,
- dest_machine=options.remote,
- dest_cros=True,
- chromeos_root=options.chromeos_root)
+ ce.CopyFiles(
+ version_dir_path,
+ '/tmp/' + version_dir,
+ dest_machine=options.remote,
+ dest_cros=True,
+ chromeos_root=options.chromeos_root)
+ ce.CopyFiles(
+ version_dir_path,
+ '/tmp/' + version_dir + '1',
+ dest_machine=options.remote,
+ dest_cros=True,
+ chromeos_root=options.chromeos_root)
+ ce.CopyFiles(
+ sys.argv[0],
+ '/tmp/' + script_name,
+ recursive=False,
+ dest_machine=options.remote,
+ dest_cros=True,
+ chromeos_root=options.chromeos_root)
+ ce.CopyFiles(
+ sys.argv[0],
+ '/tmp/' + script_name + '1',
+ recursive=False,
+ dest_machine=options.remote,
+ dest_cros=True,
+ chromeos_root=options.chromeos_root)
# Test to copy directories and files from the chromeos box.
- ce.CopyFiles('/tmp/' + script_name,
- '/tmp/hello',
- recursive=False,
- src_machine=options.remote,
- src_cros=True,
- chromeos_root=options.chromeos_root)
- ce.CopyFiles('/tmp/' + script_name,
- '/tmp/' + script_name,
- recursive=False,
- src_machine=options.remote,
- src_cros=True,
- chromeos_root=options.chromeos_root)
+ ce.CopyFiles(
+ '/tmp/' + script_name,
+ '/tmp/hello',
+ recursive=False,
+ src_machine=options.remote,
+ src_cros=True,
+ chromeos_root=options.chromeos_root)
+ ce.CopyFiles(
+ '/tmp/' + script_name,
+ '/tmp/' + script_name,
+ recursive=False,
+ src_machine=options.remote,
+ src_cros=True,
+ chromeos_root=options.chromeos_root)
board = ce.CrosLearnBoard(options.chromeos_root, options.remote)
print(board)
return 0
diff --git a/repo_to_repo.py b/repo_to_repo.py
index 3b3b9bc4..91c5d580 100755
--- a/repo_to_repo.py
+++ b/repo_to_repo.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Module for transferring files between various types of repositories."""
@@ -23,6 +23,7 @@ from cros_utils import misc
# pylint: disable=anomalous-backslash-in-string
+
def GetCanonicalMappings(mappings):
canonical_mappings = []
for mapping in mappings:
@@ -101,8 +102,8 @@ class Repo(object):
return self._ce.RunCommand(command)
def __str__(self):
- return '\n'.join(str(s)
- for s in [self.repo_type, self.address, self.mappings])
+ return '\n'.join(
+ str(s) for s in [self.repo_type, self.address, self.mappings])
# Note - this type of repo is used only for "readonly", in other words, this
@@ -130,7 +131,6 @@ class FileRepo(Repo):
class P4Repo(Repo):
"""Class for P4 repositories."""
-
def __init__(self, address, mappings, revision=None):
Repo.__init__(self)
self.repo_type = 'p4'
@@ -143,9 +143,8 @@ class P4Repo(Repo):
client_name += tempfile.mkstemp()[1].replace('/', '-')
mappings = self.mappings
p4view = perforce.View('depot2', GetCanonicalMappings(mappings))
- p4client = perforce.CommandsFactory(self._root_dir,
- p4view,
- name=client_name)
+ p4client = perforce.CommandsFactory(
+ self._root_dir, p4view, name=client_name)
command = p4client.SetupAndDo(p4client.Sync(self.revision))
ret = self._ce.RunCommand(command)
assert ret == 0, 'Could not setup client.'
@@ -225,16 +224,16 @@ class GitRepo(Repo):
def SetupForPush(self):
with misc.WorkingDirectory(self._root_dir):
ret = self._CloneSources()
- logger.GetLogger().LogFatalIf(ret, 'Could not clone git repo %s.' %
- self.address)
+ logger.GetLogger().LogFatalIf(
+ ret, 'Could not clone git repo %s.' % self.address)
command = 'git branch -a | grep -wq %s' % self.branch
ret = self._ce.RunCommand(command)
if ret == 0:
if self.branch != 'master':
- command = ('git branch --track %s remotes/origin/%s' %
- (self.branch, self.branch))
+ command = ('git branch --track %s remotes/origin/%s' % (self.branch,
+ self.branch))
else:
command = 'pwd'
command += '&& git checkout %s' % self.branch
@@ -270,8 +269,8 @@ class GitRepo(Repo):
if self.gerrit:
label = 'somelabel'
command = 'git remote add %s %s' % (label, self.address)
- command += ('&& git push %s %s HEAD:refs/for/master' %
- (push_args, label))
+ command += ('&& git push %s %s HEAD:refs/for/master' % (push_args,
+ label))
else:
command = 'git push -v %s origin %s:%s' % (push_args, self.branch,
self.branch)
@@ -334,11 +333,12 @@ class RepoReader(object):
elif repo_type == 'svn':
repo = SvnRepo(repo_address, repo_mappings)
elif repo_type == 'git':
- repo = GitRepo(repo_address,
- repo_branch,
- mappings=repo_mappings,
- ignores=repo_ignores,
- gerrit=gerrit)
+ repo = GitRepo(
+ repo_address,
+ repo_branch,
+ mappings=repo_mappings,
+ ignores=repo_ignores,
+ gerrit=gerrit)
elif repo_type == 'file':
repo = FileRepo(repo_address)
else:
@@ -349,24 +349,27 @@ class RepoReader(object):
@logger.HandleUncaughtExceptions
def Main(argv):
parser = argparse.ArgumentParser()
- parser.add_argument('-i',
- '--input_file',
- dest='input_file',
- help='The input file that contains repo descriptions.')
-
- parser.add_argument('-n',
- '--dry_run',
- dest='dry_run',
- action='store_true',
- default=False,
- help='Do a dry run of the push.')
-
- parser.add_argument('-F',
- '--message_file',
- dest='message_file',
- default=None,
- help=('Use contents of the log file as the commit '
- 'message.'))
+ parser.add_argument(
+ '-i',
+ '--input_file',
+ dest='input_file',
+ help='The input file that contains repo descriptions.')
+
+ parser.add_argument(
+ '-n',
+ '--dry_run',
+ dest='dry_run',
+ action='store_true',
+ default=False,
+ help='Do a dry run of the push.')
+
+ parser.add_argument(
+ '-F',
+ '--message_file',
+ dest='message_file',
+ default=None,
+ help=('Use contents of the log file as the commit '
+ 'message.'))
options = parser.parse_args(argv)
if not options.input_file:
@@ -401,9 +404,10 @@ def Main(argv):
commit_message = 'Synced repos to: %s' % ','.join(input_revisions)
for output_repo in output_repos:
- ret = output_repo.PushSources(commit_message=commit_message,
- dry_run=options.dry_run,
- message_file=options.message_file)
+ ret = output_repo.PushSources(
+ commit_message=commit_message,
+ dry_run=options.dry_run,
+ message_file=options.message_file)
if ret:
return ret
diff --git a/run_tests.py b/run_tests.py
index c755278e..e1b8ca2f 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Script to wrap run_remote_tests.sh script.
@@ -10,9 +10,9 @@ from __future__ import print_function
__author__ = 'asharif@google.com (Ahmad Sharif)'
-
import sys
+
def Main():
"""The main function."""
print('This script is deprecated. Use crosperf for running tests.')
diff --git a/setup_chromeos.py b/setup_chromeos.py
index b6f9f4df..c81fae92 100755
--- a/setup_chromeos.py
+++ b/setup_chromeos.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
#
# Copyright 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -99,13 +99,15 @@ def TimeToCommonVersion(timestamp):
def Main(argv):
"""Checkout the ChromeOS source."""
parser = argparse.ArgumentParser()
- parser.add_argument('--dir',
- dest='directory',
- help='Target directory for ChromeOS installation.')
- parser.add_argument('--version',
- dest='version',
- default='latest_lkgm',
- help="""ChromeOS version. Can be:
+ parser.add_argument(
+ '--dir',
+ dest='directory',
+ help='Target directory for ChromeOS installation.')
+ parser.add_argument(
+ '--version',
+ dest='version',
+ default='latest_lkgm',
+ help="""ChromeOS version. Can be:
(1) A release version in the format: 'X.X.X.X'
(2) 'top' for top of trunk
(3) 'latest_lkgm' for the latest lkgm version
@@ -113,31 +115,32 @@ def Main(argv):
(5) 'latest_common' for the latest team common stable version
(6) 'common' for the team common stable version before timestamp
Default is 'latest_lkgm'.""")
- parser.add_argument('--timestamp',
- dest='timestamp',
- default=None,
- help='Timestamps in epoch format. It will check out the'
- 'latest LKGM or the latest COMMON version of ChromeOS'
- ' before the timestamp. Use in combination with'
- ' --version=latest or --version=common. Use '
- '"date -d <date string> +%s" to find epoch time')
- parser.add_argument('--minilayout',
- dest='minilayout',
- default=False,
- action='store_true',
- help='Whether to checkout the minilayout (smaller '
- 'checkout).')
- parser.add_argument('--jobs',
- '-j',
- dest='jobs',
- help='Number of repo sync threads to use.')
- parser.add_argument('--public',
- '-p',
- dest='public',
- default=False,
- action='store_true',
- help='Use the public checkout instead of the private '
- 'one.')
+ parser.add_argument(
+ '--timestamp',
+ dest='timestamp',
+ default=None,
+      help='Timestamps in epoch format. It will check out the '
+ 'latest LKGM or the latest COMMON version of ChromeOS'
+ ' before the timestamp. Use in combination with'
+ ' --version=latest or --version=common. Use '
+ '"date -d <date string> +%s" to find epoch time')
+ parser.add_argument(
+ '--minilayout',
+ dest='minilayout',
+ default=False,
+ action='store_true',
+ help='Whether to checkout the minilayout (smaller '
+ 'checkout).')
+ parser.add_argument(
+ '--jobs', '-j', dest='jobs', help='Number of repo sync threads to use.')
+ parser.add_argument(
+ '--public',
+ '-p',
+ dest='public',
+ default=False,
+ action='store_true',
+ help='Use the public checkout instead of the private '
+ 'one.')
options = parser.parse_args(argv)
@@ -167,14 +170,10 @@ Default is 'latest_lkgm'.""")
versions_repo = ('https://chromium.googlesource.com/'
'chromiumos/manifest-versions.git')
else:
- manifest_repo = (
- 'https://chrome-internal.googlesource.com/chromeos/'
- 'manifest-internal.git'
- )
- versions_repo = (
- 'https://chrome-internal.googlesource.com/chromeos/'
- 'manifest-versions.git'
- )
+ manifest_repo = ('https://chrome-internal.googlesource.com/chromeos/'
+ 'manifest-internal.git')
+ versions_repo = ('https://chrome-internal.googlesource.com/chromeos/'
+ 'manifest-versions.git')
if version == 'top':
init = 'repo init -u %s' % manifest_repo
@@ -194,8 +193,9 @@ Default is 'latest_lkgm'.""")
manifests = manifest_versions.ManifestVersions()
version = manifests.TimeToVersion(timestamp)
version, manifest = version.split('.', 1)
- logger.GetLogger().LogOutput('found version %s.%s for LKGM at timestamp %s'
- % (version, manifest, timestamp))
+ logger.GetLogger().LogOutput(
+ 'found version %s.%s for LKGM at timestamp %s' % (version, manifest,
+ timestamp))
init = ('repo init -u %s -m paladin/buildspecs/%s/%s.xml' %
(versions_repo, version, manifest))
del manifests
diff --git a/tc_enter_chroot.py b/tc_enter_chroot.py
index 573e5585..d919c96d 100755
--- a/tc_enter_chroot.py
+++ b/tc_enter_chroot.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Script to enter the ChromeOS chroot with mounted sources.
@@ -86,41 +86,47 @@ def Main(argv, return_output=False):
"""The main function."""
parser = argparse.ArgumentParser()
- parser.add_argument('-c',
- '--chromeos_root',
- dest='chromeos_root',
- default='../..',
- help='ChromeOS root checkout directory.')
- parser.add_argument('-t',
- '--toolchain_root',
- dest='toolchain_root',
- help='Toolchain root directory.')
- parser.add_argument('-o',
- '--output',
- dest='output',
- help='Toolchain output directory')
- parser.add_argument('--sudo',
- dest='sudo',
- action='store_true',
- default=False,
- help='Run the command with sudo.')
- parser.add_argument('-r',
- '--third_party',
- dest='third_party',
- help='The third_party directory to mount.')
- parser.add_argument('-m',
- '--other_mounts',
- dest='other_mounts',
- help='Other mount points in the form: '
- 'dir:mounted_dir:options')
- parser.add_argument('-s',
- '--mount-scripts-only',
- dest='mount_scripts_only',
- action='store_true',
- default=False,
- help='Mount only the scripts dir, and not the sources.')
- parser.add_argument('passthrough_argv', nargs='*',
- help='Command to be executed inside the chroot.')
+ parser.add_argument(
+ '-c',
+ '--chromeos_root',
+ dest='chromeos_root',
+ default='../..',
+ help='ChromeOS root checkout directory.')
+ parser.add_argument(
+ '-t',
+ '--toolchain_root',
+ dest='toolchain_root',
+ help='Toolchain root directory.')
+ parser.add_argument(
+ '-o', '--output', dest='output', help='Toolchain output directory')
+ parser.add_argument(
+ '--sudo',
+ dest='sudo',
+ action='store_true',
+ default=False,
+ help='Run the command with sudo.')
+ parser.add_argument(
+ '-r',
+ '--third_party',
+ dest='third_party',
+ help='The third_party directory to mount.')
+ parser.add_argument(
+ '-m',
+ '--other_mounts',
+ dest='other_mounts',
+ help='Other mount points in the form: '
+ 'dir:mounted_dir:options')
+ parser.add_argument(
+ '-s',
+ '--mount-scripts-only',
+ dest='mount_scripts_only',
+ action='store_true',
+ default=False,
+ help='Mount only the scripts dir, and not the sources.')
+ parser.add_argument(
+ 'passthrough_argv',
+ nargs='*',
+ help='Command to be executed inside the chroot.')
options = parser.parse_args(argv)
@@ -137,8 +143,10 @@ def Main(argv, return_output=False):
m = 'toolchain_root not specified. Will not mount toolchain dirs.'
logger.GetLogger().LogWarning(m)
else:
- tc_dirs = [options.toolchain_root + '/google_vendor_src_branch/gcc',
- options.toolchain_root + '/google_vendor_src_branch/binutils']
+ tc_dirs = [
+ options.toolchain_root + '/google_vendor_src_branch/gcc',
+ options.toolchain_root + '/google_vendor_src_branch/binutils'
+ ]
for tc_dir in tc_dirs:
if not os.path.exists(tc_dir):
@@ -154,9 +162,9 @@ def Main(argv, return_output=False):
sys.exit(1)
if not os.path.exists(chromeos_root + '/src/scripts/build_packages'):
- logger.GetLogger(
- ).LogError(options.chromeos_root + '/src/scripts/build_packages'
- ' not found!')
+ logger.GetLogger().LogError(options.chromeos_root +
+ '/src/scripts/build_packages'
+ ' not found!')
parser.print_help()
sys.exit(1)
@@ -176,16 +184,16 @@ def Main(argv, return_output=False):
# Add the third_party mount point if it exists
if options.third_party:
third_party_dir = options.third_party
- logger.GetLogger().LogFatalIf(
- not os.path.isdir(third_party_dir),
- '--third_party option is not a valid dir.')
+ logger.GetLogger().LogFatalIf(not os.path.isdir(third_party_dir),
+ '--third_party option is not a valid dir.')
else:
- third_party_dir = os.path.abspath('%s/../../../third_party' %
- os.path.dirname(__file__))
+ third_party_dir = os.path.abspath(
+ '%s/../../../third_party' % os.path.dirname(__file__))
if os.path.isdir(third_party_dir):
- mount_point = MountPoint(third_party_dir, ('%s/%s' % (
- full_mounted_tc_root, os.path.basename(third_party_dir))),
+ mount_point = MountPoint(third_party_dir,
+ ('%s/%s' % (full_mounted_tc_root,
+ os.path.basename(third_party_dir))),
getpass.getuser())
mount_points.append(mount_point)
@@ -195,8 +203,8 @@ def Main(argv, return_output=False):
output = options.toolchain_root + '/output'
if output:
- mount_points.append(MountPoint(output, full_mounted_tc_root + '/output',
- getpass.getuser()))
+ mount_points.append(
+ MountPoint(output, full_mounted_tc_root + '/output', getpass.getuser()))
# Mount the other mount points
mount_points += CreateMountPointsFromString(options.other_mounts,
@@ -235,16 +243,16 @@ def Main(argv, return_output=False):
inner_command = inner_command[3:]
command_file = 'tc_enter_chroot.cmd'
command_file_path = chromeos_root + '/src/scripts/' + command_file
- retv = command_executer.GetCommandExecuter().RunCommand('sudo rm -f ' +
- command_file_path)
+ retv = command_executer.GetCommandExecuter().RunCommand(
+ 'sudo rm -f ' + command_file_path)
if retv != 0:
return retv
f = open(command_file_path, 'w')
f.write(inner_command)
f.close()
logger.GetLogger().LogCmd(inner_command)
- retv = command_executer.GetCommandExecuter().RunCommand('chmod +x ' +
- command_file_path)
+ retv = command_executer.GetCommandExecuter().RunCommand(
+ 'chmod +x ' + command_file_path)
if retv != 0:
return retv
diff --git a/test_gcc_dejagnu.py b/test_gcc_dejagnu.py
index 41304a03..cd2e0cde 100755
--- a/test_gcc_dejagnu.py
+++ b/test_gcc_dejagnu.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Script adapter used by automation client for testing dejagnu.
@@ -41,8 +41,10 @@ class DejagnuAdapter(object):
self._cleanup = cleanup
def SetupChromeOS(self):
- cmd = [setup_chromeos.__file__, '--dir=' + self._chromeos_root,
- '--minilayout', '--jobs=8']
+ cmd = [
+ setup_chromeos.__file__, '--dir=' + self._chromeos_root, '--minilayout',
+ '--jobs=8'
+ ]
ret = setup_chromeos.Main(cmd)
if ret:
raise RuntimeError('Failed to checkout chromeos')
@@ -54,9 +56,8 @@ class DejagnuAdapter(object):
def SetupBoard(self):
cmd = './setup_board --board=' + self._board
- ret = self._cmd_exec.ChrootRunCommand(self._chromeos_root,
- cmd,
- terminated_timeout=4000)
+ ret = self._cmd_exec.ChrootRunCommand(
+ self._chromeos_root, cmd, terminated_timeout=4000)
if ret:
raise RuntimeError('Failed to setup board.')
@@ -73,17 +74,20 @@ class DejagnuAdapter(object):
ret = self._cmd_exec.RunCommand(cmd)
def BuildGCC(self):
- build_gcc_args = [build_tc.__file__, '--board=' + self._board,
- '--chromeos_root=' + self._chromeos_root,
- '--gcc_dir=' + self._gcc_dir]
+ build_gcc_args = [
+ build_tc.__file__, '--board=' + self._board,
+ '--chromeos_root=' + self._chromeos_root, '--gcc_dir=' + self._gcc_dir
+ ]
ret = build_tc.Main(build_gcc_args)
if ret:
raise RuntimeError('Building gcc failed.')
def CheckGCC(self):
- args = [run_dejagnu.__file__, '--board=' + self._board,
- '--chromeos_root=' + self._chromeos_root,
- '--mount=' + self._gcc_dir, '--remote=' + self._remote]
+ args = [
+ run_dejagnu.__file__, '--board=' + self._board,
+ '--chromeos_root=' + self._chromeos_root, '--mount=' + self._gcc_dir,
+ '--remote=' + self._remote
+ ]
if self._cleanup:
args.append('--cleanup=' + self._cleanup)
if self._runtestflags:
@@ -102,9 +106,9 @@ def GetNumNewFailures(input_str):
print(l)
if not start_counting and 'Build results not in the manifest' in l:
start_counting = True
- elif start_counting and l and (
- l.find('UNRESOLVED:') == 0 or l.find('FAIL:') == 0 or
- l.find('XFAIL:') == 0 or l.find('XPASS:') == 0):
+ elif start_counting and l and (l.find('UNRESOLVED:') == 0 or
+ l.find('FAIL:') == 0 or l.find('XFAIL:') == 0
+ or l.find('XPASS:') == 0):
n_failures = n_failures + 1
if not start_counting:
return -1
@@ -146,8 +150,7 @@ def EmailResult(result):
# email exception? Just log it on console.
print('Sending email failed - {0}'
'Subject: {1}'
- 'Text: {2}').format(
- str(e), subject, email_text)
+ 'Text: {2}').format(str(e), subject, email_text)
def ProcessArguments(argv):
@@ -156,35 +159,41 @@ def ProcessArguments(argv):
description=('This script is used by nightly client to test gcc. '
'DO NOT run it unless you know what you are doing.'),
usage='test_gcc_dejagnu.py options')
- parser.add_argument('-b',
- '--board',
- dest='board',
- help=('Required. Specify board type. For example '
- '\'lumpy\' and \'daisy\''))
- parser.add_argument('-r',
- '--remote',
- dest='remote',
- help=('Required. Specify remote board address'))
- parser.add_argument('-g',
- '--gcc_dir',
- dest='gcc_dir',
- default='gcc.live',
- help=('Optional. Specify gcc checkout directory.'))
- parser.add_argument('-c',
- '--chromeos_root',
- dest='chromeos_root',
- default='chromeos.live',
- help=('Optional. Specify chromeos checkout directory.'))
- parser.add_argument('--cleanup',
- dest='cleanup',
- default=None,
- help=('Optional. Do cleanup after the test.'))
- parser.add_argument('--runtestflags',
- dest='runtestflags',
- default=None,
- help=('Optional. Options to RUNTESTFLAGS env var '
- 'while invoking make check. '
- '(Mainly used for testing purpose.)'))
+ parser.add_argument(
+ '-b',
+ '--board',
+ dest='board',
+ help=('Required. Specify board type. For example '
+ '\'lumpy\' and \'daisy\''))
+ parser.add_argument(
+ '-r',
+ '--remote',
+ dest='remote',
+ help=('Required. Specify remote board address'))
+ parser.add_argument(
+ '-g',
+ '--gcc_dir',
+ dest='gcc_dir',
+ default='gcc.live',
+ help=('Optional. Specify gcc checkout directory.'))
+ parser.add_argument(
+ '-c',
+ '--chromeos_root',
+ dest='chromeos_root',
+ default='chromeos.live',
+ help=('Optional. Specify chromeos checkout directory.'))
+ parser.add_argument(
+ '--cleanup',
+ dest='cleanup',
+ default=None,
+ help=('Optional. Do cleanup after the test.'))
+ parser.add_argument(
+ '--runtestflags',
+ dest='runtestflags',
+ default=None,
+ help=('Optional. Options to RUNTESTFLAGS env var '
+ 'while invoking make check. '
+ '(Mainly used for testing purpose.)'))
options = parser.parse_args(argv[1:])
diff --git a/test_gdb_dejagnu.py b/test_gdb_dejagnu.py
index 4f44527f..c2a4ba9a 100755
--- a/test_gdb_dejagnu.py
+++ b/test_gdb_dejagnu.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
"""Script adapter used by automation client for testing dejagnu.
This is not intended to be run on command line.
@@ -28,8 +28,10 @@ class DejagnuAdapter(object):
self._cmd_exec = command_executer.GetCommandExecuter()
def SetupChromeOS(self):
- cmd = [setup_chromeos.__file__, '--dir=' + self._chromeos_root,
- '--minilayout', '--jobs=8']
+ cmd = [
+ setup_chromeos.__file__, '--dir=' + self._chromeos_root, '--minilayout',
+ '--jobs=8'
+ ]
ret = setup_chromeos.Main(cmd)
if ret:
raise RuntimeError('Failed to checkout chromeos')
@@ -41,16 +43,17 @@ class DejagnuAdapter(object):
def SetupBoard(self):
cmd = './setup_board --board=' + self._board
- ret = self._cmd_exec.ChrootRunCommand(self._chromeos_root,
- cmd,
- terminated_timeout=4000)
+ ret = self._cmd_exec.ChrootRunCommand(
+ self._chromeos_root, cmd, terminated_timeout=4000)
if ret:
raise RuntimeError('Failed to setup board.')
def CheckGDB(self):
- args = [gdb_dejagnu.__file__, '--board=' + self._board,
- '--chromeos_root=' + self._chromeos_root,
- '--mount=' + self._gdb_dir, '--remote=' + self._remote]
+ args = [
+ gdb_dejagnu.__file__, '--board=' + self._board,
+ '--chromeos_root=' + self._chromeos_root, '--mount=' + self._gdb_dir,
+ '--remote=' + self._remote
+ ]
if self._cleanup:
args.append('--cleanup=' + self._cleanup)
return gdb_dejagnu.Main(args)
@@ -98,8 +101,7 @@ def EmailResult(result):
# email exception? Just log it on console.
print('Sending email failed - {0}'
'Subject: {1}'
- 'Text: {2}').format(
- str(e), subject, email_text)
+ 'Text: {2}').format(str(e), subject, email_text)
def ProcessArguments(argv):
@@ -108,29 +110,34 @@ def ProcessArguments(argv):
description=('This script is used by nightly client to test gdb. '
'DO NOT run it unless you know what you are doing.'),
usage='test_gdb_dejagnu.py options')
- parser.add_argument('-b',
- '--board',
- dest='board',
- help=('Required. Specify board type. For example '
- '\'lumpy\' and \'daisy\''))
- parser.add_argument('-r',
- '--remote',
- dest='remote',
- help=('Required. Specify remote board address'))
- parser.add_argument('-g',
- '--gdb_dir',
- dest='gdb_dir',
- default='',
- help=('Optional. Specify gdb checkout directory.'))
- parser.add_argument('-c',
- '--chromeos_root',
- dest='chromeos_root',
- default='chromeos.live',
- help=('Optional. Specify chromeos checkout directory.'))
- parser.add_argument('--cleanup',
- dest='cleanup',
- default=None,
- help=('Optional. Do cleanup after the test.'))
+ parser.add_argument(
+ '-b',
+ '--board',
+ dest='board',
+ help=('Required. Specify board type. For example '
+ '\'lumpy\' and \'daisy\''))
+ parser.add_argument(
+ '-r',
+ '--remote',
+ dest='remote',
+ help=('Required. Specify remote board address'))
+ parser.add_argument(
+ '-g',
+ '--gdb_dir',
+ dest='gdb_dir',
+ default='',
+ help=('Optional. Specify gdb checkout directory.'))
+ parser.add_argument(
+ '-c',
+ '--chromeos_root',
+ dest='chromeos_root',
+ default='chromeos.live',
+ help=('Optional. Specify chromeos checkout directory.'))
+ parser.add_argument(
+ '--cleanup',
+ dest='cleanup',
+ default=None,
+ help=('Optional. Do cleanup after the test.'))
options = parser.parse_args(argv)
diff --git a/test_toolchains.py b/test_toolchains.py
index ecae6f48..8684653f 100755
--- a/test_toolchains.py
+++ b/test_toolchains.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
# Script to test different toolchains against ChromeOS benchmarks.
"""Toolchain team nightly performance test script (local builds)."""
@@ -11,7 +11,6 @@ import os
import sys
import build_chromeos
import setup_chromeos
-import time
from cros_utils import command_executer
from cros_utils import misc
from cros_utils import logger
diff --git a/update_telemetry_defaults.py b/update_telemetry_defaults.py
index 9ee7d8b0..943dc261 100755
--- a/update_telemetry_defaults.py
+++ b/update_telemetry_defaults.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
#
# Copyright 2013 Google Inc. All Rights Reserved.
"""Script to maintain the Telemetry benchmark default results file.
@@ -6,7 +6,6 @@
This script allows the user to see and update the set of default
results to be used in generating reports from running the Telemetry
benchmarks.
-
"""
from __future__ import print_function
@@ -84,8 +83,8 @@ class TelemetryDefaults(object):
print("Updated results set for '%s': " % benchmark)
print('%s : %s' % (benchmark, repr(self._defaults[benchmark])))
else:
- print("'%s' is not in '%s's default results list." %
- (result, benchmark))
+ print("'%s' is not in '%s's default results list." % (result,
+ benchmark))
else:
print("Cannot find benchmark named '%s'" % benchmark)
diff --git a/verify_compiler.py b/verify_compiler.py
index 9eafbb8a..b70c1257 100755
--- a/verify_compiler.py
+++ b/verify_compiler.py
@@ -34,13 +34,13 @@ def CreateTmpDwarfFile(filename, dwarf_file, cmd_executer):
return retval
-def FindAllFiles(root_dir, cmd_executer):
+def FindAllFiles(root_dir):
"""Create a list of all the *.debug and *.dwp files to be checked."""
file_list = []
tmp_list = [
os.path.join(dirpath, f)
- for dirpath, dirnames, files in os.walk(root_dir)
+ for dirpath, _, files in os.walk(root_dir)
for f in fnmatch.filter(files, '*.debug')
]
for f in tmp_list:
@@ -48,7 +48,7 @@ def FindAllFiles(root_dir, cmd_executer):
file_list.append(f)
tmp_list = [
os.path.join(dirpath, f)
- for dirpath, dirnames, files in os.walk(root_dir)
+ for dirpath, _, files in os.walk(root_dir)
for f in fnmatch.filter(files, '*.dwp')
]
file_list += tmp_list
@@ -99,8 +99,8 @@ def CheckFile(filename, compiler, tmp_dir, options, cmd_executer):
status = CreateTmpDwarfFile(filename, dwarf_file, cmd_executer)
if status != 0:
- print('Unable to create dwarf file for %s (status: %d).' %
- (filename, status))
+ print('Unable to create dwarf file for %s (status: %d).' % (filename,
+ status))
return status
comp_str = COMPILER_STRINGS[compiler]
@@ -121,8 +121,8 @@ def CheckFile(filename, compiler, tmp_dir, options, cmd_executer):
if 'DW_AT_name' in line:
words = line.split(':')
bad_file = words[-1]
- print('FAIL: %s was not compiled with %s.' %
- (bad_file.rstrip(), compiler))
+ print('FAIL: %s was not compiled with %s.' % (bad_file.rstrip(),
+ compiler))
looking_for_name = False
elif 'DW_TAG_' in line:
looking_for_name = False
@@ -189,7 +189,7 @@ def Main(argv):
if filename:
file_list.append(filename)
else:
- file_list = FindAllFiles(root_dir, cmd_executer)
+ file_list = FindAllFiles(root_dir)
bad_files = []
unknown_files = []
@@ -222,9 +222,8 @@ def Main(argv):
for f in bad_files:
print(f)
if len(unknown_files) > 0:
- print(
- '\n\nUnable to verify the following files (no debug info in them):\n'
- )
+ print('\n\nUnable to verify the following files (no debug info in '
+ 'them):\n')
for f in unknown_files:
print(f)
return 1
diff --git a/weekly_report.py b/weekly_report.py
index e74c623d..01db867d 100755
--- a/weekly_report.py
+++ b/weekly_report.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
#
# Copyright Google Inc. 2014
"""Module to generate the 7-day crosperf reports."""
@@ -55,8 +55,8 @@ benchmark: all_toolchain_perf {
chromeos_root: %s
chromeos_image: %s
}
-""" % (test_name, chromeos_root, os.path.join(test_path,
- 'chromiumos_test_image.bin'))
+""" % (test_name, chromeos_root,
+ os.path.join(test_path, 'chromiumos_test_image.bin'))
f.write(test_image)
return filename
@@ -109,8 +109,8 @@ benchmark: all_toolchain_perf {
chromeos_root: %s
chromeos_image: %s
}
-""" % (test_name, chromeos_root, os.path.join(test_path,
- 'chromiumos_test_image.bin'))
+""" % (test_name, chromeos_root,
+ os.path.join(test_path, 'chromiumos_test_image.bin'))
f.write(test_image)
return filename
@@ -121,13 +121,14 @@ def Main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--board', dest='board', help='Target board.')
parser.add_argument('-r', '--remote', dest='remote', help='Target device.')
- parser.add_argument('-v',
- '--vanilla_only',
- dest='vanilla_only',
- action='store_true',
- default=False,
- help='Generate a report comparing only the vanilla '
- 'images.')
+ parser.add_argument(
+ '-v',
+ '--vanilla_only',
+ dest='vanilla_only',
+ action='store_true',
+ default=False,
+ help='Generate a report comparing only the vanilla '
+ 'images.')
options = parser.parse_args(argv[1:])
@@ -200,8 +201,8 @@ def Main(argv):
timestamp = datetime.datetime.strftime(datetime.datetime.now(),
'%Y-%m-%d_%H:%M:%S')
results_dir = os.path.join(
- os.path.expanduser('~/nightly_test_reports'), '%s.%s' % (
- timestamp, options.board), 'weekly_tests')
+ os.path.expanduser('~/nightly_test_reports'),
+ '%s.%s' % (timestamp, options.board), 'weekly_tests')
for day in WEEKDAYS:
startdir = os.path.join(constants.CROSTC_WORKSPACE, day)
@@ -232,8 +233,8 @@ def Main(argv):
# Run Crosperf on the file to generate the weekly report.
cmd = ('%s/toolchain-utils/crosperf/crosperf '
- '%s --no_email=True --results_dir=%s' %
- (constants.CROSTC_WORKSPACE, filename, results_dir))
+ '%s --no_email=True --results_dir=%s' % (constants.CROSTC_WORKSPACE,
+ filename, results_dir))
retv = cmd_executer.RunCommand(cmd)
if retv == 0:
# Send the email, if the crosperf command worked.