aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLuis Lozano <llozano@chromium.org>2015-12-15 13:49:30 -0800
committerLuis Lozano <llozano@chromium.org>2015-12-16 17:36:06 +0000
commitf2a3ef46f75d2196a93d3ed27f4d1fcf22b54fbe (patch)
tree185d243c7eed7c7a0db6f0e640746cadc1479ea9
parent2a66f70fef907c1cb15229cb58e5129cb620ac98 (diff)
downloadtoolchain-utils-f2a3ef46f75d2196a93d3ed27f4d1fcf22b54fbe.tar.gz
Run pyformat on all the toolchain-utils files.
This gets rid of a lot of lint issues. Ran by doing this: for f in *.py; do echo -n "$f " ; if [ -x $f ]; then pyformat -i --remove_trailing_comma --yapf --force_quote_type=double $f ; else pyformat -i --remove_shebang --remove_trailing_comma --yapf --force_quote_type=double $f ; fi ; done BUG=chromium:567921 TEST=Ran simple crosperf run. Change-Id: I59778835fdaa5f706d2e1765924389f9e97433d1 Reviewed-on: https://chrome-internal-review.googlesource.com/242031 Reviewed-by: Luis Lozano <llozano@chromium.org> Commit-Queue: Luis Lozano <llozano@chromium.org> Tested-by: Luis Lozano <llozano@chromium.org> Reviewed-by: Yunlian Jiang <yunlian@google.com>
-rwxr-xr-xafe_lock_machine.py96
-rwxr-xr-xauto_delete_nightly_test_data.py49
-rw-r--r--automation/__init__.py1
-rw-r--r--automation/all_tests.py12
-rw-r--r--automation/clients/__init__.py1
-rwxr-xr-xautomation/clients/android.py1
-rwxr-xr-xautomation/clients/chromeos.py12
-rwxr-xr-xautomation/clients/crosstool.py30
-rwxr-xr-xautomation/clients/dejagnu_compiler.py42
-rw-r--r--automation/clients/helper/android.py101
-rw-r--r--automation/clients/helper/chromeos.py145
-rw-r--r--automation/clients/helper/crosstool.py85
-rw-r--r--automation/clients/helper/jobs.py8
-rw-r--r--automation/clients/helper/perforce.py37
-rwxr-xr-xautomation/clients/nightly.py39
-rwxr-xr-xautomation/clients/output_test.py14
-rwxr-xr-xautomation/clients/pwd_test.py16
-rw-r--r--automation/clients/report/dejagnu/__init__.py1
-rw-r--r--automation/clients/report/dejagnu/main.py49
-rw-r--r--automation/clients/report/dejagnu/manifest.py5
-rw-r--r--automation/clients/report/dejagnu/report.py23
-rw-r--r--automation/clients/report/dejagnu/summary.py23
-rwxr-xr-xautomation/clients/report/validate_failures.py48
-rw-r--r--automation/common/__init__.py1
-rw-r--r--automation/common/command.py13
-rw-r--r--automation/common/command_executer.py49
-rwxr-xr-xautomation/common/command_executer_test.py18
-rw-r--r--automation/common/events.py22
-rw-r--r--automation/common/job.py25
-rw-r--r--automation/common/job_group.py32
-rw-r--r--automation/common/logger.py21
-rw-r--r--automation/common/machine.py35
-rwxr-xr-xautomation/common/machine_test.py12
-rw-r--r--automation/common/state_machine.py1
-rw-r--r--automation/server/__init__.py1
-rw-r--r--automation/server/job_executer.py48
-rw-r--r--automation/server/job_group_manager.py12
-rw-r--r--automation/server/job_manager.py13
-rw-r--r--automation/server/machine_manager.py5
-rwxr-xr-xautomation/server/machine_manager_test.py8
-rw-r--r--automation/server/monitor/__init__.py1
-rw-r--r--automation/server/monitor/dashboard.py50
-rwxr-xr-xautomation/server/monitor/manage.py4
-rw-r--r--automation/server/monitor/settings.py6
-rw-r--r--automation/server/monitor/urls.py18
-rwxr-xr-xautomation/server/server.py40
-rwxr-xr-xautomation/server/server_test.py7
-rw-r--r--bestflags/example_algorithms.py18
-rw-r--r--bestflags/flags.py1
-rw-r--r--bestflags/flags_test.py1
-rw-r--r--bestflags/flags_util.py2
-rw-r--r--bestflags/generation.py1
-rw-r--r--bestflags/generation_test.py3
-rw-r--r--bestflags/genetic_algorithm.py2
-rw-r--r--bestflags/hill_climb_best_neighbor.py2
-rw-r--r--bestflags/iterative_elimination.py1
-rw-r--r--bestflags/mock_task.py1
-rw-r--r--bestflags/pipeline_process.py16
-rw-r--r--bestflags/pipeline_process_test.py6
-rw-r--r--bestflags/pipeline_worker.py1
-rw-r--r--bestflags/pipeline_worker_test.py9
-rw-r--r--bestflags/steering.py1
-rw-r--r--bestflags/steering_test.py17
-rw-r--r--bestflags/task.py7
-rw-r--r--bestflags/task_test.py2
-rw-r--r--bestflags/testing_batch.py12
-rwxr-xr-xbinary_search_tool/__init__.py1
-rwxr-xr-xbinary_search_tool/binary_search_perforce.py291
-rwxr-xr-xbinary_search_tool/binary_search_state.py208
-rwxr-xr-xbinary_search_tool/cros_pkg/cros_pkg_create_cleanup_script.py148
-rwxr-xr-xbinary_search_tool/cros_pkg/cros_pkg_undo_eclean.py48
-rwxr-xr-xbinary_search_tool/test/__init__.py1
-rwxr-xr-xbinary_search_tool/test/binary_search_tool_tester.py13
-rwxr-xr-xbinary_search_tool/test/common.py1
-rwxr-xr-xbinary_search_tool/test/gen_init_list.py4
-rwxr-xr-xbinary_search_tool/test/gen_obj.py11
-rwxr-xr-xbinary_search_tool/test/is_good.py1
-rwxr-xr-xbuild_benchmarks.py222
-rwxr-xr-xbuild_chrome_browser.py212
-rwxr-xr-xbuild_chromeos.py281
-rwxr-xr-xbuild_tc.py315
-rwxr-xr-xbuild_tool.py214
-rwxr-xr-xbuildbot_test_toolchains.py189
-rwxr-xr-xcommand_executer_timeout_test.py9
-rwxr-xr-xcompare_benchmarks.py46
-rw-r--r--crb/autotest_gatherer.py20
-rw-r--r--crb/autotest_run.py175
-rwxr-xr-xcrb/crb_driver.py211
-rw-r--r--crb/machine_manager_singleton.py58
-rw-r--r--crb/table_formatter.py83
-rwxr-xr-xcros_login.py18
-rw-r--r--crosperf/benchmark.py19
-rw-r--r--crosperf/benchmark_run.py183
-rwxr-xr-xcrosperf/benchmark_run_unittest.py352
-rwxr-xr-xcrosperf/benchmark_unittest.py49
-rw-r--r--crosperf/column_chart.py16
-rw-r--r--crosperf/compare_machines.py28
-rw-r--r--crosperf/config.py1
-rwxr-xr-xcrosperf/config_unittest.py1
-rwxr-xr-xcrosperf/crosperf.py62
-rwxr-xr-xcrosperf/crosperf_test.py6
-rwxr-xr-xcrosperf/crosperf_unittest.py28
-rw-r--r--crosperf/download_images.py61
-rwxr-xr-xcrosperf/download_images_buildid_test.py78
-rwxr-xr-xcrosperf/download_images_unittest.py78
-rw-r--r--crosperf/experiment.py52
-rw-r--r--crosperf/experiment_factory.py186
-rwxr-xr-xcrosperf/experiment_factory_unittest.py172
-rw-r--r--crosperf/experiment_file.py57
-rwxr-xr-xcrosperf/experiment_file_unittest.py43
-rw-r--r--crosperf/experiment_runner.py81
-rwxr-xr-xcrosperf/experiment_runner_unittest.py125
-rw-r--r--crosperf/experiment_status.py64
-rw-r--r--crosperf/field.py68
-rwxr-xr-xcrosperf/flag_test_unittest.py2
-rw-r--r--crosperf/help.py28
-rw-r--r--crosperf/image_checksummer.py32
-rw-r--r--crosperf/label.py77
-rw-r--r--crosperf/machine_image_manager.py273
-rwxr-xr-xcrosperf/machine_image_manager_unittest.py485
-rw-r--r--crosperf/machine_manager.py227
-rwxr-xr-xcrosperf/machine_manager_unittest.py344
-rw-r--r--crosperf/mock_instance.py175
-rw-r--r--crosperf/perf_table.py15
-rw-r--r--crosperf/results_cache.py322
-rwxr-xr-xcrosperf/results_cache_unittest.py480
-rw-r--r--crosperf/results_organizer.py23
-rwxr-xr-xcrosperf/results_organizer_unittest.py154
-rw-r--r--crosperf/results_report.py218
-rw-r--r--crosperf/results_sorter.py4
-rw-r--r--crosperf/schedv2.py605
-rwxr-xr-xcrosperf/schedv2_unittest.py283
-rw-r--r--crosperf/settings.py19
-rw-r--r--crosperf/settings_factory.py315
-rwxr-xr-xcrosperf/settings_factory_unittest.py27
-rwxr-xr-xcrosperf/settings_unittest.py109
-rw-r--r--crosperf/suite_runner.py245
-rwxr-xr-xcrosperf/suite_runner_unittest.py181
-rw-r--r--crosperf/test_flag.py4
-rw-r--r--crosperf/translate_xbuddy.py10
-rwxr-xr-xcwp/bartlett/server.py53
-rw-r--r--cwp/bartlett/test/server_tester.py35
-rw-r--r--cwp/interpreter/app_engine_pull.py122
-rw-r--r--cwp/interpreter/symbolizer.py46
-rw-r--r--cwp/performance/experiment_gen.py66
-rw-r--r--dejagnu/__init__.py1
-rwxr-xr-xdejagnu/gdb_dejagnu.py127
-rwxr-xr-xdejagnu/run_dejagnu.py141
-rwxr-xr-xfdo_scripts/divide_and_merge_profiles.py77
-rwxr-xr-xfdo_scripts/divide_and_merge_profiles_test.py65
-rwxr-xr-xfdo_scripts/profile_cycler.py150
-rw-r--r--fdo_scripts/summarize_hot_blocks.py99
-rw-r--r--fdo_scripts/vanilla_vs_fdo.py279
-rwxr-xr-xfile_lock_machine.py164
-rwxr-xr-xget_common_image_version.py34
-rwxr-xr-xheat_map.py53
-rwxr-xr-ximage_chromeos.py298
-rw-r--r--lock_machine_test.py40
-rwxr-xr-xmem_tests/clean_data.py11
-rwxr-xr-xmem_tests/mem_groups.py47
-rwxr-xr-xmem_tests/total_mem_actual.py23
-rwxr-xr-xmem_tests/total_mem_sampled.py17
-rw-r--r--mem_tests/utils.py13
-rwxr-xr-xproduce_output.py10
-rwxr-xr-xremote_gcc_build.py306
-rwxr-xr-xremote_kill_test.py23
-rwxr-xr-xremote_test.py42
-rwxr-xr-xrepo_to_repo.py68
-rwxr-xr-xreport_generator.py87
-rwxr-xr-xrun_benchmarks.py128
-rwxr-xr-xrun_tests.py8
-rwxr-xr-xsetup_chromeos.py169
-rwxr-xr-xsheriff_rotation.py67
-rwxr-xr-xsummarize_results.py64
-rwxr-xr-xtc_enter_chroot.py195
-rwxr-xr-xtest_gcc_dejagnu.py109
-rwxr-xr-xtest_gdb_dejagnu.py74
-rwxr-xr-xtest_toolchains.py276
-rwxr-xr-xupdate_telemetry_defaults.py80
-rw-r--r--utils/__init__.py1
-rwxr-xr-xutils/buildbot_json.py282
-rw-r--r--utils/buildbot_utils.py437
-rw-r--r--utils/colortrans.py334
-rw-r--r--utils/command_executer.py273
-rwxr-xr-xutils/command_executer_unittest.py9
-rw-r--r--utils/constants.py3
-rwxr-xr-xutils/email_sender.py73
-rw-r--r--utils/file_utils.py27
-rw-r--r--utils/html_tools.py30
-rw-r--r--utils/locks.py12
-rw-r--r--utils/logger.py161
-rw-r--r--utils/machines.py2
-rw-r--r--utils/manifest_versions.py63
-rw-r--r--utils/misc.py245
-rw-r--r--utils/misc_test.py11
-rw-r--r--utils/no_pseudo_terminal_test.py6
-rwxr-xr-xutils/perf_diff.py92
-rw-r--r--utils/pstat.py1151
-rw-r--r--utils/stats.py4883
-rw-r--r--utils/tabulator.py338
-rw-r--r--utils/tabulator_test.py70
-rw-r--r--utils/timeline.py6
-rw-r--r--utils/timeline_test.py10
-rwxr-xr-xweekly_report.py357
204 files changed, 11790 insertions, 11383 deletions
diff --git a/afe_lock_machine.py b/afe_lock_machine.py
index dd958ca6..a6766899 100755
--- a/afe_lock_machine.py
+++ b/afe_lock_machine.py
@@ -1,7 +1,6 @@
#!/usr/bin/python2
#
# Copyright 2015 Google INc. All Rights Reserved.
-
"""This module controls locking and unlocking of test machines."""
from __future__ import print_function
@@ -15,6 +14,7 @@ import traceback
from utils import logger
from utils import machines
+
class AFELockException(Exception):
"""Base class for exceptions in this module."""
@@ -76,8 +76,13 @@ class AFELockManager(object):
LOCAL_SERVER = 'chrotomation2.mtv.corp.google.com'
- def __init__(self, remotes, force_option, chromeos_root, local_server,
- local=True, log=None):
+ def __init__(self,
+ remotes,
+ force_option,
+ chromeos_root,
+ local_server,
+ local=True,
+ log=None):
"""Initializes an AFELockManager object.
Args:
@@ -124,8 +129,8 @@ class AFELockManager(object):
dargs = {}
dargs['server'] = local_server or AFELockManager.LOCAL_SERVER
# Make sure local server is pingable.
- error_msg = ('Local autotest server machine %s not responding to ping.'
- % dargs['server'])
+ error_msg = ('Local autotest server machine %s not responding to ping.' %
+ dargs['server'])
self.CheckMachine(dargs['server'], error_msg)
self.local_afe = frontend_wrappers.RetryingAFE(timeout_min=30,
delay_sec=10,
@@ -176,8 +181,8 @@ class AFELockManager(object):
Returns:
A list of names of the toolchain machines in the ChromeOS HW lab.
"""
- machines_file = os.path.join(os.path.dirname(__file__),
- 'crosperf', 'default_remotes')
+ machines_file = os.path.join(
+ os.path.dirname(__file__), 'crosperf', 'default_remotes')
machine_list = []
with open(machines_file, 'r') as input_file:
lines = input_file.readlines()
@@ -258,8 +263,8 @@ class AFELockManager(object):
for cros_name in [m, m + '.cros']:
if cros_name in self.toolchain_lab_machines:
raise UpdateNonLocalMachine('Machine %s is already in the ChromeOS HW'
- 'Lab. Cannot add it to local server.'
- % cros_name)
+ 'Lab. Cannot add it to local server.' %
+ cros_name)
host_info = self.local_afe.get_hosts(hostname=m)
if host_info:
raise DuplicateAdd('Machine %s is already on the local server.' % m)
@@ -268,8 +273,8 @@ class AFELockManager(object):
self.logger.LogOutput('Successfully added %s to local server.' % m)
except Exception as e:
traceback.print_exc()
- raise UpdateServerError('Error occurred while attempting to add %s. %s'
- % (m, str(e)))
+ raise UpdateServerError(
+ 'Error occurred while attempting to add %s. %s' % (m, str(e)))
def RemoveMachinesFromLocalServer(self):
"""Removes one or more machines from the local AFE server.
@@ -285,9 +290,9 @@ class AFELockManager(object):
for m in self.machines:
for cros_name in [m, m + '.cros']:
if cros_name in self.toolchain_lab_machines:
- raise UpdateNonLocalMachine('Machine %s is in the ChromeOS HW Lab. '
- 'This script cannot remove lab machines.'
- % cros_name)
+ raise UpdateNonLocalMachine(
+ 'Machine %s is in the ChromeOS HW Lab. '
+ 'This script cannot remove lab machines.' % cros_name)
try:
self.RemoveLocalMachine(m)
self.logger.LogOutput('Successfully removed %s from local server.' % m)
@@ -336,7 +341,6 @@ class AFELockManager(object):
else:
print('%s (%s)\tunlocked' % (m, state['board']))
-
def UpdateLockInAFE(self, should_lock_machine, machine):
"""Calls an AFE server to lock/unlock a machine.
@@ -412,8 +416,9 @@ class AFELockManager(object):
if machine.find('.cros') == -1:
cros_machine = cros_machine + '.cros'
- self.machines = [m for m in self.machines if m != cros_machine and
- m != machine]
+ self.machines = [m
+ for m in self.machines
+ if m != cros_machine and m != machine]
def CheckMachineLocks(self, machine_states, cmd):
"""Check that every machine in requested list is in the proper state.
@@ -442,8 +447,8 @@ class AFELockManager(object):
'else (%s).' % (k, state['locked_by']))
elif cmd == 'lock':
if state['locked']:
- self.logger.LogWarning('Attempt to lock already locked machine (%s)'
- % k)
+ self.logger.LogWarning('Attempt to lock already locked machine (%s)' %
+ k)
self._InternalRemoveMachine(k)
def HasAFEServer(self, local):
@@ -533,32 +538,55 @@ def Main(argv):
"""
parser = argparse.ArgumentParser()
- parser.add_argument('--list', dest='cmd', action='store_const',
+ parser.add_argument('--list',
+ dest='cmd',
+ action='store_const',
const='status',
help='List current status of all known machines.')
- parser.add_argument('--lock', dest='cmd', action='store_const',
- const='lock', help='Lock given machine(s).')
- parser.add_argument('--unlock', dest='cmd', action='store_const',
- const='unlock', help='Unlock given machine(s).')
- parser.add_argument('--status', dest='cmd', action='store_const',
+ parser.add_argument('--lock',
+ dest='cmd',
+ action='store_const',
+ const='lock',
+ help='Lock given machine(s).')
+ parser.add_argument('--unlock',
+ dest='cmd',
+ action='store_const',
+ const='unlock',
+ help='Unlock given machine(s).')
+ parser.add_argument('--status',
+ dest='cmd',
+ action='store_const',
const='status',
help='List current status of given machine(s).')
- parser.add_argument('--add_machine', dest='cmd', action='store_const',
+ parser.add_argument('--add_machine',
+ dest='cmd',
+ action='store_const',
const='add',
help='Add machine to local machine server.')
- parser.add_argument('--remove_machine', dest='cmd',
- action='store_const', const='remove',
+ parser.add_argument('--remove_machine',
+ dest='cmd',
+ action='store_const',
+ const='remove',
help='Remove machine from the local machine server.')
- parser.add_argument('--nolocal', dest='local',
- action='store_false', default=True,
+ parser.add_argument('--nolocal',
+ dest='local',
+ action='store_false',
+ default=True,
help='Do not try to use local machine server.')
- parser.add_argument('--remote', dest='remote',
+ parser.add_argument('--remote',
+ dest='remote',
help='machines on which to operate')
- parser.add_argument('--chromeos_root', dest='chromeos_root', required=True,
+ parser.add_argument('--chromeos_root',
+ dest='chromeos_root',
+ required=True,
help='ChromeOS root to use for autotest scripts.')
- parser.add_argument('--local_server', dest='local_server', default=None,
+ parser.add_argument('--local_server',
+ dest='local_server',
+ default=None,
help='Alternate local autotest server to use.')
- parser.add_argument('--force', dest='force', action='store_true',
+ parser.add_argument('--force',
+ dest='force',
+ action='store_true',
default=False,
help='Force lock/unlock of machines, even if not'
' current lock owner.')
diff --git a/auto_delete_nightly_test_data.py b/auto_delete_nightly_test_data.py
index f8eaeacc..ea6ea7cc 100755
--- a/auto_delete_nightly_test_data.py
+++ b/auto_delete_nightly_test_data.py
@@ -1,5 +1,4 @@
#!/usr/bin/python
-
"""A crontab script to delete night test data."""
__author__ = 'shenhan@google.com (Han Shen)'
@@ -18,9 +17,10 @@ DIR_BY_WEEKDAY = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
def CleanNumberedDir(s, dry_run=False):
"""Deleted directories under each dated_dir."""
- chromeos_dirs = [os.path.join(s, x) for x in os.listdir(s)
+ chromeos_dirs = [os.path.join(s, x)
+ for x in os.listdir(s)
if misc.IsChromeOsTree(os.path.join(s, x))]
- ce = command_executer.GetCommandExecuter(log_level="none")
+ ce = command_executer.GetCommandExecuter(log_level='none')
all_succeeded = True
for cd in chromeos_dirs:
if misc.DeleteChromeOsTree(cd, dry_run=dry_run):
@@ -36,11 +36,11 @@ def CleanNumberedDir(s, dry_run=False):
## Now delete the numbered dir Before forcibly removing the directory, just
## check 's' to make sure it is sane. A valid dir to be removed must be
## '/usr/local/google/crostc/(SUN|MON|TUE...|SAT)'.
- valid_dir_pattern = ('^' + constants.CROSTC_WORKSPACE + '/(' +
- '|'.join(DIR_BY_WEEKDAY) + ')')
+ valid_dir_pattern = (
+ '^' + constants.CROSTC_WORKSPACE + '/(' + '|'.join(DIR_BY_WEEKDAY) + ')')
if not re.search(valid_dir_pattern, s):
- print ('Trying to delete an invalid dir "{0}" (must match "{1}"), '
- 'please check.'.format(s, valid_dir_pattern))
+ print('Trying to delete an invalid dir "{0}" (must match "{1}"), '
+ 'please check.'.format(s, valid_dir_pattern))
return False
cmd = 'rm -fr {0}'.format(s)
@@ -57,7 +57,8 @@ def CleanNumberedDir(s, dry_run=False):
def CleanDatedDir(dated_dir, dry_run=False):
# List subdirs under dir
- subdirs = [os.path.join(dated_dir, x) for x in os.listdir(dated_dir)
+ subdirs = [os.path.join(dated_dir, x)
+ for x in os.listdir(dated_dir)
if os.path.isdir(os.path.join(dated_dir, x))]
all_succeeded = True
for s in subdirs:
@@ -71,10 +72,15 @@ def ProcessArguments(argv):
parser = optparse.OptionParser(
description='Automatically delete nightly test data directories.',
usage='auto_delete_nightly_test_data.py options')
- parser.add_option('-d', '--dry_run', dest='dry_run',
- default=False, action='store_true',
+ parser.add_option('-d',
+ '--dry_run',
+ dest='dry_run',
+ default=False,
+ action='store_true',
help='Only print command line, do not execute anything.')
- parser.add_option('--days_to_preserve', dest='days_to_preserve', default=3,
+ parser.add_option('--days_to_preserve',
+ dest='days_to_preserve',
+ default=3,
help=('Specify the number of days (not including today), '
'test data generated on these days will *NOT* be '
'deleted. Defaults to 3.'))
@@ -85,8 +91,8 @@ def ProcessArguments(argv):
def CleanChromeOsTmpAndImages():
"""Delete temporaries, images under crostc/chromeos."""
- chromeos_chroot_tmp = os.path.join(constants.CROSTC_WORKSPACE,
- 'chromeos', 'chroot', 'tmp')
+ chromeos_chroot_tmp = os.path.join(constants.CROSTC_WORKSPACE, 'chromeos',
+ 'chroot', 'tmp')
ce = command_executer.GetCommandExecuter()
# Clean chroot/tmp/test_that_* and chroot/tmp/tmpxxxxxx, that were last
@@ -98,11 +104,11 @@ def CleanChromeOsTmpAndImages():
r'-exec bash -c "rm -fr {{}}" \;').format(chromeos_chroot_tmp)
rv = ce.RunCommand(cmd, print_to_console=False)
if rv == 0:
- print ('Successfully cleaned chromeos tree tmp directory '
- '"{0}".'.format(chromeos_chroot_tmp))
+ print('Successfully cleaned chromeos tree tmp directory '
+ '"{0}".'.format(chromeos_chroot_tmp))
else:
- print ('Some directories were not removed under chromeos tree '
- 'tmp directory -"{0}".'.format(chromeos_chroot_tmp))
+ print('Some directories were not removed under chromeos tree '
+ 'tmp directory -"{0}".'.format(chromeos_chroot_tmp))
# Clean image tar files, which were last accessed 1 hour ago and clean image
# bin files that were last accessed more than 24 hours ago.
@@ -135,11 +141,12 @@ def Main(argv):
if i <= 0:
## Wrap around if index is negative. 6 is from i + 7 - 1, because
## DIR_BY_WEEKDAY starts from 0, while isoweekday is from 1-7.
- dated_dir = DIR_BY_WEEKDAY[i+6]
+ dated_dir = DIR_BY_WEEKDAY[i + 6]
else:
- dated_dir = DIR_BY_WEEKDAY[i-1]
- rv += 0 if CleanDatedDir(os.path.join(
- constants.CROSTC_WORKSPACE, dated_dir), options.dry_run) else 1
+ dated_dir = DIR_BY_WEEKDAY[i - 1]
+ rv += 0 if CleanDatedDir(
+ os.path.join(constants.CROSTC_WORKSPACE,
+ dated_dir), options.dry_run) else 1
## Finally clean temporaries, images under crostc/chromeos
rv2 = CleanChromeOsTmpAndImages()
diff --git a/automation/__init__.py b/automation/__init__.py
index e69de29b..8b137891 100644
--- a/automation/__init__.py
+++ b/automation/__init__.py
@@ -0,0 +1 @@
+
diff --git a/automation/all_tests.py b/automation/all_tests.py
index 3ca4f52a..e7b70884 100644
--- a/automation/all_tests.py
+++ b/automation/all_tests.py
@@ -2,15 +2,15 @@ import glob
import sys
import unittest
-sys.path.insert(0, "server")
-sys.path.insert(0, "clients")
-sys.path.insert(0, "common")
+sys.path.insert(0, 'server')
+sys.path.insert(0, 'clients')
+sys.path.insert(0, 'common')
test_file_strings = glob.glob('*/*_test.py')
module_strings = [str[0:len(str) - 3] for str in test_file_strings]
for i in range(len(module_strings)):
- module_strings[i] = module_strings[i].split("/")[-1]
-suites = [unittest.defaultTestLoader.loadTestsFromName(str) for str
- in module_strings]
+ module_strings[i] = module_strings[i].split('/')[-1]
+suites = [unittest.defaultTestLoader.loadTestsFromName(str)
+ for str in module_strings]
testSuite = unittest.TestSuite(suites)
text_runner = unittest.TextTestRunner().run(testSuite)
diff --git a/automation/clients/__init__.py b/automation/clients/__init__.py
index e69de29b..8b137891 100644
--- a/automation/clients/__init__.py
+++ b/automation/clients/__init__.py
@@ -0,0 +1 @@
+
diff --git a/automation/clients/android.py b/automation/clients/android.py
index a65cba2e..603744b1 100755
--- a/automation/clients/android.py
+++ b/automation/clients/android.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""Client for Android nightly jobs.
Does the following jobs:
diff --git a/automation/clients/chromeos.py b/automation/clients/chromeos.py
index 18b29b7c..084f7840 100755
--- a/automation/clients/chromeos.py
+++ b/automation/clients/chromeos.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""chromeos.py: Build & Test ChromeOS using custom compilers."""
__author__ = 'asharif@google.com (Ahmad Sharif)'
-
import logging
import optparse
import os
@@ -38,7 +36,8 @@ class ChromeOSNightlyClient(object):
def CheckoutV14Dir(self):
p4view = perforce.View(self.DEPOT2_DIR, [
- perforce.PathMapping('gcctools/chromeos/v14/...')])
+ perforce.PathMapping('gcctools/chromeos/v14/...')
+ ])
return self.GetP4Snapshot(p4view)
def GetP4Snapshot(self, p4view):
@@ -56,12 +55,12 @@ class ChromeOSNightlyClient(object):
os.path.join(self.P4_VERSION_DIR, 'test_toolchains.py'),
'--force-mismatch',
'--clean',
- '--public', # crbug.com/145822
+ '--public', # crbug.com/145822
'--board=%s' % self._board,
'--remote=%s' % self._remote,
'--githashes=%s' % self._gcc_githash))
label = 'testlabel'
- job = jobs.CreateLinuxJob(label, chain, timeout=24*60*60)
+ job = jobs.CreateLinuxJob(label, chain, timeout=24 * 60 * 60)
return job_group.JobGroup(label, [job], True, False)
@@ -92,7 +91,8 @@ def Main(argv):
logging.error('Specify a board, remote and gcc_githash')
return 1
- client = ChromeOSNightlyClient(options.board, options.remote,
+ client = ChromeOSNightlyClient(options.board,
+ options.remote,
options.gcc_githash,
p4_snapshot=options.p4_snapshot)
client.Run()
diff --git a/automation/clients/crosstool.py b/automation/clients/crosstool.py
index 450d6030..65720343 100755
--- a/automation/clients/crosstool.py
+++ b/automation/clients/crosstool.py
@@ -43,8 +43,8 @@ class CrosstoolNightlyClient(object):
all_jobs = [checkout_crosstool_job]
# Build crosstool target
- build_release_job, build_tree_dir = factory.BuildRelease(
- checkout_dir, self._target)
+ build_release_job, build_tree_dir = factory.BuildRelease(checkout_dir,
+ self._target)
all_jobs.append(build_release_job)
testruns = []
@@ -52,8 +52,8 @@ class CrosstoolNightlyClient(object):
# Perform crosstool tests
for board in self._boards:
for component in ('gcc', 'binutils'):
- test_job, testrun_dir = factory.RunTests(
- checkout_dir, build_tree_dir, self._target, board, component)
+ test_job, testrun_dir = factory.RunTests(checkout_dir, build_tree_dir,
+ self._target, board, component)
all_jobs.append(test_job)
testruns.append(testrun_dir)
@@ -61,8 +61,8 @@ class CrosstoolNightlyClient(object):
all_jobs.append(factory.GenerateReport(testruns, manifests_dir,
self._target, self._boards))
- return job_group.JobGroup(
- 'Crosstool Nightly Build (%s)' % self._target, all_jobs, True, False)
+ return job_group.JobGroup('Crosstool Nightly Build (%s)' % self._target,
+ all_jobs, True, False)
@logger.HandleUncaughtExceptions
@@ -70,14 +70,14 @@ def Main(argv):
valid_boards_string = ', '.join(CrosstoolNightlyClient.VALID_BOARDS)
parser = optparse.OptionParser()
- parser.add_option('-b',
- '--board',
- dest='boards',
- action='append',
- choices=CrosstoolNightlyClient.VALID_BOARDS,
- default=[],
- help=('Run DejaGNU tests on selected boards: %s.' %
- valid_boards_string))
+ parser.add_option(
+ '-b',
+ '--board',
+ dest='boards',
+ action='append',
+ choices=CrosstoolNightlyClient.VALID_BOARDS,
+ default=[],
+ help=('Run DejaGNU tests on selected boards: %s.' % valid_boards_string))
options, args = parser.parse_args(argv)
if len(args) == 2:
@@ -86,7 +86,7 @@ def Main(argv):
logging.error('Exactly one target required as a command line argument!')
logging.info('List of valid targets:')
for pair in enumerate(CrosstoolNightlyClient.VALID_TARGETS, start=1):
- logging.info('%d) %s' % pair)
+ logging.info('%d) %s', pair)
sys.exit(1)
option_list = [opt.dest for opt in parser.option_list if opt.dest]
diff --git a/automation/clients/dejagnu_compiler.py b/automation/clients/dejagnu_compiler.py
index e4342160..eb923d5d 100755
--- a/automation/clients/dejagnu_compiler.py
+++ b/automation/clients/dejagnu_compiler.py
@@ -1,12 +1,10 @@
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
-
"""dejagnu_compiler.py: Run dejagnu test."""
__author__ = 'shenhan@google.com (Han Shen)'
-
import logging
import optparse
import os
@@ -38,7 +36,8 @@ class DejagnuCompilerNightlyClient:
def CheckoutV14Dir(self):
p4view = perforce.View(self.DEPOT2_DIR, [
- perforce.PathMapping('gcctools/chromeos/v14/...')])
+ perforce.PathMapping('gcctools/chromeos/v14/...')
+ ])
return self.GetP4Snapshot(p4view)
def GetP4Snapshot(self, p4view):
@@ -50,30 +49,37 @@ class DejagnuCompilerNightlyClient:
return p4client.SetupAndDo(p4client.Sync(), p4client.Remove())
def CreateJobGroup(self):
- chain = cmd.Chain(
- self.CheckoutV14Dir(),
- cmd.Shell('python',
- os.path.join(self.P4_VERSION_DIR, 'test_gcc_dejagnu.py'),
- '--board=%s' % self._board,
- '--remote=%s' % self._remote,
- '--cleanup=%s' % self._cleanup))
+ chain = cmd.Chain(self.CheckoutV14Dir(), cmd.Shell(
+ 'python', os.path.join(self.P4_VERSION_DIR, 'test_gcc_dejagnu.py'),
+ '--board=%s' % self._board, '--remote=%s' % self._remote,
+ '--cleanup=%s' % self._cleanup))
label = 'dejagnu'
- job = jobs.CreateLinuxJob(label, chain, timeout=8*60*60)
- return job_group.JobGroup(label, [job], cleanup_on_failure=True,
+ job = jobs.CreateLinuxJob(label, chain, timeout=8 * 60 * 60)
+ return job_group.JobGroup(label,
+ [job],
+ cleanup_on_failure=True,
cleanup_on_completion=True)
@logger.HandleUncaughtExceptions
def Main(argv):
parser = optparse.OptionParser()
- parser.add_option('-b', '--board', dest='board',
+ parser.add_option('-b',
+ '--board',
+ dest='board',
help='Run performance tests on these boards')
- parser.add_option('-r', '--remote', dest='remote',
+ parser.add_option('-r',
+ '--remote',
+ dest='remote',
help='Run performance tests on these remotes')
- parser.add_option('-p', '--p4_snapshot', dest='p4_snapshot',
+ parser.add_option('-p',
+ '--p4_snapshot',
+ dest='p4_snapshot',
help=('For development only. '
'Use snapshot instead of checking out.'))
- parser.add_option('--cleanup', dest='cleanup', default='mount',
+ parser.add_option('--cleanup',
+ dest='cleanup',
+ default='mount',
help=('Cleanup test directory, values could be one of '
'"mount", "chroot" or "chromeos"'))
options, _ = parser.parse_args(argv)
@@ -82,8 +88,8 @@ def Main(argv):
logging.error('Specify a board and remote.')
return 1
- client = DejagnuCompilerNightlyClient(
- options.board, options.remote, options.p4_snapshot, options.cleanup)
+ client = DejagnuCompilerNightlyClient(options.board, options.remote,
+ options.p4_snapshot, options.cleanup)
client.Run()
return 0
diff --git a/automation/clients/helper/android.py b/automation/clients/helper/android.py
index fc068aca..7ff2ac1c 100644
--- a/automation/clients/helper/android.py
+++ b/automation/clients/helper/android.py
@@ -1,7 +1,4 @@
-#!/usr/bin/python
-#
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""Helper modules for Android toolchain test infrastructure.
Provides following Android toolchain test jobs and commands.
@@ -24,6 +21,7 @@ from automation.common import job
class JobsFactory(object):
+
def __init__(self, gcc_version='4.4.3', build_type='DEVELOPMENT'):
assert gcc_version in ['4.4.3', '4.6', 'google_main', 'fsf_trunk']
assert build_type in ['DEVELOPMENT', 'RELEASE']
@@ -46,11 +44,13 @@ class JobsFactory(object):
new_job = jobs.CreateLinuxJob('AndroidBuildToolchain(%s)' % self.tc_tag,
command)
new_job.DependsOnFolder(checkout_dir_dep)
- tc_prefix_dep = job.FolderDependency(
- new_job, self.commands.toolchain_prefix_dir)
+ tc_prefix_dep = job.FolderDependency(new_job,
+ self.commands.toolchain_prefix_dir)
return new_job, tc_prefix_dep
- def BuildAndroidImage(self, tc_prefix_dep, product='stingray',
+ def BuildAndroidImage(self,
+ tc_prefix_dep,
+ product='stingray',
branch='ics-release'):
assert product in ['stingray', 'passion', 'trygon', 'soju']
assert branch in ['honeycomb-release', 'ics-release']
@@ -89,8 +89,7 @@ class CommandsFactory(object):
self.gcc_version = gcc_version
self.binutils_version = '2.21'
self.gold_version = '2.21'
- self.toolchain_prefix_dir = 'install-gcc-%s-%s' % (
- gcc_version, build_type)
+ self.toolchain_prefix_dir = 'install-gcc-%s-%s' % (gcc_version, build_type)
self.p4client = self._CreatePerforceClient()
self.scripts = ScriptsFactory(self.gcc_version, self.binutils_version,
self.gold_version)
@@ -105,11 +104,9 @@ class CommandsFactory(object):
'google_vendor_src_branch')
# Common views for tools
- p4view = perforce.View('depot2',
- perforce.PathMapping.ListFromPathTuples(
- [('gcctools/android/build/...', 'src/build/...'),
- ('gcctools/android/Tarballs/...',
- 'src/tarballs/...')]))
+ p4view = perforce.View('depot2', perforce.PathMapping.ListFromPathTuples([(
+ 'gcctools/android/build/...', 'src/build/...'), (
+ 'gcctools/android/Tarballs/...', 'src/tarballs/...')]))
for mapping in perforce.PathMapping.ListFromPathDict(
{'gcctools/android': ['tools/scripts/...', 'master/...']}):
p4view.add(mapping)
@@ -123,13 +120,11 @@ class CommandsFactory(object):
binutils_branch = mobile_rel_branch
else:
binutils_branch = p4_dev_path
- p4view.add(perforce.PathMapping(binutils_branch, 'src',
- ('binutils/binutils-%s/...' %
- self.binutils_version)))
+ p4view.add(perforce.PathMapping(binutils_branch, 'src', (
+ 'binutils/binutils-%s/...' % self.binutils_version)))
if self.binutils_version != self.gold_version:
- p4view.add(perforce.PathMapping(binutils_branch, 'src',
- ('binutils/binutils-%s/...' %
- self.gold_version)))
+ p4view.add(perforce.PathMapping(binutils_branch, 'src', (
+ 'binutils/binutils-%s/...' % self.gold_version)))
# Add view for gcc if gcc_version is '4.4.3'.
if self.gcc_version == '4.4.3':
@@ -172,9 +167,11 @@ class CommandsFactory(object):
gcc_required_dir = os.path.join(self.TOOLCHAIN_SRC_DIR, 'gcc',
'gcc-%s' % self.gcc_version)
- return cmd.Chain(cmd.MakeDir(gcc_required_dir),
- cmd.Wrapper(cmd.Chain(svn_co_command, svn_get_revision),
- cwd=gcc_required_dir))
+ return cmd.Chain(
+ cmd.MakeDir(gcc_required_dir),
+ cmd.Wrapper(
+ cmd.Chain(svn_co_command, svn_get_revision),
+ cwd=gcc_required_dir))
def CheckoutAndroidToolchain(self):
p4client = self.p4client
@@ -187,25 +184,24 @@ class CommandsFactory(object):
return command
def BuildAndroidToolchain(self):
- script_cmd = self.scripts.BuildAndroidToolchain(self.toolchain_prefix_dir,
- self.CHECKOUT_DIR,
- self.TOOLCHAIN_BUILD_DIR,
- self.TOOLCHAIN_SRC_DIR)
+ script_cmd = self.scripts.BuildAndroidToolchain(
+ self.toolchain_prefix_dir, self.CHECKOUT_DIR, self.TOOLCHAIN_BUILD_DIR,
+ self.TOOLCHAIN_SRC_DIR)
# Record toolchain and gcc CL number
- record_cl_cmd = cmd.Copy(os.path.join(self.CHECKOUT_DIR, 'CLNUM*'),
- to_dir=self.toolchain_prefix_dir)
- save_cmd = cmd.Tar(os.path.join('$JOB_TMP', 'results',
- '%s.tar.bz2' % self.toolchain_prefix_dir),
- self.toolchain_prefix_dir)
+ record_cl_cmd = cmd.Copy(
+ os.path.join(self.CHECKOUT_DIR, 'CLNUM*'),
+ to_dir=self.toolchain_prefix_dir)
+ save_cmd = cmd.Tar(
+ os.path.join('$JOB_TMP', 'results', '%s.tar.bz2' %
+ self.toolchain_prefix_dir), self.toolchain_prefix_dir)
return cmd.Chain(script_cmd, record_cl_cmd, save_cmd)
def _BuildAndroidTree(self, local_android_branch_dir, product):
target_tools_prefix = os.path.join('$JOB_TMP', self.toolchain_prefix_dir,
'bin', 'arm-linux-androideabi-')
java_path = '/usr/lib/jvm/java-6-sun/bin'
- build_cmd = cmd.Shell('make', '-j8',
- 'PRODUCT-%s-userdebug' % product,
+ build_cmd = cmd.Shell('make', '-j8', 'PRODUCT-%s-userdebug' % product,
'TARGET_TOOLS_PREFIX=%s' % target_tools_prefix,
'PATH=%s:$PATH' % java_path)
return cmd.Wrapper(build_cmd, cwd=local_android_branch_dir)
@@ -219,9 +215,8 @@ class CommandsFactory(object):
'android_trees')
remote_android_branch_path = os.path.join(androidtrees_path, branch)
local_android_branch_dir = os.path.join(self.ANDROID_TREES_DIR, branch)
- gettree_cmd = cmd.RemoteCopyFrom(androidtrees_host,
- remote_android_branch_path,
- local_android_branch_dir)
+ gettree_cmd = cmd.RemoteCopyFrom(
+ androidtrees_host, remote_android_branch_path, local_android_branch_dir)
# Configure and build the tree
buildtree_cmd = self._BuildAndroidTree(local_android_branch_dir, product)
@@ -235,8 +230,9 @@ class CommandsFactory(object):
return cmd.Chain(gettree_cmd, buildtree_cmd, copy_img, compress_img)
def CheckoutScripts(self):
- p4view = perforce.View('depot2', [perforce.PathMapping(
- 'gcctools/android/tools/...', 'tools/...')])
+ p4view = perforce.View('depot2',
+ [perforce.PathMapping('gcctools/android/tools/...',
+ 'tools/...')])
p4client = perforce.CommandsFactory(self.TOOLS_DIR, p4view)
return p4client.SetupAndDo(p4client.Sync(), p4client.Remove())
@@ -246,19 +242,15 @@ class CommandsFactory(object):
base_benchbin_path = ('/usr/local/google2/home/mobiletc-prebuild/'
'archive/v3binaries/2011-10-18')
local_basebenchbin_dir = 'base_benchmark_bin'
- getbase_cmd = cmd.RemoteCopyFrom(base_benchbin_host,
- base_benchbin_path,
+ getbase_cmd = cmd.RemoteCopyFrom(base_benchbin_host, base_benchbin_path,
local_basebenchbin_dir)
# Build and run benchmark.
android_arch = 'android_%s' % arch
run_label = 'normal'
- benchmark_cmd = self.scripts.RunBenchmark(self.toolchain_prefix_dir,
- self.TOOLS_DIR,
- self.BENCHMARK_OUT_DIR,
- run_label, run_experiment,
- android_arch,
- local_basebenchbin_dir)
+ benchmark_cmd = self.scripts.RunBenchmark(
+ self.toolchain_prefix_dir, self.TOOLS_DIR, self.BENCHMARK_OUT_DIR,
+ run_label, run_experiment, android_arch, local_basebenchbin_dir)
# Extract jobid from BENCHMARK_OUT_DIR/log/jobid_normal.log file.
# Copy jobid to www server to generate performance dashboard.
@@ -268,6 +260,7 @@ class CommandsFactory(object):
class ScriptsFactory(object):
+
def __init__(self, gcc_version, binutils_version, gold_version):
self._gcc_version = gcc_version
self._binutils_version = binutils_version
@@ -292,14 +285,20 @@ class ScriptsFactory(object):
'--with-gold-version=%s' % self._gold_version,
'--with-gdb-version=7.1.x-android',
'--log-path=%s/logs' % '$JOB_HOME',
- '--android-sysroot=%s' %
- os.path.join('$JOB_TMP', checkout_dir, 'gcctools', 'android',
- 'master', 'honeycomb_generic_sysroot'),
+ '--android-sysroot=%s' % os.path.join('$JOB_TMP', checkout_dir,
+ 'gcctools', 'android', 'master',
+ 'honeycomb_generic_sysroot'),
path=os.path.join(checkout_dir, 'gcctools', 'android', 'tools',
'scripts'))
- def RunBenchmark(self, toolchain_prefix_dir, checkout_dir, output_dir,
- run_label, run_experiment, arch, base_bench_bin=None):
+ def RunBenchmark(self,
+ toolchain_prefix_dir,
+ checkout_dir,
+ output_dir,
+ run_label,
+ run_experiment,
+ arch,
+ base_bench_bin=None):
if base_bench_bin:
base_bench_opt = '--base_benchmark_bin=%s' % base_bench_bin
else:
diff --git a/automation/clients/helper/chromeos.py b/automation/clients/helper/chromeos.py
index ddfd59fe..e7157451 100644
--- a/automation/clients/helper/chromeos.py
+++ b/automation/clients/helper/chromeos.py
@@ -1,8 +1,6 @@
-#!/usr/bin/python
-#
# Copyright 2011 Google Inc. All Rights Reserved.
-__author__ = "asharif@google.com (Ahmad Sharif)"
+__author__ = 'asharif@google.com (Ahmad Sharif)'
import os.path
import re
@@ -14,64 +12,54 @@ from automation.common import machine
class ScriptsFactory(object):
+
def __init__(self, chromeos_root, scripts_path):
self._chromeos_root = chromeos_root
self._scripts_path = scripts_path
def SummarizeResults(self, logs_path):
- return cmd.Shell("summarize_results.py",
- logs_path,
- path=self._scripts_path)
+ return cmd.Shell('summarize_results.py', logs_path, path=self._scripts_path)
def Buildbot(self, config_name):
buildbot = os.path.join(self._chromeos_root,
- "chromite/cbuildbot/cbuildbot.py")
-
- return cmd.Shell(buildbot,
- "--buildroot=%s" % self._chromeos_root,
- "--resume",
- "--noarchive",
- "--noprebuilts",
- "--nosync",
- "--nouprev",
- "--notests",
- "--noclean",
- config_name)
+ 'chromite/cbuildbot/cbuildbot.py')
+
+ return cmd.Shell(buildbot, '--buildroot=%s' % self._chromeos_root,
+ '--resume', '--noarchive', '--noprebuilts', '--nosync',
+ '--nouprev', '--notests', '--noclean', config_name)
def RunBenchmarks(self, board, tests):
- image_path = os.path.join(self._chromeos_root,
- "src/build/images",
- board,
- "latest/chromiumos_image.bin")
-
- return cmd.Shell("cros_run_benchmarks.py",
- "--remote=$SECONDARY_MACHINES[0]",
- "--board=%s" % board,
- "--tests=%s" % tests,
- "--full_table",
+ image_path = os.path.join(self._chromeos_root, 'src/build/images', board,
+ 'latest/chromiumos_image.bin')
+
+ return cmd.Shell('cros_run_benchmarks.py',
+ '--remote=$SECONDARY_MACHINES[0]',
+ '--board=%s' % board,
+ '--tests=%s' % tests,
+ '--full_table',
image_path,
- path="/home/mobiletc-prebuild")
+ path='/home/mobiletc-prebuild')
- def SetupChromeOS(self, version="latest", use_minilayout=False):
- setup_chromeos = cmd.Shell("setup_chromeos.py",
- "--public",
- "--dir=%s" % self._chromeos_root,
- "--version=%s" % version,
+ def SetupChromeOS(self, version='latest', use_minilayout=False):
+ setup_chromeos = cmd.Shell('setup_chromeos.py',
+ '--public',
+ '--dir=%s' % self._chromeos_root,
+ '--version=%s' % version,
path=self._scripts_path)
if use_minilayout:
- setup_chromeos.AddOption("--minilayout")
+ setup_chromeos.AddOption('--minilayout')
return setup_chromeos
class CommandsFactory(object):
- DEPOT2_DIR = "//depot2/"
- P4_CHECKOUT_DIR = "perforce2/"
- P4_VERSION_DIR = os.path.join(P4_CHECKOUT_DIR, "gcctools/chromeos/v14")
+ DEPOT2_DIR = '//depot2/'
+ P4_CHECKOUT_DIR = 'perforce2/'
+ P4_VERSION_DIR = os.path.join(P4_CHECKOUT_DIR, 'gcctools/chromeos/v14')
- CHROMEOS_ROOT = "chromeos"
- CHROMEOS_SCRIPTS_DIR = os.path.join(CHROMEOS_ROOT, "src/scripts")
- CHROMEOS_BUILDS_DIR = "/home/mobiletc-prebuild/www/chromeos_builds"
+ CHROMEOS_ROOT = 'chromeos'
+ CHROMEOS_SCRIPTS_DIR = os.path.join(CHROMEOS_ROOT, 'src/scripts')
+ CHROMEOS_BUILDS_DIR = '/home/mobiletc-prebuild/www/chromeos_builds'
def __init__(self, chromeos_version, board, toolchain, p4_snapshot):
self.chromeos_version = chromeos_version
@@ -82,33 +70,34 @@ class CommandsFactory(object):
self.scripts = ScriptsFactory(self.CHROMEOS_ROOT, self.P4_VERSION_DIR)
def AddBuildbotConfig(self, config_name, config_list):
- config_header = "add_config(%r, [%s])" % (config_name,
- ", ".join(config_list))
+ config_header = 'add_config(%r, [%s])' % (config_name,
+ ', '.join(config_list))
config_file = os.path.join(self.CHROMEOS_ROOT,
- "chromite/cbuildbot/cbuildbot_config.py")
- quoted_config_header = "%r" % config_header
+ 'chromite/cbuildbot/cbuildbot_config.py')
+ quoted_config_header = '%r' % config_header
quoted_config_header = re.sub("'", "\\\"", quoted_config_header)
- return cmd.Pipe(cmd.Shell("echo", quoted_config_header),
- cmd.Shell("tee", "--append", config_file))
+ return cmd.Pipe(
+ cmd.Shell('echo', quoted_config_header),
+ cmd.Shell('tee', '--append', config_file))
def RunBuildbot(self):
- config_dict = {"board": self.board,
- "build_tests": True,
- "chrome_tests": True,
- "unittests": False,
- "vm_tests": False,
- "prebuilts": False,
- "latest_toolchain": True,
- "useflags": ["chrome_internal"],
- "usepkg_chroot": True,
+ config_dict = {'board': self.board,
+ 'build_tests': True,
+ 'chrome_tests': True,
+ 'unittests': False,
+ 'vm_tests': False,
+ 'prebuilts': False,
+ 'latest_toolchain': True,
+ 'useflags': ['chrome_internal'],
+ 'usepkg_chroot': True,
self.toolchain: True}
- config_name = "%s-toolchain-test" % self.board
- if "arm" in self.board:
- config_list = ["arm"]
+ config_name = '%s-toolchain-test' % self.board
+ if 'arm' in self.board:
+ config_list = ['arm']
else:
config_list = []
- config_list.extend(["internal", "full", "official", str(config_dict)])
+ config_list.extend(['internal', 'full', 'official', str(config_dict)])
add_config_shell = self.AddBuildbotConfig(config_name, config_list)
return cmd.Chain(add_config_shell, self.scripts.Buildbot(config_name))
@@ -118,7 +107,7 @@ class CommandsFactory(object):
self.CheckoutV14Dir(),
self.SetupChromeOSCheckout(self.chromeos_version, True),
self.RunBuildbot(),
- self.scripts.RunBenchmarks(self.board, "BootPerfServer,10:Page,3"))
+ self.scripts.RunBenchmarks(self.board, 'BootPerfServer,10:Page,3'))
def GetP4Snapshot(self, p4view):
p4client = perforce.CommandsFactory(self.P4_CHECKOUT_DIR, p4view)
@@ -130,38 +119,43 @@ class CommandsFactory(object):
def CheckoutV14Dir(self):
p4view = perforce.View(self.DEPOT2_DIR, [
- perforce.PathMapping("gcctools/chromeos/v14/...")])
+ perforce.PathMapping('gcctools/chromeos/v14/...')
+ ])
return self.GetP4Snapshot(p4view)
def SetupChromeOSCheckout(self, version, use_minilayout=False):
- version_re = "^\d+\.\d+\.\d+\.[a-zA-Z0-9]+$"
+ version_re = '^\d+\.\d+\.\d+\.[a-zA-Z0-9]+$'
location = os.path.join(self.CHROMEOS_BUILDS_DIR, version)
- if version in ["weekly", "quarterly"]:
- assert os.path.islink(location), "Symlink %s does not exist." % location
+ if version in ['weekly', 'quarterly']:
+ assert os.path.islink(location), 'Symlink %s does not exist.' % location
location_expanded = os.path.abspath(os.path.realpath(location))
version = os.path.basename(location_expanded)
- if version in ["top", "latest"] or re.match(version_re, version):
+ if version in ['top', 'latest'] or re.match(version_re, version):
return self.scripts.SetupChromeOS(version, use_minilayout)
- elif version.endswith("bz2") or version.endswith("gz"):
+ elif version.endswith('bz2') or version.endswith('gz'):
return cmd.UnTar(location_expanded, self.CHROMEOS_ROOT)
else:
signature_file_location = os.path.join(location,
- "src/scripts/enter_chroot.sh")
+ 'src/scripts/enter_chroot.sh')
assert os.path.exists(signature_file_location), (
- "Signature file %s does not exist." % signature_file_location)
+ 'Signature file %s does not exist.' % signature_file_location)
return cmd.Copy(location, to_dir=self.CHROMEOS_ROOT, recursive=True)
class JobsFactory(object):
- def __init__(self, chromeos_version="top", board="x86-mario",
- toolchain="trunk", p4_snapshot=""):
+
+ def __init__(self,
+ chromeos_version='top',
+ board='x86-mario',
+ toolchain='trunk',
+ p4_snapshot=''):
self.chromeos_version = chromeos_version
self.board = board
self.toolchain = toolchain
@@ -172,14 +166,15 @@ class JobsFactory(object):
def BuildAndBenchmark(self):
command = self.commands.BuildAndBenchmark()
- label = "BuildAndBenchmark(%s,%s,%s)" % (
- self.toolchain, self.board, self.chromeos_version)
+ label = 'BuildAndBenchmark(%s,%s,%s)' % (self.toolchain, self.board,
+ self.chromeos_version)
- machine_label = "chromeos-%s" % self.board
+ machine_label = 'chromeos-%s' % self.board
job = jobs.CreateLinuxJob(label, command)
job.DependsOnMachine(
- machine.MachineSpecification(label=machine_label, lock_required=True),
+ machine.MachineSpecification(label=machine_label,
+ lock_required=True),
False)
return job
diff --git a/automation/clients/helper/crosstool.py b/automation/clients/helper/crosstool.py
index f3b24005..80154b25 100644
--- a/automation/clients/helper/crosstool.py
+++ b/automation/clients/helper/crosstool.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Copyright 2011 Google Inc. All Rights Reserved.
__author__ = 'kbaclawski@google.com (Krystian Baclawski)'
@@ -14,14 +12,15 @@ from automation.common import job
class JobsFactory(object):
+
def __init__(self):
self.commands = CommandsFactory()
def CheckoutCrosstool(self, target):
command = self.commands.CheckoutCrosstool()
new_job = jobs.CreateLinuxJob('CheckoutCrosstool(%s)' % target, command)
- checkout_dir_dep = job.FolderDependency(
- new_job, CommandsFactory.CHECKOUT_DIR)
+ checkout_dir_dep = job.FolderDependency(new_job,
+ CommandsFactory.CHECKOUT_DIR)
manifests_dir_dep = job.FolderDependency(
new_job, os.path.join(self.commands.buildit_path, target), 'manifests')
return new_job, checkout_dir_dep, manifests_dir_dep
@@ -30,14 +29,14 @@ class JobsFactory(object):
command = self.commands.BuildRelease(target)
new_job = jobs.CreateLinuxJob('BuildRelease(%s)' % target, command)
new_job.DependsOnFolder(checkout_dir)
- build_tree_dep = job.FolderDependency(
- new_job, self.commands.buildit_work_dir_path)
+ build_tree_dep = job.FolderDependency(new_job,
+ self.commands.buildit_work_dir_path)
return new_job, build_tree_dep
def RunTests(self, checkout_dir, build_tree_dir, target, board, component):
command = self.commands.RunTests(target, board, component)
- new_job = jobs.CreateLinuxJob(
- 'RunTests(%s, %s, %s)' % (target, component, board), command)
+ new_job = jobs.CreateLinuxJob('RunTests(%s, %s, %s)' %
+ (target, component, board), command)
new_job.DependsOnFolder(checkout_dir)
new_job.DependsOnFolder(build_tree_dir)
testrun_dir_dep = job.FolderDependency(
@@ -57,24 +56,26 @@ class CommandsFactory(object):
CHECKOUT_DIR = 'crosstool-checkout-dir'
def __init__(self):
- self.buildit_path = os.path.join(
- self.CHECKOUT_DIR, 'gcctools', 'crosstool', 'v15')
+ self.buildit_path = os.path.join(self.CHECKOUT_DIR, 'gcctools', 'crosstool',
+ 'v15')
self.buildit_work_dir = 'buildit-tmp'
self.buildit_work_dir_path = os.path.join('$JOB_TMP', self.buildit_work_dir)
- self.dejagnu_output_path = os.path.join(
- self.buildit_work_dir_path, 'dejagnu-output')
+ self.dejagnu_output_path = os.path.join(self.buildit_work_dir_path,
+ 'dejagnu-output')
paths = {
'gcctools': [
- 'crosstool/v15/...',
- 'scripts/...'],
+ 'crosstool/v15/...', 'scripts/...'
+ ],
'gcctools/google_vendor_src_branch': [
- 'binutils/binutils-2.21/...',
- 'gdb/gdb-7.2.x/...',
- 'zlib/zlib-1.2.3/...'],
+ 'binutils/binutils-2.21/...', 'gdb/gdb-7.2.x/...',
+ 'zlib/zlib-1.2.3/...'
+ ],
'gcctools/vendor_src': [
- 'gcc/google/gcc-4_6/...']}
+ 'gcc/google/gcc-4_6/...'
+ ]
+ }
p4view = perforce.View('depot2',
perforce.PathMapping.ListFromPathDict(paths))
@@ -91,22 +92,21 @@ class CommandsFactory(object):
def BuildRelease(self, target):
clnum_path = os.path.join('$JOB_TMP', self.CHECKOUT_DIR, 'CLNUM')
- toolchain_root = os.path.join(
- '/google/data/rw/projects/toolchains', target, 'unstable')
+ toolchain_root = os.path.join('/google/data/rw/projects/toolchains', target,
+ 'unstable')
toolchain_path = os.path.join(toolchain_root, '${CLNUM}')
build_toolchain = cmd.Wrapper(
cmd.Chain(
- cmd.MakeDir(toolchain_path),
- cmd.Shell(
- 'buildit',
- '--keep-work-dir',
- '--build-type=release',
- '--work-dir=%s' % self.buildit_work_dir_path,
- '--results-dir=%s' % toolchain_path,
- '--force-release=%s' % '${CLNUM}',
- target,
- path='.')),
+ cmd.MakeDir(toolchain_path),
+ cmd.Shell('buildit',
+ '--keep-work-dir',
+ '--build-type=release',
+ '--work-dir=%s' % self.buildit_work_dir_path,
+ '--results-dir=%s' % toolchain_path,
+ '--force-release=%s' % '${CLNUM}',
+ target,
+ path='.')),
cwd=self.buildit_path,
umask='0022',
env={'CLNUM': '$(< %s)' % clnum_path})
@@ -114,8 +114,7 @@ class CommandsFactory(object):
# remove all but 10 most recent directories
remove_old_toolchains_from_x20 = cmd.Wrapper(
cmd.Pipe(
- cmd.Shell('ls', '-1', '-r'),
- cmd.Shell('sed', '-e', '1,10d'),
+ cmd.Shell('ls', '-1', '-r'), cmd.Shell('sed', '-e', '1,10d'),
cmd.Shell('xargs', 'rm', '-r', '-f')),
cwd=toolchain_root)
@@ -132,13 +131,15 @@ class CommandsFactory(object):
'experimental/users/kbaclawski',
'dejagnu/site.exp')
- build_dir_path = os.path.join(
- target, 'rpmbuild/BUILD/crosstool*-0.0', 'build-%s' % component)
+ build_dir_path = os.path.join(target, 'rpmbuild/BUILD/crosstool*-0.0',
+ 'build-%s' % component)
run_dejagnu = cmd.Wrapper(
cmd.Chain(
cmd.MakeDir(self.dejagnu_output_path),
- cmd.Shell('make', 'check', '-k',
+ cmd.Shell('make',
+ 'check',
+ '-k',
'-j $(grep -c processor /proc/cpuinfo)',
'RUNTESTFLAGS="%s"' % ' '.join(dejagnu_flags),
'DEJAGNU="%s"' % site_exp_file,
@@ -146,8 +147,9 @@ class CommandsFactory(object):
cwd=os.path.join(self.buildit_work_dir_path, build_dir_path),
env={'REMOTE_TMPDIR': 'job-$JOB_ID'})
- save_results = cmd.Copy(
- self.dejagnu_output_path, to_dir='$JOB_TMP/results', recursive=True)
+ save_results = cmd.Copy(self.dejagnu_output_path,
+ to_dir='$JOB_TMP/results',
+ recursive=True)
return cmd.Chain(run_dejagnu, save_results)
@@ -155,9 +157,12 @@ class CommandsFactory(object):
sumfiles = [os.path.join('$JOB_TMP', board, '*.sum') for board in boards]
return cmd.Wrapper(
- cmd.Shell('dejagnu.sh', 'report',
- '-m', '$JOB_TMP/manifests/*.xfail',
- '-o', '$JOB_TMP/results/report.html',
+ cmd.Shell('dejagnu.sh',
+ 'report',
+ '-m',
+ '$JOB_TMP/manifests/*.xfail',
+ '-o',
+ '$JOB_TMP/results/report.html',
*sumfiles,
path='.'),
cwd='$HOME/automation/clients/report')
diff --git a/automation/clients/helper/jobs.py b/automation/clients/helper/jobs.py
index ea9c9691..96a1c408 100644
--- a/automation/clients/helper/jobs.py
+++ b/automation/clients/helper/jobs.py
@@ -1,13 +1,11 @@
-#!/usr/bin/python
-#
# Copyright 2010 Google Inc. All Rights Reserved.
from automation.common import job
from automation.common import machine
-def CreateLinuxJob(label, command, lock=False, timeout=4*60*60):
+def CreateLinuxJob(label, command, lock=False, timeout=4 * 60 * 60):
to_return = job.Job(label, command, timeout)
- to_return.DependsOnMachine(
- machine.MachineSpecification(os="linux", lock_required=lock))
+ to_return.DependsOnMachine(machine.MachineSpecification(os='linux',
+ lock_required=lock))
return to_return
diff --git a/automation/clients/helper/perforce.py b/automation/clients/helper/perforce.py
index 1100a1ee..1f2dfe79 100644
--- a/automation/clients/helper/perforce.py
+++ b/automation/clients/helper/perforce.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Copyright 2011 Google Inc. All Rights Reserved.
__author__ = 'kbaclawski@google.com (Krystian Baclawski)'
@@ -153,17 +151,16 @@ class CommandsFactory(object):
self.p4config_path = os.path.join(self.checkout_dir, '.p4config')
def Initialize(self):
- return cmd.Chain(
- 'mkdir -p %s' % self.checkout_dir,
- 'cp ~/.p4config %s' % self.checkout_dir,
- 'chmod u+w %s' % self.p4config_path,
- 'echo "P4PORT=%s" >> %s' % (self.port, self.p4config_path),
- 'echo "P4CLIENT=%s" >> %s' % (self.view.client, self.p4config_path))
+ return cmd.Chain('mkdir -p %s' % self.checkout_dir, 'cp ~/.p4config %s' %
+ self.checkout_dir, 'chmod u+w %s' % self.p4config_path,
+ 'echo "P4PORT=%s" >> %s' % (self.port, self.p4config_path),
+ 'echo "P4CLIENT=%s" >> %s' %
+ (self.view.client, self.p4config_path))
def Create(self):
# TODO(kbaclawski): Could we support value list for options consistently?
- mappings = ['-a \"%s %s\"' % mapping for mapping in
- self.view.AbsoluteMappings()]
+ mappings = ['-a \"%s %s\"' % mapping
+ for mapping in self.view.AbsoluteMappings()]
# First command will create client with default mappings. Second one will
# replace default mapping with desired. Unfortunately, it seems that it
@@ -176,14 +173,12 @@ class CommandsFactory(object):
env={'P4EDITOR': '/bin/true'})
def SaveSpecification(self, filename=None):
- return cmd.Pipe(
- cmd.Shell('g4', 'client', '-o'),
- output=filename)
+ return cmd.Pipe(cmd.Shell('g4', 'client', '-o'), output=filename)
def Sync(self, revision=None):
sync_arg = '...'
if revision:
- sync_arg = "%s@%s" % (sync_arg, revision)
+ sync_arg = '%s@%s' % (sync_arg, revision)
return cmd.Shell('g4', 'sync', sync_arg)
def SaveCurrentCLNumber(self, filename=None):
@@ -196,14 +191,11 @@ class CommandsFactory(object):
return cmd.Shell('g4', 'client', '-d', self.view.client)
def SetupAndDo(self, *commands):
- return cmd.Chain(
- self.Initialize(),
- self.InCheckoutDir(self.Create(), *commands))
+ return cmd.Chain(self.Initialize(),
+ self.InCheckoutDir(self.Create(), *commands))
def InCheckoutDir(self, *commands):
- return cmd.Wrapper(
- cmd.Chain(*commands),
- cwd=self.checkout_dir)
+ return cmd.Wrapper(cmd.Chain(*commands), cwd=self.checkout_dir)
def CheckoutFromSnapshot(self, snapshot):
cmds = cmd.Chain()
@@ -216,7 +208,8 @@ class CommandsFactory(object):
local_dir = os.path.join(self.checkout_dir, os.path.dirname(local_path))
cmds.extend([
- cmd.Shell('mkdir', '-p', local_dir),
- cmd.Shell('rsync', '-lr', remote_dir, local_dir)])
+ cmd.Shell('mkdir', '-p', local_dir), cmd.Shell(
+ 'rsync', '-lr', remote_dir, local_dir)
+ ])
return cmds
diff --git a/automation/clients/nightly.py b/automation/clients/nightly.py
index a6157b0a..98e2b081 100755
--- a/automation/clients/nightly.py
+++ b/automation/clients/nightly.py
@@ -13,21 +13,21 @@ from automation.common import job_group
def Main(argv):
parser = optparse.OptionParser()
- parser.add_option("-c",
- "--chromeos_version",
- dest="chromeos_version",
- default="quarterly",
- help="ChromeOS version to use.")
- parser.add_option("-t",
- "--toolchain",
- dest="toolchain",
- default="latest-toolchain",
- help="Toolchain to use {latest-toolchain,gcc_46}.")
- parser.add_option("-b",
- "--board",
- dest="board",
- default="x86-generic",
- help="Board to use for the nightly job.")
+ parser.add_option('-c',
+ '--chromeos_version',
+ dest='chromeos_version',
+ default='quarterly',
+ help='ChromeOS version to use.')
+ parser.add_option('-t',
+ '--toolchain',
+ dest='toolchain',
+ default='latest-toolchain',
+ help='Toolchain to use {latest-toolchain,gcc_46}.')
+ parser.add_option('-b',
+ '--board',
+ dest='board',
+ default='x86-generic',
+ help='Board to use for the nightly job.')
options = parser.parse_args(argv)[0]
toolchain = options.toolchain
@@ -36,15 +36,16 @@ def Main(argv):
# Build toolchain
jobs_factory = chromeos.JobsFactory(chromeos_version=chromeos_version,
- board=board, toolchain=toolchain)
+ board=board,
+ toolchain=toolchain)
benchmark_job = jobs_factory.BuildAndBenchmark()
- group_label = "nightly_client_%s" % board
+ group_label = 'nightly_client_%s' % board
group = job_group.JobGroup(group_label, [benchmark_job], True, False)
- server = xmlrpclib.Server("http://localhost:8000")
+ server = xmlrpclib.Server('http://localhost:8000')
server.ExecuteJobGroup(pickle.dumps(group))
-if __name__ == "__main__":
+if __name__ == '__main__':
Main(sys.argv)
diff --git a/automation/clients/output_test.py b/automation/clients/output_test.py
index 3369e768..3126f050 100755
--- a/automation/clients/output_test.py
+++ b/automation/clients/output_test.py
@@ -13,17 +13,17 @@ from automation.common import machine
def Main():
- server = xmlrpclib.Server("http://localhost:8000")
+ server = xmlrpclib.Server('http://localhost:8000')
- command = os.path.join(os.path.dirname(sys.argv[0]),
- "../../produce_output.py")
+ command = os.path.join(
+ os.path.dirname(sys.argv[0]), '../../produce_output.py')
- pwd_job = job.Job("pwd_job", command)
- pwd_job.DependsOnMachine(machine.MachineSpecification(os="linux"))
+ pwd_job = job.Job('pwd_job', command)
+ pwd_job.DependsOnMachine(machine.MachineSpecification(os='linux'))
- group = job_group.JobGroup("pwd_client", [pwd_job])
+ group = job_group.JobGroup('pwd_client', [pwd_job])
server.ExecuteJobGroup(pickle.dumps(group))
-if __name__ == "__main__":
+if __name__ == '__main__':
Main()
diff --git a/automation/clients/pwd_test.py b/automation/clients/pwd_test.py
index 565fe739..a4b28552 100755
--- a/automation/clients/pwd_test.py
+++ b/automation/clients/pwd_test.py
@@ -11,19 +11,17 @@ from automation.common import machine
def Main():
- server = xmlrpclib.Server("http://localhost:8000")
+ server = xmlrpclib.Server('http://localhost:8000')
- command = ["echo These following 3 lines should be the same",
- "pwd",
- "$(pwd)",
- "echo ${PWD}"]
+ command = ['echo These following 3 lines should be the same', 'pwd', '$(pwd)',
+ 'echo ${PWD}']
- pwd_job = job.Job("pwd_job", " && ".join(command))
- pwd_job.DependsOnMachine(machine.MachineSpecification(os="linux"))
+ pwd_job = job.Job('pwd_job', ' && '.join(command))
+ pwd_job.DependsOnMachine(machine.MachineSpecification(os='linux'))
- group = job_group.JobGroup("pwd_client", [pwd_job])
+ group = job_group.JobGroup('pwd_client', [pwd_job])
server.ExecuteJobGroup(pickle.dumps(group))
-if __name__ == "__main__":
+if __name__ == '__main__':
Main()
diff --git a/automation/clients/report/dejagnu/__init__.py b/automation/clients/report/dejagnu/__init__.py
index e69de29b..8b137891 100644
--- a/automation/clients/report/dejagnu/__init__.py
+++ b/automation/clients/report/dejagnu/__init__.py
@@ -0,0 +1 @@
+
diff --git a/automation/clients/report/dejagnu/main.py b/automation/clients/report/dejagnu/main.py
index 6e5e6872..62f095e1 100644
--- a/automation/clients/report/dejagnu/main.py
+++ b/automation/clients/report/dejagnu/main.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Copyright 2011 Google Inc. All Rights Reserved.
# Author: kbaclawski@google.com (Krystian Baclawski)
#
@@ -35,12 +33,12 @@ def OptionChecker(parser):
def ManifestCommand(argv):
parser = optparse.OptionParser(
- description=(
- 'Read in one or more DejaGNU summary files (.sum), parse their '
- 'content and generate manifest files. Manifest files store a list '
- 'of failed tests that should be ignored. Generated files are '
- 'stored in current directory under following name: '
- '${tool}-${board}.xfail (e.g. "gcc-unix.xfail").'),
+ description=
+ ('Read in one or more DejaGNU summary files (.sum), parse their '
+ 'content and generate manifest files. Manifest files store a list '
+ 'of failed tests that should be ignored. Generated files are '
+ 'stored in current directory under following name: '
+ '${tool}-${board}.xfail (e.g. "gcc-unix.xfail").'),
usage='Usage: %prog manifest [file.sum] (file2.sum ...)')
_, args = parser.parse_args(argv[2:])
@@ -53,8 +51,7 @@ def ManifestCommand(argv):
test_run = DejaGnuTestRun.FromFile(filename)
manifest = Manifest.FromDejaGnuTestRun(test_run)
- manifest_filename = '%s-%s.xfail' % (
- test_run.tool, test_run.board)
+ manifest_filename = '%s-%s.xfail' % (test_run.tool, test_run.board)
with open(manifest_filename, 'w') as manifest_file:
manifest_file.write(manifest.Generate())
@@ -64,18 +61,25 @@ def ManifestCommand(argv):
def ReportCommand(argv):
parser = optparse.OptionParser(
- description=(
- 'Read in one or more DejaGNU summary files (.sum), parse their '
- 'content and generate a single report file in selected format '
- '(currently only HTML).'),
+ description=
+ ('Read in one or more DejaGNU summary files (.sum), parse their '
+ 'content and generate a single report file in selected format '
+ '(currently only HTML).'),
usage=('Usage: %prog report (-m manifest.xfail) [-o report.html] '
'[file.sum (file2.sum ...)'))
parser.add_option(
- '-o', dest='output', type='string', default=None,
+ '-o',
+ dest='output',
+ type='string',
+ default=None,
help=('Suppress failures for test listed in provided manifest files. '
'(use -m for each manifest file you want to read)'))
parser.add_option(
- '-m', dest='manifests', type='string', action='append', default=None,
+ '-m',
+ dest='manifests',
+ type='string',
+ action='append',
+ default=None,
help=('Suppress failures for test listed in provided manifest files. '
'(use -m for each manifest file you want to read)'))
@@ -109,11 +113,11 @@ def ReportCommand(argv):
def HelpCommand(argv):
sys.exit('\n'.join([
- 'Usage: %s command [options]' % os.path.basename(argv[0]),
- '',
- 'Commands:',
+ 'Usage: %s command [options]' % os.path.basename(argv[
+ 0]), '', 'Commands:',
' manifest - manage files containing a list of suppressed test failures',
- ' report - generate report file for selected test runs']))
+ ' report - generate report file for selected test runs'
+ ]))
def Main(argv):
@@ -122,11 +126,10 @@ def Main(argv):
except IndexError:
cmd_name = None
- cmd_map = {
- 'manifest': ManifestCommand,
- 'report': ReportCommand}
+ cmd_map = {'manifest': ManifestCommand, 'report': ReportCommand}
cmd_map.get(cmd_name, HelpCommand)(argv)
+
if __name__ == '__main__':
FORMAT = '%(asctime)-15s %(levelname)s %(message)s'
logging.basicConfig(format=FORMAT, level=logging.INFO)
diff --git a/automation/clients/report/dejagnu/manifest.py b/automation/clients/report/dejagnu/manifest.py
index 2124d577..5831d1b0 100644
--- a/automation/clients/report/dejagnu/manifest.py
+++ b/automation/clients/report/dejagnu/manifest.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Copyright 2011 Google Inc. All Rights Reserved.
# Author: kbaclawski@google.com (Krystian Baclawski)
#
@@ -42,7 +40,8 @@ class Manifest(namedtuple('Manifest', 'tool board results')):
@classmethod
def FromDejaGnuTestRun(cls, test_run):
- results = [result for result in test_run.results
+ results = [result
+ for result in test_run.results
if result.result in cls.SUPPRESSIBLE_RESULTS]
return cls(test_run.tool, test_run.board, results)
diff --git a/automation/clients/report/dejagnu/report.py b/automation/clients/report/dejagnu/report.py
index 7b56590d..191a5389 100644
--- a/automation/clients/report/dejagnu/report.py
+++ b/automation/clients/report/dejagnu/report.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Copyright 2011 Google Inc. All Rights Reserved.
# Author: kbaclawski@google.com (Krystian Baclawski)
#
@@ -7,7 +5,6 @@
import logging
import os.path
-
RESULT_DESCRIPTION = {
'ERROR': 'DejaGNU errors',
'FAIL': 'Failed tests',
@@ -18,13 +15,15 @@ RESULT_DESCRIPTION = {
'UNTESTED': 'Not executed tests',
'WARNING': 'DejaGNU warnings',
'XFAIL': 'Expected test failures',
- 'XPASS': 'Unexpectedly passed tests'}
+ 'XPASS': 'Unexpectedly passed tests'
+}
RESULT_GROUPS = {
'Successes': ['PASS', 'XFAIL'],
'Failures': ['FAIL', 'XPASS', 'UNRESOLVED'],
'Suppressed': ['!FAIL', '!XPASS', '!UNRESOLVED', '!ERROR'],
- 'Framework': ['UNTESTED', 'UNSUPPORTED', 'ERROR', 'WARNING', 'NOTE']}
+ 'Framework': ['UNTESTED', 'UNSUPPORTED', 'ERROR', 'WARNING', 'NOTE']
+}
ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
@@ -40,18 +39,18 @@ def _GetResultDescription(name):
def _PrepareSummary(res_types, summary):
+
def GetResultCount(res_type):
return summary.get(res_type, 0)
- return [(_GetResultDescription(rt), GetResultCount(rt))
- for rt in res_types]
+ return [(_GetResultDescription(rt), GetResultCount(rt)) for rt in res_types]
def _PrepareTestList(res_types, tests):
+
def GetTestsByResult(res_type):
return [(test.name, test.variant or '')
- for test in sorted(tests)
- if test.result == res_type]
+ for test in sorted(tests) if test.result == res_type]
return [(_GetResultDescription(rt), GetTestsByResult(rt))
for rt in res_types if rt != 'PASS']
@@ -92,7 +91,8 @@ def Generate(test_runs, manifests):
tmpl_args.append({
'id': test_run_id,
'name': '%s @%s' % (test_run.tool, test_run.board),
- 'groups': groups})
+ 'groups': groups
+ })
logging.info('Rendering report in HTML format.')
@@ -105,7 +105,8 @@ def Generate(test_runs, manifests):
logging.error('Failed to generate report in HTML format!')
return ''
- settings.configure(DEBUG=True, TEMPLATE_DEBUG=True,
+ settings.configure(DEBUG=True,
+ TEMPLATE_DEBUG=True,
TEMPLATE_DIRS=(ROOT_PATH,))
tmpl = loader.get_template('report.html')
diff --git a/automation/clients/report/dejagnu/summary.py b/automation/clients/report/dejagnu/summary.py
index 269793e6..d573c691 100644
--- a/automation/clients/report/dejagnu/summary.py
+++ b/automation/clients/report/dejagnu/summary.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Copyright 2011 Google Inc. All Rights Reserved.
# Author: kbaclawski@google.com (Krystian Baclawski)
#
@@ -60,7 +58,8 @@ class DejaGnuTestResult(namedtuple('Result', 'name variant result flaky')):
# remove include paths - they contain name of tmp directory
('-I\S+', ''),
# compress white spaces
- ('\s+', ' ')]
+ ('\s+', ' ')
+ ]
for pattern, replacement in substitutions:
variant = re.sub(pattern, replacement, variant)
@@ -168,12 +167,11 @@ class DejaGnuTestRun(object):
with open(filename, 'r') as report:
lines = [line.strip() for line in report.readlines() if line.strip()]
- parsers = (
- (re.compile(r'Running target (.*)'), self._ParseBoard),
- (re.compile(r'Test Run By (.*) on (.*)'), self._ParseDate),
- (re.compile(r'=== (.*) tests ==='), self._ParseTool),
- (re.compile(r'Target(\s+)is (.*)'), self._ParseTarget),
- (re.compile(r'Host(\s+)is (.*)'), self._ParseHost))
+ parsers = ((re.compile(r'Running target (.*)'), self._ParseBoard),
+ (re.compile(r'Test Run By (.*) on (.*)'), self._ParseDate),
+ (re.compile(r'=== (.*) tests ==='), self._ParseTool),
+ (re.compile(r'Target(\s+)is (.*)'), self._ParseTarget),
+ (re.compile(r'Host(\s+)is (.*)'), self._ParseHost))
for line in lines:
result = DejaGnuTestResult.FromLine(line)
@@ -219,8 +217,7 @@ class DejaGnuTestRun(object):
# Remove all UNRESOLVED results that were also marked as UNSUPPORTED.
unresolved = [res._replace(result='UNRESOLVED')
- for res in results
- if res.result == 'UNSUPPORTED']
+ for res in results if res.result == 'UNSUPPORTED']
for res in unresolved:
if res in self.results:
@@ -257,8 +254,8 @@ class DejaGnuTestRun(object):
self.results.add(result._replace(result=new_result))
for result in sorted(manifest_results):
- logging.warning(
- 'Result {%s} listed in manifest but not suppressed.', result)
+ logging.warning('Result {%s} listed in manifest but not suppressed.',
+ result)
def __str__(self):
return '{0}, {1} @{2} on {3}'.format(self.target, self.tool, self.board,
diff --git a/automation/clients/report/validate_failures.py b/automation/clients/report/validate_failures.py
index e99c9054..5db356d1 100755
--- a/automation/clients/report/validate_failures.py
+++ b/automation/clients/report/validate_failures.py
@@ -24,7 +24,6 @@
# along with GCC; see the file COPYING. If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
-
"""This script provides a coarser XFAILing mechanism that requires no
detailed DejaGNU markings. This is useful in a variety of scenarios:
@@ -78,15 +77,13 @@ def GetSumFiles(build_dir):
for root, _, filenames in os.walk(build_dir):
summaries.extend([os.path.join(root, filename)
- for filename in filenames
- if filename.endswith('.sum')])
+ for filename in filenames if filename.endswith('.sum')])
return map(os.path.normpath, summaries)
def ValidBuildDirectory(build_dir, target):
- mandatory_paths = [build_dir,
- os.path.join(build_dir, 'Makefile')]
+ mandatory_paths = [build_dir, os.path.join(build_dir, 'Makefile')]
extra_paths = [os.path.join(build_dir, target),
os.path.join(build_dir, 'build-%s' % target)]
@@ -104,8 +101,7 @@ def GetManifestPath(build_dir):
target = makefile['target_alias']
if not ValidBuildDirectory(build_dir, target):
- logging.error(
- '%s is not a valid GCC top level build directory.', build_dir)
+ logging.error('%s is not a valid GCC top level build directory.', build_dir)
sys.exit(1)
logging.info('Discovered source directory: "%s"', srcdir)
@@ -123,8 +119,8 @@ def CompareResults(manifest, actual):
actual_vs_manifest = actual - manifest
# Filter out tests marked flaky.
- manifest_without_flaky_tests = set(
- filter(lambda result: not result.flaky, manifest))
+ manifest_without_flaky_tests = set(filter(lambda result: not result.flaky,
+ manifest))
# Simlarly for all the tests in the manifest.
manifest_vs_actual = manifest_without_flaky_tests - actual
@@ -136,7 +132,7 @@ def LogResults(level, results):
log_fun = getattr(logging, level)
for num, result in enumerate(sorted(results), start=1):
- log_fun(" %d) %s", num, result)
+ log_fun(' %d) %s', num, result)
def CheckExpectedResults(manifest_path, build_dir):
@@ -194,26 +190,37 @@ def ProduceManifest(manifest_path, build_dir, overwrite):
with open(manifest_path, 'w') as manifest_file:
manifest_strings = [manifest.Generate() for manifest in manifests]
- logging.info('Writing manifest to "%s".' % manifest_path)
+ logging.info('Writing manifest to "%s".', manifest_path)
manifest_file.write('\n'.join(manifest_strings))
def Main(argv):
parser = optparse.OptionParser(usage=__doc__)
parser.add_option(
- '-b', '--build_dir',
- dest='build_dir', action='store',metavar='PATH', default=os.getcwd(),
+ '-b',
+ '--build_dir',
+ dest='build_dir',
+ action='store',
+ metavar='PATH',
+ default=os.getcwd(),
help='Build directory to check. (default: current directory)')
+ parser.add_option('-m',
+ '--manifest',
+ dest='manifest',
+ action='store_true',
+ help='Produce the manifest for the current build.')
parser.add_option(
- '-m', '--manifest', dest='manifest', action='store_true',
- help='Produce the manifest for the current build.')
- parser.add_option(
- '-f', '--force', dest='force', action='store_true',
+ '-f',
+ '--force',
+ dest='force',
+ action='store_true',
help=('Overwrite an existing manifest file, if user requested creating '
'new one. (default: False)'))
- parser.add_option(
- '-v', '--verbose', dest='verbose', action='store_true',
- help='Increase verbosity.')
+ parser.add_option('-v',
+ '--verbose',
+ dest='verbose',
+ action='store_true',
+ help='Increase verbosity.')
options, _ = parser.parse_args(argv[1:])
if options.verbose:
@@ -226,6 +233,7 @@ def Main(argv):
else:
CheckExpectedResults(manifest_path, options.build_dir)
+
if __name__ == '__main__':
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
Main(sys.argv)
diff --git a/automation/common/__init__.py b/automation/common/__init__.py
index e69de29b..8b137891 100644
--- a/automation/common/__init__.py
+++ b/automation/common/__init__.py
@@ -0,0 +1 @@
+
diff --git a/automation/common/command.py b/automation/common/command.py
index 192e2bd8..c56e9fad 100644
--- a/automation/common/command.py
+++ b/automation/common/command.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Copyright 2011 Google Inc. All Rights Reserved.
__author__ = 'kbaclawski@google.com (Krystian Baclawski)'
@@ -197,8 +195,8 @@ def RemoteCopyFrom(from_machine, from_path, to_path, username=None):
login = '%s@%s' % (username, from_machine)
return Chain(
- MakeDir(to_path),
- Shell('rsync', '-a', '%s:%s' % (login, from_path), to_path))
+ MakeDir(to_path), Shell('rsync', '-a', '%s:%s' %
+ (login, from_path), to_path))
def MakeSymlink(to_path, link_name):
@@ -224,8 +222,7 @@ def RmTree(*dirs):
def UnTar(tar_file, dest_dir):
return Chain(
- MakeDir(dest_dir),
- Shell('tar', '-x', '-f', tar_file, '-C', dest_dir))
+ MakeDir(dest_dir), Shell('tar', '-x', '-f', tar_file, '-C', dest_dir))
def Tar(tar_file, *args):
@@ -241,6 +238,4 @@ def Tar(tar_file, *args):
options.extend(['-f', tar_file])
options.extend(args)
- return Chain(
- MakeDir(os.path.dirname(tar_file)),
- Shell('tar', *options))
+ return Chain(MakeDir(os.path.dirname(tar_file)), Shell('tar', *options))
diff --git a/automation/common/command_executer.py b/automation/common/command_executer.py
index 2db0048e..c0f314f5 100644
--- a/automation/common/command_executer.py
+++ b/automation/common/command_executer.py
@@ -1,15 +1,12 @@
-#!/usr/bin/python
-#
# Copyright 2011 Google Inc. All Rights Reserved.
#
-
"""Classes that help running commands in a subshell.
Commands can be run locally, or remotly using SSH connection. You may log the
output of a command to a terminal or a file, or any other destination.
"""
-__author__ = "kbaclawski@google.com (Krystian Baclawski)"
+__author__ = 'kbaclawski@google.com (Krystian Baclawski)'
import fcntl
import logging
@@ -32,8 +29,12 @@ class CommandExecuter(object):
def Configure(cls, dry_run):
cls.DRY_RUN = dry_run
- def RunCommand(self, cmd, machine=None, username=None,
- command_terminator=None, command_timeout=None):
+ def RunCommand(self,
+ cmd,
+ machine=None,
+ username=None,
+ command_terminator=None,
+ command_timeout=None):
cmd = str(cmd)
if self._dry_run:
@@ -43,13 +44,13 @@ class CommandExecuter(object):
command_terminator = CommandTerminator()
if command_terminator.IsTerminated():
- self._logger.warning("Command has been already terminated!")
+ self._logger.warning('Command has been already terminated!')
return 1
# Rewrite command for remote execution.
if machine:
if username:
- login = "%s@%s" % (username, machine)
+ login = '%s@%s' % (username, machine)
else:
login = machine
@@ -62,8 +63,8 @@ class CommandExecuter(object):
child = self._SpawnProcess(cmd, command_terminator, command_timeout)
- self._logger.debug(
- "{PID: %d} Finished with %d code.", child.pid, child.returncode)
+ self._logger.debug('{PID: %d} Finished with %d code.', child.pid,
+ child.returncode)
return child.returncode
@@ -71,10 +72,10 @@ class CommandExecuter(object):
"""Gracefully shutdown the child by sending SIGTERM."""
if command_timeout:
- self._logger.warning("{PID: %d} Timeout of %s seconds reached since "
- "process started.", child.pid, command_timeout)
+ self._logger.warning('{PID: %d} Timeout of %s seconds reached since '
+ 'process started.', child.pid, command_timeout)
- self._logger.warning("{PID: %d} Terminating child.", child.pid)
+ self._logger.warning('{PID: %d} Terminating child.', child.pid)
try:
child.terminate()
@@ -92,16 +93,18 @@ class CommandExecuter(object):
def _Kill(self, child):
"""Kill the child with immediate result."""
- self._logger.warning("{PID: %d} Process still alive.", child.pid)
- self._logger.warning("{PID: %d} Killing child.", child.pid)
+ self._logger.warning('{PID: %d} Process still alive.', child.pid)
+ self._logger.warning('{PID: %d} Killing child.', child.pid)
child.kill()
child.wait()
def _SpawnProcess(self, cmd, command_terminator, command_timeout):
# Create a child process executing provided command.
- child = subprocess.Popen(
- cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
- stdin=subprocess.PIPE, shell=True)
+ child = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ shell=True)
# Close stdin so the child won't be able to block on read.
child.stdin.close()
@@ -157,10 +160,10 @@ class CommandExecuter(object):
data = os.read(fd, 4096)
except OSError:
# terminate loop if EWOULDBLOCK (EAGAIN) is received
- data = ""
+ data = ''
if not already_terminated:
- self._logger.debug("Waiting for command to finish.")
+ self._logger.debug('Waiting for command to finish.')
child.wait()
return child
@@ -175,6 +178,7 @@ class CommandExecuter(object):
class LoggingCommandExecuter(CommandExecuter):
+
def __init__(self, *args, **kwargs):
super(LoggingCommandExecuter, self).__init__(*args, **kwargs)
@@ -183,8 +187,8 @@ class LoggingCommandExecuter(CommandExecuter):
def OpenLog(self, log_path):
"""The messages are going to be saved to gzip compressed file."""
- formatter = logging.Formatter(
- '%(asctime)s %(prefix)s: %(message)s', '%Y-%m-%d %H:%M:%S')
+ formatter = logging.Formatter('%(asctime)s %(prefix)s: %(message)s',
+ '%Y-%m-%d %H:%M:%S')
handler = logger.CompressedFileHandler(log_path, delay=True)
handler.setFormatter(formatter)
self._output.addHandler(handler)
@@ -215,6 +219,7 @@ class LoggingCommandExecuter(CommandExecuter):
class CommandTerminator(object):
+
def __init__(self):
self.terminated = False
diff --git a/automation/common/command_executer_test.py b/automation/common/command_executer_test.py
index 5b6ca639..4aa245f0 100755
--- a/automation/common/command_executer_test.py
+++ b/automation/common/command_executer_test.py
@@ -32,6 +32,7 @@ from automation.common.command_executer import CommandExecuter
class LoggerMock(object):
+
def LogCmd(self, cmd, machine='', user=''):
if machine:
logging.info('[%s] Executing: %s', machine, cmd)
@@ -49,6 +50,7 @@ class LoggerMock(object):
class CommandExecuterUnderTest(CommandExecuter):
+
def __init__(self):
CommandExecuter.__init__(self, logger_to_set=LoggerMock())
@@ -83,8 +85,9 @@ class CommandExecuterLocalTests(unittest.TestCase):
def RunCommand(self, method, **kwargs):
program = os.path.abspath(sys.argv[0])
- return self._executer.RunCommand(
- '%s runHelper %s' % (program, method), machine=self.HOSTNAME, **kwargs)
+ return self._executer.RunCommand('%s runHelper %s' % (program, method),
+ machine=self.HOSTNAME,
+ **kwargs)
def testCommandTimeout(self):
exit_code = self.RunCommand('SleepForMinute', command_timeout=3)
@@ -114,12 +117,14 @@ class CommandExecuterLocalTests(unittest.TestCase):
self.assertEquals(self._executer.stdout, '')
def testOutputStreamNonInteractive(self):
- self.assertFalse(self.RunCommand('IsOutputStreamInteractive'),
- 'stdout stream is a terminal!')
+ self.assertFalse(
+ self.RunCommand('IsOutputStreamInteractive'),
+ 'stdout stream is a terminal!')
def testErrorStreamNonInteractive(self):
- self.assertFalse(self.RunCommand('IsErrorStreamInteractive'),
- 'stderr stream is a terminal!')
+ self.assertFalse(
+ self.RunCommand('IsErrorStreamInteractive'),
+ 'stderr stream is a terminal!')
def testAttemptToRead(self):
self.assertFalse(self.RunCommand('WaitForInput', command_timeout=3))
@@ -149,6 +154,7 @@ class CommandExecuterRemoteTests(CommandExecuterLocalTests):
class CommandExecuterTestHelpers(object):
+
def SleepForMinute(self):
time.sleep(60)
return 1
diff --git a/automation/common/events.py b/automation/common/events.py
index 851a982c..ad3ec844 100644
--- a/automation/common/events.py
+++ b/automation/common/events.py
@@ -1,9 +1,7 @@
-#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
-
"""Tools for recording and reporting timeline of abstract events.
You can store any events provided that they can be stringified.
@@ -56,8 +54,7 @@ class _EventRecord(object):
return self._time_elapsed is not None
def GetTimeStartedFormatted(self):
- return time.strftime('%m/%d/%Y %H:%M:%S',
- time.gmtime(self._time_started))
+ return time.strftime('%m/%d/%Y %H:%M:%S', time.gmtime(self._time_started))
def GetTimeElapsedRounded(self):
return datetime.timedelta(seconds=int(self.time_elapsed.seconds))
@@ -104,8 +101,8 @@ class EventHistory(collections.Sequence):
def GetTotalTime(self):
if self._records:
- total_time_elapsed = sum(evrec.time_elapsed.seconds for evrec in
- self._records)
+ total_time_elapsed = sum(evrec.time_elapsed.seconds
+ for evrec in self._records)
return datetime.timedelta(seconds=int(total_time_elapsed))
@@ -114,15 +111,16 @@ class EventHistory(collections.Sequence):
if self._records:
for num, next_evrec in enumerate(self._records[1:], start=1):
- evrec = self._records[num-1]
+ evrec = self._records[num - 1]
- records.append(_EventRecord(_Transition(evrec.event, next_evrec.event),
- evrec.time_started, evrec.time_elapsed))
+ records.append(_EventRecord(
+ _Transition(evrec.event, next_evrec.event), evrec.time_started,
+ evrec.time_elapsed))
if not self.last.has_finished:
- records.append(_EventRecord(_Transition(self.last.event, 'NOW'),
- self.last.time_started,
- self.last.time_elapsed))
+ records.append(_EventRecord(
+ _Transition(self.last.event,
+ 'NOW'), self.last.time_started, self.last.time_elapsed))
return EventHistory(records)
diff --git a/automation/common/job.py b/automation/common/job.py
index d32fa59a..e845ab25 100644
--- a/automation/common/job.py
+++ b/automation/common/job.py
@@ -1,14 +1,9 @@
-#!/usr/bin/python
-#
# Copyright 2010 Google Inc. All Rights Reserved.
#
-
"""A module for a job in the infrastructure."""
-
__author__ = 'raymes@google.com (Raymes Khoury)'
-
import os.path
from automation.common import state_machine
@@ -22,6 +17,7 @@ STATUS_FAILED = 'FAILED'
class FolderDependency(object):
+
def __init__(self, job, src, dest=None):
if not dest:
dest = src
@@ -41,12 +37,14 @@ class JobStateMachine(state_machine.BasicStateMachine):
STATUS_NOT_EXECUTED: [STATUS_SETUP],
STATUS_SETUP: [STATUS_COPYING, STATUS_FAILED],
STATUS_COPYING: [STATUS_RUNNING, STATUS_FAILED],
- STATUS_RUNNING: [STATUS_SUCCEEDED, STATUS_FAILED]}
+ STATUS_RUNNING: [STATUS_SUCCEEDED, STATUS_FAILED]
+ }
final_states = [STATUS_SUCCEEDED, STATUS_FAILED]
class JobFailure(Exception):
+
def __init__(self, message, exit_code):
Exception.__init__(self, message)
self.exit_code = exit_code
@@ -57,7 +55,7 @@ class Job(object):
WORKDIR_PREFIX = '/usr/local/google/tmp/automation'
- def __init__(self, label, command, timeout=4*60*60):
+ def __init__(self, label, command, timeout=4 * 60 * 60):
self._state = JobStateMachine(STATUS_NOT_EXECUTED)
self.predecessors = set()
self.successors = set()
@@ -110,15 +108,15 @@ class Job(object):
def GetCommand(self):
substitutions = [
- ('$JOB_ID', str(self.id)),
- ('$JOB_TMP', self.work_dir),
+ ('$JOB_ID', str(self.id)), ('$JOB_TMP', self.work_dir),
('$JOB_HOME', self.home_dir),
- ('$PRIMARY_MACHINE', self.primary_machine.hostname)]
+ ('$PRIMARY_MACHINE', self.primary_machine.hostname)
+ ]
if len(self.machines) > 1:
for num, machine in enumerate(self.machines[1:]):
- substitutions.append(
- ('$SECONDARY_MACHINES[%d]' % num, machine.hostname))
+ substitutions.append(('$SECONDARY_MACHINES[%d]' % num, machine.hostname
+ ))
return self._FormatCommand(str(self.command), substitutions)
@@ -127,7 +125,8 @@ class Job(object):
# non existing Command class. If one is created then PrettyFormatCommand
# shall become its method.
return self._FormatCommand(self.GetCommand(), [
- ('\{ ', ''), ('; \}', ''), ('\} ', '\n'), ('\s*&&\s*', '\n')])
+ ('\{ ', ''), ('; \}', ''), ('\} ', '\n'), ('\s*&&\s*', '\n')
+ ])
def DependsOnFolder(self, dependency):
self.folder_dependencies.append(dependency)
diff --git a/automation/common/job_group.py b/automation/common/job_group.py
index 09321e1a..96912fc1 100644
--- a/automation/common/job_group.py
+++ b/automation/common/job_group.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Copyright 2010 Google Inc. All Rights Reserved.
#
@@ -8,25 +6,30 @@ import os
from automation.common.state_machine import BasicStateMachine
-STATUS_NOT_EXECUTED = "NOT_EXECUTED"
-STATUS_EXECUTING = "EXECUTING"
-STATUS_SUCCEEDED = "SUCCEEDED"
-STATUS_FAILED = "FAILED"
+STATUS_NOT_EXECUTED = 'NOT_EXECUTED'
+STATUS_EXECUTING = 'EXECUTING'
+STATUS_SUCCEEDED = 'SUCCEEDED'
+STATUS_FAILED = 'FAILED'
class JobGroupStateMachine(BasicStateMachine):
state_machine = {
STATUS_NOT_EXECUTED: [STATUS_EXECUTING],
- STATUS_EXECUTING: [STATUS_SUCCEEDED, STATUS_FAILED]}
+ STATUS_EXECUTING: [STATUS_SUCCEEDED, STATUS_FAILED]
+ }
final_states = [STATUS_SUCCEEDED, STATUS_FAILED]
class JobGroup(object):
- HOMEDIR_PREFIX = os.path.join("/home", getpass.getuser(), "www", "automation")
-
- def __init__(self, label, jobs=None, cleanup_on_completion=True,
- cleanup_on_failure=False, description=""):
+ HOMEDIR_PREFIX = os.path.join('/home', getpass.getuser(), 'www', 'automation')
+
+ def __init__(self,
+ label,
+ jobs=None,
+ cleanup_on_completion=True,
+ cleanup_on_failure=False,
+ description=''):
self._state = JobGroupStateMachine(STATUS_NOT_EXECUTED)
self.id = 0
self.label = label
@@ -49,7 +52,7 @@ class JobGroup(object):
@property
def home_dir(self):
- return os.path.join(self.HOMEDIR_PREFIX, "job-group-%d" % self.id)
+ return os.path.join(self.HOMEDIR_PREFIX, 'job-group-%d' % self.id)
@property
def time_submitted(self):
@@ -62,9 +65,8 @@ class JobGroup(object):
return '{%s: %s}' % (self.__class__.__name__, self.id)
def __str__(self):
- return "\n".join(["Job-Group:",
- "ID: %s" % self.id] +
- [str(job) for job in self.jobs])
+ return '\n'.join(['Job-Group:', 'ID: %s' % self.id] + [str(
+ job) for job in self.jobs])
def AddJob(self, job):
self.jobs.append(job)
diff --git a/automation/common/logger.py b/automation/common/logger.py
index efa1a904..4aeee052 100644
--- a/automation/common/logger.py
+++ b/automation/common/logger.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Copyright 2010 Google Inc. All Rights Reserved.
from itertools import chain
@@ -17,7 +15,10 @@ def SetUpRootLogger(filename=None, level=None, display_flags={}):
if filename:
file_handler = logging.handlers.RotatingFileHandler(
- filename, maxBytes=10*1024*1024, backupCount=9, delay=True)
+ filename,
+ maxBytes=10 * 1024 * 1024,
+ backupCount=9,
+ delay=True)
file_handler.setFormatter(CustomFormatter(NullColorCoder(), display_flags))
logging.root.addHandler(file_handler)
@@ -26,12 +27,13 @@ def SetUpRootLogger(filename=None, level=None, display_flags={}):
class NullColorCoder(object):
+
def __call__(self, *args):
return ''
class AnsiColorCoder(object):
- CODES = {'reset': (0, ),
+ CODES = {'reset': (0,),
'bold': (1, 22),
'italics': (3, 23),
'underline': (4, 24),
@@ -82,8 +84,8 @@ class CustomFormatter(logging.Formatter):
def formatTime(self, record):
ct = self.converter(record.created)
- t = time.strftime("%Y-%m-%d %H:%M:%S", ct)
- return "%s.%02d" % (t, record.msecs / 10)
+ t = time.strftime('%Y-%m-%d %H:%M:%S', ct)
+ return '%s.%02d' % (t, record.msecs / 10)
def formatLevelName(self, record):
if record.levelname in ['WARNING', 'CRITICAL']:
@@ -96,8 +98,8 @@ class CustomFormatter(logging.Formatter):
def formatMessagePrefix(self, record):
try:
- return ' %s%s:%s ' % (
- self._coder('black', 'bold'), record.prefix, self._coder('reset'))
+ return ' %s%s:%s ' % (self._coder('black', 'bold'), record.prefix,
+ self._coder('reset'))
except AttributeError:
return ''
@@ -125,6 +127,7 @@ class CustomFormatter(logging.Formatter):
class CompressedFileHandler(logging.FileHandler):
+
def _open(self):
return gzip.open(self.baseFilename + '.gz', self.mode, 9)
@@ -136,6 +139,6 @@ def HandleUncaughtExceptions(fun):
try:
return fun(*args, **kwargs)
except StandardError:
- logging.exception("Uncaught exception:")
+ logging.exception('Uncaught exception:')
return _Interceptor
diff --git a/automation/common/machine.py b/automation/common/machine.py
index ae9f0b49..4db0db0d 100644
--- a/automation/common/machine.py
+++ b/automation/common/machine.py
@@ -1,8 +1,6 @@
-#!/usr/bin/python
-#
# Copyright 2010 Google Inc. All Rights Reserved.
-__author__ = "asharif@google.com (Ahmad Sharif)"
+__author__ = 'asharif@google.com (Ahmad Sharif)'
from fnmatch import fnmatch
@@ -38,24 +36,20 @@ class Machine(object):
self.locked = False
def __repr__(self):
- return "{%s: %s@%s}" % (
- self.__class__.__name__, self.username, self.hostname)
+ return '{%s: %s@%s}' % (self.__class__.__name__, self.username,
+ self.hostname)
def __str__(self):
- return "\n".join(["Machine Information:",
- "Hostname: %s" % self.hostname,
- "Label: %s" % self.label,
- "CPU: %s" % self.cpu,
- "Cores: %d" % self.cores,
- "OS: %s" % self.os,
- "Uses: %d" % self.uses,
- "Locked: %s" % self.locked])
+ return '\n'.join(
+ ['Machine Information:', 'Hostname: %s' % self.hostname, 'Label: %s' %
+ self.label, 'CPU: %s' % self.cpu, 'Cores: %d' % self.cores, 'OS: %s' %
+ self.os, 'Uses: %d' % self.uses, 'Locked: %s' % self.locked])
class MachineSpecification(object):
"""Helper class used to find a machine matching your requirements."""
- def __init__(self, hostname="*", label="*", os="*", lock_required=False):
+ def __init__(self, hostname='*', label='*', os='*', lock_required=False):
self.hostname = hostname
self.label = label
self.os = os
@@ -63,16 +57,13 @@ class MachineSpecification(object):
self.preferred_machines = []
def __str__(self):
- return "\n".join(["Machine Specification:",
- "Name: %s" % self.name,
- "OS: %s" % self.os,
- "Lock required: %s" % self.lock_required])
+ return '\n'.join(['Machine Specification:', 'Name: %s' % self.name, 'OS: %s'
+ % self.os, 'Lock required: %s' % self.lock_required])
def IsMatch(self, machine):
- return all([not machine.locked,
- fnmatch(machine.hostname, self.hostname),
- fnmatch(machine.label, self.label),
- fnmatch(machine.os, self.os)])
+ return all([not machine.locked, fnmatch(machine.hostname, self.hostname),
+ fnmatch(machine.label, self.label), fnmatch(machine.os,
+ self.os)])
def AddPreferredMachine(self, hostname):
if hostname not in self.preferred_machines:
diff --git a/automation/common/machine_test.py b/automation/common/machine_test.py
index 96e8f823..c9c200a9 100755
--- a/automation/common/machine_test.py
+++ b/automation/common/machine_test.py
@@ -1,28 +1,26 @@
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
-
"""Machine manager unittest.
MachineManagerTest tests MachineManager.
"""
-__author__ = "asharif@google.com (Ahmad Sharif)"
-
+__author__ = 'asharif@google.com (Ahmad Sharif)'
import machine
import unittest
class MachineTest(unittest.TestCase):
+
def setUp(self):
pass
-
def testPrintMachine(self):
- mach = machine.Machine("ahmad.mtv", "core2duo", 4, "linux", "asharif")
- self.assertTrue("ahmad.mtv" in str(mach))
+ mach = machine.Machine('ahmad.mtv', 'core2duo', 4, 'linux', 'asharif')
+ self.assertTrue('ahmad.mtv' in str(mach))
-if __name__ == "__main__":
+if __name__ == '__main__':
unittest.main()
diff --git a/automation/common/state_machine.py b/automation/common/state_machine.py
index 4d8dc49b..d1cf42c8 100644
--- a/automation/common/state_machine.py
+++ b/automation/common/state_machine.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Google Inc. All Rights Reserved.
diff --git a/automation/server/__init__.py b/automation/server/__init__.py
index e69de29b..8b137891 100644
--- a/automation/server/__init__.py
+++ b/automation/server/__init__.py
@@ -0,0 +1 @@
+
diff --git a/automation/server/job_executer.py b/automation/server/job_executer.py
index 8f48ca08..30b59463 100644
--- a/automation/server/job_executer.py
+++ b/automation/server/job_executer.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Copyright 2010 Google Inc. All Rights Reserved.
#
@@ -15,9 +13,10 @@ from automation.common.command_executer import CommandTerminator
class JobExecuter(threading.Thread):
+
def __init__(self, job_to_execute, machines, listeners):
threading.Thread.__init__(self)
-
+
assert machines
self.job = job_to_execute
@@ -25,13 +24,13 @@ class JobExecuter(threading.Thread):
self.machines = machines
# Set Thread name.
- self.name = "%s-%s" % (self.__class__.__name__, self.job.id)
+ self.name = '%s-%s' % (self.__class__.__name__, self.job.id)
self._logger = logging.getLogger(self.__class__.__name__)
self._executer = LoggingCommandExecuter(self.job.dry_run)
self._terminator = CommandTerminator()
- def _RunRemotely(self, command, fail_msg, command_timeout=1*60*60):
+ def _RunRemotely(self, command, fail_msg, command_timeout=1 * 60 * 60):
exit_code = self._executer.RunCommand(command,
self.job.primary_machine.hostname,
self.job.primary_machine.username,
@@ -40,7 +39,7 @@ class JobExecuter(threading.Thread):
if exit_code:
raise job.JobFailure(fail_msg, exit_code)
- def _RunLocally(self, command, fail_msg, command_timeout=1*60*60):
+ def _RunLocally(self, command, fail_msg, command_timeout=1 * 60 * 60):
exit_code = self._executer.RunCommand(command,
command_terminator=self._terminator,
command_timeout=command_timeout)
@@ -52,22 +51,20 @@ class JobExecuter(threading.Thread):
def CleanUpWorkDir(self):
self._logger.debug('Cleaning up %r work directory.', self.job)
- self._RunRemotely(
- cmd.RmTree(self.job.work_dir), "Cleanup workdir failed.")
+ self._RunRemotely(cmd.RmTree(self.job.work_dir), 'Cleanup workdir failed.')
def CleanUpHomeDir(self):
self._logger.debug('Cleaning up %r home directory.', self.job)
- self._RunLocally(
- cmd.RmTree(self.job.home_dir), "Cleanup homedir failed.")
+ self._RunLocally(cmd.RmTree(self.job.home_dir), 'Cleanup homedir failed.')
def _PrepareRuntimeEnvironment(self):
self._RunRemotely(
cmd.MakeDir(self.job.work_dir, self.job.logs_dir, self.job.results_dir),
- "Creating new job directory failed.")
+ 'Creating new job directory failed.')
# The log directory is ready, so we can prepare to log command's output.
- self._executer.OpenLog(
- os.path.join(self.job.logs_dir, self.job.log_filename_prefix))
+ self._executer.OpenLog(os.path.join(self.job.logs_dir,
+ self.job.log_filename_prefix))
def _SatisfyFolderDependencies(self):
for dependency in self.job.folder_dependencies:
@@ -79,18 +76,21 @@ class JobExecuter(threading.Thread):
# No need to make a copy, just symlink it
self._RunRemotely(
cmd.MakeSymlink(from_folder, to_folder),
- "Failed to create symlink to required directory.")
+ 'Failed to create symlink to required directory.')
else:
self._RunRemotely(
- cmd.RemoteCopyFrom(from_machine.hostname, from_folder, to_folder,
+ cmd.RemoteCopyFrom(from_machine.hostname,
+ from_folder,
+ to_folder,
username=from_machine.username),
- "Failed to copy required files.")
+ 'Failed to copy required files.')
def _LaunchJobCommand(self):
command = self.job.GetCommand()
- self._RunRemotely("%s; %s" % ("PS1=. TERM=linux source ~/.bashrc",
- cmd.Wrapper(command, cwd=self.job.work_dir)),
+ self._RunRemotely('%s; %s' % ('PS1=. TERM=linux source ~/.bashrc',
+ cmd.Wrapper(command,
+ cwd=self.job.work_dir)),
"Command failed to execute: '%s'." % command,
self.job.timeout)
@@ -101,14 +101,13 @@ class JobExecuter(threading.Thread):
self.job.results_dir,
self.job.home_dir,
username=self.job.primary_machine.username),
- "Failed to copy results.")
+ 'Failed to copy results.')
def run(self):
self.job.status = job.STATUS_SETUP
self.job.machines = self.machines
- self._logger.debug(
- "Executing %r on %r in directory %s.",
- self.job, self.job.primary_machine.hostname, self.job.work_dir)
+ self._logger.debug('Executing %r on %r in directory %s.', self.job,
+ self.job.primary_machine.hostname, self.job.work_dir)
try:
self.CleanUpWorkDir()
@@ -127,10 +126,9 @@ class JobExecuter(threading.Thread):
# If we get here, the job succeeded.
self.job.status = job.STATUS_SUCCEEDED
except job.JobFailure as ex:
- self._logger.error(
- "Job failed. Exit code %s. %s", ex.exit_code, ex)
+ self._logger.error('Job failed. Exit code %s. %s', ex.exit_code, ex)
if self._terminator.IsTerminated():
- self._logger.info("%r was killed", self.job)
+ self._logger.info('%r was killed', self.job)
self.job.status = job.STATUS_FAILED
diff --git a/automation/server/job_group_manager.py b/automation/server/job_group_manager.py
index 1a0b1b08..d66f5e07 100644
--- a/automation/server/job_group_manager.py
+++ b/automation/server/job_group_manager.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Copyright 2010 Google Inc. All Rights Reserved.
#
@@ -16,6 +14,7 @@ from automation.server.job_manager import IdProducerPolicy
class JobGroupManager(object):
+
def __init__(self, job_manager):
self.all_job_groups = []
@@ -27,7 +26,7 @@ class JobGroupManager(object):
self._id_producer = IdProducerPolicy()
self._id_producer.Initialize(job_group.JobGroup.HOMEDIR_PREFIX,
- "job-group-(?P<id>\d+)")
+ 'job-group-(?P<id>\d+)')
self._logger = logging.getLogger(self.__class__.__name__)
@@ -49,9 +48,8 @@ class JobGroupManager(object):
self._logger.debug('Creating runtime environment for %r.', group)
- CommandExecuter().RunCommand(
- cmd.Chain(cmd.RmTree(group.home_dir),
- cmd.MakeDir(group.home_dir)))
+ CommandExecuter().RunCommand(cmd.Chain(
+ cmd.RmTree(group.home_dir), cmd.MakeDir(group.home_dir)))
with self._lock:
self.all_job_groups.append(group)
@@ -61,7 +59,7 @@ class JobGroupManager(object):
group.status = job_group.STATUS_EXECUTING
- self._logger.info("Added %r to queue.", group)
+ self._logger.info('Added %r to queue.', group)
return group.id
diff --git a/automation/server/job_manager.py b/automation/server/job_manager.py
index de5bc47c..7a65b918 100644
--- a/automation/server/job_manager.py
+++ b/automation/server/job_manager.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Copyright 2010 Google Inc. All Rights Reserved.
#
@@ -60,6 +58,7 @@ class IdProducerPolicy(object):
class JobManager(threading.Thread):
+
def __init__(self, machine_manager):
threading.Thread.__init__(self, name=self.__class__.__name__)
self.all_jobs = []
@@ -81,14 +80,14 @@ class JobManager(threading.Thread):
self._logger = logging.getLogger(self.__class__.__name__)
def StartJobManager(self):
- self._logger.info("Starting...")
+ self._logger.info('Starting...')
with self._lock:
self.start()
self._jobs_available.notifyAll()
def StopJobManager(self):
- self._logger.info("Shutdown request received.")
+ self._logger.info('Shutdown request received.')
with self._lock:
for job_ in self.all_jobs:
@@ -117,7 +116,7 @@ class JobManager(threading.Thread):
return None
def _KillJob(self, job_id):
- self._logger.info("Killing [Job: %d].", job_id)
+ self._logger.info('Killing [Job: %d].', job_id)
if job_id in self.job_executer_mapping:
self.job_executer_mapping[job_id].Kill()
@@ -165,7 +164,7 @@ class JobManager(threading.Thread):
@logger.HandleUncaughtExceptions
def run(self):
- self._logger.info("Started.")
+ self._logger.info('Started.')
while not self._exit_request:
with self._lock:
@@ -192,4 +191,4 @@ class JobManager(threading.Thread):
executer.start()
self.job_executer_mapping[ready_job.id] = executer
- self._logger.info("Stopped.")
+ self._logger.info('Stopped.')
diff --git a/automation/server/machine_manager.py b/automation/server/machine_manager.py
index b54f19d4..b7186077 100644
--- a/automation/server/machine_manager.py
+++ b/automation/server/machine_manager.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Copyright 2010 Google Inc. All Rights Reserved.
__author__ = 'asharif@google.com (Ahmad Sharif)'
@@ -41,7 +39,8 @@ class MachineManager(object):
mach = min(available_pool, key=uses)
if mach_spec.preferred_machines:
- preferred_pool = [m for m in available_pool
+ preferred_pool = [m
+ for m in available_pool
if m.hostname in mach_spec.preferred_machines]
if preferred_pool:
mach = min(preferred_pool, key=uses)
diff --git a/automation/server/machine_manager_test.py b/automation/server/machine_manager_test.py
index ebdaea5f..67fdcc2b 100755
--- a/automation/server/machine_manager_test.py
+++ b/automation/server/machine_manager_test.py
@@ -2,7 +2,7 @@
#
# Copyright 2010 Google Inc. All Rights Reserved.
-__author__ = "asharif@google.com (Ahmad Sharif)"
+__author__ = 'asharif@google.com (Ahmad Sharif)'
import unittest
from automation.common import machine
@@ -18,15 +18,15 @@ class MachineManagerTest(unittest.TestCase):
print self.machine_manager
def testGetLinuxBox(self):
- mach_spec_list = [machine.MachineSpecification(os="linux")]
+ mach_spec_list = [machine.MachineSpecification(os='linux')]
machines = self.machine_manager.GetMachines(mach_spec_list)
self.assertTrue(machines)
def testGetChromeOSBox(self):
- mach_spec_list = [machine.MachineSpecification(os="chromeos")]
+ mach_spec_list = [machine.MachineSpecification(os='chromeos')]
machines = self.machine_manager.GetMachines(mach_spec_list)
self.assertTrue(machines)
-if __name__ == "__main__":
+if __name__ == '__main__':
unittest.main()
diff --git a/automation/server/monitor/__init__.py b/automation/server/monitor/__init__.py
index e69de29b..8b137891 100644
--- a/automation/server/monitor/__init__.py
+++ b/automation/server/monitor/__init__.py
@@ -0,0 +1 @@
+
diff --git a/automation/server/monitor/dashboard.py b/automation/server/monitor/dashboard.py
index 33d7c3d7..f6befed8 100644
--- a/automation/server/monitor/dashboard.py
+++ b/automation/server/monitor/dashboard.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Copyright 2011 Google Inc. All Rights Reserved.
#
@@ -19,7 +17,6 @@ from django.shortcuts import render_to_response
from django.template import Context
from django.views import static
-
Link = namedtuple('Link', 'href name')
@@ -29,8 +26,8 @@ def GetServerConnection():
def MakeDefaultContext(*args):
context = Context({'links': [
- Link('/job-group', 'Job Groups'),
- Link('/machine', 'Machines')]})
+ Link('/job-group', 'Job Groups'), Link('/machine', 'Machines')
+ ]})
for arg in args:
context.update(arg)
@@ -39,6 +36,7 @@ def MakeDefaultContext(*args):
class JobInfo(object):
+
def __init__(self, job_id):
self._job = pickle.loads(GetServerConnection().GetJob(job_id))
@@ -60,12 +58,9 @@ class JobInfo(object):
commands = enumerate(job.PrettyFormatCommand().split('\n'), start=1)
- return {'text': [('Label', job.label),
- ('Directory', job.work_dir)],
- 'link': [('Group', group),
- ('Predecessors', predecessors),
- ('Successors', successors),
- ('Machines', machines),
+ return {'text': [('Label', job.label), ('Directory', job.work_dir)],
+ 'link': [('Group', group), ('Predecessors', predecessors),
+ ('Successors', successors), ('Machines', machines),
('Logs', logs)],
'code': [('Command', commands)]}
@@ -77,8 +72,8 @@ class JobInfo(object):
for evlog in self._job.timeline.GetTransitionEventHistory()]
def GetLog(self):
- log_path = os.path.join(
- self._job.logs_dir, '%s.gz' % self._job.log_filename_prefix)
+ log_path = os.path.join(self._job.logs_dir,
+ '%s.gz' % self._job.log_filename_prefix)
try:
log = gzip.open(log_path, 'r')
@@ -104,9 +99,10 @@ class JobInfo(object):
class JobGroupInfo(object):
+
def __init__(self, job_group_id):
- self._job_group = pickle.loads(
- GetServerConnection().GetJobGroup(job_group_id))
+ self._job_group = pickle.loads(GetServerConnection().GetJobGroup(
+ job_group_id))
def GetAttributes(self):
group = self._job_group
@@ -159,9 +155,9 @@ class JobGroupInfo(object):
class JobGroupListInfo(object):
+
def __init__(self):
- self._all_job_groups = pickle.loads(
- GetServerConnection().GetAllJobGroups())
+ self._all_job_groups = pickle.loads(GetServerConnection().GetAllJobGroups())
def _GetJobGroupState(self, group):
return str(group.status)
@@ -188,7 +184,8 @@ def JobPageHandler(request, job_id):
ctx = MakeDefaultContext({
'job_id': job_id,
'attributes': job.GetAttributes(),
- 'timeline': job.GetTimeline()})
+ 'timeline': job.GetTimeline()
+ })
return render_to_response('job.html', ctx)
@@ -196,9 +193,7 @@ def JobPageHandler(request, job_id):
def LogPageHandler(request, job_id):
job = JobInfo(int(job_id))
- ctx = MakeDefaultContext({
- 'job_id': job_id,
- 'log_lines': job.GetLog()})
+ ctx = MakeDefaultContext({'job_id': job_id, 'log_lines': job.GetLog()})
return render_to_response('job_log.html', ctx)
@@ -210,7 +205,8 @@ def JobGroupPageHandler(request, job_group_id):
'group_id': job_group_id,
'attributes': group.GetAttributes(),
'job_list': group.GetJobList(),
- 'reports': group.GetReportList()})
+ 'reports': group.GetReportList()
+ })
return render_to_response('job_group.html', ctx)
@@ -218,8 +214,10 @@ def JobGroupPageHandler(request, job_group_id):
def JobGroupFilesPageHandler(request, job_group_id, path):
group = JobGroupInfo(int(job_group_id))
- return static.serve(
- request, path, document_root=group.GetHomeDirectory(), show_indexes=True)
+ return static.serve(request,
+ path,
+ document_root=group.GetHomeDirectory(),
+ show_indexes=True)
class FilterJobGroupsForm(forms.Form):
@@ -245,9 +243,7 @@ def JobGroupListPageHandler(request):
else:
form = FilterJobGroupsForm({'initial': '*'})
- ctx = MakeDefaultContext({
- 'filter': form,
- 'groups': group_list})
+ ctx = MakeDefaultContext({'filter': form, 'groups': group_list})
return render_to_response('job_group_list.html', ctx)
diff --git a/automation/server/monitor/manage.py b/automation/server/monitor/manage.py
index 1733753c..57deb5c2 100755
--- a/automation/server/monitor/manage.py
+++ b/automation/server/monitor/manage.py
@@ -8,7 +8,7 @@ __author__ = 'kbaclawski@google.com (Krystian Baclawski)'
from django.core.management import execute_manager
try:
- import settings # Assumed to be in the same directory.
+ import settings # Assumed to be in the same directory.
except ImportError:
import sys
@@ -16,5 +16,5 @@ except ImportError:
'containing %r.' % __file__)
sys.exit(1)
-if __name__ == "__main__":
+if __name__ == '__main__':
execute_manager(settings)
diff --git a/automation/server/monitor/settings.py b/automation/server/monitor/settings.py
index 9048da50..8cd20e35 100644
--- a/automation/server/monitor/settings.py
+++ b/automation/server/monitor/settings.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Django settings for monitor project.
@@ -48,6 +46,4 @@ SECRET_KEY = '13p5p_4q91*8@yo+tvvt#2k&6#d_&e_zvxdpdil53k419i5sop'
ROOT_URLCONF = 'monitor.urls'
# List of locations of the template source files, in search order.
-TEMPLATE_DIRS = (
- os.path.join(ROOT_PATH, 'templates'),
-)
+TEMPLATE_DIRS = (os.path.join(ROOT_PATH, 'templates'),)
diff --git a/automation/server/monitor/urls.py b/automation/server/monitor/urls.py
index 19a1ef9c..1a6b2485 100644
--- a/automation/server/monitor/urls.py
+++ b/automation/server/monitor/urls.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Copyright 2011 Google Inc. All Rights Reserved.
#
@@ -9,17 +7,15 @@ from django.conf import settings
from django.conf.urls.defaults import patterns
urlpatterns = patterns(
- 'dashboard',
- (r'^job-group$', 'JobGroupListPageHandler'),
+ 'dashboard', (r'^job-group$', 'JobGroupListPageHandler'),
(r'^machine$', 'MachineListPageHandler'),
(r'^job/(?P<job_id>\d+)/log$', 'LogPageHandler'),
- (r'^job/(?P<job_id>\d+)$', 'JobPageHandler'),
- (r'^job-group/(?P<job_group_id>\d+)/files/(?P<path>.*)$',
- 'JobGroupFilesPageHandler'),
+ (r'^job/(?P<job_id>\d+)$', 'JobPageHandler'), (
+ r'^job-group/(?P<job_group_id>\d+)/files/(?P<path>.*)$',
+ 'JobGroupFilesPageHandler'),
(r'^job-group/(?P<job_group_id>\d+)$', 'JobGroupPageHandler'),
(r'^$', 'DefaultPageHandler'))
-urlpatterns += patterns(
- '',
- (r'^static/(?P<path>.*)$', 'django.views.static.serve',
- {'document_root': settings.MEDIA_ROOT}))
+urlpatterns += patterns('',
+ (r'^static/(?P<path>.*)$', 'django.views.static.serve',
+ {'document_root': settings.MEDIA_ROOT}))
diff --git a/automation/server/server.py b/automation/server/server.py
index e9243cbb..f02a1d0f 100755
--- a/automation/server/server.py
+++ b/automation/server/server.py
@@ -38,7 +38,7 @@ class Server(object):
def ExecuteJobGroup(self, job_group, dry_run=False):
job_group = pickle.loads(job_group)
- self._logger.info("Received ExecuteJobGroup(%r, dry_run=%s) request.",
+ self._logger.info('Received ExecuteJobGroup(%r, dry_run=%s) request.',
job_group, dry_run)
for job in job_group.jobs:
@@ -46,25 +46,25 @@ class Server(object):
return self.job_group_manager.AddJobGroup(job_group)
def GetAllJobGroups(self):
- self._logger.info("Received GetAllJobGroups() request.")
+ self._logger.info('Received GetAllJobGroups() request.')
return pickle.dumps(self.job_group_manager.GetAllJobGroups())
def KillJobGroup(self, job_group_id):
- self._logger.info("Received KillJobGroup(%d) request.", job_group_id)
+ self._logger.info('Received KillJobGroup(%d) request.', job_group_id)
self.job_group_manager.KillJobGroup(pickle.loads(job_group_id))
def GetJobGroup(self, job_group_id):
- self._logger.info("Received GetJobGroup(%d) request.", job_group_id)
+ self._logger.info('Received GetJobGroup(%d) request.', job_group_id)
return pickle.dumps(self.job_group_manager.GetJobGroup(job_group_id))
def GetJob(self, job_id):
- self._logger.info("Received GetJob(%d) request.", job_id)
+ self._logger.info('Received GetJob(%d) request.', job_id)
return pickle.dumps(self.job_manager.GetJob(job_id))
def GetMachineList(self):
- self._logger.info("Received GetMachineList() request.")
+ self._logger.info('Received GetMachineList() request.')
return pickle.dumps(self.job_manager.machine_manager.GetMachineList())
@@ -79,18 +79,18 @@ class Server(object):
def GetServerOptions():
"""Get server's settings from command line options."""
parser = optparse.OptionParser()
- parser.add_option("-m",
- "--machines-file",
- dest="machines_file",
- help="The location of the file "
- "containing the machines database",
+ parser.add_option('-m',
+ '--machines-file',
+ dest='machines_file',
+ help='The location of the file '
+ 'containing the machines database',
default=machine_manager.DEFAULT_MACHINES_FILE)
- parser.add_option("-n",
- "--dry-run",
- dest="dry_run",
- help="Start the server in dry-run mode, where jobs will "
- "not actually be executed.",
- action="store_true",
+ parser.add_option('-n',
+ '--dry-run',
+ dest='dry_run',
+ help='Start the server in dry-run mode, where jobs will '
+ 'not actually be executed.',
+ action='store_true',
default=False)
return parser.parse_args()[0]
@@ -110,7 +110,9 @@ def Main():
try:
xmlserver = SimpleXMLRPCServer(
- ("localhost", 8000), allow_none=True, logRequests=False)
+ ('localhost', 8000),
+ allow_none=True,
+ logRequests=False)
xmlserver.register_instance(server)
xmlserver.serve_forever()
except Exception as ex:
@@ -119,5 +121,5 @@ def Main():
sys.exit(1)
-if __name__ == "__main__":
+if __name__ == '__main__':
Main()
diff --git a/automation/server/server_test.py b/automation/server/server_test.py
index c2e4b0ae..bcf1b9f5 100755
--- a/automation/server/server_test.py
+++ b/automation/server/server_test.py
@@ -1,27 +1,26 @@
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
-
"""Machine manager unittest.
MachineManagerTest tests MachineManager.
"""
-__author__ = "asharif@google.com (Ahmad Sharif)"
+__author__ = 'asharif@google.com (Ahmad Sharif)'
import server
import unittest
class ServerTest(unittest.TestCase):
+
def setUp(self):
pass
-
def testGetAllJobs(self):
s = server.Server()
print s.GetAllJobs()
-if __name__ == "__main__":
+if __name__ == '__main__':
unittest.main()
diff --git a/bestflags/example_algorithms.py b/bestflags/example_algorithms.py
index 945ff0e2..9775d491 100644
--- a/bestflags/example_algorithms.py
+++ b/bestflags/example_algorithms.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""An example main file running the algorithms.
Part of the Chrome build flags optimization.
@@ -30,8 +29,11 @@ import testing_batch
parser = OptionParser()
-parser.add_option('-f', '--file', dest='filename',
- help='configuration file FILE input', metavar='FILE')
+parser.add_option('-f',
+ '--file',
+ dest='filename',
+ help='configuration file FILE input',
+ metavar='FILE')
# The meta data for the genetic algorithm.
BUILD_CMD = 'BUILD_CMD'
@@ -112,8 +114,8 @@ def _ProcessGA(meta_data):
Task.InitLogCommand(build_cmd, test_cmd, output_file)
# Initiate the build/test command and the log directory.
- GAGeneration.InitMetaData(stop_threshold, num_chromosomes, num_trials,
- specs, mutation_rate)
+ GAGeneration.InitMetaData(stop_threshold, num_chromosomes, num_trials, specs,
+ mutation_rate)
# Generate the initial generations.
generation_tasks = testing_batch.GenerateRandomGATasks(specs, num_chromosomes,
@@ -169,9 +171,9 @@ def _StartExperiment(num_builders, num_testers, generations):
build_test, pipeline_worker.Helper,
pipeline_worker.Worker, test_steering)
- steer_process = multiprocessing.Process(target=Steering,
- args=(set([]), generations,
- test_steering, steering_build))
+ steer_process = multiprocessing.Process(
+ target=Steering,
+ args=(set([]), generations, test_steering, steering_build))
# Start the processes.
build_process.start()
diff --git a/bestflags/flags.py b/bestflags/flags.py
index 7e7ea674..b316421e 100644
--- a/bestflags/flags.py
+++ b/bestflags/flags.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Manage bundles of flags used for the optimizing of ChromeOS.
Part of the Chrome build flags optimization.
diff --git a/bestflags/flags_test.py b/bestflags/flags_test.py
index 8ab0a9a5..dbbea77c 100644
--- a/bestflags/flags_test.py
+++ b/bestflags/flags_test.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Unit tests for the classes in module 'flags'.
Part of the Chrome build flags optimization.
diff --git a/bestflags/flags_util.py b/bestflags/flags_util.py
index ae19a60b..20be57fb 100644
--- a/bestflags/flags_util.py
+++ b/bestflags/flags_util.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Utility functions to explore the neighbor flags.
Part of the Chrome build flags optimization.
@@ -9,7 +8,6 @@ Part of the Chrome build flags optimization.
__author__ = 'yuhenglong@google.com (Yuheng Long)'
-
import flags
from flags import Flag
diff --git a/bestflags/generation.py b/bestflags/generation.py
index 331c8105..67c379f5 100644
--- a/bestflags/generation.py
+++ b/bestflags/generation.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""A generation of a set of tasks.
Part of the Chrome build flags optimization.
diff --git a/bestflags/generation_test.py b/bestflags/generation_test.py
index cdb4b7ab..2e042d49 100644
--- a/bestflags/generation_test.py
+++ b/bestflags/generation_test.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Generation unittest.
Part of the Chrome build flags optimization.
@@ -15,7 +14,6 @@ import unittest
from generation import Generation
from mock_task import IdentifierMockTask
-
# Pick an integer at random.
TEST_STAGE = -125
@@ -69,5 +67,6 @@ class GenerationTest(unittest.TestCase):
# list is set.
assert gen.Done()
+
if __name__ == '__main__':
unittest.main()
diff --git a/bestflags/genetic_algorithm.py b/bestflags/genetic_algorithm.py
index 15ad1f74..deb83f12 100644
--- a/bestflags/genetic_algorithm.py
+++ b/bestflags/genetic_algorithm.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""The hill genetic algorithm.
Part of the Chrome build flags optimization.
@@ -87,6 +86,7 @@ def RandomMutate(specs, flag_set, mutation_rate):
class GATask(Task):
+
def __init__(self, flag_set):
Task.__init__(self, flag_set)
diff --git a/bestflags/hill_climb_best_neighbor.py b/bestflags/hill_climb_best_neighbor.py
index 4f59bca3..7bb5a7ff 100644
--- a/bestflags/hill_climb_best_neighbor.py
+++ b/bestflags/hill_climb_best_neighbor.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""A variation of the hill climbing algorithm.
Part of the Chrome build flags optimization.
@@ -13,7 +12,6 @@ neighbor.
__author__ = 'yuhenglong@google.com (Yuheng Long)'
-
from flags import FlagSet
import flags_util
from generation import Generation
diff --git a/bestflags/iterative_elimination.py b/bestflags/iterative_elimination.py
index 618917e2..2f4c41d1 100644
--- a/bestflags/iterative_elimination.py
+++ b/bestflags/iterative_elimination.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Iterative flags elimination.
Part of the Chrome build flags optimization.
diff --git a/bestflags/mock_task.py b/bestflags/mock_task.py
index 144b7747..6de2b35c 100644
--- a/bestflags/mock_task.py
+++ b/bestflags/mock_task.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""This module defines the common mock tasks used by various unit tests.
Part of the Chrome build flags optimization.
diff --git a/bestflags/pipeline_process.py b/bestflags/pipeline_process.py
index e77d92cc..31f5f21f 100644
--- a/bestflags/pipeline_process.py
+++ b/bestflags/pipeline_process.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Pipeline process that encapsulates the actual content.
Part of the Chrome build flags optimization.
@@ -90,11 +89,10 @@ class PipelineProcess(multiprocessing.Process):
work_pool = multiprocessing.Pool(self._num_processes)
# the helper process
- helper_process = multiprocessing.Process(target=self._helper,
- args=(self._stage, self._cache,
- self._helper_queue,
- self._work_queue,
- self._result_queue))
+ helper_process = multiprocessing.Process(
+ target=self._helper,
+ args=(self._stage, self._cache, self._helper_queue, self._work_queue,
+ self._result_queue))
helper_process.start()
mycache = self._cache.keys()
@@ -112,9 +110,9 @@ class PipelineProcess(multiprocessing.Process):
self._helper_queue.put(task)
else:
# Let the workers do the actual work.
- work_pool.apply_async(self._worker, args=(self._stage, task,
- self._work_queue,
- self._result_queue))
+ work_pool.apply_async(
+ self._worker,
+ args=(self._stage, task, self._work_queue, self._result_queue))
mycache.append(task_key)
# Shutdown the workers pool and the helper process.
diff --git a/bestflags/pipeline_process_test.py b/bestflags/pipeline_process_test.py
index 77d72db5..b9d84067 100644
--- a/bestflags/pipeline_process_test.py
+++ b/bestflags/pipeline_process_test.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Pipeline Process unittest.
Part of the Chrome build flags optimization.
@@ -62,9 +61,8 @@ class PipelineProcessTest(unittest.TestCase):
inp = manager.Queue()
output = manager.Queue()
- process = pipeline_process.PipelineProcess(2, 'testing', {}, TEST_STAGE,
- inp, MockHelper, MockWorker,
- output)
+ process = pipeline_process.PipelineProcess(
+ 2, 'testing', {}, TEST_STAGE, inp, MockHelper, MockWorker, output)
process.start()
inp.put(MockTask(TEST_STAGE, 1))
diff --git a/bestflags/pipeline_worker.py b/bestflags/pipeline_worker.py
index 7cccaead..e21ec2c8 100644
--- a/bestflags/pipeline_worker.py
+++ b/bestflags/pipeline_worker.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""The pipeline_worker functions of the build and test stage of the framework.
Part of the Chrome build flags optimization.
diff --git a/bestflags/pipeline_worker_test.py b/bestflags/pipeline_worker_test.py
index 8c8f315c..e3de5e12 100644
--- a/bestflags/pipeline_worker_test.py
+++ b/bestflags/pipeline_worker_test.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Unittest for the pipeline_worker functions in the build/test stage.
Part of the Chrome build flags optimization.
@@ -20,7 +19,6 @@ from mock_task import MockTask
import pipeline_process
import pipeline_worker
-
# Pick an integer at random.
TEST_STAGE = -3
@@ -51,10 +49,9 @@ class PipelineWorkerTest(unittest.TestCase):
completed_queue = manager.Queue()
# Set up the helper process that holds the helper method.
- helper_process = multiprocessing.Process(target=pipeline_worker.Helper,
- args=(TEST_STAGE, {}, helper_queue,
- completed_queue,
- result_queue))
+ helper_process = multiprocessing.Process(
+ target=pipeline_worker.Helper,
+ args=(TEST_STAGE, {}, helper_queue, completed_queue, result_queue))
helper_process.start()
# A dictionary defines the mock result to the helper.
diff --git a/bestflags/steering.py b/bestflags/steering.py
index a7a559e2..320f7c37 100644
--- a/bestflags/steering.py
+++ b/bestflags/steering.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""The framework stage that produces the next generation of tasks to run.
Part of the Chrome build flags optimization.
diff --git a/bestflags/steering_test.py b/bestflags/steering_test.py
index 2000dc45..c96e362f 100644
--- a/bestflags/steering_test.py
+++ b/bestflags/steering_test.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Steering stage unittest.
Part of the Chrome build flags optimization.
@@ -17,7 +16,6 @@ from mock_task import IdentifierMockTask
import pipeline_process
import steering
-
# Pick an integer at random.
STEERING_TEST_STAGE = -8
@@ -110,10 +108,9 @@ class SteeringTest(unittest.TestCase):
input_queue = manager.Queue()
result_queue = manager.Queue()
- steering_process = multiprocessing.Process(target=steering.Steering,
- args=(set(),
- [current_generation],
- input_queue, result_queue))
+ steering_process = multiprocessing.Process(
+ target=steering.Steering,
+ args=(set(), [current_generation], input_queue, result_queue))
steering_process.start()
# Test that each generation is processed properly. I.e., the generations are
@@ -158,10 +155,9 @@ class SteeringTest(unittest.TestCase):
input_queue = manager.Queue()
result_queue = manager.Queue()
- steering_process = multiprocessing.Process(target=steering.Steering,
- args=(steering_tasks,
- [current_generation],
- input_queue, result_queue))
+ steering_process = multiprocessing.Process(
+ target=steering.Steering,
+ args=(steering_tasks, [current_generation], input_queue, result_queue))
steering_process.start()
@@ -169,5 +165,6 @@ class SteeringTest(unittest.TestCase):
assert result_queue.get() == pipeline_process.POISONPILL
steering_process.join()
+
if __name__ == '__main__':
unittest.main()
diff --git a/bestflags/task.py b/bestflags/task.py
index ee85b1a4..f055fc75 100644
--- a/bestflags/task.py
+++ b/bestflags/task.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""A reproducing entity.
Part of the Chrome build flags optimization.
@@ -257,7 +256,8 @@ class Task(object):
for _ in range(BUILD_TRIES):
try:
# Execute the command and get the execution status/results.
- p = subprocess.Popen(command.split(), stdout=subprocess.PIPE,
+ p = subprocess.Popen(command.split(),
+ stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
@@ -311,7 +311,8 @@ class Task(object):
# Try TEST_TRIES number of times before confirming that the build fails.
for _ in range(TEST_TRIES):
try:
- p = subprocess.Popen(command.split(), stdout=subprocess.PIPE,
+ p = subprocess.Popen(command.split(),
+ stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
diff --git a/bestflags/task_test.py b/bestflags/task_test.py
index a40e4ad4..68a7bf78 100644
--- a/bestflags/task_test.py
+++ b/bestflags/task_test.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Task unittest.
Part of the Chrome build flags optimization.
@@ -170,5 +169,6 @@ class TaskTest(unittest.TestCase):
assert work_task.Done(task.TEST_STAGE)
assert work_task.Done(task.BUILD_STAGE)
+
if __name__ == '__main__':
unittest.main()
diff --git a/bestflags/testing_batch.py b/bestflags/testing_batch.py
index 7bfda09b..ffe19448 100644
--- a/bestflags/testing_batch.py
+++ b/bestflags/testing_batch.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Hill climbing unitest.
Part of the Chrome build flags optimization.
@@ -30,7 +29,6 @@ from task import BUILD_STAGE
from task import Task
from task import TEST_STAGE
-
# The number of flags be tested.
NUM_FLAGS = 5
@@ -296,9 +294,9 @@ def _TestAlgorithm(cost_func, specs, generations, best_result):
manager = multiprocessing.Manager()
input_queue = manager.Queue()
output_queue = manager.Queue()
- pp_steer = multiprocessing.Process(target=Steering,
- args=(set(), generations, output_queue,
- input_queue))
+ pp_steer = multiprocessing.Process(
+ target=Steering,
+ args=(set(), generations, output_queue, input_queue))
pp_steer.start()
# The best result of the algorithm so far.
@@ -401,8 +399,7 @@ class MockAlgorithmsTest(unittest.TestCase):
specs, MUTATION_RATE)
# Generate the initial generations.
- generation_tasks = GenerateRandomGATasks(specs, NUM_CHROMOSOMES,
- NUM_TRIALS)
+ generation_tasks = GenerateRandomGATasks(specs, NUM_CHROMOSOMES, NUM_TRIALS)
generations = [GAGeneration(generation_tasks, set([]), 0)]
# Test the algorithm.
@@ -448,5 +445,6 @@ class MockAlgorithmsTest(unittest.TestCase):
# be generated.
_TestAlgorithm(cost_function, specs, generations, cost)
+
if __name__ == '__main__':
unittest.main()
diff --git a/binary_search_tool/__init__.py b/binary_search_tool/__init__.py
index e69de29b..8b137891 100755
--- a/binary_search_tool/__init__.py
+++ b/binary_search_tool/__init__.py
@@ -0,0 +1 @@
+
diff --git a/binary_search_tool/binary_search_perforce.py b/binary_search_tool/binary_search_perforce.py
index 3bfa2b12..129f78c5 100755
--- a/binary_search_tool/binary_search_perforce.py
+++ b/binary_search_tool/binary_search_perforce.py
@@ -10,52 +10,61 @@ from utils import command_executer
from utils import logger
from utils import misc
+
def _GetP4ClientSpec(client_name, p4_paths):
- p4_string = ""
+ p4_string = ''
for p4_path in p4_paths:
- if " " not in p4_path:
- p4_string += " -a %s" % p4_path
+ if ' ' not in p4_path:
+ p4_string += ' -a %s' % p4_path
else:
- p4_string += " -a \"" + (" //" + client_name + "/").join(p4_path) + "\""
+ p4_string += " -a \"" + (' //' + client_name + '/').join(p4_path) + "\""
return p4_string
-def GetP4Command(client_name, p4_port, p4_paths, revision, checkoutdir, p4_snapshot=""):
- command = ""
+def GetP4Command(client_name,
+ p4_port,
+ p4_paths,
+ revision,
+ checkoutdir,
+ p4_snapshot=''):
+ command = ''
if p4_snapshot:
- command += "mkdir -p " + checkoutdir
+ command += 'mkdir -p ' + checkoutdir
for p4_path in p4_paths:
real_path = p4_path[1]
- if real_path.endswith("..."):
- real_path = real_path.replace("/...", "")
- command += ("; mkdir -p " + checkoutdir + "/" +
- os.path.dirname(real_path))
- command += ("&& rsync -lr " + p4_snapshot + "/" + real_path +
- " " + checkoutdir + "/" + os.path.dirname(real_path))
+ if real_path.endswith('...'):
+ real_path = real_path.replace('/...', '')
+ command += (
+ '; mkdir -p ' + checkoutdir + '/' + os.path.dirname(real_path))
+ command += ('&& rsync -lr ' + p4_snapshot + '/' + real_path + ' ' +
+ checkoutdir + '/' + os.path.dirname(real_path))
return command
- command += " export P4CONFIG=.p4config"
- command += " && mkdir -p " + checkoutdir
- command += " && cd " + checkoutdir
- command += " && cp ${HOME}/.p4config ."
- command += " && chmod u+w .p4config"
+ command += ' export P4CONFIG=.p4config'
+ command += ' && mkdir -p ' + checkoutdir
+ command += ' && cd ' + checkoutdir
+ command += ' && cp ${HOME}/.p4config .'
+ command += ' && chmod u+w .p4config'
command += " && echo \"P4PORT=" + p4_port + "\" >> .p4config"
command += " && echo \"P4CLIENT=" + client_name + "\" >> .p4config"
- command += (" && g4 client " +
- _GetP4ClientSpec(client_name, p4_paths))
- command += " && g4 sync "
- command += " && cd -"
+ command += (' && g4 client ' + _GetP4ClientSpec(client_name, p4_paths))
+ command += ' && g4 sync '
+ command += ' && cd -'
return command
+
class BinarySearchPoint:
+
def __init__(self, revision, status, tag=None):
self.revision = revision
self.status = status
self.tag = tag
+
class BinarySearcher:
+
def __init__(self):
self.sorted_list = []
self.index_log = []
@@ -66,23 +75,21 @@ class BinarySearcher:
pass
def SetSortedList(self, sorted_list):
- assert(len(sorted_list) > 0)
+ assert (len(sorted_list) > 0)
self.sorted_list = sorted_list
self.index_log = []
self.hi = len(sorted_list) - 1
self.lo = 0
self.points = {}
for i in range(len(self.sorted_list)):
- bsp = BinarySearchPoint(self.sorted_list[i], -1, "Not yet done.")
+ bsp = BinarySearchPoint(self.sorted_list[i], -1, 'Not yet done.')
self.points[i] = bsp
def SetStatus(self, status, tag=None):
- message = ("Revision: %s index: %d returned: %d" %
- (self.sorted_list[self.current],
- self.current,
- status))
+ message = ('Revision: %s index: %d returned: %d' %
+ (self.sorted_list[self.current], self.current, status))
logger.GetLogger().LogOutput(message)
- assert(status == 0 or status == 1 or status == 2)
+ assert (status == 0 or status == 1 or status == 2)
self.index_log.append(self.current)
self.status_log.append(status)
bsp = BinarySearchPoint(self.sorted_list[self.current], status, tag)
@@ -96,22 +103,20 @@ class BinarySearcher:
self.lo = self.current + 1
elif status == 1:
self.hi = self.current
- logger.GetLogger().LogOutput("lo: %d hi: %d\n" % (self.lo, self.hi))
- self.current = (self.lo + self.hi)/2
+ logger.GetLogger().LogOutput('lo: %d hi: %d\n' % (self.lo, self.hi))
+ self.current = (self.lo + self.hi) / 2
if self.lo == self.hi:
- message = ("Search complete. First bad version: %s"
- " at index: %d" %
- (self.sorted_list[self.current],
- self.lo))
+ message = ('Search complete. First bad version: %s'
+ ' at index: %d' % (self.sorted_list[self.current], self.lo))
logger.GetLogger().LogOutput(message)
return True
for index in range(self.lo, self.hi):
if index not in self.skipped_indices:
return False
- logger.GetLogger().LogOutput(
- "All skipped indices between: %d and %d\n" % (self.lo, self.hi))
+ logger.GetLogger().LogOutput('All skipped indices between: %d and %d\n' %
+ (self.lo, self.hi))
return True
# Does a better job with chromeos flakiness.
@@ -130,23 +135,23 @@ class BinarySearcher:
else:
self.current = element[1]
return
- assert len(q), "Queue should never be 0-size!"
+ assert len(q), 'Queue should never be 0-size!'
def GetNextFlakyLinear(self):
- current_hi = self.current
- current_lo = self.current
- while True:
- if current_hi < self.hi and current_hi not in self.skipped_indices:
- self.current = current_hi
- break
- if current_lo >= self.lo and current_lo not in self.skipped_indices:
- self.current = current_lo
- break
- if current_lo < self.lo and current_hi >= self.hi:
- break
-
- current_hi += 1
- current_lo -= 1
+ current_hi = self.current
+ current_lo = self.current
+ while True:
+ if current_hi < self.hi and current_hi not in self.skipped_indices:
+ self.current = current_hi
+ break
+ if current_lo >= self.lo and current_lo not in self.skipped_indices:
+ self.current = current_lo
+ break
+ if current_lo < self.lo and current_hi >= self.hi:
+ break
+
+ current_hi += 1
+ current_lo -= 1
def GetNext(self):
self.current = (self.hi + self.lo) / 2
@@ -155,52 +160,56 @@ class BinarySearcher:
self.GetNextFlakyBinary()
# TODO: Add an estimated time remaining as well.
- message = ("Estimated tries: min: %d max: %d\n" %
+ message = ('Estimated tries: min: %d max: %d\n' %
(1 + math.log(self.hi - self.lo, 2),
self.hi - self.lo - len(self.skipped_indices)))
logger.GetLogger().LogOutput(message)
- message = ("lo: %d hi: %d current: %d version: %s\n" %
- (self.lo, self.hi, self.current,
- self.sorted_list[self.current]))
+ message = ('lo: %d hi: %d current: %d version: %s\n' %
+ (self.lo, self.hi, self.current, self.sorted_list[self.current]))
logger.GetLogger().LogOutput(message)
logger.GetLogger().LogOutput(str(self))
return self.sorted_list[self.current]
def SetLoRevision(self, lo_revision):
self.lo = self.sorted_list.index(lo_revision)
+
def SetHiRevision(self, hi_revision):
self.hi = self.sorted_list.index(hi_revision)
+
def GetAllPoints(self):
- to_return = ""
+ to_return = ''
for i in range(len(self.sorted_list)):
- to_return += ("%d %d %s\n" %
- (self.points[i].status,
- i,
- self.points[i].revision))
+ to_return += ('%d %d %s\n' % (self.points[i].status, i,
+ self.points[i].revision))
return to_return
+
def __str__(self):
- to_return = ""
- to_return += "Current: %d\n" % self.current
- to_return += str(self.index_log) + "\n"
+ to_return = ''
+ to_return += 'Current: %d\n' % self.current
+ to_return += str(self.index_log) + '\n'
revision_log = []
for index in self.index_log:
revision_log.append(self.sorted_list[index])
- to_return += str(revision_log) + "\n"
- to_return += str(self.status_log) + "\n"
- to_return += "Skipped indices:\n"
- to_return += str(self.skipped_indices) + "\n"
+ to_return += str(revision_log) + '\n'
+ to_return += str(self.status_log) + '\n'
+ to_return += 'Skipped indices:\n'
+ to_return += str(self.skipped_indices) + '\n'
to_return += self.GetAllPoints()
return to_return
+
class RevisionInfo:
+
def __init__(self, date, client, description):
self.date = date
self.client = client
self.description = description
self.status = -1
+
class VCSBinarySearcher:
+
def __init__(self):
self.bs = BinarySearcher()
self.rim = {}
@@ -208,28 +217,37 @@ class VCSBinarySearcher:
self.checkout_dir = None
self.current_revision = None
pass
+
def Initialize(self):
pass
+
def GetNextRevision(self):
pass
+
def CheckoutRevision(self, revision):
pass
+
def SetStatus(self, status):
pass
+
def Cleanup(self):
pass
+
def SetGoodRevision(self, revision):
if revision is None:
return
- assert(revision in self.bs.sorted_list)
+ assert (revision in self.bs.sorted_list)
self.bs.SetLoRevision(revision)
+
def SetBadRevision(self, revision):
if revision is None:
return
- assert(revision in self.bs.sorted_list)
+ assert (revision in self.bs.sorted_list)
self.bs.SetHiRevision(revision)
+
class P4BinarySearcher(VCSBinarySearcher):
+
def __init__(self, p4_port, p4_paths, test_command):
VCSBinarySearcher.__init__(self)
self.p4_port = p4_port
@@ -237,50 +255,56 @@ class P4BinarySearcher(VCSBinarySearcher):
self.test_command = test_command
self.checkout_dir = tempfile.mkdtemp()
self.ce = command_executer.GetCommandExecuter()
- self.client_name = "binary-searcher-$HOSTNAME-$USER"
- self.job_log_root = "/home/asharif/www/coreboot_triage/"
+ self.client_name = 'binary-searcher-$HOSTNAME-$USER'
+ self.job_log_root = '/home/asharif/www/coreboot_triage/'
+
def Initialize(self, good_revision=None, bad_revision=None):
self.Cleanup()
- command = GetP4Command(self.client_name, self.p4_port, self.p4_paths, 1, self.checkout_dir)
+ command = GetP4Command(self.client_name, self.p4_port, self.p4_paths, 1,
+ self.checkout_dir)
self.ce.RunCommand(command)
- command = "cd %s && g4 changes ..." % self.checkout_dir
+ command = 'cd %s && g4 changes ...' % self.checkout_dir
[retval, out, err] = self.ce.RunCommand(command, True)
- self.changes = re.findall("Change (\d+)", out)
- change_infos = re.findall("Change (\d+) on ([\d/]+) by ([^\s]+) ('[^']*')", out)
+ self.changes = re.findall('Change (\d+)', out)
+ change_infos = re.findall("Change (\d+) on ([\d/]+) by ([^\s]+) ('[^']*')",
+ out)
for change_info in change_infos:
ri = RevisionInfo(change_info[1], change_info[2], change_info[3])
self.rim[change_info[0]] = ri
# g4 gives changes in reverse chronological order.
self.changes.reverse()
self.bs.SetSortedList(self.changes)
+
def SetStatus(self, status):
self.rim[self.current_revision].status = status
return self.bs.SetStatus(status)
+
def GetNextRevision(self):
next_revision = self.bs.GetNext()
self.current_revision = next_revision
return next_revision
+
def CleanupCLs(self):
- if not os.path.isfile(self.checkout_dir + "/.p4config"):
- command = "cd %s" % self.checkout_dir
- command += " && cp ${HOME}/.p4config ."
+ if not os.path.isfile(self.checkout_dir + '/.p4config'):
+ command = 'cd %s' % self.checkout_dir
+ command += ' && cp ${HOME}/.p4config .'
command += " && echo \"P4PORT=" + self.p4_port + "\" >> .p4config"
command += " && echo \"P4CLIENT=" + self.client_name + "\" >> .p4config"
self.ce.RunCommand(command)
- command = "cd %s" % self.checkout_dir
- command += "; g4 changes -c %s" % self.client_name
+ command = 'cd %s' % self.checkout_dir
+ command += '; g4 changes -c %s' % self.client_name
[retval, out, err] = self.ce.RunCommand(command, True)
- changes = re.findall("Change (\d+)", out)
+ changes = re.findall('Change (\d+)', out)
if len(changes) != 0:
- command = "cd %s" % self.checkout_dir
+ command = 'cd %s' % self.checkout_dir
for change in changes:
- command += "; g4 revert -c %s" % change
+ command += '; g4 revert -c %s' % change
self.ce.RunCommand(command)
def CleanupClient(self):
- command = "cd %s" % self.checkout_dir
- command += "; g4 revert ..."
- command += "; g4 client -d %s" % self.client_name
+ command = 'cd %s' % self.checkout_dir
+ command += '; g4 revert ...'
+ command += '; g4 client -d %s' % self.client_name
self.ce.RunCommand(command)
def Cleanup(self):
@@ -288,25 +312,20 @@ class P4BinarySearcher(VCSBinarySearcher):
self.CleanupClient()
def __str__(self):
- to_return = ""
+ to_return = ''
for change in self.changes:
ri = self.rim[change]
if ri.status == -1:
- to_return = "%s\t%d\n" % (change, ri.status)
+ to_return = '%s\t%d\n' % (change, ri.status)
else:
- to_return += ("%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\n" %
- (change,
- ri.status,
- ri.date,
- ri.client,
- ri.description,
- self.job_log_root + change + ".cmd",
- self.job_log_root + change + ".out",
- self.job_log_root + change + ".err"))
+ to_return += ('%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\n' %
+ (change, ri.status, ri.date, ri.client, ri.description,
+ self.job_log_root + change + '.cmd',
+ self.job_log_root + change + '.out',
+ self.job_log_root + change + '.err'))
return to_return
-
class P4GCCBinarySearcher(P4BinarySearcher):
# TODO: eventually get these patches from g4 instead of creating them manually
def HandleBrokenCLs(self, current_revision):
@@ -315,30 +334,33 @@ class P4GCCBinarySearcher(P4BinarySearcher):
problematic_ranges.append([44528, 44539])
problematic_ranges.append([44528, 44760])
problematic_ranges.append([44335, 44882])
- command = "pwd"
+ command = 'pwd'
for pr in problematic_ranges:
if cr in range(pr[0], pr[1]):
- patch_file = "/home/asharif/triage_tool/%d-%d.patch" % (pr[0], pr[1])
+ patch_file = '/home/asharif/triage_tool/%d-%d.patch' % (pr[0], pr[1])
f = open(patch_file)
patch = f.read()
f.close()
- files = re.findall("--- (//.*)", patch)
- command += "; cd %s" % self.checkout_dir
+ files = re.findall('--- (//.*)', patch)
+ command += '; cd %s' % self.checkout_dir
for f in files:
- command += "; g4 open %s" % f
- command += "; patch -p2 < %s" % patch_file
+ command += '; g4 open %s' % f
+ command += '; patch -p2 < %s' % patch_file
self.current_ce.RunCommand(command)
def CheckoutRevision(self, current_revision):
job_logger = logger.Logger(self.job_log_root,
current_revision,
- True, subdir="")
+ True,
+ subdir='')
self.current_ce = command_executer.GetCommandExecuter(job_logger)
self.CleanupCLs()
# Change the revision of only the gcc part of the toolchain.
- command = ("cd %s/gcctools/google_vendor_src_branch/gcc && g4 revert ...; g4 sync @%s" %
- (self.checkout_dir, current_revision))
+ command = (
+ 'cd %s/gcctools/google_vendor_src_branch/gcc && g4 revert ...; g4 sync'
+ ' @%s'
+ % (self.checkout_dir, current_revision))
self.current_ce.RunCommand(command)
self.HandleBrokenCLs(current_revision)
@@ -347,30 +369,37 @@ class P4GCCBinarySearcher(P4BinarySearcher):
def Main(argv):
"""The main function."""
# Common initializations
-### command_executer.InitCommandExecuter(True)
+ ### command_executer.InitCommandExecuter(True)
ce = command_executer.GetCommandExecuter()
rootdir = misc.GetRoot(sys.argv[0])[0]
parser = optparse.OptionParser()
- parser.add_option("-n", "--num_tries", dest="num_tries",
- default="100",
- help="Number of tries.")
- parser.add_option("-g", "--good_revision", dest="good_revision",
- help="Last known good revision.")
- parser.add_option("-b", "--bad_revision", dest="bad_revision",
- help="Last known bad revision.")
- parser.add_option("-s",
- "--script",
- dest="script",
- help="Script to run for every version.")
+ parser.add_option('-n',
+ '--num_tries',
+ dest='num_tries',
+ default='100',
+ help='Number of tries.')
+ parser.add_option('-g',
+ '--good_revision',
+ dest='good_revision',
+ help='Last known good revision.')
+ parser.add_option('-b',
+ '--bad_revision',
+ dest='bad_revision',
+ help='Last known bad revision.')
+ parser.add_option('-s',
+ '--script',
+ dest='script',
+ help='Script to run for every version.')
[options, args] = parser.parse_args(argv)
# First get all revisions
-### p4_paths = ["//depot2/gcctools/google_vendor_src_branch/gcc/gcc-4.4.3/README.google"]
- p4_paths = ["//depot2/gcctools/google_vendor_src_branch/gcc/gcc-4.4.3/...",
- "//depot2/gcctools/google_vendor_src_branch/binutils/binutils-2.20.1-mobile/...",
- "//depot2/gcctools/google_vendor_src_branch/binutils/binutils-20100303/..."]
- p4gccbs = P4GCCBinarySearcher("perforce2:2666", p4_paths, "")
-
+ ### p4_paths = ["//depot2/gcctools/google_vendor_src_branch/gcc/gcc-4.4.3/README.google"]
+ p4_paths = [
+ '//depot2/gcctools/google_vendor_src_branch/gcc/gcc-4.4.3/...',
+ '//depot2/gcctools/google_vendor_src_branch/binutils/binutils-2.20.1-mobile/...',
+ '//depot2/gcctools/google_vendor_src_branch/binutils/binutils-20100303/...'
+ ]
+ p4gccbs = P4GCCBinarySearcher('perforce2:2666', p4_paths, '')
# Main loop:
terminated = False
@@ -387,9 +416,9 @@ def Main(argv):
# Now run command to get the status
ce = command_executer.GetCommandExecuter()
- command = "%s %s" % (script, p4gccbs.checkout_dir)
+ command = '%s %s' % (script, p4gccbs.checkout_dir)
status = ce.RunCommand(command)
- message = ("Revision: %s produced: %d status\n" %
+ message = ('Revision: %s produced: %d status\n' %
(current_revision, status))
logger.GetLogger().LogOutput(message)
terminated = p4gccbs.SetStatus(status)
@@ -397,14 +426,14 @@ def Main(argv):
logger.GetLogger().LogOutput(str(p4gccbs))
if not terminated:
- logger.GetLogger().LogOutput("Tries: %d expired." % num_tries)
+ logger.GetLogger().LogOutput('Tries: %d expired.' % num_tries)
logger.GetLogger().LogOutput(str(p4gccbs.bs))
except (KeyboardInterrupt, SystemExit):
- logger.GetLogger().LogOutput("Cleaning up...")
+ logger.GetLogger().LogOutput('Cleaning up...')
finally:
logger.GetLogger().LogOutput(str(p4gccbs.bs))
status = p4gccbs.Cleanup()
-if __name__ == "__main__":
+if __name__ == '__main__':
Main(sys.argv)
diff --git a/binary_search_tool/binary_search_state.py b/binary_search_tool/binary_search_state.py
index 2f90f78d..274f2c6b 100755
--- a/binary_search_tool/binary_search_state.py
+++ b/binary_search_tool/binary_search_state.py
@@ -8,12 +8,12 @@ import tempfile
# Programtically adding utils python path to PYTHONPATH
if os.path.isabs(sys.argv[0]):
- utils_pythonpath = os.path.abspath(
- '{0}/..'.format(os.path.dirname(sys.argv[0])))
+ utils_pythonpath = os.path.abspath('{0}/..'.format(os.path.dirname(sys.argv[
+ 0])))
else:
wdir = os.getcwd()
- utils_pythonpath = os.path.abspath(
- '{0}/{1}/..'.format(wdir, os.path.dirname(sys.argv[0])))
+ utils_pythonpath = os.path.abspath('{0}/{1}/..'.format(wdir, os.path.dirname(
+ sys.argv[0])))
sys.path.append(utils_pythonpath)
# Now we do import from utils
from utils import command_executer
@@ -21,10 +21,11 @@ from utils import logger
import binary_search_perforce
-STATE_FILE = "%s.state" % sys.argv[0]
+STATE_FILE = '%s.state' % sys.argv[0]
class BinarySearchState(object):
+
def __init__(self, get_initial_items, switch_to_good, switch_to_bad,
test_script, incremental, prune, iterations, prune_iterations,
verify_level, file_args):
@@ -49,36 +50,38 @@ class BinarySearchState(object):
def SwitchToGood(self, item_list):
if self.incremental:
- self.l.LogOutput(
- "Incremental set. Wanted to switch %s to good" % str(item_list))
+ self.l.LogOutput('Incremental set. Wanted to switch %s to good' %
+ str(item_list))
incremental_items = [
- item for item in item_list if item not in self.currently_good_items]
+ item for item in item_list if item not in self.currently_good_items
+ ]
item_list = incremental_items
- self.l.LogOutput(
- "Incremental set. Actually switching %s to good" % str(item_list))
+ self.l.LogOutput('Incremental set. Actually switching %s to good' %
+ str(item_list))
if not item_list:
return
- self.l.LogOutput("Switching %s to good" % str(item_list))
+ self.l.LogOutput('Switching %s to good' % str(item_list))
self.RunSwitchScript(self.switch_to_good, item_list)
self.currently_good_items = self.currently_good_items.union(set(item_list))
self.currently_bad_items.difference_update(set(item_list))
def SwitchToBad(self, item_list):
if self.incremental:
- self.l.LogOutput(
- "Incremental set. Wanted to switch %s to bad" % str(item_list))
+ self.l.LogOutput('Incremental set. Wanted to switch %s to bad' %
+ str(item_list))
incremental_items = [
- item for item in item_list if item not in self.currently_bad_items]
+ item for item in item_list if item not in self.currently_bad_items
+ ]
item_list = incremental_items
- self.l.LogOutput(
- "Incremental set. Actually switching %s to bad" % str(item_list))
+ self.l.LogOutput('Incremental set. Actually switching %s to bad' %
+ str(item_list))
if not item_list:
return
- self.l.LogOutput("Switching %s to bad" % str(item_list))
+ self.l.LogOutput('Switching %s to bad' % str(item_list))
self.RunSwitchScript(self.switch_to_bad, item_list)
self.currently_bad_items = self.currently_bad_items.union(set(item_list))
self.currently_good_items.difference_update(set(item_list))
@@ -86,14 +89,14 @@ class BinarySearchState(object):
def RunSwitchScript(self, switch_script, item_list):
if self.file_args:
temp_file = tempfile.mktemp()
- f = open(temp_file, "wb")
- f.write("\n".join(item_list))
+ f = open(temp_file, 'wb')
+ f.write('\n'.join(item_list))
f.close()
- command = "%s %s" % (switch_script, temp_file)
+ command = '%s %s' % (switch_script, temp_file)
else:
- command = "%s %s" % (switch_script, " ".join(item_list))
+ command = '%s %s' % (switch_script, ' '.join(item_list))
ret = self.ce.RunCommand(command)
- assert ret == 0, "Switch script %s returned %d" % (switch_script, ret)
+ assert ret == 0, 'Switch script %s returned %d' % (switch_script, ret)
def TestScript(self):
command = self.test_script
@@ -101,15 +104,15 @@ class BinarySearchState(object):
def DoVerify(self):
for _ in range(int(self.verify_level)):
- self.l.LogOutput("Resetting all items to good to verify.")
+ self.l.LogOutput('Resetting all items to good to verify.')
self.SwitchToGood(self.all_items)
status = self.TestScript()
- assert status == 0, "When reset_to_good, status should be 0."
+ assert status == 0, 'When reset_to_good, status should be 0.'
- self.l.LogOutput("Resetting all items to bad to verify.")
+ self.l.LogOutput('Resetting all items to bad to verify.')
self.SwitchToBad(self.all_items)
status = self.TestScript()
- assert status == 1, "When reset_to_bad, status should be 1."
+ assert status == 1, 'When reset_to_bad, status should be 1.'
def DoSearch(self):
num_bad_items_history = []
@@ -120,14 +123,14 @@ class BinarySearchState(object):
if not terminated:
break
if not self.prune:
- self.l.LogOutput("Not continuning further, --prune is not set")
+ self.l.LogOutput('Not continuning further, --prune is not set')
break
# Prune is set.
prune_index = self.bs.current
if prune_index == len(self.all_items) - 1:
- self.l.LogOutput("First bad item is the last item. Breaking.")
- self.l.LogOutput("Only bad item is: %s" % self.all_items[-1])
+ self.l.LogOutput('First bad item is the last item. Breaking.')
+ self.l.LogOutput('Only bad item is: %s' % self.all_items[-1])
break
num_bad_items = len(self.all_items) - prune_index
@@ -135,12 +138,11 @@ class BinarySearchState(object):
if (num_bad_items_history[-num_bad_items:] ==
[num_bad_items for _ in range(num_bad_items)]):
- self.l.LogOutput("num_bad_items_history: %s for past %d iterations. "
- "Breaking." %
- (str(num_bad_items_history),
- num_bad_items))
- self.l.LogOutput(
- "Bad items are: %s" % " ".join(self.all_items[prune_index:]))
+ self.l.LogOutput('num_bad_items_history: %s for past %d iterations. '
+ 'Breaking.' % (str(num_bad_items_history),
+ num_bad_items))
+ self.l.LogOutput('Bad items are: %s' %
+ ' '.join(self.all_items[prune_index:]))
break
new_all_items = list(self.all_items)
@@ -150,9 +152,8 @@ class BinarySearchState(object):
if prune_index:
new_all_items = new_all_items[prune_index - 1:]
- self.l.LogOutput("Old list: %s. New list: %s" %
- (str(self.all_items),
- str(new_all_items)))
+ self.l.LogOutput('Old list: %s. New list: %s' % (str(self.all_items),
+ str(new_all_items)))
# FIXME: Do we need to Convert the currently good items to bad
self.PopulateItemsUsingList(new_all_items)
@@ -171,9 +172,9 @@ class BinarySearchState(object):
terminated = self.bs.SetStatus(status)
if terminated:
- self.l.LogOutput("Terminated!")
+ self.l.LogOutput('Terminated!')
if not terminated:
- self.l.LogOutput("Ran out of iterations searching...")
+ self.l.LogOutput('Ran out of iterations searching...')
self.l.LogOutput(str(self))
return terminated
@@ -192,8 +193,8 @@ class BinarySearchState(object):
self.l = None
self.ce = None
# TODO Implement save/restore
-### return
- f = open(STATE_FILE, "wb")
+ ### return
+ f = open(STATE_FILE, 'wb')
pickle.dump(self, f)
f.close()
@@ -217,24 +218,24 @@ class BinarySearchState(object):
border_item = self.bs.GetNext()
index = self.all_items.index(border_item)
- next_bad_items = self.all_items[:index+1]
- next_good_items = self.all_items[index+1:]
+ next_bad_items = self.all_items[:index + 1]
+ next_good_items = self.all_items[index + 1:]
return [next_bad_items, next_good_items]
def __str__(self):
- ret = ""
- ret += "all: %s\n" % str(self.all_items)
- ret += "currently_good: %s\n" % str(self.currently_good_items)
- ret += "currently_bad: %s\n" % str(self.currently_bad_items)
+ ret = ''
+ ret += 'all: %s\n' % str(self.all_items)
+ ret += 'currently_good: %s\n' % str(self.currently_good_items)
+ ret += 'currently_bad: %s\n' % str(self.currently_bad_items)
ret += str(self.bs)
return ret
def _CanonicalizeScript(script_name):
script_name = os.path.expanduser(script_name)
- if not script_name.startswith("/"):
- return os.path.join(".", script_name)
+ if not script_name.startswith('/'):
+ return os.path.join('.', script_name)
def Main(argv):
@@ -242,60 +243,60 @@ def Main(argv):
# Common initializations
parser = optparse.OptionParser()
- parser.add_option("-n",
- "--iterations",
- dest="iterations",
- help="Number of iterations to try in the search.",
- default="50")
- parser.add_option("-i",
- "--get_initial_items",
- dest="get_initial_items",
- help="Script to run to get the initial objects.")
- parser.add_option("-g",
- "--switch_to_good",
- dest="switch_to_good",
- help="Script to run to switch to good.")
- parser.add_option("-b",
- "--switch_to_bad",
- dest="switch_to_bad",
- help="Script to run to switch to bad.")
- parser.add_option("-t",
- "--test_script",
- dest="test_script",
- help=("Script to run to test the "
- "output after packages are built."))
- parser.add_option("-p",
- "--prune",
- dest="prune",
- action="store_true",
+ parser.add_option('-n',
+ '--iterations',
+ dest='iterations',
+ help='Number of iterations to try in the search.',
+ default='50')
+ parser.add_option('-i',
+ '--get_initial_items',
+ dest='get_initial_items',
+ help='Script to run to get the initial objects.')
+ parser.add_option('-g',
+ '--switch_to_good',
+ dest='switch_to_good',
+ help='Script to run to switch to good.')
+ parser.add_option('-b',
+ '--switch_to_bad',
+ dest='switch_to_bad',
+ help='Script to run to switch to bad.')
+ parser.add_option('-t',
+ '--test_script',
+ dest='test_script',
+ help=('Script to run to test the '
+ 'output after packages are built.'))
+ parser.add_option('-p',
+ '--prune',
+ dest='prune',
+ action='store_true',
default=False,
- help=("Script to run to test the output after "
- "packages are built."))
- parser.add_option("-c",
- "--noincremental",
- dest="noincremental",
- action="store_true",
+ help=('Script to run to test the output after '
+ 'packages are built.'))
+ parser.add_option('-c',
+ '--noincremental',
+ dest='noincremental',
+ action='store_true',
default=False,
- help="Do not propagate good/bad changes incrementally.")
- parser.add_option("-f",
- "--file_args",
- dest="file_args",
- action="store_true",
+ help='Do not propagate good/bad changes incrementally.')
+ parser.add_option('-f',
+ '--file_args',
+ dest='file_args',
+ action='store_true',
default=False,
- help="Use a file to pass arguments to scripts.")
- parser.add_option("-v",
- "--verify_level",
- dest="verify_level",
- default="1",
- help=("Check binary search assumptions N times "
- "before starting."))
- parser.add_option("-N",
- "--prune_iterations",
- dest="prune_iterations",
- help="Number of prune iterations to try in the search.",
- default="100")
-
- logger.GetLogger().LogOutput(" ".join(argv))
+ help='Use a file to pass arguments to scripts.')
+ parser.add_option('-v',
+ '--verify_level',
+ dest='verify_level',
+ default='1',
+ help=('Check binary search assumptions N times '
+ 'before starting.'))
+ parser.add_option('-N',
+ '--prune_iterations',
+ dest='prune_iterations',
+ help='Number of prune iterations to try in the search.',
+ default='100')
+
+ logger.GetLogger().LogOutput(' '.join(argv))
[options, _] = parser.parse_args(argv)
if not (options.get_initial_items and options.switch_to_good and
@@ -326,9 +327,10 @@ def Main(argv):
bss.DoSearch()
except (KeyboardInterrupt, SystemExit):
- print "C-c pressed"
+ print 'C-c pressed'
bss.SaveState()
return 0
-if __name__ == "__main__":
+
+if __name__ == '__main__':
sys.exit(Main(sys.argv))
diff --git a/binary_search_tool/cros_pkg/cros_pkg_create_cleanup_script.py b/binary_search_tool/cros_pkg/cros_pkg_create_cleanup_script.py
index bed7106d..d2158c87 100755
--- a/binary_search_tool/cros_pkg/cros_pkg_create_cleanup_script.py
+++ b/binary_search_tool/cros_pkg/cros_pkg_create_cleanup_script.py
@@ -15,12 +15,13 @@ import sys
def Usage(parser, msg):
- print "ERROR: " + msg
- parser.print_help()
- sys.exit(1)
+ print 'ERROR: ' + msg
+ parser.print_help()
+ sys.exit(1)
+
def Main(argv):
- """
+ """
The script cros_pkg_setup.sh make two main changes that need to be
undone: 1). It creates a soft link making /build/${board} point to
/build/${board}.work, and 2). It saves a copy of the build_image
@@ -29,76 +30,83 @@ def Main(argv):
it was a real tree or a soft link. If it was soft link, it saved the old
value of the link, then deleted it and created the new link. If it was a
real tree, it renamed the tree to /build/${board}.save, and then created the
- new soft link. If the /build/${board} did not previously exist, then it just
+ new soft link. If the /build/${board} did not previously exist, then it
+ just
created the new soft link.
This function takes arguments that tell it exactly what cros_pkg_setup.sh
actually did, then generates a script to undo those exact changes.
"""
- parser = argparse.ArgumentParser()
- parser.add_argument("--board", dest="board", required=True,
- help="Chromeos board for packages/image.")
-
- parser.add_argument("--old_tree_missing", dest="tree_existed",
- action="store_false",
- help="Did /build/${BOARD} exist.", default=True)
-
- parser.add_argument("--renamed_tree", dest="renamed_tree",
- action="store_true",
- help="Was /build/${BOARD} saved & renamed.",
- default=False)
-
- parser.add_argument("--old_link", dest="old_link",
- help=("The original build tree soft link."))
-
- options = parser.parse_args(argv[1:])
-
- if options.old_link or options.renamed_tree:
- if not options.tree_existed:
- Usage(parser, "If --tree_existed is False, cannot have "
- "--renamed_tree or --old_link")
-
- if options.old_link and options.renamed_tree:
- Usage(parser, "--old_link and --renamed_tree are incompatible options.")
-
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--board',
+ dest='board',
+ required=True,
+ help='Chromeos board for packages/image.')
+
+ parser.add_argument('--old_tree_missing',
+ dest='tree_existed',
+ action='store_false',
+ help='Did /build/${BOARD} exist.',
+ default=True)
+
+ parser.add_argument('--renamed_tree',
+ dest='renamed_tree',
+ action='store_true',
+ help='Was /build/${BOARD} saved & renamed.',
+ default=False)
+
+ parser.add_argument('--old_link',
+ dest='old_link',
+ help=('The original build tree soft link.'))
+
+ options = parser.parse_args(argv[1:])
+
+ if options.old_link or options.renamed_tree:
+ if not options.tree_existed:
+ Usage(parser, 'If --tree_existed is False, cannot have '
+ '--renamed_tree or --old_link')
+
+ if options.old_link and options.renamed_tree:
+ Usage(parser, '--old_link and --renamed_tree are incompatible options.')
+
+ if options.tree_existed:
+ if not options.old_link and not options.renamed_tree:
+ Usage(parser, 'If --tree_existed is True, then must have either '
+ '--old_link or --renamed_tree')
+
+ out_filename = 'cros_pkg_' + options.board + '_cleanup.sh'
+
+ with open(out_filename, 'w') as out_file:
+ out_file.write('#!/bin/bash\n\n')
+ # First, remove the 'new' soft link.
+ out_file.write('sudo rm /build/%s\n' % options.board)
if options.tree_existed:
- if not options.old_link and not options.renamed_tree:
- Usage(parser, "If --tree_existed is True, then must have either "
- "--old_link or --renamed_tree")
-
- out_filename = "cros_pkg_" + options.board + "_cleanup.sh"
-
- with open(out_filename, "w") as out_file:
- out_file.write("#!/bin/bash\n\n")
- # First, remove the 'new' soft link.
- out_file.write("sudo rm /build/%s\n" % options.board)
- if options.tree_existed:
- if options.renamed_tree:
- # Old build tree existed and was a real tree, so it got
- # renamed. Move the renamed tree back to the original tree.
- out_file.write("sudo mv /build/%s.save /build/%s\n"
- % (options.board, options.board))
- else:
- # Old tree existed and was already a soft link. Re-create the
- # original soft link.
- original_link = options.old_link
- if original_link[0] == "'":
- original_link = original_link[1:]
- if original_link[-1] == "'":
- original_link = original_link[:-1]
- out_file.write("sudo ln -s %s /build/%s\n" % (original_link,
- options.board))
- out_file.write("\n")
- # Restore the original saved version of build_image script.
- out_file.write("mv ~/trunk/src/scripts/build_image.save "
- "~/trunk/src/scripts/build_image\n\n")
- # Remove cros_pkg_common.sh file
- out_file.write("rm cros_pkg_common.sh\n")
-
- return 0
-
-
-if __name__ == "__main__":
- retval = Main(sys.argv)
- sys.exit(retval)
+ if options.renamed_tree:
+ # Old build tree existed and was a real tree, so it got
+ # renamed. Move the renamed tree back to the original tree.
+ out_file.write('sudo mv /build/%s.save /build/%s\n' %
+ (options.board, options.board))
+ else:
+ # Old tree existed and was already a soft link. Re-create the
+ # original soft link.
+ original_link = options.old_link
+ if original_link[0] == "'":
+ original_link = original_link[1:]
+ if original_link[-1] == "'":
+ original_link = original_link[:-1]
+ out_file.write('sudo ln -s %s /build/%s\n' % (original_link,
+ options.board))
+ out_file.write('\n')
+ # Restore the original saved version of build_image script.
+ out_file.write('mv ~/trunk/src/scripts/build_image.save '
+ '~/trunk/src/scripts/build_image\n\n')
+ # Remove cros_pkg_common.sh file
+ out_file.write('rm cros_pkg_common.sh\n')
+
+ return 0
+
+
+if __name__ == '__main__':
+ retval = Main(sys.argv)
+ sys.exit(retval)
diff --git a/binary_search_tool/cros_pkg/cros_pkg_undo_eclean.py b/binary_search_tool/cros_pkg/cros_pkg_undo_eclean.py
index a5c612c5..1600d2ce 100755
--- a/binary_search_tool/cros_pkg/cros_pkg_undo_eclean.py
+++ b/binary_search_tool/cros_pkg/cros_pkg_undo_eclean.py
@@ -13,28 +13,30 @@
import sys
import os
+
def Main(args):
- if args:
- filename = args[0]
- if not os.path.exists(filename):
- return 1
- else:
- return 1
-
- outname = filename + ".edited"
- with open(filename, "r") as input_file:
- lines = input_file.readlines()
- with open(outname, "w") as out_file:
- for line in lines:
- if line.find("eclean") >= 0:
- out_line = "# " + line
- else:
- out_line = line
- out_file.write(out_line)
-
- return 0
-
-if __name__ == "__main__":
- retval = Main(sys.argv[1:])
- sys.exit(retval)
+ if args:
+ filename = args[0]
+ if not os.path.exists(filename):
+ return 1
+ else:
+ return 1
+
+ outname = filename + '.edited'
+ with open(filename, 'r') as input_file:
+ lines = input_file.readlines()
+ with open(outname, 'w') as out_file:
+ for line in lines:
+ if line.find('eclean') >= 0:
+ out_line = '# ' + line
+ else:
+ out_line = line
+ out_file.write(out_line)
+
+ return 0
+
+
+if __name__ == '__main__':
+ retval = Main(sys.argv[1:])
+ sys.exit(retval)
diff --git a/binary_search_tool/test/__init__.py b/binary_search_tool/test/__init__.py
index e69de29b..8b137891 100755
--- a/binary_search_tool/test/__init__.py
+++ b/binary_search_tool/test/__init__.py
@@ -0,0 +1 @@
+
diff --git a/binary_search_tool/test/binary_search_tool_tester.py b/binary_search_tool/test/binary_search_tool_tester.py
index 91b5845d..3b8e44ce 100755
--- a/binary_search_tool/test/binary_search_tool_tester.py
+++ b/binary_search_tool/test/binary_search_tool_tester.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
# Copyright 2012 Google Inc. All Rights Reserved.
-
"""Tests for bisecting tool."""
__author__ = 'shenhan@google.com (Han Shen)'
@@ -32,15 +31,13 @@ class BisectingUtilsTest(unittest.TestCase):
"""Cleanup temp files."""
os.remove(common.OBJECTS_FILE)
os.remove(common.WORKING_SET_FILE)
- print 'Deleted "{0}" and "{1}"'.format(
- common.OBJECTS_FILE, common.WORKING_SET_FILE)
+ print 'Deleted "{0}" and "{1}"'.format(common.OBJECTS_FILE,
+ common.WORKING_SET_FILE)
def runTest(self):
- args = ['--get_initial_items', './gen_init_list.py',
- '--switch_to_good', './switch_to_good.py',
- '--switch_to_bad', './switch_to_bad.py',
- '--test_script', './is_good.py',
- '--prune', '--file_args']
+ args = ['--get_initial_items', './gen_init_list.py', '--switch_to_good',
+ './switch_to_good.py', '--switch_to_bad', './switch_to_bad.py',
+ '--test_script', './is_good.py', '--prune', '--file_args']
binary_search_state.Main(args)
_, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
diff --git a/binary_search_tool/test/common.py b/binary_search_tool/test/common.py
index c0e1a3b3..cc0a02e4 100755
--- a/binary_search_tool/test/common.py
+++ b/binary_search_tool/test/common.py
@@ -5,6 +5,7 @@ DEFAULT_BAD_OBJECT_NUMBER = 23
OBJECTS_FILE = 'objects.txt'
WORKING_SET_FILE = 'working_set.txt'
+
def ReadWorkingSet():
working_set = []
f = open(WORKING_SET_FILE, 'r')
diff --git a/binary_search_tool/test/gen_init_list.py b/binary_search_tool/test/gen_init_list.py
index 4ac5da83..02a111ab 100755
--- a/binary_search_tool/test/gen_init_list.py
+++ b/binary_search_tool/test/gen_init_list.py
@@ -9,8 +9,8 @@ import common
def Main():
ce = command_executer.GetCommandExecuter()
- _, l, _ = ce.RunCommandWOutput(
- 'cat {0} | wc -l'.format(common.OBJECTS_FILE), print_to_console=False)
+ _, l, _ = ce.RunCommandWOutput('cat {0} | wc -l'.format(common.OBJECTS_FILE),
+ print_to_console=False)
for i in range(0, int(l)):
print i
diff --git a/binary_search_tool/test/gen_obj.py b/binary_search_tool/test/gen_obj.py
index 3d1e32f9..f817049f 100755
--- a/binary_search_tool/test/gen_obj.py
+++ b/binary_search_tool/test/gen_obj.py
@@ -25,10 +25,14 @@ def Main(argv):
0 always.
"""
parser = optparse.OptionParser()
- parser.add_option('-n', '--obj_num', dest='obj_num',
+ parser.add_option('-n',
+ '--obj_num',
+ dest='obj_num',
default=common.DEFAULT_OBJECT_NUMBER,
help=('Number of total objects.'))
- parser.add_option('-b', '--bad_obj_num', dest='bad_obj_num',
+ parser.add_option('-b',
+ '--bad_obj_num',
+ dest='bad_obj_num',
default=common.DEFAULT_BAD_OBJECT_NUMBER,
help=('Number of bad objects. Must be great than or equal '
'to zero and less than total object number.'))
@@ -39,8 +43,7 @@ def Main(argv):
bad_to_gen = int(options.bad_obj_num)
obj_list = []
for i in range(obj_num):
- if (bad_to_gen > 0 and
- random.randint(1, obj_num) <= bad_obj_num):
+ if (bad_to_gen > 0 and random.randint(1, obj_num) <= bad_obj_num):
obj_list.append(1)
bad_to_gen -= 1
else:
diff --git a/binary_search_tool/test/is_good.py b/binary_search_tool/test/is_good.py
index 60b2b50d..a420aa59 100755
--- a/binary_search_tool/test/is_good.py
+++ b/binary_search_tool/test/is_good.py
@@ -17,4 +17,3 @@ def Main():
if __name__ == '__main__':
retval = Main()
sys.exit(retval)
-
diff --git a/build_benchmarks.py b/build_benchmarks.py
index 906f0ccb..5734eb8d 100755
--- a/build_benchmarks.py
+++ b/build_benchmarks.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
-
"""Script to build ChromeOS benchmarks
Inputs:
@@ -26,7 +25,7 @@ Inputs:
"""
-__author__ = "bjanakiraman@google.com (Bhaskar Janakiraman)"
+__author__ = 'bjanakiraman@google.com (Bhaskar Janakiraman)'
import optparse
import os
@@ -38,37 +37,37 @@ import tc_enter_chroot
from utils import command_executer
from utils import logger
-
KNOWN_BENCHMARKS = [
- 'chromeos/startup',
- 'chromeos/browser/pagecycler',
- 'chromeos/browser/sunspider',
- 'chromeos/browser/v8bench',
- 'chromeos/cpu/bikjmp' ]
+ 'chromeos/startup', 'chromeos/browser/pagecycler',
+ 'chromeos/browser/sunspider', 'chromeos/browser/v8bench',
+ 'chromeos/cpu/bikjmp'
+]
# Commands to build CPU benchmarks.
-CPU_BUILDCMD_CLEAN = "cd /usr/local/toolchain_root/third_party/android_bench/v2_0/CLOSED_SOURCE/%s;\
-python ../../scripts/bench.py --toolchain=/usr/bin --action=clean;"
-
-CPU_BUILDCMD_BUILD = "cd /usr/local/toolchain_root/third_party/android_bench/v2_0/CLOSED_SOURCE/%s;\
-python ../../scripts/bench.py --toolchain=/usr/bin --add_cflags=%s --add_ldflags=%s --makeopts=%s --action=build"
+CPU_BUILDCMD_CLEAN = 'cd /usr/local/toolchain_root/third_party/android_bench/v2_0/CLOSED_SOURCE/%s;\
+python ../../scripts/bench.py --toolchain=/usr/bin --action=clean;'
+CPU_BUILDCMD_BUILD = 'cd /usr/local/toolchain_root/third_party/android_bench/v2_0/CLOSED_SOURCE/%s;\
+python ../../scripts/bench.py --toolchain=/usr/bin --add_cflags=%s --add_ldflags=%s --makeopts=%s --action=build'
# Common initializations
cmd_executer = command_executer.GetCommandExecuter()
def Usage(parser, message):
- print "ERROR: " + message
+ print 'ERROR: ' + message
parser.print_help()
sys.exit(0)
def CreateRunsh(destdir, benchmark):
- """Create run.sh script to run benchmark. Perflab needs a run.sh that runs the benchmark."""
- run_cmd = os.path.dirname(os.path.abspath(__file__)) + "/run_benchmarks.py"
- contents = "#!/bin/sh\n%s $@ %s\n" % (run_cmd, benchmark)
+ """Create run.sh script to run benchmark.
+
+ Perflab needs a run.sh that runs the benchmark.
+ """
+ run_cmd = os.path.dirname(os.path.abspath(__file__)) + '/run_benchmarks.py'
+ contents = '#!/bin/sh\n%s $@ %s\n' % (run_cmd, benchmark)
runshfile = destdir + '/run.sh'
f = open(runshfile, 'w')
f.write(contents)
@@ -77,8 +76,9 @@ def CreateRunsh(destdir, benchmark):
return retval
-def CreateBinaryCopy(sourcedir, destdir, copy = None):
+def CreateBinaryCopy(sourcedir, destdir, copy=None):
"""Create links in perflab-bin/destdir/* to sourcedir/* for now, instead of copies
+
Args:
copy: when none, make soft links to everything under sourcedir, otherwise
copy all to destdir.
@@ -88,7 +88,7 @@ def CreateBinaryCopy(sourcedir, destdir, copy = None):
retval = 0
# check if sourcedir exists
if not os.path.exists(sourcedir):
- logger.GetLogger().LogError("benchmark results %s does not exist." %
+ logger.GetLogger().LogError('benchmark results %s does not exist.' %
sourcedir)
return 1
@@ -118,53 +118,85 @@ def Main(argv):
# Common initializations
parser = optparse.OptionParser()
- parser.add_option("-c", "--chromeos_root", dest="chromeos_root",
- help="Target directory for ChromeOS installation.")
- parser.add_option("-t", "--toolchain_root", dest="toolchain_root",
- help="This is obsolete. Do not use.")
- parser.add_option("-r", "--third_party", dest="third_party",
- help="The third_party dir containing android benchmarks.")
- parser.add_option("-C", "--clean", dest="clean", action="store_true",
- default=False, help="Clean up build."),
- parser.add_option("-B", "--build", dest="build", action="store_true",
- default=False, help="Build benchmark."),
- parser.add_option("-O", "--only_copy", dest="only_copy", action="store_true",
- default=False, help="Only copy to perflab-bin - no builds."),
- parser.add_option("--workdir", dest="workdir", default=".",
- help="Work directory for perflab outputs.")
- parser.add_option("--clobber_chroot", dest="clobber_chroot",
- action="store_true", help=
- "Delete the chroot and start fresh", default=False)
- parser.add_option("--clobber_board", dest="clobber_board",
- action="store_true",
- help="Delete the board and start fresh", default=False)
- parser.add_option("--cflags", dest="cflags", default="",
- help="CFLAGS for the ChromeOS packages")
- parser.add_option("--cxxflags", dest="cxxflags",default="",
- help="CXXFLAGS for the ChromeOS packages")
- parser.add_option("--ldflags", dest="ldflags", default="",
- help="LDFLAGS for the ChromeOS packages")
- parser.add_option("--makeopts", dest="makeopts", default="",
- help="Make options for the ChromeOS packages")
- parser.add_option("--board", dest="board",
- help="ChromeOS target board, e.g. x86-generic")
-
- (options,args) = parser.parse_args(argv[1:])
+ parser.add_option('-c',
+ '--chromeos_root',
+ dest='chromeos_root',
+ help='Target directory for ChromeOS installation.')
+ parser.add_option('-t',
+ '--toolchain_root',
+ dest='toolchain_root',
+ help='This is obsolete. Do not use.')
+ parser.add_option('-r',
+ '--third_party',
+ dest='third_party',
+ help='The third_party dir containing android benchmarks.')
+ parser.add_option('-C',
+ '--clean',
+ dest='clean',
+ action='store_true',
+ default=False,
+ help='Clean up build.'),
+ parser.add_option('-B',
+ '--build',
+ dest='build',
+ action='store_true',
+ default=False,
+ help='Build benchmark.'),
+ parser.add_option('-O',
+ '--only_copy',
+ dest='only_copy',
+ action='store_true',
+ default=False,
+ help='Only copy to perflab-bin - no builds.'),
+ parser.add_option('--workdir',
+ dest='workdir',
+ default='.',
+ help='Work directory for perflab outputs.')
+ parser.add_option('--clobber_chroot',
+ dest='clobber_chroot',
+ action='store_true',
+ help='Delete the chroot and start fresh',
+ default=False)
+ parser.add_option('--clobber_board',
+ dest='clobber_board',
+ action='store_true',
+ help='Delete the board and start fresh',
+ default=False)
+ parser.add_option('--cflags',
+ dest='cflags',
+ default='',
+ help='CFLAGS for the ChromeOS packages')
+ parser.add_option('--cxxflags',
+ dest='cxxflags',
+ default='',
+ help='CXXFLAGS for the ChromeOS packages')
+ parser.add_option('--ldflags',
+ dest='ldflags',
+ default='',
+ help='LDFLAGS for the ChromeOS packages')
+ parser.add_option('--makeopts',
+ dest='makeopts',
+ default='',
+ help='Make options for the ChromeOS packages')
+ parser.add_option('--board',
+ dest='board',
+ help='ChromeOS target board, e.g. x86-generic')
+
+ (options, args) = parser.parse_args(argv[1:])
# validate args
for arg in args:
if arg not in KNOWN_BENCHMARKS:
- logger.GetLogger().LogFatal("Bad benchmark %s specified" % arg)
-
+ logger.GetLogger().LogFatal('Bad benchmark %s specified' % arg)
if options.chromeos_root is None:
- Usage(parser, "--chromeos_root must be set")
+ Usage(parser, '--chromeos_root must be set')
if options.board is None:
- Usage(parser, "--board must be set")
+ Usage(parser, '--board must be set')
if options.toolchain_root:
- logger.GetLogger().LogWarning("--toolchain_root should not be set")
+ logger.GetLogger().LogWarning('--toolchain_root should not be set')
options.chromeos_root = os.path.expanduser(options.chromeos_root)
options.workdir = os.path.expanduser(options.workdir)
@@ -173,7 +205,7 @@ def Main(argv):
if options.third_party:
third_party = options.third_party
else:
- third_party = "%s/../../../third_party" % os.path.dirname(__file__)
+ third_party = '%s/../../../third_party' % os.path.dirname(__file__)
third_party = os.path.realpath(third_party)
for arg in args:
# CPU benchmarks
@@ -183,22 +215,21 @@ def Main(argv):
tec_options = []
if third_party:
- tec_options.append("--third_party=%s" % third_party)
+ tec_options.append('--third_party=%s' % third_party)
if options.clean:
retval = cmd_executer.ChrootRunCommand(options.chromeos_root,
CPU_BUILDCMD_CLEAN % benchname,
- tec_options=tec_options
- )
+ tec_options=tec_options)
logger.GetLogger().LogErrorIf(retval,
- "clean of benchmark %s failed." % arg)
+ 'clean of benchmark %s failed.' % arg)
if options.build:
retval = cmd_executer.ChrootRunCommand(
options.chromeos_root,
- CPU_BUILDCMD_BUILD % (benchname, options.cflags,
- options.ldflags, options.makeopts),
+ CPU_BUILDCMD_BUILD % (benchname, options.cflags, options.ldflags,
+ options.makeopts),
tec_options=tec_options)
logger.GetLogger().LogErrorIf(retval,
- "Build of benchmark %s failed." % arg)
+ 'Build of benchmark %s failed.' % arg)
if retval == 0 and (options.build or options.only_copy):
benchdir = ('%s/android_bench/v2_0/CLOSED_SOURCE/%s' %
(third_party, benchname))
@@ -207,50 +238,57 @@ def Main(argv):
# For cpu/*, we need to copy (not symlinks) of all the contents,
      # because they are part of the test fixture.
retval = CreateBinaryCopy(benchdir, linkdir, True)
- if retval != 0: return retval
+ if retval != 0:
+ return retval
retval = CreateRunsh(linkdir, arg)
- if retval != 0: return retval
+ if retval != 0:
+ return retval
elif re.match('chromeos/startup', arg):
if options.build:
- # Clean for chromeos/browser and chromeos/startup is a Nop since builds are always from scratch.
- build_args = [os.path.dirname(os.path.abspath(__file__)) + "/build_chromeos.py",
- "--chromeos_root=" + options.chromeos_root,
- "--board=" + options.board,
- "--cflags=" + options.cflags,
- "--cxxflags=" + options.cxxflags,
- "--ldflags=" + options.ldflags,
- "--clobber_board"
- ]
+ # Clean for chromeos/browser and chromeos/startup is a Nop since builds are always from scratch.
+ build_args = [
+ os.path.dirname(os.path.abspath(__file__)) + '/build_chromeos.py',
+ '--chromeos_root=' + options.chromeos_root,
+ '--board=' + options.board, '--cflags=' + options.cflags,
+ '--cxxflags=' + options.cxxflags, '--ldflags=' + options.ldflags,
+ '--clobber_board'
+ ]
retval = build_chromeos.Main(build_args)
- logger.GetLogger().LogErrorIf(retval, "Build of ChromeOS failed.")
+ logger.GetLogger().LogErrorIf(retval, 'Build of ChromeOS failed.')
if retval == 0 and (options.build or options.only_copy):
- benchdir = '%s/src/build/images/%s/latest' % (options.chromeos_root, options.board)
+ benchdir = '%s/src/build/images/%s/latest' % (options.chromeos_root,
+ options.board)
linkdir = '%s/perflab-bin/%s' % (options.workdir, arg)
retval = CreateBinaryCopy(benchdir, linkdir)
- if retval != 0: return retval
+ if retval != 0:
+ return retval
CreateRunsh(linkdir, arg)
- if retval != 0: return retval
+ if retval != 0:
+ return retval
elif re.match('chromeos/browser', arg):
if options.build:
# For now, re-build os. TBD: Change to call build_browser
- build_args = [os.path.dirname(os.path.abspath(__file__)) + "/build_chrome_browser.py",
- "--chromeos_root=" + options.chromeos_root,
- "--board=" + options.board,
- "--cflags=" + options.cflags,
- "--cxxflags=" + options.cxxflags,
- "--ldflags=" + options.ldflags
- ]
+ build_args = [os.path.dirname(os.path.abspath(__file__)) +
+ '/build_chrome_browser.py',
+ '--chromeos_root=' + options.chromeos_root,
+ '--board=' + options.board, '--cflags=' + options.cflags,
+ '--cxxflags=' + options.cxxflags,
+ '--ldflags=' + options.ldflags]
retval = build_chromeos.Main(build_args)
- logger.GetLogger().LogErrorIf(retval, "Build of ChromeOS failed.")
+ logger.GetLogger().LogErrorIf(retval, 'Build of ChromeOS failed.')
if retval == 0 and (options.build or options.only_copy):
- benchdir = '%s/src/build/images/%s/latest' % (options.chromeos_root, options.board)
+ benchdir = '%s/src/build/images/%s/latest' % (options.chromeos_root,
+ options.board)
linkdir = '%s/perflab-bin/%s' % (options.workdir, arg)
- retval = CreateBinaryCopy(benchdir,linkdir)
- if retval != 0: return retval
+ retval = CreateBinaryCopy(benchdir, linkdir)
+ if retval != 0:
+ return retval
retval = CreateRunsh(linkdir, arg)
- if retval != 0: return retval
+ if retval != 0:
+ return retval
return 0
-if __name__ == "__main__":
+
+if __name__ == '__main__':
sys.exit(Main(sys.argv))
diff --git a/build_chrome_browser.py b/build_chrome_browser.py
index 63365692..4bec27c2 100755
--- a/build_chrome_browser.py
+++ b/build_chrome_browser.py
@@ -1,14 +1,13 @@
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
-
"""Script to checkout the ChromeOS source.
This script sets up the ChromeOS source in the given directory, matching a
particular release of ChromeOS.
"""
-__author__ = "raymes@google.com (Raymes Khoury)"
+__author__ = 'raymes@google.com (Raymes Khoury)'
import optparse
import os
@@ -24,7 +23,7 @@ cmd_executer = None
def Usage(parser, message):
- print "ERROR: " + message
+ print 'ERROR: ' + message
parser.print_help()
sys.exit(0)
@@ -36,116 +35,133 @@ def Main(argv):
cmd_executer = command_executer.GetCommandExecuter()
parser = optparse.OptionParser()
- parser.add_option("--chromeos_root", dest="chromeos_root",
- help="Target directory for ChromeOS installation.")
- parser.add_option("--version", dest="version")
- parser.add_option("--clean",
- dest="clean",
+ parser.add_option('--chromeos_root',
+ dest='chromeos_root',
+ help='Target directory for ChromeOS installation.')
+ parser.add_option('--version', dest='version')
+ parser.add_option('--clean',
+ dest='clean',
+ default=False,
+ action='store_true',
+ help=('Clean the /var/cache/chromeos-chrome/'
+ 'chrome-src/src/out_$board dir'))
+ parser.add_option('--env',
+ dest='env',
+ default='',
+ help='Use the following env')
+ parser.add_option('--ebuild_version',
+ dest='ebuild_version',
+ help='Use this ebuild instead of the default one.')
+ parser.add_option('--cflags',
+ dest='cflags',
+ default='',
+ help='CFLAGS for the ChromeOS packages')
+ parser.add_option('--cxxflags',
+ dest='cxxflags',
+ default='',
+ help='CXXFLAGS for the ChromeOS packages')
+ parser.add_option('--ldflags',
+ dest='ldflags',
+ default='',
+ help='LDFLAGS for the ChromeOS packages')
+ parser.add_option('--board',
+ dest='board',
+ help='ChromeOS target board, e.g. x86-generic')
+ parser.add_option('--no_build_image',
+ dest='no_build_image',
+ default=False,
+ action='store_true',
+ help=('Skip build image after building browser.'
+ 'Defaults to False.'))
+ parser.add_option('--label',
+ dest='label',
+ help='Optional label to apply to the ChromeOS image.')
+ parser.add_option('--build_image_args',
+ default='',
+ dest='build_image_args',
+ help='Optional arguments to build_image.')
+ parser.add_option('--cros_workon',
+ dest='cros_workon',
+ help='Build using external source tree.')
+ parser.add_option('--dev',
+ dest='dev',
+ default=False,
+ action='store_true',
+ help=('Build a dev (eg. writable/large) image. '
+ 'Defaults to False.'))
+ parser.add_option('--debug',
+ dest='debug',
+ default=False,
+ action='store_true',
+ help=('Build chrome browser using debug mode. '
+ 'This option implies --dev. Defaults to false.'))
+ parser.add_option('--verbose',
+ dest='verbose',
default=False,
- action="store_true",
- help=("Clean the /var/cache/chromeos-chrome/"
- "chrome-src/src/out_$board dir"))
- parser.add_option("--env",
- dest="env",
- default="",
- help="Use the following env")
- parser.add_option("--ebuild_version",
- dest="ebuild_version",
- help="Use this ebuild instead of the default one.")
- parser.add_option("--cflags", dest="cflags",
- default="",
- help="CFLAGS for the ChromeOS packages")
- parser.add_option("--cxxflags", dest="cxxflags",
- default="",
- help="CXXFLAGS for the ChromeOS packages")
- parser.add_option("--ldflags", dest="ldflags",
- default="",
- help="LDFLAGS for the ChromeOS packages")
- parser.add_option("--board", dest="board",
- help="ChromeOS target board, e.g. x86-generic")
- parser.add_option("--no_build_image", dest="no_build_image", default=False,
- action="store_true",
- help=("Skip build image after building browser."
- "Defaults to False."))
- parser.add_option("--label", dest="label",
- help="Optional label to apply to the ChromeOS image.")
- parser.add_option("--build_image_args",
- default="",
- dest="build_image_args",
- help="Optional arguments to build_image.")
- parser.add_option("--cros_workon", dest="cros_workon",
- help="Build using external source tree.")
- parser.add_option("--dev", dest="dev", default=False, action="store_true",
- help=("Build a dev (eg. writable/large) image. "
- "Defaults to False."))
- parser.add_option("--debug", dest="debug", default=False, action="store_true",
- help=("Build chrome browser using debug mode. "
- "This option implies --dev. Defaults to false."))
- parser.add_option("--verbose", dest="verbose", default=False,
- action="store_true",
- help="Build with verbose information.")
+ action='store_true',
+ help='Build with verbose information.')
options = parser.parse_args(argv)[0]
if options.chromeos_root is None:
- Usage(parser, "--chromeos_root must be set")
+ Usage(parser, '--chromeos_root must be set')
if options.board is None:
- Usage(parser, "--board must be set")
+ Usage(parser, '--board must be set')
if options.version is None:
- logger.GetLogger().LogOutput("No Chrome version given so "
- "using the default checked in version.")
- chrome_version = ""
+ logger.GetLogger().LogOutput('No Chrome version given so '
+ 'using the default checked in version.')
+ chrome_version = ''
else:
- chrome_version = "CHROME_VERSION=%s" % options.version
+ chrome_version = 'CHROME_VERSION=%s' % options.version
if options.dev and options.no_build_image:
logger.GetLogger().LogOutput(
- "\"--dev\" is meaningless if \"--no_build_image\" is given.")
+ "\"--dev\" is meaningless if \"--no_build_image\" is given.")
if options.debug:
options.dev = True
options.chromeos_root = misc.CanonicalizePath(options.chromeos_root)
- unmask_env = "ACCEPT_KEYWORDS=~*"
+ unmask_env = 'ACCEPT_KEYWORDS=~*'
if options.ebuild_version:
- ebuild_version = "=%s" % options.ebuild_version
- options.env = "%s %s" % (options.env, unmask_env)
+ ebuild_version = '=%s' % options.ebuild_version
+ options.env = '%s %s' % (options.env, unmask_env)
else:
- ebuild_version = "chromeos-chrome"
+ ebuild_version = 'chromeos-chrome'
if options.cros_workon and not (
- os.path.isdir(options.cros_workon) and os.path.exists(
- os.path.join(options.cros_workon, "src/chromeos/chromeos.gyp"))):
- Usage(parser, "--cros_workon must be a valid chromium browser checkout.")
+ os.path.isdir(options.cros_workon) and os.path.exists(os.path.join(
+ options.cros_workon, 'src/chromeos/chromeos.gyp'))):
+ Usage(parser, '--cros_workon must be a valid chromium browser checkout.')
if options.verbose:
options.env = misc.MergeEnvStringWithDict(
- options.env, {"USE": "chrome_internal verbose"})
+ options.env, {'USE': 'chrome_internal verbose'})
else:
options.env = misc.MergeEnvStringWithDict(options.env,
- {"USE": "chrome_internal"})
+ {'USE': 'chrome_internal'})
if options.debug:
options.env = misc.MergeEnvStringWithDict(options.env,
- {"BUILDTYPE": "Debug"})
+ {'BUILDTYPE': 'Debug'})
if options.clean:
misc.RemoveChromeBrowserObjectFiles(options.chromeos_root, options.board)
- chrome_origin = "SERVER_SOURCE"
+ chrome_origin = 'SERVER_SOURCE'
if options.cros_workon:
- chrome_origin = "LOCAL_SOURCE"
+ chrome_origin = 'LOCAL_SOURCE'
command = 'cros_workon --board={0} start chromeos-chrome'.format(
- options.board)
+ options.board)
ret = cmd_executer.ChrootRunCommandWOutput(options.chromeos_root, command)
# cros_workon start returns non-zero if chromeos-chrome is already a
# cros_workon package.
if ret[0] and ret[2].find(
- "WARNING : Already working on chromeos-base/chromeos-chrome") == -1:
- logger.GetLogger().LogFatal("cros_workon chromeos-chrome failed.")
+ 'WARNING : Already working on chromeos-base/chromeos-chrome') == -1:
+ logger.GetLogger().LogFatal('cros_workon chromeos-chrome failed.')
# Return value is non-zero means we do find the "Already working on..."
# message, keep the information, so later on we do not revert the
@@ -154,11 +170,11 @@ def Main(argv):
# Emerge the browser
emerge_browser_command = \
- ("CHROME_ORIGIN={0} {1} "
+ ('CHROME_ORIGIN={0} {1} '
"CFLAGS=\"$(portageq-{2} envvar CFLAGS) {3}\" "
"LDFLAGS=\"$(portageq-{2} envvar LDFLAGS) {4}\" "
"CXXFLAGS=\"$(portageq-{2} envvar CXXFLAGS) {5}\" "
- "{6} emerge-{2} --buildpkg {7}").format(
+ '{6} emerge-{2} --buildpkg {7}').format(
chrome_origin, chrome_version, options.board, options.cflags,
options.ldflags, options.cxxflags, options.env, ebuild_version)
@@ -170,53 +186,55 @@ def Main(argv):
emerge_browser_command,
cros_sdk_options=cros_sdk_options)
- logger.GetLogger().LogFatalIf(ret, "build_packages failed")
+ logger.GetLogger().LogFatalIf(ret, 'build_packages failed')
if options.cros_workon and not cros_workon_keep:
command = 'cros_workon --board={0} stop chromeos-chrome'.format(
- options.board)
+ options.board)
ret = cmd_executer.ChrootRunCommand(options.chromeos_root, command)
# cros_workon failed, not a fatal one, just report it.
if ret:
- print "cros_workon stop chromeos-chrome failed."
+ print 'cros_workon stop chromeos-chrome failed.'
if options.no_build_image:
return ret
# Finally build the image
ret = cmd_executer.ChrootRunCommand(
- options.chromeos_root, "{0} {1} {2} {3}".format(
- unmask_env, options.env, misc.GetBuildImageCommand(
- options.board, dev=options.dev), options.build_image_args))
-
- logger.GetLogger().LogFatalIf(ret, "build_image failed")
-
-
- flags_file_name = "chrome_flags.txt"
- flags_file_path = "{0}/src/build/images/{1}/latest/{2}".format(
- options.chromeos_root, options.board, flags_file_name)
- flags_file = open(flags_file_path, "wb")
- flags_file.write("CFLAGS={0}\n".format(options.cflags))
- flags_file.write("CXXFLAGS={0}\n".format(options.cxxflags))
- flags_file.write("LDFLAGS={0}\n".format(options.ldflags))
+ options.chromeos_root,
+ '{0} {1} {2} {3}'.format(unmask_env,
+ options.env,
+ misc.GetBuildImageCommand(options.board,
+ dev=options.dev),
+ options.build_image_args))
+
+ logger.GetLogger().LogFatalIf(ret, 'build_image failed')
+
+ flags_file_name = 'chrome_flags.txt'
+ flags_file_path = '{0}/src/build/images/{1}/latest/{2}'.format(
+ options.chromeos_root, options.board, flags_file_name)
+ flags_file = open(flags_file_path, 'wb')
+ flags_file.write('CFLAGS={0}\n'.format(options.cflags))
+ flags_file.write('CXXFLAGS={0}\n'.format(options.cxxflags))
+ flags_file.write('LDFLAGS={0}\n'.format(options.ldflags))
flags_file.close()
-
if options.label:
- image_dir_path = "{0}/src/build/images/{1}/latest".format(
- options.chromeos_root, options.board)
+ image_dir_path = '{0}/src/build/images/{1}/latest'.format(
+ options.chromeos_root, options.board)
real_image_dir_path = os.path.realpath(image_dir_path)
- command = "ln -sf -T {0} {1}/{2}".format(
+ command = 'ln -sf -T {0} {1}/{2}'.format(
os.path.basename(real_image_dir_path),\
os.path.dirname(real_image_dir_path),\
options.label)
ret = cmd_executer.RunCommand(command)
- logger.GetLogger().LogFatalIf(ret, "Failed to apply symlink label %s" %
+ logger.GetLogger().LogFatalIf(ret, 'Failed to apply symlink label %s' %
options.label)
return ret
-if __name__ == "__main__":
+
+if __name__ == '__main__':
retval = Main(sys.argv)
sys.exit(retval)
diff --git a/build_chromeos.py b/build_chromeos.py
index 673f32b3..e4f64268 100755
--- a/build_chromeos.py
+++ b/build_chromeos.py
@@ -1,17 +1,16 @@
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
-
"""Script to checkout the ChromeOS source.
This script sets up the ChromeOS source in the given directory, matching a
particular release of ChromeOS.
"""
-__author__ = ("asharif@google.com (Ahmad Sharif) "
- "llozano@google.com (Luis Lozano) "
- "raymes@google.com (Raymes Khoury) "
- "shenhan@google.com (Han Shen)")
+__author__ = ('asharif@google.com (Ahmad Sharif) '
+ 'llozano@google.com (Luis Lozano) '
+ 'raymes@google.com (Raymes Khoury) '
+ 'shenhan@google.com (Han Shen)')
import optparse
import os
@@ -24,7 +23,7 @@ from utils import misc
def Usage(parser, message):
- print "ERROR: " + message
+ print 'ERROR: ' + message
parser.print_help()
sys.exit(0)
@@ -35,62 +34,85 @@ def Main(argv):
cmd_executer = command_executer.GetCommandExecuter()
parser = optparse.OptionParser()
- parser.add_option("--chromeos_root", dest="chromeos_root",
- help="Target directory for ChromeOS installation.")
- parser.add_option("--clobber_chroot", dest="clobber_chroot",
- action="store_true", help=
- "Delete the chroot and start fresh", default=False)
- parser.add_option("--clobber_board", dest="clobber_board",
- action="store_true",
- help="Delete the board and start fresh", default=False)
- parser.add_option("--rebuild", dest="rebuild",
- action="store_true",
- help="Rebuild all board packages except the toolchain.",
+ parser.add_option('--chromeos_root',
+ dest='chromeos_root',
+ help='Target directory for ChromeOS installation.')
+ parser.add_option('--clobber_chroot',
+ dest='clobber_chroot',
+ action='store_true',
+ help='Delete the chroot and start fresh',
+ default=False)
+ parser.add_option('--clobber_board',
+ dest='clobber_board',
+ action='store_true',
+ help='Delete the board and start fresh',
default=False)
- parser.add_option("--cflags", dest="cflags", default="",
- help="CFLAGS for the ChromeOS packages")
- parser.add_option("--cxxflags", dest="cxxflags", default="",
- help="CXXFLAGS for the ChromeOS packages")
- parser.add_option("--ldflags", dest="ldflags", default="",
- help="LDFLAGS for the ChromeOS packages")
- parser.add_option("--board", dest="board",
- help="ChromeOS target board, e.g. x86-generic")
- parser.add_option("--package", dest="package",
- help="The package needs to be built")
- parser.add_option("--label", dest="label",
- help="Optional label symlink to point to build dir.")
- parser.add_option("--dev", dest="dev", default=False, action="store_true",
- help=("Make the final image in dev mode (eg writable, "
- "more space on image). Defaults to False."))
- parser.add_option("--debug", dest="debug", default=False, action="store_true",
+ parser.add_option('--rebuild',
+ dest='rebuild',
+ action='store_true',
+ help='Rebuild all board packages except the toolchain.',
+ default=False)
+ parser.add_option('--cflags',
+ dest='cflags',
+ default='',
+ help='CFLAGS for the ChromeOS packages')
+ parser.add_option('--cxxflags',
+ dest='cxxflags',
+ default='',
+ help='CXXFLAGS for the ChromeOS packages')
+ parser.add_option('--ldflags',
+ dest='ldflags',
+ default='',
+ help='LDFLAGS for the ChromeOS packages')
+ parser.add_option('--board',
+ dest='board',
+ help='ChromeOS target board, e.g. x86-generic')
+ parser.add_option('--package',
+ dest='package',
+ help='The package needs to be built')
+ parser.add_option('--label',
+ dest='label',
+ help='Optional label symlink to point to build dir.')
+ parser.add_option('--dev',
+ dest='dev',
+ default=False,
+ action='store_true',
+ help=('Make the final image in dev mode (eg writable, '
+ 'more space on image). Defaults to False.'))
+ parser.add_option('--debug',
+ dest='debug',
+ default=False,
+ action='store_true',
help=("Optional. Build chrome browser with \"-g -O0\". "
"Notice, this also turns on \'--dev\'. "
- "Defaults to False."))
- parser.add_option("--env",
- dest="env",
- default="",
- help="Env to pass to build_packages.")
- parser.add_option("--vanilla", dest="vanilla",
+ 'Defaults to False.'))
+ parser.add_option('--env',
+ dest='env',
+ default='',
+ help='Env to pass to build_packages.')
+ parser.add_option('--vanilla',
+ dest='vanilla',
default=False,
- action="store_true",
- help="Use default ChromeOS toolchain.")
- parser.add_option("--vanilla_image", dest="vanilla_image",
+ action='store_true',
+ help='Use default ChromeOS toolchain.')
+ parser.add_option('--vanilla_image',
+ dest='vanilla_image',
default=False,
- action="store_true",
- help=("Use prebuild packages for building the image. "
- "It also implies the --vanilla option is set."))
+ action='store_true',
+ help=('Use prebuild packages for building the image. '
+ 'It also implies the --vanilla option is set.'))
options = parser.parse_args(argv[1:])[0]
if options.chromeos_root is None:
- Usage(parser, "--chromeos_root must be set")
+ Usage(parser, '--chromeos_root must be set')
options.chromeos_root = os.path.expanduser(options.chromeos_root)
scripts_dir = os.path.join(options.chromeos_root, 'src', 'scripts')
if not os.path.isdir(scripts_dir):
- Usage(parser, "--chromeos_root must be set up first. Use setup_chromeos.py")
+ Usage(parser, '--chromeos_root must be set up first. Use setup_chromeos.py')
if options.board is None:
- Usage(parser, "--board must be set")
+ Usage(parser, '--board must be set')
if options.debug:
options.dev = True
@@ -98,12 +120,12 @@ def Main(argv):
build_packages_env = options.env
if build_packages_env.find('EXTRA_BOARD_FLAGS=') != -1:
logger.GetLogger().LogFatal(
- ('Passing "EXTRA_BOARD_FLAGS" in "--env" is not supported. '
- 'This flags is used internally by this script. '
- 'Contact the author for more detail.'))
+ ('Passing "EXTRA_BOARD_FLAGS" in "--env" is not supported. '
+ 'This flags is used internally by this script. '
+ 'Contact the author for more detail.'))
if options.rebuild == True:
- build_packages_env += " EXTRA_BOARD_FLAGS=-e"
+ build_packages_env += ' EXTRA_BOARD_FLAGS=-e'
# EXTRA_BOARD_FLAGS=-e should clean up the object files for the chrome
# browser but it doesn't. So do it here.
misc.RemoveChromeBrowserObjectFiles(options.chromeos_root, options.board)
@@ -111,14 +133,15 @@ def Main(argv):
# Build with afdo_use by default.
# To change the default use --env="USE=-afdo_use".
build_packages_env = misc.MergeEnvStringWithDict(
- build_packages_env,
- {"USE": "chrome_internal afdo_use"})
+ build_packages_env, {'USE': 'chrome_internal afdo_use'})
build_packages_command = misc.GetBuildPackagesCommand(
- board=options.board, usepkg=options.vanilla_image, debug=options.debug)
+ board=options.board,
+ usepkg=options.vanilla_image,
+ debug=options.debug)
if options.package:
- build_packages_command += " {0}".format(options.package)
+ build_packages_command += ' {0}'.format(options.package)
build_image_command = misc.GetBuildImageCommand(options.board, options.dev)
@@ -126,25 +149,25 @@ def Main(argv):
command = misc.GetSetupBoardCommand(options.board,
usepkg=options.vanilla_image,
force=options.clobber_board)
- command += "; " + build_packages_env + " " + build_packages_command
- command += "&& " + build_packages_env + " " + build_image_command
+ command += '; ' + build_packages_env + ' ' + build_packages_command
+ command += '&& ' + build_packages_env + ' ' + build_image_command
ret = cmd_executer.ChrootRunCommand(options.chromeos_root, command)
return ret
# Setup board
- if not os.path.isdir(options.chromeos_root + "/chroot/build/"
- + options.board) or options.clobber_board:
+ if not os.path.isdir(options.chromeos_root + '/chroot/build/' +
+ options.board) or options.clobber_board:
# Run build_tc.py from binary package
rootdir = misc.GetRoot(argv[0])[0]
version_number = misc.GetRoot(rootdir)[1]
- ret = cmd_executer.ChrootRunCommand(
- options.chromeos_root,
- misc.GetSetupBoardCommand(options.board,
- force=options.clobber_board))
- logger.GetLogger().LogFatalIf(ret, "setup_board failed")
+ ret = cmd_executer.ChrootRunCommand(options.chromeos_root,
+ misc.GetSetupBoardCommand(
+ options.board,
+ force=options.clobber_board))
+ logger.GetLogger().LogFatalIf(ret, 'setup_board failed')
else:
- logger.GetLogger().LogOutput("Did not setup_board "
- "because it already exists")
+ logger.GetLogger().LogOutput('Did not setup_board '
+ 'because it already exists')
if options.debug:
# Perform 2-step build_packages to build a debug chrome browser.
@@ -154,57 +177,50 @@ def Main(argv):
# Give warning about "--rebuild" and "--debug". Under this combination,
# only dependencies of "chromeos-chrome" get rebuilt.
logger.GetLogger().LogWarning(
- "\"--rebuild\" does not correctly re-build every package when "
- "\"--debug\" is enabled. ")
+ "\"--rebuild\" does not correctly re-build every package when "
+ "\"--debug\" is enabled. ")
# Replace EXTRA_BOARD_FLAGS=-e with "-e --onlydeps"
build_packages_env = build_packages_env.replace(
- 'EXTRA_BOARD_FLAGS=-e', 'EXTRA_BOARD_FLAGS=\"-e --onlydeps\"')
+ 'EXTRA_BOARD_FLAGS=-e', 'EXTRA_BOARD_FLAGS=\"-e --onlydeps\"')
else:
build_packages_env += ' EXTRA_BOARD_FLAGS=--onlydeps'
ret = cmd_executer.ChrootRunCommand(
- options.chromeos_root,
- "CFLAGS=\"$(portageq-%s envvar CFLAGS) %s\" "
- "CXXFLAGS=\"$(portageq-%s envvar CXXFLAGS) %s\" "
- "LDFLAGS=\"$(portageq-%s envvar LDFLAGS) %s\" "
- "CHROME_ORIGIN=SERVER_SOURCE "
- "%s "
- "%s --skip_chroot_upgrade"
- "chromeos-chrome"
- % (options.board, options.cflags,
- options.board, options.cxxflags,
- options.board, options.ldflags,
- build_packages_env,
- build_packages_command))
+ options.chromeos_root, "CFLAGS=\"$(portageq-%s envvar CFLAGS) %s\" "
+ "CXXFLAGS=\"$(portageq-%s envvar CXXFLAGS) %s\" "
+ "LDFLAGS=\"$(portageq-%s envvar LDFLAGS) %s\" "
+ 'CHROME_ORIGIN=SERVER_SOURCE '
+ '%s '
+ '%s --skip_chroot_upgrade'
+ 'chromeos-chrome' % (options.board, options.cflags, options.board,
+ options.cxxflags, options.board, options.ldflags,
+ build_packages_env, build_packages_command))
logger.GetLogger().LogFatalIf(\
- ret, "build_packages failed while trying to build chromeos-chrome deps.")
+ ret, 'build_packages failed while trying to build chromeos-chrome deps.')
# Secondly, build chromeos-chrome using debug mode.
# Replace '--onlydeps' with '--nodeps'.
if options.rebuild == True:
build_packages_env = build_packages_env.replace(
- 'EXTRA_BOARD_FLAGS=\"-e --onlydeps\"', 'EXTRA_BOARD_FLAGS=--nodeps')
+ 'EXTRA_BOARD_FLAGS=\"-e --onlydeps\"', 'EXTRA_BOARD_FLAGS=--nodeps')
else:
build_packages_env = build_packages_env.replace(
- 'EXTRA_BOARD_FLAGS=--onlydeps', 'EXTRA_BOARD_FLAGS=--nodeps')
+ 'EXTRA_BOARD_FLAGS=--onlydeps', 'EXTRA_BOARD_FLAGS=--nodeps')
ret = cmd_executer.ChrootRunCommand(
- options.chromeos_root,
- "CFLAGS=\"$(portageq-%s envvar CFLAGS) %s\" "
- "CXXFLAGS=\"$(portageq-%s envvar CXXFLAGS) %s\" "
- "LDFLAGS=\"$(portageq-%s envvar LDFLAGS) %s\" "
- "CHROME_ORIGIN=SERVER_SOURCE BUILDTYPE=Debug "
- "%s "
- "%s --skip_chroot_upgrade"
- "chromeos-chrome"
- % (options.board, options.cflags,
- options.board, options.cxxflags,
- options.board, options.ldflags,
- build_packages_env,
- build_packages_command))
+ options.chromeos_root, "CFLAGS=\"$(portageq-%s envvar CFLAGS) %s\" "
+ "CXXFLAGS=\"$(portageq-%s envvar CXXFLAGS) %s\" "
+ "LDFLAGS=\"$(portageq-%s envvar LDFLAGS) %s\" "
+ 'CHROME_ORIGIN=SERVER_SOURCE BUILDTYPE=Debug '
+ '%s '
+ '%s --skip_chroot_upgrade'
+ 'chromeos-chrome' % (options.board, options.cflags, options.board,
+ options.cxxflags, options.board, options.ldflags,
+ build_packages_env, build_packages_command))
logger.GetLogger().LogFatalIf(
- ret, "build_packages failed while trying to build debug chromeos-chrome.")
+ ret,
+ 'build_packages failed while trying to build debug chromeos-chrome.')
# Now, we have built chromeos-chrome and all dependencies.
# Finally, remove '-e' from EXTRA_BOARD_FLAGS,
@@ -215,58 +231,51 @@ def Main(argv):
# Up to now, we have a debug built chromos-chrome browser.
# Fall through to build the rest of the world.
- # Build packages
+ # Build packages
ret = cmd_executer.ChrootRunCommand(
- options.chromeos_root,
- "CFLAGS=\"$(portageq-%s envvar CFLAGS) %s\" "
+ options.chromeos_root, "CFLAGS=\"$(portageq-%s envvar CFLAGS) %s\" "
"CXXFLAGS=\"$(portageq-%s envvar CXXFLAGS) %s\" "
"LDFLAGS=\"$(portageq-%s envvar LDFLAGS) %s\" "
- "CHROME_ORIGIN=SERVER_SOURCE "
- "%s "
- "%s --skip_chroot_upgrade"
- % (options.board, options.cflags,
- options.board, options.cxxflags,
- options.board, options.ldflags,
- build_packages_env,
- build_packages_command))
-
- logger.GetLogger().LogFatalIf(ret, "build_packages failed")
+ 'CHROME_ORIGIN=SERVER_SOURCE '
+ '%s '
+ '%s --skip_chroot_upgrade' % (options.board, options.cflags,
+ options.board, options.cxxflags,
+ options.board, options.ldflags,
+ build_packages_env, build_packages_command))
+
+ logger.GetLogger().LogFatalIf(ret, 'build_packages failed')
if options.package:
return 0
# Build image
- ret = cmd_executer.ChrootRunCommand(options.chromeos_root,
- build_packages_env + " " +
- build_image_command)
-
- logger.GetLogger().LogFatalIf(ret, "build_image failed")
-
- flags_file_name = "flags.txt"
- flags_file_path = ("%s/src/build/images/%s/latest/%s" %
- (options.chromeos_root,
- options.board,
- flags_file_name))
- flags_file = open(flags_file_path, "wb")
- flags_file.write("CFLAGS=%s\n" % options.cflags)
- flags_file.write("CXXFLAGS=%s\n" % options.cxxflags)
- flags_file.write("LDFLAGS=%s\n" % options.ldflags)
+ ret = cmd_executer.ChrootRunCommand(
+ options.chromeos_root, build_packages_env + ' ' + build_image_command)
+
+ logger.GetLogger().LogFatalIf(ret, 'build_image failed')
+
+ flags_file_name = 'flags.txt'
+ flags_file_path = ('%s/src/build/images/%s/latest/%s' %
+ (options.chromeos_root, options.board, flags_file_name))
+ flags_file = open(flags_file_path, 'wb')
+ flags_file.write('CFLAGS=%s\n' % options.cflags)
+ flags_file.write('CXXFLAGS=%s\n' % options.cxxflags)
+ flags_file.write('LDFLAGS=%s\n' % options.ldflags)
flags_file.close()
if options.label:
- image_dir_path = ("%s/src/build/images/%s/latest" %
- (options.chromeos_root,
- options.board))
+ image_dir_path = ('%s/src/build/images/%s/latest' % (options.chromeos_root,
+ options.board))
real_image_dir_path = os.path.realpath(image_dir_path)
- command = ("ln -sf -T %s %s/%s" %
+ command = ('ln -sf -T %s %s/%s' %
(os.path.basename(real_image_dir_path),
- os.path.dirname(real_image_dir_path),
- options.label))
+ os.path.dirname(real_image_dir_path), options.label))
ret = cmd_executer.RunCommand(command)
- logger.GetLogger().LogFatalIf(ret, "Failed to apply symlink label %s" %
+ logger.GetLogger().LogFatalIf(ret, 'Failed to apply symlink label %s' %
options.label)
return ret
-if __name__ == "__main__":
+
+if __name__ == '__main__':
retval = Main(sys.argv)
sys.exit(retval)
diff --git a/build_tc.py b/build_tc.py
index 34cba543..2df1639d 100755
--- a/build_tc.py
+++ b/build_tc.py
@@ -3,13 +3,12 @@
# Copyright 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Script to build the ChromeOS toolchain.
This script sets up the toolchain if you give it the gcctools directory.
"""
-__author__ = "asharif@google.com (Ahmad Sharif)"
+__author__ = 'asharif@google.com (Ahmad Sharif)'
import getpass
import optparse
@@ -24,37 +23,40 @@ from utils import misc
class ToolchainPart(object):
- def __init__(self, name, source_path, chromeos_root, board, incremental,
- build_env, gcc_enable_ccache=False):
+
+ def __init__(self,
+ name,
+ source_path,
+ chromeos_root,
+ board,
+ incremental,
+ build_env,
+ gcc_enable_ccache=False):
self._name = name
self._source_path = misc.CanonicalizePath(source_path)
self._chromeos_root = chromeos_root
self._board = board
- self._ctarget = misc.GetCtargetFromBoard(self._board,
- self._chromeos_root)
+ self._ctarget = misc.GetCtargetFromBoard(self._board, self._chromeos_root)
self._gcc_libs_dest = misc.GetGccLibsDestForBoard(self._board,
- self._chromeos_root)
- self.tag = "%s-%s" % (name, self._ctarget)
+ self._chromeos_root)
+ self.tag = '%s-%s' % (name, self._ctarget)
self._ce = command_executer.GetCommandExecuter()
- self._mask_file = os.path.join(
- self._chromeos_root,
- "chroot",
- "etc/portage/package.mask/cross-%s" % self._ctarget)
+ self._mask_file = os.path.join(self._chromeos_root, 'chroot',
+ 'etc/portage/package.mask/cross-%s' %
+ self._ctarget)
self._new_mask_file = None
self._chroot_source_path = os.path.join(constants.MOUNTED_TOOLCHAIN_ROOT,
- self._name).lstrip("/")
+ self._name).lstrip('/')
self._incremental = incremental
self._build_env = build_env
self._gcc_enable_ccache = gcc_enable_ccache
def RunSetupBoardIfNecessary(self):
- cross_symlink = os.path.join(
- self._chromeos_root,
- "chroot",
- "usr/local/bin/emerge-%s" % self._board)
+ cross_symlink = os.path.join(self._chromeos_root, 'chroot',
+ 'usr/local/bin/emerge-%s' % self._board)
if not os.path.exists(cross_symlink):
- command = ("%s/setup_board --board=%s" %
+ command = ('%s/setup_board --board=%s' %
(misc.CHROMEOS_SCRIPTS_DIR, self._board))
self._ce.ChrootRunCommand(self._chromeos_root, command)
@@ -71,60 +73,53 @@ class ToolchainPart(object):
return rv
def RemoveCompiledFile(self):
- compiled_file = os.path.join(self._chromeos_root,
- "chroot",
- "var/tmp/portage/cross-%s" % self._ctarget,
- "%s-9999" % self._name,
- ".compiled")
- command = "rm -f %s" % compiled_file
+ compiled_file = os.path.join(self._chromeos_root, 'chroot',
+ 'var/tmp/portage/cross-%s' % self._ctarget,
+ '%s-9999' % self._name, '.compiled')
+ command = 'rm -f %s' % compiled_file
self._ce.RunCommand(command)
def MountSources(self, unmount_source):
mount_points = []
- mounted_source_path = os.path.join(self._chromeos_root,
- "chroot",
+ mounted_source_path = os.path.join(self._chromeos_root, 'chroot',
self._chroot_source_path)
- src_mp = tc_enter_chroot.MountPoint(
- self._source_path,
- mounted_source_path,
- getpass.getuser(),
- "ro")
+ src_mp = tc_enter_chroot.MountPoint(self._source_path, mounted_source_path,
+ getpass.getuser(), 'ro')
mount_points.append(src_mp)
- build_suffix = "build-%s" % self._ctarget
- build_dir = "%s-%s" % (self._source_path, build_suffix)
+ build_suffix = 'build-%s' % self._ctarget
+ build_dir = '%s-%s' % (self._source_path, build_suffix)
if not self._incremental and os.path.exists(build_dir):
- command = "rm -rf %s/*" % build_dir
+ command = 'rm -rf %s/*' % build_dir
self._ce.RunCommand(command)
# Create a -build directory for the objects.
- command = "mkdir -p %s" % build_dir
+ command = 'mkdir -p %s' % build_dir
self._ce.RunCommand(command)
- mounted_build_dir = os.path.join(
- self._chromeos_root, "chroot", "%s-%s" %
- (self._chroot_source_path, build_suffix))
- build_mp = tc_enter_chroot.MountPoint(
- build_dir,
- mounted_build_dir,
- getpass.getuser())
+ mounted_build_dir = os.path.join(self._chromeos_root, 'chroot', '%s-%s' %
+ (self._chroot_source_path, build_suffix))
+ build_mp = tc_enter_chroot.MountPoint(build_dir, mounted_build_dir,
+ getpass.getuser())
mount_points.append(build_mp)
if unmount_source:
unmount_statuses = [mp.UnMount() == 0 for mp in mount_points]
- assert all(unmount_statuses), "Could not unmount all mount points!"
+ assert all(unmount_statuses), 'Could not unmount all mount points!'
else:
mount_statuses = [mp.DoMount() == 0 for mp in mount_points]
if not all(mount_statuses):
- mounted = [mp for mp, status in zip(mount_points, mount_statuses) if status]
+ mounted = [mp
+ for mp, status in zip(mount_points, mount_statuses)
+ if status]
unmount_statuses = [mp.UnMount() == 0 for mp in mounted]
- assert all(unmount_statuses), "Could not unmount all mount points!"
-
+ assert all(unmount_statuses), 'Could not unmount all mount points!'
def UninstallTool(self):
- command = "sudo CLEAN_DELAY=0 emerge -C cross-%s/%s" % (self._ctarget, self._name)
+ command = 'sudo CLEAN_DELAY=0 emerge -C cross-%s/%s' % (self._ctarget,
+ self._name)
self._ce.ChrootRunCommand(self._chromeos_root, command)
def BuildTool(self):
@@ -132,35 +127,36 @@ class ToolchainPart(object):
# FEATURES=buildpkg adds minutes of time so we disable it.
# TODO(shenhan): keep '-sandbox' for a while for compatibility, then remove
# it after a while.
- features = "nostrip userpriv userfetch -usersandbox -sandbox noclean -buildpkg"
- env["FEATURES"] = features
+ features = ('nostrip userpriv userfetch -usersandbox -sandbox noclean '
+ '-buildpkg')
+ env['FEATURES'] = features
if self._incremental:
- env["FEATURES"] += " keepwork"
+ env['FEATURES'] += ' keepwork'
- if "USE" in env:
- env["USE"] += " multislot mounted_%s" % self._name
+ if 'USE' in env:
+ env['USE'] += ' multislot mounted_%s' % self._name
else:
- env["USE"] = "multislot mounted_%s" % self._name
+ env['USE'] = 'multislot mounted_%s' % self._name
# Disable ccache in our compilers. cache may be problematic for us.
# It ignores compiler environments settings and it is not clear if
# the cache hit algorithm verifies all the compiler binaries or
# just the driver.
- if self._name == "gcc" and not self._gcc_enable_ccache:
- env["USE"] += " -wrapper_ccache"
-
- env["%s_SOURCE_PATH" % self._name.upper()] = (
- os.path.join("/", self._chroot_source_path))
- env["ACCEPT_KEYWORDS"] = "~*"
- env_string = " ".join(["%s=\"%s\"" % var for var in env.items()])
- command = "emerge =cross-%s/%s-9999" % (self._ctarget, self._name)
- full_command = "sudo %s %s" % (env_string, command)
+ if self._name == 'gcc' and not self._gcc_enable_ccache:
+ env['USE'] += ' -wrapper_ccache'
+
+ env['%s_SOURCE_PATH' % self._name.upper()] = (
+ os.path.join('/', self._chroot_source_path))
+ env['ACCEPT_KEYWORDS'] = '~*'
+ env_string = ' '.join(["%s=\"%s\"" % var for var in env.items()])
+ command = 'emerge =cross-%s/%s-9999' % (self._ctarget, self._name)
+ full_command = 'sudo %s %s' % (env_string, command)
rv = self._ce.ChrootRunCommand(self._chromeos_root, full_command)
if rv != 0:
return rv
- if self._name == "gcc":
- command = ("sudo cp -r /usr/lib/gcc/%s %s" %
+ if self._name == 'gcc':
+ command = ('sudo cp -r /usr/lib/gcc/%s %s' %
(self._ctarget, self._gcc_libs_dest))
rv = self._ce.ChrootRunCommand(self._chromeos_root, command)
return rv
@@ -169,12 +165,12 @@ class ToolchainPart(object):
self._new_mask_file = None
if os.path.isfile(self._mask_file):
self._new_mask_file = tempfile.mktemp()
- command = "sudo mv %s %s" % (self._mask_file, self._new_mask_file)
+ command = 'sudo mv %s %s' % (self._mask_file, self._new_mask_file)
self._ce.RunCommand(command)
def UnMoveMaskFile(self):
if self._new_mask_file:
- command = "sudo mv %s %s" % (self._new_mask_file, self._mask_file)
+ command = 'sudo mv %s %s' % (self._new_mask_file, self._mask_file)
self._ce.RunCommand(command)
@@ -182,140 +178,140 @@ def Main(argv):
"""The main function."""
# Common initializations
parser = optparse.OptionParser()
- parser.add_option("-c",
- "--chromeos_root",
- dest="chromeos_root",
- default="../../",
- help=("ChromeOS root checkout directory"
- " uses ../.. if none given."))
- parser.add_option("-g",
- "--gcc_dir",
- dest="gcc_dir",
- help="The directory where gcc resides.")
- parser.add_option("--binutils_dir",
- dest="binutils_dir",
- help="The directory where binutils resides.")
- parser.add_option("-x",
- "--gdb_dir",
- dest="gdb_dir",
- help="The directory where gdb resides.")
- parser.add_option("-b",
- "--board",
- dest="board",
- default="x86-agz",
- help="The target board.")
- parser.add_option("-n",
- "--noincremental",
- dest="noincremental",
+ parser.add_option('-c',
+ '--chromeos_root',
+ dest='chromeos_root',
+ default='../../',
+ help=('ChromeOS root checkout directory'
+ ' uses ../.. if none given.'))
+ parser.add_option('-g',
+ '--gcc_dir',
+ dest='gcc_dir',
+ help='The directory where gcc resides.')
+ parser.add_option('--binutils_dir',
+ dest='binutils_dir',
+ help='The directory where binutils resides.')
+ parser.add_option('-x',
+ '--gdb_dir',
+ dest='gdb_dir',
+ help='The directory where gdb resides.')
+ parser.add_option('-b',
+ '--board',
+ dest='board',
+ default='x86-agz',
+ help='The target board.')
+ parser.add_option('-n',
+ '--noincremental',
+ dest='noincremental',
default=False,
- action="store_true",
- help="Use FEATURES=keepwork to do incremental builds.")
- parser.add_option("--cflags",
- dest="cflags",
- default="",
- help="Build a compiler with specified CFLAGS")
- parser.add_option("--cxxflags",
- dest="cxxflags",
- default="",
- help="Build a compiler with specified CXXFLAGS")
- parser.add_option("--cflags_for_target",
- dest="cflags_for_target",
- default="",
- help="Build the target libraries with specified flags")
- parser.add_option("--cxxflags_for_target",
- dest="cxxflags_for_target",
- default="",
- help="Build the target libraries with specified flags")
- parser.add_option("--ldflags",
- dest="ldflags",
- default="",
- help="Build a compiler with specified LDFLAGS")
- parser.add_option("-d",
- "--debug",
- dest="debug",
+ action='store_true',
+ help='Use FEATURES=keepwork to do incremental builds.')
+ parser.add_option('--cflags',
+ dest='cflags',
+ default='',
+ help='Build a compiler with specified CFLAGS')
+ parser.add_option('--cxxflags',
+ dest='cxxflags',
+ default='',
+ help='Build a compiler with specified CXXFLAGS')
+ parser.add_option('--cflags_for_target',
+ dest='cflags_for_target',
+ default='',
+ help='Build the target libraries with specified flags')
+ parser.add_option('--cxxflags_for_target',
+ dest='cxxflags_for_target',
+ default='',
+ help='Build the target libraries with specified flags')
+ parser.add_option('--ldflags',
+ dest='ldflags',
+ default='',
+ help='Build a compiler with specified LDFLAGS')
+ parser.add_option('-d',
+ '--debug',
+ dest='debug',
default=False,
- action="store_true",
- help="Build a compiler with -g3 -O0 appended to both"
- " CFLAGS and CXXFLAGS.")
- parser.add_option("-m",
- "--mount_only",
- dest="mount_only",
+ action='store_true',
+ help='Build a compiler with -g3 -O0 appended to both'
+ ' CFLAGS and CXXFLAGS.')
+ parser.add_option('-m',
+ '--mount_only',
+ dest='mount_only',
default=False,
- action="store_true",
- help="Just mount the tool directories.")
- parser.add_option("-u",
- "--unmount_only",
- dest="unmount_only",
+ action='store_true',
+ help='Just mount the tool directories.')
+ parser.add_option('-u',
+ '--unmount_only',
+ dest='unmount_only',
default=False,
- action="store_true",
- help="Just unmount the tool directories.")
- parser.add_option("--extra_use_flags",
- dest="extra_use_flags",
- default="",
- help="Extra flag for USE, to be passed to the ebuild. "
+ action='store_true',
+ help='Just unmount the tool directories.')
+ parser.add_option('--extra_use_flags',
+ dest='extra_use_flags',
+ default='',
+ help='Extra flag for USE, to be passed to the ebuild. '
"('multislot' and 'mounted_<tool>' are always passed.)")
- parser.add_option("--gcc_enable_ccache",
- dest="gcc_enable_ccache",
+ parser.add_option('--gcc_enable_ccache',
+ dest='gcc_enable_ccache',
default=False,
- action="store_true",
- help="Enable ccache for the gcc invocations")
+ action='store_true',
+ help='Enable ccache for the gcc invocations')
options, _ = parser.parse_args(argv)
chromeos_root = misc.CanonicalizePath(options.chromeos_root)
if options.gcc_dir:
gcc_dir = misc.CanonicalizePath(options.gcc_dir)
- assert gcc_dir and os.path.isdir(gcc_dir), "gcc_dir does not exist!"
+ assert gcc_dir and os.path.isdir(gcc_dir), 'gcc_dir does not exist!'
if options.binutils_dir:
binutils_dir = misc.CanonicalizePath(options.binutils_dir)
- assert os.path.isdir(binutils_dir), "binutils_dir does not exist!"
+ assert os.path.isdir(binutils_dir), 'binutils_dir does not exist!'
if options.gdb_dir:
gdb_dir = misc.CanonicalizePath(options.gdb_dir)
- assert os.path.isdir(gdb_dir), "gdb_dir does not exist!"
+ assert os.path.isdir(gdb_dir), 'gdb_dir does not exist!'
if options.unmount_only:
options.mount_only = False
elif options.mount_only:
options.unmount_only = False
build_env = {}
if options.cflags:
- build_env["CFLAGS"] = "`portageq envvar CFLAGS` " + options.cflags
+ build_env['CFLAGS'] = '`portageq envvar CFLAGS` ' + options.cflags
if options.cxxflags:
- build_env["CXXFLAGS"] = "`portageq envvar CXXFLAGS` " + options.cxxflags
+ build_env['CXXFLAGS'] = '`portageq envvar CXXFLAGS` ' + options.cxxflags
if options.cflags_for_target:
- build_env["CFLAGS_FOR_TARGET"] = options.cflags_for_target
+ build_env['CFLAGS_FOR_TARGET'] = options.cflags_for_target
if options.cxxflags_for_target:
- build_env["CXXFLAGS_FOR_TARGET"] = options.cxxflags_for_target
+ build_env['CXXFLAGS_FOR_TARGET'] = options.cxxflags_for_target
if options.ldflags:
- build_env["LDFLAGS"] = options.ldflags
+ build_env['LDFLAGS'] = options.ldflags
if options.debug:
- debug_flags = "-g3 -O0"
- if "CFLAGS" in build_env:
- build_env["CFLAGS"] += " %s" % (debug_flags)
+ debug_flags = '-g3 -O0'
+ if 'CFLAGS' in build_env:
+ build_env['CFLAGS'] += ' %s' % (debug_flags)
else:
- build_env["CFLAGS"] = debug_flags
- if "CXXFLAGS" in build_env:
- build_env["CXXFLAGS"] += " %s" % (debug_flags)
+ build_env['CFLAGS'] = debug_flags
+ if 'CXXFLAGS' in build_env:
+ build_env['CXXFLAGS'] += ' %s' % (debug_flags)
else:
- build_env["CXXFLAGS"] = debug_flags
+ build_env['CXXFLAGS'] = debug_flags
if options.extra_use_flags:
- build_env["USE"] = options.extra_use_flags
+ build_env['USE'] = options.extra_use_flags
# Create toolchain parts
toolchain_parts = {}
- for board in options.board.split(","):
+ for board in options.board.split(','):
if options.gcc_dir:
- tp = ToolchainPart("gcc", gcc_dir, chromeos_root, board,
+ tp = ToolchainPart('gcc', gcc_dir, chromeos_root, board,
not options.noincremental, build_env,
options.gcc_enable_ccache)
toolchain_parts[tp.tag] = tp
tp.RunSetupBoardIfNecessary()
if options.binutils_dir:
- tp = ToolchainPart("binutils", binutils_dir, chromeos_root, board,
+ tp = ToolchainPart('binutils', binutils_dir, chromeos_root, board,
not options.noincremental, build_env)
toolchain_parts[tp.tag] = tp
tp.RunSetupBoardIfNecessary()
if options.gdb_dir:
- tp = ToolchainPart("gdb", gdb_dir, chromeos_root, board,
+ tp = ToolchainPart('gdb', gdb_dir, chromeos_root, board,
not options.noincremental, build_env)
toolchain_parts[tp.tag] = tp
tp.RunSetupBoardIfNecessary()
@@ -329,9 +325,10 @@ def Main(argv):
else:
rv = rv + tp.Build()
finally:
- print "Exiting..."
+ print 'Exiting...'
return rv
-if __name__ == "__main__":
+
+if __name__ == '__main__':
retval = Main(sys.argv)
sys.exit(retval)
diff --git a/build_tool.py b/build_tool.py
index 3d7dc6ab..6ff20645 100755
--- a/build_tool.py
+++ b/build_tool.py
@@ -46,9 +46,14 @@ class Bootstrapper(object):
"""Class that handles bootstrap process.
"""
- def __init__(self, chromeos_root, gcc_branch=None, gcc_dir=None,
- binutils_branch=None, binutils_dir=None,
- board=None, disable_2nd_bootstrap=False,
+ def __init__(self,
+ chromeos_root,
+ gcc_branch=None,
+ gcc_dir=None,
+ binutils_branch=None,
+ binutils_dir=None,
+ board=None,
+ disable_2nd_bootstrap=False,
setup_tool_ebuild_file_only=False):
self._chromeos_root = chromeos_root
@@ -82,14 +87,14 @@ class Bootstrapper(object):
def SubmitToLocalBranch(self):
"""Copy source code to the chromium source tree and submit it locally."""
if self._gcc_dir:
- if not self.SubmitToolToLocalBranch(
- tool_name='gcc', tool_dir=self._gcc_dir):
+ if not self.SubmitToolToLocalBranch(tool_name='gcc',
+ tool_dir=self._gcc_dir):
return False
self._gcc_branch = TEMP_BRANCH_NAME
if self._binutils_dir:
- if not self.SubmitToolToLocalBranch(
- tool_name='binutils', tool_dir=self._binutils_dir):
+ if not self.SubmitToolToLocalBranch(tool_name='binutils',
+ tool_dir=self._binutils_dir):
return False
self._binutils_branch = TEMP_BRANCH_NAME
@@ -111,13 +116,13 @@ class Bootstrapper(object):
# 0. Test to see if git tree is free of local changes.
if not misc.IsGitTreeClean(chrome_tool_dir):
- self._logger.LogError(
- 'Git repository "{0}" not clean, aborted.'.format(chrome_tool_dir))
+ self._logger.LogError('Git repository "{0}" not clean, aborted.'.format(
+ chrome_tool_dir))
return False
# 1. Checkout/create a (new) branch for testing.
- command = 'cd "{0}" && git checkout -B {1}'.format(
- chrome_tool_dir, TEMP_BRANCH_NAME)
+ command = 'cd "{0}" && git checkout -B {1}'.format(chrome_tool_dir,
+ TEMP_BRANCH_NAME)
ret = self._ce.RunCommand(command)
if ret:
self._logger.LogError('Failed to create a temp branch for test, aborted.')
@@ -125,8 +130,8 @@ class Bootstrapper(object):
if self.IsTreeSame(tool_dir, chrome_tool_dir):
self._logger.LogOutput(
- '"{0}" and "{1}" are the same, sync skipped.'.format(
- tool_dir, chrome_tool_dir))
+ '"{0}" and "{1}" are the same, sync skipped.'.format(tool_dir,
+ chrome_tool_dir))
return True
# 2. Sync sources from user provided tool dir to chromiumos tool git.
@@ -137,8 +142,7 @@ class Bootstrapper(object):
self._ce.RunCommand(
'cd {0} && find . -maxdepth 1 -not -name ".git" -not -name "." '
r'\( -type f -exec rm {{}} \; -o '
- r' -type d -exec rm -fr {{}} \; \)'.format(
- chrome_tool_dir))
+ r' -type d -exec rm -fr {{}} \; \)'.format(chrome_tool_dir))
local_tool_repo.MapSources(chrome_tool_repo.GetRoot())
# 3. Ensure after sync tree is the same.
@@ -153,15 +157,17 @@ class Bootstrapper(object):
cmd = 'cd {0} && git log -1 --pretty=oneline'.format(tool_dir)
tool_dir_extra_info = None
ret, tool_dir_extra_info, _ = self._ce.RunCommandWOutput(
- cmd, print_to_console=False)
+ cmd,
+ print_to_console=False)
commit_message = 'Synced with tool source tree at - "{0}".'.format(tool_dir)
if not ret:
- commit_message += '\nGit log for {0}:\n{1}'.format(
- tool_dir, tool_dir_extra_info)
+ commit_message += '\nGit log for {0}:\n{1}'.format(tool_dir,
+ tool_dir_extra_info)
if chrome_tool_repo.CommitLocally(commit_message):
- self._logger.LogError('Commit to local branch "{0}" failed, aborted.'.
- format(TEMP_BRANCH_NAME))
+ self._logger.LogError(
+ 'Commit to local branch "{0}" failed, aborted.'.format(
+ TEMP_BRANCH_NAME))
return False
return True
@@ -199,19 +205,19 @@ class Bootstrapper(object):
"""
chrome_tool_dir = self.GetChromeOsToolDir(tool_name)
- command = 'cd "{0}" && git checkout {1}'.format(
- chrome_tool_dir, tool_branch)
+ command = 'cd "{0}" && git checkout {1}'.format(chrome_tool_dir,
+ tool_branch)
if not self._ce.RunCommand(command, print_to_console=True):
# Get 'TREE' value of this commit
command = ('cd "{0}" && git cat-file -p {1} '
'| grep -E "^tree [a-f0-9]+$" '
'| cut -d" " -f2').format(chrome_tool_dir, tool_branch)
- ret, stdout, _ = self._ce.RunCommandWOutput(
- command, print_to_console=False)
+ ret, stdout, _ = self._ce.RunCommandWOutput(command,
+ print_to_console=False)
# Pipe operation always has a zero return value. So need to check if
# stdout is valid.
- if not ret and stdout and re.match(
- '[0-9a-h]{40}', stdout.strip(), re.IGNORECASE):
+ if not ret and stdout and re.match('[0-9a-h]{40}', stdout.strip(),
+ re.IGNORECASE):
tool_branch_tree = stdout.strip()
self._logger.LogOutput('Find tree for {0} branch "{1}" - "{2}"'.format(
tool_name, tool_branch, tool_branch_tree))
@@ -256,8 +262,8 @@ class Bootstrapper(object):
"""
# To get the active gcc ebuild file, we need a workable chroot first.
- if not os.path.exists(
- os.path.join(self._chromeos_root, 'chroot')) and self._ce.RunCommand(
+ if not os.path.exists(os.path.join(
+ self._chromeos_root, 'chroot')) and self._ce.RunCommand(
'cd "{0}" && cros_sdk --create'.format(self._chromeos_root)):
self._logger.LogError(('Failed to install a initial chroot, aborted.\n'
'If previous bootstrap failed, do a '
@@ -266,15 +272,16 @@ class Bootstrapper(object):
return (False, None, None)
rv, stdout, _ = self._ce.ChrootRunCommandWOutput(
- self._chromeos_root, 'equery w sys-devel/{0}'.format(tool_name),
+ self._chromeos_root,
+ 'equery w sys-devel/{0}'.format(tool_name),
print_to_console=True)
if rv:
- self._logger.LogError(
- ('Failed to execute inside chroot '
- '"equery w sys-devel/{0}", aborted.').format(tool_name))
+ self._logger.LogError(('Failed to execute inside chroot '
+ '"equery w sys-devel/{0}", aborted.').format(
+ tool_name))
return (False, None, None)
- m = re.match(r'^.*/({0}/(.*\.ebuild))$'.format(
- EBUILD_PATH_PATTERN.format(tool_name)), stdout)
+ m = re.match(r'^.*/({0}/(.*\.ebuild))$'.format(EBUILD_PATH_PATTERN.format(
+ tool_name)), stdout)
if not m:
self._logger.LogError(
('Failed to find {0} ebuild file, aborted. '
@@ -308,9 +315,9 @@ class Bootstrapper(object):
if self._binutils_branch:
tool_branch_githash = misc.GitGetCommitHash(
self.GetChromeOsToolDir('binutils'), self._binutils_branch)
- if not self.InplaceModifyToolEbuildFile(
- tool_branch_githash, self._binutils_branch_tree,
- self._binutils_ebuild_file):
+ if not self.InplaceModifyToolEbuildFile(tool_branch_githash,
+ self._binutils_branch_tree,
+ self._binutils_ebuild_file):
return False
return True
@@ -353,11 +360,11 @@ class Bootstrapper(object):
Absolute git path for the tool.
"""
- return os.path.join(
- self._chromeos_root, REPO_PATH_PATTERN.format(tool_name))
+ return os.path.join(self._chromeos_root,
+ REPO_PATH_PATTERN.format(tool_name))
- def InplaceModifyToolEbuildFile(
- self, tool_branch_githash, tool_branch_tree, tool_ebuild_file):
+ def InplaceModifyToolEbuildFile(self, tool_branch_githash, tool_branch_tree,
+ tool_ebuild_file):
"""Using sed to fill properly values into the ebuild file.
Args:
@@ -375,8 +382,7 @@ class Bootstrapper(object):
'-e \'/^CROS_WORKON_TREE=".*"/i'
' # The following line is modified by script.\' '
'-e \'s!^CROS_WORKON_TREE=".*"$!CROS_WORKON_TREE="{1}"!\' '
- '{2}').format(tool_branch_githash,
- tool_branch_tree,
+ '{2}').format(tool_branch_githash, tool_branch_tree,
tool_ebuild_file)
rv = self._ce.RunCommand(command)
if rv:
@@ -389,8 +395,8 @@ class Bootstrapper(object):
self._logger.LogWarning(
('Ebuild file "{0}" is modified, to revert the file - \n'
'bootstrap_compiler.py --chromeos_root={1} '
- '--reset_tool_ebuild_file').format(
- tool_ebuild_file, self._chromeos_root))
+ '--reset_tool_ebuild_file').format(tool_ebuild_file,
+ self._chromeos_root))
return True
def DoBuildForBoard(self):
@@ -426,21 +432,21 @@ class Bootstrapper(object):
else:
target = misc.GetCtargetFromBoard(board, self._chromeos_root)
if not target:
- self._logger.LogError(
- 'Unsupported board "{0}", skip.'.format(board))
+ self._logger.LogError('Unsupported board "{0}", skip.'.format(board))
failed.append(board)
continue
command = 'sudo emerge cross-{0}/{1}'.format(target, tool_name)
- rv = self._ce.ChrootRunCommand(self._chromeos_root, command,
+ rv = self._ce.ChrootRunCommand(self._chromeos_root,
+ command,
print_to_console=True)
if rv:
- self._logger.LogError(
- 'Build {0} failed for {1}, aborted.'.format(tool_name, board))
+ self._logger.LogError('Build {0} failed for {1}, aborted.'.format(
+ tool_name, board))
failed.append(board)
else:
- self._logger.LogOutput(
- 'Successfully built {0} for board {1}.'.format(tool_name, board))
+ self._logger.LogOutput('Successfully built {0} for board {1}.'.format(
+ tool_name, board))
if failed:
self._logger.LogError(
@@ -510,8 +516,8 @@ class Bootstrapper(object):
# Install amd64-host into a new chroot.
cmd = ('cd {0} && cros_sdk --chroot new-sdk-chroot --download --replace '
- '--nousepkg --url file://{1}').format(
- self._chromeos_root, sdk_package)
+ '--nousepkg --url file://{1}').format(self._chromeos_root,
+ sdk_package)
rv = self._ce.RunCommand(cmd, print_to_console=True)
if rv:
self._logger.LogError('Failed to install "built-sdk.tar.xz".')
@@ -532,10 +538,8 @@ class Bootstrapper(object):
True if everything is ok.
"""
- if (self.SubmitToLocalBranch() and
- self.CheckoutBranch() and
- self.FindEbuildFile() and
- self.InplaceModifyEbuildFile()):
+ if (self.SubmitToLocalBranch() and self.CheckoutBranch() and
+ self.FindEbuildFile() and self.InplaceModifyEbuildFile()):
if self._setup_tool_ebuild_file_only:
# Everything is done, we are good.
ret = True
@@ -544,9 +548,8 @@ class Bootstrapper(object):
ret = self.DoBuildForBoard()
else:
# This implies '--bootstrap'.
- ret = (self.DoBootstrapping() and
- (self._disable_2nd_bootstrap or
- self.BuildAndInstallAmd64Host()))
+ ret = (self.DoBootstrapping() and (self._disable_2nd_bootstrap or
+ self.BuildAndInstallAmd64Host()))
else:
ret = False
return ret
@@ -554,61 +557,79 @@ class Bootstrapper(object):
def Main(argv):
parser = optparse.OptionParser()
- parser.add_option('-c', '--chromeos_root', dest='chromeos_root',
+ parser.add_option('-c',
+ '--chromeos_root',
+ dest='chromeos_root',
help=('Optional. ChromeOs root dir. '
'When not specified, chromeos root will be deduced '
'from current working directory.'))
- parser.add_option('--gcc_branch', dest='gcc_branch',
+ parser.add_option('--gcc_branch',
+ dest='gcc_branch',
help=('The branch to test against. '
'This branch must be a local branch '
'inside "src/third_party/gcc". '
'Notice, this must not be used with "--gcc_dir".'))
- parser.add_option('--binutils_branch', dest='binutils_branch',
+ parser.add_option('--binutils_branch',
+ dest='binutils_branch',
help=('The branch to test against binutils. '
'This branch must be a local branch '
'inside "src/third_party/binutils". '
'Notice, this must not be used with '
'"--binutils_dir".'))
- parser.add_option('-g', '--gcc_dir', dest='gcc_dir',
+ parser.add_option('-g',
+ '--gcc_dir',
+ dest='gcc_dir',
help=('Use a local gcc tree to do bootstrapping. '
'Notice, this must not be used with "--gcc_branch".'))
- parser.add_option('--binutils_dir', dest='binutils_dir',
+ parser.add_option('--binutils_dir',
+ dest='binutils_dir',
help=('Use a local binutils tree to do bootstrapping. '
'Notice, this must not be used with '
'"--binutils_branch".'))
- parser.add_option('--fixperm', dest='fixperm',
- default=False, action='store_true',
+ parser.add_option('--fixperm',
+ dest='fixperm',
+ default=False,
+ action='store_true',
help=('Fix the (notorious) permission error '
'while trying to bootstrap the chroot. '
'Note this takes an extra 10-15 minutes '
'and is only needed once per chromiumos tree.'))
parser.add_option('--setup_tool_ebuild_file_only',
dest='setup_tool_ebuild_file_only',
- default=False, action='store_true',
+ default=False,
+ action='store_true',
help=('Setup gcc and/or binutils ebuild file '
'to pick up the branch (--gcc/binutils_branch) or '
'use gcc and/or binutils source (--gcc/binutils_dir) '
'and exit. Keep chroot as is. This should not be '
'used with --gcc/binutils_dir/branch options.'))
- parser.add_option('--reset_tool_ebuild_file', dest='reset_tool_ebuild_file',
- default=False, action='store_true',
+ parser.add_option('--reset_tool_ebuild_file',
+ dest='reset_tool_ebuild_file',
+ default=False,
+ action='store_true',
help=('Reset the modification that is done by this script.'
'Note, when this script is running, it will modify '
'the active gcc/binutils ebuild file. Use this '
'option to reset (what this script has done) '
'and exit. This should not be used with -- '
'gcc/binutils_dir/branch options.'))
- parser.add_option('--board', dest='board', default=None,
+ parser.add_option('--board',
+ dest='board',
+ default=None,
help=('Only build toolchain for specific board(s). '
'Use "host" to build for host. '
'Use "," to seperate multiple boards. '
'This does not perform a chroot bootstrap.'))
- parser.add_option('--bootstrap', dest='bootstrap',
- default=False, action='store_true',
+ parser.add_option('--bootstrap',
+ dest='bootstrap',
+ default=False,
+ action='store_true',
help=('Performs a chroot bootstrap. '
'Note, this will *destroy* your current chroot.'))
- parser.add_option('--disable-2nd-bootstrap', dest='disable_2nd_bootstrap',
- default=False, action='store_true',
+ parser.add_option('--disable-2nd-bootstrap',
+ dest='disable_2nd_bootstrap',
+ default=False,
+ action='store_true',
help=('Disable a second bootstrap '
'(build of amd64-host stage).'))
@@ -628,28 +649,28 @@ def Main(argv):
parser.error('Missing or failing to deduce mandatory option "--chromeos".')
return 1
- options.chromeos_root = os.path.abspath(
- os.path.expanduser(options.chromeos_root))
+ options.chromeos_root = os.path.abspath(os.path.expanduser(
+ options.chromeos_root))
if not os.path.isdir(options.chromeos_root):
- logger.GetLogger().LogError(
- '"{0}" does not exist.'.format(options.chromeos_root))
+ logger.GetLogger().LogError('"{0}" does not exist.'.format(
+ options.chromeos_root))
return 1
if options.fixperm:
# Fix perm error before continuing.
- cmd = (r'sudo find "{0}" \( -name ".cache" -type d -prune \) -o '
- r'\( -name "chroot" -type d -prune \) -o '
- r'\( -type f -exec chmod a+r {{}} \; \) -o '
- r'\( -type d -exec chmod a+rx {{}} \; \)').format(
- options.chromeos_root)
+ cmd = (
+ r'sudo find "{0}" \( -name ".cache" -type d -prune \) -o '
+ r'\( -name "chroot" -type d -prune \) -o '
+ r'\( -type f -exec chmod a+r {{}} \; \) -o '
+ r'\( -type d -exec chmod a+rx {{}} \; \)').format(options.chromeos_root)
logger.GetLogger().LogOutput(
'Fixing perm issues for chromeos root, this might take some time.')
command_executer.GetCommandExecuter().RunCommand(cmd)
if options.reset_tool_ebuild_file:
- if (options.gcc_dir or options.gcc_branch or
- options.binutils_dir or options.binutils_branch):
+ if (options.gcc_dir or options.gcc_branch or options.binutils_dir or
+ options.binutils_branch):
logger.GetLogger().LogWarning(
'Ignoring any "--gcc/binutils_dir" and/or "--gcc/binutils_branch".')
if options.setup_tool_ebuild_file_only:
@@ -664,8 +685,8 @@ def Main(argv):
if options.gcc_dir:
options.gcc_dir = os.path.abspath(os.path.expanduser(options.gcc_dir))
if not os.path.isdir(options.gcc_dir):
- logger.GetLogger().LogError(
- '"{0}" does not exist.'.format(options.gcc_dir))
+ logger.GetLogger().LogError('"{0}" does not exist.'.format(
+ options.gcc_dir))
return 1
if options.gcc_branch and options.gcc_dir:
@@ -673,11 +694,11 @@ def Main(argv):
return 1
if options.binutils_dir:
- options.binutils_dir = os.path.abspath(
- os.path.expanduser(options.binutils_dir))
+ options.binutils_dir = os.path.abspath(os.path.expanduser(
+ options.binutils_dir))
if not os.path.isdir(options.binutils_dir):
- logger.GetLogger().LogError(
- '"{0}" does not exist.'.format(options.binutils_dir))
+ logger.GetLogger().LogError('"{0}" does not exist.'.format(
+ options.binutils_dir))
return 1
if options.binutils_branch and options.binutils_dir:
@@ -685,8 +706,8 @@ def Main(argv):
'"--binutils_branch" can be specified.')
return 1
- if (not (options.binutils_branch or options.binutils_dir or
- options.gcc_branch or options.gcc_dir)):
+ if (not (options.binutils_branch or options.binutils_dir or options.gcc_branch
+ or options.gcc_dir)):
parser.error(('At least one of "--gcc_dir", "--gcc_branch", '
'"--binutils_dir" and "--binutils_branch" must '
'be specified.'))
@@ -708,7 +729,8 @@ def Main(argv):
if Bootstrapper(
options.chromeos_root,
- gcc_branch=options.gcc_branch, gcc_dir=options.gcc_dir,
+ gcc_branch=options.gcc_branch,
+ gcc_dir=options.gcc_dir,
binutils_branch=options.binutils_branch,
binutils_dir=options.binutils_dir,
board=options.board,
diff --git a/buildbot_test_toolchains.py b/buildbot_test_toolchains.py
index 8b5bce7b..feb11e16 100755
--- a/buildbot_test_toolchains.py
+++ b/buildbot_test_toolchains.py
@@ -24,47 +24,52 @@ from utils import logger
from utils import buildbot_utils
# CL that updated GCC ebuilds to use 'next_gcc'.
-USE_NEXT_GCC_PATCH = "230260"
+USE_NEXT_GCC_PATCH = '230260'
# CL that uses LLVM to build the peppy image.
-USE_LLVM_PATCH = "295217"
+USE_LLVM_PATCH = '295217'
# The boards on which we run weekly reports
-WEEKLY_REPORT_BOARDS = ["lumpy"]
+WEEKLY_REPORT_BOARDS = ['lumpy']
-CROSTC_ROOT = "/usr/local/google/crostc"
-ROLE_ACCOUNT = "mobiletc-prebuild"
+CROSTC_ROOT = '/usr/local/google/crostc'
+ROLE_ACCOUNT = 'mobiletc-prebuild'
TOOLCHAIN_DIR = os.path.dirname(os.path.realpath(__file__))
-MAIL_PROGRAM = "~/var/bin/mail-sheriff"
-WEEKLY_REPORTS_ROOT = os.path.join(CROSTC_ROOT, "weekly_test_data")
-PENDING_ARCHIVES_DIR = os.path.join(CROSTC_ROOT, "pending_archives")
-NIGHTLY_TESTS_DIR = os.path.join(CROSTC_ROOT, "nightly_test_reports")
+MAIL_PROGRAM = '~/var/bin/mail-sheriff'
+WEEKLY_REPORTS_ROOT = os.path.join(CROSTC_ROOT, 'weekly_test_data')
+PENDING_ARCHIVES_DIR = os.path.join(CROSTC_ROOT, 'pending_archives')
+NIGHTLY_TESTS_DIR = os.path.join(CROSTC_ROOT, 'nightly_test_reports')
+
class ToolchainComparator(object):
"""Class for doing the nightly tests work."""
- def __init__(self, board, remotes, chromeos_root, weekday,
- patches, noschedv2=False):
+ def __init__(self,
+ board,
+ remotes,
+ chromeos_root,
+ weekday,
+ patches,
+ noschedv2=False):
self._board = board
self._remotes = remotes
self._chromeos_root = chromeos_root
self._base_dir = os.getcwd()
self._ce = command_executer.GetCommandExecuter()
self._l = logger.GetLogger()
- self._build = "%s-release" % board
+ self._build = '%s-release' % board
self._patches = patches.split(',')
self._patches_string = '_'.join(str(p) for p in self._patches)
self._noschedv2 = noschedv2
if not weekday:
- self._weekday = time.strftime("%a")
+ self._weekday = time.strftime('%a')
else:
self._weekday = weekday
timestamp = datetime.datetime.strftime(datetime.datetime.now(),
- "%Y-%m-%d_%H:%M:%S")
+ '%Y-%m-%d_%H:%M:%S')
self._reports_dir = os.path.join(NIGHTLY_TESTS_DIR,
- "%s.%s" % (timestamp, board),
- )
+ '%s.%s' % (timestamp, board),)
def _ParseVanillaImage(self, trybot_image):
"""Parse a trybot artifact name to get corresponding vanilla image.
@@ -74,19 +79,19 @@ class ToolchainComparator(object):
corresponding official build name, e.g. 'daisy-release/R40-6394.0.0'.
"""
start_pos = trybot_image.find(self._build)
- end_pos = trybot_image.rfind("-b")
+ end_pos = trybot_image.rfind('-b')
vanilla_image = trybot_image[start_pos:end_pos]
return vanilla_image
def _FinishSetup(self):
"""Make sure testing_rsa file is properly set up."""
# Fix protections on ssh key
- command = ("chmod 600 /var/cache/chromeos-cache/distfiles/target"
- "/chrome-src-internal/src/third_party/chromite/ssh_keys"
- "/testing_rsa")
+ command = ('chmod 600 /var/cache/chromeos-cache/distfiles/target'
+ '/chrome-src-internal/src/third_party/chromite/ssh_keys'
+ '/testing_rsa')
ret_val = self._ce.ChrootRunCommand(self._chromeos_root, command)
if ret_val != 0:
- raise RuntimeError("chmod for testing_rsa failed")
+ raise RuntimeError('chmod for testing_rsa failed')
def _TestImages(self, trybot_image, vanilla_image):
"""Create crosperf experiment file.
@@ -94,17 +99,15 @@ class ToolchainComparator(object):
Given the names of the trybot and vanilla images, create the
appropriate crosperf experiment file and launch crosperf on it.
"""
- experiment_file_dir = os.path.join(self._chromeos_root, "..",
- self._weekday)
- experiment_file_name = "%s_toolchain_experiment.txt" % self._board
+ experiment_file_dir = os.path.join(self._chromeos_root, '..', self._weekday)
+ experiment_file_name = '%s_toolchain_experiment.txt' % self._board
- compiler_string = "gcc"
+ compiler_string = 'gcc'
if USE_LLVM_PATCH in self._patches_string:
- experiment_file_name = "%s_llvm_experiment.txt" % self._board
- compiler_string = "llvm"
+ experiment_file_name = '%s_llvm_experiment.txt' % self._board
+ compiler_string = 'llvm'
- experiment_file = os.path.join(experiment_file_dir,
- experiment_file_name)
+ experiment_file = os.path.join(experiment_file_dir, experiment_file_name)
experiment_header = """
board: %s
remote: %s
@@ -116,7 +119,8 @@ class ToolchainComparator(object):
iterations: 3
}
"""
- with open(experiment_file, "w") as f:
+
+ with open(experiment_file, 'w') as f:
f.write(experiment_header)
f.write(experiment_tests)
@@ -130,9 +134,9 @@ class ToolchainComparator(object):
""" % (self._chromeos_root, vanilla_image)
f.write(official_image)
- label_string = "%s_trybot_image" % compiler_string
+ label_string = '%s_trybot_image' % compiler_string
if USE_NEXT_GCC_PATCH in self._patches:
- label_string = "gcc_next_trybot_image"
+ label_string = 'gcc_next_trybot_image'
experiment_image = """
%s {
@@ -144,12 +148,10 @@ class ToolchainComparator(object):
compiler_string)
f.write(experiment_image)
- crosperf = os.path.join(TOOLCHAIN_DIR,
- "crosperf",
- "crosperf")
+ crosperf = os.path.join(TOOLCHAIN_DIR, 'crosperf', 'crosperf')
noschedv2_opts = '--noschedv2' if self._noschedv2 else ''
- command = ("{crosperf} --no_email=True --results_dir={r_dir} "
- "--json_report=True {noschedv2_opts} {exp_file}").format(
+ command = ('{crosperf} --no_email=True --results_dir={r_dir} '
+ '--json_report=True {noschedv2_opts} {exp_file}').format(
crosperf=crosperf,
r_dir=self._reports_dir,
noschedv2_opts=noschedv2_opts,
@@ -160,7 +162,7 @@ class ToolchainComparator(object):
raise RuntimeError("Couldn't run crosperf!")
else:
# Copy json report to pending archives directory.
- command = "cp %s/*.json %s/." % (self._reports_dir, PENDING_ARCHIVES_DIR)
+ command = 'cp %s/*.json %s/.' % (self._reports_dir, PENDING_ARCHIVES_DIR)
ret = self._ce.RunCommand(command)
return
@@ -175,12 +177,12 @@ class ToolchainComparator(object):
dry_run = False
if os.getlogin() != ROLE_ACCOUNT:
- self._l.LogOutput("Running this from non-role account; not copying "
- "tar files for weekly reports.")
+ self._l.LogOutput('Running this from non-role account; not copying '
+ 'tar files for weekly reports.')
dry_run = True
- images_path = os.path.join(os.path.realpath(self._chromeos_root),
- "chroot/tmp")
+ images_path = os.path.join(
+ os.path.realpath(self._chromeos_root), 'chroot/tmp')
data_dir = os.path.join(WEEKLY_REPORTS_ROOT, self._board)
dest_dir = os.path.join(data_dir, self._weekday)
@@ -188,46 +190,42 @@ class ToolchainComparator(object):
os.makedirs(dest_dir)
# Make sure dest_dir is empty (clean out last week's data).
- cmd = "cd %s; rm -Rf %s_*_image*" % (dest_dir, self._weekday)
+ cmd = 'cd %s; rm -Rf %s_*_image*' % (dest_dir, self._weekday)
if dry_run:
- print("CMD: %s" % cmd)
+ print('CMD: %s' % cmd)
else:
self._ce.RunCommand(cmd)
# Now create new tar files and copy them over.
- labels = ["test", "vanilla"]
+ labels = ['test', 'vanilla']
for label_name in labels:
- if label_name == "test":
+ if label_name == 'test':
test_path = trybot_image
else:
test_path = vanilla_image
- tar_file_name = "%s_%s_image.tar" % (self._weekday, label_name)
- cmd = ("cd %s; tar -cvf %s %s/chromiumos_test_image.bin; "
- "cp %s %s/.") % (images_path,
- tar_file_name,
- test_path,
- tar_file_name,
- dest_dir)
+ tar_file_name = '%s_%s_image.tar' % (self._weekday, label_name)
+ cmd = ('cd %s; tar -cvf %s %s/chromiumos_test_image.bin; '
+ 'cp %s %s/.') % (images_path, tar_file_name, test_path,
+ tar_file_name, dest_dir)
if dry_run:
- print("CMD: %s" % cmd)
+ print('CMD: %s' % cmd)
tar_ret = 0
else:
tar_ret = self._ce.RunCommand(cmd)
if tar_ret:
- self._l.LogOutput("Error while creating/copying test tar file(%s)."
- % tar_file_name)
+ self._l.LogOutput('Error while creating/copying test tar file(%s).' %
+ tar_file_name)
def _SendEmail(self):
"""Find email message generated by crosperf and send it."""
- filename = os.path.join(self._reports_dir,
- "msg_body.html")
+ filename = os.path.join(self._reports_dir, 'msg_body.html')
if (os.path.exists(filename) and
os.path.exists(os.path.expanduser(MAIL_PROGRAM))):
- email_title = "buildbot test results"
+ email_title = 'buildbot test results'
if self._patches_string == USE_LLVM_PATCH:
- email_title = "buildbot llvm test results"
- command = ('cat %s | %s -s "%s, %s" -team -html'
- % (filename, MAIL_PROGRAM, email_title, self._board))
+ email_title = 'buildbot llvm test results'
+ command = ('cat %s | %s -s "%s, %s" -team -html' %
+ (filename, MAIL_PROGRAM, email_title, self._board))
self._ce.RunCommand(command)
def DoAll(self):
@@ -237,8 +235,7 @@ class ToolchainComparator(object):
crosperf, and copy images into seven-day report directories.
"""
date_str = datetime.date.today()
- description = "master_%s_%s_%s" % (self._patches_string,
- self._build,
+ description = 'master_%s_%s_%s' % (self._patches_string, self._build,
date_str)
trybot_image = buildbot_utils.GetTrybotImage(self._chromeos_root,
self._build,
@@ -248,13 +245,13 @@ class ToolchainComparator(object):
vanilla_image = self._ParseVanillaImage(trybot_image)
- print ("trybot_image: %s" % trybot_image)
- print ("vanilla_image: %s" % vanilla_image)
+ print('trybot_image: %s' % trybot_image)
+ print('vanilla_image: %s' % vanilla_image)
if len(trybot_image) == 0:
- self._l.LogError("Unable to find trybot_image for %s!" % description)
+ self._l.LogError('Unable to find trybot_image for %s!' % description)
return 1
if len(vanilla_image) == 0:
- self._l.LogError("Unable to find vanilla image for %s!" % description)
+ self._l.LogError('Unable to find vanilla image for %s!' % description)
return 1
if os.getlogin() == ROLE_ACCOUNT:
self._FinishSetup()
@@ -274,51 +271,51 @@ def Main(argv):
# Common initializations
command_executer.InitCommandExecuter()
parser = optparse.OptionParser()
- parser.add_option("--remote",
- dest="remote",
- help="Remote machines to run tests on.")
- parser.add_option("--board",
- dest="board",
- default="x86-zgb",
- help="The target board.")
- parser.add_option("--chromeos_root",
- dest="chromeos_root",
- help="The chromeos root from which to run tests.")
- parser.add_option("--weekday", default="",
- dest="weekday",
- help="The day of the week for which to run tests.")
- parser.add_option("--patch",
- dest="patches",
- help="The patches to use for the testing, "
+ parser.add_option('--remote',
+ dest='remote',
+ help='Remote machines to run tests on.')
+ parser.add_option('--board',
+ dest='board',
+ default='x86-zgb',
+ help='The target board.')
+ parser.add_option('--chromeos_root',
+ dest='chromeos_root',
+ help='The chromeos root from which to run tests.')
+ parser.add_option('--weekday',
+ default='',
+ dest='weekday',
+ help='The day of the week for which to run tests.')
+ parser.add_option('--patch',
+ dest='patches',
+ help='The patches to use for the testing, '
"seprate the patch numbers with ',' "
- "for more than one patches.")
- parser.add_option("--noschedv2",
- dest="noschedv2",
- action="store_true",
+ 'for more than one patches.')
+ parser.add_option('--noschedv2',
+ dest='noschedv2',
+ action='store_true',
default=False,
- help="Pass --noschedv2 to crosperf.")
+ help='Pass --noschedv2 to crosperf.')
options, _ = parser.parse_args(argv)
if not options.board:
- print("Please give a board.")
+ print('Please give a board.')
return 1
if not options.remote:
- print("Please give at least one remote machine.")
+ print('Please give at least one remote machine.')
return 1
if not options.chromeos_root:
- print("Please specify the ChromeOS root directory.")
+ print('Please specify the ChromeOS root directory.')
return 1
if options.patches:
patches = options.patches
else:
patches = USE_NEXT_GCC_PATCH
- fc = ToolchainComparator(options.board, options.remote,
- options.chromeos_root, options.weekday, patches,
- options.noschedv2)
+ fc = ToolchainComparator(options.board, options.remote, options.chromeos_root,
+ options.weekday, patches, options.noschedv2)
return fc.DoAll()
-if __name__ == "__main__":
+if __name__ == '__main__':
retval = Main(sys.argv)
sys.exit(retval)
diff --git a/command_executer_timeout_test.py b/command_executer_timeout_test.py
index 022ce441..c7c94cbf 100755
--- a/command_executer_timeout_test.py
+++ b/command_executer_timeout_test.py
@@ -2,7 +2,7 @@
#
# Copyright 2010 Google Inc. All Rights Reserved.
-__author__ = "asharif@google.com (Ahmad Sharif)"
+__author__ = 'asharif@google.com (Ahmad Sharif)'
import optparse
import os
@@ -13,19 +13,20 @@ from utils import command_executer
def Usage(parser, message):
- print "ERROR: " + message
+ print 'ERROR: ' + message
parser.print_help()
sys.exit(0)
+
def Main(argv):
parser = optparse.OptionParser()
options = parser.parse_args(argv)[0]
- command = "sleep 1000"
+ command = 'sleep 1000'
ce = command_executer.GetCommandExecuter()
ce.RunCommand(command, command_timeout=1)
return 0
-if __name__ == "__main__":
+if __name__ == '__main__':
Main(sys.argv)
diff --git a/compare_benchmarks.py b/compare_benchmarks.py
index e3164bc9..5892aab0 100755
--- a/compare_benchmarks.py
+++ b/compare_benchmarks.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
-
"""Script to compare ChromeOS benchmarks
Inputs:
@@ -15,7 +14,7 @@ the results and presents it, along with a geometric mean.
"""
-__author__ = "bjanakiraman@google.com (Bhaskar Janakiraman)"
+__author__ = 'bjanakiraman@google.com (Bhaskar Janakiraman)'
import glob
import math
@@ -29,15 +28,14 @@ import run_tests
from utils import command_executer
from utils import logger
-BENCHDIRS = "%s/default/default/*/gcc-4.4.3-glibc-2.11.1-grte-k8-opt/ref/*/results.txt"
-
+BENCHDIRS = '%s/default/default/*/gcc-4.4.3-glibc-2.11.1-grte-k8-opt/ref/*/results.txt'
# Common initializations
cmd_executer = command_executer.GetCommandExecuter()
def Usage(parser, message):
- print "ERROR: " + message
+ print 'ERROR: ' + message
parser.print_help()
sys.exit(0)
@@ -55,7 +53,7 @@ def GetStats(file):
if not m:
continue
metric = m.group(1)
- if re.match(r"isolated_walltime", metric):
+ if re.match(r'isolated_walltime', metric):
continue
value = float(m.group(2))
@@ -63,6 +61,7 @@ def GetStats(file):
return dict(pairs)
+
def PrintDash(n):
tmpstr = ''
for i in range(n):
@@ -85,25 +84,29 @@ def PrintHeader(hdr):
tmpstr = ''
for i in range(len(hdr)):
- tmpstr += "%15.15s" % hdr[i]
+ tmpstr += '%15.15s' % hdr[i]
print tmpstr
PrintDash(tot_len * 15)
+
def Main(argv):
"""Compare Benchmarks."""
# Common initializations
parser = optparse.OptionParser()
- parser.add_option("-c", "--csv", dest="csv_output",
- action="store_true", default=False,
- help="Output in csv form.")
+ parser.add_option('-c',
+ '--csv',
+ dest='csv_output',
+ action='store_true',
+ default=False,
+ help='Output in csv form.')
(options, args) = parser.parse_args(argv[1:])
# validate args
if len(args) != 2:
- Usage(parser, "Needs <baseline output dir> <results output dir>")
+ Usage(parser, 'Needs <baseline output dir> <results output dir>')
base_dir = args[0]
res_dir = args[1]
@@ -128,7 +131,7 @@ def Main(argv):
# benchname (remove results.txt), basetime, restime, %speed-up
hdr = []
benchname = re.split('/', resfile)[-2:-1][0]
- benchname = benchname.replace('chromeos__', '',1)
+ benchname = benchname.replace('chromeos__', '', 1)
hdr.append(benchname)
hdr.append('basetime')
hdr.append('restime')
@@ -143,28 +146,29 @@ def Main(argv):
count = 0
for key in stats.keys():
if key in basestats.keys():
- # ignore very small values.
+ # ignore very small values.
if stats[key] < 0.01:
continue
count = count + 1
- prod = prod * (stats[key]/basestats[key])
- speedup = (basestats[key] - stats[key])/basestats[key]
+ prod = prod * (stats[key] / basestats[key])
+ speedup = (basestats[key] - stats[key]) / basestats[key]
speedup = speedup * 100.0
if options.csv_output:
- print "%s,%f,%f,%f" % (key, basestats[key], stats[key],speedup)
+ print '%s,%f,%f,%f' % (key, basestats[key], stats[key], speedup)
else:
- print "%15.15s%15.2f%15.2f%14.2f%%" % (key, basestats[key], stats[key],speedup)
+ print '%15.15s%15.2f%15.2f%14.2f%%' % (key, basestats[key],
+ stats[key], speedup)
- prod = math.exp(1.0/count * math.log(prod))
+ prod = math.exp(1.0 / count * math.log(prod))
prod = (1.0 - prod) * 100
if options.csv_output:
- print "%s,,,%f" % ('Geomean', prod)
+ print '%s,,,%f' % ('Geomean', prod)
else:
- print "%15.15s%15.15s%15.15s%14.2f%%" % ('Geomean', '', '', prod)
+ print '%15.15s%15.15s%15.15s%14.2f%%' % ('Geomean', '', '', prod)
print
return 0
-if __name__ == "__main__":
+if __name__ == '__main__':
retval = Main(sys.argv)
sys.exit(retval)
diff --git a/crb/autotest_gatherer.py b/crb/autotest_gatherer.py
index da39040d..f8f7b43f 100644
--- a/crb/autotest_gatherer.py
+++ b/crb/autotest_gatherer.py
@@ -1,30 +1,34 @@
from table_formatter import TableFormatter as TableFormatter
+
class AutotestGatherer(TableFormatter):
+
def __init__(self):
self.runs = []
TableFormatter.__init__(self)
def GetFormattedMainTable(self, percents_only, fit_string):
- ret = ""
+ ret = ''
table = self.GetTableValues()
ret += self.GetTableLabels(table)
- ret += self.GetFormattedTable(table, percents_only=percents_only,
+ ret += self.GetFormattedTable(table,
+ percents_only=percents_only,
fit_string=fit_string)
return ret
def GetFormattedSummaryTable(self, percents_only, fit_string):
- ret = ""
+ ret = ''
table = self.GetTableValues()
summary_table = self.GetSummaryTableValues(table)
ret += self.GetTableLabels(summary_table)
- ret += self.GetFormattedTable(summary_table, percents_only=percents_only,
+ ret += self.GetFormattedTable(summary_table,
+ percents_only=percents_only,
fit_string=fit_string)
return ret
def GetBenchmarksString(self):
- ret = "Benchmarks (in order):"
- ret = "\n".join(self.GetAllBenchmarks())
+ ret = 'Benchmarks (in order):'
+ ret = '\n'.join(self.GetAllBenchmarks())
return ret
def GetAllBenchmarks(self):
@@ -40,7 +44,7 @@ class AutotestGatherer(TableFormatter):
table = []
row = []
- row.append("Benchmark")
+ row.append('Benchmark')
for i in range(len(self.runs)):
run = self.runs[i]
label = run.GetLabel()
@@ -57,7 +61,7 @@ class AutotestGatherer(TableFormatter):
if benchmark in results:
row.append(results[benchmark])
else:
- row.append("")
+ row.append('')
table.append(row)
return table
diff --git a/crb/autotest_run.py b/crb/autotest_run.py
index 565246db..b774419b 100644
--- a/crb/autotest_run.py
+++ b/crb/autotest_run.py
@@ -12,25 +12,33 @@ import table_formatter
from utils import command_executer
from utils import logger
-
-SCRATCH_DIR = "/home/%s/cros_scratch" % getpass.getuser()
-PICKLE_FILE = "pickle.txt"
-VERSION = "1"
+SCRATCH_DIR = '/home/%s/cros_scratch' % getpass.getuser()
+PICKLE_FILE = 'pickle.txt'
+VERSION = '1'
def ConvertToFilename(text):
ret = text
- ret = re.sub("/", "__", ret)
- ret = re.sub(" ", "_", ret)
- ret = re.sub("=", "", ret)
- ret = re.sub("\"", "", ret)
+ ret = re.sub('/', '__', ret)
+ ret = re.sub(' ', '_', ret)
+ ret = re.sub('=', '', ret)
+ ret = re.sub("\"", '', ret)
return ret
class AutotestRun(threading.Thread):
- def __init__(self, autotest, chromeos_root="", chromeos_image="",
- board="", remote="", iteration=0, image_checksum="",
- exact_remote=False, rerun=False, rerun_if_failed=False):
+
+ def __init__(self,
+ autotest,
+ chromeos_root='',
+ chromeos_image='',
+ board='',
+ remote='',
+ iteration=0,
+ image_checksum='',
+ exact_remote=False,
+ rerun=False,
+ rerun_if_failed=False):
self.autotest = autotest
self.chromeos_root = chromeos_root
self.chromeos_image = chromeos_image
@@ -44,7 +52,7 @@ class AutotestRun(threading.Thread):
threading.Thread.__init__(self)
self.terminate = False
self.retval = None
- self.status = "PENDING"
+ self.status = 'PENDING'
self.run_completed = False
self.exact_remote = exact_remote
self.rerun = rerun
@@ -58,7 +66,7 @@ class AutotestRun(threading.Thread):
array2 = []
for v in array:
- if mean != 0 and abs(v - mean)/mean < 0.2:
+ if mean != 0 and abs(v - mean) / mean < 0.2:
array2.append(v)
if array2:
@@ -71,7 +79,7 @@ class AutotestRun(threading.Thread):
composite_keys = []
composite_dict = {}
for key in results_dict:
- mo = re.match("(.*){\d+}", key)
+ mo = re.match('(.*){\d+}', key)
if mo:
composite_keys.append(mo.group(1))
for key in results_dict:
@@ -85,24 +93,24 @@ class AutotestRun(threading.Thread):
for composite_key in composite_dict:
v = composite_dict[composite_key]
- results_dict["%s[c]" % composite_key] = sum(v) / len(v)
+ results_dict['%s[c]' % composite_key] = sum(v) / len(v)
mean_excluding_slowest = AutotestRun.MeanExcludingSlowest(v)
- results_dict["%s[ce]" % composite_key] = mean_excluding_slowest
+ results_dict['%s[ce]' % composite_key] = mean_excluding_slowest
return results_dict
def ParseOutput(self):
- p = re.compile("^-+.*?^-+", re.DOTALL|re.MULTILINE)
+ p = re.compile('^-+.*?^-+', re.DOTALL | re.MULTILINE)
matches = p.findall(self.out)
for i in range(len(matches)):
results = matches[i]
results_dict = {}
for line in results.splitlines()[1:-1]:
- mo = re.match("(.*\S)\s+\[\s+(PASSED|FAILED)\s+\]", line)
+ mo = re.match('(.*\S)\s+\[\s+(PASSED|FAILED)\s+\]', line)
if mo:
results_dict[mo.group(1)] = mo.group(2)
continue
- mo = re.match("(.*\S)\s+(.*)", line)
+ mo = re.match('(.*\S)\s+(.*)', line)
if mo:
results_dict[mo.group(1)] = mo.group(2)
@@ -115,21 +123,21 @@ class AutotestRun(threading.Thread):
# Autotest recently added a secondary table
# That reports errors and screws up the final pretty output.
break
- mo = re.search("Results placed in (\S+)", self.out)
+ mo = re.search('Results placed in (\S+)', self.out)
if mo:
self.results_dir = mo.group(1)
self.full_name = os.path.basename(self.results_dir)
def GetCacheHashBase(self):
- ret = ("%s %s %s" %
+ ret = ('%s %s %s' %
(self.image_checksum, self.autotest.name, self.iteration))
if self.autotest.args:
- ret += " %s" % self.autotest.args
- ret += "-%s" % VERSION
+ ret += ' %s' % self.autotest.args
+ ret += '-%s' % VERSION
return ret
def GetLabel(self):
- ret = "%s %s remote:%s" % (self.chromeos_image, self.autotest.name,
+ ret = '%s %s remote:%s' % (self.chromeos_image, self.autotest.name,
self.remote)
return ret
@@ -138,23 +146,23 @@ class AutotestRun(threading.Thread):
if self.exact_remote:
if not self.remote:
return False
- cache_dir_glob = "%s_%s" % (ConvertToFilename(base), self.remote)
+ cache_dir_glob = '%s_%s' % (ConvertToFilename(base), self.remote)
else:
- cache_dir_glob = "%s*" % ConvertToFilename(base)
+ cache_dir_glob = '%s*' % ConvertToFilename(base)
cache_path_glob = os.path.join(SCRATCH_DIR, cache_dir_glob)
matching_dirs = glob.glob(cache_path_glob)
if matching_dirs:
matching_dir = matching_dirs[0]
cache_file = os.path.join(matching_dir, PICKLE_FILE)
assert os.path.isfile(cache_file)
- self._logger.LogOutput("Trying to read from cache file: %s" % cache_file)
+ self._logger.LogOutput('Trying to read from cache file: %s' % cache_file)
return self.ReadFromCache(cache_file)
- self._logger.LogOutput("Cache miss. AM going to run: %s for: %s" %
+ self._logger.LogOutput('Cache miss. AM going to run: %s for: %s' %
(self.autotest.name, self.chromeos_image))
return False
def ReadFromCache(self, cache_file):
- with open(cache_file, "rb") as f:
+ with open(cache_file, 'rb') as f:
self.retval = pickle.load(f)
self.out = pickle.load(f)
self.err = pickle.load(f)
@@ -164,48 +172,46 @@ class AutotestRun(threading.Thread):
def StoreToCache(self):
base = self.GetCacheHashBase()
- self.cache_dir = os.path.join(SCRATCH_DIR, "%s_%s" % (
- ConvertToFilename(base),
- self.remote))
+ self.cache_dir = os.path.join(SCRATCH_DIR,
+ '%s_%s' % (ConvertToFilename(base),
+ self.remote))
cache_file = os.path.join(self.cache_dir, PICKLE_FILE)
- command = "mkdir -p %s" % os.path.dirname(cache_file)
+ command = 'mkdir -p %s' % os.path.dirname(cache_file)
ret = self._ce.RunCommand(command)
assert ret == 0, "Couldn't create cache dir"
- with open(cache_file, "wb") as f:
+ with open(cache_file, 'wb') as f:
pickle.dump(self.retval, f)
pickle.dump(self.out, f)
pickle.dump(self.err, f)
def run(self):
self._logger = logger.Logger(
- os.path.dirname(__file__),
- "%s.%s" % (os.path.basename(__file__),
- self.name), True)
+ os.path.dirname(__file__), '%s.%s' % (os.path.basename(__file__),
+ self.name), True)
self._ce = command_executer.GetCommandExecuter(self._logger)
self.RunCached()
def RunCached(self):
- self.status = "WAITING"
+ self.status = 'WAITING'
cache_hit = False
if not self.rerun:
cache_hit = self.TryToLoadFromCache()
else:
- self._logger.LogOutput("--rerun passed. Not using cached results.")
+ self._logger.LogOutput('--rerun passed. Not using cached results.')
if self.rerun_if_failed and self.retval:
- self._logger.LogOutput("--rerun_if_failed passed and existing test "
- "failed. Rerunning...")
+ self._logger.LogOutput('--rerun_if_failed passed and existing test '
+ 'failed. Rerunning...')
cache_hit = False
if not cache_hit:
# Get machine
while True:
if self.terminate:
return 1
- self.machine = (
- machine_manager_singleton.MachineManagerSingleton().AcquireMachine(self.image_checksum))
+ self.machine = (machine_manager_singleton.MachineManagerSingleton(
+ ).AcquireMachine(self.image_checksum))
if self.machine:
- self._logger.LogOutput("%s: Machine %s acquired at %s" %
- (self.name,
- self.machine.name,
+ self._logger.LogOutput('%s: Machine %s acquired at %s' %
+ (self.name, self.machine.name,
datetime.datetime.now()))
break
else:
@@ -216,56 +222,56 @@ class AutotestRun(threading.Thread):
if self.machine.checksum != self.image_checksum:
self.retval = self.ImageTo(self.machine.name)
- if self.retval: return self.retval
+ if self.retval:
+ return self.retval
self.machine.checksum = self.image_checksum
self.machine.image = self.chromeos_image
- self.status = "RUNNING: %s" % self.autotest.name
+ self.status = 'RUNNING: %s' % self.autotest.name
[self.retval, self.out, self.err] = self.RunTestOn(self.machine.name)
self.run_completed = True
finally:
- self._logger.LogOutput("Releasing machine: %s" % self.machine.name)
- machine_manager_singleton.MachineManagerSingleton().ReleaseMachine(self.machine)
- self._logger.LogOutput("Released machine: %s" % self.machine.name)
+ self._logger.LogOutput('Releasing machine: %s' % self.machine.name)
+ machine_manager_singleton.MachineManagerSingleton().ReleaseMachine(
+ self.machine)
+ self._logger.LogOutput('Released machine: %s' % self.machine.name)
self.StoreToCache()
if not self.retval:
- self.status = "SUCCEEDED"
+ self.status = 'SUCCEEDED'
else:
- self.status = "FAILED"
+ self.status = 'FAILED'
self.ParseOutput()
# Copy results directory to the scratch dir
if (not cache_hit and not self.retval and self.autotest.args and
- "--profile" in self.autotest.args):
- results_dir = os.path.join(self.chromeos_root, "chroot",
- self.results_dir.lstrip("/"))
+ '--profile' in self.autotest.args):
+ results_dir = os.path.join(self.chromeos_root, 'chroot',
+ self.results_dir.lstrip('/'))
tarball = os.path.join(
- self.cache_dir,
- os.path.basename(os.path.dirname(self.results_dir)))
- command = ("cd %s && tar cjf %s.tbz2 ." % (results_dir, tarball))
+ self.cache_dir, os.path.basename(os.path.dirname(self.results_dir)))
+ command = ('cd %s && tar cjf %s.tbz2 .' % (results_dir, tarball))
self._ce.RunCommand(command)
perf_data_file = os.path.join(self.results_dir, self.full_name,
- "profiling/iteration.1/perf.data")
+ 'profiling/iteration.1/perf.data')
# Attempt to build a perf report and keep it with the results.
- command = ("cd %s/src/scripts &&"
- " cros_sdk -- /usr/sbin/perf report --symfs=/build/%s"
- " -i %s --stdio" % (self.chromeos_root, self.board,
+ command = ('cd %s/src/scripts &&'
+ ' cros_sdk -- /usr/sbin/perf report --symfs=/build/%s'
+ ' -i %s --stdio' % (self.chromeos_root, self.board,
perf_data_file))
ret, out, err = self._ce.RunCommandWOutput(command)
- with open(os.path.join(self.cache_dir, "perf.report"), "wb") as f:
+ with open(os.path.join(self.cache_dir, 'perf.report'), 'wb') as f:
f.write(out)
return self.retval
def ImageTo(self, machine_name):
- image_args = [image_chromeos.__file__,
- "--chromeos_root=%s" % self.chromeos_root,
- "--image=%s" % self.chromeos_image,
- "--remote=%s" % machine_name]
+ image_args = [image_chromeos.__file__, '--chromeos_root=%s' %
+ self.chromeos_root, '--image=%s' % self.chromeos_image,
+ '--remote=%s' % machine_name]
if self.board:
- image_args.append("--board=%s" % self.board)
+ image_args.append('--board=%s' % self.board)
### devserver_port = 8080
### mo = re.search("\d+", self.name)
@@ -280,33 +286,32 @@ class AutotestRun(threading.Thread):
### image_args.append("--image_to_live_args=--devserver_port=%s" %
### devserver_port)
- # Currently can't image two machines at once.
- # So have to serialized on this lock.
- self.status = "WAITING ON IMAGE_LOCK"
+# Currently can't image two machines at once.
+# So have to serialized on this lock.
+ self.status = 'WAITING ON IMAGE_LOCK'
with machine_manager_singleton.MachineManagerSingleton().image_lock:
- self.status = "IMAGING"
- retval = self._ce.RunCommand(" ".join(["python"] + image_args))
+ self.status = 'IMAGING'
+ retval = self._ce.RunCommand(' '.join(['python'] + image_args))
machine_manager_singleton.MachineManagerSingleton().num_reimages += 1
if retval:
- self.status = "ABORTED DUE TO IMAGE FAILURE"
+ self.status = 'ABORTED DUE TO IMAGE FAILURE'
return retval
def DoPowerdHack(self):
- command = "sudo initctl stop powerd"
- self._ce.CrosRunCommand(command, machine=self.machine.name,
+ command = 'sudo initctl stop powerd'
+ self._ce.CrosRunCommand(command,
+ machine=self.machine.name,
chromeos_root=self.chromeos_root)
def RunTestOn(self, machine_name):
- command = "cd %s/src/scripts" % self.chromeos_root
- options = ""
+ command = 'cd %s/src/scripts' % self.chromeos_root
+ options = ''
if self.board:
- options += " --board=%s" % self.board
+ options += ' --board=%s' % self.board
if self.autotest.args:
options += " --args='%s'" % self.autotest.args
- if "tegra2" in self.board:
+ if 'tegra2' in self.board:
self.DoPowerdHack()
- command += ("&& cros_sdk -- /usr/bin/test_that %s %s %s" %
- (options,
- machine_name,
- self.autotest.name))
+ command += ('&& cros_sdk -- /usr/bin/test_that %s %s %s' %
+ (options, machine_name, self.autotest.name))
return self._ce.RunCommand(command, True)
diff --git a/crb/crb_driver.py b/crb/crb_driver.py
index 9e6301d0..14fe6bab 100755
--- a/crb/crb_driver.py
+++ b/crb/crb_driver.py
@@ -19,36 +19,38 @@ from utils.file_utils import FileUtils
def CanonicalizeChromeOSRoot(chromeos_root):
chromeos_root = os.path.expanduser(chromeos_root)
- if os.path.isfile(os.path.join(chromeos_root,
- "src/scripts/enter_chroot.sh")):
+ if os.path.isfile(os.path.join(chromeos_root, 'src/scripts/enter_chroot.sh')):
return chromeos_root
else:
return None
class Autotest(object):
+
def __init__(self, autotest_string):
self.name = None
self.iterations = None
self.args = None
- fields = autotest_string.split(",", 1)
+ fields = autotest_string.split(',', 1)
self.name = fields[0]
if len(fields) > 1:
autotest_string = fields[1]
- fields = autotest_string.split(",", 1)
- else: return
+ fields = autotest_string.split(',', 1)
+ else:
+ return
self.iterations = int(fields[0])
if len(fields) > 1:
self.args = fields[1]
- else: return
+ else:
+ return
def __str__(self):
- return "\n".join([self.name, self.iterations, self.args])
+ return '\n'.join([self.name, self.iterations, self.args])
def CreateAutotestListFromString(autotest_strings, default_iterations=None):
autotest_list = []
- for autotest_string in autotest_strings.split(":"):
+ for autotest_string in autotest_strings.split(':'):
autotest = Autotest(autotest_string)
if default_iterations and not autotest.iterations:
autotest.iterations = default_iterations
@@ -57,24 +59,30 @@ def CreateAutotestListFromString(autotest_strings, default_iterations=None):
return autotest_list
-def CreateAutotestRuns(images, autotests, remote, board, exact_remote,
- rerun, rerun_if_failed, main_chromeos_root=None):
+def CreateAutotestRuns(images,
+ autotests,
+ remote,
+ board,
+ exact_remote,
+ rerun,
+ rerun_if_failed,
+ main_chromeos_root=None):
autotest_runs = []
for image in images:
- logger.GetLogger().LogOutput("Computing md5sum of: %s" % image)
+ logger.GetLogger().LogOutput('Computing md5sum of: %s' % image)
image_checksum = FileUtils().Md5File(image)
- logger.GetLogger().LogOutput("md5sum %s: %s" % (image, image_checksum))
-### image_checksum = "abcdefghi"
+ logger.GetLogger().LogOutput('md5sum %s: %s' % (image, image_checksum))
+ ### image_checksum = "abcdefghi"
chromeos_root = main_chromeos_root
if not main_chromeos_root:
- image_chromeos_root = os.path.join(os.path.dirname(image),
- "../../../../..")
+ image_chromeos_root = os.path.join(
+ os.path.dirname(image), '../../../../..')
chromeos_root = CanonicalizeChromeOSRoot(image_chromeos_root)
- assert chromeos_root, "chromeos_root: %s invalid" % image_chromeos_root
+ assert chromeos_root, 'chromeos_root: %s invalid' % image_chromeos_root
else:
chromeos_root = CanonicalizeChromeOSRoot(main_chromeos_root)
- assert chromeos_root, "chromeos_root: %s invalid" % main_chromeos_root
+ assert chromeos_root, 'chromeos_root: %s invalid' % main_chromeos_root
# We just need a single ChromeOS root in the MachineManagerSingleton. It is
# needed because we can save re-image time by checking the image checksum at
@@ -101,9 +109,9 @@ def CreateAutotestRuns(images, autotests, remote, board, exact_remote,
def GetNamesAndIterations(autotest_runs):
strings = []
for autotest_run in autotest_runs:
- strings.append("%s:%s" % (autotest_run.autotest.name,
+ strings.append('%s:%s' % (autotest_run.autotest.name,
autotest_run.iteration))
- return " %s (%s)" % (len(strings), " ".join(strings))
+ return ' %s (%s)' % (len(strings), ' '.join(strings))
def GetStatusString(autotest_runs):
@@ -115,19 +123,19 @@ def GetStatusString(autotest_runs):
status_strings = []
for key, val in status_bins.items():
- status_strings.append("%s: %s" % (key, GetNamesAndIterations(val)))
- return "Thread Status:\n%s" % "\n".join(status_strings)
+ status_strings.append('%s: %s' % (key, GetNamesAndIterations(val)))
+ return 'Thread Status:\n%s' % '\n'.join(status_strings)
def GetProgressBar(num_done, num_total):
- ret = "Done: %s%%" % int(100.0 * num_done / num_total)
+ ret = 'Done: %s%%' % int(100.0 * num_done / num_total)
bar_length = 50
- done_char = ">"
- undone_char = " "
+ done_char = '>'
+ undone_char = ' '
num_done_chars = bar_length * num_done / num_total
num_undone_chars = bar_length - num_done_chars
- ret += " [%s%s]" % (num_done_chars * done_char, num_undone_chars *
- undone_char)
+ ret += ' [%s%s]' % (num_done_chars * done_char,
+ num_undone_chars * undone_char)
return ret
@@ -139,14 +147,13 @@ def GetProgressString(start_time, num_remain, num_total):
eta_seconds = int(eta_seconds)
eta = datetime.timedelta(seconds=eta_seconds)
except ZeroDivisionError:
- eta = "Unknown"
+ eta = 'Unknown'
strings = []
- strings.append("Current time: %s Elapsed: %s ETA: %s" %
- (datetime.datetime.now(),
- datetime.timedelta(seconds=int(elapsed_time)),
- eta))
+ strings.append('Current time: %s Elapsed: %s ETA: %s' %
+ (datetime.datetime.now(),
+ datetime.timedelta(seconds=int(elapsed_time)), eta))
strings.append(GetProgressBar(num_total - num_remain, num_total))
- return "\n".join(strings)
+ return '\n'.join(strings)
def RunAutotestRunsInParallel(autotest_runs):
@@ -162,25 +169,23 @@ def RunAutotestRunsInParallel(autotest_runs):
last_printed_time = time.time()
while active_threads:
try:
- active_threads = [t for t in active_threads if t is not None
- and t.isAlive()]
+ active_threads = [t for t in active_threads
+ if t is not None and t.isAlive()]
for t in active_threads:
t.join(1)
if time.time() - last_printed_time > print_interval:
- border = "=============================="
+ border = '=============================='
logger.GetLogger().LogOutput(border)
- logger.GetLogger().LogOutput(GetProgressString(
- start_time,
- len([t for t in autotest_runs if t.status not in ["SUCCEEDED",
- "FAILED"]]),
- len(autotest_runs)))
+ logger.GetLogger().LogOutput(GetProgressString(start_time, len(
+ [t for t in autotest_runs if t.status not in ['SUCCEEDED', 'FAILED']
+ ]), len(autotest_runs)))
logger.GetLogger().LogOutput(GetStatusString(autotest_runs))
- logger.GetLogger().LogOutput("%s\n" %
+ logger.GetLogger().LogOutput('%s\n' %
MachineManagerSingleton().AsString())
logger.GetLogger().LogOutput(border)
last_printed_time = time.time()
except KeyboardInterrupt:
- print "C-c received... cleaning up threads."
+ print 'C-c received... cleaning up threads.'
for t in active_threads:
t.terminate = True
return 1
@@ -190,7 +195,8 @@ def RunAutotestRunsInParallel(autotest_runs):
def RunAutotestRunsSerially(autotest_runs):
for autotest_run in autotest_runs:
retval = autotest_run.Run()
- if retval: return retval
+ if retval:
+ return retval
def ProduceTables(autotest_runs, full_table, fit_string):
@@ -201,31 +207,31 @@ def ProduceTables(autotest_runs, full_table, fit_string):
if name not in ags_dict:
ags_dict[name] = AutotestGatherer()
ags_dict[name].runs.append(autotest_run)
- output = ""
+ output = ''
for b, ag in ags_dict.items():
- output += "Benchmark: %s\n" % b
+ output += 'Benchmark: %s\n' % b
output += ag.GetFormattedMainTable(percents_only=not full_table,
fit_string=fit_string)
- output += "\n"
+ output += '\n'
- summary = ""
+ summary = ''
for b, ag in ags_dict.items():
- summary += "Benchmark Summary Table: %s\n" % b
+ summary += 'Benchmark Summary Table: %s\n' % b
summary += ag.GetFormattedSummaryTable(percents_only=not full_table,
fit_string=fit_string)
- summary += "\n"
+ summary += '\n'
output += summary
- output += ("Number of re-images performed: %s" %
+ output += ('Number of re-images performed: %s' %
MachineManagerSingleton().num_reimages)
l.LogOutput(output)
if autotest_runs:
board = autotest_runs[0].board
else:
- board = ""
+ board = ''
- subject = "%s: %s" % (board, ", ".join(ags_dict.keys()))
+ subject = '%s: %s' % (board, ', '.join(ags_dict.keys()))
if any(autotest_run.run_completed for autotest_run in autotest_runs):
SendEmailToUser(subject, summary)
@@ -239,13 +245,13 @@ def SendEmailToUser(subject, text_to_send):
# you == the recipient's email address
me = os.path.basename(__file__)
you = os.getlogin()
- msg["Subject"] = "[%s] %s" % (os.path.basename(__file__), subject)
- msg["From"] = me
- msg["To"] = you
+ msg['Subject'] = '[%s] %s' % (os.path.basename(__file__), subject)
+ msg['From'] = me
+ msg['To'] = you
# Send the message via our own SMTP server, but don't include the
# envelope header.
- s = smtplib.SMTP("localhost")
+ s = smtplib.SMTP('localhost')
s.sendmail(me, [you], msg.as_string())
s.quit()
@@ -253,61 +259,70 @@ def SendEmailToUser(subject, text_to_send):
def Main(argv):
"""The main function."""
# Common initializations
-### command_executer.InitCommandExecuter(True)
+ ### command_executer.InitCommandExecuter(True)
l = logger.GetLogger()
parser = optparse.OptionParser()
- parser.add_option("-t", "--tests", dest="tests",
- help=("Tests to compare."
- "Optionally specify per-test iterations by:"
- "<test>,<iter>:<args>"))
- parser.add_option("-c", "--chromeos_root", dest="chromeos_root",
- help="A *single* chromeos_root where scripts can be found.")
- parser.add_option("-n", "--iterations", dest="iterations",
- help="Iterations to run per benchmark.",
+ parser.add_option('-t',
+ '--tests',
+ dest='tests',
+ help=('Tests to compare.'
+ 'Optionally specify per-test iterations by:'
+ '<test>,<iter>:<args>'))
+ parser.add_option('-c',
+ '--chromeos_root',
+ dest='chromeos_root',
+ help='A *single* chromeos_root where scripts can be found.')
+ parser.add_option('-n',
+ '--iterations',
+ dest='iterations',
+ help='Iterations to run per benchmark.',
default=1)
- parser.add_option("-r", "--remote", dest="remote",
- help="The remote chromeos machine.")
- parser.add_option("-b", "--board", dest="board",
- help="The remote board.")
- parser.add_option("--full_table", dest="full_table",
- help="Print full tables.",
- action="store_true",
+ parser.add_option('-r',
+ '--remote',
+ dest='remote',
+ help='The remote chromeos machine.')
+ parser.add_option('-b', '--board', dest='board', help='The remote board.')
+ parser.add_option('--full_table',
+ dest='full_table',
+ help='Print full tables.',
+ action='store_true',
default=True)
- parser.add_option("--exact_remote",
- dest="exact_remote",
- help="Run tests on the exact remote.",
- action="store_true",
+ parser.add_option('--exact_remote',
+ dest='exact_remote',
+ help='Run tests on the exact remote.',
+ action='store_true',
default=False)
- parser.add_option("--fit_string", dest="fit_string",
- help="Fit strings to fixed sizes.",
- action="store_true",
+ parser.add_option('--fit_string',
+ dest='fit_string',
+ help='Fit strings to fixed sizes.',
+ action='store_true',
default=False)
- parser.add_option("--rerun",
- dest="rerun",
- help="Re-run regardless of cache hit.",
- action="store_true",
+ parser.add_option('--rerun',
+ dest='rerun',
+ help='Re-run regardless of cache hit.',
+ action='store_true',
default=False)
- parser.add_option("--rerun_if_failed",
- dest="rerun_if_failed",
- help="Re-run if previous run was a failure.",
- action="store_true",
+ parser.add_option('--rerun_if_failed',
+ dest='rerun_if_failed',
+ help='Re-run if previous run was a failure.',
+ action='store_true',
default=False)
- parser.add_option("--no_lock",
- dest="no_lock",
- help="Do not lock the machine before running the tests.",
- action="store_true",
+ parser.add_option('--no_lock',
+ dest='no_lock',
+ help='Do not lock the machine before running the tests.',
+ action='store_true',
default=False)
- l.LogOutput(" ".join(argv))
+ l.LogOutput(' '.join(argv))
[options, args] = parser.parse_args(argv)
if options.remote is None:
- l.LogError("No remote machine specified.")
+ l.LogError('No remote machine specified.')
parser.print_help()
return 1
if not options.board:
- l.LogError("No board specified.")
+ l.LogError('No board specified.')
parser.print_help()
return 1
@@ -335,11 +350,12 @@ def Main(argv):
try:
# At this point we have all the autotest runs.
- for machine in remote.split(","):
+ for machine in remote.split(','):
MachineManagerSingleton().AddMachine(machine)
retval = RunAutotestRunsInParallel(autotest_runs)
- if retval: return retval
+ if retval:
+ return retval
# Now print tables
ProduceTables(autotest_runs, full_table, fit_string)
@@ -349,5 +365,6 @@ def Main(argv):
return 0
-if __name__ == "__main__":
+
+if __name__ == '__main__':
sys.exit(Main(sys.argv))
diff --git a/crb/machine_manager_singleton.py b/crb/machine_manager_singleton.py
index 80369216..f32e9ef6 100644
--- a/crb/machine_manager_singleton.py
+++ b/crb/machine_manager_singleton.py
@@ -8,6 +8,7 @@ from utils import logger
class CrosMachine(object):
+
def __init__(self, name):
self.name = name
self.image = None
@@ -23,7 +24,7 @@ class CrosMachine(object):
l.append(str(self.checksum))
l.append(str(self.locked))
l.append(str(self.released_time))
- return ", ".join(l)
+ return ', '.join(l)
class MachineManagerSingleton(object):
@@ -39,25 +40,26 @@ class MachineManagerSingleton(object):
def __new__(cls, *args, **kwargs):
with cls._lock:
if not cls._instance:
- cls._instance = super(MachineManagerSingleton, cls).__new__(
- cls, *args, **kwargs)
+ cls._instance = super(MachineManagerSingleton, cls).__new__(cls, *args,
+ **kwargs)
return cls._instance
def TryToLockMachine(self, cros_machine):
with self._lock:
assert cros_machine, "Machine can't be None"
for m in self._machines:
- assert m.name != cros_machine.name, (
- "Tried to double-lock %s" % cros_machine.name)
+ assert m.name != cros_machine.name, ('Tried to double-lock %s' %
+ cros_machine.name)
if self.no_lock:
locked = True
else:
locked = lock_machine.Machine(cros_machine.name).Lock(True, sys.argv[0])
if locked:
ce = command_executer.GetCommandExecuter()
- command = "cat %s" % image_chromeos.checksum_file
+ command = 'cat %s' % image_chromeos.checksum_file
ret, out, err = ce.CrosRunCommandWOutput(
- command, chromeos_root=self.chromeos_root,
+ command,
+ chromeos_root=self.chromeos_root,
machine=cros_machine.name)
if ret == 0:
cros_machine.checksum = out.strip()
@@ -70,7 +72,7 @@ class MachineManagerSingleton(object):
def AddMachine(self, machine_name):
with self._lock:
for m in self._all_machines:
- assert m.name != machine_name, "Tried to double-add %s" % machine_name
+ assert m.name != machine_name, 'Tried to double-add %s' % machine_name
self._all_machines.append(CrosMachine(machine_name))
def AcquireMachine(self, image_checksum):
@@ -79,13 +81,13 @@ class MachineManagerSingleton(object):
if not self._machines:
for m in self._all_machines:
self.TryToLockMachine(m)
- assert self._machines, (
- "Could not lock any machine in %s" % self._all_machines)
+ assert self._machines, ('Could not lock any machine in %s' %
+ self._all_machines)
-### for m in self._machines:
-### if (m.locked and time.time() - m.released_time < 10 and
-### m.checksum == image_checksum):
-### return None
+ ### for m in self._machines:
+ ### if (m.locked and time.time() - m.released_time < 10 and
+ ### m.checksum == image_checksum):
+ ### return None
for m in [machine for machine in self._machines if not machine.locked]:
if m.checksum == image_checksum:
m.locked = True
@@ -107,10 +109,10 @@ class MachineManagerSingleton(object):
with self._lock:
for m in self._machines:
if machine.name == m.name:
- assert m.locked == True, "Tried to double-release %s" % m.name
+ assert m.locked == True, 'Tried to double-release %s' % m.name
m.released_time = time.time()
m.locked = False
- m.status = "Available"
+ m.status = 'Available'
break
def __del__(self):
@@ -123,31 +125,29 @@ class MachineManagerSingleton(object):
def __str__(self):
with self._lock:
- l = ["MachineManager Status:"]
+ l = ['MachineManager Status:']
for m in self._machines:
l.append(str(m))
- return "\n".join(l)
+ return '\n'.join(l)
def AsString(self):
with self._lock:
- stringify_fmt = "%-30s %-10s %-4s %-25s %-32s"
- header = stringify_fmt % ("Machine", "Thread", "Lock", "Status", "Checksum")
+ stringify_fmt = '%-30s %-10s %-4s %-25s %-32s'
+ header = stringify_fmt % ('Machine', 'Thread', 'Lock', 'Status',
+ 'Checksum')
table = [header]
for m in self._machines:
if m.autotest_run:
autotest_name = m.autotest_run.name
autotest_status = m.autotest_run.status
else:
- autotest_name = ""
- autotest_status = ""
+ autotest_name = ''
+ autotest_status = ''
try:
- machine_string = stringify_fmt % (m.name,
- autotest_name,
- m.locked,
- autotest_status,
- m.checksum)
+ machine_string = stringify_fmt % (m.name, autotest_name, m.locked,
+ autotest_status, m.checksum)
except:
- machine_string = ""
+ machine_string = ''
table.append(machine_string)
- return "Machine Status:\n%s" % "\n".join(table)
+ return 'Machine Status:\n%s' % '\n'.join(table)
diff --git a/crb/table_formatter.py b/crb/table_formatter.py
index b3b82f09..b8e25d5f 100644
--- a/crb/table_formatter.py
+++ b/crb/table_formatter.py
@@ -1,6 +1,7 @@
import numpy
import re
+
def IsFloat(text):
if text is None:
return False
@@ -13,22 +14,22 @@ def IsFloat(text):
def RemoveTrailingZeros(x):
ret = x
- ret = re.sub("\.0*$", "", ret)
- ret = re.sub("(\.[1-9]*)0+$", "\\1", ret)
+ ret = re.sub('\.0*$', '', ret)
+ ret = re.sub('(\.[1-9]*)0+$', '\\1', ret)
return ret
def HumanizeFloat(x, n=2):
if not IsFloat(x):
return x
- digits = re.findall("[0-9.]", str(x))
+ digits = re.findall('[0-9.]', str(x))
decimal_found = False
- ret = ""
+ ret = ''
sig_figs = 0
for digit in digits:
- if digit == ".":
+ if digit == '.':
decimal_found = True
- elif sig_figs != 0 or digit != "0":
+ elif sig_figs != 0 or digit != '0':
sig_figs += 1
if decimal_found and sig_figs >= n:
break
@@ -39,23 +40,23 @@ def HumanizeFloat(x, n=2):
def GetNSigFigs(x, n=2):
if not IsFloat(x):
return x
- my_fmt = "%." + str(n-1) + "e"
+ my_fmt = '%.' + str(n - 1) + 'e'
x_string = my_fmt % x
f = float(x_string)
return f
-def GetFormattedPercent(baseline, other, bad_result="--"):
- result = "%8s" % GetPercent(baseline, other, bad_result)
+def GetFormattedPercent(baseline, other, bad_result='--'):
+ result = '%8s' % GetPercent(baseline, other, bad_result)
return result
-def GetPercent(baseline, other, bad_result="--"):
+def GetPercent(baseline, other, bad_result='--'):
result = bad_result
if IsFloat(baseline) and IsFloat(other):
try:
- pct = (float(other)/float(baseline) - 1) * 100
- result = "%+1.1f" % pct
+ pct = (float(other) / float(baseline) - 1) * 100
+ result = '%+1.1f' % pct
except ZeroDivisionError:
pass
return result
@@ -67,14 +68,15 @@ def FitString(text, length):
elif len(text) > length:
return text[-length:]
else:
- fmt = "%%%ds" % length
+ fmt = '%%%ds' % length
return fmt % text
class TableFormatter(object):
+
def __init__(self):
- self.d = "\t"
- self.bad_result = "x"
+ self.d = '\t'
+ self.bad_result = 'x'
def GetTablePercents(self, table):
# Assumes table is not transposed.
@@ -84,7 +86,7 @@ class TableFormatter(object):
for i in range(1, len(table)):
row = []
row.append(table[i][0])
- for j in range (1, len(table[0])):
+ for j in range(1, len(table[0])):
c = table[i][j]
b = table[i][1]
p = GetPercent(b, c, self.bad_result)
@@ -99,7 +101,7 @@ class TableFormatter(object):
ret = HumanizeFloat(f, 4)
ret = RemoveTrailingZeros(ret)
if len(ret) > max_length:
- ret = "%1.1ef" % f
+ ret = '%1.1ef' % f
return ret
def TransposeTable(self, table):
@@ -112,17 +114,20 @@ class TableFormatter(object):
return transposed_table
def GetTableLabels(self, table):
- ret = ""
+ ret = ''
header = table[0]
for i in range(1, len(header)):
- ret += "%d: %s\n" % (i, header[i])
+ ret += '%d: %s\n' % (i, header[i])
return ret
- def GetFormattedTable(self, table, transposed=False,
- first_column_width=30, column_width=14,
+ def GetFormattedTable(self,
+ table,
+ transposed=False,
+ first_column_width=30,
+ column_width=14,
percents_only=True,
fit_string=True):
- o = ""
+ o = ''
pct_table = self.GetTablePercents(table)
if transposed == True:
table = self.TransposeTable(table)
@@ -143,14 +148,14 @@ class TableFormatter(object):
c = self.FormatFloat(c)
if IsFloat(p) and not percents_only:
- p = "%s%%" % p
+ p = '%s%%' % p
# Print percent values side by side.
if j != 0:
if percents_only:
- c = "%s" % p
+ c = '%s' % p
else:
- c = "%s (%s)" % (c, p)
+ c = '%s (%s)' % (c, p)
if i == 0 and j != 0:
c = str(j)
@@ -159,7 +164,7 @@ class TableFormatter(object):
o += FitString(c, width) + self.d
else:
o += c + self.d
- o += "\n"
+ o += '\n'
return o
def GetGroups(self, table):
@@ -183,11 +188,11 @@ class TableFormatter(object):
labels = table[0]
- summary_labels = ["Summary Table"]
+ summary_labels = ['Summary Table']
for group in groups:
label = labels[group[0]]
stripped_label = self.GetStrippedLabel(label)
- group_label = "%s (%d runs)" % (stripped_label, len(group))
+ group_label = '%s (%d runs)' % (stripped_label, len(group))
summary_labels.append(group_label)
summary_table.append(summary_labels)
@@ -210,8 +215,8 @@ class TableFormatter(object):
def AverageWithDrops(numbers, slow_percent=20, fast_percent=20):
sorted_numbers = list(numbers)
sorted_numbers.sort()
- num_slow = int(slow_percent/100.0 * len(sorted_numbers))
- num_fast = int(fast_percent/100.0 * len(sorted_numbers))
+ num_slow = int(slow_percent / 100.0 * len(sorted_numbers))
+ num_fast = int(fast_percent / 100.0 * len(sorted_numbers))
sorted_numbers = sorted_numbers[num_slow:]
if num_fast:
sorted_numbers = sorted_numbers[:-num_fast]
@@ -219,7 +224,7 @@ class TableFormatter(object):
@staticmethod
def AggregateResults(group_results):
- ret = ""
+ ret = ''
if not group_results:
return ret
all_floats = True
@@ -228,26 +233,26 @@ class TableFormatter(object):
for group_result in group_results:
if not IsFloat(group_result):
all_floats = False
- if group_result != "PASSED":
+ if group_result != 'PASSED':
all_passes = False
- if group_result != "FAILED":
+ if group_result != 'FAILED':
all_fails = False
if all_floats == True:
float_results = [float(v) for v in group_results]
- ret = "%f" % TableFormatter.AverageWithDrops(float_results)
+ ret = '%f' % TableFormatter.AverageWithDrops(float_results)
# Add this line for standard deviation.
-### ret += " %f" % numpy.std(float_results)
+ ### ret += " %f" % numpy.std(float_results)
elif all_passes == True:
- ret = "ALL_PASS"
+ ret = 'ALL_PASS'
elif all_fails == True:
- ret = "ALL_FAILS"
+ ret = 'ALL_FAILS'
return ret
@staticmethod
def GetStrippedLabel(label):
- return re.sub("\s*\S+:\S+\s*", "", label)
+ return re.sub('\s*\S+:\S+\s*', '', label)
### return re.sub("\s*remote:\S*\s*i:\d+$", "", label)
@staticmethod
def GetLabelWithIteration(label, iteration):
- return "%s i:%d" % (label, iteration)
+ return '%s i:%d' % (label, iteration)
diff --git a/cros_login.py b/cros_login.py
index 264b4429..b510444d 100755
--- a/cros_login.py
+++ b/cros_login.py
@@ -1,12 +1,11 @@
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
-
"""Script to get past the login screen of ChromeOS.
"""
-__author__ = "asharif@google.com (Ahmad Sharif)"
+__author__ = 'asharif@google.com (Ahmad Sharif)'
import datetime
import fcntl
@@ -25,14 +24,13 @@ from utils import command_executer
LOGIN_PROMPT_VISIBLE_MAGIC_FILE = '/tmp/uptime-login-prompt-visible'
LOGGED_IN_MAGIC_FILE = '/var/run/state/logged-in'
-
-script_header="""
+script_header = """
import os
import autox
import time
"""
-wait_for_login_screen="""
+wait_for_login_screen = """
while True:
print 'Waiting for login screen to appear...'
@@ -44,8 +42,7 @@ while True:
time.sleep(20)
""" % LOGIN_PROMPT_VISIBLE_MAGIC_FILE
-
-do_login="""
+do_login = """
xauth_filename = '/home/chronos/.Xauthority'
os.environ.setdefault('XAUTHORITY', xauth_filename)
os.environ.setdefault('DISPLAY', ':0.0')
@@ -76,20 +73,20 @@ while True:
print 'Done'
""" % LOGGED_IN_MAGIC_FILE
+
def RestartUI(remote, chromeos_root, login=True):
chromeos_root = os.path.expanduser(chromeos_root)
ce = command_executer.GetCommandExecuter()
# First, restart ui.
command = 'rm -rf %s && restart ui' % LOGIN_PROMPT_VISIBLE_MAGIC_FILE
- ce.CrosRunCommand(command, machine=remote,
- chromeos_root=chromeos_root)
+ ce.CrosRunCommand(command, machine=remote, chromeos_root=chromeos_root)
host_login_script = tempfile.mktemp()
device_login_script = '/tmp/login.py'
login_script_list = [script_header, wait_for_login_screen]
if login:
login_script_list.append(do_login)
- full_login_script_contents = "\n".join(login_script_list)
+ full_login_script_contents = '\n'.join(login_script_list)
with open(host_login_script, 'w') as f:
f.write(full_login_script_contents)
@@ -123,6 +120,7 @@ def Main(argv):
return RestartUI(options.remote, options.chromeos_root)
+
if __name__ == '__main__':
retval = Main(sys.argv)
sys.exit(retval)
diff --git a/crosperf/benchmark.py b/crosperf/benchmark.py
index 7fabf0b4..91e0f7c8 100644
--- a/crosperf/benchmark.py
+++ b/crosperf/benchmark.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -15,9 +14,17 @@ class Benchmark(object):
arguments.
"""
- def __init__(self, name, test_name, test_args, iterations,
- rm_chroot_tmp, perf_args, suite="",
- show_all_results=False, retries=0, run_local=False):
+ def __init__(self,
+ name,
+ test_name,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite='',
+ show_all_results=False,
+ retries=0,
+ run_local=False):
self.name = name
#For telemetry, this is the benchmark name.
self.test_name = test_name
@@ -30,8 +37,8 @@ class Benchmark(object):
self.suite = suite
self.show_all_results = show_all_results
self.retries = retries
- if self.suite == "telemetry":
+ if self.suite == 'telemetry':
self.show_all_results = True
if run_local and self.suite != 'telemetry_Crosperf':
- raise Exception("run_local is only supported by telemetry_Crosperf.")
+ raise Exception('run_local is only supported by telemetry_Crosperf.')
self.run_local = run_local
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
index f10326b0..fb3d6f33 100644
--- a/crosperf/benchmark_run.py
+++ b/crosperf/benchmark_run.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -20,23 +19,18 @@ from results_cache import Result
from results_cache import ResultsCache
from results_cache import TelemetryResult
+STATUS_FAILED = 'FAILED'
+STATUS_SUCCEEDED = 'SUCCEEDED'
+STATUS_IMAGING = 'IMAGING'
+STATUS_RUNNING = 'RUNNING'
+STATUS_WAITING = 'WAITING'
+STATUS_PENDING = 'PENDING'
-STATUS_FAILED = "FAILED"
-STATUS_SUCCEEDED = "SUCCEEDED"
-STATUS_IMAGING = "IMAGING"
-STATUS_RUNNING = "RUNNING"
-STATUS_WAITING = "WAITING"
-STATUS_PENDING = "PENDING"
class BenchmarkRun(threading.Thread):
- def __init__(self, name, benchmark,
- label,
- iteration,
- cache_conditions,
- machine_manager,
- logger_to_use,
- log_level,
- share_cache):
+
+ def __init__(self, name, benchmark, label, iteration, cache_conditions,
+ machine_manager, logger_to_use, log_level, share_cache):
threading.Thread.__init__(self)
self.name = name
self._logger = logger_to_use
@@ -54,7 +48,7 @@ class BenchmarkRun(threading.Thread):
self.cache_conditions = cache_conditions
self.runs_complete = 0
self.cache_hit = False
- self.failure_reason = ""
+ self.failure_reason = ''
self.test_args = benchmark.test_args
self.profiler_args = self._GetExtraAutotestArgs()
self._ce = command_executer.GetCommandExecuter(self._logger,
@@ -71,24 +65,13 @@ class BenchmarkRun(threading.Thread):
# Just use the first machine for running the cached version,
# without locking it.
self.cache = ResultsCache()
- self.cache.Init(self.label.chromeos_image,
- self.label.chromeos_root,
- self.benchmark.test_name,
- self.iteration,
- self.test_args,
- self.profiler_args,
- self.machine_manager,
- self.machine,
- self.label.board,
- self.cache_conditions,
- self._logger,
- self.log_level,
- self.label,
- self.share_cache,
- self.benchmark.suite,
- self.benchmark.show_all_results,
- self.benchmark.run_local
- )
+ self.cache.Init(self.label.chromeos_image, self.label.chromeos_root,
+ self.benchmark.test_name, self.iteration, self.test_args,
+ self.profiler_args, self.machine_manager, self.machine,
+ self.label.board, self.cache_conditions, self._logger,
+ self.log_level, self.label, self.share_cache,
+ self.benchmark.suite, self.benchmark.show_all_results,
+ self.benchmark.run_local)
self.result = self.cache.ReadResult()
self.cache_hit = (self.result is not None)
@@ -100,24 +83,22 @@ class BenchmarkRun(threading.Thread):
self.ReadCache()
if self.result:
- self._logger.LogOutput("%s: Cache hit." % self.name)
+ self._logger.LogOutput('%s: Cache hit.' % self.name)
self._logger.LogOutput(self.result.out, print_to_console=False)
self._logger.LogError(self.result.err, print_to_console=False)
elif self.label.cache_only:
- self._logger.LogOutput("%s: No cache hit." % self.name)
- output = "%s: No Cache hit." % self.name
+ self._logger.LogOutput('%s: No cache hit.' % self.name)
+ output = '%s: No Cache hit.' % self.name
retval = 1
- err = "No cache hit."
- self.result = Result.CreateFromRun(self._logger, self.log_level,
- self.label, self.machine,
- output, err, retval,
- self.benchmark.show_all_results,
- self.benchmark.test_name,
- self.benchmark.suite)
+ err = 'No cache hit.'
+ self.result = Result.CreateFromRun(
+ self._logger, self.log_level, self.label, self.machine, output, err,
+ retval, self.benchmark.show_all_results, self.benchmark.test_name,
+ self.benchmark.suite)
else:
- self._logger.LogOutput("%s: No cache hit." % self.name)
+ self._logger.LogOutput('%s: No cache hit.' % self.name)
self.timeline.Record(STATUS_WAITING)
# Try to acquire a machine now.
self.machine = self.AcquireMachine()
@@ -131,7 +112,7 @@ class BenchmarkRun(threading.Thread):
if self.machine and not self.label.chrome_version:
self.label.chrome_version = self.machine_manager.GetChromeVersion(
- self.machine)
+ self.machine)
if self.terminated:
return
@@ -140,7 +121,7 @@ class BenchmarkRun(threading.Thread):
self.timeline.Record(STATUS_SUCCEEDED)
else:
if self.timeline.GetLastEvent() != STATUS_FAILED:
- self.failure_reason = "Return value of test suite was non-zero."
+ self.failure_reason = 'Return value of test suite was non-zero.'
self.timeline.Record(STATUS_FAILED)
except Exception, e:
@@ -155,19 +136,19 @@ class BenchmarkRun(threading.Thread):
pass
elif self.machine:
if not self.machine.IsReachable():
- self._logger.LogOutput("Machine %s is not reachable, removing it."
- % self.machine.name)
+ self._logger.LogOutput('Machine %s is not reachable, removing it.' %
+ self.machine.name)
self.machine_manager.RemoveMachine(self.machine.name)
- self._logger.LogOutput("Releasing machine: %s" % self.machine.name)
+ self._logger.LogOutput('Releasing machine: %s' % self.machine.name)
self.machine_manager.ReleaseMachine(self.machine)
- self._logger.LogOutput("Released machine: %s" % self.machine.name)
+ self._logger.LogOutput('Released machine: %s' % self.machine.name)
def Terminate(self):
self.terminated = True
self.suite_runner.Terminate()
if self.timeline.GetLastEvent() != STATUS_FAILED:
self.timeline.Record(STATUS_FAILED)
- self.failure_reason = "Thread terminated."
+ self.failure_reason = 'Thread terminated.'
def AcquireMachine(self):
if self.owner_thread is not None:
@@ -177,14 +158,13 @@ class BenchmarkRun(threading.Thread):
while True:
machine = None
if self.terminated:
- raise Exception("Thread terminated while trying to acquire machine.")
+ raise Exception('Thread terminated while trying to acquire machine.')
machine = self.machine_manager.AcquireMachine(self.label)
if machine:
- self._logger.LogOutput("%s: Machine %s acquired at %s" %
- (self.name,
- machine.name,
+ self._logger.LogOutput('%s: Machine %s acquired at %s' %
+ (self.name, machine.name,
datetime.datetime.now()))
break
else:
@@ -193,26 +173,25 @@ class BenchmarkRun(threading.Thread):
return machine
def _GetExtraAutotestArgs(self):
- if self.benchmark.perf_args and self.benchmark.suite == "telemetry":
- self._logger.LogError("Telemetry does not support profiler.")
- self.benchmark.perf_args = ""
+ if self.benchmark.perf_args and self.benchmark.suite == 'telemetry':
+ self._logger.LogError('Telemetry does not support profiler.')
+ self.benchmark.perf_args = ''
- if self.benchmark.perf_args and self.benchmark.suite == "test_that":
- self._logger.LogError("test_that does not support profiler.")
- self.benchmark.perf_args = ""
+ if self.benchmark.perf_args and self.benchmark.suite == 'test_that':
+ self._logger.LogError('test_that does not support profiler.')
+ self.benchmark.perf_args = ''
if self.benchmark.perf_args:
- perf_args_list = self.benchmark.perf_args.split(" ")
- perf_args_list = [perf_args_list[0]] + ["-a"] + perf_args_list[1:]
- perf_args = " ".join(perf_args_list)
- if not perf_args_list[0] in ["record", "stat"]:
- raise Exception("perf_args must start with either record or stat")
- extra_test_args = ["--profiler=custom_perf",
- ("--profiler_args='perf_options=\"%s\"'" %
- perf_args)]
- return " ".join(extra_test_args)
+ perf_args_list = self.benchmark.perf_args.split(' ')
+ perf_args_list = [perf_args_list[0]] + ['-a'] + perf_args_list[1:]
+ perf_args = ' '.join(perf_args_list)
+ if not perf_args_list[0] in ['record', 'stat']:
+ raise Exception('perf_args must start with either record or stat')
+ extra_test_args = ['--profiler=custom_perf',
+ ("--profiler_args='perf_options=\"%s\"'" % perf_args)]
+ return ' '.join(extra_test_args)
else:
- return ""
+ return ''
def RunTest(self, machine):
self.timeline.Record(STATUS_IMAGING)
@@ -221,25 +200,16 @@ class BenchmarkRun(threading.Thread):
# guarenteed.
pass
else:
- self.machine_manager.ImageMachine(machine,
- self.label)
+ self.machine_manager.ImageMachine(machine, self.label)
self.timeline.Record(STATUS_RUNNING)
- [retval, out, err] = self.suite_runner.Run(machine.name,
- self.label,
- self.benchmark,
- self.test_args,
- self.profiler_args)
+ [retval, out, err] = self.suite_runner.Run(machine.name, self.label,
+ self.benchmark, self.test_args,
+ self.profiler_args)
self.run_completed = True
- return Result.CreateFromRun(self._logger,
- self.log_level,
- self.label,
- self.machine,
- out,
- err,
- retval,
+ return Result.CreateFromRun(self._logger, self.log_level, self.label,
+ self.machine, out, err, retval,
self.benchmark.show_all_results,
- self.benchmark.test_name,
- self.benchmark.suite)
+ self.benchmark.test_name, self.benchmark.suite)
def SetCacheConditions(self, cache_conditions):
self.cache_conditions = cache_conditions
@@ -257,42 +227,27 @@ class MockBenchmarkRun(BenchmarkRun):
# Just use the first machine for running the cached version,
# without locking it.
self.cache = MockResultsCache()
- self.cache.Init(self.label.chromeos_image,
- self.label.chromeos_root,
- self.benchmark.test_name,
- self.iteration,
- self.test_args,
- self.profiler_args,
- self.machine_manager,
- self.machine,
- self.label.board,
- self.cache_conditions,
- self._logger,
- self.log_level,
- self.label,
- self.share_cache,
- self.benchmark.suite,
- self.benchmark.show_all_results,
- self.benchmark.run_local
- )
+ self.cache.Init(self.label.chromeos_image, self.label.chromeos_root,
+ self.benchmark.test_name, self.iteration, self.test_args,
+ self.profiler_args, self.machine_manager, self.machine,
+ self.label.board, self.cache_conditions, self._logger,
+ self.log_level, self.label, self.share_cache,
+ self.benchmark.suite, self.benchmark.show_all_results,
+ self.benchmark.run_local)
self.result = self.cache.ReadResult()
self.cache_hit = (self.result is not None)
-
def RunTest(self, machine):
"""Remove Result.CreateFromRun for testing."""
self.timeline.Record(STATUS_IMAGING)
- self.machine_manager.ImageMachine(machine,
- self.label)
+ self.machine_manager.ImageMachine(machine, self.label)
self.timeline.Record(STATUS_RUNNING)
- [retval, out, err] = self.suite_runner.Run(machine.name,
- self.label,
- self.benchmark,
- self.test_args,
+ [retval, out, err] = self.suite_runner.Run(machine.name, self.label,
+ self.benchmark, self.test_args,
self.profiler_args)
self.run_completed = True
- rr = MockResult("logger", self.label, self.log_level, machine)
+ rr = MockResult('logger', self.label, self.log_level, machine)
rr.out = out
rr.err = err
rr.retval = retval
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
index 920b7d71..744f89c1 100755
--- a/crosperf/benchmark_run_unittest.py
+++ b/crosperf/benchmark_run_unittest.py
@@ -3,7 +3,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Testing of benchmark_run."""
import mock
@@ -28,57 +27,60 @@ from results_cache import ResultsCache
class BenchmarkRunTest(unittest.TestCase):
- """
- Unit tests for the BenchmarkRun class and all of its methods.
+ """Unit tests for the BenchmarkRun class and all of its methods.
"""
def setUp(self):
- self.test_benchmark = Benchmark("page_cycler.netsim.top_10", # name
- "page_cycler.netsim.top_10", # test_name
- "", # test_args
+ self.test_benchmark = Benchmark('page_cycler.netsim.top_10', # name
+ 'page_cycler.netsim.top_10', # test_name
+ '', # test_args
1, # iterations
False, # rm_chroot_tmp
- "", # perf_args
- suite="telemetry_Crosperf") # suite
+ '', # perf_args
+ suite='telemetry_Crosperf') # suite
- self.test_label = MockLabel("test1", "image1", "/tmp/test_benchmark_run",
- "x86-alex", "chromeos2-row1-rack4-host9.cros",
- image_args="", cache_dir="", cache_only=False,
- log_level="average", compiler="gcc")
+ self.test_label = MockLabel('test1',
+ 'image1',
+ '/tmp/test_benchmark_run',
+ 'x86-alex',
+ 'chromeos2-row1-rack4-host9.cros',
+ image_args='',
+ cache_dir='',
+ cache_only=False,
+ log_level='average',
+ compiler='gcc')
+ self.test_cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
+ CacheConditions.CHECKSUMS_MATCH]
- self.test_cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
- CacheConditions.CHECKSUMS_MATCH]
-
- self.mock_logger = logger.GetLogger(log_dir="", mock=True)
+ self.mock_logger = logger.GetLogger(log_dir='', mock=True)
self.mock_machine_manager = mock.Mock(spec=MachineManager)
def testDryRun(self):
- my_label = MockLabel("test1", "image1", "/tmp/test_benchmark_run",
- "x86-alex", "chromeos2-row1-rack4-host9.cros",
- image_args="", cache_dir="", cache_only=False,
- log_level="average", compiler="gcc")
-
- logging_level = "average"
- m = MockMachineManager("/tmp/chromeos_root", 0, logging_level)
- m.AddMachine("chromeos2-row1-rack4-host9.cros")
- bench = Benchmark("page_cycler.netsim.top_10", # name
- "page_cycler.netsim.top_10", # test_name
- "", # test_args
+ my_label = MockLabel('test1',
+ 'image1',
+ '/tmp/test_benchmark_run',
+ 'x86-alex',
+ 'chromeos2-row1-rack4-host9.cros',
+ image_args='',
+ cache_dir='',
+ cache_only=False,
+ log_level='average',
+ compiler='gcc')
+
+ logging_level = 'average'
+ m = MockMachineManager('/tmp/chromeos_root', 0, logging_level)
+ m.AddMachine('chromeos2-row1-rack4-host9.cros')
+ bench = Benchmark('page_cycler.netsim.top_10', # name
+ 'page_cycler.netsim.top_10', # test_name
+ '', # test_args
1, # iterations
False, # rm_chroot_tmp
- "", # perf_args
- suite="telemetry_Crosperf") # suite
- b = benchmark_run.MockBenchmarkRun("test run",
- bench,
- my_label,
- 1,
- [],
- m,
- logger.GetLogger(),
- logging_level,
- "")
+ '', # perf_args
+ suite='telemetry_Crosperf') # suite
+ b = benchmark_run.MockBenchmarkRun('test run', bench, my_label, 1, [], m,
+ logger.GetLogger(), logging_level, '')
b.cache = MockResultsCache()
b.suite_runner = MockSuiteRunner()
b.start()
@@ -90,8 +92,7 @@ class BenchmarkRunTest(unittest.TestCase):
'log_level', 'share_cache']
arg_spec = inspect.getargspec(benchmark_run.BenchmarkRun.__init__)
self.assertEqual(len(arg_spec.args), len(args_list))
- self.assertEqual (arg_spec.args, args_list)
-
+ self.assertEqual(arg_spec.args, args_list)
def test_init(self):
# Nothing really worth testing here; just field assignments.
@@ -102,64 +103,63 @@ class BenchmarkRunTest(unittest.TestCase):
pass
def test_run(self):
- br = benchmark_run.BenchmarkRun("test_run", self.test_benchmark,
- self.test_label, 1, self.test_cache_conditions,
- self.mock_machine_manager,
- self.mock_logger,
- "average", "")
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
def MockLogOutput(msg, print_to_console=False):
- "Helper function for test_run."
+ 'Helper function for test_run.'
self.log_output.append(msg)
def MockLogError(msg, print_to_console=False):
- "Helper function for test_run."
+ 'Helper function for test_run.'
self.log_error.append(msg)
def MockRecordStatus(msg):
- "Helper function for test_run."
+ 'Helper function for test_run.'
self.status.append(msg)
def FakeReadCache():
- "Helper function for test_run."
+ 'Helper function for test_run.'
br.cache = mock.Mock(spec=ResultsCache)
self.called_ReadCache = True
return 0
def FakeReadCacheSucceed():
- "Helper function for test_run."
+ 'Helper function for test_run.'
br.cache = mock.Mock(spec=ResultsCache)
br.result = mock.Mock(spec=Result)
- br.result.out = "result.out stuff"
- br.result.err = "result.err stuff"
+ br.result.out = 'result.out stuff'
+ br.result.err = 'result.err stuff'
br.result.retval = 0
self.called_ReadCache = True
return 0
def FakeReadCacheException():
- "Helper function for test_run."
- raise Exception("This is an exception test; it is supposed to happen")
+ 'Helper function for test_run.'
+ raise Exception('This is an exception test; it is supposed to happen')
def FakeAcquireMachine():
- "Helper function for test_run."
- mock_machine = MockCrosMachine ('chromeos1-row3-rack5-host7.cros',
- 'chromeos', 'average')
+ 'Helper function for test_run.'
+ mock_machine = MockCrosMachine('chromeos1-row3-rack5-host7.cros',
+ 'chromeos', 'average')
return mock_machine
def FakeRunTest(_machine):
- "Helper function for test_run."
+ 'Helper function for test_run.'
mock_result = mock.Mock(spec=Result)
mock_result.retval = 0
return mock_result
def FakeRunTestFail(_machine):
- "Helper function for test_run."
+ 'Helper function for test_run.'
mock_result = mock.Mock(spec=Result)
mock_result.retval = 1
return mock_result
def ResetTestValues():
- "Helper function for test_run."
+ 'Helper function for test_run.'
self.log_output = []
self.log_error = []
self.status = []
@@ -177,53 +177,52 @@ class BenchmarkRunTest(unittest.TestCase):
# First test: No cache hit, all goes well.
ResetTestValues()
br.run()
- self.assertTrue (self.called_ReadCache)
- self.assertEqual (self.log_output,
- ['test_run: No cache hit.',
- 'Releasing machine: chromeos1-row3-rack5-host7.cros',
- 'Released machine: chromeos1-row3-rack5-host7.cros'])
- self.assertEqual (len(self.log_error), 0)
- self.assertEqual (self.status, ['WAITING', 'SUCCEEDED'])
+ self.assertTrue(self.called_ReadCache)
+ self.assertEqual(self.log_output,
+ ['test_run: No cache hit.',
+ 'Releasing machine: chromeos1-row3-rack5-host7.cros',
+ 'Released machine: chromeos1-row3-rack5-host7.cros'])
+ self.assertEqual(len(self.log_error), 0)
+ self.assertEqual(self.status, ['WAITING', 'SUCCEEDED'])
# Second test: No cached result found; test run was "terminated" for some
# reason.
ResetTestValues()
br.terminated = True
br.run()
- self.assertTrue (self.called_ReadCache)
- self.assertEqual (self.log_output,
- ['test_run: No cache hit.',
- 'Releasing machine: chromeos1-row3-rack5-host7.cros',
- 'Released machine: chromeos1-row3-rack5-host7.cros'])
- self.assertEqual (len(self.log_error), 0)
- self.assertEqual (self.status, ['WAITING'])
+ self.assertTrue(self.called_ReadCache)
+ self.assertEqual(self.log_output,
+ ['test_run: No cache hit.',
+ 'Releasing machine: chromeos1-row3-rack5-host7.cros',
+ 'Released machine: chromeos1-row3-rack5-host7.cros'])
+ self.assertEqual(len(self.log_error), 0)
+ self.assertEqual(self.status, ['WAITING'])
# Third test. No cached result found; RunTest failed for some reason.
ResetTestValues()
br.terminated = False
br.RunTest = FakeRunTestFail
br.run()
- self.assertTrue (self.called_ReadCache)
- self.assertEqual (self.log_output,
- ['test_run: No cache hit.',
- 'Releasing machine: chromeos1-row3-rack5-host7.cros',
- 'Released machine: chromeos1-row3-rack5-host7.cros'])
- self.assertEqual (len(self.log_error), 0)
- self.assertEqual (self.status, ['WAITING', 'FAILED'])
+ self.assertTrue(self.called_ReadCache)
+ self.assertEqual(self.log_output,
+ ['test_run: No cache hit.',
+ 'Releasing machine: chromeos1-row3-rack5-host7.cros',
+ 'Released machine: chromeos1-row3-rack5-host7.cros'])
+ self.assertEqual(len(self.log_error), 0)
+ self.assertEqual(self.status, ['WAITING', 'FAILED'])
# Fourth test: ReadCache found a cached result.
ResetTestValues()
br.RunTest = FakeRunTest
br.ReadCache = FakeReadCacheSucceed
br.run()
- self.assertTrue (self.called_ReadCache)
- self.assertEqual (self.log_output,
- ['test_run: Cache hit.',
- 'result.out stuff',
- 'Releasing machine: chromeos1-row3-rack5-host7.cros',
- 'Released machine: chromeos1-row3-rack5-host7.cros'])
- self.assertEqual (self.log_error, ['result.err stuff'])
- self.assertEqual (self.status, ['SUCCEEDED'])
+ self.assertTrue(self.called_ReadCache)
+ self.assertEqual(self.log_output,
+ ['test_run: Cache hit.', 'result.out stuff',
+ 'Releasing machine: chromeos1-row3-rack5-host7.cros',
+ 'Released machine: chromeos1-row3-rack5-host7.cros'])
+ self.assertEqual(self.log_error, ['result.err stuff'])
+ self.assertEqual(self.status, ['SUCCEEDED'])
# Fifth test: ReadCache generates an exception; does the try/finally block
# work?
@@ -231,183 +230,172 @@ class BenchmarkRunTest(unittest.TestCase):
br.ReadCache = FakeReadCacheException
br.machine = FakeAcquireMachine()
br.run()
- self.assertEqual (self.log_error,
- ["Benchmark run: 'test_run' failed: This is an exception test; it is supposed to happen"])
- self.assertEqual (self.status, ['FAILED'])
-
+ self.assertEqual(self.log_error, [
+ "Benchmark run: 'test_run' failed: This is an exception test; it is "
+ "supposed to happen"
+ ])
+ self.assertEqual(self.status, ['FAILED'])
def test_terminate_pass(self):
- br = benchmark_run.BenchmarkRun("test_run", self.test_benchmark,
- self.test_label, 1, self.test_cache_conditions,
- self.mock_machine_manager,
- self.mock_logger,
- "average", "")
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
def GetLastEventPassed():
- "Helper function for test_terminate_pass"
+ 'Helper function for test_terminate_pass'
return benchmark_run.STATUS_SUCCEEDED
def RecordStub(status):
- "Helper function for test_terminate_pass"
+ 'Helper function for test_terminate_pass'
self.status = status
self.status = benchmark_run.STATUS_SUCCEEDED
- self.assertFalse (br.terminated)
- self.assertFalse (br.suite_runner._ct.IsTerminated())
+ self.assertFalse(br.terminated)
+ self.assertFalse(br.suite_runner._ct.IsTerminated())
br.timeline.GetLastEvent = GetLastEventPassed
br.timeline.Record = RecordStub
br.Terminate()
- self.assertTrue (br.terminated)
- self.assertTrue (br.suite_runner._ct.IsTerminated())
- self.assertEqual (self.status, benchmark_run.STATUS_FAILED)
-
-
+ self.assertTrue(br.terminated)
+ self.assertTrue(br.suite_runner._ct.IsTerminated())
+ self.assertEqual(self.status, benchmark_run.STATUS_FAILED)
def test_terminate_fail(self):
- br = benchmark_run.BenchmarkRun("test_run", self.test_benchmark,
- self.test_label, 1, self.test_cache_conditions,
- self.mock_machine_manager,
- self.mock_logger,
- "average", "")
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
def GetLastEventFailed():
- "Helper function for test_terminate_fail"
+ 'Helper function for test_terminate_fail'
return benchmark_run.STATUS_FAILED
def RecordStub(status):
- "Helper function for test_terminate_fail"
+ 'Helper function for test_terminate_fail'
self.status = status
self.status = benchmark_run.STATUS_SUCCEEDED
- self.assertFalse (br.terminated)
- self.assertFalse (br.suite_runner._ct.IsTerminated())
+ self.assertFalse(br.terminated)
+ self.assertFalse(br.suite_runner._ct.IsTerminated())
br.timeline.GetLastEvent = GetLastEventFailed
br.timeline.Record = RecordStub
br.Terminate()
- self.assertTrue (br.terminated)
- self.assertTrue (br.suite_runner._ct.IsTerminated())
- self.assertEqual (self.status, benchmark_run.STATUS_SUCCEEDED)
-
+ self.assertTrue(br.terminated)
+ self.assertTrue(br.suite_runner._ct.IsTerminated())
+ self.assertEqual(self.status, benchmark_run.STATUS_SUCCEEDED)
def test_acquire_machine(self):
- br = benchmark_run.BenchmarkRun("test_run", self.test_benchmark,
- self.test_label, 1, self.test_cache_conditions,
- self.mock_machine_manager,
- self.mock_logger,
- "average", "")
-
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
br.terminated = True
- self.assertRaises (Exception, br.AcquireMachine)
+ self.assertRaises(Exception, br.AcquireMachine)
br.terminated = False
- mock_machine = MockCrosMachine ('chromeos1-row3-rack5-host7.cros',
- 'chromeos', 'average')
+ mock_machine = MockCrosMachine('chromeos1-row3-rack5-host7.cros',
+ 'chromeos', 'average')
self.mock_machine_manager.AcquireMachine.return_value = mock_machine
machine = br.AcquireMachine()
- self.assertEqual (machine.name, 'chromeos1-row3-rack5-host7.cros')
-
+ self.assertEqual(machine.name, 'chromeos1-row3-rack5-host7.cros')
def test_get_extra_autotest_args(self):
- br = benchmark_run.BenchmarkRun("test_run", self.test_benchmark,
- self.test_label, 1, self.test_cache_conditions,
- self.mock_machine_manager,
- self.mock_logger,
- "average", "")
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
def MockLogError(err_msg):
- "Helper function for test_get_extra_autotest_args"
+ 'Helper function for test_get_extra_autotest_args'
self.err_msg = err_msg
self.mock_logger.LogError = MockLogError
result = br._GetExtraAutotestArgs()
- self.assertEqual(result, "")
+ self.assertEqual(result, '')
- self.test_benchmark.perf_args = "record -e cycles"
+ self.test_benchmark.perf_args = 'record -e cycles'
result = br._GetExtraAutotestArgs()
- self.assertEqual(result,
-"--profiler=custom_perf --profiler_args='perf_options=\"record -a -e cycles\"'")
+ self.assertEqual(
+ result,
+ "--profiler=custom_perf --profiler_args='perf_options=\"record -a -e "
+ "cycles\"'")
- self.test_benchmark.suite = "telemetry"
+ self.test_benchmark.suite = 'telemetry'
result = br._GetExtraAutotestArgs()
- self.assertEqual(result, "")
- self.assertEqual(self.err_msg, "Telemetry does not support profiler.")
+ self.assertEqual(result, '')
+ self.assertEqual(self.err_msg, 'Telemetry does not support profiler.')
- self.test_benchmark.perf_args = "record -e cycles"
- self.test_benchmark.suite = "test_that"
+ self.test_benchmark.perf_args = 'record -e cycles'
+ self.test_benchmark.suite = 'test_that'
result = br._GetExtraAutotestArgs()
- self.assertEqual(result, "")
- self.assertEqual(self.err_msg, "test_that does not support profiler.")
+ self.assertEqual(result, '')
+ self.assertEqual(self.err_msg, 'test_that does not support profiler.')
- self.test_benchmark.perf_args = "junk args"
- self.test_benchmark.suite = "telemetry_Crosperf"
+ self.test_benchmark.perf_args = 'junk args'
+ self.test_benchmark.suite = 'telemetry_Crosperf'
self.assertRaises(Exception, br._GetExtraAutotestArgs)
-
@mock.patch.object(SuiteRunner, 'Run')
@mock.patch.object(Result, 'CreateFromRun')
def test_run_test(self, mock_result, mock_runner):
- br = benchmark_run.BenchmarkRun("test_run", self.test_benchmark,
- self.test_label, 1, self.test_cache_conditions,
- self.mock_machine_manager,
- self.mock_logger,
- "average", "")
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
self.status = []
+
def MockRecord(status):
self.status.append(status)
br.timeline.Record = MockRecord
- mock_machine = MockCrosMachine ('chromeos1-row3-rack5-host7.cros',
- 'chromeos', 'average')
- mock_runner.return_value = [0, "{'Score':100}", ""]
+ mock_machine = MockCrosMachine('chromeos1-row3-rack5-host7.cros',
+ 'chromeos', 'average')
+ mock_runner.return_value = [0, "{'Score':100}", '']
br.RunTest(mock_machine)
self.assertTrue(br.run_completed)
- self.assertEqual (self.status, [ benchmark_run.STATUS_IMAGING,
- benchmark_run.STATUS_RUNNING])
-
- self.assertEqual (br.machine_manager.ImageMachine.call_count, 1)
- br.machine_manager.ImageMachine.assert_called_with (mock_machine,
- self.test_label)
- self.assertEqual (mock_runner.call_count, 1)
- mock_runner.assert_called_with (mock_machine.name, br.label,
- br.benchmark, "", br.profiler_args)
-
- self.assertEqual (mock_result.call_count, 1)
- mock_result.assert_called_with (self.mock_logger, 'average',
- self.test_label, None, "{'Score':100}",
- "", 0, False, 'page_cycler.netsim.top_10',
- 'telemetry_Crosperf')
+ self.assertEqual(self.status, [benchmark_run.STATUS_IMAGING,
+ benchmark_run.STATUS_RUNNING])
+ self.assertEqual(br.machine_manager.ImageMachine.call_count, 1)
+ br.machine_manager.ImageMachine.assert_called_with(mock_machine,
+ self.test_label)
+ self.assertEqual(mock_runner.call_count, 1)
+ mock_runner.assert_called_with(mock_machine.name, br.label, br.benchmark,
+ '', br.profiler_args)
+ self.assertEqual(mock_result.call_count, 1)
+ mock_result.assert_called_with(
+ self.mock_logger, 'average', self.test_label, None, "{'Score':100}", '',
+ 0, False, 'page_cycler.netsim.top_10', 'telemetry_Crosperf')
def test_set_cache_conditions(self):
- br = benchmark_run.BenchmarkRun("test_run", self.test_benchmark,
- self.test_label, 1, self.test_cache_conditions,
- self.mock_machine_manager,
- self.mock_logger,
- "average", "")
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
- phony_cache_conditions = [ 123, 456, True, False ]
+ phony_cache_conditions = [123, 456, True, False]
self.assertEqual(br.cache_conditions, self.test_cache_conditions)
- br.SetCacheConditions (phony_cache_conditions)
+ br.SetCacheConditions(phony_cache_conditions)
self.assertEqual(br.cache_conditions, phony_cache_conditions)
br.SetCacheConditions(self.test_cache_conditions)
self.assertEqual(br.cache_conditions, self.test_cache_conditions)
-if __name__ == "__main__":
+if __name__ == '__main__':
unittest.main()
diff --git a/crosperf/benchmark_unittest.py b/crosperf/benchmark_unittest.py
index 469ac0e5..32fb721e 100755
--- a/crosperf/benchmark_unittest.py
+++ b/crosperf/benchmark_unittest.py
@@ -7,39 +7,40 @@ from benchmark import Benchmark
import unittest
+
class BenchmarkTestCase(unittest.TestCase):
def test_benchmark(self):
# Test creating a benchmark with all the fields filled out.
- b1 = Benchmark("b1_test", # name
- "octane", # test_name
- "", # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- "record -e cycles", # perf_args
- "telemetry_Crosperf", # suite
- True) # show_all_results
+ b1 = Benchmark('b1_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles', # perf_args
+ 'telemetry_Crosperf', # suite
+ True) # show_all_results
# Test creating a benchmark field with default fields left out.
- b2 = Benchmark("b2_test", # name
- "octane", # test_name
- "", # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- "record -e cycles") # perf_args
- self.assertEqual(b2.suite, "")
+ b2 = Benchmark('b2_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles') # perf_args
+ self.assertEqual(b2.suite, '')
self.assertFalse(b2.show_all_results)
# Test explicitly creating 'suite=Telemetry' and 'show_all_results=False"
# and see what happens.
- b3 = Benchmark("b3_test", # name
- "octane", # test_name
- "", # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- "record -e cycles", # perf_args
- "telemetry", # suite
- False) # show_all_results
+ b3 = Benchmark('b3_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles', # perf_args
+ 'telemetry', # suite
+ False) # show_all_results
self.assertTrue(b3.show_all_results)
# Check to see if the args to Benchmark have changed since the last time
@@ -50,7 +51,7 @@ class BenchmarkTestCase(unittest.TestCase):
arg_spec = inspect.getargspec(Benchmark.__init__)
self.assertEqual(len(arg_spec.args), len(args_list))
for arg in args_list:
- self.assertIn (arg, arg_spec.args)
+ self.assertIn(arg, arg_spec.args)
if __name__ == '__main__':
diff --git a/crosperf/column_chart.py b/crosperf/column_chart.py
index 3be0f19a..7e6821d0 100644
--- a/crosperf/column_chart.py
+++ b/crosperf/column_chart.py
@@ -1,9 +1,10 @@
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""Module to draw column chart."""
+
class ColumnChart(object):
"""class to draw column chart."""
+
def __init__(self, title, width, height):
self.title = title
self.chart_div = filter(str.isalnum, title)
@@ -26,18 +27,18 @@ class ColumnChart(object):
self.rows.append(row)
def GetJavascript(self):
- res = "var data = new google.visualization.DataTable();\n"
+ res = 'var data = new google.visualization.DataTable();\n'
for column in self.columns:
res += "data.addColumn('%s', '%s');\n" % column
- res += "data.addRows(%s);\n" % len(self.rows)
+ res += 'data.addRows(%s);\n' % len(self.rows)
for row in range(len(self.rows)):
for column in range(len(self.columns)):
val = self.rows[row][column]
if isinstance(val, str):
val = "'%s'" % val
- res += "data.setValue(%s, %s, %s);\n" % (row, column, val)
+ res += 'data.setValue(%s, %s, %s);\n' % (row, column, val)
- series_javascript = ""
+ series_javascript = ''
for series in self.series:
series_javascript += "%s: {type: '%s', color: '%s'}, " % series
@@ -48,9 +49,10 @@ chart_%s.draw(data, {width: %s, height: %s, title: '%s', legend: 'none',
seriesType: "bars", lineWidth: 0, pointSize: 5, series: {%s},
vAxis: {minValue: 0}})
"""
+
res += chart_add_javascript % (self.chart_div, self.chart_div,
- self.chart_div, self.width,
- self.height, self.title, series_javascript)
+ self.chart_div, self.width, self.height,
+ self.title, series_javascript)
return res
def GetDiv(self):
diff --git a/crosperf/compare_machines.py b/crosperf/compare_machines.py
index f04fa2ed..0a61eeb9 100644
--- a/crosperf/compare_machines.py
+++ b/crosperf/compare_machines.py
@@ -1,7 +1,6 @@
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Module to compare two machines."""
from __future__ import print_function
@@ -12,30 +11,31 @@ import argparse
from machine_manager import CrosMachine
+
def PrintUsage(msg):
print(msg)
- print("Usage: ")
- print("\n compare_machines.py --chromeos_root=/path/to/chroot/ "
- "machine1 machine2 ...")
+ print('Usage: ')
+ print('\n compare_machines.py --chromeos_root=/path/to/chroot/ '
+ 'machine1 machine2 ...')
def Main(argv):
parser = argparse.ArgumentParser()
- parser.add_argument("--chromeos_root", default="/path/to/chromeos",
- dest="chromeos_root",
- help="ChromeOS root checkout directory")
- parser.add_argument("remotes", nargs=argparse.REMAINDER)
+ parser.add_argument('--chromeos_root',
+ default='/path/to/chromeos',
+ dest='chromeos_root',
+ help='ChromeOS root checkout directory')
+ parser.add_argument('remotes', nargs=argparse.REMAINDER)
options = parser.parse_args(argv)
machine_list = options.remotes
if len(machine_list) < 2:
- PrintUsage("ERROR: Must specify at least two machines.")
+ PrintUsage('ERROR: Must specify at least two machines.')
return 1
elif not os.path.exists(options.chromeos_root):
- PrintUsage("Error: chromeos_root does not exist %s" %
- options.chromeos_root)
+ PrintUsage('Error: chromeos_root does not exist %s' % options.chromeos_root)
return 1
chroot = options.chromeos_root
@@ -48,13 +48,13 @@ def Main(argv):
ret = 0
for cm in cros_machines:
- print("checksum for %s : %s" % (cm.name, cm.machine_checksum))
+ print('checksum for %s : %s' % (cm.name, cm.machine_checksum))
if cm.machine_checksum != test_machine_checksum:
ret = 1
- print("Machine checksums do not all match")
+ print('Machine checksums do not all match')
if ret == 0:
- print("Machines all match.")
+ print('Machines all match.')
return ret
diff --git a/crosperf/config.py b/crosperf/config.py
index 75f88256..58e053c8 100644
--- a/crosperf/config.py
+++ b/crosperf/config.py
@@ -1,5 +1,4 @@
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""A configure file."""
config = {}
diff --git a/crosperf/config_unittest.py b/crosperf/config_unittest.py
index 098ea7c7..397f2c2c 100755
--- a/crosperf/config_unittest.py
+++ b/crosperf/config_unittest.py
@@ -6,6 +6,7 @@ import config
import unittest
+
class ConfigTestCase(unittest.TestCase):
def test_config(self):
diff --git a/crosperf/crosperf.py b/crosperf/crosperf.py
index 21553909..28e78f5b 100755
--- a/crosperf/crosperf.py
+++ b/crosperf/crosperf.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""The driver script for running performance benchmarks on ChromeOS."""
import atexit
@@ -21,32 +20,33 @@ import test_flag
class MyIndentedHelpFormatter(optparse.IndentedHelpFormatter):
+
def format_description(self, description):
return description
def SetupParserOptions(parser):
"""Add all options to the parser."""
- parser.add_option("--dry_run",
- dest="dry_run",
- help=("Parse the experiment file and "
- "show what will be done"),
- action="store_true",
+ parser.add_option('--dry_run',
+ dest='dry_run',
+ help=('Parse the experiment file and '
+ 'show what will be done'),
+ action='store_true',
default=False)
# Allow each of the global fields to be overridden by passing in
# options. Add each global field as an option.
- option_settings = GlobalSettings("")
+ option_settings = GlobalSettings('')
for field_name in option_settings.fields:
field = option_settings.fields[field_name]
- parser.add_option("--%s" % field.name,
+ parser.add_option('--%s' % field.name,
dest=field.name,
help=field.description,
- action="store")
+ action='store')
def ConvertOptionsToSettings(options):
"""Convert options passed in into global settings."""
- option_settings = GlobalSettings("option_settings")
+ option_settings = GlobalSettings('option_settings')
for option_name in options.__dict__:
if (options.__dict__[option_name] is not None and
option_name in option_settings.fields):
@@ -73,18 +73,19 @@ def Main(argv):
parser = optparse.OptionParser(usage=Help().GetUsage(),
description=Help().GetHelp(),
formatter=MyIndentedHelpFormatter(),
- version="%prog 3.0")
+ version='%prog 3.0')
- parser.add_option("--noschedv2",
- dest="noschedv2",
+ parser.add_option('--noschedv2',
+ dest='noschedv2',
default=False,
- action="store_true",
- help=("Do not use new scheduler. "
- "Use original scheduler instead."))
- parser.add_option("-l", "--log_dir",
- dest="log_dir",
- default="",
- help="The log_dir, default is under <crosperf_logs>/logs")
+ action='store_true',
+ help=('Do not use new scheduler. '
+ 'Use original scheduler instead.'))
+ parser.add_option('-l',
+ '--log_dir',
+ dest='log_dir',
+ default='',
+ help='The log_dir, default is under <crosperf_logs>/logs')
SetupParserOptions(parser)
options, args = parser.parse_args(argv)
@@ -98,22 +99,21 @@ def Main(argv):
if len(args) == 2:
experiment_filename = args[1]
else:
- parser.error("Invalid number arguments.")
+ parser.error('Invalid number arguments.')
working_directory = os.getcwd()
if options.dry_run:
test_flag.SetTestMode(True)
- experiment_file = ExperimentFile(open(experiment_filename, "rb"),
- option_settings)
- if not experiment_file.GetGlobalSettings().GetField("name"):
+ experiment_file = ExperimentFile(
+ open(experiment_filename, 'rb'), option_settings)
+ if not experiment_file.GetGlobalSettings().GetField('name'):
experiment_name = os.path.basename(experiment_filename)
- experiment_file.GetGlobalSettings().SetField("name", experiment_name)
+ experiment_file.GetGlobalSettings().SetField('name', experiment_name)
experiment = ExperimentFactory().GetExperiment(experiment_file,
- working_directory,
- log_dir)
+ working_directory, log_dir)
- json_report = experiment_file.GetGlobalSettings().GetField("json_report")
+ json_report = experiment_file.GetGlobalSettings().GetField('json_report')
signal.signal(signal.SIGTERM, CallExitHandler)
atexit.register(Cleanup, experiment)
@@ -121,10 +121,12 @@ def Main(argv):
if options.dry_run:
runner = MockExperimentRunner(experiment, json_report)
else:
- runner = ExperimentRunner(experiment, json_report,
+ runner = ExperimentRunner(experiment,
+ json_report,
using_schedv2=(not options.noschedv2))
runner.Run()
-if __name__ == "__main__":
+
+if __name__ == '__main__':
Main(sys.argv)
diff --git a/crosperf/crosperf_test.py b/crosperf/crosperf_test.py
index 0c50e7b5..09aefcb6 100755
--- a/crosperf/crosperf_test.py
+++ b/crosperf/crosperf_test.py
@@ -8,7 +8,6 @@ import unittest
import crosperf
from utils.file_utils import FileUtils
-
EXPERIMENT_FILE_1 = """
board: x86-alex
remote: chromeos-alex3
@@ -28,13 +27,14 @@ EXPERIMENT_FILE_1 = """
class CrosPerfTest(unittest.TestCase):
+
def testDryRun(self):
filehandle, filename = tempfile.mkstemp()
os.write(filehandle, EXPERIMENT_FILE_1)
- crosperf.Main(["", filename, "--dry_run"])
+ crosperf.Main(['', filename, '--dry_run'])
os.remove(filename)
-if __name__ == "__main__":
+if __name__ == '__main__':
FileUtils.Configure(True)
unittest.main()
diff --git a/crosperf/crosperf_unittest.py b/crosperf/crosperf_unittest.py
index 082d8a6c..42a78ef8 100755
--- a/crosperf/crosperf_unittest.py
+++ b/crosperf/crosperf_unittest.py
@@ -1,14 +1,12 @@
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
-
"""Unittest for crosperf."""
import atexit
import os
import optparse
import StringIO
-
import mock
import unittest
@@ -40,6 +38,7 @@ EXPERIMENT_FILE_1 = """
}
"""
+
class CrosperfTest(unittest.TestCase):
def setUp(self):
@@ -51,27 +50,28 @@ class CrosperfTest(unittest.TestCase):
parser = optparse.OptionParser(usage=Help().GetUsage(),
description=Help().GetHelp(),
formatter=crosperf.MyIndentedHelpFormatter(),
- version="%prog 3.0")
- parser.add_option("-l", "--log_dir",
- dest="log_dir",
- default="",
- help="The log_dir, default is under <crosperf_logs>/logs")
+ version='%prog 3.0')
+ parser.add_option('-l',
+ '--log_dir',
+ dest='log_dir',
+ default='',
+ help='The log_dir, default is under <crosperf_logs>/logs')
options_before = parser._get_all_options()
self.assertEqual(len(options_before), 3)
crosperf.SetupParserOptions(parser)
options_after = parser._get_all_options()
self.assertEqual(len(options_after), 29)
-
def test_convert_options_to_settings(self):
parser = optparse.OptionParser(usage=Help().GetUsage(),
description=Help().GetHelp(),
formatter=crosperf.MyIndentedHelpFormatter(),
- version="%prog 3.0")
- parser.add_option("-l", "--log_dir",
- dest="log_dir",
- default="",
- help="The log_dir, default is under <crosperf_logs>/logs")
+ version='%prog 3.0')
+ parser.add_option('-l',
+ '--log_dir',
+ dest='log_dir',
+ default='',
+ help='The log_dir, default is under <crosperf_logs>/logs')
crosperf.SetupParserOptions(parser)
argv = ['crosperf/crosperf.py', 'temp.exp', '--rerun=True']
options, args = parser.parse_args(argv)
@@ -86,5 +86,5 @@ class CrosperfTest(unittest.TestCase):
self.assertFalse(settings.GetField('rerun'))
-if __name__ == "__main__":
+if __name__ == '__main__':
unittest.main()
diff --git a/crosperf/download_images.py b/crosperf/download_images.py
index 8fecf8b3..55fda51b 100644
--- a/crosperf/download_images.py
+++ b/crosperf/download_images.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright (c) 2014, 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -11,27 +10,29 @@ import test_flag
from utils import command_executer
+
class MissingImage(Exception):
"""Raised when the requested image does not exist in gs://"""
+
class ImageDownloader(object):
- def __init__(self, logger_to_use=None, log_level="verbose",
- cmd_exec=None):
+ def __init__(self, logger_to_use=None, log_level='verbose', cmd_exec=None):
self._logger = logger_to_use
self.log_level = log_level
- self._ce = cmd_exec or command_executer.GetCommandExecuter(self._logger,
- log_level = self.log_level)
+ self._ce = cmd_exec or command_executer.GetCommandExecuter(
+ self._logger,
+ log_level=self.log_level)
- def _GetBuildID (self, chromeos_root, xbuddy_label):
+ def _GetBuildID(self, chromeos_root, xbuddy_label):
# Get the translation of the xbuddy_label into the real Google Storage
# image name.
- command = ("cd ~/trunk/src/third_party/toolchain-utils/crosperf; "
+ command = ('cd ~/trunk/src/third_party/toolchain-utils/crosperf; '
"python translate_xbuddy.py '%s'" % xbuddy_label)
retval, build_id_tuple_str, _ = self._ce.ChrootRunCommandWOutput(
chromeos_root, command)
if not build_id_tuple_str:
- raise MissingImage ("Unable to find image for '%s'" % xbuddy_label)
+ raise MissingImage("Unable to find image for '%s'" % xbuddy_label)
build_id_tuple = ast.literal_eval(build_id_tuple_str)
build_id = build_id_tuple[0]
@@ -39,14 +40,13 @@ class ImageDownloader(object):
return build_id
def _DownloadImage(self, chromeos_root, build_id, image_name):
- if self.log_level == "average":
- self._logger.LogOutput ("Preparing to download %s image to local "
- "directory." % build_id)
+ if self.log_level == 'average':
+ self._logger.LogOutput('Preparing to download %s image to local '
+ 'directory.' % build_id)
# Make sure the directory for downloading the image exists.
- download_path = os.path.join(chromeos_root, "chroot/tmp",
- build_id)
- image_path = os.path.join(download_path, "chromiumos_test_image.bin")
+ download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id)
+ image_path = os.path.join(download_path, 'chromiumos_test_image.bin')
if not os.path.exists(download_path):
os.makedirs(download_path)
@@ -54,10 +54,10 @@ class ImageDownloader(object):
# download the image.
status = 0
if not os.path.exists(image_path):
- command = "gsutil cp %s /tmp/%s" % (image_name, build_id)
+ command = 'gsutil cp %s /tmp/%s' % (image_name, build_id)
- if self.log_level != "verbose":
- self._logger.LogOutput ("CMD: %s" % command)
+ if self.log_level != 'verbose':
+ self._logger.LogOutput('CMD: %s' % command)
status = self._ce.ChrootRunCommand(chromeos_root, command)
if status == 0:
@@ -67,34 +67,33 @@ class ImageDownloader(object):
def _UncompressImage(self, chromeos_root, build_id):
# Check to see if the file has already been uncompresssed, etc.
- if os.path.exists(os.path.join(chromeos_root, "chroot/tmp", build_id,
- "chromiumos_test_image.bin")):
+ if os.path.exists(os.path.join(chromeos_root, 'chroot/tmp', build_id,
+ 'chromiumos_test_image.bin')):
return 0
# Uncompress and untar the downloaded image.
- command = ("cd /tmp/%s ;unxz chromiumos_test_image.tar.xz; "
- "tar -xvf chromiumos_test_image.tar" % build_id)
- if self.log_level != "verbose":
- self._logger.LogOutput("CMD: %s" % command)
- print("(Uncompressing and un-tarring may take a couple of minutes..."
- "please be patient.)")
+ command = ('cd /tmp/%s ;unxz chromiumos_test_image.tar.xz; '
+ 'tar -xvf chromiumos_test_image.tar' % build_id)
+ if self.log_level != 'verbose':
+ self._logger.LogOutput('CMD: %s' % command)
+ print('(Uncompressing and un-tarring may take a couple of minutes...'
+ 'please be patient.)')
retval = self._ce.ChrootRunCommand(chromeos_root, command)
return retval
-
def Run(self, chromeos_root, xbuddy_label):
build_id = self._GetBuildID(chromeos_root, xbuddy_label)
- image_name = ("gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz"
+ image_name = ('gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz'
% build_id)
# Verify that image exists for build_id, before attempting to
# download it.
status = 0
if not test_flag.GetTestMode():
- cmd = "gsutil ls %s" % image_name
+ cmd = 'gsutil ls %s' % image_name
status = self._ce.ChrootRunCommand(chromeos_root, cmd)
if status != 0:
- raise MissingImage("Cannot find official image: %s." % image_name)
+ raise MissingImage('Cannot find official image: %s.' % image_name)
image_path = self._DownloadImage(chromeos_root, build_id, image_name)
retval = 0
if image_path:
@@ -102,7 +101,7 @@ class ImageDownloader(object):
else:
retval = 1
- if retval == 0 and self.log_level != "quiet":
- self._logger.LogOutput("Using image from %s." % image_path)
+ if retval == 0 and self.log_level != 'quiet':
+ self._logger.LogOutput('Using image from %s.' % image_path)
return retval, image_path
diff --git a/crosperf/download_images_buildid_test.py b/crosperf/download_images_buildid_test.py
index bfa61006..e3352f8e 100755
--- a/crosperf/download_images_buildid_test.py
+++ b/crosperf/download_images_buildid_test.py
@@ -7,7 +7,6 @@ import sys
import download_images
-
#On May 1, 2014:
#latest : lumpy-release/R34-5500.132.0
#latest-beta : lumpy-release/R35-5712.43.0
@@ -20,90 +19,89 @@ class ImageDownloaderBuildIDTest(object):
def __init__(self):
parser = optparse.OptionParser()
- parser.add_option("-c", "--chromeos_root", dest="chromeos_root",
- help="Directory containing ChromeOS root.")
+ parser.add_option('-c',
+ '--chromeos_root',
+ dest='chromeos_root',
+ help='Directory containing ChromeOS root.')
options = parser.parse_args(sys.argv[1:])[0]
if options.chromeos_root is None:
- self._usage(parser, "--chromeos_root must be set")
+ self._usage(parser, '--chromeos_root must be set')
self.chromeos_root = options.chromeos_root
self.tests_passed = 0
self.tests_run = 0
self.tests_failed = 0
def _usage(self, parser, message):
- print "ERROR: " + message
+ print 'ERROR: ' + message
parser.print_help()
sys.exit(0)
def print_test_status(self):
- print "----------------------------------------\n"
- print "Tests attempted: %d" % self.tests_run
- print "Tests passed: %d" % self.tests_passed
- print "Tests failed: %d" % self.tests_failed
- print "\n----------------------------------------"
-
- def assert_failure (self, msg):
- print "Assert failure: %s" % msg
+ print '----------------------------------------\n'
+ print 'Tests attempted: %d' % self.tests_run
+ print 'Tests passed: %d' % self.tests_passed
+ print 'Tests failed: %d' % self.tests_failed
+ print '\n----------------------------------------'
+
+ def assert_failure(self, msg):
+ print 'Assert failure: %s' % msg
self.print_test_status()
sys.exit(1)
-
def assertIsNotNone(self, arg, arg_name):
if arg == None:
self.tests_failed = self.tests_failed + 1
- self.assert_failure ("%s is not None" % arg_name)
-
+ self.assert_failure('%s is not None' % arg_name)
def assertNotEqual(self, arg1, arg2, arg1_name, arg2_name):
if arg1 == arg2:
self.tests_failed = self.tests_failed + 1
- self.assert_failure ("%s is not NotEqual to %s" % (arg1_name, arg2_name))
+ self.assert_failure('%s is not NotEqual to %s' % (arg1_name, arg2_name))
def assertEqual(self, arg1, arg2, arg1_name, arg2_name):
if arg1 != arg2:
self.tests_failed = self.tests_failed + 1
- self.assert_failure ("%s is not Equal to %s" % (arg1_name, arg2_name))
+ self.assert_failure('%s is not Equal to %s' % (arg1_name, arg2_name))
-
- def test_one_id (self, downloader, test_id, result_string, exact_match):
+ def test_one_id(self, downloader, test_id, result_string, exact_match):
print "Translating '%s'" % test_id
self.tests_run = self.tests_run + 1
result = downloader._GetBuildID(self.chromeos_root, test_id)
# Verify that we got a build id back.
- self.assertIsNotNone(result, "result")
+ self.assertIsNotNone(result, 'result')
# Verify that the result either contains or exactly matches the
# result_string, depending on the exact_match argument.
if exact_match:
- self.assertEqual (result, result_string, "result", result_string)
+ self.assertEqual(result, result_string, 'result', result_string)
else:
- self.assertNotEqual (result.find(result_string), -1, "result.find", "-1")
+ self.assertNotEqual(result.find(result_string), -1, 'result.find', '-1')
self.tests_passed = self.tests_passed + 1
def test_get_build_id(self):
+ """Test that the actual translating of xbuddy names is working properly.
"""
- Test that the actual translating of xbuddy names is working properly.
- """
- downloader = download_images.ImageDownloader(log_level="quiet")
-
- self.test_one_id (downloader, "remote/lumpy/latest-dev", "lumpy-release/R",
- False)
- self.test_one_id (downloader,
- "remote/trybot-lumpy-release-afdo-use/R35-5672.0.0-b86",
- "trybot-lumpy-release-afdo-use/R35-5672.0.0-b86", True)
- self.test_one_id (downloader, "remote/lumpy-release/R35-5672.0.0",
- "lumpy-release/R35-5672.0.0", True)
- self.test_one_id (downloader, "remote/lumpy/latest-dev", "lumpy-release/R",
- False)
- self.test_one_id (downloader, "remote/lumpy/latest-official",
- "lumpy-release/R", False)
- self.test_one_id (downloader, "remote/lumpy/latest-beta", "lumpy-release/R",
- False)
+ downloader = download_images.ImageDownloader(log_level='quiet')
+
+ self.test_one_id(downloader, 'remote/lumpy/latest-dev', 'lumpy-release/R',
+ False)
+ self.test_one_id(downloader,
+ 'remote/trybot-lumpy-release-afdo-use/R35-5672.0.0-b86',
+ 'trybot-lumpy-release-afdo-use/R35-5672.0.0-b86', True)
+ self.test_one_id(downloader, 'remote/lumpy-release/R35-5672.0.0',
+ 'lumpy-release/R35-5672.0.0', True)
+ self.test_one_id(downloader, 'remote/lumpy/latest-dev', 'lumpy-release/R',
+ False)
+ self.test_one_id(downloader, 'remote/lumpy/latest-official',
+ 'lumpy-release/R', False)
+ self.test_one_id(downloader, 'remote/lumpy/latest-beta', 'lumpy-release/R',
+ False)
self.print_test_status()
+
if __name__ == '__main__':
tester = ImageDownloaderBuildIDTest()
tester.test_get_build_id()
diff --git a/crosperf/download_images_unittest.py b/crosperf/download_images_unittest.py
index 9ca40a81..237369b7 100755
--- a/crosperf/download_images_unittest.py
+++ b/crosperf/download_images_unittest.py
@@ -12,10 +12,10 @@ from cros_utils import logger
import test_flag
-MOCK_LOGGER = logger.GetLogger(log_dir="", mock=True)
+MOCK_LOGGER = logger.GetLogger(log_dir='', mock=True)
-class ImageDownloaderTestcast(unittest.TestCase):
+class ImageDownloaderTestcast(unittest.TestCase):
@mock.patch.object(os, 'makedirs')
@mock.patch.object(os.path, 'exists')
@@ -23,9 +23,9 @@ class ImageDownloaderTestcast(unittest.TestCase):
# Set mock and test values.
mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
- test_chroot = "/usr/local/home/chromeos"
- test_build_id = "lumpy-release/R36-5814.0.0"
- image_path = ("gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz"
+ test_chroot = '/usr/local/home/chromeos'
+ test_build_id = 'lumpy-release/R36-5814.0.0'
+ image_path = ('gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz'
% test_build_id)
downloader = download_images.ImageDownloader(logger_to_use=MOCK_LOGGER,
@@ -38,16 +38,23 @@ class ImageDownloaderTestcast(unittest.TestCase):
# Verify os.path.exists was called twice, with proper arguments.
self.assertEqual(mock_path_exists.call_count, 2)
- mock_path_exists.assert_called_with('/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0/chromiumos_test_image.bin')
- mock_path_exists.assert_any_call('/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0')
+ mock_path_exists.assert_called_with(
+ '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0/chromiumos_test_image.bin')
+ mock_path_exists.assert_any_call(
+ '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0')
# Verify we called os.mkdirs
self.assertEqual(mock_mkdirs.call_count, 1)
- mock_mkdirs.assert_called_with('/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0')
+ mock_mkdirs.assert_called_with(
+ '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0')
# Verify we called ChrootRunCommand once, with proper arguments.
- self.assertEqual (mock_cmd_exec.ChrootRunCommand.call_count, 1)
- mock_cmd_exec.ChrootRunCommand.assert_called_with('/usr/local/home/chromeos', 'gsutil cp gs://chromeos-image-archive/lumpy-release/R36-5814.0.0/chromiumos_test_image.tar.xz /tmp/lumpy-release/R36-5814.0.0')
+ self.assertEqual(mock_cmd_exec.ChrootRunCommand.call_count, 1)
+ mock_cmd_exec.ChrootRunCommand.assert_called_with(
+ '/usr/local/home/chromeos',
+ 'gsutil cp '
+ 'gs://chromeos-image-archive/lumpy-release/R36-5814.0.0/chromiumos_test_image.tar.xz'
+ ' /tmp/lumpy-release/R36-5814.0.0')
# Reset the velues in the mocks; set os.path.exists to always return True.
mock_path_exists.reset_mock()
@@ -55,19 +62,19 @@ class ImageDownloaderTestcast(unittest.TestCase):
mock_path_exists.return_value = True
# Run downloader
- downloader._DownloadImage(test_chroot, test_build_id,image_path)
+ downloader._DownloadImage(test_chroot, test_build_id, image_path)
# Verify os.path.exists was called twice, with proper arguments.
self.assertEqual(mock_path_exists.call_count, 2)
- mock_path_exists.assert_called_with('/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0/chromiumos_test_image.bin')
- mock_path_exists.assert_any_call('/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0')
+ mock_path_exists.assert_called_with(
+ '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0/chromiumos_test_image.bin')
+ mock_path_exists.assert_any_call(
+ '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0')
# Verify we made no RunCommand or ChrootRunCommand calls (since
# os.path.exists returned True, there was no work do be done).
- self.assertEqual (mock_cmd_exec.RunCommand.call_count, 0)
- self.assertEqual (mock_cmd_exec.ChrootRunCommand.call_count, 0)
-
-
+ self.assertEqual(mock_cmd_exec.RunCommand.call_count, 0)
+ self.assertEqual(mock_cmd_exec.ChrootRunCommand.call_count, 0)
@mock.patch.object(os.path, 'exists')
def test_uncompress_image(self, mock_path_exists):
@@ -85,12 +92,16 @@ class ImageDownloaderTestcast(unittest.TestCase):
downloader._UncompressImage(test_chroot, test_build_id)
# Verify os.path.exists was called once, with correct arguments.
- self.assertEqual (mock_path_exists.call_count, 1)
- mock_path_exists.assert_called_with('/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0/chromiumos_test_image.bin')
+ self.assertEqual(mock_path_exists.call_count, 1)
+ mock_path_exists.assert_called_with(
+ '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0/chromiumos_test_image.bin')
# Verify ChrootRunCommand was called, with correct arguments.
- self.assertEqual (mock_cmd_exec.ChrootRunCommand.call_count, 1)
- mock_cmd_exec.ChrootRunCommand.assert_called_with('/usr/local/home/chromeos', 'cd /tmp/lumpy-release/R36-5814.0.0 ;unxz chromiumos_test_image.tar.xz; tar -xvf chromiumos_test_image.tar')
+ self.assertEqual(mock_cmd_exec.ChrootRunCommand.call_count, 1)
+ mock_cmd_exec.ChrootRunCommand.assert_called_with(
+ '/usr/local/home/chromeos',
+ 'cd /tmp/lumpy-release/R36-5814.0.0 ;unxz '
+ 'chromiumos_test_image.tar.xz; tar -xvf chromiumos_test_image.tar')
# Set os.path.exists to always return False and run uncompress.
mock_path_exists.reset_mock()
@@ -99,19 +110,18 @@ class ImageDownloaderTestcast(unittest.TestCase):
downloader._UncompressImage(test_chroot, test_build_id)
# Verify os.path.exists was called once, with correct arguments.
- self.assertEqual (mock_path_exists.call_count, 1)
- mock_path_exists.assert_called_with('/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0/chromiumos_test_image.bin')
+ self.assertEqual(mock_path_exists.call_count, 1)
+ mock_path_exists.assert_called_with(
+ '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0/chromiumos_test_image.bin')
# Verify ChrootRunCommand was not called.
- self.assertEqual (mock_cmd_exec.ChrootRunCommand.call_count, 0)
-
-
+ self.assertEqual(mock_cmd_exec.ChrootRunCommand.call_count, 0)
def test_run(self):
# Set test arguments
- test_chroot = "/usr/local/home/chromeos"
- test_build_id = "remote/lumpy/latest-dev"
+ test_chroot = '/usr/local/home/chromeos'
+ test_build_id = 'remote/lumpy/latest-dev'
# Set values to test/check.
self.called_download_image = False
@@ -125,7 +135,7 @@ class ImageDownloaderTestcast(unittest.TestCase):
def GoodDownloadImage(root, build_id, image_path):
self.called_download_image = True
- return "chromiumos_test_image.bin"
+ return 'chromiumos_test_image.bin'
def BadDownloadImage(root, build_id, image_path):
self.called_download_image = True
@@ -147,8 +157,8 @@ class ImageDownloaderTestcast(unittest.TestCase):
downloader.Run(test_chroot, test_build_id)
# Make sure it called both _DownloadImage and _UncompressImage
- self.assertTrue (self.called_download_image)
- self.assertTrue (self.called_uncompress_image)
+ self.assertTrue(self.called_download_image)
+ self.assertTrue(self.called_uncompress_image)
# Reset values; Now use fake stub that simulates DownloadImage failing.
self.called_download_image = False
@@ -156,11 +166,11 @@ class ImageDownloaderTestcast(unittest.TestCase):
downloader._DownloadImage = BadDownloadImage
# Call Run again.
- downloader.Run (test_chroot, test_build_id)
+ downloader.Run(test_chroot, test_build_id)
# Verify that UncompressImage was not called, since _DownloadImage "failed"
- self.assertTrue (self.called_download_image)
- self.assertFalse (self.called_uncompress_image)
+ self.assertTrue(self.called_download_image)
+ self.assertFalse(self.called_uncompress_image)
if __name__ == '__main__':
diff --git a/crosperf/experiment.py b/crosperf/experiment.py
index 0926193b..de172cb1 100644
--- a/crosperf/experiment.py
+++ b/crosperf/experiment.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""The experiment setting module."""
from __future__ import print_function
@@ -21,13 +20,14 @@ from machine_manager import MachineManager
from machine_manager import MockMachineManager
import test_flag
+
class Experiment(object):
"""Class representing an Experiment to be run."""
- def __init__(self, name, remote, working_directory,
- chromeos_root, cache_conditions, labels, benchmarks,
- experiment_file, email_to, acquire_timeout, log_dir,
- log_level, share_cache, results_directory, locks_directory):
+ def __init__(self, name, remote, working_directory, chromeos_root,
+ cache_conditions, labels, benchmarks, experiment_file, email_to,
+ acquire_timeout, log_dir, log_level, share_cache,
+ results_directory, locks_directory):
self.name = name
self.working_directory = working_directory
self.remote = remote
@@ -37,7 +37,7 @@ class Experiment(object):
self.email_to = email_to
if not results_directory:
self.results_directory = os.path.join(self.working_directory,
- self.name + "_results")
+ self.name + '_results')
else:
self.results_directory = misc.CanonicalizePath(results_directory)
self.log_dir = log_dir
@@ -54,11 +54,11 @@ class Experiment(object):
self.locked_machines = []
if not remote:
- raise RuntimeError("No remote hosts specified")
+ raise RuntimeError('No remote hosts specified')
if not self.benchmarks:
- raise RuntimeError("No benchmarks specified")
+ raise RuntimeError('No benchmarks specified')
if not self.labels:
- raise RuntimeError("No labels specified")
+ raise RuntimeError('No labels specified')
# We need one chromeos_root to run the benchmarks in, but it doesn't
# matter where it is, unless the ABIs are different.
@@ -68,8 +68,8 @@ class Experiment(object):
chromeos_root = label.chromeos_root
break
if not chromeos_root:
- raise RuntimeError("No chromeos_root given and could not determine "
- "one from the image path.")
+ raise RuntimeError('No chromeos_root given and could not determine '
+ 'one from the image path.')
if test_flag.GetTestMode():
self.machine_manager = MockMachineManager(chromeos_root, acquire_timeout,
@@ -86,7 +86,7 @@ class Experiment(object):
# machines. This is a subset of self.remote. We make both lists the same.
self.remote = [m.name for m in self.machine_manager._all_machines]
if not self.remote:
- raise RuntimeError("No machine available for running experiment.")
+ raise RuntimeError('No machine available for running experiment.')
for label in labels:
# We filter out label remotes that are not reachable (not in
@@ -121,22 +121,15 @@ class Experiment(object):
for benchmark in self.benchmarks:
for iteration in range(1, benchmark.iterations + 1):
- benchmark_run_name = "%s: %s (%s)" % (label.name, benchmark.name,
+ benchmark_run_name = '%s: %s (%s)' % (label.name, benchmark.name,
iteration)
- full_name = "%s_%s_%s" % (label.name, benchmark.name, iteration)
- logger_to_use = logger.Logger(self.log_dir,
- "run.%s" % (full_name),
+ full_name = '%s_%s_%s' % (label.name, benchmark.name, iteration)
+ logger_to_use = logger.Logger(self.log_dir, 'run.%s' % (full_name),
True)
benchmark_runs.append(benchmark_run.BenchmarkRun(
- benchmark_run_name,
- benchmark,
- label,
- iteration,
- self.cache_conditions,
- self.machine_manager,
- logger_to_use,
- self.log_level,
- self.share_cache))
+ benchmark_run_name, benchmark, label, iteration,
+ self.cache_conditions, self.machine_manager, logger_to_use,
+ self.log_level, self.share_cache))
return benchmark_runs
@@ -211,10 +204,9 @@ class Experiment(object):
return
# If we locked any machines earlier, make sure we unlock them now.
- lock_mgr = afe_lock_machine.AFELockManager(all_machines, "",
- self.labels[0].chromeos_root,
- None)
- machine_states = lock_mgr.GetMachineStates("unlock")
+ lock_mgr = afe_lock_machine.AFELockManager(
+ all_machines, '', self.labels[0].chromeos_root, None)
+ machine_states = lock_mgr.GetMachineStates('unlock')
for k, state in machine_states.iteritems():
- if state["locked"]:
+ if state['locked']:
lock_mgr.UpdateLockInAFE(False, k)
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index bd31d78f..24508c9d 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""A module to generate experiments."""
from __future__ import print_function
@@ -29,8 +28,7 @@ telemetry_perfv2_tests = ['dromaeo.domcoreattr',
'kraken',
'octane',
'robohornet_pro',
- 'sunspider',
- ]
+ 'sunspider']
telemetry_pagecycler_tests = ['page_cycler.intl_ar_fa_he',
'page_cycler.intl_es_fr_pt-BR',
@@ -41,8 +39,7 @@ telemetry_pagecycler_tests = ['page_cycler.intl_ar_fa_he',
'page_cycler.moz',
'page_cycler.netsim.top_10',
'page_cycler.tough_layout_cases',
- 'page_cycler.typical_25',
- ]
+ 'page_cycler.typical_25']
telemetry_toolchain_old_perf_tests = ['dromaeo.domcoremodify',
'page_cycler.intl_es_fr_pt-BR',
@@ -53,8 +50,7 @@ telemetry_toolchain_old_perf_tests = ['dromaeo.domcoremodify',
'page_cycler.typical_25',
'robohornet_pro',
'spaceport',
- 'tab_switching.top_10',
- ]
+ 'tab_switching.top_10']
telemetry_toolchain_perf_tests = ['octane',
'kraken',
'speedometer',
@@ -62,8 +58,8 @@ telemetry_toolchain_perf_tests = ['octane',
'dromaeo.domcoremodify',
'smoothness.tough_webgl_cases',
'page_cycler.typical_25',
- 'media.tough_video_cases',
- ]
+ 'media.tough_video_cases']
+
class ExperimentFactory(object):
"""Factory class for building an Experiment, given an ExperimentFile as input.
@@ -78,19 +74,17 @@ class ExperimentFactory(object):
show_all_results, retries, run_local):
"""Add all the tests in a set to the benchmarks list."""
for test_name in benchmark_list:
- telemetry_benchmark = Benchmark(test_name, test_name, test_args,
- iterations, rm_chroot_tmp, perf_args,
- suite, show_all_results, retries,
- run_local)
+ telemetry_benchmark = Benchmark(
+ test_name, test_name, test_args, iterations, rm_chroot_tmp, perf_args,
+ suite, show_all_results, retries, run_local)
benchmarks.append(telemetry_benchmark)
-
def GetExperiment(self, experiment_file, working_directory, log_dir):
"""Construct an experiment from an experiment file."""
global_settings = experiment_file.GetGlobalSettings()
- experiment_name = global_settings.GetField("name")
- board = global_settings.GetField("board")
- remote = global_settings.GetField("remote")
+ experiment_name = global_settings.GetField('name')
+ board = global_settings.GetField('board')
+ remote = global_settings.GetField('remote')
# This is used to remove the ",' from the remote if user
# add them to the remote string.
new_remote = []
@@ -99,56 +93,56 @@ class ExperimentFactory(object):
c = re.sub('["\']', '', i)
new_remote.append(c)
remote = new_remote
- chromeos_root = global_settings.GetField("chromeos_root")
- rm_chroot_tmp = global_settings.GetField("rm_chroot_tmp")
- perf_args = global_settings.GetField("perf_args")
- acquire_timeout = global_settings.GetField("acquire_timeout")
- cache_dir = global_settings.GetField("cache_dir")
- cache_only = global_settings.GetField("cache_only")
- config.AddConfig("no_email", global_settings.GetField("no_email"))
- share_cache = global_settings.GetField("share_cache")
- results_dir = global_settings.GetField("results_dir")
- use_file_locks = global_settings.GetField("use_file_locks")
- locks_dir = global_settings.GetField("locks_dir")
+ chromeos_root = global_settings.GetField('chromeos_root')
+ rm_chroot_tmp = global_settings.GetField('rm_chroot_tmp')
+ perf_args = global_settings.GetField('perf_args')
+ acquire_timeout = global_settings.GetField('acquire_timeout')
+ cache_dir = global_settings.GetField('cache_dir')
+ cache_only = global_settings.GetField('cache_only')
+ config.AddConfig('no_email', global_settings.GetField('no_email'))
+ share_cache = global_settings.GetField('share_cache')
+ results_dir = global_settings.GetField('results_dir')
+ use_file_locks = global_settings.GetField('use_file_locks')
+ locks_dir = global_settings.GetField('locks_dir')
# If we pass a blank locks_dir to the Experiment, it will use the AFE server
# lock mechanism. So if the user specified use_file_locks, but did not
# specify a locks dir, set the locks dir to the default locks dir in
# file_lock_machine.
if use_file_locks and not locks_dir:
locks_dir = file_lock_machine.Machine.LOCKS_DIR
- chrome_src = global_settings.GetField("chrome_src")
- show_all_results = global_settings.GetField("show_all_results")
- log_level = global_settings.GetField("logging_level")
- if log_level not in ("quiet", "average", "verbose"):
- log_level = "verbose"
+ chrome_src = global_settings.GetField('chrome_src')
+ show_all_results = global_settings.GetField('show_all_results')
+ log_level = global_settings.GetField('logging_level')
+ if log_level not in ('quiet', 'average', 'verbose'):
+ log_level = 'verbose'
# Default cache hit conditions. The image checksum in the cache and the
# computed checksum of the image must match. Also a cache file must exist.
cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
CacheConditions.CHECKSUMS_MATCH]
- if global_settings.GetField("rerun_if_failed"):
+ if global_settings.GetField('rerun_if_failed'):
cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
- if global_settings.GetField("rerun"):
+ if global_settings.GetField('rerun'):
cache_conditions.append(CacheConditions.FALSE)
- if global_settings.GetField("same_machine"):
+ if global_settings.GetField('same_machine'):
cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
- if global_settings.GetField("same_specs"):
+ if global_settings.GetField('same_specs'):
cache_conditions.append(CacheConditions.MACHINES_MATCH)
# Construct benchmarks.
# Some fields are common with global settings. The values are
# inherited and/or merged with the global settings values.
benchmarks = []
- all_benchmark_settings = experiment_file.GetSettings("benchmark")
+ all_benchmark_settings = experiment_file.GetSettings('benchmark')
for benchmark_settings in all_benchmark_settings:
benchmark_name = benchmark_settings.name
- test_name = benchmark_settings.GetField("test_name")
+ test_name = benchmark_settings.GetField('test_name')
if not test_name:
test_name = benchmark_name
- test_args = benchmark_settings.GetField("test_args")
- iterations = benchmark_settings.GetField("iterations")
- suite = benchmark_settings.GetField("suite")
- retries = benchmark_settings.GetField("retries")
- run_local = benchmark_settings.GetField("run_local")
+ test_args = benchmark_settings.GetField('test_args')
+ iterations = benchmark_settings.GetField('iterations')
+ suite = benchmark_settings.GetField('suite')
+ retries = benchmark_settings.GetField('retries')
+ run_local = benchmark_settings.GetField('run_local')
if suite == 'telemetry_Crosperf':
if test_name == 'all_perfv2':
@@ -168,71 +162,81 @@ class ExperimentFactory(object):
run_local)
# Add non-telemetry toolchain-perf benchmarks:
benchmarks.append(Benchmark('graphics_WebGLAquarium',
- 'graphics_WebGLAquarium', '', iterations,
- rm_chroot_tmp, perf_args, '',
- show_all_results, retries,
+ 'graphics_WebGLAquarium',
+ '',
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ '',
+ show_all_results,
+ retries,
run_local=False))
elif test_name == 'all_toolchain_perf_old':
- self._AppendBenchmarkSet(benchmarks,
- telemetry_toolchain_old_perf_tests,
- test_args, iterations, rm_chroot_tmp,
- perf_args, suite, show_all_results, retries,
- run_local)
+ self._AppendBenchmarkSet(
+ benchmarks, telemetry_toolchain_old_perf_tests, test_args,
+ iterations, rm_chroot_tmp, perf_args, suite, show_all_results,
+ retries, run_local)
else:
- benchmark = Benchmark(test_name, test_name, test_args,
- iterations, rm_chroot_tmp, perf_args, suite,
+ benchmark = Benchmark(test_name, test_name, test_args, iterations,
+ rm_chroot_tmp, perf_args, suite,
show_all_results, retries, run_local)
benchmarks.append(benchmark)
else:
# Add the single benchmark.
- benchmark = Benchmark(benchmark_name, test_name, test_args,
- iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results, retries, run_local=False)
+ benchmark = Benchmark(benchmark_name,
+ test_name,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local=False)
benchmarks.append(benchmark)
if not benchmarks:
- raise RuntimeError("No benchmarks specified")
+ raise RuntimeError('No benchmarks specified')
# Construct labels.
# Some fields are common with global settings. The values are
# inherited and/or merged with the global settings values.
labels = []
- all_label_settings = experiment_file.GetSettings("label")
+ all_label_settings = experiment_file.GetSettings('label')
all_remote = list(remote)
for label_settings in all_label_settings:
label_name = label_settings.name
- image = label_settings.GetField("chromeos_image")
- chromeos_root = label_settings.GetField("chromeos_root")
- my_remote = label_settings.GetField("remote")
- compiler = label_settings.GetField("compiler")
+ image = label_settings.GetField('chromeos_image')
+ chromeos_root = label_settings.GetField('chromeos_root')
+ my_remote = label_settings.GetField('remote')
+ compiler = label_settings.GetField('compiler')
new_remote = []
if my_remote:
for i in my_remote:
c = re.sub('["\']', '', i)
new_remote.append(c)
my_remote = new_remote
- if image == "":
- build = label_settings.GetField("build")
+ if image == '':
+ build = label_settings.GetField('build')
if len(build) == 0:
raise RuntimeError("Can not have empty 'build' field!")
image = label_settings.GetXbuddyPath(build, board, chromeos_root,
log_level)
- cache_dir = label_settings.GetField("cache_dir")
- chrome_src = label_settings.GetField("chrome_src")
+ cache_dir = label_settings.GetField('cache_dir')
+ chrome_src = label_settings.GetField('chrome_src')
- # TODO(yunlian): We should consolidate code in machine_manager.py
- # to derermine whether we are running from within google or not
- if ("corp.google.com" in socket.gethostname() and
- (not my_remote
- or my_remote == remote
- and global_settings.GetField("board") != board)):
+ # TODO(yunlian): We should consolidate code in machine_manager.py
+      # to determine whether we are running from within google or not
+ if ('corp.google.com' in socket.gethostname() and
+ (not my_remote or my_remote == remote and
+ global_settings.GetField('board') != board)):
my_remote = self.GetDefaultRemotes(board)
- if global_settings.GetField("same_machine") and len(my_remote) > 1:
- raise RuntimeError("Only one remote is allowed when same_machine "
- "is turned on")
+ if global_settings.GetField('same_machine') and len(my_remote) > 1:
+ raise RuntimeError('Only one remote is allowed when same_machine '
+ 'is turned on')
all_remote += my_remote
- image_args = label_settings.GetField("image_args")
+ image_args = label_settings.GetField('image_args')
if test_flag.GetTestMode():
# pylint: disable=too-many-function-args
label = MockLabel(label_name, image, chromeos_root, board, my_remote,
@@ -245,37 +249,35 @@ class ExperimentFactory(object):
labels.append(label)
if not labels:
- raise RuntimeError("No labels specified")
+ raise RuntimeError('No labels specified')
- email = global_settings.GetField("email")
+ email = global_settings.GetField('email')
all_remote += list(set(my_remote))
all_remote = list(set(all_remote))
- experiment = Experiment(experiment_name, all_remote,
- working_directory, chromeos_root,
- cache_conditions, labels, benchmarks,
- experiment_file.Canonicalize(),
- email, acquire_timeout, log_dir, log_level,
- share_cache,
+ experiment = Experiment(experiment_name, all_remote, working_directory,
+ chromeos_root, cache_conditions, labels, benchmarks,
+ experiment_file.Canonicalize(), email,
+ acquire_timeout, log_dir, log_level, share_cache,
results_dir, locks_dir)
return experiment
def GetDefaultRemotes(self, board):
- default_remotes_file = os.path.join(os.path.dirname(__file__),
- "default_remotes")
+ default_remotes_file = os.path.join(
+ os.path.dirname(__file__), 'default_remotes')
try:
with open(default_remotes_file) as f:
for line in f:
- key, v = line.split(":")
+ key, v = line.split(':')
if key.strip() == board:
- remotes = v.strip().split(" ")
+ remotes = v.strip().split(' ')
if remotes:
return remotes
else:
- raise RuntimeError("There is no remote for {0}".format(board))
+ raise RuntimeError('There is no remote for {0}'.format(board))
except IOError:
# TODO: rethrow instead of throwing different exception.
- raise RuntimeError("IOError while reading file {0}"
+ raise RuntimeError('IOError while reading file {0}'
.format(default_remotes_file))
else:
- raise RuntimeError("There is not remote for {0}".format(board))
+ raise RuntimeError('There is not remote for {0}'.format(board))
diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py
index 148b7e4b..97561008 100755
--- a/crosperf/experiment_factory_unittest.py
+++ b/crosperf/experiment_factory_unittest.py
@@ -45,171 +45,162 @@ class ExperimentFactoryTest(unittest.TestCase):
def testLoadExperimentFile1(self):
experiment_file = ExperimentFile(StringIO.StringIO(EXPERIMENT_FILE_1))
experiment = ExperimentFactory().GetExperiment(experiment_file,
- working_directory="",
- log_dir="")
- self.assertEqual(experiment.remote, ["chromeos-alex3"])
+ working_directory='',
+ log_dir='')
+ self.assertEqual(experiment.remote, ['chromeos-alex3'])
self.assertEqual(len(experiment.benchmarks), 1)
- self.assertEqual(experiment.benchmarks[0].name, "PageCycler")
- self.assertEqual(experiment.benchmarks[0].test_name, "PageCycler")
+ self.assertEqual(experiment.benchmarks[0].name, 'PageCycler')
+ self.assertEqual(experiment.benchmarks[0].test_name, 'PageCycler')
self.assertEqual(experiment.benchmarks[0].iterations, 3)
self.assertEqual(len(experiment.labels), 2)
self.assertEqual(experiment.labels[0].chromeos_image,
- "/usr/local/google/cros_image1.bin")
- self.assertEqual(experiment.labels[0].board,
- "x86-alex")
-
-
+ '/usr/local/google/cros_image1.bin')
+ self.assertEqual(experiment.labels[0].board, 'x86-alex')
def test_append_benchmark_set(self):
ef = ExperimentFactory()
bench_list = []
ef._AppendBenchmarkSet(bench_list,
- experiment_factory.telemetry_perfv2_tests,
- "", 1, False, "", "telemetry_Crosperf", False, 0,
- False)
- self.assertEqual(len(bench_list),
- len(experiment_factory.telemetry_perfv2_tests))
+ experiment_factory.telemetry_perfv2_tests, '', 1,
+ False, '', 'telemetry_Crosperf', False, 0, False)
+ self.assertEqual(
+ len(bench_list), len(experiment_factory.telemetry_perfv2_tests))
self.assertTrue(type(bench_list[0]) is benchmark.Benchmark)
bench_list = []
ef._AppendBenchmarkSet(bench_list,
- experiment_factory.telemetry_pagecycler_tests,
- "", 1, False, "", "telemetry_Crosperf", False, 0,
- False)
- self.assertEqual(len(bench_list),
- len(experiment_factory.telemetry_pagecycler_tests))
+ experiment_factory.telemetry_pagecycler_tests, '', 1,
+ False, '', 'telemetry_Crosperf', False, 0, False)
+ self.assertEqual(
+ len(bench_list), len(experiment_factory.telemetry_pagecycler_tests))
self.assertTrue(type(bench_list[0]) is benchmark.Benchmark)
bench_list = []
- ef._AppendBenchmarkSet(bench_list,
- experiment_factory.telemetry_toolchain_perf_tests,
- "", 1, False, "", "telemetry_Crosperf", False, 0,
- False)
- self.assertEqual(len(bench_list),
- len(experiment_factory.telemetry_toolchain_perf_tests))
+ ef._AppendBenchmarkSet(
+ bench_list, experiment_factory.telemetry_toolchain_perf_tests, '', 1,
+ False, '', 'telemetry_Crosperf', False, 0, False)
+ self.assertEqual(
+ len(bench_list), len(experiment_factory.telemetry_toolchain_perf_tests))
self.assertTrue(type(bench_list[0]) is benchmark.Benchmark)
-
-
@mock.patch.object(socket, 'gethostname')
@mock.patch.object(machine_manager.MachineManager, 'AddMachine')
def test_get_experiment(self, mock_machine_manager, mock_socket):
test_flag.SetTestMode(False)
self.append_benchmark_call_args = []
+
def FakeAppendBenchmarkSet(bench_list, set_list, args, iters, rm_ch,
perf_args, suite, show_all):
- "Helper function for test_get_experiment"
+ 'Helper function for test_get_experiment'
arg_list = [bench_list, set_list, args, iters, rm_ch, perf_args, suite,
show_all]
self.append_benchmark_call_args.append(args_list)
def FakeGetDefaultRemotes(board):
- return ["fake_chromeos_machine1.cros",
- "fake_chromeos_machine2.cros"]
+ return ['fake_chromeos_machine1.cros', 'fake_chromeos_machine2.cros']
def FakeGetXbuddyPath(build, board, chroot, log_level):
- return "fake_image_path"
+ return 'fake_image_path'
ef = ExperimentFactory()
ef._AppendBenchmarkSet = FakeAppendBenchmarkSet
ef.GetDefaultRemotes = FakeGetDefaultRemotes
- label_settings = settings_factory.LabelSettings("image_label")
- benchmark_settings = settings_factory.BenchmarkSettings("bench_test")
- global_settings = settings_factory.GlobalSettings("test_name")
+ label_settings = settings_factory.LabelSettings('image_label')
+ benchmark_settings = settings_factory.BenchmarkSettings('bench_test')
+ global_settings = settings_factory.GlobalSettings('test_name')
label_settings.GetXbuddyPath = FakeGetXbuddyPath
- mock_experiment_file = ExperimentFile(StringIO.StringIO(""))
+ mock_experiment_file = ExperimentFile(StringIO.StringIO(''))
mock_experiment_file.all_settings = []
test_flag.SetTestMode(True)
# Basic test.
- global_settings.SetField("name","unittest_test")
- global_settings.SetField("board", "lumpy")
- global_settings.SetField("remote", "123.45.67.89 123.45.76.80")
- benchmark_settings.SetField("test_name", "kraken")
- benchmark_settings.SetField("suite", "telemetry_Crosperf")
- benchmark_settings.SetField("iterations", 1)
- label_settings.SetField("chromeos_image", "chromeos/src/build/images/lumpy/latest/chromiumos_test_image.bin")
- label_settings.SetField("chrome_src", "/usr/local/google/home/chrome-top")
-
+ global_settings.SetField('name', 'unittest_test')
+ global_settings.SetField('board', 'lumpy')
+ global_settings.SetField('remote', '123.45.67.89 123.45.76.80')
+ benchmark_settings.SetField('test_name', 'kraken')
+ benchmark_settings.SetField('suite', 'telemetry_Crosperf')
+ benchmark_settings.SetField('iterations', 1)
+ label_settings.SetField(
+ 'chromeos_image',
+ 'chromeos/src/build/images/lumpy/latest/chromiumos_test_image.bin')
+ label_settings.SetField('chrome_src', '/usr/local/google/home/chrome-top')
mock_experiment_file.global_settings = global_settings
- mock_experiment_file.all_settings.append (label_settings)
- mock_experiment_file.all_settings.append (benchmark_settings)
- mock_experiment_file.all_settings.append (global_settings)
+ mock_experiment_file.all_settings.append(label_settings)
+ mock_experiment_file.all_settings.append(benchmark_settings)
+ mock_experiment_file.all_settings.append(global_settings)
- mock_socket.return_value = ""
+ mock_socket.return_value = ''
# First test. General test.
- exp = ef.GetExperiment(mock_experiment_file, "", "")
- self.assertEqual(exp.remote, ["123.45.67.89", "123.45.76.80"])
+ exp = ef.GetExperiment(mock_experiment_file, '', '')
+ self.assertEqual(exp.remote, ['123.45.67.89', '123.45.76.80'])
self.assertEqual(exp.cache_conditions, [0, 2, 1])
- self.assertEqual(exp.log_level, "average")
+ self.assertEqual(exp.log_level, 'average')
self.assertEqual(len(exp.benchmarks), 1)
- self.assertEqual(exp.benchmarks[0].name, "kraken")
- self.assertEqual(exp.benchmarks[0].test_name, "kraken")
+ self.assertEqual(exp.benchmarks[0].name, 'kraken')
+ self.assertEqual(exp.benchmarks[0].test_name, 'kraken')
self.assertEqual(exp.benchmarks[0].iterations, 1)
- self.assertEqual(exp.benchmarks[0].suite, "telemetry_Crosperf")
+ self.assertEqual(exp.benchmarks[0].suite, 'telemetry_Crosperf')
self.assertFalse(exp.benchmarks[0].show_all_results)
self.assertEqual(len(exp.labels), 1)
self.assertEqual(exp.labels[0].chromeos_image,
- "chromeos/src/build/images/lumpy/latest/"
- "chromiumos_test_image.bin")
- self.assertEqual(exp.labels[0].board, "lumpy")
+ 'chromeos/src/build/images/lumpy/latest/'
+ 'chromiumos_test_image.bin')
+ self.assertEqual(exp.labels[0].board, 'lumpy')
# Second test: Remotes listed in labels.
test_flag.SetTestMode(True)
- label_settings.SetField("remote", "chromeos1.cros chromeos2.cros")
- exp = ef.GetExperiment(mock_experiment_file, "", "")
- self.assertEqual(exp.remote,
- ["chromeos1.cros", "chromeos2.cros", "123.45.67.89",
- "123.45.76.80", ])
+ label_settings.SetField('remote', 'chromeos1.cros chromeos2.cros')
+ exp = ef.GetExperiment(mock_experiment_file, '', '')
+ self.assertEqual(exp.remote, ['chromeos1.cros',
+ 'chromeos2.cros',
+ '123.45.67.89',
+ '123.45.76.80'])
# Third test: Automatic fixing of bad logging_level param:
- global_settings.SetField("logging_level", "really loud!")
- exp = ef.GetExperiment(mock_experiment_file, "", "")
- self.assertEqual(exp.log_level, "verbose")
+ global_settings.SetField('logging_level', 'really loud!')
+ exp = ef.GetExperiment(mock_experiment_file, '', '')
+ self.assertEqual(exp.log_level, 'verbose')
# Fourth test: Setting cache conditions; only 1 remote with "same_machine"
- global_settings.SetField("rerun_if_failed", "true")
- global_settings.SetField("rerun", "true")
- global_settings.SetField("same_machine", "true")
- global_settings.SetField("same_specs", "true")
-
- self.assertRaises(Exception, ef.GetExperiment, mock_experiment_file, "",
- "")
- label_settings.SetField("remote", "")
- global_settings.SetField("remote", "123.45.67.89")
- exp = ef.GetExperiment(mock_experiment_file, "", "")
+ global_settings.SetField('rerun_if_failed', 'true')
+ global_settings.SetField('rerun', 'true')
+ global_settings.SetField('same_machine', 'true')
+ global_settings.SetField('same_specs', 'true')
+
+ self.assertRaises(Exception, ef.GetExperiment, mock_experiment_file, '', '')
+ label_settings.SetField('remote', '')
+ global_settings.SetField('remote', '123.45.67.89')
+ exp = ef.GetExperiment(mock_experiment_file, '', '')
self.assertEqual(exp.cache_conditions, [0, 2, 3, 4, 6, 1])
# Fifth Test: Adding a second label; calling GetXbuddyPath; omitting all
# remotes (Call GetDefaultRemotes).
- mock_socket.return_value = "test.corp.google.com"
- global_settings.SetField("remote", "")
- global_settings.SetField("same_machine", "false")
+ mock_socket.return_value = 'test.corp.google.com'
+ global_settings.SetField('remote', '')
+ global_settings.SetField('same_machine', 'false')
- label_settings_2 = settings_factory.LabelSettings("official_image_label")
- label_settings_2.SetField("chromeos_root", "chromeos")
- label_settings_2.SetField("build", "official-dev")
+ label_settings_2 = settings_factory.LabelSettings('official_image_label')
+ label_settings_2.SetField('chromeos_root', 'chromeos')
+ label_settings_2.SetField('build', 'official-dev')
label_settings_2.GetXbuddyPath = FakeGetXbuddyPath
- mock_experiment_file.all_settings.append (label_settings_2)
- exp = ef.GetExperiment(mock_experiment_file, "", "")
+ mock_experiment_file.all_settings.append(label_settings_2)
+ exp = ef.GetExperiment(mock_experiment_file, '', '')
self.assertEqual(len(exp.labels), 2)
- self.assertEqual(exp.labels[1].chromeos_image, "fake_image_path")
- self.assertEqual(exp.remote, ["fake_chromeos_machine1.cros",
- "fake_chromeos_machine2.cros"])
-
-
+ self.assertEqual(exp.labels[1].chromeos_image, 'fake_image_path')
+ self.assertEqual(exp.remote, ['fake_chromeos_machine1.cros',
+ 'fake_chromeos_machine2.cros'])
def test_get_default_remotes(self):
board_list = ['x86-alex', 'lumpy', 'stumpy', 'parrot', 'daisy', 'peach_pit',
@@ -224,7 +215,8 @@ class ExperimentFactoryTest(unittest.TestCase):
remotes = ef.GetDefaultRemotes(b)
self.assertEqual(len(remotes), 3)
-if __name__ == "__main__":
+
+if __name__ == '__main__':
FileUtils.Configure(True)
test_flag.SetTestMode(True)
unittest.main()
diff --git a/crosperf/experiment_file.py b/crosperf/experiment_file.py
index 3cb46dcc..7967855b 100644
--- a/crosperf/experiment_file.py
+++ b/crosperf/experiment_file.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -31,11 +30,11 @@ class ExperimentFile(object):
"""
# Field regex, e.g. "iterations: 3"
- _FIELD_VALUE_RE = re.compile(r"(\+)?\s*(\w+?)(?:\.(\S+))?\s*:\s*(.*)")
+ _FIELD_VALUE_RE = re.compile(r'(\+)?\s*(\w+?)(?:\.(\S+))?\s*:\s*(.*)')
# Open settings regex, e.g. "label {"
- _OPEN_SETTINGS_RE = re.compile(r"(?:([\w.-]+):)?\s*([\w.-]+)\s*{")
+ _OPEN_SETTINGS_RE = re.compile(r'(?:([\w.-]+):)?\s*([\w.-]+)\s*{')
# Close settings regex.
- _CLOSE_SETTINGS_RE = re.compile(r"}")
+ _CLOSE_SETTINGS_RE = re.compile(r'}')
def __init__(self, experiment_file, overrides=None):
"""Construct object from file-like experiment_file.
@@ -48,7 +47,7 @@ class ExperimentFile(object):
Exception: if invalid build type or description is invalid.
"""
self.all_settings = []
- self.global_settings = SettingsFactory().GetSettings("global", "global")
+ self.global_settings = SettingsFactory().GetSettings('global', 'global')
self.all_settings.append(self.global_settings)
self._Parse(experiment_file)
@@ -84,7 +83,7 @@ class ExperimentFile(object):
match = ExperimentFile._OPEN_SETTINGS_RE.match(line)
settings_type = match.group(1)
if settings_type is None:
- settings_type = ""
+ settings_type = ''
settings_name = match.group(2)
settings = SettingsFactory().GetSettings(settings_name, settings_type)
settings.SetParentSettings(self.global_settings)
@@ -100,7 +99,7 @@ class ExperimentFile(object):
elif ExperimentFile._CLOSE_SETTINGS_RE.match(line):
return settings
- raise Exception("Unexpected EOF while parsing settings block.")
+ raise Exception('Unexpected EOF while parsing settings block.')
def _Parse(self, experiment_file):
"""Parse experiment file and create settings."""
@@ -123,44 +122,44 @@ class ExperimentFile(object):
field = self._ParseField(reader)
self.global_settings.SetField(field[0], field[1], field[2])
else:
- raise Exception("Unexpected line.")
+ raise Exception('Unexpected line.')
except Exception, err:
- raise Exception("Line %d: %s\n==> %s" % (reader.LineNo(), str(err),
+ raise Exception('Line %d: %s\n==> %s' % (reader.LineNo(), str(err),
reader.CurrentLine(False)))
def Canonicalize(self):
"""Convert parsed experiment file back into an experiment file."""
- res = ""
- board = ""
+ res = ''
+ board = ''
for field_name in self.global_settings.fields:
field = self.global_settings.fields[field_name]
if field.assigned:
- res += "%s: %s\n" % (field.name, field.GetString())
- if field.name == "board":
+ res += '%s: %s\n' % (field.name, field.GetString())
+ if field.name == 'board':
board = field.GetString()
- res += "\n"
+ res += '\n'
for settings in self.all_settings:
- if settings.settings_type != "global":
- res += "%s: %s {\n" % (settings.settings_type, settings.name)
+ if settings.settings_type != 'global':
+ res += '%s: %s {\n' % (settings.settings_type, settings.name)
for field_name in settings.fields:
field = settings.fields[field_name]
if field.assigned:
- res += "\t%s: %s\n" % (field.name, field.GetString())
- if field.name == "chromeos_image":
- real_file = (os.path.realpath
- (os.path.expanduser(field.GetString())))
+ res += '\t%s: %s\n' % (field.name, field.GetString())
+ if field.name == 'chromeos_image':
+ real_file = (
+ os.path.realpath(os.path.expanduser(field.GetString())))
if real_file != field.GetString():
- res += "\t#actual_image: %s\n" % real_file
- if field.name == "build":
- chromeos_root_field = settings.fields["chromeos_root"]
+ res += '\t#actual_image: %s\n' % real_file
+ if field.name == 'build':
+ chromeos_root_field = settings.fields['chromeos_root']
if chromeos_root_field:
chromeos_root = chromeos_root_field.GetString()
value = field.GetString()
- xbuddy_path = settings.GetXbuddyPath (value, board, chromeos_root,
- "quiet")
- res += "\t#actual_image: %s\n" % xbuddy_path
- res += "}\n\n"
+ xbuddy_path = settings.GetXbuddyPath(value, board, chromeos_root,
+ 'quiet')
+ res += '\t#actual_image: %s\n' % xbuddy_path
+ res += '}\n\n'
return res
@@ -187,8 +186,8 @@ class ExperimentFileReader(object):
def _StripComment(self, line):
"""Strip comments starting with # from a line."""
- if "#" in line:
- line = line[:line.find("#")] + line[-1]
+ if '#' in line:
+ line = line[:line.find('#')] + line[-1]
return line
def LineNo(self):
diff --git a/crosperf/experiment_file_unittest.py b/crosperf/experiment_file_unittest.py
index 1adba31e..97779410 100755
--- a/crosperf/experiment_file_unittest.py
+++ b/crosperf/experiment_file_unittest.py
@@ -64,7 +64,7 @@ EXPERIMENT_FILE_3 = """
}
"""
-OUTPUT_FILE="""board: x86-alex
+OUTPUT_FILE = """board: x86-alex
remote: chromeos-alex3
perf_args: record -a -e cycles
@@ -82,39 +82,41 @@ label: image2 {
\tchromeos_image: /usr/local/google/cros_image2.bin
}\n\n"""
+
class ExperimentFileTest(unittest.TestCase):
+
def testLoadExperimentFile1(self):
input_file = StringIO.StringIO(EXPERIMENT_FILE_1)
experiment_file = ExperimentFile(input_file)
global_settings = experiment_file.GetGlobalSettings()
- self.assertEqual(global_settings.GetField("remote"), ["chromeos-alex3"])
- self.assertEqual(global_settings.GetField("perf_args"),
- "record -a -e cycles")
- benchmark_settings = experiment_file.GetSettings("benchmark")
+ self.assertEqual(global_settings.GetField('remote'), ['chromeos-alex3'])
+ self.assertEqual(
+ global_settings.GetField('perf_args'), 'record -a -e cycles')
+ benchmark_settings = experiment_file.GetSettings('benchmark')
self.assertEqual(len(benchmark_settings), 1)
- self.assertEqual(benchmark_settings[0].name, "PageCycler")
- self.assertEqual(benchmark_settings[0].GetField("iterations"), 3)
+ self.assertEqual(benchmark_settings[0].name, 'PageCycler')
+ self.assertEqual(benchmark_settings[0].GetField('iterations'), 3)
- label_settings = experiment_file.GetSettings("label")
+ label_settings = experiment_file.GetSettings('label')
self.assertEqual(len(label_settings), 2)
- self.assertEqual(label_settings[0].name, "image1")
- self.assertEqual(label_settings[0].GetField("chromeos_image"),
- "/usr/local/google/cros_image1.bin")
- self.assertEqual(label_settings[1].GetField("remote"), ["chromeos-lumpy1"])
- self.assertEqual(label_settings[0].GetField("remote"), ["chromeos-alex3"])
+ self.assertEqual(label_settings[0].name, 'image1')
+ self.assertEqual(label_settings[0].GetField('chromeos_image'),
+ '/usr/local/google/cros_image1.bin')
+ self.assertEqual(label_settings[1].GetField('remote'), ['chromeos-lumpy1'])
+ self.assertEqual(label_settings[0].GetField('remote'), ['chromeos-alex3'])
def testOverrideSetting(self):
input_file = StringIO.StringIO(EXPERIMENT_FILE_2)
experiment_file = ExperimentFile(input_file)
global_settings = experiment_file.GetGlobalSettings()
- self.assertEqual(global_settings.GetField("remote"), ["chromeos-alex3"])
+ self.assertEqual(global_settings.GetField('remote'), ['chromeos-alex3'])
- benchmark_settings = experiment_file.GetSettings("benchmark")
+ benchmark_settings = experiment_file.GetSettings('benchmark')
self.assertEqual(len(benchmark_settings), 2)
- self.assertEqual(benchmark_settings[0].name, "PageCycler")
- self.assertEqual(benchmark_settings[0].GetField("iterations"), 3)
- self.assertEqual(benchmark_settings[1].name, "AndroidBench")
- self.assertEqual(benchmark_settings[1].GetField("iterations"), 2)
+ self.assertEqual(benchmark_settings[0].name, 'PageCycler')
+ self.assertEqual(benchmark_settings[0].GetField('iterations'), 3)
+ self.assertEqual(benchmark_settings[1].name, 'AndroidBench')
+ self.assertEqual(benchmark_settings[1].GetField('iterations'), 2)
def testDuplicateLabel(self):
input_file = StringIO.StringIO(EXPERIMENT_FILE_3)
@@ -126,5 +128,6 @@ class ExperimentFileTest(unittest.TestCase):
res = experiment_file.Canonicalize()
self.assertEqual(res, OUTPUT_FILE)
-if __name__ == "__main__":
+
+if __name__ == '__main__':
unittest.main()
diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py
index 2f1e8668..2a654e69 100644
--- a/crosperf/experiment_runner.py
+++ b/crosperf/experiment_runner.py
@@ -1,5 +1,4 @@
# Copyright 2011-2015 Google Inc. All Rights Reserved.
-
"""The experiment runner module."""
import getpass
import os
@@ -30,7 +29,11 @@ class ExperimentRunner(object):
STATUS_TIME_DELAY = 30
THREAD_MONITOR_DELAY = 2
- def __init__(self, experiment, json_report, using_schedv2=False, log=None,
+ def __init__(self,
+ experiment,
+ json_report,
+ using_schedv2=False,
+ log=None,
cmd_exec=None):
self._experiment = experiment
self.l = log or logger.GetLogger(experiment.log_dir)
@@ -38,7 +41,7 @@ class ExperimentRunner(object):
self._terminated = False
self.json_report = json_report
self.locked_machines = []
- if experiment.log_level != "verbose":
+ if experiment.log_level != 'verbose':
self.STATUS_TIME_DELAY = 10
# Setting this to True will use crosperf sched v2 (feature in progress).
@@ -89,23 +92,22 @@ class ExperimentRunner(object):
else:
lock_mgr = afe_lock_machine.AFELockManager(
self._GetMachineList(),
- "",
+ '',
experiment.labels[0].chromeos_root,
None,
- log=self.l,
- )
+ log=self.l,)
for m in lock_mgr.machines:
if not lock_mgr.MachineIsKnown(m):
lock_mgr.AddLocalMachine(m)
- machine_states = lock_mgr.GetMachineStates("lock")
- lock_mgr.CheckMachineLocks(machine_states, "lock")
+ machine_states = lock_mgr.GetMachineStates('lock')
+ lock_mgr.CheckMachineLocks(machine_states, 'lock')
self.locked_machines = lock_mgr.UpdateMachines(True)
self._experiment.locked_machines = self.locked_machines
self._UpdateMachineList(self.locked_machines)
self._experiment.machine_manager.RemoveNonLockedMachines(
self.locked_machines)
if len(self.locked_machines) == 0:
- raise RuntimeError("Unable to lock any machines.")
+ raise RuntimeError('Unable to lock any machines.')
def _UnlockAllMachines(self, experiment):
"""Attempt to globally unlock all of the machines requested for run.
@@ -118,13 +120,12 @@ class ExperimentRunner(object):
lock_mgr = afe_lock_machine.AFELockManager(
self.locked_machines,
- "",
+ '',
experiment.labels[0].chromeos_root,
None,
- log=self.l,
- )
- machine_states = lock_mgr.GetMachineStates("unlock")
- lock_mgr.CheckMachineLocks(machine_states, "unlock")
+ log=self.l,)
+ machine_states = lock_mgr.GetMachineStates('unlock')
+ lock_mgr.CheckMachineLocks(machine_states, 'unlock')
lock_mgr.UpdateMachines(False)
def _ClearCacheEntries(self, experiment):
@@ -138,7 +139,7 @@ class ExperimentRunner(object):
br.benchmark.show_all_results, br.benchmark.run_local)
cache_dir = cache._GetCacheDirForWrite()
if os.path.exists(cache_dir):
- self.l.LogOutput("Removing cache dir: %s" % cache_dir)
+ self.l.LogOutput('Removing cache dir: %s' % cache_dir)
shutil.rmtree(cache_dir)
def _Run(self, experiment):
@@ -153,15 +154,15 @@ class ExperimentRunner(object):
status = ExperimentStatus(experiment)
experiment.Run()
last_status_time = 0
- last_status_string = ""
+ last_status_string = ''
try:
- if experiment.log_level != "verbose":
+ if experiment.log_level != 'verbose':
self.l.LogStartDots()
while not experiment.IsComplete():
if last_status_time + self.STATUS_TIME_DELAY < time.time():
last_status_time = time.time()
- border = "=============================="
- if experiment.log_level == "verbose":
+ border = '=============================='
+ if experiment.log_level == 'verbose':
self.l.LogOutput(border)
self.l.LogOutput(status.GetProgressString())
self.l.LogOutput(status.GetStatusString())
@@ -179,12 +180,12 @@ class ExperimentRunner(object):
time.sleep(self.THREAD_MONITOR_DELAY)
except KeyboardInterrupt:
self._terminated = True
- self.l.LogError("Ctrl-c pressed. Cleaning up...")
+ self.l.LogError('Ctrl-c pressed. Cleaning up...')
experiment.Terminate()
raise
except SystemExit:
self._terminated = True
- self.l.LogError("Unexpected exit. Cleaning up...")
+ self.l.LogError('Unexpected exit. Cleaning up...')
experiment.Terminate()
raise
finally:
@@ -201,28 +202,28 @@ class ExperimentRunner(object):
if not benchmark_run.cache_hit:
send_mail = True
break
- if (not send_mail and not experiment.email_to
- or config.GetConfig("no_email")):
+ if (not send_mail and not experiment.email_to or
+ config.GetConfig('no_email')):
return
label_names = []
for label in experiment.labels:
label_names.append(label.name)
- subject = "%s: %s" % (experiment.name, " vs. ".join(label_names))
+ subject = '%s: %s' % (experiment.name, ' vs. '.join(label_names))
text_report = TextResultsReport(experiment, True).GetReport()
- text_report += ("\nResults are stored in %s.\n" %
+ text_report += ('\nResults are stored in %s.\n' %
experiment.results_directory)
text_report = "<pre style='font-size: 13px'>%s</pre>" % text_report
html_report = HTMLResultsReport(experiment).GetReport()
- attachment = EmailSender.Attachment("report.html", html_report)
+ attachment = EmailSender.Attachment('report.html', html_report)
email_to = experiment.email_to or []
email_to.append(getpass.getuser())
EmailSender().SendEmail(email_to,
subject,
text_report,
attachments=[attachment],
- msg_type="html")
+ msg_type='html')
def _StoreResults(self, experiment):
if self._terminated:
@@ -230,32 +231,30 @@ class ExperimentRunner(object):
results_directory = experiment.results_directory
FileUtils().RmDir(results_directory)
FileUtils().MkDirP(results_directory)
- self.l.LogOutput("Storing experiment file in %s." % results_directory)
- experiment_file_path = os.path.join(results_directory,
- "experiment.exp")
+ self.l.LogOutput('Storing experiment file in %s.' % results_directory)
+ experiment_file_path = os.path.join(results_directory, 'experiment.exp')
FileUtils().WriteFile(experiment_file_path, experiment.experiment_file)
- self.l.LogOutput("Storing results report in %s." % results_directory)
- results_table_path = os.path.join(results_directory, "results.html")
+ self.l.LogOutput('Storing results report in %s.' % results_directory)
+ results_table_path = os.path.join(results_directory, 'results.html')
report = HTMLResultsReport(experiment).GetReport()
if self.json_report:
JSONResultsReport(experiment).GetReport(results_directory)
FileUtils().WriteFile(results_table_path, report)
- self.l.LogOutput("Storing email message body in %s." % results_directory)
- msg_file_path = os.path.join(results_directory, "msg_body.html")
+ self.l.LogOutput('Storing email message body in %s.' % results_directory)
+ msg_file_path = os.path.join(results_directory, 'msg_body.html')
text_report = TextResultsReport(experiment, True).GetReport()
- text_report += ("\nResults are stored in %s.\n" %
+ text_report += ('\nResults are stored in %s.\n' %
experiment.results_directory)
msg_body = "<pre style='font-size: 13px'>%s</pre>" % text_report
FileUtils().WriteFile(msg_file_path, msg_body)
- self.l.LogOutput("Storing results of each benchmark run.")
+ self.l.LogOutput('Storing results of each benchmark run.')
for benchmark_run in experiment.benchmark_runs:
if benchmark_run.result:
benchmark_run_name = filter(str.isalnum, benchmark_run.name)
- benchmark_run_path = os.path.join(results_directory,
- benchmark_run_name)
+ benchmark_run_path = os.path.join(results_directory, benchmark_run_name)
benchmark_run.result.CopyResultsTo(benchmark_run_path)
benchmark_run.result.CleanUp(benchmark_run.benchmark.rm_chroot_tmp)
@@ -281,10 +280,10 @@ class MockExperimentRunner(ExperimentRunner):
experiment.name)
def _PrintTable(self, experiment):
- self.l.LogOutput("Would print the experiment table.")
+ self.l.LogOutput('Would print the experiment table.')
def _Email(self, experiment):
- self.l.LogOutput("Would send result email.")
+ self.l.LogOutput('Would send result email.')
def _StoreResults(self, experiment):
- self.l.LogOutput("Would store the results.")
+ self.l.LogOutput('Would store the results.')
diff --git a/crosperf/experiment_runner_unittest.py b/crosperf/experiment_runner_unittest.py
index 46f50934..f665587e 100755
--- a/crosperf/experiment_runner_unittest.py
+++ b/crosperf/experiment_runner_unittest.py
@@ -44,6 +44,7 @@ EXPERIMENT_FILE_1 = """
}
"""
+
class FakeLogger(object):
def __init__(self):
@@ -85,6 +86,7 @@ class FakeLogger(object):
self.LogEndDotsCount = 0
self.LogAppendDotCount = 0
+
class ExperimentRunnerTest(unittest.TestCase):
run_counter = 0
@@ -95,33 +97,32 @@ class ExperimentRunnerTest(unittest.TestCase):
test_flag.SetTestMode(True)
experiment_file = ExperimentFile(StringIO.StringIO(EXPERIMENT_FILE_1))
experiment = ExperimentFactory().GetExperiment(experiment_file,
- working_directory="",
- log_dir="")
+ working_directory='',
+ log_dir='')
return experiment
- @mock.patch.object (machine_manager.MachineManager, 'AddMachine')
- @mock.patch.object (os.path, 'isfile')
+ @mock.patch.object(machine_manager.MachineManager, 'AddMachine')
+ @mock.patch.object(os.path, 'isfile')
def setUp(self, mock_isfile, mock_addmachine):
mock_isfile.return_value = True
self.exp = self.make_fake_experiment()
-
def test_init(self):
- er = experiment_runner.ExperimentRunner(self.exp, json_report=False,
+ er = experiment_runner.ExperimentRunner(self.exp,
+ json_report=False,
using_schedv2=False,
log=self.mock_logger,
cmd_exec=self.mock_cmd_exec)
- self.assertFalse (er._terminated)
- self.assertEqual (er.STATUS_TIME_DELAY, 10)
+ self.assertFalse(er._terminated)
+ self.assertEqual(er.STATUS_TIME_DELAY, 10)
- self.exp.log_level = "verbose"
- er = experiment_runner.ExperimentRunner(self.exp, json_report=False,
+ self.exp.log_level = 'verbose'
+ er = experiment_runner.ExperimentRunner(self.exp,
+ json_report=False,
using_schedv2=False,
log=self.mock_logger,
cmd_exec=self.mock_cmd_exec)
- self.assertEqual (er.STATUS_TIME_DELAY, 30)
-
-
+ self.assertEqual(er.STATUS_TIME_DELAY, 30)
@mock.patch.object(experiment_status.ExperimentStatus, 'GetStatusString')
@mock.patch.object(experiment_status.ExperimentStatus, 'GetProgressString')
@@ -150,13 +151,14 @@ class ExperimentRunnerTest(unittest.TestCase):
self.exp.IsComplete = FakeIsComplete
# Test 1: log_level == "quiet"
- self.exp.log_level = "quiet"
- er = experiment_runner.ExperimentRunner(self.exp, json_report=False,
+ self.exp.log_level = 'quiet'
+ er = experiment_runner.ExperimentRunner(self.exp,
+ json_report=False,
using_schedv2=False,
- log = self.mock_logger,
- cmd_exec =self.mock_cmd_exec)
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
er.STATUS_TIME_DELAY = 2
- mock_status_string.return_value = "Fake status string"
+ mock_status_string.return_value = 'Fake status string'
er._Run(self.exp)
self.assertEqual(self.run_count, 1)
self.assertTrue(self.is_complete_count > 0)
@@ -174,14 +176,15 @@ class ExperimentRunnerTest(unittest.TestCase):
# Test 2: log_level == "average"
self.mock_logger.Reset()
reset()
- self.exp.log_level = "average"
+ self.exp.log_level = 'average'
mock_status_string.call_count = 0
- er = experiment_runner.ExperimentRunner(self.exp, json_report=False,
+ er = experiment_runner.ExperimentRunner(self.exp,
+ json_report=False,
using_schedv2=False,
log=self.mock_logger,
cmd_exec=self.mock_cmd_exec)
er.STATUS_TIME_DELAY = 2
- mock_status_string.return_value = "Fake status string"
+ mock_status_string.return_value = 'Fake status string'
er._Run(self.exp)
self.assertEqual(self.run_count, 1)
self.assertTrue(self.is_complete_count > 0)
@@ -196,19 +199,19 @@ class ExperimentRunnerTest(unittest.TestCase):
'=============================='])
self.assertEqual(len(self.mock_logger.error_msgs), 0)
-
# Test 3: log_level == "verbose"
self.mock_logger.Reset()
reset()
- self.exp.log_level = "verbose"
+ self.exp.log_level = 'verbose'
mock_status_string.call_count = 0
- er = experiment_runner.ExperimentRunner(self.exp, json_report=False,
+ er = experiment_runner.ExperimentRunner(self.exp,
+ json_report=False,
using_schedv2=False,
log=self.mock_logger,
cmd_exec=self.mock_cmd_exec)
er.STATUS_TIME_DELAY = 2
- mock_status_string.return_value = "Fake status string"
- mock_progress_string.return_value = "Fake progress string"
+ mock_status_string.return_value = 'Fake status string'
+ mock_progress_string.return_value = 'Fake progress string'
er._Run(self.exp)
self.assertEqual(self.run_count, 1)
self.assertTrue(self.is_complete_count > 0)
@@ -219,28 +222,25 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(mock_progress_string.call_count, 2)
self.assertEqual(mock_status_string.call_count, 2)
self.assertEqual(self.mock_logger.output_msgs,
- ['==============================',
- 'Fake progress string', 'Fake status string',
- '==============================',
- '==============================',
- 'Fake progress string', 'Fake status string',
- '=============================='])
+ ['==============================', 'Fake progress string',
+ 'Fake status string', '==============================',
+ '==============================', 'Fake progress string',
+ 'Fake status string', '=============================='])
self.assertEqual(len(self.mock_logger.error_msgs), 0)
-
@mock.patch.object(TextResultsReport, 'GetReport')
def test_print_table(self, mock_report):
self.mock_logger.Reset()
- mock_report.return_value = "This is a fake experiment report."
- er = experiment_runner.ExperimentRunner(self.exp, json_report=False,
+ mock_report.return_value = 'This is a fake experiment report.'
+ er = experiment_runner.ExperimentRunner(self.exp,
+ json_report=False,
using_schedv2=False,
log=self.mock_logger,
cmd_exec=self.mock_cmd_exec)
er._PrintTable(self.exp)
self.assertEqual(mock_report.call_count, 1)
self.assertEqual(self.mock_logger.output_msgs,
- [ 'This is a fake experiment report.' ])
-
+ ['This is a fake experiment report.'])
@mock.patch.object(HTMLResultsReport, 'GetReport')
@mock.patch.object(TextResultsReport, 'GetReport')
@@ -250,14 +250,15 @@ class ExperimentRunnerTest(unittest.TestCase):
def test_email(self, mock_getuser, mock_emailer, mock_attachment,
mock_text_report, mock_html_report):
- mock_getuser.return_value = "john.smith@google.com"
- mock_text_report.return_value = "This is a fake text report."
- mock_html_report.return_value = "This is a fake html report."
+ mock_getuser.return_value = 'john.smith@google.com'
+ mock_text_report.return_value = 'This is a fake text report.'
+ mock_html_report.return_value = 'This is a fake html report.'
self.mock_logger.Reset()
- config.AddConfig("no_email", True)
- self.exp.email_to = ["jane.doe@google.com"]
- er = experiment_runner.ExperimentRunner(self.exp, json_report=False,
+ config.AddConfig('no_email', True)
+ self.exp.email_to = ['jane.doe@google.com']
+ er = experiment_runner.ExperimentRunner(self.exp,
+ json_report=False,
using_schedv2=False,
log=self.mock_logger,
cmd_exec=self.mock_cmd_exec)
@@ -271,7 +272,7 @@ class ExperimentRunnerTest(unittest.TestCase):
# Test 2. Config: email. exp.email_to set; cache hit. => send email
self.mock_logger.Reset()
- config.AddConfig("no_email", False)
+ config.AddConfig('no_email', False)
for r in self.exp.benchmark_runs:
r.cache_hit = True
er._Email(self.exp)
@@ -285,7 +286,7 @@ class ExperimentRunnerTest(unittest.TestCase):
(['john.smith@google.com', 'jane.doe@google.com'],
': image1 vs. image2',
"<pre style='font-size: 13px'>This is a fake text "
- "report.\nResults are stored in _results.\n</pre>"))
+ 'report.\nResults are stored in _results.\n</pre>'))
self.assertTrue(type(mock_emailer.call_args[1]) is dict)
self.assertEqual(len(mock_emailer.call_args[1]), 2)
self.assertTrue('attachments' in mock_emailer.call_args[1].keys())
@@ -301,7 +302,7 @@ class ExperimentRunnerTest(unittest.TestCase):
mock_attachment.reset_mock()
mock_text_report.reset_mock()
mock_html_report.reset_mock()
- config.AddConfig("no_email", False)
+ config.AddConfig('no_email', False)
for r in self.exp.benchmark_runs:
r.cache_hit = False
er._Email(self.exp)
@@ -315,7 +316,7 @@ class ExperimentRunnerTest(unittest.TestCase):
(['john.smith@google.com', 'jane.doe@google.com'],
': image1 vs. image2',
"<pre style='font-size: 13px'>This is a fake text "
- "report.\nResults are stored in _results.\n</pre>"))
+ 'report.\nResults are stored in _results.\n</pre>'))
self.assertTrue(type(mock_emailer.call_args[1]) is dict)
self.assertEqual(len(mock_emailer.call_args[1]), 2)
self.assertTrue('attachments' in mock_emailer.call_args[1].keys())
@@ -340,10 +341,9 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(mock_html_report.call_count, 1)
self.assertEqual(len(mock_emailer.call_args), 2)
self.assertEqual(mock_emailer.call_args[0],
- (['john.smith@google.com'],
- ': image1 vs. image2',
+ (['john.smith@google.com'], ': image1 vs. image2',
"<pre style='font-size: 13px'>This is a fake text "
- "report.\nResults are stored in _results.\n</pre>"))
+ 'report.\nResults are stored in _results.\n</pre>'))
self.assertTrue(type(mock_emailer.call_args[1]) is dict)
self.assertEqual(len(mock_emailer.call_args[1]), 2)
self.assertTrue('attachments' in mock_emailer.call_args[1].keys())
@@ -379,13 +379,14 @@ class ExperimentRunnerTest(unittest.TestCase):
mock_report, mock_writefile, mock_mkdir, mock_rmdir):
self.mock_logger.Reset()
- self.exp.results_directory='/usr/local/crosperf-results'
+ self.exp.results_directory = '/usr/local/crosperf-results'
bench_run = self.exp.benchmark_runs[5]
bench_path = '/usr/local/crosperf-results/' + filter(str.isalnum,
bench_run.name)
- self.assertEqual (len(self.exp.benchmark_runs), 6)
+ self.assertEqual(len(self.exp.benchmark_runs), 6)
- er = experiment_runner.ExperimentRunner(self.exp, json_report=False,
+ er = experiment_runner.ExperimentRunner(self.exp,
+ json_report=False,
using_schedv2=False,
log=self.mock_logger,
cmd_exec=self.mock_cmd_exec)
@@ -402,8 +403,8 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(self.mock_logger.LogOutputCount, 0)
# Test 2. _terminated is false; everything works properly.
- fake_result = Result(self.mock_logger, self.exp.labels[0], "average",
- "daisy1")
+ fake_result = Result(self.mock_logger, self.exp.labels[0], 'average',
+ 'daisy1')
for r in self.exp.benchmark_runs:
r.result = fake_result
er._terminated = False
@@ -413,7 +414,7 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(mock_copy.call_count, 6)
mock_copy.called_with(bench_path)
self.assertEqual(mock_writefile.call_count, 3)
- self.assertEqual (len(mock_writefile.call_args_list), 3)
+ self.assertEqual(len(mock_writefile.call_args_list), 3)
first_args = mock_writefile.call_args_list[0]
second_args = mock_writefile.call_args_list[1]
self.assertEqual(first_args[0][0],
@@ -425,12 +426,12 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(mock_rmdir.call_count, 1)
mock_rmdir.called_with('/usr/local/crosperf-results')
self.assertEqual(self.mock_logger.LogOutputCount, 4)
- self.assertEqual(self.mock_logger.output_msgs,
- ['Storing experiment file in /usr/local/crosperf-results.',
- 'Storing results report in /usr/local/crosperf-results.',
- 'Storing email message body in /usr/local/crosperf-results.',
- 'Storing results of each benchmark run.'])
-
+ self.assertEqual(
+ self.mock_logger.output_msgs,
+ ['Storing experiment file in /usr/local/crosperf-results.',
+ 'Storing results report in /usr/local/crosperf-results.',
+ 'Storing email message body in /usr/local/crosperf-results.',
+ 'Storing results of each benchmark run.'])
if __name__ == '__main__':
diff --git a/crosperf/experiment_status.py b/crosperf/experiment_status.py
index 93ada967..8cada078 100644
--- a/crosperf/experiment_status.py
+++ b/crosperf/experiment_status.py
@@ -1,5 +1,4 @@
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""The class to show the banner."""
from __future__ import print_function
@@ -19,14 +18,14 @@ class ExperimentStatus(object):
self.log_level = experiment.log_level
def _GetProgressBar(self, num_complete, num_total):
- ret = "Done: %s%%" % int(100.0 * num_complete / num_total)
+ ret = 'Done: %s%%' % int(100.0 * num_complete / num_total)
bar_length = 50
- done_char = ">"
- undone_char = " "
+ done_char = '>'
+ undone_char = ' '
num_complete_chars = bar_length * num_complete / num_total
num_undone_chars = bar_length - num_complete_chars
- ret += " [%s%s]" % (num_complete_chars * done_char, num_undone_chars *
- undone_char)
+ ret += ' [%s%s]' % (num_complete_chars * done_char,
+ num_undone_chars * undone_char)
return ret
def GetProgressString(self):
@@ -62,26 +61,25 @@ class ExperimentStatus(object):
# first long job, after a series of short jobs). For now, if that
# happens, we set the ETA to "Unknown."
#
- eta_seconds = (float(self.num_total - self.experiment.num_complete -1) *
- time_completed_jobs / self.experiment.num_run_complete
- + (time_completed_jobs / self.experiment.num_run_complete
- - (current_time - self.new_job_start_time)))
+ eta_seconds = (float(self.num_total - self.experiment.num_complete - 1) *
+ time_completed_jobs / self.experiment.num_run_complete +
+ (time_completed_jobs / self.experiment.num_run_complete -
+ (current_time - self.new_job_start_time)))
eta_seconds = int(eta_seconds)
if eta_seconds > 0:
eta = datetime.timedelta(seconds=eta_seconds)
else:
- eta = "Unknown"
+ eta = 'Unknown'
except ZeroDivisionError:
- eta = "Unknown"
+ eta = 'Unknown'
strings = []
- strings.append("Current time: %s Elapsed: %s ETA: %s" %
+ strings.append('Current time: %s Elapsed: %s ETA: %s' %
(datetime.datetime.now(),
- datetime.timedelta(seconds=int(elapsed_time)),
- eta))
+ datetime.timedelta(seconds=int(elapsed_time)), eta))
strings.append(self._GetProgressBar(self.experiment.num_complete,
self.num_total))
- return "\n".join(strings)
+ return '\n'.join(strings)
def GetStatusString(self):
"""Get the status string of all the benchmark_runs."""
@@ -93,26 +91,26 @@ class ExperimentStatus(object):
status_strings = []
for key, val in status_bins.items():
- if key == "RUNNING":
- status_strings.append("%s: %s" %
+ if key == 'RUNNING':
+ status_strings.append('%s: %s' %
(key, self._GetNamesAndIterations(val)))
else:
- status_strings.append("%s: %s" %
+ status_strings.append('%s: %s' %
(key, self._GetCompactNamesAndIterations(val)))
- thread_status = ""
- thread_status_format = "Thread Status: \n{}\n"
+ thread_status = ''
+ thread_status_format = 'Thread Status: \n{}\n'
if (self.experiment.schedv2() is None and
- self.experiment.log_level == "verbose"):
- # Add the machine manager status.
+ self.experiment.log_level == 'verbose'):
+ # Add the machine manager status.
thread_status = thread_status_format.format(
self.experiment.machine_manager.AsString())
elif self.experiment.schedv2():
- # In schedv2 mode, we always print out thread status.
- thread_status = thread_status_format.format(
- self.experiment.schedv2().threads_status_as_string())
+ # In schedv2 mode, we always print out thread status.
+ thread_status = thread_status_format.format(self.experiment.schedv2(
+ ).threads_status_as_string())
- result = "{}{}".format(thread_status, "\n".join(status_strings))
+ result = '{}{}'.format(thread_status, '\n'.join(status_strings))
return result
@@ -121,9 +119,9 @@ class ExperimentStatus(object):
t = time.time()
for benchmark_run in benchmark_runs:
t_last = benchmark_run.timeline.GetLastEventTime()
- elapsed = str(datetime.timedelta(seconds=int(t-t_last)))
+ elapsed = str(datetime.timedelta(seconds=int(t - t_last)))
strings.append("'{0}' {1}".format(benchmark_run.name, elapsed))
- return " %s (%s)" % (len(strings), ", ".join(strings))
+ return ' %s (%s)' % (len(strings), ', '.join(strings))
def _GetCompactNamesAndIterations(self, benchmark_runs):
output = ''
@@ -144,8 +142,8 @@ class ExperimentStatus(object):
benchmark_iterations[benchmark_name].append(benchmark_run.iteration)
for key, val in benchmark_iterations.items():
val.sort()
- iterations = ",".join(map(str, val))
- strings.append("{} [{}]".format(key, iterations))
- output += " " + label + ": " + ", ".join(strings) + "\n"
+ iterations = ','.join(map(str, val))
+ strings.append('{} [{}]'.format(key, iterations))
+ output += ' ' + label + ': ' + ', '.join(strings) + '\n'
- return " %s \n%s" % (len(benchmark_runs), output)
+ return ' %s \n%s' % (len(benchmark_runs), output)
diff --git a/crosperf/field.py b/crosperf/field.py
index b70fb557..e25ffe30 100644
--- a/crosperf/field.py
+++ b/crosperf/field.py
@@ -1,7 +1,7 @@
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""Module to represent a Field in an experiment file."""
+
class Field(object):
"""Class representing a Field in an experiment file."""
@@ -37,8 +37,13 @@ class Field(object):
class TextField(Field):
"""Class of text field."""
- def __init__(self, name, required=False, default="", inheritable=False,
- description=""):
+
+ def __init__(self,
+ name,
+ required=False,
+ default='',
+ inheritable=False,
+ description=''):
super(TextField, self).__init__(name, required, default, inheritable,
description)
@@ -48,15 +53,20 @@ class TextField(Field):
class BooleanField(Field):
"""Class of boolean field."""
- def __init__(self, name, required=False, default=False, inheritable=False,
- description=""):
+
+ def __init__(self,
+ name,
+ required=False,
+ default=False,
+ inheritable=False,
+ description=''):
super(BooleanField, self).__init__(name, required, default, inheritable,
description)
def _Parse(self, value):
- if value.lower() == "true":
+ if value.lower() == 'true':
return True
- elif value.lower() == "false":
+ elif value.lower() == 'false':
return False
raise Exception("Invalid value for '%s'. Must be true or false." %
self.name)
@@ -64,8 +74,13 @@ class BooleanField(Field):
class IntegerField(Field):
"""Class of integer field."""
- def __init__(self, name, required=False, default=0, inheritable=False,
- description=""):
+
+ def __init__(self,
+ name,
+ required=False,
+ default=0,
+ inheritable=False,
+ description=''):
super(IntegerField, self).__init__(name, required, default, inheritable,
description)
@@ -75,8 +90,13 @@ class IntegerField(Field):
class FloatField(Field):
"""Class of float field."""
- def __init__(self, name, required=False, default=0, inheritable=False,
- description=""):
+
+ def __init__(self,
+ name,
+ required=False,
+ default=0,
+ inheritable=False,
+ description=''):
super(FloatField, self).__init__(name, required, default, inheritable,
description)
@@ -86,8 +106,13 @@ class FloatField(Field):
class ListField(Field):
"""Class of list field."""
- def __init__(self, name, required=False, default=None, inheritable=False,
- description=""):
+
+ def __init__(self,
+ name,
+ required=False,
+ default=None,
+ inheritable=False,
+ description=''):
super(ListField, self).__init__(name, required, default, inheritable,
description)
@@ -95,7 +120,7 @@ class ListField(Field):
return value.split()
def GetString(self):
- return " ".join(self._value)
+ return ' '.join(self._value)
def Append(self, value):
v = self._Parse(value)
@@ -105,16 +130,23 @@ class ListField(Field):
self._value += v
self.assigned = True
+
class EnumField(Field):
"""Class of enum field."""
- def __init__(self, name, options, required=False, default="",
- inheritable=False, description=""):
+
+ def __init__(self,
+ name,
+ options,
+ required=False,
+ default='',
+ inheritable=False,
+ description=''):
super(EnumField, self).__init__(name, required, default, inheritable,
description)
self.options = options
def _Parse(self, value):
if value not in self.options:
- raise Exception("Invalid enum value for field '%s'. Must be one of (%s)"
- % (self.name, ", ".join(self.options)))
+ raise Exception("Invalid enum value for field '%s'. Must be one of (%s)" %
+ (self.name, ', '.join(self.options)))
return str(value)
diff --git a/crosperf/flag_test_unittest.py b/crosperf/flag_test_unittest.py
index f3678cfd..3a60cb83 100755
--- a/crosperf/flag_test_unittest.py
+++ b/crosperf/flag_test_unittest.py
@@ -6,6 +6,7 @@ import test_flag
import unittest
+
class FlagTestCase(unittest.TestCase):
def test_test_flag(self):
@@ -14,7 +15,6 @@ class FlagTestCase(unittest.TestCase):
self.assertTrue(type(test_flag._is_test) is list)
self.assertEqual(len(test_flag._is_test), 1)
-
# Verify that the getting the flag works and that the flag
# contains False, its starting value.
save_flag = test_flag.GetTestMode()
diff --git a/crosperf/help.py b/crosperf/help.py
index 81e39b55..e10035aa 100644
--- a/crosperf/help.py
+++ b/crosperf/help.py
@@ -1,5 +1,4 @@
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""Module to print help message."""
from __future__ import print_function
@@ -13,29 +12,30 @@ from settings_factory import LabelSettings
class Help(object):
"""The help class."""
+
def GetUsage(self):
return """%s [OPTIONS] EXPERIMENT_FILE""" % (sys.argv[0])
def _WrapLine(self, line):
- return "\n".join(textwrap.wrap(line, 80))
+ return '\n'.join(textwrap.wrap(line, 80))
def _GetFieldDescriptions(self, fields):
- res = ""
+ res = ''
for field_name in fields:
field = fields[field_name]
- res += "Field:\t\t%s\n" % field.name
- res += self._WrapLine("Description:\t%s" % field.description) + "\n"
- res += "Type:\t\t%s\n" % type(field).__name__.replace("Field", "")
- res += "Required:\t%s\n" % field.required
+ res += 'Field:\t\t%s\n' % field.name
+ res += self._WrapLine('Description:\t%s' % field.description) + '\n'
+ res += 'Type:\t\t%s\n' % type(field).__name__.replace('Field', '')
+ res += 'Required:\t%s\n' % field.required
if field.default:
- res += "Default:\t%s\n" % field.default
- res += "\n"
+ res += 'Default:\t%s\n' % field.default
+ res += '\n'
return res
def GetHelp(self):
- global_fields = self._GetFieldDescriptions(GlobalSettings("").fields)
- benchmark_fields = self._GetFieldDescriptions(BenchmarkSettings("").fields)
- label_fields = self._GetFieldDescriptions(LabelSettings("").fields)
+ global_fields = self._GetFieldDescriptions(GlobalSettings('').fields)
+ benchmark_fields = self._GetFieldDescriptions(BenchmarkSettings('').fields)
+ label_fields = self._GetFieldDescriptions(LabelSettings('').fields)
return """%s is a script for running performance experiments on
ChromeOS. It allows one to run ChromeOS Autotest benchmarks over
@@ -110,5 +110,5 @@ experiment file). Crosperf runs the experiment and caches the results
generates and displays a report based on the run, and emails the
report to the user. If the results were all read out of the cache,
then by default no email is generated.
-""" % (sys.argv[0], sys.argv[0], global_fields,
- benchmark_fields, label_fields, sys.argv[0])
+""" % (sys.argv[0], sys.argv[0], global_fields, benchmark_fields, label_fields,
+ sys.argv[0])
diff --git a/crosperf/image_checksummer.py b/crosperf/image_checksummer.py
index f4c02277..3571ad95 100644
--- a/crosperf/image_checksummer.py
+++ b/crosperf/image_checksummer.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright 2011 Google Inc. All Rights Reserved.
@@ -10,7 +9,9 @@ from cros_utils.file_utils import FileUtils
class ImageChecksummer(object):
+
class PerImageChecksummer(object):
+
def __init__(self, label, log_level):
self._lock = threading.Lock()
self.label = label
@@ -23,17 +24,17 @@ class ImageChecksummer(object):
logger.GetLogger().LogOutput("Acquiring checksum for '%s'." %
self.label.name)
self._checksum = None
- if self.label.image_type != "local":
- raise Exception("Called Checksum on non-local image!")
+ if self.label.image_type != 'local':
+ raise Exception('Called Checksum on non-local image!')
if self.label.chromeos_image:
if os.path.exists(self.label.chromeos_image):
self._checksum = FileUtils().Md5File(self.label.chromeos_image,
log_level=self.log_level)
- logger.GetLogger().LogOutput("Computed checksum is "
- ": %s" % self._checksum)
+ logger.GetLogger().LogOutput('Computed checksum is '
+ ': %s' % self._checksum)
if not self._checksum:
- raise Exception("Checksum computing error.")
- logger.GetLogger().LogOutput("Checksum is: %s" % self._checksum)
+ raise Exception('Checksum computing error.')
+ logger.GetLogger().LogOutput('Checksum is: %s' % self._checksum)
return self._checksum
_instance = None
@@ -43,23 +44,22 @@ class ImageChecksummer(object):
def __new__(cls, *args, **kwargs):
with cls._lock:
if not cls._instance:
- cls._instance = super(ImageChecksummer, cls).__new__(cls,
- *args, **kwargs)
+ cls._instance = super(ImageChecksummer, cls).__new__(cls, *args,
+ **kwargs)
return cls._instance
def Checksum(self, label, log_level):
- if label.image_type != "local":
- raise Exception("Attempt to call Checksum on non-local image.")
+ if label.image_type != 'local':
+ raise Exception('Attempt to call Checksum on non-local image.')
with self._lock:
if label.name not in self._per_image_checksummers:
- self._per_image_checksummers[label.name] = (ImageChecksummer.
- PerImageChecksummer(label,
- log_level))
+ self._per_image_checksummers[label.name] = (
+ ImageChecksummer.PerImageChecksummer(label, log_level))
checksummer = self._per_image_checksummers[label.name]
try:
return checksummer.Checksum()
except Exception, e:
- logger.GetLogger().LogError("Could not compute checksum of image in label"
- " '%s'."% label.name)
+ logger.GetLogger().LogError('Could not compute checksum of image in label'
+ " '%s'." % label.name)
raise e
diff --git a/crosperf/label.py b/crosperf/label.py
index a34416d2..b9fc9330 100644
--- a/crosperf/label.py
+++ b/crosperf/label.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""The label of benchamrks."""
from __future__ import print_function
@@ -16,15 +15,25 @@ from cros_utils import misc
class Label(object):
"""The label class."""
- def __init__(self, name, chromeos_image, chromeos_root, board, remote,
- image_args, cache_dir, cache_only, log_level, compiler,
+
+ def __init__(self,
+ name,
+ chromeos_image,
+ chromeos_root,
+ board,
+ remote,
+ image_args,
+ cache_dir,
+ cache_only,
+ log_level,
+ compiler,
chrome_src=None):
self.image_type = self._GetImageType(chromeos_image)
# Expand ~
chromeos_root = os.path.expanduser(chromeos_root)
- if self.image_type == "local":
+ if self.image_type == 'local':
chromeos_image = os.path.expanduser(chromeos_image)
self.name = name
@@ -35,11 +44,11 @@ class Label(object):
self.cache_dir = cache_dir
self.cache_only = cache_only
self.log_level = log_level
- self.chrome_version = ""
+ self.chrome_version = ''
self.compiler = compiler
if not chromeos_root:
- if self.image_type == "local":
+ if self.image_type == 'local':
chromeos_root = FileUtils().ChromeOSRootFromImage(chromeos_image)
if not chromeos_root:
raise Exception("No ChromeOS root given for label '%s' and could not "
@@ -48,22 +57,21 @@ class Label(object):
else:
chromeos_root = FileUtils().CanonicalizeChromeOSRoot(chromeos_root)
if not chromeos_root:
- raise Exception("Invalid ChromeOS root given for label '%s': '%s'."
- % (name, chromeos_root))
+ raise Exception("Invalid ChromeOS root given for label '%s': '%s'." %
+ (name, chromeos_root))
self.chromeos_root = chromeos_root
if not chrome_src:
self.chrome_src = os.path.join(
- self.chromeos_root,
- ".cache/distfiles/target/chrome-src-internal")
+ self.chromeos_root, '.cache/distfiles/target/chrome-src-internal')
if not os.path.exists(self.chrome_src):
self.chrome_src = os.path.join(self.chromeos_root,
- ".cache/distfiles/target/chrome-src")
+ '.cache/distfiles/target/chrome-src')
else:
chromeos_src = misc.CanonicalizePath(chrome_src)
if not chromeos_src:
- raise Exception("Invalid Chrome src given for label '%s': '%s'."
- % (name, chrome_src))
+ raise Exception("Invalid Chrome src given for label '%s': '%s'." %
+ (name, chrome_src))
self.chrome_src = chromeos_src
self._SetupChecksum()
@@ -72,19 +80,19 @@ class Label(object):
"""Compute label checksum only once."""
self.checksum = None
- if self.image_type == "local":
+ if self.image_type == 'local':
self.checksum = ImageChecksummer().Checksum(self, self.log_level)
- elif self.image_type == "trybot":
+ elif self.image_type == 'trybot':
self.checksum = hashlib.md5(self.chromeos_image).hexdigest()
def _GetImageType(self, chromeos_image):
image_type = None
- if chromeos_image.find("xbuddy://") < 0:
- image_type = "local"
- elif chromeos_image.find("trybot") >= 0:
- image_type = "trybot"
+ if chromeos_image.find('xbuddy://') < 0:
+ image_type = 'local'
+ elif chromeos_image.find('trybot') >= 0:
+ image_type = 'trybot'
else:
- image_type = "official"
+ image_type = 'official'
return image_type
def __hash__(self):
@@ -102,10 +110,21 @@ class Label(object):
return 'label[name="{}"]'.format(self.name)
+
class MockLabel(object):
"""The mock label class."""
- def __init__(self, name, chromeos_image, chromeos_root, board, remote,
- image_args, cache_dir, cache_only, log_level, compiler,
+
+ def __init__(self,
+ name,
+ chromeos_image,
+ chromeos_root,
+ board,
+ remote,
+ image_args,
+ cache_dir,
+ cache_only,
+ log_level,
+ compiler,
chrome_src=None):
self.name = name
self.chromeos_image = chromeos_image
@@ -114,7 +133,7 @@ class MockLabel(object):
self.cache_dir = cache_dir
self.cache_only = cache_only
if not chromeos_root:
- self.chromeos_root = "/tmp/chromeos_root"
+ self.chromeos_root = '/tmp/chromeos_root'
else:
self.chromeos_root = chromeos_root
self.image_args = image_args
@@ -123,14 +142,14 @@ class MockLabel(object):
self.checksum = ''
self.log_level = log_level
self.compiler = compiler
- self.chrome_version = "Fake Chrome Version 50"
+ self.chrome_version = 'Fake Chrome Version 50'
def _GetImageType(self, chromeos_image):
image_type = None
- if chromeos_image.find("xbuddy://") < 0:
- image_type = "local"
- elif chromeos_image.find("trybot") >= 0:
- image_type = "trybot"
+ if chromeos_image.find('xbuddy://') < 0:
+ image_type = 'local'
+ elif chromeos_image.find('trybot') >= 0:
+ image_type = 'trybot'
else:
- image_type = "official"
+ image_type = 'official'
return image_type
diff --git a/crosperf/machine_image_manager.py b/crosperf/machine_image_manager.py
index 31d0bd6b..3b96140e 100644
--- a/crosperf/machine_image_manager.py
+++ b/crosperf/machine_image_manager.py
@@ -1,9 +1,9 @@
-#!/usr/bin/python
# Copyright 2015 Google Inc. All Rights Reserved.
+
class MachineImageManager(object):
- """Management of allocating images to duts.
+ """Management of allocating images to duts.
* Data structure we have -
@@ -132,28 +132,28 @@ class MachineImageManager(object):
"""
- def __init__(self, labels, duts):
- self.labels_ = labels
- self.duts_ = duts
- self.n_labels_ = len(labels)
- self.n_duts_ = len(duts)
- self.dut_name_ordinal_ = dict()
- for idx, dut in enumerate(self.duts_):
- self.dut_name_ordinal_[dut.name] = idx
-
- # Generate initial matrix containg 'X' or ' '.
- self.matrix_ = [['X' if (l.remote and len(l.remote)) else ' ' \
- for d in range(self.n_duts_)] for l in self.labels_]
- for ol, l in enumerate(self.labels_):
- if l.remote:
- for r in l.remote:
- self.matrix_[ol][self.dut_name_ordinal_[r]] = ' '
-
- self.label_duts_ = [[] for _ in range(self.n_labels_)]
- self.allocate_log_ = []
-
- def compute_initial_allocation(self):
- """Compute the initial label-dut allocation.
+ def __init__(self, labels, duts):
+ self.labels_ = labels
+ self.duts_ = duts
+ self.n_labels_ = len(labels)
+ self.n_duts_ = len(duts)
+ self.dut_name_ordinal_ = dict()
+ for idx, dut in enumerate(self.duts_):
+ self.dut_name_ordinal_[dut.name] = idx
+
+    # Generate initial matrix containing 'X' or ' '.
+ self.matrix_ = [['X' if (l.remote and len(l.remote)) else ' ' \
+ for d in range(self.n_duts_)] for l in self.labels_]
+ for ol, l in enumerate(self.labels_):
+ if l.remote:
+ for r in l.remote:
+ self.matrix_[ol][self.dut_name_ordinal_[r]] = ' '
+
+ self.label_duts_ = [[] for _ in range(self.n_labels_)]
+ self.allocate_log_ = []
+
+ def compute_initial_allocation(self):
+ """Compute the initial label-dut allocation.
This method finds the most efficient way that every label gets imaged at
least once.
@@ -163,35 +163,35 @@ class MachineImageManager(object):
otherwise True.
"""
- if self.n_duts_ == 1:
- for i, v in self.matrix_vertical_generator(0):
- if v != 'X':
- self.matrix_[i][0] = 'Y'
- return
+ if self.n_duts_ == 1:
+ for i, v in self.matrix_vertical_generator(0):
+ if v != 'X':
+ self.matrix_[i][0] = 'Y'
+ return
- if self.n_labels_ == 1:
- for j, v in self.matrix_horizontal_generator(0):
- if v != 'X':
- self.matrix_[0][j] = 'Y'
- return
+ if self.n_labels_ == 1:
+ for j, v in self.matrix_horizontal_generator(0):
+ if v != 'X':
+ self.matrix_[0][j] = 'Y'
+ return
- if self.n_duts_ >= self.n_labels_:
- n = 1
- else:
- n = self.n_labels_ - self.n_duts_ + 1
- while n <= self.n_labels_:
- if self._compute_initial_allocation_internal(0, n):
- break
- n += 1
+ if self.n_duts_ >= self.n_labels_:
+ n = 1
+ else:
+ n = self.n_labels_ - self.n_duts_ + 1
+ while n <= self.n_labels_:
+ if self._compute_initial_allocation_internal(0, n):
+ break
+ n += 1
- return n <= self.n_labels_
+ return n <= self.n_labels_
- def _record_allocate_log(self, label_i, dut_j):
- self.allocate_log_.append((label_i, dut_j))
- self.label_duts_[label_i].append(dut_j)
+ def _record_allocate_log(self, label_i, dut_j):
+ self.allocate_log_.append((label_i, dut_j))
+ self.label_duts_[label_i].append(dut_j)
- def allocate(self, dut, schedv2=None):
- """Allocate a label for dut.
+ def allocate(self, dut, schedv2=None):
+ """Allocate a label for dut.
Arguments:
dut: the dut that asks for a new image.
@@ -202,101 +202,100 @@ class MachineImageManager(object):
a label to image onto the dut or None if no more available images for
the dut.
"""
- j = self.dut_name_ordinal_[dut.name]
- # 'can_' prefix means candidate label's.
- can_reimage_number = 999
- can_i = 999
- can_label = None
- can_pending_br_num = 0
- for i, v in self.matrix_vertical_generator(j):
- label = self.labels_[i]
-
- # 2 optimizations here regarding allocating label to dut.
- # Note schedv2 might be None in case we do not need this
- # optimization or we are in testing mode.
- if schedv2 is not None:
- pending_br_num = len(schedv2._label_brl_map[label])
- if pending_br_num == 0:
- # (A) - we have finished all br of this label,
- # apparently, we do not want to reimaeg dut to
- # this label.
- continue
- else:
- # In case we do not have a schedv2 instance, mark
- # pending_br_num as 0, so pending_br_num >=
- # can_pending_br_num is always True.
- pending_br_num = 0
-
- # For this time being, I just comment this out until we have a
- # better estimation how long each benchmarkrun takes.
- # if (pending_br_num <= 5 and
- # len(self.label_duts_[i]) >= 1):
- # # (B) this is heuristic - if there are just a few test cases
- # # (say <5) left undone for this label, and there is at least
- # # 1 other machine working on this lable, we probably not want
- # # to bother to reimage this dut to help with these 5 test
- # # cases
- # continue
-
- if v == 'Y':
- self.matrix_[i][j] = '_'
- self._record_allocate_log(i, j)
- return label
- if v == ' ':
- label_reimage_number = len(self.label_duts_[i])
- if ((can_label is None) or
- (label_reimage_number < can_reimage_number or
- (label_reimage_number == can_reimage_number and
- pending_br_num >= can_pending_br_num))):
- can_reimage_number = label_reimage_number
- can_i = i
- can_label = label
- can_pending_br_num = pending_br_num
-
- # All labels are marked either '_' (already taken) or 'X' (not
- # compatible), so return None to notify machine thread to quit.
- if can_label is None:
- return None
-
- # At this point, we don't find any 'Y' for the machine, so we go the
- # 'min' approach.
- self.matrix_[can_i][j] = '_'
- self._record_allocate_log(can_i, j)
- return can_label
-
- def matrix_vertical_generator(self, col):
- """Iterate matrix vertically at column 'col'.
+ j = self.dut_name_ordinal_[dut.name]
+ # 'can_' prefix means candidate label's.
+ can_reimage_number = 999
+ can_i = 999
+ can_label = None
+ can_pending_br_num = 0
+ for i, v in self.matrix_vertical_generator(j):
+ label = self.labels_[i]
+
+ # 2 optimizations here regarding allocating label to dut.
+ # Note schedv2 might be None in case we do not need this
+ # optimization or we are in testing mode.
+ if schedv2 is not None:
+ pending_br_num = len(schedv2._label_brl_map[label])
+ if pending_br_num == 0:
+ # (A) - we have finished all br of this label,
+          # apparently, we do not want to reimage dut to
+ # this label.
+ continue
+ else:
+ # In case we do not have a schedv2 instance, mark
+ # pending_br_num as 0, so pending_br_num >=
+ # can_pending_br_num is always True.
+ pending_br_num = 0
+
+ # For this time being, I just comment this out until we have a
+ # better estimation how long each benchmarkrun takes.
+ # if (pending_br_num <= 5 and
+ # len(self.label_duts_[i]) >= 1):
+ # # (B) this is heuristic - if there are just a few test cases
+ # # (say <5) left undone for this label, and there is at least
+      #   # 1 other machine working on this label, we probably do not want
+ # # to bother to reimage this dut to help with these 5 test
+ # # cases
+ # continue
+
+ if v == 'Y':
+ self.matrix_[i][j] = '_'
+ self._record_allocate_log(i, j)
+ return label
+ if v == ' ':
+ label_reimage_number = len(self.label_duts_[i])
+ if ((can_label is None) or
+ (label_reimage_number < can_reimage_number or
+ (label_reimage_number == can_reimage_number and
+ pending_br_num >= can_pending_br_num))):
+ can_reimage_number = label_reimage_number
+ can_i = i
+ can_label = label
+ can_pending_br_num = pending_br_num
+
+ # All labels are marked either '_' (already taken) or 'X' (not
+ # compatible), so return None to notify machine thread to quit.
+ if can_label is None:
+ return None
+
+ # At this point, we don't find any 'Y' for the machine, so we go the
+ # 'min' approach.
+ self.matrix_[can_i][j] = '_'
+ self._record_allocate_log(can_i, j)
+ return can_label
+
+ def matrix_vertical_generator(self, col):
+ """Iterate matrix vertically at column 'col'.
Yield row number i and value at matrix_[i][col].
"""
- for i, l in enumerate(self.labels_):
- yield i, self.matrix_[i][col]
+ for i, l in enumerate(self.labels_):
+ yield i, self.matrix_[i][col]
- def matrix_horizontal_generator(self, row):
- """Iterate matrix horizontally at row 'row'.
+ def matrix_horizontal_generator(self, row):
+ """Iterate matrix horizontally at row 'row'.
Yield col number j and value at matrix_[row][j].
"""
- for j, d in enumerate(self.duts_):
- yield j, self.matrix_[row][j]
-
-
- def _compute_initial_allocation_internal(self, level, N):
- """ Search matrix for d with N. """
-
- if level == self.n_labels_:
+ for j, d in enumerate(self.duts_):
+ yield j, self.matrix_[row][j]
+
+ def _compute_initial_allocation_internal(self, level, N):
+ """ Search matrix for d with N. """
+
+ if level == self.n_labels_:
+ return True
+
+ for j, v in self.matrix_horizontal_generator(level):
+ if v == ' ':
+ # Before we put a 'Y', we check how many Y column 'j' has.
+ # Note y[0] is row idx, y[1] is the cell value.
+ ny = reduce(lambda x, y: x + 1 if (y[1] == 'Y') else x,
+ self.matrix_vertical_generator(j), 0)
+ if ny < N:
+ self.matrix_[level][j] = 'Y'
+ if self._compute_initial_allocation_internal(level + 1, N):
return True
+ self.matrix_[level][j] = ' '
- for j, v in self.matrix_horizontal_generator(level):
- if v == ' ':
- # Before we put a 'Y', we check how many Y column 'j' has.
- # Note y[0] is row idx, y[1] is the cell value.
- ny = reduce(lambda x, y: x + 1 if (y[1] == 'Y') else x,
- self.matrix_vertical_generator(j), 0)
- if ny < N:
- self.matrix_[level][j] = 'Y'
- if self._compute_initial_allocation_internal(level + 1, N):
- return True
- self.matrix_[level][j] = ' '
-
- return False
+ return False
diff --git a/crosperf/machine_image_manager_unittest.py b/crosperf/machine_image_manager_unittest.py
index 60e8354a..220c4cf4 100755
--- a/crosperf/machine_image_manager_unittest.py
+++ b/crosperf/machine_image_manager_unittest.py
@@ -7,274 +7,231 @@ import unittest
from machine_image_manager import MachineImageManager
+
class MockLabel(object):
- def __init__(self, name, remotes=None):
- self.name = name
- self.remote = remotes
+ def __init__(self, name, remotes=None):
+ self.name = name
+ self.remote = remotes
- def __hash__(self):
- """Provide hash function for label.
+ def __hash__(self):
+ """Provide hash function for label.
This is required because Label object is used inside a dict as key.
"""
- return hash(self.name)
+ return hash(self.name)
- def __eq__(self, other):
- """Provide eq function for label.
+ def __eq__(self, other):
+ """Provide eq function for label.
This is required because Label object is used inside a dict as key.
"""
- return isinstance(other, MockLabel) and other.name == self.name
+ return isinstance(other, MockLabel) and other.name == self.name
+
class MockDut(object):
- def __init__(self, name, label=None):
- self.name = name
- self.label_ = label
+ def __init__(self, name, label=None):
+ self.name = name
+ self.label_ = label
class MachineImageManagerTester(unittest.TestCase):
- def gen_duts_by_name(self, *names):
- duts = []
- for n in names:
- duts.append(MockDut(n))
- return duts
-
- def print_matrix(self, matrix):
- for r in matrix:
- for v in r:
- print '{} '.format('.' if v == ' ' else v),
- print('')
-
- def create_labels_and_duts_from_pattern(self, pattern):
- labels = []
- duts = []
- for i, r in enumerate(pattern):
- l = MockLabel('l{}'.format(i), [])
- for j, v in enumerate(r.split()):
- if v == '.':
- l.remote.append('m{}'.format(j))
- if i == 0:
- duts.append(MockDut('m{}'.format(j)))
- labels.append(l)
- return labels, duts
-
- def check_matrix_against_pattern(self, matrix, pattern):
- for i, s in enumerate(pattern):
- for j, v in enumerate(s.split()):
- self.assertTrue(v == '.' and matrix[i][j] == ' ' or
- v == matrix[i][j])
-
- def pattern_based_test(self, input, output):
- labels, duts = self.create_labels_and_duts_from_pattern(input)
- mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.compute_initial_allocation())
- self.check_matrix_against_pattern(mim.matrix_, output)
- return mim
-
- def test_single_dut(self):
- labels = [MockLabel('l1'),
- MockLabel('l2'),
- MockLabel('l3')]
- dut = MockDut('m1')
- mim = MachineImageManager(labels, [dut])
- mim.compute_initial_allocation()
- self.assertTrue(mim.matrix_ == [['Y'], ['Y'], ['Y']])
-
- def test_single_label(self):
- labels = [MockLabel('l1')]
- duts = self.gen_duts_by_name('m1', 'm2', 'm3')
- mim = MachineImageManager(labels, duts)
- mim.compute_initial_allocation()
- self.assertTrue(mim.matrix_ == [['Y', 'Y', 'Y']])
-
- def test_case1(self):
- labels = [MockLabel('l1', ['m1', 'm2']),
- MockLabel('l2', ['m2', 'm3']),
- MockLabel('l3', ['m1'])]
- duts = [MockDut('m1'), MockDut('m2'), MockDut('m3')]
- mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.matrix_ == [[' ', ' ', 'X'],
- ['X', ' ', ' '],
- [' ', 'X', 'X']])
- mim.compute_initial_allocation()
- self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'],
- ['X', ' ', 'Y'],
- ['Y', 'X', 'X']])
-
- def test_case2(self):
- labels = [MockLabel('l1', ['m1', 'm2']),
- MockLabel('l2', ['m2', 'm3']),
- MockLabel('l3', ['m1'])]
- duts = [MockDut('m1'), MockDut('m2'), MockDut('m3')]
- mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.matrix_ == [[' ', ' ', 'X'],
- ['X', ' ', ' '],
- [' ', 'X', 'X']])
- mim.compute_initial_allocation()
- self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'],
- ['X', ' ', 'Y'],
- ['Y', 'X', 'X']])
-
- def test_case3(self):
- labels = [MockLabel('l1', ['m1', 'm2']),
- MockLabel('l2', ['m2', 'm3']),
- MockLabel('l3', ['m1'])]
- duts = [MockDut('m1', labels[0]), MockDut('m2'), MockDut('m3')]
- mim = MachineImageManager(labels, duts)
- mim.compute_initial_allocation()
- self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'],
- ['X', ' ', 'Y'],
- ['Y', 'X', 'X']])
-
- def test_case4(self):
- labels = [MockLabel('l1', ['m1', 'm2']),
- MockLabel('l2', ['m2', 'm3']),
- MockLabel('l3', ['m1'])]
- duts = [MockDut('m1'), MockDut('m2', labels[0]), MockDut('m3')]
- mim = MachineImageManager(labels, duts)
- mim.compute_initial_allocation()
- self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'],
- ['X', ' ', 'Y'],
- ['Y', 'X', 'X']])
-
- def test_case5(self):
- labels = [MockLabel('l1', ['m3']),
- MockLabel('l2', ['m3']),
- MockLabel('l3', ['m1'])]
- duts = self.gen_duts_by_name('m1', 'm2', 'm3')
- mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.compute_initial_allocation())
- self.assertTrue(mim.matrix_ == [['X', 'X', 'Y'],
- ['X', 'X', 'Y'],
- ['Y', 'X', 'X']])
-
- def test_2x2_with_allocation(self):
- labels = [MockLabel('l0'), MockLabel('l1')]
- duts = [MockDut('m0'), MockDut('m1')]
- mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.compute_initial_allocation())
- self.assertTrue(mim.allocate(duts[0]) == labels[0])
- self.assertTrue(mim.allocate(duts[0]) == labels[1])
- self.assertTrue(mim.allocate(duts[0]) is None)
- self.assertTrue(mim.matrix_[0][0] == '_')
- self.assertTrue(mim.matrix_[1][0] == '_')
- self.assertTrue(mim.allocate(duts[1]) == labels[1])
-
- def test_10x10_general(self):
- """Gen 10x10 matrix."""
- n = 10
- labels = []
- duts = []
- for i in range(n):
- labels.append(MockLabel('l{}'.format(i)))
- duts.append(MockDut('m{}'.format(i)))
- mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.compute_initial_allocation())
- for i in range(n):
- for j in range(n):
- if i == j:
- self.assertTrue(mim.matrix_[i][j] == 'Y')
- else:
- self.assertTrue(mim.matrix_[i][j] == ' ')
- self.assertTrue(mim.allocate(duts[3]).name == 'l3')
-
- def test_random_generated(self):
- n = 10
- labels = []
- duts = []
- for i in range(10):
- # generate 3-5 machines that is compatible with this label
- l = MockLabel('l{}'.format(i), [])
- r = random.random()
- for _ in range(4):
- t = int(r * 10) % n
- r *= 10
- l.remote.append('m{}'.format(t))
- labels.append(l)
- duts.append(MockDut('m{}'.format(i)))
- mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.compute_initial_allocation())
-
- def test_10x10_fully_random(self):
- input = ['X . . . X X . X X .',
- 'X X . X . X . X X .',
- 'X X X . . X . X . X',
- 'X . X X . . X X . X',
- 'X X X X . . . X . .',
- 'X X . X . X . . X .',
- '. X . X . X X X . .',
- '. X . X X . X X . .',
- 'X X . . . X X X . .',
- '. X X X X . . . . X']
- output = ['X Y . . X X . X X .',
- 'X X Y X . X . X X .',
- 'X X X Y . X . X . X',
- 'X . X X Y . X X . X',
- 'X X X X . Y . X . .',
- 'X X . X . X Y . X .',
- 'Y X . X . X X X . .',
- '. X . X X . X X Y .',
- 'X X . . . X X X . Y',
- '. X X X X . . Y . X']
- self.pattern_based_test(input, output)
-
- def test_10x10_fully_random2(self):
- input = ['X . X . . X . X X X',
- 'X X X X X X . . X .',
- 'X . X X X X X . . X',
- 'X X X . X . X X . .',
- '. X . X . X X X X X',
- 'X X X X X X X . . X',
- 'X . X X X X X . . X',
- 'X X X . X X X X . .',
- 'X X X . . . X X X X',
- '. X X . X X X . X X']
- output = ['X . X Y . X . X X X',
- 'X X X X X X Y . X .',
- 'X Y X X X X X . . X',
- 'X X X . X Y X X . .',
- '. X Y X . X X X X X',
- 'X X X X X X X Y . X',
- 'X . X X X X X . Y X',
- 'X X X . X X X X . Y',
- 'X X X . Y . X X X X',
- 'Y X X . X X X . X X']
- self.pattern_based_test(input, output)
-
- def test_3x4_with_allocation(self):
- input = ['X X . .',
- '. . X .',
- 'X . X .']
- output = ['X X Y .',
- 'Y . X .',
- 'X Y X .']
- mim = self.pattern_based_test(input, output)
- self.assertTrue(mim.allocate(mim.duts_[2]) == mim.labels_[0])
- self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[2])
- self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[1])
- self.assertTrue(mim.allocate(mim.duts_[1]) == mim.labels_[2])
- self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[1])
- self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[0])
- self.assertTrue(mim.allocate(mim.duts_[3]) is None)
- self.assertTrue(mim.allocate(mim.duts_[2]) is None)
- self.assertTrue(mim.allocate(mim.duts_[1]) == mim.labels_[1])
- self.assertTrue(mim.allocate(mim.duts_[1]) == None)
- self.assertTrue(mim.allocate(mim.duts_[0]) == None)
- self.assertTrue(mim.label_duts_[0] == [2, 3])
- self.assertTrue(mim.label_duts_[1] == [0, 3, 1])
- self.assertTrue(mim.label_duts_[2] == [3, 1])
- self.assertTrue(mim.allocate_log_ ==
- [(0, 2),
- (2, 3),
- (1, 0),
- (2, 1),
- (1, 3),
- (0, 3),
- (1, 1)])
-
- def test_cornercase_1(self):
- """This corner case is brought up by Caroline.
+ def gen_duts_by_name(self, *names):
+ duts = []
+ for n in names:
+ duts.append(MockDut(n))
+ return duts
+
+ def print_matrix(self, matrix):
+ for r in matrix:
+ for v in r:
+ print '{} '.format('.' if v == ' ' else v),
+ print('')
+
+ def create_labels_and_duts_from_pattern(self, pattern):
+ labels = []
+ duts = []
+ for i, r in enumerate(pattern):
+ l = MockLabel('l{}'.format(i), [])
+ for j, v in enumerate(r.split()):
+ if v == '.':
+ l.remote.append('m{}'.format(j))
+ if i == 0:
+ duts.append(MockDut('m{}'.format(j)))
+ labels.append(l)
+ return labels, duts
+
+ def check_matrix_against_pattern(self, matrix, pattern):
+ for i, s in enumerate(pattern):
+ for j, v in enumerate(s.split()):
+ self.assertTrue(v == '.' and matrix[i][j] == ' ' or v == matrix[i][j])
+
+ def pattern_based_test(self, input, output):
+ labels, duts = self.create_labels_and_duts_from_pattern(input)
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
+ self.check_matrix_against_pattern(mim.matrix_, output)
+ return mim
+
+ def test_single_dut(self):
+ labels = [MockLabel('l1'), MockLabel('l2'), MockLabel('l3')]
+ dut = MockDut('m1')
+ mim = MachineImageManager(labels, [dut])
+ mim.compute_initial_allocation()
+ self.assertTrue(mim.matrix_ == [['Y'], ['Y'], ['Y']])
+
+ def test_single_label(self):
+ labels = [MockLabel('l1')]
+ duts = self.gen_duts_by_name('m1', 'm2', 'm3')
+ mim = MachineImageManager(labels, duts)
+ mim.compute_initial_allocation()
+ self.assertTrue(mim.matrix_ == [['Y', 'Y', 'Y']])
+
+ def test_case1(self):
+ labels = [MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']),
+ MockLabel('l3', ['m1'])]
+ duts = [MockDut('m1'), MockDut('m2'), MockDut('m3')]
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.matrix_ == [[' ', ' ', 'X'], ['X', ' ', ' '], [' ', 'X',
+ 'X']])
+ mim.compute_initial_allocation()
+ self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X',
+ 'X']])
+
+ def test_case2(self):
+ labels = [MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']),
+ MockLabel('l3', ['m1'])]
+ duts = [MockDut('m1'), MockDut('m2'), MockDut('m3')]
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.matrix_ == [[' ', ' ', 'X'], ['X', ' ', ' '], [' ', 'X',
+ 'X']])
+ mim.compute_initial_allocation()
+ self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X',
+ 'X']])
+
+ def test_case3(self):
+ labels = [MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']),
+ MockLabel('l3', ['m1'])]
+ duts = [MockDut('m1', labels[0]), MockDut('m2'), MockDut('m3')]
+ mim = MachineImageManager(labels, duts)
+ mim.compute_initial_allocation()
+ self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X',
+ 'X']])
+
+ def test_case4(self):
+ labels = [MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']),
+ MockLabel('l3', ['m1'])]
+ duts = [MockDut('m1'), MockDut('m2', labels[0]), MockDut('m3')]
+ mim = MachineImageManager(labels, duts)
+ mim.compute_initial_allocation()
+ self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X',
+ 'X']])
+
+ def test_case5(self):
+ labels = [MockLabel('l1', ['m3']), MockLabel('l2', ['m3']),
+ MockLabel('l3', ['m1'])]
+ duts = self.gen_duts_by_name('m1', 'm2', 'm3')
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
+ self.assertTrue(mim.matrix_ == [['X', 'X', 'Y'], ['X', 'X', 'Y'], ['Y', 'X',
+ 'X']])
+
+ def test_2x2_with_allocation(self):
+ labels = [MockLabel('l0'), MockLabel('l1')]
+ duts = [MockDut('m0'), MockDut('m1')]
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
+ self.assertTrue(mim.allocate(duts[0]) == labels[0])
+ self.assertTrue(mim.allocate(duts[0]) == labels[1])
+ self.assertTrue(mim.allocate(duts[0]) is None)
+ self.assertTrue(mim.matrix_[0][0] == '_')
+ self.assertTrue(mim.matrix_[1][0] == '_')
+ self.assertTrue(mim.allocate(duts[1]) == labels[1])
+
+ def test_10x10_general(self):
+ """Gen 10x10 matrix."""
+ n = 10
+ labels = []
+ duts = []
+ for i in range(n):
+ labels.append(MockLabel('l{}'.format(i)))
+ duts.append(MockDut('m{}'.format(i)))
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
+ for i in range(n):
+ for j in range(n):
+ if i == j:
+ self.assertTrue(mim.matrix_[i][j] == 'Y')
+ else:
+ self.assertTrue(mim.matrix_[i][j] == ' ')
+ self.assertTrue(mim.allocate(duts[3]).name == 'l3')
+
+ def test_random_generated(self):
+ n = 10
+ labels = []
+ duts = []
+ for i in range(10):
+      # generate 3-5 machines that are compatible with this label
+ l = MockLabel('l{}'.format(i), [])
+ r = random.random()
+ for _ in range(4):
+ t = int(r * 10) % n
+ r *= 10
+ l.remote.append('m{}'.format(t))
+ labels.append(l)
+ duts.append(MockDut('m{}'.format(i)))
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
+
+ def test_10x10_fully_random(self):
+ input = ['X . . . X X . X X .', 'X X . X . X . X X .',
+ 'X X X . . X . X . X', 'X . X X . . X X . X',
+ 'X X X X . . . X . .', 'X X . X . X . . X .',
+ '. X . X . X X X . .', '. X . X X . X X . .',
+ 'X X . . . X X X . .', '. X X X X . . . . X']
+ output = ['X Y . . X X . X X .', 'X X Y X . X . X X .',
+ 'X X X Y . X . X . X', 'X . X X Y . X X . X',
+ 'X X X X . Y . X . .', 'X X . X . X Y . X .',
+ 'Y X . X . X X X . .', '. X . X X . X X Y .',
+ 'X X . . . X X X . Y', '. X X X X . . Y . X']
+ self.pattern_based_test(input, output)
+
+ def test_10x10_fully_random2(self):
+ input = ['X . X . . X . X X X', 'X X X X X X . . X .',
+ 'X . X X X X X . . X', 'X X X . X . X X . .',
+ '. X . X . X X X X X', 'X X X X X X X . . X',
+ 'X . X X X X X . . X', 'X X X . X X X X . .',
+ 'X X X . . . X X X X', '. X X . X X X . X X']
+ output = ['X . X Y . X . X X X', 'X X X X X X Y . X .',
+ 'X Y X X X X X . . X', 'X X X . X Y X X . .',
+ '. X Y X . X X X X X', 'X X X X X X X Y . X',
+ 'X . X X X X X . Y X', 'X X X . X X X X . Y',
+ 'X X X . Y . X X X X', 'Y X X . X X X . X X']
+ self.pattern_based_test(input, output)
+
+ def test_3x4_with_allocation(self):
+ input = ['X X . .', '. . X .', 'X . X .']
+ output = ['X X Y .', 'Y . X .', 'X Y X .']
+ mim = self.pattern_based_test(input, output)
+ self.assertTrue(mim.allocate(mim.duts_[2]) == mim.labels_[0])
+ self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[2])
+ self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[1])
+ self.assertTrue(mim.allocate(mim.duts_[1]) == mim.labels_[2])
+ self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[1])
+ self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[0])
+ self.assertTrue(mim.allocate(mim.duts_[3]) is None)
+ self.assertTrue(mim.allocate(mim.duts_[2]) is None)
+ self.assertTrue(mim.allocate(mim.duts_[1]) == mim.labels_[1])
+ self.assertTrue(mim.allocate(mim.duts_[1]) == None)
+ self.assertTrue(mim.allocate(mim.duts_[0]) == None)
+ self.assertTrue(mim.label_duts_[0] == [2, 3])
+ self.assertTrue(mim.label_duts_[1] == [0, 3, 1])
+ self.assertTrue(mim.label_duts_[2] == [3, 1])
+ self.assertTrue(mim.allocate_log_ == [(0, 2), (2, 3), (1, 0), (2, 1), (
+ 1, 3), (0, 3), (1, 1)])
+
+ def test_cornercase_1(self):
+ """This corner case is brought up by Caroline.
The description is -
@@ -310,19 +267,15 @@ class MachineImageManagerTester(unittest.TestCase):
"""
- input = ['. X X',
- '. X X',
- '. X X', ]
- output = ['Y X X',
- 'Y X X',
- 'Y X X', ]
- mim = self.pattern_based_test(input, output)
- self.assertTrue(mim.allocate(mim.duts_[1]) is None)
- self.assertTrue(mim.allocate(mim.duts_[2]) is None)
- self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[0])
- self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[1])
- self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[2])
- self.assertTrue(mim.allocate(mim.duts_[0]) is None)
+ input = ['. X X', '. X X', '. X X']
+ output = ['Y X X', 'Y X X', 'Y X X']
+ mim = self.pattern_based_test(input, output)
+ self.assertTrue(mim.allocate(mim.duts_[1]) is None)
+ self.assertTrue(mim.allocate(mim.duts_[2]) is None)
+ self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[0])
+ self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[1])
+ self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[2])
+ self.assertTrue(mim.allocate(mim.duts_[0]) is None)
if __name__ == '__main__':
diff --git a/crosperf/machine_manager.py b/crosperf/machine_manager.py
index 7bada0d1..86c63a20 100644
--- a/crosperf/machine_manager.py
+++ b/crosperf/machine_manager.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Machine Manager module."""
from __future__ import print_function
@@ -20,24 +19,30 @@ import test_flag
from cros_utils import command_executer
from cros_utils import logger
-CHECKSUM_FILE = "/usr/local/osimage_checksum_file"
+CHECKSUM_FILE = '/usr/local/osimage_checksum_file'
+
class BadChecksum(Exception):
"""Raised if all machines for a label don't have the same checksum."""
pass
+
class BadChecksumString(Exception):
"""Raised if all machines for a label don't have the same checksum string."""
pass
+
class MissingLocksDirectory(Exception):
"""Raised when cannot find/access the machine locks directory."""
+
class CrosCommandError(Exception):
"""Raised when an error occurs running command on DUT."""
+
class CrosMachine(object):
"""The machine class."""
+
def __init__(self, name, chromeos_root, log_level, cmd_exec=None):
self.name = name
self.image = None
@@ -71,7 +76,7 @@ class CrosMachine(object):
self.machine_id_checksum = self._GetMD5Checksum(self.machine_id)
def IsReachable(self):
- command = "ls"
+ command = 'ls'
ret = self.ce.CrosRunCommand(command,
machine=self.name,
chromeos_root=self.chromeos_root)
@@ -113,55 +118,63 @@ class CrosMachine(object):
def _GetMemoryInfo(self):
#TODO yunlian: when the machine in rebooting, it will not return
#meminfo, the assert does not catch it either
- command = "cat /proc/meminfo"
+ command = 'cat /proc/meminfo'
ret, self.meminfo, _ = self.ce.CrosRunCommandWOutput(
- command, machine=self.name, chromeos_root=self.chromeos_root)
- assert ret == 0, "Could not get meminfo from machine: %s" % self.name
+ command,
+ machine=self.name,
+ chromeos_root=self.chromeos_root)
+ assert ret == 0, 'Could not get meminfo from machine: %s' % self.name
if ret == 0:
self._ParseMemoryInfo()
def _GetCPUInfo(self):
- command = "cat /proc/cpuinfo"
+ command = 'cat /proc/cpuinfo'
ret, self.cpuinfo, _ = self.ce.CrosRunCommandWOutput(
- command, machine=self.name, chromeos_root=self.chromeos_root)
- assert ret == 0, "Could not get cpuinfo from machine: %s" % self.name
+ command,
+ machine=self.name,
+ chromeos_root=self.chromeos_root)
+ assert ret == 0, 'Could not get cpuinfo from machine: %s' % self.name
def _ComputeMachineChecksumString(self):
- self.checksum_string = ""
- exclude_lines_list = ["MHz", "BogoMIPS", "bogomips"]
+ self.checksum_string = ''
+ exclude_lines_list = ['MHz', 'BogoMIPS', 'bogomips']
for line in self.cpuinfo.splitlines():
if not any([e in line for e in exclude_lines_list]):
self.checksum_string += line
- self.checksum_string += " " + str(self.phys_kbytes)
+ self.checksum_string += ' ' + str(self.phys_kbytes)
def _GetMD5Checksum(self, ss):
if ss:
return hashlib.md5(ss).hexdigest()
else:
- return ""
+ return ''
def _GetMachineID(self):
- command = "dump_vpd_log --full --stdout"
+ command = 'dump_vpd_log --full --stdout'
_, if_out, _ = self.ce.CrosRunCommandWOutput(
- command, machine=self.name, chromeos_root=self.chromeos_root)
+ command,
+ machine=self.name,
+ chromeos_root=self.chromeos_root)
b = if_out.splitlines()
- a = [l for l in b if "Product" in l]
+ a = [l for l in b if 'Product' in l]
if len(a):
self.machine_id = a[0]
return
- command = "ifconfig"
+ command = 'ifconfig'
_, if_out, _ = self.ce.CrosRunCommandWOutput(
- command, machine=self.name, chromeos_root=self.chromeos_root)
+ command,
+ machine=self.name,
+ chromeos_root=self.chromeos_root)
b = if_out.splitlines()
- a = [l for l in b if "HWaddr" in l]
+ a = [l for l in b if 'HWaddr' in l]
if len(a):
- self.machine_id = "_".join(a)
+ self.machine_id = '_'.join(a)
return
- a = [l for l in b if "ether" in l]
+ a = [l for l in b if 'ether' in l]
if len(a):
- self.machine_id = "_".join(a)
+ self.machine_id = '_'.join(a)
return
- assert 0, "Could not get machine_id from machine: %s" % self.name
+ assert 0, 'Could not get machine_id from machine: %s' % self.name
def __str__(self):
l = []
@@ -170,7 +183,7 @@ class CrosMachine(object):
l.append(str(self.checksum))
l.append(str(self.locked))
l.append(str(self.released_time))
- return ", ".join(l)
+ return ', '.join(l)
class MachineManager(object):
@@ -184,8 +197,14 @@ class MachineManager(object):
multiple benchmark runs within the same experiment from trying to use the
same machine at the same time.
"""
- def __init__(self, chromeos_root, acquire_timeout, log_level, locks_dir,
- cmd_exec=None, lgr=None):
+
+ def __init__(self,
+ chromeos_root,
+ acquire_timeout,
+ log_level,
+ locks_dir,
+ cmd_exec=None,
+ lgr=None):
self._lock = threading.RLock()
self._all_machines = []
self._machines = []
@@ -202,8 +221,8 @@ class MachineManager(object):
self.logger = lgr or logger.GetLogger()
if self.locks_dir and not os.path.isdir(self.locks_dir):
- raise MissingLocksDirectory("Cannot access locks directory: %s"
- % self.locks_dir)
+ raise MissingLocksDirectory('Cannot access locks directory: %s' %
+ self.locks_dir)
self._initialized_machines = []
self.chromeos_root = chromeos_root
@@ -220,15 +239,17 @@ class MachineManager(object):
def GetChromeVersion(self, machine):
"""Get the version of Chrome running on the DUT."""
- cmd = "/opt/google/chrome/chrome --version"
+ cmd = '/opt/google/chrome/chrome --version'
ret, version, _ = self.ce.CrosRunCommandWOutput(
- cmd, machine=machine.name, chromeos_root=self.chromeos_root)
+ cmd,
+ machine=machine.name,
+ chromeos_root=self.chromeos_root)
if ret != 0:
- raise CrosCommandError("Couldn't get Chrome version from %s."
- % machine.name)
+ raise CrosCommandError("Couldn't get Chrome version from %s." %
+ machine.name)
if ret != 0:
- version = ""
+ version = ''
return version.rstrip()
def ImageMachine(self, machine, label):
@@ -239,41 +260,40 @@ class MachineManager(object):
chromeos_root = label.chromeos_root
if not chromeos_root:
chromeos_root = self.chromeos_root
- image_chromeos_args = [image_chromeos.__file__,
- "--no_lock",
- "--chromeos_root=%s" % chromeos_root,
- "--image=%s" % label.chromeos_image,
- "--image_args=%s" % label.image_args,
- "--remote=%s" % machine.name,
- "--logging_level=%s" % self.log_level]
+ image_chromeos_args = [image_chromeos.__file__, '--no_lock',
+ '--chromeos_root=%s' % chromeos_root,
+ '--image=%s' % label.chromeos_image,
+ '--image_args=%s' % label.image_args, '--remote=%s' %
+ machine.name, '--logging_level=%s' % self.log_level]
if label.board:
- image_chromeos_args.append("--board=%s" % label.board)
+ image_chromeos_args.append('--board=%s' % label.board)
# Currently can't image two machines at once.
# So have to serialized on this lock.
save_ce_log_level = self.ce.log_level
- if self.log_level != "verbose":
- self.ce.log_level = "average"
+ if self.log_level != 'verbose':
+ self.ce.log_level = 'average'
with self.image_lock:
- if self.log_level != "verbose":
- self.logger.LogOutput("Pushing image onto machine.")
- self.logger.LogOutput("Running image_chromeos.DoImage with %s"
- % " ".join(image_chromeos_args))
+ if self.log_level != 'verbose':
+ self.logger.LogOutput('Pushing image onto machine.')
+ self.logger.LogOutput('Running image_chromeos.DoImage with %s' %
+ ' '.join(image_chromeos_args))
retval = 0
if not test_flag.GetTestMode():
retval = image_chromeos.DoImage(image_chromeos_args)
if retval:
- cmd = "reboot && exit"
- if self.log_level != "verbose":
- self.logger.LogOutput("reboot & exit.")
- self.ce.CrosRunCommand(cmd, machine=machine.name,
+ cmd = 'reboot && exit'
+ if self.log_level != 'verbose':
+ self.logger.LogOutput('reboot & exit.')
+ self.ce.CrosRunCommand(cmd,
+ machine=machine.name,
chromeos_root=self.chromeos_root)
time.sleep(60)
- if self.log_level != "verbose":
- self.logger.LogOutput("Pushing image onto machine.")
- self.logger.LogOutput("Running image_chromeos.DoImage with %s"
- % " ".join(image_chromeos_args))
+ if self.log_level != 'verbose':
+ self.logger.LogOutput('Pushing image onto machine.')
+ self.logger.LogOutput('Running image_chromeos.DoImage with %s' %
+ ' '.join(image_chromeos_args))
retval = image_chromeos.DoImage(image_chromeos_args)
if retval:
raise Exception("Could not image machine: '%s'." % machine.name)
@@ -305,7 +325,7 @@ class MachineManager(object):
common_checksum = cs
# Make sure this machine's checksum matches our 'common' checksum.
if cs != common_checksum:
- raise BadChecksum("Machine checksums do not match!")
+ raise BadChecksum('Machine checksums do not match!')
self.machine_checksum[label.name] = common_checksum
def ComputeCommonCheckSumString(self, label):
@@ -332,9 +352,10 @@ class MachineManager(object):
sys.argv[0])
if locked:
self._machines.append(cros_machine)
- command = "cat %s" % CHECKSUM_FILE
+ command = 'cat %s' % CHECKSUM_FILE
ret, out, _ = self.ce.CrosRunCommandWOutput(
- command, chromeos_root=self.chromeos_root,
+ command,
+ chromeos_root=self.chromeos_root,
machine=cros_machine.name)
if ret == 0:
cros_machine.checksum = out.strip()
@@ -345,27 +366,24 @@ class MachineManager(object):
def AddMachine(self, machine_name):
with self._lock:
for m in self._all_machines:
- assert m.name != machine_name, "Tried to double-add %s" % machine_name
+ assert m.name != machine_name, 'Tried to double-add %s' % machine_name
- if self.log_level != "verbose":
- self.logger.LogOutput("Setting up remote access to %s" % machine_name)
- self.logger.LogOutput(
- "Checking machine characteristics for %s" % machine_name)
+ if self.log_level != 'verbose':
+ self.logger.LogOutput('Setting up remote access to %s' % machine_name)
+ self.logger.LogOutput('Checking machine characteristics for %s' %
+ machine_name)
cm = CrosMachine(machine_name, self.chromeos_root, self.log_level)
if cm.machine_checksum:
self._all_machines.append(cm)
-
def RemoveMachine(self, machine_name):
with self._lock:
- self._machines = [m for m in self._machines
- if m.name != machine_name]
+ self._machines = [m for m in self._machines if m.name != machine_name]
if self.locks_dir:
res = file_lock_machine.Machine(machine_name,
self.locks_dir).Unlock(True)
if not res:
- self.logger.LogError("Could not unlock machine: '%s'."
- % machine_name)
+ self.logger.LogError("Could not unlock machine: '%s'." % machine_name)
def ForceSameImageToAllMachines(self, label):
machines = self.GetMachines(label)
@@ -396,21 +414,23 @@ class MachineManager(object):
machine_names = []
for machine in machines:
machine_names.append(machine.name)
- self.logger.LogFatal("Could not acquire any of the "
- "following machines: '%s'"
- % ", ".join(machine_names))
+ self.logger.LogFatal('Could not acquire any of the '
+ "following machines: '%s'" %
+ ', '.join(machine_names))
### for m in self._machines:
### if (m.locked and time.time() - m.released_time < 10 and
### m.checksum == image_checksum):
### return None
- for m in [machine for machine in self.GetAvailableMachines(label)
+ for m in [machine
+ for machine in self.GetAvailableMachines(label)
if not machine.locked]:
if image_checksum and (m.checksum == image_checksum):
m.locked = True
m.test_run = threading.current_thread()
return m
- for m in [machine for machine in self.GetAvailableMachines(label)
+ for m in [machine
+ for machine in self.GetAvailableMachines(label)
if not machine.locked]:
if not m.checksum:
m.locked = True
@@ -422,7 +442,8 @@ class MachineManager(object):
# the number of re-images.
# TODO(asharif): If we centralize the thread-scheduler, we wont need this
# code and can implement minimal reimaging code more cleanly.
- for m in [machine for machine in self.GetAvailableMachines(label)
+ for m in [machine
+ for machine in self.GetAvailableMachines(label)
if not machine.locked]:
if time.time() - m.released_time > 15:
# The release time gap is too large, so it is probably in the start
@@ -448,10 +469,10 @@ class MachineManager(object):
with self._lock:
for m in self._machines:
if machine.name == m.name:
- assert m.locked == True, "Tried to double-release %s" % m.name
+ assert m.locked == True, 'Tried to double-release %s' % m.name
m.released_time = time.time()
m.locked = False
- m.status = "Available"
+ m.status = 'Available'
break
def Cleanup(self):
@@ -461,40 +482,36 @@ class MachineManager(object):
res = file_lock_machine.Machine(m.name, self.locks_dir).Unlock(True)
if not res:
- self.logger.LogError("Could not unlock machine: '%s'."
- % m.name)
+ self.logger.LogError("Could not unlock machine: '%s'." % m.name)
def __str__(self):
with self._lock:
- l = ["MachineManager Status:"]
+ l = ['MachineManager Status:']
for m in self._machines:
l.append(str(m))
- return "\n".join(l)
+ return '\n'.join(l)
def AsString(self):
with self._lock:
- stringify_fmt = "%-30s %-10s %-4s %-25s %-32s"
- header = stringify_fmt % ("Machine", "Thread", "Lock", "Status",
- "Checksum")
+ stringify_fmt = '%-30s %-10s %-4s %-25s %-32s'
+ header = stringify_fmt % ('Machine', 'Thread', 'Lock', 'Status',
+ 'Checksum')
table = [header]
for m in self._machines:
if m.test_run:
test_name = m.test_run.name
test_status = m.test_run.timeline.GetLastEvent()
else:
- test_name = ""
- test_status = ""
+ test_name = ''
+ test_status = ''
try:
- machine_string = stringify_fmt % (m.name,
- test_name,
- m.locked,
- test_status,
- m.checksum)
+ machine_string = stringify_fmt % (m.name, test_name, m.locked,
+ test_status, m.checksum)
except ValueError:
- machine_string = ""
+ machine_string = ''
table.append(machine_string)
- return "Machine Status:\n%s" % "\n".join(table)
+ return 'Machine Status:\n%s' % '\n'.join(table)
def GetAllCPUInfo(self, labels):
"""Get cpuinfo for labels, merge them if their cpuinfo are the same."""
@@ -507,12 +524,12 @@ class MachineManager(object):
else:
dic[machine.cpuinfo].append(label.name)
break
- output = ""
+ output = ''
for key, v in dic.items():
- output += " ".join(v)
- output += "\n-------------------\n"
+ output += ' '.join(v)
+ output += '\n-------------------\n'
output += key
- output += "\n\n\n"
+ output += '\n\n\n'
return output
@@ -618,7 +635,7 @@ power management:
self.released_time = time.time()
self.test_run = None
self.chromeos_root = chromeos_root
- self.checksum_string = re.sub(r"\d", "", name)
+ self.checksum_string = re.sub(r'\d', '', name)
#In test, we assume "lumpy1", "lumpy2" are the same machine.
self.machine_checksum = self._GetMD5Checksum(self.checksum_string)
self.log_level = log_level
@@ -635,25 +652,25 @@ power management:
def _GetCPUInfo(self):
self.cpuinfo = self.CPUINFO_STRING
+
class MockMachineManager(MachineManager):
"""Mock machine manager class."""
- def __init__(self, chromeos_root, acquire_timeout,
- log_level):
+
+ def __init__(self, chromeos_root, acquire_timeout, log_level):
super(MockMachineManager, self).__init__(
- chromeos_root, acquire_timeout,
- log_level,
+ chromeos_root, acquire_timeout, log_level,
file_lock_machine.Machine.LOCKS_DIR)
def _TryToLockMachine(self, cros_machine):
self._machines.append(cros_machine)
- cros_machine.checksum = ""
+ cros_machine.checksum = ''
def AddMachine(self, machine_name):
with self._lock:
for m in self._all_machines:
- assert m.name != machine_name, "Tried to double-add %s" % machine_name
+ assert m.name != machine_name, 'Tried to double-add %s' % machine_name
cm = MockCrosMachine(machine_name, self.chromeos_root, self.log_level)
- assert cm.machine_checksum, ("Could not find checksum for machine %s" %
+ assert cm.machine_checksum, ('Could not find checksum for machine %s' %
machine_name)
# In Original MachineManager, the test is 'if cm.machine_checksum:' - if a
# machine is unreachable, then its machine_checksum is None. Here we
@@ -663,7 +680,7 @@ class MockMachineManager(MachineManager):
self._all_machines.append(cm)
def GetChromeVersion(self, machine):
- return "Mock Chrome Version R50"
+ return 'Mock Chrome Version R50'
def AcquireMachine(self, label):
for machine in self._all_machines:
diff --git a/crosperf/machine_manager_unittest.py b/crosperf/machine_manager_unittest.py
index 7aed09d4..abbbaff7 100755
--- a/crosperf/machine_manager_unittest.py
+++ b/crosperf/machine_manager_unittest.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
# Copyright 2012 Google Inc. All Rights Reserved.
-
"""Unittest for machine_manager."""
import os.path
import time
@@ -22,35 +21,37 @@ from benchmark_run import MockBenchmarkRun
from cros_utils import command_executer
from cros_utils import logger
+
class MyMachineManager(machine_manager.MachineManager):
def __init__(self, chromeos_root):
- super(MyMachineManager, self).__init__(chromeos_root, 0, "average",
+ super(MyMachineManager, self).__init__(chromeos_root, 0, 'average',
file_lock_machine.Machine.LOCKS_DIR)
def _TryToLockMachine(self, cros_machine):
self._machines.append(cros_machine)
- cros_machine.checksum = ""
+ cros_machine.checksum = ''
def AddMachine(self, machine_name):
with self._lock:
for m in self._all_machines:
- assert m.name != machine_name, "Tried to double-add %s" % machine_name
+ assert m.name != machine_name, 'Tried to double-add %s' % machine_name
cm = machine_manager.MockCrosMachine(machine_name, self.chromeos_root,
- "average")
- assert cm.machine_checksum, ("Could not find checksum for machine %s" %
+ 'average')
+ assert cm.machine_checksum, ('Could not find checksum for machine %s' %
machine_name)
self._all_machines.append(cm)
-CHROMEOS_ROOT = "/tmp/chromeos-root"
-MACHINE_NAMES = ["lumpy1", "lumpy2", "lumpy3", "daisy1", "daisy2"]
-LABEL_LUMPY = label.MockLabel("lumpy", "lumpy_chromeos_image", CHROMEOS_ROOT,
- "lumpy",
- ["lumpy1", "lumpy2", "lumpy3", "lumpy4"],
- "", "", False, "average," "gcc", None)
-LABEL_MIX = label.MockLabel("mix", "chromeos_image", CHROMEOS_ROOT, "mix",
- ["daisy1", "daisy2", "lumpy3", "lumpy4"],
- "", "", False, "average", "gcc", None)
+
+CHROMEOS_ROOT = '/tmp/chromeos-root'
+MACHINE_NAMES = ['lumpy1', 'lumpy2', 'lumpy3', 'daisy1', 'daisy2']
+LABEL_LUMPY = label.MockLabel('lumpy', 'lumpy_chromeos_image', CHROMEOS_ROOT,
+ 'lumpy', ['lumpy1', 'lumpy2', 'lumpy3', 'lumpy4'],
+ '', '', False, 'average,'
+ 'gcc', None)
+LABEL_MIX = label.MockLabel('mix', 'chromeos_image', CHROMEOS_ROOT, 'mix',
+ ['daisy1', 'daisy2', 'lumpy3', 'lumpy4'], '', '',
+ False, 'average', 'gcc', None)
class MachineManagerTest(unittest.TestCase):
@@ -66,15 +67,14 @@ class MachineManagerTest(unittest.TestCase):
mock_daisy1 = mock.Mock(spec=machine_manager.CrosMachine)
mock_daisy2 = mock.Mock(spec=machine_manager.CrosMachine)
- @mock.patch.object (os.path, 'isdir')
+ @mock.patch.object(os.path, 'isdir')
def setUp(self, mock_isdir):
mock_isdir.return_value = True
- self.mm = machine_manager.MachineManager("/usr/local/chromeos", 0,
- "average",
- file_lock_machine.Machine.LOCKS_DIR,
- self.mock_cmd_exec,
- self.mock_logger)
+ self.mm = machine_manager.MachineManager(
+ '/usr/local/chromeos', 0, 'average',
+ file_lock_machine.Machine.LOCKS_DIR, self.mock_cmd_exec,
+ self.mock_logger)
self.mock_lumpy1.name = 'lumpy1'
self.mock_lumpy2.name = 'lumpy2'
@@ -94,25 +94,24 @@ class MachineManagerTest(unittest.TestCase):
self.mock_lumpy4.checksum_string = 'lumpy_checksum_str'
self.mock_daisy1.checksum_string = 'daisy_checksum_str'
self.mock_daisy2.checksum_string = 'daisy_checksum_str'
- self.mock_lumpy1.cpuinfo = "lumpy_cpu_info"
- self.mock_lumpy2.cpuinfo = "lumpy_cpu_info"
- self.mock_lumpy3.cpuinfo = "lumpy_cpu_info"
- self.mock_lumpy4.cpuinfo = "lumpy_cpu_info"
- self.mock_daisy1.cpuinfo = "daisy_cpu_info"
- self.mock_daisy2.cpuinfo = "daisy_cpu_info"
+ self.mock_lumpy1.cpuinfo = 'lumpy_cpu_info'
+ self.mock_lumpy2.cpuinfo = 'lumpy_cpu_info'
+ self.mock_lumpy3.cpuinfo = 'lumpy_cpu_info'
+ self.mock_lumpy4.cpuinfo = 'lumpy_cpu_info'
+ self.mock_daisy1.cpuinfo = 'daisy_cpu_info'
+ self.mock_daisy2.cpuinfo = 'daisy_cpu_info'
self.mm._all_machines.append(self.mock_daisy1)
self.mm._all_machines.append(self.mock_daisy2)
self.mm._all_machines.append(self.mock_lumpy1)
self.mm._all_machines.append(self.mock_lumpy2)
self.mm._all_machines.append(self.mock_lumpy3)
-
def testGetMachines(self):
manager = MyMachineManager(CHROMEOS_ROOT)
for m in MACHINE_NAMES:
manager.AddMachine(m)
names = [m.name for m in manager.GetMachines(LABEL_LUMPY)]
- self.assertEqual(names, ["lumpy1", "lumpy2", "lumpy3"])
+ self.assertEqual(names, ['lumpy1', 'lumpy2', 'lumpy3'])
def testGetAvailableMachines(self):
manager = MyMachineManager(CHROMEOS_ROOT)
@@ -122,17 +121,17 @@ class MachineManagerTest(unittest.TestCase):
if int(m.name[-1]) % 2:
manager._TryToLockMachine(m)
names = [m.name for m in manager.GetAvailableMachines(LABEL_LUMPY)]
- self.assertEqual(names, ["lumpy1", "lumpy3"])
+ self.assertEqual(names, ['lumpy1', 'lumpy3'])
@mock.patch.object(time, 'sleep')
@mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
@mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
@mock.patch.object(image_checksummer.ImageChecksummer, 'Checksum')
- def test_image_machine(self, mock_checksummer, mock_run_croscmd,
- mock_run_cmd, mock_sleep):
+ def test_image_machine(self, mock_checksummer, mock_run_croscmd, mock_run_cmd,
+ mock_sleep):
def FakeMD5Checksum(input_str):
- return "machine_fake_md5_checksum"
+ return 'machine_fake_md5_checksum'
self.fake_logger_count = 0
self.fake_logger_msgs = []
@@ -148,7 +147,7 @@ class MachineManagerTest(unittest.TestCase):
mock_run_croscmd.reset_mock()
mock_checksummer.reset_mock()
mock_sleep.reset_mock()
- machine.checksum = "fake_md5_checksum"
+ machine.checksum = 'fake_md5_checksum'
self.mm.checksum = None
self.mm.num_reimages = 0
@@ -158,20 +157,20 @@ class MachineManagerTest(unittest.TestCase):
self.mm.logger.LogOutput = FakeLogOutput
machine = self.mock_lumpy1
machine._GetMD5Checksum = FakeMD5Checksum
- machine.checksum = "fake_md5_checksum"
- mock_checksummer.return_value = "fake_md5_checksum"
- self.mock_cmd_exec.log_level = "verbose"
+ machine.checksum = 'fake_md5_checksum'
+ mock_checksummer.return_value = 'fake_md5_checksum'
+ self.mock_cmd_exec.log_level = 'verbose'
test_flag.SetTestMode(True)
# Test 1: label.image_type == "local"
- LABEL_LUMPY.image_type = "local"
+ LABEL_LUMPY.image_type = 'local'
self.mm.ImageMachine(machine, LABEL_LUMPY)
self.assertEqual(mock_run_cmd.call_count, 0)
self.assertEqual(mock_run_croscmd.call_count, 0)
#Test 2: label.image_type == "trybot"
ResetValues()
- LABEL_LUMPY.image_type = "trybot"
+ LABEL_LUMPY.image_type = 'trybot'
mock_run_cmd.return_value = 0
self.mm.ImageMachine(machine, LABEL_LUMPY)
self.assertEqual(mock_run_croscmd.call_count, 0)
@@ -180,7 +179,7 @@ class MachineManagerTest(unittest.TestCase):
# Test 3: label.image_type is neither local nor trybot; retval from
# RunCommand is 1, i.e. image_chromeos fails...
ResetValues()
- LABEL_LUMPY.image_type = "other"
+ LABEL_LUMPY.image_type = 'other'
mock_run_cmd.return_value = 1
try:
self.mm.ImageMachine(machine, LABEL_LUMPY)
@@ -195,10 +194,10 @@ class MachineManagerTest(unittest.TestCase):
self.assertEqual(image_call_args[1].split('/')[-1], 'image_chromeos.pyc')
image_call_args = image_call_args[2:]
self.assertEqual(image_call_args,
- [ '--chromeos_root=/tmp/chromeos-root',
- '--image=lumpy_chromeos_image',
- '--image_args=', '--remote=lumpy1',
- '--logging_level=average', '--board=lumpy'])
+ ['--chromeos_root=/tmp/chromeos-root',
+ '--image=lumpy_chromeos_image', '--image_args=',
+ '--remote=lumpy1', '--logging_level=average',
+ '--board=lumpy'])
self.assertEqual(mock_run_croscmd.call_args[0][0], 'reboot && exit')
# Test 4: Everything works properly. Trybot image type.
@@ -210,7 +209,6 @@ class MachineManagerTest(unittest.TestCase):
self.assertEqual(mock_run_croscmd.call_count, 0)
self.assertEqual(mock_sleep.call_count, 0)
-
def test_compute_common_checksum(self):
self.mm.machine_checksum = {}
@@ -219,8 +217,8 @@ class MachineManagerTest(unittest.TestCase):
self.assertEqual(len(self.mm.machine_checksum), 1)
self.mm.machine_checksum = {}
- self.assertRaises(machine_manager.BadChecksum, self.mm.ComputeCommonCheckSum, LABEL_MIX)
-
+ self.assertRaises(machine_manager.BadChecksum,
+ self.mm.ComputeCommonCheckSum, LABEL_MIX)
def test_compute_common_checksum_string(self):
self.mm.machine_checksum_string = {}
@@ -235,19 +233,18 @@ class MachineManagerTest(unittest.TestCase):
self.assertEqual(self.mm.machine_checksum_string['mix'],
'daisy_checksum_str')
-
@mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
def test_try_to_lock_machine(self, mock_cros_runcmd):
self.assertRaises(self.mm._TryToLockMachine, None)
- mock_cros_runcmd.return_value = [0, "false_lock_checksum", ""]
+ mock_cros_runcmd.return_value = [0, 'false_lock_checksum', '']
self.mock_cmd_exec.CrosRunCommand = mock_cros_runcmd
self.mm._machines = []
self.mm._TryToLockMachine(self.mock_lumpy1)
self.assertEqual(len(self.mm._machines), 1)
self.assertEqual(self.mm._machines[0], self.mock_lumpy1)
- self.assertEqual(self.mock_lumpy1.checksum, "false_lock_checksum")
+ self.assertEqual(self.mock_lumpy1.checksum, 'false_lock_checksum')
self.assertEqual(mock_cros_runcmd.call_count, 1)
cmd_str = mock_cros_runcmd.call_args[0][0]
self.assertEqual(cmd_str, 'cat /usr/local/osimage_checksum_file')
@@ -256,31 +253,28 @@ class MachineManagerTest(unittest.TestCase):
self.assertEqual(args_dict['machine'], self.mock_lumpy1.name)
self.assertEqual(args_dict['chromeos_root'], '/usr/local/chromeos')
-
@mock.patch.object(machine_manager, 'CrosMachine')
def test_add_machine(self, mock_machine):
mock_machine.machine_checksum = 'daisy123'
- self.assertEqual (len(self.mm._all_machines), 5)
+ self.assertEqual(len(self.mm._all_machines), 5)
self.mm.AddMachine('daisy3')
- self.assertEqual (len(self.mm._all_machines), 6)
+ self.assertEqual(len(self.mm._all_machines), 6)
self.assertRaises(Exception, self.mm.AddMachine, 'lumpy1')
-
def test_remove_machine(self):
self.mm._machines = self.mm._all_machines
self.assertTrue(self.mock_lumpy2 in self.mm._machines)
self.mm.RemoveMachine(self.mock_lumpy2.name)
self.assertFalse(self.mock_lumpy2 in self.mm._machines)
-
def test_force_same_image_to_all_machines(self):
self.image_log = []
def FakeImageMachine(machine, label_arg):
image = label_arg.chromeos_image
- self.image_log.append("Pushed %s onto %s" % (image, machine.name))
+ self.image_log.append('Pushed %s onto %s' % (image, machine.name))
def FakeSetUpChecksumInfo():
pass
@@ -299,17 +293,15 @@ class MachineManagerTest(unittest.TestCase):
self.assertEqual(self.image_log[2],
'Pushed lumpy_chromeos_image onto lumpy3')
-
-
@mock.patch.object(image_checksummer.ImageChecksummer, 'Checksum')
- @mock.patch.object(hashlib,'md5')
+ @mock.patch.object(hashlib, 'md5')
def test_acquire_machine(self, mock_md5, mock_checksum):
self.msgs = []
self.log_fatal_msgs = []
def FakeLock(machine):
- self.msgs.append("Tried to lock %s" % machine.name)
+ self.msgs.append('Tried to lock %s' % machine.name)
def FakeLogFatal(msg):
self.log_fatal_msgs.append(msg)
@@ -317,17 +309,17 @@ class MachineManagerTest(unittest.TestCase):
self.mm._TryToLockMachine = FakeLock
self.mm.logger.LogFatal = FakeLogFatal
- mock_md5.return_value = "123456"
- mock_checksum.return_value = "fake_md5_checksum"
+ mock_md5.return_value = '123456'
+ mock_checksum.return_value = 'fake_md5_checksum'
self.mm._machines = self.mm._all_machines
self.mock_lumpy1.locked = True
self.mock_lumpy2.locked = True
self.mock_lumpy3.locked = False
- self.mock_lumpy3.checksum = "fake_md5_checksum"
+ self.mock_lumpy3.checksum = 'fake_md5_checksum'
self.mock_daisy1.locked = True
self.mock_daisy2.locked = False
- self.mock_daisy2.checksum = "fake_md5_checksum"
+ self.mock_daisy2.checksum = 'fake_md5_checksum'
self.mock_lumpy1.released_time = time.time()
self.mock_lumpy2.released_time = time.time()
@@ -341,8 +333,7 @@ class MachineManagerTest(unittest.TestCase):
self.assertEqual(m, self.mock_lumpy1)
self.assertTrue(self.mock_lumpy1.locked)
self.assertEqual(mock_md5.call_count, 0)
- self.assertEqual(self.msgs, ['Tried to lock lumpy1',
- 'Tried to lock lumpy2',
+ self.assertEqual(self.msgs, ['Tried to lock lumpy1', 'Tried to lock lumpy2',
'Tried to lock lumpy3'])
# Test the second return statment (machine is unlocked, has no checksum)
@@ -358,7 +349,7 @@ class MachineManagerTest(unittest.TestCase):
# - checksums don't match
# - current time minus release time is > 20.
self.mock_lumpy1.locked = False
- self.mock_lumpy1.checksum = "123"
+ self.mock_lumpy1.checksum = '123'
self.mock_lumpy1.released_time = time.time() - 8
m = self.mm.AcquireMachine(LABEL_LUMPY)
self.assertEqual(m, self.mock_lumpy1)
@@ -370,8 +361,7 @@ class MachineManagerTest(unittest.TestCase):
# Restore values of mock_lumpy1, so other tests succeed.
self.mock_lumpy1.locked = save_locked
- self.mock_lumpy1.checksum = "123"
-
+ self.mock_lumpy1.checksum = '123'
def test_get_available_machines(self):
self.mm._machines = self.mm._all_machines
@@ -387,7 +377,6 @@ class MachineManagerTest(unittest.TestCase):
self.assertEqual(machine_list, [self.mock_lumpy1, self.mock_lumpy2,
self.mock_lumpy3])
-
def test_get_machines(self):
machine_list = self.mm.GetMachines()
self.assertEqual(machine_list, self.mm._all_machines)
@@ -400,7 +389,6 @@ class MachineManagerTest(unittest.TestCase):
self.assertEqual(machine_list, [self.mock_lumpy1, self.mock_lumpy2,
self.mock_lumpy3])
-
def test_release_machines(self):
self.mm._machines = [self.mock_lumpy1, self.mock_daisy2]
@@ -411,45 +399,49 @@ class MachineManagerTest(unittest.TestCase):
self.assertTrue(self.mock_lumpy1.locked)
self.mm.ReleaseMachine(self.mock_lumpy1)
self.assertFalse(self.mock_lumpy1.locked)
- self.assertEqual(self.mock_lumpy1.status, "Available")
+ self.assertEqual(self.mock_lumpy1.status, 'Available')
self.assertTrue(self.mock_daisy2.locked)
self.mm.ReleaseMachine(self.mock_daisy2)
self.assertFalse(self.mock_daisy2.locked)
- self.assertEqual(self.mock_daisy2.status, "Available")
+ self.assertEqual(self.mock_daisy2.status, 'Available')
# Test double-relase...
self.assertRaises(AssertionError, self.mm.ReleaseMachine, self.mock_lumpy1)
-
def test_cleanup(self):
self.mock_logger.reset_mock()
self.mm.Cleanup()
self.assertEqual(self.mock_logger.call_count, 0)
- OUTPUT_STR = 'Machine Status:\nMachine Thread Lock Status Checksum \nlumpy1 test run True PENDING 123 \nlumpy2 test run False PENDING 123 \nlumpy3 test run False PENDING 123 \ndaisy1 test run False PENDING 678 \ndaisy2 test run True PENDING 678 '
+ OUTPUT_STR = ('Machine Status:\nMachine Thread '
+ 'Lock Status Checksum'
+ ' \nlumpy1 test '
+ 'run True PENDING 123'
+ ' \nlumpy2 '
+ 'test run False PENDING 123'
+ ' \nlumpy3 '
+ 'test run False PENDING 123'
+ ' \ndaisy1 '
+ 'test run False PENDING 678'
+ ' \ndaisy2 '
+ 'test run True PENDING 678'
+ ' ')
def test_as_string(self):
mock_logger = mock.Mock(spec=logger.Logger)
- bench = Benchmark("page_cycler.netsim.top_10", # name
- "page_cycler.netsim.top_10", # test_name
- "", # test_args
+ bench = Benchmark('page_cycler.netsim.top_10', # name
+ 'page_cycler.netsim.top_10', # test_name
+ '', # test_args
1, # iteratins
False, # rm_chroot_tmp
- "", # perf_args
- suite="telemetry_Crosperf") # suite
-
- test_run = MockBenchmarkRun("test run",
- bench,
- LABEL_LUMPY,
- 1,
- [],
- self.mm,
- mock_logger,
- "verbose",
- "")
+ '', # perf_args
+ suite='telemetry_Crosperf') # suite
+
+ test_run = MockBenchmarkRun('test run', bench, LABEL_LUMPY, 1, [], self.mm,
+ mock_logger, 'verbose', '')
self.mm._machines = [self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3,
self.mock_daisy1, self.mock_daisy2]
@@ -466,16 +458,15 @@ class MachineManagerTest(unittest.TestCase):
self.mock_daisy1.locked = False
self.mock_daisy2.locked = True
- self.mock_lumpy1.checksum = "123"
- self.mock_lumpy2.checksum = "123"
- self.mock_lumpy3.checksum = "123"
- self.mock_daisy1.checksum = "678"
- self.mock_daisy2.checksum = "678"
+ self.mock_lumpy1.checksum = '123'
+ self.mock_lumpy2.checksum = '123'
+ self.mock_lumpy3.checksum = '123'
+ self.mock_daisy1.checksum = '678'
+ self.mock_daisy2.checksum = '678'
output = self.mm.AsString()
self.assertEqual(output, self.OUTPUT_STR)
-
def test_get_all_cpu_info(self):
info = self.mm.GetAllCPUInfo([LABEL_LUMPY, LABEL_MIX])
self.assertEqual(info,
@@ -483,7 +474,6 @@ class MachineManagerTest(unittest.TestCase):
'------------------\ndaisy_cpu_info\n\n\n')
-
MEMINFO_STRING = """MemTotal: 3990332 kB
MemFree: 2608396 kB
Buffers: 147168 kB
@@ -574,7 +564,35 @@ address sizes: 36 bits physical, 48 bits virtual
power management:
"""
-CHECKSUM_STRING = "processor: 0vendor_id: GenuineIntelcpu family: 6model: 42model name: Intel(R) Celeron(R) CPU 867 @ 1.30GHzstepping: 7microcode: 0x25cache size: 2048 KBphysical id: 0siblings: 2core id: 0cpu cores: 2apicid: 0initial apicid: 0fpu: yesfpu_exception: yescpuid level: 13wp: yesflags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer xsave lahf_lm arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpidclflush size: 64cache_alignment: 64address sizes: 36 bits physical, 48 bits virtualpower management:processor: 1vendor_id: GenuineIntelcpu family: 6model: 42model name: Intel(R) Celeron(R) CPU 867 @ 1.30GHzstepping: 7microcode: 0x25cache size: 2048 KBphysical id: 0siblings: 2core id: 1cpu cores: 2apicid: 2initial apicid: 2fpu: yesfpu_exception: yescpuid level: 13wp: yesflags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer xsave lahf_lm arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpidclflush size: 64cache_alignment: 64address sizes: 36 bits physical, 48 bits virtualpower management: 4194304"
+CHECKSUM_STRING = ('processor: 0vendor_id: GenuineIntelcpu family: 6model: '
+ '42model name: Intel(R) Celeron(R) CPU 867 @ '
+ '1.30GHzstepping: 7microcode: 0x25cache size: 2048 '
+ 'KBphysical id: 0siblings: 2core id: 0cpu cores: 2apicid: '
+ '0initial apicid: 0fpu: yesfpu_exception: yescpuid level: '
+ '13wp: yesflags: fpu vme de pse tsc msr pae mce cx8 apic sep'
+ ' mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse '
+ 'sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc '
+ 'arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc '
+ 'aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 '
+ 'ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt '
+ 'tsc_deadline_timer xsave lahf_lm arat epb xsaveopt pln pts '
+ 'dts tpr_shadow vnmi flexpriority ept vpidclflush size: '
+ '64cache_alignment: 64address sizes: 36 bits physical, 48 '
+ 'bits virtualpower management:processor: 1vendor_id: '
+ 'GenuineIntelcpu family: 6model: 42model name: Intel(R) '
+ 'Celeron(R) CPU 867 @ 1.30GHzstepping: 7microcode: 0x25cache'
+ ' size: 2048 KBphysical id: 0siblings: 2core id: 1cpu cores:'
+ ' 2apicid: 2initial apicid: 2fpu: yesfpu_exception: yescpuid'
+ ' level: 13wp: yesflags: fpu vme de pse tsc msr pae mce cx8 '
+ 'apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx '
+ 'fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm '
+ 'constant_tsc arch_perfmon pebs bts rep_good nopl xtopology '
+ 'nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl '
+ 'vmx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic '
+ 'popcnt tsc_deadline_timer xsave lahf_lm arat epb xsaveopt '
+ 'pln pts dts tpr_shadow vnmi flexpriority ept vpidclflush '
+ 'size: 64cache_alignment: 64address sizes: 36 bits physical,'
+ ' 48 bits virtualpower management: 4194304')
DUMP_VPD_STRING = """
"PBA_SN"="Pba.txt"
@@ -592,7 +610,6 @@ DUMP_VPD_STRING = """
"ActivateDate"="2013-38"
"""
-
IFCONFIG_STRING = """
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 172.17.129.247 netmask 255.255.254.0 broadcast 172.17.129.255
@@ -640,12 +657,11 @@ class CrosMachineTest(unittest.TestCase):
@mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
def test_init(self, mock_setup):
- cm = machine_manager.CrosMachine("daisy.cros", "/usr/local/chromeos",
- "average", self.mock_cmd_exec)
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
self.assertEqual(mock_setup.call_count, 1)
- self.assertEqual(cm.chromeos_root, "/usr/local/chromeos")
- self.assertEqual(cm.log_level, "average")
-
+ self.assertEqual(cm.chromeos_root, '/usr/local/chromeos')
+ self.assertEqual(cm.log_level, 'average')
@mock.patch.object(machine_manager.CrosMachine, 'IsReachable')
@mock.patch.object(machine_manager.CrosMachine, '_GetMemoryInfo')
@@ -661,19 +677,19 @@ class CrosMachineTest(unittest.TestCase):
# Test 1. Machine is not reachable; SetUpChecksumInfo is called via
# __init__.
mock_isreachable.return_value = False
- mock_md5sum.return_value = "md5_checksum"
- cm = machine_manager.CrosMachine("daisy.cros", "/usr/local/chromeos",
- "average", self.mock_cmd_exec)
- cm.checksum_string = "This is a checksum string."
- cm.machine_id = "machine_id1"
+ mock_md5sum.return_value = 'md5_checksum'
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
+ cm.checksum_string = 'This is a checksum string.'
+ cm.machine_id = 'machine_id1'
self.assertEqual(mock_isreachable.call_count, 1)
self.assertIsNone(cm.machine_checksum)
self.assertEqual(mock_meminfo.call_count, 0)
# Test 2. Machine is reachable. Call explicitly.
mock_isreachable.return_value = True
- cm.checksum_string = "This is a checksum string."
- cm.machine_id = "machine_id1"
+ cm.checksum_string = 'This is a checksum string.'
+ cm.machine_id = 'machine_id1'
cm.SetUpChecksumInfo()
self.assertEqual(mock_isreachable.call_count, 2)
self.assertEqual(mock_meminfo.call_count, 1)
@@ -681,19 +697,18 @@ class CrosMachineTest(unittest.TestCase):
self.assertEqual(mock_checkstring.call_count, 1)
self.assertEqual(mock_machineid.call_count, 1)
self.assertEqual(mock_md5sum.call_count, 2)
- self.assertEqual(cm.machine_checksum, "md5_checksum")
- self.assertEqual(cm.machine_id_checksum, "md5_checksum")
+ self.assertEqual(cm.machine_checksum, 'md5_checksum')
+ self.assertEqual(cm.machine_id_checksum, 'md5_checksum')
self.assertEqual(mock_md5sum.call_args_list[0][0][0],
- "This is a checksum string.")
- self.assertEqual(mock_md5sum.call_args_list[1][0][0],
- "machine_id1")
+ 'This is a checksum string.')
+ self.assertEqual(mock_md5sum.call_args_list[1][0][0], 'machine_id1')
@mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
- @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
def test_is_reachable(self, mock_setup, mock_run_cmd):
- cm = machine_manager.CrosMachine("daisy.cros", "/usr/local/chromeos",
- "average", self.mock_cmd_exec)
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
self.mock_cmd_exec.CrosRunCommand = mock_run_cmd
# Test 1. CrosRunCommand returns 1 (fail)
@@ -710,106 +725,103 @@ class CrosMachineTest(unittest.TestCase):
self.assertEqual(mock_run_cmd.call_count, 2)
first_args = mock_run_cmd.call_args_list[0]
second_args = mock_run_cmd.call_args_list[1]
- self.assertEqual (first_args[0], second_args[0])
- self.assertEqual (first_args[1], second_args[1])
- self.assertEqual (len(first_args[0]), 1)
- self.assertEqual (len(first_args[1]), 2)
- self.assertEqual (first_args[0][0], 'ls')
+ self.assertEqual(first_args[0], second_args[0])
+ self.assertEqual(first_args[1], second_args[1])
+ self.assertEqual(len(first_args[0]), 1)
+ self.assertEqual(len(first_args[1]), 2)
+ self.assertEqual(first_args[0][0], 'ls')
args_dict = first_args[1]
- self.assertEqual (args_dict['machine'], 'daisy.cros')
- self.assertEqual (args_dict['chromeos_root'], '/usr/local/chromeos')
-
+ self.assertEqual(args_dict['machine'], 'daisy.cros')
+ self.assertEqual(args_dict['chromeos_root'], '/usr/local/chromeos')
- @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
def test_parse_memory_info(self, mock_setup):
- cm = machine_manager.CrosMachine("daisy.cros", "/usr/local/chromeos",
- "average", self.mock_cmd_exec)
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
cm.meminfo = MEMINFO_STRING
cm._ParseMemoryInfo()
self.assertEqual(cm.phys_kbytes, 4194304)
-
@mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
- @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
def test_get_memory_info(self, mock_setup, mock_run_cmd):
- cm = machine_manager.CrosMachine("daisy.cros", "/usr/local/chromeos",
- "average", self.mock_cmd_exec)
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
self.mock_cmd_exec.CrosRunCommand = mock_run_cmd
- mock_run_cmd.return_value = [0, MEMINFO_STRING, ""]
+ mock_run_cmd.return_value = [0, MEMINFO_STRING, '']
cm._GetMemoryInfo()
self.assertEqual(mock_run_cmd.call_count, 1)
call_args = mock_run_cmd.call_args_list[0]
- self.assertEqual(call_args[0][0], "cat /proc/meminfo")
+ self.assertEqual(call_args[0][0], 'cat /proc/meminfo')
args_dict = call_args[1]
self.assertEqual(args_dict['machine'], 'daisy.cros')
self.assertEqual(args_dict['chromeos_root'], '/usr/local/chromeos')
self.assertEqual(cm.meminfo, MEMINFO_STRING)
self.assertEqual(cm.phys_kbytes, 4194304)
- mock_run_cmd.return_value = [1, MEMINFO_STRING, ""]
- self.assertRaises (cm._GetMemoryInfo)
-
+ mock_run_cmd.return_value = [1, MEMINFO_STRING, '']
+ self.assertRaises(cm._GetMemoryInfo)
@mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
- @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
def test_get_cpu_info(self, mock_setup, mock_run_cmd):
- cm = machine_manager.CrosMachine("daisy.cros", "/usr/local/chromeos",
- "average", self.mock_cmd_exec)
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
self.mock_cmd_exec.CrosRunCommand = mock_run_cmd
- mock_run_cmd.return_value = [0, CPUINFO_STRING, ""]
+ mock_run_cmd.return_value = [0, CPUINFO_STRING, '']
cm._GetCPUInfo()
self.assertEqual(mock_run_cmd.call_count, 1)
call_args = mock_run_cmd.call_args_list[0]
- self.assertEqual(call_args[0][0], "cat /proc/cpuinfo")
+ self.assertEqual(call_args[0][0], 'cat /proc/cpuinfo')
args_dict = call_args[1]
self.assertEqual(args_dict['machine'], 'daisy.cros')
self.assertEqual(args_dict['chromeos_root'], '/usr/local/chromeos')
self.assertEqual(cm.cpuinfo, CPUINFO_STRING)
-
- @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
def test_compute_machine_checksum_string(self, mock_setup):
- cm = machine_manager.CrosMachine("daisy.cros", "/usr/local/chromeos",
- "average", self.mock_cmd_exec)
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
cm.cpuinfo = CPUINFO_STRING
cm.meminfo = MEMINFO_STRING
cm._ParseMemoryInfo()
cm._ComputeMachineChecksumString()
self.assertEqual(cm.checksum_string, CHECKSUM_STRING)
-
- @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
def test_get_md5_checksum(self, mock_setup):
- cm = machine_manager.CrosMachine("daisy.cros", "/usr/local/chromeos",
- "average", self.mock_cmd_exec)
- temp_str = "abcde"
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
+ temp_str = 'abcde'
checksum_str = cm._GetMD5Checksum(temp_str)
- self.assertEqual(checksum_str, "ab56b4d92b40713acc5af89985d4b786")
+ self.assertEqual(checksum_str, 'ab56b4d92b40713acc5af89985d4b786')
- temp_str = ""
+ temp_str = ''
checksum_str = cm._GetMD5Checksum(temp_str)
- self.assertEqual(checksum_str, "")
-
+ self.assertEqual(checksum_str, '')
@mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
- @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
def test_get_machine_id(self, mock_setup, mock_run_cmd):
- cm = machine_manager.CrosMachine("daisy.cros", "/usr/local/chromeos",
- "average", self.mock_cmd_exec)
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
self.mock_cmd_exec.CrosRunCommand = mock_run_cmd
- mock_run_cmd.return_value = [0, DUMP_VPD_STRING, ""]
+ mock_run_cmd.return_value = [0, DUMP_VPD_STRING, '']
cm._GetMachineID()
self.assertEqual(cm.machine_id, '"Product_S/N"="HT4L91SC300208"')
- mock_run_cmd.return_value = [0, IFCONFIG_STRING, ""]
+ mock_run_cmd.return_value = [0, IFCONFIG_STRING, '']
cm._GetMachineID()
- self.assertEqual(cm.machine_id, " ether 00:50:b6:63:db:65 txqueuelen 1000 (Ethernet)_ ether e8:03:9a:9c:50:3d txqueuelen 1000 (Ethernet)_ ether 44:6d:57:20:4a:c5 txqueuelen 1000 (Ethernet)")
+ self.assertEqual(
+ cm.machine_id,
+ ' ether 00:50:b6:63:db:65 txqueuelen 1000 (Ethernet)_ '
+ 'ether e8:03:9a:9c:50:3d txqueuelen 1000 (Ethernet)_ ether '
+ '44:6d:57:20:4a:c5 txqueuelen 1000 (Ethernet)')
- mock_run_cmd.return_value = [0, "invalid hardware config", ""]
+ mock_run_cmd.return_value = [0, 'invalid hardware config', '']
self.assertRaises(cm._GetMachineID)
-
-if __name__ == "__main__":
+if __name__ == '__main__':
unittest.main()
diff --git a/crosperf/mock_instance.py b/crosperf/mock_instance.py
index 62ff2da4..b689565c 100644
--- a/crosperf/mock_instance.py
+++ b/crosperf/mock_instance.py
@@ -1,9 +1,7 @@
-#!/usr/bin/python
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""This contains some mock instances for testing."""
from benchmark import Benchmark
@@ -12,103 +10,114 @@ from label import MockLabel
from machine_manager import MockMachineManager
from results_cache import MockResultsCache
-perf_args = "record -a -e cycles"
-label1 = MockLabel("test1", "image1", "/tmp/test_benchmark_run",
- "x86-alex", "chromeos-alex1", image_args="",
- cache_dir="", cache_only=False, log_level="average",
- compiler="gcc")
-
-label2 = MockLabel("test2", "image2", "/tmp/test_benchmark_run_2",
- "x86-alex", "chromeos-alex2", image_args="",
- cache_dir="", cache_only=False, log_level="average",
- compiler="gcc")
-
-benchmark1 = Benchmark("benchmark1", "autotest_name_1",
- "autotest_args", 2, "", perf_args, "", "")
-
-benchmark2 = Benchmark("benchmark2", "autotest_name_2",
- "autotest_args", 2, "", perf_args, "", "")
-
+perf_args = 'record -a -e cycles'
+label1 = MockLabel('test1',
+ 'image1',
+ '/tmp/test_benchmark_run',
+ 'x86-alex',
+ 'chromeos-alex1',
+ image_args='',
+ cache_dir='',
+ cache_only=False,
+ log_level='average',
+ compiler='gcc')
+
+label2 = MockLabel('test2',
+ 'image2',
+ '/tmp/test_benchmark_run_2',
+ 'x86-alex',
+ 'chromeos-alex2',
+ image_args='',
+ cache_dir='',
+ cache_only=False,
+ log_level='average',
+ compiler='gcc')
+
+benchmark1 = Benchmark('benchmark1', 'autotest_name_1', 'autotest_args', 2, '',
+ perf_args, '', '')
+
+benchmark2 = Benchmark('benchmark2', 'autotest_name_2', 'autotest_args', 2, '',
+ perf_args, '', '')
keyval = {}
keyval[0] = {'': 'PASS',
- 'milliseconds_1': '1',
- 'milliseconds_2': '8',
- 'milliseconds_3': '9.2',
- 'test{1}': '2',
- 'test{2}': '4',
- 'ms_1': '2.1',
- 'total': '5',
- 'bool': 'True'}
+ 'milliseconds_1': '1',
+ 'milliseconds_2': '8',
+ 'milliseconds_3': '9.2',
+ 'test{1}': '2',
+ 'test{2}': '4',
+ 'ms_1': '2.1',
+ 'total': '5',
+ 'bool': 'True'}
keyval[1] = {'': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_2': '5',
- 'ms_1': '2.2',
- 'total': '6',
- 'test{1}': '3',
- 'test{2}': '4',
- 'bool': 'FALSE'}
+ 'milliseconds_1': '3',
+ 'milliseconds_2': '5',
+ 'ms_1': '2.2',
+ 'total': '6',
+ 'test{1}': '3',
+ 'test{2}': '4',
+ 'bool': 'FALSE'}
keyval[2] = {'': 'PASS',
- 'milliseconds_4': '30',
- 'milliseconds_5': '50',
- 'ms_1': '2.23',
- 'total': '6',
- 'test{1}': '5',
- 'test{2}': '4',
- 'bool': 'FALSE'}
+ 'milliseconds_4': '30',
+ 'milliseconds_5': '50',
+ 'ms_1': '2.23',
+ 'total': '6',
+ 'test{1}': '5',
+ 'test{2}': '4',
+ 'bool': 'FALSE'}
keyval[3] = {'': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_6': '7',
- 'ms_1': '2.3',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '6',
- 'bool': 'FALSE'}
+ 'milliseconds_1': '3',
+ 'milliseconds_6': '7',
+ 'ms_1': '2.3',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '6',
+ 'bool': 'FALSE'}
keyval[4] = {'': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2.3',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '6',
- 'bool': 'TRUE'}
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2.3',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '6',
+ 'bool': 'TRUE'}
keyval[5] = {'': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2.2',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '2',
- 'bool': 'TRUE'}
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2.2',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '2',
+ 'bool': 'TRUE'}
keyval[6] = {'': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '4',
- 'bool': 'TRUE'}
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '4',
+ 'bool': 'TRUE'}
keyval[7] = {'': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '1',
- 'total': '7',
- 'test{1}': '1',
- 'test{2}': '6',
- 'bool': 'TRUE'}
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '1',
+ 'total': '7',
+ 'test{1}': '1',
+ 'test{2}': '6',
+ 'bool': 'TRUE'}
keyval[8] = {'': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '3.3',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '8',
- 'bool': 'TRUE'}
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '3.3',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '8',
+ 'bool': 'TRUE'}
diff --git a/crosperf/perf_table.py b/crosperf/perf_table.py
index 7e21c83c..c996719d 100644
--- a/crosperf/perf_table.py
+++ b/crosperf/perf_table.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Copyright 2012 Google Inc. All Rights Reserved.
"""Parse perf report data for tabulator."""
@@ -11,8 +9,10 @@ from utils import perf_diff
def ParsePerfReport(perf_file):
"""It should return a dict."""
- return {"cycles": {"foo": 10, "bar": 20},
- "cache_miss": {"foo": 20, "bar": 10}}
+ return {'cycles': {'foo': 10,
+ 'bar': 20},
+ 'cache_miss': {'foo': 20,
+ 'bar': 10}}
class PerfTable(object):
@@ -37,12 +37,11 @@ class PerfTable(object):
def GenerateData(self):
for label in self._label_names:
for benchmark in self._experiment.benchmarks:
- for i in range(1, benchmark.iterations+1):
+ for i in range(1, benchmark.iterations + 1):
dir_name = label + benchmark.name + str(i)
dir_name = filter(str.isalnum, dir_name)
- perf_file = os.path.join(self._experiment.results_directory,
- dir_name,
- "perf.data.report.0")
+ perf_file = os.path.join(self._experiment.results_directory, dir_name,
+ 'perf.data.report.0')
if os.path.exists(perf_file):
self.ReadPerfReport(perf_file, label, benchmark.name, i - 1)
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index cdf14315..fc619738 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -1,9 +1,7 @@
-#!/usr/bin/python
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Module to deal with result cache."""
import getpass
@@ -24,12 +22,13 @@ from image_checksummer import ImageChecksummer
import results_report
import test_flag
-SCRATCH_DIR = os.path.expanduser("~/cros_scratch")
-RESULTS_FILE = "results.txt"
-MACHINE_FILE = "machine.txt"
-AUTOTEST_TARBALL = "autotest.tbz2"
-PERF_RESULTS_FILE = "perf-results.txt"
-CACHE_KEYS_FILE = "cache_keys.txt"
+SCRATCH_DIR = os.path.expanduser('~/cros_scratch')
+RESULTS_FILE = 'results.txt'
+MACHINE_FILE = 'machine.txt'
+AUTOTEST_TARBALL = 'autotest.tbz2'
+PERF_RESULTS_FILE = 'perf-results.txt'
+CACHE_KEYS_FILE = 'cache_keys.txt'
+
class Result(object):
""" This class manages what exactly is stored inside the cache without knowing
@@ -40,8 +39,9 @@ class Result(object):
def __init__(self, logger, label, log_level, machine, cmd_exec=None):
self._chromeos_root = label.chromeos_root
self._logger = logger
- self._ce = cmd_exec or command_executer.GetCommandExecuter(self._logger,
- log_level=log_level)
+ self._ce = cmd_exec or command_executer.GetCommandExecuter(
+ self._logger,
+ log_level=log_level)
self._temp_dir = None
self.label = label
self.results_dir = None
@@ -54,28 +54,26 @@ class Result(object):
file_index = 0
for file_to_copy in files_to_copy:
if not os.path.isdir(dest_dir):
- command = "mkdir -p %s" % dest_dir
+ command = 'mkdir -p %s' % dest_dir
self._ce.RunCommand(command)
dest_file = os.path.join(dest_dir,
- ("%s.%s" % (os.path.basename(file_to_copy),
+ ('%s.%s' % (os.path.basename(file_to_copy),
file_index)))
- ret = self._ce.CopyFiles(file_to_copy,
- dest_file,
- recursive=False)
+ ret = self._ce.CopyFiles(file_to_copy, dest_file, recursive=False)
if ret:
- raise Exception("Could not copy results file: %s" % file_to_copy)
+ raise Exception('Could not copy results file: %s' % file_to_copy)
def CopyResultsTo(self, dest_dir):
self._CopyFilesTo(dest_dir, self.perf_data_files)
self._CopyFilesTo(dest_dir, self.perf_report_files)
if len(self.perf_data_files) or len(self.perf_report_files):
- self._logger.LogOutput("Perf results files stored in %s." % dest_dir)
+ self._logger.LogOutput('Perf results files stored in %s.' % dest_dir)
def _GetNewKeyvals(self, keyvals_dict):
# Initialize 'units' dictionary.
units_dict = {}
for k in keyvals_dict:
- units_dict[k] = ""
+ units_dict[k] = ''
results_files = self._GetDataMeasurementsFiles()
for f in results_files:
# Make sure we can find the results file
@@ -85,26 +83,23 @@ class Result(object):
# Otherwise get the base filename and create the correct
# path for it.
f_dir, f_base = misc.GetRoot(f)
- data_filename = os.path.join(self._chromeos_root, "chroot/tmp",
+ data_filename = os.path.join(self._chromeos_root, 'chroot/tmp',
self._temp_dir, f_base)
if os.path.exists(data_filename):
- with open(data_filename, "r") as data_file:
+ with open(data_filename, 'r') as data_file:
lines = data_file.readlines()
for line in lines:
tmp_dict = json.loads(line)
- graph_name = tmp_dict["graph"]
- graph_str = (graph_name + "__") if graph_name else ""
- key = graph_str + tmp_dict["description"]
- keyvals_dict[key] = tmp_dict["value"]
- units_dict[key] = tmp_dict["units"]
+ graph_name = tmp_dict['graph']
+ graph_str = (graph_name + '__') if graph_name else ''
+ key = graph_str + tmp_dict['description']
+ keyvals_dict[key] = tmp_dict['value']
+ units_dict[key] = tmp_dict['units']
return keyvals_dict, units_dict
-
def _AppendTelemetryUnits(self, keyvals_dict, units_dict):
- """
- keyvals_dict is the dictionary of key-value pairs that is used for
- generating Crosperf reports.
+ """keyvals_dict is the dictionary of key-value pairs that is used for generating Crosperf reports.
units_dict is a dictionary of the units for the return values in
keyvals_dict. We need to associate the units with the return values,
@@ -119,31 +114,31 @@ class Result(object):
results_dict = {}
for k in keyvals_dict:
# We don't want these lines in our reports; they add no useful data.
- if k == "" or k == "telemetry_Crosperf":
+ if k == '' or k == 'telemetry_Crosperf':
continue
val = keyvals_dict[k]
units = units_dict[k]
- new_val = [ val, units ]
+ new_val = [val, units]
results_dict[k] = new_val
return results_dict
def _GetKeyvals(self, show_all):
- results_in_chroot = os.path.join(self._chromeos_root,
- "chroot", "tmp")
+ results_in_chroot = os.path.join(self._chromeos_root, 'chroot', 'tmp')
if not self._temp_dir:
self._temp_dir = tempfile.mkdtemp(dir=results_in_chroot)
- command = "cp -r {0}/* {1}".format(self.results_dir, self._temp_dir)
+ command = 'cp -r {0}/* {1}'.format(self.results_dir, self._temp_dir)
self._ce.RunCommand(command, print_to_console=False)
- command = ("python generate_test_report --no-color --csv %s" %
- (os.path.join("/tmp", os.path.basename(self._temp_dir))))
- _, out, _ = self._ce.ChrootRunCommandWOutput(
- self._chromeos_root, command, print_to_console=False)
+ command = ('python generate_test_report --no-color --csv %s' %
+ (os.path.join('/tmp', os.path.basename(self._temp_dir))))
+ _, out, _ = self._ce.ChrootRunCommandWOutput(self._chromeos_root,
+ command,
+ print_to_console=False)
keyvals_dict = {}
tmp_dir_in_chroot = misc.GetInsideChrootPath(self._chromeos_root,
self._temp_dir)
for line in out.splitlines():
- tokens = re.split("=|,", line)
+ tokens = re.split('=|,', line)
key = tokens[-2]
if key.startswith(tmp_dir_in_chroot):
key = key[len(tmp_dir_in_chroot) + 1:]
@@ -153,39 +148,37 @@ class Result(object):
# Check to see if there is a perf_measurements file and get the
# data from it if so.
keyvals_dict, units_dict = self._GetNewKeyvals(keyvals_dict)
- if self.suite == "telemetry_Crosperf":
+ if self.suite == 'telemetry_Crosperf':
# For telemtry_Crosperf results, append the units to the return
# results, for use in generating the reports.
- keyvals_dict = self._AppendTelemetryUnits(keyvals_dict,
- units_dict)
+ keyvals_dict = self._AppendTelemetryUnits(keyvals_dict, units_dict)
return keyvals_dict
def _GetResultsDir(self):
- mo = re.search(r"Results placed in (\S+)", self.out)
+ mo = re.search(r'Results placed in (\S+)', self.out)
if mo:
result = mo.group(1)
return result
- raise Exception("Could not find results directory.")
+ raise Exception('Could not find results directory.')
def _FindFilesInResultsDir(self, find_args):
if not self.results_dir:
return None
- command = "find %s %s" % (self.results_dir,
- find_args)
+ command = 'find %s %s' % (self.results_dir, find_args)
ret, out, _ = self._ce.RunCommandWOutput(command, print_to_console=False)
if ret:
- raise Exception("Could not run find command!")
+ raise Exception('Could not run find command!')
return out
def _GetPerfDataFiles(self):
- return self._FindFilesInResultsDir("-name perf.data").splitlines()
+ return self._FindFilesInResultsDir('-name perf.data').splitlines()
def _GetPerfReportFiles(self):
- return self._FindFilesInResultsDir("-name perf.data.report").splitlines()
+ return self._FindFilesInResultsDir('-name perf.data.report').splitlines()
def _GetDataMeasurementsFiles(self):
- return self._FindFilesInResultsDir("-name perf_measurements").splitlines()
+ return self._FindFilesInResultsDir('-name perf_measurements').splitlines()
def _GeneratePerfReportFiles(self):
perf_report_files = []
@@ -194,65 +187,57 @@ class Result(object):
# file.
chroot_perf_data_file = misc.GetInsideChrootPath(self._chromeos_root,
perf_data_file)
- perf_report_file = "%s.report" % perf_data_file
+ perf_report_file = '%s.report' % perf_data_file
if os.path.exists(perf_report_file):
- raise Exception("Perf report file already exists: %s" %
+ raise Exception('Perf report file already exists: %s' %
perf_report_file)
chroot_perf_report_file = misc.GetInsideChrootPath(self._chromeos_root,
perf_report_file)
- perf_path = os.path.join (self._chromeos_root,
- "chroot",
- "usr/bin/perf")
+ perf_path = os.path.join(self._chromeos_root, 'chroot', 'usr/bin/perf')
- perf_file = "/usr/sbin/perf"
+ perf_file = '/usr/sbin/perf'
if os.path.exists(perf_path):
- perf_file = "/usr/bin/perf"
+ perf_file = '/usr/bin/perf'
# The following is a hack, to use the perf.static binary that
# was given to us by Stephane Eranian, until he can figure out
# why "normal" perf cannot properly symbolize ChromeOS perf.data files.
# Get the directory containing the 'crosperf' script.
dirname, _ = misc.GetRoot(sys.argv[0])
- perf_path = os.path.join (dirname, "..", "perf.static")
+ perf_path = os.path.join(dirname, '..', 'perf.static')
if os.path.exists(perf_path):
# copy the executable into the chroot so that it can be found.
src_path = perf_path
- dst_path = os.path.join (self._chromeos_root, "chroot",
- "tmp/perf.static")
- command = "cp %s %s" % (src_path,dst_path)
- self._ce.RunCommand (command)
- perf_file = "/tmp/perf.static"
-
- command = ("%s report "
- "-n "
- "--symfs /build/%s "
- "--vmlinux /build/%s/usr/lib/debug/boot/vmlinux "
- "--kallsyms /build/%s/boot/System.map-* "
- "-i %s --stdio "
- "> %s" %
- (perf_file,
- self._board,
- self._board,
- self._board,
- chroot_perf_data_file,
- chroot_perf_report_file))
+ dst_path = os.path.join(self._chromeos_root, 'chroot',
+ 'tmp/perf.static')
+ command = 'cp %s %s' % (src_path, dst_path)
+ self._ce.RunCommand(command)
+ perf_file = '/tmp/perf.static'
+
+ command = ('%s report '
+ '-n '
+ '--symfs /build/%s '
+ '--vmlinux /build/%s/usr/lib/debug/boot/vmlinux '
+ '--kallsyms /build/%s/boot/System.map-* '
+ '-i %s --stdio '
+ '> %s' % (perf_file, self._board, self._board, self._board,
+ chroot_perf_data_file, chroot_perf_report_file))
self._ce.ChrootRunCommand(self._chromeos_root, command)
# Add a keyval to the dictionary for the events captured.
- perf_report_files.append(
- misc.GetOutsideChrootPath(self._chromeos_root,
- chroot_perf_report_file))
+ perf_report_files.append(misc.GetOutsideChrootPath(
+ self._chromeos_root, chroot_perf_report_file))
return perf_report_files
def _GatherPerfResults(self):
report_id = 0
for perf_report_file in self.perf_report_files:
- with open(perf_report_file, "r") as f:
+ with open(perf_report_file, 'r') as f:
report_contents = f.read()
- for group in re.findall(r"Events: (\S+) (\S+)", report_contents):
+ for group in re.findall(r'Events: (\S+) (\S+)', report_contents):
num_events = group[0]
event_name = group[1]
- key = "perf_%s_%s" % (report_id, event_name)
+ key = 'perf_%s_%s' % (report_id, event_name)
value = str(misc.UnitToNumber(num_events))
self.keyvals[key] = value
@@ -279,7 +264,7 @@ class Result(object):
# cache hit or miss. It should process results agnostic of the cache hit
# state.
self.keyvals = self._GetKeyvals(show_all)
- self.keyvals["retval"] = self.retval
+ self.keyvals['retval'] = self.retval
# Generate report from all perf.data files.
# Now parse all perf report files and include them in keyvals.
self._GatherPerfResults()
@@ -288,21 +273,20 @@ class Result(object):
self.test_name = test
self.suite = suite
# Read in everything from the cache directory.
- with open(os.path.join(cache_dir, RESULTS_FILE), "r") as f:
+ with open(os.path.join(cache_dir, RESULTS_FILE), 'r') as f:
self.out = pickle.load(f)
self.err = pickle.load(f)
self.retval = pickle.load(f)
# Untar the tarball to a temporary directory
- self._temp_dir = tempfile.mkdtemp(dir=os.path.join(self._chromeos_root,
- "chroot", "tmp"))
+ self._temp_dir = tempfile.mkdtemp(
+ dir=os.path.join(self._chromeos_root, 'chroot', 'tmp'))
- command = ("cd %s && tar xf %s" %
- (self._temp_dir,
- os.path.join(cache_dir, AUTOTEST_TARBALL)))
+ command = ('cd %s && tar xf %s' %
+ (self._temp_dir, os.path.join(cache_dir, AUTOTEST_TARBALL)))
ret = self._ce.RunCommand(command, print_to_console=False)
if ret:
- raise Exception("Could not untar cached tarball")
+ raise Exception('Could not untar cached tarball')
self.results_dir = self._temp_dir
self.perf_data_files = self._GetPerfDataFiles()
self.perf_report_files = self._GetPerfReportFiles()
@@ -311,13 +295,13 @@ class Result(object):
def CleanUp(self, rm_chroot_tmp):
if rm_chroot_tmp and self.results_dir:
dirname, basename = misc.GetRoot(self.results_dir)
- if basename.find("test_that_results_") != -1:
- command = "rm -rf %s" % self.results_dir
+ if basename.find('test_that_results_') != -1:
+ command = 'rm -rf %s' % self.results_dir
else:
- command = "rm -rf %s" % dirname
+ command = 'rm -rf %s' % dirname
self._ce.RunCommand(command)
if self._temp_dir:
- command = "rm -rf %s" % self._temp_dir
+ command = 'rm -rf %s' % self._temp_dir
self._ce.RunCommand(command)
def StoreToCacheDir(self, cache_dir, machine_manager, key_list):
@@ -325,54 +309,62 @@ class Result(object):
temp_dir = tempfile.mkdtemp()
# Store to the temp directory.
- with open(os.path.join(temp_dir, RESULTS_FILE), "w") as f:
+ with open(os.path.join(temp_dir, RESULTS_FILE), 'w') as f:
pickle.dump(self.out, f)
pickle.dump(self.err, f)
pickle.dump(self.retval, f)
if not test_flag.GetTestMode():
- with open(os.path.join(temp_dir, CACHE_KEYS_FILE), "w") as f:
- f.write("%s\n" % self.label.name)
- f.write("%s\n" % self.label.chrome_version)
- f.write("%s\n" % self.machine.checksum_string)
+ with open(os.path.join(temp_dir, CACHE_KEYS_FILE), 'w') as f:
+ f.write('%s\n' % self.label.name)
+ f.write('%s\n' % self.label.chrome_version)
+ f.write('%s\n' % self.machine.checksum_string)
for k in key_list:
f.write(k)
- f.write("\n")
+ f.write('\n')
if self.results_dir:
tarball = os.path.join(temp_dir, AUTOTEST_TARBALL)
- command = ("cd %s && "
- "tar "
- "--exclude=var/spool "
- "--exclude=var/log "
- "-cjf %s ." % (self.results_dir, tarball))
+ command = ('cd %s && '
+ 'tar '
+ '--exclude=var/spool '
+ '--exclude=var/log '
+ '-cjf %s .' % (self.results_dir, tarball))
ret = self._ce.RunCommand(command)
if ret:
raise Exception("Couldn't store autotest output directory.")
# Store machine info.
# TODO(asharif): Make machine_manager a singleton, and don't pass it into
# this function.
- with open(os.path.join(temp_dir, MACHINE_FILE), "w") as f:
+ with open(os.path.join(temp_dir, MACHINE_FILE), 'w') as f:
f.write(machine_manager.machine_checksum_string[self.label.name])
if os.path.exists(cache_dir):
- command = "rm -rf {0}".format(cache_dir)
+ command = 'rm -rf {0}'.format(cache_dir)
self._ce.RunCommand(command)
- command = "mkdir -p {0} && ".format(os.path.dirname(cache_dir))
- command += "chmod g+x {0} && ".format(temp_dir)
- command += "mv {0} {1}".format(temp_dir, cache_dir)
+ command = 'mkdir -p {0} && '.format(os.path.dirname(cache_dir))
+ command += 'chmod g+x {0} && '.format(temp_dir)
+ command += 'mv {0} {1}'.format(temp_dir, cache_dir)
ret = self._ce.RunCommand(command)
if ret:
- command = "rm -rf {0}".format(temp_dir)
+ command = 'rm -rf {0}'.format(temp_dir)
self._ce.RunCommand(command)
- raise Exception("Could not move dir %s to dir %s" %
- (temp_dir, cache_dir))
+ raise Exception('Could not move dir %s to dir %s' % (temp_dir, cache_dir))
@classmethod
- def CreateFromRun(cls, logger, log_level, label, machine, out, err, retval,
- show_all, test, suite="telemetry_Crosperf"):
- if suite == "telemetry":
+ def CreateFromRun(cls,
+ logger,
+ log_level,
+ label,
+ machine,
+ out,
+ err,
+ retval,
+ show_all,
+ test,
+ suite='telemetry_Crosperf'):
+ if suite == 'telemetry':
result = TelemetryResult(logger, label, log_level, machine)
else:
result = cls(logger, label, log_level, machine)
@@ -380,9 +372,16 @@ class Result(object):
return result
@classmethod
- def CreateFromCacheHit(cls, logger, log_level, label, machine, cache_dir,
- show_all, test, suite="telemetry_Crosperf"):
- if suite == "telemetry":
+ def CreateFromCacheHit(cls,
+ logger,
+ log_level,
+ label,
+ machine,
+ cache_dir,
+ show_all,
+ test,
+ suite='telemetry_Crosperf'):
+ if suite == 'telemetry':
result = TelemetryResult(logger, label, log_level, machine)
else:
result = cls(logger, label, log_level, machine)
@@ -390,7 +389,7 @@ class Result(object):
result._PopulateFromCacheDir(cache_dir, show_all, test, suite)
except Exception as e:
- logger.LogError("Exception while using cache: %s" % e)
+ logger.LogError('Exception while using cache: %s' % e)
return None
return result
@@ -424,24 +423,24 @@ class TelemetryResult(Result):
self.keyvals = {}
if lines:
- if lines[0].startswith("JSON.stringify"):
+ if lines[0].startswith('JSON.stringify'):
lines = lines[1:]
if not lines:
return
- labels = lines[0].split(",")
+ labels = lines[0].split(',')
for line in lines[1:]:
- fields = line.split(",")
+ fields = line.split(',')
if len(fields) != len(labels):
continue
for i in range(1, len(labels)):
- key = "%s %s" % (fields[0], labels[i])
+ key = '%s %s' % (fields[0], labels[i])
value = fields[i]
self.keyvals[key] = value
- self.keyvals["retval"] = self.retval
+ self.keyvals['retval'] = self.retval
def _PopulateFromCacheDir(self, cache_dir):
- with open(os.path.join(cache_dir, RESULTS_FILE), "r") as f:
+ with open(os.path.join(cache_dir, RESULTS_FILE), 'r') as f:
self.out = pickle.load(f)
self.err = pickle.load(f)
self.retval = pickle.load(f)
@@ -474,17 +473,16 @@ class CacheConditions(object):
class ResultsCache(object):
-
""" This class manages the key of the cached runs without worrying about what
is exactly stored (value). The value generation is handled by the Results
class.
"""
CACHE_VERSION = 6
- def Init(self, chromeos_image, chromeos_root, test_name, iteration,
- test_args, profiler_args, machine_manager, machine, board,
- cache_conditions, logger_to_use, log_level, label, share_cache,
- suite, show_all_results, run_local):
+ def Init(self, chromeos_image, chromeos_root, test_name, iteration, test_args,
+ profiler_args, machine_manager, machine, board, cache_conditions,
+ logger_to_use, log_level, label, share_cache, suite,
+ show_all_results, run_local):
self.chromeos_image = chromeos_image
self.chromeos_root = chromeos_root
self.test_name = test_name
@@ -519,19 +517,17 @@ class ResultsCache(object):
def _GetCacheDirForWrite(self, get_keylist=False):
cache_path = self._FormCacheDir(self._GetCacheKeyList(False))[0]
if get_keylist:
- args_str = "%s_%s_%s" % (self.test_args,
- self.profiler_args,
+ args_str = '%s_%s_%s' % (self.test_args, self.profiler_args,
self.run_local)
version, image = results_report.ParseChromeosImage(
self.label.chromeos_image)
- keylist = [version, image, self.label.board,
- self.machine.name, self.test_name, str(self.iteration),
- args_str]
+ keylist = [version, image, self.label.board, self.machine.name,
+ self.test_name, str(self.iteration), args_str]
return cache_path, keylist
return cache_path
def _FormCacheDir(self, list_of_strings):
- cache_key = " ".join(list_of_strings)
+ cache_key = ' '.join(list_of_strings)
cache_dir = misc.GetFilenameFromString(cache_key)
if self.label.cache_dir:
cache_home = os.path.abspath(os.path.expanduser(self.label.cache_dir))
@@ -540,36 +536,36 @@ class ResultsCache(object):
cache_path = [os.path.join(SCRATCH_DIR, cache_dir)]
if len(self.share_cache):
- for path in [x.strip() for x in self.share_cache.split(",")]:
+ for path in [x.strip() for x in self.share_cache.split(',')]:
if os.path.exists(path):
cache_path.append(os.path.join(path, cache_dir))
else:
- self._logger.LogFatal("Unable to find shared cache: %s" % path)
+ self._logger.LogFatal('Unable to find shared cache: %s' % path)
return cache_path
def _GetCacheKeyList(self, read):
if read and CacheConditions.MACHINES_MATCH not in self.cache_conditions:
- machine_checksum = "*"
+ machine_checksum = '*'
else:
machine_checksum = self.machine_manager.machine_checksum[self.label.name]
if read and CacheConditions.CHECKSUMS_MATCH not in self.cache_conditions:
- checksum = "*"
- elif self.label.image_type == "trybot":
+ checksum = '*'
+ elif self.label.image_type == 'trybot':
checksum = hashlib.md5(self.label.chromeos_image).hexdigest()
- elif self.label.image_type == "official":
- checksum = "*"
+ elif self.label.image_type == 'official':
+ checksum = '*'
else:
checksum = ImageChecksummer().Checksum(self.label, self.log_level)
if read and CacheConditions.IMAGE_PATH_MATCH not in self.cache_conditions:
- image_path_checksum = "*"
+ image_path_checksum = '*'
else:
image_path_checksum = hashlib.md5(self.chromeos_image).hexdigest()
- machine_id_checksum = ""
+ machine_id_checksum = ''
if read and CacheConditions.SAME_MACHINE_MATCH not in self.cache_conditions:
- machine_id_checksum = "*"
+ machine_id_checksum = '*'
else:
if self.machine and self.machine.name in self.label.remote:
machine_id_checksum = self.machine.machine_id_checksum
@@ -579,23 +575,17 @@ class ResultsCache(object):
machine_id_checksum = machine.machine_id_checksum
break
- temp_test_args = "%s %s %s" % (self.test_args,
- self.profiler_args,
+ temp_test_args = '%s %s %s' % (self.test_args, self.profiler_args,
self.run_local)
- test_args_checksum = hashlib.md5(
- "".join(temp_test_args)).hexdigest()
- return (image_path_checksum,
- self.test_name, str(self.iteration),
- test_args_checksum,
- checksum,
- machine_checksum,
- machine_id_checksum,
+ test_args_checksum = hashlib.md5(''.join(temp_test_args)).hexdigest()
+ return (image_path_checksum, self.test_name, str(self.iteration),
+ test_args_checksum, checksum, machine_checksum, machine_id_checksum,
str(self.CACHE_VERSION))
def ReadResult(self):
if CacheConditions.FALSE in self.cache_conditions:
cache_dir = self._GetCacheDirForWrite()
- command = "rm -rf {0}".format(cache_dir)
+ command = 'rm -rf {0}'.format(cache_dir)
self._ce.RunCommand(command)
return None
cache_dir = self._GetCacheDirForRead()
@@ -607,15 +597,10 @@ class ResultsCache(object):
return None
if self.log_level == 'verbose':
- self._logger.LogOutput("Trying to read from cache dir: %s" % cache_dir)
- result = Result.CreateFromCacheHit(self._logger,
- self.log_level,
- self.label,
- self.machine,
- cache_dir,
- self.show_all,
- self.test_name,
- self.suite)
+ self._logger.LogOutput('Trying to read from cache dir: %s' % cache_dir)
+ result = Result.CreateFromCacheHit(self._logger, self.log_level, self.label,
+ self.machine, cache_dir, self.show_all,
+ self.test_name, self.suite)
if not result:
return None
@@ -631,6 +616,7 @@ class ResultsCache(object):
class MockResultsCache(ResultsCache):
+
def Init(self, *args):
pass
diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py
index 790b4718..11746db7 100755
--- a/crosperf/results_cache_unittest.py
+++ b/crosperf/results_cache_unittest.py
@@ -120,12 +120,40 @@ INFO : Test results:
INFO : Elapsed time: 0m18s
"""
-
-keyvals = {'': 'PASS', 'b_stdio_putcgetc__0_': '0.100005711667', 'b_string_strstr___azbycxdwevfugthsirjqkplomn__': '0.0133123556667', 'b_malloc_thread_local__0_': '0.01138439', 'b_string_strlen__0_': '0.044893587', 'b_malloc_sparse__0_': '0.015053784', 'b_string_memset__0_': '0.00275405066667', 'platform_LibCBench': 'PASS', 'b_pthread_uselesslock__0_': '0.0294113346667', 'b_string_strchr__0_': '0.00456903', 'b_pthread_create_serial1__0_': '0.0291785246667', 'b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac__': '0.118360778', 'b_string_strstr___aaaaaaaaaaaaaacccccccccccc__': '0.0135694476667', 'b_pthread_createjoin_serial1__0_': '0.031907936', 'b_malloc_thread_stress__0_': '0.0367894733333', 'b_regex_search____a_b_c__d_b__': '0.00165455066667', 'b_malloc_bubble__0_': '0.015066374', 'b_malloc_big2__0_': '0.002951359', 'b_stdio_putcgetc_unlocked__0_': '0.0371443833333', 'b_pthread_createjoin_serial2__0_': '0.043485347', 'b_regex_search___a_25_b__': '0.0496191923333', 'b_utf8_bigbuf__0_': '0.0473772253333', 'b_malloc_big1__0_': '0.00375231466667', 'b_regex_compile____a_b_c__d_b__': '0.00529833933333', 'b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaac__': '0.068957325', 'b_malloc_tiny2__0_': '0.000581407333333', 'b_utf8_onebyone__0_': '0.130938538333', 'b_malloc_tiny1__0_': '0.000768474333333', 'b_string_strstr___abcdefghijklmnopqrstuvwxyz__': '0.0134553343333'}
-
+keyvals = {'': 'PASS',
+ 'b_stdio_putcgetc__0_': '0.100005711667',
+ 'b_string_strstr___azbycxdwevfugthsirjqkplomn__': '0.0133123556667',
+ 'b_malloc_thread_local__0_': '0.01138439',
+ 'b_string_strlen__0_': '0.044893587',
+ 'b_malloc_sparse__0_': '0.015053784',
+ 'b_string_memset__0_': '0.00275405066667',
+ 'platform_LibCBench': 'PASS',
+ 'b_pthread_uselesslock__0_': '0.0294113346667',
+ 'b_string_strchr__0_': '0.00456903',
+ 'b_pthread_create_serial1__0_': '0.0291785246667',
+ 'b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac__':
+ '0.118360778',
+ 'b_string_strstr___aaaaaaaaaaaaaacccccccccccc__': '0.0135694476667',
+ 'b_pthread_createjoin_serial1__0_': '0.031907936',
+ 'b_malloc_thread_stress__0_': '0.0367894733333',
+ 'b_regex_search____a_b_c__d_b__': '0.00165455066667',
+ 'b_malloc_bubble__0_': '0.015066374',
+ 'b_malloc_big2__0_': '0.002951359',
+ 'b_stdio_putcgetc_unlocked__0_': '0.0371443833333',
+ 'b_pthread_createjoin_serial2__0_': '0.043485347',
+ 'b_regex_search___a_25_b__': '0.0496191923333',
+ 'b_utf8_bigbuf__0_': '0.0473772253333',
+ 'b_malloc_big1__0_': '0.00375231466667',
+ 'b_regex_compile____a_b_c__d_b__': '0.00529833933333',
+ 'b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaac__': '0.068957325',
+ 'b_malloc_tiny2__0_': '0.000581407333333',
+ 'b_utf8_onebyone__0_': '0.130938538333',
+ 'b_malloc_tiny1__0_': '0.000768474333333',
+ 'b_string_strstr___abcdefghijklmnopqrstuvwxyz__': '0.0134553343333'}
TMP_DIR1 = '/tmp/tmpAbcXyz'
+
class MockResult(Result):
def __init__(self, logger, label, logging_level, machine):
@@ -141,24 +169,22 @@ class MockResult(Result):
class ResultTest(unittest.TestCase):
mock_label = MockLabel('mock_label', 'chromeos_image', '/tmp', 'lumpy',
- 'remote', 'image_args', 'cache_dir', 'average',
- 'gcc', None)
+ 'remote', 'image_args', 'cache_dir', 'average', 'gcc',
+ None)
mock_logger = mock.Mock(spec=logger.Logger)
mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
def testCreateFromRun(self):
result = MockResult.CreateFromRun(logger.GetLogger(), 'average',
- self.mock_label, 'remote1',
- OUTPUT, error, 0, True, 0)
+ self.mock_label, 'remote1', OUTPUT, error,
+ 0, True, 0)
self.assertEqual(result.keyvals, keyvals)
self.assertEqual(result.chroot_results_dir,
'/tmp/test_that.PO1234567/platform_LibCBench')
self.assertEqual(result.results_dir,
- '/tmp/chroot/tmp/test_that.PO1234567/platform_LibCBench')
+ '/tmp/chroot/tmp/test_that.PO1234567/platform_LibCBench')
self.assertEqual(result.retval, 0)
-
-
def setUp(self):
self.result = Result(self.mock_logger, self.mock_label, 'average',
self.mock_cmd_exec)
@@ -204,8 +230,7 @@ class ResultTest(unittest.TestCase):
mock_copyfiles.return_value = 1
self.assertRaises(Exception, self.result._CopyFilesTo, dest_dir, files)
-
- @mock.patch.object (Result, '_CopyFilesTo')
+ @mock.patch.object(Result, '_CopyFilesTo')
def test_copy_results_to(self, mock_CopyFilesTo):
perf_data_files = ['/tmp/perf.data.0', '/tmp/perf.data.1',
'/tmp/perf.data.2']
@@ -224,7 +249,6 @@ class ResultTest(unittest.TestCase):
self.assertEqual(mock_CopyFilesTo.call_args_list[1][0],
('/tmp/results/', perf_report_files))
-
def test_get_new_keyvals(self):
kv_dict = {}
@@ -235,51 +259,73 @@ class ResultTest(unittest.TestCase):
self.result._GetDataMeasurementsFiles = FakeGetDataMeasurementsFiles
kv_dict2, udict = self.result._GetNewKeyvals(kv_dict)
self.assertEqual(kv_dict2,
- {u'Box2D__Box2D': 4775, u'Mandreel__Mandreel': 6620,
- u'Gameboy__Gameboy': 9901, u'Crypto__Crypto': 8737,
- u'telemetry_page_measurement_results__num_errored': 0,
- u'telemetry_page_measurement_results__num_failed': 0,
- u'PdfJS__PdfJS': 6455, u'Total__Score': 7918,
- u'EarleyBoyer__EarleyBoyer': 14340,
- u'MandreelLatency__MandreelLatency': 5188,
- u'CodeLoad__CodeLoad': 6271, u'DeltaBlue__DeltaBlue': 14401,
- u'Typescript__Typescript': 9815,
- u'SplayLatency__SplayLatency': 7653, u'zlib__zlib': 16094,
- u'Richards__Richards': 10358, u'RegExp__RegExp': 1765,
- u'NavierStokes__NavierStokes': 9815, u'Splay__Splay': 4425,
- u'RayTrace__RayTrace': 16600})
- self.assertEqual(udict,
- {u'Box2D__Box2D': u'score', u'Mandreel__Mandreel': u'score',
- u'Gameboy__Gameboy': u'score', u'Crypto__Crypto': u'score',
- u'telemetry_page_measurement_results__num_errored': u'count',
- u'telemetry_page_measurement_results__num_failed': u'count',
- u'PdfJS__PdfJS': u'score', u'Total__Score': u'score',
- u'EarleyBoyer__EarleyBoyer': u'score',
- u'MandreelLatency__MandreelLatency': u'score',
- u'CodeLoad__CodeLoad': u'score',
- u'DeltaBlue__DeltaBlue': u'score',
- u'Typescript__Typescript': u'score',
- u'SplayLatency__SplayLatency': u'score', u'zlib__zlib': u'score',
- u'Richards__Richards': u'score', u'RegExp__RegExp': u'score',
- u'NavierStokes__NavierStokes': u'score',
- u'Splay__Splay': u'score', u'RayTrace__RayTrace': u'score'})
-
+ {u'Box2D__Box2D': 4775,
+ u'Mandreel__Mandreel': 6620,
+ u'Gameboy__Gameboy': 9901,
+ u'Crypto__Crypto': 8737,
+ u'telemetry_page_measurement_results__num_errored': 0,
+ u'telemetry_page_measurement_results__num_failed': 0,
+ u'PdfJS__PdfJS': 6455,
+ u'Total__Score': 7918,
+ u'EarleyBoyer__EarleyBoyer': 14340,
+ u'MandreelLatency__MandreelLatency': 5188,
+ u'CodeLoad__CodeLoad': 6271,
+ u'DeltaBlue__DeltaBlue': 14401,
+ u'Typescript__Typescript': 9815,
+ u'SplayLatency__SplayLatency': 7653,
+ u'zlib__zlib': 16094,
+ u'Richards__Richards': 10358,
+ u'RegExp__RegExp': 1765,
+ u'NavierStokes__NavierStokes': 9815,
+ u'Splay__Splay': 4425,
+ u'RayTrace__RayTrace': 16600})
+ self.assertEqual(
+ udict, {u'Box2D__Box2D': u'score',
+ u'Mandreel__Mandreel': u'score',
+ u'Gameboy__Gameboy': u'score',
+ u'Crypto__Crypto': u'score',
+ u'telemetry_page_measurement_results__num_errored': u'count',
+ u'telemetry_page_measurement_results__num_failed': u'count',
+ u'PdfJS__PdfJS': u'score',
+ u'Total__Score': u'score',
+ u'EarleyBoyer__EarleyBoyer': u'score',
+ u'MandreelLatency__MandreelLatency': u'score',
+ u'CodeLoad__CodeLoad': u'score',
+ u'DeltaBlue__DeltaBlue': u'score',
+ u'Typescript__Typescript': u'score',
+ u'SplayLatency__SplayLatency': u'score',
+ u'zlib__zlib': u'score',
+ u'Richards__Richards': u'score',
+ u'RegExp__RegExp': u'score',
+ u'NavierStokes__NavierStokes': u'score',
+ u'Splay__Splay': u'score',
+ u'RayTrace__RayTrace': u'score'})
def test_append_telemetry_units(self):
- kv_dict = {u'Box2D__Box2D': 4775, u'Mandreel__Mandreel': 6620,
- u'Gameboy__Gameboy': 9901, u'Crypto__Crypto': 8737,
- u'PdfJS__PdfJS': 6455, u'Total__Score': 7918,
+ kv_dict = {u'Box2D__Box2D': 4775,
+ u'Mandreel__Mandreel': 6620,
+ u'Gameboy__Gameboy': 9901,
+ u'Crypto__Crypto': 8737,
+ u'PdfJS__PdfJS': 6455,
+ u'Total__Score': 7918,
u'EarleyBoyer__EarleyBoyer': 14340,
u'MandreelLatency__MandreelLatency': 5188,
- u'CodeLoad__CodeLoad': 6271, u'DeltaBlue__DeltaBlue': 14401,
+ u'CodeLoad__CodeLoad': 6271,
+ u'DeltaBlue__DeltaBlue': 14401,
u'Typescript__Typescript': 9815,
- u'SplayLatency__SplayLatency': 7653, u'zlib__zlib': 16094,
- u'Richards__Richards': 10358, u'RegExp__RegExp': 1765,
- u'NavierStokes__NavierStokes': 9815, u'Splay__Splay': 4425,
+ u'SplayLatency__SplayLatency': 7653,
+ u'zlib__zlib': 16094,
+ u'Richards__Richards': 10358,
+ u'RegExp__RegExp': 1765,
+ u'NavierStokes__NavierStokes': 9815,
+ u'Splay__Splay': 4425,
u'RayTrace__RayTrace': 16600}
- units_dict = {u'Box2D__Box2D': u'score', u'Mandreel__Mandreel': u'score',
- u'Gameboy__Gameboy': u'score', u'Crypto__Crypto': u'score',
- u'PdfJS__PdfJS': u'score', u'Total__Score': u'score',
+ units_dict = {u'Box2D__Box2D': u'score',
+ u'Mandreel__Mandreel': u'score',
+ u'Gameboy__Gameboy': u'score',
+ u'Crypto__Crypto': u'score',
+ u'PdfJS__PdfJS': u'score',
+ u'Total__Score': u'score',
u'EarleyBoyer__EarleyBoyer': u'score',
u'MandreelLatency__MandreelLatency': u'score',
u'CodeLoad__CodeLoad': u'score',
@@ -287,9 +333,11 @@ class ResultTest(unittest.TestCase):
u'Typescript__Typescript': u'score',
u'SplayLatency__SplayLatency': u'score',
u'zlib__zlib': u'score',
- u'Richards__Richards': u'score', u'RegExp__RegExp': u'score',
+ u'Richards__Richards': u'score',
+ u'RegExp__RegExp': u'score',
u'NavierStokes__NavierStokes': u'score',
- u'Splay__Splay': u'score', u'RayTrace__RayTrace': u'score'}
+ u'Splay__Splay': u'score',
+ u'RayTrace__RayTrace': u'score'}
results_dict = self.result._AppendTelemetryUnits(kv_dict, units_dict)
self.assertEqual(results_dict,
@@ -312,11 +360,10 @@ class ResultTest(unittest.TestCase):
u'RayTrace__RayTrace': [16600, u'score'],
u'NavierStokes__NavierStokes': [9815, u'score']})
-
- @mock.patch.object (misc, 'GetInsideChrootPath')
- @mock.patch.object (tempfile, 'mkdtemp')
- @mock.patch.object (command_executer.CommandExecuter, 'RunCommand')
- @mock.patch.object (command_executer.CommandExecuter, 'ChrootRunCommand')
+ @mock.patch.object(misc, 'GetInsideChrootPath')
+ @mock.patch.object(tempfile, 'mkdtemp')
+ @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
+ @mock.patch.object(command_executer.CommandExecuter, 'ChrootRunCommand')
def test_get_keyvals(self, mock_chrootruncmd, mock_runcmd, mock_mkdtemp,
mock_getpath):
@@ -334,16 +381,14 @@ class ResultTest(unittest.TestCase):
def FakeGetNewKeyvals(kv_dict):
self.kv_dict = kv_dict
self.call_GetNewKeyvals = True
- return_kvdict = { 'first_time' : 680, 'Total' : 10}
- return_udict = { 'first_time' : 'ms', 'Total' : 'score'}
+ return_kvdict = {'first_time': 680, 'Total': 10}
+ return_udict = {'first_time': 'ms', 'Total': 'score'}
return return_kvdict, return_udict
-
mock_mkdtemp.return_value = TMP_DIR1
mock_chrootruncmd.return_value = ['',
('%s,PASS\n%s/telemetry_Crosperf,PASS\n')
- % (TMP_DIR1, TMP_DIR1),
- '']
+ % (TMP_DIR1, TMP_DIR1), '']
mock_getpath.return_value = TMP_DIR1
self.result._ce.ChrootRunCommand = mock_chrootruncmd
self.result._ce.RunCommand = mock_runcmd
@@ -354,26 +399,22 @@ class ResultTest(unittest.TestCase):
# Test 1. no self._temp_dir.
res = self.result._GetKeyvals(True)
self.assertTrue(self.call_GetNewKeyvals)
- self.assertEqual(self.kv_dict, { '': 'PASS', 'telemetry_Crosperf': 'PASS' })
+ self.assertEqual(self.kv_dict, {'': 'PASS', 'telemetry_Crosperf': 'PASS'})
self.assertEqual(mock_runcmd.call_count, 1)
self.assertEqual(mock_runcmd.call_args_list[0][0],
('cp -r /tmp/test_that_resultsNmq/* %s' % TMP_DIR1,))
self.assertEqual(mock_chrootruncmd.call_count, 1)
- self.assertEqual(mock_chrootruncmd.call_args_list[0][0],
- ('/tmp',
- ('python generate_test_report --no-color --csv %s') %
- TMP_DIR1))
+ self.assertEqual(mock_chrootruncmd.call_args_list[0][0], (
+ '/tmp', ('python generate_test_report --no-color --csv %s') % TMP_DIR1))
self.assertEqual(mock_getpath.call_count, 1)
self.assertEqual(mock_mkdtemp.call_count, 1)
self.assertEqual(res, {'Total': [10, 'score'], 'first_time': [680, 'ms']})
-
# Test 2. self._temp_dir
reset()
mock_chrootruncmd.return_value = ['',
('/tmp/tmpJCajRG,PASS\n/tmp/tmpJCajRG/'
- 'telemetry_Crosperf,PASS\n'),
- '']
+ 'telemetry_Crosperf,PASS\n'), '']
mock_getpath.return_value = '/tmp/tmpJCajRG'
self.result._temp_dir = '/tmp/tmpJCajRG'
res = self.result._GetKeyvals(True)
@@ -381,7 +422,7 @@ class ResultTest(unittest.TestCase):
self.assertEqual(mock_mkdtemp.call_count, 0)
self.assertEqual(mock_chrootruncmd.call_count, 1)
self.assertTrue(self.call_GetNewKeyvals)
- self.assertEqual(self.kv_dict, { '': 'PASS', 'telemetry_Crosperf': 'PASS' })
+ self.assertEqual(self.kv_dict, {'': 'PASS', 'telemetry_Crosperf': 'PASS'})
self.assertEqual(res, {'Total': [10, 'score'], 'first_time': [680, 'ms']})
# Test 3. suite != telemetry_Crosperf. Normally this would be for
@@ -392,8 +433,7 @@ class ResultTest(unittest.TestCase):
reset()
self.result.suite = ''
res = self.result._GetKeyvals(True)
- self.assertEqual(res, {'Total': 10, 'first_time': 680 })
-
+ self.assertEqual(res, {'Total': 10, 'first_time': 680})
def test_get_results_dir(self):
@@ -402,11 +442,9 @@ class ResultTest(unittest.TestCase):
self.result.out = OUTPUT
resdir = self.result._GetResultsDir()
- self.assertEqual(resdir,
- '/tmp/test_that.PO1234567/platform_LibCBench')
-
+ self.assertEqual(resdir, '/tmp/test_that.PO1234567/platform_LibCBench')
- @mock.patch.object (command_executer.CommandExecuter, 'RunCommand')
+ @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
def test_find_files_in_results_dir(self, mock_runcmd):
self.result.results_dir = None
@@ -427,9 +465,7 @@ class ResultTest(unittest.TestCase):
self.assertRaises(Exception, self.result._FindFilesInResultsDir,
'-name perf.data')
-
-
- @mock.patch.object (Result, '_FindFilesInResultsDir')
+ @mock.patch.object(Result, '_FindFilesInResultsDir')
def test_get_perf_data_files(self, mock_findfiles):
self.args = None
@@ -439,7 +475,6 @@ class ResultTest(unittest.TestCase):
self.assertEqual(res, ['line1', 'line1'])
self.assertEqual(mock_findfiles.call_args_list[0][0], ('-name perf.data',))
-
def test_get_perf_report_files(self):
self.args = None
@@ -452,7 +487,6 @@ class ResultTest(unittest.TestCase):
self.assertEqual(res, ['line1', 'line1'])
self.assertEqual(self.args, '-name perf.data.report')
-
def test_get_data_measurement_files(self):
self.args = None
@@ -465,9 +499,8 @@ class ResultTest(unittest.TestCase):
self.assertEqual(res, ['line1', 'line1'])
self.assertEqual(self.args, '-name perf_measurements')
-
- @mock.patch.object (misc, 'GetInsideChrootPath')
- @mock.patch.object (command_executer.CommandExecuter, 'ChrootRunCommand')
+ @mock.patch.object(misc, 'GetInsideChrootPath')
+ @mock.patch.object(command_executer.CommandExecuter, 'ChrootRunCommand')
def test_generate_perf_report_files(self, mock_chrootruncmd, mock_getpath):
fake_file = '/usr/chromeos/chroot/tmp/results/fake_file'
self.result.perf_data_files = ['/tmp/results/perf.data']
@@ -483,9 +516,7 @@ class ResultTest(unittest.TestCase):
'--kallsyms /build/lumpy/boot/System.map-* -i '
'%s --stdio > %s') % (fake_file, fake_file)))
-
-
- @mock.patch.object (misc, 'GetOutsideChrootPath')
+ @mock.patch.object(misc, 'GetOutsideChrootPath')
def test_populate_from_run(self, mock_getpath):
def FakeGetResultsDir():
@@ -527,9 +558,9 @@ class ResultTest(unittest.TestCase):
def FakeGetKeyvals(show_all):
if show_all:
- return { 'first_time' : 680, 'Total' : 10}
+ return {'first_time': 680, 'Total': 10}
else:
- return { 'Total' : 10}
+ return {'Total': 10}
def FakeGatherPerfResults():
self.callGatherPerfResults = True
@@ -543,18 +574,17 @@ class ResultTest(unittest.TestCase):
self.result._ProcessResults(True)
self.assertTrue(self.callGatherPerfResults)
self.assertEqual(len(self.result.keyvals), 3)
- self.assertEqual(self.result.keyvals,
- { 'first_time' : 680, 'Total' : 10, 'retval' : 0 })
+ self.assertEqual(self.result.keyvals, {'first_time': 680,
+ 'Total': 10,
+ 'retval': 0})
self.result.retval = 1
self.result._ProcessResults(False)
self.assertEqual(len(self.result.keyvals), 2)
- self.assertEqual(self.result.keyvals,
- { 'Total' : 10, 'retval' : 1 })
-
+ self.assertEqual(self.result.keyvals, {'Total': 10, 'retval': 1})
- @mock.patch.object (misc, 'GetInsideChrootPath')
- @mock.patch.object (command_executer.CommandExecuter, 'ChrootRunCommand')
+ @mock.patch.object(misc, 'GetInsideChrootPath')
+ @mock.patch.object(command_executer.CommandExecuter, 'ChrootRunCommand')
def test_populate_from_cache_dir(self, mock_runchrootcmd, mock_getpath):
def FakeMkdtemp(dir=''):
@@ -566,8 +596,7 @@ class ResultTest(unittest.TestCase):
self.result._ce.ChrootRunCommand = mock_runchrootcmd
mock_runchrootcmd.return_value = ['',
('%s,PASS\n%s/\telemetry_Crosperf,PASS\n')
- % (TMP_DIR1, TMP_DIR1),
- '']
+ % (TMP_DIR1, TMP_DIR1), '']
mock_getpath.return_value = TMP_DIR1
self.tmpdir = tempfile.mkdtemp()
save_real_mkdtemp = tempfile.mkdtemp
@@ -575,54 +604,47 @@ class ResultTest(unittest.TestCase):
self.result._PopulateFromCacheDir(cache_dir, True, 'sunspider',
'telemetry_Crosperf')
- self.assertEqual(self.result.keyvals,
- {u'Total__Total': [444.0, u'ms'],
- u'regexp-dna__regexp-dna': [16.2, u'ms'],
- u'telemetry_page_measurement_results__num_failed':
- [0, u'count'],
- u'telemetry_page_measurement_results__num_errored':
- [0, u'count'],
- u'string-fasta__string-fasta': [23.2, u'ms'],
- u'crypto-sha1__crypto-sha1': [11.6, u'ms'],
- u'bitops-3bit-bits-in-byte__bitops-3bit-bits-in-byte':
- [3.2, u'ms'],
- u'access-nsieve__access-nsieve': [7.9, u'ms'],
- u'bitops-nsieve-bits__bitops-nsieve-bits': [9.4, u'ms'],
- u'string-validate-input__string-validate-input':
- [19.3, u'ms'],
- u'3d-raytrace__3d-raytrace': [24.7, u'ms'],
- u'3d-cube__3d-cube': [28.0, u'ms'],
- u'string-unpack-code__string-unpack-code': [46.7, u'ms'],
- u'date-format-tofte__date-format-tofte': [26.3, u'ms'],
- u'math-partial-sums__math-partial-sums': [22.0, u'ms'],
- '\telemetry_Crosperf': ['PASS', ''],
- u'crypto-aes__crypto-aes': [15.2, u'ms'],
- u'bitops-bitwise-and__bitops-bitwise-and': [8.4, u'ms'],
- u'crypto-md5__crypto-md5': [10.5, u'ms'],
- u'string-tagcloud__string-tagcloud': [52.8, u'ms'],
- u'access-nbody__access-nbody': [8.5, u'ms'],
- 'retval': 0,
- u'math-spectral-norm__math-spectral-norm': [6.6, u'ms'],
- u'math-cordic__math-cordic': [8.7, u'ms'],
- u'access-binary-trees__access-binary-trees': [4.5, u'ms'],
- u'controlflow-recursive__controlflow-recursive':
- [4.4, u'ms'],
- u'access-fannkuch__access-fannkuch': [17.8, u'ms'],
- u'string-base64__string-base64': [16.0, u'ms'],
- u'date-format-xparb__date-format-xparb': [20.9, u'ms'],
- u'3d-morph__3d-morph': [22.1, u'ms'],
- u'bitops-bits-in-byte__bitops-bits-in-byte': [9.1, u'ms']
- })
-
+ self.assertEqual(
+ self.result.keyvals,
+ {u'Total__Total': [444.0, u'ms'],
+ u'regexp-dna__regexp-dna': [16.2, u'ms'],
+ u'telemetry_page_measurement_results__num_failed': [0, u'count'],
+ u'telemetry_page_measurement_results__num_errored': [0, u'count'],
+ u'string-fasta__string-fasta': [23.2, u'ms'],
+ u'crypto-sha1__crypto-sha1': [11.6, u'ms'],
+ u'bitops-3bit-bits-in-byte__bitops-3bit-bits-in-byte': [3.2, u'ms'],
+ u'access-nsieve__access-nsieve': [7.9, u'ms'],
+ u'bitops-nsieve-bits__bitops-nsieve-bits': [9.4, u'ms'],
+ u'string-validate-input__string-validate-input': [19.3, u'ms'],
+ u'3d-raytrace__3d-raytrace': [24.7, u'ms'],
+ u'3d-cube__3d-cube': [28.0, u'ms'],
+ u'string-unpack-code__string-unpack-code': [46.7, u'ms'],
+ u'date-format-tofte__date-format-tofte': [26.3, u'ms'],
+ u'math-partial-sums__math-partial-sums': [22.0, u'ms'],
+ '\telemetry_Crosperf': ['PASS', ''],
+ u'crypto-aes__crypto-aes': [15.2, u'ms'],
+ u'bitops-bitwise-and__bitops-bitwise-and': [8.4, u'ms'],
+ u'crypto-md5__crypto-md5': [10.5, u'ms'],
+ u'string-tagcloud__string-tagcloud': [52.8, u'ms'],
+ u'access-nbody__access-nbody': [8.5, u'ms'],
+ 'retval': 0,
+ u'math-spectral-norm__math-spectral-norm': [6.6, u'ms'],
+ u'math-cordic__math-cordic': [8.7, u'ms'],
+ u'access-binary-trees__access-binary-trees': [4.5, u'ms'],
+ u'controlflow-recursive__controlflow-recursive': [4.4, u'ms'],
+ u'access-fannkuch__access-fannkuch': [17.8, u'ms'],
+ u'string-base64__string-base64': [16.0, u'ms'],
+ u'date-format-xparb__date-format-xparb': [20.9, u'ms'],
+ u'3d-morph__3d-morph': [22.1, u'ms'],
+ u'bitops-bits-in-byte__bitops-bits-in-byte': [9.1, u'ms']})
# Clean up after test.
tempfile.mkdtemp = save_real_mkdtemp
command = 'rm -Rf %s' % self.tmpdir
self.result._ce.RunCommand(command)
-
- @mock.patch.object (misc, 'GetRoot')
- @mock.patch.object (command_executer.CommandExecuter, 'RunCommand')
+ @mock.patch.object(misc, 'GetRoot')
+ @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
def test_cleanup(self, mock_runcmd, mock_getroot):
# Test 1. 'rm_chroot_tmp' is True; self.results_dir exists;
@@ -673,9 +695,8 @@ class ResultTest(unittest.TestCase):
self.assertEqual(mock_getroot.call_count, 0)
self.assertEqual(mock_runcmd.call_count, 0)
-
- @mock.patch.object (misc, 'GetInsideChrootPath')
- @mock.patch.object (command_executer.CommandExecuter, 'ChrootRunCommand')
+ @mock.patch.object(misc, 'GetInsideChrootPath')
+ @mock.patch.object(command_executer.CommandExecuter, 'ChrootRunCommand')
def test_store_to_cache_dir(self, mock_chrootruncmd, mock_getpath):
def FakeMkdtemp(dir=''):
@@ -728,19 +749,102 @@ class ResultTest(unittest.TestCase):
self.result._ce.RunCommand(command)
-TELEMETRY_RESULT_KEYVALS = {'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html math-cordic (ms)': '11.4', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html access-nbody (ms)': '6.9', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html access-fannkuch (ms)': '26.3', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html math-spectral-norm (ms)': '6.3', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html bitops-nsieve-bits (ms)': '9.3', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html math-partial-sums (ms)': '32.8', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html regexp-dna (ms)': '16.1', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html 3d-cube (ms)': '42.7', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html crypto-md5 (ms)': '10.8', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html crypto-sha1 (ms)': '12.4', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html string-tagcloud (ms)': '47.2', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html string-fasta (ms)': '36.3', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html access-binary-trees (ms)': '7.3', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html date-format-xparb (ms)': '138.1', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html crypto-aes (ms)': '19.2', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html Total (ms)': '656.5', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html string-base64 (ms)': '17.5', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html string-validate-input (ms)': '24.8', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html 3d-raytrace (ms)': '28.7', 
'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html controlflow-recursive (ms)': '5.3', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html bitops-bits-in-byte (ms)': '9.8', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html 3d-morph (ms)': '50.2', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html bitops-bitwise-and (ms)': '8.8', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html access-nsieve (ms)': '8.6', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html date-format-tofte (ms)': '31.2', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html bitops-3bit-bits-in-byte (ms)': '3.5', 'retval': 0, 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html string-unpack-code (ms)': '45.0'}
+TELEMETRY_RESULT_KEYVALS = {
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'math-cordic (ms)':
+ '11.4',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'access-nbody (ms)':
+ '6.9',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'access-fannkuch (ms)':
+ '26.3',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'math-spectral-norm (ms)':
+ '6.3',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'bitops-nsieve-bits (ms)':
+ '9.3',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'math-partial-sums (ms)':
+ '32.8',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'regexp-dna (ms)':
+ '16.1',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ '3d-cube (ms)':
+ '42.7',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'crypto-md5 (ms)':
+ '10.8',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'crypto-sha1 (ms)':
+ '12.4',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'string-tagcloud (ms)':
+ '47.2',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'string-fasta (ms)':
+ '36.3',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'access-binary-trees (ms)':
+ '7.3',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'date-format-xparb (ms)':
+ '138.1',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'crypto-aes (ms)':
+ '19.2',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'Total (ms)':
+ '656.5',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'string-base64 (ms)':
+ '17.5',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'string-validate-input (ms)':
+ '24.8',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ '3d-raytrace (ms)':
+ '28.7',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'controlflow-recursive (ms)':
+ '5.3',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'bitops-bits-in-byte (ms)':
+ '9.8',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ '3d-morph (ms)':
+ '50.2',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'bitops-bitwise-and (ms)':
+ '8.8',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'access-nsieve (ms)':
+ '8.6',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'date-format-tofte (ms)':
+ '31.2',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'bitops-3bit-bits-in-byte (ms)':
+ '3.5',
+ 'retval': 0,
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'string-unpack-code (ms)':
+ '45.0'
+}
PURE_TELEMETRY_OUTPUT = """page_name,3d-cube (ms),3d-morph (ms),3d-raytrace (ms),Total (ms),access-binary-trees (ms),access-fannkuch (ms),access-nbody (ms),access-nsieve (ms),bitops-3bit-bits-in-byte (ms),bitops-bits-in-byte (ms),bitops-bitwise-and (ms),bitops-nsieve-bits (ms),controlflow-recursive (ms),crypto-aes (ms),crypto-md5 (ms),crypto-sha1 (ms),date-format-tofte (ms),date-format-xparb (ms),math-cordic (ms),math-partial-sums (ms),math-spectral-norm (ms),regexp-dna (ms),string-base64 (ms),string-fasta (ms),string-tagcloud (ms),string-unpack-code (ms),string-validate-input (ms)\r\nhttp://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html,42.7,50.2,28.7,656.5,7.3,26.3,6.9,8.6,3.5,9.8,8.8,9.3,5.3,19.2,10.8,12.4,31.2,138.1,11.4,32.8,6.3,16.1,17.5,36.3,47.2,45.0,24.8\r\n"""
+
class TelemetryResultTest(unittest.TestCase):
mock_logger = mock.Mock(spec=logger.Logger)
mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
mock_label = MockLabel('mock_label', 'chromeos_image', '/tmp', 'lumpy',
- 'remote', 'image_args', 'cache_dir', 'average',
- 'gcc', None)
- mock_machine = machine_manager.MockCrosMachine('falco.cros',
- '/tmp/chromeos',
+ 'remote', 'image_args', 'cache_dir', 'average', 'gcc',
+ None)
+ mock_machine = machine_manager.MockCrosMachine('falco.cros', '/tmp/chromeos',
'average')
def test_populate_from_run(self):
@@ -749,8 +853,8 @@ class TelemetryResultTest(unittest.TestCase):
self.callFakeProcessResults = True
self.callFakeProcessResults = False
- self.result = TelemetryResult(self.mock_logger, self.mock_label,
- 'average', self.mock_cmd_exec)
+ self.result = TelemetryResult(self.mock_logger, self.mock_label, 'average',
+ self.mock_cmd_exec)
self.result._ProcessResults = FakeProcessResults
self.result._PopulateFromRun(OUTPUT, error, 3, False, 'fake_test',
'telemetry_Crosperf')
@@ -759,11 +863,10 @@ class TelemetryResultTest(unittest.TestCase):
self.assertEqual(self.result.err, error)
self.assertEqual(self.result.retval, 3)
-
def test_populate_from_cache_dir_and_process_results(self):
- self.result = TelemetryResult(self.mock_logger, self.mock_label,
- 'average', self.mock_machine)
+ self.result = TelemetryResult(self.mock_logger, self.mock_label, 'average',
+ self.mock_machine)
current_path = os.getcwd()
cache_dir = os.path.join(current_path,
'test_cache/test_puretelemetry_input')
@@ -778,14 +881,14 @@ class ResultsCacheTest(unittest.TestCase):
mock_logger = mock.Mock(spec=logger.Logger)
mock_label = MockLabel('mock_label', 'chromeos_image', '/tmp', 'lumpy',
- 'remote', 'image_args', 'cache_dir', 'average',
- 'gcc', None)
+ 'remote', 'image_args', 'cache_dir', 'average', 'gcc',
+ None)
+
def setUp(self):
self.results_cache = ResultsCache()
mock_machine = machine_manager.MockCrosMachine('falco.cros',
- '/tmp/chromeos',
- 'average')
+ '/tmp/chromeos', 'average')
mock_mm = machine_manager.MockMachineManager('/tmp/chromeos_root', 0,
'average')
@@ -794,9 +897,9 @@ class ResultsCacheTest(unittest.TestCase):
self.results_cache.Init(self.mock_label.chromeos_image,
self.mock_label.chromeos_root,
'sunspider',
- 1, # benchmark_run.iteration,
- '', # benchmark_run.test_args,
- '', # benchmark_run.profiler_args,
+ 1, # benchmark_run.iteration,
+ '', # benchmark_run.test_args,
+ '', # benchmark_run.profiler_args,
mock_mm,
mock_machine,
self.mock_label.board,
@@ -805,24 +908,20 @@ class ResultsCacheTest(unittest.TestCase):
self.mock_logger,
'average',
self.mock_label,
- '', # benchmark_run.share_cache
+ '', # benchmark_run.share_cache
'telemetry_Crosperf',
- True, # benchmark_run.show_all_results
- False) # benchmark_run.run_local
-
+ True, # benchmark_run.show_all_results
+ False) # benchmark_run.run_local
- @mock.patch.object (image_checksummer.ImageChecksummer, 'Checksum')
+ @mock.patch.object(image_checksummer.ImageChecksummer, 'Checksum')
def test_get_cache_dir_for_write(self, mock_checksum):
def FakeGetMachines(label):
- m1 = machine_manager.MockCrosMachine('lumpy1.cros',
- self.results_cache.chromeos_root,
- 'average')
- m2 = machine_manager.MockCrosMachine('lumpy2.cros',
- self.results_cache.chromeos_root,
- 'average')
- return [m1, m2]
-
+ m1 = machine_manager.MockCrosMachine(
+ 'lumpy1.cros', self.results_cache.chromeos_root, 'average')
+ m2 = machine_manager.MockCrosMachine(
+ 'lumpy2.cros', self.results_cache.chromeos_root, 'average')
+ return [m1, m2]
mock_checksum.return_value = 'FakeImageChecksumabc123'
self.results_cache.machine_manager.GetMachines = FakeGetMachines
@@ -841,7 +940,6 @@ class ResultsCacheTest(unittest.TestCase):
'abc987__6')
self.assertEqual(result_path, comp_path)
-
def test_form_cache_dir(self):
# This is very similar to the previous test (_FormCacheDir is called
# from _GetCacheDirForWrite).
@@ -856,21 +954,17 @@ class ResultsCacheTest(unittest.TestCase):
comp_path = os.path.join(os.getcwd(), 'cache_dir', test_dirname)
self.assertEqual(path1, comp_path)
-
- @mock.patch.object (image_checksummer.ImageChecksummer, 'Checksum')
+ @mock.patch.object(image_checksummer.ImageChecksummer, 'Checksum')
def test_get_cache_key_list(self, mock_checksum):
# This tests the mechanism that generates the various pieces of the
# cache directory name, based on various conditions.
def FakeGetMachines(label):
- m1 = machine_manager.MockCrosMachine('lumpy1.cros',
- self.results_cache.chromeos_root,
- 'average')
- m2 = machine_manager.MockCrosMachine('lumpy2.cros',
- self.results_cache.chromeos_root,
- 'average')
- return [m1, m2]
-
+ m1 = machine_manager.MockCrosMachine(
+ 'lumpy1.cros', self.results_cache.chromeos_root, 'average')
+ m2 = machine_manager.MockCrosMachine(
+ 'lumpy2.cros', self.results_cache.chromeos_root, 'average')
+ return [m1, m2]
mock_checksum.return_value = 'FakeImageChecksumabc123'
self.results_cache.machine_manager.GetMachines = FakeGetMachines
@@ -879,7 +973,7 @@ class ResultsCacheTest(unittest.TestCase):
# Test 1. Generating cache name for reading (not writing).
key_list = self.results_cache._GetCacheKeyList(True)
- self.assertEqual(key_list[0], '*') # Machine checksum value, for read.
+ self.assertEqual(key_list[0], '*') # Machine checksum value, for read.
self.assertEqual(key_list[1], 'sunspider')
self.assertEqual(key_list[2], '1')
self.assertEqual(key_list[3], 'fda29412ceccb72977516c4785d08e2c')
@@ -929,13 +1023,13 @@ class ResultsCacheTest(unittest.TestCase):
self.assertEqual(key_list[4], 'FakeImageChecksumabc123')
self.assertEqual(key_list[5], 'FakeMachineChecksumabc987')
-
- @mock.patch.object (command_executer.CommandExecuter, 'RunCommand')
- @mock.patch.object (os.path, 'isdir')
- @mock.patch.object (Result, 'CreateFromCacheHit')
+ @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
+ @mock.patch.object(os.path, 'isdir')
+ @mock.patch.object(Result, 'CreateFromCacheHit')
def test_read_result(self, mock_create, mock_isdir, mock_runcmd):
self.fakeCacheReturnResult = None
+
def FakeGetCacheDirForRead():
return self.fakeCacheReturnResult
@@ -950,7 +1044,7 @@ class ResultsCacheTest(unittest.TestCase):
# Set up results_cache _GetCacheDirFor{Read,Write} to return
# self.fakeCacheReturnResult, which is initially None (see above).
# So initially, no cache dir is returned.
- self.results_cache._GetCacheDirForRead = FakeGetCacheDirForRead
+ self.results_cache._GetCacheDirForRead = FakeGetCacheDirForRead
self.results_cache._GetCacheDirForWrite = FakeGetCacheDirForWrite
mock_isdir.return_value = True
diff --git a/crosperf/results_organizer.py b/crosperf/results_organizer.py
index efd70c63..39554c41 100644
--- a/crosperf/results_organizer.py
+++ b/crosperf/results_organizer.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Parse data from benchmark_runs for tabulator."""
from __future__ import print_function
@@ -13,7 +12,8 @@ import sys
from cros_utils import misc
-TELEMETRY_RESULT_DEFAULTS_FILE = "default-telemetry-results.json"
+TELEMETRY_RESULT_DEFAULTS_FILE = 'default-telemetry-results.json'
+
class ResultOrganizer(object):
"""Create a dict from benchmark_runs.
@@ -30,11 +30,14 @@ class ResultOrganizer(object):
]}.
"""
- def __init__(self, benchmark_runs, labels, benchmarks=None,
+ def __init__(self,
+ benchmark_runs,
+ labels,
+ benchmarks=None,
json_report=False):
self.result = {}
self.labels = []
- self.prog = re.compile(r"(\w+)\{(\d+)\}")
+ self.prog = re.compile(r'(\w+)\{(\d+)\}')
self.benchmarks = benchmarks
if not self.benchmarks:
self.benchmarks = []
@@ -62,7 +65,7 @@ class ResultOrganizer(object):
if not show_all_results:
summary_list = self._GetSummaryResults(benchmark.test_name)
if len(summary_list) > 0:
- summary_list.append("retval")
+ summary_list.append('retval')
else:
# Did not find test_name in json file; therefore show everything.
show_all_results = True
@@ -77,7 +80,8 @@ class ResultOrganizer(object):
cur_dict['retval'] = 1
# TODO: This output should be sent via logger.
print("WARNING: Test '%s' appears to have succeeded but returned"
- " no results." % benchmark_name, file=sys.stderr)
+ ' no results.' % benchmark_name,
+ file=sys.stderr)
if json_report and benchmark_run.machine:
cur_dict['machine'] = benchmark_run.machine.name
cur_dict['machine_checksum'] = benchmark_run.machine.checksum
@@ -117,8 +121,7 @@ class ResultOrganizer(object):
for run in label:
for key in run:
if re.match(self.prog, key):
- max_dup = max(max_dup,
- int(re.search(self.prog, key).group(2)))
+ max_dup = max(max_dup, int(re.search(self.prog, key).group(2)))
return max_dup
def _GetNonDupLabel(self, max_dup, label):
@@ -134,7 +137,7 @@ class ResultOrganizer(object):
if re.match(self.prog, key):
new_key = re.search(self.prog, key).group(1)
index = int(re.search(self.prog, key).group(2))
- new_label[start_index+index][new_key] = str(value)
+ new_label[start_index + index][new_key] = str(value)
del new_run[key]
return new_label
@@ -144,4 +147,4 @@ class ResultOrganizer(object):
if benchmark.name == bench:
if not benchmark.iteration_adjusted:
benchmark.iteration_adjusted = True
- benchmark.iterations *= (max_dup +1)
+ benchmark.iterations *= (max_dup + 1)
diff --git a/crosperf/results_organizer_unittest.py b/crosperf/results_organizer_unittest.py
index c170f0a3..914ecc5e 100755
--- a/crosperf/results_organizer_unittest.py
+++ b/crosperf/results_organizer_unittest.py
@@ -3,7 +3,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Testing of ResultsOrganizer. We create some labels, benchmark_runs
and then create a ResultsOrganizer, after that, we compare the result of
ResultOrganizer"""
@@ -17,101 +16,88 @@ from results_organizer import ResultOrganizer
import mock_instance
result = {'benchmark1': [[{'': 'PASS',
- 'bool': 'True',
- 'milliseconds_1': '1',
- 'milliseconds_2': '8',
- 'milliseconds_3': '9.2',
- 'ms_1': '2.1',
- 'total': '5'},
- {'test': '2'},
- {'test': '4'},
- {'': 'PASS',
- 'bool': 'FALSE',
- 'milliseconds_1': '3',
- 'milliseconds_2': '5',
- 'ms_1': '2.2',
- 'total': '6'},
- {'test': '3'},
- {'test': '4'}],
- [{'': 'PASS',
- 'bool': 'FALSE',
- 'milliseconds_4': '30',
- 'milliseconds_5': '50',
- 'ms_1': '2.23',
- 'total': '6'},
- {'test': '5'},
- {'test': '4'},
- {'': 'PASS',
- 'bool': 'FALSE',
- 'milliseconds_1': '3',
- 'milliseconds_6': '7',
- 'ms_1': '2.3',
- 'total': '7'},
- {'test': '2'},
- {'test': '6'}]],
- 'benchmark2': [[{'': 'PASS',
- 'bool': 'TRUE',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2.3',
- 'total': '7'},
- {'test': '2'},
- {'test': '6'},
- {'': 'PASS',
- 'bool': 'TRUE',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2.2',
- 'total': '7'},
- {'test': '2'},
- {'test': '2'}],
- [{'': 'PASS',
- 'bool': 'TRUE',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2',
- 'total': '7'},
- {'test': '2'},
- {'test': '4'},
- {'': 'PASS',
- 'bool': 'TRUE',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '1',
- 'total': '7'},
- {'test': '1'},
- {'test': '6'}]]}
+ 'bool': 'True',
+ 'milliseconds_1': '1',
+ 'milliseconds_2': '8',
+ 'milliseconds_3': '9.2',
+ 'ms_1': '2.1',
+ 'total': '5'}, {'test': '2'}, {'test': '4'},
+ {'': 'PASS',
+ 'bool': 'FALSE',
+ 'milliseconds_1': '3',
+ 'milliseconds_2': '5',
+ 'ms_1': '2.2',
+ 'total': '6'}, {'test': '3'}, {'test': '4'}],
+ [{'': 'PASS',
+ 'bool': 'FALSE',
+ 'milliseconds_4': '30',
+ 'milliseconds_5': '50',
+ 'ms_1': '2.23',
+ 'total': '6'}, {'test': '5'}, {'test': '4'},
+ {'': 'PASS',
+ 'bool': 'FALSE',
+ 'milliseconds_1': '3',
+ 'milliseconds_6': '7',
+ 'ms_1': '2.3',
+ 'total': '7'}, {'test': '2'}, {'test': '6'}]],
+ 'benchmark2': [[{'': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2.3',
+ 'total': '7'}, {'test': '2'}, {'test': '6'},
+ {'': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2.2',
+ 'total': '7'}, {'test': '2'}, {'test': '2'}],
+ [{'': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2',
+ 'total': '7'}, {'test': '2'}, {'test': '4'},
+ {'': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '1',
+ 'total': '7'}, {'test': '1'}, {'test': '6'}]]}
+
class ResultOrganizerTest(unittest.TestCase):
+
def testResultOrganizer(self):
labels = [mock_instance.label1, mock_instance.label2]
benchmarks = [mock_instance.benchmark1, mock_instance.benchmark2]
- benchmark_runs = [None]*8
- benchmark_runs[0] = BenchmarkRun("b1", benchmarks[0],
- labels[0], 1, "", "", "", "average", "")
- benchmark_runs[1] = BenchmarkRun("b2", benchmarks[0],
- labels[0], 2, "", "", "", "average", "")
- benchmark_runs[2] = BenchmarkRun("b3", benchmarks[0],
- labels[1], 1, "", "", "", "average", "")
- benchmark_runs[3] = BenchmarkRun("b4", benchmarks[0],
- labels[1], 2, "", "", "", "average", "")
- benchmark_runs[4] = BenchmarkRun("b5", benchmarks[1],
- labels[0], 1, "", "", "", "average", "")
- benchmark_runs[5] = BenchmarkRun("b6", benchmarks[1],
- labels[0], 2, "", "", "", "average", "")
- benchmark_runs[6] = BenchmarkRun("b7", benchmarks[1],
- labels[1], 1, "", "", "", "average", "")
- benchmark_runs[7] = BenchmarkRun("b8", benchmarks[1],
- labels[1], 2, "", "", "", "average", "")
+ benchmark_runs = [None] * 8
+ benchmark_runs[0] = BenchmarkRun('b1', benchmarks[0], labels[0], 1, '', '',
+ '', 'average', '')
+ benchmark_runs[1] = BenchmarkRun('b2', benchmarks[0], labels[0], 2, '', '',
+ '', 'average', '')
+ benchmark_runs[2] = BenchmarkRun('b3', benchmarks[0], labels[1], 1, '', '',
+ '', 'average', '')
+ benchmark_runs[3] = BenchmarkRun('b4', benchmarks[0], labels[1], 2, '', '',
+ '', 'average', '')
+ benchmark_runs[4] = BenchmarkRun('b5', benchmarks[1], labels[0], 1, '', '',
+ '', 'average', '')
+ benchmark_runs[5] = BenchmarkRun('b6', benchmarks[1], labels[0], 2, '', '',
+ '', 'average', '')
+ benchmark_runs[6] = BenchmarkRun('b7', benchmarks[1], labels[1], 1, '', '',
+ '', 'average', '')
+ benchmark_runs[7] = BenchmarkRun('b8', benchmarks[1], labels[1], 2, '', '',
+ '', 'average', '')
i = 0
for b in benchmark_runs:
- b.result = Result("", b.label, "average", "machine")
+ b.result = Result('', b.label, 'average', 'machine')
b.result.keyvals = mock_instance.keyval[i]
i += 1
ro = ResultOrganizer(benchmark_runs, labels, benchmarks)
self.assertEqual(ro.result, result)
-if __name__ == "__main__":
+
+if __name__ == '__main__':
unittest.main()
diff --git a/crosperf/results_report.py b/crosperf/results_report.py
index 9734eb32..f5d71aee 100644
--- a/crosperf/results_report.py
+++ b/crosperf/results_report.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""A module to handle the report format."""
from __future__ import print_function
@@ -63,8 +62,8 @@ def ParseChromeosImage(chromeos_image):
# chromeos_image should have been something like:
# <path>/<board-trybot-release>/<chromeos-version>/chromiumos_test_image.bin"
num_pieces = len(pieces)
- if pieces[num_pieces-1] == "chromiumos_test_image.bin":
- version = pieces[num_pieces-2]
+ if pieces[num_pieces - 1] == 'chromiumos_test_image.bin':
+ version = pieces[num_pieces - 2]
# Find last '.' in the version and chop it off (removing the .datatime
# piece from local builds).
loc = version.rfind('.')
@@ -79,6 +78,7 @@ def ParseChromeosImage(chromeos_image):
image = real_file
return version, image
+
class ResultsReport(object):
"""Class to handle the report format."""
MAX_COLOR_CODE = 255
@@ -100,54 +100,41 @@ class ResultsReport(object):
return labels
def GetFullTables(self, perf=False):
- columns = [Column(RawResult(),
- Format()),
- Column(MinResult(),
- Format()),
- Column(MaxResult(),
- Format()),
- Column(AmeanResult(),
- Format()),
- Column(StdResult(),
- Format(), "StdDev"),
- Column(CoeffVarResult(),
- CoeffVarFormat(), "StdDev/Mean"),
- Column(GmeanRatioResult(),
- RatioFormat(), "GmeanSpeedup"),
- Column(PValueResult(),
- PValueFormat(), "p-value")
- ]
+ columns = [Column(RawResult(), Format()), Column(
+ MinResult(), Format()), Column(MaxResult(),
+ Format()), Column(AmeanResult(),
+ Format()),
+ Column(StdResult(), Format(),
+ 'StdDev'), Column(CoeffVarResult(), CoeffVarFormat(),
+ 'StdDev/Mean'),
+ Column(GmeanRatioResult(), RatioFormat(),
+ 'GmeanSpeedup'), Column(PValueResult(), PValueFormat(),
+ 'p-value')]
if not perf:
- return self._GetTables(self.labels, self.benchmark_runs, columns,
- "full")
- return self._GetPerfTables(self.labels, columns, "full")
+ return self._GetTables(self.labels, self.benchmark_runs, columns, 'full')
+ return self._GetPerfTables(self.labels, columns, 'full')
def GetSummaryTables(self, perf=False):
- columns = [Column(AmeanResult(),
- Format()),
- Column(StdResult(),
- Format(), "StdDev"),
- Column(CoeffVarResult(),
- CoeffVarFormat(), "StdDev/Mean"),
- Column(GmeanRatioResult(),
- RatioFormat(), "GmeanSpeedup"),
- Column(PValueResult(),
- PValueFormat(), "p-value")
- ]
+ columns = [Column(AmeanResult(), Format()), Column(StdResult(), Format(),
+ 'StdDev'),
+ Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
+ Column(GmeanRatioResult(), RatioFormat(),
+ 'GmeanSpeedup'), Column(PValueResult(), PValueFormat(),
+ 'p-value')]
if not perf:
return self._GetTables(self.labels, self.benchmark_runs, columns,
- "summary")
- return self._GetPerfTables(self.labels, columns, "summary")
+ 'summary')
+ return self._GetPerfTables(self.labels, columns, 'summary')
def _ParseColumn(self, columns, iteration):
new_column = []
for column in columns:
- if column.result.__class__.__name__ != "RawResult":
- #TODO(asharif): tabulator should support full table natively.
+ if column.result.__class__.__name__ != 'RawResult':
+ #TODO(asharif): tabulator should support full table natively.
new_column.append(column)
else:
for i in range(iteration):
- cc = Column(LiteralResult(i), Format(), str(i+1))
+ cc = Column(LiteralResult(i), Format(), str(i + 1))
new_column.append(cc)
return new_column
@@ -159,12 +146,12 @@ class ResultsReport(object):
return True
def _GetTableHeader(self, benchmark):
- benchmark_info = ("Benchmark: {0}; Iterations: {1}"
+ benchmark_info = ('Benchmark: {0}; Iterations: {1}'
.format(benchmark.name, benchmark.iterations))
cell = Cell()
cell.string_value = benchmark_info
cell.header = True
- return [[cell]]
+ return [[cell]]
def _GetTables(self, labels, benchmark_runs, columns, table_type):
tables = []
@@ -179,10 +166,10 @@ class ResultsReport(object):
break
ben_table = self._GetTableHeader(benchmark)
- if self._AreAllRunsEmpty(runs):
+ if self._AreAllRunsEmpty(runs):
cell = Cell()
- cell.string_value = ("This benchmark contains no result."
- " Is the benchmark name valid?")
+ cell.string_value = ('This benchmark contains no result.'
+ ' Is the benchmark name valid?')
cell_table = [[cell]]
else:
tg = TableGenerator(runs, label_name)
@@ -214,7 +201,8 @@ class ResultsReport(object):
row_info = p_table.row_info[benchmark]
table = []
for event in benchmark_data:
- tg = TableGenerator(benchmark_data[event], label_names,
+ tg = TableGenerator(benchmark_data[event],
+ label_names,
sort=TableGenerator.SORT_BY_VALUES_DESC)
table = tg.GetTable(max(self.PERF_ROWS, row_info[event]))
parsed_columns = self._ParseColumn(columns, ben.iterations)
@@ -228,19 +216,19 @@ class ResultsReport(object):
return tables
def PrintTables(self, tables, out_to):
- output = ""
+ output = ''
if not tables:
return output
for table in tables:
- if out_to == "HTML":
+ if out_to == 'HTML':
tp = TablePrinter(table, TablePrinter.HTML)
- elif out_to == "PLAIN":
+ elif out_to == 'PLAIN':
tp = TablePrinter(table, TablePrinter.PLAIN)
- elif out_to == "CONSOLE":
+ elif out_to == 'CONSOLE':
tp = TablePrinter(table, TablePrinter.CONSOLE)
- elif out_to == "TSV":
+ elif out_to == 'TSV':
tp = TablePrinter(table, TablePrinter.TSV)
- elif out_to == "EMAIL":
+ elif out_to == 'EMAIL':
tp = TablePrinter(table, TablePrinter.EMAIL)
else:
pass
@@ -293,16 +281,21 @@ CPUInfo
def GetStatusTable(self):
"""Generate the status table by the tabulator."""
- table = [["", ""]]
- columns = [Column(LiteralResult(iteration=0), Format(), "Status"),
- Column(LiteralResult(iteration=1), Format(), "Failing Reason")]
+ table = [['', '']]
+ columns = [Column(
+ LiteralResult(iteration=0),
+ Format(),
+ 'Status'), Column(
+ LiteralResult(iteration=1),
+ Format(),
+ 'Failing Reason')]
for benchmark_run in self.benchmark_runs:
status = [benchmark_run.name, [benchmark_run.timeline.GetLastEvent(),
benchmark_run.failure_reason]]
table.append(status)
tf = TableFormatter(table, columns)
- cell_table = tf.GetCellTable("status")
+ cell_table = tf.GetCellTable('status')
return [cell_table]
def GetReport(self):
@@ -313,23 +306,20 @@ CPUInfo
if not perf_table:
perf_table = None
if not self.email:
- return self.TEXT % (self.experiment.name,
- self.PrintTables(summary_table, "CONSOLE"),
- self.experiment.machine_manager.num_reimages,
- self.PrintTables(status_table, "CONSOLE"),
- self.PrintTables(perf_table, "CONSOLE"),
- self.experiment.experiment_file,
- self.experiment.machine_manager.GetAllCPUInfo(
- self.experiment.labels))
-
- return self.TEXT % (self.experiment.name,
- self.PrintTables(summary_table, "EMAIL"),
- self.experiment.machine_manager.num_reimages,
- self.PrintTables(status_table, "EMAIL"),
- self.PrintTables(perf_table, "EMAIL"),
- self.experiment.experiment_file,
- self.experiment.machine_manager.GetAllCPUInfo(
- self.experiment.labels))
+ return self.TEXT % (
+ self.experiment.name, self.PrintTables(summary_table, 'CONSOLE'),
+ self.experiment.machine_manager.num_reimages,
+ self.PrintTables(status_table, 'CONSOLE'),
+ self.PrintTables(perf_table, 'CONSOLE'),
+ self.experiment.experiment_file,
+ self.experiment.machine_manager.GetAllCPUInfo(self.experiment.labels))
+
+ return self.TEXT % (
+ self.experiment.name, self.PrintTables(summary_table, 'EMAIL'),
+ self.experiment.machine_manager.num_reimages,
+ self.PrintTables(status_table, 'EMAIL'),
+ self.PrintTables(perf_table, 'EMAIL'), self.experiment.experiment_file,
+ self.experiment.machine_manager.GetAllCPUInfo(self.experiment.labels))
class HTMLResultsReport(ResultsReport):
@@ -489,11 +479,11 @@ pre {
</div>""" % (table, table, table)
def GetReport(self):
- chart_javascript = ""
+ chart_javascript = ''
charts = self._GetCharts(self.labels, self.benchmark_runs)
for chart in charts:
chart_javascript += chart.GetJavascript()
- chart_divs = ""
+ chart_divs = ''
for chart in charts:
chart_divs += chart.GetDiv()
@@ -501,30 +491,23 @@ pre {
full_table = self.GetFullTables()
perf_table = self.GetSummaryTables(perf=True)
if perf_table:
- perf_html = self.PERF_HTML % (
- self.PrintTables(perf_table, "HTML"),
- self.PrintTables(perf_table, "PLAIN"),
- self.PrintTables(perf_table, "TSV"),
- self._GetTabMenuHTML("perf")
- )
+ perf_html = self.PERF_HTML % (self.PrintTables(perf_table, 'HTML'),
+ self.PrintTables(perf_table, 'PLAIN'),
+ self.PrintTables(perf_table, 'TSV'),
+ self._GetTabMenuHTML('perf'))
perf_init = "switchTab('perf', 'html');"
else:
- perf_html = ""
- perf_init = ""
-
- return self.HTML % (perf_init,
- chart_javascript,
- self.PrintTables(summary_table, "HTML"),
- self.PrintTables(summary_table, "PLAIN"),
- self.PrintTables(summary_table, "TSV"),
- self._GetTabMenuHTML("summary"),
- perf_html,
- chart_divs,
- self.PrintTables(full_table, "HTML"),
- self.PrintTables(full_table, "PLAIN"),
- self.PrintTables(full_table, "TSV"),
- self._GetTabMenuHTML("full"),
- self.experiment.experiment_file)
+ perf_html = ''
+ perf_init = ''
+
+ return self.HTML % (
+ perf_init, chart_javascript, self.PrintTables(summary_table, 'HTML'),
+ self.PrintTables(summary_table, 'PLAIN'),
+ self.PrintTables(summary_table, 'TSV'), self._GetTabMenuHTML('summary'),
+ perf_html, chart_divs, self.PrintTables(full_table, 'HTML'),
+ self.PrintTables(full_table, 'PLAIN'),
+ self.PrintTables(full_table, 'TSV'), self._GetTabMenuHTML('full'),
+ self.experiment.experiment_file)
def _GetCharts(self, labels, benchmark_runs):
charts = []
@@ -534,32 +517,26 @@ pre {
runs = result[item]
tg = TableGenerator(runs, ro.labels)
table = tg.GetTable()
- columns = [Column(AmeanResult(),
- Format()),
- Column(MinResult(),
- Format()),
- Column(MaxResult(),
- Format())
- ]
+ columns = [Column(AmeanResult(), Format()), Column(MinResult(), Format()),
+ Column(MaxResult(), Format())]
tf = TableFormatter(table, columns)
- data_table = tf.GetCellTable("full")
+ data_table = tf.GetCellTable('full')
for i in range(2, len(data_table)):
cur_row_data = data_table[i]
test_key = cur_row_data[0].string_value
- title = "{0}: {1}".format(item, test_key.replace("/", ""))
+ title = '{0}: {1}'.format(item, test_key.replace('/', ''))
chart = ColumnChart(title, 300, 200)
- chart.AddColumn("Label", "string")
- chart.AddColumn("Average", "number")
- chart.AddColumn("Min", "number")
- chart.AddColumn("Max", "number")
- chart.AddSeries("Min", "line", "black")
- chart.AddSeries("Max", "line", "black")
+ chart.AddColumn('Label', 'string')
+ chart.AddColumn('Average', 'number')
+ chart.AddColumn('Min', 'number')
+ chart.AddColumn('Max', 'number')
+ chart.AddSeries('Min', 'line', 'black')
+ chart.AddSeries('Max', 'line', 'black')
cur_index = 1
for label in ro.labels:
- chart.AddRow([label, cur_row_data[cur_index].value,
- cur_row_data[cur_index + 1].value,
- cur_row_data[cur_index + 2].value])
+ chart.AddRow([label, cur_row_data[cur_index].value, cur_row_data[
+ cur_index + 1].value, cur_row_data[cur_index + 2].value])
if isinstance(cur_row_data[cur_index].value, str):
chart = None
break
@@ -568,8 +545,10 @@ pre {
charts.append(chart)
return charts
+
class JSONResultsReport(ResultsReport):
"""class to generate JASON report."""
+
def __init__(self, experiment, date=None, time=None):
super(JSONResultsReport, self).__init__(experiment)
self.ro = ResultOrganizer(experiment.benchmark_runs,
@@ -581,8 +560,8 @@ class JSONResultsReport(ResultsReport):
self.defaults = TelemetryDefaults()
if not self.date:
timestamp = datetime.datetime.strftime(datetime.datetime.now(),
- "%Y-%m-%d %H:%M:%S")
- date, time = timestamp.split(" ")
+ '%Y-%m-%d %H:%M:%S')
+ date, time = timestamp.split(' ')
self.date = date
self.time = time
@@ -653,9 +632,8 @@ class JSONResultsReport(ResultsReport):
json_results['detailed_results'] = detail_results
final_results.append(json_results)
- filename = "report_%s_%s_%s.%s.json" % (board, self.date,
- self.time.replace(':', '.'),
- compiler_string)
+ filename = 'report_%s_%s_%s.%s.json' % (
+ board, self.date, self.time.replace(':', '.'), compiler_string)
fullname = os.path.join(results_dir, filename)
- with open(fullname, "w") as fp:
+ with open(fullname, 'w') as fp:
json.dump(final_results, fp, indent=2)
diff --git a/crosperf/results_sorter.py b/crosperf/results_sorter.py
index e2caa41e..1ebbb8b4 100644
--- a/crosperf/results_sorter.py
+++ b/crosperf/results_sorter.py
@@ -1,8 +1,10 @@
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""Module to sort the results."""
+
+
class ResultSorter(object):
"""Class to sort the results."""
+
def __init__(self, benchmark_runs):
self.table = {}
for benchmark_run in benchmark_runs:
diff --git a/crosperf/schedv2.py b/crosperf/schedv2.py
index b73d384f..3a31d93c 100644
--- a/crosperf/schedv2.py
+++ b/crosperf/schedv2.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright 2015 Google Inc. All Rights Reserved.
@@ -16,79 +15,79 @@ from cros_utils import logger
class DutWorker(Thread):
- """Working thread for a dut."""
-
- def __init__(self, dut, sched):
- super(DutWorker, self).__init__(name='DutWorker-{}'.format(dut.name))
- self._dut = dut
- self._sched = sched
- self._stat_num_br_run = 0
- self._stat_num_reimage = 0
- self._stat_annotation = ""
- self._logger = logger.GetLogger(self._sched._experiment.log_dir)
- self.daemon = True
- self._terminated = False
- self._active_br = None
- # Race condition accessing _active_br between _execute_benchmark_run and
- # _terminate, so lock it up.
- self._active_br_lock = Lock()
-
- def terminate(self):
- self._terminated = True
- with self._active_br_lock:
- if self._active_br is not None:
- # BenchmarkRun.Terminate() terminates any running testcase via
- # suite_runner.Terminate and updates timeline.
- self._active_br.Terminate()
-
- def run(self):
- """Do the "run-test->(optionally reimage)->run-test" chore.
+ """Working thread for a dut."""
+
+ def __init__(self, dut, sched):
+ super(DutWorker, self).__init__(name='DutWorker-{}'.format(dut.name))
+ self._dut = dut
+ self._sched = sched
+ self._stat_num_br_run = 0
+ self._stat_num_reimage = 0
+ self._stat_annotation = ''
+ self._logger = logger.GetLogger(self._sched._experiment.log_dir)
+ self.daemon = True
+ self._terminated = False
+ self._active_br = None
+ # Race condition accessing _active_br between _execute_benchmark_run and
+ # _terminate, so lock it up.
+ self._active_br_lock = Lock()
+
+ def terminate(self):
+ self._terminated = True
+ with self._active_br_lock:
+ if self._active_br is not None:
+ # BenchmarkRun.Terminate() terminates any running testcase via
+ # suite_runner.Terminate and updates timeline.
+ self._active_br.Terminate()
+
+ def run(self):
+ """Do the "run-test->(optionally reimage)->run-test" chore.
Note - 'br' below means 'benchmark_run'.
"""
- # Firstly, handle benchmarkruns that have cache hit.
- br = self._sched.get_cached_benchmark_run()
- while br:
- try:
- self._stat_annotation = 'finishing cached {}'.format(br)
- br.run()
- except:
- traceback.print_exc(file=sys.stdout)
- br = self._sched.get_cached_benchmark_run()
-
- # Secondly, handle benchmarkruns that needs to be run on dut.
- self._setup_dut_label()
- try:
- self._logger.LogOutput("{} started.".format(self))
- while not self._terminated:
- br = self._sched.get_benchmark_run(self._dut)
- if br is None:
- # No br left for this label. Considering reimaging.
- label = self._sched.allocate_label(self._dut)
- if label is None:
- # No br even for other labels. We are done.
- self._logger.LogOutput("ImageManager found no label "
- "for dut, stopping working "
- "thread {}.".format(self))
- break
- if self._reimage(label):
- # Reimage to run other br fails, dut is doomed, stop
- # this thread.
- self._logger.LogWarning("Re-image failed, dut "
- "in an unstable state, stopping "
- "working thread {}.".format(self))
- break
- else:
- # Execute the br.
- self._execute_benchmark_run(br)
- finally:
- self._stat_annotation = "finished"
- # Thread finishes. Notify scheduler that I'm done.
- self._sched.dut_worker_finished(self)
-
- def _reimage(self, label):
- """Reimage image to label.
+ # Firstly, handle benchmarkruns that have cache hit.
+ br = self._sched.get_cached_benchmark_run()
+ while br:
+ try:
+ self._stat_annotation = 'finishing cached {}'.format(br)
+ br.run()
+ except:
+ traceback.print_exc(file=sys.stdout)
+ br = self._sched.get_cached_benchmark_run()
+
+    # Secondly, handle benchmarkruns that need to be run on dut.
+ self._setup_dut_label()
+ try:
+ self._logger.LogOutput('{} started.'.format(self))
+ while not self._terminated:
+ br = self._sched.get_benchmark_run(self._dut)
+ if br is None:
+ # No br left for this label. Considering reimaging.
+ label = self._sched.allocate_label(self._dut)
+ if label is None:
+ # No br even for other labels. We are done.
+ self._logger.LogOutput('ImageManager found no label '
+ 'for dut, stopping working '
+ 'thread {}.'.format(self))
+ break
+ if self._reimage(label):
+ # Reimage to run other br fails, dut is doomed, stop
+ # this thread.
+ self._logger.LogWarning('Re-image failed, dut '
+ 'in an unstable state, stopping '
+ 'working thread {}.'.format(self))
+ break
+ else:
+ # Execute the br.
+ self._execute_benchmark_run(br)
+ finally:
+ self._stat_annotation = 'finished'
+ # Thread finishes. Notify scheduler that I'm done.
+ self._sched.dut_worker_finished(self)
+
+ def _reimage(self, label):
+ """Reimage image to label.
Args:
label: the label to remimage onto dut.
@@ -97,235 +96,233 @@ class DutWorker(Thread):
0 if successful, otherwise 1.
"""
- # Termination could happen anywhere, check it.
- if self._terminated:
- return 1
-
- self._logger.LogOutput('Reimaging {} using {}'.format(self, label))
- self._stat_num_reimage += 1
- self._stat_annotation = 'reimaging using "{}"'.format(label.name)
- try:
- # Note, only 1 reimage at any given time, this is guaranteed in
- # ImageMachine, so no sync needed below.
- retval = self._sched._experiment.machine_manager.ImageMachine(
- self._dut, label)
- if retval:
- return 1
- except:
- return 1
-
- self._dut.label = label
- return 0
-
- def _execute_benchmark_run(self, br):
- """Execute a single benchmark_run.
+ # Termination could happen anywhere, check it.
+ if self._terminated:
+ return 1
+
+ self._logger.LogOutput('Reimaging {} using {}'.format(self, label))
+ self._stat_num_reimage += 1
+ self._stat_annotation = 'reimaging using "{}"'.format(label.name)
+ try:
+ # Note, only 1 reimage at any given time, this is guaranteed in
+ # ImageMachine, so no sync needed below.
+ retval = self._sched._experiment.machine_manager.ImageMachine(self._dut,
+ label)
+ if retval:
+ return 1
+ except:
+ return 1
+
+ self._dut.label = label
+ return 0
+
+ def _execute_benchmark_run(self, br):
+ """Execute a single benchmark_run.
Note - this function never throws exceptions.
"""
- # Termination could happen anywhere, check it.
- if self._terminated:
- return
+ # Termination could happen anywhere, check it.
+ if self._terminated:
+ return
+
+ self._logger.LogOutput('{} started working on {}'.format(self, br))
+ self._stat_num_br_run += 1
+ self._stat_annotation = 'executing {}'.format(br)
+    # benchmark_run.run does not throw, but just play it safe here.
+ try:
+ assert br.owner_thread is None
+ br.owner_thread = self
+ with self._active_br_lock:
+ self._active_br = br
+ br.run()
+ finally:
+ self._sched._experiment.BenchmarkRunFinished(br)
+ with self._active_br_lock:
+ self._active_br = None
- self._logger.LogOutput('{} started working on {}'.format(self, br))
- self._stat_num_br_run += 1
- self._stat_annotation = 'executing {}'.format(br)
- # benchmark_run.run does not throws, but just play it safe here.
- try:
- assert br.owner_thread is None
- br.owner_thread = self
- with self._active_br_lock:
- self._active_br = br
- br.run()
- finally:
- self._sched._experiment.BenchmarkRunFinished(br)
- with self._active_br_lock:
- self._active_br = None
-
- def _setup_dut_label(self):
- """Try to match dut image with a certain experiment label.
+ def _setup_dut_label(self):
+ """Try to match dut image with a certain experiment label.
If such match is found, we just skip doing reimage and jump to execute
some benchmark_runs.
"""
- checksum_file = "/usr/local/osimage_checksum_file"
- try:
- rv, checksum, _ = command_executer.GetCommandExecuter().\
- CrosRunCommandWOutput(
- "cat " + checksum_file,
- chromeos_root=self._sched._labels[0].chromeos_root,
- machine=self._dut.name,
- print_to_console=False)
- if rv == 0:
- checksum = checksum.strip()
- for l in self._sched._labels:
- if l.checksum == checksum:
- self._logger.LogOutput(
- "Dut '{}' is pre-installed with '{}'".format(
- self._dut.name, l))
- self._dut.label = l
- return
- except:
- traceback.print_exc(file=sys.stdout)
- self._dut.label = None
-
- def __str__(self):
- return 'DutWorker[dut="{}", label="{}"]'.format(
- self._dut.name, self._dut.label.name if self._dut.label else "None")
-
- def dut(self):
- return self._dut
-
- def status_str(self):
- """Report thread status."""
-
- return ('Worker thread "{}", label="{}", benchmark_run={}, '
- 'reimage={}, now {}'.format(
- self._dut.name,
- 'None' if self._dut.label is None else self._dut.label.name,
- self._stat_num_br_run,
- self._stat_num_reimage,
- self._stat_annotation))
+ checksum_file = '/usr/local/osimage_checksum_file'
+ try:
+ rv, checksum, _ = command_executer.GetCommandExecuter().\
+ CrosRunCommandWOutput(
+ 'cat ' + checksum_file,
+ chromeos_root=self._sched._labels[0].chromeos_root,
+ machine=self._dut.name,
+ print_to_console=False)
+ if rv == 0:
+ checksum = checksum.strip()
+ for l in self._sched._labels:
+ if l.checksum == checksum:
+ self._logger.LogOutput("Dut '{}' is pre-installed with '{}'".format(
+ self._dut.name, l))
+ self._dut.label = l
+ return
+ except:
+ traceback.print_exc(file=sys.stdout)
+ self._dut.label = None
+
+ def __str__(self):
+ return 'DutWorker[dut="{}", label="{}"]'.format(
+ self._dut.name, self._dut.label.name if self._dut.label else 'None')
+
+ def dut(self):
+ return self._dut
+
+ def status_str(self):
+ """Report thread status."""
+
+ return ('Worker thread "{}", label="{}", benchmark_run={}, '
+ 'reimage={}, now {}'.format(
+ self._dut.name, 'None' if self._dut.label is None else
+ self._dut.label.name, self._stat_num_br_run,
+ self._stat_num_reimage, self._stat_annotation))
+
class BenchmarkRunCacheReader(Thread):
- """The thread to read cache for a list of benchmark_runs.
+ """The thread to read cache for a list of benchmark_runs.
On creation, each instance of this class is given a br_list, which is a
subset of experiment._benchmark_runs.
"""
- def __init__(self, schedv2, br_list):
- super(BenchmarkRunCacheReader, self).__init__()
- self._schedv2 = schedv2
- self._br_list = br_list
- self._logger = self._schedv2._logger
-
- def run(self):
- for br in self._br_list:
- try:
- br.ReadCache()
- if br.cache_hit:
- self._logger.LogOutput('Cache hit - {}'.format(br))
- with self._schedv2._lock_on('_cached_br_list'):
- self._schedv2._cached_br_list.append(br)
- else:
- self._logger.LogOutput('Cache not hit - {}'.format(br))
- except:
- traceback.print_exc(file=sys.stderr)
+ def __init__(self, schedv2, br_list):
+ super(BenchmarkRunCacheReader, self).__init__()
+ self._schedv2 = schedv2
+ self._br_list = br_list
+ self._logger = self._schedv2._logger
+
+ def run(self):
+ for br in self._br_list:
+ try:
+ br.ReadCache()
+ if br.cache_hit:
+ self._logger.LogOutput('Cache hit - {}'.format(br))
+ with self._schedv2._lock_on('_cached_br_list'):
+ self._schedv2._cached_br_list.append(br)
+ else:
+ self._logger.LogOutput('Cache not hit - {}'.format(br))
+ except:
+ traceback.print_exc(file=sys.stderr)
class Schedv2(object):
- """New scheduler for crosperf."""
+ """New scheduler for crosperf."""
- def __init__(self, experiment):
- self._experiment = experiment
- self._logger = logger.GetLogger(experiment.log_dir)
+ def __init__(self, experiment):
+ self._experiment = experiment
+ self._logger = logger.GetLogger(experiment.log_dir)
- # Create shortcuts to nested data structure. "_duts" points to a list of
- # locked machines. _labels points to a list of all labels.
- self._duts = self._experiment.machine_manager._all_machines
- self._labels = self._experiment.labels
+ # Create shortcuts to nested data structure. "_duts" points to a list of
+ # locked machines. _labels points to a list of all labels.
+ self._duts = self._experiment.machine_manager._all_machines
+ self._labels = self._experiment.labels
- # Bookkeeping for synchronization.
- self._workers_lock = Lock()
- self._lock_map = defaultdict(lambda: Lock())
+ # Bookkeeping for synchronization.
+ self._workers_lock = Lock()
+ self._lock_map = defaultdict(lambda: Lock())
- # Test mode flag
- self._in_test_mode = test_flag.GetTestMode()
+ # Test mode flag
+ self._in_test_mode = test_flag.GetTestMode()
- # Read benchmarkrun cache.
- self._read_br_cache()
+ # Read benchmarkrun cache.
+ self._read_br_cache()
- # Mapping from label to a list of benchmark_runs.
- self._label_brl_map = dict([(l, []) for l in self._labels])
- for br in self._experiment.benchmark_runs:
- assert br.label in self._label_brl_map
- # Only put no-cache-hit br into the map.
- if br not in self._cached_br_list:
- self._label_brl_map[br.label].append(br)
+ # Mapping from label to a list of benchmark_runs.
+ self._label_brl_map = dict([(l, []) for l in self._labels])
+ for br in self._experiment.benchmark_runs:
+ assert br.label in self._label_brl_map
+ # Only put no-cache-hit br into the map.
+ if br not in self._cached_br_list:
+ self._label_brl_map[br.label].append(br)
- # Use machine image manager to calculate initial label allocation.
- self._mim = MachineImageManager(self._labels, self._duts)
- self._mim.compute_initial_allocation()
+ # Use machine image manager to calculate initial label allocation.
+ self._mim = MachineImageManager(self._labels, self._duts)
+ self._mim.compute_initial_allocation()
- # Create worker thread, 1 per dut.
- self._active_workers = [DutWorker(dut, self) for dut in self._duts]
- self._finished_workers = []
+ # Create worker thread, 1 per dut.
+ self._active_workers = [DutWorker(dut, self) for dut in self._duts]
+ self._finished_workers = []
- # Termination flag.
- self._terminated = False
+ # Termination flag.
+ self._terminated = False
- def run_sched(self):
- """Start all dut worker threads and return immediately."""
+ def run_sched(self):
+ """Start all dut worker threads and return immediately."""
- [w.start() for w in self._active_workers]
+ [w.start() for w in self._active_workers]
- def _read_br_cache(self):
- """Use multi-threading to read cache for all benchmarkruns.
+ def _read_br_cache(self):
+ """Use multi-threading to read cache for all benchmarkruns.
We do this by firstly creating a few threads, and then assign each
thread a segment of all brs. Each thread will check cache status for
each br and put those with cache into '_cached_br_list'."""
- self._cached_br_list = []
- n_benchmarkruns = len(self._experiment.benchmark_runs)
- if n_benchmarkruns <= 4:
- # Use single thread to read cache.
- self._logger.LogOutput(('Starting to read cache status for '
- '{} benchmark runs ...').format(n_benchmarkruns))
- BenchmarkRunCacheReader(self, self._experiment.benchmark_runs).run()
- return
-
- # Split benchmarkruns set into segments. Each segment will be handled by
- # a thread. Note, we use (x+3)/4 to mimic math.ceil(x/4).
- n_threads = max(2, min(20, (n_benchmarkruns + 3) / 4))
- self._logger.LogOutput(('Starting {} threads to read cache status for '
- '{} benchmark runs ...').format(
- n_threads, n_benchmarkruns))
- benchmarkruns_per_thread = (n_benchmarkruns + n_threads - 1) / n_threads
- benchmarkrun_segments = []
- for i in range(n_threads - 1):
- start = i * benchmarkruns_per_thread
- end = (i + 1) * benchmarkruns_per_thread
- benchmarkrun_segments.append(
- self._experiment.benchmark_runs[start : end])
- benchmarkrun_segments.append(self._experiment.benchmark_runs[
- (n_threads - 1) * benchmarkruns_per_thread:])
-
- # Assert: aggregation of benchmarkrun_segments equals to benchmark_runs.
- assert (sum([len(x) for x in benchmarkrun_segments]) == n_benchmarkruns)
-
- # Create and start all readers.
- cache_readers = [
- BenchmarkRunCacheReader(self, x) for x in benchmarkrun_segments]
-
- for x in cache_readers:
- x.start()
-
- # Wait till all readers finish.
- for x in cache_readers:
- x.join()
-
- # Summarize.
- self._logger.LogOutput(
- 'Total {} cache hit out of {} benchmark_runs.'.format(
- len(self._cached_br_list), n_benchmarkruns))
-
- def get_cached_benchmark_run(self):
- """Get a benchmark_run with 'cache hit'.
+ self._cached_br_list = []
+ n_benchmarkruns = len(self._experiment.benchmark_runs)
+ if n_benchmarkruns <= 4:
+ # Use single thread to read cache.
+ self._logger.LogOutput(('Starting to read cache status for '
+ '{} benchmark runs ...').format(n_benchmarkruns))
+ BenchmarkRunCacheReader(self, self._experiment.benchmark_runs).run()
+ return
+
+ # Split benchmarkruns set into segments. Each segment will be handled by
+ # a thread. Note, we use (x+3)/4 to mimic math.ceil(x/4).
+ n_threads = max(2, min(20, (n_benchmarkruns + 3) / 4))
+ self._logger.LogOutput(('Starting {} threads to read cache status for '
+ '{} benchmark runs ...').format(n_threads,
+ n_benchmarkruns))
+ benchmarkruns_per_thread = (n_benchmarkruns + n_threads - 1) / n_threads
+ benchmarkrun_segments = []
+ for i in range(n_threads - 1):
+ start = i * benchmarkruns_per_thread
+ end = (i + 1) * benchmarkruns_per_thread
+ benchmarkrun_segments.append(self._experiment.benchmark_runs[start:end])
+ benchmarkrun_segments.append(self._experiment.benchmark_runs[
+ (n_threads - 1) * benchmarkruns_per_thread:])
+
+ # Assert: aggregation of benchmarkrun_segments equals to benchmark_runs.
+ assert (sum([len(x) for x in benchmarkrun_segments]) == n_benchmarkruns)
+
+ # Create and start all readers.
+ cache_readers = [
+ BenchmarkRunCacheReader(self, x) for x in benchmarkrun_segments
+ ]
+
+ for x in cache_readers:
+ x.start()
+
+ # Wait till all readers finish.
+ for x in cache_readers:
+ x.join()
+
+ # Summarize.
+ self._logger.LogOutput(
+ 'Total {} cache hit out of {} benchmark_runs.'.format(
+ len(self._cached_br_list), n_benchmarkruns))
+
+ def get_cached_benchmark_run(self):
+ """Get a benchmark_run with 'cache hit'.
return:
The benchmark that has cache hit, if any. Otherwise none.
"""
- with self._lock_on('_cached_br_list'):
- if self._cached_br_list:
- return self._cached_br_list.pop()
- return None
+ with self._lock_on('_cached_br_list'):
+ if self._cached_br_list:
+ return self._cached_br_list.pop()
+ return None
- def get_benchmark_run(self, dut):
- """Get a benchmark_run (br) object for a certain dut.
+ def get_benchmark_run(self, dut):
+ """Get a benchmark_run (br) object for a certain dut.
Arguments:
dut: the dut for which a br is returned.
@@ -336,25 +333,25 @@ class Schedv2(object):
dut).
"""
- # If terminated, stop providing any br.
- if self._terminated:
- return None
+ # If terminated, stop providing any br.
+ if self._terminated:
+ return None
- # If dut bears an unrecognized label, return None.
- if dut.label is None:
- return None
+ # If dut bears an unrecognized label, return None.
+ if dut.label is None:
+ return None
- # If br list for the dut's label is empty (that means all brs for this
- # label have been done), return None.
- with self._lock_on(dut.label):
- brl = self._label_brl_map[dut.label]
- if not brl:
- return None
- # Return the first br.
- return brl.pop(0)
+ # If br list for the dut's label is empty (that means all brs for this
+ # label have been done), return None.
+ with self._lock_on(dut.label):
+ brl = self._label_brl_map[dut.label]
+ if not brl:
+ return None
+ # Return the first br.
+ return brl.pop(0)
- def allocate_label(self, dut):
- """Allocate a label to a dut.
+ def allocate_label(self, dut):
+ """Allocate a label to a dut.
The work is delegated to MachineImageManager.
@@ -368,48 +365,48 @@ class Schedv2(object):
The label or None.
"""
- if self._terminated:
- return None
+ if self._terminated:
+ return None
- return self._mim.allocate(dut, self)
+ return self._mim.allocate(dut, self)
- def dut_worker_finished(self, dut_worker):
- """Notify schedv2 that the dut_worker thread finished.
+ def dut_worker_finished(self, dut_worker):
+ """Notify schedv2 that the dut_worker thread finished.
Arguemnts:
dut_worker: the thread that is about to end."""
- self._logger.LogOutput("{} finished.".format(dut_worker))
- with self._workers_lock:
- self._active_workers.remove(dut_worker)
- self._finished_workers.append(dut_worker)
+ self._logger.LogOutput('{} finished.'.format(dut_worker))
+ with self._workers_lock:
+ self._active_workers.remove(dut_worker)
+ self._finished_workers.append(dut_worker)
- def is_complete(self):
- return len(self._active_workers) == 0
+ def is_complete(self):
+ return len(self._active_workers) == 0
- def _lock_on(self, object):
- return self._lock_map[object]
+ def _lock_on(self, object):
+ return self._lock_map[object]
- def terminate(self):
- """Mark flag so we stop providing br/reimages.
+ def terminate(self):
+ """Mark flag so we stop providing br/reimages.
Also terminate each DutWorker, so they refuse to execute br or reimage.
"""
- self._terminated = True
- for dut_worker in self._active_workers:
- dut_worker.terminate()
-
- def threads_status_as_string(self):
- """Report the dut worker threads status."""
-
- status = "{} active threads, {} finished threads.\n".format(
- len(self._active_workers), len(self._finished_workers))
- status += " Active threads:"
- for dw in self._active_workers:
- status += '\n ' + dw.status_str()
- if self._finished_workers:
- status += "\n Finished threads:"
- for dw in self._finished_workers:
- status += '\n ' + dw.status_str()
- return status
+ self._terminated = True
+ for dut_worker in self._active_workers:
+ dut_worker.terminate()
+
+ def threads_status_as_string(self):
+ """Report the dut worker threads status."""
+
+ status = '{} active threads, {} finished threads.\n'.format(
+ len(self._active_workers), len(self._finished_workers))
+ status += ' Active threads:'
+ for dw in self._active_workers:
+ status += '\n ' + dw.status_str()
+ if self._finished_workers:
+ status += '\n Finished threads:'
+ for dw in self._finished_workers:
+ status += '\n ' + dw.status_str()
+ return status
diff --git a/crosperf/schedv2_unittest.py b/crosperf/schedv2_unittest.py
index 3276cd0f..29ffcb41 100755
--- a/crosperf/schedv2_unittest.py
+++ b/crosperf/schedv2_unittest.py
@@ -20,7 +20,6 @@ from cros_utils.command_executer import CommandExecuter
from experiment_runner_unittest import FakeLogger
from schedv2 import Schedv2
-
EXPERIMENT_FILE_1 = """\
board: daisy
remote: chromeos-daisy1.cros chromeos-daisy2.cros
@@ -41,7 +40,6 @@ image2 {
}
"""
-
EXPERIMENT_FILE_WITH_FORMAT = """\
board: daisy
remote: chromeos-daisy1.cros chromeos-daisy2.cros
@@ -65,153 +63,152 @@ image2 {{
class Schedv2Test(unittest.TestCase):
- mock_logger = FakeLogger()
- mock_cmd_exec = mock.Mock(spec=CommandExecuter)
+ mock_logger = FakeLogger()
+ mock_cmd_exec = mock.Mock(spec=CommandExecuter)
- @mock.patch('benchmark_run.BenchmarkRun',
- new=benchmark_run.MockBenchmarkRun)
- def _make_fake_experiment(self, expstr):
- """Create fake experiment from string.
+ @mock.patch('benchmark_run.BenchmarkRun', new=benchmark_run.MockBenchmarkRun)
+ def _make_fake_experiment(self, expstr):
+ """Create fake experiment from string.
Note - we mock out BenchmarkRun in this step.
"""
- experiment_file = ExperimentFile(StringIO.StringIO(expstr))
- experiment = ExperimentFactory().GetExperiment(
- experiment_file, working_directory="", log_dir="")
- return experiment
-
- def test_remote(self):
- """Test that remotes in labels are aggregated into experiment.remote."""
-
- self.exp = self._make_fake_experiment(EXPERIMENT_FILE_1)
- self.exp.log_level = 'verbose'
- schedv2 = Schedv2(self.exp)
- self.assertIn('chromeos-daisy1.cros', self.exp.remote)
- self.assertIn('chromeos-daisy2.cros', self.exp.remote)
- self.assertIn('chromeos-daisy3.cros', self.exp.remote)
- self.assertIn('chromeos-daisy4.cros', self.exp.remote)
- self.assertIn('chromeos-daisy5.cros', self.exp.remote)
-
- def test_unreachable_remote(self):
- """Test unreachable remotes are removed from experiment remote and
+ experiment_file = ExperimentFile(StringIO.StringIO(expstr))
+ experiment = ExperimentFactory().GetExperiment(experiment_file,
+ working_directory='',
+ log_dir='')
+ return experiment
+
+ def test_remote(self):
+ """Test that remotes in labels are aggregated into experiment.remote."""
+
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_1)
+ self.exp.log_level = 'verbose'
+ schedv2 = Schedv2(self.exp)
+ self.assertIn('chromeos-daisy1.cros', self.exp.remote)
+ self.assertIn('chromeos-daisy2.cros', self.exp.remote)
+ self.assertIn('chromeos-daisy3.cros', self.exp.remote)
+ self.assertIn('chromeos-daisy4.cros', self.exp.remote)
+ self.assertIn('chromeos-daisy5.cros', self.exp.remote)
+
+ def test_unreachable_remote(self):
+ """Test unreachable remotes are removed from experiment remote and
label.remote."""
- def MockIsReachable(cm):
- return (cm.name != 'chromeos-daisy3.cros' and
- cm.name != 'chromeos-daisy5.cros')
-
- with mock.patch('machine_manager.MockCrosMachine.IsReachable',
- new=MockIsReachable) as f:
- self.exp = self._make_fake_experiment(EXPERIMENT_FILE_1)
- self.assertIn('chromeos-daisy1.cros', self.exp.remote)
- self.assertIn('chromeos-daisy2.cros', self.exp.remote)
- self.assertNotIn('chromeos-daisy3.cros', self.exp.remote)
- self.assertIn('chromeos-daisy4.cros', self.exp.remote)
- self.assertNotIn('chromeos-daisy5.cros', self.exp.remote)
-
- for l in self.exp.labels:
- if l.name == 'image2':
- self.assertNotIn('chromeos-daisy5.cros', l.remote)
- self.assertIn('chromeos-daisy4.cros', l.remote)
- elif l.name == 'image1':
- self.assertNotIn('chromeos-daisy3.cros', l.remote)
-
- @mock.patch('schedv2.BenchmarkRunCacheReader')
- def test_BenchmarkRunCacheReader_1(self, reader):
- """Test benchmarkrun set is split into 5 segments."""
-
- self.exp = self._make_fake_experiment(
- EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=9))
- schedv2 = Schedv2(self.exp)
- # We have 9 * 2 == 18 brs, we use 5 threads, each reading 4, 4, 4,
- # 4, 2 brs respectively.
- # Assert that BenchmarkRunCacheReader() is called 5 times.
- self.assertEquals(reader.call_count, 5)
- # reader.call_args_list[n] - nth call.
- # reader.call_args_list[n][0] - positioned args in nth call.
- # reader.call_args_list[n][0][1] - the 2nd arg in nth call,
- # that is 'br_list' in 'schedv2.BenchmarkRunCacheReader'.
- self.assertEquals(len(reader.call_args_list[0][0][1]), 4)
- self.assertEquals(len(reader.call_args_list[1][0][1]), 4)
- self.assertEquals(len(reader.call_args_list[2][0][1]), 4)
- self.assertEquals(len(reader.call_args_list[3][0][1]), 4)
- self.assertEquals(len(reader.call_args_list[4][0][1]), 2)
-
- @mock.patch('schedv2.BenchmarkRunCacheReader')
- def test_BenchmarkRunCacheReader_2(self, reader):
- """Test benchmarkrun set is split into 4 segments."""
-
- self.exp = self._make_fake_experiment(
- EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=8))
- schedv2 = Schedv2(self.exp)
- # We have 8 * 2 == 16 brs, we use 4 threads, each reading 4 brs.
- self.assertEquals(reader.call_count, 4)
- self.assertEquals(len(reader.call_args_list[0][0][1]), 4)
- self.assertEquals(len(reader.call_args_list[1][0][1]), 4)
- self.assertEquals(len(reader.call_args_list[2][0][1]), 4)
- self.assertEquals(len(reader.call_args_list[3][0][1]), 4)
-
- @mock.patch('schedv2.BenchmarkRunCacheReader')
- def test_BenchmarkRunCacheReader_3(self, reader):
- """Test benchmarkrun set is split into 2 segments."""
-
- self.exp = self._make_fake_experiment(
- EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=3))
- schedv2 = Schedv2(self.exp)
- # We have 3 * 2 == 6 brs, we use 2 threads.
- self.assertEquals(reader.call_count, 2)
- self.assertEquals(len(reader.call_args_list[0][0][1]), 3)
- self.assertEquals(len(reader.call_args_list[1][0][1]), 3)
-
- @mock.patch('schedv2.BenchmarkRunCacheReader')
- def test_BenchmarkRunCacheReader_4(self, reader):
- """Test benchmarkrun set is not splitted."""
-
- self.exp = self._make_fake_experiment(
- EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=1))
- schedv2 = Schedv2(self.exp)
- # We have 1 * 2 == 2 br, so only 1 instance.
- self.assertEquals(reader.call_count, 1)
- self.assertEquals(len(reader.call_args_list[0][0][1]), 2)
-
- def test_cachehit(self):
- """Test cache-hit and none-cache-hit brs are properly organized."""
-
- def MockReadCache(br):
- br.cache_hit = (br.label.name == 'image2')
-
- with mock.patch('benchmark_run.MockBenchmarkRun.ReadCache',
- new=MockReadCache) as f:
- # We have 2 * 30 brs, half of which are put into _cached_br_list.
- self.exp = self._make_fake_experiment(
- EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=30))
- schedv2 = Schedv2(self.exp)
- self.assertEquals(len(schedv2._cached_br_list), 30)
- # The non-cache-hit brs are put into Schedv2._label_brl_map.
- self.assertEquals(reduce(lambda a, x: a + len(x[1]),
- schedv2._label_brl_map.iteritems(), 0),
- 30)
-
- def test_nocachehit(self):
- """Test no cache-hit."""
-
- def MockReadCache(br):
- br.cache_hit = False
-
- with mock.patch('benchmark_run.MockBenchmarkRun.ReadCache',
- new=MockReadCache) as f:
- # We have 2 * 30 brs, none of which are put into _cached_br_list.
- self.exp = self._make_fake_experiment(
- EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=30))
- schedv2 = Schedv2(self.exp)
- self.assertEquals(len(schedv2._cached_br_list), 0)
- # The non-cache-hit brs are put into Schedv2._label_brl_map.
- self.assertEquals(reduce(lambda a, x: a + len(x[1]),
- schedv2._label_brl_map.iteritems(), 0),
- 60)
+ def MockIsReachable(cm):
+ return (cm.name != 'chromeos-daisy3.cros' and
+ cm.name != 'chromeos-daisy5.cros')
+
+ with mock.patch('machine_manager.MockCrosMachine.IsReachable',
+ new=MockIsReachable) as f:
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_1)
+ self.assertIn('chromeos-daisy1.cros', self.exp.remote)
+ self.assertIn('chromeos-daisy2.cros', self.exp.remote)
+ self.assertNotIn('chromeos-daisy3.cros', self.exp.remote)
+ self.assertIn('chromeos-daisy4.cros', self.exp.remote)
+ self.assertNotIn('chromeos-daisy5.cros', self.exp.remote)
+
+ for l in self.exp.labels:
+ if l.name == 'image2':
+ self.assertNotIn('chromeos-daisy5.cros', l.remote)
+ self.assertIn('chromeos-daisy4.cros', l.remote)
+ elif l.name == 'image1':
+ self.assertNotIn('chromeos-daisy3.cros', l.remote)
+
+ @mock.patch('schedv2.BenchmarkRunCacheReader')
+ def test_BenchmarkRunCacheReader_1(self, reader):
+ """Test benchmarkrun set is split into 5 segments."""
+
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
+ kraken_iterations=9))
+ schedv2 = Schedv2(self.exp)
+ # We have 9 * 2 == 18 brs, we use 5 threads, each reading 4, 4, 4,
+ # 4, 2 brs respectively.
+ # Assert that BenchmarkRunCacheReader() is called 5 times.
+ self.assertEquals(reader.call_count, 5)
+ # reader.call_args_list[n] - nth call.
+ # reader.call_args_list[n][0] - positioned args in nth call.
+ # reader.call_args_list[n][0][1] - the 2nd arg in nth call,
+ # that is 'br_list' in 'schedv2.BenchmarkRunCacheReader'.
+ self.assertEquals(len(reader.call_args_list[0][0][1]), 4)
+ self.assertEquals(len(reader.call_args_list[1][0][1]), 4)
+ self.assertEquals(len(reader.call_args_list[2][0][1]), 4)
+ self.assertEquals(len(reader.call_args_list[3][0][1]), 4)
+ self.assertEquals(len(reader.call_args_list[4][0][1]), 2)
+
+ @mock.patch('schedv2.BenchmarkRunCacheReader')
+ def test_BenchmarkRunCacheReader_2(self, reader):
+ """Test benchmarkrun set is split into 4 segments."""
+
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
+ kraken_iterations=8))
+ schedv2 = Schedv2(self.exp)
+ # We have 8 * 2 == 16 brs, we use 4 threads, each reading 4 brs.
+ self.assertEquals(reader.call_count, 4)
+ self.assertEquals(len(reader.call_args_list[0][0][1]), 4)
+ self.assertEquals(len(reader.call_args_list[1][0][1]), 4)
+ self.assertEquals(len(reader.call_args_list[2][0][1]), 4)
+ self.assertEquals(len(reader.call_args_list[3][0][1]), 4)
+
+ @mock.patch('schedv2.BenchmarkRunCacheReader')
+ def test_BenchmarkRunCacheReader_3(self, reader):
+ """Test benchmarkrun set is split into 2 segments."""
+
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
+ kraken_iterations=3))
+ schedv2 = Schedv2(self.exp)
+ # We have 3 * 2 == 6 brs, we use 2 threads.
+ self.assertEquals(reader.call_count, 2)
+ self.assertEquals(len(reader.call_args_list[0][0][1]), 3)
+ self.assertEquals(len(reader.call_args_list[1][0][1]), 3)
+
+ @mock.patch('schedv2.BenchmarkRunCacheReader')
+ def test_BenchmarkRunCacheReader_4(self, reader):
+ """Test benchmarkrun set is not splitted."""
+
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
+ kraken_iterations=1))
+ schedv2 = Schedv2(self.exp)
+ # We have 1 * 2 == 2 br, so only 1 instance.
+ self.assertEquals(reader.call_count, 1)
+ self.assertEquals(len(reader.call_args_list[0][0][1]), 2)
+
+ def test_cachehit(self):
+ """Test cache-hit and none-cache-hit brs are properly organized."""
+
+ def MockReadCache(br):
+ br.cache_hit = (br.label.name == 'image2')
+
+ with mock.patch('benchmark_run.MockBenchmarkRun.ReadCache',
+ new=MockReadCache) as f:
+ # We have 2 * 30 brs, half of which are put into _cached_br_list.
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
+ kraken_iterations=30))
+ schedv2 = Schedv2(self.exp)
+ self.assertEquals(len(schedv2._cached_br_list), 30)
+ # The non-cache-hit brs are put into Schedv2._label_brl_map.
+ self.assertEquals(
+ reduce(lambda a, x: a + len(x[1]), schedv2._label_brl_map.iteritems(),
+ 0), 30)
+
+ def test_nocachehit(self):
+ """Test no cache-hit."""
+
+ def MockReadCache(br):
+ br.cache_hit = False
+
+ with mock.patch('benchmark_run.MockBenchmarkRun.ReadCache',
+ new=MockReadCache) as f:
+ # We have 2 * 30 brs, none of which are put into _cached_br_list.
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
+ kraken_iterations=30))
+ schedv2 = Schedv2(self.exp)
+ self.assertEquals(len(schedv2._cached_br_list), 0)
+ # The non-cache-hit brs are put into Schedv2._label_brl_map.
+ self.assertEquals(
+ reduce(lambda a, x: a + len(x[1]), schedv2._label_brl_map.iteritems(),
+ 0), 60)
if __name__ == '__main__':
- test_flag.SetTestMode(True)
- unittest.main()
-
+ test_flag.SetTestMode(True)
+ unittest.main()
diff --git a/crosperf/settings.py b/crosperf/settings.py
index 24613cf8..fe312c0f 100644
--- a/crosperf/settings.py
+++ b/crosperf/settings.py
@@ -1,5 +1,4 @@
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""Module to get the settings from experiment file."""
from __future__ import print_function
@@ -24,13 +23,13 @@ class Settings(object):
def AddField(self, field):
name = field.name
if name in self.fields:
- raise Exception("Field %s defined previously." % name)
+ raise Exception('Field %s defined previously.' % name)
self.fields[name] = field
def SetField(self, name, value, append=False):
if name not in self.fields:
- raise Exception("'%s' is not a valid field in '%s' settings"
- % (name, self.settings_type))
+ raise Exception("'%s' is not a valid field in '%s' settings" %
+ (name, self.settings_type))
if append:
self.fields[name].Append(value)
else:
@@ -64,17 +63,17 @@ class Settings(object):
"""Check that all required fields have been set."""
for name in self.fields:
if not self.fields[name].assigned and self.fields[name].required:
- raise Exception("Field %s is invalid." % name)
+ raise Exception('Field %s is invalid.' % name)
def GetXbuddyPath(self, path_str, board, chromeos_root, log_level):
- prefix = "remote"
+ prefix = 'remote'
l = logger.GetLogger()
- if path_str.find("trybot") < 0 and path_str.find(board) < 0:
- xbuddy_path = "%s/%s/%s" % (prefix, board, path_str)
+ if path_str.find('trybot') < 0 and path_str.find(board) < 0:
+ xbuddy_path = '%s/%s/%s' % (prefix, board, path_str)
else:
- xbuddy_path = "%s/%s" % (prefix, path_str)
+ xbuddy_path = '%s/%s' % (prefix, path_str)
image_downloader = ImageDownloader(l, log_level)
retval, image_path = image_downloader.Run(chromeos_root, xbuddy_path)
if retval != 0:
- raise Exception("Unable to find/download xbuddy image.")
+ raise Exception('Unable to find/download xbuddy image.')
return image_path
diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py
index 885f7767..65cca80e 100644
--- a/crosperf/settings_factory.py
+++ b/crosperf/settings_factory.py
@@ -1,9 +1,7 @@
-#!/usr/bin/python
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Setting files for global, benchmark and labels."""
from field import BooleanField
@@ -15,158 +13,195 @@ from settings import Settings
class BenchmarkSettings(Settings):
+
def __init__(self, name):
- super(BenchmarkSettings, self).__init__(name, "benchmark")
- self.AddField(TextField("test_name",
- description="The name of the test to run."
- "Defaults to the name of the benchmark."))
- self.AddField(TextField("test_args",
- description="Arguments to be passed to the "
- "test."))
- self.AddField(IntegerField("iterations", default=1,
- description="Number of iterations to run the "
- "test."))
- self.AddField(TextField("suite", default="",
- description="The type of the benchmark"))
- self.AddField(IntegerField("retries", default=0,
- description="Number of times to retry a "
- "benchmark run."))
- self.AddField(BooleanField("run_local",
- description="Run benchmark harness on the DUT. "
- "Currently only compatible with the suite: "
- "telemetry_Crosperf.",
- required=False, default=True))
+ super(BenchmarkSettings, self).__init__(name, 'benchmark')
+ self.AddField(TextField('test_name',
+ description='The name of the test to run.'
+ 'Defaults to the name of the benchmark.'))
+ self.AddField(TextField('test_args',
+ description='Arguments to be passed to the '
+ 'test.'))
+ self.AddField(IntegerField('iterations',
+ default=1,
+ description='Number of iterations to run the '
+ 'test.'))
+ self.AddField(TextField('suite',
+ default='',
+ description='The type of the benchmark'))
+ self.AddField(IntegerField('retries',
+ default=0,
+ description='Number of times to retry a '
+ 'benchmark run.'))
+ self.AddField(BooleanField('run_local',
+ description='Run benchmark harness on the DUT. '
+ 'Currently only compatible with the suite: '
+ 'telemetry_Crosperf.',
+ required=False,
+ default=True))
class LabelSettings(Settings):
+
def __init__(self, name):
- super(LabelSettings, self).__init__(name, "label")
- self.AddField(TextField("chromeos_image", required=False,
- description="The path to the image to run tests "
+ super(LabelSettings, self).__init__(name, 'label')
+ self.AddField(TextField('chromeos_image',
+ required=False,
+ description='The path to the image to run tests '
"on, for local/custom-built images. See 'build' "
- "option for official or trybot images."))
- self.AddField(TextField("chromeos_root",
- description="The path to a chromeos checkout which "
- "contains a src/scripts directory. Defaults to "
- "the chromeos checkout which contains the "
- "chromeos_image."))
- self.AddField(ListField("remote", description=
- "A comma-separated list of ip's of chromeos"
- "devices to run experiments on."))
- self.AddField(TextField("image_args", required=False,
- default="",
- description="Extra arguments to pass to "
- "image_chromeos.py."))
- self.AddField(TextField("cache_dir", default="",
- description="The cache dir for this image."))
- self.AddField(TextField("compiler", default="gcc",
- description="The compiler used to build the "
- "ChromeOS image (gcc or llvm)."))
- self.AddField(TextField("chrome_src",
- description="The path to the source of chrome. "
- "This is used to run telemetry benchmarks. "
- "The default one is the src inside chroot.",
- required=False, default=""))
- self.AddField(TextField("build",
- description="The xbuddy specification for an "
- "official or trybot image to use for tests. "
+ 'option for official or trybot images.'))
+ self.AddField(TextField('chromeos_root',
+ description='The path to a chromeos checkout which '
+ 'contains a src/scripts directory. Defaults to '
+ 'the chromeos checkout which contains the '
+ 'chromeos_image.'))
+ self.AddField(
+ ListField('remote',
+ description="A comma-separated list of ip's of chromeos"
+ 'devices to run experiments on.'))
+ self.AddField(TextField('image_args',
+ required=False,
+ default='',
+ description='Extra arguments to pass to '
+ 'image_chromeos.py.'))
+ self.AddField(TextField('cache_dir',
+ default='',
+ description='The cache dir for this image.'))
+ self.AddField(TextField('compiler',
+ default='gcc',
+ description='The compiler used to build the '
+ 'ChromeOS image (gcc or llvm).'))
+ self.AddField(TextField('chrome_src',
+ description='The path to the source of chrome. '
+ 'This is used to run telemetry benchmarks. '
+ 'The default one is the src inside chroot.',
+ required=False,
+ default=''))
+ self.AddField(TextField('build',
+ description='The xbuddy specification for an '
+ 'official or trybot image to use for tests. '
"'/remote' is assumed, and the board is given "
"elsewhere, so omit the '/remote/<board>/' xbuddy"
- "prefix.",
- required=False, default=""))
+ 'prefix.',
+ required=False,
+ default=''))
class GlobalSettings(Settings):
+
def __init__(self, name):
- super(GlobalSettings, self).__init__(name, "global")
- self.AddField(TextField("name",
- description="The name of the experiment. Just an "
- "identifier."))
- self.AddField(TextField("board", description="The target "
- "board for running experiments on, e.g. x86-alex."))
- self.AddField(ListField("remote",
+ super(GlobalSettings, self).__init__(name, 'global')
+ self.AddField(TextField('name',
+ description='The name of the experiment. Just an '
+ 'identifier.'))
+ self.AddField(TextField('board',
+ description='The target '
+ 'board for running experiments on, e.g. x86-alex.'))
+ self.AddField(ListField('remote',
description="A comma-separated list of ip's of "
- "chromeos devices to run experiments on."))
- self.AddField(BooleanField("rerun_if_failed", description="Whether to "
- "re-run failed test runs or not.",
+ 'chromeos devices to run experiments on.'))
+ self.AddField(BooleanField('rerun_if_failed',
+ description='Whether to '
+ 're-run failed test runs or not.',
default=False))
- self.AddField(BooleanField("rm_chroot_tmp", default=False,
- description="Whether to remove the test_that"
- "result in the chroot"))
- self.AddField(ListField("email", description="Space-seperated"
- "list of email addresses to send email to."))
- self.AddField(BooleanField("rerun", description="Whether to ignore the "
- "cache and for tests to be re-run.",
+ self.AddField(BooleanField('rm_chroot_tmp',
+ default=False,
+ description='Whether to remove the test_that'
+ 'result in the chroot'))
+ self.AddField(ListField('email',
+ description='Space-seperated'
+ 'list of email addresses to send email to.'))
+ self.AddField(BooleanField('rerun',
+ description='Whether to ignore the '
+ 'cache and for tests to be re-run.',
default=False))
- self.AddField(BooleanField("same_specs", default=True,
- description="Ensure cached runs are run on the "
- "same kind of devices which are specified as a "
- "remote."))
- self.AddField(BooleanField("same_machine", default=False,
- description="Ensure cached runs are run on the "
- "exact the same remote"))
- self.AddField(BooleanField("use_file_locks", default=False,
- description="Whether to use the file locks "
- "mechanism (deprecated) instead of the AFE "
- "server lock mechanism."))
- self.AddField(IntegerField("iterations", default=1,
- description="Number of iterations to run all "
- "tests."))
- self.AddField(TextField("chromeos_root",
- description="The path to a chromeos checkout which "
- "contains a src/scripts directory. Defaults to "
- "the chromeos checkout which contains the "
- "chromeos_image."))
- self.AddField(TextField("logging_level", default="average",
- description="The level of logging desired. "
+ self.AddField(BooleanField('same_specs',
+ default=True,
+ description='Ensure cached runs are run on the '
+ 'same kind of devices which are specified as a '
+ 'remote.'))
+ self.AddField(BooleanField('same_machine',
+ default=False,
+ description='Ensure cached runs are run on the '
+ 'exact the same remote'))
+ self.AddField(BooleanField('use_file_locks',
+ default=False,
+ description='Whether to use the file locks '
+ 'mechanism (deprecated) instead of the AFE '
+ 'server lock mechanism.'))
+ self.AddField(IntegerField('iterations',
+ default=1,
+ description='Number of iterations to run all '
+ 'tests.'))
+ self.AddField(TextField('chromeos_root',
+ description='The path to a chromeos checkout which '
+ 'contains a src/scripts directory. Defaults to '
+ 'the chromeos checkout which contains the '
+ 'chromeos_image.'))
+ self.AddField(TextField('logging_level',
+ default='average',
+ description='The level of logging desired. '
"Options are 'quiet', 'average', and 'verbose'."))
- self.AddField(IntegerField("acquire_timeout", default=0,
- description="Number of seconds to wait for "
- "machine before exit if all the machines in "
- "the experiment file are busy. Default is 0"))
- self.AddField(TextField("perf_args", default="",
- description="The optional profile command. It "
- "enables perf commands to record perforamance "
- "related counters. It must start with perf "
- "command record or stat followed by arguments."))
- self.AddField(TextField("cache_dir", default="",
- description="The abs path of cache dir. "
- "Default is /home/$(whoami)/cros_scratch."))
- self.AddField(BooleanField("cache_only", default=False,
- description="Whether to use only cached "
- "results (do not rerun failed tests)."))
- self.AddField(BooleanField("no_email", default=False,
- description="Whether to disable the email to "
- "user after crosperf finishes."))
- self.AddField(BooleanField("json_report", default=False,
- description="Whether to generate a json version"
- " of the report, for archiving."))
- self.AddField(BooleanField("show_all_results", default=False,
- description="When running Telemetry tests, "
- "whether to all the results, instead of just "
- "the default (summary) results."))
- self.AddField(TextField("share_cache", default="",
- description="Path to alternate cache whose data "
- "you want to use. It accepts multiples directories"
+ self.AddField(IntegerField('acquire_timeout',
+ default=0,
+ description='Number of seconds to wait for '
+ 'machine before exit if all the machines in '
+ 'the experiment file are busy. Default is 0'))
+ self.AddField(TextField('perf_args',
+ default='',
+ description='The optional profile command. It '
+ 'enables perf commands to record perforamance '
+ 'related counters. It must start with perf '
+ 'command record or stat followed by arguments.'))
+ self.AddField(TextField('cache_dir',
+ default='',
+ description='The abs path of cache dir. '
+ 'Default is /home/$(whoami)/cros_scratch.'))
+ self.AddField(BooleanField('cache_only',
+ default=False,
+ description='Whether to use only cached '
+ 'results (do not rerun failed tests).'))
+ self.AddField(BooleanField('no_email',
+ default=False,
+ description='Whether to disable the email to '
+ 'user after crosperf finishes.'))
+ self.AddField(BooleanField('json_report',
+ default=False,
+ description='Whether to generate a json version'
+ ' of the report, for archiving.'))
+ self.AddField(BooleanField('show_all_results',
+ default=False,
+ description='When running Telemetry tests, '
+ 'whether to all the results, instead of just '
+ 'the default (summary) results.'))
+ self.AddField(TextField('share_cache',
+ default='',
+ description='Path to alternate cache whose data '
+ 'you want to use. It accepts multiples directories'
" separated by a \",\""))
- self.AddField(TextField("results_dir", default="",
- description="The results dir"))
- self.AddField(TextField("locks_dir", default="",
- description="An alternate directory to use for "
- "storing/checking machine locks. Using this field "
- "automatically sets use_file_locks to True.\n"
- "WARNING: If you use your own locks directory, "
- "there is no guarantee that someone else might not "
- "hold a lock on the same machine in a different "
- "locks directory."))
- self.AddField(TextField("chrome_src",
- description="The path to the source of chrome. "
- "This is used to run telemetry benchmarks. "
- "The default one is the src inside chroot.",
- required=False, default=""))
- self.AddField(IntegerField("retries", default=0,
- description="Number of times to retry a "
- "benchmark run."))
+ self.AddField(TextField('results_dir',
+ default='',
+ description='The results dir'))
+ self.AddField(TextField('locks_dir',
+ default='',
+ description='An alternate directory to use for '
+ 'storing/checking machine locks. Using this field '
+ 'automatically sets use_file_locks to True.\n'
+ 'WARNING: If you use your own locks directory, '
+ 'there is no guarantee that someone else might not '
+ 'hold a lock on the same machine in a different '
+ 'locks directory.'))
+ self.AddField(TextField('chrome_src',
+ description='The path to the source of chrome. '
+ 'This is used to run telemetry benchmarks. '
+ 'The default one is the src inside chroot.',
+ required=False,
+ default=''))
+ self.AddField(IntegerField('retries',
+ default=0,
+ description='Number of times to retry a '
+ 'benchmark run.'))
+
class SettingsFactory(object):
"""Factory class for building different types of Settings objects.
@@ -177,11 +212,11 @@ class SettingsFactory(object):
"""
def GetSettings(self, name, settings_type):
- if settings_type == "label" or not settings_type:
+ if settings_type == 'label' or not settings_type:
return LabelSettings(name)
- if settings_type == "global":
+ if settings_type == 'global':
return GlobalSettings(name)
- if settings_type == "benchmark":
+ if settings_type == 'benchmark':
return BenchmarkSettings(name)
raise Exception("Invalid settings type: '%s'." % settings_type)
diff --git a/crosperf/settings_factory_unittest.py b/crosperf/settings_factory_unittest.py
index 4d3ee342..5538e8cc 100755
--- a/crosperf/settings_factory_unittest.py
+++ b/crosperf/settings_factory_unittest.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
-
"""Unittest for crosperf."""
import os
@@ -14,10 +13,11 @@ import settings
from cros_utils import command_executer
from cros_utils import logger
+
class BenchmarkSettingsTest(unittest.TestCase):
def test_init(self):
- res = settings_factory.BenchmarkSettings("b_settings")
+ res = settings_factory.BenchmarkSettings('b_settings')
self.assertIsNotNone(res)
self.assertEqual(len(res.fields), 6)
self.assertEqual(res.GetField('test_name'), '')
@@ -25,10 +25,11 @@ class BenchmarkSettingsTest(unittest.TestCase):
self.assertEqual(res.GetField('iterations'), 1)
self.assertEqual(res.GetField('suite'), '')
+
class LabelSettingsTest(unittest.TestCase):
def test_init(self):
- res = settings_factory.LabelSettings("l_settings")
+ res = settings_factory.LabelSettings('l_settings')
self.assertIsNotNone(res)
self.assertEqual(len(res.fields), 8)
self.assertEqual(res.GetField('chromeos_image'), '')
@@ -43,7 +44,7 @@ class LabelSettingsTest(unittest.TestCase):
class GlobalSettingsTest(unittest.TestCase):
def test_init(self):
- res = settings_factory.GlobalSettings("g_settings")
+ res = settings_factory.GlobalSettings('g_settings')
self.assertIsNotNone(res)
self.assertEqual(len(res.fields), 25)
self.assertEqual(res.GetField('name'), '')
@@ -72,24 +73,24 @@ class GlobalSettingsTest(unittest.TestCase):
class SettingsFactoryTest(unittest.TestCase):
def test_get_settings(self):
- self.assertRaises (Exception, settings_factory.SettingsFactory.GetSettings,
- 'global', 'bad_type')
-
+ self.assertRaises(Exception, settings_factory.SettingsFactory.GetSettings,
+ 'global', 'bad_type')
- l_settings = settings_factory.SettingsFactory().GetSettings ('label', 'label')
+ l_settings = settings_factory.SettingsFactory().GetSettings('label',
+ 'label')
self.assertIsInstance(l_settings, settings_factory.LabelSettings)
self.assertEqual(len(l_settings.fields), 8)
- b_settings = settings_factory.SettingsFactory().GetSettings ('benchmark',
- 'benchmark')
+ b_settings = settings_factory.SettingsFactory().GetSettings('benchmark',
+ 'benchmark')
self.assertIsInstance(b_settings, settings_factory.BenchmarkSettings)
self.assertEqual(len(b_settings.fields), 6)
- g_settings = settings_factory.SettingsFactory().GetSettings ('global',
- 'global')
+ g_settings = settings_factory.SettingsFactory().GetSettings('global',
+ 'global')
self.assertIsInstance(g_settings, settings_factory.GlobalSettings)
self.assertEqual(len(g_settings.fields), 25)
-if __name__ == "__main__":
+if __name__ == '__main__':
unittest.main()
diff --git a/crosperf/settings_unittest.py b/crosperf/settings_unittest.py
index 2ce5f582..e5ccfd46 100755
--- a/crosperf/settings_unittest.py
+++ b/crosperf/settings_unittest.py
@@ -1,7 +1,6 @@
#!/usr/bin/python2
#
# Copyright 2014 Google Inc. All Rights Reserved.
-
"""unittest for settings."""
from __future__ import print_function
@@ -18,6 +17,7 @@ import download_images
from cros_utils import logger
+
class TestSettings(unittest.TestCase):
"""setting test class."""
@@ -29,38 +29,42 @@ class TestSettings(unittest.TestCase):
self.assertEqual(self.settings.settings_type, 'global')
self.assertIsNone(self.settings.parent)
-
def test_set_parent_settings(self):
self.assertIsNone(self.settings.parent)
- settings_parent = {'fake_parent_entry' : 0}
+ settings_parent = {'fake_parent_entry': 0}
self.settings.SetParentSettings(settings_parent)
self.assertIsNotNone(self.settings.parent)
self.assertEqual(type(self.settings.parent), dict)
self.assertEqual(self.settings.parent, settings_parent)
-
def test_add_field(self):
self.assertEqual(self.settings.fields, {})
- self.settings.AddField(IntegerField("iterations", default=1, required=False,
- description="Number of iterations to "
- "run the test."))
+ self.settings.AddField(IntegerField('iterations',
+ default=1,
+ required=False,
+ description='Number of iterations to '
+ 'run the test.'))
self.assertEqual(len(self.settings.fields), 1)
# Adding the same field twice raises an exception.
- self.assertRaises(Exception, self.settings.AddField,
- (IntegerField("iterations", default=1, required=False,
- description="Number of iterations to run "
- "the test.")))
+ self.assertRaises(Exception,
+ self.settings.AddField,
+ (IntegerField('iterations',
+ default=1,
+ required=False,
+ description='Number of iterations to run '
+ 'the test.')))
res = self.settings.fields['iterations']
self.assertIsInstance(res, IntegerField)
self.assertEqual(res.Get(), 1)
-
def test_set_field(self):
self.assertEqual(self.settings.fields, {})
- self.settings.AddField(IntegerField(
- "iterations", default=1, required=False,
- description="Number of iterations to run the "
- "test."))
+ self.settings.AddField(
+ IntegerField('iterations',
+ default=1,
+ required=False,
+ description='Number of iterations to run the '
+ 'test.'))
res = self.settings.fields['iterations']
self.assertEqual(res.Get(), 1)
@@ -69,13 +73,15 @@ class TestSettings(unittest.TestCase):
self.assertEqual(res.Get(), 10)
# Setting a field that's not there raises an exception.
- self.assertRaises(Exception, self.settings.SetField,
- 'remote', 'lumpy1.cros')
-
- self.settings.AddField(ListField("remote", default=[], description=
- "A comma-separated list of ip's of "
- "chromeos devices to run "
- "experiments on."))
+ self.assertRaises(Exception, self.settings.SetField, 'remote',
+ 'lumpy1.cros')
+
+ self.settings.AddField(
+ ListField('remote',
+ default=[],
+ description="A comma-separated list of ip's of "
+ 'chromeos devices to run '
+ 'experiments on.'))
self.assertEqual(type(self.settings.fields), dict)
self.assertEqual(len(self.settings.fields), 2)
res = self.settings.fields['remote']
@@ -85,15 +91,15 @@ class TestSettings(unittest.TestCase):
res = self.settings.fields['remote']
self.assertEqual(res.Get(), ['lumpy1.cros', 'lumpy2.cros'])
-
def test_get_field(self):
# Getting a field that's not there raises an exception.
self.assertRaises(Exception, self.settings.GetField, 'iterations')
# Getting a required field that hasn't been assigned raises an exception.
- self.settings.AddField(IntegerField("iterations", required=True,
- description="Number of iterations to "
- "run the test."))
+ self.settings.AddField(IntegerField('iterations',
+ required=True,
+ description='Number of iterations to '
+ 'run the test.'))
self.assertIsNotNone(self.settings.fields['iterations'])
self.assertRaises(Exception, self.settings.GetField, 'iterations')
@@ -102,7 +108,6 @@ class TestSettings(unittest.TestCase):
res = self.settings.GetField('iterations')
self.assertEqual(res, 5)
-
def test_inherit(self):
parent_settings = settings_factory.SettingsFactory().GetSettings('global',
'global')
@@ -119,13 +124,12 @@ class TestSettings(unittest.TestCase):
label_settings.Inherit()
self.assertEqual(label_settings.GetField('chromeos_root'), '/tmp/chromeos')
-
def test_override(self):
- self.settings.AddField(ListField("email", default=[],
- description="Space-seperated"
- "list of email addresses to send "
- "email to."))
-
+ self.settings.AddField(ListField('email',
+ default=[],
+ description='Space-seperated'
+ 'list of email addresses to send '
+ 'email to.'))
global_settings = settings_factory.SettingsFactory().GetSettings('global',
'global')
@@ -140,20 +144,23 @@ class TestSettings(unittest.TestCase):
res = self.settings.GetField('email')
self.assertEqual(res, ['john.doe@google.com', 'jane.smith@google.com'])
-
def test_validate(self):
- self.settings.AddField(IntegerField("iterations", required=True,
- description="Number of iterations "
- "to run the test."))
- self.settings.AddField(ListField("remote", default=[], required=True,
- description="A comma-separated list "
+ self.settings.AddField(IntegerField('iterations',
+ required=True,
+ description='Number of iterations '
+ 'to run the test.'))
+ self.settings.AddField(ListField('remote',
+ default=[],
+ required=True,
+ description='A comma-separated list '
"of ip's of chromeos "
- "devices to run experiments on."))
- self.settings.AddField(ListField("email", default=[],
- description="Space-seperated"
- "list of email addresses to "
- "send email to."))
+ 'devices to run experiments on.'))
+ self.settings.AddField(ListField('email',
+ default=[],
+ description='Space-seperated'
+ 'list of email addresses to '
+ 'send email to.'))
# 'required' fields have not been assigned; should raise an exception.
self.assertRaises(Exception, self.settings.Validate)
@@ -167,7 +174,6 @@ class TestSettings(unittest.TestCase):
@mock.patch.object(download_images, 'ImageDownloader')
def test_get_xbuddy_path(self, mock_downloader, mock_run, mock_logger):
-
mock_run.return_value = [0, 'fake_xbuddy_translation']
mock_downloader.Run = mock_run
board = 'lumpy'
@@ -182,23 +188,21 @@ class TestSettings(unittest.TestCase):
self.assertEqual(mock_run.call_count, 1)
self.assertEqual(mock_run.call_args_list[0][0],
('/tmp/chromeos',
- 'remote/trybot-lumpy-paladin/R34-5417.0.0-b1506',))
-
+ 'remote/trybot-lumpy-paladin/R34-5417.0.0-b1506'))
mock_run.reset_mock()
self.settings.GetXbuddyPath(official_str, board, chromeos_root, log_level)
self.assertEqual(mock_run.call_count, 1)
self.assertEqual(mock_run.call_args_list[0][0],
('/tmp/chromeos',
- 'remote/lumpy-release/R34-5417.0.0',))
-
+ 'remote/lumpy-release/R34-5417.0.0'))
mock_run.reset_mock()
self.settings.GetXbuddyPath(xbuddy_str, board, chromeos_root, log_level)
self.assertEqual(mock_run.call_count, 1)
self.assertEqual(mock_run.call_args_list[0][0],
('/tmp/chromeos',
- 'remote/lumpy/latest-dev',))
+ 'remote/lumpy/latest-dev'))
mock_run.return_value = [1, 'fake_xbuddy_translation']
self.assertRaises(Exception, self.settings.GetXbuddyPath, xbuddy_str, board,
@@ -206,5 +210,6 @@ class TestSettings(unittest.TestCase):
if mock_logger:
return
-if __name__ == "__main__":
+
+if __name__ == '__main__':
unittest.main()
diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
index 4c94de20..48ef97a5 100644
--- a/crosperf/suite_runner.py
+++ b/crosperf/suite_runner.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -14,7 +13,8 @@ import test_flag
TEST_THAT_PATH = '/usr/bin/test_that'
CHROME_MOUNT_DIR = '/tmp/chrome_root'
-def GetProfilerArgs (profiler_args):
+
+def GetProfilerArgs(profiler_args):
# Remove "--" from in front of profiler args.
args_list = shlex.split(profiler_args)
new_list = []
@@ -27,68 +27,74 @@ def GetProfilerArgs (profiler_args):
# Remove "perf_options=" from middle of profiler args.
new_list = []
for arg in args_list:
- idx = arg.find("perf_options=")
+ idx = arg.find('perf_options=')
if idx != -1:
prefix = arg[0:idx]
- suffix = arg[idx + len("perf_options=") + 1 : -1]
+ suffix = arg[idx + len('perf_options=') + 1:-1]
new_arg = prefix + "'" + suffix + "'"
new_list.append(new_arg)
else:
new_list.append(arg)
args_list = new_list
- return " ".join(args_list)
+ return ' '.join(args_list)
class SuiteRunner(object):
""" This defines the interface from crosperf to test script.
"""
- def __init__(self, logger_to_use=None, log_level="verbose", cmd_exec=None,
+ def __init__(self,
+ logger_to_use=None,
+ log_level='verbose',
+ cmd_exec=None,
cmd_term=None):
self._logger = logger_to_use
self.log_level = log_level
- self._ce = cmd_exec or command_executer.GetCommandExecuter(self._logger,
- log_level=self.log_level)
+ self._ce = cmd_exec or command_executer.GetCommandExecuter(
+ self._logger,
+ log_level=self.log_level)
self._ct = cmd_term or command_executer.CommandTerminator()
def Run(self, machine, label, benchmark, test_args, profiler_args):
for i in range(0, benchmark.retries + 1):
self.PinGovernorExecutionFrequencies(machine, label.chromeos_root)
- if benchmark.suite == "telemetry":
+ if benchmark.suite == 'telemetry':
ret_tup = self.Telemetry_Run(machine, label, benchmark, profiler_args)
- elif benchmark.suite == "telemetry_Crosperf":
+ elif benchmark.suite == 'telemetry_Crosperf':
ret_tup = self.Telemetry_Crosperf_Run(machine, label, benchmark,
test_args, profiler_args)
else:
ret_tup = self.Test_That_Run(machine, label, benchmark, test_args,
profiler_args)
if ret_tup[0] != 0:
- self._logger.LogOutput("benchmark %s failed. Retries left: %s"
- % (benchmark.name, benchmark.retries - i))
+ self._logger.LogOutput('benchmark %s failed. Retries left: %s' %
+ (benchmark.name, benchmark.retries - i))
elif i > 0:
- self._logger.LogOutput("benchmark %s succeded after %s retries"
- % (benchmark.name, i))
+ self._logger.LogOutput('benchmark %s succeded after %s retries' %
+ (benchmark.name, i))
break
else:
- self._logger.LogOutput("benchmark %s succeded on first try"
- % benchmark.name)
+ self._logger.LogOutput('benchmark %s succeded on first try' %
+ benchmark.name)
break
return ret_tup
def GetHighestStaticFrequency(self, machine_name, chromeos_root):
""" Gets the highest static frequency for the specified machine
"""
- get_avail_freqs = ("cd /sys/devices/system/cpu/cpu0/cpufreq/; "
- "if [[ -e scaling_available_frequencies ]]; then "
- " cat scaling_available_frequencies; "
- "else "
- " cat scaling_max_freq ; "
- "fi")
+ get_avail_freqs = ('cd /sys/devices/system/cpu/cpu0/cpufreq/; '
+ 'if [[ -e scaling_available_frequencies ]]; then '
+ ' cat scaling_available_frequencies; '
+ 'else '
+ ' cat scaling_max_freq ; '
+ 'fi')
ret, freqs_str, _ = self._ce.CrosRunCommandWOutput(
- get_avail_freqs, machine=machine_name, chromeos_root=chromeos_root)
- self._logger.LogFatalIf(ret, "Could not get available frequencies "
- "from machine: %s" % machine_name)
+ get_avail_freqs,
+ machine=machine_name,
+ chromeos_root=chromeos_root)
+ self._logger.LogFatalIf(ret, 'Could not get available frequencies '
+ 'from machine: %s' % machine_name)
freqs = freqs_str.split()
# We need to make sure that the frequencies are sorted in decreasing
# order
@@ -99,7 +105,7 @@ class SuiteRunner(object):
if len(freqs) == 1:
return freqs[0]
# The dynamic frequency ends with a "1000". So, ignore it if found.
- if freqs[0].endswith("1000"):
+ if freqs[0].endswith('1000'):
return freqs[1]
else:
return freqs[0]
@@ -108,95 +114,93 @@ class SuiteRunner(object):
""" Set min and max frequencies to max static frequency
"""
highest_freq = self.GetHighestStaticFrequency(machine_name, chromeos_root)
- BASH_FOR = "for f in {list}; do {body}; done"
- CPUFREQ_DIRS = "/sys/devices/system/cpu/cpu*/cpufreq/"
- change_max_freq = BASH_FOR.format(list=CPUFREQ_DIRS + "scaling_max_freq",
- body="echo %s > $f" % highest_freq)
- change_min_freq = BASH_FOR.format(list=CPUFREQ_DIRS + "scaling_min_freq",
- body="echo %s > $f" % highest_freq)
- change_perf_gov = BASH_FOR.format(list=CPUFREQ_DIRS + "scaling_governor",
- body="echo performance > $f")
- if self.log_level == "average":
- self._logger.LogOutput("Pinning governor execution frequencies for %s"
- % machine_name)
- ret = self._ce.CrosRunCommand(" && ".join(("set -e ",
- change_max_freq,
- change_min_freq,
- change_perf_gov)),
+ BASH_FOR = 'for f in {list}; do {body}; done'
+ CPUFREQ_DIRS = '/sys/devices/system/cpu/cpu*/cpufreq/'
+ change_max_freq = BASH_FOR.format(list=CPUFREQ_DIRS + 'scaling_max_freq',
+ body='echo %s > $f' % highest_freq)
+ change_min_freq = BASH_FOR.format(list=CPUFREQ_DIRS + 'scaling_min_freq',
+ body='echo %s > $f' % highest_freq)
+ change_perf_gov = BASH_FOR.format(list=CPUFREQ_DIRS + 'scaling_governor',
+ body='echo performance > $f')
+ if self.log_level == 'average':
+ self._logger.LogOutput('Pinning governor execution frequencies for %s' %
+ machine_name)
+ ret = self._ce.CrosRunCommand(' && '.join((
+ 'set -e ', change_max_freq, change_min_freq, change_perf_gov)),
machine=machine_name,
chromeos_root=chromeos_root)
- self._logger.LogFatalIf(ret, "Could not pin frequencies on machine: %s"
- % machine_name)
+ self._logger.LogFatalIf(ret, 'Could not pin frequencies on machine: %s' %
+ machine_name)
def RebootMachine(self, machine_name, chromeos_root):
- command = "reboot && exit"
- self._ce.CrosRunCommand(command, machine=machine_name,
- chromeos_root=chromeos_root)
+ command = 'reboot && exit'
+ self._ce.CrosRunCommand(command,
+ machine=machine_name,
+ chromeos_root=chromeos_root)
time.sleep(60)
# Whenever we reboot the machine, we need to restore the governor settings.
self.PinGovernorExecutionFrequencies(machine_name, chromeos_root)
def Test_That_Run(self, machine, label, benchmark, test_args, profiler_args):
"""Run the test_that test.."""
- options = ""
+ options = ''
if label.board:
- options += " --board=%s" % label.board
+ options += ' --board=%s' % label.board
if test_args:
- options += " %s" % test_args
+ options += ' %s' % test_args
if profiler_args:
- self._logger.LogFatal("test_that does not support profiler.")
- command = "rm -rf /usr/local/autotest/results/*"
- self._ce.CrosRunCommand(command, machine=machine,
+ self._logger.LogFatal('test_that does not support profiler.')
+ command = 'rm -rf /usr/local/autotest/results/*'
+ self._ce.CrosRunCommand(command,
+ machine=machine,
chromeos_root=label.chromeos_root)
# We do this because some tests leave the machine in weird states.
# Rebooting between iterations has proven to help with this.
self.RebootMachine(machine, label.chromeos_root)
- command = (("%s --autotest_dir ~/trunk/src/third_party/autotest/files --fast "
- "%s %s %s") %
- (TEST_THAT_PATH, options, machine, benchmark.test_name))
- if self.log_level != "verbose":
- self._logger.LogOutput("Running test.")
- self._logger.LogOutput("CMD: %s" % command)
+ command = (
+ ('%s --autotest_dir ~/trunk/src/third_party/autotest/files --fast '
+ '%s %s %s') % (TEST_THAT_PATH, options, machine, benchmark.test_name))
+ if self.log_level != 'verbose':
+ self._logger.LogOutput('Running test.')
+ self._logger.LogOutput('CMD: %s' % command)
# Use --no-ns-pid so that cros_sdk does not create a different
# process namespace and we can kill process created easily by
# their process group.
- return self._ce.ChrootRunCommandWOutput(
- label.chromeos_root, command, command_terminator=self._ct,
- cros_sdk_options="--no-ns-pid")
-
- def RemoveTelemetryTempFile (self, machine, chromeos_root):
- filename = "telemetry@%s" % machine
- fullname = os.path.join (chromeos_root,
- "chroot",
- "tmp",
- filename)
+ return self._ce.ChrootRunCommandWOutput(label.chromeos_root,
+ command,
+ command_terminator=self._ct,
+ cros_sdk_options='--no-ns-pid')
+
+ def RemoveTelemetryTempFile(self, machine, chromeos_root):
+ filename = 'telemetry@%s' % machine
+ fullname = os.path.join(chromeos_root, 'chroot', 'tmp', filename)
if os.path.exists(fullname):
- os.remove(fullname)
+ os.remove(fullname)
- def Telemetry_Crosperf_Run (self, machine, label, benchmark, test_args,
- profiler_args):
+ def Telemetry_Crosperf_Run(self, machine, label, benchmark, test_args,
+ profiler_args):
if not os.path.isdir(label.chrome_src):
- self._logger.LogFatal("Cannot find chrome src dir to"
- " run telemetry: %s" % label.chrome_src)
+ self._logger.LogFatal('Cannot find chrome src dir to'
+ ' run telemetry: %s' % label.chrome_src)
# Check for and remove temporary file that may have been left by
# previous telemetry runs (and which might prevent this run from
# working).
- self.RemoveTelemetryTempFile (machine, label.chromeos_root)
+ self.RemoveTelemetryTempFile(machine, label.chromeos_root)
# For telemetry runs, we can use the autotest copy from the source
# location. No need to have one under /build/<board>.
autotest_dir_arg = '--autotest_dir ~/trunk/src/third_party/autotest/files'
- profiler_args = GetProfilerArgs (profiler_args)
- fast_arg = ""
+ profiler_args = GetProfilerArgs(profiler_args)
+ fast_arg = ''
if not profiler_args:
# --fast works unless we are doing profiling (autotest limitation).
# --fast avoids unnecessary copies of syslogs.
- fast_arg = "--fast"
- args_string = ""
+ fast_arg = '--fast'
+ args_string = ''
if test_args:
# Strip double quotes off args (so we can wrap them in single
# quotes, to pass through to Telemetry).
@@ -205,68 +209,62 @@ class SuiteRunner(object):
args_string = "test_args='%s'" % test_args
cmd = ('{} {} {} --board={} --args="{} run_local={} test={} '
- '{}" {} telemetry_Crosperf'.format(TEST_THAT_PATH,
- autotest_dir_arg,
- fast_arg,
- label.board,
- args_string,
- benchmark.run_local,
- benchmark.test_name,
- profiler_args,
- machine))
+ '{}" {} telemetry_Crosperf'.format(
+ TEST_THAT_PATH, autotest_dir_arg, fast_arg, label.board,
+ args_string, benchmark.run_local, benchmark.test_name,
+ profiler_args, machine))
# Use --no-ns-pid so that cros_sdk does not create a different
# process namespace and we can kill process created easily by their
# process group.
- chrome_root_options = ("--no-ns-pid "
- "--chrome_root={} --chrome_root_mount={} "
+ chrome_root_options = ('--no-ns-pid '
+ '--chrome_root={} --chrome_root_mount={} '
"FEATURES=\"-usersandbox\" "
- "CHROME_ROOT={}".format(label.chrome_src,
- CHROME_MOUNT_DIR,
- CHROME_MOUNT_DIR))
- if self.log_level != "verbose":
- self._logger.LogOutput("Running test.")
- self._logger.LogOutput("CMD: %s" % cmd)
+ 'CHROME_ROOT={}'.format(label.chrome_src,
+ CHROME_MOUNT_DIR,
+ CHROME_MOUNT_DIR))
+ if self.log_level != 'verbose':
+ self._logger.LogOutput('Running test.')
+ self._logger.LogOutput('CMD: %s' % cmd)
return self._ce.ChrootRunCommandWOutput(
- label.chromeos_root, cmd, command_terminator=self._ct,
+ label.chromeos_root,
+ cmd,
+ command_terminator=self._ct,
cros_sdk_options=chrome_root_options)
-
def Telemetry_Run(self, machine, label, benchmark, profiler_args):
- telemetry_run_path = ""
+ telemetry_run_path = ''
if not os.path.isdir(label.chrome_src):
- self._logger.LogFatal("Cannot find chrome src dir to"
- " run telemetry.")
+ self._logger.LogFatal('Cannot find chrome src dir to' ' run telemetry.')
else:
- telemetry_run_path = os.path.join(label.chrome_src, "src/tools/perf")
+ telemetry_run_path = os.path.join(label.chrome_src, 'src/tools/perf')
if not os.path.exists(telemetry_run_path):
- self._logger.LogFatal("Cannot find %s directory." % telemetry_run_path)
+ self._logger.LogFatal('Cannot find %s directory.' % telemetry_run_path)
if profiler_args:
- self._logger.LogFatal("Telemetry does not support the perf profiler.")
+ self._logger.LogFatal('Telemetry does not support the perf profiler.')
# Check for and remove temporary file that may have been left by
# previous telemetry runs (and which might prevent this run from
# working).
if not test_flag.GetTestMode():
- self.RemoveTelemetryTempFile (machine, label.chromeos_root)
-
- rsa_key = os.path.join(label.chromeos_root,
- "src/scripts/mod_for_test_scripts/ssh_keys/testing_rsa")
-
- cmd = ("cd {0} && "
- "./run_measurement "
- "--browser=cros-chrome "
- "--output-format=csv "
- "--remote={1} "
- "--identity {2} "
- "{3} {4}".format(telemetry_run_path, machine,
- rsa_key,
- benchmark.test_name,
- benchmark.test_args))
- if self.log_level != "verbose":
- self._logger.LogOutput("Running test.")
- self._logger.LogOutput("CMD: %s" % cmd)
+ self.RemoveTelemetryTempFile(machine, label.chromeos_root)
+
+ rsa_key = os.path.join(
+ label.chromeos_root,
+ 'src/scripts/mod_for_test_scripts/ssh_keys/testing_rsa')
+
+ cmd = ('cd {0} && '
+ './run_measurement '
+ '--browser=cros-chrome '
+ '--output-format=csv '
+ '--remote={1} '
+ '--identity {2} '
+ '{3} {4}'.format(telemetry_run_path, machine, rsa_key,
+ benchmark.test_name, benchmark.test_args))
+ if self.log_level != 'verbose':
+ self._logger.LogOutput('Running test.')
+ self._logger.LogOutput('CMD: %s' % cmd)
return self._ce.RunCommandWOutput(cmd, print_to_console=False)
def Terminate(self):
@@ -274,11 +272,12 @@ class SuiteRunner(object):
class MockSuiteRunner(object):
+
def __init__(self):
self._true = True
def Run(self, *_args):
if self._true:
- return [0, "", ""]
+ return [0, '', '']
else:
- return [0, "", ""]
+ return [0, '', '']
diff --git a/crosperf/suite_runner_unittest.py b/crosperf/suite_runner_unittest.py
index d534f3a8..daff6c39 100755
--- a/crosperf/suite_runner_unittest.py
+++ b/crosperf/suite_runner_unittest.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
-
"""Unittest for machine_manager."""
import os.path
import time
@@ -30,38 +29,37 @@ class SuiteRunnerTest(unittest.TestCase):
mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
mock_cmd_term = mock.Mock(spec=command_executer.CommandTerminator)
mock_logger = mock.Mock(spec=logger.Logger)
- mock_label = label.MockLabel("lumpy", "lumpy_chromeos_image", "/tmp/chromeos",
- "lumpy", [ "lumpy1.cros", "lumpy.cros2" ],
- "", "", False, "average", "gcc", "")
- telemetry_crosperf_bench = Benchmark("b1_test", # name
- "octane", # test_name
- "", # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- "record -e cycles", # perf_args
- "telemetry_Crosperf", # suite
- True) # show_all_results
-
- test_that_bench = Benchmark("b2_test", # name
- "octane", # test_name
- "", # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- "record -e cycles") # perf_args
-
- telemetry_bench = Benchmark("b3_test", # name
- "octane", # test_name
- "", # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- "record -e cycles", # perf_args
- "telemetry", # suite
- False) # show_all_results
+ mock_label = label.MockLabel('lumpy', 'lumpy_chromeos_image', '/tmp/chromeos',
+ 'lumpy', ['lumpy1.cros', 'lumpy.cros2'], '', '',
+ False, 'average', 'gcc', '')
+ telemetry_crosperf_bench = Benchmark('b1_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles', # perf_args
+ 'telemetry_Crosperf', # suite
+ True) # show_all_results
+
+ test_that_bench = Benchmark('b2_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles') # perf_args
+
+ telemetry_bench = Benchmark('b3_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles', # perf_args
+ 'telemetry', # suite
+ False) # show_all_results
def setUp(self):
- self.runner = suite_runner.SuiteRunner(self.mock_logger, "verbose",
- self.mock_cmd_exec, self.mock_cmd_term)
-
+ self.runner = suite_runner.SuiteRunner(
+ self.mock_logger, 'verbose', self.mock_cmd_exec, self.mock_cmd_term)
def test_get_profiler_args(self):
input_str = ('--profiler=custom_perf --profiler_args=\'perf_options'
@@ -81,32 +79,29 @@ class SuiteRunnerTest(unittest.TestCase):
self.pin_governor_args = []
self.test_that_args = []
self.telemetry_run_args = []
- self.telemetry_crosperf_args = []
-
+ self.telemetry_crosperf_args = []
def FakePinGovernor(machine, chroot):
self.call_pin_governor = True
self.pin_governor_args = [machine, chroot]
-
def FakeTelemetryRun(machine, label, benchmark, profiler_args):
self.telemetry_run_args = [machine, label, benchmark, profiler_args]
self.call_telemetry_run = True
- return "Ran FakeTelemetryRun"
-
+ return 'Ran FakeTelemetryRun'
def FakeTelemetryCrosperfRun(machine, label, benchmark, test_args,
profiler_args):
self.telemetry_crosperf_args = [machine, label, benchmark, test_args,
profiler_args]
self.call_telemetry_crosperf_run = True
- return "Ran FakeTelemetryCrosperfRun"
-
+ return 'Ran FakeTelemetryCrosperfRun'
def FakeTestThatRun(machine, label, benchmark, test_args, profiler_args):
- self.test_that_args = [machine, label, benchmark, test_args, profiler_args]
+ self.test_that_args = [machine, label, benchmark, test_args, profiler_args
+ ]
self.call_test_that_run = True
- return "Ran FakeTestThatRun"
+ return 'Ran FakeTestThatRun'
self.runner.PinGovernorExecutionFrequencies = FakePinGovernor
self.runner.Telemetry_Run = FakeTelemetryRun
@@ -123,8 +118,9 @@ class SuiteRunnerTest(unittest.TestCase):
self.assertTrue(self.call_telemetry_run)
self.assertFalse(self.call_test_that_run)
self.assertFalse(self.call_telemetry_crosperf_run)
- self.assertEqual(self.telemetry_run_args,
- ['fake_machine', self.mock_label, self.telemetry_bench, ''])
+ self.assertEqual(
+ self.telemetry_run_args,
+ ['fake_machine', self.mock_label, self.telemetry_bench, ''])
reset()
res = self.runner.Run(machine, self.mock_label, self.test_that_bench,
@@ -133,13 +129,13 @@ class SuiteRunnerTest(unittest.TestCase):
self.assertFalse(self.call_telemetry_run)
self.assertTrue(self.call_test_that_run)
self.assertFalse(self.call_telemetry_crosperf_run)
- self.assertEqual(self.test_that_args,
- ['fake_machine', self.mock_label, self.test_that_bench, '',
- ''])
+ self.assertEqual(self.test_that_args, ['fake_machine', self.mock_label,
+ self.test_that_bench, '', ''])
reset()
- res = self.runner.Run(machine, self.mock_label, self.telemetry_crosperf_bench,
- test_args, profiler_args)
+ res = self.runner.Run(machine, self.mock_label,
+ self.telemetry_crosperf_bench, test_args,
+ profiler_args)
self.assertTrue(self.call_pin_governor)
self.assertFalse(self.call_telemetry_run)
self.assertFalse(self.call_test_that_run)
@@ -148,27 +144,23 @@ class SuiteRunnerTest(unittest.TestCase):
['fake_machine', self.mock_label,
self.telemetry_crosperf_bench, '', ''])
-
-
- @mock.patch.object (command_executer.CommandExecuter, 'CrosRunCommandWOutput')
+ @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
def test_get_highest_static_frequency(self, mock_cros_runcmd):
self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd
- mock_cros_runcmd.return_value = [ 0, '1666000 1333000 1000000', '']
- freq = self.runner.GetHighestStaticFrequency ('lumpy1.cros', '/tmp/chromeos')
+ mock_cros_runcmd.return_value = [0, '1666000 1333000 1000000', '']
+ freq = self.runner.GetHighestStaticFrequency('lumpy1.cros', '/tmp/chromeos')
self.assertEqual(freq, '1666000')
- mock_cros_runcmd.return_value = [ 0, '1333000', '']
- freq = self.runner.GetHighestStaticFrequency ('lumpy1.cros', '/tmp/chromeos')
+ mock_cros_runcmd.return_value = [0, '1333000', '']
+ freq = self.runner.GetHighestStaticFrequency('lumpy1.cros', '/tmp/chromeos')
self.assertEqual(freq, '1333000')
- mock_cros_runcmd.return_value = [ 0, '1661000 1333000 1000000', '']
- freq = self.runner.GetHighestStaticFrequency ('lumpy1.cros', '/tmp/chromeos')
+ mock_cros_runcmd.return_value = [0, '1661000 1333000 1000000', '']
+ freq = self.runner.GetHighestStaticFrequency('lumpy1.cros', '/tmp/chromeos')
self.assertEqual(freq, '1333000')
-
-
- @mock.patch.object (command_executer.CommandExecuter, 'CrosRunCommand')
+ @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
def test_pin_governor_execution_frequencies(self, mock_cros_runcmd):
def FakeGetHighestFreq(machine_name, chromeos_root):
@@ -179,11 +171,18 @@ class SuiteRunnerTest(unittest.TestCase):
self.runner.PinGovernorExecutionFrequencies('lumpy1.cros', '/tmp/chromeos')
self.assertEqual(mock_cros_runcmd.call_count, 1)
cmd = mock_cros_runcmd.call_args_list[0][0]
- self.assertEqual (cmd, ('set -e && for f in /sys/devices/system/cpu/cpu*/cpufreq/scaling_max_freq; do echo 1666000 > $f; done && for f in /sys/devices/system/cpu/cpu*/cpufreq/scaling_min_freq; do echo 1666000 > $f; done && for f in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor; do echo performance > $f; done',))
-
-
- @mock.patch.object (time, 'sleep')
- @mock.patch.object (command_executer.CommandExecuter, 'CrosRunCommand')
+ self.assertEqual(cmd, (
+ 'set -e && for f in '
+ '/sys/devices/system/cpu/cpu*/cpufreq/scaling_max_freq; do echo '
+ '1666000 > $f; done && for f in '
+ '/sys/devices/system/cpu/cpu*/cpufreq/scaling_min_freq; do echo '
+ '1666000 > $f; done && for f in '
+ '/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor; do echo '
+ 'performance > $f; done',
+ ))
+
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
def test_reboot_machine(self, mock_cros_runcmd, mock_sleep):
def FakePinGovernor(machine_name, chromeos_root):
@@ -197,16 +196,15 @@ class SuiteRunnerTest(unittest.TestCase):
self.assertEqual(mock_sleep.call_count, 1)
self.assertEqual(mock_sleep.call_args_list[0][0], (60,))
-
- @mock.patch.object (command_executer.CommandExecuter, 'CrosRunCommand')
- @mock.patch.object (command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput')
+ @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
+ @mock.patch.object(command_executer.CommandExecuter,
+ 'ChrootRunCommandWOutput')
def test_test_that_run(self, mock_chroot_runcmd, mock_cros_runcmd):
- def FakeRebootMachine (machine, chroot):
+ def FakeRebootMachine(machine, chroot):
pass
- def FakeLogMsg (fd, termfd, msg, flush):
+ def FakeLogMsg(fd, termfd, msg, flush):
pass
save_log_msg = self.real_logger._LogMsg
@@ -217,8 +215,7 @@ class SuiteRunnerTest(unittest.TestCase):
raised_exception = False
try:
self.runner.Test_That_Run('lumpy1.cros', self.mock_label,
- self.test_that_bench, '',
- 'record -a -e cycles')
+ self.test_that_bench, '', 'record -a -e cycles')
except:
raised_exception = True
self.assertTrue(raised_exception)
@@ -226,9 +223,8 @@ class SuiteRunnerTest(unittest.TestCase):
mock_chroot_runcmd.return_value = 0
self.mock_cmd_exec.ChrootRunCommandWOutput = mock_chroot_runcmd
self.mock_cmd_exec.CrosRunCommand = mock_cros_runcmd
- res = self.runner.Test_That_Run ('lumpy1.cros', self.mock_label,
- self.test_that_bench, '--iterations=2',
- '')
+ res = self.runner.Test_That_Run('lumpy1.cros', self.mock_label,
+ self.test_that_bench, '--iterations=2', '')
self.assertEqual(mock_cros_runcmd.call_count, 1)
self.assertEqual(mock_chroot_runcmd.call_count, 1)
self.assertEqual(res, 0)
@@ -245,10 +241,9 @@ class SuiteRunnerTest(unittest.TestCase):
self.assertEqual(args_dict['command_terminator'], self.mock_cmd_term)
self.real_logger._LogMsg = save_log_msg
-
- @mock.patch.object (os.path, 'isdir')
- @mock.patch.object (command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput')
+ @mock.patch.object(os.path, 'isdir')
+ @mock.patch.object(command_executer.CommandExecuter,
+ 'ChrootRunCommandWOutput')
def test_telemetry_crosperf_run(self, mock_chroot_runcmd, mock_isdir):
mock_isdir.return_value = True
@@ -256,9 +251,9 @@ class SuiteRunnerTest(unittest.TestCase):
self.mock_cmd_exec.ChrootRunCommandWOutput = mock_chroot_runcmd
profiler_args = ('--profiler=custom_perf --profiler_args=\'perf_options'
'="record -a -e cycles,instructions"\'')
- res = self.runner.Telemetry_Crosperf_Run ('lumpy1.cros', self.mock_label,
- self.telemetry_crosperf_bench,
- '', profiler_args)
+ res = self.runner.Telemetry_Crosperf_Run('lumpy1.cros', self.mock_label,
+ self.telemetry_crosperf_bench, '',
+ profiler_args)
self.assertEqual(res, 0)
self.assertEqual(mock_chroot_runcmd.call_count, 1)
args_list = mock_chroot_runcmd.call_args_list[0][0]
@@ -277,13 +272,12 @@ class SuiteRunnerTest(unittest.TestCase):
self.assertEqual(args_dict['command_terminator'], self.mock_cmd_term)
self.assertEqual(len(args_dict), 2)
-
- @mock.patch.object (os.path, 'isdir')
- @mock.patch.object (os.path, 'exists')
- @mock.patch.object (command_executer.CommandExecuter, 'RunCommandWOutput')
+ @mock.patch.object(os.path, 'isdir')
+ @mock.patch.object(os.path, 'exists')
+ @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
def test_telemetry_run(self, mock_runcmd, mock_exists, mock_isdir):
- def FakeLogMsg (fd, termfd, msg, flush):
+ def FakeLogMsg(fd, termfd, msg, flush):
pass
save_log_msg = self.real_logger._LogMsg
@@ -330,13 +324,14 @@ class SuiteRunnerTest(unittest.TestCase):
self.telemetry_bench, '')
self.assertEqual(res, 0)
self.assertEqual(mock_runcmd.call_count, 1)
- self.assertEqual(mock_runcmd.call_args_list[0][0],
- (('cd src/tools/perf && ./run_measurement '
- '--browser=cros-chrome --output-format=csv '
- '--remote=lumpy1.cros --identity /tmp/chromeos/src/scripts'
- '/mod_for_test_scripts/ssh_keys/testing_rsa octane '),))
+ self.assertEqual(mock_runcmd.call_args_list[0][0], (
+ ('cd src/tools/perf && ./run_measurement '
+ '--browser=cros-chrome --output-format=csv '
+ '--remote=lumpy1.cros --identity /tmp/chromeos/src/scripts'
+ '/mod_for_test_scripts/ssh_keys/testing_rsa octane '),))
self.real_logger._LogMsg = save_log_msg
-if __name__ == "__main__":
+
+if __name__ == '__main__':
unittest.main()
diff --git a/crosperf/test_flag.py b/crosperf/test_flag.py
index 06f2ae10..0305eea7 100644
--- a/crosperf/test_flag.py
+++ b/crosperf/test_flag.py
@@ -1,8 +1,6 @@
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""A global variable for testing."""
-
_is_test = [False]
@@ -11,4 +9,4 @@ def SetTestMode(flag):
def GetTestMode():
- return _is_test[0]
+ return _is_test[0]
diff --git a/crosperf/translate_xbuddy.py b/crosperf/translate_xbuddy.py
index 57aa2167..a32854e1 100644
--- a/crosperf/translate_xbuddy.py
+++ b/crosperf/translate_xbuddy.py
@@ -9,14 +9,15 @@ if '/mnt/host/source/src/third_party/toolchain-utils/crosperf' in sys.path:
dev_path = os.path.expanduser('~/trunk/src/platform/dev')
sys.path.append(dev_path)
else:
- print ('This script can only be run from inside a ChromeOS chroot. Please '
- 'enter your chroot, go to ~/src/third_party/toolchain-utils/crosperf'
- ' and try again.')
+ print('This script can only be run from inside a ChromeOS chroot. Please '
+ 'enter your chroot, go to ~/src/third_party/toolchain-utils/crosperf'
+ ' and try again.')
sys.exit(0)
#pylint: disable=import-error
import xbuddy
+
def Main(xbuddy_string):
if not os.path.exists('./xbuddy_config.ini'):
config_path = os.path.expanduser('~/trunk/src/platform/dev/'
@@ -26,6 +27,7 @@ def Main(xbuddy_string):
build_id = x.Translate(os.path.split(xbuddy_string))
return build_id
-if __name__ == "__main__":
+
+if __name__ == '__main__':
print(Main(sys.argv[1]))
sys.exit(0)
diff --git a/cwp/bartlett/server.py b/cwp/bartlett/server.py
index c61f3444..f6b35361 100755
--- a/cwp/bartlett/server.py
+++ b/cwp/bartlett/server.py
@@ -41,19 +41,19 @@ logging.getLogger().setLevel(logging.DEBUG)
class FileEntry(db.Model):
- profile_data = db.BlobProperty() # The profile data
- date = db.DateTimeProperty(auto_now_add=True) # Date it was uploaded
- data_md5 = db.ByteStringProperty() # md5 of the profile data
- board = db.StringProperty() # board arch
- chromeos_version = db.StringProperty() # ChromeOS version
+ profile_data = db.BlobProperty() # The profile data
+ date = db.DateTimeProperty(auto_now_add=True) # Date it was uploaded
+ data_md5 = db.ByteStringProperty() # md5 of the profile data
+ board = db.StringProperty() # board arch
+ chromeos_version = db.StringProperty() # ChromeOS version
class MainPage(webapp.RequestHandler):
"""Main page only used as the form template, not actually displayed."""
- def get(self, response=""): # pylint: disable-msg=C6409
+ def get(self, response=''): # pylint: disable-msg=C6409
if response:
- self.response.out.write("<html><body>")
+ self.response.out.write('<html><body>')
self.response.out.write("""<br>
<form action="/upload" enctype="multipart/form-data" method="post">
<div><label>Profile Data:</label></div>
@@ -74,11 +74,11 @@ class Upload(webapp.RequestHandler):
def post(self): # pylint: disable-msg=C6409
"""Takes input based on the main page's form."""
getfile = FileEntry()
- f1 = self.request.get("profile_data")
+ f1 = self.request.get('profile_data')
getfile.profile_data = db.Blob(f1)
getfile.data_md5 = md5.new(f1).hexdigest()
- getfile.board = self.request.get("board")
- getfile.chromeos_version = self.request.get("chromeos_version")
+ getfile.board = self.request.get('board')
+ getfile.chromeos_version = self.request.get('chromeos_version')
getfile.put()
self.response.out.write(getfile.key())
#self.redirect('/')
@@ -101,15 +101,15 @@ class ListAll(webapp.RequestHandler):
def get(self): # pylint: disable-msg=C6409
"""Displays all information in FileEntry, ~ delimited."""
if Authenticate(self):
- query_str = "SELECT * FROM FileEntry ORDER BY date ASC"
+ query_str = 'SELECT * FROM FileEntry ORDER BY date ASC'
query = db.GqlQuery(query_str)
- delimiter = "~"
+ delimiter = '~'
for item in query:
- display_list = [item.key(), item.date, item.data_md5,
- item.board, item.chromeos_version]
+ display_list = [item.key(), item.date, item.data_md5, item.board,
+ item.chromeos_version]
str_list = [cgi.escape(str(i)) for i in display_list]
- self.response.out.write(delimiter.join(str_list)+"</br>")
+ self.response.out.write(delimiter.join(str_list) + '</br>')
class DelEntries(webapp.RequestHandler):
@@ -129,24 +129,25 @@ def Authenticate(webpage):
user = users.get_current_user()
if user is None:
webpage.redirect(users.create_login_url(webpage.request.uri))
- elif user.email().endswith("@google.com"):
+ elif user.email().endswith('@google.com'):
return True
else:
- webpage.response.out.write("Not Authenticated")
+ webpage.response.out.write('Not Authenticated')
return False
def main():
- application = webapp.WSGIApplication([
- ("/", MainPage),
- ("/upload", Upload),
- ("/serve/([^/]+)?", ServeHandler),
- ("/serve", ListAll),
- ("/del/([^/]+)?", DelEntries),
- ], debug=False)
+ application = webapp.WSGIApplication(
+ [
+ ('/', MainPage),
+ ('/upload', Upload),
+ ('/serve/([^/]+)?', ServeHandler),
+ ('/serve', ListAll),
+ ('/del/([^/]+)?', DelEntries),
+ ],
+ debug=False)
run_wsgi_app(application)
-if __name__ == "__main__":
+if __name__ == '__main__':
main()
-
diff --git a/cwp/bartlett/test/server_tester.py b/cwp/bartlett/test/server_tester.py
index 27889651..585da43a 100644
--- a/cwp/bartlett/test/server_tester.py
+++ b/cwp/bartlett/test/server_tester.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright 2012 Google Inc. All Rights Reserved.
# Author: mrdmnd@ (Matt Redmond)
"""A unit test for sending data to Bartlett. Requires poster module."""
@@ -15,10 +14,9 @@ import urllib2
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers
-
-SERVER_DIR = "../."
-SERVER_URL = "http://localhost:8080/"
-GET = "_ah/login?email=googler@google.com&action=Login&continue=%s"
+SERVER_DIR = '../.'
+SERVER_URL = 'http://localhost:8080/'
+GET = '_ah/login?email=googler@google.com&action=Login&continue=%s'
AUTH_URL = SERVER_URL + GET
@@ -51,44 +49,46 @@ class ServerTest(unittest.TestCase):
def _testUpload(self): # pylint: disable-msg=C6409
register_openers()
- data = {"profile_data": self.profile_data,
- "board": "x86-zgb",
- "chromeos_version": "2409.0.2012_06_08_1114"}
+ data = {'profile_data': self.profile_data,
+ 'board': 'x86-zgb',
+ 'chromeos_version': '2409.0.2012_06_08_1114'}
datagen, headers = multipart_encode(data)
- request = urllib2.Request(SERVER_URL + "upload", datagen, headers)
+ request = urllib2.Request(SERVER_URL + 'upload', datagen, headers)
response = urllib2.urlopen(request).read()
self.assertTrue(response)
return response
def _testListAll(self): # pylint: disable-msg=C6409
- request = urllib2.Request(AUTH_URL % (SERVER_URL + "serve"))
+ request = urllib2.Request(AUTH_URL % (SERVER_URL + 'serve'))
response = self.opener.open(request).read()
self.assertTrue(response)
def _testServeKey(self, key): # pylint: disable-msg=C6409
- request = urllib2.Request(AUTH_URL % (SERVER_URL + "serve/" + key))
+ request = urllib2.Request(AUTH_URL % (SERVER_URL + 'serve/' + key))
response = self.opener.open(request).read()
self.assertTrue(response)
def _testDelKey(self, key): # pylint: disable-msg=C6409
# There is no response to a delete request.
# We will check the listAll page to ensure there is no data.
- request = urllib2.Request(AUTH_URL % (SERVER_URL + "del/" + key))
+ request = urllib2.Request(AUTH_URL % (SERVER_URL + 'del/' + key))
response = self.opener.open(request).read()
- request = urllib2.Request(AUTH_URL % (SERVER_URL + "serve"))
+ request = urllib2.Request(AUTH_URL % (SERVER_URL + 'serve'))
response = self.opener.open(request).read()
self.assertFalse(response)
def LaunchLocalServer():
"""Launch and store an authentication cookie with a local server."""
- proc = subprocess.Popen(["dev_appserver.py", "--clear_datastore", SERVER_DIR],
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ proc = subprocess.Popen(
+ ['dev_appserver.py', '--clear_datastore', SERVER_DIR],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
# Wait for server to come up
while True:
time.sleep(1)
try:
- request = urllib2.Request(SERVER_URL + "serve")
+ request = urllib2.Request(SERVER_URL + 'serve')
response = urllib2.urlopen(request).read()
if response:
break
@@ -97,6 +97,5 @@ def LaunchLocalServer():
return proc
-if __name__ == "__main__":
+if __name__ == '__main__':
unittest.main()
-
diff --git a/cwp/interpreter/app_engine_pull.py b/cwp/interpreter/app_engine_pull.py
index 65f67940..d092e2a3 100644
--- a/cwp/interpreter/app_engine_pull.py
+++ b/cwp/interpreter/app_engine_pull.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright 2012 Google Inc. All Rights Reserved.
# Author: mrdmnd@ (Matt Redmond)
"""A client to pull data from Bartlett.
@@ -23,9 +22,9 @@ import os
import urllib
import urllib2
-SERVER_NAME = "http://chromeoswideprofiling.appspot.com"
-APP_NAME = "chromeoswideprofiling"
-DELIMITER = "~"
+SERVER_NAME = 'http://chromeoswideprofiling.appspot.com'
+APP_NAME = 'chromeoswideprofiling'
+DELIMITER = '~'
def Authenticate(server_name):
@@ -38,33 +37,33 @@ def Authenticate(server_name):
to grab other pages.
"""
- if server_name.endswith("/"):
- server_name = server_name.rstrip("/")
+ if server_name.endswith('/'):
+ server_name = server_name.rstrip('/')
# Grab username and password from user through stdin.
- username = raw_input("Email (must be @google.com account): ")
- password = getpass.getpass("Password: ")
+ username = raw_input('Email (must be @google.com account): ')
+ password = getpass.getpass('Password: ')
# Use a cookie to authenticate with GAE.
cookiejar = cookielib.LWPCookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiejar))
urllib2.install_opener(opener)
# Get an AuthToken from Google accounts service.
- auth_uri = "https://www.google.com/accounts/ClientLogin"
- authreq_data = urllib.urlencode({"Email": username,
- "Passwd": password,
- "service": "ah",
- "source": APP_NAME,
- "accountType": "HOSTED_OR_GOOGLE"})
+ auth_uri = 'https://www.google.com/accounts/ClientLogin'
+ authreq_data = urllib.urlencode({'Email': username,
+ 'Passwd': password,
+ 'service': 'ah',
+ 'source': APP_NAME,
+ 'accountType': 'HOSTED_OR_GOOGLE'})
auth_req = urllib2.Request(auth_uri, data=authreq_data)
try:
auth_resp = urllib2.urlopen(auth_req)
except urllib2.URLError:
- print "Error logging in to Google accounts service."
+ print 'Error logging in to Google accounts service.'
return None
body = auth_resp.read()
# Auth response contains several fields.
# We care about the part after Auth=
- auth_resp_dict = dict(x.split("=") for x in body.split("\n") if x)
- authtoken = auth_resp_dict["Auth"]
+ auth_resp_dict = dict(x.split('=') for x in body.split('\n') if x)
+ authtoken = auth_resp_dict['Auth']
return authtoken
@@ -81,32 +80,32 @@ def DownloadSamples(server_name, authtoken, output_dir, start, stop):
None
"""
- if server_name.endswith("/"):
- server_name = server_name.rstrip("/")
+ if server_name.endswith('/'):
+ server_name = server_name.rstrip('/')
serve_page_string = _GetServePage(server_name, authtoken)
if serve_page_string is None:
- print "Error getting /serve page."
+ print 'Error getting /serve page.'
return
- sample_list = serve_page_string.split("</br>")
- print "Will download:"
+ sample_list = serve_page_string.split('</br>')
+ print 'Will download:'
sample_list_subset = sample_list[start:stop]
for sample in sample_list_subset:
print sample
for sample in sample_list_subset:
- assert sample, "Sample should be valid."
+ assert sample, 'Sample should be valid.'
sample_info = [s.strip() for s in sample.split(DELIMITER)]
key = sample_info[0]
time = sample_info[1]
- time = time.replace(" ", "_") # No space between date and time.
+ time = time.replace(' ', '_') # No space between date and time.
# sample_md5 = sample_info[2]
board = sample_info[3]
version = sample_info[4]
# Put a compressed copy of the samples in output directory.
- _DownloadSampleFromServer(server_name, authtoken, key, time, board,
- version, output_dir)
+ _DownloadSampleFromServer(server_name, authtoken, key, time, board, version,
+ output_dir)
_UncompressSample(key, time, board, version, output_dir)
@@ -124,8 +123,8 @@ def _BuildFilenameFromParams(key, time, board, version):
return filename
-def _DownloadSampleFromServer(server_name, authtoken, key, time, board,
- version, output_dir):
+def _DownloadSampleFromServer(server_name, authtoken, key, time, board, version,
+ output_dir):
"""Downloads sample_$(samplekey).gz to current dir.
Args:
server_name: (string) URL that the app engine code is living on.
@@ -139,18 +138,18 @@ def _DownloadSampleFromServer(server_name, authtoken, key, time, board,
None
"""
filename = _BuildFilenameFromParams(key, time, board, version)
- compressed_filename = filename+".gz"
+ compressed_filename = filename + '.gz'
if os.path.exists(os.path.join(output_dir, filename)):
- print "Already downloaded %s, skipping." % filename
+ print 'Already downloaded %s, skipping.' % filename
return
- serv_uri = server_name + "/serve/" + key
- serv_args = {"continue": serv_uri, "auth": authtoken}
- full_serv_uri = server_name + "/_ah/login?%s" % urllib.urlencode(serv_args)
+ serv_uri = server_name + '/serve/' + key
+ serv_args = {'continue': serv_uri, 'auth': authtoken}
+ full_serv_uri = server_name + '/_ah/login?%s' % urllib.urlencode(serv_args)
serv_req = urllib2.Request(full_serv_uri)
serv_resp = urllib2.urlopen(serv_req)
- f = open(os.path.join(output_dir, compressed_filename), "w+")
+ f = open(os.path.join(output_dir, compressed_filename), 'w+')
f.write(serv_resp.read())
f.close()
@@ -167,14 +166,14 @@ def _UncompressSample(key, time, board, version, output_dir):
None
"""
filename = _BuildFilenameFromParams(key, time, board, version)
- compressed_filename = filename+".gz"
+ compressed_filename = filename + '.gz'
if os.path.exists(os.path.join(output_dir, filename)):
- print "Already decompressed %s, skipping." % filename
+ print 'Already decompressed %s, skipping.' % filename
return
- out_file = open(os.path.join(output_dir, filename), "wb")
- in_file = gzip.open(os.path.join(output_dir, compressed_filename), "rb")
+ out_file = open(os.path.join(output_dir, filename), 'wb')
+ in_file = gzip.open(os.path.join(output_dir, compressed_filename), 'rb')
out_file.write(in_file.read())
in_file.close()
out_file.close()
@@ -192,9 +191,9 @@ def _DeleteSampleFromServer(server_name, authtoken, key):
None
"""
- serv_uri = server_name + "/del/" + key
- serv_args = {"continue": serv_uri, "auth": authtoken}
- full_serv_uri = server_name + "/_ah/login?%s" % urllib.urlencode(serv_args)
+ serv_uri = server_name + '/del/' + key
+ serv_args = {'continue': serv_uri, 'auth': authtoken}
+ full_serv_uri = server_name + '/_ah/login?%s' % urllib.urlencode(serv_args)
serv_req = urllib2.Request(full_serv_uri)
urllib2.urlopen(serv_req)
@@ -208,9 +207,9 @@ def _GetServePage(server_name, authtoken):
The text of the /serve page (including HTML tags)
"""
- serv_uri = server_name + "/serve"
- serv_args = {"continue": serv_uri, "auth": authtoken}
- full_serv_uri = server_name + "/_ah/login?%s" % urllib.urlencode(serv_args)
+ serv_uri = server_name + '/serve'
+ serv_args = {'continue': serv_uri, 'auth': authtoken}
+ full_serv_uri = server_name + '/_ah/login?%s' % urllib.urlencode(serv_args)
serv_req = urllib2.Request(full_serv_uri)
serv_resp = urllib2.urlopen(serv_req)
return serv_resp.read()
@@ -218,28 +217,37 @@ def _GetServePage(server_name, authtoken):
def main():
parser = optparse.OptionParser()
- parser.add_option("--output_dir", dest="output_dir", action="store",
- help="Path to output perf data files.")
- parser.add_option("--start", dest="start_ind", action="store",
- default=0, help="Start index.")
- parser.add_option("--stop", dest="stop_ind", action="store",
- default=-1, help="Stop index.")
+ parser.add_option('--output_dir',
+ dest='output_dir',
+ action='store',
+ help='Path to output perf data files.')
+ parser.add_option('--start',
+ dest='start_ind',
+ action='store',
+ default=0,
+ help='Start index.')
+ parser.add_option('--stop',
+ dest='stop_ind',
+ action='store',
+ default=-1,
+ help='Stop index.')
options = parser.parse_args()[0]
if not options.output_dir:
- print "Must specify --output_dir."
+ print 'Must specify --output_dir.'
return 1
if not os.path.exists(options.output_dir):
- print "Specified output_dir does not exist."
+ print 'Specified output_dir does not exist.'
return 1
authtoken = Authenticate(SERVER_NAME)
if not authtoken:
- print "Could not obtain authtoken, exiting."
+ print 'Could not obtain authtoken, exiting.'
return 1
- DownloadSamples(SERVER_NAME, authtoken, options.output_dir,
- options.start_ind, options.stop_ind)
- print "Downloaded samples."
+ DownloadSamples(SERVER_NAME, authtoken, options.output_dir, options.start_ind,
+ options.stop_ind)
+ print 'Downloaded samples.'
return 0
-if __name__ == "__main__":
+
+if __name__ == '__main__':
exit(main())
diff --git a/cwp/interpreter/symbolizer.py b/cwp/interpreter/symbolizer.py
index 3e589538..902e319d 100644
--- a/cwp/interpreter/symbolizer.py
+++ b/cwp/interpreter/symbolizer.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright 2012 Google Inc. All Rights Reserved.
"""A script that symbolizes perf.data files."""
import optparse
@@ -9,19 +8,18 @@ from subprocess import PIPE
from subprocess import Popen
from utils import misc
-
-GSUTIL_CMD = "gsutil cp gs://chromeos-image-archive/%s-release/%s/debug.tgz %s"
-TAR_CMD = "tar -zxvf %s -C %s"
-PERF_BINARY = "/google/data/ro/projects/perf/perf"
-VMLINUX_FLAG = " --vmlinux=/usr/lib/debug/boot/vmlinux"
-PERF_CMD = PERF_BINARY +" report -i %s -n --symfs=%s" + VMLINUX_FLAG
+GSUTIL_CMD = 'gsutil cp gs://chromeos-image-archive/%s-release/%s/debug.tgz %s'
+TAR_CMD = 'tar -zxvf %s -C %s'
+PERF_BINARY = '/google/data/ro/projects/perf/perf'
+VMLINUX_FLAG = ' --vmlinux=/usr/lib/debug/boot/vmlinux'
+PERF_CMD = PERF_BINARY + ' report -i %s -n --symfs=%s' + VMLINUX_FLAG
def main():
parser = optparse.OptionParser()
- parser.add_option("--in", dest="in_dir")
- parser.add_option("--out", dest="out_dir")
- parser.add_option("--cache", dest="cache")
+ parser.add_option('--in', dest='in_dir')
+ parser.add_option('--out', dest='out_dir')
+ parser.add_option('--cache', dest='cache')
(opts, _) = parser.parse_args()
if not _ValidateOpts(opts):
return 1
@@ -31,7 +29,7 @@ def main():
_DownloadSymbols(filename, opts.cache)
_PerfReport(filename, opts.in_dir, opts.out_dir, opts.cache)
except:
- print "Exception caught. Continuing..."
+ print 'Exception caught. Continuing...'
return 0
@@ -54,14 +52,14 @@ def _ParseFilename(filename, canonical=False):
If canonical is True, instead returns (database_key, board, canonical_vers)
canonical_vers includes the revision string.
"""
- key, time, board, vers = filename.split("~")
+ key, time, board, vers = filename.split('~')
if canonical:
vers = misc.GetChromeOSVersionFromLSBVersion(vers)
return (key, time, board, vers)
def _FormReleaseDir(board, version):
- return "%s-release~%s" % (board, version)
+ return '%s-release~%s' % (board, version)
def _DownloadSymbols(filename, cache):
@@ -70,27 +68,27 @@ def _DownloadSymbols(filename, cache):
named like cache/$board-release~$canonical_vers/usr/lib/debug
"""
_, _, board, vers = _ParseFilename(filename, canonical=True)
- tmp_suffix = ".tmp"
+ tmp_suffix = '.tmp'
tarball_subdir = _FormReleaseDir(board, vers)
tarball_dir = os.path.join(cache, tarball_subdir)
- tarball_path = os.path.join(tarball_dir, "debug.tgz")
+ tarball_path = os.path.join(tarball_dir, 'debug.tgz')
- symbol_subdir = os.path.join("usr", "lib")
+ symbol_subdir = os.path.join('usr', 'lib')
symbol_dir = os.path.join(tarball_dir, symbol_subdir)
if os.path.isdir(symbol_dir):
- print "Symbol directory %s exists, skipping download." % symbol_dir
+ print 'Symbol directory %s exists, skipping download.' % symbol_dir
return
else:
# First download using gsutil.
if not os.path.isfile(tarball_path):
download_cmd = GSUTIL_CMD % (board, vers, tarball_path + tmp_suffix)
- print "Downloading symbols for %s" % filename
+ print 'Downloading symbols for %s' % filename
print download_cmd
ret = call(download_cmd.split())
if ret != 0:
- print "gsutil returned non-zero error code: %s." % ret
+ print 'gsutil returned non-zero error code: %s.' % ret
# Clean up the empty directory structures.
os.remove(tarball_path + tmp_suffix)
raise IOError
@@ -100,11 +98,11 @@ def _DownloadSymbols(filename, cache):
# Next, untar the tarball.
os.makedirs(symbol_dir + tmp_suffix)
extract_cmd = TAR_CMD % (tarball_path, symbol_dir + tmp_suffix)
- print "Extracting symbols for %s" % filename
+ print 'Extracting symbols for %s' % filename
print extract_cmd
ret = call(extract_cmd.split())
if ret != 0:
- print "tar returned non-zero code: %s." % ret
+ print 'tar returned non-zero code: %s.' % ret
raise IOError
shutil.move(symbol_dir + tmp_suffix, symbol_dir)
os.remove(tarball_path)
@@ -119,13 +117,13 @@ def _PerfReport(filename, in_dir, out_dir, cache):
input_file = os.path.join(in_dir, filename)
symfs = os.path.join(cache, symbol_cache_tld)
report_cmd = PERF_CMD % (input_file, symfs)
- print "Reporting."
+ print 'Reporting.'
print report_cmd
report_proc = Popen(report_cmd.split(), stdout=PIPE)
- outfile = open(os.path.join(out_dir, filename), "w")
+ outfile = open(os.path.join(out_dir, filename), 'w')
outfile.write(report_proc.stdout.read())
outfile.close()
-if __name__ == "__main__":
+if __name__ == '__main__':
exit(main())
diff --git a/cwp/performance/experiment_gen.py b/cwp/performance/experiment_gen.py
index 7752c11e..a12da2c5 100644
--- a/cwp/performance/experiment_gen.py
+++ b/cwp/performance/experiment_gen.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright 2012 Google Inc. All Rights Reserved.
"""This script generates a crosperf overhead-testing experiment file for MoreJS.
@@ -35,29 +34,53 @@ default {
def main():
parser = optparse.OptionParser()
- parser.add_option('--crosperf', dest='crosperf_root', action='store',
+ parser.add_option('--crosperf',
+ dest='crosperf_root',
+ action='store',
default='/home/mrdmnd/depot2/crosperf',
help='Crosperf root directory.')
- parser.add_option('--chromeos_root', dest='chromeos_root', action='store',
+ parser.add_option('--chromeos_root',
+ dest='chromeos_root',
+ action='store',
default='/home/mrdmnd/chromiumos',
help='ChromiumOS root directory.')
- parser.add_option('--remote', dest='remote', action='store',
+ parser.add_option('--remote',
+ dest='remote',
+ action='store',
help='Host to run test on. Required.')
- parser.add_option('--board', dest='board', action='store',
+ parser.add_option('--board',
+ dest='board',
+ action='store',
help='Board architecture to run on. Required.')
- parser.add_option('--event', dest='event', action='store',
+ parser.add_option('--event',
+ dest='event',
+ action='store',
help='Event to profile. Required.')
- parser.add_option('-F', dest='sampling_frequencies', action='append',
+ parser.add_option('-F',
+ dest='sampling_frequencies',
+ action='append',
help='A target frequency to sample at.')
- parser.add_option('-c', dest='sampling_periods', action='append',
+ parser.add_option('-c',
+ dest='sampling_periods',
+ action='append',
help='A target period to sample at. Event specific.')
- parser.add_option('--benchmark-iterations', dest='benchmark_iterations',
- action='store', default=4, help='Number of benchmark iters')
- parser.add_option('--test-iterations', dest='test_iterations',
- action='store', default=10, help='Number of test iters')
- parser.add_option('-p', dest='print_only', action='store_true',
+ parser.add_option('--benchmark-iterations',
+ dest='benchmark_iterations',
+ action='store',
+ default=4,
+ help='Number of benchmark iters')
+ parser.add_option('--test-iterations',
+ dest='test_iterations',
+ action='store',
+ default=10,
+ help='Number of test iters')
+ parser.add_option('-p',
+ dest='print_only',
+ action='store_true',
help='If enabled, will print experiment file and exit.')
- parser.add_option('--perf_options', dest='perf_options', action='store',
+ parser.add_option('--perf_options',
+ dest='perf_options',
+ action='store',
help='Arbitrary flags to perf. Surround with dblquotes.')
options = parser.parse_args()[0]
if options.remote is None:
@@ -85,16 +108,14 @@ def main():
for freq in options.sampling_frequencies:
test_string = str(freq) + 'Freq'
experiment_file += EXPERIMENT % (test_string, bench_iters, test_iters,
- '-F %s' % freq,
- '' if perf_opts is None else perf_opts,
- event)
+ '-F %s' % freq, '' if perf_opts is None
+ else perf_opts, event)
if options.sampling_periods:
for period in options.sampling_periods:
test_string = str(period) + 'Period'
- experiment_file += EXPERIMENT % (test_string, bench_iters, test_iters,
- '-c %s' % period,
- '' if perf_opts is None else perf_opts,
- event)
+ experiment_file += EXPERIMENT % (
+ test_string, bench_iters, test_iters, '-c %s' % period, '' if
+ perf_opts is None else perf_opts, event)
# Point to the target image.
experiment_file += DEFAULT_IMAGE % (chromeos_root, board)
if options.print_only:
@@ -102,7 +123,7 @@ def main():
else:
current_time = int(round(time.time() * 1000))
file_name = 'perf_overhead_%s' % str(current_time)
- with open(file_name, "w") as f:
+ with open(file_name, 'w') as f:
f.write(experiment_file)
try:
process = subprocess.Popen(['%s/crosperf' % crosperf_root, file_name])
@@ -112,5 +133,6 @@ def main():
return 1
return 0
+
if __name__ == '__main__':
exit(main())
diff --git a/dejagnu/__init__.py b/dejagnu/__init__.py
index e69de29b..8b137891 100644
--- a/dejagnu/__init__.py
+++ b/dejagnu/__init__.py
@@ -0,0 +1 @@
+
diff --git a/dejagnu/gdb_dejagnu.py b/dejagnu/gdb_dejagnu.py
index 1b8340b8..1192fc0b 100755
--- a/dejagnu/gdb_dejagnu.py
+++ b/dejagnu/gdb_dejagnu.py
@@ -3,7 +3,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""The gdb dejagnu test wrapper."""
import optparse
import os
@@ -21,8 +20,8 @@ from utils import misc
from run_dejagnu import TryAcquireMachine
-_VALID_TEST_RESULTS = ['FAIL', 'UNRESOLVED', 'XPASS',
- 'ERROR', 'UNSUPPORTED', 'PASS']
+_VALID_TEST_RESULTS = ['FAIL', 'UNRESOLVED', 'XPASS', 'ERROR', 'UNSUPPORTED',
+ 'PASS']
def ProcessArguments(argv):
@@ -31,9 +30,13 @@ def ProcessArguments(argv):
'Launches gdb dejagnu test in chroot for chromeos toolchain, compares '
'the test result with a repository baseline and prints out the result.'),
usage='run_dejagnu options')
- parser.add_option('-c', '--chromeos_root', dest='chromeos_root',
+ parser.add_option('-c',
+ '--chromeos_root',
+ dest='chromeos_root',
help='Required. Specify chromeos root')
- parser.add_option('-m', '--mount', dest='mount',
+ parser.add_option('-m',
+ '--mount',
+ dest='mount',
help=('Specify gdb source to mount instead of "auto". '
'Under "auto" mode, which is the default - gdb is '
'checked out and built automatically at default '
@@ -41,12 +44,18 @@ def ProcessArguments(argv):
'- the gdb_source is set to "$chromeos_'
'root/chroot/usr/local/toolchain_root/gdb", which is '
'the mount point for this option value.'))
- parser.add_option('-b', '--board', dest='board',
+ parser.add_option('-b',
+ '--board',
+ dest='board',
help=('Required. Specify board.'))
- parser.add_option('-r', '--remote', dest='remote',
+ parser.add_option('-r',
+ '--remote',
+ dest='remote',
help=('Required. Specify addresses/names of the board, '
'seperate each address/name using comma(\',\').'))
- parser.add_option('--cleanup', dest='cleanup', default=None,
+ parser.add_option('--cleanup',
+ dest='cleanup',
+ default=None,
help=('Optional. Values to this option could be '
'\'chroot\' (delete chroot) and '
'\'chromeos\' (delete the whole chromeos tree).'))
@@ -61,9 +70,9 @@ def ProcessArguments(argv):
raise Exception('Missing argument for --board.')
if options.cleanup == 'mount' and not options.mount:
raise Exception('--cleanup=\'mount\' not valid unless --mount is given.')
- if options.cleanup and not (
- options.cleanup == 'mount' or
- options.cleanup == 'chroot' or options.cleanup == 'chromeos'):
+ if options.cleanup and not (options.cleanup == 'mount' or
+ options.cleanup == 'chroot' or
+ options.cleanup == 'chromeos'):
raise Exception('Invalid option value for --cleanup')
return options
@@ -88,8 +97,7 @@ class DejagnuExecuter(object):
self._base_dir = base_dir
self._tmp_abs = None
self._cleanup = cleanup
- self._sshflag = ('-o StrictHostKeyChecking=no ' +
- '-o CheckHostIP=no ' +
+ self._sshflag = ('-o StrictHostKeyChecking=no ' + '-o CheckHostIP=no ' +
'-o UserKnownHostsFile=$(mktemp) ')
if source_dir:
@@ -101,18 +109,19 @@ class DejagnuExecuter(object):
self._mount_flag = ''
def SetupTestingDir(self):
- self._tmp_abs = tempfile.mkdtemp(prefix='dejagnu_', dir=path.join(
- self._chromeos_chroot, 'tmp'))
+ self._tmp_abs = tempfile.mkdtemp(
+ prefix='dejagnu_',
+ dir=path.join(self._chromeos_chroot, 'tmp'))
self._tmp = self._tmp_abs[len(self._chromeos_chroot):]
self._tmp_testing_rsa = path.join(self._tmp, 'testing_rsa')
self._tmp_testing_rsa_abs = path.join(self._tmp_abs, 'testing_rsa')
def PrepareTestingRsaKeys(self):
if not path.isfile(self._tmp_testing_rsa_abs):
- shutil.copy(path.join(
- self._chromeos_root,
- 'src/scripts/mod_for_test_scripts/ssh_keys/testing_rsa'),
- self._tmp_testing_rsa_abs)
+ shutil.copy(
+ path.join(self._chromeos_root,
+ 'src/scripts/mod_for_test_scripts/ssh_keys/testing_rsa'),
+ self._tmp_testing_rsa_abs)
os.chmod(self._tmp_testing_rsa_abs, stat.S_IRUSR)
def PrepareTestFiles(self):
@@ -128,7 +137,8 @@ class DejagnuExecuter(object):
'__boardname__': self._board,
'__board_hostname__': self._remote,
'__tmp_testing_rsa__': self._tmp_testing_rsa,
- '__tmp_dir__': self._tmp})
+ '__tmp_dir__': self._tmp
+ })
for pat, sub in substitutions.items():
content = content.replace(pat, sub)
@@ -146,7 +156,8 @@ class DejagnuExecuter(object):
substitutions = dict({
'__board_hostname__': self._remote,
'__tmp_testing_rsa__': self._tmp_testing_rsa,
- '__tmp_dir__': self._tmp})
+ '__tmp_dir__': self._tmp
+ })
for pat, sub in substitutions.items():
content = content.replace(pat, sub)
@@ -162,8 +173,7 @@ class DejagnuExecuter(object):
def PrepareGdbDefault(self):
ret = self._executer.ChrootRunCommandWOutput(
- self._chromeos_root,
- 'equery w cross-%s/gdb' % self._target)[1]
+ self._chromeos_root, 'equery w cross-%s/gdb' % self._target)[1]
ret = path.basename(ret.strip())
matcher = re.match(r'(.*).ebuild', ret)
@@ -172,13 +182,12 @@ class DejagnuExecuter(object):
else:
raise Exception('Failed to get gdb reversion.')
gdb_version = gdb_reversion.split('-r')[0]
- gdb_portage_dir = '/var/tmp/portage/cross-%s/%s/work' % (
- self._target, gdb_reversion)
+ gdb_portage_dir = '/var/tmp/portage/cross-%s/%s/work' % (self._target,
+ gdb_reversion)
self._gdb_source_dir = path.join(gdb_portage_dir, gdb_version)
- ret = self._executer.ChrootRunCommand(
- self._chromeos_root,
- ('sudo %s ebuild $(equery w cross-%s/gdb) clean compile' % (
+ ret = self._executer.ChrootRunCommand(self._chromeos_root, (
+ 'sudo %s ebuild $(equery w cross-%s/gdb) clean compile' % (
self._mount_flag, self._target)))
if ret:
raise Exception('ebuild gdb failed.')
@@ -189,19 +198,18 @@ class DejagnuExecuter(object):
def PrepareGdbserverDefault(self):
cmd = ('./setup_board --board {0}; '
'{1} emerge-{0} gdb'.format(self._board, self._mount_flag))
- ret = self._executer.ChrootRunCommand(
- self._chromeos_root,
- cmd, print_to_console=True)
+ ret = self._executer.ChrootRunCommand(self._chromeos_root,
+ cmd,
+ print_to_console=True)
if ret:
raise Exception('ebuild gdbserver failed.')
cmd = ('scp -i {0} {1} '
- '/build/{2}/usr/bin/gdbserver root@{3}:/usr/local/bin/'
- .format(self._tmp_testing_rsa, self._sshflag,
- self._board, self._remote))
- ret = self._executer.ChrootRunCommand(
- self._chromeos_root,
- cmd, print_to_console=True)
+ '/build/{2}/usr/bin/gdbserver root@{3}:/usr/local/bin/'.format(
+ self._tmp_testing_rsa, self._sshflag, self._board, self._remote))
+ ret = self._executer.ChrootRunCommand(self._chromeos_root,
+ cmd,
+ print_to_console=True)
if ret:
raise Exception('copy gdbserver failed.')
@@ -235,25 +243,19 @@ class DejagnuExecuter(object):
def MakeCheck(self):
cmd = ('ssh -i {0} {1} root@{2} "reboot && exit"'
- .format(self._tmp_testing_rsa, self._sshflag,
- self._remote))
- self._executer.ChrootRunCommand(
- self._chromeos_root, cmd)
+ .format(self._tmp_testing_rsa, self._sshflag, self._remote))
+ self._executer.ChrootRunCommand(self._chromeos_root, cmd)
time.sleep(40)
cmd = ('ssh -i {0} {1} root@{2} '
- '"iptables -A INPUT -p tcp --dport 1234 -j ACCEPT"'
- .format(self._tmp_testing_rsa, self._sshflag,
- self._remote))
- self._executer.ChrootRunCommand(
- self._chromeos_root, cmd)
+ '"iptables -A INPUT -p tcp --dport 1234 -j ACCEPT"'.format(
+ self._tmp_testing_rsa, self._sshflag, self._remote))
+ self._executer.ChrootRunCommand(self._chromeos_root, cmd)
cmd = ('cd %s ; '
- 'DEJAGNU=%s make check' %
- (path.join(self._gdb_source_dir, 'gdb'),
- path.join(self._tmp, 'site.exp')))
- ret = self._executer.ChrootRunCommand(
- self._chromeos_root, cmd)
+ 'DEJAGNU=%s make check' % (path.join(self._gdb_source_dir, 'gdb'),
+ path.join(self._tmp, 'site.exp')))
+ ret = self._executer.ChrootRunCommand(self._chromeos_root, cmd)
if ret:
raise Exception('Make check failed.')
@@ -265,10 +267,9 @@ class DejagnuExecuter(object):
else:
mount = '-m'
cmd = ('python {0} --chromeos_root={1} '
- '--gdb_dir={2} --board={3} {4}'
- .format(script, self._chromeos_root,
- self._source_dir, self._board,
- mount))
+ '--gdb_dir={2} --board={3} {4}'.format(script, self._chromeos_root,
+ self._source_dir, self._board,
+ mount))
rv = self._executer.RunCommand(cmd)
if rv:
raise Exception('Mount source failed.')
@@ -287,10 +288,9 @@ class DejagnuExecuter(object):
return result
def PrepareResult(self):
- test_output = os.path.join(self._gdb_source_dir, 'gdb',
- 'testsuite', 'gdb.sum')
- test_output = misc.GetOutsideChrootPath(self._chromeos_root,
- test_output)
+ test_output = os.path.join(self._gdb_source_dir, 'gdb', 'testsuite',
+ 'gdb.sum')
+ test_output = misc.GetOutsideChrootPath(self._chromeos_root, test_output)
base_output = os.path.join(self._base_dir, 'gdb_baseline', self._target)
self.test_result = self.ParseResult(test_output)
@@ -318,11 +318,9 @@ class DejagnuExecuter(object):
def Main(argv):
opts = ProcessArguments(argv)
available_machine = TryAcquireMachine(opts.remote)
- executer = DejagnuExecuter(misc.GetRoot(argv[0])[0],
- opts.mount, opts.chromeos_root,
- available_machine._name,
- opts.board,
- opts.cleanup)
+ executer = DejagnuExecuter(
+ misc.GetRoot(argv[0])[0], opts.mount, opts.chromeos_root,
+ available_machine._name, opts.board, opts.cleanup)
# Return value is a 3- or 4-element tuple
# element#1 - exit code
# element#2 - stdout
@@ -353,6 +351,7 @@ def Main(argv):
executer.Cleanup()
return ret
+
if __name__ == '__main__':
retval = Main(sys.argv)[0]
sys.exit(retval)
diff --git a/dejagnu/run_dejagnu.py b/dejagnu/run_dejagnu.py
index 5506d0c0..1aea31a7 100755
--- a/dejagnu/run_dejagnu.py
+++ b/dejagnu/run_dejagnu.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
-
"""Tool script for auto dejagnu."""
__author__ = 'shenhan@google.com (Han Shen)'
@@ -31,9 +30,13 @@ def ProcessArguments(argv):
'Launches gcc dejagnu test in chroot for chromeos toolchain, compares '
'the test result with a repository baseline and prints out the result.'),
usage='run_dejagnu options')
- parser.add_option('-c', '--chromeos_root', dest='chromeos_root',
+ parser.add_option('-c',
+ '--chromeos_root',
+ dest='chromeos_root',
help='Required. Specify chromeos root')
- parser.add_option('-m', '--mount', dest='mount',
+ parser.add_option('-m',
+ '--mount',
+ dest='mount',
help=('Specify gcc source to mount instead of "auto". '
'Under "auto" mode, which is the default - gcc is '
'checked out and built automatically at default '
@@ -45,25 +48,39 @@ def ProcessArguments(argv):
'"${gcc_source_dir}-build-${ctarget}". In this mode, '
'a complete gcc build must be performed in the '
'computed gcc-build-dir beforehand.'))
- parser.add_option('-b', '--board', dest='board',
+ parser.add_option('-b',
+ '--board',
+ dest='board',
help=('Required. Specify board.'))
- parser.add_option('-r', '--remote', dest='remote',
+ parser.add_option('-r',
+ '--remote',
+ dest='remote',
help=('Required. Specify addresses/names of the board, '
'seperate each address/name using comma(\',\').'))
- parser.add_option('-f', '--flags', dest='flags',
+ parser.add_option('-f',
+ '--flags',
+ dest='flags',
help='Optional. Extra run test flags to pass to dejagnu.')
- parser.add_option('-k', '--keep', dest='keep_intermediate_files',
- action='store_true', default=False,
+ parser.add_option('-k',
+ '--keep',
+ dest='keep_intermediate_files',
+ action='store_true',
+ default=False,
help=('Optional. Default to false. Do not remove dejagnu '
'intermediate files after test run.'))
- parser.add_option('--cleanup', dest='cleanup', default=None,
+ parser.add_option('--cleanup',
+ dest='cleanup',
+ default=None,
help=('Optional. Values to this option could be '
'\'mount\' (unmount gcc source and '
'directory directory, '
'only valid when --mount is given), '
'\'chroot\' (delete chroot) and '
'\'chromeos\' (delete the whole chromeos tree).'))
- parser.add_option('-t', '--tools', dest='tools', default='gcc,g++',
+ parser.add_option('-t',
+ '--tools',
+ dest='tools',
+ default='gcc,g++',
help=('Optional. Specify which tools to check, using '
'","(comma) as separator. A typical value would be '
'"g++" so that only g++ tests are performed. '
@@ -92,8 +109,8 @@ def ProcessArguments(argv):
class DejagnuExecuter(object):
"""The class wrapper for dejagnu test executer."""
- def __init__(self, base_dir, mount, chromeos_root, remote, board,
- flags, keep_intermediate_files, tools, cleanup):
+ def __init__(self, base_dir, mount, chromeos_root, remote, board, flags,
+ keep_intermediate_files, tools, cleanup):
self._l = logger.GetLogger()
self._chromeos_root = chromeos_root
self._chromeos_chroot = path.join(chromeos_root, 'chroot')
@@ -118,8 +135,9 @@ class DejagnuExecuter(object):
self._cleanup = cleanup
def SetupTestingDir(self):
- self._tmp_abs = tempfile.mkdtemp(prefix='dejagnu_', dir=path.join(
- self._chromeos_chroot, 'tmp'))
+ self._tmp_abs = tempfile.mkdtemp(
+ prefix='dejagnu_',
+ dir=path.join(self._chromeos_chroot, 'tmp'))
self._tmp = self._tmp_abs[len(self._chromeos_chroot):]
self._tmp_testing_rsa = path.join(self._tmp, 'testing_rsa')
self._tmp_testing_rsa_abs = path.join(self._tmp_abs, 'testing_rsa')
@@ -131,15 +149,15 @@ class DejagnuExecuter(object):
if self._tmp_abs and path.isdir(self._tmp_abs):
if self._keep_intermediate_files:
self._l.LogOutput(
- 'Your intermediate dejagnu files are kept, you can re-run '
- 'inside chroot the command:')
+ 'Your intermediate dejagnu files are kept, you can re-run '
+ 'inside chroot the command:')
self._l.LogOutput(
' DEJAGNU={0} make -C {1} {2} RUNTESTFLAGS="--target_board={3} {4}"' \
.format(path.join(self._tmp, 'site.exp'), self._gcc_build_dir,
self.MakeCheckString(), self._board, self._flags))
else:
- self._l.LogOutput(
- '[Cleanup] - Removing temp dir - {0}'.format(self._tmp_abs))
+ self._l.LogOutput('[Cleanup] - Removing temp dir - {0}'.format(
+ self._tmp_abs))
shutil.rmtree(self._tmp_abs)
def Cleanup(self):
@@ -158,20 +176,20 @@ class DejagnuExecuter(object):
if self._cleanup == 'chroot' or self._cleanup == 'chromeos':
self._l.LogOutput('[Cleanup]: Deleting chroot inside \'{0}\''.format(
- self._chromeos_root))
- command = "cd %s; cros_sdk --delete" % self._chromeos_root
+ self._chromeos_root))
+ command = 'cd %s; cros_sdk --delete' % self._chromeos_root
rv = self._executer.RunCommand(command)
if rv:
self._l.LogWarning('Warning - failed to delete chroot.')
# Delete .cache - crosbug.com/34956
- command = "sudo rm -fr %s" % os.path.join(self._chromeos_root, ".cache")
+ command = 'sudo rm -fr %s' % os.path.join(self._chromeos_root, '.cache')
rv = self._executer.RunCommand(command)
if rv:
self._l.LogWarning('Warning - failed to delete \'.cache\'.')
if self._cleanup == 'chromeos':
self._l.LogOutput('[Cleanup]: Deleting chromeos tree \'{0}\' ...'.format(
- self._chromeos_root))
+ self._chromeos_root))
command = 'rm -fr {0}'.format(self._chromeos_root)
rv = self._executer.RunCommand(command)
if rv:
@@ -179,10 +197,10 @@ class DejagnuExecuter(object):
def PrepareTestingRsaKeys(self):
if not path.isfile(self._tmp_testing_rsa_abs):
- shutil.copy(path.join(
- self._chromeos_root,
- 'src/scripts/mod_for_test_scripts/ssh_keys/testing_rsa'),
- self._tmp_testing_rsa_abs)
+ shutil.copy(
+ path.join(self._chromeos_root,
+ 'src/scripts/mod_for_test_scripts/ssh_keys/testing_rsa'),
+ self._tmp_testing_rsa_abs)
os.chmod(self._tmp_testing_rsa_abs, stat.S_IRUSR)
def PrepareTestFiles(self):
@@ -197,7 +215,8 @@ class DejagnuExecuter(object):
'__boardname__': self._board,
'__board_hostname__': self._remote,
'__tmp_testing_rsa__': self._tmp_testing_rsa,
- '__tmp_dir__': self._tmp})
+ '__tmp_dir__': self._tmp
+ })
for pat, sub in substitutions.items():
content = content.replace(pat, sub)
@@ -233,9 +252,9 @@ class DejagnuExecuter(object):
'sudo mkdir -p {0}'.format(self._gcc_source_dir_abs)):
raise Exception("Failed to create \'{0}\' inside chroot.".format(
self._gcc_source_dir))
- if not (path.isdir(self._gcc_source_dir_to_mount) and path.isdir(
- path.join(self._gcc_source_dir_to_mount, 'gcc'))):
- raise Exception("{0} is not a valid gcc source tree.".format(
+ if not (path.isdir(self._gcc_source_dir_to_mount) and
+ path.isdir(path.join(self._gcc_source_dir_to_mount, 'gcc'))):
+ raise Exception('{0} is not a valid gcc source tree.'.format(
self._gcc_source_dir_to_mount))
# We have these build directories -
@@ -254,7 +273,7 @@ class DejagnuExecuter(object):
self._gcc_build_dir_to_mount = '{0}-build-{1}'.format(
self._gcc_source_dir_to_mount, self._target)
self._gcc_top_build_dir_abs = path.join(self._chromeos_chroot,
- self._gcc_top_build_dir.lstrip('/'))
+ self._gcc_top_build_dir.lstrip('/'))
if not path.isdir(self._gcc_top_build_dir_abs) and \
self._executer.RunCommand(
'sudo mkdir -p {0}'.format(self._gcc_top_build_dir_abs)):
@@ -271,8 +290,7 @@ class DejagnuExecuter(object):
def PrepareGccDefault(self):
"""Auto emerging gcc for building purpose only."""
ret = self._executer.ChrootRunCommandWOutput(
- self._chromeos_root,
- 'equery w cross-%s/gcc' % self._target)[1]
+ self._chromeos_root, 'equery w cross-%s/gcc' % self._target)[1]
ret = path.basename(ret.strip())
# ret is expected to be something like 'gcc-4.6.2-r11.ebuild' or
# 'gcc-9999.ebuild' parse it.
@@ -285,19 +303,18 @@ class DejagnuExecuter(object):
else:
raise Exception('Failed to get gcc version.')
- gcc_portage_dir = '/var/tmp/portage/cross-%s/%s/work' % (
- self._target, gccrevision)
+ gcc_portage_dir = '/var/tmp/portage/cross-%s/%s/work' % (self._target,
+ gccrevision)
self._gcc_source_dir = path.join(gcc_portage_dir, gccversion)
self._gcc_top_build_dir = (gcc_portage_dir + '/%s-build-%s') % (
gccversion, self._target)
self._gcc_build_dir = path.join(self._gcc_top_build_dir, 'gcc')
- gcc_build_dir_abs = path.join(
- self._chromeos_root, 'chroot', self._gcc_build_dir.lstrip('/'))
+ gcc_build_dir_abs = path.join(self._chromeos_root, 'chroot',
+ self._gcc_build_dir.lstrip('/'))
if not path.isdir(gcc_build_dir_abs):
- ret = self._executer.ChrootRunCommand(
- self._chromeos_root,
- ('ebuild $(equery w cross-%s/gcc) clean prepare compile' % (
- self._target)))
+ ret = self._executer.ChrootRunCommand(self._chromeos_root, (
+ 'ebuild $(equery w cross-%s/gcc) clean prepare compile' % (
+ self._target)))
if ret:
raise Exception('ebuild gcc failed.')
@@ -313,11 +330,10 @@ class DejagnuExecuter(object):
validate_failures_py = path.join(
self._gcc_source_dir,
'contrib/testsuite-management/validate_failures.py')
- cmd = 'cd {0} ; {1} --build_dir={0}'.format(
- self._gcc_top_build_dir, validate_failures_py)
+ cmd = 'cd {0} ; {1} --build_dir={0}'.format(self._gcc_top_build_dir,
+ validate_failures_py)
self.MountGccSourceAndBuildDir()
- ret = self._executer.ChrootRunCommandWOutput(
- self._chromeos_root, cmd)
+ ret = self._executer.ChrootRunCommandWOutput(self._chromeos_root, cmd)
if ret[0] != 0:
self._l.LogWarning('*** validate_failures.py exited with non-zero code,'
'please run it manually inside chroot - \n'
@@ -326,12 +342,12 @@ class DejagnuExecuter(object):
# This method ensures necessary mount points before executing chroot comamnd.
def MountGccSourceAndBuildDir(self, unmount=False):
- mount_points = [tc_enter_chroot.MountPoint(
- self._gcc_source_dir_to_mount, self._gcc_source_dir_abs,
- getpass.getuser(), "ro"),
- tc_enter_chroot.MountPoint(
- self._gcc_build_dir_to_mount, self._gcc_top_build_dir_abs,
- getpass.getuser(), "rw"),]
+ mount_points = [tc_enter_chroot.MountPoint(self._gcc_source_dir_to_mount,
+ self._gcc_source_dir_abs,
+ getpass.getuser(), 'ro'),
+ tc_enter_chroot.MountPoint(self._gcc_build_dir_to_mount,
+ self._gcc_top_build_dir_abs,
+ getpass.getuser(), 'rw')]
for mp in mount_points:
if unmount:
if mp.UnMount():
@@ -339,13 +355,14 @@ class DejagnuExecuter(object):
else:
self._l.LogOutput('{0} unmounted successfully.'.format(mp.mount_dir))
elif mp.DoMount():
- raise Exception('Failed to mount {0} onto {1}'.format(
- mp.external_dir, mp.mount_dir))
+ raise Exception('Failed to mount {0} onto {1}'.format(mp.external_dir,
+ mp.mount_dir))
else:
self._l.LogOutput('{0} mounted successfully.'.format(mp.mount_dir))
# The end of class DejagnuExecuter
+
def TryAcquireMachine(remotes):
available_machine = None
for r in remotes.split(','):
@@ -355,21 +372,20 @@ def TryAcquireMachine(remotes):
break
else:
logger.GetLogger().LogWarning(
- '*** Failed to lock machine \'{0}\'.'.format(r))
+ '*** Failed to lock machine \'{0}\'.'.format(r))
if not available_machine:
- raise Exception(
- "Failed to acquire one machine from \'{0}\'.".format(remotes))
+ raise Exception("Failed to acquire one machine from \'{0}\'.".format(
+ remotes))
return available_machine
+
def Main(argv):
opts = ProcessArguments(argv)
available_machine = TryAcquireMachine(opts.remote)
- executer = DejagnuExecuter(misc.GetRoot(argv[0])[0],
- opts.mount, opts.chromeos_root,
- available_machine._name,
- opts.board, opts.flags,
- opts.keep_intermediate_files, opts.tools,
- opts.cleanup)
+ executer = DejagnuExecuter(
+ misc.GetRoot(argv[0])[0], opts.mount, opts.chromeos_root,
+ available_machine._name, opts.board, opts.flags,
+ opts.keep_intermediate_files, opts.tools, opts.cleanup)
# Return value is a 3- or 4-element tuple
# element#1 - exit code
# element#2 - stdout
@@ -395,6 +411,7 @@ def Main(argv):
executer.Cleanup()
return ret
+
if __name__ == '__main__':
retval = Main(sys.argv)[0]
sys.exit(retval)
diff --git a/fdo_scripts/divide_and_merge_profiles.py b/fdo_scripts/divide_and_merge_profiles.py
index e750a626..390ad54a 100755
--- a/fdo_scripts/divide_and_merge_profiles.py
+++ b/fdo_scripts/divide_and_merge_profiles.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""Script to divide and merge profiles."""
import copy
@@ -20,6 +19,7 @@ from utils import logger
class ProfileMerger:
+
def __init__(self, inputs, output, chunk_size, merge_program, multipliers):
self._inputs = inputs
self._output = output
@@ -31,13 +31,14 @@ class ProfileMerger:
def _GetFilesSetForInputDir(self, input_dir):
output_file = tempfile.mktemp()
- command = "find %s -name '*.gcda' -o -name '*.imports' > %s" % (input_dir, output_file)
+ command = "find %s -name '*.gcda' -o -name '*.imports' > %s" % (input_dir,
+ output_file)
self._ce.RunCommand(command)
- files = open(output_file, "r").read()
+ files = open(output_file, 'r').read()
files_set = set([])
for f in files.splitlines():
- stripped_file = f.replace(input_dir, "", 1)
- stripped_file = stripped_file.lstrip("/")
+ stripped_file = f.replace(input_dir, '', 1)
+ stripped_file = stripped_file.lstrip('/')
files_set.add(stripped_file)
return files_set
@@ -60,9 +61,9 @@ class ProfileMerger:
src_file = os.path.join(input_dir, f)
dst_file = os.path.join(output_dir, f)
if not os.path.isdir(os.path.dirname(dst_file)):
- command = "mkdir -p %s" % os.path.dirname(dst_file)
+ command = 'mkdir -p %s' % os.path.dirname(dst_file)
self._ce.RunCommand(command)
- command = "cp %s %s" % (src_file, dst_file)
+ command = 'cp %s %s' % (src_file, dst_file)
self._ce.RunCommand(command)
def _DoChunkMerge(self, current_files):
@@ -72,17 +73,14 @@ class ProfileMerger:
temp_dirs.append(temp_dir)
self._CopyFilesTree(i, current_files, temp_dir)
# Now do the merge.
- command = ("%s --inputs=%s --output=%s" %
- (self._merge_program,
- ",".join(temp_dirs),
- self._output))
+ command = ('%s --inputs=%s --output=%s' %
+ (self._merge_program, ','.join(temp_dirs), self._output))
if self._multipliers:
- command = ("%s --multipliers=%s" %
- (command, self._multipliers))
+ command = ('%s --multipliers=%s' % (command, self._multipliers))
ret = self._ce.RunCommand(command)
- assert ret == 0, "%s command failed!" % command
+ assert ret == 0, '%s command failed!' % command
for temp_dir in temp_dirs:
- command = "rm -rf %s" % temp_dir
+ command = 'rm -rf %s' % temp_dir
self._ce.RunCommand(command)
def DoMerge(self):
@@ -97,49 +95,46 @@ class ProfileMerger:
def Main(argv):
"""The main function."""
# Common initializations
-### command_executer.InitCommandExecuter(True)
+ ### command_executer.InitCommandExecuter(True)
command_executer.InitCommandExecuter()
l = logger.GetLogger()
ce = command_executer.GetCommandExecuter()
parser = optparse.OptionParser()
- parser.add_option("--inputs",
- dest="inputs",
- help="Comma-separated input profile directories to merge.")
- parser.add_option("--output",
- dest="output",
- help="Output profile directory.")
- parser.add_option("--chunk_size",
- dest="chunk_size",
- default="50",
- help="Chunk size to divide up the profiles into.")
- parser.add_option("--merge_program",
- dest="merge_program",
- default="/home/xur/bin/profile_merge_v15.par",
- help="Merge program to use to do the actual merge.")
- parser.add_option("--multipliers",
- dest="multipliers",
- help="multipliers to use when merging. (optional)")
+ parser.add_option('--inputs',
+ dest='inputs',
+ help='Comma-separated input profile directories to merge.')
+ parser.add_option('--output', dest='output', help='Output profile directory.')
+ parser.add_option('--chunk_size',
+ dest='chunk_size',
+ default='50',
+ help='Chunk size to divide up the profiles into.')
+ parser.add_option('--merge_program',
+ dest='merge_program',
+ default='/home/xur/bin/profile_merge_v15.par',
+ help='Merge program to use to do the actual merge.')
+ parser.add_option('--multipliers',
+ dest='multipliers',
+ help='multipliers to use when merging. (optional)')
options, _ = parser.parse_args(argv)
- if not all([options.inputs,
- options.output,]):
- l.LogError("Must supply --inputs and --output")
+ if not all([options.inputs, options.output]):
+ l.LogError('Must supply --inputs and --output')
return 1
try:
- pm = ProfileMerger(options.inputs.split(","), options.output,
- int(options.chunk_size), options.merge_program,
- options.multipliers)
+ pm = ProfileMerger(
+ options.inputs.split(','), options.output, int(options.chunk_size),
+ options.merge_program, options.multipliers)
pm.DoMerge()
retval = 0
except:
retval = 1
finally:
- print "My work is done..."
+ print 'My work is done...'
return retval
-if __name__ == "__main__":
+if __name__ == '__main__':
retval = Main(sys.argv)
sys.exit(retval)
diff --git a/fdo_scripts/divide_and_merge_profiles_test.py b/fdo_scripts/divide_and_merge_profiles_test.py
index be76ad60..29fd6e57 100755
--- a/fdo_scripts/divide_and_merge_profiles_test.py
+++ b/fdo_scripts/divide_and_merge_profiles_test.py
@@ -2,7 +2,7 @@
#
# Copyright 2010 Google Inc. All Rights Reserved.
-__author__ = "asharif@google.com (Ahmad Sharif)"
+__author__ = 'asharif@google.com (Ahmad Sharif)'
import os
import random
@@ -15,6 +15,7 @@ from utils import misc
class DivideAndMergeProfilesTest(unittest.TestCase):
+
def tearDown(self):
shutil.rmtree(self._program_dir)
for profile_dir in self._profile_dirs:
@@ -26,19 +27,17 @@ class DivideAndMergeProfilesTest(unittest.TestCase):
self._writeProgram()
self._writeMakefile()
with misc.WorkingDirectory(self._program_dir):
- self._ce.RunCommand("make")
+ self._ce.RunCommand('make')
num_profile_dirs = 2
self._profile_dirs = []
for i in range(num_profile_dirs):
profile_dir = tempfile.mkdtemp()
- command = ("GCOV_PREFIX_STRIP=%s GCOV_PREFIX=$(/bin/pwd) "
- " %s/program" %
- (profile_dir.count("/"),
- self._program_dir))
+ command = ('GCOV_PREFIX_STRIP=%s GCOV_PREFIX=$(/bin/pwd) '
+ ' %s/program' % (profile_dir.count('/'), self._program_dir))
with misc.WorkingDirectory(profile_dir):
self._ce.RunCommand(command)
self._profile_dirs.append(profile_dir)
- self._merge_program = "/home/build/static/projects/crosstool/profile-merge/v14.5/profile_merge.par"
+ self._merge_program = '/home/build/static/projects/crosstool/profile-merge/v14.5/profile_merge.par'
def _writeMakefile(self):
makefile_contents = """
@@ -57,21 +56,22 @@ program: $(OBJS)
%.o: %.c
$(CC) -c -o $@ $^ $(CFLAGS)"""
- makefile = os.path.join(self._program_dir, "Makefile")
- with open(makefile, "w") as f:
+
+ makefile = os.path.join(self._program_dir, 'Makefile')
+ with open(makefile, 'w') as f:
print >> f, makefile_contents
def _writeProgram(self, num_files=100):
for i in range(num_files):
- current_file = os.path.join(self._program_dir, "%s.c" % i)
- with open(current_file, "w") as f:
+ current_file = os.path.join(self._program_dir, '%s.c' % i)
+ with open(current_file, 'w') as f:
if i != num_files - 1:
- print >> f, "extern void foo%s();" % (i + 1)
- print >> f, "void foo%s(){foo%s();}" % (i, i + 1)
+ print >> f, 'extern void foo%s();' % (i + 1)
+ print >> f, 'void foo%s(){foo%s();}' % (i, i + 1)
else:
print >> f, "void foo%s(){printf(\"\");}" % i
if i == 0:
- print >> f, "int main(){foo%s(); return 0;}" % i
+ print >> f, 'int main(){foo%s(); return 0;}' % i
def testMerge(self):
reference_output = self._getReferenceOutput()
@@ -83,33 +83,27 @@ program: $(OBJS)
self.assertTrue(ret == 0)
def _diffOutputs(self, reference, mine):
- command = "diff -uNr %s %s" % (reference, mine)
+ command = 'diff -uNr %s %s' % (reference, mine)
return self._ce.RunCommand(command)
- def _getMyOutput(self, args=""):
+ def _getMyOutput(self, args=''):
my_output = tempfile.mkdtemp()
- my_merge_program = os.path.join(os.path.dirname(__file__),
- "divide_and_merge_profiles.py")
- command = ("python %s --inputs=%s --output=%s "
- "--chunk_size=10 "
- "--merge_program=%s "
- "%s" %
- (my_merge_program,
- ",".join(self._profile_dirs),
- my_output,
- self._merge_program,
- args))
+ my_merge_program = os.path.join(
+ os.path.dirname(__file__), 'divide_and_merge_profiles.py')
+ command = ('python %s --inputs=%s --output=%s '
+ '--chunk_size=10 '
+ '--merge_program=%s '
+ '%s' % (my_merge_program, ','.join(self._profile_dirs),
+ my_output, self._merge_program, args))
self._ce.RunCommand(command)
return my_output
- def _getReferenceOutput(self, args=""):
+ def _getReferenceOutput(self, args=''):
# First do a regular merge.
reference_output = tempfile.mkdtemp()
- command = ("%s --inputs=%s --output=%s %s" %
- (self._merge_program,
- ",".join(self._profile_dirs),
- reference_output,
- args))
+ command = ('%s --inputs=%s --output=%s %s' %
+ (self._merge_program, ','.join(self._profile_dirs),
+ reference_output, args))
self._ce.RunCommand(command)
return reference_output
@@ -117,7 +111,7 @@ program: $(OBJS)
num_profiles = len(self._profile_dirs)
multipliers = [str(random.randint(0, num_profiles)) \
for _ in range(num_profiles)]
- args = "--multipliers=%s" % ",".join(multipliers)
+ args = '--multipliers=%s' % ','.join(multipliers)
reference_output = self._getReferenceOutput(args)
my_output = self._getMyOutput(args)
@@ -128,5 +122,6 @@ program: $(OBJS)
shutil.rmtree(reference_output)
self.assertTrue(ret == 0)
-if __name__ == "__main__":
+
+if __name__ == '__main__':
unittest.main()
diff --git a/fdo_scripts/profile_cycler.py b/fdo_scripts/profile_cycler.py
index 5651a9d2..0ffc1d90 100755
--- a/fdo_scripts/profile_cycler.py
+++ b/fdo_scripts/profile_cycler.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""Script to profile a page cycler, and get it back to the host."""
import copy
@@ -23,7 +22,7 @@ from utils import misc
class CyclerProfiler:
- REMOTE_TMP_DIR = "/tmp"
+ REMOTE_TMP_DIR = '/tmp'
def __init__(self, chromeos_root, board, cycler, profile_dir, remote):
self._chromeos_root = chromeos_root
@@ -34,24 +33,19 @@ class CyclerProfiler:
self._ce = command_executer.GetCommandExecuter()
self._l = logger.GetLogger()
- self._gcov_prefix = os.path.join(self.REMOTE_TMP_DIR,
- self._GetProfileDir())
+ self._gcov_prefix = os.path.join(self.REMOTE_TMP_DIR, self._GetProfileDir())
def _GetProfileDir(self):
return misc.GetCtargetFromBoard(self._board, self._chromeos_root)
def _CopyTestData(self):
- page_cycler_dir = os.path.join(self._chromeos_root,
- "distfiles",
- "target",
- "chrome-src-internal",
- "src",
- "data",
- "page_cycler")
+ page_cycler_dir = os.path.join(self._chromeos_root, 'distfiles', 'target',
+ 'chrome-src-internal', 'src', 'data',
+ 'page_cycler')
if not os.path.isdir(page_cycler_dir):
- raise Exception("Page cycler dir %s not found!" % page_cycler_dir)
+ raise Exception('Page cycler dir %s not found!' % page_cycler_dir)
self._ce.CopyFiles(page_cycler_dir,
- os.path.join(self.REMOTE_TMP_DIR, "page_cycler"),
+ os.path.join(self.REMOTE_TMP_DIR, 'page_cycler'),
dest_machine=self._remote,
chromeos_root=self._chromeos_root,
recursive=True,
@@ -59,13 +53,15 @@ class CyclerProfiler:
def _PrepareTestData(self):
# chmod files so everyone can read them.
- command = ("cd %s && find page_cycler -type f | xargs chmod a+r" %
+ command = ('cd %s && find page_cycler -type f | xargs chmod a+r' %
self.REMOTE_TMP_DIR)
- self._ce.CrosRunCommand(command, chromeos_root=self._chromeos_root,
+ self._ce.CrosRunCommand(command,
+ chromeos_root=self._chromeos_root,
machine=self._remote)
- command = ("cd %s && find page_cycler -type d | xargs chmod a+rx" %
+ command = ('cd %s && find page_cycler -type d | xargs chmod a+rx' %
self.REMOTE_TMP_DIR)
- self._ce.CrosRunCommand(command, chromeos_root=self._chromeos_root,
+ self._ce.CrosRunCommand(command,
+ chromeos_root=self._chromeos_root,
machine=self._remote)
def _CopyProfileToHost(self):
@@ -73,14 +69,14 @@ class CyclerProfiler:
os.path.basename(self._gcov_prefix))
# First remove the dir if it exists already
if os.path.exists(dest_dir):
- command = "rm -rf %s" % dest_dir
+ command = 'rm -rf %s' % dest_dir
self._ce.RunCommand(command)
# Strip out the initial prefix for the Chrome directory before doing the
# copy.
chrome_dir_prefix = misc.GetChromeSrcDir()
- command = "mkdir -p %s" % dest_dir
+ command = 'mkdir -p %s' % dest_dir
self._ce.RunCommand(command)
self._ce.CopyFiles(self._gcov_prefix,
dest_dir,
@@ -90,34 +86,36 @@ class CyclerProfiler:
src_cros=True)
def _RemoveRemoteProfileDir(self):
- command = "rm -rf %s" % self._gcov_prefix
- self._ce.CrosRunCommand(command, chromeos_root=self._chromeos_root,
+ command = 'rm -rf %s' % self._gcov_prefix
+ self._ce.CrosRunCommand(command,
+ chromeos_root=self._chromeos_root,
machine=self._remote)
def _LaunchCycler(self, cycler):
- command = ("DISPLAY=:0 "
- "XAUTHORITY=/home/chronos/.Xauthority "
- "GCOV_PREFIX=%s "
- "GCOV_PREFIX_STRIP=3 "
- "/opt/google/chrome/chrome "
- "--no-sandbox "
- "--renderer-clean-exit "
- "--user-data-dir=$(mktemp -d) "
- "--url \"file:///%s/page_cycler/%s/start.html?iterations=10&auto=1\" "
- "--enable-file-cookies "
- "--no-first-run "
- "--js-flags=expose_gc &" %
- (self._gcov_prefix,
- self.REMOTE_TMP_DIR,
- cycler))
-
- self._ce.CrosRunCommand(command, chromeos_root=self._chromeos_root,
+ command = (
+ 'DISPLAY=:0 '
+ 'XAUTHORITY=/home/chronos/.Xauthority '
+ 'GCOV_PREFIX=%s '
+ 'GCOV_PREFIX_STRIP=3 '
+ '/opt/google/chrome/chrome '
+ '--no-sandbox '
+ '--renderer-clean-exit '
+ '--user-data-dir=$(mktemp -d) '
+ "--url \"file:///%s/page_cycler/%s/start.html?iterations=10&auto=1\" "
+ '--enable-file-cookies '
+ '--no-first-run '
+ '--js-flags=expose_gc &' % (self._gcov_prefix, self.REMOTE_TMP_DIR,
+ cycler))
+
+ self._ce.CrosRunCommand(command,
+ chromeos_root=self._chromeos_root,
machine=self._remote,
command_timeout=60)
- def _PkillChrome(self, signal="9"):
- command = "pkill -%s chrome" % signal
- self._ce.CrosRunCommand(command, chromeos_root=self._chromeos_root,
+ def _PkillChrome(self, signal='9'):
+ command = 'pkill -%s chrome' % signal
+ self._ce.CrosRunCommand(command,
+ chromeos_root=self._chromeos_root,
machine=self._remote)
def DoProfile(self):
@@ -126,7 +124,7 @@ class CyclerProfiler:
self._PrepareTestData()
self._RemoveRemoteProfileDir()
- for cycler in self._cycler.split(","):
+ for cycler in self._cycler.split(','):
self._ProfileOneCycler(cycler)
# Copy the profile back
@@ -138,7 +136,7 @@ class CyclerProfiler:
cros_login.RestartUI(self._remote, self._chromeos_root, login=False)
# Run the cycler
self._LaunchCycler(cycler)
- self._PkillChrome(signal="INT")
+ self._PkillChrome(signal='INT')
# Let libgcov dump the profile.
# TODO(asharif): There is a race condition here. Fix it later.
time.sleep(30)
@@ -147,59 +145,55 @@ class CyclerProfiler:
def Main(argv):
"""The main function."""
# Common initializations
-### command_executer.InitCommandExecuter(True)
+ ### command_executer.InitCommandExecuter(True)
command_executer.InitCommandExecuter()
l = logger.GetLogger()
ce = command_executer.GetCommandExecuter()
parser = optparse.OptionParser()
- parser.add_option("--cycler",
- dest="cycler",
- default="alexa_us",
- help=("Comma-separated cyclers to profile. "
- "Example: alexa_us,moz,moz2"
- "Use all to profile all cyclers."))
- parser.add_option("--chromeos_root",
- dest="chromeos_root",
- default="../../",
- help="Output profile directory.")
- parser.add_option("--board",
- dest="board",
- default="x86-zgb",
- help="The target board.")
- parser.add_option("--remote",
- dest="remote",
- help=("The remote chromeos machine that"
- " has the profile image."))
- parser.add_option("--profile_dir",
- dest="profile_dir",
- default="profile_dir",
- help="Store profiles in this directory.")
+ parser.add_option('--cycler',
+ dest='cycler',
+ default='alexa_us',
+ help=('Comma-separated cyclers to profile. '
+ 'Example: alexa_us,moz,moz2'
+ 'Use all to profile all cyclers.'))
+ parser.add_option('--chromeos_root',
+ dest='chromeos_root',
+ default='../../',
+ help='Output profile directory.')
+ parser.add_option('--board',
+ dest='board',
+ default='x86-zgb',
+ help='The target board.')
+ parser.add_option('--remote',
+ dest='remote',
+ help=('The remote chromeos machine that'
+ ' has the profile image.'))
+ parser.add_option('--profile_dir',
+ dest='profile_dir',
+ default='profile_dir',
+ help='Store profiles in this directory.')
options, _ = parser.parse_args(argv)
- all_cyclers = ["alexa_us", "bloat", "dhtml", "dom",
- "intl1", "intl2", "morejs", "morejsnp",
- "moz", "moz2"]
+ all_cyclers = ['alexa_us', 'bloat', 'dhtml', 'dom', 'intl1', 'intl2',
+ 'morejs', 'morejsnp', 'moz', 'moz2']
- if options.cycler == "all":
- options.cycler = ",".join(all_cyclers)
+ if options.cycler == 'all':
+ options.cycler = ','.join(all_cyclers)
try:
- cp = CyclerProfiler(options.chromeos_root,
- options.board,
- options.cycler,
- options.profile_dir,
- options.remote)
+ cp = CyclerProfiler(options.chromeos_root, options.board, options.cycler,
+ options.profile_dir, options.remote)
cp.DoProfile()
retval = 0
except Exception as e:
retval = 1
print e
finally:
- print "Exiting..."
+ print 'Exiting...'
return retval
-if __name__ == "__main__":
+if __name__ == '__main__':
retval = Main(sys.argv)
sys.exit(retval)
diff --git a/fdo_scripts/summarize_hot_blocks.py b/fdo_scripts/summarize_hot_blocks.py
index 5fdd3349..e68d6747 100644
--- a/fdo_scripts/summarize_hot_blocks.py
+++ b/fdo_scripts/summarize_hot_blocks.py
@@ -1,7 +1,4 @@
-#!/usr/bin/python
-#
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""Summarize hottest basic blocks found while doing a ChromeOS FDO build.
Here is an example execution:
@@ -24,19 +21,23 @@ Here is an example of the *.profile and *.optimized files contents:
# BLOCK 7 freq:3901 count:60342, starting at line 92
# PRED: 6 [39.0%] count:60342 (true,exec)
- [url_canon_internal.cc : 92:28] MEM[(const char * *)source_6(D) + 16B] = D.28080_17;
- [url_canon_internal.cc : 93:41] MEM[(struct Component *)parsed_4(D) + 16B] = MEM[(const struct Component &)repl_1(D) + 80];
+ [url_canon_internal.cc : 92:28] MEM[(const char * *)source_6(D) + 16B] =
+ D.28080_17;
+ [url_canon_internal.cc : 93:41] MEM[(struct Component *)parsed_4(D) + 16B] =
+ MEM[(const struct Component &)repl_1(D) + 80];
# SUCC: 8 [100.0%] count:60342 (fallthru,exec)
# BLOCK 8 freq:10000 count:154667, starting at line 321
-# PRED: 7 [100.0%] count:60342 (fallthru,exec) 6 [61.0%] count:94325 (false,exec)
- [url_canon_internal.cc : 321:51] # DEBUG D#10 => [googleurl/src/url_canon_internal.cc : 321] &parsed_4(D)->host
+# PRED: 7 [100.0%] count:60342 (fallthru,exec) 6 [61.0%] count:94325
+(false,exec)
+ [url_canon_internal.cc : 321:51] # DEBUG D#10 =>
+ [googleurl/src/url_canon_internal.cc : 321] &parsed_4(D)->host
this script finds the blocks with highest count and shows the first line
of each block so that it is easy to identify the origin of the basic block.
"""
-__author__ = "llozano@google.com (Luis Lozano)"
+__author__ = 'llozano@google.com (Luis Lozano)'
import optparse
import os
@@ -51,7 +52,7 @@ from utils import command_executer
# Given a line, check if it has a block count and return it.
# Return -1 if there is no match
def GetBlockCount(line):
- match_obj = re.match(".*# BLOCK \d+ .*count:(\d+)", line)
+ match_obj = re.match('.*# BLOCK \d+ .*count:(\d+)', line)
if match_obj:
return int(match_obj.group(1))
else:
@@ -59,6 +60,7 @@ def GetBlockCount(line):
class Collector(object):
+
def __init__(self, data_dir, cutoff, output_dir, tempdir):
self._data_dir = data_dir
self._cutoff = cutoff
@@ -72,7 +74,7 @@ class Collector(object):
os.path.join(self._tempdir, list_file)))
ret = self._ce.RunCommand(command)
if ret:
- raise Exception("Failed: %s" % command)
+ raise Exception('Failed: %s' % command)
def SummarizeLines(self, data_file):
sum_lines = []
@@ -85,25 +87,25 @@ class Collector(object):
sum_line = line.strip()
sum_count = count
# look for a line that starts with line number information
- elif search_lno and re.match("^\s*\[.*: \d*:\d*]", line):
+ elif search_lno and re.match('^\s*\[.*: \d*:\d*]', line):
search_lno = False
- sum_lines.append("%d:%s: %s %s" %
+ sum_lines.append('%d:%s: %s %s' %
(sum_count, data_file.name, sum_line, line))
return sum_lines
# Look for blocks in the data file that have a count larger than the cutoff
# and generate a sorted summary file of the hottest blocks.
def SummarizeFile(self, data_file, sum_file):
- with open(data_file, "r") as f:
+ with open(data_file, 'r') as f:
sum_lines = self.SummarizeLines(f)
# sort reverse the list in place by the block count number
sum_lines.sort(key=GetBlockCount, reverse=True)
- with open(sum_file, "w") as sf:
- sf.write("".join(sum_lines))
+ with open(sum_file, 'w') as sf:
+ sf.write(''.join(sum_lines))
- print "Generated file Summary: ", sum_file
+ print 'Generated file Summary: ', sum_file
# Find hottest blocks in the list of files, generate a sorted summary for
# each file and then do a sorted merge of all the summaries.
@@ -112,56 +114,56 @@ class Collector(object):
sort_list = []
for file_name in f:
file_name = file_name.strip()
- sum_file = "%s.sum" % file_name
- sort_list.append("%s%s" % (sum_file, chr(0)))
+ sum_file = '%s.sum' % file_name
+ sort_list.append('%s%s' % (sum_file, chr(0)))
self.SummarizeFile(file_name, sum_file)
- tmp_list_file = os.path.join(self._tempdir, "file_list.dat")
- with open(tmp_list_file, "w") as file_list_file:
+ tmp_list_file = os.path.join(self._tempdir, 'file_list.dat')
+ with open(tmp_list_file, 'w') as file_list_file:
for x in sort_list:
file_list_file.write(x)
- merge_command = ("sort -nr -t: -k1 --merge --files0-from=%s > %s " %
+ merge_command = ('sort -nr -t: -k1 --merge --files0-from=%s > %s ' %
(tmp_list_file, summary_file))
ret = self._ce.RunCommand(merge_command)
if ret:
- raise Exception("Failed: %s" % merge_command)
- print "Generated general summary: ", summary_file
+ raise Exception('Failed: %s' % merge_command)
+ print 'Generated general summary: ', summary_file
def SummarizePreOptimized(self, summary_file):
- self.CollectFileList("*.profile", "chrome.profile.list")
- self.SummarizeList("chrome.profile.list",
+ self.CollectFileList('*.profile', 'chrome.profile.list')
+ self.SummarizeList('chrome.profile.list',
os.path.join(self._output_dir, summary_file))
def SummarizeOptimized(self, summary_file):
- self.CollectFileList("*.optimized", "chrome.optimized.list")
- self.SummarizeList("chrome.optimized.list",
+ self.CollectFileList('*.optimized', 'chrome.optimized.list')
+ self.SummarizeList('chrome.optimized.list',
os.path.join(self._output_dir, summary_file))
def Main(argv):
command_executer.InitCommandExecuter()
- usage = ("usage: %prog --data_dir=<dir> --cutoff=<value> "
- "--output_dir=<dir> [--keep_tmp]")
+ usage = ('usage: %prog --data_dir=<dir> --cutoff=<value> '
+ '--output_dir=<dir> [--keep_tmp]')
parser = optparse.OptionParser(usage=usage)
- parser.add_option("--data_dir",
- dest="data_dir",
- help=("directory where the FDO (*.profile and "
- "*.optimized) files are located"))
- parser.add_option("--cutoff",
- dest="cutoff",
- help="Minimum count to consider for each basic block")
- parser.add_option("--output_dir",
- dest="output_dir",
- help=("directory where summary data will be generated"
- "(pre_optimized.txt, optimized.txt)"))
- parser.add_option("--keep_tmp",
- action="store_true",
- dest="keep_tmp",
+ parser.add_option('--data_dir',
+ dest='data_dir',
+ help=('directory where the FDO (*.profile and '
+ '*.optimized) files are located'))
+ parser.add_option('--cutoff',
+ dest='cutoff',
+ help='Minimum count to consider for each basic block')
+ parser.add_option('--output_dir',
+ dest='output_dir',
+ help=('directory where summary data will be generated'
+ '(pre_optimized.txt, optimized.txt)'))
+ parser.add_option('--keep_tmp',
+ action='store_true',
+ dest='keep_tmp',
default=False,
- help=("Keep directory with temporary files"
- "(for debugging purposes)"))
+ help=('Keep directory with temporary files'
+ '(for debugging purposes)'))
options = parser.parse_args(argv)[0]
if not all((options.data_dir, options.cutoff, options.output_dir)):
parser.print_help()
@@ -171,14 +173,15 @@ def Main(argv):
co = Collector(options.data_dir, int(options.cutoff), options.output_dir,
tempdir)
- co.SummarizePreOptimized("pre_optimized.txt")
- co.SummarizeOptimized("optimized.txt")
+ co.SummarizePreOptimized('pre_optimized.txt')
+ co.SummarizeOptimized('optimized.txt')
if not options.keep_tmp:
shutil.rmtree(tempdir, ignore_errors=True)
return 0
-if __name__ == "__main__":
+
+if __name__ == '__main__':
retval = Main(sys.argv)
sys.exit(retval)
diff --git a/fdo_scripts/vanilla_vs_fdo.py b/fdo_scripts/vanilla_vs_fdo.py
index f9dc8fd4..8431293f 100644
--- a/fdo_scripts/vanilla_vs_fdo.py
+++ b/fdo_scripts/vanilla_vs_fdo.py
@@ -1,7 +1,4 @@
-#!/usr/bin/python
-#
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""Script to build chrome with FDO and compare performance against no FDO."""
import getpass
@@ -17,46 +14,45 @@ from utils import logger
class Patcher(object):
+
def __init__(self, dir_to_patch, patch_file):
self._dir_to_patch = dir_to_patch
self._patch_file = patch_file
- self._base_patch_command = "patch -p0 %%s < %s" % patch_file
+ self._base_patch_command = 'patch -p0 %%s < %s' % patch_file
self._ce = command_executer.GetCommandExecuter()
def _RunPatchCommand(self, args):
patch_command = self._base_patch_command % args
- command = ("cd %s && %s" % (self._dir_to_patch,
- patch_command))
+ command = ('cd %s && %s' % (self._dir_to_patch, patch_command))
return self._ce.RunCommand(command)
def _ApplyPatch(self, args):
- full_args = "%s --dry-run" % args
+ full_args = '%s --dry-run' % args
ret = self._RunPatchCommand(full_args)
if ret:
- raise Exception("Patch dry run failed!")
+ raise Exception('Patch dry run failed!')
ret = self._RunPatchCommand(args)
if ret:
- raise Exception("Patch application failed!")
+ raise Exception('Patch application failed!')
def __enter__(self):
- self._ApplyPatch("")
+ self._ApplyPatch('')
def __exit__(self, type, value, traceback):
- self._ApplyPatch("-R")
+ self._ApplyPatch('-R')
class FDOComparator(object):
+
def __init__(self, board, remotes, ebuild_version, plus_pgo, minus_pgo,
update_pgo, chromeos_root):
self._board = board
self._remotes = remotes
self._ebuild_version = ebuild_version
- self._remote = remotes.split(",")[0]
+ self._remote = remotes.split(',')[0]
self._chromeos_root = chromeos_root
- self._profile_dir = "profile_dir"
- self._profile_path = os.path.join(self._chromeos_root,
- "src",
- "scripts",
+ self._profile_dir = 'profile_dir'
+ self._profile_path = os.path.join(self._chromeos_root, 'src', 'scripts',
os.path.basename(self._profile_dir))
self._plus_pgo = plus_pgo
self._minus_pgo = minus_pgo
@@ -68,69 +64,62 @@ class FDOComparator(object):
def _CheckoutChromeOS(self):
if not os.path.exists(self._chromeos_root):
setup_chromeos_args = [setup_chromeos.__file__,
- "--dir=%s" % self._chromeos_root,
- "--minilayout"]
+ '--dir=%s' % self._chromeos_root, '--minilayout']
setup_chromeos.Main(setup_chromeos_args)
def _BuildChromeOSUsingBinaries(self):
image_dir = misc.GetImageDir(self._chromeos_root, self._board)
- command = "equery-%s l chromeos" % self._board
+ command = 'equery-%s l chromeos' % self._board
ret = self._ce.ChrootRunCommand(self._chromeos_root, command)
if ret:
- command = misc.GetSetupBoardCommand(self._board,
- usepkg=True)
- ret = self._ce.ChrootRunCommand(self._chromeos_root,
- command)
+ command = misc.GetSetupBoardCommand(self._board, usepkg=True)
+ ret = self._ce.ChrootRunCommand(self._chromeos_root, command)
if ret:
raise Exception("Couldn't run setup_board!")
- command = misc.GetBuildPackagesCommand(self._board,
- True)
- ret = self._ce.ChrootRunCommand(self._chromeos_root,
- command)
+ command = misc.GetBuildPackagesCommand(self._board, True)
+ ret = self._ce.ChrootRunCommand(self._chromeos_root, command)
if ret:
raise Exception("Couldn't run build_packages!")
def _ReportMismatches(self, build_log):
- mismatch_signature = "-Wcoverage-mismatch"
+ mismatch_signature = '-Wcoverage-mismatch'
mismatches = build_log.count(mismatch_signature)
- self._l.LogOutput("Total mismatches: %s" % mismatches)
+ self._l.LogOutput('Total mismatches: %s' % mismatches)
stale_files = set([])
for line in build_log.splitlines():
if mismatch_signature in line:
- filename = line.split(":")[0]
+ filename = line.split(':')[0]
stale_files.add(filename)
- self._l.LogOutput("Total stale files: %s" % len(stale_files))
-
- def _BuildChromeAndImage(self, ebuild_version="", env_dict={}, cflags="",
- cxxflags="", ldflags="", label="",
- build_image_args=""):
+ self._l.LogOutput('Total stale files: %s' % len(stale_files))
+
+ def _BuildChromeAndImage(self,
+ ebuild_version='',
+ env_dict={},
+ cflags='',
+ cxxflags='',
+ ldflags='',
+ label='',
+ build_image_args=''):
env_string = misc.GetEnvStringFromDict(env_dict)
if not label:
- label = " ".join([env_string,
- cflags,
- cxxflags,
- ldflags,
- ebuild_version])
+ label = ' '.join([env_string, cflags, cxxflags, ldflags, ebuild_version])
label = label.strip()
label = misc.GetFilenameFromString(label)
if not misc.DoesLabelExist(self._chromeos_root, self._board, label):
- build_chrome_browser_args = ["--clean",
- "--chromeos_root=%s" % self._chromeos_root,
- "--board=%s" % self._board,
- "--env=%r" % env_string,
- "--cflags=%r" % cflags,
- "--cxxflags=%r" % cxxflags,
- "--ldflags=%r" % ldflags,
- "--ebuild_version=%s" % ebuild_version,
- "--build_image_args=%s" % build_image_args]
-
- build_chrome_browser = os.path.join(os.path.dirname(__file__),
- "..",
- "build_chrome_browser.py")
- command = "python %s %s" % (build_chrome_browser,
- " ".join(build_chrome_browser_args))
+ build_chrome_browser_args = ['--clean', '--chromeos_root=%s' %
+ self._chromeos_root, '--board=%s' %
+ self._board, '--env=%r' % env_string,
+ '--cflags=%r' % cflags, '--cxxflags=%r' %
+ cxxflags, '--ldflags=%r' % ldflags,
+ '--ebuild_version=%s' % ebuild_version,
+ '--build_image_args=%s' % build_image_args]
+
+ build_chrome_browser = os.path.join(
+ os.path.dirname(__file__), '..', 'build_chrome_browser.py')
+ command = 'python %s %s' % (build_chrome_browser,
+ ' '.join(build_chrome_browser_args))
ret, out, err = self._ce.RunCommandWOutput(command)
- if "-fprofile-use" in cxxflags:
+ if '-fprofile-use' in cxxflags:
self._ReportMismatches(out)
if ret:
@@ -139,7 +128,7 @@ class FDOComparator(object):
return label
def _TestLabels(self, labels):
- experiment_file = "pgo_experiment.txt"
+ experiment_file = 'pgo_experiment.txt'
experiment_header = """
board: %s
remote: %s
@@ -149,87 +138,79 @@ class FDOComparator(object):
iterations: 1
}
"""
- with open(experiment_file, "w") as f:
- print >>f, experiment_header
- print >>f, experiment_tests
+
+ with open(experiment_file, 'w') as f:
+ print >> f, experiment_header
+ print >> f, experiment_tests
for label in labels:
# TODO(asharif): Fix crosperf so it accepts labels with symbols
crosperf_label = label
- crosperf_label = crosperf_label.replace("-", "minus")
- crosperf_label = crosperf_label.replace("+", "plus")
+ crosperf_label = crosperf_label.replace('-', 'minus')
+ crosperf_label = crosperf_label.replace('+', 'plus')
experiment_image = """
%s {
chromeos_image: %s
}
- """ % (crosperf_label,
- os.path.join(misc.GetImageDir(self._chromeos_root, self._board),
- label,
- "chromiumos_test_image.bin"))
- print >>f, experiment_image
- crosperf = os.path.join(os.path.dirname(__file__),
- "..",
- "crosperf",
- "crosperf")
- command = "%s %s" % (crosperf, experiment_file)
+ """ % (crosperf_label, os.path.join(
+ misc.GetImageDir(self._chromeos_root, self._board), label,
+ 'chromiumos_test_image.bin'))
+ print >> f, experiment_image
+ crosperf = os.path.join(
+ os.path.dirname(__file__), '..', 'crosperf', 'crosperf')
+ command = '%s %s' % (crosperf, experiment_file)
ret = self._ce.RunCommand(command)
if ret:
raise Exception("Couldn't run crosperf!")
def _ImageRemote(self, label):
- image_path = os.path.join(misc.GetImageDir(self._chromeos_root,
- self._board),
- label,
- "chromiumos_test_image.bin")
- image_chromeos_args = [image_chromeos.__file__,
- "--chromeos_root=%s" % self._chromeos_root,
- "--image=%s" % image_path,
- "--remote=%s" % self._remote,
- "--board=%s" % self._board]
+ image_path = os.path.join(
+ misc.GetImageDir(self._chromeos_root,
+ self._board), label, 'chromiumos_test_image.bin')
+ image_chromeos_args = [image_chromeos.__file__, '--chromeos_root=%s' %
+ self._chromeos_root, '--image=%s' % image_path,
+ '--remote=%s' % self._remote,
+ '--board=%s' % self._board]
image_chromeos.Main(image_chromeos_args)
def _ProfileRemote(self):
- profile_cycler = os.path.join(os.path.dirname(__file__),
- "profile_cycler.py")
- profile_cycler_args = ["--chromeos_root=%s" % self._chromeos_root,
- "--cycler=all",
- "--board=%s" % self._board,
- "--profile_dir=%s" % self._profile_path,
- "--remote=%s" % self._remote]
- command = "python %s %s" % (profile_cycler, " ".join(profile_cycler_args))
+ profile_cycler = os.path.join(
+ os.path.dirname(__file__), 'profile_cycler.py')
+ profile_cycler_args = ['--chromeos_root=%s' % self._chromeos_root,
+ '--cycler=all', '--board=%s' % self._board,
+ '--profile_dir=%s' % self._profile_path,
+ '--remote=%s' % self._remote]
+ command = 'python %s %s' % (profile_cycler, ' '.join(profile_cycler_args))
ret = self._ce.RunCommand(command)
if ret:
raise Exception("Couldn't profile cycler!")
def _BuildGenerateImage(self):
# TODO(asharif): add cflags as well.
- labels_list = ["fprofile-generate", self._ebuild_version]
- label = "_".join(labels_list)
+ labels_list = ['fprofile-generate', self._ebuild_version]
+ label = '_'.join(labels_list)
generate_label = self._BuildChromeAndImage(
- env_dict={"USE": "chrome_internal -pgo pgo_generate"},
+ env_dict={'USE': 'chrome_internal -pgo pgo_generate'},
label=label,
ebuild_version=self._ebuild_version,
- build_image_args="--rootfs_boost_size=400")
+ build_image_args='--rootfs_boost_size=400')
return generate_label
def _BuildUseImage(self):
ctarget = misc.GetCtargetFromBoard(self._board, self._chromeos_root)
- chroot_profile_dir = os.path.join("/home/%s/trunk" % getpass.getuser(),
- "src",
- "scripts",
- self._profile_dir,
+ chroot_profile_dir = os.path.join('/home/%s/trunk' % getpass.getuser(),
+ 'src', 'scripts', self._profile_dir,
ctarget)
- cflags = ("-fprofile-use "
- "-fprofile-correction "
- "-Wno-error "
- "-fdump-tree-optimized-blocks-lineno "
- "-fdump-ipa-profile-blocks-lineno "
- "-fno-vpt "
- "-fprofile-dir=%s" %
- chroot_profile_dir)
- labels_list = ["updated_pgo", self._ebuild_version]
- label = "_".join(labels_list)
+ cflags = ('-fprofile-use '
+ '-fprofile-correction '
+ '-Wno-error '
+ '-fdump-tree-optimized-blocks-lineno '
+ '-fdump-ipa-profile-blocks-lineno '
+ '-fno-vpt '
+ '-fprofile-dir=%s' % chroot_profile_dir)
+ labels_list = ['updated_pgo', self._ebuild_version]
+ label = '_'.join(labels_list)
pgo_use_label = self._BuildChromeAndImage(
- env_dict={"USE": "chrome_internal -pgo"},
+ env_dict={'USE': 'chrome_internal -pgo'},
cflags=cflags,
cxxflags=cflags,
ldflags=cflags,
@@ -243,12 +224,14 @@ class FDOComparator(object):
labels = []
if self._minus_pgo:
- minus_pgo = self._BuildChromeAndImage(env_dict={"USE": "chrome_internal -pgo"},
- ebuild_version=self._ebuild_version)
+ minus_pgo = self._BuildChromeAndImage(
+ env_dict={'USE': 'chrome_internal -pgo'},
+ ebuild_version=self._ebuild_version)
labels.append(minus_pgo)
if self._plus_pgo:
- plus_pgo = self._BuildChromeAndImage(env_dict={"USE": "chrome_internal pgo"},
- ebuild_version=self._ebuild_version)
+ plus_pgo = self._BuildChromeAndImage(
+ env_dict={'USE': 'chrome_internal pgo'},
+ ebuild_version=self._ebuild_version)
labels.append(plus_pgo)
if self._update_pgo:
@@ -272,62 +255,58 @@ class FDOComparator(object):
def Main(argv):
"""The main function."""
# Common initializations
-### command_executer.InitCommandExecuter(True)
+ ### command_executer.InitCommandExecuter(True)
command_executer.InitCommandExecuter()
parser = optparse.OptionParser()
- parser.add_option("--remote",
- dest="remote",
- help="Remote machines to run tests on.")
- parser.add_option("--board",
- dest="board",
- default="x86-zgb",
- help="The target board.")
- parser.add_option("--ebuild_version",
- dest="ebuild_version",
- default="",
- help="The Chrome ebuild version to use.")
- parser.add_option("--plus_pgo",
- dest="plus_pgo",
- action="store_true",
+ parser.add_option('--remote',
+ dest='remote',
+ help='Remote machines to run tests on.')
+ parser.add_option('--board',
+ dest='board',
+ default='x86-zgb',
+ help='The target board.')
+ parser.add_option('--ebuild_version',
+ dest='ebuild_version',
+ default='',
+ help='The Chrome ebuild version to use.')
+ parser.add_option('--plus_pgo',
+ dest='plus_pgo',
+ action='store_true',
default=False,
- help="Build USE=+pgo.")
- parser.add_option("--minus_pgo",
- dest="minus_pgo",
- action="store_true",
+ help='Build USE=+pgo.')
+ parser.add_option('--minus_pgo',
+ dest='minus_pgo',
+ action='store_true',
default=False,
- help="Build USE=-pgo.")
- parser.add_option("--update_pgo",
- dest="update_pgo",
- action="store_true",
+ help='Build USE=-pgo.')
+ parser.add_option('--update_pgo',
+ dest='update_pgo',
+ action='store_true',
default=False,
- help="Update pgo and build Chrome with the update.")
- parser.add_option("--chromeos_root",
- dest="chromeos_root",
+ help='Update pgo and build Chrome with the update.')
+ parser.add_option('--chromeos_root',
+ dest='chromeos_root',
default=False,
- help="The chromeos root directory")
+ help='The chromeos root directory')
options, _ = parser.parse_args(argv)
if not options.board:
- print "Please give a board."
+ print 'Please give a board.'
return 1
if not options.remote:
- print "Please give at least one remote machine."
+ print 'Please give at least one remote machine.'
return 1
if not options.chromeos_root:
- print "Please provide the chromeos root directory."
+ print 'Please provide the chromeos root directory.'
return 1
if not any((options.minus_pgo, options.plus_pgo, options.update_pgo)):
- print "Please provide at least one build option."
+ print 'Please provide at least one build option.'
return 1
- fc = FDOComparator(options.board,
- options.remote,
- options.ebuild_version,
- options.plus_pgo,
- options.minus_pgo,
- options.update_pgo,
+ fc = FDOComparator(options.board, options.remote, options.ebuild_version,
+ options.plus_pgo, options.minus_pgo, options.update_pgo,
os.path.expanduser(options.chromeos_root))
return fc.DoAll()
-if __name__ == "__main__":
+if __name__ == '__main__':
retval = Main(sys.argv)
sys.exit(retval)
diff --git a/file_lock_machine.py b/file_lock_machine.py
index f67a9bb1..5e5c17ca 100755
--- a/file_lock_machine.py
+++ b/file_lock_machine.py
@@ -1,10 +1,9 @@
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
-
"""Script to lock/unlock machines."""
-__author__ = "asharif@google.com (Ahmad Sharif)"
+__author__ = 'asharif@google.com (Ahmad Sharif)'
import datetime
import fcntl
@@ -19,20 +18,21 @@ import time
from utils import logger
-LOCK_SUFFIX = "_check_lock_liveness"
+LOCK_SUFFIX = '_check_lock_liveness'
# The locks file directory REQUIRES that 'group' only has read/write
# privileges and 'world' has no privileges. So the mask must be
# '0027': 0777 - 0027 = 0750.
LOCK_MASK = 0027
+
def FileCheckName(name):
return name + LOCK_SUFFIX
def OpenLiveCheck(file_name):
with FileCreationMask(LOCK_MASK):
- fd = open(file_name, "a+w")
+ fd = open(file_name, 'a+w')
try:
fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
@@ -41,6 +41,7 @@ def OpenLiveCheck(file_name):
class FileCreationMask(object):
+
def __init__(self, mask):
self._mask = mask
@@ -56,30 +57,27 @@ class LockDescription(object):
def __init__(self, desc=None):
try:
- self.owner = desc["owner"]
- self.exclusive = desc["exclusive"]
- self.counter = desc["counter"]
- self.time = desc["time"]
- self.reason = desc["reason"]
- self.auto = desc["auto"]
+ self.owner = desc['owner']
+ self.exclusive = desc['exclusive']
+ self.counter = desc['counter']
+ self.time = desc['time']
+ self.reason = desc['reason']
+ self.auto = desc['auto']
except (KeyError, TypeError):
- self.owner = ""
+ self.owner = ''
self.exclusive = False
self.counter = 0
self.time = 0
- self.reason = ""
+ self.reason = ''
self.auto = False
def IsLocked(self):
return self.counter or self.exclusive
def __str__(self):
- return " ".join(["Owner: %s" % self.owner,
- "Exclusive: %s" % self.exclusive,
- "Counter: %s" % self.counter,
- "Time: %s" % self.time,
- "Reason: %s" % self.reason,
- "Auto: %s" % self.auto])
+ return ' '.join(['Owner: %s' % self.owner, 'Exclusive: %s' % self.exclusive,
+ 'Counter: %s' % self.counter, 'Time: %s' % self.time,
+ 'Reason: %s' % self.reason, 'Auto: %s' % self.auto])
class FileLock(object):
@@ -89,31 +87,28 @@ class FileLock(object):
def __init__(self, lock_filename):
self._filepath = lock_filename
lock_dir = os.path.dirname(lock_filename)
- assert os.path.isdir(lock_dir), (
- "Locks dir: %s doesn't exist!" % lock_dir)
+ assert os.path.isdir(lock_dir), ("Locks dir: %s doesn't exist!" % lock_dir)
self._file = None
@classmethod
def AsString(cls, file_locks):
- stringify_fmt = "%-30s %-15s %-4s %-4s %-15s %-40s %-4s"
- header = stringify_fmt % ("machine", "owner", "excl", "ctr",
- "elapsed", "reason", "auto")
+ stringify_fmt = '%-30s %-15s %-4s %-4s %-15s %-40s %-4s'
+ header = stringify_fmt % ('machine', 'owner', 'excl', 'ctr', 'elapsed',
+ 'reason', 'auto')
lock_strings = []
for file_lock in file_locks:
elapsed_time = datetime.timedelta(
seconds=int(time.time() - file_lock._description.time))
- elapsed_time = "%s ago" % elapsed_time
- lock_strings.append(stringify_fmt %
- (os.path.basename(file_lock._filepath),
- file_lock._description.owner,
- file_lock._description.exclusive,
- file_lock._description.counter,
- elapsed_time,
- file_lock._description.reason,
- file_lock._description.auto))
- table = "\n".join(lock_strings)
- return "\n".join([header, table])
+ elapsed_time = '%s ago' % elapsed_time
+ lock_strings.append(
+ stringify_fmt %
+ (os.path.basename(file_lock._filepath), file_lock._description.owner,
+ file_lock._description.exclusive, file_lock._description.counter,
+ elapsed_time, file_lock._description.reason,
+ file_lock._description.auto))
+ table = '\n'.join(lock_strings)
+ return '\n'.join([header, table])
@classmethod
def ListLock(cls, pattern, locks_dir):
@@ -128,16 +123,16 @@ class FileLock(object):
with file_lock as lock:
if lock.IsLocked():
file_locks.append(file_lock)
- logger.GetLogger().LogOutput("\n%s" % cls.AsString(file_locks))
+ logger.GetLogger().LogOutput('\n%s' % cls.AsString(file_locks))
def __enter__(self):
with FileCreationMask(LOCK_MASK):
try:
- self._file = open(self._filepath, "a+")
+ self._file = open(self._filepath, 'a+')
self._file.seek(0, os.SEEK_SET)
if fcntl.flock(self._file.fileno(), fcntl.LOCK_EX) == -1:
- raise IOError("flock(%s, LOCK_EX) failed!" % self._filepath)
+ raise IOError('flock(%s, LOCK_EX) failed!' % self._filepath)
try:
desc = json.load(self._file)
@@ -176,23 +171,24 @@ class FileLock(object):
class Lock(object):
+
def __init__(self, lock_file, auto=True):
self._to_lock = os.path.basename(lock_file)
self._lock_file = lock_file
self._logger = logger.GetLogger()
self._auto = auto
- def NonBlockingLock(self, exclusive, reason=""):
+ def NonBlockingLock(self, exclusive, reason=''):
with FileLock(self._lock_file) as lock:
if lock.exclusive:
self._logger.LogError(
- "Exclusive lock already acquired by %s. Reason: %s" %
+ 'Exclusive lock already acquired by %s. Reason: %s' %
(lock.owner, lock.reason))
return False
if exclusive:
if lock.counter:
- self._logger.LogError("Shared lock already acquired")
+ self._logger.LogError('Shared lock already acquired')
return False
lock_file_check = FileCheckName(self._lock_file)
fd = OpenLiveCheck(lock_file_check)
@@ -205,7 +201,7 @@ class Lock(object):
lock.auto = self._auto
else:
lock.counter += 1
- self._logger.LogOutput("Successfully locked: %s" % self._to_lock)
+ self._logger.LogOutput('Successfully locked: %s' % self._to_lock)
return True
def Unlock(self, exclusive, force=False):
@@ -215,7 +211,7 @@ class Lock(object):
return True
if lock.exclusive != exclusive:
- self._logger.LogError("shared locks must be unlocked with --shared")
+ self._logger.LogError('shared locks must be unlocked with --shared')
return False
if lock.exclusive:
@@ -225,14 +221,15 @@ class Lock(object):
return False
if lock.auto != self._auto:
self._logger.LogError("Can't unlock lock with different -a"
- " parameter.")
+ ' parameter.')
return False
lock.exclusive = False
- lock.reason = ""
- lock.owner = ""
+ lock.reason = ''
+ lock.owner = ''
if self._auto:
- del_list = [i for i in FileLock.FILE_OPS
+ del_list = [i
+ for i in FileLock.FILE_OPS
if i.name == FileCheckName(self._lock_file)]
for i in del_list:
FileLock.FILE_OPS.remove(i)
@@ -248,7 +245,7 @@ class Lock(object):
class Machine(object):
- LOCKS_DIR = "/google/data/rw/users/mo/mobiletc-prebuild/locks"
+ LOCKS_DIR = '/google/data/rw/users/mo/mobiletc-prebuild/locks'
def __init__(self, name, locks_dir=LOCKS_DIR, auto=True):
self._name = name
@@ -259,19 +256,19 @@ class Machine(object):
self._full_name = self._name
self._full_name = os.path.join(locks_dir, self._full_name)
- def Lock(self, exclusive=False, reason=""):
+ def Lock(self, exclusive=False, reason=''):
lock = Lock(self._full_name, self._auto)
return lock.NonBlockingLock(exclusive, reason)
- def TryLock(self, timeout=300, exclusive=False, reason=""):
+ def TryLock(self, timeout=300, exclusive=False, reason=''):
locked = False
sleep = timeout / 10
while True:
locked = self.Lock(exclusive, reason)
if locked or not timeout >= 0:
break
- print "Lock not acquired for {0}, wait {1} seconds ...".format(
- self._name, sleep)
+ print 'Lock not acquired for {0}, wait {1} seconds ...'.format(self._name,
+ sleep)
time.sleep(sleep)
timeout -= sleep
return locked
@@ -284,41 +281,41 @@ class Machine(object):
def Main(argv):
"""The main function."""
parser = optparse.OptionParser()
- parser.add_option("-r",
- "--reason",
- dest="reason",
- default="",
- help="The lock reason.")
- parser.add_option("-u",
- "--unlock",
- dest="unlock",
- action="store_true",
+ parser.add_option('-r',
+ '--reason',
+ dest='reason',
+ default='',
+ help='The lock reason.')
+ parser.add_option('-u',
+ '--unlock',
+ dest='unlock',
+ action='store_true',
default=False,
- help="Use this to unlock.")
- parser.add_option("-l",
- "--list_locks",
- dest="list_locks",
- action="store_true",
+ help='Use this to unlock.')
+ parser.add_option('-l',
+ '--list_locks',
+ dest='list_locks',
+ action='store_true',
default=False,
- help="Use this to list locks.")
- parser.add_option("-f",
- "--ignore_ownership",
- dest="ignore_ownership",
- action="store_true",
+ help='Use this to list locks.')
+ parser.add_option('-f',
+ '--ignore_ownership',
+ dest='ignore_ownership',
+ action='store_true',
default=False,
help="Use this to force unlock on a lock you don't own.")
- parser.add_option("-s",
- "--shared",
- dest="shared",
- action="store_true",
+ parser.add_option('-s',
+ '--shared',
+ dest='shared',
+ action='store_true',
default=False,
- help="Use this for a shared (non-exclusive) lock.")
- parser.add_option("-d",
- "--dir",
- dest="locks_dir",
- action="store",
+ help='Use this for a shared (non-exclusive) lock.')
+ parser.add_option('-d',
+ '--dir',
+ dest='locks_dir',
+ action='store',
default=Machine.LOCKS_DIR,
- help="Use this to set different locks_dir")
+ help='Use this to set different locks_dir')
options, args = parser.parse_args(argv)
@@ -327,7 +324,7 @@ def Main(argv):
if not options.list_locks and len(args) != 2:
logger.GetLogger().LogError(
- "Either --list_locks or a machine arg is needed.")
+ 'Either --list_locks or a machine arg is needed.')
return 1
if len(args) > 1:
@@ -336,7 +333,7 @@ def Main(argv):
machine = None
if options.list_locks:
- FileLock.ListLock("*", options.locks_dir)
+ FileLock.ListLock('*', options.locks_dir)
retval = True
elif options.unlock:
retval = machine.Unlock(exclusive, options.ignore_ownership)
@@ -348,5 +345,6 @@ def Main(argv):
else:
return 1
-if __name__ == "__main__":
+
+if __name__ == '__main__':
sys.exit(Main(sys.argv))
diff --git a/get_common_image_version.py b/get_common_image_version.py
index cdac8006..bf5d219b 100755
--- a/get_common_image_version.py
+++ b/get_common_image_version.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
-
"""Script to find list of common images (first beta releases) in Chromeos.
Display information about stable ChromeOS/Chrome versions to be used
@@ -11,7 +10,7 @@ using randomly selected versions. Currently we define as a "stable"
version the first Beta release in a particular release cycle.
"""
-__author__ = "llozano@google.com (Luis Lozano)"
+__author__ = 'llozano@google.com (Luis Lozano)'
import optparse
import pickle
@@ -19,13 +18,13 @@ import re
import sys
import urllib
-VERSIONS_HISTORY_URL = "http://cros-omahaproxy.appspot.com/history"
+VERSIONS_HISTORY_URL = 'http://cros-omahaproxy.appspot.com/history'
def DisplayBetas(betas):
- print "List of betas from", VERSIONS_HISTORY_URL
+ print 'List of betas from', VERSIONS_HISTORY_URL
for beta in betas:
- print " Release", beta["chrome_major_version"], beta
+ print ' Release', beta['chrome_major_version'], beta
return
@@ -36,14 +35,14 @@ def FindAllBetas(all_versions):
prev_beta = {}
for line in all_versions:
match_obj = re.match(
- r"(?P<date>.*),(?P<chromeos_version>.*),"
- r"(?P<chrome_major_version>\d*).(?P<chrome_minor_version>.*),"
- r"(?P<chrome_appid>.*),beta-channel,,Samsung Chromebook Series 5 550",
+ r'(?P<date>.*),(?P<chromeos_version>.*),'
+ r'(?P<chrome_major_version>\d*).(?P<chrome_minor_version>.*),'
+ r'(?P<chrome_appid>.*),beta-channel,,Samsung Chromebook Series 5 550',
line)
if match_obj:
if prev_beta:
- if (prev_beta["chrome_major_version"] !=
- match_obj.group("chrome_major_version")):
+ if (prev_beta['chrome_major_version'] !=
+ match_obj.group('chrome_major_version')):
all_betas.append(prev_beta)
prev_beta = match_obj.groupdict()
if prev_beta:
@@ -52,9 +51,9 @@ def FindAllBetas(all_versions):
def SerializeBetas(all_betas, serialize_file):
- with open(serialize_file, "wb") as f:
+ with open(serialize_file, 'wb') as f:
pickle.dump(all_betas, f)
- print "Serialized list of betas into", serialize_file
+ print 'Serialized list of betas into', serialize_file
return
@@ -62,15 +61,17 @@ def Main(argv):
"""Get ChromeOS first betas list from history URL."""
parser = optparse.OptionParser()
- parser.add_option("--serialize", dest="serialize", default=None,
- help="Save list of common images into the specified file.")
+ parser.add_option('--serialize',
+ dest='serialize',
+ default=None,
+ help='Save list of common images into the specified file.')
options = parser.parse_args(argv)[0]
try:
opener = urllib.URLopener()
all_versions = opener.open(VERSIONS_HISTORY_URL)
except IOError as ioe:
- print "Cannot open", VERSIONS_HISTORY_URL
+ print 'Cannot open', VERSIONS_HISTORY_URL
print ioe
return 1
@@ -82,6 +83,7 @@ def Main(argv):
return 0
-if __name__ == "__main__":
+
+if __name__ == '__main__':
retval = Main(sys.argv)
sys.exit(retval)
diff --git a/heat_map.py b/heat_map.py
index 3bb7e363..7d4be65f 100755
--- a/heat_map.py
+++ b/heat_map.py
@@ -2,7 +2,6 @@
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Wrapper to generate heat maps for chrome."""
import argparse
@@ -15,10 +14,12 @@ from sets import Set
from utils import command_executer
from utils import misc
+
def IsARepoRoot(directory):
"""Returns True if directory is the root of a repo checkout."""
return os.path.exists(os.path.join(directory, '.repo'))
+
class HeatMapProducer(object):
"""Class to produce heat map."""
@@ -28,34 +29,39 @@ class HeatMapProducer(object):
self.page_size = page_size
self.dir = os.path.dirname(os.path.realpath(__file__))
self.binary = binary
- self.tempDir = ""
+ self.tempDir = ''
self.ce = command_executer.GetCommandExecuter()
def copyFileToChroot(self):
- self.tempDir = tempfile.mkdtemp(prefix=os.path.join(self.chromeos_root, 'src/'))
+ self.tempDir = tempfile.mkdtemp(
+ prefix=os.path.join(self.chromeos_root, 'src/'))
self.temp_perf = os.path.join(self.tempDir, 'perf.data')
shutil.copy2(self.perf_data, self.temp_perf)
- self.temp_perf_inchroot = os.path.join('~/trunk/src', os.path.basename(self.tempDir))
+ self.temp_perf_inchroot = os.path.join('~/trunk/src',
+ os.path.basename(self.tempDir))
def getPerfReport(self):
- cmd='cd %s; perf report -D -i perf.data > perf_report.txt' % self.temp_perf_inchroot
+ cmd = 'cd %s; perf report -D -i perf.data > perf_report.txt' % self.temp_perf_inchroot
retval = self.ce.ChrootRunCommand(self.chromeos_root, cmd)
if retval:
raise RuntimeError('Failed to generate perf report')
self.perf_report = os.path.join(self.tempDir, 'perf_report.txt')
def getBinaryBaseAddress(self):
- cmd = 'grep PERF_RECORD_MMAP %s | grep "%s$"' % (self.perf_report, self.binary)
+ cmd = 'grep PERF_RECORD_MMAP %s | grep "%s$"' % (self.perf_report,
+ self.binary)
retval, output, _ = self.ce.RunCommandWOutput(cmd)
if retval:
raise RuntimeError('Failed to run grep to get base address')
- baseAddresses = Set();
+ baseAddresses = Set()
for line in output.strip().split('\n'):
head = line.split('[')[2]
address = head.split('(')[0]
baseAddresses.add(address)
if len(baseAddresses) > 1:
- raise RuntimeError('Multiple base address found, please disable ASLR and collect profile again')
+ raise RuntimeError(
+ 'Multiple base address found, please disable ASLR and collect '
+ 'profile again')
if not len(baseAddresses):
raise RuntimeError('Could not find the base address in the profile')
self.loading_address = baseAddresses.pop()
@@ -71,20 +77,16 @@ class HeatMapProducer(object):
if not self.loading_address:
return
heatmap_script = os.path.join(self.dir, 'perf-to-inst-page.sh')
- cmd = '{0} {1} {2} {3} {4}'.format(heatmap_script,
- self.binary,
- self.perf_report,
- self.loading_address,
+ cmd = '{0} {1} {2} {3} {4}'.format(heatmap_script, self.binary,
+ self.perf_report, self.loading_address,
self.page_size)
retval = self.ce.RunCommand(cmd)
if retval:
raise RuntimeError('Failed to run script to generate heatmap')
-
def main(argv):
- """
- Parse the options.
+ """Parse the options.
Args:
argv: The options with which this script was invoked.
@@ -94,13 +96,22 @@ def main(argv):
"""
parser = argparse.ArgumentParser()
- parser.add_argument('--chromeos_root', dest='chromeos_root', required=True,
+ parser.add_argument('--chromeos_root',
+ dest='chromeos_root',
+ required=True,
help='ChromeOS root to use for generate heatmaps.')
- parser.add_argument('--perf_data', dest='perf_data', required=True,
+ parser.add_argument('--perf_data',
+ dest='perf_data',
+ required=True,
help='The raw perf data.')
- parser.add_argument('--binary', dest='binary', required=False,
- help='The name of the binary.', default='chrome')
- parser.add_argument('--page_size', dest='page_size', required=False,
+ parser.add_argument('--binary',
+ dest='binary',
+ required=False,
+ help='The name of the binary.',
+ default='chrome')
+ parser.add_argument('--page_size',
+ dest='page_size',
+ required=False,
help='The page size for heat maps.',
default=4096)
options = parser.parse_args(argv)
@@ -127,4 +138,4 @@ def main(argv):
if __name__ == '__main__':
- sys.exit(main(sys.argv[1:]))
+ sys.exit(main(sys.argv[1:]))
diff --git a/image_chromeos.py b/image_chromeos.py
index 4b764b06..562731a6 100755
--- a/image_chromeos.py
+++ b/image_chromeos.py
@@ -1,13 +1,12 @@
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""Script to image a ChromeOS device.
This script images a remote ChromeOS device with a specific image."
"""
-__author__ = "asharif@google.com (Ahmad Sharif)"
+__author__ = 'asharif@google.com (Ahmad Sharif)'
import filecmp
import glob
@@ -25,11 +24,12 @@ from utils import logger
from utils import misc
from utils.file_utils import FileUtils
-checksum_file = "/usr/local/osimage_checksum_file"
-lock_file = "/tmp/image_chromeos_lock/image_chromeos_lock"
+checksum_file = '/usr/local/osimage_checksum_file'
+lock_file = '/tmp/image_chromeos_lock/image_chromeos_lock'
+
def Usage(parser, message):
- print "ERROR: " + message
+ print 'ERROR: ' + message
parser.print_help()
sys.exit(0)
@@ -43,42 +43,48 @@ def CheckForCrosFlash(chromeos_root, remote, log_level):
chromeos_root=chromeos_root,
machine=remote)
logger.GetLogger().LogFatalIf(
- retval == 255, "Failed ssh to %s (for checking cherrypy)" % remote)
+ retval == 255, 'Failed ssh to %s (for checking cherrypy)' % remote)
logger.GetLogger().LogFatalIf(
retval != 0, "Failed to find cherrypy or ctypes on remote '{}', "
- "cros flash cannot work.".format(remote))
+ 'cros flash cannot work.'.format(remote))
def DoImage(argv):
"""Image ChromeOS."""
parser = optparse.OptionParser()
- parser.add_option("-c", "--chromeos_root", dest="chromeos_root",
- help="Target directory for ChromeOS installation.")
- parser.add_option("-r", "--remote", dest="remote",
- help="Target device.")
- parser.add_option("-i", "--image", dest="image",
- help="Image binary file.")
- parser.add_option("-b", "--board", dest="board",
- help="Target board override.")
- parser.add_option("-f", "--force", dest="force",
- action="store_true",
+ parser.add_option('-c',
+ '--chromeos_root',
+ dest='chromeos_root',
+ help='Target directory for ChromeOS installation.')
+ parser.add_option('-r', '--remote', dest='remote', help='Target device.')
+ parser.add_option('-i', '--image', dest='image', help='Image binary file.')
+ parser.add_option('-b',
+ '--board',
+ dest='board',
+ help='Target board override.')
+ parser.add_option('-f',
+ '--force',
+ dest='force',
+ action='store_true',
default=False,
- help="Force an image even if it is non-test.")
- parser.add_option("-n", "--no_lock", dest="no_lock",
- default=False, action="store_true",
- help="Do not attempt to lock remote before imaging. "
- "This option should only be used in cases where the "
- "exclusive lock has already been acquired (e.g. in "
- "a script that calls this one).")
- parser.add_option("-l", "--logging_level", dest="log_level",
- default="verbose",
- help="Amount of logging to be used. Valid levels are "
+ help='Force an image even if it is non-test.')
+ parser.add_option('-n',
+ '--no_lock',
+ dest='no_lock',
+ default=False,
+ action='store_true',
+ help='Do not attempt to lock remote before imaging. '
+ 'This option should only be used in cases where the '
+ 'exclusive lock has already been acquired (e.g. in '
+ 'a script that calls this one).')
+ parser.add_option('-l',
+ '--logging_level',
+ dest='log_level',
+ default='verbose',
+ help='Amount of logging to be used. Valid levels are '
"'quiet', 'average', and 'verbose'.")
- parser.add_option("-a",
- "--image_args",
- dest="image_args")
-
+ parser.add_option('-a', '--image_args', dest='image_args')
options = parser.parse_args(argv[1:])[0]
@@ -92,10 +98,10 @@ def DoImage(argv):
l = logger.GetLogger()
if options.chromeos_root is None:
- Usage(parser, "--chromeos_root must be set")
+ Usage(parser, '--chromeos_root must be set')
if options.remote is None:
- Usage(parser, "--remote must be set")
+ Usage(parser, '--remote must be set')
options.chromeos_root = os.path.expanduser(options.chromeos_root)
@@ -106,49 +112,47 @@ def DoImage(argv):
if options.image is None:
images_dir = misc.GetImageDir(options.chromeos_root, board)
- image = os.path.join(images_dir,
- "latest",
- "chromiumos_test_image.bin")
+ image = os.path.join(images_dir, 'latest', 'chromiumos_test_image.bin')
if not os.path.exists(image):
- image = os.path.join(images_dir,
- "latest",
- "chromiumos_image.bin")
+ image = os.path.join(images_dir, 'latest', 'chromiumos_image.bin')
else:
image = options.image
- if image.find("xbuddy://") < 0:
+ if image.find('xbuddy://') < 0:
image = os.path.expanduser(image)
- if image.find("xbuddy://") < 0:
+ if image.find('xbuddy://') < 0:
image = os.path.realpath(image)
- if not os.path.exists(image) and image.find("xbuddy://") < 0:
- Usage(parser, "Image file: " + image + " does not exist!")
+ if not os.path.exists(image) and image.find('xbuddy://') < 0:
+ Usage(parser, 'Image file: ' + image + ' does not exist!')
try:
should_unlock = False
if not options.no_lock:
try:
- status = locks.AcquireLock(list(options.remote.split()),
- options.chromeos_root)
+ status = locks.AcquireLock(
+ list(options.remote.split()), options.chromeos_root)
should_unlock = True
except Exception as e:
- raise Exception("Error acquiring machine: %s" % str(e))
+ raise Exception('Error acquiring machine: %s' % str(e))
reimage = False
local_image = False
- if image.find("xbuddy://") < 0:
+ if image.find('xbuddy://') < 0:
local_image = True
image_checksum = FileUtils().Md5File(image, log_level=log_level)
- command = "cat " + checksum_file
+ command = 'cat ' + checksum_file
retval, device_checksum, _ = cmd_executer.CrosRunCommandWOutput(
- command, chromeos_root=options.chromeos_root, machine=options.remote)
+ command,
+ chromeos_root=options.chromeos_root,
+ machine=options.remote)
device_checksum = device_checksum.strip()
image_checksum = str(image_checksum)
- l.LogOutput("Image checksum: " + image_checksum)
- l.LogOutput("Device checksum: " + device_checksum)
+ l.LogOutput('Image checksum: ' + image_checksum)
+ l.LogOutput('Device checksum: ' + device_checksum)
if image_checksum != device_checksum:
[found, located_image] = LocateOrCopyImage(options.chromeos_root,
@@ -156,91 +160,86 @@ def DoImage(argv):
board=board)
reimage = True
- l.LogOutput("Checksums do not match. Re-imaging...")
+ l.LogOutput('Checksums do not match. Re-imaging...')
is_test_image = IsImageModdedForTest(options.chromeos_root,
located_image, log_level)
if not is_test_image and not options.force:
- logger.GetLogger().LogFatal("Have to pass --force to image a non-test"
- " image!")
+ logger.GetLogger().LogFatal('Have to pass --force to image a non-test'
+ ' image!')
else:
reimage = True
found = True
- l.LogOutput("Using non-local image; Re-imaging...")
-
+ l.LogOutput('Using non-local image; Re-imaging...')
if reimage:
# If the device has /tmp mounted as noexec, image_to_live.sh can fail.
- command = "mount -o remount,rw,exec /tmp"
+ command = 'mount -o remount,rw,exec /tmp'
cmd_executer.CrosRunCommand(command,
chromeos_root=options.chromeos_root,
machine=options.remote)
- real_src_dir = os.path.join(os.path.realpath(options.chromeos_root),
- "src")
- real_chroot_dir = os.path.join(os.path.realpath(options.chromeos_root),
- "chroot")
+ real_src_dir = os.path.join(
+ os.path.realpath(options.chromeos_root), 'src')
+ real_chroot_dir = os.path.join(
+ os.path.realpath(options.chromeos_root), 'chroot')
if local_image:
if located_image.find(real_src_dir) != 0:
if located_image.find(real_chroot_dir) != 0:
- raise Exception("Located image: %s not in chromeos_root: %s" %
+ raise Exception('Located image: %s not in chromeos_root: %s' %
(located_image, options.chromeos_root))
else:
chroot_image = located_image[len(real_chroot_dir):]
else:
chroot_image = os.path.join(
- "~/trunk/src",
- located_image[len(real_src_dir):].lstrip("/"))
+ '~/trunk/src', located_image[len(real_src_dir):].lstrip('/'))
# Check to see if cros flash will work for the remote machine.
CheckForCrosFlash(options.chromeos_root, options.remote, log_level)
if local_image:
- cros_flash_args = ["--board=%s" % board,
- "--clobber-stateful",
- options.remote,
- chroot_image]
+ cros_flash_args = ['--board=%s' % board, '--clobber-stateful',
+ options.remote, chroot_image]
else:
- cros_flash_args = ["--board=%s" % board,
- "--clobber-stateful",
- options.remote,
- image]
+ cros_flash_args = ['--board=%s' % board, '--clobber-stateful',
+ options.remote, image]
- command = ("cros flash %s" % " ".join(cros_flash_args))
+ command = ('cros flash %s' % ' '.join(cros_flash_args))
# Workaround for crosbug.com/35684.
os.chmod(misc.GetChromeOSKeyFile(options.chromeos_root), 0600)
- if log_level == "quiet":
- l.LogOutput("CMD : %s" % command)
- elif log_level == "average":
- cmd_executer.SetLogLevel("verbose");
+ if log_level == 'quiet':
+ l.LogOutput('CMD : %s' % command)
+ elif log_level == 'average':
+ cmd_executer.SetLogLevel('verbose')
retval = cmd_executer.ChrootRunCommand(options.chromeos_root,
- command, command_timeout=1800)
+ command,
+ command_timeout=1800)
retries = 0
while retval != 0 and retries < 2:
retries += 1
- if log_level == "quiet":
- l.LogOutput("Imaging failed. Retry # %d." % retries)
- l.LogOutput("CMD : %s" % command)
+ if log_level == 'quiet':
+ l.LogOutput('Imaging failed. Retry # %d.' % retries)
+ l.LogOutput('CMD : %s' % command)
retval = cmd_executer.ChrootRunCommand(options.chromeos_root,
- command, command_timeout=1800)
+ command,
+ command_timeout=1800)
- if log_level == "average":
+ if log_level == 'average':
cmd_executer.SetLogLevel(log_level)
if found == False:
temp_dir = os.path.dirname(located_image)
- l.LogOutput("Deleting temp image dir: %s" % temp_dir)
+ l.LogOutput('Deleting temp image dir: %s' % temp_dir)
shutil.rmtree(temp_dir)
- logger.GetLogger().LogFatalIf(retval, "Image command failed")
+ logger.GetLogger().LogFatalIf(retval, 'Image command failed')
# Unfortunately cros_image_to_target.py sometimes returns early when the
# machine isn't fully up yet.
- retval = EnsureMachineUp(options.chromeos_root, options.remote,
- log_level)
+ retval = EnsureMachineUp(options.chromeos_root, options.remote, log_level)
# If this is a non-local image, then the retval returned from
# EnsureMachineUp is the one that will be returned by this function;
@@ -251,66 +250,61 @@ def DoImage(argv):
retval = 1
if local_image:
- if log_level == "average":
- l.LogOutput("Verifying image.")
- command = "echo %s > %s && chmod -w %s" % (image_checksum,
- checksum_file,
- checksum_file)
- retval = cmd_executer.CrosRunCommand(command,
- chromeos_root=options.chromeos_root,
- machine=options.remote)
- logger.GetLogger().LogFatalIf(retval, "Writing checksum failed.")
-
- successfully_imaged = VerifyChromeChecksum(options.chromeos_root,
- image,
+ if log_level == 'average':
+ l.LogOutput('Verifying image.')
+ command = 'echo %s > %s && chmod -w %s' % (image_checksum,
+ checksum_file, checksum_file)
+ retval = cmd_executer.CrosRunCommand(
+ command,
+ chromeos_root=options.chromeos_root,
+ machine=options.remote)
+ logger.GetLogger().LogFatalIf(retval, 'Writing checksum failed.')
+
+ successfully_imaged = VerifyChromeChecksum(options.chromeos_root, image,
options.remote, log_level)
logger.GetLogger().LogFatalIf(not successfully_imaged,
- "Image verification failed!")
+ 'Image verification failed!')
TryRemountPartitionAsRW(options.chromeos_root, options.remote,
log_level)
else:
- l.LogOutput("Checksums match. Skipping reimage")
+ l.LogOutput('Checksums match. Skipping reimage')
return retval
finally:
if should_unlock:
- locks.ReleaseLock(list(options.remote.split()), options.chromeos_root)
+ locks.ReleaseLock(list(options.remote.split()), options.chromeos_root)
def LocateOrCopyImage(chromeos_root, image, board=None):
l = logger.GetLogger()
if board is None:
- board_glob = "*"
+ board_glob = '*'
else:
board_glob = board
chromeos_root_realpath = os.path.realpath(chromeos_root)
image = os.path.realpath(image)
- if image.startswith("%s/" % chromeos_root_realpath):
+ if image.startswith('%s/' % chromeos_root_realpath):
return [True, image]
# First search within the existing build dirs for any matching files.
- images_glob = ("%s/src/build/images/%s/*/*.bin" %
- (chromeos_root_realpath,
- board_glob))
+ images_glob = ('%s/src/build/images/%s/*/*.bin' % (chromeos_root_realpath,
+ board_glob))
images_list = glob.glob(images_glob)
for potential_image in images_list:
if filecmp.cmp(potential_image, image):
- l.LogOutput("Found matching image %s in chromeos_root." % potential_image)
+ l.LogOutput('Found matching image %s in chromeos_root.' % potential_image)
return [True, potential_image]
# We did not find an image. Copy it in the src dir and return the copied
# file.
if board is None:
- board = ""
- base_dir = ("%s/src/build/images/%s" %
- (chromeos_root_realpath,
- board))
+ board = ''
+ base_dir = ('%s/src/build/images/%s' % (chromeos_root_realpath, board))
if not os.path.isdir(base_dir):
os.makedirs(base_dir)
- temp_dir = tempfile.mkdtemp(prefix="%s/tmp" % base_dir)
- new_image = "%s/%s" % (temp_dir, os.path.basename(image))
- l.LogOutput("No matching image found. Copying %s to %s" %
- (image, new_image))
+ temp_dir = tempfile.mkdtemp(prefix='%s/tmp' % base_dir)
+ new_image = '%s/%s' % (temp_dir, os.path.basename(image))
+ l.LogOutput('No matching image found. Copying %s to %s' % (image, new_image))
shutil.copyfile(image, new_image)
return [False, new_image]
@@ -318,37 +312,45 @@ def LocateOrCopyImage(chromeos_root, image, board=None):
def GetImageMountCommand(chromeos_root, image, rootfs_mp, stateful_mp):
image_dir = os.path.dirname(image)
image_file = os.path.basename(image)
- mount_command = ("cd %s/src/scripts &&"
- "./mount_gpt_image.sh --from=%s --image=%s"
- " --safe --read_only"
- " --rootfs_mountpt=%s"
- " --stateful_mountpt=%s" %
- (chromeos_root, image_dir, image_file, rootfs_mp,
- stateful_mp))
+ mount_command = ('cd %s/src/scripts &&'
+ './mount_gpt_image.sh --from=%s --image=%s'
+ ' --safe --read_only'
+ ' --rootfs_mountpt=%s'
+ ' --stateful_mountpt=%s' % (chromeos_root, image_dir,
+ image_file, rootfs_mp,
+ stateful_mp))
return mount_command
-def MountImage(chromeos_root, image, rootfs_mp, stateful_mp, log_level,
+def MountImage(chromeos_root,
+ image,
+ rootfs_mp,
+ stateful_mp,
+ log_level,
unmount=False):
cmd_executer = command_executer.GetCommandExecuter(log_level=log_level)
command = GetImageMountCommand(chromeos_root, image, rootfs_mp, stateful_mp)
if unmount:
- command = "%s --unmount" % command
+ command = '%s --unmount' % command
retval = cmd_executer.RunCommand(command)
- logger.GetLogger().LogFatalIf(retval, "Mount/unmount command failed!")
+ logger.GetLogger().LogFatalIf(retval, 'Mount/unmount command failed!')
return retval
def IsImageModdedForTest(chromeos_root, image, log_level):
- if log_level != "verbose":
- log_level = "quiet"
+ if log_level != 'verbose':
+ log_level = 'quiet'
rootfs_mp = tempfile.mkdtemp()
stateful_mp = tempfile.mkdtemp()
MountImage(chromeos_root, image, rootfs_mp, stateful_mp, log_level)
- lsb_release_file = os.path.join(rootfs_mp, "etc/lsb-release")
+ lsb_release_file = os.path.join(rootfs_mp, 'etc/lsb-release')
lsb_release_contents = open(lsb_release_file).read()
- is_test_image = re.search("test", lsb_release_contents, re.IGNORECASE)
- MountImage(chromeos_root, image, rootfs_mp, stateful_mp, log_level,
+ is_test_image = re.search('test', lsb_release_contents, re.IGNORECASE)
+ MountImage(chromeos_root,
+ image,
+ rootfs_mp,
+ stateful_mp,
+ log_level,
unmount=True)
return is_test_image
@@ -358,50 +360,56 @@ def VerifyChromeChecksum(chromeos_root, image, remote, log_level):
rootfs_mp = tempfile.mkdtemp()
stateful_mp = tempfile.mkdtemp()
MountImage(chromeos_root, image, rootfs_mp, stateful_mp, log_level)
- image_chrome_checksum = FileUtils().Md5File("%s/opt/google/chrome/chrome" %
+ image_chrome_checksum = FileUtils().Md5File('%s/opt/google/chrome/chrome' %
rootfs_mp,
log_level=log_level)
- MountImage(chromeos_root, image, rootfs_mp, stateful_mp, log_level,
+ MountImage(chromeos_root,
+ image,
+ rootfs_mp,
+ stateful_mp,
+ log_level,
unmount=True)
- command = "md5sum /opt/google/chrome/chrome"
- [_, o, _] = cmd_executer.CrosRunCommandWOutput(
- command, chromeos_root=chromeos_root, machine=remote)
+ command = 'md5sum /opt/google/chrome/chrome'
+ [_, o, _] = cmd_executer.CrosRunCommandWOutput(command,
+ chromeos_root=chromeos_root,
+ machine=remote)
device_chrome_checksum = o.split()[0]
if image_chrome_checksum.strip() == device_chrome_checksum.strip():
return True
else:
return False
+
# Remount partition as writable.
# TODO: auto-detect if an image is built using --noenable_rootfs_verification.
def TryRemountPartitionAsRW(chromeos_root, remote, log_level):
l = logger.GetLogger()
cmd_executer = command_executer.GetCommandExecuter(log_level=log_level)
- command = "sudo mount -o remount,rw /"
+ command = 'sudo mount -o remount,rw /'
retval = cmd_executer.CrosRunCommand(\
command, chromeos_root=chromeos_root, machine=remote, terminated_timeout=10)
if retval:
## Safely ignore.
- l.LogWarning("Failed to remount partition as rw, "
- "probably the image was not built with "
+ l.LogWarning('Failed to remount partition as rw, '
+ 'probably the image was not built with '
"\"--noenable_rootfs_verification\", "
- "you can safely ignore this.")
+ 'you can safely ignore this.')
else:
- l.LogOutput("Re-mounted partition as writable.")
+ l.LogOutput('Re-mounted partition as writable.')
def EnsureMachineUp(chromeos_root, remote, log_level):
l = logger.GetLogger()
cmd_executer = command_executer.GetCommandExecuter(log_level=log_level)
timeout = 600
- magic = "abcdefghijklmnopqrstuvwxyz"
- command = "echo %s" % magic
+ magic = 'abcdefghijklmnopqrstuvwxyz'
+ command = 'echo %s' % magic
start_time = time.time()
while True:
current_time = time.time()
if current_time - start_time > timeout:
- l.LogError("Timeout of %ss reached. Machine still not up. Aborting." %
+ l.LogError('Timeout of %ss reached. Machine still not up. Aborting.' %
timeout)
return False
retval = cmd_executer.CrosRunCommand(command,
@@ -411,6 +419,6 @@ def EnsureMachineUp(chromeos_root, remote, log_level):
return True
-if __name__ == "__main__":
+if __name__ == '__main__':
retval = DoImage(sys.argv)
sys.exit(retval)
diff --git a/lock_machine_test.py b/lock_machine_test.py
index d61878b8..7634e2a5 100644
--- a/lock_machine_test.py
+++ b/lock_machine_test.py
@@ -1,13 +1,10 @@
-#!/usr/bin/python
-#
# Copyright 2010 Google Inc. All Rights Reserved.
-
"""lock_machine.py related unit-tests.
MachineManagerTest tests MachineManager.
"""
-__author__ = "asharif@google.com (Ahmad Sharif)"
+__author__ = 'asharif@google.com (Ahmad Sharif)'
from multiprocessing import Process
import time
@@ -22,30 +19,31 @@ def LockAndSleep(machine):
class MachineTest(unittest.TestCase):
+
def setUp(self):
pass
def testRepeatedUnlock(self):
- mach = lock_machine.Machine("qqqraymes.mtv")
+ mach = lock_machine.Machine('qqqraymes.mtv')
for i in range(10):
self.assertFalse(mach.Unlock())
- mach = lock_machine.Machine("qqqraymes.mtv", auto=True)
+ mach = lock_machine.Machine('qqqraymes.mtv', auto=True)
for i in range(10):
self.assertFalse(mach.Unlock())
def testLockUnlock(self):
- mach = lock_machine.Machine("otter.mtv", "/tmp")
+ mach = lock_machine.Machine('otter.mtv', '/tmp')
for i in range(10):
self.assertTrue(mach.Lock(exclusive=True))
self.assertTrue(mach.Unlock(exclusive=True))
- mach = lock_machine.Machine("otter.mtv", "/tmp", True)
+ mach = lock_machine.Machine('otter.mtv', '/tmp', True)
for i in range(10):
self.assertTrue(mach.Lock(exclusive=True))
self.assertTrue(mach.Unlock(exclusive=True))
def testSharedLock(self):
- mach = lock_machine.Machine("chrotomation.mtv")
+ mach = lock_machine.Machine('chrotomation.mtv')
for i in range(10):
self.assertTrue(mach.Lock(exclusive=False))
for i in range(10):
@@ -53,7 +51,7 @@ class MachineTest(unittest.TestCase):
self.assertTrue(mach.Lock(exclusive=True))
self.assertTrue(mach.Unlock(exclusive=True))
- mach = lock_machine.Machine("chrotomation.mtv", auto=True)
+ mach = lock_machine.Machine('chrotomation.mtv', auto=True)
for i in range(10):
self.assertTrue(mach.Lock(exclusive=False))
for i in range(10):
@@ -62,14 +60,14 @@ class MachineTest(unittest.TestCase):
self.assertTrue(mach.Unlock(exclusive=True))
def testExclusiveLock(self):
- mach = lock_machine.Machine("atree.mtv")
+ mach = lock_machine.Machine('atree.mtv')
self.assertTrue(mach.Lock(exclusive=True))
for i in range(10):
self.assertFalse(mach.Lock(exclusive=True))
self.assertFalse(mach.Lock(exclusive=False))
self.assertTrue(mach.Unlock(exclusive=True))
- mach = lock_machine.Machine("atree.mtv", auto=True)
+ mach = lock_machine.Machine('atree.mtv', auto=True)
self.assertTrue(mach.Lock(exclusive=True))
for i in range(10):
self.assertFalse(mach.Lock(exclusive=True))
@@ -77,29 +75,29 @@ class MachineTest(unittest.TestCase):
self.assertTrue(mach.Unlock(exclusive=True))
def testExclusiveState(self):
- mach = lock_machine.Machine("testExclusiveState")
+ mach = lock_machine.Machine('testExclusiveState')
self.assertTrue(mach.Lock(exclusive=True))
for i in range(10):
self.assertFalse(mach.Lock(exclusive=False))
self.assertTrue(mach.Unlock(exclusive=True))
- mach = lock_machine.Machine("testExclusiveState", auto=True)
+ mach = lock_machine.Machine('testExclusiveState', auto=True)
self.assertTrue(mach.Lock(exclusive=True))
for i in range(10):
self.assertFalse(mach.Lock(exclusive=False))
self.assertTrue(mach.Unlock(exclusive=True))
def testAutoLockGone(self):
- mach = lock_machine.Machine("lockgone", auto=True)
- p = Process(target=LockAndSleep, args=("lockgone",))
+ mach = lock_machine.Machine('lockgone', auto=True)
+ p = Process(target=LockAndSleep, args=('lockgone',))
p.start()
time.sleep(1.1)
p.join()
self.assertTrue(mach.Lock(exclusive=True))
def testAutoLockFromOther(self):
- mach = lock_machine.Machine("other_lock", auto=True)
- p = Process(target=LockAndSleep, args=("other_lock",))
+ mach = lock_machine.Machine('other_lock', auto=True)
+ p = Process(target=LockAndSleep, args=('other_lock',))
p.start()
time.sleep(0.5)
self.assertFalse(mach.Lock(exclusive=True))
@@ -108,13 +106,13 @@ class MachineTest(unittest.TestCase):
self.assertTrue(mach.Lock(exclusive=True))
def testUnlockByOthers(self):
- mach = lock_machine.Machine("other_unlock", auto=True)
- p = Process(target=LockAndSleep, args=("other_unlock",))
+ mach = lock_machine.Machine('other_unlock', auto=True)
+ p = Process(target=LockAndSleep, args=('other_unlock',))
p.start()
time.sleep(0.5)
self.assertTrue(mach.Unlock(exclusive=True))
self.assertTrue(mach.Lock(exclusive=True))
-if __name__ == "__main__":
+if __name__ == '__main__':
unittest.main()
diff --git a/mem_tests/clean_data.py b/mem_tests/clean_data.py
index dc8a7b71..f9a11e75 100755
--- a/mem_tests/clean_data.py
+++ b/mem_tests/clean_data.py
@@ -1,5 +1,4 @@
#! /usr/bin/python
-
"""Cleans output from other scripts to eliminate duplicates.
When frequently sampling data, we see that records occasionally will contain
@@ -15,16 +14,16 @@ standard time.
import argparse
parser = argparse.ArgumentParser()
-parser.add_argument("filename")
+parser.add_argument('filename')
args = parser.parse_args()
my_file = open(args.filename)
-output_file = open("clean2.csv", "a")
+output_file = open('clean2.csv', 'a')
dictionary = dict()
for line in my_file:
- new_time = int(line.split(",")[0])
- dictionary[new_time] = line
+ new_time = int(line.split(',')[0])
+ dictionary[new_time] = line
for key in dictionary.keys():
- output_file.write(dictionary[key])
+ output_file.write(dictionary[key])
diff --git a/mem_tests/mem_groups.py b/mem_tests/mem_groups.py
index 75591182..6de76914 100755
--- a/mem_tests/mem_groups.py
+++ b/mem_tests/mem_groups.py
@@ -1,5 +1,4 @@
#! /usr/bin/python
-
"""Groups memory by allocation sizes.
Takes a log entry and sorts sorts everything into groups based on what size
@@ -20,37 +19,37 @@ from datetime import datetime
pretty_print = True
parser = argparse.ArgumentParser()
-parser.add_argument("filename")
+parser.add_argument('filename')
args = parser.parse_args()
my_file = open(args.filename)
-output_file = open("groups.csv", "a")
+output_file = open('groups.csv', 'a')
# The cutoffs for each group in the output (in bytes)
groups = [1024, 8192, 65536, 524288, 4194304]
base_time = datetime(2014, 6, 11, 0, 0)
-prev_line = ""
+prev_line = ''
half_entry = (None, None)
for line in my_file:
- if "heap profile:" in line:
- if half_entry[0] is not None:
- group_totals = half_entry[1]
- total = sum(group_totals) * 1.0
- to_join = [half_entry[0]] + [value / total for value in group_totals]
- to_output = ",".join([str(elem) for elem in to_join])
- output_file.write(to_output)
- total_diff = compute_total_diff(line, base_time)
- half_entry = (total_diff, [0]*(len(groups) + 1))
- if "] @ " in line and "heap profile:" not in line:
- mem_samples = line.strip().split("[")[0]
- num_samples, total_mem = map(int, mem_samples.strip().split(":"))
- mem_per_sample = total_mem // num_samples
- group_totals = half_entry[1]
- for cutoff_index in range(len(groups)):
- if mem_per_sample <= groups[cutoff_index]:
- group_totals[cutoff_index] += total_mem
- break
- if mem_per_sample > groups[-1]:
- group_totals[-1] += total_mem
+ if 'heap profile:' in line:
+ if half_entry[0] is not None:
+ group_totals = half_entry[1]
+ total = sum(group_totals) * 1.0
+ to_join = [half_entry[0]] + [value / total for value in group_totals]
+ to_output = ','.join([str(elem) for elem in to_join])
+ output_file.write(to_output)
+ total_diff = compute_total_diff(line, base_time)
+ half_entry = (total_diff, [0] * (len(groups) + 1))
+ if '] @ ' in line and 'heap profile:' not in line:
+ mem_samples = line.strip().split('[')[0]
+ num_samples, total_mem = map(int, mem_samples.strip().split(':'))
+ mem_per_sample = total_mem // num_samples
+ group_totals = half_entry[1]
+ for cutoff_index in range(len(groups)):
+ if mem_per_sample <= groups[cutoff_index]:
+ group_totals[cutoff_index] += total_mem
+ break
+ if mem_per_sample > groups[-1]:
+ group_totals[-1] += total_mem
diff --git a/mem_tests/total_mem_actual.py b/mem_tests/total_mem_actual.py
index c9c51b16..2e836e88 100755
--- a/mem_tests/total_mem_actual.py
+++ b/mem_tests/total_mem_actual.py
@@ -1,5 +1,4 @@
#! /usr/bin/python
-
"""Parses the actual memory usage from TCMalloc.
This goes through logs that have the actual allocated memory (not sampled) in
@@ -17,22 +16,22 @@ from datetime import datetime
pretty_print = True
parser = argparse.ArgumentParser()
-parser.add_argument("filename")
+parser.add_argument('filename')
args = parser.parse_args()
my_file = open(args.filename)
-output_file = open("raw_memory_data.csv", "a")
+output_file = open('raw_memory_data.csv', 'a')
base_time = datetime(2014, 6, 11, 0, 0)
-prev_line = ""
+prev_line = ''
half_entry = (None, None)
for line in my_file:
- if "Output Heap Stats:" in line:
- total_diff = compute_total_diff(line, base_time)
- half_entry = (total_diff, None)
- if "Bytes in use by application" in line:
- total_diff = half_entry[0]
- memory_used = int(line.strip().split()[1])
- half_entry = (None, None)
- output_file.write("{0},{1}\n".format(total_diff, memory_used))
+ if 'Output Heap Stats:' in line:
+ total_diff = compute_total_diff(line, base_time)
+ half_entry = (total_diff, None)
+ if 'Bytes in use by application' in line:
+ total_diff = half_entry[0]
+ memory_used = int(line.strip().split()[1])
+ half_entry = (None, None)
+ output_file.write('{0},{1}\n'.format(total_diff, memory_used))
diff --git a/mem_tests/total_mem_sampled.py b/mem_tests/total_mem_sampled.py
index f8ed8013..c7336473 100755
--- a/mem_tests/total_mem_sampled.py
+++ b/mem_tests/total_mem_sampled.py
@@ -1,5 +1,4 @@
#! /usr/bin/python
-
"""Parses the total amount of sampled memory from log files.
This file outputs the total amount of memory that has been sampled by tcmalloc.
@@ -14,19 +13,19 @@ from utils import compute_total_diff
from datetime import datetime
parser = argparse.ArgumentParser()
-parser.add_argument("filename")
+parser.add_argument('filename')
args = parser.parse_args()
my_file = open(args.filename)
-output_file = open("memory_data.csv", "a")
+output_file = open('memory_data.csv', 'a')
base_time = datetime(2014, 6, 11, 0, 0)
-prev_line = ""
+prev_line = ''
half_entry = (None, None)
for line in my_file:
- if "heap profile: " not in line:
- continue
- memory_used = line.strip().split(":")[-1].strip().split("]")[0].strip()
- total_diff = compute_total_diff(line, base_time)
- output_file.write("{0},{1}\n".format(int(total_diff), memory_used))
+ if 'heap profile: ' not in line:
+ continue
+ memory_used = line.strip().split(':')[-1].strip().split(']')[0].strip()
+ total_diff = compute_total_diff(line, base_time)
+ output_file.write('{0},{1}\n'.format(int(total_diff), memory_used))
diff --git a/mem_tests/utils.py b/mem_tests/utils.py
index 54dbcc2d..38bd89ca 100644
--- a/mem_tests/utils.py
+++ b/mem_tests/utils.py
@@ -1,12 +1,11 @@
-#! /usr/bin/python
-
"""Utility functions for the memory tests.
"""
from datetime import datetime
+
def compute_total_diff(line, base_time):
- """
+ """
Computes the difference in time the line was recorded from the base time.
An example of a line is:
@@ -17,7 +16,7 @@ def compute_total_diff(line, base_time):
line- the line that contains the time the record was taken
base_time- the base time to measure our timestamp from
"""
- date = line.strip().split(":")[2].split("/")
- timestamp = datetime(2014, int(date[0][0:2]), int(date[0][2:4]),
- int(date[1][0:2]), int(date[1][2:4]), int(date[1][4:6]))
- return (timestamp - base_time).total_seconds()
+ date = line.strip().split(':')[2].split('/')
+ timestamp = datetime(2014, int(date[0][0:2]), int(date[0][2:4]),
+ int(date[1][0:2]), int(date[1][2:4]), int(date[1][4:6]))
+ return (timestamp - base_time).total_seconds()
diff --git a/produce_output.py b/produce_output.py
index 009e5152..99e48e96 100755
--- a/produce_output.py
+++ b/produce_output.py
@@ -1,13 +1,11 @@
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
+"""This simulates a real job by producing a lot of output.
"""
-This simulates a real job by producing a lot of output.
-"""
-
-__author__ = "asharif@google.com (Ahmad Sharif)"
+__author__ = 'asharif@google.com (Ahmad Sharif)'
import optparse
import os
@@ -25,11 +23,11 @@ def Main(argv):
for j in range(10):
for i in range(10000):
- print str(j) + "The quick brown fox jumped over the lazy dog." + str(i)
+ print str(j) + 'The quick brown fox jumped over the lazy dog.' + str(i)
time.sleep(60)
return 0
-if __name__ == "__main__":
+if __name__ == '__main__':
Main(sys.argv)
diff --git a/remote_gcc_build.py b/remote_gcc_build.py
index 1862e40f..807ace37 100755
--- a/remote_gcc_build.py
+++ b/remote_gcc_build.py
@@ -3,7 +3,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Script to use remote try-bot build image with local gcc."""
import argparse
@@ -21,42 +20,42 @@ from utils import logger
from utils import manifest_versions
from utils import misc
-BRANCH = "the_actual_branch_used_in_this_script"
-TMP_BRANCH = "tmp_branch"
+BRANCH = 'the_actual_branch_used_in_this_script'
+TMP_BRANCH = 'tmp_branch'
SLEEP_TIME = 600
def GetPatchNum(output):
lines = output.splitlines()
- line = [l for l in lines if "googlesource" in l][0]
- patch_num = re.findall(r"\d+", line)[0]
- if "chrome-internal" in line:
- patch_num = "*" + patch_num
+ line = [l for l in lines if 'googlesource' in l][0]
+ patch_num = re.findall(r'\d+', line)[0]
+ if 'chrome-internal' in line:
+ patch_num = '*' + patch_num
return str(patch_num)
def GetPatchString(patch):
if patch:
- return "+".join(patch)
- return "NO_PATCH"
+ return '+'.join(patch)
+ return 'NO_PATCH'
def FindVersionForToolchain(branch, chromeos_root):
"""Find the version number in artifacts link in the tryserver email."""
# For example: input: toolchain-3701.42.B
# output: R26-3701.42.1
- digits = branch.split("-")[1].split("B")[0]
- manifest_dir = os.path.join(chromeos_root, "manifest-internal")
+ digits = branch.split('-')[1].split('B')[0]
+ manifest_dir = os.path.join(chromeos_root, 'manifest-internal')
os.chdir(manifest_dir)
- major_version = digits.split(".")[0]
+ major_version = digits.split('.')[0]
ce = command_executer.GetCommandExecuter()
- command = "repo sync . && git branch -a | grep {0}".format(major_version)
+ command = 'repo sync . && git branch -a | grep {0}'.format(major_version)
_, branches, _ = ce.RunCommandWOutput(command, print_to_console=False)
- m = re.search(r"(R\d+)", branches)
+ m = re.search(r'(R\d+)', branches)
if not m:
- logger.GetLogger().LogFatal("Cannot find version for branch {0}"
+ logger.GetLogger().LogFatal('Cannot find version for branch {0}'
.format(branch))
- version = m.group(0)+"-"+digits+"1"
+ version = m.group(0) + '-' + digits + '1'
return version
@@ -67,9 +66,9 @@ def FindBuildId(description):
(result, number) = FindBuildIdFromLog(description)
if result >= 0:
return (result, number)
- logger.GetLogger().LogOutput("{0} minutes passed."
+ logger.GetLogger().LogOutput('{0} minutes passed.'
.format(running_time / 60))
- logger.GetLogger().LogOutput("Sleeping {0} seconds.".format(SLEEP_TIME))
+ logger.GetLogger().LogOutput('Sleeping {0} seconds.'.format(SLEEP_TIME))
time.sleep(SLEEP_TIME)
running_time += SLEEP_TIME
@@ -84,9 +83,8 @@ def FindBuildIdFromLog(description):
# description yet
file_dir = os.path.dirname(os.path.realpath(__file__))
- commands = ("{0}/utils/buildbot_json.py builds "
- "http://chromegw/p/tryserver.chromiumos/"
- .format(file_dir))
+ commands = ('{0}/utils/buildbot_json.py builds '
+ 'http://chromegw/p/tryserver.chromiumos/'.format(file_dir))
ce = command_executer.GetCommandExecuter()
_, buildinfo, _ = ce.RunCommandWOutput(commands, print_to_console=False)
@@ -101,20 +99,20 @@ def FindBuildIdFromLog(description):
while current_line < len(my_info):
my_dict = {}
while True:
- key = my_info[current_line].split(":")[0].strip()
- value = my_info[current_line].split(":", 1)[1].strip()
+ key = my_info[current_line].split(':')[0].strip()
+ value = my_info[current_line].split(':', 1)[1].strip()
my_dict[key] = value
current_line += 1
- if "Build" in key or current_line == len(my_info):
+ if 'Build' in key or current_line == len(my_info):
break
- if ("True" not in my_dict["completed"] and
- str(description) in my_dict["reason"]):
+ if ('True' not in my_dict['completed'] and
+ str(description) in my_dict['reason']):
running_job = True
- if ("True" not in my_dict["completed"] or
- str(description) not in my_dict["reason"]):
+ if ('True' not in my_dict['completed'] or
+ str(description) not in my_dict['reason']):
continue
- result = int(my_dict["result"])
- build_id = int(my_dict["number"])
+ result = int(my_dict['result'])
+ build_id = int(my_dict['number'])
if result == 0:
return (result, build_id)
else:
@@ -133,37 +131,35 @@ def DownloadImage(target, index, dest, version):
os.makedirs(dest)
rversion = manifest_versions.RFormatCrosVersion(version)
-# ls_cmd = ("gsutil ls gs://chromeos-image-archive/trybot-{0}/{1}-b{2}"
-# .format(target, rversion, index))
- ls_cmd = ("gsutil ls gs://chromeos-image-archive/trybot-{0}/*-b{2}"
+ # ls_cmd = ("gsutil ls gs://chromeos-image-archive/trybot-{0}/{1}-b{2}"
+ # .format(target, rversion, index))
+ ls_cmd = ('gsutil ls gs://chromeos-image-archive/trybot-{0}/*-b{2}'
.format(target, rversion, index))
- download_cmd = ("$(which gsutil) cp {0} {1}".format("{0}", dest))
+ download_cmd = ('$(which gsutil) cp {0} {1}'.format('{0}', dest))
ce = command_executer.GetCommandExecuter()
_, out, _ = ce.RunCommandWOutput(ls_cmd, print_to_console=True)
lines = out.splitlines()
- download_files = ["autotest.tar", "chromeos-chrome",
- "chromiumos_test_image", "debug.tgz",
- "sysroot_chromeos-base_chromeos-chrome.tar.xz"
- ]
+ download_files = ['autotest.tar', 'chromeos-chrome', 'chromiumos_test_image',
+ 'debug.tgz', 'sysroot_chromeos-base_chromeos-chrome.tar.xz']
for line in lines:
if any([e in line for e in download_files]):
cmd = download_cmd.format(line)
if ce.RunCommand(cmd):
- logger.GetLogger().LogFatal("Command {0} failed, existing..."
+ logger.GetLogger().LogFatal('Command {0} failed, existing...'
.format(cmd))
def UnpackImage(dest):
"""Unpack the image, the chroot build dir."""
- chrome_tbz2 = glob.glob(dest+"/*.tbz2")[0]
- commands = ("tar xJf {0}/sysroot_chromeos-base_chromeos-chrome.tar.xz "
- "-C {0} &&"
- "tar xjf {1} -C {0} &&"
- "tar xzf {0}/debug.tgz -C {0}/usr/lib/ &&"
- "tar xf {0}/autotest.tar -C {0}/usr/local/ &&"
- "tar xJf {0}/chromiumos_test_image.tar.xz -C {0}"
+ chrome_tbz2 = glob.glob(dest + '/*.tbz2')[0]
+ commands = ('tar xJf {0}/sysroot_chromeos-base_chromeos-chrome.tar.xz '
+ '-C {0} &&'
+ 'tar xjf {1} -C {0} &&'
+ 'tar xzf {0}/debug.tgz -C {0}/usr/lib/ &&'
+ 'tar xf {0}/autotest.tar -C {0}/usr/local/ &&'
+ 'tar xJf {0}/chromiumos_test_image.tar.xz -C {0}'
.format(dest, chrome_tbz2))
ce = command_executer.GetCommandExecuter()
return ce.RunCommand(commands)
@@ -172,60 +168,60 @@ def UnpackImage(dest):
def RemoveOldBranch():
"""Remove the branch with name BRANCH."""
ce = command_executer.GetCommandExecuter()
- command = "git rev-parse --abbrev-ref HEAD"
+ command = 'git rev-parse --abbrev-ref HEAD'
_, out, _ = ce.RunCommandWOutput(command)
if BRANCH in out:
- command = "git checkout -B {0}".format(TMP_BRANCH)
+ command = 'git checkout -B {0}'.format(TMP_BRANCH)
ce.RunCommand(command)
command = "git commit -m 'nouse'"
ce.RunCommand(command)
- command = "git branch -D {0}".format(BRANCH)
+ command = 'git branch -D {0}'.format(BRANCH)
ce.RunCommand(command)
-def UploadManifest(manifest, chromeos_root, branch="master"):
+def UploadManifest(manifest, chromeos_root, branch='master'):
"""Copy the manifest to $chromeos_root/manifest-internal and upload."""
chromeos_root = misc.CanonicalizePath(chromeos_root)
- manifest_dir = os.path.join(chromeos_root, "manifest-internal")
+ manifest_dir = os.path.join(chromeos_root, 'manifest-internal')
os.chdir(manifest_dir)
ce = command_executer.GetCommandExecuter()
RemoveOldBranch()
- if branch != "master":
- branch = "{0}".format(branch)
- command = "git checkout -b {0} -t cros-internal/{1}".format(BRANCH, branch)
+ if branch != 'master':
+ branch = '{0}'.format(branch)
+ command = 'git checkout -b {0} -t cros-internal/{1}'.format(BRANCH, branch)
ret = ce.RunCommand(command)
if ret:
- raise Exception("Command {0} failed".format(command))
+ raise Exception('Command {0} failed'.format(command))
# We remove the default.xml, which is the symbolic link of full.xml.
# After that, we copy our xml file to default.xml.
# We did this because the full.xml might be updated during the
# run of the script.
- os.remove(os.path.join(manifest_dir, "default.xml"))
- shutil.copyfile(manifest, os.path.join(manifest_dir, "default.xml"))
+ os.remove(os.path.join(manifest_dir, 'default.xml'))
+ shutil.copyfile(manifest, os.path.join(manifest_dir, 'default.xml'))
return UploadPatch(manifest)
-def GetManifestPatch(manifests, version, chromeos_root, branch="master"):
+def GetManifestPatch(manifests, version, chromeos_root, branch='master'):
"""Return a gerrit patch number given a version of manifest file."""
temp_dir = tempfile.mkdtemp()
- to_file = os.path.join(temp_dir, "default.xml")
+ to_file = os.path.join(temp_dir, 'default.xml')
manifests.GetManifest(version, to_file)
return UploadManifest(to_file, chromeos_root, branch)
def UploadPatch(source):
"""Up load patch to gerrit, return patch number."""
- commands = ("git add -A . &&"
+ commands = ('git add -A . &&'
"git commit -m 'test' -m 'BUG=None' -m 'TEST=None' "
"-m 'hostname={0}' -m 'source={1}'"
.format(socket.gethostname(), source))
ce = command_executer.GetCommandExecuter()
ce.RunCommand(commands)
- commands = ("yes | repo upload . --cbr --no-verify")
+ commands = ('yes | repo upload . --cbr --no-verify')
_, _, err = ce.RunCommandWOutput(commands)
return GetPatchNum(err)
@@ -234,26 +230,26 @@ def ReplaceSysroot(chromeos_root, dest_dir, target):
"""Copy unpacked sysroot and image to chromeos_root."""
ce = command_executer.GetCommandExecuter()
# get the board name from "board-release". board may contain "-"
- board = target.rsplit("-", 1)[0]
- board_dir = os.path.join(chromeos_root, "chroot", "build", board)
- command = "sudo rm -rf {0}".format(board_dir)
+ board = target.rsplit('-', 1)[0]
+ board_dir = os.path.join(chromeos_root, 'chroot', 'build', board)
+ command = 'sudo rm -rf {0}'.format(board_dir)
ce.RunCommand(command)
- command = "sudo mv {0} {1}".format(dest_dir, board_dir)
+ command = 'sudo mv {0} {1}'.format(dest_dir, board_dir)
ce.RunCommand(command)
- image_dir = os.path.join(chromeos_root, "src", "build", "images",
- board, "latest")
- command = "rm -rf {0} && mkdir -p {0}".format(image_dir)
+ image_dir = os.path.join(chromeos_root, 'src', 'build', 'images', board,
+ 'latest')
+ command = 'rm -rf {0} && mkdir -p {0}'.format(image_dir)
ce.RunCommand(command)
- command = "mv {0}/chromiumos_test_image.bin {1}".format(board_dir, image_dir)
+ command = 'mv {0}/chromiumos_test_image.bin {1}'.format(board_dir, image_dir)
return ce.RunCommand(command)
def GccBranchForToolchain(branch):
- if branch == "toolchain-3428.65.B":
- return "release-R25-3428.B"
+ if branch == 'toolchain-3428.65.B':
+ return 'release-R25-3428.B'
else:
return None
@@ -261,18 +257,18 @@ def GccBranchForToolchain(branch):
def GetGccBranch(branch):
"""Get the remote branch name from branch or version."""
ce = command_executer.GetCommandExecuter()
- command = "git branch -a | grep {0}".format(branch)
+ command = 'git branch -a | grep {0}'.format(branch)
_, out, _ = ce.RunCommandWOutput(command)
if not out:
- release_num = re.match(r".*(R\d+)-*", branch)
+ release_num = re.match(r'.*(R\d+)-*', branch)
if release_num:
release_num = release_num.group(0)
- command = "git branch -a | grep {0}".format(release_num)
+ command = 'git branch -a | grep {0}'.format(release_num)
_, out, _ = ce.RunCommandWOutput(command)
if not out:
GccBranchForToolchain(branch)
if not out:
- out = "remotes/cros/master"
+ out = 'remotes/cros/master'
new_branch = out.splitlines()[0]
return new_branch
@@ -281,52 +277,51 @@ def UploadGccPatch(chromeos_root, gcc_dir, branch):
"""Upload local gcc to gerrit and get the CL number."""
ce = command_executer.GetCommandExecuter()
gcc_dir = misc.CanonicalizePath(gcc_dir)
- gcc_path = os.path.join(chromeos_root, "src/third_party/gcc")
- assert os.path.isdir(gcc_path), ("{0} is not a valid chromeos root"
+ gcc_path = os.path.join(chromeos_root, 'src/third_party/gcc')
+ assert os.path.isdir(gcc_path), ('{0} is not a valid chromeos root'
.format(chromeos_root))
- assert os.path.isdir(gcc_dir), ("{0} is not a valid dir for gcc"
- "source".format(gcc_dir))
+ assert os.path.isdir(gcc_dir), ('{0} is not a valid dir for gcc'
+ 'source'.format(gcc_dir))
os.chdir(gcc_path)
RemoveOldBranch()
if not branch:
- branch = "master"
+ branch = 'master'
branch = GetGccBranch(branch)
- command = ("git checkout -b {0} -t {1} && "
- "rm -rf *".format(BRANCH, branch))
+ command = ('git checkout -b {0} -t {1} && ' 'rm -rf *'.format(BRANCH, branch))
ce.RunCommand(command, print_to_console=False)
command = ("rsync -az --exclude='*.svn' --exclude='*.git'"
- " {0}/ .".format(gcc_dir))
+ ' {0}/ .'.format(gcc_dir))
ce.RunCommand(command)
return UploadPatch(gcc_dir)
-def RunRemote(chromeos_root, branch, patches, is_local,
- target, chrome_version, dest_dir):
+def RunRemote(chromeos_root, branch, patches, is_local, target, chrome_version,
+ dest_dir):
"""The actual running commands."""
ce = command_executer.GetCommandExecuter()
if is_local:
- local_flag = "--local -r {0}".format(dest_dir)
+ local_flag = '--local -r {0}'.format(dest_dir)
else:
- local_flag = "--remote"
- patch = ""
+ local_flag = '--remote'
+ patch = ''
for p in patches:
- patch += " -g {0}".format(p)
- cbuildbot_path = os.path.join(chromeos_root, "chromite/cbuildbot")
+ patch += ' -g {0}'.format(p)
+ cbuildbot_path = os.path.join(chromeos_root, 'chromite/cbuildbot')
os.chdir(cbuildbot_path)
- branch_flag = ""
- if branch != "master":
- branch_flag = " -b {0}".format(branch)
- chrome_version_flag = ""
+ branch_flag = ''
+ if branch != 'master':
+ branch_flag = ' -b {0}'.format(branch)
+ chrome_version_flag = ''
if chrome_version:
- chrome_version_flag = " --chrome_version={0}".format(chrome_version)
- description = "{0}_{1}_{2}".format(branch, GetPatchString(patches), target)
- command = ("yes | ./cbuildbot {0} {1} {2} {3} {4} {5}"
- " --remote-description={6}"
- " --chrome_rev=tot"
- .format(patch, branch_flag, chrome_version, local_flag,
- chrome_version_flag, target, description))
+ chrome_version_flag = ' --chrome_version={0}'.format(chrome_version)
+ description = '{0}_{1}_{2}'.format(branch, GetPatchString(patches), target)
+ command = ('yes | ./cbuildbot {0} {1} {2} {3} {4} {5}'
+ ' --remote-description={6}'
+ ' --chrome_rev=tot'.format(patch, branch_flag, chrome_version,
+ local_flag, chrome_version_flag, target,
+ description))
ce.RunCommand(command)
return description
@@ -336,36 +331,58 @@ def Main(argv):
"""The main function."""
# Common initializations
parser = argparse.ArgumentParser()
- parser.add_argument("-c", "--chromeos_root", required=True,
- dest="chromeos_root", help="The chromeos_root")
- parser.add_argument("-g", "--gcc_dir", default="", dest="gcc_dir",
- help="The gcc dir")
- parser.add_argument("-t", "--target", required=True, dest="target",
- help=("The target to be build, the list is at"
- " $(chromeos_root)/chromite/buildbot/cbuildbot"
- " --list -all"))
- parser.add_argument("-l", "--local", action="store_true")
- parser.add_argument("-d", "--dest_dir", dest="dest_dir",
- help=("The dir to build the whole chromeos if"
- " --local is set"))
- parser.add_argument("--chrome_version", dest="chrome_version",
- default="", help="The chrome version to use. "
- "Default it will use the latest one.")
- parser.add_argument("--chromeos_version", dest="chromeos_version",
- default="",
- help=("The chromeos version to use."
- "(1) A release version in the format: "
- "'\d+\.\d+\.\d+\.\d+.*'"
+ parser.add_argument('-c',
+ '--chromeos_root',
+ required=True,
+ dest='chromeos_root',
+ help='The chromeos_root')
+ parser.add_argument('-g',
+ '--gcc_dir',
+ default='',
+ dest='gcc_dir',
+ help='The gcc dir')
+ parser.add_argument('-t',
+ '--target',
+ required=True,
+ dest='target',
+ help=('The target to be build, the list is at'
+ ' $(chromeos_root)/chromite/buildbot/cbuildbot'
+ ' --list -all'))
+ parser.add_argument('-l', '--local', action='store_true')
+ parser.add_argument('-d',
+ '--dest_dir',
+ dest='dest_dir',
+ help=('The dir to build the whole chromeos if'
+ ' --local is set'))
+ parser.add_argument('--chrome_version',
+ dest='chrome_version',
+ default='',
+ help='The chrome version to use. '
+ 'Default it will use the latest one.')
+ parser.add_argument('--chromeos_version',
+ dest='chromeos_version',
+ default='',
+ help=('The chromeos version to use.'
+ '(1) A release version in the format: '
+ "'\d+\.\d+\.\d+\.\d+.*'"
"(2) 'latest_lkgm' for the latest lkgm version"))
- parser.add_argument("-r", "--replace_sysroot", action="store_true",
- help=("Whether or not to replace the build/$board dir"
- "under the chroot of chromeos_root and copy "
- "the image to src/build/image/$board/latest."
- " Default is False"))
- parser.add_argument("-b", "--branch", dest="branch", default="",
- help=("The branch to run trybot, default is None"))
- parser.add_argument("-p", "--patch", dest="patch", default="",
- help=("The patches to be applied, the patches numbers "
+ parser.add_argument('-r',
+ '--replace_sysroot',
+ action='store_true',
+ help=('Whether or not to replace the build/$board dir'
+ 'under the chroot of chromeos_root and copy '
+ 'the image to src/build/image/$board/latest.'
+ ' Default is False'))
+ parser.add_argument('-b',
+ '--branch',
+ dest='branch',
+ default='',
+ help=('The branch to run trybot, default is None'))
+ parser.add_argument('-p',
+ '--patch',
+ dest='patch',
+ default='',
+ help=('The patches to be applied, the patches numbers '
"be seperated by ','"))
script_dir = os.path.dirname(os.path.realpath(__file__))
@@ -373,28 +390,28 @@ def Main(argv):
args = parser.parse_args(argv[1:])
target = args.target
if args.patch:
- patch = args.patch.split(",")
+ patch = args.patch.split(',')
else:
patch = []
chromeos_root = misc.CanonicalizePath(args.chromeos_root)
if args.chromeos_version and args.branch:
- raise Exception("You can not set chromeos_version and branch at the "
- "same time.")
+ raise Exception('You can not set chromeos_version and branch at the '
+ 'same time.')
manifests = None
if args.branch:
- chromeos_version = ""
+ chromeos_version = ''
branch = args.branch
else:
chromeos_version = args.chromeos_version
manifests = manifest_versions.ManifestVersions()
- if chromeos_version == "latest_lkgm":
+ if chromeos_version == 'latest_lkgm':
chromeos_version = manifests.TimeToVersion(time.mktime(time.gmtime()))
- logger.GetLogger().LogOutput("found version %s for latest LKGM" % (
- chromeos_version))
+ logger.GetLogger().LogOutput('found version %s for latest LKGM' %
+ (chromeos_version))
# TODO: this script currently does not handle the case where the version
# is not in the "master" branch
- branch = "master"
+ branch = 'master'
if chromeos_version:
manifest_patch = GetManifestPatch(manifests, chromeos_version,
@@ -405,8 +422,8 @@ def Main(argv):
# patch for GCC even if GCC has not changed. The description should
# be based on the MD5 of the GCC patch contents.
patch.append(UploadGccPatch(chromeos_root, args.gcc_dir, branch))
- description = RunRemote(chromeos_root, branch, patch, args.local,
- target, args.chrome_version, args.dest_dir)
+ description = RunRemote(chromeos_root, branch, patch, args.local, target,
+ args.chrome_version, args.dest_dir)
if args.local or not args.dest_dir:
# TODO: We are not checktng the result of cbuild_bot in here!
return 0
@@ -421,12 +438,12 @@ def Main(argv):
dest_dir = misc.CanonicalizePath(args.dest_dir)
(bot_result, build_id) = FindBuildId(description)
if bot_result > 0 and build_id > 0:
- logger.GetLogger().LogError("Remote trybot failed but image was generated")
+ logger.GetLogger().LogError('Remote trybot failed but image was generated')
bot_result = 1
elif bot_result > 0:
- logger.GetLogger().LogError("Remote trybot failed. No image was generated")
+ logger.GetLogger().LogError('Remote trybot failed. No image was generated')
return 2
- if "toolchain" in branch:
+ if 'toolchain' in branch:
chromeos_version = FindVersionForToolchain(branch, chromeos_root)
assert not manifest_versions.IsRFormatCrosVersion(chromeos_version)
DownloadImage(target, build_id, dest_dir, chromeos_version)
@@ -444,6 +461,7 @@ def Main(argv):
# got an image and we were successful in placing it where requested
return bot_result
-if __name__ == "__main__":
+
+if __name__ == '__main__':
retval = Main(sys.argv)
sys.exit(retval)
diff --git a/remote_kill_test.py b/remote_kill_test.py
index ca1bc5fd..88aaf15d 100755
--- a/remote_kill_test.py
+++ b/remote_kill_test.py
@@ -1,14 +1,13 @@
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
-
"""Script to wrap test_that script.
Run this script and kill it. Then run ps -ef to see if sleep
is still running,.
"""
-__author__ = "asharif@google.com (Ahmad Sharif)"
+__author__ = 'asharif@google.com (Ahmad Sharif)'
import optparse
import os
@@ -20,22 +19,26 @@ from utils import command_executer
def Usage(parser, message):
- print "ERROR: " + message
+ print 'ERROR: ' + message
parser.print_help()
sys.exit(0)
+
def Main(argv):
parser = optparse.OptionParser()
- parser.add_option("-c", "--chromeos_root", dest="chromeos_root",
- help="ChromeOS root checkout directory")
- parser.add_option("-r", "--remote", dest="remote",
- help="Remote chromeos device.")
+ parser.add_option('-c',
+ '--chromeos_root',
+ dest='chromeos_root',
+ help='ChromeOS root checkout directory')
+ parser.add_option('-r',
+ '--remote',
+ dest='remote',
+ help='Remote chromeos device.')
options = parser.parse_args(argv)[0]
ce = command_executer.GetCommandExecuter()
- ce.RunCommand("ls; sleep 10000",
- machine=os.uname()[1])
+ ce.RunCommand('ls; sleep 10000', machine=os.uname()[1])
return 0
-if __name__ == "__main__":
+if __name__ == '__main__':
Main(sys.argv)
diff --git a/remote_test.py b/remote_test.py
index f3ff3fc0..59eb9eb5 100755
--- a/remote_test.py
+++ b/remote_test.py
@@ -1,13 +1,12 @@
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
-
"""Script to wrap test_that script.
This script can login to the chromeos machine using the test private key.
"""
-__author__ = "asharif@google.com (Ahmad Sharif)"
+__author__ = 'asharif@google.com (Ahmad Sharif)'
import optparse
import os
@@ -19,26 +18,31 @@ from utils import misc
def Usage(parser, message):
- print "ERROR: " + message
+ print 'ERROR: ' + message
parser.print_help()
sys.exit(0)
+
def Main(argv):
parser = optparse.OptionParser()
- parser.add_option("-c", "--chromeos_root", dest="chromeos_root",
- help="ChromeOS root checkout directory")
- parser.add_option("-r", "--remote", dest="remote",
- help="Remote chromeos device.")
+ parser.add_option('-c',
+ '--chromeos_root',
+ dest='chromeos_root',
+ help='ChromeOS root checkout directory')
+ parser.add_option('-r',
+ '--remote',
+ dest='remote',
+ help='Remote chromeos device.')
options = parser.parse_args(argv)[0]
if options.chromeos_root is None:
- Usage(parser, "chromeos_root must be given")
+ Usage(parser, 'chromeos_root must be given')
if options.remote is None:
- Usage(parser, "remote must be given")
+ Usage(parser, 'remote must be given')
options.chromeos_root = os.path.expanduser(options.chromeos_root)
- command = "ls -lt /"
+ command = 'ls -lt /'
ce = command_executer.GetCommandExecuter()
ce.CrosRunCommand(command,
chromeos_root=options.chromeos_root,
@@ -49,37 +53,37 @@ def Main(argv):
# Tests to copy directories and files to the chromeos box.
ce.CopyFiles(version_dir_path,
- "/tmp/" + version_dir,
+ '/tmp/' + version_dir,
dest_machine=options.remote,
dest_cros=True,
chromeos_root=options.chromeos_root)
ce.CopyFiles(version_dir_path,
- "/tmp/" + version_dir + "1",
+ '/tmp/' + version_dir + '1',
dest_machine=options.remote,
dest_cros=True,
chromeos_root=options.chromeos_root)
ce.CopyFiles(sys.argv[0],
- "/tmp/" + script_name,
+ '/tmp/' + script_name,
recursive=False,
dest_machine=options.remote,
dest_cros=True,
chromeos_root=options.chromeos_root)
ce.CopyFiles(sys.argv[0],
- "/tmp/" + script_name + "1",
+ '/tmp/' + script_name + '1',
recursive=False,
dest_machine=options.remote,
dest_cros=True,
chromeos_root=options.chromeos_root)
# Test to copy directories and files from the chromeos box.
- ce.CopyFiles("/tmp/" + script_name,
- "/tmp/hello",
+ ce.CopyFiles('/tmp/' + script_name,
+ '/tmp/hello',
recursive=False,
src_machine=options.remote,
src_cros=True,
chromeos_root=options.chromeos_root)
- ce.CopyFiles("/tmp/" + script_name,
- "/tmp/" + script_name,
+ ce.CopyFiles('/tmp/' + script_name,
+ '/tmp/' + script_name,
recursive=False,
src_machine=options.remote,
src_cros=True,
@@ -89,5 +93,5 @@ def Main(argv):
return 0
-if __name__ == "__main__":
+if __name__ == '__main__':
Main(sys.argv)
diff --git a/repo_to_repo.py b/repo_to_repo.py
index 487bbbbd..b0de0484 100755
--- a/repo_to_repo.py
+++ b/repo_to_repo.py
@@ -41,6 +41,7 @@ def SplitMapping(mapping):
class Repo(object):
+
def __init__(self, no_create_tmp_dir=False):
self.repo_type = None
self.address = None
@@ -68,14 +69,14 @@ class Repo(object):
def _RsyncExcludingRepoDirs(self, source_dir, dest_dir):
for f in os.listdir(source_dir):
- if f in [".git", ".svn", ".p4config"]:
+ if f in ['.git', '.svn', '.p4config']:
continue
dest_file = os.path.join(dest_dir, f)
source_file = os.path.join(source_dir, f)
if os.path.exists(dest_file):
- command = "rm -rf %s" % dest_file
+ command = 'rm -rf %s' % dest_file
self._ce.RunCommand(command)
- command = "rsync -a %s %s" % (source_file, dest_dir)
+ command = 'rsync -a %s %s' % (source_file, dest_dir)
self._ce.RunCommand(command)
return 0
@@ -91,14 +92,14 @@ class Repo(object):
return self._ce.RunCommand(command)
def __str__(self):
- return '\n'.join(str(s) for s in [self.repo_type,
- self.address,
- self.mappings])
+ return '\n'.join(str(s)
+ for s in [self.repo_type, self.address, self.mappings])
# Note - this type of repo is used only for "readonly", in other words, this
# only serves as a incoming repo.
class FileRepo(Repo):
+
def __init__(self, address, ignores=None):
Repo.__init__(self, no_create_tmp_dir=True)
self.repo_type = 'file'
@@ -115,6 +116,7 @@ class FileRepo(Repo):
class P4Repo(Repo):
+
def __init__(self, address, mappings, revision=None):
Repo.__init__(self)
self.repo_type = 'p4'
@@ -126,9 +128,9 @@ class P4Repo(Repo):
client_name = socket.gethostname()
client_name += tempfile.mkstemp()[1].replace('/', '-')
mappings = self.mappings
- p4view = perforce.View('depot2',
- GetCanonicalMappings(mappings))
- p4client = perforce.CommandsFactory(self._root_dir, p4view,
+ p4view = perforce.View('depot2', GetCanonicalMappings(mappings))
+ p4client = perforce.CommandsFactory(self._root_dir,
+ p4view,
name=client_name)
command = p4client.SetupAndDo(p4client.Sync(self.revision))
ret = self._ce.RunCommand(command)
@@ -144,6 +146,7 @@ class P4Repo(Repo):
class SvnRepo(Repo):
+
def __init__(self, address, mappings):
Repo.__init__(self)
self.repo_type = 'svn'
@@ -156,19 +159,22 @@ class SvnRepo(Repo):
remote_path, local_path = SplitMapping(mapping)
command = 'svn co %s/%s %s' % (self.address, remote_path, local_path)
ret = self._ce.RunCommand(command)
- if ret: return ret
+ if ret:
+ return ret
self.revision = ''
for mapping in self.mappings:
remote_path, local_path = SplitMapping(mapping)
command = 'cd %s && svnversion -c .' % (local_path)
ret, o, _ = self._ce.RunCommandWOutput(command)
- self.revision += o.strip().split(":")[-1]
- if ret: return ret
+ self.revision += o.strip().split(':')[-1]
+ if ret:
+ return ret
return 0
class GitRepo(Repo):
+
def __init__(self, address, branch, mappings=None, ignores=None, gerrit=None):
Repo.__init__(self)
self.repo_type = 'git'
@@ -187,11 +193,13 @@ class GitRepo(Repo):
def PullSources(self):
with misc.WorkingDirectory(self._root_dir):
ret = self._CloneSources()
- if ret: return ret
+ if ret:
+ return ret
command = 'git checkout %s' % self.branch
ret = self._ce.RunCommand(command)
- if ret: return ret
+ if ret:
+ return ret
command = 'git describe --always'
ret, o, _ = self._ce.RunCommandWOutput(command)
@@ -231,13 +239,14 @@ class GitRepo(Repo):
elif commit_message:
message_arg = '-m \'%s\'' % commit_message
else:
- raise Exception("No commit message given!")
+ raise Exception('No commit message given!')
command += '&& git commit -v %s' % message_arg
return self._ce.RunCommand(command)
def PushSources(self, commit_message=None, dry_run=False, message_file=None):
ret = self.CommitLocally(commit_message, message_file)
- if ret: return ret
+ if ret:
+ return ret
push_args = ''
if dry_run:
push_args += ' -n '
@@ -246,7 +255,7 @@ class GitRepo(Repo):
label = 'somelabel'
command = 'git remote add %s %s' % (label, self.address)
command += ('&& git push %s %s HEAD:refs/for/master' %
- (push_args,label))
+ (push_args, label))
else:
command = 'git push -v %s origin %s:%s' % (push_args, self.branch,
self.branch)
@@ -264,11 +273,13 @@ class GitRepo(Repo):
local_path.rstrip('...')
full_local_path = os.path.join(root_dir, local_path)
ret = self._RsyncExcludingRepoDirs(remote_path, full_local_path)
- if ret: return ret
+ if ret:
+ return ret
return 0
class RepoReader(object):
+
def __init__(self, filename):
self.filename = filename
self.main_dict = {}
@@ -302,12 +313,9 @@ class RepoReader(object):
revision = repo_dict.get('revision', None)
if repo_type == 'p4':
- repo = P4Repo(repo_address,
- repo_mappings,
- revision=revision)
+ repo = P4Repo(repo_address, repo_mappings, revision=revision)
elif repo_type == 'svn':
- repo = SvnRepo(repo_address,
- repo_mappings)
+ repo = SvnRepo(repo_address, repo_mappings)
elif repo_type == 'git':
repo = GitRepo(repo_address,
repo_branch,
@@ -353,29 +361,33 @@ def Main(argv):
for output_repo in output_repos:
if output_repo.repo_type == 'file':
logger.GetLogger().LogFatal(
- 'FileRepo is only supported as an input repo.')
+ 'FileRepo is only supported as an input repo.')
for output_repo in output_repos:
ret = output_repo.SetupForPush()
- if ret: return ret
+ if ret:
+ return ret
input_revisions = []
for input_repo in input_repos:
ret = input_repo.PullSources()
- if ret: return ret
+ if ret:
+ return ret
input_revisions.append(input_repo.revision)
for input_repo in input_repos:
for output_repo in output_repos:
ret = input_repo.MapSources(output_repo.GetRoot())
- if ret: return ret
+ if ret:
+ return ret
commit_message = 'Synced repos to: %s' % ','.join(input_revisions)
for output_repo in output_repos:
ret = output_repo.PushSources(commit_message=commit_message,
dry_run=options.dry_run,
message_file=options.message_file)
- if ret: return ret
+ if ret:
+ return ret
if not options.dry_run:
for output_repo in output_repos:
diff --git a/report_generator.py b/report_generator.py
index 6c7467aa..7dc53651 100755
--- a/report_generator.py
+++ b/report_generator.py
@@ -1,66 +1,71 @@
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
-
"""Script to compare a baseline results file to a new results file."""
-__author__ = "raymes@google.com (Raymes Khoury)"
+__author__ = 'raymes@google.com (Raymes Khoury)'
import sys
from utils import logger
from utils import html_tools
-PASS = "pass"
-FAIL = "fail"
-NOT_EXECUTED = "not executed"
+PASS = 'pass'
+FAIL = 'fail'
+NOT_EXECUTED = 'not executed'
+
class ResultsReport:
- def __init__(self, report, num_tests_executed, num_passes, num_failures, num_regressions):
+
+ def __init__(self, report, num_tests_executed, num_passes, num_failures,
+ num_regressions):
self.report = report
self.num_tests_executed = num_tests_executed
self.num_passes = num_passes
self.num_failures = num_failures
self.num_regressions = num_regressions
-
+
def GetReport(self):
return self.report
-
+
def GetNumExecuted(self):
return self.num_tests_executed
-
+
def GetNumPasses(self):
return self.num_passes
-
+
def GetNumFailures(self):
return self.num_failures
-
+
def GetNumRegressions(self):
return self.num_regressions
def GetSummary(self):
- summary = "Tests executed: %s\n" % str(self.num_tests_executed)
- summary += "Tests Passing: %s\n" % str(self.num_passes)
- summary += "Tests Failing: %s\n" % str(self.num_failures)
- summary += "Regressions: %s\n" % str(self.num_regressions)
+ summary = 'Tests executed: %s\n' % str(self.num_tests_executed)
+ summary += 'Tests Passing: %s\n' % str(self.num_passes)
+ summary += 'Tests Failing: %s\n' % str(self.num_failures)
+ summary += 'Regressions: %s\n' % str(self.num_regressions)
return summary
+
def Usage():
- print "Usage: %s baseline_results new_results" % sys.argv[0]
+ print 'Usage: %s baseline_results new_results' % sys.argv[0]
sys.exit(1)
+
def ParseResults(results_filename):
results = []
try:
results_file = open(results_filename, 'rb')
for line in results_file:
- if line.strip() != "":
- results.append(line.strip().split("\t"))
+ if line.strip() != '':
+ results.append(line.strip().split('\t'))
results_file.close()
except IOError:
- logger.GetLogger().LogWarning("Could not open results file: " +
+ logger.GetLogger().LogWarning('Could not open results file: ' +
results_filename)
return results
+
def GenerateResultsReport(baseline_file, new_result_file):
baseline_results = ParseResults(baseline_file)
new_results = ParseResults(new_result_file)
@@ -85,7 +90,7 @@ def GenerateResultsReport(baseline_file, new_result_file):
for result in test_status.keys():
if test_status[result][0] != test_status[result][1]:
regressions.append(result)
-
+
num_tests_executed = len(new_results)
num_regressions = len(regressions)
num_passes = 0
@@ -96,32 +101,35 @@ def GenerateResultsReport(baseline_file, new_result_file):
else:
num_failures += 1
- report = html_tools.GetPageHeader("Test Summary")
- report += html_tools.GetHeader("Test Summary")
+ report = html_tools.GetPageHeader('Test Summary')
+ report += html_tools.GetHeader('Test Summary')
report += html_tools.GetListHeader()
- report += html_tools.GetListItem("Tests executed: " + str(num_tests_executed))
- report += html_tools.GetListItem("Passes: " + str(num_passes))
- report += html_tools.GetListItem("Failures: " + str(num_failures))
- report += html_tools.GetListItem("Regressions: " + str(num_regressions))
+ report += html_tools.GetListItem('Tests executed: ' + str(num_tests_executed))
+ report += html_tools.GetListItem('Passes: ' + str(num_passes))
+ report += html_tools.GetListItem('Failures: ' + str(num_failures))
+ report += html_tools.GetListItem('Regressions: ' + str(num_regressions))
report += html_tools.GetListFooter()
- report += html_tools.GetHeader("Regressions", 2)
- report += html_tools.GetTableHeader(["Test name", "Expected result",
- "Actual result"])
-
+ report += html_tools.GetHeader('Regressions', 2)
+ report += html_tools.GetTableHeader(['Test name', 'Expected result',
+ 'Actual result'])
+
for regression in regressions:
- report += html_tools.GetTableRow([regression[:150], test_status[regression][1],
- test_status[regression][0]])
- report += "\n"
+ report += html_tools.GetTableRow([regression[:150], test_status[regression][
+ 1], test_status[regression][0]])
+ report += '\n'
report += html_tools.GetTableFooter()
- report += html_tools.GetHeader("All Tests", 2)
- report += html_tools.GetTableHeader(["Test name", "Expected result", "Actual result"])
+ report += html_tools.GetHeader('All Tests', 2)
+ report += html_tools.GetTableHeader(['Test name', 'Expected result',
+ 'Actual result'])
for result in test_status.keys():
report += html_tools.GetTableRow([result[:150], test_status[result][1],
- test_status[result][0]])
- report += "\n"
+ test_status[result][0]])
+ report += '\n'
report += html_tools.GetTableFooter()
report += html_tools.GetFooter()
- return ResultsReport(report, num_tests_executed, num_passes, num_failures, num_regressions)
+ return ResultsReport(report, num_tests_executed, num_passes, num_failures,
+ num_regressions)
+
def Main(argv):
if len(argv) < 2:
@@ -129,5 +137,6 @@ def Main(argv):
print GenerateResultsReport(argv[1], argv[2])[0]
-if __name__ == "__main__":
+
+if __name__ == '__main__':
Main(sys.argv)
diff --git a/run_benchmarks.py b/run_benchmarks.py
index cee9abc8..75ef48c5 100755
--- a/run_benchmarks.py
+++ b/run_benchmarks.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
-
"""Script to run ChromeOS benchmarks
Inputs:
@@ -11,7 +10,8 @@ Inputs:
hostname/IP of Chromeos machine
chromeos/cpu/<benchname>
- - Read run script rules from bench.mk perflab-bin, copy benchmark to host, run
+ - Read run script rules from bench.mk perflab-bin, copy benchmark to
+ host, run
and return results.
chromeos/startup
@@ -25,7 +25,7 @@ Inputs:
"""
-__author__ = "bjanakiraman@google.com (Bhaskar Janakiraman)"
+__author__ = 'bjanakiraman@google.com (Bhaskar Janakiraman)'
import optparse
import os
@@ -37,30 +37,27 @@ import run_tests
from utils import command_executer
from utils import logger
-
KNOWN_BENCHMARKS = [
- "chromeos/startup",
- "chromeos/browser/pagecycler",
- "chromeos/browser/sunspider",
- "chromeos/browser/v8bench",
- "chromeos/cpu/bikjmp"]
+ 'chromeos/startup', 'chromeos/browser/pagecycler',
+ 'chromeos/browser/sunspider', 'chromeos/browser/v8bench',
+ 'chromeos/cpu/bikjmp'
+]
name_map = {
- "pagecycler" : "Page",
- "sunspider" : "SunSpider",
- "v8bench" : "V8Bench",
- "startup" : "BootPerfServer"}
-
+ 'pagecycler': 'Page',
+ 'sunspider': 'SunSpider',
+ 'v8bench': 'V8Bench',
+ 'startup': 'BootPerfServer'
+}
# Run command template
-
# Common initializations
cmd_executer = command_executer.GetCommandExecuter()
def Usage(parser, message):
- print "ERROR: " + message
+ print 'ERROR: ' + message
parser.print_help()
sys.exit(0)
@@ -118,16 +115,18 @@ def RunCpuBenchmark(chromeos_root, bench, workdir, machine):
# Since this has exclusive access to the machine,
# we do not worry about duplicates.
args = 'rm -rf /tmp/%s' % benchname
- retval = cmd_executer.CrosRunCommand(args, chromeos_root=chromeos_root,
+ retval = cmd_executer.CrosRunCommand(args,
+ chromeos_root=chromeos_root,
machine=machine)
if retval:
return retval
# Copy benchmark directory.
- retval = cmd_executer.CopyFiles(benchdir, "/tmp/" + benchname,
- chromeos_root=chromeos_root,
- dest_machine=machine,
- dest_cros=True)
+ retval = cmd_executer.CopyFiles(benchdir,
+ '/tmp/' + benchname,
+ chromeos_root=chromeos_root,
+ dest_machine=machine,
+ dest_cros=True)
if retval:
return retval
@@ -146,7 +145,8 @@ def RunCpuBenchmark(chromeos_root, bench, workdir, machine):
# Capture output and process it.
sshargs = "'cd /tmp/%s;" % benchname
sshargs += "time -p %s'" % run_cmd
- cmd_executer.CrosRunCommand(sshargs, chromeos_root=chromeos_root,
+ cmd_executer.CrosRunCommand(sshargs,
+ chromeos_root=chromeos_root,
machine=machine)
return retval
@@ -157,30 +157,37 @@ def Main(argv):
# Common initializations
parser = optparse.OptionParser()
- parser.add_option("-c", "--chromeos_root", dest="chromeos_root",
- help="Target directory for ChromeOS installation.")
- parser.add_option("-m", "--machine", dest="machine",
- help="The chromeos host machine.")
- parser.add_option("--workdir", dest="workdir", default="./perflab-bin",
- help="Work directory for perflab outputs.")
- parser.add_option("--board", dest="board",
- help="ChromeOS target board, e.g. x86-generic")
+ parser.add_option('-c',
+ '--chromeos_root',
+ dest='chromeos_root',
+ help='Target directory for ChromeOS installation.')
+ parser.add_option('-m',
+ '--machine',
+ dest='machine',
+ help='The chromeos host machine.')
+ parser.add_option('--workdir',
+ dest='workdir',
+ default='./perflab-bin',
+ help='Work directory for perflab outputs.')
+ parser.add_option('--board',
+ dest='board',
+ help='ChromeOS target board, e.g. x86-generic')
(options, args) = parser.parse_args(argv[1:])
# validate args
for arg in args:
if arg not in KNOWN_BENCHMARKS:
- logger.GetLogger().LogFatal("Bad benchmark %s specified" % arg)
+ logger.GetLogger().LogFatal('Bad benchmark %s specified' % arg)
if options.chromeos_root is None:
- Usage(parser, "--chromeos_root must be set")
+ Usage(parser, '--chromeos_root must be set')
if options.board is None:
- Usage(parser, "--board must be set")
+ Usage(parser, '--board must be set')
if options.machine is None:
- Usage(parser, "--machine must be set")
+ Usage(parser, '--machine must be set')
found_err = 0
retval = 0
@@ -189,50 +196,47 @@ def Main(argv):
comps = re.split('/', arg)
if re.match('chromeos/cpu', arg):
benchname = comps[2]
- print "RUNNING %s" % benchname
- retval = RunCpuBenchmark(options.chromeos_root,
- arg, options.workdir, options.machine)
+ print 'RUNNING %s' % benchname
+ retval = RunCpuBenchmark(options.chromeos_root, arg, options.workdir,
+ options.machine)
if not found_err:
found_err = retval
elif re.match('chromeos/startup', arg):
benchname = comps[1]
- image_args = [os.path.dirname(os.path.abspath(__file__)) +
- "/image_chromeos.py",
- "--chromeos_root=" + options.chromeos_root,
- "--remote=" + options.machine,
- "--image=" + options.workdir + "/" +
- benchname + "/chromiumos_image.bin"
- ]
- logger.GetLogger().LogOutput("Reimaging machine %s" % options.machine)
+ image_args = [
+ os.path.dirname(os.path.abspath(__file__)) + '/image_chromeos.py',
+ '--chromeos_root=' + options.chromeos_root,
+ '--remote=' + options.machine, '--image=' + options.workdir + '/' +
+ benchname + '/chromiumos_image.bin'
+ ]
+ logger.GetLogger().LogOutput('Reimaging machine %s' % options.machine)
image_chromeos.Main(image_args)
- logger.GetLogger().LogOutput("Running %s" % arg)
- retval = RunStartupBenchmark(options.chromeos_root,
- options.board,
- arg, options.workdir, options.machine)
+ logger.GetLogger().LogOutput('Running %s' % arg)
+ retval = RunStartupBenchmark(options.chromeos_root, options.board, arg,
+ options.workdir, options.machine)
if not found_err:
found_err = retval
elif re.match('chromeos/browser', arg):
benchname = comps[2]
- image_args = [os.path.dirname(os.path.abspath(__file__)) +
- "/image_chromeos.py",
- "--chromeos_root=" + options.chromeos_root,
- "--remote=" + options.machine,
- "--image=" + options.workdir + "/" +
- benchname + "/chromiumos_image.bin"
- ]
- logger.GetLogger().LogOutput("Reimaging machine %s" % options.machine)
+ image_args = [
+ os.path.dirname(os.path.abspath(__file__)) + '/image_chromeos.py',
+ '--chromeos_root=' + options.chromeos_root,
+ '--remote=' + options.machine, '--image=' + options.workdir + '/' +
+ benchname + '/chromiumos_image.bin'
+ ]
+ logger.GetLogger().LogOutput('Reimaging machine %s' % options.machine)
image_chromeos.Main(image_args)
- logger.GetLogger().LogOutput("Running %s" % arg)
- retval = RunBrowserBenchmark(options.chromeos_root,
- options.board,
- arg, options.workdir, options.machine)
+ logger.GetLogger().LogOutput('Running %s' % arg)
+ retval = RunBrowserBenchmark(options.chromeos_root, options.board, arg,
+ options.workdir, options.machine)
if not found_err:
found_err = retval
return found_err
-if __name__ == "__main__":
+
+if __name__ == '__main__':
retval = Main(sys.argv)
sys.exit(retval)
diff --git a/run_tests.py b/run_tests.py
index 07174cb2..11bbb8c3 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -1,13 +1,12 @@
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
-
"""Script to wrap run_remote_tests.sh script.
This script calls run_remote_tests.sh with standard tests.
"""
-__author__ = "asharif@google.com (Ahmad Sharif)"
+__author__ = 'asharif@google.com (Ahmad Sharif)'
import optparse
import os
@@ -21,8 +20,9 @@ import build_chromeos
def Main(argv):
"""The main function."""
- print "This script is deprecated. Use crosperf for running tests."
+ print 'This script is deprecated. Use crosperf for running tests.'
return 1
-if __name__ == "__main__":
+
+if __name__ == '__main__':
sys.exit(Main(sys.argv))
diff --git a/setup_chromeos.py b/setup_chromeos.py
index 8a66c800..b90e2ae4 100755
--- a/setup_chromeos.py
+++ b/setup_chromeos.py
@@ -3,14 +3,13 @@
# Copyright 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Script to checkout the ChromeOS source.
This script sets up the ChromeOS source in the given directory, matching a
particular release of ChromeOS.
"""
-__author__ = "raymes@google.com (Raymes Khoury)"
+__author__ = 'raymes@google.com (Raymes Khoury)'
from datetime import datetime
import getpass
@@ -41,64 +40,70 @@ GCLIENT_FILE = """solutions = [
# List of stable versions used for common team image
# Sheriff must update this list when a new common version becomes available
-COMMON_VERSIONS = "/home/mobiletc-prebuild/common_images/common_list.txt"
+COMMON_VERSIONS = '/home/mobiletc-prebuild/common_images/common_list.txt'
+
def Usage(parser):
parser.print_help()
sys.exit(0)
+
# Get version spec file, either from "paladin" or "buildspec" directory.
def GetVersionSpecFile(version, versions_git):
temp = tempfile.mkdtemp()
- commands = ["cd {0}".format(temp), \
- "git clone {0} versions".format(versions_git)]
+ commands = ['cd {0}'.format(temp), \
+ 'git clone {0} versions'.format(versions_git)]
cmd_executer = command_executer.GetCommandExecuter()
ret = cmd_executer.RunCommands(commands)
err_msg = None
if ret:
- err_msg = "Failed to checkout versions_git - {0}".format(versions_git)
+ err_msg = 'Failed to checkout versions_git - {0}'.format(versions_git)
ret = None
else:
- v, m = version.split(".", 1)
- paladin_spec = "paladin/buildspecs/{0}/{1}.xml".format(v, m)
- generic_spec = "buildspecs/{0}/{1}.xml".format(v, m)
- paladin_path = "{0}/versions/{1}".format(temp, paladin_spec)
- generic_path = "{0}/versions/{1}".format(temp, generic_spec)
+ v, m = version.split('.', 1)
+ paladin_spec = 'paladin/buildspecs/{0}/{1}.xml'.format(v, m)
+ generic_spec = 'buildspecs/{0}/{1}.xml'.format(v, m)
+ paladin_path = '{0}/versions/{1}'.format(temp, paladin_spec)
+ generic_path = '{0}/versions/{1}'.format(temp, generic_spec)
if os.path.exists(paladin_path):
ret = paladin_spec
elif os.path.exists(generic_path):
ret = generic_spec
else:
- err_msg = "No spec found for version {0}".format(version)
+ err_msg = 'No spec found for version {0}'.format(version)
ret = None
# Fall through to clean up.
- commands = ["rm -rf {0}".format(temp)]
+ commands = ['rm -rf {0}'.format(temp)]
cmd_executer.RunCommands(commands)
if err_msg:
logger.GetLogger().LogFatal(err_msg)
return ret
+
def TimeToCommonVersion(timestamp):
"""Convert timestamp to common image version."""
tdt = datetime.fromtimestamp(float(timestamp))
- with open(COMMON_VERSIONS, "r") as f:
+ with open(COMMON_VERSIONS, 'r') as f:
common_list = pickle.load(f)
for sv in common_list:
- sdt = datetime.strptime(sv["date"], "%Y-%m-%d %H:%M:%S.%f")
+ sdt = datetime.strptime(sv['date'], '%Y-%m-%d %H:%M:%S.%f')
if tdt >= sdt:
- return "%s.%s" % (sv["chrome_major_version"], sv["chromeos_version"])
+ return '%s.%s' % (sv['chrome_major_version'], sv['chromeos_version'])
# should never reach here
- logger.GetLogger().LogFatal("No common version for timestamp")
+ logger.GetLogger().LogFatal('No common version for timestamp')
return None
def Main(argv):
"""Checkout the ChromeOS source."""
parser = optparse.OptionParser()
- parser.add_option("--dir", dest="directory",
- help="Target directory for ChromeOS installation.")
- parser.add_option("--version", dest="version", default="latest_lkgm",
+ parser.add_option('--dir',
+ dest='directory',
+ help='Target directory for ChromeOS installation.')
+ parser.add_option('--version',
+ dest='version',
+ default='latest_lkgm',
help="""ChromeOS version. Can be:
(1) A release version in the format: 'X.X.X.X'
(2) 'top' for top of trunk
@@ -107,128 +112,136 @@ def Main(argv):
(5) 'latest_common' for the latest team common stable version
(6) 'common' for the team common stable version before timestamp
Default is 'latest_lkgm'.""")
- parser.add_option("--timestamp", dest="timestamp", default=None,
+ parser.add_option('--timestamp',
+ dest='timestamp',
+ default=None,
help="""Timestamps in epoch format. It will check out the
latest LKGM or the latest COMMON version of ChromeOS before the timestamp.
Use in combination with --version=latest or --version=common. Use
'date -d <date string> +%s' to find epoch time""")
- parser.add_option("--minilayout", dest="minilayout", default=False,
- action="store_true",
+ parser.add_option('--minilayout',
+ dest='minilayout',
+ default=False,
+ action='store_true',
help="""Whether to checkout the minilayout
(smaller checkout).'""")
- parser.add_option("--jobs", "-j", dest="jobs",
- help="Number of repo sync threads to use.")
- parser.add_option("--public", "-p", dest="public", default=False,
- action="store_true",
- help="Use the public checkout instead of the private one.")
+ parser.add_option('--jobs',
+ '-j',
+ dest='jobs',
+ help='Number of repo sync threads to use.')
+ parser.add_option('--public',
+ '-p',
+ dest='public',
+ default=False,
+ action='store_true',
+ help='Use the public checkout instead of the private one.')
options = parser.parse_args(argv)[0]
if not options.version:
parser.print_help()
- logger.GetLogger().LogFatal("No version specified.")
+ logger.GetLogger().LogFatal('No version specified.')
else:
version = options.version.strip()
if not options.timestamp:
- timestamp = ""
+ timestamp = ''
else:
timestamp = options.timestamp.strip()
- if version not in ("lkgm", "common"):
+ if version not in ('lkgm', 'common'):
parser.print_help()
- logger.GetLogger().LogFatal("timestamp option only applies for "
+ logger.GetLogger().LogFatal('timestamp option only applies for '
"versions \"lkgm\" or \"common\"")
if not options.directory:
parser.print_help()
- logger.GetLogger().LogFatal("No directory specified.")
+ logger.GetLogger().LogFatal('No directory specified.')
directory = options.directory.strip()
if options.public:
- manifest_repo = "https://chromium.googlesource.com/chromiumos/manifest.git"
- versions_repo = ("https://chromium.googlesource.com/"
- "chromiumos/manifest-versions.git")
+ manifest_repo = 'https://chromium.googlesource.com/chromiumos/manifest.git'
+ versions_repo = ('https://chromium.googlesource.com/'
+ 'chromiumos/manifest-versions.git')
else:
manifest_repo = (
- "https://chrome-internal.googlesource.com/chromeos/manifest-internal.git")
+ 'https://chrome-internal.googlesource.com/chromeos/manifest-internal.git'
+ )
versions_repo = (
- "https://chrome-internal.googlesource.com/chromeos/manifest-versions.git")
+ 'https://chrome-internal.googlesource.com/chromeos/manifest-versions.git'
+ )
- if version == "top":
- init = "repo init -u %s" % manifest_repo
- elif version == "latest_lkgm":
+ if version == 'top':
+ init = 'repo init -u %s' % manifest_repo
+ elif version == 'latest_lkgm':
manifests = manifest_versions.ManifestVersions()
version = manifests.TimeToVersion(time.mktime(time.gmtime()))
- version, manifest = version.split(".", 1)
- logger.GetLogger().LogOutput("found version %s.%s for latest LKGM" % (
- version, manifest))
- init = ("repo init -u %s -m paladin/buildspecs/%s/%s.xml" % (
- versions_repo, version, manifest))
+ version, manifest = version.split('.', 1)
+ logger.GetLogger().LogOutput('found version %s.%s for latest LKGM' %
+ (version, manifest))
+ init = ('repo init -u %s -m paladin/buildspecs/%s/%s.xml' %
+ (versions_repo, version, manifest))
del manifests
- elif version == "lkgm":
+ elif version == 'lkgm':
if not timestamp:
parser.print_help()
- logger.GetLogger().LogFatal("No timestamp specified for version=lkgm")
+ logger.GetLogger().LogFatal('No timestamp specified for version=lkgm')
manifests = manifest_versions.ManifestVersions()
version = manifests.TimeToVersion(timestamp)
- version, manifest = version.split(".", 1)
- logger.GetLogger().LogOutput("found version %s.%s for LKGM at timestamp %s"
+ version, manifest = version.split('.', 1)
+ logger.GetLogger().LogOutput('found version %s.%s for LKGM at timestamp %s'
% (version, manifest, timestamp))
- init = ("repo init -u %s -m paladin/buildspecs/%s/%s.xml" % (
- versions_repo, version, manifest))
+ init = ('repo init -u %s -m paladin/buildspecs/%s/%s.xml' %
+ (versions_repo, version, manifest))
del manifests
- elif version == "latest_common":
+ elif version == 'latest_common':
version = TimeToCommonVersion(time.mktime(time.gmtime()))
- version, manifest = version.split(".", 1)
- logger.GetLogger().LogOutput("found version %s.%s for latest Common image" %
+ version, manifest = version.split('.', 1)
+ logger.GetLogger().LogOutput('found version %s.%s for latest Common image' %
(version, manifest))
- init = ("repo init -u %s -m buildspecs/%s/%s.xml" % (
- versions_repo, version, manifest))
- elif version == "common":
+ init = ('repo init -u %s -m buildspecs/%s/%s.xml' % (versions_repo, version,
+ manifest))
+ elif version == 'common':
if not timestamp:
parser.print_help()
- logger.GetLogger().LogFatal("No timestamp specified for version=lkgm")
+ logger.GetLogger().LogFatal('No timestamp specified for version=lkgm')
version = TimeToCommonVersion(timestamp)
- version, manifest = version.split(".", 1)
- logger.GetLogger().LogOutput("found version %s.%s for latest common image "
- "at timestamp %s" % (
- version, manifest, timestamp))
- init = ("repo init -u %s -m buildspecs/%s/%s.xml" % (
- versions_repo, version, manifest))
+ version, manifest = version.split('.', 1)
+ logger.GetLogger().LogOutput('found version %s.%s for latest common image '
+ 'at timestamp %s' % (version, manifest,
+ timestamp))
+ init = ('repo init -u %s -m buildspecs/%s/%s.xml' % (versions_repo, version,
+ manifest))
else:
# user specified a specific version number
version_spec_file = GetVersionSpecFile(version, versions_repo)
if not version_spec_file:
return 1
- init = "repo init -u %s -m %s" % (versions_repo, version_spec_file)
+ init = 'repo init -u %s -m %s' % (versions_repo, version_spec_file)
if options.minilayout:
- init += " -g minilayout"
+ init += ' -g minilayout'
- init += " --repo-url=https://chromium.googlesource.com/external/repo.git"
+ init += ' --repo-url=https://chromium.googlesource.com/external/repo.git'
# crosbug#31837 - "Sources need to be world-readable to properly
# function inside the chroot"
- sync = "umask 022 && repo sync"
+ sync = 'umask 022 && repo sync'
if options.jobs:
- sync += " -j %s" % options.jobs
+ sync += ' -j %s' % options.jobs
- commands = ["mkdir -p %s" % directory,
- "cd %s" % directory,
- init,
- sync]
+ commands = ['mkdir -p %s' % directory, 'cd %s' % directory, init, sync]
cmd_executer = command_executer.GetCommandExecuter()
ret = cmd_executer.RunCommands(commands)
if ret:
return ret
return cmd_executer.RunCommand(
- "git ls-remote "
- "https://chrome-internal.googlesource.com/chrome/src-internal.git "
- "> /dev/null")
+ 'git ls-remote '
+ 'https://chrome-internal.googlesource.com/chrome/src-internal.git '
+ '> /dev/null')
-if __name__ == "__main__":
+if __name__ == '__main__':
retval = Main(sys.argv)
sys.exit(retval)
diff --git a/sheriff_rotation.py b/sheriff_rotation.py
index 64c655f5..718bdb26 100755
--- a/sheriff_rotation.py
+++ b/sheriff_rotation.py
@@ -1,13 +1,12 @@
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
-
"""Script to build the ChromeOS toolchain.
This script sets up the toolchain if you give it the gcctools directory.
"""
-__author__ = "asharif@google.com (Ahmad Sharif)"
+__author__ = 'asharif@google.com (Ahmad Sharif)'
import datetime
import os
@@ -18,9 +17,10 @@ from utils import email_sender
class SheriffHandler(object):
- SHERIFF_FILE = os.path.join(constants.CROSTC_WORKSPACE, "sheriffs.txt")
- SUBJECT = "You (%s) are the sheriff for the week: %s - %s"
- BODY = "Please see instructions here: https://sites.google.com/a/google.com/chromeos-toolchain-team-home2/home/sheriff-s-corner/sheriff-duties"
+ SHERIFF_FILE = os.path.join(constants.CROSTC_WORKSPACE, 'sheriffs.txt')
+ SUBJECT = 'You (%s) are the sheriff for the week: %s - %s'
+ BODY = ('Please see instructions here: '
+ 'https://sites.google.com/a/google.com/chromeos-toolchain-team-home2/home/sheriff-s-corner/sheriff-duties')
def GetWeekInfo(self, day=datetime.datetime.today()):
"""Return week_start, week_end."""
@@ -28,14 +28,14 @@ class SheriffHandler(object):
epoch = datetime.datetime.utcfromtimestamp(0)
delta_since_epoch = day - epoch
- abs_days = abs(delta_since_epoch.days) - 2 # To get it to start from Sat.
+ abs_days = abs(delta_since_epoch.days) - 2 # To get it to start from Sat.
weeks_since_epoch = abs_days / 7
day_of_week = abs_days % 7
week_begin = day - datetime.timedelta(days=day_of_week)
week_end = day + datetime.timedelta(days=(6 - day_of_week))
- strftime_format = "%A, %B %d %Y"
+ strftime_format = '%A, %B %d %Y'
return (week_begin.strftime(strftime_format),
week_end.strftime(strftime_format))
@@ -46,14 +46,14 @@ class SheriffHandler(object):
def ReadSheriffsAsList(self):
"""Return the sheriff file contents."""
- contents = ""
- with open(self.SHERIFF_FILE, "r") as f:
+ contents = ''
+ with open(self.SHERIFF_FILE, 'r') as f:
contents = f.read()
return contents.splitlines()
def WriteSheriffsAsList(self, to_write):
- with open(self.SHERIFF_FILE, "w") as f:
- f.write("\n".join(to_write))
+ with open(self.SHERIFF_FILE, 'w') as f:
+ f.write('\n'.join(to_write))
def GetRotatedSheriffs(self, num_rotations=1):
"""Return the sheriff file contents."""
@@ -61,8 +61,8 @@ class SheriffHandler(object):
new_sheriff_list = []
num_rotations = num_rotations % len(sheriff_list)
- new_sheriff_list = (sheriff_list[num_rotations:] +
- sheriff_list[:num_rotations])
+ new_sheriff_list = (
+ sheriff_list[num_rotations:] + sheriff_list[:num_rotations])
return new_sheriff_list
def Email(self):
@@ -74,26 +74,26 @@ class SheriffHandler(object):
subject,
self.BODY,
email_from=os.path.basename(__file__),
- email_cc=["c-compiler-chrome"])
+ email_cc=['c-compiler-chrome'])
def Main(argv):
parser = optparse.OptionParser()
- parser.add_option("-e",
- "--email",
- dest="email",
- action="store_true",
- help="Email the sheriff.")
- parser.add_option("-r",
- "--rotate",
- dest="rotate",
- help="Print sheriffs after n rotations.")
- parser.add_option("-w",
- "--write",
- dest="write",
- action="store_true",
+ parser.add_option('-e',
+ '--email',
+ dest='email',
+ action='store_true',
+ help='Email the sheriff.')
+ parser.add_option('-r',
+ '--rotate',
+ dest='rotate',
+ help='Print sheriffs after n rotations.')
+ parser.add_option('-w',
+ '--write',
+ dest='write',
+ action='store_true',
default=False,
- help="Wrote rotated contents to the sheriff file.")
+ help='Wrote rotated contents to the sheriff file.')
options, _ = parser.parse_args(argv)
@@ -102,22 +102,23 @@ def Main(argv):
current_sheriff = sheriff_handler.GetCurrentSheriff()
week_start, week_end = sheriff_handler.GetWeekInfo()
- print "Current sheriff: %s (%s - %s)" % (current_sheriff, week_start, week_end)
+ print 'Current sheriff: %s (%s - %s)' % (current_sheriff, week_start,
+ week_end)
if options.email:
sheriff_handler.Email()
if options.rotate:
rotated_sheriffs = sheriff_handler.GetRotatedSheriffs(int(options.rotate))
- print "Rotated sheriffs (after %s rotations)" % options.rotate
- print "\n".join(rotated_sheriffs)
+ print 'Rotated sheriffs (after %s rotations)' % options.rotate
+ print '\n'.join(rotated_sheriffs)
if options.write:
sheriff_handler.WriteSheriffsAsList(rotated_sheriffs)
- print "Rotated sheriffs written to file."
+ print 'Rotated sheriffs written to file.'
return 0
-if __name__ == "__main__":
+if __name__ == '__main__':
retval = Main(sys.argv)
sys.exit(retval)
diff --git a/summarize_results.py b/summarize_results.py
index 4dbcf0e4..69ab4c25 100755
--- a/summarize_results.py
+++ b/summarize_results.py
@@ -1,20 +1,21 @@
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
-
"""Script to summarize the results of various log files."""
-__author__ = "raymes@google.com (Raymes Khoury)"
+__author__ = 'raymes@google.com (Raymes Khoury)'
from utils import command_executer
import os
import sys
import re
-RESULTS_DIR = "results"
-RESULTS_FILE = RESULTS_DIR + "/results.csv"
+RESULTS_DIR = 'results'
+RESULTS_FILE = RESULTS_DIR + '/results.csv'
+
class DejaGNUSummarizer:
+
def Matches(self, log_file):
for log_line in log_file:
if log_line.find("""tests ===""") > -1:
@@ -22,47 +23,49 @@ class DejaGNUSummarizer:
return False
def Summarize(self, log_file, filename):
- result = ""
- pass_statuses = ["PASS", "XPASS"]
- fail_statuses = ["FAIL", "XFAIL", "UNSUPPORTED"]
+ result = ''
+ pass_statuses = ['PASS', 'XPASS']
+ fail_statuses = ['FAIL', 'XFAIL', 'UNSUPPORTED']
name_count = {}
for line in log_file:
- line = line.strip().split(":")
+ line = line.strip().split(':')
if len(line) > 1 and (line[0] in pass_statuses or
line[0] in fail_statuses):
- test_name = (":".join(line[1:])).replace("\t", " ").strip()
+ test_name = (':'.join(line[1:])).replace('\t', ' ').strip()
count = name_count.get(test_name, 0) + 1
name_count[test_name] = count
- test_name = "%s (%s)" % (test_name, str(count))
+ test_name = '%s (%s)' % (test_name, str(count))
if line[0] in pass_statuses:
- test_result = "pass"
+ test_result = 'pass'
else:
- test_result = "fail"
- result += "%s\t%s\t%s\n" % (test_name, test_result, filename)
+ test_result = 'fail'
+ result += '%s\t%s\t%s\n' % (test_name, test_result, filename)
return result
class PerflabSummarizer:
+
def Matches(self, log_file):
- p = re.compile("METRIC isolated \w+")
+ p = re.compile('METRIC isolated \w+')
for log_line in log_file:
if p.search(log_line):
return True
return False
def Summarize(self, log_file, filename):
- result = ""
+ result = ''
p = re.compile("METRIC isolated (\w+) .*\['(.*?)'\]")
- log_file_lines = "\n".join(log_file)
+ log_file_lines = '\n'.join(log_file)
matches = p.findall(log_file_lines)
for match in matches:
if len(match) != 2:
continue
- result += "%s\t%s\n" % (match[0], match[1])
+ result += '%s\t%s\n' % (match[0], match[1])
return result
class AutoTestSummarizer:
+
def Matches(self, log_file):
for log_line in log_file:
if log_line.find("""Installing autotest on""") > -1:
@@ -70,23 +73,24 @@ class AutoTestSummarizer:
return False
def Summarize(self, log_file, filename):
- result = ""
- pass_statuses = ["PASS"]
- fail_statuses = ["FAIL"]
+ result = ''
+ pass_statuses = ['PASS']
+ fail_statuses = ['FAIL']
for line in log_file:
- line = line.strip().split(" ")
+ line = line.strip().split(' ')
if len(line) > 1 and (line[-1].strip() in pass_statuses or
line[-1].strip() in fail_statuses):
test_name = (line[0].strip())
if line[-1].strip() in pass_statuses:
- test_result = "pass"
+ test_result = 'pass'
else:
- test_result = "fail"
- result += "%s\t%s\t%s\n" % (test_name, test_result, filename)
+ test_result = 'fail'
+ result += '%s\t%s\t%s\n' % (test_name, test_result, filename)
return result
+
def Usage():
- print "Usage: %s log_file" % sys.argv[0]
+ print 'Usage: %s log_file' % sys.argv[0]
sys.exit(1)
@@ -112,15 +116,15 @@ def Main(argv):
filename = argv[1]
executer = command_executer.GetCommandExecuter()
- executer.RunCommand("mkdir -p %s" % RESULTS_DIR)
+ executer.RunCommand('mkdir -p %s' % RESULTS_DIR)
summary = SummarizeFile(filename)
if summary is not None:
- output = open(RESULTS_FILE, "a")
- output.write(summary.strip() + "\n")
+ output = open(RESULTS_FILE, 'a')
+ output.write(summary.strip() + '\n')
output.close()
return 0
-if __name__ == "__main__":
+
+if __name__ == '__main__':
retval = Main(sys.argv)
sys.exit(retval)
-
diff --git a/tc_enter_chroot.py b/tc_enter_chroot.py
index c68c48a2..39bb7dc4 100755
--- a/tc_enter_chroot.py
+++ b/tc_enter_chroot.py
@@ -1,13 +1,12 @@
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
-
"""Script to enter the ChromeOS chroot with mounted sources.
This script enters the chroot with mounted sources.
"""
-__author__ = "asharif@google.com (Ahmad Sharif)"
+__author__ = 'asharif@google.com (Ahmad Sharif)'
import getpass
import optparse
@@ -22,93 +21,101 @@ from utils import misc
class MountPoint:
+
def __init__(self, external_dir, mount_dir, owner, options=None):
self.external_dir = os.path.realpath(external_dir)
self.mount_dir = os.path.realpath(mount_dir)
self.owner = owner
self.options = options
-
def CreateAndOwnDir(self, dir_name):
retval = 0
if not os.path.exists(dir_name):
- command = "mkdir -p " + dir_name
- command += " || sudo mkdir -p " + dir_name
+ command = 'mkdir -p ' + dir_name
+ command += ' || sudo mkdir -p ' + dir_name
retval = command_executer.GetCommandExecuter().RunCommand(command)
if retval != 0:
return retval
pw = pwd.getpwnam(self.owner)
if os.stat(dir_name).st_uid != pw.pw_uid:
- command = "sudo chown -f " + self.owner + " " + dir_name
+ command = 'sudo chown -f ' + self.owner + ' ' + dir_name
retval = command_executer.GetCommandExecuter().RunCommand(command)
return retval
-
def DoMount(self):
ce = command_executer.GetCommandExecuter()
- mount_signature = "%s on %s" % (self.external_dir, self.mount_dir)
- command = "mount"
+ mount_signature = '%s on %s' % (self.external_dir, self.mount_dir)
+ command = 'mount'
retval, out, err = ce.RunCommandWOutput(command)
if mount_signature not in out:
retval = self.CreateAndOwnDir(self.mount_dir)
- logger.GetLogger().LogFatalIf(retval, "Cannot create mount_dir!")
+ logger.GetLogger().LogFatalIf(retval, 'Cannot create mount_dir!')
retval = self.CreateAndOwnDir(self.external_dir)
- logger.GetLogger().LogFatalIf(retval, "Cannot create external_dir!")
+ logger.GetLogger().LogFatalIf(retval, 'Cannot create external_dir!')
retval = self.MountDir()
- logger.GetLogger().LogFatalIf(retval, "Cannot mount!")
+ logger.GetLogger().LogFatalIf(retval, 'Cannot mount!')
return retval
else:
return 0
-
def UnMount(self):
ce = command_executer.GetCommandExecuter()
- return ce.RunCommand("sudo umount %s" % self.mount_dir)
-
+ return ce.RunCommand('sudo umount %s' % self.mount_dir)
def MountDir(self):
- command = "sudo mount --bind " + self.external_dir + " " + self.mount_dir
- if self.options == "ro":
- command += " && sudo mount --bind -oremount,ro " + self.mount_dir
+ command = 'sudo mount --bind ' + self.external_dir + ' ' + self.mount_dir
+ if self.options == 'ro':
+ command += ' && sudo mount --bind -oremount,ro ' + self.mount_dir
retval = command_executer.GetCommandExecuter().RunCommand(command)
return retval
-
def __str__(self):
- ret = ""
- ret += self.external_dir + "\n"
- ret += self.mount_dir + "\n"
+ ret = ''
+ ret += self.external_dir + '\n'
+ ret += self.mount_dir + '\n'
if self.owner:
- ret += self.owner + "\n"
+ ret += self.owner + '\n'
if self.options:
- ret += self.options + "\n"
+ ret += self.options + '\n'
return ret
def Main(argv, return_output=False):
"""The main function."""
parser = optparse.OptionParser()
- parser.add_option("-c", "--chromeos_root", dest="chromeos_root",
- default="../..",
- help="ChromeOS root checkout directory.")
- parser.add_option("-t", "--toolchain_root", dest="toolchain_root",
- help="Toolchain root directory.")
- parser.add_option("-o", "--output", dest="output",
- help="Toolchain output directory")
- parser.add_option("--sudo", dest="sudo",
- action="store_true",
+ parser.add_option('-c',
+ '--chromeos_root',
+ dest='chromeos_root',
+ default='../..',
+ help='ChromeOS root checkout directory.')
+ parser.add_option('-t',
+ '--toolchain_root',
+ dest='toolchain_root',
+ help='Toolchain root directory.')
+ parser.add_option('-o',
+ '--output',
+ dest='output',
+ help='Toolchain output directory')
+ parser.add_option('--sudo',
+ dest='sudo',
+ action='store_true',
default=False,
- help="Run the command with sudo.")
- parser.add_option("-r", "--third_party", dest="third_party",
- help="The third_party directory to mount.")
- parser.add_option("-m", "--other_mounts", dest="other_mounts",
- help="Other mount points in the form: " +
- "dir:mounted_dir:options")
- parser.add_option("-s", "--mount-scripts-only",
- dest="mount_scripts_only",
- action="store_true",
+ help='Run the command with sudo.')
+ parser.add_option('-r',
+ '--third_party',
+ dest='third_party',
+ help='The third_party directory to mount.')
+ parser.add_option(
+ '-m',
+ '--other_mounts',
+ dest='other_mounts',
+ help='Other mount points in the form: ' + 'dir:mounted_dir:options')
+ parser.add_option('-s',
+ '--mount-scripts-only',
+ dest='mount_scripts_only',
+ action='store_true',
default=False,
- help="Mount only the scripts dir, and not the sources.")
+ help='Mount only the scripts dir, and not the sources.')
passthrough_argv = []
(options, passthrough_argv) = parser.parse_args(argv)
@@ -123,79 +130,78 @@ def Main(argv, return_output=False):
tc_dirs = []
if options.toolchain_root is None or options.mount_scripts_only:
- m = "toolchain_root not specified. Will not mount toolchain dirs."
+ m = 'toolchain_root not specified. Will not mount toolchain dirs.'
logger.GetLogger().LogWarning(m)
else:
- tc_dirs = [options.toolchain_root + "/google_vendor_src_branch/gcc",
- options.toolchain_root + "/google_vendor_src_branch/binutils"]
+ tc_dirs = [options.toolchain_root + '/google_vendor_src_branch/gcc',
+ options.toolchain_root + '/google_vendor_src_branch/binutils']
for tc_dir in tc_dirs:
if not os.path.exists(tc_dir):
- logger.GetLogger().LogError("toolchain path " +
- tc_dir + " does not exist!")
+ logger.GetLogger().LogError('toolchain path ' + tc_dir +
+ ' does not exist!')
parser.print_help()
sys.exit(1)
if not os.path.exists(chromeos_root):
- logger.GetLogger().LogError("chromeos_root " + options.chromeos_root +
- " does not exist!")
+ logger.GetLogger().LogError('chromeos_root ' + options.chromeos_root +
+ ' does not exist!')
parser.print_help()
sys.exit(1)
- if not os.path.exists(chromeos_root + "/src/scripts/build_packages"):
- logger.GetLogger().LogError(options.chromeos_root +
- "/src/scripts/build_packages"
- " not found!")
+ if not os.path.exists(chromeos_root + '/src/scripts/build_packages'):
+ logger.GetLogger(
+ ).LogError(options.chromeos_root + '/src/scripts/build_packages'
+ ' not found!')
parser.print_help()
sys.exit(1)
version_dir = os.path.realpath(os.path.expanduser(os.path.dirname(__file__)))
- mounted_tc_root = "/usr/local/toolchain_root"
- full_mounted_tc_root = chromeos_root + "/chroot/" + mounted_tc_root
+ mounted_tc_root = '/usr/local/toolchain_root'
+ full_mounted_tc_root = chromeos_root + '/chroot/' + mounted_tc_root
full_mounted_tc_root = os.path.abspath(full_mounted_tc_root)
mount_points = []
for tc_dir in tc_dirs:
last_dir = misc.GetRoot(tc_dir)[1]
- mount_point = MountPoint(tc_dir, full_mounted_tc_root + "/" + last_dir,
- getpass.getuser(), "ro")
+ mount_point = MountPoint(tc_dir, full_mounted_tc_root + '/' + last_dir,
+ getpass.getuser(), 'ro')
mount_points.append(mount_point)
# Add the third_party mount point if it exists
if options.third_party:
third_party_dir = options.third_party
- logger.GetLogger().LogFatalIf(not os.path.isdir(third_party_dir),
- "--third_party option is not a valid dir.")
+ logger.GetLogger().LogFatalIf(
+ not os.path.isdir(third_party_dir),
+ '--third_party option is not a valid dir.')
else:
- third_party_dir = os.path.abspath("%s/../../../third_party" %
+ third_party_dir = os.path.abspath('%s/../../../third_party' %
os.path.dirname(__file__))
if os.path.isdir(third_party_dir):
- mount_point = MountPoint(third_party_dir,
- ("%s/%s" %
- (full_mounted_tc_root,
- os.path.basename(third_party_dir))),
- getpass.getuser())
+ mount_point = MountPoint(third_party_dir, ('%s/%s' % (
+ full_mounted_tc_root, os.path.basename(third_party_dir))),
+ getpass.getuser())
mount_points.append(mount_point)
output = options.output
if output is None and options.toolchain_root:
# Mount the output directory at /usr/local/toolchain_root/output
- output = options.toolchain_root + "/output"
+ output = options.toolchain_root + '/output'
if output:
- mount_points.append(MountPoint(output, full_mounted_tc_root + "/output",
+ mount_points.append(MountPoint(output, full_mounted_tc_root + '/output',
getpass.getuser()))
# Mount the other mount points
mount_points += CreateMountPointsFromString(options.other_mounts,
- chromeos_root + "/chroot/")
+ chromeos_root + '/chroot/')
last_dir = misc.GetRoot(version_dir)[1]
# Mount the version dir (v14) at /usr/local/toolchain_root/v14
- mount_point = MountPoint(version_dir, full_mounted_tc_root + "/" + last_dir,
+ mount_point = MountPoint(version_dir, full_mounted_tc_root + '/' + last_dir,
getpass.getuser())
mount_points.append(mount_point)
@@ -205,49 +211,52 @@ def Main(argv, return_output=False):
return retval
# Finally, create the symlink to build-gcc.
- command = "sudo chown " + getpass.getuser() + " " + full_mounted_tc_root
+ command = 'sudo chown ' + getpass.getuser() + ' ' + full_mounted_tc_root
retval = command_executer.GetCommandExecuter().RunCommand(command)
try:
- CreateSymlink(last_dir + "/build-gcc", full_mounted_tc_root + "/build-gcc")
- CreateSymlink(last_dir + "/build-binutils", full_mounted_tc_root + "/build-binutils")
+ CreateSymlink(last_dir + '/build-gcc', full_mounted_tc_root + '/build-gcc')
+ CreateSymlink(last_dir + '/build-binutils',
+ full_mounted_tc_root + '/build-binutils')
except Exception as e:
logger.GetLogger().LogError(str(e))
# Now call cros_sdk --enter with the rest of the arguments.
- command = "cd %s/src/scripts && cros_sdk --enter" % chromeos_root
+ command = 'cd %s/src/scripts && cros_sdk --enter' % chromeos_root
if len(passthrough_argv) > 1:
- inner_command = " ".join(passthrough_argv[1:])
+ inner_command = ' '.join(passthrough_argv[1:])
inner_command = inner_command.strip()
- if inner_command.startswith("-- "):
+ if inner_command.startswith('-- '):
inner_command = inner_command[3:]
- command_file = "tc_enter_chroot.cmd"
- command_file_path = chromeos_root + "/src/scripts/" + command_file
- retval = command_executer.GetCommandExecuter().RunCommand("sudo rm -f " + command_file_path)
+ command_file = 'tc_enter_chroot.cmd'
+ command_file_path = chromeos_root + '/src/scripts/' + command_file
+ retval = command_executer.GetCommandExecuter().RunCommand('sudo rm -f ' +
+ command_file_path)
if retval != 0:
return retval
- f = open(command_file_path, "w")
+ f = open(command_file_path, 'w')
f.write(inner_command)
f.close()
logger.GetLogger().LogCmd(inner_command)
- retval = command_executer.GetCommandExecuter().RunCommand("chmod +x " + command_file_path)
+ retval = command_executer.GetCommandExecuter().RunCommand('chmod +x ' +
+ command_file_path)
if retval != 0:
return retval
if options.sudo:
- command += " sudo ./" + command_file
+ command += ' sudo ./' + command_file
else:
- command += " ./" + command_file
+ command += ' ./' + command_file
retval = command_executer.GetCommandExecuter().RunCommandGeneric(
command, return_output)
return retval
else:
- os.chdir("%s/src/scripts" % chromeos_root)
+ os.chdir('%s/src/scripts' % chromeos_root)
ce = command_executer.GetCommandExecuter()
- _, out, _ = ce.RunCommandWOutput("which cros_sdk")
+ _, out, _ = ce.RunCommandWOutput('which cros_sdk')
cros_sdk_binary = out.split()[0]
- return os.execv(cros_sdk_binary, ["", "--enter"])
+ return os.execv(cros_sdk_binary, ['', '--enter'])
def CreateMountPointsFromString(mount_strings, chroot_dir):
@@ -257,30 +266,30 @@ def CreateMountPointsFromString(mount_strings, chroot_dir):
return mount_points
mount_list = mount_strings.split()
for mount_string in mount_list:
- mount_values = mount_string.split(":")
+ mount_values = mount_string.split(':')
external_dir = mount_values[0]
mount_dir = mount_values[1]
if len(mount_values) > 2:
options = mount_values[2]
else:
options = None
- mount_point = MountPoint(external_dir, chroot_dir + "/" + mount_dir,
+ mount_point = MountPoint(external_dir, chroot_dir + '/' + mount_dir,
getpass.getuser(), options)
mount_points.append(mount_point)
return mount_points
def CreateSymlink(target, link_name):
- logger.GetLogger().LogFatalIf(target.startswith("/"),
- "Can't create symlink to absolute path!")
- real_from_file = misc.GetRoot(link_name)[0] + "/" + target
+ logger.GetLogger().LogFatalIf(
+ target.startswith('/'), "Can't create symlink to absolute path!")
+ real_from_file = misc.GetRoot(link_name)[0] + '/' + target
if os.path.realpath(real_from_file) != os.path.realpath(link_name):
if os.path.exists(link_name):
- command = "rm -rf " + link_name
+ command = 'rm -rf ' + link_name
command_executer.GetCommandExecuter().RunCommand(command)
os.symlink(target, link_name)
-if __name__ == "__main__":
+if __name__ == '__main__':
retval = Main(sys.argv)
sys.exit(retval)
diff --git a/test_gcc_dejagnu.py b/test_gcc_dejagnu.py
index 450c6cb9..97dbfa2a 100755
--- a/test_gcc_dejagnu.py
+++ b/test_gcc_dejagnu.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
-
"""Script adapter used by automation client for testing dejagnu.
This is not intended to be run on command line.
To kick off a single dejagnu run, use chromeos/v14/dejagnu/run_dejagnu.py
@@ -20,17 +19,18 @@ from dejagnu import run_dejagnu
from utils import command_executer
from utils import email_sender
+
class DejagnuAdapter(object):
# TODO(shenhan): move these to constants.py.
- _CHROMIUM_GCC_GIT = ("https://chromium.googlesource.com/"
- "chromiumos/third_party/gcc.git")
- _CHROMIUM_GCC_BRANCH = "gcc.gnu.org/branches/google/gcc-4_7-mobile"
+ _CHROMIUM_GCC_GIT = ('https://chromium.googlesource.com/'
+ 'chromiumos/third_party/gcc.git')
+ _CHROMIUM_GCC_BRANCH = 'gcc.gnu.org/branches/google/gcc-4_7-mobile'
_cmd_exec = command_executer.GetCommandExecuter()
- def __init__(self, board, remote, gcc_dir,
- chromeos_root, runtestflags, cleanup):
+ def __init__(self, board, remote, gcc_dir, chromeos_root, runtestflags,
+ cleanup):
self._board = board
self._remote = remote
self._gcc_dir = gcc_dir
@@ -39,55 +39,53 @@ class DejagnuAdapter(object):
self._cleanup = cleanup
def SetupChromeOS(self):
- cmd = [setup_chromeos.__file__,
- "--dir=" + self._chromeos_root, "--minilayout", "--jobs=8"]
+ cmd = [setup_chromeos.__file__, '--dir=' + self._chromeos_root,
+ '--minilayout', '--jobs=8']
ret = setup_chromeos.Main(cmd)
if ret:
- raise Exception("Failed to checkout chromeos")
+ raise Exception('Failed to checkout chromeos')
## Do cros_sdk and setup_board, otherwise build_tc in next step will fail.
- cmd = "cd {0} && cros_sdk --download".format(self._chromeos_root)
+ cmd = 'cd {0} && cros_sdk --download'.format(self._chromeos_root)
ret = self._cmd_exec.RunCommand(cmd, terminated_timeout=9000)
if ret:
- raise Exception("Failed to create chroot.")
+ raise Exception('Failed to create chroot.')
def SetupBoard(self):
- cmd = "./setup_board --board=" + self._board
+ cmd = './setup_board --board=' + self._board
ret = self._cmd_exec.ChrootRunCommand(self._chromeos_root,
- cmd, terminated_timeout=4000)
+ cmd,
+ terminated_timeout=4000)
if ret:
- raise Exception("Failed to setup board.")
+ raise Exception('Failed to setup board.')
def CheckoutGCC(self):
- cmd = "git clone {0} {1} && cd {1} && git checkout {2}".format(
- self._CHROMIUM_GCC_GIT, self._gcc_dir, self._CHROMIUM_GCC_BRANCH)
+ cmd = 'git clone {0} {1} && cd {1} && git checkout {2}'.format(
+ self._CHROMIUM_GCC_GIT, self._gcc_dir, self._CHROMIUM_GCC_BRANCH)
ret = self._cmd_exec.RunCommand(cmd, terminated_timeout=300)
if ret:
- raise Exception("Failed to checkout gcc.")
+ raise Exception('Failed to checkout gcc.')
## Handle build_tc bug.
- cmd = ("touch {0}/gcc/config/arm/arm-tune.md " + \
- "{0}/gcc/config/arm/arm-tables.opt").format(self._gcc_dir)
+ cmd = ('touch {0}/gcc/config/arm/arm-tune.md ' + \
+ '{0}/gcc/config/arm/arm-tables.opt').format(self._gcc_dir)
ret = self._cmd_exec.RunCommand(cmd)
def BuildGCC(self):
- build_gcc_args = [build_tc.__file__,
- "--board=" + self._board,
- "--chromeos_root=" + self._chromeos_root,
- "--gcc_dir=" + self._gcc_dir]
+ build_gcc_args = [build_tc.__file__, '--board=' + self._board,
+ '--chromeos_root=' + self._chromeos_root,
+ '--gcc_dir=' + self._gcc_dir]
ret = build_tc.Main(build_gcc_args)
if ret:
- raise Exception("Building gcc failed.")
+ raise Exception('Building gcc failed.')
def CheckGCC(self):
- args = [run_dejagnu.__file__,
- "--board=" + self._board,
- "--chromeos_root=" + self._chromeos_root,
- "--mount=" + self._gcc_dir,
- "--remote=" + self._remote]
+ args = [run_dejagnu.__file__, '--board=' + self._board,
+ '--chromeos_root=' + self._chromeos_root,
+ '--mount=' + self._gcc_dir, '--remote=' + self._remote]
if self._cleanup:
- args.append("--cleanup=" + self._cleanup)
+ args.append('--cleanup=' + self._cleanup)
if self._runtestflags:
- args.append("--flags=" + self._runtestflags)
+ args.append('--flags=' + self._runtestflags)
return run_dejagnu.Main(args)
@@ -110,6 +108,7 @@ def GetNumNewFailures(str):
return -1
return n_failures
+
# Do not throw any exception in this function!
def EmailResult(result):
email_to = ['c-compiler-chrome@google.com']
@@ -144,31 +143,44 @@ def EmailResult(result):
except Exception as e:
# Do not propagate this email sending exception, you want to email an
# email exception? Just log it on console.
- print ('Sending email failed - {0}'
- 'Subject: {1}'
- 'Text: {2}').format(
- str(e), subject, email_text)
+ print('Sending email failed - {0}'
+ 'Subject: {1}'
+ 'Text: {2}').format(
+ str(e), subject, email_text)
def ProcessArguments(argv):
"""Processing script arguments."""
- parser = optparse.OptionParser(description=(
- 'This script is used by nightly client to test gcc. '
- 'DO NOT run it unless you know what you are doing.'),
+ parser = optparse.OptionParser(
+ description=('This script is used by nightly client to test gcc. '
+ 'DO NOT run it unless you know what you are doing.'),
usage='test_gcc_dejagnu.py options')
- parser.add_option('-b', '--board', dest='board',
+ parser.add_option('-b',
+ '--board',
+ dest='board',
help=('Required. Specify board type. For example '
'\'lumpy\' and \'daisy\''))
- parser.add_option('-r', '--remote', dest='remote',
+ parser.add_option('-r',
+ '--remote',
+ dest='remote',
help=('Required. Specify remote board address'))
- parser.add_option('-g', '--gcc_dir', dest='gcc_dir', default='gcc.live',
+ parser.add_option('-g',
+ '--gcc_dir',
+ dest='gcc_dir',
+ default='gcc.live',
help=('Optional. Specify gcc checkout directory.'))
- parser.add_option('-c', '--chromeos_root', dest='chromeos_root',
+ parser.add_option('-c',
+ '--chromeos_root',
+ dest='chromeos_root',
default='chromeos.live',
help=('Optional. Specify chromeos checkout directory.'))
- parser.add_option('--cleanup', dest='cleanup', default=None,
+ parser.add_option('--cleanup',
+ dest='cleanup',
+ default=None,
help=('Optional. Do cleanup after the test.'))
- parser.add_option('--runtestflags', dest='runtestflags', default=None,
+ parser.add_option('--runtestflags',
+ dest='runtestflags',
+ default=None,
help=('Optional. Options to RUNTESTFLAGS env var '
'while invoking make check. '
'(Mainly used for testing purpose.)'))
@@ -176,16 +188,15 @@ def ProcessArguments(argv):
options, args = parser.parse_args(argv)
if not options.board or not options.remote:
- raise Exception("--board and --remote are mandatory options.")
+ raise Exception('--board and --remote are mandatory options.')
return options
def Main(argv):
opt = ProcessArguments(argv)
- adapter = DejagnuAdapter(
- opt.board, opt.remote, opt.gcc_dir, opt.chromeos_root,
- opt.runtestflags, opt.cleanup)
+ adapter = DejagnuAdapter(opt.board, opt.remote, opt.gcc_dir,
+ opt.chromeos_root, opt.runtestflags, opt.cleanup)
try:
adapter.SetupChromeOS()
adapter.SetupBoard()
@@ -200,6 +211,6 @@ def Main(argv):
return ret
-if __name__ == "__main__":
+if __name__ == '__main__':
retval = Main(sys.argv)
sys.exit(retval[0])
diff --git a/test_gdb_dejagnu.py b/test_gdb_dejagnu.py
index d7a21abc..65f007ef 100755
--- a/test_gdb_dejagnu.py
+++ b/test_gdb_dejagnu.py
@@ -11,8 +11,7 @@ from utils import email_sender
class DejagnuAdapter(object):
- def __init__(self, board, remote, gdb_dir,
- chromeos_root, cleanup):
+ def __init__(self, board, remote, gdb_dir, chromeos_root, cleanup):
self._board = board
self._remote = remote
self._gdb_dir = gdb_dir
@@ -21,8 +20,8 @@ class DejagnuAdapter(object):
self._cmd_exec = command_executer.GetCommandExecuter()
def SetupChromeOS(self):
- cmd = [setup_chromeos.__file__,
- '--dir=' + self._chromeos_root, '--minilayout', '--jobs=8']
+ cmd = [setup_chromeos.__file__, '--dir=' + self._chromeos_root,
+ '--minilayout', '--jobs=8']
ret = setup_chromeos.Main(cmd)
if ret:
raise Exception('Failed to checkout chromeos')
@@ -35,16 +34,15 @@ class DejagnuAdapter(object):
def SetupBoard(self):
cmd = './setup_board --board=' + self._board
ret = self._cmd_exec.ChrootRunCommand(self._chromeos_root,
- cmd, terminated_timeout=4000)
+ cmd,
+ terminated_timeout=4000)
if ret:
raise Exception('Failed to setup board.')
def CheckGDB(self):
- args = [gdb_dejagnu.__file__,
- '--board=' + self._board,
+ args = [gdb_dejagnu.__file__, '--board=' + self._board,
'--chromeos_root=' + self._chromeos_root,
- '--mount=' + self._gdb_dir,
- '--remote=' + self._remote]
+ '--mount=' + self._gdb_dir, '--remote=' + self._remote]
if self._cleanup:
args.append('--cleanup=' + self._cleanup)
return gdb_dejagnu.Main(args)
@@ -63,8 +61,8 @@ def EmailResult(result):
email_to = ['yunlian@google.com']
if len(result) == 4:
subject = 'Job failed: dejagnu test didn\'t finish'
- email_text = ('Job failed prematurely, check exception below.\n' +
- result[3])
+ email_text = (
+ 'Job failed prematurely, check exception below.\n' + result[3])
elif result[0]:
subject = 'Job finished: dejagnu test failed'
num_new_failures = GetNumNewFailures(result[1])
@@ -72,11 +70,10 @@ def EmailResult(result):
summary = '{0} new fail(s), check log below.'.format(num_new_failures)
else:
summary = 'At least 1 new fail found, check log below.'
- email_text = (summary +
- ('\nStdout ====\n'
- '{0}\n'
- '\nStderr ===\n'
- '{1}\n').format(result[1], result[2]))
+ email_text = (summary + ('\nStdout ====\n'
+ '{0}\n'
+ '\nStderr ===\n'
+ '{1}\n').format(result[1], result[2]))
else:
subject = 'Job finished: dejagnu test passed'
email_text = ('Cool! No new fail found.\n'
@@ -91,29 +88,40 @@ def EmailResult(result):
except Exception as e:
# Do not propagate this email sending exception, you want to email an
# email exception? Just log it on console.
- print ('Sending email failed - {0}'
- 'Subject: {1}'
- 'Text: {2}').format(
- str(e), subject, email_text)
+ print('Sending email failed - {0}'
+ 'Subject: {1}'
+ 'Text: {2}').format(
+ str(e), subject, email_text)
def ProcessArguments(argv):
"""Processing script arguments."""
- parser = optparse.OptionParser(description=(
- 'This script is used by nightly client to test gdb. '
- 'DO NOT run it unless you know what you are doing.'),
- usage='test_gdb_dejagnu.py options')
- parser.add_option('-b', '--board', dest='board',
+ parser = optparse.OptionParser(
+ description=('This script is used by nightly client to test gdb. '
+ 'DO NOT run it unless you know what you are doing.'),
+ usage='test_gdb_dejagnu.py options')
+ parser.add_option('-b',
+ '--board',
+ dest='board',
help=('Required. Specify board type. For example '
'\'lumpy\' and \'daisy\''))
- parser.add_option('-r', '--remote', dest='remote',
+ parser.add_option('-r',
+ '--remote',
+ dest='remote',
help=('Required. Specify remote board address'))
- parser.add_option('-g', '--gdb_dir', dest='gdb_dir', default='',
+ parser.add_option('-g',
+ '--gdb_dir',
+ dest='gdb_dir',
+ default='',
help=('Optional. Specify gdb checkout directory.'))
- parser.add_option('-c', '--chromeos_root', dest='chromeos_root',
+ parser.add_option('-c',
+ '--chromeos_root',
+ dest='chromeos_root',
default='chromeos.live',
help=('Optional. Specify chromeos checkout directory.'))
- parser.add_option('--cleanup', dest='cleanup', default=None,
+ parser.add_option('--cleanup',
+ dest='cleanup',
+ default=None,
help=('Optional. Do cleanup after the test.'))
options, _ = parser.parse_args(argv)
@@ -127,9 +135,8 @@ def ProcessArguments(argv):
def Main(argv):
opt = ProcessArguments(argv)
print opt
- adapter = DejagnuAdapter(
- opt.board, opt.remote, opt.gdb_dir, opt.chromeos_root,
- opt.cleanup)
+ adapter = DejagnuAdapter(opt.board, opt.remote, opt.gdb_dir,
+ opt.chromeos_root, opt.cleanup)
try:
adapter.SetupChromeOS()
adapter.SetupBoard()
@@ -141,6 +148,7 @@ def Main(argv):
EmailResult(ret)
return ret
-if __name__ == '__main__':
+
+if __name__ == '__main__':
retval = Main(sys.argv)
sys.exit(retval[0])
diff --git a/test_toolchains.py b/test_toolchains.py
index 0c6f1d82..fdd13fe5 100755
--- a/test_toolchains.py
+++ b/test_toolchains.py
@@ -13,24 +13,27 @@ from utils import command_executer
from utils import misc
from utils import logger
+CROSTC_ROOT = '/usr/local/google/crostc'
+MAIL_PROGRAM = '~/var/bin/mail-sheriff'
+WEEKLY_REPORTS_ROOT = os.path.join(CROSTC_ROOT, 'weekly_test_data')
+PENDING_ARCHIVES_DIR = os.path.join(CROSTC_ROOT, 'pending_archives')
+NIGHTLY_TESTS_DIR = os.path.join(CROSTC_ROOT, 'nightly_test_reports')
-CROSTC_ROOT = "/usr/local/google/crostc"
-MAIL_PROGRAM = "~/var/bin/mail-sheriff"
-WEEKLY_REPORTS_ROOT = os.path.join(CROSTC_ROOT, "weekly_test_data")
-PENDING_ARCHIVES_DIR = os.path.join(CROSTC_ROOT, "pending_archives")
-NIGHTLY_TESTS_DIR = os.path.join(CROSTC_ROOT, "nightly_test_reports")
class GCCConfig(object):
+
def __init__(self, githash):
self.githash = githash
class ToolchainConfig:
+
def __init__(self, gcc_config=None, binutils_config=None):
self.gcc_config = gcc_config
class ChromeOSCheckout(object):
+
def __init__(self, board, chromeos_root):
self._board = board
self._chromeos_root = chromeos_root
@@ -39,12 +42,12 @@ class ChromeOSCheckout(object):
self._build_num = None
def _DeleteChroot(self):
- command = "cd %s; cros_sdk --delete" % self._chromeos_root
+ command = 'cd %s; cros_sdk --delete' % self._chromeos_root
return self._ce.RunCommand(command)
def _DeleteCcahe(self):
# crosbug.com/34956
- command = "sudo rm -rf %s" % os.path.join(self._chromeos_root, ".cache")
+ command = 'sudo rm -rf %s' % os.path.join(self._chromeos_root, '.cache')
return self._ce.RunCommand(command)
def _GetBuildNumber(self):
@@ -58,48 +61,46 @@ class ChromeOSCheckout(object):
comparison tests.
"""
# Get the path to 'latest'
- sym_path = os.path.join (misc.GetImageDir(self._chromeos_root,
- self._board),
- "latest")
+ sym_path = os.path.join(
+ misc.GetImageDir(self._chromeos_root, self._board), 'latest')
# Translate the symlink to its 'real' path.
real_path = os.path.realpath(sym_path)
# Break up the path and get the last piece
# (e.g. 'R37-5982.0.2014_06_23_0454-a1"
- path_pieces = real_path.split("/")
+ path_pieces = real_path.split('/')
last_piece = path_pieces[-1]
# Break this piece into the image number + other pieces, and get the
# image number [ 'R37-5982', '0', '2014_06_23_0454-a1']
- image_parts = last_piece.split(".")
+ image_parts = last_piece.split('.')
self._build_num = image_parts[0]
def _BuildLabelName(self, config, board):
- pieces = config.split("/")
+ pieces = config.split('/')
compiler_version = pieces[-1]
- label = compiler_version + "_tot_afdo"
+ label = compiler_version + '_tot_afdo'
return label
- def _BuildAndImage(self, label=""):
+ def _BuildAndImage(self, label=''):
if (not label or
not misc.DoesLabelExist(self._chromeos_root, self._board, label)):
build_chromeos_args = [build_chromeos.__file__,
- "--chromeos_root=%s" % self._chromeos_root,
- "--board=%s" % self._board,
- "--rebuild"]
+ '--chromeos_root=%s' % self._chromeos_root,
+ '--board=%s' % self._board, '--rebuild']
if self._public:
- build_chromeos_args.append("--env=USE=-chrome_internal")
+ build_chromeos_args.append('--env=USE=-chrome_internal')
ret = build_chromeos.Main(build_chromeos_args)
if ret != 0:
raise RuntimeError("Couldn't build ChromeOS!")
- if not self._build_num:
+ if not self._build_num:
self._GetBuildNumber()
# Check to see if we need to create the symbolic link for the vanilla
# image, and do so if appropriate.
- if not misc.DoesLabelExist(self._chromeos_root, self._board, "vanilla"):
- build_name = "%s-release/%s.0.0" % (self._board, self._build_num)
- full_vanilla_path = os.path.join (os.getcwd(), self._chromeos_root,
- 'chroot/tmp', build_name)
+ if not misc.DoesLabelExist(self._chromeos_root, self._board, 'vanilla'):
+ build_name = '%s-release/%s.0.0' % (self._board, self._build_num)
+ full_vanilla_path = os.path.join(os.getcwd(), self._chromeos_root,
+ 'chroot/tmp', build_name)
misc.LabelLatestImage(self._chromeos_root, self._board, label,
full_vanilla_path)
else:
@@ -108,22 +109,18 @@ class ChromeOSCheckout(object):
def _SetupBoard(self, env_dict, usepkg_flag, clobber_flag):
env_string = misc.GetEnvStringFromDict(env_dict)
- command = ("%s %s" %
- (env_string,
- misc.GetSetupBoardCommand(self._board,
- usepkg=usepkg_flag,
- force=clobber_flag)))
- ret = self._ce.ChrootRunCommand(self._chromeos_root,
- command)
+ command = ('%s %s' % (env_string,
+ misc.GetSetupBoardCommand(self._board,
+ usepkg=usepkg_flag,
+ force=clobber_flag)))
+ ret = self._ce.ChrootRunCommand(self._chromeos_root, command)
error_str = "Could not setup board: '%s'" % command
assert ret == 0, error_str
def _UnInstallToolchain(self):
- command = ("sudo CLEAN_DELAY=0 emerge -C cross-%s/gcc" %
- misc.GetCtargetFromBoard(self._board,
- self._chromeos_root))
- ret = self._ce.ChrootRunCommand(self._chromeos_root,
- command)
+ command = ('sudo CLEAN_DELAY=0 emerge -C cross-%s/gcc' %
+ misc.GetCtargetFromBoard(self._board, self._chromeos_root))
+ ret = self._ce.ChrootRunCommand(self._chromeos_root, command)
if ret != 0:
raise RuntimeError("Couldn't uninstall the toolchain!")
@@ -131,32 +128,38 @@ class ChromeOSCheckout(object):
# TODO(asharif): Setup a fixed ChromeOS version (quarterly snapshot).
if not os.path.exists(self._chromeos_root):
setup_chromeos_args = [setup_chromeos.__file__,
- "--dir=%s" % self._chromeos_root]
+ '--dir=%s' % self._chromeos_root]
if self._public:
- setup_chromeos_args.append("--public")
+ setup_chromeos_args.append('--public')
ret = setup_chromeos.Main(setup_chromeos_args)
if ret != 0:
raise RuntimeError("Couldn't run setup_chromeos!")
-
def _BuildToolchain(self, config):
# Call setup_board for basic, vanilla setup.
self._SetupBoard({}, usepkg_flag=True, clobber_flag=False)
# Now uninstall the vanilla compiler and setup/build our custom
# compiler.
self._UnInstallToolchain()
- envdict = {"USE": "git_gcc",
- "GCC_GITHASH": config.gcc_config.githash,
- "EMERGE_DEFAULT_OPTS": "--exclude=gcc"}
+ envdict = {'USE': 'git_gcc',
+ 'GCC_GITHASH': config.gcc_config.githash,
+ 'EMERGE_DEFAULT_OPTS': '--exclude=gcc'}
self._SetupBoard(envdict, usepkg_flag=False, clobber_flag=False)
class ToolchainComparator(ChromeOSCheckout):
- def __init__(self, board, remotes, configs, clean,
- public, force_mismatch, noschedv2=False):
+
+ def __init__(self,
+ board,
+ remotes,
+ configs,
+ clean,
+ public,
+ force_mismatch,
+ noschedv2=False):
self._board = board
self._remotes = remotes
- self._chromeos_root = "chromeos"
+ self._chromeos_root = 'chromeos'
self._configs = configs
self._clean = clean
self._public = public
@@ -164,40 +167,38 @@ class ToolchainComparator(ChromeOSCheckout):
self._ce = command_executer.GetCommandExecuter()
self._l = logger.GetLogger()
timestamp = datetime.datetime.strftime(datetime.datetime.now(),
- "%Y-%m-%d_%H:%M:%S")
+ '%Y-%m-%d_%H:%M:%S')
self._reports_dir = os.path.join(NIGHTLY_TESTS_DIR,
- "%s.%s" % (timestamp, board),
- )
+ '%s.%s' % (timestamp, board),)
self._noschedv2 = noschedv2
ChromeOSCheckout.__init__(self, board, self._chromeos_root)
-
def _FinishSetup(self):
# Get correct .boto file
current_dir = os.getcwd()
- src = "/usr/local/google/home/mobiletc-prebuild/.boto"
+ src = '/usr/local/google/home/mobiletc-prebuild/.boto'
dest = os.path.join(current_dir, self._chromeos_root,
- "src/private-overlays/chromeos-overlay/"
- "googlestorage_account.boto")
+ 'src/private-overlays/chromeos-overlay/'
+ 'googlestorage_account.boto')
# Copy the file to the correct place
- copy_cmd = "cp %s %s" % (src, dest)
+ copy_cmd = 'cp %s %s' % (src, dest)
retval = self._ce.RunCommand(copy_cmd)
if retval != 0:
raise RuntimeError("Couldn't copy .boto file for google storage.")
# Fix protections on ssh key
- command = ("chmod 600 /var/cache/chromeos-cache/distfiles/target"
- "/chrome-src-internal/src/third_party/chromite/ssh_keys"
- "/testing_rsa")
+ command = ('chmod 600 /var/cache/chromeos-cache/distfiles/target'
+ '/chrome-src-internal/src/third_party/chromite/ssh_keys'
+ '/testing_rsa')
retval = self._ce.ChrootRunCommand(self._chromeos_root, command)
if retval != 0:
- raise RuntimeError("chmod for testing_rsa failed")
+ raise RuntimeError('chmod for testing_rsa failed')
def _TestLabels(self, labels):
- experiment_file = "toolchain_experiment.txt"
- image_args = ""
+ experiment_file = 'toolchain_experiment.txt'
+ image_args = ''
if self._force_mismatch:
- image_args = "--force-mismatch"
+ image_args = '--force-mismatch'
experiment_header = """
board: %s
remote: %s
@@ -209,18 +210,19 @@ class ToolchainComparator(ChromeOSCheckout):
iterations: 3
}
"""
- with open(experiment_file, "w") as f:
- print >>f, experiment_header
- print >>f, experiment_tests
+
+ with open(experiment_file, 'w') as f:
+ print >> f, experiment_header
+ print >> f, experiment_tests
for label in labels:
# TODO(asharif): Fix crosperf so it accepts labels with symbols
crosperf_label = label
- crosperf_label = crosperf_label.replace("-", "_")
- crosperf_label = crosperf_label.replace("+", "_")
- crosperf_label = crosperf_label.replace(".", "")
+ crosperf_label = crosperf_label.replace('-', '_')
+ crosperf_label = crosperf_label.replace('+', '_')
+ crosperf_label = crosperf_label.replace('.', '')
# Use the official build instead of building vanilla ourselves.
- if label == "vanilla":
+ if label == 'vanilla':
build_name = '%s-release/%s.0.0' % (self._board, self._build_num)
# Now add 'official build' to test file.
@@ -230,7 +232,7 @@ class ToolchainComparator(ChromeOSCheckout):
build: %s
}
""" % (self._chromeos_root, build_name)
- print >>f, official_image
+ print >> f, official_image
else:
experiment_image = """
@@ -238,87 +240,78 @@ class ToolchainComparator(ChromeOSCheckout):
chromeos_image: %s
image_args: %s
}
- """ % (crosperf_label,
- os.path.join(misc.GetImageDir(self._chromeos_root,
- self._board),
- label, "chromiumos_test_image.bin"),
- image_args)
- print >>f, experiment_image
-
- crosperf = os.path.join(os.path.dirname(__file__),
- "crosperf",
- "crosperf")
+ """ % (crosperf_label, os.path.join(
+ misc.GetImageDir(self._chromeos_root, self._board), label,
+ 'chromiumos_test_image.bin'), image_args)
+ print >> f, experiment_image
+
+ crosperf = os.path.join(os.path.dirname(__file__), 'crosperf', 'crosperf')
noschedv2_opts = '--noschedv2' if self._noschedv2 else ''
- command = ("{crosperf} --no_email=True --results_dir={r_dir} "
- "--json_report=True {noschedv2_opts} {exp_file}").format(
- crosperf=crosperf,
- r_dir=self._reports_dir,
- noschedv2_opts=noschedv2_opts,
- exp_file=experiment_file)
+ command = ('{crosperf} --no_email=True --results_dir={r_dir} '
+ '--json_report=True {noschedv2_opts} {exp_file}').format(
+ crosperf=crosperf,
+ r_dir=self._reports_dir,
+ noschedv2_opts=noschedv2_opts,
+ exp_file=experiment_file)
ret = self._ce.RunCommand(command)
if ret != 0:
raise RuntimeError("Couldn't run crosperf!")
else:
# Copy json report to pending archives directory.
- command = "cp %s/*.json %s/." % (self._reports_dir, PENDING_ARCHIVES_DIR)
+ command = 'cp %s/*.json %s/.' % (self._reports_dir, PENDING_ARCHIVES_DIR)
ret = self._ce.RunCommand(command)
return
-
def _CopyWeeklyReportFiles(self, labels):
"""Create tar files of the custom and official images and copy them
to the weekly reports directory, so they exist when the weekly report
gets generated. IMPORTANT NOTE: This function must run *after*
crosperf has been run; otherwise the vanilla images will not be there.
"""
- images_path = os.path.join(os.path.realpath(self._chromeos_root),
- "src/build/images", self._board)
- weekday = time.strftime("%a")
+ images_path = os.path.join(
+ os.path.realpath(self._chromeos_root), 'src/build/images', self._board)
+ weekday = time.strftime('%a')
data_dir = os.path.join(WEEKLY_REPORTS_ROOT, self._board)
- dest_dir = os.path.join (data_dir, weekday)
+ dest_dir = os.path.join(data_dir, weekday)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
# Make sure dest_dir is empty (clean out last week's data).
- cmd = "cd %s; rm -Rf %s_*_image*" % (dest_dir, weekday)
+ cmd = 'cd %s; rm -Rf %s_*_image*' % (dest_dir, weekday)
self._ce.RunCommand(cmd)
# Now create new tar files and copy them over.
for l in labels:
test_path = os.path.join(images_path, l)
if os.path.exists(test_path):
- if l != "vanilla":
- label_name = "test"
+ if l != 'vanilla':
+ label_name = 'test'
else:
- label_name = "vanilla"
- tar_file_name = "%s_%s_image.tar" % (weekday, label_name)
- cmd = ("cd %s; tar -cvf %s %s/chromiumos_test_image.bin; "
- "cp %s %s/.") % (images_path,
- tar_file_name,
- l, tar_file_name,
+ label_name = 'vanilla'
+ tar_file_name = '%s_%s_image.tar' % (weekday, label_name)
+ cmd = ('cd %s; tar -cvf %s %s/chromiumos_test_image.bin; '
+ 'cp %s %s/.') % (images_path, tar_file_name, l, tar_file_name,
dest_dir)
tar_ret = self._ce.RunCommand(cmd)
if tar_ret != 0:
- self._l.LogOutput("Error while creating/copying test tar file(%s)."
- % tar_file_name)
+ self._l.LogOutput('Error while creating/copying test tar file(%s).' %
+ tar_file_name)
def _SendEmail(self):
"""Find email msesage generated by crosperf and send it."""
- filename = os.path.join(self._reports_dir, "msg_body.html")
+ filename = os.path.join(self._reports_dir, 'msg_body.html')
if (os.path.exists(filename) and
os.path.exists(os.path.expanduser(MAIL_PROGRAM))):
- command = ('cat %s | %s -s "Nightly test results, %s" -team -html'
- % (filename, MAIL_PROGRAM, self._board))
+ command = ('cat %s | %s -s "Nightly test results, %s" -team -html' %
+ (filename, MAIL_PROGRAM, self._board))
self._ce.RunCommand(command)
def DoAll(self):
self._CheckoutChromeOS()
labels = []
- labels.append("vanilla")
+ labels.append('vanilla')
for config in self._configs:
label = self._BuildLabelName(config.gcc_config.githash, self._board)
- if (not misc.DoesLabelExist(self._chromeos_root,
- self._board,
- label)):
+ if (not misc.DoesLabelExist(self._chromeos_root, self._board, label)):
self._BuildToolchain(config)
label = self._BuildAndImage(label)
labels.append(label)
@@ -340,58 +333,57 @@ class ToolchainComparator(ChromeOSCheckout):
def Main(argv):
"""The main function."""
# Common initializations
-### command_executer.InitCommandExecuter(True)
+ ### command_executer.InitCommandExecuter(True)
command_executer.InitCommandExecuter()
parser = optparse.OptionParser()
- parser.add_option("--remote",
- dest="remote",
- help="Remote machines to run tests on.")
- parser.add_option("--board",
- dest="board",
- default="x86-zgb",
- help="The target board.")
- parser.add_option("--githashes",
- dest="githashes",
- default="master",
- help="The gcc githashes to test.")
- parser.add_option("--clean",
- dest="clean",
+ parser.add_option('--remote',
+ dest='remote',
+ help='Remote machines to run tests on.')
+ parser.add_option('--board',
+ dest='board',
+ default='x86-zgb',
+ help='The target board.')
+ parser.add_option('--githashes',
+ dest='githashes',
+ default='master',
+ help='The gcc githashes to test.')
+ parser.add_option('--clean',
+ dest='clean',
default=False,
- action="store_true",
- help="Clean the chroot after testing.")
- parser.add_option("--public",
- dest="public",
+ action='store_true',
+ help='Clean the chroot after testing.')
+ parser.add_option('--public',
+ dest='public',
default=False,
- action="store_true",
- help="Use the public checkout/build.")
- parser.add_option("--force-mismatch",
- dest="force_mismatch",
- default="",
- help="Force the image regardless of board mismatch")
- parser.add_option("--noschedv2",
- dest="noschedv2",
- action="store_true",
+ action='store_true',
+ help='Use the public checkout/build.')
+ parser.add_option('--force-mismatch',
+ dest='force_mismatch',
+ default='',
+ help='Force the image regardless of board mismatch')
+ parser.add_option('--noschedv2',
+ dest='noschedv2',
+ action='store_true',
default=False,
- help="Pass --noschedv2 to crosperf.")
+ help='Pass --noschedv2 to crosperf.')
options, _ = parser.parse_args(argv)
if not options.board:
- print "Please give a board."
+ print 'Please give a board.'
return 1
if not options.remote:
- print "Please give at least one remote machine."
+ print 'Please give at least one remote machine.'
return 1
toolchain_configs = []
- for githash in options.githashes.split(","):
+ for githash in options.githashes.split(','):
gcc_config = GCCConfig(githash=githash)
toolchain_config = ToolchainConfig(gcc_config=gcc_config)
toolchain_configs.append(toolchain_config)
fc = ToolchainComparator(options.board, options.remote, toolchain_configs,
options.clean, options.public,
- options.force_mismatch,
- options.noschedv2)
+ options.force_mismatch, options.noschedv2)
return fc.DoAll()
-if __name__ == "__main__":
+if __name__ == '__main__':
retval = Main(sys.argv)
sys.exit(retval)
diff --git a/update_telemetry_defaults.py b/update_telemetry_defaults.py
index 33b96f6c..6d83e143 100755
--- a/update_telemetry_defaults.py
+++ b/update_telemetry_defaults.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
-
"""Script to maintain the Telemetry benchmark default results file.
This script allows the user to see and update the set of default
@@ -10,7 +9,7 @@ benchmarks.
"""
-__author__ = "cmtice@google.com (Caroline Tice)"
+__author__ = 'cmtice@google.com (Caroline Tice)'
import os
import sys
@@ -33,87 +32,85 @@ class TelemetryDefaults(object):
self._filename = fullname
self._defaults = {}
- def ReadDefaultsFile (self):
+ def ReadDefaultsFile(self):
if os.path.exists(self._filename):
- with open(self._filename, "r") as fp:
+ with open(self._filename, 'r') as fp:
self._defaults = json.load(fp)
- def WriteDefaultsFile (self):
- with open(self._filename, "w") as fp:
+ def WriteDefaultsFile(self):
+ with open(self._filename, 'w') as fp:
json.dump(self._defaults, fp, indent=2)
- def ListCurrentDefaults (self, benchmark=all):
+ def ListCurrentDefaults(self, benchmark=all):
# Show user current defaults. By default, show all. The user
# can specify the name of a particular benchmark to see only that
# benchmark's default values.
if len(self._defaults) == 0:
- print ("The benchmark default results are currently empty.")
+ print('The benchmark default results are currently empty.')
if benchmark == all:
for b in self._defaults.keys():
results = self._defaults[b]
out_str = b + ' : '
for r in results:
- out_str += r + ' '
- print (out_str)
+ out_str += r + ' '
+ print(out_str)
elif benchmark in self._defaults:
results = self._defaults[benchmark]
out_str = benchmark + ' : '
for r in results:
- out_str += r + ' '
- print (out_str)
+ out_str += r + ' '
+ print(out_str)
else:
- print ("Error: Unrecognized benchmark '%s'" % benchmark)
-
+ print("Error: Unrecognized benchmark '%s'" % benchmark)
- def AddDefault (self, benchmark, result):
+ def AddDefault(self, benchmark, result):
if benchmark in self._defaults:
resultList = self._defaults[benchmark]
else:
resultList = []
resultList.append(result)
self._defaults[benchmark] = resultList
- print ("Updated results set for '%s': " % benchmark)
- print ("%s : %s" % (benchmark, repr(self._defaults[benchmark])))
+ print("Updated results set for '%s': " % benchmark)
+ print('%s : %s' % (benchmark, repr(self._defaults[benchmark])))
-
- def RemoveDefault (self, benchmark, result):
+ def RemoveDefault(self, benchmark, result):
if benchmark in self._defaults:
resultList = self._defaults[benchmark]
if result in resultList:
resultList.remove(result)
- print ("Updated results set for '%s': " % benchmark)
- print ("%s : %s" % (benchmark, repr(self._defaults[benchmark])))
+ print("Updated results set for '%s': " % benchmark)
+ print('%s : %s' % (benchmark, repr(self._defaults[benchmark])))
else:
- print ("'%s' is not in '%s's default results list." %
- (result, benchmark))
+ print("'%s' is not in '%s's default results list." %
+ (result, benchmark))
else:
- print ("Cannot find benchmark named '%s'" % benchmark)
+ print("Cannot find benchmark named '%s'" % benchmark)
- def GetDefault (self):
+ def GetDefault(self):
return self._defaults
- def RemoveBenchmark (self, benchmark):
+ def RemoveBenchmark(self, benchmark):
if benchmark in self._defaults:
del self._defaults[benchmark]
- print ("Deleted benchmark '%s' from list of benchmarks." % benchmark)
+ print("Deleted benchmark '%s' from list of benchmarks." % benchmark)
else:
- print ("Cannot find benchmark named '%s'" % benchmark)
+ print("Cannot find benchmark named '%s'" % benchmark)
- def RenameBenchmark (self, old_name, new_name):
+ def RenameBenchmark(self, old_name, new_name):
if old_name in self._defaults:
resultsList = self._defaults[old_name]
del self._defaults[old_name]
self._defaults[new_name] = resultsList
- print ("Renamed '%s' to '%s'." % (old_name, new_name))
+ print("Renamed '%s' to '%s'." % (old_name, new_name))
else:
- print ("Cannot find benchmark named '%s'" % old_name)
+ print("Cannot find benchmark named '%s'" % old_name)
def UsageError(self, user_input):
# Print error message, then show options
- print ("Error:Invalid user input: '%s'" % user_input)
+ print("Error:Invalid user input: '%s'" % user_input)
self.ShowOptions()
- def ShowOptions (self):
+ def ShowOptions(self):
print """
Below are the valid user options and their arguments, and an explanation
of what each option does. You may either print out the full name of the
@@ -136,14 +133,14 @@ lower) does not matter, for the command (case of the result name DOES matter):
"""
- def GetUserInput (self):
+ def GetUserInput(self):
# Prompt user
- print ("Enter option> ")
+ print('Enter option> ')
# Process user input
inp = sys.stdin.readline()
inp = inp[:-1]
# inp = inp.lower()
- words = inp.split(" ")
+ words = inp.split(' ')
option = words[0]
option = option.lower()
if option == 'h' or option == 'help':
@@ -155,7 +152,7 @@ lower) does not matter, for the command (case of the result name DOES matter):
self.ListCurrentDefaults(benchmark=words[1])
elif option == 'a' or option == 'add':
if len(words) < 3:
- self.UsageError (inp)
+ self.UsageError(inp)
else:
benchmark = words[1]
resultList = words[2:]
@@ -163,20 +160,20 @@ lower) does not matter, for the command (case of the result name DOES matter):
self.AddDefault(benchmark, r)
elif option == 'd' or option == 'delete':
if len(words) != 3:
- self.UsageError (inp)
+ self.UsageError(inp)
else:
benchmark = words[1]
result = words[2]
self.RemoveDefault(benchmark, result)
elif option == 'r' or option == 'remove':
if len(words) != 2:
- self.UsageError (inp)
+ self.UsageError(inp)
else:
benchmark = words[1]
self.RemoveBenchmark(benchmark)
elif option == 'm' or option == 'move':
if len(words) != 3:
- self.UsageError (inp)
+ self.UsageError(inp)
else:
old_name = words[1]
new_name = words[2]
@@ -197,6 +194,7 @@ def Main():
done = defaults.GetUserInput()
return 0
-if __name__ == "__main__":
+
+if __name__ == '__main__':
retval = Main()
sys.exit(retval)
diff --git a/utils/__init__.py b/utils/__init__.py
index e69de29b..8b137891 100644
--- a/utils/__init__.py
+++ b/utils/__init__.py
@@ -0,0 +1 @@
+
diff --git a/utils/buildbot_json.py b/utils/buildbot_json.py
index 6ea7f8fb..d4e91863 100755
--- a/utils/buildbot_json.py
+++ b/utils/buildbot_json.py
@@ -28,7 +28,6 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# NOTE: This file is NOT under GPL. See above.
-
"""Queries buildbot through the json interface.
"""
@@ -53,12 +52,10 @@ except ImportError:
# after "vm7". Defaults to normal sorting.
natsorted = sorted
-
# These values are buildbot constants used for Build and BuildStep.
# This line was copied from master/buildbot/status/builder.py.
SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY = range(6)
-
## Generic node caching code.
@@ -184,6 +181,7 @@ class NonAddressableDataNode(Node): # pylint: disable=W0223
The data comes directly from the parent.
"""
+
def __init__(self, parent, subkey):
super(NonAddressableDataNode, self).__init__(parent, None)
self.subkey = subkey
@@ -275,6 +273,7 @@ class VirtualNodeList(Node):
class NodeList(VirtualNodeList): # pylint: disable=W0223
"""Adds a cache of the keys."""
+
def __init__(self, parent, url):
super(NodeList, self).__init__(parent, url)
self._keys = []
@@ -301,10 +300,9 @@ class NonAddressableNodeList(VirtualNodeList): # pylint: disable=W0223
def __init__(self, parent, subkey):
super(NonAddressableNodeList, self).__init__(parent, None)
self.subkey = subkey
- assert (
- not isinstance(self._child_cls, NonAddressableDataNode) and
- issubclass(self._child_cls, NonAddressableDataNode)), (
- self._child_cls.__name__)
+ assert (not isinstance(self._child_cls, NonAddressableDataNode) and
+ issubclass(self._child_cls, NonAddressableDataNode)), (
+ self._child_cls.__name__)
@property
def cached_children(self):
@@ -367,10 +365,9 @@ class AddressableNodeList(NodeList):
def __init__(self, parent, url):
super(AddressableNodeList, self).__init__(parent, url)
self._cache = {}
- assert (
- not isinstance(self._child_cls, AddressableDataNode) and
- issubclass(self._child_cls, AddressableDataNode)), (
- self._child_cls.__name__)
+ assert (not isinstance(self._child_cls, AddressableDataNode) and
+ issubclass(self._child_cls, AddressableDataNode)), (
+ self._child_cls.__name__)
@property
def cached_children(self):
@@ -411,13 +408,14 @@ class AddressableNodeList(NodeList):
# pylint: disable=W0212
if not self._is_cached:
to_fetch = [
- child for child in children
+ child
+ for child in children
if not (child in self._cache and self._cache[child].cached_data)
]
if to_fetch:
# Similar to cache(). The only reason to sort is to simplify testing.
- params = '&'.join(
- 'select=%s' % urllib.quote(str(v)) for v in sorted(to_fetch))
+ params = '&'.join('select=%s' % urllib.quote(str(v))
+ for v in sorted(to_fetch))
data = self.read('?' + params)
for key in sorted(data):
self._create_obj(key, data[key])
@@ -517,14 +515,16 @@ class SubViewNodeList(VirtualNodeList): # pylint: disable=W0223
self.cache()
return super(SubViewNodeList, self).__iter__()
-
###############################################################################
## Buildbot-specific code
class Slave(AddressableDataNode):
printable_attributes = AddressableDataNode.printable_attributes + [
- 'name', 'key', 'connected', 'version',
+ 'name',
+ 'key',
+ 'connected',
+ 'version',
]
def __init__(self, parent, name, data):
@@ -561,8 +561,8 @@ class BuilderSlaves(SubViewNodeList):
printable_attributes = SubViewNodeList.printable_attributes + ['names']
def __init__(self, parent):
- super(BuilderSlaves, self).__init__(
- parent, parent.parent.parent.slaves, 'slaves')
+ super(BuilderSlaves, self).__init__(parent, parent.parent.parent.slaves,
+ 'slaves')
@property
def names(self):
@@ -571,9 +571,16 @@ class BuilderSlaves(SubViewNodeList):
class BuildStep(NonAddressableDataNode):
printable_attributes = NonAddressableDataNode.printable_attributes + [
- 'name', 'number', 'start_time', 'end_time', 'duration', 'is_started',
- 'is_finished', 'is_running',
- 'result', 'simplified_result',
+ 'name',
+ 'number',
+ 'start_time',
+ 'end_time',
+ 'duration',
+ 'is_started',
+ 'is_finished',
+ 'is_running',
+ 'result',
+ 'simplified_result',
]
def __init__(self, parent, number):
@@ -643,7 +650,7 @@ class BuildStep(NonAddressableDataNode):
class BuildSteps(NonAddressableNodeList):
"""Duplicates keys to support lookup by both step number and step name."""
printable_attributes = NonAddressableNodeList.printable_attributes + [
- 'failed',
+ 'failed',
]
_child_cls = BuildStep
@@ -678,9 +685,20 @@ class BuildSteps(NonAddressableNodeList):
class Build(AddressableDataNode):
printable_attributes = AddressableDataNode.printable_attributes + [
- 'key', 'number', 'steps', 'blame', 'reason', 'revision', 'result',
- 'simplified_result', 'start_time', 'end_time', 'duration', 'slave',
- 'properties', 'completed',
+ 'key',
+ 'number',
+ 'steps',
+ 'blame',
+ 'reason',
+ 'revision',
+ 'result',
+ 'simplified_result',
+ 'start_time',
+ 'end_time',
+ 'duration',
+ 'slave',
+ 'properties',
+ 'completed',
]
def __init__(self, parent, key, data):
@@ -776,12 +794,13 @@ class Build(AddressableDataNode):
class CurrentBuilds(SubViewNodeList):
"""Lists of the current builds."""
+
def __init__(self, parent):
- super(CurrentBuilds, self).__init__(
- parent, parent.builds, 'currentBuilds')
+ super(CurrentBuilds, self).__init__(parent, parent.builds, 'currentBuilds')
class PendingBuilds(AddressableDataNode):
+
def __init__(self, parent):
super(PendingBuilds, self).__init__(parent, 'pendingBuilds', None)
@@ -863,7 +882,12 @@ class Builds(AddressableNodeList):
class Builder(AddressableDataNode):
printable_attributes = AddressableDataNode.printable_attributes + [
- 'name', 'key', 'builds', 'slaves', 'pending_builds', 'current_builds',
+ 'name',
+ 'key',
+ 'builds',
+ 'slaves',
+ 'pending_builds',
+ 'current_builds',
]
def __init__(self, parent, name, data):
@@ -897,7 +921,9 @@ class Buildbot(AddressableBaseDataNode):
# Throttle fetches to not kill the server.
auto_throttle = None
printable_attributes = AddressableDataNode.printable_attributes + [
- 'slaves', 'builders', 'last_fetch',
+ 'slaves',
+ 'builders',
+ 'last_fetch',
]
def __init__(self, url):
@@ -924,10 +950,9 @@ class Buildbot(AddressableBaseDataNode):
if self.auto_throttle:
if self.last_fetch:
delta = datetime.datetime.utcnow() - self.last_fetch
- remaining = (datetime.timedelta(seconds=self.auto_throttle) -
- delta)
+ remaining = (datetime.timedelta(seconds=self.auto_throttle) - delta)
if remaining > datetime.timedelta(seconds=0):
- logging.debug('Sleeping for %ss' % remaining)
+ logging.debug('Sleeping for %ss', remaining)
time.sleep(remaining.seconds)
self.last_fetch = datetime.datetime.utcnow()
url = '%s/%s' % (self.url, suburl)
@@ -935,7 +960,7 @@ class Buildbot(AddressableBaseDataNode):
url += '&filter=1'
else:
url += '?filter=1'
- logging.info('read(%s)' % suburl)
+ logging.info('read(%s)', suburl)
channel = urllib.urlopen(url)
data = channel.read()
try:
@@ -943,31 +968,33 @@ class Buildbot(AddressableBaseDataNode):
except ValueError:
if channel.getcode() >= 400:
# Convert it into an HTTPError for easier processing.
- raise urllib2.HTTPError(
- url, channel.getcode(), '%s:\n%s' % (url, data), channel.headers,
- None)
+ raise urllib2.HTTPError(url, channel.getcode(), '%s:\n%s' % (url, data),
+ channel.headers, None)
raise
def _readall(self):
return self.read('project')
-
###############################################################################
## Controller code
def usage(more):
+
def hook(fn):
fn.func_usage_more = more
return fn
+
return hook
def need_buildbot(fn):
"""Post-parse args to create a buildbot object."""
+
@functools.wraps(fn)
def hook(parser, args, *extra_args, **kwargs):
old_parse_args = parser.parse_args
+
def new_parse_args(args):
options, args = old_parse_args(args)
if len(args) < 1:
@@ -978,6 +1005,7 @@ def need_buildbot(fn):
buildbot = Buildbot(url)
buildbot.auto_throttle = options.throttle
return options, args, buildbot
+
parser.parse_args = new_parse_args
# Call the original function with the modified parser.
return fn(parser, args, *extra_args, **kwargs)
@@ -989,9 +1017,12 @@ def need_buildbot(fn):
@need_buildbot
def CMDpending(parser, args):
"""Lists pending jobs."""
- parser.add_option(
- '-b', '--builder', dest='builders', action='append', default=[],
- help='Builders to filter on')
+ parser.add_option('-b',
+ '--builder',
+ dest='builders',
+ action='append',
+ default=[],
+ help='Builders to filter on')
options, args, buildbot = parser.parse_args(args)
if args:
parser.error('Unrecognized parameters: %s' % ' '.join(args))
@@ -1023,8 +1054,10 @@ def CMDrun(parser, args):
was on its own line.
"""
parser.add_option('-f', '--file', help='Read script from file')
- parser.add_option(
- '-i', dest='use_stdin', action='store_true', help='Read script on stdin')
+ parser.add_option('-i',
+ dest='use_stdin',
+ action='store_true',
+ help='Read script on stdin')
# Variable 'buildbot' is not used directly.
# pylint: disable=W0612
options, args, buildbot = parser.parse_args(args)
@@ -1051,10 +1084,7 @@ def CMDinteractive(parser, args):
'Buildbot interactive console for "%s".\n'
'Hint: Start with typing: \'buildbot.printable_attributes\' or '
'\'print str(buildbot)\' to explore.') % buildbot.url[:-len('/json')]
- local_vars = {
- 'buildbot': buildbot,
- 'b': buildbot,
- }
+ local_vars = {'buildbot': buildbot, 'b': buildbot}
code.interact(prompt, None, local_vars)
@@ -1083,12 +1113,18 @@ def CMDdisconnected(parser, args):
def find_idle_busy_slaves(parser, args, show_idle):
- parser.add_option(
- '-b', '--builder', dest='builders', action='append', default=[],
- help='Builders to filter on')
- parser.add_option(
- '-s', '--slave', dest='slaves', action='append', default=[],
- help='Slaves to filter on')
+ parser.add_option('-b',
+ '--builder',
+ dest='builders',
+ action='append',
+ default=[],
+ help='Builders to filter on')
+ parser.add_option('-s',
+ '--slave',
+ dest='slaves',
+ action='append',
+ default=[],
+ help='Slaves to filter on')
options, args, buildbot = parser.parse_args(args)
if args:
parser.error('Unrecognized parameters: %s' % ' '.join(args))
@@ -1117,8 +1153,11 @@ def find_idle_busy_slaves(parser, args, show_idle):
return 0
-def last_failure(
- buildbot, builders=None, slaves=None, steps=None, no_cache=False):
+def last_failure(buildbot,
+ builders=None,
+ slaves=None,
+ steps=None,
+ no_cache=False):
"""Generator returning Build object that were the last failure with the
specific filters.
"""
@@ -1168,26 +1207,38 @@ def CMDlast_failure(parser, args):
Example: to find all slaves where their last build was a compile failure,
run with --step compile"""
parser.add_option(
- '-S', '--step', dest='steps', action='append', default=[],
- help='List all slaves that failed on that step on their last build')
- parser.add_option(
- '-b', '--builder', dest='builders', action='append', default=[],
- help='Builders to filter on')
- parser.add_option(
- '-s', '--slave', dest='slaves', action='append', default=[],
- help='Slaves to filter on')
- parser.add_option(
- '-n', '--no_cache', action='store_true',
- help='Don\'t load all builds at once')
+ '-S',
+ '--step',
+ dest='steps',
+ action='append',
+ default=[],
+ help='List all slaves that failed on that step on their last build')
+ parser.add_option('-b',
+ '--builder',
+ dest='builders',
+ action='append',
+ default=[],
+ help='Builders to filter on')
+ parser.add_option('-s',
+ '--slave',
+ dest='slaves',
+ action='append',
+ default=[],
+ help='Slaves to filter on')
+ parser.add_option('-n',
+ '--no_cache',
+ action='store_true',
+ help='Don\'t load all builds at once')
options, args, buildbot = parser.parse_args(args)
if args:
parser.error('Unrecognized parameters: %s' % ' '.join(args))
print_builders = not options.quiet and len(options.builders) != 1
last_builder = None
- for build in last_failure(
- buildbot, builders=options.builders,
- slaves=options.slaves, steps=options.steps,
- no_cache=options.no_cache):
+ for build in last_failure(buildbot,
+ builders=options.builders,
+ slaves=options.slaves,
+ steps=options.steps,
+ no_cache=options.no_cache):
if print_builders and last_builder != build.builder:
print build.builder.name
@@ -1199,8 +1250,8 @@ def CMDlast_failure(parser, args):
else:
print build.slave.name
else:
- out = '%d on %s: blame:%s' % (
- build.number, build.slave.name, ', '.join(build.blame))
+ out = '%d on %s: blame:%s' % (build.number, build.slave.name,
+ ', '.join(build.blame))
if print_builders:
out = ' ' + out
print out
@@ -1220,11 +1271,15 @@ def CMDlast_failure(parser, args):
@need_buildbot
def CMDcurrent(parser, args):
"""Lists current jobs."""
- parser.add_option(
- '-b', '--builder', dest='builders', action='append', default=[],
- help='Builders to filter on')
- parser.add_option(
- '--blame', action='store_true', help='Only print the blame list')
+ parser.add_option('-b',
+ '--builder',
+ dest='builders',
+ action='append',
+ default=[],
+ help='Builders to filter on')
+ parser.add_option('--blame',
+ action='store_true',
+ help='Only print the blame list')
options, args, buildbot = parser.parse_args(args)
if args:
parser.error('Unrecognized parameters: %s' % ' '.join(args))
@@ -1268,17 +1323,26 @@ def CMDbuilds(parser, args):
Example: to find all builds on a single slave, run with -b bar -s foo
"""
- parser.add_option(
- '-r', '--result', type='int', help='Build result to filter on')
- parser.add_option(
- '-b', '--builder', dest='builders', action='append', default=[],
- help='Builders to filter on')
- parser.add_option(
- '-s', '--slave', dest='slaves', action='append', default=[],
- help='Slaves to filter on')
- parser.add_option(
- '-n', '--no_cache', action='store_true',
- help='Don\'t load all builds at once')
+ parser.add_option('-r',
+ '--result',
+ type='int',
+ help='Build result to filter on')
+ parser.add_option('-b',
+ '--builder',
+ dest='builders',
+ action='append',
+ default=[],
+ help='Builders to filter on')
+ parser.add_option('-s',
+ '--slave',
+ dest='slaves',
+ action='append',
+ default=[],
+ help='Slaves to filter on')
+ parser.add_option('-n',
+ '--no_cache',
+ action='store_true',
+ help='Don\'t load all builds at once')
options, args, buildbot = parser.parse_args(args)
if args:
parser.error('Unrecognized parameters: %s' % ' '.join(args))
@@ -1305,11 +1369,16 @@ def CMDbuilds(parser, args):
def CMDcount(parser, args):
"""Count the number of builds that occured during a specific period.
"""
- parser.add_option(
- '-o', '--over', type='int', help='Number of seconds to look for')
- parser.add_option(
- '-b', '--builder', dest='builders', action='append', default=[],
- help='Builders to filter on')
+ parser.add_option('-o',
+ '--over',
+ type='int',
+ help='Number of seconds to look for')
+ parser.add_option('-b',
+ '--builder',
+ dest='builders',
+ action='append',
+ default=[],
+ help='Builders to filter on')
options, args, buildbot = parser.parse_args(args)
if args:
parser.error('Unrecognized parameters: %s' % ' '.join(args))
@@ -1330,8 +1399,8 @@ def CMDcount(parser, args):
start_time = build.start_time
except urllib2.HTTPError:
# The build was probably trimmed.
- print >> sys.stderr, (
- 'Failed to fetch build %s/%d' % (builder.name, build.number))
+ print >> sys.stderr, ('Failed to fetch build %s/%d' %
+ (builder.name, build.number))
continue
if start_time >= since:
counts[builder.name] += 1
@@ -1353,12 +1422,12 @@ def gen_parser():
It should be then processed with gen_usage() before being used.
"""
- parser = optparse.OptionParser(
- version=__version__)
+ parser = optparse.OptionParser(version=__version__)
# Remove description formatting
parser.format_description = lambda x: parser.description
# Add common parsing.
old_parser_args = parser.parse_args
+
def Parse(*args, **kwargs):
options, args = old_parser_args(*args, **kwargs)
if options.verbose >= 2:
@@ -1368,20 +1437,23 @@ def gen_parser():
else:
logging.basicConfig(level=logging.WARNING)
return options, args
+
parser.parse_args = Parse
+ parser.add_option('-v',
+ '--verbose',
+ action='count',
+ help='Use multiple times to increase logging leve')
parser.add_option(
- '-v', '--verbose', action='count',
- help='Use multiple times to increase logging leve')
- parser.add_option(
- '-q', '--quiet', action='store_true',
- help='Reduces the output to be parsed by scripts, independent of -v')
- parser.add_option(
- '--throttle', type='float',
- help='Minimum delay to sleep between requests')
+ '-q',
+ '--quiet',
+ action='store_true',
+ help='Reduces the output to be parsed by scripts, independent of -v')
+ parser.add_option('--throttle',
+ type='float',
+ help='Minimum delay to sleep between requests')
return parser
-
###############################################################################
## Generic subcommand handling code
diff --git a/utils/buildbot_utils.py b/utils/buildbot_utils.py
index cff30073..28d4468d 100644
--- a/utils/buildbot_utils.py
+++ b/utils/buildbot_utils.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright 2014 Google Inc. All Rights Reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -13,19 +12,19 @@ from utils import logger
from utils import buildbot_json
SLEEP_TIME = 600 # 10 minutes; time between polling of buildbot.
-TIME_OUT = 18000 # Decide the build is dead or will never finish
- # after this time (5 hours).
-OK_STATUS = [ # List of result status values that are 'ok'.
- # This was obtained from: https://chromium.googlesource.com/chromium/tools/build/+/master/third_party/buildbot_8_4p1/buildbot/status/results.py
- 0, # "success"
- 1, # "warnings"
- 6, # "retry"
- ]
-
+TIME_OUT = 18000 # Decide the build is dead or will never finish
+# after this time (5 hours).
+OK_STATUS = [ # List of result status values that are 'ok'.
+ # This was obtained from: https://chromium.googlesource.com/chromium/tools/build/+/master/third_party/buildbot_8_4p1/buildbot/status/results.py
+ 0, # "success"
+ 1, # "warnings"
+ 6, # "retry"
+]
"""Utilities for launching and accessing ChromeOS buildbots."""
-def ParseReportLog (url, build):
- """
+
+def ParseReportLog(url, build):
+ """
Scrape the trybot image name off the Reports log page.
This takes the URL for a trybot Reports Stage web page,
@@ -35,43 +34,44 @@ def ParseReportLog (url, build):
'trybot-daisy-release/R40-6394.0.0-b1389'). It returns the
artifact name, if found.
"""
- trybot_image = ""
- url += "/text"
- newurl = url.replace ("uberchromegw", "chromegw")
- webpage = urllib2.urlopen(newurl)
- data = webpage.read()
- lines = data.split('\n')
- for l in lines:
- if l.find("Artifacts") > 0 and l.find("trybot") > 0:
- trybot_name = "trybot-%s" % build
- start_pos = l.find(trybot_name)
- end_pos = l.find("@https://storage")
- trybot_image = l[start_pos:end_pos]
-
- return trybot_image
-
-def GetBuildData (buildbot_queue, build_id):
- """
+ trybot_image = ''
+ url += '/text'
+ newurl = url.replace('uberchromegw', 'chromegw')
+ webpage = urllib2.urlopen(newurl)
+ data = webpage.read()
+ lines = data.split('\n')
+ for l in lines:
+ if l.find('Artifacts') > 0 and l.find('trybot') > 0:
+ trybot_name = 'trybot-%s' % build
+ start_pos = l.find(trybot_name)
+ end_pos = l.find('@https://storage')
+ trybot_image = l[start_pos:end_pos]
+
+ return trybot_image
+
+
+def GetBuildData(buildbot_queue, build_id):
+ """
Find the Reports stage web page for a trybot build.
This takes the name of a buildbot_queue, such as 'daisy-release'
and a build id (the build number), and uses the json buildbot api to
find the Reports stage web page for that build, if it exists.
"""
- builder = buildbot_json.Buildbot(
- "http://chromegw/p/tryserver.chromiumos/").builders[buildbot_queue]
- build_data = builder.builds[build_id].data
- logs = build_data["logs"]
- for l in logs:
- fname = l[1]
- if "steps/Report/" in fname:
- return fname
+ builder = buildbot_json.Buildbot(
+ 'http://chromegw/p/tryserver.chromiumos/').builders[buildbot_queue]
+ build_data = builder.builds[build_id].data
+ logs = build_data['logs']
+ for l in logs:
+ fname = l[1]
+ if 'steps/Report/' in fname:
+ return fname
- return ""
+ return ''
def FindBuildRecordFromLog(description, log_info):
- """
+ """
Find the right build record in the build logs.
Get the first build record from build log with a reason field
@@ -80,100 +80,104 @@ def FindBuildRecordFromLog(description, log_info):
point.)
"""
- current_line = 1
- while current_line < len(log_info):
- my_dict = {}
- # Read all the lines from one "Build" to the next into my_dict
- while True:
- key = log_info[current_line].split(":")[0].strip()
- value = log_info[current_line].split(":", 1)[1].strip()
- my_dict[key] = value
- current_line += 1
- if "Build" in key or current_line == len(log_info):
- break
- try:
- # Check to see of the build record is the right one.
- if str(description) in my_dict["reason"]:
- # We found a match; we're done.
- return my_dict
- except:
- print "reason is not in dictionary: '%s'" % repr(my_dict)
- else:
- # Keep going.
- continue
-
- # We hit the bottom of the log without a match.
- return {}
+ current_line = 1
+ while current_line < len(log_info):
+ my_dict = {}
+ # Read all the lines from one "Build" to the next into my_dict
+ while True:
+ key = log_info[current_line].split(':')[0].strip()
+ value = log_info[current_line].split(':', 1)[1].strip()
+ my_dict[key] = value
+ current_line += 1
+ if 'Build' in key or current_line == len(log_info):
+ break
+ try:
+      # Check to see if the build record is the right one.
+ if str(description) in my_dict['reason']:
+ # We found a match; we're done.
+ return my_dict
+ except:
+ print "reason is not in dictionary: '%s'" % repr(my_dict)
+ else:
+ # Keep going.
+ continue
+
+ # We hit the bottom of the log without a match.
+ return {}
def GetBuildInfo(file_dir, builder):
- """
+ """
Get all the build records for the trybot builds.
file_dir is the toolchain_utils directory.
"""
- ce = command_executer.GetCommandExecuter()
- commands = ("{0}/utils/buildbot_json.py builds "
- "http://chromegw/i/tryserver.chromiumos/"
- .format(file_dir))
-
- if builder:
- # For release builds, get logs from the 'release' builder.
- if builder.endswith('-release'):
- commands += " -b release"
- else:
- commands += " -b %s" % builder
- _, buildinfo, _ = ce.RunCommandWOutput(commands, print_to_console=False)
- build_log = buildinfo.splitlines()
- return build_log
+ ce = command_executer.GetCommandExecuter()
+ commands = ('{0}/utils/buildbot_json.py builds '
+ 'http://chromegw/i/tryserver.chromiumos/'.format(file_dir))
+
+ if builder:
+ # For release builds, get logs from the 'release' builder.
+ if builder.endswith('-release'):
+ commands += ' -b release'
+ else:
+ commands += ' -b %s' % builder
+ _, buildinfo, _ = ce.RunCommandWOutput(commands, print_to_console=False)
+ build_log = buildinfo.splitlines()
+ return build_log
def FindArchiveImage(chromeos_root, build, build_id):
- """
+ """
Given a build_id, search Google Storage for a trybot artifact
for the correct board with the correct build_id. Return the
name of the artifact, if found.
"""
- ce = command_executer.GetCommandExecuter()
- command = ("gsutil ls gs://chromeos-image-archive/trybot-%s/*b%s"
- "/chromiumos_test_image.tar.xz" % (build, build_id))
- retval, out, err = ce.ChrootRunCommandWOutput(chromeos_root, command,
- print_to_console=False)
- #
- # If build_id is not unique, there may be multiple archive images
- # to choose from; sort them & pick the first (newest).
- #
- # If there are multiple archive images found, out will look something
- # like this:
- #
- # 'gs://chromeos-image-archive/trybot-peppy-release/R35-5692.0.0-b105/chromiumos_test_image.tar.xz\ngs://chromeos-image-archive/trybot-peppy-release/R46-7339.0.0-b105/chromiumos_test_image.tar.xz\n'
- #
- out = out.rstrip('\n')
- tmp_list = out.split('\n')
- # After stripping the final '\n' and splitting on any other '\n', we get
- # something like this:
- # tmp_list = [ 'gs://chromeos-image-archive/trybot-peppy-release/R35-5692.0.0-b105/chromiumos_test_image.tar.xz' ,
- # 'gs://chromeos-image-archive/trybot-peppy-release/R46-7339.0.0-b105/chromiumos_test_image.tar.xz' ]
- #
- # If we sort this in descending order, we should end up with the most
- # recent test image first, so that's what we do here.
- #
- if len(tmp_list) > 1:
- tmp_list = sorted(tmp_list, reverse=True)
- out = tmp_list[0]
-
- trybot_image = ""
- trybot_name = "trybot-%s" % build
- if out and out.find(trybot_name) > 0:
- start_pos = out.find(trybot_name)
- end_pos = out.find("/chromiumos_test_image")
- trybot_image = out[start_pos:end_pos]
-
- return trybot_image
-
-def GetTrybotImage(chromeos_root, buildbot_name, patch_list, build_tag,
+ ce = command_executer.GetCommandExecuter()
+ command = ('gsutil ls gs://chromeos-image-archive/trybot-%s/*b%s'
+ '/chromiumos_test_image.tar.xz' % (build, build_id))
+ retval, out, err = ce.ChrootRunCommandWOutput(chromeos_root,
+ command,
+ print_to_console=False)
+ #
+ # If build_id is not unique, there may be multiple archive images
+ # to choose from; sort them & pick the first (newest).
+ #
+ # If there are multiple archive images found, out will look something
+ # like this:
+ #
+ # 'gs://chromeos-image-archive/trybot-peppy-release/R35-5692.0.0-b105/chromiumos_test_image.tar.xz\ngs://chromeos-image-archive/trybot-peppy-release/R46-7339.0.0-b105/chromiumos_test_image.tar.xz\n'
+ #
+ out = out.rstrip('\n')
+ tmp_list = out.split('\n')
+ # After stripping the final '\n' and splitting on any other '\n', we get
+ # something like this:
+ # tmp_list = [ 'gs://chromeos-image-archive/trybot-peppy-release/R35-5692.0.0-b105/chromiumos_test_image.tar.xz' ,
+ # 'gs://chromeos-image-archive/trybot-peppy-release/R46-7339.0.0-b105/chromiumos_test_image.tar.xz' ]
+ #
+ # If we sort this in descending order, we should end up with the most
+ # recent test image first, so that's what we do here.
+ #
+ if len(tmp_list) > 1:
+ tmp_list = sorted(tmp_list, reverse=True)
+ out = tmp_list[0]
+
+ trybot_image = ''
+ trybot_name = 'trybot-%s' % build
+ if out and out.find(trybot_name) > 0:
+ start_pos = out.find(trybot_name)
+ end_pos = out.find('/chromiumos_test_image')
+ trybot_image = out[start_pos:end_pos]
+
+ return trybot_image
+
+
+def GetTrybotImage(chromeos_root,
+ buildbot_name,
+ patch_list,
+ build_tag,
build_toolchain=False):
- """
+ """
Launch buildbot and get resulting trybot artifact name.
This function launches a buildbot with the appropriate flags to
@@ -193,107 +197,106 @@ def GetTrybotImage(chromeos_root, buildbot_name, patch_list, build_tag,
build_tag is a (unique) string to be used to look up the buildbot results
from among all the build records.
"""
- ce = command_executer.GetCommandExecuter()
- cbuildbot_path = os.path.join(chromeos_root, "chromite/cbuildbot")
- base_dir = os.getcwd()
- patch_arg = ""
- if patch_list:
- for p in patch_list:
- patch_arg = patch_arg + " -g " + repr(p)
- toolchain_flags=""
- if build_toolchain:
- toolchain_flags += "--latest-toolchain"
- branch = "master"
- os.chdir(cbuildbot_path)
-
- # Launch buildbot with appropriate flags.
- build = buildbot_name
- description = build_tag
- command = ("./cbuildbot --remote --nochromesdk --notests"
- " --remote-description=%s %s %s %s"
- % (description, toolchain_flags, patch_arg, build))
- _, out, _ = ce.RunCommandWOutput(command)
- if "Tryjob submitted!" not in out:
- logger.GetLogger().LogFatal("Error occurred while launching trybot job: "
- "%s" % command)
- os.chdir(base_dir)
-
- build_id = 0
- build_status = None
- # Wait for buildbot to finish running (check every 10 minutes). Wait
- # 10 minutes before the first check to give the buildbot time to launch
- # (so we don't start looking for build data before it's out there).
- time.sleep(SLEEP_TIME)
- done = False
- pending = True
- # pending_time is the time between when we submit the job and when the
- # buildbot actually launches the build. running_time is the time between
- # when the buildbot job launches and when it finishes. The job is
- # considered 'pending' until we can find an entry for it in the buildbot
- # logs.
- pending_time = SLEEP_TIME
- running_time = 0
- while not done:
- done = True
- build_info = GetBuildInfo(base_dir, build)
- if not build_info:
+ ce = command_executer.GetCommandExecuter()
+ cbuildbot_path = os.path.join(chromeos_root, 'chromite/cbuildbot')
+ base_dir = os.getcwd()
+ patch_arg = ''
+ if patch_list:
+ for p in patch_list:
+ patch_arg = patch_arg + ' -g ' + repr(p)
+ toolchain_flags = ''
+ if build_toolchain:
+ toolchain_flags += '--latest-toolchain'
+ branch = 'master'
+ os.chdir(cbuildbot_path)
+
+ # Launch buildbot with appropriate flags.
+ build = buildbot_name
+ description = build_tag
+ command = ('./cbuildbot --remote --nochromesdk --notests'
+ ' --remote-description=%s %s %s %s' %
+ (description, toolchain_flags, patch_arg, build))
+ _, out, _ = ce.RunCommandWOutput(command)
+ if 'Tryjob submitted!' not in out:
+ logger.GetLogger().LogFatal('Error occurred while launching trybot job: '
+ '%s' % command)
+ os.chdir(base_dir)
+
+ build_id = 0
+ build_status = None
+ # Wait for buildbot to finish running (check every 10 minutes). Wait
+ # 10 minutes before the first check to give the buildbot time to launch
+ # (so we don't start looking for build data before it's out there).
+ time.sleep(SLEEP_TIME)
+ done = False
+ pending = True
+ # pending_time is the time between when we submit the job and when the
+ # buildbot actually launches the build. running_time is the time between
+ # when the buildbot job launches and when it finishes. The job is
+ # considered 'pending' until we can find an entry for it in the buildbot
+ # logs.
+ pending_time = SLEEP_TIME
+ running_time = 0
+ while not done:
+ done = True
+ build_info = GetBuildInfo(base_dir, build)
+ if not build_info:
+ if pending_time > TIME_OUT:
+ logger.GetLogger().LogFatal('Unable to get build logs for target %s.' %
+ build)
+ else:
+ pending_message = 'Unable to find build log; job may be pending.'
+ done = False
+
+ if done:
+ data_dict = FindBuildRecordFromLog(description, build_info)
+ if not data_dict:
+ # Trybot job may be pending (not actually launched yet).
if pending_time > TIME_OUT:
- logger.GetLogger().LogFatal("Unable to get build logs for target %s."
- % build)
+ logger.GetLogger().LogFatal('Unable to find build record for trybot'
+ ' %s.' % description)
else:
- pending_message = "Unable to find build log; job may be pending."
+ pending_message = 'Unable to find build record; job may be pending.'
done = False
- if done:
- data_dict = FindBuildRecordFromLog(description, build_info)
- if not data_dict:
- # Trybot job may be pending (not actually launched yet).
- if pending_time > TIME_OUT:
- logger.GetLogger().LogFatal("Unable to find build record for trybot"
- " %s." % description)
- else:
- pending_message = "Unable to find build record; job may be pending."
- done = False
-
- else:
- # Now that we have actually found the entry for the build
- # job in the build log, we know the job is actually
- # runnning, not pending, so we flip the 'pending' flag. We
- # still have to wait for the buildbot job to finish running
- # however.
- pending = False
- if "True" in data_dict["completed"]:
- build_id = data_dict["number"]
- build_status = int(data_dict["result"])
- else:
- done = False
-
- if not done:
- if pending:
- logger.GetLogger().LogOutput(pending_message)
- logger.GetLogger().LogOutput("Current pending time: %d minutes." %
- (pending_time / 60))
- pending_time += SLEEP_TIME
+ else:
+ # Now that we have actually found the entry for the build
+ # job in the build log, we know the job is actually
+      # running, not pending, so we flip the 'pending' flag. We
+ # still have to wait for the buildbot job to finish running
+ # however.
+ pending = False
+ if 'True' in data_dict['completed']:
+ build_id = data_dict['number']
+ build_status = int(data_dict['result'])
else:
- logger.GetLogger().LogOutput("{0} minutes passed.".format(
- running_time / 60))
- logger.GetLogger().LogOutput("Sleeping {0} seconds.".format(
- SLEEP_TIME))
- running_time += SLEEP_TIME
-
- time.sleep(SLEEP_TIME)
- if running_time > TIME_OUT:
- done = True
-
- trybot_image = ""
-
- if build_status in OK_STATUS:
- trybot_image = FindArchiveImage(chromeos_root, build, build_id)
- if not trybot_image:
- logger.GetLogger().LogError("Trybot job %s failed with status %d;"
- " no trybot image generated."
- % (description, build_status))
-
- logger.GetLogger().LogOutput("trybot_image is '%s'" % trybot_image)
- logger.GetLogger().LogOutput("build_status is %d" % build_status)
- return trybot_image
+ done = False
+
+ if not done:
+ if pending:
+ logger.GetLogger().LogOutput(pending_message)
+ logger.GetLogger().LogOutput('Current pending time: %d minutes.' %
+ (pending_time / 60))
+ pending_time += SLEEP_TIME
+ else:
+ logger.GetLogger().LogOutput('{0} minutes passed.'.format(running_time /
+ 60))
+ logger.GetLogger().LogOutput('Sleeping {0} seconds.'.format(SLEEP_TIME))
+ running_time += SLEEP_TIME
+
+ time.sleep(SLEEP_TIME)
+ if running_time > TIME_OUT:
+ done = True
+
+ trybot_image = ''
+
+ if build_status in OK_STATUS:
+ trybot_image = FindArchiveImage(chromeos_root, build, build_id)
+ if not trybot_image:
+ logger.GetLogger().LogError('Trybot job %s failed with status %d;'
+ ' no trybot image generated.' %
+ (description, build_status))
+
+ logger.GetLogger().LogOutput("trybot_image is '%s'" % trybot_image)
+ logger.GetLogger().LogOutput('build_status is %d' % build_status)
+ return trybot_image
diff --git a/utils/colortrans.py b/utils/colortrans.py
index 37e91572..0ab44d01 100644
--- a/utils/colortrans.py
+++ b/utils/colortrans.py
@@ -1,5 +1,3 @@
-#! /usr/bin/env python
-
""" Convert values between RGB hex codes and xterm-256 color codes.
Nice long listing of all 256 colors and their codes. Useful for
@@ -14,123 +12,123 @@ I'm not sure where this script was inspired from. I think I must have
written it from scratch, though it's been several years now.
"""
-__author__ = 'Micah Elliott http://MicahElliott.com'
-__version__ = '0.1'
+__author__ = 'Micah Elliott http://MicahElliott.com'
+__version__ = '0.1'
__copyright__ = 'Copyright (C) 2011 Micah Elliott. All rights reserved.'
-__license__ = 'WTFPL http://sam.zoy.org/wtfpl/'
+__license__ = 'WTFPL http://sam.zoy.org/wtfpl/'
#---------------------------------------------------------------------
import sys, re
CLUT = [ # color look-up table
-# 8-bit, RGB hex
+ # 8-bit, RGB hex
# Primary 3-bit (8 colors). Unique representation!
- ('00', '000000'),
- ('01', '800000'),
- ('02', '008000'),
- ('03', '808000'),
- ('04', '000080'),
- ('05', '800080'),
- ('06', '008080'),
- ('07', 'c0c0c0'),
+ ('00', '000000'),
+ ('01', '800000'),
+ ('02', '008000'),
+ ('03', '808000'),
+ ('04', '000080'),
+ ('05', '800080'),
+ ('06', '008080'),
+ ('07', 'c0c0c0'),
# Equivalent "bright" versions of original 8 colors.
- ('08', '808080'),
- ('09', 'ff0000'),
- ('10', '00ff00'),
- ('11', 'ffff00'),
- ('12', '0000ff'),
- ('13', 'ff00ff'),
- ('14', '00ffff'),
- ('15', 'ffffff'),
+ ('08', '808080'),
+ ('09', 'ff0000'),
+ ('10', '00ff00'),
+ ('11', 'ffff00'),
+ ('12', '0000ff'),
+ ('13', 'ff00ff'),
+ ('14', '00ffff'),
+ ('15', 'ffffff'),
# Strictly ascending.
- ('16', '000000'),
- ('17', '00005f'),
- ('18', '000087'),
- ('19', '0000af'),
- ('20', '0000d7'),
- ('21', '0000ff'),
- ('22', '005f00'),
- ('23', '005f5f'),
- ('24', '005f87'),
- ('25', '005faf'),
- ('26', '005fd7'),
- ('27', '005fff'),
- ('28', '008700'),
- ('29', '00875f'),
- ('30', '008787'),
- ('31', '0087af'),
- ('32', '0087d7'),
- ('33', '0087ff'),
- ('34', '00af00'),
- ('35', '00af5f'),
- ('36', '00af87'),
- ('37', '00afaf'),
- ('38', '00afd7'),
- ('39', '00afff'),
- ('40', '00d700'),
- ('41', '00d75f'),
- ('42', '00d787'),
- ('43', '00d7af'),
- ('44', '00d7d7'),
- ('45', '00d7ff'),
- ('46', '00ff00'),
- ('47', '00ff5f'),
- ('48', '00ff87'),
- ('49', '00ffaf'),
- ('50', '00ffd7'),
- ('51', '00ffff'),
- ('52', '5f0000'),
- ('53', '5f005f'),
- ('54', '5f0087'),
- ('55', '5f00af'),
- ('56', '5f00d7'),
- ('57', '5f00ff'),
- ('58', '5f5f00'),
- ('59', '5f5f5f'),
- ('60', '5f5f87'),
- ('61', '5f5faf'),
- ('62', '5f5fd7'),
- ('63', '5f5fff'),
- ('64', '5f8700'),
- ('65', '5f875f'),
- ('66', '5f8787'),
- ('67', '5f87af'),
- ('68', '5f87d7'),
- ('69', '5f87ff'),
- ('70', '5faf00'),
- ('71', '5faf5f'),
- ('72', '5faf87'),
- ('73', '5fafaf'),
- ('74', '5fafd7'),
- ('75', '5fafff'),
- ('76', '5fd700'),
- ('77', '5fd75f'),
- ('78', '5fd787'),
- ('79', '5fd7af'),
- ('80', '5fd7d7'),
- ('81', '5fd7ff'),
- ('82', '5fff00'),
- ('83', '5fff5f'),
- ('84', '5fff87'),
- ('85', '5fffaf'),
- ('86', '5fffd7'),
- ('87', '5fffff'),
- ('88', '870000'),
- ('89', '87005f'),
- ('90', '870087'),
- ('91', '8700af'),
- ('92', '8700d7'),
- ('93', '8700ff'),
- ('94', '875f00'),
- ('95', '875f5f'),
- ('96', '875f87'),
- ('97', '875faf'),
- ('98', '875fd7'),
- ('99', '875fff'),
+ ('16', '000000'),
+ ('17', '00005f'),
+ ('18', '000087'),
+ ('19', '0000af'),
+ ('20', '0000d7'),
+ ('21', '0000ff'),
+ ('22', '005f00'),
+ ('23', '005f5f'),
+ ('24', '005f87'),
+ ('25', '005faf'),
+ ('26', '005fd7'),
+ ('27', '005fff'),
+ ('28', '008700'),
+ ('29', '00875f'),
+ ('30', '008787'),
+ ('31', '0087af'),
+ ('32', '0087d7'),
+ ('33', '0087ff'),
+ ('34', '00af00'),
+ ('35', '00af5f'),
+ ('36', '00af87'),
+ ('37', '00afaf'),
+ ('38', '00afd7'),
+ ('39', '00afff'),
+ ('40', '00d700'),
+ ('41', '00d75f'),
+ ('42', '00d787'),
+ ('43', '00d7af'),
+ ('44', '00d7d7'),
+ ('45', '00d7ff'),
+ ('46', '00ff00'),
+ ('47', '00ff5f'),
+ ('48', '00ff87'),
+ ('49', '00ffaf'),
+ ('50', '00ffd7'),
+ ('51', '00ffff'),
+ ('52', '5f0000'),
+ ('53', '5f005f'),
+ ('54', '5f0087'),
+ ('55', '5f00af'),
+ ('56', '5f00d7'),
+ ('57', '5f00ff'),
+ ('58', '5f5f00'),
+ ('59', '5f5f5f'),
+ ('60', '5f5f87'),
+ ('61', '5f5faf'),
+ ('62', '5f5fd7'),
+ ('63', '5f5fff'),
+ ('64', '5f8700'),
+ ('65', '5f875f'),
+ ('66', '5f8787'),
+ ('67', '5f87af'),
+ ('68', '5f87d7'),
+ ('69', '5f87ff'),
+ ('70', '5faf00'),
+ ('71', '5faf5f'),
+ ('72', '5faf87'),
+ ('73', '5fafaf'),
+ ('74', '5fafd7'),
+ ('75', '5fafff'),
+ ('76', '5fd700'),
+ ('77', '5fd75f'),
+ ('78', '5fd787'),
+ ('79', '5fd7af'),
+ ('80', '5fd7d7'),
+ ('81', '5fd7ff'),
+ ('82', '5fff00'),
+ ('83', '5fff5f'),
+ ('84', '5fff87'),
+ ('85', '5fffaf'),
+ ('86', '5fffd7'),
+ ('87', '5fffff'),
+ ('88', '870000'),
+ ('89', '87005f'),
+ ('90', '870087'),
+ ('91', '8700af'),
+ ('92', '8700d7'),
+ ('93', '8700ff'),
+ ('94', '875f00'),
+ ('95', '875f5f'),
+ ('96', '875f87'),
+ ('97', '875faf'),
+ ('98', '875fd7'),
+ ('99', '875fff'),
('100', '878700'),
('101', '87875f'),
('102', '878787'),
@@ -291,38 +289,44 @@ CLUT = [ # color look-up table
('255', 'eeeeee'),
]
+
def _str2hex(hexstr):
- return int(hexstr, 16)
+ return int(hexstr, 16)
+
def _strip_hash(rgb):
- # Strip leading `#` if exists.
- if rgb.startswith('#'):
- rgb = rgb.lstrip('#')
- return rgb
+ # Strip leading `#` if exists.
+ if rgb.startswith('#'):
+ rgb = rgb.lstrip('#')
+ return rgb
+
def _create_dicts():
- short2rgb_dict = dict(CLUT)
- rgb2short_dict = {}
- for k, v in short2rgb_dict.items():
- rgb2short_dict[v] = k
- return rgb2short_dict, short2rgb_dict
+ short2rgb_dict = dict(CLUT)
+ rgb2short_dict = {}
+ for k, v in short2rgb_dict.items():
+ rgb2short_dict[v] = k
+ return rgb2short_dict, short2rgb_dict
+
def short2rgb(short):
- return SHORT2RGB_DICT[short]
+ return SHORT2RGB_DICT[short]
+
def print_all():
- """ Print all 256 xterm color codes.
+ """ Print all 256 xterm color codes.
"""
- for short, rgb in CLUT:
- sys.stdout.write('\033[48;5;%sm%s:%s' % (short, short, rgb))
- sys.stdout.write("\033[0m ")
- sys.stdout.write('\033[38;5;%sm%s:%s' % (short, short, rgb))
- sys.stdout.write("\033[0m\n")
- print "Printed all codes."
- print "You can translate a hex or 0-255 code by providing an argument."
+ for short, rgb in CLUT:
+ sys.stdout.write('\033[48;5;%sm%s:%s' % (short, short, rgb))
+ sys.stdout.write('\033[0m ')
+ sys.stdout.write('\033[38;5;%sm%s:%s' % (short, short, rgb))
+ sys.stdout.write('\033[0m\n')
+ print 'Printed all codes.'
+ print 'You can translate a hex or 0-255 code by providing an argument.'
+
def rgb2short(rgb):
- """ Find the closest xterm-256 approximation to the given RGB value.
+ """ Find the closest xterm-256 approximation to the given RGB value.
@param rgb: Hex code representing an RGB value, eg, 'abcdef'
@returns: String between 0 and 255, compatible with xterm.
>>> rgb2short('123456')
@@ -332,45 +336,51 @@ def rgb2short(rgb):
>>> rgb2short('0DADD6') # vimeo logo
('38', '00afd7')
"""
- rgb = _strip_hash(rgb)
- incs = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)
- # Break 6-char RGB code into 3 integer vals.
- parts = [ int(h, 16) for h in re.split(r'(..)(..)(..)', rgb)[1:4] ]
- res = []
- for part in parts:
- i = 0
- while i < len(incs)-1:
- s, b = incs[i], incs[i+1] # smaller, bigger
- if s <= part <= b:
- s1 = abs(s - part)
- b1 = abs(b - part)
- if s1 < b1: closest = s
- else: closest = b
- res.append(closest)
- break
- i += 1
- #print '***', res
- res = ''.join([ ('%02.x' % i) for i in res ])
- equiv = RGB2SHORT_DICT[ res ]
- #print '***', res, equiv
- return equiv, res
+ rgb = _strip_hash(rgb)
+ incs = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)
+ # Break 6-char RGB code into 3 integer vals.
+ parts = [int(h, 16) for h in re.split(r'(..)(..)(..)', rgb)[1:4]]
+ res = []
+ for part in parts:
+ i = 0
+ while i < len(incs) - 1:
+ s, b = incs[i], incs[i + 1] # smaller, bigger
+ if s <= part <= b:
+ s1 = abs(s - part)
+ b1 = abs(b - part)
+ if s1 < b1:
+ closest = s
+ else:
+ closest = b
+ res.append(closest)
+ break
+ i += 1
+ #print '***', res
+ res = ''.join([('%02.x' % i) for i in res])
+ equiv = RGB2SHORT_DICT[res]
+ #print '***', res, equiv
+ return equiv, res
+
RGB2SHORT_DICT, SHORT2RGB_DICT = _create_dicts()
#---------------------------------------------------------------------
if __name__ == '__main__':
- import doctest
- doctest.testmod()
- if len(sys.argv) == 1:
- print_all()
- raise SystemExit
- arg = sys.argv[1]
- if len(arg) < 4 and int(arg) < 256:
- rgb = short2rgb(arg)
- sys.stdout.write('xterm color \033[38;5;%sm%s\033[0m -> RGB exact \033[38;5;%sm%s\033[0m' % (arg, arg, arg, rgb))
- sys.stdout.write("\033[0m\n")
- else:
- short, rgb = rgb2short(arg)
- sys.stdout.write('RGB %s -> xterm color approx \033[38;5;%sm%s (%s)' % (arg, short, short, rgb))
- sys.stdout.write("\033[0m\n")
+ import doctest
+ doctest.testmod()
+ if len(sys.argv) == 1:
+ print_all()
+ raise SystemExit
+ arg = sys.argv[1]
+ if len(arg) < 4 and int(arg) < 256:
+ rgb = short2rgb(arg)
+ sys.stdout.write(
+ 'xterm color \033[38;5;%sm%s\033[0m -> RGB exact \033[38;5;%sm%s\033[0m'
+ % (arg, arg, arg, rgb))
+ sys.stdout.write('\033[0m\n')
+ else:
+ short, rgb = rgb2short(arg)
+ sys.stdout.write('RGB %s -> xterm color approx \033[38;5;%sm%s (%s)' %
+ (arg, short, short, rgb))
+ sys.stdout.write('\033[0m\n')
diff --git a/utils/command_executer.py b/utils/command_executer.py
index 70ff3aff..a7131ef2 100644
--- a/utils/command_executer.py
+++ b/utils/command_executer.py
@@ -1,7 +1,6 @@
# Copyright 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Utilities to run commands in outside/inside chroot and on the board."""
import getpass
@@ -19,7 +18,8 @@ import misc
mock_default = False
-LOG_LEVEL = ("none", "quiet", "average", "verbose")
+LOG_LEVEL = ('none', 'quiet', 'average', 'verbose')
+
def InitCommandExecuter(mock=False):
global mock_default
@@ -27,7 +27,7 @@ def InitCommandExecuter(mock=False):
mock_default = mock
-def GetCommandExecuter(logger_to_set=None, mock=False, log_level="verbose"):
+def GetCommandExecuter(logger_to_set=None, mock=False, log_level='verbose'):
# If the default is a mock executer, always return one.
if mock_default or mock:
return MockCommandExecuter(log_level, logger_to_set)
@@ -40,7 +40,7 @@ class CommandExecuter(object):
def __init__(self, log_level, logger_to_set=None):
self.log_level = log_level
- if log_level == "none":
+ if log_level == 'none':
self.logger = None
else:
if logger_to_set is not None:
@@ -54,8 +54,12 @@ class CommandExecuter(object):
def SetLogLevel(self, log_level):
self.log_level = log_level
- def RunCommandGeneric(self, cmd, return_output=False, machine=None,
- username=None, command_terminator=None,
+ def RunCommandGeneric(self,
+ cmd,
+ return_output=False,
+ machine=None,
+ username=None,
+ command_terminator=None,
command_timeout=None,
terminated_timeout=10,
print_to_console=True):
@@ -66,34 +70,36 @@ class CommandExecuter(object):
cmd = str(cmd)
- if self.log_level == "quiet":
+ if self.log_level == 'quiet':
print_to_console = False
- if self.log_level == "verbose":
+ if self.log_level == 'verbose':
self.logger.LogCmd(cmd, machine, username, print_to_console)
elif self.logger:
self.logger.LogCmdToFileOnly(cmd, machine, username)
if command_terminator and command_terminator.IsTerminated():
if self.logger:
- self.logger.LogError("Command was terminated!", print_to_console)
- return (1, "", "")
+ self.logger.LogError('Command was terminated!', print_to_console)
+ return (1, '', '')
if machine is not None:
- user = ""
+ user = ''
if username is not None:
- user = username + "@"
+ user = username + '@'
cmd = "ssh -t -t %s%s -- '%s'" % (user, machine, cmd)
# We use setsid so that the child will have a different session id
# and we can easily kill the process group. This is also important
# because the child will be disassociated from the parent terminal.
# In this way the child cannot mess the parent's terminal.
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE, shell=True,
+ p = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=True,
preexec_fn=os.setsid)
- full_stdout = ""
- full_stderr = ""
+ full_stdout = ''
+ full_stderr = ''
# Pull output from pipes, send it to file/stdout/string
out = err = None
@@ -110,9 +116,8 @@ class CommandExecuter(object):
if command_terminator and command_terminator.IsTerminated():
os.killpg(os.getpgid(p.pid), signal.SIGTERM)
if self.logger:
- self.logger.LogError("Command received termination request. "
- "Killed child process group.",
- print_to_console)
+ self.logger.LogError('Command received termination request. '
+ 'Killed child process group.', print_to_console)
break
l = my_poll.poll(100)
@@ -123,7 +128,7 @@ class CommandExecuter(object):
full_stdout += out
if self.logger:
self.logger.LogCommandOutput(out, print_to_console)
- if out == "":
+ if out == '':
pipes.remove(p.stdout)
my_poll.unregister(p.stdout)
if fd == p.stderr.fileno():
@@ -132,7 +137,7 @@ class CommandExecuter(object):
full_stderr += err
if self.logger:
self.logger.LogCommandError(err, print_to_console)
- if err == "":
+ if err == '':
pipes.remove(p.stderr)
my_poll.unregister(p.stderr)
@@ -142,27 +147,27 @@ class CommandExecuter(object):
elif (terminated_timeout is not None and
time.time() - terminated_time > terminated_timeout):
if self.logger:
- self.logger.LogWarning("Timeout of %s seconds reached since "
- "process termination."
- % terminated_timeout, print_to_console)
+ self.logger.LogWarning('Timeout of %s seconds reached since '
+ 'process termination.' % terminated_timeout,
+ print_to_console)
break
if (command_timeout is not None and
time.time() - started_time > command_timeout):
os.killpg(os.getpgid(p.pid), signal.SIGTERM)
if self.logger:
- self.logger.LogWarning("Timeout of %s seconds reached since process"
- "started. Killed child process group."
- % command_timeout, print_to_console)
+ self.logger.LogWarning('Timeout of %s seconds reached since process'
+ 'started. Killed child process group.' %
+ command_timeout, print_to_console)
break
- if out == err == "":
+ if out == err == '':
break
p.wait()
if return_output:
return (p.returncode, full_stdout, full_stderr)
- return (p.returncode, "", "")
+ return (p.returncode, '', '')
def RunCommand(self, *args, **kwargs):
"""Run a command.
@@ -189,51 +194,54 @@ class CommandExecuter(object):
return self.RunCommandGeneric(*args, **kwargs)
def RemoteAccessInitCommand(self, chromeos_root, machine):
- command = ""
- command += "\nset -- --remote=" + machine
- command += "\n. " + chromeos_root + "/src/scripts/common.sh"
- command += "\n. " + chromeos_root + "/src/scripts/remote_access.sh"
- command += "\nTMP=$(mktemp -d)"
+ command = ''
+ command += '\nset -- --remote=' + machine
+ command += '\n. ' + chromeos_root + '/src/scripts/common.sh'
+ command += '\n. ' + chromeos_root + '/src/scripts/remote_access.sh'
+ command += '\nTMP=$(mktemp -d)'
command += "\nFLAGS \"$@\" || exit 1"
- command += "\nremote_access_init"
+ command += '\nremote_access_init'
return command
def WriteToTempShFile(self, contents):
- handle, command_file = tempfile.mkstemp(prefix=os.uname()[1],
- suffix=".sh")
- os.write(handle, "#!/bin/bash\n")
+ handle, command_file = tempfile.mkstemp(prefix=os.uname()[1], suffix='.sh')
+ os.write(handle, '#!/bin/bash\n')
os.write(handle, contents)
os.close(handle)
return command_file
-
def CrosLearnBoard(self, chromeos_root, machine):
command = self.RemoteAccessInitCommand(chromeos_root, machine)
- command += "\nlearn_board"
- command += "\necho ${FLAGS_board}"
+ command += '\nlearn_board'
+ command += '\necho ${FLAGS_board}'
retval, output, _ = self.RunCommandWOutput(command)
if self.logger:
- self.logger.LogFatalIf(retval, "learn_board command failed")
+ self.logger.LogFatalIf(retval, 'learn_board command failed')
elif retval:
sys.exit(1)
return output.split()[-1]
- def CrosRunCommandGeneric(self, cmd, return_output=False, machine=None,
+ def CrosRunCommandGeneric(self,
+ cmd,
+ return_output=False,
+ machine=None,
command_terminator=None,
- chromeos_root=None, command_timeout=None,
- terminated_timeout=10, print_to_console=True):
+ chromeos_root=None,
+ command_timeout=None,
+ terminated_timeout=10,
+ print_to_console=True):
"""Run a command on a ChromeOS box.
Returns triplet (returncode, stdout, stderr).
"""
- if self.log_level != "verbose":
+ if self.log_level != 'verbose':
print_to_console = False
if self.logger:
self.logger.LogCmd(cmd, print_to_console=print_to_console)
- self.logger.LogFatalIf(not machine, "No machine provided!")
- self.logger.LogFatalIf(not chromeos_root, "chromeos_root not given!")
+ self.logger.LogFatalIf(not machine, 'No machine provided!')
+ self.logger.LogFatalIf(not chromeos_root, 'chromeos_root not given!')
else:
if not chromeos_root or not machine:
sys.exit(1)
@@ -241,7 +249,8 @@ class CommandExecuter(object):
# Write all commands to a file.
command_file = self.WriteToTempShFile(cmd)
- retval = self.CopyFiles(command_file, command_file,
+ retval = self.CopyFiles(command_file,
+ command_file,
dest_machine=machine,
command_terminator=command_terminator,
chromeos_root=chromeos_root,
@@ -250,24 +259,25 @@ class CommandExecuter(object):
print_to_console=print_to_console)
if retval:
if self.logger:
- self.logger.LogError("Could not run remote command on machine."
- " Is the machine up?")
- return (retval, "", "")
+ self.logger.LogError('Could not run remote command on machine.'
+ ' Is the machine up?')
+ return (retval, '', '')
command = self.RemoteAccessInitCommand(chromeos_root, machine)
- command += "\nremote_sh bash %s" % command_file
+ command += '\nremote_sh bash %s' % command_file
command += "\nl_retval=$?; echo \"$REMOTE_OUT\"; exit $l_retval"
- retval = self.RunCommandGeneric(command, return_output,
+ retval = self.RunCommandGeneric(command,
+ return_output,
command_terminator=command_terminator,
command_timeout=command_timeout,
terminated_timeout=terminated_timeout,
print_to_console=print_to_console)
if return_output:
- connect_signature = ("Initiating first contact with remote host\n" +
- "Connection OK\n")
+ connect_signature = (
+ 'Initiating first contact with remote host\n' + 'Connection OK\n')
connect_signature_re = re.compile(connect_signature)
modded_retval = list(retval)
- modded_retval[1] = connect_signature_re.sub("", retval[1])
+ modded_retval[1] = connect_signature_re.sub('', retval[1])
return modded_retval
return retval
@@ -295,29 +305,33 @@ class CommandExecuter(object):
kwargs['return_output'] = True
return self.CrosRunCommandGeneric(*args, **kwargs)
- def ChrootRunCommandGeneric(self, chromeos_root, command, return_output=False,
- command_terminator=None, command_timeout=None,
- terminated_timeout=10, print_to_console=True,
- cros_sdk_options=""):
+ def ChrootRunCommandGeneric(self,
+ chromeos_root,
+ command,
+ return_output=False,
+ command_terminator=None,
+ command_timeout=None,
+ terminated_timeout=10,
+ print_to_console=True,
+ cros_sdk_options=''):
"""Runs a command within the chroot.
Returns triplet (returncode, stdout, stderr).
"""
-
- if self.log_level != "verbose":
+ if self.log_level != 'verbose':
print_to_console = False
if self.logger:
self.logger.LogCmd(command, print_to_console=print_to_console)
- handle, command_file = tempfile.mkstemp(dir=os.path.join(chromeos_root,
- "src/scripts"),
- suffix=".sh",
- prefix="in_chroot_cmd")
- os.write(handle, "#!/bin/bash\n")
+ handle, command_file = tempfile.mkstemp(
+ dir=os.path.join(chromeos_root, 'src/scripts'),
+ suffix='.sh',
+ prefix='in_chroot_cmd')
+ os.write(handle, '#!/bin/bash\n')
os.write(handle, command)
- os.write(handle, "\n")
+ os.write(handle, '\n')
os.close(handle)
os.chmod(command_file, 0777)
@@ -326,19 +340,18 @@ class CommandExecuter(object):
# the chroot already exists. We want the final returned output to skip
# the output from chroot creation steps.
if return_output:
- ret = self.RunCommand("cd %s; cros_sdk %s -- true" %
+ ret = self.RunCommand('cd %s; cros_sdk %s -- true' %
(chromeos_root, cros_sdk_options))
if ret:
- return (ret, "", "")
+ return (ret, '', '')
# Run command_file inside the chroot, making sure that any "~" is expanded
# by the shell inside the chroot, not outside.
command = ("cd %s; cros_sdk %s -- bash -c '%s/%s'" %
- (chromeos_root,
- cros_sdk_options,
- misc.CHROMEOS_SCRIPTS_DIR,
+ (chromeos_root, cros_sdk_options, misc.CHROMEOS_SCRIPTS_DIR,
os.path.basename(command_file)))
- ret = self.RunCommandGeneric(command, return_output,
+ ret = self.RunCommandGeneric(command,
+ return_output,
command_terminator=command_terminator,
command_timeout=command_timeout,
terminated_timeout=terminated_timeout,
@@ -372,30 +385,43 @@ class CommandExecuter(object):
kwargs['return_output'] = True
return self.ChrootRunCommandGeneric(*args, **kwargs)
- def RunCommands(self, cmdlist, machine=None,
- username=None, command_terminator=None):
- cmd = " ;\n" .join(cmdlist)
- return self.RunCommand(cmd, machine=machine, username=username,
+ def RunCommands(self,
+ cmdlist,
+ machine=None,
+ username=None,
+ command_terminator=None):
+ cmd = ' ;\n'.join(cmdlist)
+ return self.RunCommand(cmd,
+ machine=machine,
+ username=username,
command_terminator=command_terminator)
- def CopyFiles(self, src, dest, src_machine=None, dest_machine=None,
- src_user=None, dest_user=None, recursive=True,
+ def CopyFiles(self,
+ src,
+ dest,
+ src_machine=None,
+ dest_machine=None,
+ src_user=None,
+ dest_user=None,
+ recursive=True,
command_terminator=None,
- chromeos_root=None, src_cros=False, dest_cros=False,
+ chromeos_root=None,
+ src_cros=False,
+ dest_cros=False,
print_to_console=True):
src = os.path.expanduser(src)
dest = os.path.expanduser(dest)
if recursive:
- src = src + "/"
- dest = dest + "/"
+ src = src + '/'
+ dest = dest + '/'
if src_cros == True or dest_cros == True:
if self.logger:
self.logger.LogFatalIf(src_cros == dest_cros,
- "Only one of src_cros and desc_cros can "
- "be True.")
- self.logger.LogFatalIf(not chromeos_root, "chromeos_root not given!")
+ 'Only one of src_cros and desc_cros can '
+ 'be True.')
+ self.logger.LogFatalIf(not chromeos_root, 'chromeos_root not given!')
elif src_cros == dest_cros or not chromeos_root:
sys.exit(1)
if src_cros == True:
@@ -404,43 +430,46 @@ class CommandExecuter(object):
cros_machine = dest_machine
command = self.RemoteAccessInitCommand(chromeos_root, cros_machine)
- ssh_command = ("ssh -p ${FLAGS_ssh_port}" +
- " -o StrictHostKeyChecking=no" +
- " -o UserKnownHostsFile=$(mktemp)" +
- " -i $TMP_PRIVATE_KEY")
+ ssh_command = (
+ 'ssh -p ${FLAGS_ssh_port}' + ' -o StrictHostKeyChecking=no' +
+ ' -o UserKnownHostsFile=$(mktemp)' + ' -i $TMP_PRIVATE_KEY')
rsync_prefix = "\nrsync -r -e \"%s\" " % ssh_command
if dest_cros == True:
- command += rsync_prefix + "%s root@%s:%s" % (src, dest_machine, dest)
+ command += rsync_prefix + '%s root@%s:%s' % (src, dest_machine, dest)
return self.RunCommand(command,
machine=src_machine,
username=src_user,
command_terminator=command_terminator,
print_to_console=print_to_console)
else:
- command += rsync_prefix + "root@%s:%s %s" % (src_machine, src, dest)
+ command += rsync_prefix + 'root@%s:%s %s' % (src_machine, src, dest)
return self.RunCommand(command,
machine=dest_machine,
username=dest_user,
command_terminator=command_terminator,
print_to_console=print_to_console)
-
if dest_machine == src_machine:
- command = "rsync -a %s %s" % (src, dest)
+ command = 'rsync -a %s %s' % (src, dest)
else:
if src_machine is None:
src_machine = os.uname()[1]
src_user = getpass.getuser()
- command = "rsync -a %s@%s:%s %s" % (src_user, src_machine, src, dest)
+ command = 'rsync -a %s@%s:%s %s' % (src_user, src_machine, src, dest)
return self.RunCommand(command,
machine=dest_machine,
username=dest_user,
command_terminator=command_terminator,
print_to_console=print_to_console)
-
- def RunCommand2(self, cmd, cwd=None, line_consumer=None,
- timeout=None, shell=True, join_stderr=True, env=None):
+ def RunCommand2(self,
+ cmd,
+ cwd=None,
+ line_consumer=None,
+ timeout=None,
+ shell=True,
+ join_stderr=True,
+ env=None):
"""Run the command with an extra feature line_consumer.
This version allow developers to provide a line_consumer which will be
@@ -497,10 +526,11 @@ class CommandExecuter(object):
def notify_line(self):
p = self._buf.find('\n')
while p >= 0:
- self._line_consumer(line=self._buf[:p+1], output=self._name,
+ self._line_consumer(line=self._buf[:p + 1],
+ output=self._name,
pobject=self._pobject)
if p < len(self._buf) - 1:
- self._buf = self._buf[p+1:]
+ self._buf = self._buf[p + 1:]
p = self._buf.find('\n')
else:
self._buf = ''
@@ -510,11 +540,12 @@ class CommandExecuter(object):
def notify_eos(self):
# Notify end of stream. The last line may not end with a '\n'.
if self._buf != '':
- self._line_consumer(line=self._buf, output=self._name,
+ self._line_consumer(line=self._buf,
+ output=self._name,
pobject=self._pobject)
self._buf = ''
- if self.log_level == "verbose":
+ if self.log_level == 'verbose':
self.logger.LogCmd(cmd)
elif self.logger:
self.logger.LogCmdToFileOnly(cmd)
@@ -524,8 +555,13 @@ class CommandExecuter(object):
# because the child will be disassociated from the parent terminal.
# In this way the child cannot mess the parent's terminal.
pobject = subprocess.Popen(
- cmd, cwd=cwd, bufsize=1024, env=env, shell=shell,
- universal_newlines=True, stdout=subprocess.PIPE,
+ cmd,
+ cwd=cwd,
+ bufsize=1024,
+ env=env,
+ shell=shell,
+ universal_newlines=True,
+ stdout=subprocess.PIPE,
stderr=subprocess.STDOUT if join_stderr else subprocess.PIPE,
preexec_fn=os.setsid)
@@ -540,8 +576,7 @@ class CommandExecuter(object):
if not join_stderr:
errfd = pobject.stderr.fileno()
poll.register(errfd, select.POLLIN | select.POLLPRI)
- handlermap[errfd] = StreamHandler(
- pobject, errfd, 'stderr', line_consumer)
+ handlermap[errfd] = StreamHandler(pobject, errfd, 'stderr', line_consumer)
while len(handlermap):
readables = poll.poll(300)
for (fd, evt) in readables:
@@ -561,22 +596,28 @@ class CommandExecuter(object):
class MockCommandExecuter(CommandExecuter):
"""Mock class for class CommandExecuter."""
+
def __init__(self, log_level, logger_to_set=None):
super(MockCommandExecuter, self).__init__(log_level, logger_to_set)
- def RunCommandGeneric(self, cmd, return_output=False, machine=None,
- username=None, command_terminator=None,
- command_timeout=None, terminated_timeout=10,
+ def RunCommandGeneric(self,
+ cmd,
+ return_output=False,
+ machine=None,
+ username=None,
+ command_terminator=None,
+ command_timeout=None,
+ terminated_timeout=10,
print_to_console=True):
assert not command_timeout
cmd = str(cmd)
if machine is None:
- machine = "localhost"
+ machine = 'localhost'
if username is None:
- username = "current"
- logger.GetLogger().LogCmd("(Mock) " + cmd, machine,
- username, print_to_console)
- return (0, "", "")
+ username = 'current'
+ logger.GetLogger().LogCmd('(Mock) ' + cmd, machine, username,
+ print_to_console)
+ return (0, '', '')
def RunCommand(self, *args, **kwargs):
assert 'return_output' not in kwargs
@@ -588,8 +629,10 @@ class MockCommandExecuter(CommandExecuter):
kwargs['return_output'] = True
return self.RunCommandGeneric(*args, **kwargs)
+
class CommandTerminator(object):
"""Object to request termination of a command in execution."""
+
def __init__(self):
self.terminated = False
diff --git a/utils/command_executer_unittest.py b/utils/command_executer_unittest.py
index e2242d0e..b7265da4 100755
--- a/utils/command_executer_unittest.py
+++ b/utils/command_executer_unittest.py
@@ -1,5 +1,4 @@
#!/usr/bin/python
-
"""Unittest for command_executer.py."""
import time
import unittest
@@ -8,15 +7,17 @@ import command_executer
class CommandExecuterTest(unittest.TestCase):
+
def testTimeout(self):
timeout = 1
- logging_level = "average"
+ logging_level = 'average'
ce = command_executer.CommandExecuter(logging_level)
start = time.time()
- command = "sleep 20"
+ command = 'sleep 20'
ce.RunCommand(command, command_timeout=timeout, terminated_timeout=timeout)
end = time.time()
self.assertTrue(round(end - start) == timeout)
-if __name__ == "__main__":
+
+if __name__ == '__main__':
unittest.main()
diff --git a/utils/constants.py b/utils/constants.py
index abf9ff11..827e9233 100644
--- a/utils/constants.py
+++ b/utils/constants.py
@@ -1,7 +1,4 @@
-#!/usr/bin/python
-#
# Copyright 2010 Google Inc. All Rights Reserved.
-
"""Generic constants used accross modules.
"""
diff --git a/utils/email_sender.py b/utils/email_sender.py
index 1f4b7dfb..5ba1d21c 100755
--- a/utils/email_sender.py
+++ b/utils/email_sender.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""Utilities to send email either through SMTP or SendGMR."""
from email import Encoders
@@ -14,10 +13,13 @@ import tempfile
from utils import command_executer
+
class EmailSender(object):
"""Utility class to send email through SMTP or SendGMR."""
+
class Attachment(object):
"""Small class to keep track of attachment info."""
+
def __init__(self, name, content):
self.name = name
self.content = content
@@ -29,25 +31,18 @@ class EmailSender(object):
email_cc=None,
email_bcc=None,
email_from=None,
- msg_type="plain",
+ msg_type='plain',
attachments=None):
"""Choose appropriate email method and call it."""
- if os.path.exists("/usr/bin/sendgmr"):
+ if os.path.exists('/usr/bin/sendgmr'):
self.SendGMREmail(email_to, subject, text_to_send, email_cc, email_bcc,
email_from, msg_type, attachments)
else:
self.SendSMTPEmail(email_to, subject, text_to_send, email_cc, email_bcc,
email_from, msg_type, attachments)
- def SendSMTPEmail(self,
- email_to,
- subject,
- text_to_send,
- email_cc,
- email_bcc,
- email_from,
- msg_type,
- attachments):
+ def SendSMTPEmail(self, email_to, subject, text_to_send, email_cc, email_bcc,
+ email_from, msg_type, attachments):
"""Send email via standard smtp mail."""
# Email summary to the current user.
msg = MIMEMultipart()
@@ -55,53 +50,46 @@ class EmailSender(object):
if not email_from:
email_from = os.path.basename(__file__)
- msg["To"] = ",".join(email_to)
- msg["Subject"] = subject
+ msg['To'] = ','.join(email_to)
+ msg['Subject'] = subject
if email_from:
- msg["From"] = email_from
+ msg['From'] = email_from
if email_cc:
- msg["CC"] = ",".join(email_cc)
+ msg['CC'] = ','.join(email_cc)
email_to += email_cc
if email_bcc:
- msg["BCC"] = ",".join(email_bcc)
+ msg['BCC'] = ','.join(email_bcc)
email_to += email_bcc
msg.attach(MIMEText(text_to_send, msg_type))
if attachments:
for attachment in attachments:
- part = MIMEBase("application", "octet-stream")
+ part = MIMEBase('application', 'octet-stream')
part.set_payload(attachment.content)
Encoders.encode_base64(part)
- part.add_header("Content-Disposition", "attachment; filename=\"%s\"" %
- attachment.name)
+ part.add_header('Content-Disposition',
+ "attachment; filename=\"%s\"" % attachment.name)
msg.attach(part)
# Send the message via our own SMTP server, but don't include the
# envelope header.
- s = smtplib.SMTP("localhost")
+ s = smtplib.SMTP('localhost')
s.sendmail(email_from, email_to, msg.as_string())
s.quit()
- def SendGMREmail(self,
- email_to,
- subject,
- text_to_send,
- email_cc,
- email_bcc,
- email_from,
- msg_type,
- attachments):
+ def SendGMREmail(self, email_to, subject, text_to_send, email_cc, email_bcc,
+ email_from, msg_type, attachments):
"""Send email via sendgmr program."""
- ce = command_executer.GetCommandExecuter(log_level="none")
+ ce = command_executer.GetCommandExecuter(log_level='none')
if not email_from:
email_from = os.path.basename(__file__)
- to_list = ",".join(email_to)
+ to_list = ','.join(email_to)
if not text_to_send:
- text_to_send = "Empty message body."
+ text_to_send = 'Empty message body.'
body_fd, body_filename = tempfile.mkstemp()
to_be_deleted = [body_filename]
@@ -115,34 +103,33 @@ class EmailSender(object):
# character, you need to double it. So...
subject = subject.replace("'", "'\\''")
- if msg_type == "html":
+ if msg_type == 'html':
command = ("sendgmr --to='%s' --subject='%s' --html_file='%s' "
- "--body_file=/dev/null" %
- (to_list, subject, body_filename))
+ '--body_file=/dev/null' % (to_list, subject, body_filename))
else:
command = ("sendgmr --to='%s' --subject='%s' --body_file='%s'" %
(to_list, subject, body_filename))
if email_from:
- command += " --from=%s" % email_from
+ command += ' --from=%s' % email_from
if email_cc:
- cc_list = ",".join(email_cc)
+ cc_list = ','.join(email_cc)
command += " --cc='%s'" % cc_list
if email_bcc:
- bcc_list = ",".join(email_bcc)
+ bcc_list = ','.join(email_bcc)
command += " --bcc='%s'" % bcc_list
if attachments:
attachment_files = []
for attachment in attachments:
- if "<html>" in attachment.content:
- report_suffix = "_report.html"
+ if '<html>' in attachment.content:
+ report_suffix = '_report.html'
else:
- report_suffix = "_report.txt"
+ report_suffix = '_report.txt'
fd, fname = tempfile.mkstemp(suffix=report_suffix)
os.write(fd, attachment.content)
os.close(fd)
attachment_files.append(fname)
- files = ",".join(attachment_files)
+ files = ','.join(attachment_files)
command += " --attachment_files='%s'" % files
to_be_deleted += attachment_files
diff --git a/utils/file_utils.py b/utils/file_utils.py
index bb431f19..584f274e 100644
--- a/utils/file_utils.py
+++ b/utils/file_utils.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright 2011 Google Inc. All Rights Reserved.
@@ -24,30 +23,28 @@ class FileUtils(object):
cls._instance = super(FileUtils, cls).__new__(MockFileUtils, *args,
**kwargs)
else:
- cls._instance = super(FileUtils, cls).__new__(cls, *args,
- **kwargs)
+ cls._instance = super(FileUtils, cls).__new__(cls, *args, **kwargs)
return cls._instance
- def Md5File(self, filename, log_level="verbose", block_size=2 ** 10):
- command = "md5sum %s" % filename
+ def Md5File(self, filename, log_level='verbose', block_size=2**10):
+ command = 'md5sum %s' % filename
ce = command_executer.GetCommandExecuter(log_level=log_level)
ret, out, err = ce.RunCommandWOutput(command)
if ret:
- raise Exception("Could not run md5sum on: %s" % filename)
+ raise Exception('Could not run md5sum on: %s' % filename)
return out.strip().split()[0]
def CanonicalizeChromeOSRoot(self, chromeos_root):
chromeos_root = os.path.expanduser(chromeos_root)
- if os.path.isdir(os.path.join(chromeos_root,
- "chromite")):
+ if os.path.isdir(os.path.join(chromeos_root, 'chromite')):
return chromeos_root
else:
return None
def ChromeOSRootFromImage(self, chromeos_image):
- chromeos_root = os.path.join(os.path.dirname(chromeos_image),
- "../../../../..")
+ chromeos_root = os.path.join(
+ os.path.dirname(chromeos_image), '../../../../..')
return self.CanonicalizeChromeOSRoot(chromeos_root)
def MkDirP(self, path):
@@ -63,21 +60,21 @@ class FileUtils(object):
shutil.rmtree(path, ignore_errors=True)
def WriteFile(self, path, contents):
- with open(path, "wb") as f:
+ with open(path, 'wb') as f:
f.write(contents)
class MockFileUtils(FileUtils):
"""Mock class for file utilities."""
- def Md5File(self, filename, block_size=2 ** 10):
- return "d41d8cd98f00b204e9800998ecf8427e"
+ def Md5File(self, filename, block_size=2**10):
+ return 'd41d8cd98f00b204e9800998ecf8427e'
def CanonicalizeChromeOSRoot(self, chromeos_root):
- return "/tmp/chromeos_root"
+ return '/tmp/chromeos_root'
def ChromeOSRootFromImage(self, chromeos_image):
- return "/tmp/chromeos_root"
+ return '/tmp/chromeos_root'
def RmDir(self, path):
pass
diff --git a/utils/html_tools.py b/utils/html_tools.py
index 5622e2e0..dc342f9e 100644
--- a/utils/html_tools.py
+++ b/utils/html_tools.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Copyright 2010 Google Inc. All Rights Reserved.
#
@@ -34,59 +32,59 @@ function displayRow(id){
def GetListHeader():
- return "<ul>"
+ return '<ul>'
def GetListItem(text):
- return "<li>%s</li>" % text
+ return '<li>%s</li>' % text
def GetListFooter():
- return "</ul>"
+ return '</ul>'
def GetList(items):
- return "<ul>%s</ul>" % "".join(["<li>%s</li>" % item for item in items])
+ return '<ul>%s</ul>' % ''.join(['<li>%s</li>' % item for item in items])
def GetParagraph(text):
- return "<p>%s</p>" % text
+ return '<p>%s</p>' % text
def GetFooter():
- return "</body>\n</html>"
+ return '</body>\n</html>'
def GetHeader(text, h=1):
- return "<h%s>%s</h%s>" % (h, text, h)
+ return '<h%s>%s</h%s>' % (h, text, h)
def GetTableHeader(headers):
- row = "".join(["<th>%s</th>" % header for header in headers])
- return "<table><tr>%s</tr>" % row
+ row = ''.join(['<th>%s</th>' % header for header in headers])
+ return '<table><tr>%s</tr>' % row
def GetTableFooter():
- return "</table>"
+ return '</table>'
def FormatLineBreaks(text):
- return text.replace("\n", "<br/>")
+ return text.replace('\n', '<br/>')
def GetTableCell(text):
- return "<td>%s</td>" % FormatLineBreaks(str(text))
+ return '<td>%s</td>' % FormatLineBreaks(str(text))
def GetTableRow(columns):
- return "<tr>%s</tr>" % "\n".join([GetTableCell(column) for column in columns])
+ return '<tr>%s</tr>' % '\n'.join([GetTableCell(column) for column in columns])
def GetTable(headers, rows):
table = [GetTableHeader(headers)]
table.extend([GetTableRow(row) for row in rows])
table.append(GetTableFooter())
- return "\n".join(table)
+ return '\n'.join(table)
def GetLink(link, text):
diff --git a/utils/locks.py b/utils/locks.py
index f846764b..641fe006 100644
--- a/utils/locks.py
+++ b/utils/locks.py
@@ -1,10 +1,7 @@
-#!/usr/bin/python
# Copyright 2015 The Chromium OS Authors. All rights reserved.
-
"""Utilities for locking machines."""
-
import os
import sys
import time
@@ -19,7 +16,7 @@ def AcquireLock(machines, chromeos_root, timeout=1200):
"""Acquire lock for machine(s) with timeout, using AFE server for locking."""
start_time = time.time()
locked = True
- sleep_time = min(10, timeout/10.0)
+ sleep_time = min(10, timeout / 10.0)
while True:
try:
afe_lock_machine.AFELockManager(machines, False, chromeos_root,
@@ -29,8 +26,9 @@ def AcquireLock(machines, chromeos_root, timeout=1200):
if time.time() - start_time > timeout:
locked = False
logger.GetLogger().LogWarning(
- "Could not acquire lock on this machine: {0} within {1} seconds. %s"
- .format(repr(machines), timeout, str(e)))
+ 'Could not acquire lock on this machine: {0} within {1} seconds. %s'
+ .format(
+ repr(machines), timeout, str(e)))
break
time.sleep(sleep_time)
return locked
@@ -44,6 +42,6 @@ def ReleaseLock(machines, chromeos_root):
None).UpdateMachines(False)
except Exception as e:
unlocked = False
- logger.GetLogger().LogWarning("Could not unlock %s. %s" %
+ logger.GetLogger().LogWarning('Could not unlock %s. %s' %
(repr(machines), str(e)))
return unlocked
diff --git a/utils/logger.py b/utils/logger.py
index 15804c0a..ebcd7b63 100644
--- a/utils/logger.py
+++ b/utils/logger.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Copyright 2010 Google Inc. All Rights Reserved.
# System modules
@@ -7,6 +5,7 @@ import os.path
import sys
import traceback
+
#TODO(yunlian@google.com): Use GetRoot from misc
def GetRoot(scr_name):
"""Break up pathname into (dir+name)."""
@@ -19,7 +18,7 @@ class Logger(object):
MAX_LOG_FILES = 10
- def __init__ (self, rootdir, basefilename, print_console, subdir="logs"):
+ def __init__(self, rootdir, basefilename, print_console, subdir='logs'):
logdir = os.path.join(rootdir, subdir)
basename = os.path.join(logdir, basefilename)
@@ -33,10 +32,10 @@ class Logger(object):
self._CreateLogFileHandles(basename)
- self._WriteTo(self.cmdfd, " ".join(sys.argv), True)
+ self._WriteTo(self.cmdfd, ' '.join(sys.argv), True)
def _AddSuffix(self, basename, suffix):
- return "%s%s" % (basename, suffix)
+ return '%s%s' % (basename, suffix)
def _FindSuffix(self, basename):
timestamps = []
@@ -44,7 +43,7 @@ class Logger(object):
for i in range(self.MAX_LOG_FILES):
suffix = str(i)
suffixed_basename = self._AddSuffix(basename, suffix)
- cmd_file = "%s.cmd" % suffixed_basename
+ cmd_file = '%s.cmd' % suffixed_basename
if not os.path.exists(cmd_file):
found_suffix = suffix
break
@@ -62,32 +61,32 @@ class Logger(object):
def _CreateLogFileHandle(self, name):
fd = None
try:
- fd = open(name, "w")
+ fd = open(name, 'w')
except IOError:
- print "Warning: could not open %s for writing." % name
+ print 'Warning: could not open %s for writing.' % name
return fd
def _CreateLogFileHandles(self, basename):
suffix = self._FindSuffix(basename)
suffixed_basename = self._AddSuffix(basename, suffix)
- self.cmdfd = self._CreateLogFileHandle("%s.cmd" % suffixed_basename)
- self.stdout = self._CreateLogFileHandle("%s.out" % suffixed_basename)
- self.stderr = self._CreateLogFileHandle("%s.err" % suffixed_basename)
+ self.cmdfd = self._CreateLogFileHandle('%s.cmd' % suffixed_basename)
+ self.stdout = self._CreateLogFileHandle('%s.out' % suffixed_basename)
+ self.stderr = self._CreateLogFileHandle('%s.err' % suffixed_basename)
self._CreateLogFileSymlinks(basename, suffixed_basename)
# Symlink unsuffixed basename to currently suffixed one.
def _CreateLogFileSymlinks(self, basename, suffixed_basename):
try:
- for extension in ["cmd", "out", "err"]:
- src_file = "%s.%s" % (os.path.basename(suffixed_basename), extension)
- dest_file = "%s.%s" % (basename, extension)
+ for extension in ['cmd', 'out', 'err']:
+ src_file = '%s.%s' % (os.path.basename(suffixed_basename), extension)
+ dest_file = '%s.%s' % (basename, extension)
if os.path.exists(dest_file):
os.remove(dest_file)
os.symlink(src_file, dest_file)
except Exception as ex:
- print "Exception while creating symlinks: %s" % str(ex)
+ print 'Exception while creating symlinks: %s' % str(ex)
def _WriteTo(self, fd, msg, flush):
if fd:
@@ -99,19 +98,19 @@ class Logger(object):
term_fd = self._GetStdout(print_to_console)
if (term_fd):
term_fd.flush()
- term_fd.write(". ")
+ term_fd.write('. ')
term_fd.flush()
def LogAppendDot(self, print_to_console=True):
term_fd = self._GetStdout(print_to_console)
if (term_fd):
- term_fd.write(". ")
+ term_fd.write('. ')
term_fd.flush()
def LogEndDots(self, print_to_console=True):
term_fd = self._GetStdout(print_to_console)
if (term_fd):
- term_fd.write("\n")
+ term_fd.write('\n')
term_fd.flush()
def _LogMsg(self, file_fd, term_fd, msg, flush=True):
@@ -130,42 +129,42 @@ class Logger(object):
return sys.stderr
return None
- def LogCmdToFileOnly (self, cmd, machine="", user=None):
+ def LogCmdToFileOnly(self, cmd, machine='', user=None):
if not self.cmdfd:
return
- host = ("%s@%s" % (user, machine)) if user else machine
+ host = ('%s@%s' % (user, machine)) if user else machine
flush = True
- cmd_string = "CMD (%s): %s\n" % (host, cmd)
+ cmd_string = 'CMD (%s): %s\n' % (host, cmd)
self._WriteTo(self.cmdfd, cmd_string, flush)
- def LogCmd(self, cmd, machine="", user=None, print_to_console=True):
+ def LogCmd(self, cmd, machine='', user=None, print_to_console=True):
if user:
- host = "%s@%s" % (user, machine)
+ host = '%s@%s' % (user, machine)
else:
host = machine
self._LogMsg(self.cmdfd, self._GetStdout(print_to_console),
- "CMD (%s): %s\n" % (host, cmd))
+ 'CMD (%s): %s\n' % (host, cmd))
def LogFatal(self, msg, print_to_console=True):
self._LogMsg(self.stderr, self._GetStderr(print_to_console),
- "FATAL: %s\n" % msg)
+ 'FATAL: %s\n' % msg)
self._LogMsg(self.stderr, self._GetStderr(print_to_console),
- "\n".join(traceback.format_stack()))
+ '\n'.join(traceback.format_stack()))
sys.exit(1)
def LogError(self, msg, print_to_console=True):
self._LogMsg(self.stderr, self._GetStderr(print_to_console),
- "ERROR: %s\n" % msg)
+ 'ERROR: %s\n' % msg)
def LogWarning(self, msg, print_to_console=True):
self._LogMsg(self.stderr, self._GetStderr(print_to_console),
- "WARNING: %s\n" % msg)
+ 'WARNING: %s\n' % msg)
def LogOutput(self, msg, print_to_console=True):
self._LogMsg(self.stdout, self._GetStdout(print_to_console),
- "OUTPUT: %s\n" % msg)
+ 'OUTPUT: %s\n' % msg)
def LogFatalIf(self, condition, msg):
if condition:
@@ -180,12 +179,16 @@ class Logger(object):
self.LogWarning(msg)
def LogCommandOutput(self, msg, print_to_console=True):
- self._LogMsg(self.stdout, self._GetStdout(print_to_console),
- msg, flush=False)
+ self._LogMsg(self.stdout,
+ self._GetStdout(print_to_console),
+ msg,
+ flush=False)
def LogCommandError(self, msg, print_to_console=True):
- self._LogMsg(self.stderr, self._GetStderr(print_to_console),
- msg, flush=False)
+ self._LogMsg(self.stderr,
+ self._GetStderr(print_to_console),
+ msg,
+ flush=False)
def Flush(self):
self.cmdfd.flush()
@@ -198,13 +201,13 @@ class MockLogger(object):
MAX_LOG_FILES = 10
- def __init__ (self, rootdir, basefilename, print_console, subdir="logs"):
+ def __init__(self, rootdir, basefilename, print_console, subdir='logs'):
self.stdout = sys.stdout
self.stderr = sys.stderr
return None
def _AddSuffix(self, basename, suffix):
- return "%s%s" % (basename, suffix)
+ return '%s%s' % (basename, suffix)
def _FindSuffix(self, basename):
timestamps = []
@@ -212,7 +215,7 @@ class MockLogger(object):
for i in range(self.MAX_LOG_FILES):
suffix = str(i)
suffixed_basename = self._AddSuffix(basename, suffix)
- cmd_file = "%s.cmd" % suffixed_basename
+ cmd_file = '%s.cmd' % suffixed_basename
if not os.path.exists(cmd_file):
found_suffix = suffix
break
@@ -228,86 +231,81 @@ class MockLogger(object):
return suffix
def _CreateLogFileHandle(self, name):
- print "MockLogger: creating open file handle for %s (writing)" % name
+ print 'MockLogger: creating open file handle for %s (writing)' % name
def _CreateLogFileHandles(self, basename):
suffix = self._FindSuffix(basename)
suffixed_basename = self._AddSuffix(basename, suffix)
- print "MockLogger: opening file %s.cmd" % suffixed_basename
- print "MockLogger: opening file %s.out" % suffixed_basename
- print "MockLogger: opening file %s.err" % suffixed_basename
+ print 'MockLogger: opening file %s.cmd' % suffixed_basename
+ print 'MockLogger: opening file %s.out' % suffixed_basename
+ print 'MockLogger: opening file %s.err' % suffixed_basename
self._CreateLogFileSymlinks(basename, suffixed_basename)
# Symlink unsuffixed basename to currently suffixed one.
def _CreateLogFileSymlinks(self, basename, suffixed_basename):
- for extension in ["cmd", "out", "err"]:
- src_file = "%s.%s" % (os.path.basename(suffixed_basename), extension)
- dest_file = "%s.%s" % (basename, extension)
- print "MockLogger: Calling os.symlink(%s, %s)" % (src_file, dest_file)
+ for extension in ['cmd', 'out', 'err']:
+ src_file = '%s.%s' % (os.path.basename(suffixed_basename), extension)
+ dest_file = '%s.%s' % (basename, extension)
+ print 'MockLogger: Calling os.symlink(%s, %s)' % (src_file, dest_file)
def _WriteTo(self, fd, msg, flush):
- print "MockLogger: %s" % msg
+ print 'MockLogger: %s' % msg
def LogStartDots(self, print_to_console=True):
- print ". "
-
+ print '. '
def LogAppendDot(self, print_to_console=True):
- print ". "
+ print '. '
def LogEndDots(self, print_to_console=True):
- print "\n"
+ print '\n'
def _LogMsg(self, file_fd, term_fd, msg, flush=True):
- print "MockLogger: %s" % msg
+ print 'MockLogger: %s' % msg
def _GetStdout(self, print_to_console):
-# if print_to_console:
-# return sys.stdout
+ # if print_to_console:
+ # return sys.stdout
return None
def _GetStderr(self, print_to_console):
-# if print_to_console:
-# return sys.stderr
+ # if print_to_console:
+ # return sys.stderr
return None
- def LogCmdToFileOnly (self, cmd, machine="", user=None):
+ def LogCmdToFileOnly(self, cmd, machine='', user=None):
return
- host = ("%s@%s" % (user, machine)) if user else machine
+ host = ('%s@%s' % (user, machine)) if user else machine
flush = True
- cmd_string = "CMD (%s): %s\n" % (host, cmd)
- print "MockLogger: Writing to file ONLY: %s" % cmd_string
+ cmd_string = 'CMD (%s): %s\n' % (host, cmd)
+ print 'MockLogger: Writing to file ONLY: %s' % cmd_string
- def LogCmd(self, cmd, machine="", user=None, print_to_console=True):
+ def LogCmd(self, cmd, machine='', user=None, print_to_console=True):
if user:
- host = "%s@%s" % (user, machine)
+ host = '%s@%s' % (user, machine)
else:
host = machine
self._LogMsg(0, self._GetStdout(print_to_console),
- "CMD (%s): %s\n" % (host, cmd))
+ 'CMD (%s): %s\n' % (host, cmd))
def LogFatal(self, msg, print_to_console=True):
+ self._LogMsg(0, self._GetStderr(print_to_console), 'FATAL: %s\n' % msg)
self._LogMsg(0, self._GetStderr(print_to_console),
- "FATAL: %s\n" % msg)
- self._LogMsg(0, self._GetStderr(print_to_console),
- "\n".join(traceback.format_stack()))
- print "MockLogger: Calling sysexit(1)"
+ '\n'.join(traceback.format_stack()))
+ print 'MockLogger: Calling sysexit(1)'
def LogError(self, msg, print_to_console=True):
- self._LogMsg(0, self._GetStderr(print_to_console),
- "ERROR: %s\n" % msg)
+ self._LogMsg(0, self._GetStderr(print_to_console), 'ERROR: %s\n' % msg)
def LogWarning(self, msg, print_to_console=True):
- self._LogMsg(0, self._GetStderr(print_to_console),
- "WARNING: %s\n" % msg)
+ self._LogMsg(0, self._GetStderr(print_to_console), 'WARNING: %s\n' % msg)
def LogOutput(self, msg, print_to_console=True):
- self._LogMsg(0, self._GetStdout(print_to_console),
- "OUTPUT: %s\n" % msg)
+ self._LogMsg(0, self._GetStdout(print_to_console), 'OUTPUT: %s\n' % msg)
def LogFatalIf(self, condition, msg):
if condition:
@@ -322,23 +320,28 @@ class MockLogger(object):
self.LogWarning(msg)
def LogCommandOutput(self, msg, print_to_console=True):
- self._LogMsg(self.stdout, self._GetStdout(print_to_console),
- msg, flush=False)
+ self._LogMsg(self.stdout,
+ self._GetStdout(print_to_console),
+ msg,
+ flush=False)
def LogCommandError(self, msg, print_to_console=True):
- self._LogMsg(self.stderr, self._GetStderr(print_to_console),
- msg, flush=False)
+ self._LogMsg(self.stderr,
+ self._GetStderr(print_to_console),
+ msg,
+ flush=False)
def Flush(self):
- print "MockLogger: Flushing cmdfd, stdout, stderr"
+ print 'MockLogger: Flushing cmdfd, stdout, stderr'
main_logger = None
+
def InitLogger(script_name, log_dir, print_console=True, mock=False):
"""Initialize a global logger. To be called only once."""
global main_logger
- assert not main_logger, "The logger has already been initialized"
+ assert not main_logger, 'The logger has already been initialized'
rootdir, basefilename = GetRoot(script_name)
if not log_dir:
log_dir = rootdir
@@ -348,7 +351,7 @@ def InitLogger(script_name, log_dir, print_console=True, mock=False):
main_logger = MockLogger(log_dir, basefilename, print_console)
-def GetLogger(log_dir="", mock=False):
+def GetLogger(log_dir='', mock=False):
if not main_logger:
InitLogger(sys.argv[0], log_dir, mock=mock)
return main_logger
@@ -361,6 +364,6 @@ def HandleUncaughtExceptions(fun):
try:
return fun(*args, **kwargs)
except StandardError:
- GetLogger().LogFatal("Uncaught exception:\n%s" % traceback.format_exc())
+ GetLogger().LogFatal('Uncaught exception:\n%s' % traceback.format_exc())
return _Interceptor
diff --git a/utils/machines.py b/utils/machines.py
index 81763073..fb184a8d 100644
--- a/utils/machines.py
+++ b/utils/machines.py
@@ -1,9 +1,7 @@
-#!/usr/bin/python
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Utilities relating to machine-specific functions."""
from utils import command_executer
diff --git a/utils/manifest_versions.py b/utils/manifest_versions.py
index 57c0f865..e618ffc3 100644
--- a/utils/manifest_versions.py
+++ b/utils/manifest_versions.py
@@ -1,12 +1,10 @@
-#!/usr/bin/python
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Tools for searching/manipulating the manifests repository."""
-__author__ = "llozano@google.com (Luis Lozano)"
+__author__ = 'llozano@google.com (Luis Lozano)'
import os
import re
@@ -19,19 +17,19 @@ import logger
def IsCrosVersion(version):
- match = re.search(r"(\d+\.\d+\.\d+\.\d+)", version)
+ match = re.search(r'(\d+\.\d+\.\d+\.\d+)', version)
return match is not None
def IsRFormatCrosVersion(version):
- match = re.search(r"(R\d+-\d+\.\d+\.\d+)", version)
+ match = re.search(r'(R\d+-\d+\.\d+\.\d+)', version)
return match is not None
def RFormatCrosVersion(version):
assert IsCrosVersion(version)
- tmp_major, tmp_minor = version.split(".", 1)
- rformat = "R" + tmp_major + "-" + tmp_minor
+ tmp_major, tmp_minor = version.split('.', 1)
+ rformat = 'R' + tmp_major + '-' + tmp_minor
assert IsRFormatCrosVersion(rformat)
return rformat
@@ -44,17 +42,16 @@ class ManifestVersions(object):
self.clone_location = tempfile.mkdtemp()
self.ce = command_executer.GetCommandExecuter()
if internal:
- versions_git = (
- "https://chrome-internal.googlesource.com/"
- "chromeos/manifest-versions.git")
+ versions_git = ('https://chrome-internal.googlesource.com/'
+ 'chromeos/manifest-versions.git')
else:
versions_git = (
- "https://chromium.googlesource.com/chromiumos/manifest-versions.git")
- commands = ["cd {0}".format(self.clone_location),
- "git clone {0}".format(versions_git)]
+ 'https://chromium.googlesource.com/chromiumos/manifest-versions.git')
+ commands = ['cd {0}'.format(self.clone_location),
+ 'git clone {0}'.format(versions_git)]
ret = self.ce.RunCommands(commands)
if ret:
- logger.GetLogger().LogFatal("Failed to clone manifest-versions.")
+ logger.GetLogger().LogFatal('Failed to clone manifest-versions.')
def __del__(self):
if self.clone_location:
@@ -65,37 +62,35 @@ class ManifestVersions(object):
cur_time = time.mktime(time.gmtime())
des_time = float(my_time)
if cur_time - des_time > 7000000:
- logger.GetLogger().LogFatal("The time you specify is too early.")
- commands = ["cd {0}".format(self.clone_location),
- "cd manifest-versions",
- "git checkout -f $(git rev-list" +
- " --max-count=1 --before={0} origin/master)".format(my_time)]
+ logger.GetLogger().LogFatal('The time you specify is too early.')
+ commands = ['cd {0}'.format(self.clone_location), 'cd manifest-versions',
+ 'git checkout -f $(git rev-list' +
+ ' --max-count=1 --before={0} origin/master)'.format(my_time)]
ret = self.ce.RunCommands(commands)
if ret:
- logger.GetLogger().LogFatal("Failed to checkout manifest at "
- "specified time")
- path = os.path.realpath("{0}/manifest-versions/LKGM/lkgm.xml".
- format(self.clone_location))
- pp = path.split("/")
- small = os.path.basename(path).split(".xml")[0]
- version = pp[-2] + "." + small
- commands = ["cd {0}".format(self.clone_location),
- "cd manifest-versions", "git checkout master"]
+ logger.GetLogger().LogFatal('Failed to checkout manifest at '
+ 'specified time')
+ path = os.path.realpath('{0}/manifest-versions/LKGM/lkgm.xml'.format(
+ self.clone_location))
+ pp = path.split('/')
+ small = os.path.basename(path).split('.xml')[0]
+ version = pp[-2] + '.' + small
+ commands = ['cd {0}'.format(self.clone_location), 'cd manifest-versions',
+ 'git checkout master']
self.ce.RunCommands(commands)
return version
def GetManifest(self, version, to_file):
"""Get the manifest file from a given chromeos-internal version."""
assert not IsRFormatCrosVersion(version)
- version = version.split(".", 1)[1]
+ version = version.split('.', 1)[1]
os.chdir(self.clone_location)
files = [os.path.join(r, f)
- for r, _, fs in os.walk(".")
- for f in fs if version in f]
+ for r, _, fs in os.walk('.') for f in fs if version in f]
if files:
- command = "cp {0} {1}".format(files[0], to_file)
+ command = 'cp {0} {1}'.format(files[0], to_file)
ret = self.ce.RunCommand(command)
if ret:
- raise Exception("Cannot copy manifest to {0}".format(to_file))
+ raise Exception('Cannot copy manifest to {0}'.format(to_file))
else:
- raise Exception("Version {0} is not available.".format(version))
+ raise Exception('Version {0} is not available.'.format(version))
diff --git a/utils/misc.py b/utils/misc.py
index b4f7d4b4..b174de57 100644
--- a/utils/misc.py
+++ b/utils/misc.py
@@ -1,12 +1,10 @@
-#!/usr/bin/python
# Copyright 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Utilities for toolchain build."""
-__author__ = "asharif@google.com (Ahmad Sharif)"
+__author__ = 'asharif@google.com (Ahmad Sharif)'
from contextlib import contextmanager
import os
@@ -19,32 +17,31 @@ import traceback
import command_executer
import logger
-
-CHROMEOS_SCRIPTS_DIR = "~/trunk/src/scripts"
-TOOLCHAIN_UTILS_PATH = "~/trunk/src/platform/dev/toolchain_utils.sh"
+CHROMEOS_SCRIPTS_DIR = '~/trunk/src/scripts'
+TOOLCHAIN_UTILS_PATH = '~/trunk/src/platform/dev/toolchain_utils.sh'
def GetChromeOSVersionFromLSBVersion(lsb_version):
"""Get Chromeos version from Lsb version."""
ce = command_executer.GetCommandExecuter()
- command = ("git ls-remote "
- "https://chromium.googlesource.com/chromiumos/manifest.git")
+ command = ('git ls-remote '
+ 'https://chromium.googlesource.com/chromiumos/manifest.git')
ret, out, _ = ce.RunCommandWOutput(command, print_to_console=False)
- assert ret == 0, "Command %s failed" % command
+ assert ret == 0, 'Command %s failed' % command
lower = []
for line in out.splitlines():
- mo = re.search(r"refs/heads/release-R(\d+)-(\d+)\.B", line)
+ mo = re.search(r'refs/heads/release-R(\d+)-(\d+)\.B', line)
if mo:
revision = int(mo.group(1))
build = int(mo.group(2))
- lsb_build = int(lsb_version.split(".")[0])
+ lsb_build = int(lsb_version.split('.')[0])
if lsb_build > build:
lower.append(revision)
lower = sorted(lower)
if lower:
- return "R%d-%s" % (lower[-1] + 1, lsb_version)
+ return 'R%d-%s' % (lower[-1] + 1, lsb_version)
else:
- return "Unknown"
+ return 'Unknown'
def ApplySubs(string, *substitutions):
@@ -55,11 +52,9 @@ def ApplySubs(string, *substitutions):
def UnitToNumber(unit_num, base=1000):
"""Convert a number with unit to float."""
- unit_dict = {"kilo": base,
- "mega": base**2,
- "giga": base**3}
+ unit_dict = {'kilo': base, 'mega': base**2, 'giga': base**3}
unit_num = unit_num.lower()
- mo = re.search(r"(\d*)(.+)?", unit_num)
+ mo = re.search(r'(\d*)(.+)?', unit_num)
number = mo.group(1)
unit = mo.group(2)
if not unit:
@@ -67,17 +62,11 @@ def UnitToNumber(unit_num, base=1000):
for k, v in unit_dict.items():
if k.startswith(unit):
return float(number) * v
- raise Exception("Unit: %s not found in byte: %s!" %
- (unit,
- unit_num))
+ raise Exception('Unit: %s not found in byte: %s!' % (unit, unit_num))
def GetFilenameFromString(string):
- return ApplySubs(string,
- (r"/", "__"),
- (r"\s", "_"),
- (r"[\^\$=\"\\\?]", ""),
- )
+ return ApplySubs(string, (r'/', '__'), (r'\s', '_'), (r"[\^\$=\"\\\?]", ''),)
def GetRoot(scr_name):
@@ -87,123 +76,111 @@ def GetRoot(scr_name):
def GetChromeOSKeyFile(chromeos_root):
- return os.path.join(chromeos_root,
- "src",
- "scripts",
- "mod_for_test_scripts",
- "ssh_keys",
- "testing_rsa")
+ return os.path.join(chromeos_root, 'src', 'scripts', 'mod_for_test_scripts',
+ 'ssh_keys', 'testing_rsa')
def GetChrootPath(chromeos_root):
- return os.path.join(chromeos_root,
- "chroot")
+ return os.path.join(chromeos_root, 'chroot')
def GetInsideChrootPath(chromeos_root, file_path):
if not file_path.startswith(GetChrootPath(chromeos_root)):
raise Exception("File: %s doesn't seem to be in the chroot: %s" %
- (file_path,
- chromeos_root))
+ (file_path, chromeos_root))
return file_path[len(GetChrootPath(chromeos_root)):]
def GetOutsideChrootPath(chromeos_root, file_path):
- return os.path.join(GetChrootPath(chromeos_root),
- file_path.lstrip("/"))
+ return os.path.join(GetChrootPath(chromeos_root), file_path.lstrip('/'))
def FormatQuotedCommand(command):
- return ApplySubs(command,
- ("\"", "\\\""))
+ return ApplySubs(command, ("\"", "\\\""))
def FormatCommands(commands):
- return ApplySubs(str(commands),
- ("&&", "&&\n"),
- (";", ";\n"),
- (r"\n+\s*", "\n"))
+ return ApplySubs(
+ str(commands), ('&&', '&&\n'), (';', ';\n'), (r'\n+\s*', '\n'))
def GetImageDir(chromeos_root, board):
- return os.path.join(chromeos_root,
- "src",
- "build",
- "images",
- board)
+ return os.path.join(chromeos_root, 'src', 'build', 'images', board)
def LabelLatestImage(chromeos_root, board, label, vanilla_path=None):
image_dir = GetImageDir(chromeos_root, board)
- latest_image_dir = os.path.join(image_dir, "latest")
+ latest_image_dir = os.path.join(image_dir, 'latest')
latest_image_dir = os.path.realpath(latest_image_dir)
latest_image_dir = os.path.basename(latest_image_dir)
retval = 0
with WorkingDirectory(image_dir):
- command = "ln -sf -T %s %s" % (latest_image_dir, label)
+ command = 'ln -sf -T %s %s' % (latest_image_dir, label)
ce = command_executer.GetCommandExecuter()
retval = ce.RunCommand(command)
if retval:
return retval
if vanilla_path:
- command = "ln -sf -T %s %s" % (vanilla_path, "vanilla")
+ command = 'ln -sf -T %s %s' % (vanilla_path, 'vanilla')
retval2 = ce.RunCommand(command)
return retval2
return retval
def DoesLabelExist(chromeos_root, board, label):
- image_label = os.path.join(GetImageDir(chromeos_root, board),
- label)
+ image_label = os.path.join(GetImageDir(chromeos_root, board), label)
return os.path.exists(image_label)
def GetBuildPackagesCommand(board, usepkg=False, debug=False):
if usepkg:
- usepkg_flag = "--usepkg"
+ usepkg_flag = '--usepkg'
else:
- usepkg_flag = "--nousepkg"
+ usepkg_flag = '--nousepkg'
if debug:
- withdebug_flag = "--withdebug"
+ withdebug_flag = '--withdebug'
else:
- withdebug_flag = "--nowithdebug"
- return ("%s/build_packages %s --withdev --withtest --withautotest "
- "--skip_toolchain_update %s --board=%s "
- "--accept_licenses=@CHROMEOS" %
+ withdebug_flag = '--nowithdebug'
+ return ('%s/build_packages %s --withdev --withtest --withautotest '
+ '--skip_toolchain_update %s --board=%s '
+ '--accept_licenses=@CHROMEOS' %
(CHROMEOS_SCRIPTS_DIR, usepkg_flag, withdebug_flag, board))
def GetBuildImageCommand(board, dev=False):
- dev_args = ""
+ dev_args = ''
if dev:
- dev_args = "--noenable_rootfs_verification --disk_layout=2gb-rootfs"
- return ("%s/build_image --board=%s %s test" %
+ dev_args = '--noenable_rootfs_verification --disk_layout=2gb-rootfs'
+ return ('%s/build_image --board=%s %s test' %
(CHROMEOS_SCRIPTS_DIR, board, dev_args))
-def GetSetupBoardCommand(board, gcc_version=None, binutils_version=None,
- usepkg=None, force=None):
+def GetSetupBoardCommand(board,
+ gcc_version=None,
+ binutils_version=None,
+ usepkg=None,
+ force=None):
"""Get setup_board command."""
options = []
if gcc_version:
- options.append("--gcc_version=%s" % gcc_version)
+ options.append('--gcc_version=%s' % gcc_version)
if binutils_version:
- options.append("--binutils_version=%s" % binutils_version)
+ options.append('--binutils_version=%s' % binutils_version)
if usepkg:
- options.append("--usepkg")
+ options.append('--usepkg')
else:
- options.append("--nousepkg")
+ options.append('--nousepkg')
if force:
- options.append("--force")
+ options.append('--force')
- options.append("--accept_licenses=@CHROMEOS")
+ options.append('--accept_licenses=@CHROMEOS')
- return ("%s/setup_board --board=%s %s" %
- (CHROMEOS_SCRIPTS_DIR, board, " ".join(options)))
+ return ('%s/setup_board --board=%s %s' %
+ (CHROMEOS_SCRIPTS_DIR, board, ' '.join(options)))
def CanonicalizePath(path):
@@ -214,13 +191,13 @@ def CanonicalizePath(path):
def GetCtargetFromBoard(board, chromeos_root):
"""Get Ctarget from board."""
- base_board = board.split("_")[0]
- command = ("source %s; get_ctarget_from_board %s" %
+ base_board = board.split('_')[0]
+ command = ('source %s; get_ctarget_from_board %s' %
(TOOLCHAIN_UTILS_PATH, base_board))
ce = command_executer.GetCommandExecuter()
ret, out, _ = ce.ChrootRunCommandWOutput(chromeos_root, command)
if ret != 0:
- raise ValueError("Board %s is invalid!" % board)
+ raise ValueError('Board %s is invalid!' % board)
# Remove ANSI escape sequences.
out = StripANSIEscapeSequences(out)
return out.strip()
@@ -228,13 +205,13 @@ def GetCtargetFromBoard(board, chromeos_root):
def GetArchFromBoard(board, chromeos_root):
"""Get Arch from board."""
- base_board = board.split("_")[0]
- command = ("source %s; get_board_arch %s" %
+ base_board = board.split('_')[0]
+ command = ('source %s; get_board_arch %s' %
(TOOLCHAIN_UTILS_PATH, base_board))
ce = command_executer.GetCommandExecuter()
ret, out, _ = ce.ChrootRunCommandWOutput(chromeos_root, command)
if ret != 0:
- raise ValueError("Board %s is invalid!" % board)
+ raise ValueError('Board %s is invalid!' % board)
# Remove ANSI escape sequences.
out = StripANSIEscapeSequences(out)
return out.strip()
@@ -243,28 +220,28 @@ def GetArchFromBoard(board, chromeos_root):
def GetGccLibsDestForBoard(board, chromeos_root):
"""Get gcc libs destination from board."""
arch = GetArchFromBoard(board, chromeos_root)
- if arch == "x86":
- return "/build/%s/usr/lib/gcc/" % board
- if arch == "amd64":
- return "/build/%s/usr/lib64/gcc/" % board
- if arch == "arm":
- return "/build/%s/usr/lib/gcc/" % board
- if arch == "arm64":
- return "/build/%s/usr/lib/gcc/" % board
- raise ValueError("Arch %s is invalid!" % arch)
+ if arch == 'x86':
+ return '/build/%s/usr/lib/gcc/' % board
+ if arch == 'amd64':
+ return '/build/%s/usr/lib64/gcc/' % board
+ if arch == 'arm':
+ return '/build/%s/usr/lib/gcc/' % board
+ if arch == 'arm64':
+ return '/build/%s/usr/lib/gcc/' % board
+ raise ValueError('Arch %s is invalid!' % arch)
def StripANSIEscapeSequences(string):
- string = re.sub(r"\x1b\[[0-9]*[a-zA-Z]", "", string)
+ string = re.sub(r'\x1b\[[0-9]*[a-zA-Z]', '', string)
return string
def GetChromeSrcDir():
- return "var/cache/distfiles/target/chrome-src/src"
+ return 'var/cache/distfiles/target/chrome-src/src'
def GetEnvStringFromDict(env_dict):
- return " ".join(["%s=\"%s\"" % var for var in env_dict.items()])
+ return ' '.join(["%s=\"%s\"" % var for var in env_dict.items()])
def MergeEnvStringWithDict(env_string, env_dict, prepend=True):
@@ -279,19 +256,19 @@ def MergeEnvStringWithDict(env_string, env_dict, prepend=True):
new_env = "%s=\"%s $%s\"" % (k, v, k)
else:
new_env = "%s=\"$%s %s\"" % (k, k, v)
- command = "; ".join([env_string, new_env, "echo $%s" % k])
+ command = '; '.join([env_string, new_env, 'echo $%s' % k])
ret, out, _ = ce.RunCommandWOutput(command)
- override_env_list.append("%s=%r" % (k, out.strip()))
- ret = env_string + " " + " ".join(override_env_list)
+ override_env_list.append('%s=%r' % (k, out.strip()))
+ ret = env_string + ' ' + ' '.join(override_env_list)
return ret.strip()
def GetAllImages(chromeos_root, board):
ce = command_executer.GetCommandExecuter()
- command = ("find %s/src/build/images/%s -name chromiumos_test_image.bin" %
+ command = ('find %s/src/build/images/%s -name chromiumos_test_image.bin' %
(chromeos_root, board))
ret, out, _ = ce.RunCommandWOutput(command)
- assert ret == 0, "Could not run command: %s" % command
+ assert ret == 0, 'Could not run command: %s' % command
return out.splitlines()
@@ -309,16 +286,16 @@ def RemoveChromeBrowserObjectFiles(chromeos_root, board):
"""Remove any object files from all the posible locations."""
out_dir = os.path.join(
GetChrootPath(chromeos_root),
- "var/cache/chromeos-chrome/chrome-src/src/out_%s" % board)
+ 'var/cache/chromeos-chrome/chrome-src/src/out_%s' % board)
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
- logger.GetLogger().LogCmd("rm -rf %s" % out_dir)
+ logger.GetLogger().LogCmd('rm -rf %s' % out_dir)
out_dir = os.path.join(
GetChrootPath(chromeos_root),
- "var/cache/chromeos-chrome/chrome-src-internal/src/out_%s" % board)
+ 'var/cache/chromeos-chrome/chrome-src-internal/src/out_%s' % board)
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
- logger.GetLogger().LogCmd("rm -rf %s" % out_dir)
+ logger.GetLogger().LogCmd('rm -rf %s' % out_dir)
@contextmanager
@@ -326,37 +303,40 @@ def WorkingDirectory(new_dir):
"""Get the working directory."""
old_dir = os.getcwd()
if old_dir != new_dir:
- msg = "cd %s" % new_dir
+ msg = 'cd %s' % new_dir
logger.GetLogger().LogCmd(msg)
os.chdir(new_dir)
yield new_dir
if old_dir != new_dir:
- msg = "cd %s" % old_dir
+ msg = 'cd %s' % old_dir
logger.GetLogger().LogCmd(msg)
os.chdir(old_dir)
def HasGitStagedChanges(git_dir):
"""Return True if git repository has staged changes."""
- command = "cd {0} && git diff --quiet --cached --exit-code HEAD".format(
+ command = 'cd {0} && git diff --quiet --cached --exit-code HEAD'.format(
git_dir)
return command_executer.GetCommandExecuter().RunCommand(
- command, print_to_console=False)
+ command,
+ print_to_console=False)
def HasGitUnstagedChanges(git_dir):
"""Return True if git repository has un-staged changes."""
- command = "cd {0} && git diff --quiet --exit-code HEAD".format(git_dir)
+ command = 'cd {0} && git diff --quiet --exit-code HEAD'.format(git_dir)
return command_executer.GetCommandExecuter().RunCommand(
- command, print_to_console=False)
+ command,
+ print_to_console=False)
def HasGitUntrackedChanges(git_dir):
"""Return True if git repository has un-tracked changes."""
- command = ("cd {0} && test -z "
- "$(git ls-files --exclude-standard --others)").format(git_dir)
+ command = ('cd {0} && test -z '
+ '$(git ls-files --exclude-standard --others)').format(git_dir)
return command_executer.GetCommandExecuter().RunCommand(
- command, print_to_console=False)
+ command,
+ print_to_console=False)
def GitGetCommitHash(git_dir, commit_symbolic_name):
@@ -376,7 +356,8 @@ def GitGetCommitHash(git_dir, commit_symbolic_name):
command = ('cd {0} && git log -n 1 --pretty="format:%H" {1}').format(
git_dir, commit_symbolic_name)
rv, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
- command, print_to_console=False)
+ command,
+ print_to_console=False)
if rv == 0:
return out.strip()
return None
@@ -391,13 +372,13 @@ def IsGitTreeClean(git_dir):
True if git dir is clean.
"""
if HasGitStagedChanges(git_dir):
- logger.GetLogger().LogWarning("Git tree has staged changes.")
+ logger.GetLogger().LogWarning('Git tree has staged changes.')
return False
if HasGitUnstagedChanges(git_dir):
- logger.GetLogger().LogWarning("Git tree has unstaged changes.")
+ logger.GetLogger().LogWarning('Git tree has unstaged changes.')
return False
if HasGitUntrackedChanges(git_dir):
- logger.GetLogger().LogWarning("Git tree has un-tracked changes.")
+ logger.GetLogger().LogWarning('Git tree has un-tracked changes.')
return False
return True
@@ -412,13 +393,14 @@ def GetGitChangesAsList(git_dir, path=None, staged=False):
Returns:
A list containing all the changed files.
"""
- command = "cd {0} && git diff --name-only".format(git_dir)
+ command = 'cd {0} && git diff --name-only'.format(git_dir)
if staged:
- command += " --cached"
+ command += ' --cached'
if path:
- command += " -- " + path
+ command += ' -- ' + path
_, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
- command, print_to_console=False)
+ command,
+ print_to_console=False)
rv = []
for line in out.splitlines():
rv.append(line)
@@ -426,10 +408,9 @@ def GetGitChangesAsList(git_dir, path=None, staged=False):
def IsChromeOsTree(chromeos_root):
- return (os.path.isdir(os.path.join(
- chromeos_root, "src/third_party/chromiumos-overlay")) and
- os.path.isdir(os.path.join(
- chromeos_root, "manifest")))
+ return (os.path.isdir(os.path.join(chromeos_root,
+ 'src/third_party/chromiumos-overlay')) and
+ os.path.isdir(os.path.join(chromeos_root, 'manifest')))
def DeleteChromeOsTree(chromeos_root, dry_run=False):
@@ -447,12 +428,13 @@ def DeleteChromeOsTree(chromeos_root, dry_run=False):
'"{0}" does not seem to be a valid chromeos tree, do nothing.'.format(
chromeos_root))
return False
- cmd0 = "cd {0} && cros_sdk --delete".format(chromeos_root)
+ cmd0 = 'cd {0} && cros_sdk --delete'.format(chromeos_root)
if dry_run:
print cmd0
else:
if command_executer.GetCommandExecuter().RunCommand(
- cmd0, print_to_console=True) != 0:
+ cmd0,
+ print_to_console=True) != 0:
return False
cmd1 = ('export CHROMEOSDIRNAME="$(dirname $(cd {0} && pwd))" && '
@@ -464,11 +446,13 @@ def DeleteChromeOsTree(chromeos_root, dry_run=False):
return True
return command_executer.GetCommandExecuter().RunCommand(
- cmd1, print_to_console=True) == 0
+ cmd1,
+ print_to_console=True) == 0
def ApplyGerritPatches(chromeos_root,
- gerrit_patch_string, branch='cros/master'):
+ gerrit_patch_string,
+ branch='cros/master'):
"""Apply gerrit patches on a chromeos tree.
Args:
@@ -510,8 +494,11 @@ def ApplyGerritPatches(chromeos_root,
return True
-def BooleanPrompt(prompt='Do you want to continue?', default=True,
- true_value='yes', false_value='no', prolog=None):
+def BooleanPrompt(prompt='Do you want to continue?',
+ default=True,
+ true_value='yes',
+ false_value='no',
+ prolog=None):
"""Helper function for processing boolean choice prompts.
Args:
@@ -527,8 +514,8 @@ def BooleanPrompt(prompt='Do you want to continue?', default=True,
true_value, false_value = true_value.lower(), false_value.lower()
true_text, false_text = true_value, false_value
if true_value == false_value:
- raise ValueError('true_value and false_value must differ: got %r'
- % true_value)
+ raise ValueError('true_value and false_value must differ: got %r' %
+ true_value)
if default:
true_text = true_text[0].upper() + true_text[1:]
diff --git a/utils/misc_test.py b/utils/misc_test.py
index e234332f..ef93a1cd 100644
--- a/utils/misc_test.py
+++ b/utils/misc_test.py
@@ -1,5 +1,4 @@
# Copyright 2012 Google Inc. All Rights Reserved.
-
"""Tests for misc."""
__author__ = 'asharif@google.com (Ahmad Sharif)'
@@ -13,6 +12,7 @@ import misc
class UtilsTest(unittest.TestCase):
+
def testGetFilenameFromString(self):
string = 'a /b=c"d^$?\\'
filename = misc.GetFilenameFromString(string)
@@ -29,11 +29,10 @@ class UtilsTest(unittest.TestCase):
self.assertTrue(new_env_string == ' '.join([env_string, expected_new_env]))
def testGetChromeOSVersionFromLSBVersion(self):
- versions_dict = {"2630.0.0": "22",
- "2030.0.0": "19"}
+ versions_dict = {'2630.0.0': '22', '2030.0.0': '19'}
f = misc.GetChromeOSVersionFromLSBVersion
for k, v in versions_dict.items():
- self.assertTrue(f(k) == "R%s-%s" % (v, k))
+ self.assertTrue(f(k) == 'R%s-%s' % (v, k))
def testPostpendMergeEnv(self):
var = 'USE'
@@ -41,10 +40,10 @@ class UtilsTest(unittest.TestCase):
added_use_flags = 'bla bla'
env_string = '%s=%r' % (var, use_flags)
new_env_string = misc.MergeEnvStringWithDict(env_string,
- {var: added_use_flags},
- False)
+ {var: added_use_flags}, False)
expected_new_env = '%s=%r' % (var, ' '.join([use_flags, added_use_flags]))
self.assertTrue(new_env_string == ' '.join([env_string, expected_new_env]))
+
if __name__ == '__main__':
unittest.main()
diff --git a/utils/no_pseudo_terminal_test.py b/utils/no_pseudo_terminal_test.py
index c4c3c43e..209840f1 100644
--- a/utils/no_pseudo_terminal_test.py
+++ b/utils/no_pseudo_terminal_test.py
@@ -14,7 +14,7 @@ class NoPsuedoTerminalTest(unittest.TestCase):
def _AttachStraceToSelf(self, output_file):
"""Attaches strace to the current process."""
- args = ["strace", "-o", output_file, "-p", str(os.getpid())]
+ args = ['strace', '-o', output_file, '-p', str(os.getpid())]
print args
self._strace_process = subprocess.Popen(args)
# Wait until we see some activity.
@@ -37,12 +37,12 @@ class NoPsuedoTerminalTest(unittest.TestCase):
self.assertTrue(self._AttachStraceToSelf(temp_file))
ce = command_executer.GetCommandExecuter()
- ce.RunCommand("echo")
+ ce.RunCommand('echo')
self.assertTrue(self._KillStraceProcess())
strace_contents = open(temp_file).read()
- self.assertFalse("/dev/ptmx" in strace_contents)
+ self.assertFalse('/dev/ptmx' in strace_contents)
if __name__ == '__main__':
diff --git a/utils/perf_diff.py b/utils/perf_diff.py
index 47cb1efc..0fcada6c 100755
--- a/utils/perf_diff.py
+++ b/utils/perf_diff.py
@@ -1,12 +1,11 @@
#!/usr/bin/python
# Copyright 2012 Google Inc. All Rights Reserved.
-
"""One-line documentation for perf_diff module.
A detailed description of perf_diff.
"""
-__author__ = "asharif@google.com (Ahmad Sharif)"
+__author__ = 'asharif@google.com (Ahmad Sharif)'
import optparse
import re
@@ -15,8 +14,8 @@ import sys
import misc
import tabulator
-ROWS_TO_SHOW = "Rows_to_show_in_the_perf_table"
-TOTAL_EVENTS = "Total_events_of_this_profile"
+ROWS_TO_SHOW = 'Rows_to_show_in_the_perf_table'
+TOTAL_EVENTS = 'Total_events_of_this_profile'
def GetPerfDictFromReport(report_file):
@@ -28,7 +27,7 @@ def GetPerfDictFromReport(report_file):
output[k][ROWS_TO_SHOW] = 0
output[k][TOTAL_EVENTS] = 0
for function in v.functions:
- out_key = "%s" % (function.name)
+ out_key = '%s' % (function.name)
output[k][out_key] = function.count
output[k][TOTAL_EVENTS] += function.count
if function.percent > 1:
@@ -45,13 +44,13 @@ def _SortDictionaryByValue(d):
else:
return x
- sorted_l = sorted(l,
- key=lambda x: GetFloat(x[1]))
+ sorted_l = sorted(l, key=lambda x: GetFloat(x[1]))
sorted_l.reverse()
return [f[0] for f in sorted_l]
class Tabulator(object):
+
def __init__(self, all_dicts):
self._all_dicts = all_dicts
@@ -69,7 +68,7 @@ class Tabulator(object):
else:
fields[f] = max(fields[f], d[f])
table = []
- header = ["name"]
+ header = ['name']
for i in range(len(dicts)):
header.append(i)
@@ -83,27 +82,29 @@ class Tabulator(object):
if f in d:
row.append(d[f])
else:
- row.append("0")
+ row.append('0')
table.append(row)
print tabulator.GetSimpleTable(table)
class Function(object):
+
def __init__(self):
self.count = 0
- self.name = ""
+ self.name = ''
self.percent = 0
class Section(object):
+
def __init__(self, contents):
self.raw_contents = contents
self._ParseSection()
def _ParseSection(self):
- matches = re.findall(r"Events: (\w+)\s+(.*)", self.raw_contents)
- assert len(matches) <= 1, "More than one event found in 1 section"
+ matches = re.findall(r'Events: (\w+)\s+(.*)', self.raw_contents)
+ assert len(matches) <= 1, 'More than one event found in 1 section'
if not matches:
return
match = matches[0]
@@ -114,14 +115,14 @@ class Section(object):
for line in self.raw_contents.splitlines():
if not line.strip():
continue
- if "%" not in line:
+ if '%' not in line:
continue
- if not line.startswith("#"):
- fields = [f for f in line.split(" ") if f]
+ if not line.startswith('#'):
+ fields = [f for f in line.split(' ') if f]
function = Function()
- function.percent = float(fields[0].strip("%"))
+ function.percent = float(fields[0].strip('%'))
function.count = int(fields[1])
- function.name = " ".join(fields[2:])
+ function.name = ' '.join(fields[2:])
self.functions.append(function)
@@ -134,7 +135,7 @@ class PerfReport(object):
self.sections = {}
self.metadata = {}
self._section_contents = []
- self._section_header = ""
+ self._section_header = ''
self._SplitSections()
self._ParseSections()
self._ParseSectionHeader()
@@ -145,8 +146,8 @@ class PerfReport(object):
# report was generated, not when the data was captured.
for line in self._section_header.splitlines():
line = line[2:]
- if ":" in line:
- key, val = line.strip().split(":", 1)
+ if ':' in line:
+ key, val = line.strip().split(':', 1)
key = key.strip()
val = val.strip()
self.metadata[key] = val
@@ -164,22 +165,22 @@ class PerfReport(object):
# TODO(asharif): Do this better.
def _GetHumanReadableName(self, section_name):
- if not "raw" in section_name:
+ if not 'raw' in section_name:
return section_name
- raw_number = section_name.strip().split(" ")[-1]
+ raw_number = section_name.strip().split(' ')[-1]
for line in self._section_header.splitlines():
if raw_number in line:
- name = line.strip().split(" ")[5]
+ name = line.strip().split(' ')[5]
return name
def _SplitSections(self):
self._section_contents = []
- indices = [m.start() for m in re.finditer("# Events:", self._perf_contents)]
+ indices = [m.start() for m in re.finditer('# Events:', self._perf_contents)]
indices.append(len(self._perf_contents))
for i in range(len(indices) - 1):
- section_content = self._perf_contents[indices[i]:indices[i+1]]
+ section_content = self._perf_contents[indices[i]:indices[i + 1]]
self._section_contents.append(section_content)
- self._section_header = ""
+ self._section_header = ''
if indices:
self._section_header = self._perf_contents[0:indices[0]]
@@ -201,7 +202,7 @@ class PerfDiffer(object):
summary_dicts = []
for report in self._reports:
d = {}
- filename_dicts.append({"file": report.perf_file})
+ filename_dicts.append({'file': report.perf_file})
for section_name in section_names:
if section_name in report.sections:
d[section_name] = report.sections[section_name].count
@@ -210,8 +211,7 @@ class PerfDiffer(object):
all_dicts = [filename_dicts, summary_dicts]
for section_name in section_names:
- function_names = self._GetTopFunctions(section_name,
- self._num_symbols)
+ function_names = self._GetTopFunctions(section_name, self._num_symbols)
self._FindCommonFunctions(section_name)
dicts = []
for report in self._reports:
@@ -224,13 +224,13 @@ class PerfDiffer(object):
for function in section.functions:
if function.name in function_names:
- key = "%s %s" % (section.name, function.name)
+ key = '%s %s' % (section.name, function.name)
d[key] = function.count
# Compute a factor to scale the function count by in common_only
# mode.
if self._common_only and (
function.name in self._common_function_names[section.name]):
- d[key + " scaled"] = common_scaling_factor * function.count
+ d[key + ' scaled'] = common_scaling_factor * function.count
dicts.append(d)
all_dicts.append(dicts)
@@ -245,15 +245,13 @@ class PerfDiffer(object):
if section.name not in sections:
sections[section.name] = section.count
else:
- sections[section.name] = max(sections[section.name],
- section.count)
+ sections[section.name] = max(sections[section.name], section.count)
return _SortDictionaryByValue(sections)
def _GetCommonScalingFactor(self, section):
unique_count = self._GetCount(
- section,
- lambda x: x in self._common_function_names[section.name])
- return 100.0/unique_count
+ section, lambda x: x in self._common_function_names[section.name])
+ return 100.0 / unique_count
def _GetCount(self, section, filter_fun=None):
total_count = 0
@@ -297,17 +295,17 @@ class PerfDiffer(object):
def Main(argv):
"""The entry of the main."""
parser = optparse.OptionParser()
- parser.add_option("-n",
- "--num_symbols",
- dest="num_symbols",
- default="5",
- help="The number of symbols to show.")
- parser.add_option("-c",
- "--common_only",
- dest="common_only",
- action="store_true",
+ parser.add_option('-n',
+ '--num_symbols',
+ dest='num_symbols',
+ default='5',
+ help='The number of symbols to show.')
+ parser.add_option('-c',
+ '--common_only',
+ dest='common_only',
+ action='store_true',
default=False,
- help="Diff common symbols only.")
+ help='Diff common symbols only.')
options, args = parser.parse_args(argv)
@@ -324,5 +322,5 @@ def Main(argv):
return 0
-if __name__ == "__main__":
+if __name__ == '__main__':
sys.exit(Main(sys.argv))
diff --git a/utils/pstat.py b/utils/pstat.py
index dae681e6..732ac1f1 100644
--- a/utils/pstat.py
+++ b/utils/pstat.py
@@ -20,9 +20,8 @@
#
# Comments and/or additions are welcome (send e-mail to:
# strang@nmr.mgh.harvard.edu).
-#
-"""
-pstat.py module
+#
+"""pstat.py module
#################################################
####### Written by: Gary Strangman ###########
@@ -43,7 +42,7 @@ functions include:
linexand (listoflists,columnlist,valuelist)
linexor (listoflists,columnlist,valuelist)
linedelimited (inlist,delimiter)
- lineincols (inlist,colsize)
+ lineincols (inlist,colsize)
lineincustcols (inlist,colsizes)
list2string (inlist)
makelol(inlist)
@@ -117,8 +116,9 @@ __version__ = 0.4
### Array functions (for NumPy-enabled computers) appear below.
###
-def abut (source,*args):
- """
+
+def abut(source, *args):
+ """
Like the |Stat abut command. It concatenates two lists side-by-side
and returns the result. '2D' lists are also accomodated for either argument
(source or addon). CAUTION: If one list is shorter, it will be repeated
@@ -130,42 +130,42 @@ Returns: a list of lists as long as the LONGEST list past, source on the
'left', lists in <args> attached consecutively on the 'right'
"""
- if type(source) not in [ListType,TupleType]:
- source = [source]
- for addon in args:
- if type(addon) not in [ListType,TupleType]:
- addon = [addon]
- if len(addon) < len(source): # is source list longer?
- if len(source) % len(addon) == 0: # are they integer multiples?
- repeats = len(source)/len(addon) # repeat addon n times
- origadd = copy.deepcopy(addon)
- for i in range(repeats-1):
- addon = addon + origadd
- else:
- repeats = len(source)/len(addon)+1 # repeat addon x times,
- origadd = copy.deepcopy(addon) # x is NOT an integer
- for i in range(repeats-1):
- addon = addon + origadd
- addon = addon[0:len(source)]
- elif len(source) < len(addon): # is addon list longer?
- if len(addon) % len(source) == 0: # are they integer multiples?
- repeats = len(addon)/len(source) # repeat source n times
- origsour = copy.deepcopy(source)
- for i in range(repeats-1):
- source = source + origsour
- else:
- repeats = len(addon)/len(source)+1 # repeat source x times,
- origsour = copy.deepcopy(source) # x is NOT an integer
- for i in range(repeats-1):
- source = source + origsour
- source = source[0:len(addon)]
-
- source = simpleabut(source,addon)
- return source
-
-
-def simpleabut (source, addon):
- """
+ if type(source) not in [ListType, TupleType]:
+ source = [source]
+ for addon in args:
+ if type(addon) not in [ListType, TupleType]:
+ addon = [addon]
+ if len(addon) < len(source): # is source list longer?
+ if len(source) % len(addon) == 0: # are they integer multiples?
+ repeats = len(source) / len(addon) # repeat addon n times
+ origadd = copy.deepcopy(addon)
+ for i in range(repeats - 1):
+ addon = addon + origadd
+ else:
+ repeats = len(source) / len(addon) + 1 # repeat addon x times,
+ origadd = copy.deepcopy(addon) # x is NOT an integer
+ for i in range(repeats - 1):
+ addon = addon + origadd
+ addon = addon[0:len(source)]
+ elif len(source) < len(addon): # is addon list longer?
+ if len(addon) % len(source) == 0: # are they integer multiples?
+ repeats = len(addon) / len(source) # repeat source n times
+ origsour = copy.deepcopy(source)
+ for i in range(repeats - 1):
+ source = source + origsour
+ else:
+ repeats = len(addon) / len(source) + 1 # repeat source x times,
+ origsour = copy.deepcopy(source) # x is NOT an integer
+ for i in range(repeats - 1):
+ source = source + origsour
+ source = source[0:len(addon)]
+
+ source = simpleabut(source, addon)
+ return source
+
+
+def simpleabut(source, addon):
+ """
Concatenates two lists as columns and returns the result. '2D' lists
are also accomodated for either argument (source or addon). This DOES NOT
repeat either list to make the 2 lists of equal length. Beware of list pairs
@@ -176,32 +176,32 @@ Usage: simpleabut(source,addon) where source, addon=list (or list-of-lists)
Returns: a list of lists as long as source, with source on the 'left' and
addon on the 'right'
"""
- if type(source) not in [ListType,TupleType]:
- source = [source]
- if type(addon) not in [ListType,TupleType]:
- addon = [addon]
- minlen = min(len(source),len(addon))
- list = copy.deepcopy(source) # start abut process
- if type(source[0]) not in [ListType,TupleType]:
- if type(addon[0]) not in [ListType,TupleType]:
- for i in range(minlen):
- list[i] = [source[i]] + [addon[i]] # source/addon = column
- else:
- for i in range(minlen):
- list[i] = [source[i]] + addon[i] # addon=list-of-lists
+ if type(source) not in [ListType, TupleType]:
+ source = [source]
+ if type(addon) not in [ListType, TupleType]:
+ addon = [addon]
+ minlen = min(len(source), len(addon))
+ list = copy.deepcopy(source) # start abut process
+ if type(source[0]) not in [ListType, TupleType]:
+ if type(addon[0]) not in [ListType, TupleType]:
+ for i in range(minlen):
+ list[i] = [source[i]] + [addon[i]] # source/addon = column
else:
- if type(addon[0]) not in [ListType,TupleType]:
- for i in range(minlen):
- list[i] = source[i] + [addon[i]] # source=list-of-lists
- else:
- for i in range(minlen):
- list[i] = source[i] + addon[i] # source/addon = list-of-lists
- source = list
- return source
+ for i in range(minlen):
+ list[i] = [source[i]] + addon[i] # addon=list-of-lists
+ else:
+ if type(addon[0]) not in [ListType, TupleType]:
+ for i in range(minlen):
+ list[i] = source[i] + [addon[i]] # source=list-of-lists
+ else:
+ for i in range(minlen):
+ list[i] = source[i] + addon[i] # source/addon = list-of-lists
+ source = list
+ return source
-def colex (listoflists,cnums):
- """
+def colex(listoflists, cnums):
+ """
Extracts from listoflists the columns specified in the list 'cnums'
(cnums can be an integer, a sequence of integers, or a string-expression that
corresponds to a slice operation on the variable x ... e.g., 'x[3:]' will colex
@@ -211,25 +211,30 @@ Usage: colex (listoflists,cnums)
Returns: a list-of-lists corresponding to the columns from listoflists
specified by cnums, in the order the column numbers appear in cnums
"""
- global index
- column = 0
- if type(cnums) in [ListType,TupleType]: # if multiple columns to get
- index = cnums[0]
- column = map(lambda x: x[index], listoflists)
- for col in cnums[1:]:
- index = col
- column = abut(column,map(lambda x: x[index], listoflists))
- elif type(cnums) == StringType: # if an 'x[3:]' type expr.
- evalstring = 'map(lambda x: x'+cnums+', listoflists)'
- column = eval(evalstring)
- else: # else it's just 1 col to get
- index = cnums
- column = map(lambda x: x[index], listoflists)
- return column
-
-
-def collapse (listoflists,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None):
- """
+ global index
+ column = 0
+ if type(cnums) in [ListType, TupleType]: # if multiple columns to get
+ index = cnums[0]
+ column = map(lambda x: x[index], listoflists)
+ for col in cnums[1:]:
+ index = col
+ column = abut(column, map(lambda x: x[index], listoflists))
+ elif type(cnums) == StringType: # if an 'x[3:]' type expr.
+ evalstring = 'map(lambda x: x' + cnums + ', listoflists)'
+ column = eval(evalstring)
+ else: # else it's just 1 col to get
+ index = cnums
+ column = map(lambda x: x[index], listoflists)
+ return column
+
+
+def collapse(listoflists,
+ keepcols,
+ collapsecols,
+ fcn1=None,
+ fcn2=None,
+ cfcn=None):
+ """
Averages data in collapsecol, keeping all unique items in keepcols
(using unique, which keeps unique LISTS of column numbers), retaining the
unique sets of values in keepcols, the mean for each. Setting fcn1
@@ -239,75 +244,79 @@ cfcn is the collapse function to apply (defaults to mean, defined here in the
pstat module to avoid circular imports with stats.py, but harmonicmean or
others could be passed).
-Usage: collapse (listoflists,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None)
+Usage: collapse
+(listoflists,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None)
Returns: a list of lists with all unique permutations of entries appearing in
columns ("conditions") specified by keepcols, abutted with the result of
cfcn (if cfcn=None, defaults to the mean) of each column specified by
collapsecols.
"""
- def collmean (inlist):
- s = 0
- for item in inlist:
- s = s + item
- return s/float(len(inlist))
-
- if type(keepcols) not in [ListType,TupleType]:
- keepcols = [keepcols]
- if type(collapsecols) not in [ListType,TupleType]:
- collapsecols = [collapsecols]
- if cfcn == None:
- cfcn = collmean
- if keepcols == []:
- means = [0]*len(collapsecols)
- for i in range(len(collapsecols)):
- avgcol = colex(listoflists,collapsecols[i])
- means[i] = cfcn(avgcol)
- if fcn1:
- try:
- test = fcn1(avgcol)
- except:
- test = 'N/A'
- means[i] = [means[i], test]
- if fcn2:
- try:
- test = fcn2(avgcol)
- except:
- test = 'N/A'
- try:
- means[i] = means[i] + [len(avgcol)]
- except TypeError:
- means[i] = [means[i],len(avgcol)]
- return means
- else:
- values = colex(listoflists,keepcols)
- uniques = unique(values)
- uniques.sort()
- newlist = []
- if type(keepcols) not in [ListType,TupleType]: keepcols = [keepcols]
- for item in uniques:
- if type(item) not in [ListType,TupleType]: item =[item]
- tmprows = linexand(listoflists,keepcols,item)
- for col in collapsecols:
- avgcol = colex(tmprows,col)
- item.append(cfcn(avgcol))
- if fcn1 <> None:
- try:
- test = fcn1(avgcol)
- except:
- test = 'N/A'
- item.append(test)
- if fcn2 <> None:
- try:
- test = fcn2(avgcol)
- except:
- test = 'N/A'
- item.append(test)
- newlist.append(item)
- return newlist
-
-
-def dm (listoflists,criterion):
- """
+
+ def collmean(inlist):
+ s = 0
+ for item in inlist:
+ s = s + item
+ return s / float(len(inlist))
+
+ if type(keepcols) not in [ListType, TupleType]:
+ keepcols = [keepcols]
+ if type(collapsecols) not in [ListType, TupleType]:
+ collapsecols = [collapsecols]
+ if cfcn == None:
+ cfcn = collmean
+ if keepcols == []:
+ means = [0] * len(collapsecols)
+ for i in range(len(collapsecols)):
+ avgcol = colex(listoflists, collapsecols[i])
+ means[i] = cfcn(avgcol)
+ if fcn1:
+ try:
+ test = fcn1(avgcol)
+ except:
+ test = 'N/A'
+ means[i] = [means[i], test]
+ if fcn2:
+ try:
+ test = fcn2(avgcol)
+ except:
+ test = 'N/A'
+ try:
+ means[i] = means[i] + [len(avgcol)]
+ except TypeError:
+ means[i] = [means[i], len(avgcol)]
+ return means
+ else:
+ values = colex(listoflists, keepcols)
+ uniques = unique(values)
+ uniques.sort()
+ newlist = []
+ if type(keepcols) not in [ListType, TupleType]:
+ keepcols = [keepcols]
+ for item in uniques:
+ if type(item) not in [ListType, TupleType]:
+ item = [item]
+ tmprows = linexand(listoflists, keepcols, item)
+ for col in collapsecols:
+ avgcol = colex(tmprows, col)
+ item.append(cfcn(avgcol))
+ if fcn1 <> None:
+ try:
+ test = fcn1(avgcol)
+ except:
+ test = 'N/A'
+ item.append(test)
+ if fcn2 <> None:
+ try:
+ test = fcn2(avgcol)
+ except:
+ test = 'N/A'
+ item.append(test)
+ newlist.append(item)
+ return newlist
+
+
+def dm(listoflists, criterion):
+ """
Returns rows from the passed list of lists that meet the criteria in
the passed criterion expression (a string as a function of x; e.g., 'x[3]>=9'
will return all rows where the 4th column>=9 and "x[2]=='N'" will return rows
@@ -316,27 +325,27 @@ with column 2 equal to the string 'N').
Usage: dm (listoflists, criterion)
Returns: rows from listoflists that meet the specified criterion.
"""
- function = 'filter(lambda x: '+criterion+',listoflists)'
- lines = eval(function)
- return lines
+ function = 'filter(lambda x: ' + criterion + ',listoflists)'
+ lines = eval(function)
+ return lines
def flat(l):
- """
+ """
Returns the flattened version of a '2D' list. List-correlate to the a.ravel()()
method of NumPy arrays.
Usage: flat(l)
"""
- newl = []
- for i in range(len(l)):
- for j in range(len(l[i])):
- newl.append(l[i][j])
- return newl
+ newl = []
+ for i in range(len(l)):
+ for j in range(len(l[i])):
+ newl.append(l[i][j])
+ return newl
-def linexand (listoflists,columnlist,valuelist):
- """
+def linexand(listoflists, columnlist, valuelist):
+ """
Returns the rows of a list of lists where col (from columnlist) = val
(from valuelist) for EVERY pair of values (columnlist[i],valuelists[i]).
len(columnlist) must equal len(valuelist).
@@ -344,25 +353,26 @@ len(columnlist) must equal len(valuelist).
Usage: linexand (listoflists,columnlist,valuelist)
Returns: the rows of listoflists where columnlist[i]=valuelist[i] for ALL i
"""
- if type(columnlist) not in [ListType,TupleType]:
- columnlist = [columnlist]
- if type(valuelist) not in [ListType,TupleType]:
- valuelist = [valuelist]
- criterion = ''
- for i in range(len(columnlist)):
- if type(valuelist[i])==StringType:
- critval = '\'' + valuelist[i] + '\''
- else:
- critval = str(valuelist[i])
- criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' and'
- criterion = criterion[0:-3] # remove the "and" after the last crit
- function = 'filter(lambda x: '+criterion+',listoflists)'
- lines = eval(function)
- return lines
+ if type(columnlist) not in [ListType, TupleType]:
+ columnlist = [columnlist]
+ if type(valuelist) not in [ListType, TupleType]:
+ valuelist = [valuelist]
+ criterion = ''
+ for i in range(len(columnlist)):
+ if type(valuelist[i]) == StringType:
+ critval = '\'' + valuelist[i] + '\''
+ else:
+ critval = str(valuelist[i])
+ criterion = criterion + ' x[' + str(columnlist[
+ i]) + ']==' + critval + ' and'
+ criterion = criterion[0:-3] # remove the "and" after the last crit
+ function = 'filter(lambda x: ' + criterion + ',listoflists)'
+ lines = eval(function)
+ return lines
-def linexor (listoflists,columnlist,valuelist):
- """
+def linexor(listoflists, columnlist, valuelist):
+ """
Returns the rows of a list of lists where col (from columnlist) = val
(from valuelist) for ANY pair of values (colunmlist[i],valuelist[i[).
One value is required for each column in columnlist. If only one value
@@ -372,65 +382,65 @@ valuelist values are all assumed to pertain to the same column.
Usage: linexor (listoflists,columnlist,valuelist)
Returns: the rows of listoflists where columnlist[i]=valuelist[i] for ANY i
"""
- if type(columnlist) not in [ListType,TupleType]:
- columnlist = [columnlist]
- if type(valuelist) not in [ListType,TupleType]:
- valuelist = [valuelist]
- criterion = ''
- if len(columnlist) == 1 and len(valuelist) > 1:
- columnlist = columnlist*len(valuelist)
- for i in range(len(columnlist)): # build an exec string
- if type(valuelist[i])==StringType:
- critval = '\'' + valuelist[i] + '\''
- else:
- critval = str(valuelist[i])
- criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' or'
- criterion = criterion[0:-2] # remove the "or" after the last crit
- function = 'filter(lambda x: '+criterion+',listoflists)'
- lines = eval(function)
- return lines
+ if type(columnlist) not in [ListType, TupleType]:
+ columnlist = [columnlist]
+ if type(valuelist) not in [ListType, TupleType]:
+ valuelist = [valuelist]
+ criterion = ''
+ if len(columnlist) == 1 and len(valuelist) > 1:
+ columnlist = columnlist * len(valuelist)
+ for i in range(len(columnlist)): # build an exec string
+ if type(valuelist[i]) == StringType:
+ critval = '\'' + valuelist[i] + '\''
+ else:
+ critval = str(valuelist[i])
+ criterion = criterion + ' x[' + str(columnlist[i]) + ']==' + critval + ' or'
+ criterion = criterion[0:-2] # remove the "or" after the last crit
+ function = 'filter(lambda x: ' + criterion + ',listoflists)'
+ lines = eval(function)
+ return lines
-def linedelimited (inlist,delimiter):
- """
+def linedelimited(inlist, delimiter):
+ """
Returns a string composed of elements in inlist, with each element
separated by 'delimiter.' Used by function writedelimited. Use '\t'
for tab-delimiting.
Usage: linedelimited (inlist,delimiter)
"""
- outstr = ''
- for item in inlist:
- if type(item) <> StringType:
- item = str(item)
- outstr = outstr + item + delimiter
- outstr = outstr[0:-1]
- return outstr
+ outstr = ''
+ for item in inlist:
+ if type(item) <> StringType:
+ item = str(item)
+ outstr = outstr + item + delimiter
+ outstr = outstr[0:-1]
+ return outstr
-def lineincols (inlist,colsize):
- """
+def lineincols(inlist, colsize):
+ """
Returns a string composed of elements in inlist, with each element
right-aligned in columns of (fixed) colsize.
Usage: lineincols (inlist,colsize) where colsize is an integer
"""
- outstr = ''
- for item in inlist:
- if type(item) <> StringType:
- item = str(item)
- size = len(item)
- if size <= colsize:
- for i in range(colsize-size):
- outstr = outstr + ' '
- outstr = outstr + item
- else:
- outstr = outstr + item[0:colsize+1]
- return outstr
+ outstr = ''
+ for item in inlist:
+ if type(item) <> StringType:
+ item = str(item)
+ size = len(item)
+ if size <= colsize:
+ for i in range(colsize - size):
+ outstr = outstr + ' '
+ outstr = outstr + item
+ else:
+ outstr = outstr + item[0:colsize + 1]
+ return outstr
-def lineincustcols (inlist,colsizes):
- """
+def lineincustcols(inlist, colsizes):
+ """
Returns a string composed of elements in inlist, with each element
right-aligned in a column of width specified by a sequence colsizes. The
length of colsizes must be greater than or equal to the number of columns
@@ -439,56 +449,56 @@ in inlist.
Usage: lineincustcols (inlist,colsizes)
Returns: formatted string created from inlist
"""
- outstr = ''
- for i in range(len(inlist)):
- if type(inlist[i]) <> StringType:
- item = str(inlist[i])
- else:
- item = inlist[i]
- size = len(item)
- if size <= colsizes[i]:
- for j in range(colsizes[i]-size):
- outstr = outstr + ' '
- outstr = outstr + item
- else:
- outstr = outstr + item[0:colsizes[i]+1]
- return outstr
+ outstr = ''
+ for i in range(len(inlist)):
+ if type(inlist[i]) <> StringType:
+ item = str(inlist[i])
+ else:
+ item = inlist[i]
+ size = len(item)
+ if size <= colsizes[i]:
+ for j in range(colsizes[i] - size):
+ outstr = outstr + ' '
+ outstr = outstr + item
+ else:
+ outstr = outstr + item[0:colsizes[i] + 1]
+ return outstr
-def list2string (inlist,delimit=' '):
- """
+def list2string(inlist, delimit=' '):
+ """
Converts a 1D list to a single long string for file output, using
the string.join function.
Usage: list2string (inlist,delimit=' ')
Returns: the string created from inlist
"""
- stringlist = map(makestr,inlist)
- return string.join(stringlist,delimit)
+ stringlist = map(makestr, inlist)
+ return string.join(stringlist, delimit)
def makelol(inlist):
- """
+ """
Converts a 1D list to a 2D list (i.e., a list-of-lists). Useful when you
want to use put() to write a 1D list one item per line in the file.
Usage: makelol(inlist)
Returns: if l = [1,2,'hi'] then returns [[1],[2],['hi']] etc.
"""
- x = []
- for item in inlist:
- x.append([item])
- return x
+ x = []
+ for item in inlist:
+ x.append([item])
+ return x
-def makestr (x):
- if type(x) <> StringType:
- x = str(x)
- return x
+def makestr(x):
+ if type(x) <> StringType:
+ x = str(x)
+ return x
-def printcc (lst,extra=2):
- """
+def printcc(lst, extra=2):
+ """
Prints a list of lists in columns, customized by the max size of items
within the columns (max size of items in col, plus 'extra' number of spaces).
Use 'dashes' or '\\n' in the list-of-lists to print dashes or blank lines,
@@ -497,85 +507,88 @@ respectively.
Usage: printcc (lst,extra=2)
Returns: None
"""
- if type(lst[0]) not in [ListType,TupleType]:
- lst = [lst]
- rowstokill = []
- list2print = copy.deepcopy(lst)
- for i in range(len(lst)):
- if lst[i] == ['\n'] or lst[i]=='\n' or lst[i]=='dashes' or lst[i]=='' or lst[i]==['']:
- rowstokill = rowstokill + [i]
- rowstokill.reverse() # delete blank rows from the end
- for row in rowstokill:
- del list2print[row]
- maxsize = [0]*len(list2print[0])
- for col in range(len(list2print[0])):
- items = colex(list2print,col)
- items = map(makestr,items)
- maxsize[col] = max(map(len,items)) + extra
- for row in lst:
- if row == ['\n'] or row == '\n' or row == '' or row == ['']:
- print
- elif row == ['dashes'] or row == 'dashes':
- dashes = [0]*len(maxsize)
- for j in range(len(maxsize)):
- dashes[j] = '-'*(maxsize[j]-2)
- print lineincustcols(dashes,maxsize)
- else:
- print lineincustcols(row,maxsize)
- return None
+ if type(lst[0]) not in [ListType, TupleType]:
+ lst = [lst]
+ rowstokill = []
+ list2print = copy.deepcopy(lst)
+ for i in range(len(lst)):
+ if lst[i] == [
+ '\n'
+ ] or lst[i] == '\n' or lst[i] == 'dashes' or lst[i] == '' or lst[i] == ['']:
+ rowstokill = rowstokill + [i]
+ rowstokill.reverse() # delete blank rows from the end
+ for row in rowstokill:
+ del list2print[row]
+ maxsize = [0] * len(list2print[0])
+ for col in range(len(list2print[0])):
+ items = colex(list2print, col)
+ items = map(makestr, items)
+ maxsize[col] = max(map(len, items)) + extra
+ for row in lst:
+ if row == ['\n'] or row == '\n' or row == '' or row == ['']:
+ print
+ elif row == ['dashes'] or row == 'dashes':
+ dashes = [0] * len(maxsize)
+ for j in range(len(maxsize)):
+ dashes[j] = '-' * (maxsize[j] - 2)
+ print lineincustcols(dashes, maxsize)
+ else:
+ print lineincustcols(row, maxsize)
+ return None
-def printincols (listoflists,colsize):
- """
+def printincols(listoflists, colsize):
+ """
Prints a list of lists in columns of (fixed) colsize width, where
colsize is an integer.
Usage: printincols (listoflists,colsize)
Returns: None
"""
- for row in listoflists:
- print lineincols(row,colsize)
- return None
+ for row in listoflists:
+ print lineincols(row, colsize)
+ return None
-def pl (listoflists):
- """
+def pl(listoflists):
+ """
Prints a list of lists, 1 list (row) at a time.
Usage: pl(listoflists)
Returns: None
"""
- for row in listoflists:
- if row[-1] == '\n':
- print row,
- else:
- print row
- return None
+ for row in listoflists:
+ if row[-1] == '\n':
+ print row,
+ else:
+ print row
+ return None
def printl(listoflists):
- """Alias for pl."""
- pl(listoflists)
- return
+ """Alias for pl."""
+ pl(listoflists)
+ return
-def replace (inlst,oldval,newval):
- """
+def replace(inlst, oldval, newval):
+ """
Replaces all occurrences of 'oldval' with 'newval', recursively.
Usage: replace (inlst,oldval,newval)
"""
- lst = inlst*1
- for i in range(len(lst)):
- if type(lst[i]) not in [ListType,TupleType]:
- if lst[i]==oldval: lst[i]=newval
- else:
- lst[i] = replace(lst[i],oldval,newval)
- return lst
+ lst = inlst * 1
+ for i in range(len(lst)):
+ if type(lst[i]) not in [ListType, TupleType]:
+ if lst[i] == oldval:
+ lst[i] = newval
+ else:
+ lst[i] = replace(lst[i], oldval, newval)
+ return lst
-def recode (inlist,listmap,cols=None):
- """
+def recode(inlist, listmap, cols=None):
+ """
Changes the values in a list to a new set of values (useful when
you need to recode data from (e.g.) strings to numbers. cols defaults
to None (meaning all columns are recoded).
@@ -583,81 +596,81 @@ to None (meaning all columns are recoded).
Usage: recode (inlist,listmap,cols=None) cols=recode cols, listmap=2D list
Returns: inlist with the appropriate values replaced with new ones
"""
- lst = copy.deepcopy(inlist)
- if cols != None:
- if type(cols) not in [ListType,TupleType]:
- cols = [cols]
- for col in cols:
- for row in range(len(lst)):
- try:
- idx = colex(listmap,0).index(lst[row][col])
- lst[row][col] = listmap[idx][1]
- except ValueError:
- pass
- else:
- for row in range(len(lst)):
- for col in range(len(lst)):
- try:
- idx = colex(listmap,0).index(lst[row][col])
- lst[row][col] = listmap[idx][1]
- except ValueError:
- pass
- return lst
+ lst = copy.deepcopy(inlist)
+ if cols != None:
+ if type(cols) not in [ListType, TupleType]:
+ cols = [cols]
+ for col in cols:
+ for row in range(len(lst)):
+ try:
+ idx = colex(listmap, 0).index(lst[row][col])
+ lst[row][col] = listmap[idx][1]
+ except ValueError:
+ pass
+ else:
+ for row in range(len(lst)):
+ for col in range(len(lst)):
+ try:
+ idx = colex(listmap, 0).index(lst[row][col])
+ lst[row][col] = listmap[idx][1]
+ except ValueError:
+ pass
+ return lst
-def remap (listoflists,criterion):
- """
+def remap(listoflists, criterion):
+ """
Remaps values in a given column of a 2D list (listoflists). This requires
a criterion as a function of 'x' so that the result of the following is
-returned ... map(lambda x: 'criterion',listoflists).
+returned ... map(lambda x: 'criterion',listoflists).
Usage: remap(listoflists,criterion) criterion=string
Returns: remapped version of listoflists
"""
- function = 'map(lambda x: '+criterion+',listoflists)'
- lines = eval(function)
- return lines
+ function = 'map(lambda x: ' + criterion + ',listoflists)'
+ lines = eval(function)
+ return lines
-def roundlist (inlist,digits):
- """
+def roundlist(inlist, digits):
+ """
Goes through each element in a 1D or 2D inlist, and applies the following
function to all elements of FloatType ... round(element,digits).
Usage: roundlist(inlist,digits)
Returns: list with rounded floats
"""
- if type(inlist[0]) in [IntType, FloatType]:
- inlist = [inlist]
- l = inlist*1
- for i in range(len(l)):
- for j in range(len(l[i])):
- if type(l[i][j])==FloatType:
- l[i][j] = round(l[i][j],digits)
- return l
-
-
-def sortby(listoflists,sortcols):
- """
+ if type(inlist[0]) in [IntType, FloatType]:
+ inlist = [inlist]
+ l = inlist * 1
+ for i in range(len(l)):
+ for j in range(len(l[i])):
+ if type(l[i][j]) == FloatType:
+ l[i][j] = round(l[i][j], digits)
+ return l
+
+
+def sortby(listoflists, sortcols):
+ """
Sorts a list of lists on the column(s) specified in the sequence
sortcols.
Usage: sortby(listoflists,sortcols)
Returns: sorted list, unchanged column ordering
"""
- newlist = abut(colex(listoflists,sortcols),listoflists)
- newlist.sort()
- try:
- numcols = len(sortcols)
- except TypeError:
- numcols = 1
- crit = '[' + str(numcols) + ':]'
- newlist = colex(newlist,crit)
- return newlist
-
-
-def unique (inlist):
- """
+ newlist = abut(colex(listoflists, sortcols), listoflists)
+ newlist.sort()
+ try:
+ numcols = len(sortcols)
+ except TypeError:
+ numcols = 1
+ crit = '[' + str(numcols) + ':]'
+ newlist = colex(newlist, crit)
+ return newlist
+
+
+def unique(inlist):
+ """
Returns all unique items in the passed list. If the a list-of-lists
is passed, unique LISTS are found (i.e., items in the first dimension are
compared).
@@ -665,37 +678,37 @@ compared).
Usage: unique (inlist)
Returns: the unique elements (or rows) in inlist
"""
- uniques = []
- for item in inlist:
- if item not in uniques:
- uniques.append(item)
- return uniques
+ uniques = []
+ for item in inlist:
+ if item not in uniques:
+ uniques.append(item)
+ return uniques
+
def duplicates(inlist):
- """
+ """
Returns duplicate items in the FIRST dimension of the passed list.
Usage: duplicates (inlist)
"""
- dups = []
- for i in range(len(inlist)):
- if inlist[i] in inlist[i+1:]:
- dups.append(inlist[i])
- return dups
+ dups = []
+ for i in range(len(inlist)):
+ if inlist[i] in inlist[i + 1:]:
+ dups.append(inlist[i])
+ return dups
def nonrepeats(inlist):
- """
+ """
Returns items that are NOT duplicated in the first dim of the passed list.
Usage: nonrepeats (inlist)
"""
- nonrepeats = []
- for i in range(len(inlist)):
- if inlist.count(inlist[i]) == 1:
- nonrepeats.append(inlist[i])
- return nonrepeats
-
+ nonrepeats = []
+ for i in range(len(inlist)):
+ if inlist.count(inlist[i]) == 1:
+ nonrepeats.append(inlist[i])
+ return nonrepeats
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
@@ -714,10 +727,10 @@ Usage: nonrepeats (inlist)
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
-try: # DEFINE THESE *ONLY* IF numpy IS AVAILABLE
- import numpy as N
+try: # DEFINE THESE *ONLY* IF numpy IS AVAILABLE
+ import numpy as N
- def aabut (source, *args):
+ def aabut(source, *args):
"""
Like the |Stat abut command. It concatenates two arrays column-wise
and returns the result. CAUTION: If one array is shorter, it will be
@@ -727,26 +740,25 @@ Usage: aabut (source, args) where args=any # of arrays
Returns: an array as long as the LONGEST array past, source appearing on the
'left', arrays in <args> attached on the 'right'.
"""
- if len(source.shape)==1:
- width = 1
- source = N.resize(source,[source.shape[0],width])
+ if len(source.shape) == 1:
+ width = 1
+ source = N.resize(source, [source.shape[0], width])
else:
- width = source.shape[1]
+ width = source.shape[1]
for addon in args:
- if len(addon.shape)==1:
- width = 1
- addon = N.resize(addon,[source.shape[0],width])
- else:
- width = source.shape[1]
- if len(addon) < len(source):
- addon = N.resize(addon,[source.shape[0],addon.shape[1]])
- elif len(source) < len(addon):
- source = N.resize(source,[addon.shape[0],source.shape[1]])
- source = N.concatenate((source,addon),1)
+ if len(addon.shape) == 1:
+ width = 1
+ addon = N.resize(addon, [source.shape[0], width])
+ else:
+ width = source.shape[1]
+ if len(addon) < len(source):
+ addon = N.resize(addon, [source.shape[0], addon.shape[1]])
+ elif len(source) < len(addon):
+ source = N.resize(source, [addon.shape[0], source.shape[1]])
+ source = N.concatenate((source, addon), 1)
return source
-
- def acolex (a,indices,axis=1):
+ def acolex(a, indices, axis=1):
"""
Extracts specified indices (a list) from passed array, along passed
axis (column extraction is default). BEWARE: A 1D array is presumed to be a
@@ -755,18 +767,17 @@ column-array (and that the whole array will be returned as a column).
Usage: acolex (a,indices,axis=1)
Returns: the columns of a specified by indices
"""
- if type(indices) not in [ListType,TupleType,N.ndarray]:
- indices = [indices]
+ if type(indices) not in [ListType, TupleType, N.ndarray]:
+ indices = [indices]
if len(N.shape(a)) == 1:
- cols = N.resize(a,[a.shape[0],1])
+ cols = N.resize(a, [a.shape[0], 1])
else:
-# print a[:3]
- cols = N.take(a,indices,axis)
+ # print a[:3]
+ cols = N.take(a, indices, axis)
# print cols[:3]
return cols
-
- def acollapse (a,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None):
+ def acollapse(a, keepcols, collapsecols, fcn1=None, fcn2=None, cfcn=None):
"""
Averages data in collapsecol, keeping all unique items in keepcols
(using unique, which keeps unique LISTS of column numbers), retaining
@@ -778,90 +789,88 @@ Returns: unique 'conditions' specified by the contents of columns specified
by keepcols, abutted with the mean(s) of column(s) specified by
collapsecols
"""
- def acollmean (inarray):
- return N.sum(N.ravel(inarray))
- if type(keepcols) not in [ListType,TupleType,N.ndarray]:
- keepcols = [keepcols]
- if type(collapsecols) not in [ListType,TupleType,N.ndarray]:
- collapsecols = [collapsecols]
+ def acollmean(inarray):
+ return N.sum(N.ravel(inarray))
+
+ if type(keepcols) not in [ListType, TupleType, N.ndarray]:
+ keepcols = [keepcols]
+ if type(collapsecols) not in [ListType, TupleType, N.ndarray]:
+ collapsecols = [collapsecols]
if cfcn == None:
- cfcn = acollmean
+ cfcn = acollmean
if keepcols == []:
- avgcol = acolex(a,collapsecols)
- means = N.sum(avgcol)/float(len(avgcol))
- if fcn1<>None:
+ avgcol = acolex(a, collapsecols)
+ means = N.sum(avgcol) / float(len(avgcol))
+ if fcn1 <> None:
+ try:
+ test = fcn1(avgcol)
+ except:
+ test = N.array(['N/A'] * len(means))
+ means = aabut(means, test)
+ if fcn2 <> None:
+ try:
+ test = fcn2(avgcol)
+ except:
+ test = N.array(['N/A'] * len(means))
+ means = aabut(means, test)
+ return means
+ else:
+ if type(keepcols) not in [ListType, TupleType, N.ndarray]:
+ keepcols = [keepcols]
+ values = colex(a, keepcols) # so that "item" can be appended (below)
+ uniques = unique(values) # get a LIST, so .sort keeps rows intact
+ uniques.sort()
+ newlist = []
+ for item in uniques:
+ if type(item) not in [ListType, TupleType, N.ndarray]:
+ item = [item]
+ tmprows = alinexand(a, keepcols, item)
+ for col in collapsecols:
+ avgcol = acolex(tmprows, col)
+ item.append(acollmean(avgcol))
+ if fcn1 <> None:
try:
- test = fcn1(avgcol)
+ test = fcn1(avgcol)
except:
- test = N.array(['N/A']*len(means))
- means = aabut(means,test)
- if fcn2<>None:
+ test = 'N/A'
+ item.append(test)
+ if fcn2 <> None:
try:
- test = fcn2(avgcol)
+ test = fcn2(avgcol)
except:
- test = N.array(['N/A']*len(means))
- means = aabut(means,test)
- return means
- else:
- if type(keepcols) not in [ListType,TupleType,N.ndarray]:
- keepcols = [keepcols]
- values = colex(a,keepcols) # so that "item" can be appended (below)
- uniques = unique(values) # get a LIST, so .sort keeps rows intact
- uniques.sort()
- newlist = []
- for item in uniques:
- if type(item) not in [ListType,TupleType,N.ndarray]:
- item =[item]
- tmprows = alinexand(a,keepcols,item)
- for col in collapsecols:
- avgcol = acolex(tmprows,col)
- item.append(acollmean(avgcol))
- if fcn1<>None:
- try:
- test = fcn1(avgcol)
- except:
- test = 'N/A'
- item.append(test)
- if fcn2<>None:
- try:
- test = fcn2(avgcol)
- except:
- test = 'N/A'
- item.append(test)
- newlist.append(item)
- try:
- new_a = N.array(newlist)
- except TypeError:
- new_a = N.array(newlist,'O')
- return new_a
-
-
- def adm (a,criterion):
+ test = 'N/A'
+ item.append(test)
+ newlist.append(item)
+ try:
+ new_a = N.array(newlist)
+ except TypeError:
+ new_a = N.array(newlist, 'O')
+ return new_a
+
+ def adm(a, criterion):
"""
Returns rows from the passed list of lists that meet the criteria in
the passed criterion expression (a string as a function of x).
Usage: adm (a,criterion) where criterion is like 'x[2]==37'
"""
- function = 'filter(lambda x: '+criterion+',a)'
+ function = 'filter(lambda x: ' + criterion + ',a)'
lines = eval(function)
try:
- lines = N.array(lines)
+ lines = N.array(lines)
except:
- lines = N.array(lines,dtype='O')
+ lines = N.array(lines, dtype='O')
return lines
-
- def isstring(x):
- if type(x)==StringType:
- return 1
+ def isstring(x):
+ if type(x) == StringType:
+ return 1
else:
- return 0
+ return 0
-
- def alinexand (a,columnlist,valuelist):
+ def alinexand(a, columnlist, valuelist):
"""
Returns the rows of an array where col (from columnlist) = val
(from valuelist). One value is required for each column in columnlist.
@@ -869,22 +878,22 @@ Returns the rows of an array where col (from columnlist) = val
Usage: alinexand (a,columnlist,valuelist)
Returns: the rows of a where columnlist[i]=valuelist[i] for ALL i
"""
- if type(columnlist) not in [ListType,TupleType,N.ndarray]:
- columnlist = [columnlist]
- if type(valuelist) not in [ListType,TupleType,N.ndarray]:
- valuelist = [valuelist]
+ if type(columnlist) not in [ListType, TupleType, N.ndarray]:
+ columnlist = [columnlist]
+ if type(valuelist) not in [ListType, TupleType, N.ndarray]:
+ valuelist = [valuelist]
criterion = ''
for i in range(len(columnlist)):
- if type(valuelist[i])==StringType:
- critval = '\'' + valuelist[i] + '\''
- else:
- critval = str(valuelist[i])
- criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' and'
- criterion = criterion[0:-3] # remove the "and" after the last crit
- return adm(a,criterion)
-
-
- def alinexor (a,columnlist,valuelist):
+ if type(valuelist[i]) == StringType:
+ critval = '\'' + valuelist[i] + '\''
+ else:
+ critval = str(valuelist[i])
+ criterion = criterion + ' x[' + str(columnlist[
+ i]) + ']==' + critval + ' and'
+ criterion = criterion[0:-3] # remove the "and" after the last crit
+ return adm(a, criterion)
+
+ def alinexor(a, columnlist, valuelist):
"""
Returns the rows of an array where col (from columnlist) = val (from
valuelist). One value is required for each column in columnlist.
@@ -895,35 +904,34 @@ other list.
Usage: alinexor (a,columnlist,valuelist)
Returns: the rows of a where columnlist[i]=valuelist[i] for ANY i
"""
- if type(columnlist) not in [ListType,TupleType,N.ndarray]:
- columnlist = [columnlist]
- if type(valuelist) not in [ListType,TupleType,N.ndarray]:
- valuelist = [valuelist]
+ if type(columnlist) not in [ListType, TupleType, N.ndarray]:
+ columnlist = [columnlist]
+ if type(valuelist) not in [ListType, TupleType, N.ndarray]:
+ valuelist = [valuelist]
criterion = ''
if len(columnlist) == 1 and len(valuelist) > 1:
- columnlist = columnlist*len(valuelist)
+ columnlist = columnlist * len(valuelist)
elif len(valuelist) == 1 and len(columnlist) > 1:
- valuelist = valuelist*len(columnlist)
+ valuelist = valuelist * len(columnlist)
for i in range(len(columnlist)):
- if type(valuelist[i])==StringType:
- critval = '\'' + valuelist[i] + '\''
- else:
- critval = str(valuelist[i])
- criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' or'
- criterion = criterion[0:-2] # remove the "or" after the last crit
- return adm(a,criterion)
-
-
- def areplace (a,oldval,newval):
+ if type(valuelist[i]) == StringType:
+ critval = '\'' + valuelist[i] + '\''
+ else:
+ critval = str(valuelist[i])
+ criterion = criterion + ' x[' + str(columnlist[
+ i]) + ']==' + critval + ' or'
+ criterion = criterion[0:-2] # remove the "or" after the last crit
+ return adm(a, criterion)
+
+ def areplace(a, oldval, newval):
"""
Replaces all occurrences of oldval with newval in array a.
Usage: areplace(a,oldval,newval)
"""
- return N.where(a==oldval,newval,a)
-
+ return N.where(a == oldval, newval, a)
- def arecode (a,listmap,col='all'):
+ def arecode(a, listmap, col='all'):
"""
Remaps the values in an array to a new set of values (useful when
you need to recode data from (e.g.) strings to numbers as most stats
@@ -935,27 +943,29 @@ Returns: a version of array a where listmap[i][0] = (instead) listmap[i][1]
"""
ashape = a.shape
if col == 'all':
- work = a.ravel()
+ work = a.ravel()
else:
- work = acolex(a,col)
- work = work.ravel()
+ work = acolex(a, col)
+ work = work.ravel()
for pair in listmap:
- if type(pair[1]) == StringType or work.dtype.char=='O' or a.dtype.char=='O':
- work = N.array(work,dtype='O')
- a = N.array(a,dtype='O')
- for i in range(len(work)):
- if work[i]==pair[0]:
- work[i] = pair[1]
- if col == 'all':
- return N.reshape(work,ashape)
- else:
- return N.concatenate([a[:,0:col],work[:,N.newaxis],a[:,col+1:]],1)
- else: # must be a non-Object type array and replacement
- work = N.where(work==pair[0],pair[1],work)
- return N.concatenate([a[:,0:col],work[:,N.newaxis],a[:,col+1:]],1)
-
-
- def arowcompare(row1, row2):
+ if type(pair[
+ 1]) == StringType or work.dtype.char == 'O' or a.dtype.char == 'O':
+ work = N.array(work, dtype='O')
+ a = N.array(a, dtype='O')
+ for i in range(len(work)):
+ if work[i] == pair[0]:
+ work[i] = pair[1]
+ if col == 'all':
+ return N.reshape(work, ashape)
+ else:
+ return N.concatenate(
+ [a[:, 0:col], work[:, N.newaxis], a[:, col + 1:]], 1)
+ else: # must be a non-Object type array and replacement
+ work = N.where(work == pair[0], pair[1], work)
+ return N.concatenate(
+ [a[:, 0:col], work[:, N.newaxis], a[:, col + 1:]], 1)
+
+ def arowcompare(row1, row2):
"""
Compares two rows from an array, regardless of whether it is an
array of numbers or of python objects (which requires the cmp function).
@@ -965,15 +975,15 @@ Usage: arowcompare(row1,row2)
Returns: an array of equal length containing 1s where the two rows had
identical elements and 0 otherwise
"""
- return
- if row1.dtype.char=='O' or row2.dtype=='O':
- cmpvect = N.logical_not(abs(N.array(map(cmp,row1,row2)))) # cmp fcn gives -1,0,1
+ return
+ if row1.dtype.char == 'O' or row2.dtype == 'O':
+ cmpvect = N.logical_not(abs(N.array(map(cmp, row1, row2)))
+ ) # cmp fcn gives -1,0,1
else:
- cmpvect = N.equal(row1,row2)
+ cmpvect = N.equal(row1, row2)
return cmpvect
-
- def arowsame(row1, row2):
+ def arowsame(row1, row2):
"""
Compares two rows from an array, regardless of whether it is an
array of numbers or of python objects (which requires the cmp function).
@@ -981,11 +991,10 @@ array of numbers or of python objects (which requires the cmp function).
Usage: arowsame(row1,row2)
Returns: 1 if the two rows are identical, 0 otherwise.
"""
- cmpval = N.alltrue(arowcompare(row1,row2))
+ cmpval = N.alltrue(arowcompare(row1, row2))
return cmpval
-
- def asortrows(a,axis=0):
+ def asortrows(a, axis=0):
"""
Sorts an array "by rows". This differs from the Numeric.sort() function,
which sorts elements WITHIN the given axis. Instead, this function keeps
@@ -995,10 +1004,9 @@ relative to one another.
Usage: asortrows(a,axis=0)
Returns: sorted version of a
"""
- return N.sort(a,axis=axis,kind='mergesort')
+ return N.sort(a, axis=axis, kind='mergesort')
-
- def aunique(inarray):
+ def aunique(inarray):
"""
Returns unique items in the FIRST dimension of the passed array. Only
works on arrays NOT including string items.
@@ -1006,40 +1014,39 @@ works on arrays NOT including string items.
Usage: aunique (inarray)
"""
uniques = N.array([inarray[0]])
- if len(uniques.shape) == 1: # IF IT'S A 1D ARRAY
+ if len(uniques.shape) == 1: # IF IT'S A 1D ARRAY
+ for item in inarray[1:]:
+ if N.add.reduce(N.equal(uniques, item).ravel()) == 0:
+ try:
+ uniques = N.concatenate([uniques, N.array[N.newaxis, :]])
+ except TypeError:
+ uniques = N.concatenate([uniques, N.array([item])])
+ else: # IT MUST BE A 2+D ARRAY
+ if inarray.dtype.char != 'O': # not an Object array
for item in inarray[1:]:
- if N.add.reduce(N.equal(uniques,item).ravel()) == 0:
- try:
- uniques = N.concatenate([uniques,N.array[N.newaxis,:]])
- except TypeError:
- uniques = N.concatenate([uniques,N.array([item])])
- else: # IT MUST BE A 2+D ARRAY
- if inarray.dtype.char != 'O': # not an Object array
- for item in inarray[1:]:
- if not N.sum(N.alltrue(N.equal(uniques,item),1)):
- try:
- uniques = N.concatenate( [uniques,item[N.newaxis,:]] )
- except TypeError: # the item to add isn't a list
- uniques = N.concatenate([uniques,N.array([item])])
- else:
- pass # this item is already in the uniques array
- else: # must be an Object array, alltrue/equal functions don't work
- for item in inarray[1:]:
- newflag = 1
- for unq in uniques: # NOTE: cmp --> 0=same, -1=<, 1=>
- test = N.sum(abs(N.array(map(cmp,item,unq))))
- if test == 0: # if item identical to any 1 row in uniques
- newflag = 0 # then not a novel item to add
- break
- if newflag == 1:
- try:
- uniques = N.concatenate( [uniques,item[N.newaxis,:]] )
- except TypeError: # the item to add isn't a list
- uniques = N.concatenate([uniques,N.array([item])])
+ if not N.sum(N.alltrue(N.equal(uniques, item), 1)):
+ try:
+ uniques = N.concatenate([uniques, item[N.newaxis, :]])
+ except TypeError: # the item to add isn't a list
+ uniques = N.concatenate([uniques, N.array([item])])
+ else:
+ pass # this item is already in the uniques array
+ else: # must be an Object array, alltrue/equal functions don't work
+ for item in inarray[1:]:
+ newflag = 1
+ for unq in uniques: # NOTE: cmp --> 0=same, -1=<, 1=>
+ test = N.sum(abs(N.array(map(cmp, item, unq))))
+ if test == 0: # if item identical to any 1 row in uniques
+ newflag = 0 # then not a novel item to add
+ break
+ if newflag == 1:
+ try:
+ uniques = N.concatenate([uniques, item[N.newaxis, :]])
+ except TypeError: # the item to add isn't a list
+ uniques = N.concatenate([uniques, N.array([item])])
return uniques
-
- def aduplicates(inarray):
+ def aduplicates(inarray):
"""
Returns duplicate items in the FIRST dimension of the passed array. Only
works on arrays NOT including string items.
@@ -1047,22 +1054,22 @@ works on arrays NOT including string items.
Usage: aunique (inarray)
"""
inarray = N.array(inarray)
- if len(inarray.shape) == 1: # IF IT'S A 1D ARRAY
- dups = []
- inarray = inarray.tolist()
- for i in range(len(inarray)):
- if inarray[i] in inarray[i+1:]:
- dups.append(inarray[i])
- dups = aunique(dups)
- else: # IT MUST BE A 2+D ARRAY
- dups = []
- aslist = inarray.tolist()
- for i in range(len(aslist)):
- if aslist[i] in aslist[i+1:]:
- dups.append(aslist[i])
- dups = unique(dups)
- dups = N.array(dups)
+ if len(inarray.shape) == 1: # IF IT'S A 1D ARRAY
+ dups = []
+ inarray = inarray.tolist()
+ for i in range(len(inarray)):
+ if inarray[i] in inarray[i + 1:]:
+ dups.append(inarray[i])
+ dups = aunique(dups)
+ else: # IT MUST BE A 2+D ARRAY
+ dups = []
+ aslist = inarray.tolist()
+ for i in range(len(aslist)):
+ if aslist[i] in aslist[i + 1:]:
+ dups.append(aslist[i])
+ dups = unique(dups)
+ dups = N.array(dups)
return dups
-except ImportError: # IF NUMERIC ISN'T AVAILABLE, SKIP ALL arrayfuncs
- pass
+except ImportError: # IF NUMERIC ISN'T AVAILABLE, SKIP ALL arrayfuncs
+ pass
diff --git a/utils/stats.py b/utils/stats.py
index aceed824..813e618a 100644
--- a/utils/stats.py
+++ b/utils/stats.py
@@ -21,8 +21,7 @@
# Comments and/or additions are welcome (send e-mail to:
# strang@nmr.mgh.harvard.edu).
#
-"""
-stats.py module
+"""stats.py module
(Requires pstat.py module.)
@@ -224,7 +223,7 @@ SUPPORT FUNCTIONS: writecc
## changed name of skewness and askewness to skew and askew
## fixed (a)histogram (which sometimes counted points <lowerlimit)
-import pstat # required 3rd party module
+import pstat # required 3rd party module
import math, string, copy # required python modules
from types import *
@@ -234,7 +233,7 @@ __version__ = 0.6
class Dispatch:
- """
+ """
The Dispatch class, care of David Ascher, allows different functions to
be called depending on the argument types. This way, there can be one
function name regardless of the argument type. To access function doc
@@ -243,20 +242,19 @@ array arguments, respectively. That is, print stats.lmean.__doc__ or
print stats.amean.__doc__ or whatever.
"""
- def __init__(self, *tuples):
- self._dispatch = {}
- for func, types in tuples:
- for t in types:
- if t in self._dispatch.keys():
- raise ValueError, "can't have two dispatches on "+str(t)
- self._dispatch[t] = func
- self._types = self._dispatch.keys()
-
- def __call__(self, arg1, *args, **kw):
- if type(arg1) not in self._types:
- raise TypeError, "don't know how to dispatch %s arguments" % type(arg1)
- return apply(self._dispatch[type(arg1)], (arg1,) + args, kw)
+ def __init__(self, *tuples):
+ self._dispatch = {}
+ for func, types in tuples:
+ for t in types:
+ if t in self._dispatch.keys():
+ raise ValueError, "can't have two dispatches on " + str(t)
+ self._dispatch[t] = func
+ self._types = self._dispatch.keys()
+ def __call__(self, arg1, *args, **kw):
+ if type(arg1) not in self._types:
+ raise TypeError, "don't know how to dispatch %s arguments" % type(arg1)
+ return apply(self._dispatch[type(arg1)], (arg1,) + args, kw)
##########################################################################
######################## LIST-BASED FUNCTIONS ########################
@@ -268,48 +266,49 @@ print stats.amean.__doc__ or whatever.
####### CENTRAL TENDENCY #########
####################################
-def lgeometricmean (inlist):
- """
+
+def lgeometricmean(inlist):
+ """
Calculates the geometric mean of the values in the passed list.
That is: n-th root of (x1 * x2 * ... * xn). Assumes a '1D' list.
Usage: lgeometricmean(inlist)
"""
- mult = 1.0
- one_over_n = 1.0/len(inlist)
- for item in inlist:
- mult = mult * pow(item,one_over_n)
- return mult
+ mult = 1.0
+ one_over_n = 1.0 / len(inlist)
+ for item in inlist:
+ mult = mult * pow(item, one_over_n)
+ return mult
-def lharmonicmean (inlist):
- """
+def lharmonicmean(inlist):
+ """
Calculates the harmonic mean of the values in the passed list.
That is: n / (1/x1 + 1/x2 + ... + 1/xn). Assumes a '1D' list.
Usage: lharmonicmean(inlist)
"""
- sum = 0
- for item in inlist:
- sum = sum + 1.0/item
- return len(inlist) / sum
+ sum = 0
+ for item in inlist:
+ sum = sum + 1.0 / item
+ return len(inlist) / sum
-def lmean (inlist):
- """
+def lmean(inlist):
+ """
Returns the arithematic mean of the values in the passed list.
Assumes a '1D' list, but will function on the 1st dim of an array(!).
Usage: lmean(inlist)
"""
- sum = 0
- for item in inlist:
- sum = sum + item
- return sum/float(len(inlist))
+ sum = 0
+ for item in inlist:
+ sum = sum + item
+ return sum / float(len(inlist))
-def lmedian (inlist,numbins=1000):
- """
+def lmedian(inlist, numbins=1000):
+ """
Returns the computed median value of a list of numbers, given the
number of bins to use for the histogram (more bins brings the computed value
closer to the median score, default number of bins = 1000). See G.W.
@@ -317,40 +316,42 @@ Heiman's Basic Stats (1st Edition), or CRC Probability & Statistics.
Usage: lmedian (inlist, numbins=1000)
"""
- (hist, smallest, binsize, extras) = histogram(inlist,numbins,[min(inlist),max(inlist)]) # make histog
- cumhist = cumsum(hist) # make cumulative histogram
- for i in range(len(cumhist)): # get 1st(!) index holding 50%ile score
- if cumhist[i]>=len(inlist)/2.0:
- cfbin = i
- break
- LRL = smallest + binsize*cfbin # get lower read limit of that bin
- cfbelow = cumhist[cfbin-1]
- freq = float(hist[cfbin]) # frequency IN the 50%ile bin
- median = LRL + ((len(inlist)/2.0 - cfbelow)/float(freq))*binsize # median formula
- return median
-
-
-def lmedianscore (inlist):
- """
+ (hist, smallest, binsize, extras) = histogram(
+ inlist, numbins, [min(inlist), max(inlist)]) # make histog
+ cumhist = cumsum(hist) # make cumulative histogram
+ for i in range(len(cumhist)): # get 1st(!) index holding 50%ile score
+ if cumhist[i] >= len(inlist) / 2.0:
+ cfbin = i
+ break
+ LRL = smallest + binsize * cfbin # get lower read limit of that bin
+ cfbelow = cumhist[cfbin - 1]
+ freq = float(hist[cfbin]) # frequency IN the 50%ile bin
+ median = LRL + (
+ (len(inlist) / 2.0 - cfbelow) / float(freq)) * binsize # median formula
+ return median
+
+
+def lmedianscore(inlist):
+ """
Returns the 'middle' score of the passed list. If there is an even
number of scores, the mean of the 2 middle scores is returned.
Usage: lmedianscore(inlist)
"""
- newlist = copy.deepcopy(inlist)
- newlist.sort()
- if len(newlist) % 2 == 0: # if even number of scores, average middle 2
- index = len(newlist)/2 # integer division correct
- median = float(newlist[index] + newlist[index-1]) /2
- else:
- index = len(newlist)/2 # int divsion gives mid value when count from 0
- median = newlist[index]
- return median
+ newlist = copy.deepcopy(inlist)
+ newlist.sort()
+ if len(newlist) % 2 == 0: # if even number of scores, average middle 2
+ index = len(newlist) / 2 # integer division correct
+ median = float(newlist[index] + newlist[index - 1]) / 2
+ else:
+ index = len(newlist) / 2 # int divsion gives mid value when count from 0
+ median = newlist[index]
+ return median
def lmode(inlist):
- """
+ """
Returns a list of the modal (most common) score(s) in the passed
list. If there is more than one such score, all are returned. The
bin-count for the mode(s) is also returned.
@@ -359,219 +360,225 @@ Usage: lmode(inlist)
Returns: bin-count for mode(s), a list of modal value(s)
"""
- scores = pstat.unique(inlist)
- scores.sort()
- freq = []
- for item in scores:
- freq.append(inlist.count(item))
- maxfreq = max(freq)
- mode = []
- stillmore = 1
- while stillmore:
- try:
- indx = freq.index(maxfreq)
- mode.append(scores[indx])
- del freq[indx]
- del scores[indx]
- except ValueError:
- stillmore=0
- return maxfreq, mode
-
+ scores = pstat.unique(inlist)
+ scores.sort()
+ freq = []
+ for item in scores:
+ freq.append(inlist.count(item))
+ maxfreq = max(freq)
+ mode = []
+ stillmore = 1
+ while stillmore:
+ try:
+ indx = freq.index(maxfreq)
+ mode.append(scores[indx])
+ del freq[indx]
+ del scores[indx]
+ except ValueError:
+ stillmore = 0
+ return maxfreq, mode
####################################
############ MOMENTS #############
####################################
-def lmoment(inlist,moment=1):
- """
+
+def lmoment(inlist, moment=1):
+ """
Calculates the nth moment about the mean for a sample (defaults to
the 1st moment). Used to calculate coefficients of skewness and kurtosis.
Usage: lmoment(inlist,moment=1)
Returns: appropriate moment (r) from ... 1/n * SUM((inlist(i)-mean)**r)
"""
- if moment == 1:
- return 0.0
- else:
- mn = mean(inlist)
- n = len(inlist)
- s = 0
- for x in inlist:
- s = s + (x-mn)**moment
- return s/float(n)
+ if moment == 1:
+ return 0.0
+ else:
+ mn = mean(inlist)
+ n = len(inlist)
+ s = 0
+ for x in inlist:
+ s = s + (x - mn)**moment
+ return s / float(n)
def lvariation(inlist):
- """
+ """
Returns the coefficient of variation, as defined in CRC Standard
Probability and Statistics, p.6.
Usage: lvariation(inlist)
"""
- return 100.0*samplestdev(inlist)/float(mean(inlist))
+ return 100.0 * samplestdev(inlist) / float(mean(inlist))
def lskew(inlist):
- """
+ """
Returns the skewness of a distribution, as defined in Numerical
Recipies (alternate defn in CRC Standard Probability and Statistics, p.6.)
Usage: lskew(inlist)
"""
- return moment(inlist,3)/pow(moment(inlist,2),1.5)
+ return moment(inlist, 3) / pow(moment(inlist, 2), 1.5)
def lkurtosis(inlist):
- """
+ """
Returns the kurtosis of a distribution, as defined in Numerical
Recipies (alternate defn in CRC Standard Probability and Statistics, p.6.)
Usage: lkurtosis(inlist)
"""
- return moment(inlist,4)/pow(moment(inlist,2),2.0)
+ return moment(inlist, 4) / pow(moment(inlist, 2), 2.0)
def ldescribe(inlist):
- """
+ """
Returns some descriptive statistics of the passed list (assumed to be 1D).
Usage: ldescribe(inlist)
Returns: n, mean, standard deviation, skew, kurtosis
"""
- n = len(inlist)
- mm = (min(inlist),max(inlist))
- m = mean(inlist)
- sd = stdev(inlist)
- sk = skew(inlist)
- kurt = kurtosis(inlist)
- return n, mm, m, sd, sk, kurt
-
+ n = len(inlist)
+ mm = (min(inlist), max(inlist))
+ m = mean(inlist)
+ sd = stdev(inlist)
+ sk = skew(inlist)
+ kurt = kurtosis(inlist)
+ return n, mm, m, sd, sk, kurt
####################################
####### FREQUENCY STATS ##########
####################################
+
def litemfreq(inlist):
- """
+ """
Returns a list of pairs. Each pair consists of one of the scores in inlist
and it's frequency count. Assumes a 1D list is passed.
Usage: litemfreq(inlist)
Returns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies)
"""
- scores = pstat.unique(inlist)
- scores.sort()
- freq = []
- for item in scores:
- freq.append(inlist.count(item))
- return pstat.abut(scores, freq)
+ scores = pstat.unique(inlist)
+ scores.sort()
+ freq = []
+ for item in scores:
+ freq.append(inlist.count(item))
+ return pstat.abut(scores, freq)
-def lscoreatpercentile (inlist, percent):
- """
+def lscoreatpercentile(inlist, percent):
+ """
Returns the score at a given percentile relative to the distribution
given by inlist.
Usage: lscoreatpercentile(inlist,percent)
"""
- if percent > 1:
- print "\nDividing percent>1 by 100 in lscoreatpercentile().\n"
- percent = percent / 100.0
- targetcf = percent*len(inlist)
- h, lrl, binsize, extras = histogram(inlist)
- cumhist = cumsum(copy.deepcopy(h))
- for i in range(len(cumhist)):
- if cumhist[i] >= targetcf:
- break
- score = binsize * ((targetcf - cumhist[i-1]) / float(h[i])) + (lrl+binsize*i)
- return score
-
-
-def lpercentileofscore (inlist, score,histbins=10,defaultlimits=None):
- """
+ if percent > 1:
+ print '\nDividing percent>1 by 100 in lscoreatpercentile().\n'
+ percent = percent / 100.0
+ targetcf = percent * len(inlist)
+ h, lrl, binsize, extras = histogram(inlist)
+ cumhist = cumsum(copy.deepcopy(h))
+ for i in range(len(cumhist)):
+ if cumhist[i] >= targetcf:
+ break
+ score = binsize * (
+ (targetcf - cumhist[i - 1]) / float(h[i])) + (lrl + binsize * i)
+ return score
+
+
+def lpercentileofscore(inlist, score, histbins=10, defaultlimits=None):
+ """
Returns the percentile value of a score relative to the distribution
given by inlist. Formula depends on the values used to histogram the data(!).
Usage: lpercentileofscore(inlist,score,histbins=10,defaultlimits=None)
"""
- h, lrl, binsize, extras = histogram(inlist,histbins,defaultlimits)
- cumhist = cumsum(copy.deepcopy(h))
- i = int((score - lrl)/float(binsize))
- pct = (cumhist[i-1]+((score-(lrl+binsize*i))/float(binsize))*h[i])/float(len(inlist)) * 100
- return pct
+ h, lrl, binsize, extras = histogram(inlist, histbins, defaultlimits)
+ cumhist = cumsum(copy.deepcopy(h))
+ i = int((score - lrl) / float(binsize))
+ pct = (cumhist[i - 1] + (
+ (score -
+ (lrl + binsize * i)) / float(binsize)) * h[i]) / float(len(inlist)) * 100
+ return pct
-def lhistogram (inlist,numbins=10,defaultreallimits=None,printextras=0):
- """
+def lhistogram(inlist, numbins=10, defaultreallimits=None, printextras=0):
+ """
Returns (i) a list of histogram bin counts, (ii) the smallest value
of the histogram binning, and (iii) the bin width (the last 2 are not
necessarily integers). Default number of bins is 10. If no sequence object
is given for defaultreallimits, the routine picks (usually non-pretty) bins
spanning all the numbers in the inlist.
-Usage: lhistogram (inlist, numbins=10, defaultreallimits=None,suppressoutput=0)
+Usage: lhistogram (inlist, numbins=10,
+defaultreallimits=None,suppressoutput=0)
Returns: list of bin values, lowerreallimit, binsize, extrapoints
"""
- if (defaultreallimits <> None):
- if type(defaultreallimits) not in [ListType,TupleType] or len(defaultreallimits)==1: # only one limit given, assumed to be lower one & upper is calc'd
- lowerreallimit = defaultreallimits
- upperreallimit = 1.000001 * max(inlist)
- else: # assume both limits given
- lowerreallimit = defaultreallimits[0]
- upperreallimit = defaultreallimits[1]
- binsize = (upperreallimit-lowerreallimit)/float(numbins)
- else: # no limits given for histogram, both must be calc'd
- estbinwidth=(max(inlist)-min(inlist))/float(numbins) +1e-6 #1=>cover all
- binsize = ((max(inlist)-min(inlist)+estbinwidth))/float(numbins)
- lowerreallimit = min(inlist) - binsize/2 #lower real limit,1st bin
- bins = [0]*(numbins)
- extrapoints = 0
- for num in inlist:
- try:
- if (num-lowerreallimit) < 0:
- extrapoints = extrapoints + 1
- else:
- bintoincrement = int((num-lowerreallimit)/float(binsize))
- bins[bintoincrement] = bins[bintoincrement] + 1
- except:
- extrapoints = extrapoints + 1
- if (extrapoints > 0 and printextras == 1):
- print '\nPoints outside given histogram range =',extrapoints
- return (bins, lowerreallimit, binsize, extrapoints)
+ if (defaultreallimits <> None):
+ if type(defaultreallimits) not in [ListType, TupleType] or len(
+ defaultreallimits) == 1: # only one limit given, assumed to be lower one & upper is calc'd
+ lowerreallimit = defaultreallimits
+ upperreallimit = 1.000001 * max(inlist)
+ else: # assume both limits given
+ lowerreallimit = defaultreallimits[0]
+ upperreallimit = defaultreallimits[1]
+ binsize = (upperreallimit - lowerreallimit) / float(numbins)
+ else: # no limits given for histogram, both must be calc'd
+ estbinwidth = (max(inlist) -
+ min(inlist)) / float(numbins) + 1e-6 #1=>cover all
+ binsize = ((max(inlist) - min(inlist) + estbinwidth)) / float(numbins)
+ lowerreallimit = min(inlist) - binsize / 2 #lower real limit,1st bin
+ bins = [0] * (numbins)
+ extrapoints = 0
+ for num in inlist:
+ try:
+ if (num - lowerreallimit) < 0:
+ extrapoints = extrapoints + 1
+ else:
+ bintoincrement = int((num - lowerreallimit) / float(binsize))
+ bins[bintoincrement] = bins[bintoincrement] + 1
+ except:
+ extrapoints = extrapoints + 1
+ if (extrapoints > 0 and printextras == 1):
+ print '\nPoints outside given histogram range =', extrapoints
+ return (bins, lowerreallimit, binsize, extrapoints)
-def lcumfreq(inlist,numbins=10,defaultreallimits=None):
- """
+def lcumfreq(inlist, numbins=10, defaultreallimits=None):
+ """
Returns a cumulative frequency histogram, using the histogram function.
Usage: lcumfreq(inlist,numbins=10,defaultreallimits=None)
Returns: list of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
- h,l,b,e = histogram(inlist,numbins,defaultreallimits)
- cumhist = cumsum(copy.deepcopy(h))
- return cumhist,l,b,e
+ h, l, b, e = histogram(inlist, numbins, defaultreallimits)
+ cumhist = cumsum(copy.deepcopy(h))
+ return cumhist, l, b, e
-def lrelfreq(inlist,numbins=10,defaultreallimits=None):
- """
+def lrelfreq(inlist, numbins=10, defaultreallimits=None):
+ """
Returns a relative frequency histogram, using the histogram function.
Usage: lrelfreq(inlist,numbins=10,defaultreallimits=None)
Returns: list of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
- h,l,b,e = histogram(inlist,numbins,defaultreallimits)
- for i in range(len(h)):
- h[i] = h[i]/float(len(inlist))
- return h,l,b,e
-
+ h, l, b, e = histogram(inlist, numbins, defaultreallimits)
+ for i in range(len(h)):
+ h[i] = h[i] / float(len(inlist))
+ return h, l, b, e
####################################
##### VARIABILITY FUNCTIONS ######
####################################
+
def lobrientransform(*args):
- """
+ """
Computes a transform on input data (any number of columns). Used to
test for homogeneity of variance prior to running one-way stats. From
Maxwell and Delaney, p.112.
@@ -579,60 +586,60 @@ Maxwell and Delaney, p.112.
Usage: lobrientransform(*args)
Returns: transformed data for use in an ANOVA
"""
- TINY = 1e-10
- k = len(args)
- n = [0.0]*k
- v = [0.0]*k
- m = [0.0]*k
- nargs = []
- for i in range(k):
- nargs.append(copy.deepcopy(args[i]))
- n[i] = float(len(nargs[i]))
- v[i] = var(nargs[i])
- m[i] = mean(nargs[i])
- for j in range(k):
- for i in range(n[j]):
- t1 = (n[j]-1.5)*n[j]*(nargs[j][i]-m[j])**2
- t2 = 0.5*v[j]*(n[j]-1.0)
- t3 = (n[j]-1.0)*(n[j]-2.0)
- nargs[j][i] = (t1-t2) / float(t3)
- check = 1
- for j in range(k):
- if v[j] - mean(nargs[j]) > TINY:
- check = 0
- if check <> 1:
- raise ValueError, 'Problem in obrientransform.'
- else:
- return nargs
-
-
-def lsamplevar (inlist):
- """
+ TINY = 1e-10
+ k = len(args)
+ n = [0.0] * k
+ v = [0.0] * k
+ m = [0.0] * k
+ nargs = []
+ for i in range(k):
+ nargs.append(copy.deepcopy(args[i]))
+ n[i] = float(len(nargs[i]))
+ v[i] = var(nargs[i])
+ m[i] = mean(nargs[i])
+ for j in range(k):
+ for i in range(n[j]):
+ t1 = (n[j] - 1.5) * n[j] * (nargs[j][i] - m[j])**2
+ t2 = 0.5 * v[j] * (n[j] - 1.0)
+ t3 = (n[j] - 1.0) * (n[j] - 2.0)
+ nargs[j][i] = (t1 - t2) / float(t3)
+ check = 1
+ for j in range(k):
+ if v[j] - mean(nargs[j]) > TINY:
+ check = 0
+ if check <> 1:
+ raise ValueError, 'Problem in obrientransform.'
+ else:
+ return nargs
+
+
+def lsamplevar(inlist):
+ """
Returns the variance of the values in the passed list using
N for the denominator (i.e., DESCRIBES the sample variance only).
Usage: lsamplevar(inlist)
"""
- n = len(inlist)
- mn = mean(inlist)
- deviations = []
- for item in inlist:
- deviations.append(item-mn)
- return ss(deviations)/float(n)
+ n = len(inlist)
+ mn = mean(inlist)
+ deviations = []
+ for item in inlist:
+ deviations.append(item - mn)
+ return ss(deviations) / float(n)
-def lsamplestdev (inlist):
- """
+def lsamplestdev(inlist):
+ """
Returns the standard deviation of the values in the passed list using
N for the denominator (i.e., DESCRIBES the sample stdev only).
Usage: lsamplestdev(inlist)
"""
- return math.sqrt(samplevar(inlist))
+ return math.sqrt(samplevar(inlist))
-def lcov (x,y, keepdims=0):
- """
+def lcov(x, y, keepdims=0):
+ """
Returns the estimated covariance of the values in the passed
array (i.e., N-1). Dimension can equal None (ravel array first), an
integer (the dimension over which to operate), or a sequence (operate
@@ -642,96 +649,96 @@ same number of dimensions as inarray.
Usage: lcov(x,y,keepdims=0)
"""
- n = len(x)
- xmn = mean(x)
- ymn = mean(y)
- xdeviations = [0]*len(x)
- ydeviations = [0]*len(y)
- for i in range(len(x)):
- xdeviations[i] = x[i] - xmn
- ydeviations[i] = y[i] - ymn
- ss = 0.0
- for i in range(len(xdeviations)):
- ss = ss + xdeviations[i]*ydeviations[i]
- return ss/float(n-1)
-
-
-def lvar (inlist):
- """
+ n = len(x)
+ xmn = mean(x)
+ ymn = mean(y)
+ xdeviations = [0] * len(x)
+ ydeviations = [0] * len(y)
+ for i in range(len(x)):
+ xdeviations[i] = x[i] - xmn
+ ydeviations[i] = y[i] - ymn
+ ss = 0.0
+ for i in range(len(xdeviations)):
+ ss = ss + xdeviations[i] * ydeviations[i]
+ return ss / float(n - 1)
+
+
+def lvar(inlist):
+ """
Returns the variance of the values in the passed list using N-1
for the denominator (i.e., for estimating population variance).
Usage: lvar(inlist)
"""
- n = len(inlist)
- mn = mean(inlist)
- deviations = [0]*len(inlist)
- for i in range(len(inlist)):
- deviations[i] = inlist[i] - mn
- return ss(deviations)/float(n-1)
+ n = len(inlist)
+ mn = mean(inlist)
+ deviations = [0] * len(inlist)
+ for i in range(len(inlist)):
+ deviations[i] = inlist[i] - mn
+ return ss(deviations) / float(n - 1)
-def lstdev (inlist):
- """
+def lstdev(inlist):
+ """
Returns the standard deviation of the values in the passed list
using N-1 in the denominator (i.e., to estimate population stdev).
Usage: lstdev(inlist)
"""
- return math.sqrt(var(inlist))
+ return math.sqrt(var(inlist))
def lsterr(inlist):
- """
+ """
Returns the standard error of the values in the passed list using N-1
in the denominator (i.e., to estimate population standard error).
Usage: lsterr(inlist)
"""
- return stdev(inlist) / float(math.sqrt(len(inlist)))
+ return stdev(inlist) / float(math.sqrt(len(inlist)))
-def lsem (inlist):
- """
+def lsem(inlist):
+ """
Returns the estimated standard error of the mean (sx-bar) of the
values in the passed list. sem = stdev / sqrt(n)
Usage: lsem(inlist)
"""
- sd = stdev(inlist)
- n = len(inlist)
- return sd/math.sqrt(n)
+ sd = stdev(inlist)
+ n = len(inlist)
+ return sd / math.sqrt(n)
-def lz (inlist, score):
- """
+def lz(inlist, score):
+ """
Returns the z-score for a given input score, given that score and the
list from which that score came. Not appropriate for population calculations.
Usage: lz(inlist, score)
"""
- z = (score-mean(inlist))/samplestdev(inlist)
- return z
+ z = (score - mean(inlist)) / samplestdev(inlist)
+ return z
-def lzs (inlist):
- """
+def lzs(inlist):
+ """
Returns a list of z-scores, one for each score in the passed list.
Usage: lzs(inlist)
"""
- zscores = []
- for item in inlist:
- zscores.append(z(inlist,item))
- return zscores
-
+ zscores = []
+ for item in inlist:
+ zscores.append(z(inlist, item))
+ return zscores
####################################
####### TRIMMING FUNCTIONS #######
####################################
-def ltrimboth (l,proportiontocut):
- """
+
+def ltrimboth(l, proportiontocut):
+ """
Slices off the passed proportion of items from BOTH ends of the passed
list (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND 'rightmost'
10% of scores. Assumes list is sorted by magnitude. Slices off LESS if
@@ -741,13 +748,13 @@ slices off proportiontocut).
Usage: ltrimboth (l,proportiontocut)
Returns: trimmed version of list l
"""
- lowercut = int(proportiontocut*len(l))
- uppercut = len(l) - lowercut
- return l[lowercut:uppercut]
+ lowercut = int(proportiontocut * len(l))
+ uppercut = len(l) - lowercut
+ return l[lowercut:uppercut]
-def ltrim1 (l,proportiontocut,tail='right'):
- """
+def ltrim1(l, proportiontocut, tail='right'):
+ """
Slices off the passed proportion of items from ONE end of the passed
list (i.e., if proportiontocut=0.1, slices off 'leftmost' or 'rightmost'
10% of scores). Slices off LESS if proportion results in a non-integer
@@ -756,85 +763,89 @@ slice index (i.e., conservatively slices off proportiontocut).
Usage: ltrim1 (l,proportiontocut,tail='right') or set tail='left'
Returns: trimmed version of list l
"""
- if tail == 'right':
- lowercut = 0
- uppercut = len(l) - int(proportiontocut*len(l))
- elif tail == 'left':
- lowercut = int(proportiontocut*len(l))
- uppercut = len(l)
- return l[lowercut:uppercut]
-
+ if tail == 'right':
+ lowercut = 0
+ uppercut = len(l) - int(proportiontocut * len(l))
+ elif tail == 'left':
+ lowercut = int(proportiontocut * len(l))
+ uppercut = len(l)
+ return l[lowercut:uppercut]
####################################
##### CORRELATION FUNCTIONS ######
####################################
-def lpaired(x,y):
- """
+
+def lpaired(x, y):
+ """
Interactively determines the type of data and then runs the
appropriated statistic for paired group data.
Usage: lpaired(x,y)
Returns: appropriate statistic name, value, and probability
"""
- samples = ''
- while samples not in ['i','r','I','R','c','C']:
- print '\nIndependent or related samples, or correlation (i,r,c): ',
- samples = raw_input()
-
- if samples in ['i','I','r','R']:
- print '\nComparing variances ...',
-# USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112
- r = obrientransform(x,y)
- f,p = F_oneway(pstat.colex(r,0),pstat.colex(r,1))
- if p<0.05:
- vartype='unequal, p='+str(round(p,4))
+ samples = ''
+ while samples not in ['i', 'r', 'I', 'R', 'c', 'C']:
+ print '\nIndependent or related samples, or correlation (i,r,c): ',
+ samples = raw_input()
+
+ if samples in ['i', 'I', 'r', 'R']:
+ print '\nComparing variances ...',
+ # USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112
+ r = obrientransform(x, y)
+ f, p = F_oneway(pstat.colex(r, 0), pstat.colex(r, 1))
+ if p < 0.05:
+ vartype = 'unequal, p=' + str(round(p, 4))
+ else:
+ vartype = 'equal'
+ print vartype
+ if samples in ['i', 'I']:
+ if vartype[0] == 'e':
+ t, p = ttest_ind(x, y, 0)
+ print '\nIndependent samples t-test: ', round(t, 4), round(p, 4)
+ else:
+ if len(x) > 20 or len(y) > 20:
+ z, p = ranksums(x, y)
+ print '\nRank Sums test (NONparametric, n>20): ', round(z, 4), round(
+ p, 4)
else:
- vartype='equal'
- print vartype
- if samples in ['i','I']:
- if vartype[0]=='e':
- t,p = ttest_ind(x,y,0)
- print '\nIndependent samples t-test: ', round(t,4),round(p,4)
- else:
- if len(x)>20 or len(y)>20:
- z,p = ranksums(x,y)
- print '\nRank Sums test (NONparametric, n>20): ', round(z,4),round(p,4)
- else:
- u,p = mannwhitneyu(x,y)
- print '\nMann-Whitney U-test (NONparametric, ns<20): ', round(u,4),round(p,4)
-
- else: # RELATED SAMPLES
- if vartype[0]=='e':
- t,p = ttest_rel(x,y,0)
- print '\nRelated samples t-test: ', round(t,4),round(p,4)
- else:
- t,p = ranksums(x,y)
- print '\nWilcoxon T-test (NONparametric): ', round(t,4),round(p,4)
- else: # CORRELATION ANALYSIS
- corrtype = ''
- while corrtype not in ['c','C','r','R','d','D']:
- print '\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ',
- corrtype = raw_input()
- if corrtype in ['c','C']:
- m,b,r,p,see = linregress(x,y)
- print '\nLinear regression for continuous variables ...'
- lol = [['Slope','Intercept','r','Prob','SEestimate'],[round(m,4),round(b,4),round(r,4),round(p,4),round(see,4)]]
- pstat.printcc(lol)
- elif corrtype in ['r','R']:
- r,p = spearmanr(x,y)
- print '\nCorrelation for ranked variables ...'
- print "Spearman's r: ",round(r,4),round(p,4)
- else: # DICHOTOMOUS
- r,p = pointbiserialr(x,y)
- print '\nAssuming x contains a dichotomous variable ...'
- print 'Point Biserial r: ',round(r,4),round(p,4)
- print '\n\n'
- return None
-
-
-def lpearsonr(x,y):
- """
+ u, p = mannwhitneyu(x, y)
+ print '\nMann-Whitney U-test (NONparametric, ns<20): ', round(
+ u, 4), round(p, 4)
+
+ else: # RELATED SAMPLES
+ if vartype[0] == 'e':
+ t, p = ttest_rel(x, y, 0)
+ print '\nRelated samples t-test: ', round(t, 4), round(p, 4)
+ else:
+ t, p = ranksums(x, y)
+ print '\nWilcoxon T-test (NONparametric): ', round(t, 4), round(p, 4)
+ else: # CORRELATION ANALYSIS
+ corrtype = ''
+ while corrtype not in ['c', 'C', 'r', 'R', 'd', 'D']:
+ print '\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ',
+ corrtype = raw_input()
+ if corrtype in ['c', 'C']:
+ m, b, r, p, see = linregress(x, y)
+ print '\nLinear regression for continuous variables ...'
+ lol = [['Slope', 'Intercept', 'r', 'Prob', 'SEestimate'],
+ [round(m, 4), round(b, 4), round(r, 4), round(p, 4), round(see, 4)]
+ ]
+ pstat.printcc(lol)
+ elif corrtype in ['r', 'R']:
+ r, p = spearmanr(x, y)
+ print '\nCorrelation for ranked variables ...'
+ print "Spearman's r: ", round(r, 4), round(p, 4)
+ else: # DICHOTOMOUS
+ r, p = pointbiserialr(x, y)
+ print '\nAssuming x contains a dichotomous variable ...'
+ print 'Point Biserial r: ', round(r, 4), round(p, 4)
+ print '\n\n'
+ return None
+
+
+def lpearsonr(x, y):
+ """
Calculates a Pearson correlation coefficient and the associated
probability value. Taken from Heiman's Basic Statistics for the Behav.
Sci (2nd), p.195.
@@ -842,63 +853,64 @@ Sci (2nd), p.195.
Usage: lpearsonr(x,y) where x and y are equal-length lists
Returns: Pearson's r value, two-tailed p-value
"""
- TINY = 1.0e-30
- if len(x) <> len(y):
- raise ValueError, 'Input values not paired in pearsonr. Aborting.'
- n = len(x)
- x = map(float,x)
- y = map(float,y)
- xmean = mean(x)
- ymean = mean(y)
- r_num = n*(summult(x,y)) - sum(x)*sum(y)
- r_den = math.sqrt((n*ss(x) - square_of_sums(x))*(n*ss(y)-square_of_sums(y)))
- r = (r_num / r_den) # denominator already a float
- df = n-2
- t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
- prob = betai(0.5*df,0.5,df/float(df+t*t))
- return r, prob
-
-
-def llincc(x,y):
- """
+ TINY = 1.0e-30
+ if len(x) <> len(y):
+ raise ValueError, 'Input values not paired in pearsonr. Aborting.'
+ n = len(x)
+ x = map(float, x)
+ y = map(float, y)
+ xmean = mean(x)
+ ymean = mean(y)
+ r_num = n * (summult(x, y)) - sum(x) * sum(y)
+ r_den = math.sqrt((n * ss(x) - square_of_sums(x)) *
+ (n * ss(y) - square_of_sums(y)))
+ r = (r_num / r_den) # denominator already a float
+ df = n - 2
+ t = r * math.sqrt(df / ((1.0 - r + TINY) * (1.0 + r + TINY)))
+ prob = betai(0.5 * df, 0.5, df / float(df + t * t))
+ return r, prob
+
+
+def llincc(x, y):
+ """
Calculates Lin's concordance correlation coefficient.
Usage: alincc(x,y) where x, y are equal-length arrays
Returns: Lin's CC
"""
- covar = lcov(x,y)*(len(x)-1)/float(len(x)) # correct denom to n
- xvar = lvar(x)*(len(x)-1)/float(len(x)) # correct denom to n
- yvar = lvar(y)*(len(y)-1)/float(len(y)) # correct denom to n
- lincc = (2 * covar) / ((xvar+yvar) +((amean(x)-amean(y))**2))
- return lincc
+ covar = lcov(x, y) * (len(x) - 1) / float(len(x)) # correct denom to n
+ xvar = lvar(x) * (len(x) - 1) / float(len(x)) # correct denom to n
+ yvar = lvar(y) * (len(y) - 1) / float(len(y)) # correct denom to n
+ lincc = (2 * covar) / ((xvar + yvar) + ((amean(x) - amean(y))**2))
+ return lincc
-def lspearmanr(x,y):
- """
+def lspearmanr(x, y):
+ """
Calculates a Spearman rank-order correlation coefficient. Taken
from Heiman's Basic Statistics for the Behav. Sci (1st), p.192.
Usage: lspearmanr(x,y) where x and y are equal-length lists
Returns: Spearman's r, two-tailed p-value
"""
- TINY = 1e-30
- if len(x) <> len(y):
- raise ValueError, 'Input values not paired in spearmanr. Aborting.'
- n = len(x)
- rankx = rankdata(x)
- ranky = rankdata(y)
- dsq = sumdiffsquared(rankx,ranky)
- rs = 1 - 6*dsq / float(n*(n**2-1))
- t = rs * math.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
- df = n-2
- probrs = betai(0.5*df,0.5,df/(df+t*t)) # t already a float
-# probability values for rs are from part 2 of the spearman function in
-# Numerical Recipies, p.510. They are close to tables, but not exact. (?)
- return rs, probrs
-
-
-def lpointbiserialr(x,y):
- """
+ TINY = 1e-30
+ if len(x) <> len(y):
+ raise ValueError, 'Input values not paired in spearmanr. Aborting.'
+ n = len(x)
+ rankx = rankdata(x)
+ ranky = rankdata(y)
+ dsq = sumdiffsquared(rankx, ranky)
+ rs = 1 - 6 * dsq / float(n * (n**2 - 1))
+ t = rs * math.sqrt((n - 2) / ((rs + 1.0) * (1.0 - rs)))
+ df = n - 2
+ probrs = betai(0.5 * df, 0.5, df / (df + t * t)) # t already a float
+ # probability values for rs are from part 2 of the spearman function in
+ # Numerical Recipies, p.510. They are close to tables, but not exact. (?)
+ return rs, probrs
+
+
+def lpointbiserialr(x, y):
+ """
Calculates a point-biserial correlation coefficient and the associated
probability value. Taken from Heiman's Basic Statistics for the Behav.
Sci (1st), p.194.
@@ -906,98 +918,99 @@ Sci (1st), p.194.
Usage: lpointbiserialr(x,y) where x,y are equal-length lists
Returns: Point-biserial r, two-tailed p-value
"""
- TINY = 1e-30
- if len(x) <> len(y):
- raise ValueError, 'INPUT VALUES NOT PAIRED IN pointbiserialr. ABORTING.'
- data = pstat.abut(x,y)
- categories = pstat.unique(x)
- if len(categories) <> 2:
- raise ValueError, "Exactly 2 categories required for pointbiserialr()."
- else: # there are 2 categories, continue
- codemap = pstat.abut(categories,range(2))
- recoded = pstat.recode(data,codemap,0)
- x = pstat.linexand(data,0,categories[0])
- y = pstat.linexand(data,0,categories[1])
- xmean = mean(pstat.colex(x,1))
- ymean = mean(pstat.colex(y,1))
- n = len(data)
- adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n)))
- rpb = (ymean - xmean)/samplestdev(pstat.colex(data,1))*adjust
- df = n-2
- t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))
- prob = betai(0.5*df,0.5,df/(df+t*t)) # t already a float
- return rpb, prob
-
-
-def lkendalltau(x,y):
- """
+ TINY = 1e-30
+ if len(x) <> len(y):
+ raise ValueError, 'INPUT VALUES NOT PAIRED IN pointbiserialr. ABORTING.'
+ data = pstat.abut(x, y)
+ categories = pstat.unique(x)
+ if len(categories) <> 2:
+ raise ValueError, 'Exactly 2 categories required for pointbiserialr().'
+ else: # there are 2 categories, continue
+ codemap = pstat.abut(categories, range(2))
+ recoded = pstat.recode(data, codemap, 0)
+ x = pstat.linexand(data, 0, categories[0])
+ y = pstat.linexand(data, 0, categories[1])
+ xmean = mean(pstat.colex(x, 1))
+ ymean = mean(pstat.colex(y, 1))
+ n = len(data)
+ adjust = math.sqrt((len(x) / float(n)) * (len(y) / float(n)))
+ rpb = (ymean - xmean) / samplestdev(pstat.colex(data, 1)) * adjust
+ df = n - 2
+ t = rpb * math.sqrt(df / ((1.0 - rpb + TINY) * (1.0 + rpb + TINY)))
+ prob = betai(0.5 * df, 0.5, df / (df + t * t)) # t already a float
+ return rpb, prob
+
+
+def lkendalltau(x, y):
+ """
Calculates Kendall's tau ... correlation of ordinal data. Adapted
from function kendl1 in Numerical Recipies. Needs good test-routine.@@@
Usage: lkendalltau(x,y)
Returns: Kendall's tau, two-tailed p-value
"""
- n1 = 0
- n2 = 0
- iss = 0
- for j in range(len(x)-1):
- for k in range(j,len(y)):
- a1 = x[j] - x[k]
- a2 = y[j] - y[k]
- aa = a1 * a2
- if (aa): # neither list has a tie
- n1 = n1 + 1
- n2 = n2 + 1
- if aa > 0:
- iss = iss + 1
- else:
- iss = iss -1
- else:
- if (a1):
- n1 = n1 + 1
- else:
- n2 = n2 + 1
- tau = iss / math.sqrt(n1*n2)
- svar = (4.0*len(x)+10.0) / (9.0*len(x)*(len(x)-1))
- z = tau / math.sqrt(svar)
- prob = erfcc(abs(z)/1.4142136)
- return tau, prob
+ n1 = 0
+ n2 = 0
+ iss = 0
+ for j in range(len(x) - 1):
+ for k in range(j, len(y)):
+ a1 = x[j] - x[k]
+ a2 = y[j] - y[k]
+ aa = a1 * a2
+ if (aa): # neither list has a tie
+ n1 = n1 + 1
+ n2 = n2 + 1
+ if aa > 0:
+ iss = iss + 1
+ else:
+ iss = iss - 1
+ else:
+ if (a1):
+ n1 = n1 + 1
+ else:
+ n2 = n2 + 1
+ tau = iss / math.sqrt(n1 * n2)
+ svar = (4.0 * len(x) + 10.0) / (9.0 * len(x) * (len(x) - 1))
+ z = tau / math.sqrt(svar)
+ prob = erfcc(abs(z) / 1.4142136)
+ return tau, prob
-def llinregress(x,y):
- """
+def llinregress(x, y):
+ """
Calculates a regression line on x,y pairs.
Usage: llinregress(x,y) x,y are equal-length lists of x-y coordinates
Returns: slope, intercept, r, two-tailed prob, sterr-of-estimate
"""
- TINY = 1.0e-20
- if len(x) <> len(y):
- raise ValueError, 'Input values not paired in linregress. Aborting.'
- n = len(x)
- x = map(float,x)
- y = map(float,y)
- xmean = mean(x)
- ymean = mean(y)
- r_num = float(n*(summult(x,y)) - sum(x)*sum(y))
- r_den = math.sqrt((n*ss(x) - square_of_sums(x))*(n*ss(y)-square_of_sums(y)))
- r = r_num / r_den
- z = 0.5*math.log((1.0+r+TINY)/(1.0-r+TINY))
- df = n-2
- t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
- prob = betai(0.5*df,0.5,df/(df+t*t))
- slope = r_num / float(n*ss(x) - square_of_sums(x))
- intercept = ymean - slope*xmean
- sterrest = math.sqrt(1-r*r)*samplestdev(y)
- return slope, intercept, r, prob, sterrest
-
+ TINY = 1.0e-20
+ if len(x) <> len(y):
+ raise ValueError, 'Input values not paired in linregress. Aborting.'
+ n = len(x)
+ x = map(float, x)
+ y = map(float, y)
+ xmean = mean(x)
+ ymean = mean(y)
+ r_num = float(n * (summult(x, y)) - sum(x) * sum(y))
+ r_den = math.sqrt((n * ss(x) - square_of_sums(x)) *
+ (n * ss(y) - square_of_sums(y)))
+ r = r_num / r_den
+ z = 0.5 * math.log((1.0 + r + TINY) / (1.0 - r + TINY))
+ df = n - 2
+ t = r * math.sqrt(df / ((1.0 - r + TINY) * (1.0 + r + TINY)))
+ prob = betai(0.5 * df, 0.5, df / (df + t * t))
+ slope = r_num / float(n * ss(x) - square_of_sums(x))
+ intercept = ymean - slope * xmean
+ sterrest = math.sqrt(1 - r * r) * samplestdev(y)
+ return slope, intercept, r, prob, sterrest
####################################
##### INFERENTIAL STATISTICS #####
####################################
-def lttest_1samp(a,popmean,printit=0,name='Sample',writemode='a'):
- """
+
+def lttest_1samp(a, popmean, printit=0, name='Sample', writemode='a'):
+ """
Calculates the t-obtained for the independent samples T-test on ONE group
of scores a, given a population mean. If printit=1, results are printed
to the screen. If printit='filename', the results are output to 'filename'
@@ -1006,25 +1019,23 @@ using the given writemode (default=append). Returns t-value, and prob.
Usage: lttest_1samp(a,popmean,Name='Sample',printit=0,writemode='a')
Returns: t-value, two-tailed prob
"""
- x = mean(a)
- v = var(a)
- n = len(a)
- df = n-1
- svar = ((n-1)*v)/float(df)
- t = (x-popmean)/math.sqrt(svar*(1.0/n))
- prob = betai(0.5*df,0.5,float(df)/(df+t*t))
+ x = mean(a)
+ v = var(a)
+ n = len(a)
+ df = n - 1
+ svar = ((n - 1) * v) / float(df)
+ t = (x - popmean) / math.sqrt(svar * (1.0 / n))
+ prob = betai(0.5 * df, 0.5, float(df) / (df + t * t))
- if printit <> 0:
- statname = 'Single-sample T-test.'
- outputpairedstats(printit,writemode,
- 'Population','--',popmean,0,0,0,
- name,n,x,v,min(a),max(a),
- statname,t,prob)
- return t,prob
+ if printit <> 0:
+ statname = 'Single-sample T-test.'
+ outputpairedstats(printit, writemode, 'Population', '--', popmean, 0, 0, 0,
+ name, n, x, v, min(a), max(a), statname, t, prob)
+ return t, prob
-def lttest_ind (a, b, printit=0, name1='Samp1', name2='Samp2', writemode='a'):
- """
+def lttest_ind(a, b, printit=0, name1='Samp1', name2='Samp2', writemode='a'):
+ """
Calculates the t-obtained T-test on TWO INDEPENDENT samples of
scores a, and b. From Numerical Recipies, p.483. If printit=1, results
are printed to the screen. If printit='filename', the results are output
@@ -1034,30 +1045,33 @@ and prob.
Usage: lttest_ind(a,b,printit=0,name1='Samp1',name2='Samp2',writemode='a')
Returns: t-value, two-tailed prob
"""
- x1 = mean(a)
- x2 = mean(b)
- v1 = stdev(a)**2
- v2 = stdev(b)**2
- n1 = len(a)
- n2 = len(b)
- df = n1+n2-2
- svar = ((n1-1)*v1+(n2-1)*v2)/float(df)
- if not svar:
- svar = 1.0e-26
- t = (x1-x2)/math.sqrt(svar*(1.0/n1 + 1.0/n2))
- prob = betai(0.5*df,0.5,df/(df+t*t))
-
- if printit <> 0:
- statname = 'Independent samples T-test.'
- outputpairedstats(printit,writemode,
- name1,n1,x1,v1,min(a),max(a),
- name2,n2,x2,v2,min(b),max(b),
- statname,t,prob)
- return t,prob
-
-
-def lttest_rel (a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a'):
- """
+ x1 = mean(a)
+ x2 = mean(b)
+ v1 = stdev(a)**2
+ v2 = stdev(b)**2
+ n1 = len(a)
+ n2 = len(b)
+ df = n1 + n2 - 2
+ svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / float(df)
+ if not svar:
+ svar = 1.0e-26
+ t = (x1 - x2) / math.sqrt(svar * (1.0 / n1 + 1.0 / n2))
+ prob = betai(0.5 * df, 0.5, df / (df + t * t))
+
+ if printit <> 0:
+ statname = 'Independent samples T-test.'
+ outputpairedstats(printit, writemode, name1, n1, x1, v1, min(a), max(a),
+ name2, n2, x2, v2, min(b), max(b), statname, t, prob)
+ return t, prob
+
+
+def lttest_rel(a,
+ b,
+ printit=0,
+ name1='Sample1',
+ name2='Sample2',
+ writemode='a'):
+ """
Calculates the t-obtained T-test on TWO RELATED samples of scores,
a and b. From Numerical Recipies, p.483. If printit=1, results are
printed to the screen. If printit='filename', the results are output to
@@ -1067,33 +1081,31 @@ and prob.
Usage: lttest_rel(a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a')
Returns: t-value, two-tailed prob
"""
- if len(a)<>len(b):
- raise ValueError, 'Unequal length lists in ttest_rel.'
- x1 = mean(a)
- x2 = mean(b)
- v1 = var(a)
- v2 = var(b)
- n = len(a)
- cov = 0
- for i in range(len(a)):
- cov = cov + (a[i]-x1) * (b[i]-x2)
- df = n-1
- cov = cov / float(df)
- sd = math.sqrt((v1+v2 - 2.0*cov)/float(n))
- t = (x1-x2)/sd
- prob = betai(0.5*df,0.5,df/(df+t*t))
-
- if printit <> 0:
- statname = 'Related samples T-test.'
- outputpairedstats(printit,writemode,
- name1,n,x1,v1,min(a),max(a),
- name2,n,x2,v2,min(b),max(b),
- statname,t,prob)
- return t, prob
-
-
-def lchisquare(f_obs,f_exp=None):
- """
+ if len(a) <> len(b):
+ raise ValueError, 'Unequal length lists in ttest_rel.'
+ x1 = mean(a)
+ x2 = mean(b)
+ v1 = var(a)
+ v2 = var(b)
+ n = len(a)
+ cov = 0
+ for i in range(len(a)):
+ cov = cov + (a[i] - x1) * (b[i] - x2)
+ df = n - 1
+ cov = cov / float(df)
+ sd = math.sqrt((v1 + v2 - 2.0 * cov) / float(n))
+ t = (x1 - x2) / sd
+ prob = betai(0.5 * df, 0.5, df / (df + t * t))
+
+ if printit <> 0:
+ statname = 'Related samples T-test.'
+ outputpairedstats(printit, writemode, name1, n, x1, v1, min(a), max(a),
+ name2, n, x2, v2, min(b), max(b), statname, t, prob)
+ return t, prob
+
+
+def lchisquare(f_obs, f_exp=None):
+ """
Calculates a one-way chi square for list of observed frequencies and returns
the result. If no expected frequencies are given, the total N is assumed to
be equally distributed across all groups.
@@ -1101,56 +1113,56 @@ be equally distributed across all groups.
Usage: lchisquare(f_obs, f_exp=None) f_obs = list of observed cell freq.
Returns: chisquare-statistic, associated p-value
"""
- k = len(f_obs) # number of groups
- if f_exp == None:
- f_exp = [sum(f_obs)/float(k)] * len(f_obs) # create k bins with = freq.
- chisq = 0
- for i in range(len(f_obs)):
- chisq = chisq + (f_obs[i]-f_exp[i])**2 / float(f_exp[i])
- return chisq, chisqprob(chisq, k-1)
+ k = len(f_obs) # number of groups
+ if f_exp == None:
+ f_exp = [sum(f_obs) / float(k)] * len(f_obs) # create k bins with = freq.
+ chisq = 0
+ for i in range(len(f_obs)):
+ chisq = chisq + (f_obs[i] - f_exp[i])**2 / float(f_exp[i])
+ return chisq, chisqprob(chisq, k - 1)
-def lks_2samp (data1,data2):
- """
+def lks_2samp(data1, data2):
+ """
Computes the Kolmogorov-Smirnof statistic on 2 samples. From
Numerical Recipies in C, page 493.
Usage: lks_2samp(data1,data2) data1&2 are lists of values for 2 conditions
Returns: KS D-value, associated p-value
"""
- j1 = 0
- j2 = 0
- fn1 = 0.0
- fn2 = 0.0
- n1 = len(data1)
- n2 = len(data2)
- en1 = n1
- en2 = n2
- d = 0.0
- data1.sort()
- data2.sort()
- while j1 < n1 and j2 < n2:
- d1=data1[j1]
- d2=data2[j2]
- if d1 <= d2:
- fn1 = (j1)/float(en1)
- j1 = j1 + 1
- if d2 <= d1:
- fn2 = (j2)/float(en2)
- j2 = j2 + 1
- dt = (fn2-fn1)
- if math.fabs(dt) > math.fabs(d):
- d = dt
- try:
- en = math.sqrt(en1*en2/float(en1+en2))
- prob = ksprob((en+0.12+0.11/en)*abs(d))
- except:
- prob = 1.0
- return d, prob
-
-
-def lmannwhitneyu(x,y):
- """
+ j1 = 0
+ j2 = 0
+ fn1 = 0.0
+ fn2 = 0.0
+ n1 = len(data1)
+ n2 = len(data2)
+ en1 = n1
+ en2 = n2
+ d = 0.0
+ data1.sort()
+ data2.sort()
+ while j1 < n1 and j2 < n2:
+ d1 = data1[j1]
+ d2 = data2[j2]
+ if d1 <= d2:
+ fn1 = (j1) / float(en1)
+ j1 = j1 + 1
+ if d2 <= d1:
+ fn2 = (j2) / float(en2)
+ j2 = j2 + 1
+ dt = (fn2 - fn1)
+ if math.fabs(dt) > math.fabs(d):
+ d = dt
+ try:
+ en = math.sqrt(en1 * en2 / float(en1 + en2))
+ prob = ksprob((en + 0.12 + 0.11 / en) * abs(d))
+ except:
+ prob = 1.0
+ return d, prob
+
+
+def lmannwhitneyu(x, y):
+ """
Calculates a Mann-Whitney U statistic on the provided scores and
returns the result. Use only when the n in each condition is < 20 and
you have 2 independent samples of ranks. NOTE: Mann-Whitney U is
@@ -1161,26 +1173,26 @@ just 2 groups.
Usage: lmannwhitneyu(data)
Returns: u-statistic, one-tailed p-value (i.e., p(z(U)))
"""
- n1 = len(x)
- n2 = len(y)
- ranked = rankdata(x+y)
- rankx = ranked[0:n1] # get the x-ranks
- ranky = ranked[n1:] # the rest are y-ranks
- u1 = n1*n2 + (n1*(n1+1))/2.0 - sum(rankx) # calc U for x
- u2 = n1*n2 - u1 # remainder is U for y
- bigu = max(u1,u2)
- smallu = min(u1,u2)
- proportion = bigu/float(n1*n2)
- T = math.sqrt(tiecorrect(ranked)) # correction factor for tied scores
- if T == 0:
- raise ValueError, 'All numbers are identical in lmannwhitneyu'
- sd = math.sqrt(T*n1*n2*(n1+n2+1)/12.0)
- z = abs((bigu-n1*n2/2.0) / sd) # normal approximation for prob calc
- return smallu, 1.0 - zprob(z) #, proportion
+ n1 = len(x)
+ n2 = len(y)
+ ranked = rankdata(x + y)
+ rankx = ranked[0:n1] # get the x-ranks
+ ranky = ranked[n1:] # the rest are y-ranks
+ u1 = n1 * n2 + (n1 * (n1 + 1)) / 2.0 - sum(rankx) # calc U for x
+ u2 = n1 * n2 - u1 # remainder is U for y
+ bigu = max(u1, u2)
+ smallu = min(u1, u2)
+ proportion = bigu / float(n1 * n2)
+ T = math.sqrt(tiecorrect(ranked)) # correction factor for tied scores
+ if T == 0:
+ raise ValueError, 'All numbers are identical in lmannwhitneyu'
+ sd = math.sqrt(T * n1 * n2 * (n1 + n2 + 1) / 12.0)
+ z = abs((bigu - n1 * n2 / 2.0) / sd) # normal approximation for prob calc
+ return smallu, 1.0 - zprob(z) #, proportion
def ltiecorrect(rankvals):
- """
+ """
Corrects for ties in Mann Whitney U and Kruskal Wallis H tests. See
Siegel, S. (1956) Nonparametric Statistics for the Behavioral Sciences.
New York: McGraw-Hill. Code adapted from |Stat rankind.c code.
@@ -1188,24 +1200,24 @@ New York: McGraw-Hill. Code adapted from |Stat rankind.c code.
Usage: ltiecorrect(rankvals)
Returns: T correction factor for U or H
"""
- sorted,posn = shellsort(rankvals)
- n = len(sorted)
- T = 0.0
- i = 0
- while (i<n-1):
- if sorted[i] == sorted[i+1]:
- nties = 1
- while (i<n-1) and (sorted[i] == sorted[i+1]):
- nties = nties +1
- i = i +1
- T = T + nties**3 - nties
- i = i+1
- T = T / float(n**3-n)
- return 1.0 - T
-
-
-def lranksums(x,y):
- """
+ sorted, posn = shellsort(rankvals)
+ n = len(sorted)
+ T = 0.0
+ i = 0
+ while (i < n - 1):
+ if sorted[i] == sorted[i + 1]:
+ nties = 1
+ while (i < n - 1) and (sorted[i] == sorted[i + 1]):
+ nties = nties + 1
+ i = i + 1
+ T = T + nties**3 - nties
+ i = i + 1
+ T = T / float(n**3 - n)
+ return 1.0 - T
+
+
+def lranksums(x, y):
+ """
Calculates the rank sums statistic on the provided scores and
returns the result. Use only when the n in each condition is > 20 and you
have 2 independent samples of ranks.
@@ -1213,54 +1225,54 @@ have 2 independent samples of ranks.
Usage: lranksums(x,y)
Returns: a z-statistic, two-tailed p-value
"""
- n1 = len(x)
- n2 = len(y)
- alldata = x+y
- ranked = rankdata(alldata)
- x = ranked[:n1]
- y = ranked[n1:]
- s = sum(x)
- expected = n1*(n1+n2+1) / 2.0
- z = (s - expected) / math.sqrt(n1*n2*(n1+n2+1)/12.0)
- prob = 2*(1.0 -zprob(abs(z)))
- return z, prob
-
-
-def lwilcoxont(x,y):
- """
+ n1 = len(x)
+ n2 = len(y)
+ alldata = x + y
+ ranked = rankdata(alldata)
+ x = ranked[:n1]
+ y = ranked[n1:]
+ s = sum(x)
+ expected = n1 * (n1 + n2 + 1) / 2.0
+ z = (s - expected) / math.sqrt(n1 * n2 * (n1 + n2 + 1) / 12.0)
+ prob = 2 * (1.0 - zprob(abs(z)))
+ return z, prob
+
+
+def lwilcoxont(x, y):
+ """
Calculates the Wilcoxon T-test for related samples and returns the
result. A non-parametric T-test.
Usage: lwilcoxont(x,y)
Returns: a t-statistic, two-tail probability estimate
"""
- if len(x) <> len(y):
- raise ValueError, 'Unequal N in wilcoxont. Aborting.'
- d=[]
- for i in range(len(x)):
- diff = x[i] - y[i]
- if diff <> 0:
- d.append(diff)
- count = len(d)
- absd = map(abs,d)
- absranked = rankdata(absd)
- r_plus = 0.0
- r_minus = 0.0
- for i in range(len(absd)):
- if d[i] < 0:
- r_minus = r_minus + absranked[i]
- else:
- r_plus = r_plus + absranked[i]
- wt = min(r_plus, r_minus)
- mn = count * (count+1) * 0.25
- se = math.sqrt(count*(count+1)*(2.0*count+1.0)/24.0)
- z = math.fabs(wt-mn) / se
- prob = 2*(1.0 -zprob(abs(z)))
- return wt, prob
+ if len(x) <> len(y):
+ raise ValueError, 'Unequal N in wilcoxont. Aborting.'
+ d = []
+ for i in range(len(x)):
+ diff = x[i] - y[i]
+ if diff <> 0:
+ d.append(diff)
+ count = len(d)
+ absd = map(abs, d)
+ absranked = rankdata(absd)
+ r_plus = 0.0
+ r_minus = 0.0
+ for i in range(len(absd)):
+ if d[i] < 0:
+ r_minus = r_minus + absranked[i]
+ else:
+ r_plus = r_plus + absranked[i]
+ wt = min(r_plus, r_minus)
+ mn = count * (count + 1) * 0.25
+ se = math.sqrt(count * (count + 1) * (2.0 * count + 1.0) / 24.0)
+ z = math.fabs(wt - mn) / se
+ prob = 2 * (1.0 - zprob(abs(z)))
+ return wt, prob
def lkruskalwallish(*args):
- """
+ """
The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
groups, requiring at least 5 subjects in each group. This function
calculates the Kruskal-Wallis H-test for 3 or more independent samples
@@ -1269,33 +1281,33 @@ and returns the result.
Usage: lkruskalwallish(*args)
Returns: H-statistic (corrected for ties), associated p-value
"""
- args = list(args)
- n = [0]*len(args)
- all = []
- n = map(len,args)
- for i in range(len(args)):
- all = all + args[i]
- ranked = rankdata(all)
- T = tiecorrect(ranked)
- for i in range(len(args)):
- args[i] = ranked[0:n[i]]
- del ranked[0:n[i]]
- rsums = []
- for i in range(len(args)):
- rsums.append(sum(args[i])**2)
- rsums[i] = rsums[i] / float(n[i])
- ssbn = sum(rsums)
- totaln = sum(n)
- h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)
- df = len(args) - 1
- if T == 0:
- raise ValueError, 'All numbers are identical in lkruskalwallish'
- h = h / float(T)
- return h, chisqprob(h,df)
+ args = list(args)
+ n = [0] * len(args)
+ all = []
+ n = map(len, args)
+ for i in range(len(args)):
+ all = all + args[i]
+ ranked = rankdata(all)
+ T = tiecorrect(ranked)
+ for i in range(len(args)):
+ args[i] = ranked[0:n[i]]
+ del ranked[0:n[i]]
+ rsums = []
+ for i in range(len(args)):
+ rsums.append(sum(args[i])**2)
+ rsums[i] = rsums[i] / float(n[i])
+ ssbn = sum(rsums)
+ totaln = sum(n)
+ h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
+ df = len(args) - 1
+ if T == 0:
+ raise ValueError, 'All numbers are identical in lkruskalwallish'
+ h = h / float(T)
+ return h, chisqprob(h, df)
def lfriedmanchisquare(*args):
- """
+ """
Friedman Chi-Square is a non-parametric, one-way within-subjects
ANOVA. This function calculates the Friedman Chi-square test for repeated
measures and returns the result, along with the associated probability
@@ -1306,102 +1318,106 @@ level(??).
Usage: lfriedmanchisquare(*args)
Returns: chi-square statistic, associated p-value
"""
- k = len(args)
- if k < 3:
- raise ValueError, 'Less than 3 levels. Friedman test not appropriate.'
- n = len(args[0])
- data = apply(pstat.abut,tuple(args))
- for i in range(len(data)):
- data[i] = rankdata(data[i])
- ssbn = 0
- for i in range(k):
- ssbn = ssbn + sum(args[i])**2
- chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)
- return chisq, chisqprob(chisq,k-1)
-
+ k = len(args)
+ if k < 3:
+ raise ValueError, 'Less than 3 levels. Friedman test not appropriate.'
+ n = len(args[0])
+ data = apply(pstat.abut, tuple(args))
+ for i in range(len(data)):
+ data[i] = rankdata(data[i])
+ ssbn = 0
+ for i in range(k):
+ ssbn = ssbn + sum(args[i])**2
+ chisq = 12.0 / (k * n * (k + 1)) * ssbn - 3 * n * (k + 1)
+ return chisq, chisqprob(chisq, k - 1)
####################################
#### PROBABILITY CALCULATIONS ####
####################################
-def lchisqprob(chisq,df):
- """
+
+def lchisqprob(chisq, df):
+ """
Returns the (1-tailed) probability value associated with the provided
chi-square value and df. Adapted from chisq.c in Gary Perlman's |Stat.
Usage: lchisqprob(chisq,df)
"""
- BIG = 20.0
- def ex(x):
- BIG = 20.0
- if x < -BIG:
- return 0.0
- else:
- return math.exp(x)
+ BIG = 20.0
- if chisq <=0 or df < 1:
- return 1.0
- a = 0.5 * chisq
- if df%2 == 0:
- even = 1
+ def ex(x):
+ BIG = 20.0
+ if x < -BIG:
+ return 0.0
else:
- even = 0
- if df > 1:
- y = ex(-a)
+ return math.exp(x)
+
+ if chisq <= 0 or df < 1:
+ return 1.0
+ a = 0.5 * chisq
+ if df % 2 == 0:
+ even = 1
+ else:
+ even = 0
+ if df > 1:
+ y = ex(-a)
+ if even:
+ s = y
+ else:
+ s = 2.0 * zprob(-math.sqrt(chisq))
+ if (df > 2):
+ chisq = 0.5 * (df - 1.0)
if even:
- s = y
+ z = 1.0
else:
- s = 2.0 * zprob(-math.sqrt(chisq))
- if (df > 2):
- chisq = 0.5 * (df - 1.0)
- if even:
- z = 1.0
- else:
- z = 0.5
- if a > BIG:
- if even:
- e = 0.0
- else:
- e = math.log(math.sqrt(math.pi))
- c = math.log(a)
- while (z <= chisq):
- e = math.log(z) + e
- s = s + ex(c*z-a-e)
- z = z + 1.0
- return s
- else:
- if even:
- e = 1.0
- else:
- e = 1.0 / math.sqrt(math.pi) / math.sqrt(a)
- c = 0.0
- while (z <= chisq):
- e = e * (a/float(z))
- c = c + e
- z = z + 1.0
- return (c*y+s)
+ z = 0.5
+ if a > BIG:
+ if even:
+ e = 0.0
+ else:
+ e = math.log(math.sqrt(math.pi))
+ c = math.log(a)
+ while (z <= chisq):
+ e = math.log(z) + e
+ s = s + ex(c * z - a - e)
+ z = z + 1.0
+ return s
else:
- return s
+ if even:
+ e = 1.0
+ else:
+ e = 1.0 / math.sqrt(math.pi) / math.sqrt(a)
+ c = 0.0
+ while (z <= chisq):
+ e = e * (a / float(z))
+ c = c + e
+ z = z + 1.0
+ return (c * y + s)
+ else:
+ return s
def lerfcc(x):
- """
+ """
Returns the complementary error function erfc(x) with fractional
error everywhere less than 1.2e-7. Adapted from Numerical Recipies.
Usage: lerfcc(x)
"""
- z = abs(x)
- t = 1.0 / (1.0+0.5*z)
- ans = t * math.exp(-z*z-1.26551223 + t*(1.00002368+t*(0.37409196+t*(0.09678418+t*(-0.18628806+t*(0.27886807+t*(-1.13520398+t*(1.48851587+t*(-0.82215223+t*0.17087277)))))))))
- if x >= 0:
- return ans
- else:
- return 2.0 - ans
+ z = abs(x)
+ t = 1.0 / (1.0 + 0.5 * z)
+ ans = t * math.exp(-z * z - 1.26551223 + t * (1.00002368 + t * (
+ 0.37409196 + t * (0.09678418 + t * (-0.18628806 + t * (0.27886807 + t * (
+ -1.13520398 + t * (1.48851587 + t * (-0.82215223 + t * 0.17087277)))))
+ ))))
+ if x >= 0:
+ return ans
+ else:
+ return 2.0 - ans
def lzprob(z):
- """
+ """
Returns the area under the normal curve 'to the left of' the given z value.
Thus,
for z<0, zprob(z) = 1-tail probability
@@ -1411,106 +1427,103 @@ Adapted from z.c in Gary Perlman's |Stat.
Usage: lzprob(z)
"""
- Z_MAX = 6.0 # maximum meaningful z-value
- if z == 0.0:
- x = 0.0
+ Z_MAX = 6.0 # maximum meaningful z-value
+ if z == 0.0:
+ x = 0.0
+ else:
+ y = 0.5 * math.fabs(z)
+ if y >= (Z_MAX * 0.5):
+ x = 1.0
+ elif (y < 1.0):
+ w = y * y
+ x = ((
+ ((((((0.000124818987 * w - 0.001075204047) * w + 0.005198775019) * w -
+ 0.019198292004) * w + 0.059054035642) * w - 0.151968751364) * w +
+ 0.319152932694) * w - 0.531923007300) * w + 0.797884560593) * y * 2.0
else:
- y = 0.5 * math.fabs(z)
- if y >= (Z_MAX*0.5):
- x = 1.0
- elif (y < 1.0):
- w = y*y
- x = ((((((((0.000124818987 * w
- -0.001075204047) * w +0.005198775019) * w
- -0.019198292004) * w +0.059054035642) * w
- -0.151968751364) * w +0.319152932694) * w
- -0.531923007300) * w +0.797884560593) * y * 2.0
- else:
- y = y - 2.0
- x = (((((((((((((-0.000045255659 * y
- +0.000152529290) * y -0.000019538132) * y
- -0.000676904986) * y +0.001390604284) * y
- -0.000794620820) * y -0.002034254874) * y
- +0.006549791214) * y -0.010557625006) * y
- +0.011630447319) * y -0.009279453341) * y
- +0.005353579108) * y -0.002141268741) * y
- +0.000535310849) * y +0.999936657524
- if z > 0.0:
- prob = ((x+1.0)*0.5)
- else:
- prob = ((1.0-x)*0.5)
- return prob
+ y = y - 2.0
+ x = (((((((
+ ((((((-0.000045255659 * y + 0.000152529290) * y - 0.000019538132) * y
+ - 0.000676904986) * y + 0.001390604284) * y - 0.000794620820) * y
+ - 0.002034254874) * y + 0.006549791214) * y - 0.010557625006) * y +
+ 0.011630447319) * y - 0.009279453341) * y + 0.005353579108) * y -
+ 0.002141268741) * y + 0.000535310849) * y + 0.999936657524
+ if z > 0.0:
+ prob = ((x + 1.0) * 0.5)
+ else:
+ prob = ((1.0 - x) * 0.5)
+ return prob
def lksprob(alam):
- """
+ """
Computes a Kolmolgorov-Smirnov t-test significance level. Adapted from
Numerical Recipies.
Usage: lksprob(alam)
"""
- fac = 2.0
- sum = 0.0
- termbf = 0.0
- a2 = -2.0*alam*alam
- for j in range(1,201):
- term = fac*math.exp(a2*j*j)
- sum = sum + term
- if math.fabs(term) <= (0.001*termbf) or math.fabs(term) < (1.0e-8*sum):
- return sum
- fac = -fac
- termbf = math.fabs(term)
- return 1.0 # Get here only if fails to converge; was 0.0!!
-
-
-def lfprob (dfnum, dfden, F):
- """
+ fac = 2.0
+ sum = 0.0
+ termbf = 0.0
+ a2 = -2.0 * alam * alam
+ for j in range(1, 201):
+ term = fac * math.exp(a2 * j * j)
+ sum = sum + term
+ if math.fabs(term) <= (0.001 * termbf) or math.fabs(term) < (1.0e-8 * sum):
+ return sum
+ fac = -fac
+ termbf = math.fabs(term)
+ return 1.0 # Get here only if fails to converge; was 0.0!!
+
+
+def lfprob(dfnum, dfden, F):
+ """
Returns the (1-tailed) significance level (p-value) of an F
statistic given the degrees of freedom for the numerator (dfR-dfF) and
the degrees of freedom for the denominator (dfF).
Usage: lfprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn
"""
- p = betai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F))
- return p
+ p = betai(0.5 * dfden, 0.5 * dfnum, dfden / float(dfden + dfnum * F))
+ return p
-def lbetacf(a,b,x):
- """
+def lbetacf(a, b, x):
+ """
This function evaluates the continued fraction form of the incomplete
Beta function, betai. (Adapted from: Numerical Recipies in C.)
Usage: lbetacf(a,b,x)
"""
- ITMAX = 200
- EPS = 3.0e-7
-
- bm = az = am = 1.0
- qab = a+b
- qap = a+1.0
- qam = a-1.0
- bz = 1.0-qab*x/qap
- for i in range(ITMAX+1):
- em = float(i+1)
- tem = em + em
- d = em*(b-em)*x/((qam+tem)*(a+tem))
- ap = az + d*am
- bp = bz+d*bm
- d = -(a+em)*(qab+em)*x/((qap+tem)*(a+tem))
- app = ap+d*az
- bpp = bp+d*bz
- aold = az
- am = ap/bpp
- bm = bp/bpp
- az = app/bpp
- bz = 1.0
- if (abs(az-aold)<(EPS*abs(az))):
- return az
- print 'a or b too big, or ITMAX too small in Betacf.'
+ ITMAX = 200
+ EPS = 3.0e-7
+
+ bm = az = am = 1.0
+ qab = a + b
+ qap = a + 1.0
+ qam = a - 1.0
+ bz = 1.0 - qab * x / qap
+ for i in range(ITMAX + 1):
+ em = float(i + 1)
+ tem = em + em
+ d = em * (b - em) * x / ((qam + tem) * (a + tem))
+ ap = az + d * am
+ bp = bz + d * bm
+ d = -(a + em) * (qab + em) * x / ((qap + tem) * (a + tem))
+ app = ap + d * az
+ bpp = bp + d * bz
+ aold = az
+ am = ap / bpp
+ bm = bp / bpp
+ az = app / bpp
+ bz = 1.0
+ if (abs(az - aold) < (EPS * abs(az))):
+ return az
+ print 'a or b too big, or ITMAX too small in Betacf.'
def lgammln(xx):
- """
+ """
Returns the gamma function of xx.
Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
(Adapted from: Numerical Recipies in C.)
@@ -1518,20 +1531,20 @@ Returns the gamma function of xx.
Usage: lgammln(xx)
"""
- coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516,
- 0.120858003e-2, -0.536382e-5]
- x = xx - 1.0
- tmp = x + 5.5
- tmp = tmp - (x+0.5)*math.log(tmp)
- ser = 1.0
- for j in range(len(coeff)):
- x = x + 1
- ser = ser + coeff[j]/x
- return -tmp + math.log(2.50662827465*ser)
+ coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516, 0.120858003e-2,
+ -0.536382e-5]
+ x = xx - 1.0
+ tmp = x + 5.5
+ tmp = tmp - (x + 0.5) * math.log(tmp)
+ ser = 1.0
+ for j in range(len(coeff)):
+ x = x + 1
+ ser = ser + coeff[j] / x
+ return -tmp + math.log(2.50662827465 * ser)
-def lbetai(a,b,x):
- """
+def lbetai(a, b, x):
+ """
Returns the incomplete beta function:
I-sub-x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
@@ -1542,25 +1555,25 @@ using the betacf function. (Adapted from: Numerical Recipies in C.)
Usage: lbetai(a,b,x)
"""
- if (x<0.0 or x>1.0):
- raise ValueError, 'Bad x in lbetai'
- if (x==0.0 or x==1.0):
- bt = 0.0
- else:
- bt = math.exp(gammln(a+b)-gammln(a)-gammln(b)+a*math.log(x)+b*
- math.log(1.0-x))
- if (x<(a+1.0)/(a+b+2.0)):
- return bt*betacf(a,b,x)/float(a)
- else:
- return 1.0-bt*betacf(b,a,1.0-x)/float(b)
-
+ if (x < 0.0 or x > 1.0):
+ raise ValueError, 'Bad x in lbetai'
+ if (x == 0.0 or x == 1.0):
+ bt = 0.0
+ else:
+ bt = math.exp(gammln(a + b) - gammln(a) - gammln(b) + a * math.log(x) + b *
+ math.log(1.0 - x))
+ if (x < (a + 1.0) / (a + b + 2.0)):
+ return bt * betacf(a, b, x) / float(a)
+ else:
+ return 1.0 - bt * betacf(b, a, 1.0 - x) / float(b)
####################################
####### ANOVA CALCULATIONS #######
####################################
+
def lF_oneway(*lists):
- """
+ """
Performs a 1-way ANOVA, returning an F-value and probability given
any number of groups. From Heiman, pp.394-7.
@@ -1568,36 +1581,36 @@ Usage: F_oneway(*lists) where *lists is any number of lists, one per
treatment group
Returns: F value, one-tailed p-value
"""
- a = len(lists) # ANOVA on 'a' groups, each in it's own list
- means = [0]*a
- vars = [0]*a
- ns = [0]*a
- alldata = []
- tmp = map(N.array,lists)
- means = map(amean,tmp)
- vars = map(avar,tmp)
- ns = map(len,lists)
- for i in range(len(lists)):
- alldata = alldata + lists[i]
- alldata = N.array(alldata)
- bign = len(alldata)
- sstot = ass(alldata)-(asquare_of_sums(alldata)/float(bign))
- ssbn = 0
- for list in lists:
- ssbn = ssbn + asquare_of_sums(N.array(list))/float(len(list))
- ssbn = ssbn - (asquare_of_sums(alldata)/float(bign))
- sswn = sstot-ssbn
- dfbn = a-1
- dfwn = bign - a
- msb = ssbn/float(dfbn)
- msw = sswn/float(dfwn)
- f = msb/msw
- prob = fprob(dfbn,dfwn,f)
- return f, prob
-
-
-def lF_value (ER,EF,dfnum,dfden):
- """
+ a = len(lists) # ANOVA on 'a' groups, each in it's own list
+ means = [0] * a
+ vars = [0] * a
+ ns = [0] * a
+ alldata = []
+ tmp = map(N.array, lists)
+ means = map(amean, tmp)
+ vars = map(avar, tmp)
+ ns = map(len, lists)
+ for i in range(len(lists)):
+ alldata = alldata + lists[i]
+ alldata = N.array(alldata)
+ bign = len(alldata)
+ sstot = ass(alldata) - (asquare_of_sums(alldata) / float(bign))
+ ssbn = 0
+ for list in lists:
+ ssbn = ssbn + asquare_of_sums(N.array(list)) / float(len(list))
+ ssbn = ssbn - (asquare_of_sums(alldata) / float(bign))
+ sswn = sstot - ssbn
+ dfbn = a - 1
+ dfwn = bign - a
+ msb = ssbn / float(dfbn)
+ msw = sswn / float(dfwn)
+ f = msb / msw
+ prob = fprob(dfbn, dfwn, f)
+ return f, prob
+
+
+def lF_value(ER, EF, dfnum, dfden):
+ """
Returns an F-statistic given the following:
ER = error associated with the null hypothesis (the Restricted model)
EF = error associated with the alternate hypothesis (the Full model)
@@ -1606,15 +1619,15 @@ Returns an F-statistic given the following:
Usage: lF_value(ER,EF,dfnum,dfden)
"""
- return ((ER-EF)/float(dfnum) / (EF/float(dfden)))
-
+ return ((ER - EF) / float(dfnum) / (EF / float(dfden)))
####################################
######## SUPPORT FUNCTIONS #######
####################################
-def writecc (listoflists,file,writetype='w',extra=2):
- """
+
+def writecc(listoflists, file, writetype='w', extra=2):
+ """
Writes a list of lists to a file in columns, customized by the max
size of items within the columns (max size of items in col, +2 characters)
to specified file. File-overwrite is the default.
@@ -1622,187 +1635,191 @@ to specified file. File-overwrite is the default.
Usage: writecc (listoflists,file,writetype='w',extra=2)
Returns: None
"""
- if type(listoflists[0]) not in [ListType,TupleType]:
- listoflists = [listoflists]
- outfile = open(file,writetype)
- rowstokill = []
- list2print = copy.deepcopy(listoflists)
- for i in range(len(listoflists)):
- if listoflists[i] == ['\n'] or listoflists[i]=='\n' or listoflists[i]=='dashes':
- rowstokill = rowstokill + [i]
- rowstokill.reverse()
- for row in rowstokill:
- del list2print[row]
- maxsize = [0]*len(list2print[0])
- for col in range(len(list2print[0])):
- items = pstat.colex(list2print,col)
- items = map(pstat.makestr,items)
- maxsize[col] = max(map(len,items)) + extra
- for row in listoflists:
- if row == ['\n'] or row == '\n':
- outfile.write('\n')
- elif row == ['dashes'] or row == 'dashes':
- dashes = [0]*len(maxsize)
- for j in range(len(maxsize)):
- dashes[j] = '-'*(maxsize[j]-2)
- outfile.write(pstat.lineincustcols(dashes,maxsize))
- else:
- outfile.write(pstat.lineincustcols(row,maxsize))
- outfile.write('\n')
- outfile.close()
- return None
+ if type(listoflists[0]) not in [ListType, TupleType]:
+ listoflists = [listoflists]
+ outfile = open(file, writetype)
+ rowstokill = []
+ list2print = copy.deepcopy(listoflists)
+ for i in range(len(listoflists)):
+ if listoflists[i] == [
+ '\n'
+ ] or listoflists[i] == '\n' or listoflists[i] == 'dashes':
+ rowstokill = rowstokill + [i]
+ rowstokill.reverse()
+ for row in rowstokill:
+ del list2print[row]
+ maxsize = [0] * len(list2print[0])
+ for col in range(len(list2print[0])):
+ items = pstat.colex(list2print, col)
+ items = map(pstat.makestr, items)
+ maxsize[col] = max(map(len, items)) + extra
+ for row in listoflists:
+ if row == ['\n'] or row == '\n':
+ outfile.write('\n')
+ elif row == ['dashes'] or row == 'dashes':
+ dashes = [0] * len(maxsize)
+ for j in range(len(maxsize)):
+ dashes[j] = '-' * (maxsize[j] - 2)
+ outfile.write(pstat.lineincustcols(dashes, maxsize))
+ else:
+ outfile.write(pstat.lineincustcols(row, maxsize))
+ outfile.write('\n')
+ outfile.close()
+ return None
-def lincr(l,cap): # to increment a list up to a max-list of 'cap'
- """
+def lincr(l, cap): # to increment a list up to a max-list of 'cap'
+ """
Simulate a counting system from an n-dimensional list.
Usage: lincr(l,cap) l=list to increment, cap=max values for each list pos'n
Returns: next set of values for list l, OR -1 (if overflow)
"""
- l[0] = l[0] + 1 # e.g., [0,0,0] --> [2,4,3] (=cap)
- for i in range(len(l)):
- if l[i] > cap[i] and i < len(l)-1: # if carryover AND not done
- l[i] = 0
- l[i+1] = l[i+1] + 1
- elif l[i] > cap[i] and i == len(l)-1: # overflow past last column, must be finished
- l = -1
- return l
+ l[0] = l[0] + 1 # e.g., [0,0,0] --> [2,4,3] (=cap)
+ for i in range(len(l)):
+ if l[i] > cap[i] and i < len(l) - 1: # if carryover AND not done
+ l[i] = 0
+ l[i + 1] = l[i + 1] + 1
+ elif l[i] > cap[i] and i == len(
+ l) - 1: # overflow past last column, must be finished
+ l = -1
+ return l
-def lsum (inlist):
- """
+def lsum(inlist):
+ """
Returns the sum of the items in the passed list.
Usage: lsum(inlist)
"""
- s = 0
- for item in inlist:
- s = s + item
- return s
+ s = 0
+ for item in inlist:
+ s = s + item
+ return s
-def lcumsum (inlist):
- """
+def lcumsum(inlist):
+ """
Returns a list consisting of the cumulative sum of the items in the
passed list.
Usage: lcumsum(inlist)
"""
- newlist = copy.deepcopy(inlist)
- for i in range(1,len(newlist)):
- newlist[i] = newlist[i] + newlist[i-1]
- return newlist
+ newlist = copy.deepcopy(inlist)
+ for i in range(1, len(newlist)):
+ newlist[i] = newlist[i] + newlist[i - 1]
+ return newlist
def lss(inlist):
- """
+ """
Squares each value in the passed list, adds up these squares and
returns the result.
Usage: lss(inlist)
"""
- ss = 0
- for item in inlist:
- ss = ss + item*item
- return ss
+ ss = 0
+ for item in inlist:
+ ss = ss + item * item
+ return ss
-def lsummult (list1,list2):
- """
+def lsummult(list1, list2):
+ """
Multiplies elements in list1 and list2, element by element, and
returns the sum of all resulting multiplications. Must provide equal
length lists.
Usage: lsummult(list1,list2)
"""
- if len(list1) <> len(list2):
- raise ValueError, "Lists not equal length in summult."
- s = 0
- for item1,item2 in pstat.abut(list1,list2):
- s = s + item1*item2
- return s
+ if len(list1) <> len(list2):
+ raise ValueError, 'Lists not equal length in summult.'
+ s = 0
+ for item1, item2 in pstat.abut(list1, list2):
+ s = s + item1 * item2
+ return s
-def lsumdiffsquared(x,y):
- """
+def lsumdiffsquared(x, y):
+ """
Takes pairwise differences of the values in lists x and y, squares
these differences, and returns the sum of these squares.
Usage: lsumdiffsquared(x,y)
Returns: sum[(x[i]-y[i])**2]
"""
- sds = 0
- for i in range(len(x)):
- sds = sds + (x[i]-y[i])**2
- return sds
+ sds = 0
+ for i in range(len(x)):
+ sds = sds + (x[i] - y[i])**2
+ return sds
def lsquare_of_sums(inlist):
- """
+ """
Adds the values in the passed list, squares the sum, and returns
the result.
Usage: lsquare_of_sums(inlist)
Returns: sum(inlist[i])**2
"""
- s = sum(inlist)
- return float(s)*s
+ s = sum(inlist)
+ return float(s) * s
def lshellsort(inlist):
- """
+ """
Shellsort algorithm. Sorts a 1D-list.
Usage: lshellsort(inlist)
Returns: sorted-inlist, sorting-index-vector (for original list)
"""
- n = len(inlist)
- svec = copy.deepcopy(inlist)
- ivec = range(n)
- gap = n/2 # integer division needed
- while gap >0:
- for i in range(gap,n):
- for j in range(i-gap,-1,-gap):
- while j>=0 and svec[j]>svec[j+gap]:
- temp = svec[j]
- svec[j] = svec[j+gap]
- svec[j+gap] = temp
- itemp = ivec[j]
- ivec[j] = ivec[j+gap]
- ivec[j+gap] = itemp
- gap = gap / 2 # integer division needed
+ n = len(inlist)
+ svec = copy.deepcopy(inlist)
+ ivec = range(n)
+ gap = n / 2 # integer division needed
+ while gap > 0:
+ for i in range(gap, n):
+ for j in range(i - gap, -1, -gap):
+ while j >= 0 and svec[j] > svec[j + gap]:
+ temp = svec[j]
+ svec[j] = svec[j + gap]
+ svec[j + gap] = temp
+ itemp = ivec[j]
+ ivec[j] = ivec[j + gap]
+ ivec[j + gap] = itemp
+ gap = gap / 2 # integer division needed
# svec is now sorted inlist, and ivec has the order svec[i] = vec[ivec[i]]
- return svec, ivec
+ return svec, ivec
def lrankdata(inlist):
- """
+ """
Ranks the data in inlist, dealing with ties appropritely. Assumes
a 1D inlist. Adapted from Gary Perlman's |Stat ranksort.
Usage: lrankdata(inlist)
Returns: a list of length equal to inlist, containing rank scores
"""
- n = len(inlist)
- svec, ivec = shellsort(inlist)
- sumranks = 0
- dupcount = 0
- newlist = [0]*n
- for i in range(n):
- sumranks = sumranks + i
- dupcount = dupcount + 1
- if i==n-1 or svec[i] <> svec[i+1]:
- averank = sumranks / float(dupcount) + 1
- for j in range(i-dupcount+1,i+1):
- newlist[ivec[j]] = averank
- sumranks = 0
- dupcount = 0
- return newlist
-
-
-def outputpairedstats(fname,writemode,name1,n1,m1,se1,min1,max1,name2,n2,m2,se2,min2,max2,statname,stat,prob):
- """
+ n = len(inlist)
+ svec, ivec = shellsort(inlist)
+ sumranks = 0
+ dupcount = 0
+ newlist = [0] * n
+ for i in range(n):
+ sumranks = sumranks + i
+ dupcount = dupcount + 1
+ if i == n - 1 or svec[i] <> svec[i + 1]:
+ averank = sumranks / float(dupcount) + 1
+ for j in range(i - dupcount + 1, i + 1):
+ newlist[ivec[j]] = averank
+ sumranks = 0
+ dupcount = 0
+ return newlist
+
+
+def outputpairedstats(fname, writemode, name1, n1, m1, se1, min1, max1, name2,
+ n2, m2, se2, min2, max2, statname, stat, prob):
+ """
Prints or write to a file stats for two groups, using the name, n,
mean, sterr, min and max for each group, as well as the statistic name,
its value, and the associated p-value.
@@ -1813,53 +1830,58 @@ Usage: outputpairedstats(fname,writemode,
statname,stat,prob)
Returns: None
"""
- suffix = '' # for *s after the p-value
+ suffix = '' # for *s after the p-value
+ try:
+ x = prob.shape
+ prob = prob[0]
+ except:
+ pass
+ if prob < 0.001:
+ suffix = ' ***'
+ elif prob < 0.01:
+ suffix = ' **'
+ elif prob < 0.05:
+ suffix = ' *'
+ title = [['Name', 'N', 'Mean', 'SD', 'Min', 'Max']]
+ lofl = title + [[name1, n1, round(m1, 3), round(
+ math.sqrt(se1), 3), min1, max1], [name2, n2, round(m2, 3), round(
+ math.sqrt(se2), 3), min2, max2]]
+ if type(fname) <> StringType or len(fname) == 0:
+ print
+ print statname
+ print
+ pstat.printcc(lofl)
+ print
try:
- x = prob.shape
+ if stat.shape == ():
+ stat = stat[0]
+ if prob.shape == ():
prob = prob[0]
except:
- pass
- if prob < 0.001: suffix = ' ***'
- elif prob < 0.01: suffix = ' **'
- elif prob < 0.05: suffix = ' *'
- title = [['Name','N','Mean','SD','Min','Max']]
- lofl = title+[[name1,n1,round(m1,3),round(math.sqrt(se1),3),min1,max1],
- [name2,n2,round(m2,3),round(math.sqrt(se2),3),min2,max2]]
- if type(fname)<>StringType or len(fname)==0:
- print
- print statname
- print
- pstat.printcc(lofl)
- print
- try:
- if stat.shape == ():
- stat = stat[0]
- if prob.shape == ():
- prob = prob[0]
- except:
- pass
- print 'Test statistic = ',round(stat,3),' p = ',round(prob,3),suffix
- print
- else:
- file = open(fname,writemode)
- file.write('\n'+statname+'\n\n')
- file.close()
- writecc(lofl,fname,'a')
- file = open(fname,'a')
- try:
- if stat.shape == ():
- stat = stat[0]
- if prob.shape == ():
- prob = prob[0]
- except:
- pass
- file.write(pstat.list2string(['\nTest statistic = ',round(stat,4),' p = ',round(prob,4),suffix,'\n\n']))
- file.close()
- return None
+ pass
+ print 'Test statistic = ', round(stat, 3), ' p = ', round(prob, 3), suffix
+ print
+ else:
+ file = open(fname, writemode)
+ file.write('\n' + statname + '\n\n')
+ file.close()
+ writecc(lofl, fname, 'a')
+ file = open(fname, 'a')
+ try:
+ if stat.shape == ():
+ stat = stat[0]
+ if prob.shape == ():
+ prob = prob[0]
+ except:
+ pass
+ file.write(pstat.list2string(['\nTest statistic = ', round(stat, 4),
+ ' p = ', round(prob, 4), suffix, '\n\n']))
+ file.close()
+ return None
-def lfindwithin (data):
- """
+def lfindwithin(data):
+ """
Returns an integer representing a binary vector, where 1=within-
subject factor, 0=between. Input equals the entire data 2D list (i.e.,
column 0=random factor, column -1=measured values (those two are skipped).
@@ -1871,17 +1893,16 @@ designations on each factor). See also stats.anova.__doc__.
Usage: lfindwithin(data) data in |Stat format
"""
- numfact = len(data[0])-1
- withinvec = 0
- for col in range(1,numfact):
- examplelevel = pstat.unique(pstat.colex(data,col))[0]
- rows = pstat.linexand(data,col,examplelevel) # get 1 level of this factor
- factsubjs = pstat.unique(pstat.colex(rows,0))
- allsubjs = pstat.unique(pstat.colex(data,0))
- if len(factsubjs) == len(allsubjs): # fewer Ss than scores on this factor?
- withinvec = withinvec + (1 << col)
- return withinvec
-
+ numfact = len(data[0]) - 1
+ withinvec = 0
+ for col in range(1, numfact):
+ examplelevel = pstat.unique(pstat.colex(data, col))[0]
+ rows = pstat.linexand(data, col, examplelevel) # get 1 level of this factor
+ factsubjs = pstat.unique(pstat.colex(rows, 0))
+ allsubjs = pstat.unique(pstat.colex(data, 0))
+ if len(factsubjs) == len(allsubjs): # fewer Ss than scores on this factor?
+ withinvec = withinvec + (1 << col)
+ return withinvec
#########################################################
#########################################################
@@ -1890,90 +1911,89 @@ Usage: lfindwithin(data) data in |Stat format
#########################################################
## CENTRAL TENDENCY:
-geometricmean = Dispatch ( (lgeometricmean, (ListType, TupleType)), )
-harmonicmean = Dispatch ( (lharmonicmean, (ListType, TupleType)), )
-mean = Dispatch ( (lmean, (ListType, TupleType)), )
-median = Dispatch ( (lmedian, (ListType, TupleType)), )
-medianscore = Dispatch ( (lmedianscore, (ListType, TupleType)), )
-mode = Dispatch ( (lmode, (ListType, TupleType)), )
+geometricmean = Dispatch((lgeometricmean, (ListType, TupleType)),)
+harmonicmean = Dispatch((lharmonicmean, (ListType, TupleType)),)
+mean = Dispatch((lmean, (ListType, TupleType)),)
+median = Dispatch((lmedian, (ListType, TupleType)),)
+medianscore = Dispatch((lmedianscore, (ListType, TupleType)),)
+mode = Dispatch((lmode, (ListType, TupleType)),)
## MOMENTS:
-moment = Dispatch ( (lmoment, (ListType, TupleType)), )
-variation = Dispatch ( (lvariation, (ListType, TupleType)), )
-skew = Dispatch ( (lskew, (ListType, TupleType)), )
-kurtosis = Dispatch ( (lkurtosis, (ListType, TupleType)), )
-describe = Dispatch ( (ldescribe, (ListType, TupleType)), )
+moment = Dispatch((lmoment, (ListType, TupleType)),)
+variation = Dispatch((lvariation, (ListType, TupleType)),)
+skew = Dispatch((lskew, (ListType, TupleType)),)
+kurtosis = Dispatch((lkurtosis, (ListType, TupleType)),)
+describe = Dispatch((ldescribe, (ListType, TupleType)),)
## FREQUENCY STATISTICS:
-itemfreq = Dispatch ( (litemfreq, (ListType, TupleType)), )
-scoreatpercentile = Dispatch ( (lscoreatpercentile, (ListType, TupleType)), )
-percentileofscore = Dispatch ( (lpercentileofscore, (ListType, TupleType)), )
-histogram = Dispatch ( (lhistogram, (ListType, TupleType)), )
-cumfreq = Dispatch ( (lcumfreq, (ListType, TupleType)), )
-relfreq = Dispatch ( (lrelfreq, (ListType, TupleType)), )
+itemfreq = Dispatch((litemfreq, (ListType, TupleType)),)
+scoreatpercentile = Dispatch((lscoreatpercentile, (ListType, TupleType)),)
+percentileofscore = Dispatch((lpercentileofscore, (ListType, TupleType)),)
+histogram = Dispatch((lhistogram, (ListType, TupleType)),)
+cumfreq = Dispatch((lcumfreq, (ListType, TupleType)),)
+relfreq = Dispatch((lrelfreq, (ListType, TupleType)),)
## VARIABILITY:
-obrientransform = Dispatch ( (lobrientransform, (ListType, TupleType)), )
-samplevar = Dispatch ( (lsamplevar, (ListType, TupleType)), )
-samplestdev = Dispatch ( (lsamplestdev, (ListType, TupleType)), )
-var = Dispatch ( (lvar, (ListType, TupleType)), )
-stdev = Dispatch ( (lstdev, (ListType, TupleType)), )
-sterr = Dispatch ( (lsterr, (ListType, TupleType)), )
-sem = Dispatch ( (lsem, (ListType, TupleType)), )
-z = Dispatch ( (lz, (ListType, TupleType)), )
-zs = Dispatch ( (lzs, (ListType, TupleType)), )
+obrientransform = Dispatch((lobrientransform, (ListType, TupleType)),)
+samplevar = Dispatch((lsamplevar, (ListType, TupleType)),)
+samplestdev = Dispatch((lsamplestdev, (ListType, TupleType)),)
+var = Dispatch((lvar, (ListType, TupleType)),)
+stdev = Dispatch((lstdev, (ListType, TupleType)),)
+sterr = Dispatch((lsterr, (ListType, TupleType)),)
+sem = Dispatch((lsem, (ListType, TupleType)),)
+z = Dispatch((lz, (ListType, TupleType)),)
+zs = Dispatch((lzs, (ListType, TupleType)),)
## TRIMMING FCNS:
-trimboth = Dispatch ( (ltrimboth, (ListType, TupleType)), )
-trim1 = Dispatch ( (ltrim1, (ListType, TupleType)), )
+trimboth = Dispatch((ltrimboth, (ListType, TupleType)),)
+trim1 = Dispatch((ltrim1, (ListType, TupleType)),)
## CORRELATION FCNS:
-paired = Dispatch ( (lpaired, (ListType, TupleType)), )
-pearsonr = Dispatch ( (lpearsonr, (ListType, TupleType)), )
-spearmanr = Dispatch ( (lspearmanr, (ListType, TupleType)), )
-pointbiserialr = Dispatch ( (lpointbiserialr, (ListType, TupleType)), )
-kendalltau = Dispatch ( (lkendalltau, (ListType, TupleType)), )
-linregress = Dispatch ( (llinregress, (ListType, TupleType)), )
+paired = Dispatch((lpaired, (ListType, TupleType)),)
+pearsonr = Dispatch((lpearsonr, (ListType, TupleType)),)
+spearmanr = Dispatch((lspearmanr, (ListType, TupleType)),)
+pointbiserialr = Dispatch((lpointbiserialr, (ListType, TupleType)),)
+kendalltau = Dispatch((lkendalltau, (ListType, TupleType)),)
+linregress = Dispatch((llinregress, (ListType, TupleType)),)
## INFERENTIAL STATS:
-ttest_1samp = Dispatch ( (lttest_1samp, (ListType, TupleType)), )
-ttest_ind = Dispatch ( (lttest_ind, (ListType, TupleType)), )
-ttest_rel = Dispatch ( (lttest_rel, (ListType, TupleType)), )
-chisquare = Dispatch ( (lchisquare, (ListType, TupleType)), )
-ks_2samp = Dispatch ( (lks_2samp, (ListType, TupleType)), )
-mannwhitneyu = Dispatch ( (lmannwhitneyu, (ListType, TupleType)), )
-ranksums = Dispatch ( (lranksums, (ListType, TupleType)), )
-tiecorrect = Dispatch ( (ltiecorrect, (ListType, TupleType)), )
-wilcoxont = Dispatch ( (lwilcoxont, (ListType, TupleType)), )
-kruskalwallish = Dispatch ( (lkruskalwallish, (ListType, TupleType)), )
-friedmanchisquare = Dispatch ( (lfriedmanchisquare, (ListType, TupleType)), )
+ttest_1samp = Dispatch((lttest_1samp, (ListType, TupleType)),)
+ttest_ind = Dispatch((lttest_ind, (ListType, TupleType)),)
+ttest_rel = Dispatch((lttest_rel, (ListType, TupleType)),)
+chisquare = Dispatch((lchisquare, (ListType, TupleType)),)
+ks_2samp = Dispatch((lks_2samp, (ListType, TupleType)),)
+mannwhitneyu = Dispatch((lmannwhitneyu, (ListType, TupleType)),)
+ranksums = Dispatch((lranksums, (ListType, TupleType)),)
+tiecorrect = Dispatch((ltiecorrect, (ListType, TupleType)),)
+wilcoxont = Dispatch((lwilcoxont, (ListType, TupleType)),)
+kruskalwallish = Dispatch((lkruskalwallish, (ListType, TupleType)),)
+friedmanchisquare = Dispatch((lfriedmanchisquare, (ListType, TupleType)),)
## PROBABILITY CALCS:
-chisqprob = Dispatch ( (lchisqprob, (IntType, FloatType)), )
-zprob = Dispatch ( (lzprob, (IntType, FloatType)), )
-ksprob = Dispatch ( (lksprob, (IntType, FloatType)), )
-fprob = Dispatch ( (lfprob, (IntType, FloatType)), )
-betacf = Dispatch ( (lbetacf, (IntType, FloatType)), )
-betai = Dispatch ( (lbetai, (IntType, FloatType)), )
-erfcc = Dispatch ( (lerfcc, (IntType, FloatType)), )
-gammln = Dispatch ( (lgammln, (IntType, FloatType)), )
+chisqprob = Dispatch((lchisqprob, (IntType, FloatType)),)
+zprob = Dispatch((lzprob, (IntType, FloatType)),)
+ksprob = Dispatch((lksprob, (IntType, FloatType)),)
+fprob = Dispatch((lfprob, (IntType, FloatType)),)
+betacf = Dispatch((lbetacf, (IntType, FloatType)),)
+betai = Dispatch((lbetai, (IntType, FloatType)),)
+erfcc = Dispatch((lerfcc, (IntType, FloatType)),)
+gammln = Dispatch((lgammln, (IntType, FloatType)),)
## ANOVA FUNCTIONS:
-F_oneway = Dispatch ( (lF_oneway, (ListType, TupleType)), )
-F_value = Dispatch ( (lF_value, (ListType, TupleType)), )
+F_oneway = Dispatch((lF_oneway, (ListType, TupleType)),)
+F_value = Dispatch((lF_value, (ListType, TupleType)),)
## SUPPORT FUNCTIONS:
-incr = Dispatch ( (lincr, (ListType, TupleType)), )
-sum = Dispatch ( (lsum, (ListType, TupleType)), )
-cumsum = Dispatch ( (lcumsum, (ListType, TupleType)), )
-ss = Dispatch ( (lss, (ListType, TupleType)), )
-summult = Dispatch ( (lsummult, (ListType, TupleType)), )
-square_of_sums = Dispatch ( (lsquare_of_sums, (ListType, TupleType)), )
-sumdiffsquared = Dispatch ( (lsumdiffsquared, (ListType, TupleType)), )
-shellsort = Dispatch ( (lshellsort, (ListType, TupleType)), )
-rankdata = Dispatch ( (lrankdata, (ListType, TupleType)), )
-findwithin = Dispatch ( (lfindwithin, (ListType, TupleType)), )
-
+incr = Dispatch((lincr, (ListType, TupleType)),)
+sum = Dispatch((lsum, (ListType, TupleType)),)
+cumsum = Dispatch((lcumsum, (ListType, TupleType)),)
+ss = Dispatch((lss, (ListType, TupleType)),)
+summult = Dispatch((lsummult, (ListType, TupleType)),)
+square_of_sums = Dispatch((lsquare_of_sums, (ListType, TupleType)),)
+sumdiffsquared = Dispatch((lsumdiffsquared, (ListType, TupleType)),)
+shellsort = Dispatch((lshellsort, (ListType, TupleType)),)
+rankdata = Dispatch((lrankdata, (ListType, TupleType)),)
+findwithin = Dispatch((lfindwithin, (ListType, TupleType)),)
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
@@ -1995,16 +2015,16 @@ findwithin = Dispatch ( (lfindwithin, (ListType, TupleType)), )
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
-try: # DEFINE THESE *ONLY* IF NUMERIC IS AVAILABLE
- import numpy as N
- import numpy.linalg as LA
+try: # DEFINE THESE *ONLY* IF NUMERIC IS AVAILABLE
+ import numpy as N
+ import numpy.linalg as LA
+ #####################################
+ ######## ACENTRAL TENDENCY ########
+ #####################################
-#####################################
-######## ACENTRAL TENDENCY ########
-#####################################
- def ageometricmean (inarray,dimension=None,keepdims=0):
+ def ageometricmean(inarray, dimension=None, keepdims=0):
"""
Calculates the geometric mean of the values in the passed array.
That is: n-th root of (x1 * x2 * ... * xn). Defaults to ALL values in
@@ -2017,37 +2037,36 @@ inarray, with only 1 'level' per dim that was collapsed over.
Usage: ageometricmean(inarray,dimension=None,keepdims=0)
Returns: geometric mean computed over dim(s) listed in dimension
"""
- inarray = N.array(inarray,N.float_)
+ inarray = N.array(inarray, N.float_)
if dimension == None:
- inarray = N.ravel(inarray)
- size = len(inarray)
- mult = N.power(inarray,1.0/size)
- mult = N.multiply.reduce(mult)
- elif type(dimension) in [IntType,FloatType]:
- size = inarray.shape[dimension]
- mult = N.power(inarray,1.0/size)
- mult = N.multiply.reduce(mult,dimension)
- if keepdims == 1:
- shp = list(inarray.shape)
- shp[dimension] = 1
- sum = N.reshape(sum,shp)
- else: # must be a SEQUENCE of dims to average over
- dims = list(dimension)
- dims.sort()
- dims.reverse()
- size = N.array(N.multiply.reduce(N.take(inarray.shape,dims)),N.float_)
- mult = N.power(inarray,1.0/size)
+ inarray = N.ravel(inarray)
+ size = len(inarray)
+ mult = N.power(inarray, 1.0 / size)
+ mult = N.multiply.reduce(mult)
+ elif type(dimension) in [IntType, FloatType]:
+ size = inarray.shape[dimension]
+ mult = N.power(inarray, 1.0 / size)
+ mult = N.multiply.reduce(mult, dimension)
+ if keepdims == 1:
+ shp = list(inarray.shape)
+ shp[dimension] = 1
+ sum = N.reshape(sum, shp)
+ else: # must be a SEQUENCE of dims to average over
+ dims = list(dimension)
+ dims.sort()
+ dims.reverse()
+ size = N.array(N.multiply.reduce(N.take(inarray.shape, dims)), N.float_)
+ mult = N.power(inarray, 1.0 / size)
+ for dim in dims:
+ mult = N.multiply.reduce(mult, dim)
+ if keepdims == 1:
+ shp = list(inarray.shape)
for dim in dims:
- mult = N.multiply.reduce(mult,dim)
- if keepdims == 1:
- shp = list(inarray.shape)
- for dim in dims:
- shp[dim] = 1
- mult = N.reshape(mult,shp)
+ shp[dim] = 1
+ mult = N.reshape(mult, shp)
return mult
-
- def aharmonicmean (inarray,dimension=None,keepdims=0):
+ def aharmonicmean(inarray, dimension=None, keepdims=0):
"""
Calculates the harmonic mean of the values in the passed array.
That is: n / (1/x1 + 1/x2 + ... + 1/xn). Defaults to ALL values in
@@ -2062,46 +2081,45 @@ Returns: harmonic mean computed over dim(s) in dimension
"""
inarray = inarray.astype(N.float_)
if dimension == None:
- inarray = N.ravel(inarray)
- size = len(inarray)
- s = N.add.reduce(1.0 / inarray)
- elif type(dimension) in [IntType,FloatType]:
- size = float(inarray.shape[dimension])
- s = N.add.reduce(1.0/inarray, dimension)
+ inarray = N.ravel(inarray)
+ size = len(inarray)
+ s = N.add.reduce(1.0 / inarray)
+ elif type(dimension) in [IntType, FloatType]:
+ size = float(inarray.shape[dimension])
+ s = N.add.reduce(1.0 / inarray, dimension)
+ if keepdims == 1:
+ shp = list(inarray.shape)
+ shp[dimension] = 1
+ s = N.reshape(s, shp)
+ else: # must be a SEQUENCE of dims to average over
+ dims = list(dimension)
+ dims.sort()
+ nondims = []
+ for i in range(len(inarray.shape)):
+ if i not in dims:
+ nondims.append(i)
+ tinarray = N.transpose(inarray, nondims + dims) # put keep-dims first
+ idx = [0] * len(nondims)
+ if idx == []:
+ size = len(N.ravel(inarray))
+ s = asum(1.0 / inarray)
if keepdims == 1:
- shp = list(inarray.shape)
- shp[dimension] = 1
- s = N.reshape(s,shp)
- else: # must be a SEQUENCE of dims to average over
- dims = list(dimension)
- dims.sort()
- nondims = []
- for i in range(len(inarray.shape)):
- if i not in dims:
- nondims.append(i)
- tinarray = N.transpose(inarray,nondims+dims) # put keep-dims first
- idx = [0] *len(nondims)
- if idx == []:
- size = len(N.ravel(inarray))
- s = asum(1.0 / inarray)
- if keepdims == 1:
- s = N.reshape([s],N.ones(len(inarray.shape)))
- else:
- idx[0] = -1
- loopcap = N.array(tinarray.shape[0:len(nondims)]) -1
- s = N.zeros(loopcap+1,N.float_)
- while incr(idx,loopcap) <> -1:
- s[idx] = asum(1.0/tinarray[idx])
- size = N.multiply.reduce(N.take(inarray.shape,dims))
- if keepdims == 1:
- shp = list(inarray.shape)
- for dim in dims:
- shp[dim] = 1
- s = N.reshape(s,shp)
+ s = N.reshape([s], N.ones(len(inarray.shape)))
+ else:
+ idx[0] = -1
+ loopcap = N.array(tinarray.shape[0:len(nondims)]) - 1
+ s = N.zeros(loopcap + 1, N.float_)
+ while incr(idx, loopcap) <> -1:
+ s[idx] = asum(1.0 / tinarray[idx])
+ size = N.multiply.reduce(N.take(inarray.shape, dims))
+ if keepdims == 1:
+ shp = list(inarray.shape)
+ for dim in dims:
+ shp[dim] = 1
+ s = N.reshape(s, shp)
return size / s
-
- def amean (inarray,dimension=None,keepdims=0):
+ def amean(inarray, dimension=None, keepdims=0):
"""
Calculates the arithmatic mean of the values in the passed array.
That is: 1/n * (x1 + x2 + ... + xn). Defaults to ALL values in the
@@ -2114,36 +2132,35 @@ inarray, with only 1 'level' per dim that was collapsed over.
Usage: amean(inarray,dimension=None,keepdims=0)
Returns: arithematic mean calculated over dim(s) in dimension
"""
- if inarray.dtype in [N.int_, N.short,N.ubyte]:
- inarray = inarray.astype(N.float_)
+ if inarray.dtype in [N.int_, N.short, N.ubyte]:
+ inarray = inarray.astype(N.float_)
if dimension == None:
- inarray = N.ravel(inarray)
- sum = N.add.reduce(inarray)
- denom = float(len(inarray))
- elif type(dimension) in [IntType,FloatType]:
- sum = asum(inarray,dimension)
- denom = float(inarray.shape[dimension])
- if keepdims == 1:
- shp = list(inarray.shape)
- shp[dimension] = 1
- sum = N.reshape(sum,shp)
- else: # must be a TUPLE of dims to average over
- dims = list(dimension)
- dims.sort()
- dims.reverse()
- sum = inarray *1.0
+ inarray = N.ravel(inarray)
+ sum = N.add.reduce(inarray)
+ denom = float(len(inarray))
+ elif type(dimension) in [IntType, FloatType]:
+ sum = asum(inarray, dimension)
+ denom = float(inarray.shape[dimension])
+ if keepdims == 1:
+ shp = list(inarray.shape)
+ shp[dimension] = 1
+ sum = N.reshape(sum, shp)
+ else: # must be a TUPLE of dims to average over
+ dims = list(dimension)
+ dims.sort()
+ dims.reverse()
+ sum = inarray * 1.0
+ for dim in dims:
+ sum = N.add.reduce(sum, dim)
+ denom = N.array(N.multiply.reduce(N.take(inarray.shape, dims)), N.float_)
+ if keepdims == 1:
+ shp = list(inarray.shape)
for dim in dims:
- sum = N.add.reduce(sum,dim)
- denom = N.array(N.multiply.reduce(N.take(inarray.shape,dims)),N.float_)
- if keepdims == 1:
- shp = list(inarray.shape)
- for dim in dims:
- shp[dim] = 1
- sum = N.reshape(sum,shp)
- return sum/denom
+ shp[dim] = 1
+ sum = N.reshape(sum, shp)
+ return sum / denom
-
- def amedian (inarray,numbins=1000):
+ def amedian(inarray, numbins=1000):
"""
Calculates the COMPUTED median value of an array of numbers, given the
number of bins to use for the histogram (more bins approaches finding the
@@ -2155,19 +2172,20 @@ Usage: amedian(inarray,numbins=1000)
Returns: median calculated over ALL values in inarray
"""
inarray = N.ravel(inarray)
- (hist, smallest, binsize, extras) = ahistogram(inarray,numbins,[min(inarray),max(inarray)])
- cumhist = N.cumsum(hist) # make cumulative histogram
- otherbins = N.greater_equal(cumhist,len(inarray)/2.0)
- otherbins = list(otherbins) # list of 0/1s, 1s start at median bin
- cfbin = otherbins.index(1) # get 1st(!) index holding 50%ile score
- LRL = smallest + binsize*cfbin # get lower read limit of that bin
- cfbelow = N.add.reduce(hist[0:cfbin]) # cum. freq. below bin
- freq = hist[cfbin] # frequency IN the 50%ile bin
- median = LRL + ((len(inarray)/2.0-cfbelow)/float(freq))*binsize # MEDIAN
+ (hist, smallest, binsize, extras) = ahistogram(inarray, numbins,
+ [min(inarray), max(inarray)])
+ cumhist = N.cumsum(hist) # make cumulative histogram
+ otherbins = N.greater_equal(cumhist, len(inarray) / 2.0)
+ otherbins = list(otherbins) # list of 0/1s, 1s start at median bin
+ cfbin = otherbins.index(1) # get 1st(!) index holding 50%ile score
+ LRL = smallest + binsize * cfbin # get lower read limit of that bin
+ cfbelow = N.add.reduce(hist[0:cfbin]) # cum. freq. below bin
+ freq = hist[cfbin] # frequency IN the 50%ile bin
+ median = LRL + (
+ (len(inarray) / 2.0 - cfbelow) / float(freq)) * binsize # MEDIAN
return median
-
- def amedianscore (inarray,dimension=None):
+ def amedianscore(inarray, dimension=None):
"""
Returns the 'middle' score of the passed array. If there is an even
number of scores, the mean of the 2 middle scores is returned. Can function
@@ -2178,21 +2196,20 @@ Usage: amedianscore(inarray,dimension=None)
Returns: 'middle' score of the array, or the mean of the 2 middle scores
"""
if dimension == None:
- inarray = N.ravel(inarray)
- dimension = 0
- inarray = N.sort(inarray,dimension)
- if inarray.shape[dimension] % 2 == 0: # if even number of elements
- indx = inarray.shape[dimension]/2 # integer division correct
- median = N.asarray(inarray[indx]+inarray[indx-1]) / 2.0
+ inarray = N.ravel(inarray)
+ dimension = 0
+ inarray = N.sort(inarray, dimension)
+ if inarray.shape[dimension] % 2 == 0: # if even number of elements
+ indx = inarray.shape[dimension] / 2 # integer division correct
+ median = N.asarray(inarray[indx] + inarray[indx - 1]) / 2.0
else:
- indx = inarray.shape[dimension] / 2 # integer division correct
- median = N.take(inarray,[indx],dimension)
- if median.shape == (1,):
- median = median[0]
+ indx = inarray.shape[dimension] / 2 # integer division correct
+ median = N.take(inarray, [indx], dimension)
+ if median.shape == (1,):
+ median = median[0]
return median
-
- def amode(a, dimension=None):
+ def amode(a, dimension=None):
"""
Returns an array of the modal (most common) score in the passed array.
If there is more than one such score, ONLY THE FIRST is returned.
@@ -2204,24 +2221,23 @@ Returns: array of bin-counts for mode(s), array of corresponding modal values
"""
if dimension == None:
- a = N.ravel(a)
- dimension = 0
- scores = pstat.aunique(N.ravel(a)) # get ALL unique values
+ a = N.ravel(a)
+ dimension = 0
+ scores = pstat.aunique(N.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[dimension] = 1
oldmostfreq = N.zeros(testshape)
oldcounts = N.zeros(testshape)
for score in scores:
- template = N.equal(a,score)
- counts = asum(template,dimension,1)
- mostfrequent = N.where(counts>oldcounts,score,oldmostfreq)
- oldcounts = N.where(counts>oldcounts,counts,oldcounts)
- oldmostfreq = mostfrequent
+ template = N.equal(a, score)
+ counts = asum(template, dimension, 1)
+ mostfrequent = N.where(counts > oldcounts, score, oldmostfreq)
+ oldcounts = N.where(counts > oldcounts, counts, oldcounts)
+ oldmostfreq = mostfrequent
return oldcounts, mostfrequent
-
- def atmean(a,limits=None,inclusive=(1,1)):
- """
+ def atmean(a, limits=None, inclusive=(1, 1)):
+ """
Returns the arithmetic mean of all values in an array, ignoring values
strictly outside the sequence passed to 'limits'. Note: either limit
in the sequence, or the value of limits itself, can be set to None. The
@@ -2230,30 +2246,35 @@ inclusive list/tuple determines whether the lower and upper limiting bounds
Usage: atmean(a,limits=None,inclusive=(1,1))
"""
- if a.dtype in [N.int_, N.short,N.ubyte]:
- a = a.astype(N.float_)
- if limits == None:
- return mean(a)
- assert type(limits) in [ListType,TupleType,N.ndarray], "Wrong type for limits in atmean"
- if inclusive[0]: lowerfcn = N.greater_equal
- else: lowerfcn = N.greater
- if inclusive[1]: upperfcn = N.less_equal
- else: upperfcn = N.less
- if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):
- raise ValueError, "No array values within given limits (atmean)."
- elif limits[0]==None and limits[1]<>None:
- mask = upperfcn(a,limits[1])
- elif limits[0]<>None and limits[1]==None:
- mask = lowerfcn(a,limits[0])
- elif limits[0]<>None and limits[1]<>None:
- mask = lowerfcn(a,limits[0])*upperfcn(a,limits[1])
- s = float(N.add.reduce(N.ravel(a*mask)))
- n = float(N.add.reduce(N.ravel(mask)))
- return s/n
-
-
- def atvar(a,limits=None,inclusive=(1,1)):
- """
+ if a.dtype in [N.int_, N.short, N.ubyte]:
+ a = a.astype(N.float_)
+ if limits == None:
+ return mean(a)
+ assert type(limits) in [ListType, TupleType, N.ndarray
+ ], 'Wrong type for limits in atmean'
+ if inclusive[0]:
+ lowerfcn = N.greater_equal
+ else:
+ lowerfcn = N.greater
+ if inclusive[1]:
+ upperfcn = N.less_equal
+ else:
+ upperfcn = N.less
+ if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(
+ N.ravel(a)):
+ raise ValueError, 'No array values within given limits (atmean).'
+ elif limits[0] == None and limits[1] <> None:
+ mask = upperfcn(a, limits[1])
+ elif limits[0] <> None and limits[1] == None:
+ mask = lowerfcn(a, limits[0])
+ elif limits[0] <> None and limits[1] <> None:
+ mask = lowerfcn(a, limits[0]) * upperfcn(a, limits[1])
+ s = float(N.add.reduce(N.ravel(a * mask)))
+ n = float(N.add.reduce(N.ravel(mask)))
+ return s / n
+
+ def atvar(a, limits=None, inclusive=(1, 1)):
+ """
Returns the sample variance of values in an array, (i.e., using N-1),
ignoring values strictly outside the sequence passed to 'limits'.
Note: either limit in the sequence, or the value of limits itself,
@@ -2263,69 +2284,76 @@ closed/inclusive (1). ASSUMES A FLAT ARRAY (OR ELSE PREFLATTENS).
Usage: atvar(a,limits=None,inclusive=(1,1))
"""
- a = a.astype(N.float_)
- if limits == None or limits == [None,None]:
- return avar(a)
- assert type(limits) in [ListType,TupleType,N.ndarray], "Wrong type for limits in atvar"
- if inclusive[0]: lowerfcn = N.greater_equal
- else: lowerfcn = N.greater
- if inclusive[1]: upperfcn = N.less_equal
- else: upperfcn = N.less
- if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):
- raise ValueError, "No array values within given limits (atvar)."
- elif limits[0]==None and limits[1]<>None:
- mask = upperfcn(a,limits[1])
- elif limits[0]<>None and limits[1]==None:
- mask = lowerfcn(a,limits[0])
- elif limits[0]<>None and limits[1]<>None:
- mask = lowerfcn(a,limits[0])*upperfcn(a,limits[1])
-
- a = N.compress(mask,a) # squish out excluded values
- return avar(a)
-
-
- def atmin(a,lowerlimit=None,dimension=None,inclusive=1):
- """
+ a = a.astype(N.float_)
+ if limits == None or limits == [None, None]:
+ return avar(a)
+ assert type(limits) in [ListType, TupleType, N.ndarray
+ ], 'Wrong type for limits in atvar'
+ if inclusive[0]:
+ lowerfcn = N.greater_equal
+ else:
+ lowerfcn = N.greater
+ if inclusive[1]:
+ upperfcn = N.less_equal
+ else:
+ upperfcn = N.less
+ if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(
+ N.ravel(a)):
+ raise ValueError, 'No array values within given limits (atvar).'
+ elif limits[0] == None and limits[1] <> None:
+ mask = upperfcn(a, limits[1])
+ elif limits[0] <> None and limits[1] == None:
+ mask = lowerfcn(a, limits[0])
+ elif limits[0] <> None and limits[1] <> None:
+ mask = lowerfcn(a, limits[0]) * upperfcn(a, limits[1])
+
+ a = N.compress(mask, a) # squish out excluded values
+ return avar(a)
+
+ def atmin(a, lowerlimit=None, dimension=None, inclusive=1):
+ """
Returns the minimum value of a, along dimension, including only values less
than (or equal to, if inclusive=1) lowerlimit. If the limit is set to None,
all values in the array are used.
Usage: atmin(a,lowerlimit=None,dimension=None,inclusive=1)
"""
- if inclusive: lowerfcn = N.greater
- else: lowerfcn = N.greater_equal
- if dimension == None:
- a = N.ravel(a)
- dimension = 0
- if lowerlimit == None:
- lowerlimit = N.minimum.reduce(N.ravel(a))-11
- biggest = N.maximum.reduce(N.ravel(a))
- ta = N.where(lowerfcn(a,lowerlimit),a,biggest)
- return N.minimum.reduce(ta,dimension)
-
+ if inclusive:
+ lowerfcn = N.greater
+ else:
+ lowerfcn = N.greater_equal
+ if dimension == None:
+ a = N.ravel(a)
+ dimension = 0
+ if lowerlimit == None:
+ lowerlimit = N.minimum.reduce(N.ravel(a)) - 11
+ biggest = N.maximum.reduce(N.ravel(a))
+ ta = N.where(lowerfcn(a, lowerlimit), a, biggest)
+ return N.minimum.reduce(ta, dimension)
- def atmax(a,upperlimit,dimension=None,inclusive=1):
- """
+ def atmax(a, upperlimit, dimension=None, inclusive=1):
+ """
Returns the maximum value of a, along dimension, including only values greater
than (or equal to, if inclusive=1) upperlimit. If the limit is set to None,
a limit larger than the max value in the array is used.
Usage: atmax(a,upperlimit,dimension=None,inclusive=1)
"""
- if inclusive: upperfcn = N.less
- else: upperfcn = N.less_equal
- if dimension == None:
- a = N.ravel(a)
- dimension = 0
- if upperlimit == None:
- upperlimit = N.maximum.reduce(N.ravel(a))+1
- smallest = N.minimum.reduce(N.ravel(a))
- ta = N.where(upperfcn(a,upperlimit),a,smallest)
- return N.maximum.reduce(ta,dimension)
-
+ if inclusive:
+ upperfcn = N.less
+ else:
+ upperfcn = N.less_equal
+ if dimension == None:
+ a = N.ravel(a)
+ dimension = 0
+ if upperlimit == None:
+ upperlimit = N.maximum.reduce(N.ravel(a)) + 1
+ smallest = N.minimum.reduce(N.ravel(a))
+ ta = N.where(upperfcn(a, upperlimit), a, smallest)
+ return N.maximum.reduce(ta, dimension)
- def atstdev(a,limits=None,inclusive=(1,1)):
- """
+ def atstdev(a, limits=None, inclusive=(1, 1)):
+ """
Returns the standard deviation of all values in an array, ignoring values
strictly outside the sequence passed to 'limits'. Note: either limit
in the sequence, or the value of limits itself, can be set to None. The
@@ -2334,11 +2362,10 @@ inclusive list/tuple determines whether the lower and upper limiting bounds
Usage: atstdev(a,limits=None,inclusive=(1,1))
"""
- return N.sqrt(tvar(a,limits,inclusive))
+ return N.sqrt(tvar(a, limits, inclusive))
-
- def atsem(a,limits=None,inclusive=(1,1)):
- """
+ def atsem(a, limits=None, inclusive=(1, 1)):
+ """
Returns the standard error of the mean for the values in an array,
(i.e., using N for the denominator), ignoring values strictly outside
the sequence passed to 'limits'. Note: either limit in the sequence,
@@ -2348,33 +2375,38 @@ open/exclusive (0) or closed/inclusive (1).
Usage: atsem(a,limits=None,inclusive=(1,1))
"""
- sd = tstdev(a,limits,inclusive)
- if limits == None or limits == [None,None]:
- n = float(len(N.ravel(a)))
- limits = [min(a)-1, max(a)+1]
- assert type(limits) in [ListType,TupleType,N.ndarray], "Wrong type for limits in atsem"
- if inclusive[0]: lowerfcn = N.greater_equal
- else: lowerfcn = N.greater
- if inclusive[1]: upperfcn = N.less_equal
- else: upperfcn = N.less
- if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):
- raise ValueError, "No array values within given limits (atsem)."
- elif limits[0]==None and limits[1]<>None:
- mask = upperfcn(a,limits[1])
- elif limits[0]<>None and limits[1]==None:
- mask = lowerfcn(a,limits[0])
- elif limits[0]<>None and limits[1]<>None:
- mask = lowerfcn(a,limits[0])*upperfcn(a,limits[1])
- term1 = N.add.reduce(N.ravel(a*a*mask))
- n = float(N.add.reduce(N.ravel(mask)))
- return sd/math.sqrt(n)
-
+ sd = tstdev(a, limits, inclusive)
+ if limits == None or limits == [None, None]:
+ n = float(len(N.ravel(a)))
+ limits = [min(a) - 1, max(a) + 1]
+ assert type(limits) in [ListType, TupleType, N.ndarray
+ ], 'Wrong type for limits in atsem'
+ if inclusive[0]:
+ lowerfcn = N.greater_equal
+ else:
+ lowerfcn = N.greater
+ if inclusive[1]:
+ upperfcn = N.less_equal
+ else:
+ upperfcn = N.less
+ if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(
+ N.ravel(a)):
+ raise ValueError, 'No array values within given limits (atsem).'
+ elif limits[0] == None and limits[1] <> None:
+ mask = upperfcn(a, limits[1])
+ elif limits[0] <> None and limits[1] == None:
+ mask = lowerfcn(a, limits[0])
+ elif limits[0] <> None and limits[1] <> None:
+ mask = lowerfcn(a, limits[0]) * upperfcn(a, limits[1])
+ term1 = N.add.reduce(N.ravel(a * a * mask))
+ n = float(N.add.reduce(N.ravel(mask)))
+ return sd / math.sqrt(n)
#####################################
############ AMOMENTS #############
#####################################
- def amoment(a,moment=1,dimension=None):
+ def amoment(a, moment=1, dimension=None):
"""
Calculates the nth moment about the mean for a sample (defaults to the
1st moment). Generally used to calculate coefficients of skewness and
@@ -2386,17 +2418,16 @@ Usage: amoment(a,moment=1,dimension=None)
Returns: appropriate moment along given dimension
"""
if dimension == None:
- a = N.ravel(a)
- dimension = 0
+ a = N.ravel(a)
+ dimension = 0
if moment == 1:
- return 0.0
+ return 0.0
else:
- mn = amean(a,dimension,1) # 1=keepdims
- s = N.power((a-mn),moment)
- return amean(s,dimension)
-
+ mn = amean(a, dimension, 1) # 1=keepdims
+ s = N.power((a - mn), moment)
+ return amean(s, dimension)
- def avariation(a,dimension=None):
+ def avariation(a, dimension=None):
"""
Returns the coefficient of variation, as defined in CRC Standard
Probability and Statistics, p.6. Dimension can equal None (ravel array
@@ -2405,10 +2436,9 @@ sequence (operate over multiple dimensions).
Usage: avariation(a,dimension=None)
"""
- return 100.0*asamplestdev(a,dimension)/amean(a,dimension)
-
+ return 100.0 * asamplestdev(a, dimension) / amean(a, dimension)
- def askew(a,dimension=None):
+ def askew(a, dimension=None):
"""
Returns the skewness of a distribution (normal ==> 0.0; >0 means extra
weight in left tail). Use askewtest() to see if it's close enough.
@@ -2419,15 +2449,14 @@ dimensions).
Usage: askew(a, dimension=None)
Returns: skew of vals in a along dimension, returning ZERO where all vals equal
"""
- denom = N.power(amoment(a,2,dimension),1.5)
- zero = N.equal(denom,0)
+ denom = N.power(amoment(a, 2, dimension), 1.5)
+ zero = N.equal(denom, 0)
if type(denom) == N.ndarray and asum(zero) <> 0:
- print "Number of zeros in askew: ",asum(zero)
+ print 'Number of zeros in askew: ', asum(zero)
denom = denom + zero # prevent divide-by-zero
- return N.where(zero, 0, amoment(a,3,dimension)/denom)
+ return N.where(zero, 0, amoment(a, 3, dimension) / denom)
-
- def akurtosis(a,dimension=None):
+ def akurtosis(a, dimension=None):
"""
Returns the kurtosis of a distribution (normal ==> 3.0; >3 means
heavier in the tails, and usually more peaked). Use akurtosistest()
@@ -2438,16 +2467,15 @@ sequence (operate over multiple dimensions).
Usage: akurtosis(a,dimension=None)
Returns: kurtosis of values in a along dimension, and ZERO where all vals equal
"""
- denom = N.power(amoment(a,2,dimension),2)
- zero = N.equal(denom,0)
+ denom = N.power(amoment(a, 2, dimension), 2)
+ zero = N.equal(denom, 0)
if type(denom) == N.ndarray and asum(zero) <> 0:
- print "Number of zeros in akurtosis: ",asum(zero)
+ print 'Number of zeros in akurtosis: ', asum(zero)
denom = denom + zero # prevent divide-by-zero
- return N.where(zero,0,amoment(a,4,dimension)/denom)
-
+ return N.where(zero, 0, amoment(a, 4, dimension) / denom)
- def adescribe(inarray,dimension=None):
- """
+ def adescribe(inarray, dimension=None):
+ """
Returns several descriptive statistics of the passed array. Dimension
can equal None (ravel array first), an integer (the dimension over
which to operate), or a sequence (operate over multiple dimensions).
@@ -2455,23 +2483,22 @@ which to operate), or a sequence (operate over multiple dimensions).
Usage: adescribe(inarray,dimension=None)
Returns: n, (min,max), mean, standard deviation, skew, kurtosis
"""
- if dimension == None:
- inarray = N.ravel(inarray)
- dimension = 0
- n = inarray.shape[dimension]
- mm = (N.minimum.reduce(inarray),N.maximum.reduce(inarray))
- m = amean(inarray,dimension)
- sd = astdev(inarray,dimension)
- skew = askew(inarray,dimension)
- kurt = akurtosis(inarray,dimension)
- return n, mm, m, sd, skew, kurt
-
+ if dimension == None:
+ inarray = N.ravel(inarray)
+ dimension = 0
+ n = inarray.shape[dimension]
+ mm = (N.minimum.reduce(inarray), N.maximum.reduce(inarray))
+ m = amean(inarray, dimension)
+ sd = astdev(inarray, dimension)
+ skew = askew(inarray, dimension)
+ kurt = akurtosis(inarray, dimension)
+ return n, mm, m, sd, skew, kurt
#####################################
######## NORMALITY TESTS ##########
#####################################
- def askewtest(a,dimension=None):
+ def askewtest(a, dimension=None):
"""
Tests whether the skew is significantly different from a normal
distribution. Dimension can equal None (ravel array first), an
@@ -2482,21 +2509,21 @@ Usage: askewtest(a,dimension=None)
Returns: z-score and 2-tail z-probability
"""
if dimension == None:
- a = N.ravel(a)
- dimension = 0
- b2 = askew(a,dimension)
+ a = N.ravel(a)
+ dimension = 0
+ b2 = askew(a, dimension)
n = float(a.shape[dimension])
- y = b2 * N.sqrt(((n+1)*(n+3)) / (6.0*(n-2)) )
- beta2 = ( 3.0*(n*n+27*n-70)*(n+1)*(n+3) ) / ( (n-2.0)*(n+5)*(n+7)*(n+9) )
- W2 = -1 + N.sqrt(2*(beta2-1))
- delta = 1/N.sqrt(N.log(N.sqrt(W2)))
- alpha = N.sqrt(2/(W2-1))
- y = N.where(y==0,1,y)
- Z = delta*N.log(y/alpha + N.sqrt((y/alpha)**2+1))
- return Z, (1.0-zprob(Z))*2
-
-
- def akurtosistest(a,dimension=None):
+ y = b2 * N.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
+ beta2 = (3.0 * (n * n + 27 * n - 70) * (n + 1) *
+ (n + 3)) / ((n - 2.0) * (n + 5) * (n + 7) * (n + 9))
+ W2 = -1 + N.sqrt(2 * (beta2 - 1))
+ delta = 1 / N.sqrt(N.log(N.sqrt(W2)))
+ alpha = N.sqrt(2 / (W2 - 1))
+ y = N.where(y == 0, 1, y)
+ Z = delta * N.log(y / alpha + N.sqrt((y / alpha)**2 + 1))
+ return Z, (1.0 - zprob(Z)) * 2
+
+ def akurtosistest(a, dimension=None):
"""
Tests whether a dataset has normal kurtosis (i.e.,
kurtosis=3(n-1)/(n+1)) Valid only for n>20. Dimension can equal None
@@ -2507,28 +2534,31 @@ Usage: akurtosistest(a,dimension=None)
Returns: z-score and 2-tail z-probability, returns 0 for bad pixels
"""
if dimension == None:
- a = N.ravel(a)
- dimension = 0
+ a = N.ravel(a)
+ dimension = 0
n = float(a.shape[dimension])
- if n<20:
- print "akurtosistest only valid for n>=20 ... continuing anyway, n=",n
- b2 = akurtosis(a,dimension)
- E = 3.0*(n-1) /(n+1)
- varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1)*(n+3)*(n+5))
- x = (b2-E)/N.sqrt(varb2)
- sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * N.sqrt((6.0*(n+3)*(n+5))/
- (n*(n-2)*(n-3)))
- A = 6.0 + 8.0/sqrtbeta1 *(2.0/sqrtbeta1 + N.sqrt(1+4.0/(sqrtbeta1**2)))
- term1 = 1 -2/(9.0*A)
- denom = 1 +x*N.sqrt(2/(A-4.0))
- denom = N.where(N.less(denom,0), 99, denom)
- term2 = N.where(N.equal(denom,0), term1, N.power((1-2.0/A)/denom,1/3.0))
- Z = ( term1 - term2 ) / N.sqrt(2/(9.0*A))
- Z = N.where(N.equal(denom,99), 0, Z)
- return Z, (1.0-zprob(Z))*2
-
-
- def anormaltest(a,dimension=None):
+ if n < 20:
+ print 'akurtosistest only valid for n>=20 ... continuing anyway, n=', n
+ b2 = akurtosis(a, dimension)
+ E = 3.0 * (n - 1) / (n + 1)
+ varb2 = 24.0 * n * (n - 2) * (n - 3) / ((n + 1) * (n + 1) * (n + 3) *
+ (n + 5))
+ x = (b2 - E) / N.sqrt(varb2)
+ sqrtbeta1 = 6.0 * (n * n - 5 * n + 2) / ((n + 7) * (n + 9)) * N.sqrt(
+ (6.0 * (n + 3) * (n + 5)) / (n * (n - 2) * (n - 3)))
+ A = 6.0 + 8.0 / sqrtbeta1 * (2.0 / sqrtbeta1 +
+ N.sqrt(1 + 4.0 / (sqrtbeta1**2)))
+ term1 = 1 - 2 / (9.0 * A)
+ denom = 1 + x * N.sqrt(2 / (A - 4.0))
+ denom = N.where(N.less(denom, 0), 99, denom)
+ term2 = N.where(
+ N.equal(denom, 0), term1, N.power(
+ (1 - 2.0 / A) / denom, 1 / 3.0))
+ Z = (term1 - term2) / N.sqrt(2 / (9.0 * A))
+ Z = N.where(N.equal(denom, 99), 0, Z)
+ return Z, (1.0 - zprob(Z)) * 2
+
+ def anormaltest(a, dimension=None):
"""
Tests whether skew and/OR kurtosis of dataset differs from normal
curve. Can operate over multiple dimensions. Dimension can equal
@@ -2539,19 +2569,18 @@ Usage: anormaltest(a,dimension=None)
Returns: z-score and 2-tail probability
"""
if dimension == None:
- a = N.ravel(a)
- dimension = 0
- s,p = askewtest(a,dimension)
- k,p = akurtosistest(a,dimension)
- k2 = N.power(s,2) + N.power(k,2)
- return k2, achisqprob(k2,2)
-
+ a = N.ravel(a)
+ dimension = 0
+ s, p = askewtest(a, dimension)
+ k, p = akurtosistest(a, dimension)
+ k2 = N.power(s, 2) + N.power(k, 2)
+ return k2, achisqprob(k2, 2)
#####################################
###### AFREQUENCY FUNCTIONS #######
#####################################
- def aitemfreq(a):
+ def aitemfreq(a):
"""
Returns a 2D array of item frequencies. Column 1 contains item values,
column 2 contains their respective counts. Assumes a 1D array is passed.
@@ -2564,27 +2593,26 @@ Returns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies)
scores = N.sort(scores)
freq = N.zeros(len(scores))
for i in range(len(scores)):
- freq[i] = N.add.reduce(N.equal(a,scores[i]))
+ freq[i] = N.add.reduce(N.equal(a, scores[i]))
return N.array(pstat.aabut(scores, freq))
-
- def ascoreatpercentile (inarray, percent):
+ def ascoreatpercentile(inarray, percent):
"""
Usage: ascoreatpercentile(inarray,percent) 0<percent<100
Returns: score at given percentile, relative to inarray distribution
"""
percent = percent / 100.0
- targetcf = percent*len(inarray)
+ targetcf = percent * len(inarray)
h, lrl, binsize, extras = histogram(inarray)
- cumhist = cumsum(h*1)
+ cumhist = cumsum(h * 1)
for i in range(len(cumhist)):
- if cumhist[i] >= targetcf:
- break
- score = binsize * ((targetcf - cumhist[i-1]) / float(h[i])) + (lrl+binsize*i)
+ if cumhist[i] >= targetcf:
+ break
+ score = binsize * (
+ (targetcf - cumhist[i - 1]) / float(h[i])) + (lrl + binsize * i)
return score
-
- def apercentileofscore (inarray,score,histbins=10,defaultlimits=None):
+ def apercentileofscore(inarray, score, histbins=10, defaultlimits=None):
"""
Note: result of this function depends on the values used to histogram
the data(!).
@@ -2592,14 +2620,14 @@ the data(!).
Usage: apercentileofscore(inarray,score,histbins=10,defaultlimits=None)
Returns: percentile-position of score (0-100) relative to inarray
"""
- h, lrl, binsize, extras = histogram(inarray,histbins,defaultlimits)
- cumhist = cumsum(h*1)
- i = int((score - lrl)/float(binsize))
- pct = (cumhist[i-1]+((score-(lrl+binsize*i))/float(binsize))*h[i])/float(len(inarray)) * 100
+ h, lrl, binsize, extras = histogram(inarray, histbins, defaultlimits)
+ cumhist = cumsum(h * 1)
+ i = int((score - lrl) / float(binsize))
+ pct = (cumhist[i - 1] + ((score - (lrl + binsize * i)) / float(binsize)) *
+ h[i]) / float(len(inarray)) * 100
return pct
-
- def ahistogram (inarray,numbins=10,defaultlimits=None,printextras=1):
+ def ahistogram(inarray, numbins=10, defaultlimits=None, printextras=1):
"""
Returns (i) an array of histogram bin counts, (ii) the smallest value
of the histogram binning, and (iii) the bin width (the last 2 are not
@@ -2611,34 +2639,33 @@ following: array of bin values, lowerreallimit, binsize, extrapoints.
Usage: ahistogram(inarray,numbins=10,defaultlimits=None,printextras=1)
Returns: (array of bin counts, bin-minimum, min-width, #-points-outside-range)
"""
- inarray = N.ravel(inarray) # flatten any >1D arrays
+ inarray = N.ravel(inarray) # flatten any >1D arrays
if (defaultlimits <> None):
- lowerreallimit = defaultlimits[0]
- upperreallimit = defaultlimits[1]
- binsize = (upperreallimit-lowerreallimit) / float(numbins)
+ lowerreallimit = defaultlimits[0]
+ upperreallimit = defaultlimits[1]
+ binsize = (upperreallimit - lowerreallimit) / float(numbins)
else:
- Min = N.minimum.reduce(inarray)
- Max = N.maximum.reduce(inarray)
- estbinwidth = float(Max - Min)/float(numbins) + 1e-6
- binsize = (Max-Min+estbinwidth)/float(numbins)
- lowerreallimit = Min - binsize/2.0 #lower real limit,1st bin
+ Min = N.minimum.reduce(inarray)
+ Max = N.maximum.reduce(inarray)
+ estbinwidth = float(Max - Min) / float(numbins) + 1e-6
+ binsize = (Max - Min + estbinwidth) / float(numbins)
+ lowerreallimit = Min - binsize / 2.0 #lower real limit,1st bin
bins = N.zeros(numbins)
extrapoints = 0
for num in inarray:
- try:
- if (num-lowerreallimit) < 0:
- extrapoints = extrapoints + 1
- else:
- bintoincrement = int((num-lowerreallimit) / float(binsize))
- bins[bintoincrement] = bins[bintoincrement] + 1
- except: # point outside lower/upper limits
- extrapoints = extrapoints + 1
+ try:
+ if (num - lowerreallimit) < 0:
+ extrapoints = extrapoints + 1
+ else:
+ bintoincrement = int((num - lowerreallimit) / float(binsize))
+ bins[bintoincrement] = bins[bintoincrement] + 1
+ except: # point outside lower/upper limits
+ extrapoints = extrapoints + 1
if (extrapoints > 0 and printextras == 1):
- print '\nPoints outside given histogram range =',extrapoints
+ print '\nPoints outside given histogram range =', extrapoints
return (bins, lowerreallimit, binsize, extrapoints)
-
- def acumfreq(a,numbins=10,defaultreallimits=None):
+ def acumfreq(a, numbins=10, defaultreallimits=None):
"""
Returns a cumulative frequency histogram, using the histogram function.
Defaultreallimits can be None (use all data), or a 2-sequence containing
@@ -2647,12 +2674,11 @@ lower and upper limits on values to include.
Usage: acumfreq(a,numbins=10,defaultreallimits=None)
Returns: array of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
- h,l,b,e = histogram(a,numbins,defaultreallimits)
- cumhist = cumsum(h*1)
- return cumhist,l,b,e
-
+ h, l, b, e = histogram(a, numbins, defaultreallimits)
+ cumhist = cumsum(h * 1)
+ return cumhist, l, b, e
- def arelfreq(a,numbins=10,defaultreallimits=None):
+ def arelfreq(a, numbins=10, defaultreallimits=None):
"""
Returns a relative frequency histogram, using the histogram function.
Defaultreallimits can be None (use all data), or a 2-sequence containing
@@ -2661,16 +2687,15 @@ lower and upper limits on values to include.
Usage: arelfreq(a,numbins=10,defaultreallimits=None)
Returns: array of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
- h,l,b,e = histogram(a,numbins,defaultreallimits)
- h = N.array(h/float(a.shape[0]))
- return h,l,b,e
-
+ h, l, b, e = histogram(a, numbins, defaultreallimits)
+ h = N.array(h / float(a.shape[0]))
+ return h, l, b, e
#####################################
###### AVARIABILITY FUNCTIONS #####
#####################################
- def aobrientransform(*args):
+ def aobrientransform(*args):
"""
Computes a transform on input data (any number of columns). Used to
test for homogeneity of variance prior to running one-way stats. Each
@@ -2683,32 +2708,31 @@ Returns: transformed data for use in an ANOVA
"""
TINY = 1e-10
k = len(args)
- n = N.zeros(k,N.float_)
- v = N.zeros(k,N.float_)
- m = N.zeros(k,N.float_)
+ n = N.zeros(k, N.float_)
+ v = N.zeros(k, N.float_)
+ m = N.zeros(k, N.float_)
nargs = []
for i in range(k):
- nargs.append(args[i].astype(N.float_))
- n[i] = float(len(nargs[i]))
- v[i] = var(nargs[i])
- m[i] = mean(nargs[i])
+ nargs.append(args[i].astype(N.float_))
+ n[i] = float(len(nargs[i]))
+ v[i] = var(nargs[i])
+ m[i] = mean(nargs[i])
for j in range(k):
- for i in range(n[j]):
- t1 = (n[j]-1.5)*n[j]*(nargs[j][i]-m[j])**2
- t2 = 0.5*v[j]*(n[j]-1.0)
- t3 = (n[j]-1.0)*(n[j]-2.0)
- nargs[j][i] = (t1-t2) / float(t3)
+ for i in range(n[j]):
+ t1 = (n[j] - 1.5) * n[j] * (nargs[j][i] - m[j])**2
+ t2 = 0.5 * v[j] * (n[j] - 1.0)
+ t3 = (n[j] - 1.0) * (n[j] - 2.0)
+ nargs[j][i] = (t1 - t2) / float(t3)
check = 1
for j in range(k):
- if v[j] - mean(nargs[j]) > TINY:
- check = 0
+ if v[j] - mean(nargs[j]) > TINY:
+ check = 0
if check <> 1:
- raise ValueError, 'Lack of convergence in obrientransform.'
+ raise ValueError, 'Lack of convergence in obrientransform.'
else:
- return N.array(nargs)
-
+ return N.array(nargs)
- def asamplevar (inarray,dimension=None,keepdims=0):
+ def asamplevar(inarray, dimension=None, keepdims=0):
"""
Returns the sample standard deviation of the values in the passed
array (i.e., using N). Dimension can equal None (ravel array first),
@@ -2719,24 +2743,23 @@ with the same number of dimensions as inarray.
Usage: asamplevar(inarray,dimension=None,keepdims=0)
"""
if dimension == None:
- inarray = N.ravel(inarray)
- dimension = 0
+ inarray = N.ravel(inarray)
+ dimension = 0
if dimension == 1:
- mn = amean(inarray,dimension)[:,N.NewAxis]
+ mn = amean(inarray, dimension)[:, N.NewAxis]
else:
- mn = amean(inarray,dimension,keepdims=1)
+ mn = amean(inarray, dimension, keepdims=1)
deviations = inarray - mn
if type(dimension) == ListType:
- n = 1
- for d in dimension:
- n = n*inarray.shape[d]
+ n = 1
+ for d in dimension:
+ n = n * inarray.shape[d]
else:
- n = inarray.shape[dimension]
- svar = ass(deviations,dimension,keepdims) / float(n)
+ n = inarray.shape[dimension]
+ svar = ass(deviations, dimension, keepdims) / float(n)
return svar
-
- def asamplestdev (inarray, dimension=None, keepdims=0):
+ def asamplestdev(inarray, dimension=None, keepdims=0):
"""
Returns the sample standard deviation of the values in the passed
array (i.e., using N). Dimension can equal None (ravel array first),
@@ -2746,10 +2769,9 @@ with the same number of dimensions as inarray.
Usage: asamplestdev(inarray,dimension=None,keepdims=0)
"""
- return N.sqrt(asamplevar(inarray,dimension,keepdims))
-
+ return N.sqrt(asamplevar(inarray, dimension, keepdims))
- def asignaltonoise(instack,dimension=0):
+ def asignaltonoise(instack, dimension=0):
"""
Calculates signal-to-noise. Dimension can equal None (ravel array
first), an integer (the dimension over which to operate), or a
@@ -2759,12 +2781,11 @@ Usage: asignaltonoise(instack,dimension=0):
Returns: array containing the value of (mean/stdev) along dimension,
or 0 when stdev=0
"""
- m = mean(instack,dimension)
- sd = stdev(instack,dimension)
- return N.where(sd==0,0,m/sd)
+ m = mean(instack, dimension)
+ sd = stdev(instack, dimension)
+ return N.where(sd == 0, 0, m / sd)
-
- def acov (x,y, dimension=None,keepdims=0):
+ def acov(x, y, dimension=None, keepdims=0):
"""
Returns the estimated covariance of the values in the passed
array (i.e., N-1). Dimension can equal None (ravel array first), an
@@ -2775,24 +2796,23 @@ same number of dimensions as inarray.
Usage: acov(x,y,dimension=None,keepdims=0)
"""
if dimension == None:
- x = N.ravel(x)
- y = N.ravel(y)
- dimension = 0
- xmn = amean(x,dimension,1) # keepdims
+ x = N.ravel(x)
+ y = N.ravel(y)
+ dimension = 0
+ xmn = amean(x, dimension, 1) # keepdims
xdeviations = x - xmn
- ymn = amean(y,dimension,1) # keepdims
+ ymn = amean(y, dimension, 1) # keepdims
ydeviations = y - ymn
if type(dimension) == ListType:
- n = 1
- for d in dimension:
- n = n*x.shape[d]
+ n = 1
+ for d in dimension:
+ n = n * x.shape[d]
else:
- n = x.shape[dimension]
- covar = N.sum(xdeviations*ydeviations)/float(n-1)
+ n = x.shape[dimension]
+ covar = N.sum(xdeviations * ydeviations) / float(n - 1)
return covar
-
- def avar (inarray, dimension=None,keepdims=0):
+ def avar(inarray, dimension=None, keepdims=0):
"""
Returns the estimated population variance of the values in the passed
array (i.e., N-1). Dimension can equal None (ravel array first), an
@@ -2803,21 +2823,20 @@ same number of dimensions as inarray.
Usage: avar(inarray,dimension=None,keepdims=0)
"""
if dimension == None:
- inarray = N.ravel(inarray)
- dimension = 0
- mn = amean(inarray,dimension,1)
+ inarray = N.ravel(inarray)
+ dimension = 0
+ mn = amean(inarray, dimension, 1)
deviations = inarray - mn
if type(dimension) == ListType:
- n = 1
- for d in dimension:
- n = n*inarray.shape[d]
+ n = 1
+ for d in dimension:
+ n = n * inarray.shape[d]
else:
- n = inarray.shape[dimension]
- var = ass(deviations,dimension,keepdims)/float(n-1)
+ n = inarray.shape[dimension]
+ var = ass(deviations, dimension, keepdims) / float(n - 1)
return var
-
- def astdev (inarray, dimension=None, keepdims=0):
+ def astdev(inarray, dimension=None, keepdims=0):
"""
Returns the estimated population standard deviation of the values in
the passed array (i.e., N-1). Dimension can equal None (ravel array
@@ -2827,10 +2846,9 @@ an array with the same number of dimensions as inarray.
Usage: astdev(inarray,dimension=None,keepdims=0)
"""
- return N.sqrt(avar(inarray,dimension,keepdims))
+ return N.sqrt(avar(inarray, dimension, keepdims))
-
- def asterr (inarray, dimension=None, keepdims=0):
+ def asterr(inarray, dimension=None, keepdims=0):
"""
Returns the estimated population standard error of the values in the
passed array (i.e., N-1). Dimension can equal None (ravel array
@@ -2841,12 +2859,12 @@ an array with the same number of dimensions as inarray.
Usage: asterr(inarray,dimension=None,keepdims=0)
"""
if dimension == None:
- inarray = N.ravel(inarray)
- dimension = 0
- return astdev(inarray,dimension,keepdims) / float(N.sqrt(inarray.shape[dimension]))
-
+ inarray = N.ravel(inarray)
+ dimension = 0
+ return astdev(inarray, dimension,
+ keepdims) / float(N.sqrt(inarray.shape[dimension]))
- def asem (inarray, dimension=None, keepdims=0):
+ def asem(inarray, dimension=None, keepdims=0):
"""
Returns the standard error of the mean (i.e., using N) of the values
in the passed array. Dimension can equal None (ravel array first), an
@@ -2857,19 +2875,18 @@ same number of dimensions as inarray.
Usage: asem(inarray,dimension=None, keepdims=0)
"""
if dimension == None:
- inarray = N.ravel(inarray)
- dimension = 0
+ inarray = N.ravel(inarray)
+ dimension = 0
if type(dimension) == ListType:
- n = 1
- for d in dimension:
- n = n*inarray.shape[d]
+ n = 1
+ for d in dimension:
+ n = n * inarray.shape[d]
else:
- n = inarray.shape[dimension]
- s = asamplestdev(inarray,dimension,keepdims) / N.sqrt(n-1)
+ n = inarray.shape[dimension]
+ s = asamplestdev(inarray, dimension, keepdims) / N.sqrt(n - 1)
return s
-
- def az (a, score):
+ def az(a, score):
"""
Returns the z-score of a given input score, given thearray from which
that score came. Not appropriate for population calculations, nor for
@@ -2877,11 +2894,10 @@ arrays > 1D.
Usage: az(a, score)
"""
- z = (score-amean(a)) / asamplestdev(a)
+ z = (score - amean(a)) / asamplestdev(a)
return z
-
- def azs (a):
+ def azs(a):
"""
Returns a 1D array of z-scores, one for each score in the passed array,
computed relative to the passed array.
@@ -2890,11 +2906,10 @@ Usage: azs(a)
"""
zscores = []
for item in a:
- zscores.append(z(a,item))
+ zscores.append(z(a, item))
return N.array(zscores)
-
- def azmap (scores, compare, dimension=0):
+ def azmap(scores, compare, dimension=0):
"""
Returns an array of z-scores the shape of scores (e.g., [x,y]), compared to
array passed to compare (e.g., [time,x,y]). Assumes collapsing over dim 0
@@ -2902,18 +2917,17 @@ of the compare array.
Usage: azs(scores, compare, dimension=0)
"""
- mns = amean(compare,dimension)
- sstd = asamplestdev(compare,0)
+ mns = amean(compare, dimension)
+ sstd = asamplestdev(compare, 0)
return (scores - mns) / sstd
-
#####################################
####### ATRIMMING FUNCTIONS #######
#####################################
## deleted around() as it's in numpy now
- def athreshold(a,threshmin=None,threshmax=None,newval=0):
+ def athreshold(a, threshmin=None, threshmax=None, newval=0):
"""
Like Numeric.clip() except that values <threshmid or >threshmax are replaced
by newval instead of by threshmin/threshmax (respectively).
@@ -2923,14 +2937,13 @@ Returns: a, with values <threshmin or >threshmax replaced with newval
"""
mask = N.zeros(a.shape)
if threshmin <> None:
- mask = mask + N.where(a<threshmin,1,0)
+ mask = mask + N.where(a < threshmin, 1, 0)
if threshmax <> None:
- mask = mask + N.where(a>threshmax,1,0)
- mask = N.clip(mask,0,1)
- return N.where(mask,newval,a)
-
+ mask = mask + N.where(a > threshmax, 1, 0)
+ mask = N.clip(mask, 0, 1)
+ return N.where(mask, newval, a)
- def atrimboth (a,proportiontocut):
+ def atrimboth(a, proportiontocut):
"""
Slices off the passed proportion of items from BOTH ends of the passed
array (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND
@@ -2942,12 +2955,11 @@ proportiontocut).
Usage: atrimboth (a,proportiontocut)
Returns: trimmed version of array a
"""
- lowercut = int(proportiontocut*len(a))
+ lowercut = int(proportiontocut * len(a))
uppercut = len(a) - lowercut
return a[lowercut:uppercut]
-
- def atrim1 (a,proportiontocut,tail='right'):
+ def atrim1(a, proportiontocut, tail='right'):
"""
Slices off the passed proportion of items from ONE end of the passed
array (i.e., if proportiontocut=0.1, slices off 'leftmost' or 'rightmost'
@@ -2958,19 +2970,18 @@ Usage: atrim1(a,proportiontocut,tail='right') or set tail='left'
Returns: trimmed version of array a
"""
if string.lower(tail) == 'right':
- lowercut = 0
- uppercut = len(a) - int(proportiontocut*len(a))
+ lowercut = 0
+ uppercut = len(a) - int(proportiontocut * len(a))
elif string.lower(tail) == 'left':
- lowercut = int(proportiontocut*len(a))
- uppercut = len(a)
+ lowercut = int(proportiontocut * len(a))
+ uppercut = len(a)
return a[lowercut:uppercut]
-
#####################################
##### ACORRELATION FUNCTIONS ######
#####################################
- def acovariance(X):
+ def acovariance(X):
"""
Computes the covariance matrix of a matrix X. Requires a 2D matrix input.
@@ -2978,13 +2989,12 @@ Usage: acovariance(X)
Returns: covariance matrix of X
"""
if len(X.shape) <> 2:
- raise TypeError, "acovariance requires 2D matrices"
+ raise TypeError, 'acovariance requires 2D matrices'
n = X.shape[0]
- mX = amean(X,0)
- return N.dot(N.transpose(X),X) / float(n) - N.multiply.outer(mX,mX)
+ mX = amean(X, 0)
+ return N.dot(N.transpose(X), X) / float(n) - N.multiply.outer(mX, mX)
-
- def acorrelation(X):
+ def acorrelation(X):
"""
Computes the correlation matrix of a matrix X. Requires a 2D matrix input.
@@ -2993,10 +3003,9 @@ Returns: correlation matrix of X
"""
C = acovariance(X)
V = N.diagonal(C)
- return C / N.sqrt(N.multiply.outer(V,V))
-
+ return C / N.sqrt(N.multiply.outer(V, V))
- def apaired(x,y):
+ def apaired(x, y):
"""
Interactively determines the type of data in x and y, and then runs the
appropriated statistic for paired group data.
@@ -3005,64 +3014,69 @@ Usage: apaired(x,y) x,y = the two arrays of values to be compared
Returns: appropriate statistic name, value, and probability
"""
samples = ''
- while samples not in ['i','r','I','R','c','C']:
- print '\nIndependent or related samples, or correlation (i,r,c): ',
- samples = raw_input()
-
- if samples in ['i','I','r','R']:
- print '\nComparing variances ...',
-# USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112
- r = obrientransform(x,y)
- f,p = F_oneway(pstat.colex(r,0),pstat.colex(r,1))
- if p<0.05:
- vartype='unequal, p='+str(round(p,4))
+ while samples not in ['i', 'r', 'I', 'R', 'c', 'C']:
+ print '\nIndependent or related samples, or correlation (i,r,c): ',
+ samples = raw_input()
+
+ if samples in ['i', 'I', 'r', 'R']:
+ print '\nComparing variances ...',
+    # USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & Delaney, p.112
+ r = obrientransform(x, y)
+ f, p = F_oneway(pstat.colex(r, 0), pstat.colex(r, 1))
+ if p < 0.05:
+ vartype = 'unequal, p=' + str(round(p, 4))
+ else:
+ vartype = 'equal'
+ print vartype
+ if samples in ['i', 'I']:
+ if vartype[0] == 'e':
+ t, p = ttest_ind(x, y, None, 0)
+ print '\nIndependent samples t-test: ', round(t, 4), round(p, 4)
else:
- vartype='equal'
- print vartype
- if samples in ['i','I']:
- if vartype[0]=='e':
- t,p = ttest_ind(x,y,None,0)
- print '\nIndependent samples t-test: ', round(t,4),round(p,4)
- else:
- if len(x)>20 or len(y)>20:
- z,p = ranksums(x,y)
- print '\nRank Sums test (NONparametric, n>20): ', round(z,4),round(p,4)
- else:
- u,p = mannwhitneyu(x,y)
- print '\nMann-Whitney U-test (NONparametric, ns<20): ', round(u,4),round(p,4)
-
- else: # RELATED SAMPLES
- if vartype[0]=='e':
- t,p = ttest_rel(x,y,0)
- print '\nRelated samples t-test: ', round(t,4),round(p,4)
- else:
- t,p = ranksums(x,y)
- print '\nWilcoxon T-test (NONparametric): ', round(t,4),round(p,4)
+ if len(x) > 20 or len(y) > 20:
+ z, p = ranksums(x, y)
+ print '\nRank Sums test (NONparametric, n>20): ', round(
+ z, 4), round(p, 4)
+ else:
+ u, p = mannwhitneyu(x, y)
+ print '\nMann-Whitney U-test (NONparametric, ns<20): ', round(
+ u, 4), round(p, 4)
+
+ else: # RELATED SAMPLES
+ if vartype[0] == 'e':
+ t, p = ttest_rel(x, y, 0)
+ print '\nRelated samples t-test: ', round(t, 4), round(p, 4)
+ else:
+ t, p = ranksums(x, y)
+ print '\nWilcoxon T-test (NONparametric): ', round(t, 4), round(p, 4)
else: # CORRELATION ANALYSIS
- corrtype = ''
- while corrtype not in ['c','C','r','R','d','D']:
- print '\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ',
- corrtype = raw_input()
- if corrtype in ['c','C']:
- m,b,r,p,see = linregress(x,y)
- print '\nLinear regression for continuous variables ...'
- lol = [['Slope','Intercept','r','Prob','SEestimate'],[round(m,4),round(b,4),round(r,4),round(p,4),round(see,4)]]
- pstat.printcc(lol)
- elif corrtype in ['r','R']:
- r,p = spearmanr(x,y)
- print '\nCorrelation for ranked variables ...'
- print "Spearman's r: ",round(r,4),round(p,4)
- else: # DICHOTOMOUS
- r,p = pointbiserialr(x,y)
- print '\nAssuming x contains a dichotomous variable ...'
- print 'Point Biserial r: ',round(r,4),round(p,4)
+ corrtype = ''
+ while corrtype not in ['c', 'C', 'r', 'R', 'd', 'D']:
+ print '\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ',
+ corrtype = raw_input()
+ if corrtype in ['c', 'C']:
+ m, b, r, p, see = linregress(x, y)
+ print '\nLinear regression for continuous variables ...'
+ lol = [
+ ['Slope', 'Intercept', 'r', 'Prob', 'SEestimate'],
+ [round(m, 4), round(b, 4), round(r, 4), round(p, 4), round(see, 4)]
+ ]
+ pstat.printcc(lol)
+ elif corrtype in ['r', 'R']:
+ r, p = spearmanr(x, y)
+ print '\nCorrelation for ranked variables ...'
+ print "Spearman's r: ", round(r, 4), round(p, 4)
+ else: # DICHOTOMOUS
+ r, p = pointbiserialr(x, y)
+ print '\nAssuming x contains a dichotomous variable ...'
+ print 'Point Biserial r: ', round(r, 4), round(p, 4)
print '\n\n'
return None
-
- def dices(x,y):
+ def dices(x, y):
"""
-Calculates Dice's coefficient ... (2*number of common terms)/(number of terms in x +
+Calculates Dice's coefficient ... (2*number of common terms)/(number of terms in
+x +
number of terms in y). Returns a value between 0 (orthogonal) and 1.
Usage: dices(x,y)
@@ -3072,12 +3086,12 @@ Usage: dices(x,y)
y = sets.Set(y)
common = len(x.intersection(y))
total = float(len(x) + len(y))
- return 2*common/total
-
+ return 2 * common / total
- def icc(x,y=None,verbose=0):
+ def icc(x, y=None, verbose=0):
"""
-Calculates intraclass correlation coefficients using simple, Type I sums of squares.
+Calculates intraclass correlation coefficients using simple, Type I sums of
+squares.
If only one variable is passed, assumed it's an Nx2 matrix
Usage: icc(x,y=None,verbose=0)
@@ -3085,25 +3099,24 @@ Returns: icc rho, prob ####PROB IS A GUESS BASED ON PEARSON
"""
TINY = 1.0e-20
if y:
- all = N.concatenate([x,y],0)
+ all = N.concatenate([x, y], 0)
else:
- all = x+0
- x = all[:,0]
- y = all[:,1]
- totalss = ass(all-mean(all))
- pairmeans = (x+y)/2.
- withinss = ass(x-pairmeans) + ass(y-pairmeans)
+ all = x + 0
+ x = all[:, 0]
+ y = all[:, 1]
+ totalss = ass(all - mean(all))
+ pairmeans = (x + y) / 2.
+ withinss = ass(x - pairmeans) + ass(y - pairmeans)
withindf = float(len(x))
- betwdf = float(len(x)-1)
+ betwdf = float(len(x) - 1)
withinms = withinss / withindf
- betweenms = (totalss-withinss) / betwdf
- rho = (betweenms-withinms)/(withinms+betweenms)
- t = rho*math.sqrt(betwdf/((1.0-rho+TINY)*(1.0+rho+TINY)))
- prob = abetai(0.5*betwdf,0.5,betwdf/(betwdf+t*t),verbose)
+ betweenms = (totalss - withinss) / betwdf
+ rho = (betweenms - withinms) / (withinms + betweenms)
+ t = rho * math.sqrt(betwdf / ((1.0 - rho + TINY) * (1.0 + rho + TINY)))
+ prob = abetai(0.5 * betwdf, 0.5, betwdf / (betwdf + t * t), verbose)
return rho, prob
-
- def alincc(x,y):
+ def alincc(x, y):
"""
Calculates Lin's concordance correlation coefficient.
@@ -3112,14 +3125,13 @@ Returns: Lin's CC
"""
x = N.ravel(x)
y = N.ravel(y)
- covar = acov(x,y)*(len(x)-1)/float(len(x)) # correct denom to n
- xvar = avar(x)*(len(x)-1)/float(len(x)) # correct denom to n
- yvar = avar(y)*(len(y)-1)/float(len(y)) # correct denom to n
- lincc = (2 * covar) / ((xvar+yvar) +((amean(x)-amean(y))**2))
+ covar = acov(x, y) * (len(x) - 1) / float(len(x)) # correct denom to n
+ xvar = avar(x) * (len(x) - 1) / float(len(x)) # correct denom to n
+ yvar = avar(y) * (len(y) - 1) / float(len(y)) # correct denom to n
+ lincc = (2 * covar) / ((xvar + yvar) + ((amean(x) - amean(y))**2))
return lincc
-
- def apearsonr(x,y,verbose=1):
+ def apearsonr(x, y, verbose=1):
"""
Calculates a Pearson correlation coefficient and returns p. Taken
from Heiman's Basic Statistics for the Behav. Sci (2nd), p.195.
@@ -3131,16 +3143,16 @@ Returns: Pearson's r, two-tailed p-value
n = len(x)
xmean = amean(x)
ymean = amean(y)
- r_num = n*(N.add.reduce(x*y)) - N.add.reduce(x)*N.add.reduce(y)
- r_den = math.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y)-asquare_of_sums(y)))
+ r_num = n * (N.add.reduce(x * y)) - N.add.reduce(x) * N.add.reduce(y)
+ r_den = math.sqrt((n * ass(x) - asquare_of_sums(x)) *
+ (n * ass(y) - asquare_of_sums(y)))
r = (r_num / r_den)
- df = n-2
- t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
- prob = abetai(0.5*df,0.5,df/(df+t*t),verbose)
- return r,prob
-
+ df = n - 2
+ t = r * math.sqrt(df / ((1.0 - r + TINY) * (1.0 + r + TINY)))
+ prob = abetai(0.5 * df, 0.5, df / (df + t * t), verbose)
+ return r, prob
- def aspearmanr(x,y):
+ def aspearmanr(x, y):
"""
Calculates a Spearman rank-order correlation coefficient. Taken
from Heiman's Basic Statistics for the Behav. Sci (1st), p.192.
@@ -3152,17 +3164,16 @@ Returns: Spearman's r, two-tailed p-value
n = len(x)
rankx = rankdata(x)
ranky = rankdata(y)
- dsq = N.add.reduce((rankx-ranky)**2)
- rs = 1 - 6*dsq / float(n*(n**2-1))
- t = rs * math.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
- df = n-2
- probrs = abetai(0.5*df,0.5,df/(df+t*t))
-# probability values for rs are from part 2 of the spearman function in
-# Numerical Recipies, p.510. They close to tables, but not exact.(?)
+ dsq = N.add.reduce((rankx - ranky)**2)
+ rs = 1 - 6 * dsq / float(n * (n**2 - 1))
+ t = rs * math.sqrt((n - 2) / ((rs + 1.0) * (1.0 - rs)))
+ df = n - 2
+ probrs = abetai(0.5 * df, 0.5, df / (df + t * t))
+ # probability values for rs are from part 2 of the spearman function in
+  # Numerical Recipes, p.510. They are close to tables, but not exact.(?)
return rs, probrs
-
- def apointbiserialr(x,y):
+ def apointbiserialr(x, y):
"""
Calculates a point-biserial correlation coefficient and the associated
probability value. Taken from Heiman's Basic Statistics for the Behav.
@@ -3173,26 +3184,26 @@ Returns: Point-biserial r, two-tailed p-value
"""
TINY = 1e-30
categories = pstat.aunique(x)
- data = pstat.aabut(x,y)
+ data = pstat.aabut(x, y)
if len(categories) <> 2:
- raise ValueError, "Exactly 2 categories required (in x) for pointbiserialr()."
- else: # there are 2 categories, continue
- codemap = pstat.aabut(categories,N.arange(2))
- recoded = pstat.arecode(data,codemap,0)
- x = pstat.alinexand(data,0,categories[0])
- y = pstat.alinexand(data,0,categories[1])
- xmean = amean(pstat.acolex(x,1))
- ymean = amean(pstat.acolex(y,1))
- n = len(data)
- adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n)))
- rpb = (ymean - xmean)/asamplestdev(pstat.acolex(data,1))*adjust
- df = n-2
- t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))
- prob = abetai(0.5*df,0.5,df/(df+t*t))
- return rpb, prob
-
-
- def akendalltau(x,y):
+ raise ValueError, ('Exactly 2 categories required (in x) for '
+ 'pointbiserialr().')
+ else: # there are 2 categories, continue
+ codemap = pstat.aabut(categories, N.arange(2))
+ recoded = pstat.arecode(data, codemap, 0)
+ x = pstat.alinexand(data, 0, categories[0])
+ y = pstat.alinexand(data, 0, categories[1])
+ xmean = amean(pstat.acolex(x, 1))
+ ymean = amean(pstat.acolex(y, 1))
+ n = len(data)
+ adjust = math.sqrt((len(x) / float(n)) * (len(y) / float(n)))
+ rpb = (ymean - xmean) / asamplestdev(pstat.acolex(data, 1)) * adjust
+ df = n - 2
+ t = rpb * math.sqrt(df / ((1.0 - rpb + TINY) * (1.0 + rpb + TINY)))
+ prob = abetai(0.5 * df, 0.5, df / (df + t * t))
+ return rpb, prob
+
+ def akendalltau(x, y):
"""
Calculates Kendall's tau ... correlation of ordinal data. Adapted
from function kendl1 in Numerical Recipies. Needs good test-cases.@@@
@@ -3203,31 +3214,30 @@ Returns: Kendall's tau, two-tailed p-value
n1 = 0
n2 = 0
iss = 0
- for j in range(len(x)-1):
- for k in range(j,len(y)):
- a1 = x[j] - x[k]
- a2 = y[j] - y[k]
- aa = a1 * a2
- if (aa): # neither array has a tie
- n1 = n1 + 1
- n2 = n2 + 1
- if aa > 0:
- iss = iss + 1
- else:
- iss = iss -1
- else:
- if (a1):
- n1 = n1 + 1
- else:
- n2 = n2 + 1
- tau = iss / math.sqrt(n1*n2)
- svar = (4.0*len(x)+10.0) / (9.0*len(x)*(len(x)-1))
+ for j in range(len(x) - 1):
+ for k in range(j, len(y)):
+ a1 = x[j] - x[k]
+ a2 = y[j] - y[k]
+ aa = a1 * a2
+ if (aa): # neither array has a tie
+ n1 = n1 + 1
+ n2 = n2 + 1
+ if aa > 0:
+ iss = iss + 1
+ else:
+ iss = iss - 1
+ else:
+ if (a1):
+ n1 = n1 + 1
+ else:
+ n2 = n2 + 1
+ tau = iss / math.sqrt(n1 * n2)
+ svar = (4.0 * len(x) + 10.0) / (9.0 * len(x) * (len(x) - 1))
z = tau / math.sqrt(svar)
- prob = erfcc(abs(z)/1.4142136)
+ prob = erfcc(abs(z) / 1.4142136)
return tau, prob
-
- def alinregress(*args):
+ def alinregress(*args):
"""
Calculates a regression line on two arrays, x and y, corresponding to x,y
pairs. If a single 2D array is passed, alinregress finds dim with 2 levels
@@ -3238,32 +3248,33 @@ Returns: slope, intercept, r, two-tailed prob, sterr-of-the-estimate, n
"""
TINY = 1.0e-20
if len(args) == 1: # more than 1D array?
- args = args[0]
- if len(args) == 2:
- x = args[0]
- y = args[1]
- else:
- x = args[:,0]
- y = args[:,1]
- else:
+ args = args[0]
+ if len(args) == 2:
x = args[0]
y = args[1]
+ else:
+ x = args[:, 0]
+ y = args[:, 1]
+ else:
+ x = args[0]
+ y = args[1]
n = len(x)
xmean = amean(x)
ymean = amean(y)
- r_num = n*(N.add.reduce(x*y)) - N.add.reduce(x)*N.add.reduce(y)
- r_den = math.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y)-asquare_of_sums(y)))
+ r_num = n * (N.add.reduce(x * y)) - N.add.reduce(x) * N.add.reduce(y)
+ r_den = math.sqrt((n * ass(x) - asquare_of_sums(x)) *
+ (n * ass(y) - asquare_of_sums(y)))
r = r_num / r_den
- z = 0.5*math.log((1.0+r+TINY)/(1.0-r+TINY))
- df = n-2
- t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
- prob = abetai(0.5*df,0.5,df/(df+t*t))
- slope = r_num / (float(n)*ass(x) - asquare_of_sums(x))
- intercept = ymean - slope*xmean
- sterrest = math.sqrt(1-r*r)*asamplestdev(y)
+ z = 0.5 * math.log((1.0 + r + TINY) / (1.0 - r + TINY))
+ df = n - 2
+ t = r * math.sqrt(df / ((1.0 - r + TINY) * (1.0 + r + TINY)))
+ prob = abetai(0.5 * df, 0.5, df / (df + t * t))
+ slope = r_num / (float(n) * ass(x) - asquare_of_sums(x))
+ intercept = ymean - slope * xmean
+ sterrest = math.sqrt(1 - r * r) * asamplestdev(y)
return slope, intercept, r, prob, sterrest, n
- def amasslinregress(*args):
+ def amasslinregress(*args):
"""
Calculates a regression line on one 1D array (x) and one N-D array (y).
@@ -3271,49 +3282,50 @@ Returns: slope, intercept, r, two-tailed prob, sterr-of-the-estimate, n
"""
TINY = 1.0e-20
if len(args) == 1: # more than 1D array?
- args = args[0]
- if len(args) == 2:
- x = N.ravel(args[0])
- y = args[1]
- else:
- x = N.ravel(args[:,0])
- y = args[:,1]
- else:
- x = args[0]
+ args = args[0]
+ if len(args) == 2:
+ x = N.ravel(args[0])
y = args[1]
+ else:
+ x = N.ravel(args[:, 0])
+ y = args[:, 1]
+ else:
+ x = args[0]
+ y = args[1]
x = x.astype(N.float_)
y = y.astype(N.float_)
n = len(x)
xmean = amean(x)
- ymean = amean(y,0)
+ ymean = amean(y, 0)
shp = N.ones(len(y.shape))
shp[0] = len(x)
x.shape = shp
print x.shape, y.shape
- r_num = n*(N.add.reduce(x*y,0)) - N.add.reduce(x)*N.add.reduce(y,0)
- r_den = N.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y,0)-asquare_of_sums(y,0)))
- zerodivproblem = N.equal(r_den,0)
- r_den = N.where(zerodivproblem,1,r_den) # avoid zero-division in 1st place
+ r_num = n * (N.add.reduce(x * y, 0)) - N.add.reduce(x) * N.add.reduce(y, 0)
+ r_den = N.sqrt((n * ass(x) - asquare_of_sums(x)) *
+ (n * ass(y, 0) - asquare_of_sums(y, 0)))
+ zerodivproblem = N.equal(r_den, 0)
+ r_den = N.where(zerodivproblem, 1, r_den
+ ) # avoid zero-division in 1st place
r = r_num / r_den # need to do this nicely for matrix division
- r = N.where(zerodivproblem,0.0,r)
- z = 0.5*N.log((1.0+r+TINY)/(1.0-r+TINY))
- df = n-2
- t = r*N.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
- prob = abetai(0.5*df,0.5,df/(df+t*t))
-
- ss = float(n)*ass(x)-asquare_of_sums(x)
- s_den = N.where(ss==0,1,ss) # avoid zero-division in 1st place
+ r = N.where(zerodivproblem, 0.0, r)
+ z = 0.5 * N.log((1.0 + r + TINY) / (1.0 - r + TINY))
+ df = n - 2
+ t = r * N.sqrt(df / ((1.0 - r + TINY) * (1.0 + r + TINY)))
+ prob = abetai(0.5 * df, 0.5, df / (df + t * t))
+
+ ss = float(n) * ass(x) - asquare_of_sums(x)
+ s_den = N.where(ss == 0, 1, ss) # avoid zero-division in 1st place
slope = r_num / s_den
- intercept = ymean - slope*xmean
- sterrest = N.sqrt(1-r*r)*asamplestdev(y,0)
+ intercept = ymean - slope * xmean
+ sterrest = N.sqrt(1 - r * r) * asamplestdev(y, 0)
return slope, intercept, r, prob, sterrest, n
-
#####################################
##### AINFERENTIAL STATISTICS #####
#####################################
- def attest_1samp(a,popmean,printit=0,name='Sample',writemode='a'):
+ def attest_1samp(a, popmean, printit=0, name='Sample', writemode='a'):
"""
Calculates the t-obtained for the independent samples T-test on ONE group
of scores a, given a population mean. If printit=1, results are printed
@@ -3324,26 +3336,29 @@ Usage: attest_1samp(a,popmean,Name='Sample',printit=0,writemode='a')
Returns: t-value, two-tailed prob
"""
if type(a) != N.ndarray:
- a = N.array(a)
+ a = N.array(a)
x = amean(a)
v = avar(a)
n = len(a)
- df = n-1
- svar = ((n-1)*v) / float(df)
- t = (x-popmean)/math.sqrt(svar*(1.0/n))
- prob = abetai(0.5*df,0.5,df/(df+t*t))
+ df = n - 1
+ svar = ((n - 1) * v) / float(df)
+ t = (x - popmean) / math.sqrt(svar * (1.0 / n))
+ prob = abetai(0.5 * df, 0.5, df / (df + t * t))
if printit <> 0:
- statname = 'Single-sample T-test.'
- outputpairedstats(printit,writemode,
- 'Population','--',popmean,0,0,0,
- name,n,x,v,N.minimum.reduce(N.ravel(a)),
- N.maximum.reduce(N.ravel(a)),
- statname,t,prob)
- return t,prob
-
+ statname = 'Single-sample T-test.'
+ outputpairedstats(printit, writemode, 'Population', '--', popmean, 0, 0,
+ 0, name, n, x, v, N.minimum.reduce(N.ravel(a)),
+ N.maximum.reduce(N.ravel(a)), statname, t, prob)
+ return t, prob
- def attest_ind (a, b, dimension=None, printit=0, name1='Samp1', name2='Samp2',writemode='a'):
+ def attest_ind(a,
+ b,
+ dimension=None,
+ printit=0,
+ name1='Samp1',
+ name2='Samp2',
+ writemode='a'):
"""
Calculates the t-obtained T-test on TWO INDEPENDENT samples of scores
a, and b. From Numerical Recipies, p.483. If printit=1, results are
@@ -3357,44 +3372,44 @@ Usage: attest_ind (a,b,dimension=None,printit=0,
Returns: t-value, two-tailed p-value
"""
if dimension == None:
- a = N.ravel(a)
- b = N.ravel(b)
- dimension = 0
- x1 = amean(a,dimension)
- x2 = amean(b,dimension)
- v1 = avar(a,dimension)
- v2 = avar(b,dimension)
+ a = N.ravel(a)
+ b = N.ravel(b)
+ dimension = 0
+ x1 = amean(a, dimension)
+ x2 = amean(b, dimension)
+ v1 = avar(a, dimension)
+ v2 = avar(b, dimension)
n1 = a.shape[dimension]
n2 = b.shape[dimension]
- df = n1+n2-2
- svar = ((n1-1)*v1+(n2-1)*v2) / float(df)
- zerodivproblem = N.equal(svar,0)
- svar = N.where(zerodivproblem,1,svar) # avoid zero-division in 1st place
- t = (x1-x2)/N.sqrt(svar*(1.0/n1 + 1.0/n2)) # N-D COMPUTATION HERE!!!!!!
- t = N.where(zerodivproblem,1.0,t) # replace NaN/wrong t-values with 1.0
- probs = abetai(0.5*df,0.5,float(df)/(df+t*t))
+ df = n1 + n2 - 2
+ svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / float(df)
+ zerodivproblem = N.equal(svar, 0)
+ svar = N.where(zerodivproblem, 1, svar) # avoid zero-division in 1st place
+ t = (x1 - x2) / N.sqrt(svar *
+ (1.0 / n1 + 1.0 / n2)) # N-D COMPUTATION HERE!!!!!!
+ t = N.where(zerodivproblem, 1.0, t) # replace NaN/wrong t-values with 1.0
+ probs = abetai(0.5 * df, 0.5, float(df) / (df + t * t))
if type(t) == N.ndarray:
- probs = N.reshape(probs,t.shape)
+ probs = N.reshape(probs, t.shape)
if probs.shape == (1,):
- probs = probs[0]
+ probs = probs[0]
if printit <> 0:
- if type(t) == N.ndarray:
- t = t[0]
- if type(probs) == N.ndarray:
- probs = probs[0]
- statname = 'Independent samples T-test.'
- outputpairedstats(printit,writemode,
- name1,n1,x1,v1,N.minimum.reduce(N.ravel(a)),
- N.maximum.reduce(N.ravel(a)),
- name2,n2,x2,v2,N.minimum.reduce(N.ravel(b)),
- N.maximum.reduce(N.ravel(b)),
- statname,t,probs)
- return
+ if type(t) == N.ndarray:
+ t = t[0]
+ if type(probs) == N.ndarray:
+ probs = probs[0]
+ statname = 'Independent samples T-test.'
+ outputpairedstats(printit, writemode, name1, n1, x1, v1,
+ N.minimum.reduce(N.ravel(a)),
+ N.maximum.reduce(N.ravel(a)), name2, n2, x2, v2,
+ N.minimum.reduce(N.ravel(b)),
+ N.maximum.reduce(N.ravel(b)), statname, t, probs)
+ return
return t, probs
- def ap2t(pval,df):
+ def ap2t(pval, df):
"""
Tries to compute a t-value from a p-value (or pval array) and associated df.
SLOW for large numbers of elements(!) as it re-computes p-values 20 times
@@ -3407,24 +3422,29 @@ Returns: an array of t-values with the shape of pval
pval = N.array(pval)
signs = N.sign(pval)
pval = abs(pval)
- t = N.ones(pval.shape,N.float_)*50
- step = N.ones(pval.shape,N.float_)*25
- print "Initial ap2t() prob calc"
- prob = abetai(0.5*df,0.5,float(df)/(df+t*t))
+ t = N.ones(pval.shape, N.float_) * 50
+ step = N.ones(pval.shape, N.float_) * 25
+ print 'Initial ap2t() prob calc'
+ prob = abetai(0.5 * df, 0.5, float(df) / (df + t * t))
print 'ap2t() iter: ',
for i in range(10):
- print i,' ',
- t = N.where(pval<prob,t+step,t-step)
- prob = abetai(0.5*df,0.5,float(df)/(df+t*t))
- step = step/2
+ print i, ' ',
+ t = N.where(pval < prob, t + step, t - step)
+ prob = abetai(0.5 * df, 0.5, float(df) / (df + t * t))
+ step = step / 2
print
# since this is an ugly hack, we get ugly boundaries
- t = N.where(t>99.9,1000,t) # hit upper-boundary
- t = t+signs
- return t #, prob, pval
-
-
- def attest_rel (a,b,dimension=None,printit=0,name1='Samp1',name2='Samp2',writemode='a'):
+ t = N.where(t > 99.9, 1000, t) # hit upper-boundary
+ t = t + signs
+ return t #, prob, pval
+
+ def attest_rel(a,
+ b,
+ dimension=None,
+ printit=0,
+ name1='Samp1',
+ name2='Samp2',
+ writemode='a'):
"""
Calculates the t-obtained T-test on TWO RELATED samples of scores, a
and b. From Numerical Recipies, p.483. If printit=1, results are
@@ -3438,43 +3458,44 @@ Usage: attest_rel(a,b,dimension=None,printit=0,
Returns: t-value, two-tailed p-value
"""
if dimension == None:
- a = N.ravel(a)
- b = N.ravel(b)
- dimension = 0
- if len(a)<>len(b):
- raise ValueError, 'Unequal length arrays.'
- x1 = amean(a,dimension)
- x2 = amean(b,dimension)
- v1 = avar(a,dimension)
- v2 = avar(b,dimension)
+ a = N.ravel(a)
+ b = N.ravel(b)
+ dimension = 0
+ if len(a) <> len(b):
+ raise ValueError, 'Unequal length arrays.'
+ x1 = amean(a, dimension)
+ x2 = amean(b, dimension)
+ v1 = avar(a, dimension)
+ v2 = avar(b, dimension)
n = a.shape[dimension]
- df = float(n-1)
- d = (a-b).astype('d')
-
- denom = N.sqrt((n*N.add.reduce(d*d,dimension) - N.add.reduce(d,dimension)**2) /df)
- zerodivproblem = N.equal(denom,0)
- denom = N.where(zerodivproblem,1,denom) # avoid zero-division in 1st place
- t = N.add.reduce(d,dimension) / denom # N-D COMPUTATION HERE!!!!!!
- t = N.where(zerodivproblem,1.0,t) # replace NaN/wrong t-values with 1.0
- probs = abetai(0.5*df,0.5,float(df)/(df+t*t))
+ df = float(n - 1)
+ d = (a - b).astype('d')
+
+ denom = N.sqrt(
+ (n * N.add.reduce(d * d, dimension) - N.add.reduce(d, dimension)**2) /
+ df)
+ zerodivproblem = N.equal(denom, 0)
+ denom = N.where(zerodivproblem, 1, denom
+ ) # avoid zero-division in 1st place
+ t = N.add.reduce(d, dimension) / denom # N-D COMPUTATION HERE!!!!!!
+ t = N.where(zerodivproblem, 1.0, t) # replace NaN/wrong t-values with 1.0
+ probs = abetai(0.5 * df, 0.5, float(df) / (df + t * t))
if type(t) == N.ndarray:
- probs = N.reshape(probs,t.shape)
+ probs = N.reshape(probs, t.shape)
if probs.shape == (1,):
- probs = probs[0]
+ probs = probs[0]
if printit <> 0:
- statname = 'Related samples T-test.'
- outputpairedstats(printit,writemode,
- name1,n,x1,v1,N.minimum.reduce(N.ravel(a)),
- N.maximum.reduce(N.ravel(a)),
- name2,n,x2,v2,N.minimum.reduce(N.ravel(b)),
- N.maximum.reduce(N.ravel(b)),
- statname,t,probs)
- return
+ statname = 'Related samples T-test.'
+ outputpairedstats(printit, writemode, name1, n, x1, v1,
+ N.minimum.reduce(N.ravel(a)),
+ N.maximum.reduce(N.ravel(a)), name2, n, x2, v2,
+ N.minimum.reduce(N.ravel(b)),
+ N.maximum.reduce(N.ravel(b)), statname, t, probs)
+ return
return t, probs
-
- def achisquare(f_obs,f_exp=None):
+ def achisquare(f_obs, f_exp=None):
"""
Calculates a one-way chi square for array of observed frequencies and returns
the result. If no expected frequencies are given, the total N is assumed to
@@ -3487,13 +3508,12 @@ Returns: chisquare-statistic, associated p-value
k = len(f_obs)
if f_exp == None:
- f_exp = N.array([sum(f_obs)/float(k)] * len(f_obs),N.float_)
+ f_exp = N.array([sum(f_obs) / float(k)] * len(f_obs), N.float_)
f_exp = f_exp.astype(N.float_)
- chisq = N.add.reduce((f_obs-f_exp)**2 / f_exp)
- return chisq, achisqprob(chisq, k-1)
+ chisq = N.add.reduce((f_obs - f_exp)**2 / f_exp)
+ return chisq, achisqprob(chisq, k - 1)
-
- def aks_2samp (data1,data2):
+ def aks_2samp(data1, data2):
"""
Computes the Kolmogorov-Smirnof statistic on 2 samples. Modified from
Numerical Recipies in C, page 493. Returns KS D-value, prob. Not ufunc-
@@ -3502,38 +3522,37 @@ like.
Usage: aks_2samp(data1,data2) where data1 and data2 are 1D arrays
Returns: KS D-value, p-value
"""
- j1 = 0 # N.zeros(data1.shape[1:]) TRIED TO MAKE THIS UFUNC-LIKE
- j2 = 0 # N.zeros(data2.shape[1:])
- fn1 = 0.0 # N.zeros(data1.shape[1:],N.float_)
- fn2 = 0.0 # N.zeros(data2.shape[1:],N.float_)
+ j1 = 0 # N.zeros(data1.shape[1:]) TRIED TO MAKE THIS UFUNC-LIKE
+ j2 = 0 # N.zeros(data2.shape[1:])
+ fn1 = 0.0 # N.zeros(data1.shape[1:],N.float_)
+ fn2 = 0.0 # N.zeros(data2.shape[1:],N.float_)
n1 = data1.shape[0]
n2 = data2.shape[0]
- en1 = n1*1
- en2 = n2*1
- d = N.zeros(data1.shape[1:],N.float_)
- data1 = N.sort(data1,0)
- data2 = N.sort(data2,0)
+ en1 = n1 * 1
+ en2 = n2 * 1
+ d = N.zeros(data1.shape[1:], N.float_)
+ data1 = N.sort(data1, 0)
+ data2 = N.sort(data2, 0)
while j1 < n1 and j2 < n2:
- d1=data1[j1]
- d2=data2[j2]
- if d1 <= d2:
- fn1 = (j1)/float(en1)
- j1 = j1 + 1
- if d2 <= d1:
- fn2 = (j2)/float(en2)
- j2 = j2 + 1
- dt = (fn2-fn1)
- if abs(dt) > abs(d):
- d = dt
+ d1 = data1[j1]
+ d2 = data2[j2]
+ if d1 <= d2:
+ fn1 = (j1) / float(en1)
+ j1 = j1 + 1
+ if d2 <= d1:
+ fn2 = (j2) / float(en2)
+ j2 = j2 + 1
+ dt = (fn2 - fn1)
+ if abs(dt) > abs(d):
+ d = dt
# try:
- en = math.sqrt(en1*en2/float(en1+en2))
- prob = aksprob((en+0.12+0.11/en)*N.fabs(d))
-# except:
-# prob = 1.0
+ en = math.sqrt(en1 * en2 / float(en1 + en2))
+ prob = aksprob((en + 0.12 + 0.11 / en) * N.fabs(d))
+ # except:
+ # prob = 1.0
return d, prob
-
- def amannwhitneyu(x,y):
+ def amannwhitneyu(x, y):
"""
Calculates a Mann-Whitney U statistic on the provided scores and
returns the result. Use only when the n in each condition is < 20 and
@@ -3546,23 +3565,22 @@ Returns: u-statistic, one-tailed p-value (i.e., p(z(U)))
"""
n1 = len(x)
n2 = len(y)
- ranked = rankdata(N.concatenate((x,y)))
- rankx = ranked[0:n1] # get the x-ranks
- ranky = ranked[n1:] # the rest are y-ranks
- u1 = n1*n2 + (n1*(n1+1))/2.0 - sum(rankx) # calc U for x
- u2 = n1*n2 - u1 # remainder is U for y
- bigu = max(u1,u2)
- smallu = min(u1,u2)
- proportion = bigu/float(n1*n2)
+ ranked = rankdata(N.concatenate((x, y)))
+ rankx = ranked[0:n1] # get the x-ranks
+ ranky = ranked[n1:] # the rest are y-ranks
+ u1 = n1 * n2 + (n1 * (n1 + 1)) / 2.0 - sum(rankx) # calc U for x
+ u2 = n1 * n2 - u1 # remainder is U for y
+ bigu = max(u1, u2)
+ smallu = min(u1, u2)
+ proportion = bigu / float(n1 * n2)
T = math.sqrt(tiecorrect(ranked)) # correction factor for tied scores
if T == 0:
- raise ValueError, 'All numbers are identical in amannwhitneyu'
- sd = math.sqrt(T*n1*n2*(n1+n2+1)/12.0)
- z = abs((bigu-n1*n2/2.0) / sd) # normal approximation for prob calc
+ raise ValueError, 'All numbers are identical in amannwhitneyu'
+ sd = math.sqrt(T * n1 * n2 * (n1 + n2 + 1) / 12.0)
+ z = abs((bigu - n1 * n2 / 2.0) / sd) # normal approximation for prob calc
return smallu, 1.0 - azprob(z), proportion
-
- def atiecorrect(rankvals):
+ def atiecorrect(rankvals):
"""
Tie-corrector for ties in Mann Whitney U and Kruskal Wallis H tests.
See Siegel, S. (1956) Nonparametric Statistics for the Behavioral
@@ -3572,23 +3590,22 @@ code.
Usage: atiecorrect(rankvals)
Returns: T correction factor for U or H
"""
- sorted,posn = ashellsort(N.array(rankvals))
+ sorted, posn = ashellsort(N.array(rankvals))
n = len(sorted)
T = 0.0
i = 0
- while (i<n-1):
- if sorted[i] == sorted[i+1]:
- nties = 1
- while (i<n-1) and (sorted[i] == sorted[i+1]):
- nties = nties +1
- i = i +1
- T = T + nties**3 - nties
- i = i+1
- T = T / float(n**3-n)
+ while (i < n - 1):
+ if sorted[i] == sorted[i + 1]:
+ nties = 1
+ while (i < n - 1) and (sorted[i] == sorted[i + 1]):
+ nties = nties + 1
+ i = i + 1
+ T = T + nties**3 - nties
+ i = i + 1
+ T = T / float(n**3 - n)
return 1.0 - T
-
- def aranksums(x,y):
+ def aranksums(x, y):
"""
Calculates the rank sums statistic on the provided scores and returns
the result.
@@ -3598,18 +3615,17 @@ Returns: z-statistic, two-tailed p-value
"""
n1 = len(x)
n2 = len(y)
- alldata = N.concatenate((x,y))
+ alldata = N.concatenate((x, y))
ranked = arankdata(alldata)
x = ranked[:n1]
y = ranked[n1:]
s = sum(x)
- expected = n1*(n1+n2+1) / 2.0
- z = (s - expected) / math.sqrt(n1*n2*(n1+n2+1)/12.0)
- prob = 2*(1.0 - azprob(abs(z)))
+ expected = n1 * (n1 + n2 + 1) / 2.0
+ z = (s - expected) / math.sqrt(n1 * n2 * (n1 + n2 + 1) / 12.0)
+ prob = 2 * (1.0 - azprob(abs(z)))
return z, prob
-
- def awilcoxont(x,y):
+ def awilcoxont(x, y):
"""
Calculates the Wilcoxon T-test for related samples and returns the
result. A non-parametric T-test.
@@ -3618,29 +3634,28 @@ Usage: awilcoxont(x,y) where x,y are equal-length arrays for 2 conditions
Returns: t-statistic, two-tailed p-value
"""
if len(x) <> len(y):
- raise ValueError, 'Unequal N in awilcoxont. Aborting.'
- d = x-y
- d = N.compress(N.not_equal(d,0),d) # Keep all non-zero differences
+ raise ValueError, 'Unequal N in awilcoxont. Aborting.'
+ d = x - y
+ d = N.compress(N.not_equal(d, 0), d) # Keep all non-zero differences
count = len(d)
absd = abs(d)
absranked = arankdata(absd)
r_plus = 0.0
r_minus = 0.0
for i in range(len(absd)):
- if d[i] < 0:
- r_minus = r_minus + absranked[i]
- else:
- r_plus = r_plus + absranked[i]
+ if d[i] < 0:
+ r_minus = r_minus + absranked[i]
+ else:
+ r_plus = r_plus + absranked[i]
wt = min(r_plus, r_minus)
- mn = count * (count+1) * 0.25
- se = math.sqrt(count*(count+1)*(2.0*count+1.0)/24.0)
- z = math.fabs(wt-mn) / se
- z = math.fabs(wt-mn) / se
- prob = 2*(1.0 -zprob(abs(z)))
+ mn = count * (count + 1) * 0.25
+ se = math.sqrt(count * (count + 1) * (2.0 * count + 1.0) / 24.0)
+ z = math.fabs(wt - mn) / se
+ z = math.fabs(wt - mn) / se
+ prob = 2 * (1.0 - zprob(abs(z)))
return wt, prob
-
- def akruskalwallish(*args):
+ def akruskalwallish(*args):
"""
The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
groups, requiring at least 5 subjects in each group. This function
@@ -3650,33 +3665,32 @@ independent samples.
Usage: akruskalwallish(*args) args are separate arrays for 3+ conditions
Returns: H-statistic (corrected for ties), associated p-value
"""
- assert len(args) == 3, "Need at least 3 groups in stats.akruskalwallish()"
+ assert len(args) == 3, 'Need at least 3 groups in stats.akruskalwallish()'
args = list(args)
- n = [0]*len(args)
- n = map(len,args)
+ n = [0] * len(args)
+ n = map(len, args)
all = []
for i in range(len(args)):
- all = all + args[i].tolist()
+ all = all + args[i].tolist()
ranked = rankdata(all)
T = tiecorrect(ranked)
for i in range(len(args)):
- args[i] = ranked[0:n[i]]
- del ranked[0:n[i]]
+ args[i] = ranked[0:n[i]]
+ del ranked[0:n[i]]
rsums = []
for i in range(len(args)):
- rsums.append(sum(args[i])**2)
- rsums[i] = rsums[i] / float(n[i])
+ rsums.append(sum(args[i])**2)
+ rsums[i] = rsums[i] / float(n[i])
ssbn = sum(rsums)
totaln = sum(n)
- h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)
+ h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
df = len(args) - 1
if T == 0:
- raise ValueError, 'All numbers are identical in akruskalwallish'
+ raise ValueError, 'All numbers are identical in akruskalwallish'
h = h / float(T)
- return h, chisqprob(h,df)
+ return h, chisqprob(h, df)
-
- def afriedmanchisquare(*args):
+ def afriedmanchisquare(*args):
"""
Friedman Chi-Square is a non-parametric, one-way within-subjects
ANOVA. This function calculates the Friedman Chi-square test for
@@ -3690,22 +3704,22 @@ Returns: chi-square statistic, associated p-value
"""
k = len(args)
if k < 3:
- raise ValueError, '\nLess than 3 levels. Friedman test not appropriate.\n'
+ raise ValueError, ('\nLess than 3 levels. Friedman test not '
+ 'appropriate.\n')
n = len(args[0])
- data = apply(pstat.aabut,args)
+ data = apply(pstat.aabut, args)
data = data.astype(N.float_)
for i in range(len(data)):
- data[i] = arankdata(data[i])
- ssbn = asum(asum(args,1)**2)
- chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)
- return chisq, achisqprob(chisq,k-1)
-
+ data[i] = arankdata(data[i])
+ ssbn = asum(asum(args, 1)**2)
+ chisq = 12.0 / (k * n * (k + 1)) * ssbn - 3 * n * (k + 1)
+ return chisq, achisqprob(chisq, k - 1)
#####################################
#### APROBABILITY CALCULATIONS ####
#####################################
- def achisqprob(chisq,df):
+ def achisqprob(chisq, df):
"""
Returns the (1-tail) probability value associated with the provided chi-square
value and df. Heavily modified from chisq.c in Gary Perlman's |Stat. Can
@@ -3714,80 +3728,83 @@ handle multiple dimensions.
Usage: achisqprob(chisq,df) chisq=chisquare stat., df=degrees of freedom
"""
BIG = 200.0
+
def ex(x):
- BIG = 200.0
- exponents = N.where(N.less(x,-BIG),-BIG,x)
- return N.exp(exponents)
+ BIG = 200.0
+ exponents = N.where(N.less(x, -BIG), -BIG, x)
+ return N.exp(exponents)
if type(chisq) == N.ndarray:
- arrayflag = 1
+ arrayflag = 1
else:
- arrayflag = 0
- chisq = N.array([chisq])
+ arrayflag = 0
+ chisq = N.array([chisq])
if df < 1:
- return N.ones(chisq.shape,N.float)
- probs = N.zeros(chisq.shape,N.float_)
- probs = N.where(N.less_equal(chisq,0),1.0,probs) # set prob=1 for chisq<0
+ return N.ones(chisq.shape, N.float)
+ probs = N.zeros(chisq.shape, N.float_)
+ probs = N.where(
+ N.less_equal(chisq, 0), 1.0, probs) # set prob=1 for chisq<0
a = 0.5 * chisq
if df > 1:
- y = ex(-a)
- if df%2 == 0:
- even = 1
- s = y*1
- s2 = s*1
+ y = ex(-a)
+ if df % 2 == 0:
+ even = 1
+ s = y * 1
+ s2 = s * 1
else:
- even = 0
- s = 2.0 * azprob(-N.sqrt(chisq))
- s2 = s*1
+ even = 0
+ s = 2.0 * azprob(-N.sqrt(chisq))
+ s2 = s * 1
if (df > 2):
- chisq = 0.5 * (df - 1.0)
- if even:
- z = N.ones(probs.shape,N.float_)
- else:
- z = 0.5 *N.ones(probs.shape,N.float_)
- if even:
- e = N.zeros(probs.shape,N.float_)
- else:
- e = N.log(N.sqrt(N.pi)) *N.ones(probs.shape,N.float_)
- c = N.log(a)
- mask = N.zeros(probs.shape)
- a_big = N.greater(a,BIG)
- a_big_frozen = -1 *N.ones(probs.shape,N.float_)
- totalelements = N.multiply.reduce(N.array(probs.shape))
- while asum(mask)<>totalelements:
- e = N.log(z) + e
- s = s + ex(c*z-a-e)
- z = z + 1.0
-# print z, e, s
- newmask = N.greater(z,chisq)
- a_big_frozen = N.where(newmask*N.equal(mask,0)*a_big, s, a_big_frozen)
- mask = N.clip(newmask+mask,0,1)
- if even:
- z = N.ones(probs.shape,N.float_)
- e = N.ones(probs.shape,N.float_)
- else:
- z = 0.5 *N.ones(probs.shape,N.float_)
- e = 1.0 / N.sqrt(N.pi) / N.sqrt(a) * N.ones(probs.shape,N.float_)
- c = 0.0
- mask = N.zeros(probs.shape)
- a_notbig_frozen = -1 *N.ones(probs.shape,N.float_)
- while asum(mask)<>totalelements:
- e = e * (a/z.astype(N.float_))
- c = c + e
- z = z + 1.0
-# print '#2', z, e, c, s, c*y+s2
- newmask = N.greater(z,chisq)
- a_notbig_frozen = N.where(newmask*N.equal(mask,0)*(1-a_big),
- c*y+s2, a_notbig_frozen)
- mask = N.clip(newmask+mask,0,1)
- probs = N.where(N.equal(probs,1),1,
- N.where(N.greater(a,BIG),a_big_frozen,a_notbig_frozen))
- return probs
+ chisq = 0.5 * (df - 1.0)
+ if even:
+ z = N.ones(probs.shape, N.float_)
+ else:
+ z = 0.5 * N.ones(probs.shape, N.float_)
+ if even:
+ e = N.zeros(probs.shape, N.float_)
+ else:
+ e = N.log(N.sqrt(N.pi)) * N.ones(probs.shape, N.float_)
+ c = N.log(a)
+ mask = N.zeros(probs.shape)
+ a_big = N.greater(a, BIG)
+ a_big_frozen = -1 * N.ones(probs.shape, N.float_)
+ totalelements = N.multiply.reduce(N.array(probs.shape))
+ while asum(mask) <> totalelements:
+ e = N.log(z) + e
+ s = s + ex(c * z - a - e)
+ z = z + 1.0
+ # print z, e, s
+ newmask = N.greater(z, chisq)
+ a_big_frozen = N.where(newmask * N.equal(mask, 0) * a_big, s,
+ a_big_frozen)
+ mask = N.clip(newmask + mask, 0, 1)
+ if even:
+ z = N.ones(probs.shape, N.float_)
+ e = N.ones(probs.shape, N.float_)
+ else:
+ z = 0.5 * N.ones(probs.shape, N.float_)
+ e = 1.0 / N.sqrt(N.pi) / N.sqrt(a) * N.ones(probs.shape, N.float_)
+ c = 0.0
+ mask = N.zeros(probs.shape)
+ a_notbig_frozen = -1 * N.ones(probs.shape, N.float_)
+ while asum(mask) <> totalelements:
+ e = e * (a / z.astype(N.float_))
+ c = c + e
+ z = z + 1.0
+ # print '#2', z, e, c, s, c*y+s2
+ newmask = N.greater(z, chisq)
+ a_notbig_frozen = N.where(newmask * N.equal(mask, 0) * (1 - a_big),
+ c * y + s2, a_notbig_frozen)
+ mask = N.clip(newmask + mask, 0, 1)
+ probs = N.where(
+ N.equal(probs, 1), 1, N.where(
+ N.greater(a, BIG), a_big_frozen, a_notbig_frozen))
+ return probs
else:
- return s
-
+ return s
- def aerfcc(x):
+ def aerfcc(x):
"""
Returns the complementary error function erfc(x) with fractional error
everywhere less than 1.2e-7. Adapted from Numerical Recipies. Can
@@ -3796,12 +3813,14 @@ handle multiple dimensions.
Usage: aerfcc(x)
"""
z = abs(x)
- t = 1.0 / (1.0+0.5*z)
- ans = t * N.exp(-z*z-1.26551223 + t*(1.00002368+t*(0.37409196+t*(0.09678418+t*(-0.18628806+t*(0.27886807+t*(-1.13520398+t*(1.48851587+t*(-0.82215223+t*0.17087277)))))))))
- return N.where(N.greater_equal(x,0), ans, 2.0-ans)
+ t = 1.0 / (1.0 + 0.5 * z)
+ ans = t * N.exp(-z * z - 1.26551223 + t * (1.00002368 + t * (
+ 0.37409196 + t * (0.09678418 + t * (-0.18628806 + t * (
+ 0.27886807 + t * (-1.13520398 + t * (1.48851587 + t * (
+ -0.82215223 + t * 0.17087277)))))))))
+ return N.where(N.greater_equal(x, 0), ans, 2.0 - ans)
-
- def azprob(z):
+ def azprob(z):
"""
Returns the area under the normal curve 'to the left of' the given z value.
Thus,
@@ -3812,77 +3831,77 @@ Adapted from z.c in Gary Perlman's |Stat. Can handle multiple dimensions.
Usage: azprob(z) where z is a z-value
"""
+
def yfunc(y):
- x = (((((((((((((-0.000045255659 * y
- +0.000152529290) * y -0.000019538132) * y
- -0.000676904986) * y +0.001390604284) * y
- -0.000794620820) * y -0.002034254874) * y
- +0.006549791214) * y -0.010557625006) * y
- +0.011630447319) * y -0.009279453341) * y
- +0.005353579108) * y -0.002141268741) * y
- +0.000535310849) * y +0.999936657524
- return x
+ x = (((((((
+ ((((((-0.000045255659 * y + 0.000152529290) * y - 0.000019538132) * y
+ - 0.000676904986) * y + 0.001390604284) * y - 0.000794620820) * y
+ - 0.002034254874) * y + 0.006549791214) * y - 0.010557625006) * y +
+ 0.011630447319) * y - 0.009279453341) * y + 0.005353579108) * y -
+ 0.002141268741) * y + 0.000535310849) * y + 0.999936657524
+ return x
def wfunc(w):
- x = ((((((((0.000124818987 * w
- -0.001075204047) * w +0.005198775019) * w
- -0.019198292004) * w +0.059054035642) * w
- -0.151968751364) * w +0.319152932694) * w
- -0.531923007300) * w +0.797884560593) * N.sqrt(w) * 2.0
- return x
-
- Z_MAX = 6.0 # maximum meaningful z-value
- x = N.zeros(z.shape,N.float_) # initialize
+ x = ((((((((0.000124818987 * w - 0.001075204047) * w + 0.005198775019) * w
+ - 0.019198292004) * w + 0.059054035642) * w - 0.151968751364) *
+ w + 0.319152932694) * w - 0.531923007300) * w +
+ 0.797884560593) * N.sqrt(w) * 2.0
+ return x
+
+ Z_MAX = 6.0 # maximum meaningful z-value
+ x = N.zeros(z.shape, N.float_) # initialize
y = 0.5 * N.fabs(z)
- x = N.where(N.less(y,1.0),wfunc(y*y),yfunc(y-2.0)) # get x's
- x = N.where(N.greater(y,Z_MAX*0.5),1.0,x) # kill those with big Z
- prob = N.where(N.greater(z,0),(x+1)*0.5,(1-x)*0.5)
+ x = N.where(N.less(y, 1.0), wfunc(y * y), yfunc(y - 2.0)) # get x's
+ x = N.where(N.greater(y, Z_MAX * 0.5), 1.0, x) # kill those with big Z
+ prob = N.where(N.greater(z, 0), (x + 1) * 0.5, (1 - x) * 0.5)
return prob
-
- def aksprob(alam):
- """
+ def aksprob(alam):
+ """
Returns the probability value for a K-S statistic computed via ks_2samp.
Adapted from Numerical Recipies. Can handle multiple dimensions.
Usage: aksprob(alam)
"""
- if type(alam) == N.ndarray:
- frozen = -1 *N.ones(alam.shape,N.float64)
- alam = alam.astype(N.float64)
- arrayflag = 1
- else:
- frozen = N.array(-1.)
- alam = N.array(alam,N.float64)
- arrayflag = 1
- mask = N.zeros(alam.shape)
- fac = 2.0 *N.ones(alam.shape,N.float_)
- sum = N.zeros(alam.shape,N.float_)
- termbf = N.zeros(alam.shape,N.float_)
- a2 = N.array(-2.0*alam*alam,N.float64)
- totalelements = N.multiply.reduce(N.array(mask.shape))
- for j in range(1,201):
- if asum(mask) == totalelements:
- break
- exponents = (a2*j*j)
- overflowmask = N.less(exponents,-746)
- frozen = N.where(overflowmask,0,frozen)
- mask = mask+overflowmask
- term = fac*N.exp(exponents)
- sum = sum + term
- newmask = N.where(N.less_equal(abs(term),(0.001*termbf)) +
- N.less(abs(term),1.0e-8*sum), 1, 0)
- frozen = N.where(newmask*N.equal(mask,0), sum, frozen)
- mask = N.clip(mask+newmask,0,1)
- fac = -fac
- termbf = abs(term)
- if arrayflag:
- return N.where(N.equal(frozen,-1), 1.0, frozen) # 1.0 if doesn't converge
- else:
- return N.where(N.equal(frozen,-1), 1.0, frozen)[0] # 1.0 if doesn't converge
-
-
- def afprob (dfnum, dfden, F):
+ if type(alam) == N.ndarray:
+ frozen = -1 * N.ones(alam.shape, N.float64)
+ alam = alam.astype(N.float64)
+ arrayflag = 1
+ else:
+ frozen = N.array(-1.)
+ alam = N.array(alam, N.float64)
+ arrayflag = 1
+ mask = N.zeros(alam.shape)
+ fac = 2.0 * N.ones(alam.shape, N.float_)
+ sum = N.zeros(alam.shape, N.float_)
+ termbf = N.zeros(alam.shape, N.float_)
+ a2 = N.array(-2.0 * alam * alam, N.float64)
+ totalelements = N.multiply.reduce(N.array(mask.shape))
+ for j in range(1, 201):
+ if asum(mask) == totalelements:
+ break
+ exponents = (a2 * j * j)
+ overflowmask = N.less(exponents, -746)
+ frozen = N.where(overflowmask, 0, frozen)
+ mask = mask + overflowmask
+ term = fac * N.exp(exponents)
+ sum = sum + term
+ newmask = N.where(
+ N.less_equal(
+ abs(term), (0.001 * termbf)) + N.less(
+ abs(term), 1.0e-8 * sum), 1, 0)
+ frozen = N.where(newmask * N.equal(mask, 0), sum, frozen)
+ mask = N.clip(mask + newmask, 0, 1)
+ fac = -fac
+ termbf = abs(term)
+ if arrayflag:
+ return N.where(
+ N.equal(frozen, -1), 1.0, frozen) # 1.0 if doesn't converge
+ else:
+ return N.where(
+ N.equal(frozen, -1), 1.0, frozen)[0] # 1.0 if doesn't converge
+
+ def afprob(dfnum, dfden, F):
"""
Returns the 1-tailed significance level (p-value) of an F statistic
given the degrees of freedom for the numerator (dfR-dfF) and the degrees
@@ -3891,12 +3910,11 @@ of freedom for the denominator (dfF). Can handle multiple dims for F.
Usage: afprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn
"""
if type(F) == N.ndarray:
- return abetai(0.5*dfden, 0.5*dfnum, dfden/(1.0*dfden+dfnum*F))
+ return abetai(0.5 * dfden, 0.5 * dfnum, dfden / (1.0 * dfden + dfnum * F))
else:
- return abetai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F))
-
+ return abetai(0.5 * dfden, 0.5 * dfnum, dfden / float(dfden + dfnum * F))
- def abetacf(a,b,x,verbose=1):
+ def abetacf(a, b, x, verbose=1):
"""
Evaluates the continued fraction form of the incomplete Beta function,
betai. (Adapted from: Numerical Recipies in C.) Can handle multiple
@@ -3909,46 +3927,46 @@ Usage: abetacf(a,b,x,verbose=1)
arrayflag = 1
if type(x) == N.ndarray:
- frozen = N.ones(x.shape,N.float_) *-1 #start out w/ -1s, should replace all
+ frozen = N.ones(x.shape,
+ N.float_) * -1 #start out w/ -1s, should replace all
else:
- arrayflag = 0
- frozen = N.array([-1])
- x = N.array([x])
+ arrayflag = 0
+ frozen = N.array([-1])
+ x = N.array([x])
mask = N.zeros(x.shape)
bm = az = am = 1.0
- qab = a+b
- qap = a+1.0
- qam = a-1.0
- bz = 1.0-qab*x/qap
- for i in range(ITMAX+1):
- if N.sum(N.ravel(N.equal(frozen,-1)))==0:
- break
- em = float(i+1)
- tem = em + em
- d = em*(b-em)*x/((qam+tem)*(a+tem))
- ap = az + d*am
- bp = bz+d*bm
- d = -(a+em)*(qab+em)*x/((qap+tem)*(a+tem))
- app = ap+d*az
- bpp = bp+d*bz
- aold = az*1
- am = ap/bpp
- bm = bp/bpp
- az = app/bpp
- bz = 1.0
- newmask = N.less(abs(az-aold),EPS*abs(az))
- frozen = N.where(newmask*N.equal(mask,0), az, frozen)
- mask = N.clip(mask+newmask,0,1)
- noconverge = asum(N.equal(frozen,-1))
+ qab = a + b
+ qap = a + 1.0
+ qam = a - 1.0
+ bz = 1.0 - qab * x / qap
+ for i in range(ITMAX + 1):
+ if N.sum(N.ravel(N.equal(frozen, -1))) == 0:
+ break
+ em = float(i + 1)
+ tem = em + em
+ d = em * (b - em) * x / ((qam + tem) * (a + tem))
+ ap = az + d * am
+ bp = bz + d * bm
+ d = -(a + em) * (qab + em) * x / ((qap + tem) * (a + tem))
+ app = ap + d * az
+ bpp = bp + d * bz
+ aold = az * 1
+ am = ap / bpp
+ bm = bp / bpp
+ az = app / bpp
+ bz = 1.0
+ newmask = N.less(abs(az - aold), EPS * abs(az))
+ frozen = N.where(newmask * N.equal(mask, 0), az, frozen)
+ mask = N.clip(mask + newmask, 0, 1)
+ noconverge = asum(N.equal(frozen, -1))
if noconverge <> 0 and verbose:
- print 'a or b too big, or ITMAX too small in Betacf for ',noconverge,' elements'
+ print 'a or b too big, or ITMAX too small in Betacf for ', noconverge, ' elements'
if arrayflag:
- return frozen
+ return frozen
else:
- return frozen[0]
+ return frozen[0]
-
- def agammln(xx):
+ def agammln(xx):
"""
Returns the gamma function of xx.
Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
@@ -3961,15 +3979,14 @@ Usage: agammln(xx)
0.120858003e-2, -0.536382e-5]
x = xx - 1.0
tmp = x + 5.5
- tmp = tmp - (x+0.5)*N.log(tmp)
+ tmp = tmp - (x + 0.5) * N.log(tmp)
ser = 1.0
for j in range(len(coeff)):
- x = x + 1
- ser = ser + coeff[j]/x
- return -tmp + N.log(2.50662827465*ser)
-
+ x = x + 1
+ ser = ser + coeff[j] / x
+ return -tmp + N.log(2.50662827465 * ser)
- def abetai(a,b,x,verbose=1):
+ def abetai(a, b, x, verbose=1):
"""
Returns the incomplete beta function:
@@ -3984,37 +4001,36 @@ Usage: abetai(a,b,x,verbose=1)
"""
TINY = 1e-15
if type(a) == N.ndarray:
- if asum(N.less(x,0)+N.greater(x,1)) <> 0:
- raise ValueError, 'Bad x in abetai'
- x = N.where(N.equal(x,0),TINY,x)
- x = N.where(N.equal(x,1.0),1-TINY,x)
-
- bt = N.where(N.equal(x,0)+N.equal(x,1), 0, -1)
- exponents = ( gammln(a+b)-gammln(a)-gammln(b)+a*N.log(x)+b*
- N.log(1.0-x) )
+ if asum(N.less(x, 0) + N.greater(x, 1)) <> 0:
+ raise ValueError, 'Bad x in abetai'
+ x = N.where(N.equal(x, 0), TINY, x)
+ x = N.where(N.equal(x, 1.0), 1 - TINY, x)
+
+ bt = N.where(N.equal(x, 0) + N.equal(x, 1), 0, -1)
+ exponents = (gammln(a + b) - gammln(a) - gammln(b) + a * N.log(x) + b *
+ N.log(1.0 - x))
# 746 (below) is the MAX POSSIBLE BEFORE OVERFLOW
- exponents = N.where(N.less(exponents,-740),-740,exponents)
+ exponents = N.where(N.less(exponents, -740), -740, exponents)
bt = N.exp(exponents)
if type(x) == N.ndarray:
- ans = N.where(N.less(x,(a+1)/(a+b+2.0)),
- bt*abetacf(a,b,x,verbose)/float(a),
- 1.0-bt*abetacf(b,a,1.0-x,verbose)/float(b))
+ ans = N.where(
+ N.less(x, (a + 1) / (a + b + 2.0)), bt * abetacf(a, b, x, verbose) /
+ float(a), 1.0 - bt * abetacf(b, a, 1.0 - x, verbose) / float(b))
else:
- if x<(a+1)/(a+b+2.0):
- ans = bt*abetacf(a,b,x,verbose)/float(a)
- else:
- ans = 1.0-bt*abetacf(b,a,1.0-x,verbose)/float(b)
+ if x < (a + 1) / (a + b + 2.0):
+ ans = bt * abetacf(a, b, x, verbose) / float(a)
+ else:
+ ans = 1.0 - bt * abetacf(b, a, 1.0 - x, verbose) / float(b)
return ans
-
#####################################
####### AANOVA CALCULATIONS #######
#####################################
- import numpy.linalg, operator
- LA = numpy.linalg
+ import numpy.linalg, operator
+ LA = numpy.linalg
- def aglm(data,para):
+ def aglm(data, para):
"""
Calculates a linear model fit ... anova/ancova/lin-regress/t-test/etc. Taken
from:
@@ -4026,29 +4042,31 @@ Usage: aglm(data,para)
Returns: statistic, p-value ???
"""
if len(para) <> len(data):
- print "data and para must be same length in aglm"
- return
+ print 'data and para must be same length in aglm'
+ return
n = len(para)
p = pstat.aunique(para)
- x = N.zeros((n,len(p))) # design matrix
+ x = N.zeros((n, len(p))) # design matrix
for l in range(len(p)):
- x[:,l] = N.equal(para,p[l])
- b = N.dot(N.dot(LA.inv(N.dot(N.transpose(x),x)), # i.e., b=inv(X'X)X'Y
- N.transpose(x)),
- data)
- diffs = (data - N.dot(x,b))
- s_sq = 1./(n-len(p)) * N.dot(N.transpose(diffs), diffs)
+ x[:, l] = N.equal(para, p[l])
+ b = N.dot(
+ N.dot(
+ LA.inv(N.dot(
+ N.transpose(x), x)), # i.e., b=inv(X'X)X'Y
+ N.transpose(x)),
+ data)
+ diffs = (data - N.dot(x, b))
+ s_sq = 1. / (n - len(p)) * N.dot(N.transpose(diffs), diffs)
if len(p) == 2: # ttest_ind
- c = N.array([1,-1])
- df = n-2
- fact = asum(1.0/asum(x,0)) # i.e., 1/n1 + 1/n2 + 1/n3 ...
- t = N.dot(c,b) / N.sqrt(s_sq*fact)
- probs = abetai(0.5*df,0.5,float(df)/(df+t*t))
- return t, probs
-
+ c = N.array([1, -1])
+ df = n - 2
+ fact = asum(1.0 / asum(x, 0)) # i.e., 1/n1 + 1/n2 + 1/n3 ...
+ t = N.dot(c, b) / N.sqrt(s_sq * fact)
+ probs = abetai(0.5 * df, 0.5, float(df) / (df + t * t))
+ return t, probs
- def aF_oneway(*args):
+ def aF_oneway(*args):
"""
Performs a 1-way ANOVA, returning an F-value and probability given
any number of groups. From Heiman, pp.394-7.
@@ -4057,33 +4075,32 @@ Usage: aF_oneway (*args) where *args is 2 or more arrays, one per
treatment group
Returns: f-value, probability
"""
- na = len(args) # ANOVA on 'na' groups, each in it's own array
- means = [0]*na
- vars = [0]*na
- ns = [0]*na
+ na = len(args) # ANOVA on 'na' groups, each in it's own array
+ means = [0] * na
+ vars = [0] * na
+ ns = [0] * na
alldata = []
- tmp = map(N.array,args)
- means = map(amean,tmp)
- vars = map(avar,tmp)
- ns = map(len,args)
+ tmp = map(N.array, args)
+ means = map(amean, tmp)
+ vars = map(avar, tmp)
+ ns = map(len, args)
alldata = N.concatenate(args)
bign = len(alldata)
- sstot = ass(alldata)-(asquare_of_sums(alldata)/float(bign))
+ sstot = ass(alldata) - (asquare_of_sums(alldata) / float(bign))
ssbn = 0
for a in args:
- ssbn = ssbn + asquare_of_sums(N.array(a))/float(len(a))
- ssbn = ssbn - (asquare_of_sums(alldata)/float(bign))
- sswn = sstot-ssbn
- dfbn = na-1
+ ssbn = ssbn + asquare_of_sums(N.array(a)) / float(len(a))
+ ssbn = ssbn - (asquare_of_sums(alldata) / float(bign))
+ sswn = sstot - ssbn
+ dfbn = na - 1
dfwn = bign - na
- msb = ssbn/float(dfbn)
- msw = sswn/float(dfwn)
- f = msb/msw
- prob = fprob(dfbn,dfwn,f)
+ msb = ssbn / float(dfbn)
+ msw = sswn / float(dfwn)
+ f = msb / msw
+ prob = fprob(dfbn, dfwn, f)
return f, prob
-
- def aF_value (ER,EF,dfR,dfF):
+ def aF_value(ER, EF, dfR, dfF):
"""
Returns an F-statistic given the following:
ER = error associated with the null hypothesis (the Restricted model)
@@ -4091,29 +4108,30 @@ Returns an F-statistic given the following:
dfR = degrees of freedom the Restricted model
dfF = degrees of freedom associated with the Restricted model
"""
- return ((ER-EF)/float(dfR-dfF) / (EF/float(dfF)))
-
-
- def outputfstats(Enum, Eden, dfnum, dfden, f, prob):
- Enum = round(Enum,3)
- Eden = round(Eden,3)
- dfnum = round(Enum,3)
- dfden = round(dfden,3)
- f = round(f,3)
- prob = round(prob,3)
- suffix = '' # for *s after the p-value
- if prob < 0.001: suffix = ' ***'
- elif prob < 0.01: suffix = ' **'
- elif prob < 0.05: suffix = ' *'
- title = [['EF/ER','DF','Mean Square','F-value','prob','']]
- lofl = title+[[Enum, dfnum, round(Enum/float(dfnum),3), f, prob, suffix],
- [Eden, dfden, round(Eden/float(dfden),3),'','','']]
- pstat.printcc(lofl)
- return
-
-
- def F_value_multivariate(ER, EF, dfnum, dfden):
- """
+ return ((ER - EF) / float(dfR - dfF) / (EF / float(dfF)))
+
+ def outputfstats(Enum, Eden, dfnum, dfden, f, prob):
+ Enum = round(Enum, 3)
+ Eden = round(Eden, 3)
+ dfnum = round(Enum, 3)
+ dfden = round(dfden, 3)
+ f = round(f, 3)
+ prob = round(prob, 3)
+ suffix = '' # for *s after the p-value
+ if prob < 0.001:
+ suffix = ' ***'
+ elif prob < 0.01:
+ suffix = ' **'
+ elif prob < 0.05:
+ suffix = ' *'
+ title = [['EF/ER', 'DF', 'Mean Square', 'F-value', 'prob', '']]
+ lofl = title + [[Enum, dfnum, round(Enum / float(dfnum), 3), f, prob, suffix
+ ], [Eden, dfden, round(Eden / float(dfden), 3), '', '', '']]
+ pstat.printcc(lofl)
+ return
+
+ def F_value_multivariate(ER, EF, dfnum, dfden):
+ """
Returns an F-statistic given the following:
ER = error associated with the null hypothesis (the Restricted model)
EF = error associated with the alternate hypothesis (the Full model)
@@ -4121,33 +4139,31 @@ Returns an F-statistic given the following:
dfF = degrees of freedom associated with the Restricted model
where ER and EF are matrices from a multivariate F calculation.
"""
- if type(ER) in [IntType, FloatType]:
- ER = N.array([[ER]])
- if type(EF) in [IntType, FloatType]:
- EF = N.array([[EF]])
- n_um = (LA.det(ER) - LA.det(EF)) / float(dfnum)
- d_en = LA.det(EF) / float(dfden)
- return n_um / d_en
-
+ if type(ER) in [IntType, FloatType]:
+ ER = N.array([[ER]])
+ if type(EF) in [IntType, FloatType]:
+ EF = N.array([[EF]])
+ n_um = (LA.det(ER) - LA.det(EF)) / float(dfnum)
+ d_en = LA.det(EF) / float(dfden)
+ return n_um / d_en
#####################################
####### ASUPPORT FUNCTIONS ########
#####################################
- def asign(a):
+ def asign(a):
"""
Usage: asign(a)
Returns: array shape of a, with -1 where a<0 and +1 where a>=0
"""
a = N.asarray(a)
if ((type(a) == type(1.4)) or (type(a) == type(1))):
- return a-a-N.less(a,0)+N.greater(a,0)
+ return a - a - N.less(a, 0) + N.greater(a, 0)
else:
- return N.zeros(N.shape(a))-N.less(a,0)+N.greater(a,0)
-
+ return N.zeros(N.shape(a)) - N.less(a, 0) + N.greater(a, 0)
- def asum (a, dimension=None,keepdims=0):
- """
+ def asum(a, dimension=None, keepdims=0):
+ """
An alternative to the Numeric.add.reduce function, which allows one to
(1) collapse over multiple dimensions at once, and/or (2) to retain
all dimensions in the original array (squashing one down to size.
@@ -4159,32 +4175,31 @@ dimensions as the input array.
Usage: asum(a, dimension=None, keepdims=0)
Returns: array summed along 'dimension'(s), same _number_ of dims if keepdims=1
"""
- if type(a) == N.ndarray and a.dtype in [N.int_, N.short, N.ubyte]:
- a = a.astype(N.float_)
- if dimension == None:
- s = N.sum(N.ravel(a))
- elif type(dimension) in [IntType,FloatType]:
- s = N.add.reduce(a, dimension)
- if keepdims == 1:
- shp = list(a.shape)
- shp[dimension] = 1
- s = N.reshape(s,shp)
- else: # must be a SEQUENCE of dims to sum over
- dims = list(dimension)
- dims.sort()
- dims.reverse()
- s = a *1.0
+ if type(a) == N.ndarray and a.dtype in [N.int_, N.short, N.ubyte]:
+ a = a.astype(N.float_)
+ if dimension == None:
+ s = N.sum(N.ravel(a))
+ elif type(dimension) in [IntType, FloatType]:
+ s = N.add.reduce(a, dimension)
+ if keepdims == 1:
+ shp = list(a.shape)
+ shp[dimension] = 1
+ s = N.reshape(s, shp)
+ else: # must be a SEQUENCE of dims to sum over
+ dims = list(dimension)
+ dims.sort()
+ dims.reverse()
+ s = a * 1.0
+ for dim in dims:
+ s = N.add.reduce(s, dim)
+ if keepdims == 1:
+ shp = list(a.shape)
for dim in dims:
- s = N.add.reduce(s,dim)
- if keepdims == 1:
- shp = list(a.shape)
- for dim in dims:
- shp[dim] = 1
- s = N.reshape(s,shp)
- return s
-
+ shp[dim] = 1
+ s = N.reshape(s, shp)
+ return s
- def acumsum (a,dimension=None):
+ def acumsum(a, dimension=None):
"""
Returns an array consisting of the cumulative sum of the items in the
passed array. Dimension can equal None (ravel array first), an
@@ -4194,20 +4209,19 @@ over multiple dimensions, but this last one just barely makes sense).
Usage: acumsum(a,dimension=None)
"""
if dimension == None:
- a = N.ravel(a)
- dimension = 0
+ a = N.ravel(a)
+ dimension = 0
if type(dimension) in [ListType, TupleType, N.ndarray]:
- dimension = list(dimension)
- dimension.sort()
- dimension.reverse()
- for d in dimension:
- a = N.add.accumulate(a,d)
- return a
+ dimension = list(dimension)
+ dimension.sort()
+ dimension.reverse()
+ for d in dimension:
+ a = N.add.accumulate(a, d)
+ return a
else:
- return N.add.accumulate(a,dimension)
+ return N.add.accumulate(a, dimension)
-
- def ass(inarray, dimension=None, keepdims=0):
+ def ass(inarray, dimension=None, keepdims=0):
"""
Squares each value in the passed array, adds these squares & returns
the result. Unfortunate function name. :-) Defaults to ALL values in
@@ -4220,12 +4234,11 @@ Usage: ass(inarray, dimension=None, keepdims=0)
Returns: sum-along-'dimension' for (inarray*inarray)
"""
if dimension == None:
- inarray = N.ravel(inarray)
- dimension = 0
- return asum(inarray*inarray,dimension,keepdims)
-
+ inarray = N.ravel(inarray)
+ dimension = 0
+ return asum(inarray * inarray, dimension, keepdims)
- def asummult (array1,array2,dimension=None,keepdims=0):
+ def asummult(array1, array2, dimension=None, keepdims=0):
"""
Multiplies elements in array1 and array2, element by element, and
returns the sum (along 'dimension') of all resulting multiplications.
@@ -4236,13 +4249,12 @@ dimensions). A trivial function, but included for completeness.
Usage: asummult(array1,array2,dimension=None,keepdims=0)
"""
if dimension == None:
- array1 = N.ravel(array1)
- array2 = N.ravel(array2)
- dimension = 0
- return asum(array1*array2,dimension,keepdims)
-
+ array1 = N.ravel(array1)
+ array2 = N.ravel(array2)
+ dimension = 0
+ return asum(array1 * array2, dimension, keepdims)
- def asquare_of_sums(inarray, dimension=None, keepdims=0):
+ def asquare_of_sums(inarray, dimension=None, keepdims=0):
"""
Adds the values in the passed array, squares that sum, and returns the
result. Dimension can equal None (ravel array first), an integer (the
@@ -4254,16 +4266,15 @@ Usage: asquare_of_sums(inarray, dimension=None, keepdims=0)
Returns: the square of the sum over dim(s) in dimension
"""
if dimension == None:
- inarray = N.ravel(inarray)
- dimension = 0
- s = asum(inarray,dimension,keepdims)
+ inarray = N.ravel(inarray)
+ dimension = 0
+ s = asum(inarray, dimension, keepdims)
if type(s) == N.ndarray:
- return s.astype(N.float_)*s
+ return s.astype(N.float_) * s
else:
- return float(s)*s
+ return float(s) * s
-
- def asumdiffsquared(a,b, dimension=None, keepdims=0):
+ def asumdiffsquared(a, b, dimension=None, keepdims=0):
"""
Takes pairwise differences of the values in arrays a and b, squares
these differences, and returns the sum of these squares. Dimension
@@ -4275,12 +4286,11 @@ Usage: asumdiffsquared(a,b)
Returns: sum[ravel(a-b)**2]
"""
if dimension == None:
- inarray = N.ravel(a)
- dimension = 0
- return asum((a-b)**2,dimension,keepdims)
-
+ inarray = N.ravel(a)
+ dimension = 0
+ return asum((a - b)**2, dimension, keepdims)
- def ashellsort(inarray):
+ def ashellsort(inarray):
"""
Shellsort algorithm. Sorts a 1D-array.
@@ -4288,25 +4298,24 @@ Usage: ashellsort(inarray)
Returns: sorted-inarray, sorting-index-vector (for original array)
"""
n = len(inarray)
- svec = inarray *1.0
+ svec = inarray * 1.0
ivec = range(n)
- gap = n/2 # integer division needed
- while gap >0:
- for i in range(gap,n):
- for j in range(i-gap,-1,-gap):
- while j>=0 and svec[j]>svec[j+gap]:
- temp = svec[j]
- svec[j] = svec[j+gap]
- svec[j+gap] = temp
- itemp = ivec[j]
- ivec[j] = ivec[j+gap]
- ivec[j+gap] = itemp
- gap = gap / 2 # integer division needed
+ gap = n / 2 # integer division needed
+ while gap > 0:
+ for i in range(gap, n):
+ for j in range(i - gap, -1, -gap):
+ while j >= 0 and svec[j] > svec[j + gap]:
+ temp = svec[j]
+ svec[j] = svec[j + gap]
+ svec[j + gap] = temp
+ itemp = ivec[j]
+ ivec[j] = ivec[j + gap]
+ ivec[j + gap] = itemp
+ gap = gap / 2 # integer division needed
# svec is now sorted input vector, ivec has the order svec[i] = vec[ivec[i]]
return svec, ivec
-
- def arankdata(inarray):
+ def arankdata(inarray):
"""
Ranks the data in inarray, dealing with ties appropritely. Assumes
a 1D inarray. Adapted from Gary Perlman's |Stat ranksort.
@@ -4318,20 +4327,19 @@ Returns: array of length equal to inarray, containing rank scores
svec, ivec = ashellsort(inarray)
sumranks = 0
dupcount = 0
- newarray = N.zeros(n,N.float_)
+ newarray = N.zeros(n, N.float_)
for i in range(n):
- sumranks = sumranks + i
- dupcount = dupcount + 1
- if i==n-1 or svec[i] <> svec[i+1]:
- averank = sumranks / float(dupcount) + 1
- for j in range(i-dupcount+1,i+1):
- newarray[ivec[j]] = averank
- sumranks = 0
- dupcount = 0
+ sumranks = sumranks + i
+ dupcount = dupcount + 1
+ if i == n - 1 or svec[i] <> svec[i + 1]:
+ averank = sumranks / float(dupcount) + 1
+ for j in range(i - dupcount + 1, i + 1):
+ newarray[ivec[j]] = averank
+ sumranks = 0
+ dupcount = 0
return newarray
-
- def afindwithin(data):
+ def afindwithin(data):
"""
Returns a binary vector, 1=within-subject factor, 0=between. Input
equals the entire data array (i.e., column 1=random factor, last
@@ -4339,190 +4347,171 @@ column = measured values.
Usage: afindwithin(data) data in |Stat format
"""
- numfact = len(data[0])-2
- withinvec = [0]*numfact
- for col in range(1,numfact+1):
- rows = pstat.linexand(data,col,pstat.unique(pstat.colex(data,1))[0]) # get 1 level of this factor
- if len(pstat.unique(pstat.colex(rows,0))) < len(rows): # if fewer subjects than scores on this factor
- withinvec[col-1] = 1
+ numfact = len(data[0]) - 2
+ withinvec = [0] * numfact
+ for col in range(1, numfact + 1):
+ rows = pstat.linexand(data, col, pstat.unique(pstat.colex(data, 1))[0]
+ ) # get 1 level of this factor
+ if len(pstat.unique(pstat.colex(rows, 0))) < len(
+ rows): # if fewer subjects than scores on this factor
+ withinvec[col - 1] = 1
return withinvec
-
- #########################################################
- #########################################################
- ###### RE-DEFINE DISPATCHES TO INCLUDE ARRAYS #########
- #########################################################
- #########################################################
-
-## CENTRAL TENDENCY:
- geometricmean = Dispatch ( (lgeometricmean, (ListType, TupleType)),
- (ageometricmean, (N.ndarray,)) )
- harmonicmean = Dispatch ( (lharmonicmean, (ListType, TupleType)),
- (aharmonicmean, (N.ndarray,)) )
- mean = Dispatch ( (lmean, (ListType, TupleType)),
- (amean, (N.ndarray,)) )
- median = Dispatch ( (lmedian, (ListType, TupleType)),
- (amedian, (N.ndarray,)) )
- medianscore = Dispatch ( (lmedianscore, (ListType, TupleType)),
- (amedianscore, (N.ndarray,)) )
- mode = Dispatch ( (lmode, (ListType, TupleType)),
- (amode, (N.ndarray,)) )
- tmean = Dispatch ( (atmean, (N.ndarray,)) )
- tvar = Dispatch ( (atvar, (N.ndarray,)) )
- tstdev = Dispatch ( (atstdev, (N.ndarray,)) )
- tsem = Dispatch ( (atsem, (N.ndarray,)) )
-
-## VARIATION:
- moment = Dispatch ( (lmoment, (ListType, TupleType)),
- (amoment, (N.ndarray,)) )
- variation = Dispatch ( (lvariation, (ListType, TupleType)),
- (avariation, (N.ndarray,)) )
- skew = Dispatch ( (lskew, (ListType, TupleType)),
- (askew, (N.ndarray,)) )
- kurtosis = Dispatch ( (lkurtosis, (ListType, TupleType)),
- (akurtosis, (N.ndarray,)) )
- describe = Dispatch ( (ldescribe, (ListType, TupleType)),
- (adescribe, (N.ndarray,)) )
-
-## DISTRIBUTION TESTS
-
- skewtest = Dispatch ( (askewtest, (ListType, TupleType)),
- (askewtest, (N.ndarray,)) )
- kurtosistest = Dispatch ( (akurtosistest, (ListType, TupleType)),
- (akurtosistest, (N.ndarray,)) )
- normaltest = Dispatch ( (anormaltest, (ListType, TupleType)),
- (anormaltest, (N.ndarray,)) )
-
-## FREQUENCY STATS:
- itemfreq = Dispatch ( (litemfreq, (ListType, TupleType)),
- (aitemfreq, (N.ndarray,)) )
- scoreatpercentile = Dispatch ( (lscoreatpercentile, (ListType, TupleType)),
- (ascoreatpercentile, (N.ndarray,)) )
- percentileofscore = Dispatch ( (lpercentileofscore, (ListType, TupleType)),
- (apercentileofscore, (N.ndarray,)) )
- histogram = Dispatch ( (lhistogram, (ListType, TupleType)),
- (ahistogram, (N.ndarray,)) )
- cumfreq = Dispatch ( (lcumfreq, (ListType, TupleType)),
- (acumfreq, (N.ndarray,)) )
- relfreq = Dispatch ( (lrelfreq, (ListType, TupleType)),
- (arelfreq, (N.ndarray,)) )
-
-## VARIABILITY:
- obrientransform = Dispatch ( (lobrientransform, (ListType, TupleType)),
- (aobrientransform, (N.ndarray,)) )
- samplevar = Dispatch ( (lsamplevar, (ListType, TupleType)),
- (asamplevar, (N.ndarray,)) )
- samplestdev = Dispatch ( (lsamplestdev, (ListType, TupleType)),
- (asamplestdev, (N.ndarray,)) )
- signaltonoise = Dispatch( (asignaltonoise, (N.ndarray,)),)
- var = Dispatch ( (lvar, (ListType, TupleType)),
- (avar, (N.ndarray,)) )
- stdev = Dispatch ( (lstdev, (ListType, TupleType)),
- (astdev, (N.ndarray,)) )
- sterr = Dispatch ( (lsterr, (ListType, TupleType)),
- (asterr, (N.ndarray,)) )
- sem = Dispatch ( (lsem, (ListType, TupleType)),
- (asem, (N.ndarray,)) )
- z = Dispatch ( (lz, (ListType, TupleType)),
- (az, (N.ndarray,)) )
- zs = Dispatch ( (lzs, (ListType, TupleType)),
- (azs, (N.ndarray,)) )
-
-## TRIMMING FCNS:
- threshold = Dispatch( (athreshold, (N.ndarray,)),)
- trimboth = Dispatch ( (ltrimboth, (ListType, TupleType)),
- (atrimboth, (N.ndarray,)) )
- trim1 = Dispatch ( (ltrim1, (ListType, TupleType)),
- (atrim1, (N.ndarray,)) )
-
-## CORRELATION FCNS:
- paired = Dispatch ( (lpaired, (ListType, TupleType)),
- (apaired, (N.ndarray,)) )
- lincc = Dispatch ( (llincc, (ListType, TupleType)),
- (alincc, (N.ndarray,)) )
- pearsonr = Dispatch ( (lpearsonr, (ListType, TupleType)),
- (apearsonr, (N.ndarray,)) )
- spearmanr = Dispatch ( (lspearmanr, (ListType, TupleType)),
- (aspearmanr, (N.ndarray,)) )
- pointbiserialr = Dispatch ( (lpointbiserialr, (ListType, TupleType)),
- (apointbiserialr, (N.ndarray,)) )
- kendalltau = Dispatch ( (lkendalltau, (ListType, TupleType)),
- (akendalltau, (N.ndarray,)) )
- linregress = Dispatch ( (llinregress, (ListType, TupleType)),
- (alinregress, (N.ndarray,)) )
-
-## INFERENTIAL STATS:
- ttest_1samp = Dispatch ( (lttest_1samp, (ListType, TupleType)),
- (attest_1samp, (N.ndarray,)) )
- ttest_ind = Dispatch ( (lttest_ind, (ListType, TupleType)),
- (attest_ind, (N.ndarray,)) )
- ttest_rel = Dispatch ( (lttest_rel, (ListType, TupleType)),
- (attest_rel, (N.ndarray,)) )
- chisquare = Dispatch ( (lchisquare, (ListType, TupleType)),
- (achisquare, (N.ndarray,)) )
- ks_2samp = Dispatch ( (lks_2samp, (ListType, TupleType)),
- (aks_2samp, (N.ndarray,)) )
- mannwhitneyu = Dispatch ( (lmannwhitneyu, (ListType, TupleType)),
- (amannwhitneyu, (N.ndarray,)) )
- tiecorrect = Dispatch ( (ltiecorrect, (ListType, TupleType)),
- (atiecorrect, (N.ndarray,)) )
- ranksums = Dispatch ( (lranksums, (ListType, TupleType)),
- (aranksums, (N.ndarray,)) )
- wilcoxont = Dispatch ( (lwilcoxont, (ListType, TupleType)),
- (awilcoxont, (N.ndarray,)) )
- kruskalwallish = Dispatch ( (lkruskalwallish, (ListType, TupleType)),
- (akruskalwallish, (N.ndarray,)) )
- friedmanchisquare = Dispatch ( (lfriedmanchisquare, (ListType, TupleType)),
- (afriedmanchisquare, (N.ndarray,)) )
-
-## PROBABILITY CALCS:
- chisqprob = Dispatch ( (lchisqprob, (IntType, FloatType)),
- (achisqprob, (N.ndarray,)) )
- zprob = Dispatch ( (lzprob, (IntType, FloatType)),
- (azprob, (N.ndarray,)) )
- ksprob = Dispatch ( (lksprob, (IntType, FloatType)),
- (aksprob, (N.ndarray,)) )
- fprob = Dispatch ( (lfprob, (IntType, FloatType)),
- (afprob, (N.ndarray,)) )
- betacf = Dispatch ( (lbetacf, (IntType, FloatType)),
- (abetacf, (N.ndarray,)) )
- betai = Dispatch ( (lbetai, (IntType, FloatType)),
- (abetai, (N.ndarray,)) )
- erfcc = Dispatch ( (lerfcc, (IntType, FloatType)),
- (aerfcc, (N.ndarray,)) )
- gammln = Dispatch ( (lgammln, (IntType, FloatType)),
- (agammln, (N.ndarray,)) )
-
-## ANOVA FUNCTIONS:
- F_oneway = Dispatch ( (lF_oneway, (ListType, TupleType)),
- (aF_oneway, (N.ndarray,)) )
- F_value = Dispatch ( (lF_value, (ListType, TupleType)),
- (aF_value, (N.ndarray,)) )
-
-## SUPPORT FUNCTIONS:
- incr = Dispatch ( (lincr, (ListType, TupleType, N.ndarray)), )
- sum = Dispatch ( (lsum, (ListType, TupleType)),
- (asum, (N.ndarray,)) )
- cumsum = Dispatch ( (lcumsum, (ListType, TupleType)),
- (acumsum, (N.ndarray,)) )
- ss = Dispatch ( (lss, (ListType, TupleType)),
- (ass, (N.ndarray,)) )
- summult = Dispatch ( (lsummult, (ListType, TupleType)),
- (asummult, (N.ndarray,)) )
- square_of_sums = Dispatch ( (lsquare_of_sums, (ListType, TupleType)),
- (asquare_of_sums, (N.ndarray,)) )
- sumdiffsquared = Dispatch ( (lsumdiffsquared, (ListType, TupleType)),
- (asumdiffsquared, (N.ndarray,)) )
- shellsort = Dispatch ( (lshellsort, (ListType, TupleType)),
- (ashellsort, (N.ndarray,)) )
- rankdata = Dispatch ( (lrankdata, (ListType, TupleType)),
- (arankdata, (N.ndarray,)) )
- findwithin = Dispatch ( (lfindwithin, (ListType, TupleType)),
- (afindwithin, (N.ndarray,)) )
+ #########################################################
+ #########################################################
+ ###### RE-DEFINE DISPATCHES TO INCLUDE ARRAYS #########
+ #########################################################
+ #########################################################
+
+ ## CENTRAL TENDENCY:
+ geometricmean = Dispatch(
+ (lgeometricmean, (ListType, TupleType)), (ageometricmean, (N.ndarray,)))
+ harmonicmean = Dispatch(
+ (lharmonicmean, (ListType, TupleType)), (aharmonicmean, (N.ndarray,)))
+ mean = Dispatch((lmean, (ListType, TupleType)), (amean, (N.ndarray,)))
+ median = Dispatch((lmedian, (ListType, TupleType)), (amedian, (N.ndarray,)))
+ medianscore = Dispatch(
+ (lmedianscore, (ListType, TupleType)), (amedianscore, (N.ndarray,)))
+ mode = Dispatch((lmode, (ListType, TupleType)), (amode, (N.ndarray,)))
+ tmean = Dispatch((atmean, (N.ndarray,)))
+ tvar = Dispatch((atvar, (N.ndarray,)))
+ tstdev = Dispatch((atstdev, (N.ndarray,)))
+ tsem = Dispatch((atsem, (N.ndarray,)))
+
+ ## VARIATION:
+ moment = Dispatch((lmoment, (ListType, TupleType)), (amoment, (N.ndarray,)))
+ variation = Dispatch(
+ (lvariation, (ListType, TupleType)), (avariation, (N.ndarray,)))
+ skew = Dispatch((lskew, (ListType, TupleType)), (askew, (N.ndarray,)))
+ kurtosis = Dispatch(
+ (lkurtosis, (ListType, TupleType)), (akurtosis, (N.ndarray,)))
+ describe = Dispatch(
+ (ldescribe, (ListType, TupleType)), (adescribe, (N.ndarray,)))
+
+ ## DISTRIBUTION TESTS
+
+ skewtest = Dispatch(
+ (askewtest, (ListType, TupleType)), (askewtest, (N.ndarray,)))
+ kurtosistest = Dispatch(
+ (akurtosistest, (ListType, TupleType)), (akurtosistest, (N.ndarray,)))
+ normaltest = Dispatch(
+ (anormaltest, (ListType, TupleType)), (anormaltest, (N.ndarray,)))
+
+ ## FREQUENCY STATS:
+ itemfreq = Dispatch(
+ (litemfreq, (ListType, TupleType)), (aitemfreq, (N.ndarray,)))
+ scoreatpercentile = Dispatch(
+ (lscoreatpercentile, (ListType, TupleType)), (ascoreatpercentile,
+ (N.ndarray,)))
+ percentileofscore = Dispatch(
+ (lpercentileofscore, (ListType, TupleType)), (apercentileofscore,
+ (N.ndarray,)))
+ histogram = Dispatch(
+ (lhistogram, (ListType, TupleType)), (ahistogram, (N.ndarray,)))
+ cumfreq = Dispatch(
+ (lcumfreq, (ListType, TupleType)), (acumfreq, (N.ndarray,)))
+ relfreq = Dispatch(
+ (lrelfreq, (ListType, TupleType)), (arelfreq, (N.ndarray,)))
+
+ ## VARIABILITY:
+ obrientransform = Dispatch(
+ (lobrientransform, (ListType, TupleType)), (aobrientransform,
+ (N.ndarray,)))
+ samplevar = Dispatch(
+ (lsamplevar, (ListType, TupleType)), (asamplevar, (N.ndarray,)))
+ samplestdev = Dispatch(
+ (lsamplestdev, (ListType, TupleType)), (asamplestdev, (N.ndarray,)))
+ signaltonoise = Dispatch((asignaltonoise, (N.ndarray,)),)
+ var = Dispatch((lvar, (ListType, TupleType)), (avar, (N.ndarray,)))
+ stdev = Dispatch((lstdev, (ListType, TupleType)), (astdev, (N.ndarray,)))
+ sterr = Dispatch((lsterr, (ListType, TupleType)), (asterr, (N.ndarray,)))
+ sem = Dispatch((lsem, (ListType, TupleType)), (asem, (N.ndarray,)))
+ z = Dispatch((lz, (ListType, TupleType)), (az, (N.ndarray,)))
+ zs = Dispatch((lzs, (ListType, TupleType)), (azs, (N.ndarray,)))
+
+ ## TRIMMING FCNS:
+ threshold = Dispatch((athreshold, (N.ndarray,)),)
+ trimboth = Dispatch(
+ (ltrimboth, (ListType, TupleType)), (atrimboth, (N.ndarray,)))
+ trim1 = Dispatch((ltrim1, (ListType, TupleType)), (atrim1, (N.ndarray,)))
+
+ ## CORRELATION FCNS:
+ paired = Dispatch((lpaired, (ListType, TupleType)), (apaired, (N.ndarray,)))
+ lincc = Dispatch((llincc, (ListType, TupleType)), (alincc, (N.ndarray,)))
+ pearsonr = Dispatch(
+ (lpearsonr, (ListType, TupleType)), (apearsonr, (N.ndarray,)))
+ spearmanr = Dispatch(
+ (lspearmanr, (ListType, TupleType)), (aspearmanr, (N.ndarray,)))
+ pointbiserialr = Dispatch(
+ (lpointbiserialr, (ListType, TupleType)), (apointbiserialr, (N.ndarray,)))
+ kendalltau = Dispatch(
+ (lkendalltau, (ListType, TupleType)), (akendalltau, (N.ndarray,)))
+ linregress = Dispatch(
+ (llinregress, (ListType, TupleType)), (alinregress, (N.ndarray,)))
+
+ ## INFERENTIAL STATS:
+ ttest_1samp = Dispatch(
+ (lttest_1samp, (ListType, TupleType)), (attest_1samp, (N.ndarray,)))
+ ttest_ind = Dispatch(
+ (lttest_ind, (ListType, TupleType)), (attest_ind, (N.ndarray,)))
+ ttest_rel = Dispatch(
+ (lttest_rel, (ListType, TupleType)), (attest_rel, (N.ndarray,)))
+ chisquare = Dispatch(
+ (lchisquare, (ListType, TupleType)), (achisquare, (N.ndarray,)))
+ ks_2samp = Dispatch(
+ (lks_2samp, (ListType, TupleType)), (aks_2samp, (N.ndarray,)))
+ mannwhitneyu = Dispatch(
+ (lmannwhitneyu, (ListType, TupleType)), (amannwhitneyu, (N.ndarray,)))
+ tiecorrect = Dispatch(
+ (ltiecorrect, (ListType, TupleType)), (atiecorrect, (N.ndarray,)))
+ ranksums = Dispatch(
+ (lranksums, (ListType, TupleType)), (aranksums, (N.ndarray,)))
+ wilcoxont = Dispatch(
+ (lwilcoxont, (ListType, TupleType)), (awilcoxont, (N.ndarray,)))
+ kruskalwallish = Dispatch(
+ (lkruskalwallish, (ListType, TupleType)), (akruskalwallish, (N.ndarray,)))
+ friedmanchisquare = Dispatch(
+ (lfriedmanchisquare, (ListType, TupleType)), (afriedmanchisquare,
+ (N.ndarray,)))
+
+ ## PROBABILITY CALCS:
+ chisqprob = Dispatch(
+ (lchisqprob, (IntType, FloatType)), (achisqprob, (N.ndarray,)))
+ zprob = Dispatch((lzprob, (IntType, FloatType)), (azprob, (N.ndarray,)))
+ ksprob = Dispatch((lksprob, (IntType, FloatType)), (aksprob, (N.ndarray,)))
+ fprob = Dispatch((lfprob, (IntType, FloatType)), (afprob, (N.ndarray,)))
+ betacf = Dispatch((lbetacf, (IntType, FloatType)), (abetacf, (N.ndarray,)))
+ betai = Dispatch((lbetai, (IntType, FloatType)), (abetai, (N.ndarray,)))
+ erfcc = Dispatch((lerfcc, (IntType, FloatType)), (aerfcc, (N.ndarray,)))
+ gammln = Dispatch((lgammln, (IntType, FloatType)), (agammln, (N.ndarray,)))
+
+ ## ANOVA FUNCTIONS:
+ F_oneway = Dispatch(
+ (lF_oneway, (ListType, TupleType)), (aF_oneway, (N.ndarray,)))
+ F_value = Dispatch(
+ (lF_value, (ListType, TupleType)), (aF_value, (N.ndarray,)))
+
+ ## SUPPORT FUNCTIONS:
+ incr = Dispatch((lincr, (ListType, TupleType, N.ndarray)),)
+ sum = Dispatch((lsum, (ListType, TupleType)), (asum, (N.ndarray,)))
+ cumsum = Dispatch((lcumsum, (ListType, TupleType)), (acumsum, (N.ndarray,)))
+ ss = Dispatch((lss, (ListType, TupleType)), (ass, (N.ndarray,)))
+ summult = Dispatch(
+ (lsummult, (ListType, TupleType)), (asummult, (N.ndarray,)))
+ square_of_sums = Dispatch(
+ (lsquare_of_sums, (ListType, TupleType)), (asquare_of_sums, (N.ndarray,)))
+ sumdiffsquared = Dispatch(
+ (lsumdiffsquared, (ListType, TupleType)), (asumdiffsquared, (N.ndarray,)))
+ shellsort = Dispatch(
+ (lshellsort, (ListType, TupleType)), (ashellsort, (N.ndarray,)))
+ rankdata = Dispatch(
+ (lrankdata, (ListType, TupleType)), (arankdata, (N.ndarray,)))
+ findwithin = Dispatch(
+ (lfindwithin, (ListType, TupleType)), (afindwithin, (N.ndarray,)))
###################### END OF NUMERIC FUNCTION BLOCK #####################
###################### END OF STATISTICAL FUNCTIONS ######################
except ImportError:
- pass
+ pass
diff --git a/utils/tabulator.py b/utils/tabulator.py
index 0a2eae65..eb7a8d39 100644
--- a/utils/tabulator.py
+++ b/utils/tabulator.py
@@ -1,9 +1,7 @@
-#!/usr/bin/python
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Table generating, analyzing and printing functions.
This defines several classes that are used to generate, analyze and print
@@ -63,7 +61,6 @@ table:
"""
-
import getpass
import math
import sys
@@ -100,9 +97,9 @@ class TableGenerator(object):
SORT_BY_VALUES = 2
SORT_BY_VALUES_DESC = 3
- MISSING_VALUE = "x"
+ MISSING_VALUE = 'x'
- def __init__(self, d, l, sort=SORT_BY_KEYS, key_name="keys"):
+ def __init__(self, d, l, sort=SORT_BY_KEYS, key_name='keys'):
self._runs = d
self._labels = l
self._sort = sort
@@ -147,7 +144,7 @@ class TableGenerator(object):
# pylint: disable=unnecessary-lambda
return sorted(keys, key=lambda x: self._GetHighestValue(x), reverse=True)
else:
- assert 0, "Unimplemented sort %s" % self._sort
+ assert 0, 'Unimplemented sort %s' % self._sort
def _GetKeys(self):
keys = self._AggregateKeys()
@@ -201,7 +198,7 @@ class TableGenerator(object):
row.append(v)
# If we got a 'unit' value, append the units name to the key name.
if unit:
- keyname = row[0] + " (%s) " % unit
+ keyname = row[0] + ' (%s) ' % unit
row[0] = keyname
table.append(row)
rows += 1
@@ -229,7 +226,7 @@ class Result(object):
# pylint: disable=unused-argument
def _Literal(self, cell, values, baseline_values):
- cell.value = " ".join([str(v) for v in values])
+ cell.value = ' '.join([str(v) for v in values])
def _ComputeFloat(self, cell, values, baseline_values):
self._Literal(cell, values, baseline_values)
@@ -242,13 +239,13 @@ class Result(object):
def _GetGmean(self, values):
if not values:
- return float("nan")
+ return float('nan')
if any([v < 0 for v in values]):
- return float("nan")
+ return float('nan')
if any([v == 0 for v in values]):
return 0.0
log_list = [math.log(v) for v in values]
- gmean_log = sum(log_list)/len(log_list)
+ gmean_log = sum(log_list) / len(log_list)
return math.exp(gmean_log)
def Compute(self, cell, values, baseline_values):
@@ -263,7 +260,7 @@ class Result(object):
all_floats = True
values = _StripNone(values)
if not values:
- cell.value = ""
+ cell.value = ''
return
if _AllFloat(values):
float_values = _GetFloats(values)
@@ -278,7 +275,7 @@ class Result(object):
all_floats = False
else:
if self.NeedsBaseline():
- cell.value = ""
+ cell.value = ''
return
float_baseline_values = None
if all_floats:
@@ -289,6 +286,7 @@ class Result(object):
class LiteralResult(Result):
+
def __init__(self, iteration=0):
super(LiteralResult, self).__init__()
self.iteration = iteration
@@ -297,7 +295,7 @@ class LiteralResult(Result):
try:
cell.value = values[self.iteration]
except IndexError:
- cell.value = "-"
+ cell.value = '-'
class NonEmptyCountResult(Result):
@@ -331,14 +329,16 @@ class NonEmptyCountResult(Result):
class StringMeanResult(Result):
+
def _ComputeString(self, cell, values, baseline_values):
if self._AllStringsSame(values):
cell.value = str(values[0])
else:
- cell.value = "?"
+ cell.value = '?'
class AmeanResult(StringMeanResult):
+
def _ComputeFloat(self, cell, values, baseline_values):
cell.value = numpy.mean(values)
@@ -348,6 +348,7 @@ class RawResult(Result):
class MinResult(Result):
+
def _ComputeFloat(self, cell, values, baseline_values):
cell.value = min(values)
@@ -355,10 +356,11 @@ class MinResult(Result):
if values:
cell.value = min(values)
else:
- cell.value = ""
+ cell.value = ''
class MaxResult(Result):
+
def _ComputeFloat(self, cell, values, baseline_values):
cell.value = max(values)
@@ -366,29 +368,33 @@ class MaxResult(Result):
if values:
cell.value = max(values)
else:
- cell.value = ""
+ cell.value = ''
class NumericalResult(Result):
+
def _ComputeString(self, cell, values, baseline_values):
- cell.value = "?"
+ cell.value = '?'
class StdResult(NumericalResult):
+
def _ComputeFloat(self, cell, values, baseline_values):
cell.value = numpy.std(values)
class CoeffVarResult(NumericalResult):
+
def _ComputeFloat(self, cell, values, baseline_values):
if numpy.mean(values) != 0.0:
- noise = numpy.abs(numpy.std(values)/numpy.mean(values))
+ noise = numpy.abs(numpy.std(values) / numpy.mean(values))
else:
noise = 0.0
cell.value = noise
class ComparisonResult(Result):
+
def NeedsBaseline(self):
return True
@@ -401,26 +407,28 @@ class ComparisonResult(Result):
baseline_value = baseline_values[0]
if value is not None and baseline_value is not None:
if value == baseline_value:
- cell.value = "SAME"
+ cell.value = 'SAME'
else:
- cell.value = "DIFFERENT"
+ cell.value = 'DIFFERENT'
else:
- cell.value = "?"
+ cell.value = '?'
class PValueResult(ComparisonResult):
+
def _ComputeFloat(self, cell, values, baseline_values):
if len(values) < 2 or len(baseline_values) < 2:
- cell.value = float("nan")
+ cell.value = float('nan')
return
import stats # pylint: disable=g-import-not-at-top
_, cell.value = stats.lttest_ind(values, baseline_values)
def _ComputeString(self, cell, values, baseline_values):
- return float("nan")
+ return float('nan')
class KeyAwareComparisonResult(ComparisonResult):
+
def _IsLowerBetter(self, key):
# TODO(llozano): Trying to guess direction by looking at the name of the
# test does not seem like a good idea. Test frameworks should provide this
@@ -443,27 +451,26 @@ class KeyAwareComparisonResult(ComparisonResult):
# --texture_upload_count--texture_upload_count--count (high is good)
# --total_deferred_image_decode_count--count (low is good)
# --total_tiles_analyzed--total_tiles_analyzed--count (high is good)
- lower_is_better_keys = ["milliseconds", "ms_", "seconds_", "KB",
- "rdbytes", "wrbytes", "dropped_percent",
- "(ms)", "(seconds)",
- "--ms", "--average_num_missing_tiles",
- "--experimental_jank", "--experimental_mean_frame",
- "--experimental_median_frame_time",
- "--total_deferred_image_decode_count",
- "--seconds"]
+ lower_is_better_keys = ['milliseconds', 'ms_', 'seconds_', 'KB', 'rdbytes',
+ 'wrbytes', 'dropped_percent', '(ms)', '(seconds)',
+ '--ms', '--average_num_missing_tiles',
+ '--experimental_jank', '--experimental_mean_frame',
+ '--experimental_median_frame_time',
+ '--total_deferred_image_decode_count', '--seconds']
return any([l in key for l in lower_is_better_keys])
def _InvertIfLowerIsBetter(self, cell):
if self._IsLowerBetter(cell.name):
if cell.value:
- cell.value = 1.0/cell.value
+ cell.value = 1.0 / cell.value
class AmeanRatioResult(KeyAwareComparisonResult):
+
def _ComputeFloat(self, cell, values, baseline_values):
if numpy.mean(baseline_values) != 0:
- cell.value = numpy.mean(values)/numpy.mean(baseline_values)
+ cell.value = numpy.mean(values) / numpy.mean(baseline_values)
elif numpy.mean(values) != 0:
cell.value = 0.00
# cell.value = 0 means the values and baseline_values have big difference
@@ -473,9 +480,10 @@ class AmeanRatioResult(KeyAwareComparisonResult):
class GmeanRatioResult(KeyAwareComparisonResult):
+
def _ComputeFloat(self, cell, values, baseline_values):
if self._GetGmean(baseline_values) != 0:
- cell.value = self._GetGmean(values)/self._GetGmean(baseline_values)
+ cell.value = self._GetGmean(values) / self._GetGmean(baseline_values)
elif self._GetGmean(values) != 0:
cell.value = 0.00
else:
@@ -492,7 +500,7 @@ class Color(object):
self.a = a
def __str__(self):
- return "r: %s g: %s: b: %s: a: %s" % (self.r, self.g, self.b, self.a)
+ return 'r: %s g: %s: b: %s: a: %s' % (self.r, self.g, self.b, self.a)
def Round(self):
"""Round RGBA values to the nearest integer."""
@@ -503,7 +511,7 @@ class Color(object):
def GetRGB(self):
"""Get a hex representation of the color."""
- return "%02x%02x%02x" % (self.r, self.g, self.b)
+ return '%02x%02x%02x' % (self.r, self.g, self.b)
@classmethod
def Lerp(cls, ratio, a, b):
@@ -518,10 +526,10 @@ class Color(object):
Linearly interpolated color.
"""
ret = cls()
- ret.r = (b.r - a.r)*ratio + a.r
- ret.g = (b.g - a.g)*ratio + a.g
- ret.b = (b.b - a.b)*ratio + a.b
- ret.a = (b.a - a.a)*ratio + a.a
+ ret.r = (b.r - a.r) * ratio + a.r
+ ret.g = (b.g - a.g) * ratio + a.g
+ ret.b = (b.b - a.b) * ratio + a.b
+ ret.a = (b.a - a.a) * ratio + a.a
return ret
@@ -540,14 +548,14 @@ class Format(object):
cell: The cell whose attributes are to be populated.
"""
if cell.value is None:
- cell.string_value = ""
+ cell.string_value = ''
if isinstance(cell.value, float):
self._ComputeFloat(cell)
else:
self._ComputeString(cell)
def _ComputeFloat(self, cell):
- cell.string_value = "{0:.2f}".format(cell.value)
+ cell.string_value = '{0:.2f}'.format(cell.value)
def _ComputeString(self, cell):
cell.string_value = str(cell.value)
@@ -558,24 +566,21 @@ class Format(object):
if math.isnan(value):
return mid
if value > mid_value:
- value = max_value - mid_value/value
+ value = max_value - mid_value / value
return self._GetColorBetweenRange(value, min_value, mid_value, max_value,
low, mid, high, power)
- def _GetColorBetweenRange(self,
- value,
- min_value, mid_value, max_value,
- low_color, mid_color, high_color,
- power):
+ def _GetColorBetweenRange(self, value, min_value, mid_value, max_value,
+ low_color, mid_color, high_color, power):
assert value <= max_value
assert value >= min_value
if value > mid_value:
- value = (max_value - value)/(max_value - mid_value)
+ value = (max_value - value) / (max_value - mid_value)
value **= power
ret = Color.Lerp(value, high_color, mid_color)
else:
- value = (value - min_value)/(mid_value - min_value)
+ value = (value - min_value) / (mid_value - min_value)
value **= power
ret = Color.Lerp(value, low_color, mid_color)
ret.Round()
@@ -583,8 +588,9 @@ class Format(object):
class PValueFormat(Format):
+
def _ComputeFloat(self, cell):
- cell.string_value = "%0.2f" % float(cell.value)
+ cell.string_value = '%0.2f' % float(cell.value)
if float(cell.value) < 0.05:
cell.bgcolor = self._GetColor(cell.value,
Color(255, 255, 0, 0),
@@ -603,7 +609,7 @@ class StorageFormat(Format):
def _ComputeFloat(self, cell):
base = 1024
- suffices = ["K", "M", "G"]
+ suffices = ['K', 'M', 'G']
v = float(cell.value)
current = 0
while v >= base**(current + 1) and current < len(suffices):
@@ -611,7 +617,7 @@ class StorageFormat(Format):
if current:
divisor = base**current
- cell.string_value = "%1.1f%s" % ((v/divisor), suffices[current - 1])
+ cell.string_value = '%1.1f%s' % ((v / divisor), suffices[current - 1])
else:
cell.string_value = str(cell.value)
@@ -624,7 +630,7 @@ class CoeffVarFormat(Format):
"""
def _ComputeFloat(self, cell):
- cell.string_value = "%1.1f%%" % (float(cell.value) * 100)
+ cell.string_value = '%1.1f%%' % (float(cell.value) * 100)
cell.color = self._GetColor(cell.value,
Color(0, 255, 0, 0),
Color(0, 0, 0, 0),
@@ -641,11 +647,9 @@ class PercentFormat(Format):
"""
def _ComputeFloat(self, cell):
- cell.string_value = "%+1.1f%%" % ((float(cell.value) - 1) * 100)
- cell.color = self._GetColor(cell.value,
- Color(255, 0, 0, 0),
- Color(0, 0, 0, 0),
- Color(0, 255, 0, 0))
+ cell.string_value = '%+1.1f%%' % ((float(cell.value) - 1) * 100)
+ cell.color = self._GetColor(cell.value, Color(255, 0, 0, 0),
+ Color(0, 0, 0, 0), Color(0, 255, 0, 0))
class RatioFormat(Format):
@@ -656,11 +660,9 @@ class RatioFormat(Format):
"""
def _ComputeFloat(self, cell):
- cell.string_value = "%+1.1f%%" % ((cell.value - 1) * 100)
- cell.color = self._GetColor(cell.value,
- Color(255, 0, 0, 0),
- Color(0, 0, 0, 0),
- Color(0, 255, 0, 0))
+ cell.string_value = '%+1.1f%%' % ((cell.value - 1) * 100)
+ cell.color = self._GetColor(cell.value, Color(255, 0, 0, 0),
+ Color(0, 0, 0, 0), Color(0, 255, 0, 0))
class ColorBoxFormat(Format):
@@ -674,11 +676,9 @@ class ColorBoxFormat(Format):
"""
def _ComputeFloat(self, cell):
- cell.string_value = "--"
- bgcolor = self._GetColor(cell.value,
- Color(255, 0, 0, 0),
- Color(255, 255, 255, 0),
- Color(0, 255, 0, 0))
+ cell.string_value = '--'
+ bgcolor = self._GetColor(cell.value, Color(255, 0, 0, 0),
+ Color(255, 255, 255, 0), Color(0, 255, 0, 0))
cell.bgcolor = bgcolor
cell.color = bgcolor
@@ -721,9 +721,9 @@ class Cell(object):
def __str__(self):
l = []
- l.append("value: %s" % self.value)
- l.append("string_value: %s" % self.string_value)
- return " ".join(l)
+ l.append('value: %s' % self.value)
+ l.append('string_value: %s' % self.string_value)
+ return ' '.join(l)
class Column(object):
@@ -734,7 +734,7 @@ class Column(object):
fmt: an object of the Format class.
"""
- def __init__(self, result, fmt, name=""):
+ def __init__(self, result, fmt, name=''):
self.result = result
self.fmt = fmt
self.name = name
@@ -774,7 +774,7 @@ class TableFormatter(object):
for row in self._table[1:]:
# It does not make sense to put retval in the summary table.
- if str(row[0]) == "retval" and table_type == "summary":
+ if str(row[0]) == 'retval' and table_type == 'summary':
# Check to see if any runs passed, and update all_failed.
all_failed = True
for values in row[1:]:
@@ -811,17 +811,17 @@ class TableFormatter(object):
# If this is a summary table, and the only row in it is 'retval', and
# all the test runs failed, we need to a 'Results' row to the output
# table.
- if table_type == "summary" and all_failed and len(self._table) == 2:
+ if table_type == 'summary' and all_failed and len(self._table) == 2:
labels_row = self._table[0]
key = Cell()
- key.string_value = "Results"
+ key.string_value = 'Results'
out_row = [key]
baseline = None
for value in labels_row[1:]:
for column in self._columns:
cell = Cell()
cell.name = key.string_value
- column.result.Compute(cell, ["Fail"], baseline)
+ column.result.Compute(cell, ['Fail'], baseline)
column.fmt.Compute(cell)
out_row.append(cell)
if not row_index:
@@ -832,7 +832,7 @@ class TableFormatter(object):
"""Generate Column name at the top of table."""
key = Cell()
key.header = True
- key.string_value = "Keys"
+ key.string_value = 'Keys'
header = [key]
for column in self._table_columns:
cell = Cell()
@@ -843,8 +843,8 @@ class TableFormatter(object):
result_name = column.result.__class__.__name__
format_name = column.fmt.__class__.__name__
- cell.string_value = "%s %s" % (result_name.replace("Result", ""),
- format_name.replace("Format", ""))
+ cell.string_value = '%s %s' % (result_name.replace('Result', ''),
+ format_name.replace('Format', ''))
header.append(cell)
@@ -873,15 +873,15 @@ class TableFormatter(object):
def AddLabelName(self):
"""Put label on the top of the table."""
top_header = []
- base_colspan = len([c for c in self._columns
- if not c.result.NeedsBaseline()])
+ base_colspan = len([c for c in self._columns if not c.result.NeedsBaseline()
+ ])
compare_colspan = len(self._columns)
# Find the row with the key 'retval', if it exists. This
# will be used to calculate the number of iterations that passed and
# failed for each image label.
retval_row = None
for row in self._table:
- if row[0] == "retval":
+ if row[0] == 'retval':
retval_row = row
# The label is organized as follows
# "keys" label_base, label_comparison1, label_comparison2
@@ -896,7 +896,7 @@ class TableFormatter(object):
retval_values = retval_row[column_position]
if type(retval_values) is list:
passes, fails = self.GetPassesAndFails(retval_values)
- cell.string_value = str(label) + " (pass:%d fail:%d)" % (passes,
+ cell.string_value = str(label) + ' (pass:%d fail:%d)' % (passes,
fails)
else:
cell.string_value = str(label)
@@ -911,11 +911,11 @@ class TableFormatter(object):
self._out_table = [top_header] + self._out_table
def _PrintOutTable(self):
- o = ""
+ o = ''
for row in self._out_table:
for cell in row:
- o += str(cell) + " "
- o += "\n"
+ o += str(cell) + ' '
+ o += '\n'
print o
def GetCellTable(self, table_type, headers=True):
@@ -959,12 +959,12 @@ class TablePrinter(object):
row_style = Cell()
for cell in row:
if cell.color_row:
- assert cell.color, "Cell color not set but color_row set!"
- assert not row_style.color, "Multiple row_style.colors found!"
+ assert cell.color, 'Cell color not set but color_row set!'
+ assert not row_style.color, 'Multiple row_style.colors found!'
row_style.color = cell.color
if cell.bgcolor_row:
- assert cell.bgcolor, "Cell bgcolor not set but bgcolor_row set!"
- assert not row_style.bgcolor, "Multiple row_style.bgcolors found!"
+ assert cell.bgcolor, 'Cell bgcolor not set but bgcolor_row set!'
+ assert not row_style.bgcolor, 'Multiple row_style.bgcolors found!'
row_style.bgcolor = cell.bgcolor
self._row_styles.append(row_style)
@@ -976,8 +976,7 @@ class TablePrinter(object):
column_style = Cell()
for row in self._table:
if not any([cell.colspan != 1 for cell in row]):
- column_style.width = max(column_style.width,
- len(row[i].string_value))
+ column_style.width = max(column_style.width, len(row[i].string_value))
self._column_styles.append(column_style)
def _GetBGColorFix(self, color):
@@ -985,16 +984,15 @@ class TablePrinter(object):
rgb = color.GetRGB()
prefix, _ = colortrans.rgb2short(rgb)
# pylint: disable=anomalous-backslash-in-string
- prefix = "\033[48;5;%sm" % prefix
- suffix = "\033[0m"
+ prefix = '\033[48;5;%sm' % prefix
+ suffix = '\033[0m'
elif self._output_type in [self.EMAIL, self.HTML]:
rgb = color.GetRGB()
- prefix = ("<FONT style=\"BACKGROUND-COLOR:#{0}\">"
- .format(rgb))
- suffix = "</FONT>"
+ prefix = ("<FONT style=\"BACKGROUND-COLOR:#{0}\">".format(rgb))
+ suffix = '</FONT>'
elif self._output_type in [self.PLAIN, self.TSV]:
- prefix = ""
- suffix = ""
+ prefix = ''
+ suffix = ''
return prefix, suffix
def _GetColorFix(self, color):
@@ -1002,15 +1000,15 @@ class TablePrinter(object):
rgb = color.GetRGB()
prefix, _ = colortrans.rgb2short(rgb)
# pylint: disable=anomalous-backslash-in-string
- prefix = "\033[38;5;%sm" % prefix
- suffix = "\033[0m"
+ prefix = '\033[38;5;%sm' % prefix
+ suffix = '\033[0m'
elif self._output_type in [self.EMAIL, self.HTML]:
rgb = color.GetRGB()
- prefix = "<FONT COLOR=#{0}>".format(rgb)
- suffix = "</FONT>"
+ prefix = '<FONT COLOR=#{0}>'.format(rgb)
+ suffix = '</FONT>'
elif self._output_type in [self.PLAIN, self.TSV]:
- prefix = ""
- suffix = ""
+ prefix = ''
+ suffix = ''
return prefix, suffix
def Print(self):
@@ -1029,11 +1027,11 @@ class TablePrinter(object):
if cell.color:
p, s = self._GetColorFix(cell.color)
- out = "%s%s%s" % (p, out, s)
+ out = '%s%s%s' % (p, out, s)
if cell.bgcolor:
p, s = self._GetBGColorFix(cell.bgcolor)
- out = "%s%s%s" % (p, out, s)
+ out = '%s%s%s' % (p, out, s)
if self._output_type in [self.PLAIN, self.CONSOLE, self.EMAIL]:
if cell.width:
@@ -1051,51 +1049,51 @@ class TablePrinter(object):
for k in range(cell.colspan):
width += self._column_styles[start + k].width
if width > raw_width:
- padding = ("%" + str(width - raw_width) + "s") % ""
+ padding = ('%' + str(width - raw_width) + 's') % ''
out = padding + out
if self._output_type == self.HTML:
if cell.header:
- tag = "th"
+ tag = 'th'
else:
- tag = "td"
+ tag = 'td'
out = "<{0} colspan = \"{2}\"> {1} </{0}>".format(tag, out, cell.colspan)
return out
def _GetHorizontalSeparator(self):
if self._output_type in [self.CONSOLE, self.PLAIN, self.EMAIL]:
- return " "
+ return ' '
if self._output_type == self.HTML:
- return ""
+ return ''
if self._output_type == self.TSV:
- return "\t"
+ return '\t'
def _GetVerticalSeparator(self):
if self._output_type in [self.PLAIN, self.CONSOLE, self.TSV, self.EMAIL]:
- return "\n"
+ return '\n'
if self._output_type == self.HTML:
- return "</tr>\n<tr>"
+ return '</tr>\n<tr>'
def _GetPrefix(self):
if self._output_type in [self.PLAIN, self.CONSOLE, self.TSV, self.EMAIL]:
- return ""
+ return ''
if self._output_type == self.HTML:
return "<p></p><table id=\"box-table-a\">\n<tr>"
def _GetSuffix(self):
if self._output_type in [self.PLAIN, self.CONSOLE, self.TSV, self.EMAIL]:
- return ""
+ return ''
if self._output_type == self.HTML:
- return "</tr>\n</table>"
+ return '</tr>\n</table>'
def _GetStringValue(self):
- o = ""
+ o = ''
o += self._GetPrefix()
for i in range(len(self._table)):
row = self._table[i]
# Apply row color and bgcolor.
- p = s = bgp = bgs = ""
+ p = s = bgp = bgs = ''
if self._row_styles[i].bgcolor:
bgp, bgs = self._GetBGColorFix(self._row_styles[i].bgcolor)
if self._row_styles[i].color:
@@ -1129,13 +1127,10 @@ def GetSimpleTable(table, out_to=TablePrinter.CONSOLE):
will produce a colored table that can be printed to the console.
"""
columns = [
- Column(AmeanResult(),
- Format()),
- Column(AmeanRatioResult(),
- PercentFormat()),
- Column(AmeanRatioResult(),
- ColorBoxFormat()),
- ]
+ Column(AmeanResult(), Format()),
+ Column(AmeanRatioResult(), PercentFormat()),
+ Column(AmeanRatioResult(), ColorBoxFormat()),
+ ]
our_table = [table[0]]
for row in table[1:]:
our_row = [row[0]]
@@ -1166,61 +1161,68 @@ def GetComplexTable(runs, labels, out_to=TablePrinter.CONSOLE):
"""
tg = TableGenerator(runs, labels, TableGenerator.SORT_BY_VALUES_DESC)
table = tg.GetTable()
- columns = [Column(LiteralResult(),
- Format(),
- "Literal"),
- Column(AmeanResult(),
- Format()),
- Column(StdResult(),
- Format()),
- Column(CoeffVarResult(),
- CoeffVarFormat()),
- Column(NonEmptyCountResult(),
- Format()),
- Column(AmeanRatioResult(),
- PercentFormat()),
- Column(AmeanRatioResult(),
- RatioFormat()),
- Column(GmeanRatioResult(),
- RatioFormat()),
- Column(PValueResult(),
- PValueFormat()),
- ]
+ columns = [Column(LiteralResult(), Format(), 'Literal'),
+ Column(AmeanResult(), Format()),
+ Column(StdResult(), Format()),
+ Column(CoeffVarResult(), CoeffVarFormat()),
+ Column(NonEmptyCountResult(), Format()),
+ Column(AmeanRatioResult(), PercentFormat()),
+ Column(AmeanRatioResult(), RatioFormat()),
+ Column(GmeanRatioResult(), RatioFormat()),
+ Column(PValueResult(), PValueFormat())]
tf = TableFormatter(table, columns)
cell_table = tf.GetCellTable()
tp = TablePrinter(cell_table, out_to)
return tp.Print()
-if __name__ == "__main__":
+
+if __name__ == '__main__':
# Run a few small tests here.
- runs = [[{"k1": "10", "k2": "12", "k5": "40", "k6": "40",
- "ms_1": "20", "k7": "FAIL", "k8": "PASS", "k9": "PASS",
- "k10": "0"},
- {"k1": "13", "k2": "14", "k3": "15", "ms_1": "10", "k8": "PASS",
- "k9": "FAIL", "k10": "0"}],
- [{"k1": "50", "k2": "51", "k3": "52", "k4": "53", "k5": "35", "k6":
- "45", "ms_1": "200", "ms_2": "20", "k7": "FAIL", "k8": "PASS", "k9":
- "PASS"}]]
- labels = ["vanilla", "modified"]
+ runs = [[{'k1': '10',
+ 'k2': '12',
+ 'k5': '40',
+ 'k6': '40',
+ 'ms_1': '20',
+ 'k7': 'FAIL',
+ 'k8': 'PASS',
+ 'k9': 'PASS',
+ 'k10': '0'}, {'k1': '13',
+ 'k2': '14',
+ 'k3': '15',
+ 'ms_1': '10',
+ 'k8': 'PASS',
+ 'k9': 'FAIL',
+ 'k10': '0'}], [{'k1': '50',
+ 'k2': '51',
+ 'k3': '52',
+ 'k4': '53',
+ 'k5': '35',
+ 'k6': '45',
+ 'ms_1': '200',
+ 'ms_2': '20',
+ 'k7': 'FAIL',
+ 'k8': 'PASS',
+ 'k9': 'PASS'}]]
+ labels = ['vanilla', 'modified']
t = GetComplexTable(runs, labels, TablePrinter.CONSOLE)
print t
email = GetComplexTable(runs, labels, TablePrinter.EMAIL)
- runs = [[{"k1": "1"}, {"k1": "1.1"}, {"k1": "1.2"}],
- [{"k1": "5"}, {"k1": "5.1"}, {"k1": "5.2"}]]
+ runs = [[{'k1': '1'}, {'k1': '1.1'}, {'k1': '1.2'}],
+ [{'k1': '5'}, {'k1': '5.1'}, {'k1': '5.2'}]]
t = GetComplexTable(runs, labels, TablePrinter.CONSOLE)
print t
simple_table = [
- ["binary", "b1", "b2", "b3"],
- ["size", 100, 105, 108],
- ["rodata", 100, 80, 70],
- ["data", 100, 100, 100],
- ["debug", 100, 140, 60],
- ]
+ ['binary', 'b1', 'b2', 'b3'],
+ ['size', 100, 105, 108],
+ ['rodata', 100, 80, 70],
+ ['data', 100, 100, 100],
+ ['debug', 100, 140, 60],
+ ]
t = GetSimpleTable(simple_table)
print t
email += GetSimpleTable(simple_table, TablePrinter.HTML)
email_to = [getpass.getuser()]
email = "<pre style='font-size: 13px'>%s</pre>" % email
- EmailSender().SendEmail(email_to, "SimpleTableTest", email, msg_type="html")
+ EmailSender().SendEmail(email_to, 'SimpleTableTest', email, msg_type='html')
diff --git a/utils/tabulator_test.py b/utils/tabulator_test.py
index 16406107..b46fcc1f 100644
--- a/utils/tabulator_test.py
+++ b/utils/tabulator_test.py
@@ -1,8 +1,7 @@
# Copyright 2012 Google Inc. All Rights Reserved.
-
"""Tests for the tabulator module."""
-__author__ = "asharif@google.com (Ahmad Sharif)"
+__author__ = 'asharif@google.com (Ahmad Sharif)'
# System modules
import unittest
@@ -12,12 +11,13 @@ import tabulator
class TabulatorTest(unittest.TestCase):
+
def testResult(self):
- table = ["k1", ["1", "3"], ["55"]]
+ table = ['k1', ['1', '3'], ['55']]
result = tabulator.Result()
cell = tabulator.Cell()
result.Compute(cell, table[2], table[1])
- expected = " ".join([str(float(v)) for v in table[2]])
+ expected = ' '.join([str(float(v)) for v in table[2]])
self.assertTrue(cell.value == expected)
result = tabulator.AmeanResult()
@@ -28,13 +28,13 @@ class TabulatorTest(unittest.TestCase):
def testStringMean(self):
smr = tabulator.StringMeanResult()
cell = tabulator.Cell()
- value = "PASS"
+ value = 'PASS'
values = [value for _ in range(3)]
smr.Compute(cell, values, None)
self.assertTrue(cell.value == value)
- values.append("FAIL")
+ values.append('FAIL')
smr.Compute(cell, values, None)
- self.assertTrue(cell.value == "?")
+ self.assertTrue(cell.value == '?')
def testStorageFormat(self):
sf = tabulator.StorageFormat()
@@ -42,13 +42,13 @@ class TabulatorTest(unittest.TestCase):
base = 1024.0
cell.value = base
sf.Compute(cell)
- self.assertTrue(cell.string_value == "1.0K")
+ self.assertTrue(cell.string_value == '1.0K')
cell.value = base**2
sf.Compute(cell)
- self.assertTrue(cell.string_value == "1.0M")
+ self.assertTrue(cell.string_value == '1.0M')
cell.value = base**3
sf.Compute(cell)
- self.assertTrue(cell.string_value == "1.0G")
+ self.assertTrue(cell.string_value == '1.0G')
def testLerp(self):
c1 = tabulator.Color(0, 0, 0, 0)
@@ -67,53 +67,54 @@ class TabulatorTest(unittest.TestCase):
self.assertTrue(b >= 0.99e+308 and b <= 1.01e+308)
def testTableGenerator(self):
- runs = [[{"k1": "10", "k2": "12"},
- {"k1": "13", "k2": "14", "k3": "15"}],
- [{"k1": "50", "k2": "51", "k3": "52", "k4": "53"}]]
- labels = ["vanilla", "modified"]
+ runs = [[{'k1': '10',
+ 'k2': '12'}, {'k1': '13',
+ 'k2': '14',
+ 'k3': '15'}], [{'k1': '50',
+ 'k2': '51',
+ 'k3': '52',
+ 'k4': '53'}]]
+ labels = ['vanilla', 'modified']
tg = tabulator.TableGenerator(runs, labels)
table = tg.GetTable()
header = table.pop(0)
- self.assertTrue(header == ["keys", "vanilla", "modified"])
+ self.assertTrue(header == ['keys', 'vanilla', 'modified'])
row = table.pop(0)
- self.assertTrue(row == ["k1", ["10", "13"], ["50"]])
+ self.assertTrue(row == ['k1', ['10', '13'], ['50']])
row = table.pop(0)
- self.assertTrue(row == ["k2", ["12", "14"], ["51"]])
+ self.assertTrue(row == ['k2', ['12', '14'], ['51']])
row = table.pop(0)
- self.assertTrue(row == ["k3", [None, "15"], ["52"]])
+ self.assertTrue(row == ['k3', [None, '15'], ['52']])
row = table.pop(0)
- self.assertTrue(row == ["k4", [None, None], ["53"]])
+ self.assertTrue(row == ['k4', [None, None], ['53']])
table = tg.GetTable()
columns = [
- tabulator.Column(tabulator.AmeanResult(),
- tabulator.Format()),
+ tabulator.Column(tabulator.AmeanResult(), tabulator.Format()),
tabulator.Column(tabulator.AmeanRatioResult(),
tabulator.PercentFormat()),
- ]
+ ]
tf = tabulator.TableFormatter(table, columns)
table = tf.GetCellTable()
self.assertTrue(table)
def testColspan(self):
simple_table = [
- ["binary", "b1", "b2", "b3"],
- ["size", 100, 105, 108],
- ["rodata", 100, 80, 70],
- ["data", 100, 100, 100],
- ["debug", 100, 140, 60],
- ]
+ ['binary', 'b1', 'b2', 'b3'],
+ ['size', 100, 105, 108],
+ ['rodata', 100, 80, 70],
+ ['data', 100, 100, 100],
+ ['debug', 100, 140, 60],
+ ]
columns = [
- tabulator.Column(tabulator.AmeanResult(),
- tabulator.Format()),
- tabulator.Column(tabulator.MinResult(),
- tabulator.Format()),
+ tabulator.Column(tabulator.AmeanResult(), tabulator.Format()),
+ tabulator.Column(tabulator.MinResult(), tabulator.Format()),
tabulator.Column(tabulator.AmeanRatioResult(),
tabulator.PercentFormat()),
tabulator.Column(tabulator.AmeanRatioResult(),
tabulator.ColorBoxFormat()),
- ]
+ ]
our_table = [simple_table[0]]
for row in simple_table[1:]:
our_row = [row[0]]
@@ -131,5 +132,6 @@ class TabulatorTest(unittest.TestCase):
for cell in row:
self.assertTrue(cell.colspan == 1)
-if __name__ == "__main__":
+
+if __name__ == '__main__':
unittest.main()
diff --git a/utils/timeline.py b/utils/timeline.py
index aab209b6..8fa73a2d 100644
--- a/utils/timeline.py
+++ b/utils/timeline.py
@@ -1,8 +1,5 @@
-#!/usr/bin/python
-#
# Copyright 2012 Google Inc. All Rights Reserved.
#
-
"""Tools for recording and reporting timeline of benchmark_run."""
__author__ = 'yunlian@google.com (Yunlian Jiang)'
@@ -11,6 +8,7 @@ import time
class Event(object):
+
def __init__(self, name='', cur_time=0):
self.name = name
self.timestamp = cur_time
@@ -30,7 +28,7 @@ class Timeline(object):
self.events.append(cur_event)
def GetEvents(self):
- return([e.name for e in self.events])
+ return ([e.name for e in self.events])
def GetEventDict(self):
tl = {}
diff --git a/utils/timeline_test.py b/utils/timeline_test.py
index 1f4d178a..17482c2b 100644
--- a/utils/timeline_test.py
+++ b/utils/timeline_test.py
@@ -1,5 +1,4 @@
# Copyright 2012 Google Inc. All Rights Reserved.
-
"""Tests for time_line.py."""
__author__ = 'yunlian@google.com (Yunlian Jiang)'
@@ -17,7 +16,7 @@ class TimeLineTest(unittest.TestCase):
tl.Record('A')
t = time.time()
t1 = tl.events[0].timestamp
- self.assertEqual(int(t1-t), 0)
+ self.assertEqual(int(t1 - t), 0)
self.assertRaises(AssertionError, tl.Record, 'A')
def testGetEvents(self):
@@ -34,7 +33,7 @@ class TimeLineTest(unittest.TestCase):
tl.Record('A')
t = time.time()
t1 = tl.GetEventTime('A')
- self.assertEqual(int(t1-t), 0)
+ self.assertEqual(int(t1 - t), 0)
self.assertRaises(IndexError, tl.GetEventTime, 'B')
def testGetLastEventTime(self):
@@ -43,12 +42,13 @@ class TimeLineTest(unittest.TestCase):
tl.Record('A')
t = time.time()
t1 = tl.GetLastEventTime()
- self.assertEqual(int(t1-t), 0)
+ self.assertEqual(int(t1 - t), 0)
time.sleep(2)
tl.Record('B')
t = time.time()
t1 = tl.GetLastEventTime()
- self.assertEqual(int(t1-t), 0)
+ self.assertEqual(int(t1 - t), 0)
+
if __name__ == '__main__':
unittest.main()
diff --git a/weekly_report.py b/weekly_report.py
index 65f74f26..aa53f1d6 100755
--- a/weekly_report.py
+++ b/weekly_report.py
@@ -12,16 +12,15 @@ from utils import constants
from utils import command_executer
WEEKDAYS = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
-DATA_ROOT_DIR = os.path.join(constants.CROSTC_WORKSPACE,
- 'weekly_test_data')
+DATA_ROOT_DIR = os.path.join(constants.CROSTC_WORKSPACE, 'weekly_test_data')
EXPERIMENT_FILE = os.path.join(DATA_ROOT_DIR, 'weekly_report')
-MAIL_PROGRAM = "~/var/bin/mail-sheriff"
+MAIL_PROGRAM = '~/var/bin/mail-sheriff'
def Generate_Vanilla_Report_File(vanilla_image_paths, board, remote,
chromeos_root, cmd_executer):
- experiment_header = """
+ experiment_header = """
name: weekly_vanilla_report
cache_only: True
same_specs: False
@@ -29,42 +28,42 @@ board: %s
remote: %s
""" % (board, remote)
-
- experiment_tests = """
+ experiment_tests = """
benchmark: all_toolchain_perf {
suite: telemetry_Crosperf
iterations: 3
}
"""
- filename = "%s_%s_vanilla.exp" % (EXPERIMENT_FILE, board)
- if os.path.exists(filename):
- cmd = "rm %s" % filename
- cmd_executer.RunCommand(cmd)
+ filename = '%s_%s_vanilla.exp' % (EXPERIMENT_FILE, board)
+ if os.path.exists(filename):
+ cmd = 'rm %s' % filename
+ cmd_executer.RunCommand(cmd)
- with open(filename, "w") as f:
- print >>f, experiment_header
- print >>f, experiment_tests
+ with open(filename, 'w') as f:
+ print >> f, experiment_header
+ print >> f, experiment_tests
- # Add each vanilla image
- for test_path in vanilla_image_paths:
- pieces = test_path.split("/")
- test_name = pieces[-1]
- test_image = """
+ # Add each vanilla image
+ for test_path in vanilla_image_paths:
+ pieces = test_path.split('/')
+ test_name = pieces[-1]
+ test_image = """
%s {
chromeos_root: %s
chromeos_image: %s
}
-""" % (test_name, chromeos_root, os.path.join (test_path,
- "chromiumos_test_image.bin"))
- print >>f, test_image
+""" % (test_name, chromeos_root, os.path.join(test_path,
+ 'chromiumos_test_image.bin'))
+ print >> f, test_image
+
+ return filename
- return filename
def Generate_Test_File(test_image_paths, vanilla_image_path, board, remote,
chromeos_root, cmd_executer):
- experiment_header = """
+ experiment_header = """
name: weekly_report
cache_only: True
same_specs: False
@@ -72,190 +71,182 @@ board: %s
remote: %s
""" % (board, remote)
-
- experiment_tests = """
+ experiment_tests = """
benchmark: all_toolchain_perf {
suite: telemetry_Crosperf
iterations: 3
}
"""
- filename = "%s_%s.exp" % (EXPERIMENT_FILE, board)
- if os.path.exists(filename):
- cmd = "rm %s" % filename
- cmd_executer.RunCommand(cmd)
+ filename = '%s_%s.exp' % (EXPERIMENT_FILE, board)
+ if os.path.exists(filename):
+ cmd = 'rm %s' % filename
+ cmd_executer.RunCommand(cmd)
- with open(filename, "w") as f:
- print >>f, experiment_header
- print >>f, experiment_tests
+ with open(filename, 'w') as f:
+ print >> f, experiment_header
+ print >> f, experiment_tests
- # Add vanilla image (first)
- vanilla_image = """
+ # Add vanilla image (first)
+ vanilla_image = """
%s {
chromeos_root: %s
chromeos_image: %s
}
-""" % (vanilla_image_path.split("/")[-1],
- chromeos_root, os.path.join(vanilla_image_path,
- "chromiumos_test_image.bin"))
+""" % (vanilla_image_path.split('/')[-1], chromeos_root,
+ os.path.join(vanilla_image_path, 'chromiumos_test_image.bin'))
- print >>f, vanilla_image
+ print >> f, vanilla_image
- # Add each test image
- for test_path in test_image_paths:
- pieces = test_path.split("/")
- test_name = pieces[-1]
- test_image = """
+ # Add each test image
+ for test_path in test_image_paths:
+ pieces = test_path.split('/')
+ test_name = pieces[-1]
+ test_image = """
%s {
chromeos_root: %s
chromeos_image: %s
}
-""" % (test_name, chromeos_root, os.path.join (test_path,
- "chromiumos_test_image.bin"))
- print >>f, test_image
-
- return filename
+""" % (test_name, chromeos_root, os.path.join(test_path,
+ 'chromiumos_test_image.bin'))
+ print >> f, test_image
+ return filename
def Main(argv):
- parser = optparse.OptionParser()
- parser.add_option('-b', '--board', dest='board',
- help='Target board.')
- parser.add_option("-r", "--remote", dest="remote",
- help="Target device.")
- parser.add_option("-v", "--vanilla_only", dest="vanilla_only",
- action="store_true",
+ parser = optparse.OptionParser()
+ parser.add_option('-b', '--board', dest='board', help='Target board.')
+ parser.add_option('-r', '--remote', dest='remote', help='Target device.')
+ parser.add_option('-v',
+ '--vanilla_only',
+ dest='vanilla_only',
+ action='store_true',
default=False,
- help="Generate a report comparing only the vanilla images.")
-
- options = parser.parse_args(argv[1:])[0]
-
- if not options.board:
- print "Must specify a board."
- return 1
-
- if not options.remote:
- print "Must specify at least one remote."
- return 1
-
- cmd_executer = command_executer.GetCommandExecuter(log_level="average")
-
- # Find starting index, for cycling through days of week, generating
- # reports starting 6 days ago from today. Generate list of indices for
- # order in which to look at weekdays for report:
- todays_index = datetime.datetime.today().isoweekday()
- indices = []
- start = todays_index + 1
- end = start + 7
- for i in range(start,end):
- indices.append(i % 7)
- # E.g. if today is Sunday, then start report with last Monday, so
- # indices = [1, 2, 3, 4, 5, 6, 0].
-
-
-
- # Find all the test image tar files, untar them and add them to
- # the list. Also find and untar vanilla image tar files, and keep
- # track of the first vanilla image.
- report_image_paths = []
- vanilla_image_paths = []
- first_vanilla_image = None
- for i in indices:
- day = WEEKDAYS[i]
- data_path = os.path.join(DATA_ROOT_DIR, options.board, day)
- if os.path.exists(data_path):
- # First, untar the test image.
- tar_file_name = "%s_test_image.tar" % day
- tar_file_path = os.path.join(data_path, tar_file_name)
- image_dir = "%s_test_image" % day
- image_path = os.path.join(data_path, image_dir)
- if os.path.exists(tar_file_path):
- if not os.path.exists(image_path):
- os.makedirs(image_path)
- cmd = ("cd %s; tar -xvf %s -C %s --strip-components 1" %
- (data_path, tar_file_path, image_path))
- ret = cmd_executer.RunCommand(cmd)
- if not ret:
- report_image_paths.append(image_path)
- # Next, untar the vanilla image.
- vanilla_file = "%s_vanilla_image.tar" % day
- v_file_path = os.path.join(data_path, vanilla_file)
- image_dir = "%s_vanilla_image" % day
- image_path = os.path.join(data_path, image_dir)
- if os.path.exists(v_file_path):
- if not os.path.exists(image_path):
- os.makedirs(image_path)
- cmd = ("cd %s; tar -xvf %s -C %s --strip-components 1" %
- (data_path, v_file_path, image_path))
- ret = cmd_executer.RunCommand(cmd)
- if not ret:
- vanilla_image_paths.append(image_path)
- if not first_vanilla_image:
- first_vanilla_image = image_path
-
- # Find a chroot we can use. Look for a directory containing both
- # an experiment file and a chromeos directory (the experiment file will
- # only be created if both images built successfully, i.e. the chroot is
- # good).
- chromeos_root = None
- timestamp = datetime.datetime.strftime(datetime.datetime.now(),
- "%Y-%m-%d_%H:%M:%S")
- results_dir = os.path.join(os.path.expanduser("~/nightly_test_reports"),
- "%s.%s" % (timestamp,
- options.board),
- "weekly_tests")
-
- for day in WEEKDAYS:
- startdir = os.path.join(constants.CROSTC_WORKSPACE, day)
- num_dirs = os.listdir(startdir)
- for d in num_dirs:
- exp_file = os.path.join(startdir, d, "toolchain_experiment.txt")
- chroot = os.path.join(startdir, d, "chromeos")
- if os.path.exists(chroot) and os.path.exists(exp_file):
- chromeos_root = chroot
- if chromeos_root:
- break;
- if chromeos_root:
- break;
-
- if not chromeos_root:
- print "Unable to locate a usable chroot. Exiting without report."
- return 1
-
-
- # Create the Crosperf experiment file for generating the weekly report.
- if not options.vanilla_only:
- filename = Generate_Test_File (report_image_paths, first_vanilla_image,
- options.board, options.remote,
- chromeos_root, cmd_executer)
- else:
- filename = Generate_Vanilla_Report_File (vanilla_image_paths,
- options.board, options.remote,
- chromeos_root, cmd_executer)
-
- # Run Crosperf on the file to generate the weekly report.
- cmd = ("%s/toolchain-utils/crosperf/crosperf "
- "%s --no_email=True --results_dir=%s" %
- (constants.CROSTC_WORKSPACE, filename, results_dir))
- retval = cmd_executer.RunCommand(cmd)
- if retval == 0:
- # Send the email, if the crosperf command worked.
- filename = os.path.join(results_dir, "msg_body.html")
- if (os.path.exists(filename) and
- os.path.exists(os.path.expanduser(MAIL_PROGRAM))):
- vanilla_string = " "
- if options.vanilla_only:
- vanilla_string = " Vanilla "
- command = ('cat %s | %s -s "Weekly%sReport results, %s" -team -html'
- % (filename, MAIL_PROGRAM, vanilla_string, options.board))
- retval = cmd_executer.RunCommand(command)
-
- return retval
-
+ help='Generate a report comparing only the vanilla images.')
+
+ options = parser.parse_args(argv[1:])[0]
+
+ if not options.board:
+ print 'Must specify a board.'
+ return 1
+
+ if not options.remote:
+ print 'Must specify at least one remote.'
+ return 1
+
+ cmd_executer = command_executer.GetCommandExecuter(log_level='average')
+
+ # Find starting index, for cycling through days of week, generating
+ # reports starting 6 days ago from today. Generate list of indices for
+ # order in which to look at weekdays for report:
+ todays_index = datetime.datetime.today().isoweekday()
+ indices = []
+ start = todays_index + 1
+ end = start + 7
+ for i in range(start, end):
+ indices.append(i % 7)
+ # E.g. if today is Sunday, then start report with last Monday, so
+ # indices = [1, 2, 3, 4, 5, 6, 0].
+
+ # Find all the test image tar files, untar them and add them to
+ # the list. Also find and untar vanilla image tar files, and keep
+ # track of the first vanilla image.
+ report_image_paths = []
+ vanilla_image_paths = []
+ first_vanilla_image = None
+ for i in indices:
+ day = WEEKDAYS[i]
+ data_path = os.path.join(DATA_ROOT_DIR, options.board, day)
+ if os.path.exists(data_path):
+ # First, untar the test image.
+ tar_file_name = '%s_test_image.tar' % day
+ tar_file_path = os.path.join(data_path, tar_file_name)
+ image_dir = '%s_test_image' % day
+ image_path = os.path.join(data_path, image_dir)
+ if os.path.exists(tar_file_path):
+ if not os.path.exists(image_path):
+ os.makedirs(image_path)
+ cmd = ('cd %s; tar -xvf %s -C %s --strip-components 1' %
+ (data_path, tar_file_path, image_path))
+ ret = cmd_executer.RunCommand(cmd)
+ if not ret:
+ report_image_paths.append(image_path)
+ # Next, untar the vanilla image.
+ vanilla_file = '%s_vanilla_image.tar' % day
+ v_file_path = os.path.join(data_path, vanilla_file)
+ image_dir = '%s_vanilla_image' % day
+ image_path = os.path.join(data_path, image_dir)
+ if os.path.exists(v_file_path):
+ if not os.path.exists(image_path):
+ os.makedirs(image_path)
+ cmd = ('cd %s; tar -xvf %s -C %s --strip-components 1' %
+ (data_path, v_file_path, image_path))
+ ret = cmd_executer.RunCommand(cmd)
+ if not ret:
+ vanilla_image_paths.append(image_path)
+ if not first_vanilla_image:
+ first_vanilla_image = image_path
+
+ # Find a chroot we can use. Look for a directory containing both
+ # an experiment file and a chromeos directory (the experiment file will
+ # only be created if both images built successfully, i.e. the chroot is
+ # good).
+ chromeos_root = None
+ timestamp = datetime.datetime.strftime(datetime.datetime.now(),
+ '%Y-%m-%d_%H:%M:%S')
+ results_dir = os.path.join(
+ os.path.expanduser('~/nightly_test_reports'), '%s.%s' % (
+ timestamp, options.board), 'weekly_tests')
+
+ for day in WEEKDAYS:
+ startdir = os.path.join(constants.CROSTC_WORKSPACE, day)
+ num_dirs = os.listdir(startdir)
+ for d in num_dirs:
+ exp_file = os.path.join(startdir, d, 'toolchain_experiment.txt')
+ chroot = os.path.join(startdir, d, 'chromeos')
+ if os.path.exists(chroot) and os.path.exists(exp_file):
+ chromeos_root = chroot
+ if chromeos_root:
+ break
+ if chromeos_root:
+ break
+
+ if not chromeos_root:
+ print 'Unable to locate a usable chroot. Exiting without report.'
+ return 1
+
+ # Create the Crosperf experiment file for generating the weekly report.
+ if not options.vanilla_only:
+ filename = Generate_Test_File(report_image_paths, first_vanilla_image,
+ options.board, options.remote, chromeos_root,
+ cmd_executer)
+ else:
+ filename = Generate_Vanilla_Report_File(vanilla_image_paths, options.board,
+ options.remote, chromeos_root,
+ cmd_executer)
+
+ # Run Crosperf on the file to generate the weekly report.
+ cmd = ('%s/toolchain-utils/crosperf/crosperf '
+ '%s --no_email=True --results_dir=%s' %
+ (constants.CROSTC_WORKSPACE, filename, results_dir))
+ retval = cmd_executer.RunCommand(cmd)
+ if retval == 0:
+ # Send the email, if the crosperf command worked.
+ filename = os.path.join(results_dir, 'msg_body.html')
+ if (os.path.exists(filename) and
+ os.path.exists(os.path.expanduser(MAIL_PROGRAM))):
+ vanilla_string = ' '
+ if options.vanilla_only:
+ vanilla_string = ' Vanilla '
+ command = ('cat %s | %s -s "Weekly%sReport results, %s" -team -html' %
+ (filename, MAIL_PROGRAM, vanilla_string, options.board))
+ retval = cmd_executer.RunCommand(command)
+
+ return retval
if __name__ == '__main__':
- retval = Main(sys.argv)
- sys.exit(retval)
+ retval = Main(sys.argv)
+ sys.exit(retval)