author     Luis Lozano <llozano@chromium.org>     2015-12-15 13:49:30 -0800
committer  Luis Lozano <llozano@chromium.org>     2015-12-16 17:36:06 +0000
commit     f2a3ef46f75d2196a93d3ed27f4d1fcf22b54fbe (patch)
tree       185d243c7eed7c7a0db6f0e640746cadc1479ea9 /crosperf
parent     2a66f70fef907c1cb15229cb58e5129cb620ac98 (diff)
download   toolchain-utils-f2a3ef46f75d2196a93d3ed27f4d1fcf22b54fbe.tar.gz
Run pyformat on all the toolchain-utils files.
This gets rid of a lot of lint issues. Ran by doing this:

for f in *.py; do echo -n "$f " ; if [ -x $f ]; then pyformat -i --remove_trailing_comma --yapf --force_quote_type=double $f ; else pyformat -i --remove_shebang --remove_trailing_comma --yapf --force_quote_type=double $f ; fi ; done

BUG=chromium:567921
TEST=Ran simple crosperf run.

Change-Id: I59778835fdaa5f706d2e1765924389f9e97433d1
Reviewed-on: https://chrome-internal-review.googlesource.com/242031
Reviewed-by: Luis Lozano <llozano@chromium.org>
Commit-Queue: Luis Lozano <llozano@chromium.org>
Tested-by: Luis Lozano <llozano@chromium.org>
Reviewed-by: Yunlian Jiang <yunlian@google.com>
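For readability, the one-line command above can be unrolled into the equivalent commented loop below. This is only a sketch of the pass described in the commit message: the pyformat flags are copied verbatim from it, and the interpretation of the -x test (executable scripts keep their shebang, plain modules have it stripped) is an inference from those flags rather than something the message states.

    # Reformat every Python file in the current directory in place.
    for f in *.py; do
      echo -n "$f "
      if [ -x "$f" ]; then
        # Executable scripts: keep the shebang line.
        pyformat -i --remove_trailing_comma --yapf --force_quote_type=double "$f"
      else
        # Non-executable modules: also strip any shebang line.
        pyformat -i --remove_shebang --remove_trailing_comma --yapf --force_quote_type=double "$f"
      fi
    done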
Diffstat (limited to 'crosperf')
-rw-r--r--  crosperf/benchmark.py | 19
-rw-r--r--  crosperf/benchmark_run.py | 183
-rwxr-xr-x  crosperf/benchmark_run_unittest.py | 352
-rwxr-xr-x  crosperf/benchmark_unittest.py | 49
-rw-r--r--  crosperf/column_chart.py | 16
-rw-r--r--  crosperf/compare_machines.py | 28
-rw-r--r--  crosperf/config.py | 1
-rwxr-xr-x  crosperf/config_unittest.py | 1
-rwxr-xr-x  crosperf/crosperf.py | 62
-rwxr-xr-x  crosperf/crosperf_test.py | 6
-rwxr-xr-x  crosperf/crosperf_unittest.py | 28
-rw-r--r--  crosperf/download_images.py | 61
-rwxr-xr-x  crosperf/download_images_buildid_test.py | 78
-rwxr-xr-x  crosperf/download_images_unittest.py | 78
-rw-r--r--  crosperf/experiment.py | 52
-rw-r--r--  crosperf/experiment_factory.py | 186
-rwxr-xr-x  crosperf/experiment_factory_unittest.py | 172
-rw-r--r--  crosperf/experiment_file.py | 57
-rwxr-xr-x  crosperf/experiment_file_unittest.py | 43
-rw-r--r--  crosperf/experiment_runner.py | 81
-rwxr-xr-x  crosperf/experiment_runner_unittest.py | 125
-rw-r--r--  crosperf/experiment_status.py | 64
-rw-r--r--  crosperf/field.py | 68
-rwxr-xr-x  crosperf/flag_test_unittest.py | 2
-rw-r--r--  crosperf/help.py | 28
-rw-r--r--  crosperf/image_checksummer.py | 32
-rw-r--r--  crosperf/label.py | 77
-rw-r--r--  crosperf/machine_image_manager.py | 273
-rwxr-xr-x  crosperf/machine_image_manager_unittest.py | 485
-rw-r--r--  crosperf/machine_manager.py | 227
-rwxr-xr-x  crosperf/machine_manager_unittest.py | 344
-rw-r--r--  crosperf/mock_instance.py | 175
-rw-r--r--  crosperf/perf_table.py | 15
-rw-r--r--  crosperf/results_cache.py | 322
-rwxr-xr-x  crosperf/results_cache_unittest.py | 480
-rw-r--r--  crosperf/results_organizer.py | 23
-rwxr-xr-x  crosperf/results_organizer_unittest.py | 154
-rw-r--r--  crosperf/results_report.py | 218
-rw-r--r--  crosperf/results_sorter.py | 4
-rw-r--r--  crosperf/schedv2.py | 605
-rwxr-xr-x  crosperf/schedv2_unittest.py | 283
-rw-r--r--  crosperf/settings.py | 19
-rw-r--r--  crosperf/settings_factory.py | 315
-rwxr-xr-x  crosperf/settings_factory_unittest.py | 27
-rwxr-xr-x  crosperf/settings_unittest.py | 109
-rw-r--r--  crosperf/suite_runner.py | 245
-rwxr-xr-x  crosperf/suite_runner_unittest.py | 181
-rw-r--r--  crosperf/test_flag.py | 4
-rw-r--r--  crosperf/translate_xbuddy.py | 10
49 files changed, 3266 insertions, 3201 deletions
diff --git a/crosperf/benchmark.py b/crosperf/benchmark.py
index 7fabf0b4..91e0f7c8 100644
--- a/crosperf/benchmark.py
+++ b/crosperf/benchmark.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -15,9 +14,17 @@ class Benchmark(object):
arguments.
"""
- def __init__(self, name, test_name, test_args, iterations,
- rm_chroot_tmp, perf_args, suite="",
- show_all_results=False, retries=0, run_local=False):
+ def __init__(self,
+ name,
+ test_name,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite='',
+ show_all_results=False,
+ retries=0,
+ run_local=False):
self.name = name
#For telemetry, this is the benchmark name.
self.test_name = test_name
@@ -30,8 +37,8 @@ class Benchmark(object):
self.suite = suite
self.show_all_results = show_all_results
self.retries = retries
- if self.suite == "telemetry":
+ if self.suite == 'telemetry':
self.show_all_results = True
if run_local and self.suite != 'telemetry_Crosperf':
- raise Exception("run_local is only supported by telemetry_Crosperf.")
+ raise Exception('run_local is only supported by telemetry_Crosperf.')
self.run_local = run_local
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
index f10326b0..fb3d6f33 100644
--- a/crosperf/benchmark_run.py
+++ b/crosperf/benchmark_run.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -20,23 +19,18 @@ from results_cache import Result
from results_cache import ResultsCache
from results_cache import TelemetryResult
+STATUS_FAILED = 'FAILED'
+STATUS_SUCCEEDED = 'SUCCEEDED'
+STATUS_IMAGING = 'IMAGING'
+STATUS_RUNNING = 'RUNNING'
+STATUS_WAITING = 'WAITING'
+STATUS_PENDING = 'PENDING'
-STATUS_FAILED = "FAILED"
-STATUS_SUCCEEDED = "SUCCEEDED"
-STATUS_IMAGING = "IMAGING"
-STATUS_RUNNING = "RUNNING"
-STATUS_WAITING = "WAITING"
-STATUS_PENDING = "PENDING"
class BenchmarkRun(threading.Thread):
- def __init__(self, name, benchmark,
- label,
- iteration,
- cache_conditions,
- machine_manager,
- logger_to_use,
- log_level,
- share_cache):
+
+ def __init__(self, name, benchmark, label, iteration, cache_conditions,
+ machine_manager, logger_to_use, log_level, share_cache):
threading.Thread.__init__(self)
self.name = name
self._logger = logger_to_use
@@ -54,7 +48,7 @@ class BenchmarkRun(threading.Thread):
self.cache_conditions = cache_conditions
self.runs_complete = 0
self.cache_hit = False
- self.failure_reason = ""
+ self.failure_reason = ''
self.test_args = benchmark.test_args
self.profiler_args = self._GetExtraAutotestArgs()
self._ce = command_executer.GetCommandExecuter(self._logger,
@@ -71,24 +65,13 @@ class BenchmarkRun(threading.Thread):
# Just use the first machine for running the cached version,
# without locking it.
self.cache = ResultsCache()
- self.cache.Init(self.label.chromeos_image,
- self.label.chromeos_root,
- self.benchmark.test_name,
- self.iteration,
- self.test_args,
- self.profiler_args,
- self.machine_manager,
- self.machine,
- self.label.board,
- self.cache_conditions,
- self._logger,
- self.log_level,
- self.label,
- self.share_cache,
- self.benchmark.suite,
- self.benchmark.show_all_results,
- self.benchmark.run_local
- )
+ self.cache.Init(self.label.chromeos_image, self.label.chromeos_root,
+ self.benchmark.test_name, self.iteration, self.test_args,
+ self.profiler_args, self.machine_manager, self.machine,
+ self.label.board, self.cache_conditions, self._logger,
+ self.log_level, self.label, self.share_cache,
+ self.benchmark.suite, self.benchmark.show_all_results,
+ self.benchmark.run_local)
self.result = self.cache.ReadResult()
self.cache_hit = (self.result is not None)
@@ -100,24 +83,22 @@ class BenchmarkRun(threading.Thread):
self.ReadCache()
if self.result:
- self._logger.LogOutput("%s: Cache hit." % self.name)
+ self._logger.LogOutput('%s: Cache hit.' % self.name)
self._logger.LogOutput(self.result.out, print_to_console=False)
self._logger.LogError(self.result.err, print_to_console=False)
elif self.label.cache_only:
- self._logger.LogOutput("%s: No cache hit." % self.name)
- output = "%s: No Cache hit." % self.name
+ self._logger.LogOutput('%s: No cache hit.' % self.name)
+ output = '%s: No Cache hit.' % self.name
retval = 1
- err = "No cache hit."
- self.result = Result.CreateFromRun(self._logger, self.log_level,
- self.label, self.machine,
- output, err, retval,
- self.benchmark.show_all_results,
- self.benchmark.test_name,
- self.benchmark.suite)
+ err = 'No cache hit.'
+ self.result = Result.CreateFromRun(
+ self._logger, self.log_level, self.label, self.machine, output, err,
+ retval, self.benchmark.show_all_results, self.benchmark.test_name,
+ self.benchmark.suite)
else:
- self._logger.LogOutput("%s: No cache hit." % self.name)
+ self._logger.LogOutput('%s: No cache hit.' % self.name)
self.timeline.Record(STATUS_WAITING)
# Try to acquire a machine now.
self.machine = self.AcquireMachine()
@@ -131,7 +112,7 @@ class BenchmarkRun(threading.Thread):
if self.machine and not self.label.chrome_version:
self.label.chrome_version = self.machine_manager.GetChromeVersion(
- self.machine)
+ self.machine)
if self.terminated:
return
@@ -140,7 +121,7 @@ class BenchmarkRun(threading.Thread):
self.timeline.Record(STATUS_SUCCEEDED)
else:
if self.timeline.GetLastEvent() != STATUS_FAILED:
- self.failure_reason = "Return value of test suite was non-zero."
+ self.failure_reason = 'Return value of test suite was non-zero.'
self.timeline.Record(STATUS_FAILED)
except Exception, e:
@@ -155,19 +136,19 @@ class BenchmarkRun(threading.Thread):
pass
elif self.machine:
if not self.machine.IsReachable():
- self._logger.LogOutput("Machine %s is not reachable, removing it."
- % self.machine.name)
+ self._logger.LogOutput('Machine %s is not reachable, removing it.' %
+ self.machine.name)
self.machine_manager.RemoveMachine(self.machine.name)
- self._logger.LogOutput("Releasing machine: %s" % self.machine.name)
+ self._logger.LogOutput('Releasing machine: %s' % self.machine.name)
self.machine_manager.ReleaseMachine(self.machine)
- self._logger.LogOutput("Released machine: %s" % self.machine.name)
+ self._logger.LogOutput('Released machine: %s' % self.machine.name)
def Terminate(self):
self.terminated = True
self.suite_runner.Terminate()
if self.timeline.GetLastEvent() != STATUS_FAILED:
self.timeline.Record(STATUS_FAILED)
- self.failure_reason = "Thread terminated."
+ self.failure_reason = 'Thread terminated.'
def AcquireMachine(self):
if self.owner_thread is not None:
@@ -177,14 +158,13 @@ class BenchmarkRun(threading.Thread):
while True:
machine = None
if self.terminated:
- raise Exception("Thread terminated while trying to acquire machine.")
+ raise Exception('Thread terminated while trying to acquire machine.')
machine = self.machine_manager.AcquireMachine(self.label)
if machine:
- self._logger.LogOutput("%s: Machine %s acquired at %s" %
- (self.name,
- machine.name,
+ self._logger.LogOutput('%s: Machine %s acquired at %s' %
+ (self.name, machine.name,
datetime.datetime.now()))
break
else:
@@ -193,26 +173,25 @@ class BenchmarkRun(threading.Thread):
return machine
def _GetExtraAutotestArgs(self):
- if self.benchmark.perf_args and self.benchmark.suite == "telemetry":
- self._logger.LogError("Telemetry does not support profiler.")
- self.benchmark.perf_args = ""
+ if self.benchmark.perf_args and self.benchmark.suite == 'telemetry':
+ self._logger.LogError('Telemetry does not support profiler.')
+ self.benchmark.perf_args = ''
- if self.benchmark.perf_args and self.benchmark.suite == "test_that":
- self._logger.LogError("test_that does not support profiler.")
- self.benchmark.perf_args = ""
+ if self.benchmark.perf_args and self.benchmark.suite == 'test_that':
+ self._logger.LogError('test_that does not support profiler.')
+ self.benchmark.perf_args = ''
if self.benchmark.perf_args:
- perf_args_list = self.benchmark.perf_args.split(" ")
- perf_args_list = [perf_args_list[0]] + ["-a"] + perf_args_list[1:]
- perf_args = " ".join(perf_args_list)
- if not perf_args_list[0] in ["record", "stat"]:
- raise Exception("perf_args must start with either record or stat")
- extra_test_args = ["--profiler=custom_perf",
- ("--profiler_args='perf_options=\"%s\"'" %
- perf_args)]
- return " ".join(extra_test_args)
+ perf_args_list = self.benchmark.perf_args.split(' ')
+ perf_args_list = [perf_args_list[0]] + ['-a'] + perf_args_list[1:]
+ perf_args = ' '.join(perf_args_list)
+ if not perf_args_list[0] in ['record', 'stat']:
+ raise Exception('perf_args must start with either record or stat')
+ extra_test_args = ['--profiler=custom_perf',
+ ("--profiler_args='perf_options=\"%s\"'" % perf_args)]
+ return ' '.join(extra_test_args)
else:
- return ""
+ return ''
def RunTest(self, machine):
self.timeline.Record(STATUS_IMAGING)
@@ -221,25 +200,16 @@ class BenchmarkRun(threading.Thread):
# guarenteed.
pass
else:
- self.machine_manager.ImageMachine(machine,
- self.label)
+ self.machine_manager.ImageMachine(machine, self.label)
self.timeline.Record(STATUS_RUNNING)
- [retval, out, err] = self.suite_runner.Run(machine.name,
- self.label,
- self.benchmark,
- self.test_args,
- self.profiler_args)
+ [retval, out, err] = self.suite_runner.Run(machine.name, self.label,
+ self.benchmark, self.test_args,
+ self.profiler_args)
self.run_completed = True
- return Result.CreateFromRun(self._logger,
- self.log_level,
- self.label,
- self.machine,
- out,
- err,
- retval,
+ return Result.CreateFromRun(self._logger, self.log_level, self.label,
+ self.machine, out, err, retval,
self.benchmark.show_all_results,
- self.benchmark.test_name,
- self.benchmark.suite)
+ self.benchmark.test_name, self.benchmark.suite)
def SetCacheConditions(self, cache_conditions):
self.cache_conditions = cache_conditions
@@ -257,42 +227,27 @@ class MockBenchmarkRun(BenchmarkRun):
# Just use the first machine for running the cached version,
# without locking it.
self.cache = MockResultsCache()
- self.cache.Init(self.label.chromeos_image,
- self.label.chromeos_root,
- self.benchmark.test_name,
- self.iteration,
- self.test_args,
- self.profiler_args,
- self.machine_manager,
- self.machine,
- self.label.board,
- self.cache_conditions,
- self._logger,
- self.log_level,
- self.label,
- self.share_cache,
- self.benchmark.suite,
- self.benchmark.show_all_results,
- self.benchmark.run_local
- )
+ self.cache.Init(self.label.chromeos_image, self.label.chromeos_root,
+ self.benchmark.test_name, self.iteration, self.test_args,
+ self.profiler_args, self.machine_manager, self.machine,
+ self.label.board, self.cache_conditions, self._logger,
+ self.log_level, self.label, self.share_cache,
+ self.benchmark.suite, self.benchmark.show_all_results,
+ self.benchmark.run_local)
self.result = self.cache.ReadResult()
self.cache_hit = (self.result is not None)
-
def RunTest(self, machine):
"""Remove Result.CreateFromRun for testing."""
self.timeline.Record(STATUS_IMAGING)
- self.machine_manager.ImageMachine(machine,
- self.label)
+ self.machine_manager.ImageMachine(machine, self.label)
self.timeline.Record(STATUS_RUNNING)
- [retval, out, err] = self.suite_runner.Run(machine.name,
- self.label,
- self.benchmark,
- self.test_args,
+ [retval, out, err] = self.suite_runner.Run(machine.name, self.label,
+ self.benchmark, self.test_args,
self.profiler_args)
self.run_completed = True
- rr = MockResult("logger", self.label, self.log_level, machine)
+ rr = MockResult('logger', self.label, self.log_level, machine)
rr.out = out
rr.err = err
rr.retval = retval
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
index 920b7d71..744f89c1 100755
--- a/crosperf/benchmark_run_unittest.py
+++ b/crosperf/benchmark_run_unittest.py
@@ -3,7 +3,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Testing of benchmark_run."""
import mock
@@ -28,57 +27,60 @@ from results_cache import ResultsCache
class BenchmarkRunTest(unittest.TestCase):
- """
- Unit tests for the BenchmarkRun class and all of its methods.
+ """Unit tests for the BenchmarkRun class and all of its methods.
"""
def setUp(self):
- self.test_benchmark = Benchmark("page_cycler.netsim.top_10", # name
- "page_cycler.netsim.top_10", # test_name
- "", # test_args
+ self.test_benchmark = Benchmark('page_cycler.netsim.top_10', # name
+ 'page_cycler.netsim.top_10', # test_name
+ '', # test_args
1, # iterations
False, # rm_chroot_tmp
- "", # perf_args
- suite="telemetry_Crosperf") # suite
+ '', # perf_args
+ suite='telemetry_Crosperf') # suite
- self.test_label = MockLabel("test1", "image1", "/tmp/test_benchmark_run",
- "x86-alex", "chromeos2-row1-rack4-host9.cros",
- image_args="", cache_dir="", cache_only=False,
- log_level="average", compiler="gcc")
+ self.test_label = MockLabel('test1',
+ 'image1',
+ '/tmp/test_benchmark_run',
+ 'x86-alex',
+ 'chromeos2-row1-rack4-host9.cros',
+ image_args='',
+ cache_dir='',
+ cache_only=False,
+ log_level='average',
+ compiler='gcc')
+ self.test_cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
+ CacheConditions.CHECKSUMS_MATCH]
- self.test_cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
- CacheConditions.CHECKSUMS_MATCH]
-
- self.mock_logger = logger.GetLogger(log_dir="", mock=True)
+ self.mock_logger = logger.GetLogger(log_dir='', mock=True)
self.mock_machine_manager = mock.Mock(spec=MachineManager)
def testDryRun(self):
- my_label = MockLabel("test1", "image1", "/tmp/test_benchmark_run",
- "x86-alex", "chromeos2-row1-rack4-host9.cros",
- image_args="", cache_dir="", cache_only=False,
- log_level="average", compiler="gcc")
-
- logging_level = "average"
- m = MockMachineManager("/tmp/chromeos_root", 0, logging_level)
- m.AddMachine("chromeos2-row1-rack4-host9.cros")
- bench = Benchmark("page_cycler.netsim.top_10", # name
- "page_cycler.netsim.top_10", # test_name
- "", # test_args
+ my_label = MockLabel('test1',
+ 'image1',
+ '/tmp/test_benchmark_run',
+ 'x86-alex',
+ 'chromeos2-row1-rack4-host9.cros',
+ image_args='',
+ cache_dir='',
+ cache_only=False,
+ log_level='average',
+ compiler='gcc')
+
+ logging_level = 'average'
+ m = MockMachineManager('/tmp/chromeos_root', 0, logging_level)
+ m.AddMachine('chromeos2-row1-rack4-host9.cros')
+ bench = Benchmark('page_cycler.netsim.top_10', # name
+ 'page_cycler.netsim.top_10', # test_name
+ '', # test_args
1, # iterations
False, # rm_chroot_tmp
- "", # perf_args
- suite="telemetry_Crosperf") # suite
- b = benchmark_run.MockBenchmarkRun("test run",
- bench,
- my_label,
- 1,
- [],
- m,
- logger.GetLogger(),
- logging_level,
- "")
+ '', # perf_args
+ suite='telemetry_Crosperf') # suite
+ b = benchmark_run.MockBenchmarkRun('test run', bench, my_label, 1, [], m,
+ logger.GetLogger(), logging_level, '')
b.cache = MockResultsCache()
b.suite_runner = MockSuiteRunner()
b.start()
@@ -90,8 +92,7 @@ class BenchmarkRunTest(unittest.TestCase):
'log_level', 'share_cache']
arg_spec = inspect.getargspec(benchmark_run.BenchmarkRun.__init__)
self.assertEqual(len(arg_spec.args), len(args_list))
- self.assertEqual (arg_spec.args, args_list)
-
+ self.assertEqual(arg_spec.args, args_list)
def test_init(self):
# Nothing really worth testing here; just field assignments.
@@ -102,64 +103,63 @@ class BenchmarkRunTest(unittest.TestCase):
pass
def test_run(self):
- br = benchmark_run.BenchmarkRun("test_run", self.test_benchmark,
- self.test_label, 1, self.test_cache_conditions,
- self.mock_machine_manager,
- self.mock_logger,
- "average", "")
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
def MockLogOutput(msg, print_to_console=False):
- "Helper function for test_run."
+ 'Helper function for test_run.'
self.log_output.append(msg)
def MockLogError(msg, print_to_console=False):
- "Helper function for test_run."
+ 'Helper function for test_run.'
self.log_error.append(msg)
def MockRecordStatus(msg):
- "Helper function for test_run."
+ 'Helper function for test_run.'
self.status.append(msg)
def FakeReadCache():
- "Helper function for test_run."
+ 'Helper function for test_run.'
br.cache = mock.Mock(spec=ResultsCache)
self.called_ReadCache = True
return 0
def FakeReadCacheSucceed():
- "Helper function for test_run."
+ 'Helper function for test_run.'
br.cache = mock.Mock(spec=ResultsCache)
br.result = mock.Mock(spec=Result)
- br.result.out = "result.out stuff"
- br.result.err = "result.err stuff"
+ br.result.out = 'result.out stuff'
+ br.result.err = 'result.err stuff'
br.result.retval = 0
self.called_ReadCache = True
return 0
def FakeReadCacheException():
- "Helper function for test_run."
- raise Exception("This is an exception test; it is supposed to happen")
+ 'Helper function for test_run.'
+ raise Exception('This is an exception test; it is supposed to happen')
def FakeAcquireMachine():
- "Helper function for test_run."
- mock_machine = MockCrosMachine ('chromeos1-row3-rack5-host7.cros',
- 'chromeos', 'average')
+ 'Helper function for test_run.'
+ mock_machine = MockCrosMachine('chromeos1-row3-rack5-host7.cros',
+ 'chromeos', 'average')
return mock_machine
def FakeRunTest(_machine):
- "Helper function for test_run."
+ 'Helper function for test_run.'
mock_result = mock.Mock(spec=Result)
mock_result.retval = 0
return mock_result
def FakeRunTestFail(_machine):
- "Helper function for test_run."
+ 'Helper function for test_run.'
mock_result = mock.Mock(spec=Result)
mock_result.retval = 1
return mock_result
def ResetTestValues():
- "Helper function for test_run."
+ 'Helper function for test_run.'
self.log_output = []
self.log_error = []
self.status = []
@@ -177,53 +177,52 @@ class BenchmarkRunTest(unittest.TestCase):
# First test: No cache hit, all goes well.
ResetTestValues()
br.run()
- self.assertTrue (self.called_ReadCache)
- self.assertEqual (self.log_output,
- ['test_run: No cache hit.',
- 'Releasing machine: chromeos1-row3-rack5-host7.cros',
- 'Released machine: chromeos1-row3-rack5-host7.cros'])
- self.assertEqual (len(self.log_error), 0)
- self.assertEqual (self.status, ['WAITING', 'SUCCEEDED'])
+ self.assertTrue(self.called_ReadCache)
+ self.assertEqual(self.log_output,
+ ['test_run: No cache hit.',
+ 'Releasing machine: chromeos1-row3-rack5-host7.cros',
+ 'Released machine: chromeos1-row3-rack5-host7.cros'])
+ self.assertEqual(len(self.log_error), 0)
+ self.assertEqual(self.status, ['WAITING', 'SUCCEEDED'])
# Second test: No cached result found; test run was "terminated" for some
# reason.
ResetTestValues()
br.terminated = True
br.run()
- self.assertTrue (self.called_ReadCache)
- self.assertEqual (self.log_output,
- ['test_run: No cache hit.',
- 'Releasing machine: chromeos1-row3-rack5-host7.cros',
- 'Released machine: chromeos1-row3-rack5-host7.cros'])
- self.assertEqual (len(self.log_error), 0)
- self.assertEqual (self.status, ['WAITING'])
+ self.assertTrue(self.called_ReadCache)
+ self.assertEqual(self.log_output,
+ ['test_run: No cache hit.',
+ 'Releasing machine: chromeos1-row3-rack5-host7.cros',
+ 'Released machine: chromeos1-row3-rack5-host7.cros'])
+ self.assertEqual(len(self.log_error), 0)
+ self.assertEqual(self.status, ['WAITING'])
# Third test. No cached result found; RunTest failed for some reason.
ResetTestValues()
br.terminated = False
br.RunTest = FakeRunTestFail
br.run()
- self.assertTrue (self.called_ReadCache)
- self.assertEqual (self.log_output,
- ['test_run: No cache hit.',
- 'Releasing machine: chromeos1-row3-rack5-host7.cros',
- 'Released machine: chromeos1-row3-rack5-host7.cros'])
- self.assertEqual (len(self.log_error), 0)
- self.assertEqual (self.status, ['WAITING', 'FAILED'])
+ self.assertTrue(self.called_ReadCache)
+ self.assertEqual(self.log_output,
+ ['test_run: No cache hit.',
+ 'Releasing machine: chromeos1-row3-rack5-host7.cros',
+ 'Released machine: chromeos1-row3-rack5-host7.cros'])
+ self.assertEqual(len(self.log_error), 0)
+ self.assertEqual(self.status, ['WAITING', 'FAILED'])
# Fourth test: ReadCache found a cached result.
ResetTestValues()
br.RunTest = FakeRunTest
br.ReadCache = FakeReadCacheSucceed
br.run()
- self.assertTrue (self.called_ReadCache)
- self.assertEqual (self.log_output,
- ['test_run: Cache hit.',
- 'result.out stuff',
- 'Releasing machine: chromeos1-row3-rack5-host7.cros',
- 'Released machine: chromeos1-row3-rack5-host7.cros'])
- self.assertEqual (self.log_error, ['result.err stuff'])
- self.assertEqual (self.status, ['SUCCEEDED'])
+ self.assertTrue(self.called_ReadCache)
+ self.assertEqual(self.log_output,
+ ['test_run: Cache hit.', 'result.out stuff',
+ 'Releasing machine: chromeos1-row3-rack5-host7.cros',
+ 'Released machine: chromeos1-row3-rack5-host7.cros'])
+ self.assertEqual(self.log_error, ['result.err stuff'])
+ self.assertEqual(self.status, ['SUCCEEDED'])
# Fifth test: ReadCache generates an exception; does the try/finally block
# work?
@@ -231,183 +230,172 @@ class BenchmarkRunTest(unittest.TestCase):
br.ReadCache = FakeReadCacheException
br.machine = FakeAcquireMachine()
br.run()
- self.assertEqual (self.log_error,
- ["Benchmark run: 'test_run' failed: This is an exception test; it is supposed to happen"])
- self.assertEqual (self.status, ['FAILED'])
-
+ self.assertEqual(self.log_error, [
+ "Benchmark run: 'test_run' failed: This is an exception test; it is "
+ "supposed to happen"
+ ])
+ self.assertEqual(self.status, ['FAILED'])
def test_terminate_pass(self):
- br = benchmark_run.BenchmarkRun("test_run", self.test_benchmark,
- self.test_label, 1, self.test_cache_conditions,
- self.mock_machine_manager,
- self.mock_logger,
- "average", "")
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
def GetLastEventPassed():
- "Helper function for test_terminate_pass"
+ 'Helper function for test_terminate_pass'
return benchmark_run.STATUS_SUCCEEDED
def RecordStub(status):
- "Helper function for test_terminate_pass"
+ 'Helper function for test_terminate_pass'
self.status = status
self.status = benchmark_run.STATUS_SUCCEEDED
- self.assertFalse (br.terminated)
- self.assertFalse (br.suite_runner._ct.IsTerminated())
+ self.assertFalse(br.terminated)
+ self.assertFalse(br.suite_runner._ct.IsTerminated())
br.timeline.GetLastEvent = GetLastEventPassed
br.timeline.Record = RecordStub
br.Terminate()
- self.assertTrue (br.terminated)
- self.assertTrue (br.suite_runner._ct.IsTerminated())
- self.assertEqual (self.status, benchmark_run.STATUS_FAILED)
-
-
+ self.assertTrue(br.terminated)
+ self.assertTrue(br.suite_runner._ct.IsTerminated())
+ self.assertEqual(self.status, benchmark_run.STATUS_FAILED)
def test_terminate_fail(self):
- br = benchmark_run.BenchmarkRun("test_run", self.test_benchmark,
- self.test_label, 1, self.test_cache_conditions,
- self.mock_machine_manager,
- self.mock_logger,
- "average", "")
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
def GetLastEventFailed():
- "Helper function for test_terminate_fail"
+ 'Helper function for test_terminate_fail'
return benchmark_run.STATUS_FAILED
def RecordStub(status):
- "Helper function for test_terminate_fail"
+ 'Helper function for test_terminate_fail'
self.status = status
self.status = benchmark_run.STATUS_SUCCEEDED
- self.assertFalse (br.terminated)
- self.assertFalse (br.suite_runner._ct.IsTerminated())
+ self.assertFalse(br.terminated)
+ self.assertFalse(br.suite_runner._ct.IsTerminated())
br.timeline.GetLastEvent = GetLastEventFailed
br.timeline.Record = RecordStub
br.Terminate()
- self.assertTrue (br.terminated)
- self.assertTrue (br.suite_runner._ct.IsTerminated())
- self.assertEqual (self.status, benchmark_run.STATUS_SUCCEEDED)
-
+ self.assertTrue(br.terminated)
+ self.assertTrue(br.suite_runner._ct.IsTerminated())
+ self.assertEqual(self.status, benchmark_run.STATUS_SUCCEEDED)
def test_acquire_machine(self):
- br = benchmark_run.BenchmarkRun("test_run", self.test_benchmark,
- self.test_label, 1, self.test_cache_conditions,
- self.mock_machine_manager,
- self.mock_logger,
- "average", "")
-
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
br.terminated = True
- self.assertRaises (Exception, br.AcquireMachine)
+ self.assertRaises(Exception, br.AcquireMachine)
br.terminated = False
- mock_machine = MockCrosMachine ('chromeos1-row3-rack5-host7.cros',
- 'chromeos', 'average')
+ mock_machine = MockCrosMachine('chromeos1-row3-rack5-host7.cros',
+ 'chromeos', 'average')
self.mock_machine_manager.AcquireMachine.return_value = mock_machine
machine = br.AcquireMachine()
- self.assertEqual (machine.name, 'chromeos1-row3-rack5-host7.cros')
-
+ self.assertEqual(machine.name, 'chromeos1-row3-rack5-host7.cros')
def test_get_extra_autotest_args(self):
- br = benchmark_run.BenchmarkRun("test_run", self.test_benchmark,
- self.test_label, 1, self.test_cache_conditions,
- self.mock_machine_manager,
- self.mock_logger,
- "average", "")
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
def MockLogError(err_msg):
- "Helper function for test_get_extra_autotest_args"
+ 'Helper function for test_get_extra_autotest_args'
self.err_msg = err_msg
self.mock_logger.LogError = MockLogError
result = br._GetExtraAutotestArgs()
- self.assertEqual(result, "")
+ self.assertEqual(result, '')
- self.test_benchmark.perf_args = "record -e cycles"
+ self.test_benchmark.perf_args = 'record -e cycles'
result = br._GetExtraAutotestArgs()
- self.assertEqual(result,
-"--profiler=custom_perf --profiler_args='perf_options=\"record -a -e cycles\"'")
+ self.assertEqual(
+ result,
+ "--profiler=custom_perf --profiler_args='perf_options=\"record -a -e "
+ "cycles\"'")
- self.test_benchmark.suite = "telemetry"
+ self.test_benchmark.suite = 'telemetry'
result = br._GetExtraAutotestArgs()
- self.assertEqual(result, "")
- self.assertEqual(self.err_msg, "Telemetry does not support profiler.")
+ self.assertEqual(result, '')
+ self.assertEqual(self.err_msg, 'Telemetry does not support profiler.')
- self.test_benchmark.perf_args = "record -e cycles"
- self.test_benchmark.suite = "test_that"
+ self.test_benchmark.perf_args = 'record -e cycles'
+ self.test_benchmark.suite = 'test_that'
result = br._GetExtraAutotestArgs()
- self.assertEqual(result, "")
- self.assertEqual(self.err_msg, "test_that does not support profiler.")
+ self.assertEqual(result, '')
+ self.assertEqual(self.err_msg, 'test_that does not support profiler.')
- self.test_benchmark.perf_args = "junk args"
- self.test_benchmark.suite = "telemetry_Crosperf"
+ self.test_benchmark.perf_args = 'junk args'
+ self.test_benchmark.suite = 'telemetry_Crosperf'
self.assertRaises(Exception, br._GetExtraAutotestArgs)
-
@mock.patch.object(SuiteRunner, 'Run')
@mock.patch.object(Result, 'CreateFromRun')
def test_run_test(self, mock_result, mock_runner):
- br = benchmark_run.BenchmarkRun("test_run", self.test_benchmark,
- self.test_label, 1, self.test_cache_conditions,
- self.mock_machine_manager,
- self.mock_logger,
- "average", "")
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
self.status = []
+
def MockRecord(status):
self.status.append(status)
br.timeline.Record = MockRecord
- mock_machine = MockCrosMachine ('chromeos1-row3-rack5-host7.cros',
- 'chromeos', 'average')
- mock_runner.return_value = [0, "{'Score':100}", ""]
+ mock_machine = MockCrosMachine('chromeos1-row3-rack5-host7.cros',
+ 'chromeos', 'average')
+ mock_runner.return_value = [0, "{'Score':100}", '']
br.RunTest(mock_machine)
self.assertTrue(br.run_completed)
- self.assertEqual (self.status, [ benchmark_run.STATUS_IMAGING,
- benchmark_run.STATUS_RUNNING])
-
- self.assertEqual (br.machine_manager.ImageMachine.call_count, 1)
- br.machine_manager.ImageMachine.assert_called_with (mock_machine,
- self.test_label)
- self.assertEqual (mock_runner.call_count, 1)
- mock_runner.assert_called_with (mock_machine.name, br.label,
- br.benchmark, "", br.profiler_args)
-
- self.assertEqual (mock_result.call_count, 1)
- mock_result.assert_called_with (self.mock_logger, 'average',
- self.test_label, None, "{'Score':100}",
- "", 0, False, 'page_cycler.netsim.top_10',
- 'telemetry_Crosperf')
+ self.assertEqual(self.status, [benchmark_run.STATUS_IMAGING,
+ benchmark_run.STATUS_RUNNING])
+ self.assertEqual(br.machine_manager.ImageMachine.call_count, 1)
+ br.machine_manager.ImageMachine.assert_called_with(mock_machine,
+ self.test_label)
+ self.assertEqual(mock_runner.call_count, 1)
+ mock_runner.assert_called_with(mock_machine.name, br.label, br.benchmark,
+ '', br.profiler_args)
+ self.assertEqual(mock_result.call_count, 1)
+ mock_result.assert_called_with(
+ self.mock_logger, 'average', self.test_label, None, "{'Score':100}", '',
+ 0, False, 'page_cycler.netsim.top_10', 'telemetry_Crosperf')
def test_set_cache_conditions(self):
- br = benchmark_run.BenchmarkRun("test_run", self.test_benchmark,
- self.test_label, 1, self.test_cache_conditions,
- self.mock_machine_manager,
- self.mock_logger,
- "average", "")
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
- phony_cache_conditions = [ 123, 456, True, False ]
+ phony_cache_conditions = [123, 456, True, False]
self.assertEqual(br.cache_conditions, self.test_cache_conditions)
- br.SetCacheConditions (phony_cache_conditions)
+ br.SetCacheConditions(phony_cache_conditions)
self.assertEqual(br.cache_conditions, phony_cache_conditions)
br.SetCacheConditions(self.test_cache_conditions)
self.assertEqual(br.cache_conditions, self.test_cache_conditions)
-if __name__ == "__main__":
+if __name__ == '__main__':
unittest.main()
diff --git a/crosperf/benchmark_unittest.py b/crosperf/benchmark_unittest.py
index 469ac0e5..32fb721e 100755
--- a/crosperf/benchmark_unittest.py
+++ b/crosperf/benchmark_unittest.py
@@ -7,39 +7,40 @@ from benchmark import Benchmark
import unittest
+
class BenchmarkTestCase(unittest.TestCase):
def test_benchmark(self):
# Test creating a benchmark with all the fields filled out.
- b1 = Benchmark("b1_test", # name
- "octane", # test_name
- "", # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- "record -e cycles", # perf_args
- "telemetry_Crosperf", # suite
- True) # show_all_results
+ b1 = Benchmark('b1_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles', # perf_args
+ 'telemetry_Crosperf', # suite
+ True) # show_all_results
# Test creating a benchmark field with default fields left out.
- b2 = Benchmark("b2_test", # name
- "octane", # test_name
- "", # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- "record -e cycles") # perf_args
- self.assertEqual(b2.suite, "")
+ b2 = Benchmark('b2_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles') # perf_args
+ self.assertEqual(b2.suite, '')
self.assertFalse(b2.show_all_results)
# Test explicitly creating 'suite=Telemetry' and 'show_all_results=False"
# and see what happens.
- b3 = Benchmark("b3_test", # name
- "octane", # test_name
- "", # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- "record -e cycles", # perf_args
- "telemetry", # suite
- False) # show_all_results
+ b3 = Benchmark('b3_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles', # perf_args
+ 'telemetry', # suite
+ False) # show_all_results
self.assertTrue(b3.show_all_results)
# Check to see if the args to Benchmark have changed since the last time
@@ -50,7 +51,7 @@ class BenchmarkTestCase(unittest.TestCase):
arg_spec = inspect.getargspec(Benchmark.__init__)
self.assertEqual(len(arg_spec.args), len(args_list))
for arg in args_list:
- self.assertIn (arg, arg_spec.args)
+ self.assertIn(arg, arg_spec.args)
if __name__ == '__main__':
diff --git a/crosperf/column_chart.py b/crosperf/column_chart.py
index 3be0f19a..7e6821d0 100644
--- a/crosperf/column_chart.py
+++ b/crosperf/column_chart.py
@@ -1,9 +1,10 @@
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""Module to draw column chart."""
+
class ColumnChart(object):
"""class to draw column chart."""
+
def __init__(self, title, width, height):
self.title = title
self.chart_div = filter(str.isalnum, title)
@@ -26,18 +27,18 @@ class ColumnChart(object):
self.rows.append(row)
def GetJavascript(self):
- res = "var data = new google.visualization.DataTable();\n"
+ res = 'var data = new google.visualization.DataTable();\n'
for column in self.columns:
res += "data.addColumn('%s', '%s');\n" % column
- res += "data.addRows(%s);\n" % len(self.rows)
+ res += 'data.addRows(%s);\n' % len(self.rows)
for row in range(len(self.rows)):
for column in range(len(self.columns)):
val = self.rows[row][column]
if isinstance(val, str):
val = "'%s'" % val
- res += "data.setValue(%s, %s, %s);\n" % (row, column, val)
+ res += 'data.setValue(%s, %s, %s);\n' % (row, column, val)
- series_javascript = ""
+ series_javascript = ''
for series in self.series:
series_javascript += "%s: {type: '%s', color: '%s'}, " % series
@@ -48,9 +49,10 @@ chart_%s.draw(data, {width: %s, height: %s, title: '%s', legend: 'none',
seriesType: "bars", lineWidth: 0, pointSize: 5, series: {%s},
vAxis: {minValue: 0}})
"""
+
res += chart_add_javascript % (self.chart_div, self.chart_div,
- self.chart_div, self.width,
- self.height, self.title, series_javascript)
+ self.chart_div, self.width, self.height,
+ self.title, series_javascript)
return res
def GetDiv(self):
diff --git a/crosperf/compare_machines.py b/crosperf/compare_machines.py
index f04fa2ed..0a61eeb9 100644
--- a/crosperf/compare_machines.py
+++ b/crosperf/compare_machines.py
@@ -1,7 +1,6 @@
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Module to compare two machines."""
from __future__ import print_function
@@ -12,30 +11,31 @@ import argparse
from machine_manager import CrosMachine
+
def PrintUsage(msg):
print(msg)
- print("Usage: ")
- print("\n compare_machines.py --chromeos_root=/path/to/chroot/ "
- "machine1 machine2 ...")
+ print('Usage: ')
+ print('\n compare_machines.py --chromeos_root=/path/to/chroot/ '
+ 'machine1 machine2 ...')
def Main(argv):
parser = argparse.ArgumentParser()
- parser.add_argument("--chromeos_root", default="/path/to/chromeos",
- dest="chromeos_root",
- help="ChromeOS root checkout directory")
- parser.add_argument("remotes", nargs=argparse.REMAINDER)
+ parser.add_argument('--chromeos_root',
+ default='/path/to/chromeos',
+ dest='chromeos_root',
+ help='ChromeOS root checkout directory')
+ parser.add_argument('remotes', nargs=argparse.REMAINDER)
options = parser.parse_args(argv)
machine_list = options.remotes
if len(machine_list) < 2:
- PrintUsage("ERROR: Must specify at least two machines.")
+ PrintUsage('ERROR: Must specify at least two machines.')
return 1
elif not os.path.exists(options.chromeos_root):
- PrintUsage("Error: chromeos_root does not exist %s" %
- options.chromeos_root)
+ PrintUsage('Error: chromeos_root does not exist %s' % options.chromeos_root)
return 1
chroot = options.chromeos_root
@@ -48,13 +48,13 @@ def Main(argv):
ret = 0
for cm in cros_machines:
- print("checksum for %s : %s" % (cm.name, cm.machine_checksum))
+ print('checksum for %s : %s' % (cm.name, cm.machine_checksum))
if cm.machine_checksum != test_machine_checksum:
ret = 1
- print("Machine checksums do not all match")
+ print('Machine checksums do not all match')
if ret == 0:
- print("Machines all match.")
+ print('Machines all match.')
return ret
diff --git a/crosperf/config.py b/crosperf/config.py
index 75f88256..58e053c8 100644
--- a/crosperf/config.py
+++ b/crosperf/config.py
@@ -1,5 +1,4 @@
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""A configure file."""
config = {}
diff --git a/crosperf/config_unittest.py b/crosperf/config_unittest.py
index 098ea7c7..397f2c2c 100755
--- a/crosperf/config_unittest.py
+++ b/crosperf/config_unittest.py
@@ -6,6 +6,7 @@ import config
import unittest
+
class ConfigTestCase(unittest.TestCase):
def test_config(self):
diff --git a/crosperf/crosperf.py b/crosperf/crosperf.py
index 21553909..28e78f5b 100755
--- a/crosperf/crosperf.py
+++ b/crosperf/crosperf.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""The driver script for running performance benchmarks on ChromeOS."""
import atexit
@@ -21,32 +20,33 @@ import test_flag
class MyIndentedHelpFormatter(optparse.IndentedHelpFormatter):
+
def format_description(self, description):
return description
def SetupParserOptions(parser):
"""Add all options to the parser."""
- parser.add_option("--dry_run",
- dest="dry_run",
- help=("Parse the experiment file and "
- "show what will be done"),
- action="store_true",
+ parser.add_option('--dry_run',
+ dest='dry_run',
+ help=('Parse the experiment file and '
+ 'show what will be done'),
+ action='store_true',
default=False)
# Allow each of the global fields to be overridden by passing in
# options. Add each global field as an option.
- option_settings = GlobalSettings("")
+ option_settings = GlobalSettings('')
for field_name in option_settings.fields:
field = option_settings.fields[field_name]
- parser.add_option("--%s" % field.name,
+ parser.add_option('--%s' % field.name,
dest=field.name,
help=field.description,
- action="store")
+ action='store')
def ConvertOptionsToSettings(options):
"""Convert options passed in into global settings."""
- option_settings = GlobalSettings("option_settings")
+ option_settings = GlobalSettings('option_settings')
for option_name in options.__dict__:
if (options.__dict__[option_name] is not None and
option_name in option_settings.fields):
@@ -73,18 +73,19 @@ def Main(argv):
parser = optparse.OptionParser(usage=Help().GetUsage(),
description=Help().GetHelp(),
formatter=MyIndentedHelpFormatter(),
- version="%prog 3.0")
+ version='%prog 3.0')
- parser.add_option("--noschedv2",
- dest="noschedv2",
+ parser.add_option('--noschedv2',
+ dest='noschedv2',
default=False,
- action="store_true",
- help=("Do not use new scheduler. "
- "Use original scheduler instead."))
- parser.add_option("-l", "--log_dir",
- dest="log_dir",
- default="",
- help="The log_dir, default is under <crosperf_logs>/logs")
+ action='store_true',
+ help=('Do not use new scheduler. '
+ 'Use original scheduler instead.'))
+ parser.add_option('-l',
+ '--log_dir',
+ dest='log_dir',
+ default='',
+ help='The log_dir, default is under <crosperf_logs>/logs')
SetupParserOptions(parser)
options, args = parser.parse_args(argv)
@@ -98,22 +99,21 @@ def Main(argv):
if len(args) == 2:
experiment_filename = args[1]
else:
- parser.error("Invalid number arguments.")
+ parser.error('Invalid number arguments.')
working_directory = os.getcwd()
if options.dry_run:
test_flag.SetTestMode(True)
- experiment_file = ExperimentFile(open(experiment_filename, "rb"),
- option_settings)
- if not experiment_file.GetGlobalSettings().GetField("name"):
+ experiment_file = ExperimentFile(
+ open(experiment_filename, 'rb'), option_settings)
+ if not experiment_file.GetGlobalSettings().GetField('name'):
experiment_name = os.path.basename(experiment_filename)
- experiment_file.GetGlobalSettings().SetField("name", experiment_name)
+ experiment_file.GetGlobalSettings().SetField('name', experiment_name)
experiment = ExperimentFactory().GetExperiment(experiment_file,
- working_directory,
- log_dir)
+ working_directory, log_dir)
- json_report = experiment_file.GetGlobalSettings().GetField("json_report")
+ json_report = experiment_file.GetGlobalSettings().GetField('json_report')
signal.signal(signal.SIGTERM, CallExitHandler)
atexit.register(Cleanup, experiment)
@@ -121,10 +121,12 @@ def Main(argv):
if options.dry_run:
runner = MockExperimentRunner(experiment, json_report)
else:
- runner = ExperimentRunner(experiment, json_report,
+ runner = ExperimentRunner(experiment,
+ json_report,
using_schedv2=(not options.noschedv2))
runner.Run()
-if __name__ == "__main__":
+
+if __name__ == '__main__':
Main(sys.argv)
diff --git a/crosperf/crosperf_test.py b/crosperf/crosperf_test.py
index 0c50e7b5..09aefcb6 100755
--- a/crosperf/crosperf_test.py
+++ b/crosperf/crosperf_test.py
@@ -8,7 +8,6 @@ import unittest
import crosperf
from utils.file_utils import FileUtils
-
EXPERIMENT_FILE_1 = """
board: x86-alex
remote: chromeos-alex3
@@ -28,13 +27,14 @@ EXPERIMENT_FILE_1 = """
class CrosPerfTest(unittest.TestCase):
+
def testDryRun(self):
filehandle, filename = tempfile.mkstemp()
os.write(filehandle, EXPERIMENT_FILE_1)
- crosperf.Main(["", filename, "--dry_run"])
+ crosperf.Main(['', filename, '--dry_run'])
os.remove(filename)
-if __name__ == "__main__":
+if __name__ == '__main__':
FileUtils.Configure(True)
unittest.main()
diff --git a/crosperf/crosperf_unittest.py b/crosperf/crosperf_unittest.py
index 082d8a6c..42a78ef8 100755
--- a/crosperf/crosperf_unittest.py
+++ b/crosperf/crosperf_unittest.py
@@ -1,14 +1,12 @@
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
-
"""Unittest for crosperf."""
import atexit
import os
import optparse
import StringIO
-
import mock
import unittest
@@ -40,6 +38,7 @@ EXPERIMENT_FILE_1 = """
}
"""
+
class CrosperfTest(unittest.TestCase):
def setUp(self):
@@ -51,27 +50,28 @@ class CrosperfTest(unittest.TestCase):
parser = optparse.OptionParser(usage=Help().GetUsage(),
description=Help().GetHelp(),
formatter=crosperf.MyIndentedHelpFormatter(),
- version="%prog 3.0")
- parser.add_option("-l", "--log_dir",
- dest="log_dir",
- default="",
- help="The log_dir, default is under <crosperf_logs>/logs")
+ version='%prog 3.0')
+ parser.add_option('-l',
+ '--log_dir',
+ dest='log_dir',
+ default='',
+ help='The log_dir, default is under <crosperf_logs>/logs')
options_before = parser._get_all_options()
self.assertEqual(len(options_before), 3)
crosperf.SetupParserOptions(parser)
options_after = parser._get_all_options()
self.assertEqual(len(options_after), 29)
-
def test_convert_options_to_settings(self):
parser = optparse.OptionParser(usage=Help().GetUsage(),
description=Help().GetHelp(),
formatter=crosperf.MyIndentedHelpFormatter(),
- version="%prog 3.0")
- parser.add_option("-l", "--log_dir",
- dest="log_dir",
- default="",
- help="The log_dir, default is under <crosperf_logs>/logs")
+ version='%prog 3.0')
+ parser.add_option('-l',
+ '--log_dir',
+ dest='log_dir',
+ default='',
+ help='The log_dir, default is under <crosperf_logs>/logs')
crosperf.SetupParserOptions(parser)
argv = ['crosperf/crosperf.py', 'temp.exp', '--rerun=True']
options, args = parser.parse_args(argv)
@@ -86,5 +86,5 @@ class CrosperfTest(unittest.TestCase):
self.assertFalse(settings.GetField('rerun'))
-if __name__ == "__main__":
+if __name__ == '__main__':
unittest.main()
diff --git a/crosperf/download_images.py b/crosperf/download_images.py
index 8fecf8b3..55fda51b 100644
--- a/crosperf/download_images.py
+++ b/crosperf/download_images.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright (c) 2014, 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -11,27 +10,29 @@ import test_flag
from utils import command_executer
+
class MissingImage(Exception):
"""Raised when the requested image does not exist in gs://"""
+
class ImageDownloader(object):
- def __init__(self, logger_to_use=None, log_level="verbose",
- cmd_exec=None):
+ def __init__(self, logger_to_use=None, log_level='verbose', cmd_exec=None):
self._logger = logger_to_use
self.log_level = log_level
- self._ce = cmd_exec or command_executer.GetCommandExecuter(self._logger,
- log_level = self.log_level)
+ self._ce = cmd_exec or command_executer.GetCommandExecuter(
+ self._logger,
+ log_level=self.log_level)
- def _GetBuildID (self, chromeos_root, xbuddy_label):
+ def _GetBuildID(self, chromeos_root, xbuddy_label):
# Get the translation of the xbuddy_label into the real Google Storage
# image name.
- command = ("cd ~/trunk/src/third_party/toolchain-utils/crosperf; "
+ command = ('cd ~/trunk/src/third_party/toolchain-utils/crosperf; '
"python translate_xbuddy.py '%s'" % xbuddy_label)
retval, build_id_tuple_str, _ = self._ce.ChrootRunCommandWOutput(
chromeos_root, command)
if not build_id_tuple_str:
- raise MissingImage ("Unable to find image for '%s'" % xbuddy_label)
+ raise MissingImage("Unable to find image for '%s'" % xbuddy_label)
build_id_tuple = ast.literal_eval(build_id_tuple_str)
build_id = build_id_tuple[0]
@@ -39,14 +40,13 @@ class ImageDownloader(object):
return build_id
def _DownloadImage(self, chromeos_root, build_id, image_name):
- if self.log_level == "average":
- self._logger.LogOutput ("Preparing to download %s image to local "
- "directory." % build_id)
+ if self.log_level == 'average':
+ self._logger.LogOutput('Preparing to download %s image to local '
+ 'directory.' % build_id)
# Make sure the directory for downloading the image exists.
- download_path = os.path.join(chromeos_root, "chroot/tmp",
- build_id)
- image_path = os.path.join(download_path, "chromiumos_test_image.bin")
+ download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id)
+ image_path = os.path.join(download_path, 'chromiumos_test_image.bin')
if not os.path.exists(download_path):
os.makedirs(download_path)
@@ -54,10 +54,10 @@ class ImageDownloader(object):
# download the image.
status = 0
if not os.path.exists(image_path):
- command = "gsutil cp %s /tmp/%s" % (image_name, build_id)
+ command = 'gsutil cp %s /tmp/%s' % (image_name, build_id)
- if self.log_level != "verbose":
- self._logger.LogOutput ("CMD: %s" % command)
+ if self.log_level != 'verbose':
+ self._logger.LogOutput('CMD: %s' % command)
status = self._ce.ChrootRunCommand(chromeos_root, command)
if status == 0:
@@ -67,34 +67,33 @@ class ImageDownloader(object):
def _UncompressImage(self, chromeos_root, build_id):
# Check to see if the file has already been uncompresssed, etc.
- if os.path.exists(os.path.join(chromeos_root, "chroot/tmp", build_id,
- "chromiumos_test_image.bin")):
+ if os.path.exists(os.path.join(chromeos_root, 'chroot/tmp', build_id,
+ 'chromiumos_test_image.bin')):
return 0
# Uncompress and untar the downloaded image.
- command = ("cd /tmp/%s ;unxz chromiumos_test_image.tar.xz; "
- "tar -xvf chromiumos_test_image.tar" % build_id)
- if self.log_level != "verbose":
- self._logger.LogOutput("CMD: %s" % command)
- print("(Uncompressing and un-tarring may take a couple of minutes..."
- "please be patient.)")
+ command = ('cd /tmp/%s ;unxz chromiumos_test_image.tar.xz; '
+ 'tar -xvf chromiumos_test_image.tar' % build_id)
+ if self.log_level != 'verbose':
+ self._logger.LogOutput('CMD: %s' % command)
+ print('(Uncompressing and un-tarring may take a couple of minutes...'
+ 'please be patient.)')
retval = self._ce.ChrootRunCommand(chromeos_root, command)
return retval
-
def Run(self, chromeos_root, xbuddy_label):
build_id = self._GetBuildID(chromeos_root, xbuddy_label)
- image_name = ("gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz"
+ image_name = ('gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz'
% build_id)
# Verify that image exists for build_id, before attempting to
# download it.
status = 0
if not test_flag.GetTestMode():
- cmd = "gsutil ls %s" % image_name
+ cmd = 'gsutil ls %s' % image_name
status = self._ce.ChrootRunCommand(chromeos_root, cmd)
if status != 0:
- raise MissingImage("Cannot find official image: %s." % image_name)
+ raise MissingImage('Cannot find official image: %s.' % image_name)
image_path = self._DownloadImage(chromeos_root, build_id, image_name)
retval = 0
if image_path:
@@ -102,7 +101,7 @@ class ImageDownloader(object):
else:
retval = 1
- if retval == 0 and self.log_level != "quiet":
- self._logger.LogOutput("Using image from %s." % image_path)
+ if retval == 0 and self.log_level != 'quiet':
+ self._logger.LogOutput('Using image from %s.' % image_path)
return retval, image_path
diff --git a/crosperf/download_images_buildid_test.py b/crosperf/download_images_buildid_test.py
index bfa61006..e3352f8e 100755
--- a/crosperf/download_images_buildid_test.py
+++ b/crosperf/download_images_buildid_test.py
@@ -7,7 +7,6 @@ import sys
import download_images
-
#On May 1, 2014:
#latest : lumpy-release/R34-5500.132.0
#latest-beta : lumpy-release/R35-5712.43.0
@@ -20,90 +19,89 @@ class ImageDownloaderBuildIDTest(object):
def __init__(self):
parser = optparse.OptionParser()
- parser.add_option("-c", "--chromeos_root", dest="chromeos_root",
- help="Directory containing ChromeOS root.")
+ parser.add_option('-c',
+ '--chromeos_root',
+ dest='chromeos_root',
+ help='Directory containing ChromeOS root.')
options = parser.parse_args(sys.argv[1:])[0]
if options.chromeos_root is None:
- self._usage(parser, "--chromeos_root must be set")
+ self._usage(parser, '--chromeos_root must be set')
self.chromeos_root = options.chromeos_root
self.tests_passed = 0
self.tests_run = 0
self.tests_failed = 0
def _usage(self, parser, message):
- print "ERROR: " + message
+ print 'ERROR: ' + message
parser.print_help()
sys.exit(0)
def print_test_status(self):
- print "----------------------------------------\n"
- print "Tests attempted: %d" % self.tests_run
- print "Tests passed: %d" % self.tests_passed
- print "Tests failed: %d" % self.tests_failed
- print "\n----------------------------------------"
-
- def assert_failure (self, msg):
- print "Assert failure: %s" % msg
+ print '----------------------------------------\n'
+ print 'Tests attempted: %d' % self.tests_run
+ print 'Tests passed: %d' % self.tests_passed
+ print 'Tests failed: %d' % self.tests_failed
+ print '\n----------------------------------------'
+
+ def assert_failure(self, msg):
+ print 'Assert failure: %s' % msg
self.print_test_status()
sys.exit(1)
-
def assertIsNotNone(self, arg, arg_name):
if arg == None:
self.tests_failed = self.tests_failed + 1
- self.assert_failure ("%s is not None" % arg_name)
-
+ self.assert_failure('%s is not None' % arg_name)
def assertNotEqual(self, arg1, arg2, arg1_name, arg2_name):
if arg1 == arg2:
self.tests_failed = self.tests_failed + 1
- self.assert_failure ("%s is not NotEqual to %s" % (arg1_name, arg2_name))
+ self.assert_failure('%s is not NotEqual to %s' % (arg1_name, arg2_name))
def assertEqual(self, arg1, arg2, arg1_name, arg2_name):
if arg1 != arg2:
self.tests_failed = self.tests_failed + 1
- self.assert_failure ("%s is not Equal to %s" % (arg1_name, arg2_name))
+ self.assert_failure('%s is not Equal to %s' % (arg1_name, arg2_name))
-
- def test_one_id (self, downloader, test_id, result_string, exact_match):
+ def test_one_id(self, downloader, test_id, result_string, exact_match):
print "Translating '%s'" % test_id
self.tests_run = self.tests_run + 1
result = downloader._GetBuildID(self.chromeos_root, test_id)
# Verify that we got a build id back.
- self.assertIsNotNone(result, "result")
+ self.assertIsNotNone(result, 'result')
# Verify that the result either contains or exactly matches the
# result_string, depending on the exact_match argument.
if exact_match:
- self.assertEqual (result, result_string, "result", result_string)
+ self.assertEqual(result, result_string, 'result', result_string)
else:
- self.assertNotEqual (result.find(result_string), -1, "result.find", "-1")
+ self.assertNotEqual(result.find(result_string), -1, 'result.find', '-1')
self.tests_passed = self.tests_passed + 1
def test_get_build_id(self):
+ """Test that the actual translating of xbuddy names is working properly.
"""
- Test that the actual translating of xbuddy names is working properly.
- """
- downloader = download_images.ImageDownloader(log_level="quiet")
-
- self.test_one_id (downloader, "remote/lumpy/latest-dev", "lumpy-release/R",
- False)
- self.test_one_id (downloader,
- "remote/trybot-lumpy-release-afdo-use/R35-5672.0.0-b86",
- "trybot-lumpy-release-afdo-use/R35-5672.0.0-b86", True)
- self.test_one_id (downloader, "remote/lumpy-release/R35-5672.0.0",
- "lumpy-release/R35-5672.0.0", True)
- self.test_one_id (downloader, "remote/lumpy/latest-dev", "lumpy-release/R",
- False)
- self.test_one_id (downloader, "remote/lumpy/latest-official",
- "lumpy-release/R", False)
- self.test_one_id (downloader, "remote/lumpy/latest-beta", "lumpy-release/R",
- False)
+ downloader = download_images.ImageDownloader(log_level='quiet')
+
+ self.test_one_id(downloader, 'remote/lumpy/latest-dev', 'lumpy-release/R',
+ False)
+ self.test_one_id(downloader,
+ 'remote/trybot-lumpy-release-afdo-use/R35-5672.0.0-b86',
+ 'trybot-lumpy-release-afdo-use/R35-5672.0.0-b86', True)
+ self.test_one_id(downloader, 'remote/lumpy-release/R35-5672.0.0',
+ 'lumpy-release/R35-5672.0.0', True)
+ self.test_one_id(downloader, 'remote/lumpy/latest-dev', 'lumpy-release/R',
+ False)
+ self.test_one_id(downloader, 'remote/lumpy/latest-official',
+ 'lumpy-release/R', False)
+ self.test_one_id(downloader, 'remote/lumpy/latest-beta', 'lumpy-release/R',
+ False)
self.print_test_status()
+
if __name__ == '__main__':
tester = ImageDownloaderBuildIDTest()
tester.test_get_build_id()
diff --git a/crosperf/download_images_unittest.py b/crosperf/download_images_unittest.py
index 9ca40a81..237369b7 100755
--- a/crosperf/download_images_unittest.py
+++ b/crosperf/download_images_unittest.py
@@ -12,10 +12,10 @@ from cros_utils import logger
import test_flag
-MOCK_LOGGER = logger.GetLogger(log_dir="", mock=True)
+MOCK_LOGGER = logger.GetLogger(log_dir='', mock=True)
-class ImageDownloaderTestcast(unittest.TestCase):
+class ImageDownloaderTestcast(unittest.TestCase):
@mock.patch.object(os, 'makedirs')
@mock.patch.object(os.path, 'exists')
@@ -23,9 +23,9 @@ class ImageDownloaderTestcast(unittest.TestCase):
# Set mock and test values.
mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
- test_chroot = "/usr/local/home/chromeos"
- test_build_id = "lumpy-release/R36-5814.0.0"
- image_path = ("gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz"
+ test_chroot = '/usr/local/home/chromeos'
+ test_build_id = 'lumpy-release/R36-5814.0.0'
+ image_path = ('gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz'
% test_build_id)
downloader = download_images.ImageDownloader(logger_to_use=MOCK_LOGGER,
@@ -38,16 +38,23 @@ class ImageDownloaderTestcast(unittest.TestCase):
# Verify os.path.exists was called twice, with proper arguments.
self.assertEqual(mock_path_exists.call_count, 2)
- mock_path_exists.assert_called_with('/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0/chromiumos_test_image.bin')
- mock_path_exists.assert_any_call('/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0')
+ mock_path_exists.assert_called_with(
+ '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0/chromiumos_test_image.bin')
+ mock_path_exists.assert_any_call(
+ '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0')
     # Verify we called os.makedirs
self.assertEqual(mock_mkdirs.call_count, 1)
- mock_mkdirs.assert_called_with('/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0')
+ mock_mkdirs.assert_called_with(
+ '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0')
# Verify we called ChrootRunCommand once, with proper arguments.
- self.assertEqual (mock_cmd_exec.ChrootRunCommand.call_count, 1)
- mock_cmd_exec.ChrootRunCommand.assert_called_with('/usr/local/home/chromeos', 'gsutil cp gs://chromeos-image-archive/lumpy-release/R36-5814.0.0/chromiumos_test_image.tar.xz /tmp/lumpy-release/R36-5814.0.0')
+ self.assertEqual(mock_cmd_exec.ChrootRunCommand.call_count, 1)
+ mock_cmd_exec.ChrootRunCommand.assert_called_with(
+ '/usr/local/home/chromeos',
+ 'gsutil cp '
+ 'gs://chromeos-image-archive/lumpy-release/R36-5814.0.0/chromiumos_test_image.tar.xz'
+ ' /tmp/lumpy-release/R36-5814.0.0')
     # Reset the values in the mocks; set os.path.exists to always return True.
mock_path_exists.reset_mock()
@@ -55,19 +62,19 @@ class ImageDownloaderTestcast(unittest.TestCase):
mock_path_exists.return_value = True
# Run downloader
- downloader._DownloadImage(test_chroot, test_build_id,image_path)
+ downloader._DownloadImage(test_chroot, test_build_id, image_path)
# Verify os.path.exists was called twice, with proper arguments.
self.assertEqual(mock_path_exists.call_count, 2)
- mock_path_exists.assert_called_with('/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0/chromiumos_test_image.bin')
- mock_path_exists.assert_any_call('/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0')
+ mock_path_exists.assert_called_with(
+ '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0/chromiumos_test_image.bin')
+ mock_path_exists.assert_any_call(
+ '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0')
# Verify we made no RunCommand or ChrootRunCommand calls (since
     # os.path.exists returned True, there was no work to be done).
- self.assertEqual (mock_cmd_exec.RunCommand.call_count, 0)
- self.assertEqual (mock_cmd_exec.ChrootRunCommand.call_count, 0)
-
-
+ self.assertEqual(mock_cmd_exec.RunCommand.call_count, 0)
+ self.assertEqual(mock_cmd_exec.ChrootRunCommand.call_count, 0)
@mock.patch.object(os.path, 'exists')
def test_uncompress_image(self, mock_path_exists):
@@ -85,12 +92,16 @@ class ImageDownloaderTestcast(unittest.TestCase):
downloader._UncompressImage(test_chroot, test_build_id)
# Verify os.path.exists was called once, with correct arguments.
- self.assertEqual (mock_path_exists.call_count, 1)
- mock_path_exists.assert_called_with('/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0/chromiumos_test_image.bin')
+ self.assertEqual(mock_path_exists.call_count, 1)
+ mock_path_exists.assert_called_with(
+ '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0/chromiumos_test_image.bin')
# Verify ChrootRunCommand was called, with correct arguments.
- self.assertEqual (mock_cmd_exec.ChrootRunCommand.call_count, 1)
- mock_cmd_exec.ChrootRunCommand.assert_called_with('/usr/local/home/chromeos', 'cd /tmp/lumpy-release/R36-5814.0.0 ;unxz chromiumos_test_image.tar.xz; tar -xvf chromiumos_test_image.tar')
+ self.assertEqual(mock_cmd_exec.ChrootRunCommand.call_count, 1)
+ mock_cmd_exec.ChrootRunCommand.assert_called_with(
+ '/usr/local/home/chromeos',
+ 'cd /tmp/lumpy-release/R36-5814.0.0 ;unxz '
+ 'chromiumos_test_image.tar.xz; tar -xvf chromiumos_test_image.tar')
# Set os.path.exists to always return False and run uncompress.
mock_path_exists.reset_mock()
@@ -99,19 +110,18 @@ class ImageDownloaderTestcast(unittest.TestCase):
downloader._UncompressImage(test_chroot, test_build_id)
# Verify os.path.exists was called once, with correct arguments.
- self.assertEqual (mock_path_exists.call_count, 1)
- mock_path_exists.assert_called_with('/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0/chromiumos_test_image.bin')
+ self.assertEqual(mock_path_exists.call_count, 1)
+ mock_path_exists.assert_called_with(
+ '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0/chromiumos_test_image.bin')
# Verify ChrootRunCommand was not called.
- self.assertEqual (mock_cmd_exec.ChrootRunCommand.call_count, 0)
-
-
+ self.assertEqual(mock_cmd_exec.ChrootRunCommand.call_count, 0)
def test_run(self):
# Set test arguments
- test_chroot = "/usr/local/home/chromeos"
- test_build_id = "remote/lumpy/latest-dev"
+ test_chroot = '/usr/local/home/chromeos'
+ test_build_id = 'remote/lumpy/latest-dev'
# Set values to test/check.
self.called_download_image = False
@@ -125,7 +135,7 @@ class ImageDownloaderTestcast(unittest.TestCase):
def GoodDownloadImage(root, build_id, image_path):
self.called_download_image = True
- return "chromiumos_test_image.bin"
+ return 'chromiumos_test_image.bin'
def BadDownloadImage(root, build_id, image_path):
self.called_download_image = True
@@ -147,8 +157,8 @@ class ImageDownloaderTestcast(unittest.TestCase):
downloader.Run(test_chroot, test_build_id)
# Make sure it called both _DownloadImage and _UncompressImage
- self.assertTrue (self.called_download_image)
- self.assertTrue (self.called_uncompress_image)
+ self.assertTrue(self.called_download_image)
+ self.assertTrue(self.called_uncompress_image)
# Reset values; Now use fake stub that simulates DownloadImage failing.
self.called_download_image = False
@@ -156,11 +166,11 @@ class ImageDownloaderTestcast(unittest.TestCase):
downloader._DownloadImage = BadDownloadImage
# Call Run again.
- downloader.Run (test_chroot, test_build_id)
+ downloader.Run(test_chroot, test_build_id)
# Verify that UncompressImage was not called, since _DownloadImage "failed"
- self.assertTrue (self.called_download_image)
- self.assertFalse (self.called_uncompress_image)
+ self.assertTrue(self.called_download_image)
+ self.assertFalse(self.called_uncompress_image)
if __name__ == '__main__':
diff --git a/crosperf/experiment.py b/crosperf/experiment.py
index 0926193b..de172cb1 100644
--- a/crosperf/experiment.py
+++ b/crosperf/experiment.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""The experiment setting module."""
from __future__ import print_function
@@ -21,13 +20,14 @@ from machine_manager import MachineManager
from machine_manager import MockMachineManager
import test_flag
+
class Experiment(object):
"""Class representing an Experiment to be run."""
- def __init__(self, name, remote, working_directory,
- chromeos_root, cache_conditions, labels, benchmarks,
- experiment_file, email_to, acquire_timeout, log_dir,
- log_level, share_cache, results_directory, locks_directory):
+ def __init__(self, name, remote, working_directory, chromeos_root,
+ cache_conditions, labels, benchmarks, experiment_file, email_to,
+ acquire_timeout, log_dir, log_level, share_cache,
+ results_directory, locks_directory):
self.name = name
self.working_directory = working_directory
self.remote = remote
@@ -37,7 +37,7 @@ class Experiment(object):
self.email_to = email_to
if not results_directory:
self.results_directory = os.path.join(self.working_directory,
- self.name + "_results")
+ self.name + '_results')
else:
self.results_directory = misc.CanonicalizePath(results_directory)
self.log_dir = log_dir
@@ -54,11 +54,11 @@ class Experiment(object):
self.locked_machines = []
if not remote:
- raise RuntimeError("No remote hosts specified")
+ raise RuntimeError('No remote hosts specified')
if not self.benchmarks:
- raise RuntimeError("No benchmarks specified")
+ raise RuntimeError('No benchmarks specified')
if not self.labels:
- raise RuntimeError("No labels specified")
+ raise RuntimeError('No labels specified')
# We need one chromeos_root to run the benchmarks in, but it doesn't
# matter where it is, unless the ABIs are different.
@@ -68,8 +68,8 @@ class Experiment(object):
chromeos_root = label.chromeos_root
break
if not chromeos_root:
- raise RuntimeError("No chromeos_root given and could not determine "
- "one from the image path.")
+ raise RuntimeError('No chromeos_root given and could not determine '
+ 'one from the image path.')
if test_flag.GetTestMode():
self.machine_manager = MockMachineManager(chromeos_root, acquire_timeout,
@@ -86,7 +86,7 @@ class Experiment(object):
# machines. This is a subset of self.remote. We make both lists the same.
self.remote = [m.name for m in self.machine_manager._all_machines]
if not self.remote:
- raise RuntimeError("No machine available for running experiment.")
+ raise RuntimeError('No machine available for running experiment.')
for label in labels:
# We filter out label remotes that are not reachable (not in
@@ -121,22 +121,15 @@ class Experiment(object):
for benchmark in self.benchmarks:
for iteration in range(1, benchmark.iterations + 1):
- benchmark_run_name = "%s: %s (%s)" % (label.name, benchmark.name,
+ benchmark_run_name = '%s: %s (%s)' % (label.name, benchmark.name,
iteration)
- full_name = "%s_%s_%s" % (label.name, benchmark.name, iteration)
- logger_to_use = logger.Logger(self.log_dir,
- "run.%s" % (full_name),
+ full_name = '%s_%s_%s' % (label.name, benchmark.name, iteration)
+ logger_to_use = logger.Logger(self.log_dir, 'run.%s' % (full_name),
True)
benchmark_runs.append(benchmark_run.BenchmarkRun(
- benchmark_run_name,
- benchmark,
- label,
- iteration,
- self.cache_conditions,
- self.machine_manager,
- logger_to_use,
- self.log_level,
- self.share_cache))
+ benchmark_run_name, benchmark, label, iteration,
+ self.cache_conditions, self.machine_manager, logger_to_use,
+ self.log_level, self.share_cache))
return benchmark_runs
@@ -211,10 +204,9 @@ class Experiment(object):
return
# If we locked any machines earlier, make sure we unlock them now.
- lock_mgr = afe_lock_machine.AFELockManager(all_machines, "",
- self.labels[0].chromeos_root,
- None)
- machine_states = lock_mgr.GetMachineStates("unlock")
+ lock_mgr = afe_lock_machine.AFELockManager(
+ all_machines, '', self.labels[0].chromeos_root, None)
+ machine_states = lock_mgr.GetMachineStates('unlock')
for k, state in machine_states.iteritems():
- if state["locked"]:
+ if state['locked']:
lock_mgr.UpdateLockInAFE(False, k)
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index bd31d78f..24508c9d 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""A module to generate experiments."""
from __future__ import print_function
@@ -29,8 +28,7 @@ telemetry_perfv2_tests = ['dromaeo.domcoreattr',
'kraken',
'octane',
'robohornet_pro',
- 'sunspider',
- ]
+ 'sunspider']
telemetry_pagecycler_tests = ['page_cycler.intl_ar_fa_he',
'page_cycler.intl_es_fr_pt-BR',
@@ -41,8 +39,7 @@ telemetry_pagecycler_tests = ['page_cycler.intl_ar_fa_he',
'page_cycler.moz',
'page_cycler.netsim.top_10',
'page_cycler.tough_layout_cases',
- 'page_cycler.typical_25',
- ]
+ 'page_cycler.typical_25']
telemetry_toolchain_old_perf_tests = ['dromaeo.domcoremodify',
'page_cycler.intl_es_fr_pt-BR',
@@ -53,8 +50,7 @@ telemetry_toolchain_old_perf_tests = ['dromaeo.domcoremodify',
'page_cycler.typical_25',
'robohornet_pro',
'spaceport',
- 'tab_switching.top_10',
- ]
+ 'tab_switching.top_10']
telemetry_toolchain_perf_tests = ['octane',
'kraken',
'speedometer',
@@ -62,8 +58,8 @@ telemetry_toolchain_perf_tests = ['octane',
'dromaeo.domcoremodify',
'smoothness.tough_webgl_cases',
'page_cycler.typical_25',
- 'media.tough_video_cases',
- ]
+ 'media.tough_video_cases']
+
class ExperimentFactory(object):
"""Factory class for building an Experiment, given an ExperimentFile as input.
@@ -78,19 +74,17 @@ class ExperimentFactory(object):
show_all_results, retries, run_local):
"""Add all the tests in a set to the benchmarks list."""
for test_name in benchmark_list:
- telemetry_benchmark = Benchmark(test_name, test_name, test_args,
- iterations, rm_chroot_tmp, perf_args,
- suite, show_all_results, retries,
- run_local)
+ telemetry_benchmark = Benchmark(
+ test_name, test_name, test_args, iterations, rm_chroot_tmp, perf_args,
+ suite, show_all_results, retries, run_local)
benchmarks.append(telemetry_benchmark)
-
def GetExperiment(self, experiment_file, working_directory, log_dir):
"""Construct an experiment from an experiment file."""
global_settings = experiment_file.GetGlobalSettings()
- experiment_name = global_settings.GetField("name")
- board = global_settings.GetField("board")
- remote = global_settings.GetField("remote")
+ experiment_name = global_settings.GetField('name')
+ board = global_settings.GetField('board')
+ remote = global_settings.GetField('remote')
     # This is used to remove the " and ' characters from the remote if the
     # user added them to the remote string.
new_remote = []
@@ -99,56 +93,56 @@ class ExperimentFactory(object):
c = re.sub('["\']', '', i)
new_remote.append(c)
remote = new_remote
- chromeos_root = global_settings.GetField("chromeos_root")
- rm_chroot_tmp = global_settings.GetField("rm_chroot_tmp")
- perf_args = global_settings.GetField("perf_args")
- acquire_timeout = global_settings.GetField("acquire_timeout")
- cache_dir = global_settings.GetField("cache_dir")
- cache_only = global_settings.GetField("cache_only")
- config.AddConfig("no_email", global_settings.GetField("no_email"))
- share_cache = global_settings.GetField("share_cache")
- results_dir = global_settings.GetField("results_dir")
- use_file_locks = global_settings.GetField("use_file_locks")
- locks_dir = global_settings.GetField("locks_dir")
+ chromeos_root = global_settings.GetField('chromeos_root')
+ rm_chroot_tmp = global_settings.GetField('rm_chroot_tmp')
+ perf_args = global_settings.GetField('perf_args')
+ acquire_timeout = global_settings.GetField('acquire_timeout')
+ cache_dir = global_settings.GetField('cache_dir')
+ cache_only = global_settings.GetField('cache_only')
+ config.AddConfig('no_email', global_settings.GetField('no_email'))
+ share_cache = global_settings.GetField('share_cache')
+ results_dir = global_settings.GetField('results_dir')
+ use_file_locks = global_settings.GetField('use_file_locks')
+ locks_dir = global_settings.GetField('locks_dir')
# If we pass a blank locks_dir to the Experiment, it will use the AFE server
# lock mechanism. So if the user specified use_file_locks, but did not
# specify a locks dir, set the locks dir to the default locks dir in
# file_lock_machine.
if use_file_locks and not locks_dir:
locks_dir = file_lock_machine.Machine.LOCKS_DIR
- chrome_src = global_settings.GetField("chrome_src")
- show_all_results = global_settings.GetField("show_all_results")
- log_level = global_settings.GetField("logging_level")
- if log_level not in ("quiet", "average", "verbose"):
- log_level = "verbose"
+ chrome_src = global_settings.GetField('chrome_src')
+ show_all_results = global_settings.GetField('show_all_results')
+ log_level = global_settings.GetField('logging_level')
+ if log_level not in ('quiet', 'average', 'verbose'):
+ log_level = 'verbose'
# Default cache hit conditions. The image checksum in the cache and the
# computed checksum of the image must match. Also a cache file must exist.
cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
CacheConditions.CHECKSUMS_MATCH]
- if global_settings.GetField("rerun_if_failed"):
+ if global_settings.GetField('rerun_if_failed'):
cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
- if global_settings.GetField("rerun"):
+ if global_settings.GetField('rerun'):
cache_conditions.append(CacheConditions.FALSE)
- if global_settings.GetField("same_machine"):
+ if global_settings.GetField('same_machine'):
cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
- if global_settings.GetField("same_specs"):
+ if global_settings.GetField('same_specs'):
cache_conditions.append(CacheConditions.MACHINES_MATCH)
# Construct benchmarks.
# Some fields are common with global settings. The values are
# inherited and/or merged with the global settings values.
benchmarks = []
- all_benchmark_settings = experiment_file.GetSettings("benchmark")
+ all_benchmark_settings = experiment_file.GetSettings('benchmark')
for benchmark_settings in all_benchmark_settings:
benchmark_name = benchmark_settings.name
- test_name = benchmark_settings.GetField("test_name")
+ test_name = benchmark_settings.GetField('test_name')
if not test_name:
test_name = benchmark_name
- test_args = benchmark_settings.GetField("test_args")
- iterations = benchmark_settings.GetField("iterations")
- suite = benchmark_settings.GetField("suite")
- retries = benchmark_settings.GetField("retries")
- run_local = benchmark_settings.GetField("run_local")
+ test_args = benchmark_settings.GetField('test_args')
+ iterations = benchmark_settings.GetField('iterations')
+ suite = benchmark_settings.GetField('suite')
+ retries = benchmark_settings.GetField('retries')
+ run_local = benchmark_settings.GetField('run_local')
if suite == 'telemetry_Crosperf':
if test_name == 'all_perfv2':
@@ -168,71 +162,81 @@ class ExperimentFactory(object):
run_local)
# Add non-telemetry toolchain-perf benchmarks:
benchmarks.append(Benchmark('graphics_WebGLAquarium',
- 'graphics_WebGLAquarium', '', iterations,
- rm_chroot_tmp, perf_args, '',
- show_all_results, retries,
+ 'graphics_WebGLAquarium',
+ '',
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ '',
+ show_all_results,
+ retries,
run_local=False))
elif test_name == 'all_toolchain_perf_old':
- self._AppendBenchmarkSet(benchmarks,
- telemetry_toolchain_old_perf_tests,
- test_args, iterations, rm_chroot_tmp,
- perf_args, suite, show_all_results, retries,
- run_local)
+ self._AppendBenchmarkSet(
+ benchmarks, telemetry_toolchain_old_perf_tests, test_args,
+ iterations, rm_chroot_tmp, perf_args, suite, show_all_results,
+ retries, run_local)
else:
- benchmark = Benchmark(test_name, test_name, test_args,
- iterations, rm_chroot_tmp, perf_args, suite,
+ benchmark = Benchmark(test_name, test_name, test_args, iterations,
+ rm_chroot_tmp, perf_args, suite,
show_all_results, retries, run_local)
benchmarks.append(benchmark)
else:
# Add the single benchmark.
- benchmark = Benchmark(benchmark_name, test_name, test_args,
- iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results, retries, run_local=False)
+ benchmark = Benchmark(benchmark_name,
+ test_name,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local=False)
benchmarks.append(benchmark)
if not benchmarks:
- raise RuntimeError("No benchmarks specified")
+ raise RuntimeError('No benchmarks specified')
# Construct labels.
# Some fields are common with global settings. The values are
# inherited and/or merged with the global settings values.
labels = []
- all_label_settings = experiment_file.GetSettings("label")
+ all_label_settings = experiment_file.GetSettings('label')
all_remote = list(remote)
for label_settings in all_label_settings:
label_name = label_settings.name
- image = label_settings.GetField("chromeos_image")
- chromeos_root = label_settings.GetField("chromeos_root")
- my_remote = label_settings.GetField("remote")
- compiler = label_settings.GetField("compiler")
+ image = label_settings.GetField('chromeos_image')
+ chromeos_root = label_settings.GetField('chromeos_root')
+ my_remote = label_settings.GetField('remote')
+ compiler = label_settings.GetField('compiler')
new_remote = []
if my_remote:
for i in my_remote:
c = re.sub('["\']', '', i)
new_remote.append(c)
my_remote = new_remote
- if image == "":
- build = label_settings.GetField("build")
+ if image == '':
+ build = label_settings.GetField('build')
if len(build) == 0:
raise RuntimeError("Can not have empty 'build' field!")
image = label_settings.GetXbuddyPath(build, board, chromeos_root,
log_level)
- cache_dir = label_settings.GetField("cache_dir")
- chrome_src = label_settings.GetField("chrome_src")
+ cache_dir = label_settings.GetField('cache_dir')
+ chrome_src = label_settings.GetField('chrome_src')
- # TODO(yunlian): We should consolidate code in machine_manager.py
- # to derermine whether we are running from within google or not
- if ("corp.google.com" in socket.gethostname() and
- (not my_remote
- or my_remote == remote
- and global_settings.GetField("board") != board)):
+ # TODO(yunlian): We should consolidate code in machine_manager.py
+      # to determine whether we are running from within Google or not
+ if ('corp.google.com' in socket.gethostname() and
+ (not my_remote or my_remote == remote and
+ global_settings.GetField('board') != board)):
my_remote = self.GetDefaultRemotes(board)
- if global_settings.GetField("same_machine") and len(my_remote) > 1:
- raise RuntimeError("Only one remote is allowed when same_machine "
- "is turned on")
+ if global_settings.GetField('same_machine') and len(my_remote) > 1:
+ raise RuntimeError('Only one remote is allowed when same_machine '
+ 'is turned on')
all_remote += my_remote
- image_args = label_settings.GetField("image_args")
+ image_args = label_settings.GetField('image_args')
if test_flag.GetTestMode():
# pylint: disable=too-many-function-args
label = MockLabel(label_name, image, chromeos_root, board, my_remote,
@@ -245,37 +249,35 @@ class ExperimentFactory(object):
labels.append(label)
if not labels:
- raise RuntimeError("No labels specified")
+ raise RuntimeError('No labels specified')
- email = global_settings.GetField("email")
+ email = global_settings.GetField('email')
all_remote += list(set(my_remote))
all_remote = list(set(all_remote))
- experiment = Experiment(experiment_name, all_remote,
- working_directory, chromeos_root,
- cache_conditions, labels, benchmarks,
- experiment_file.Canonicalize(),
- email, acquire_timeout, log_dir, log_level,
- share_cache,
+ experiment = Experiment(experiment_name, all_remote, working_directory,
+ chromeos_root, cache_conditions, labels, benchmarks,
+ experiment_file.Canonicalize(), email,
+ acquire_timeout, log_dir, log_level, share_cache,
results_dir, locks_dir)
return experiment
def GetDefaultRemotes(self, board):
- default_remotes_file = os.path.join(os.path.dirname(__file__),
- "default_remotes")
+ default_remotes_file = os.path.join(
+ os.path.dirname(__file__), 'default_remotes')
try:
with open(default_remotes_file) as f:
for line in f:
- key, v = line.split(":")
+ key, v = line.split(':')
if key.strip() == board:
- remotes = v.strip().split(" ")
+ remotes = v.strip().split(' ')
if remotes:
return remotes
else:
- raise RuntimeError("There is no remote for {0}".format(board))
+ raise RuntimeError('There is no remote for {0}'.format(board))
except IOError:
# TODO: rethrow instead of throwing different exception.
- raise RuntimeError("IOError while reading file {0}"
+ raise RuntimeError('IOError while reading file {0}'
.format(default_remotes_file))
else:
- raise RuntimeError("There is not remote for {0}".format(board))
+      raise RuntimeError('There is no remote for {0}'.format(board))
diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py
index 148b7e4b..97561008 100755
--- a/crosperf/experiment_factory_unittest.py
+++ b/crosperf/experiment_factory_unittest.py
@@ -45,171 +45,162 @@ class ExperimentFactoryTest(unittest.TestCase):
def testLoadExperimentFile1(self):
experiment_file = ExperimentFile(StringIO.StringIO(EXPERIMENT_FILE_1))
experiment = ExperimentFactory().GetExperiment(experiment_file,
- working_directory="",
- log_dir="")
- self.assertEqual(experiment.remote, ["chromeos-alex3"])
+ working_directory='',
+ log_dir='')
+ self.assertEqual(experiment.remote, ['chromeos-alex3'])
self.assertEqual(len(experiment.benchmarks), 1)
- self.assertEqual(experiment.benchmarks[0].name, "PageCycler")
- self.assertEqual(experiment.benchmarks[0].test_name, "PageCycler")
+ self.assertEqual(experiment.benchmarks[0].name, 'PageCycler')
+ self.assertEqual(experiment.benchmarks[0].test_name, 'PageCycler')
self.assertEqual(experiment.benchmarks[0].iterations, 3)
self.assertEqual(len(experiment.labels), 2)
self.assertEqual(experiment.labels[0].chromeos_image,
- "/usr/local/google/cros_image1.bin")
- self.assertEqual(experiment.labels[0].board,
- "x86-alex")
-
-
+ '/usr/local/google/cros_image1.bin')
+ self.assertEqual(experiment.labels[0].board, 'x86-alex')
def test_append_benchmark_set(self):
ef = ExperimentFactory()
bench_list = []
ef._AppendBenchmarkSet(bench_list,
- experiment_factory.telemetry_perfv2_tests,
- "", 1, False, "", "telemetry_Crosperf", False, 0,
- False)
- self.assertEqual(len(bench_list),
- len(experiment_factory.telemetry_perfv2_tests))
+ experiment_factory.telemetry_perfv2_tests, '', 1,
+ False, '', 'telemetry_Crosperf', False, 0, False)
+ self.assertEqual(
+ len(bench_list), len(experiment_factory.telemetry_perfv2_tests))
self.assertTrue(type(bench_list[0]) is benchmark.Benchmark)
bench_list = []
ef._AppendBenchmarkSet(bench_list,
- experiment_factory.telemetry_pagecycler_tests,
- "", 1, False, "", "telemetry_Crosperf", False, 0,
- False)
- self.assertEqual(len(bench_list),
- len(experiment_factory.telemetry_pagecycler_tests))
+ experiment_factory.telemetry_pagecycler_tests, '', 1,
+ False, '', 'telemetry_Crosperf', False, 0, False)
+ self.assertEqual(
+ len(bench_list), len(experiment_factory.telemetry_pagecycler_tests))
self.assertTrue(type(bench_list[0]) is benchmark.Benchmark)
bench_list = []
- ef._AppendBenchmarkSet(bench_list,
- experiment_factory.telemetry_toolchain_perf_tests,
- "", 1, False, "", "telemetry_Crosperf", False, 0,
- False)
- self.assertEqual(len(bench_list),
- len(experiment_factory.telemetry_toolchain_perf_tests))
+ ef._AppendBenchmarkSet(
+ bench_list, experiment_factory.telemetry_toolchain_perf_tests, '', 1,
+ False, '', 'telemetry_Crosperf', False, 0, False)
+ self.assertEqual(
+ len(bench_list), len(experiment_factory.telemetry_toolchain_perf_tests))
self.assertTrue(type(bench_list[0]) is benchmark.Benchmark)
-
-
@mock.patch.object(socket, 'gethostname')
@mock.patch.object(machine_manager.MachineManager, 'AddMachine')
def test_get_experiment(self, mock_machine_manager, mock_socket):
test_flag.SetTestMode(False)
self.append_benchmark_call_args = []
+
def FakeAppendBenchmarkSet(bench_list, set_list, args, iters, rm_ch,
perf_args, suite, show_all):
- "Helper function for test_get_experiment"
+ 'Helper function for test_get_experiment'
arg_list = [bench_list, set_list, args, iters, rm_ch, perf_args, suite,
show_all]
       self.append_benchmark_call_args.append(arg_list)
def FakeGetDefaultRemotes(board):
- return ["fake_chromeos_machine1.cros",
- "fake_chromeos_machine2.cros"]
+ return ['fake_chromeos_machine1.cros', 'fake_chromeos_machine2.cros']
def FakeGetXbuddyPath(build, board, chroot, log_level):
- return "fake_image_path"
+ return 'fake_image_path'
ef = ExperimentFactory()
ef._AppendBenchmarkSet = FakeAppendBenchmarkSet
ef.GetDefaultRemotes = FakeGetDefaultRemotes
- label_settings = settings_factory.LabelSettings("image_label")
- benchmark_settings = settings_factory.BenchmarkSettings("bench_test")
- global_settings = settings_factory.GlobalSettings("test_name")
+ label_settings = settings_factory.LabelSettings('image_label')
+ benchmark_settings = settings_factory.BenchmarkSettings('bench_test')
+ global_settings = settings_factory.GlobalSettings('test_name')
label_settings.GetXbuddyPath = FakeGetXbuddyPath
- mock_experiment_file = ExperimentFile(StringIO.StringIO(""))
+ mock_experiment_file = ExperimentFile(StringIO.StringIO(''))
mock_experiment_file.all_settings = []
test_flag.SetTestMode(True)
# Basic test.
- global_settings.SetField("name","unittest_test")
- global_settings.SetField("board", "lumpy")
- global_settings.SetField("remote", "123.45.67.89 123.45.76.80")
- benchmark_settings.SetField("test_name", "kraken")
- benchmark_settings.SetField("suite", "telemetry_Crosperf")
- benchmark_settings.SetField("iterations", 1)
- label_settings.SetField("chromeos_image", "chromeos/src/build/images/lumpy/latest/chromiumos_test_image.bin")
- label_settings.SetField("chrome_src", "/usr/local/google/home/chrome-top")
-
+ global_settings.SetField('name', 'unittest_test')
+ global_settings.SetField('board', 'lumpy')
+ global_settings.SetField('remote', '123.45.67.89 123.45.76.80')
+ benchmark_settings.SetField('test_name', 'kraken')
+ benchmark_settings.SetField('suite', 'telemetry_Crosperf')
+ benchmark_settings.SetField('iterations', 1)
+ label_settings.SetField(
+ 'chromeos_image',
+ 'chromeos/src/build/images/lumpy/latest/chromiumos_test_image.bin')
+ label_settings.SetField('chrome_src', '/usr/local/google/home/chrome-top')
mock_experiment_file.global_settings = global_settings
- mock_experiment_file.all_settings.append (label_settings)
- mock_experiment_file.all_settings.append (benchmark_settings)
- mock_experiment_file.all_settings.append (global_settings)
+ mock_experiment_file.all_settings.append(label_settings)
+ mock_experiment_file.all_settings.append(benchmark_settings)
+ mock_experiment_file.all_settings.append(global_settings)
- mock_socket.return_value = ""
+ mock_socket.return_value = ''
# First test. General test.
- exp = ef.GetExperiment(mock_experiment_file, "", "")
- self.assertEqual(exp.remote, ["123.45.67.89", "123.45.76.80"])
+ exp = ef.GetExperiment(mock_experiment_file, '', '')
+ self.assertEqual(exp.remote, ['123.45.67.89', '123.45.76.80'])
self.assertEqual(exp.cache_conditions, [0, 2, 1])
- self.assertEqual(exp.log_level, "average")
+ self.assertEqual(exp.log_level, 'average')
self.assertEqual(len(exp.benchmarks), 1)
- self.assertEqual(exp.benchmarks[0].name, "kraken")
- self.assertEqual(exp.benchmarks[0].test_name, "kraken")
+ self.assertEqual(exp.benchmarks[0].name, 'kraken')
+ self.assertEqual(exp.benchmarks[0].test_name, 'kraken')
self.assertEqual(exp.benchmarks[0].iterations, 1)
- self.assertEqual(exp.benchmarks[0].suite, "telemetry_Crosperf")
+ self.assertEqual(exp.benchmarks[0].suite, 'telemetry_Crosperf')
self.assertFalse(exp.benchmarks[0].show_all_results)
self.assertEqual(len(exp.labels), 1)
self.assertEqual(exp.labels[0].chromeos_image,
- "chromeos/src/build/images/lumpy/latest/"
- "chromiumos_test_image.bin")
- self.assertEqual(exp.labels[0].board, "lumpy")
+ 'chromeos/src/build/images/lumpy/latest/'
+ 'chromiumos_test_image.bin')
+ self.assertEqual(exp.labels[0].board, 'lumpy')
# Second test: Remotes listed in labels.
test_flag.SetTestMode(True)
- label_settings.SetField("remote", "chromeos1.cros chromeos2.cros")
- exp = ef.GetExperiment(mock_experiment_file, "", "")
- self.assertEqual(exp.remote,
- ["chromeos1.cros", "chromeos2.cros", "123.45.67.89",
- "123.45.76.80", ])
+ label_settings.SetField('remote', 'chromeos1.cros chromeos2.cros')
+ exp = ef.GetExperiment(mock_experiment_file, '', '')
+ self.assertEqual(exp.remote, ['chromeos1.cros',
+ 'chromeos2.cros',
+ '123.45.67.89',
+ '123.45.76.80'])
# Third test: Automatic fixing of bad logging_level param:
- global_settings.SetField("logging_level", "really loud!")
- exp = ef.GetExperiment(mock_experiment_file, "", "")
- self.assertEqual(exp.log_level, "verbose")
+ global_settings.SetField('logging_level', 'really loud!')
+ exp = ef.GetExperiment(mock_experiment_file, '', '')
+ self.assertEqual(exp.log_level, 'verbose')
# Fourth test: Setting cache conditions; only 1 remote with "same_machine"
- global_settings.SetField("rerun_if_failed", "true")
- global_settings.SetField("rerun", "true")
- global_settings.SetField("same_machine", "true")
- global_settings.SetField("same_specs", "true")
-
- self.assertRaises(Exception, ef.GetExperiment, mock_experiment_file, "",
- "")
- label_settings.SetField("remote", "")
- global_settings.SetField("remote", "123.45.67.89")
- exp = ef.GetExperiment(mock_experiment_file, "", "")
+ global_settings.SetField('rerun_if_failed', 'true')
+ global_settings.SetField('rerun', 'true')
+ global_settings.SetField('same_machine', 'true')
+ global_settings.SetField('same_specs', 'true')
+
+ self.assertRaises(Exception, ef.GetExperiment, mock_experiment_file, '', '')
+ label_settings.SetField('remote', '')
+ global_settings.SetField('remote', '123.45.67.89')
+ exp = ef.GetExperiment(mock_experiment_file, '', '')
self.assertEqual(exp.cache_conditions, [0, 2, 3, 4, 6, 1])
# Fifth Test: Adding a second label; calling GetXbuddyPath; omitting all
# remotes (Call GetDefaultRemotes).
- mock_socket.return_value = "test.corp.google.com"
- global_settings.SetField("remote", "")
- global_settings.SetField("same_machine", "false")
+ mock_socket.return_value = 'test.corp.google.com'
+ global_settings.SetField('remote', '')
+ global_settings.SetField('same_machine', 'false')
- label_settings_2 = settings_factory.LabelSettings("official_image_label")
- label_settings_2.SetField("chromeos_root", "chromeos")
- label_settings_2.SetField("build", "official-dev")
+ label_settings_2 = settings_factory.LabelSettings('official_image_label')
+ label_settings_2.SetField('chromeos_root', 'chromeos')
+ label_settings_2.SetField('build', 'official-dev')
label_settings_2.GetXbuddyPath = FakeGetXbuddyPath
- mock_experiment_file.all_settings.append (label_settings_2)
- exp = ef.GetExperiment(mock_experiment_file, "", "")
+ mock_experiment_file.all_settings.append(label_settings_2)
+ exp = ef.GetExperiment(mock_experiment_file, '', '')
self.assertEqual(len(exp.labels), 2)
- self.assertEqual(exp.labels[1].chromeos_image, "fake_image_path")
- self.assertEqual(exp.remote, ["fake_chromeos_machine1.cros",
- "fake_chromeos_machine2.cros"])
-
-
+ self.assertEqual(exp.labels[1].chromeos_image, 'fake_image_path')
+ self.assertEqual(exp.remote, ['fake_chromeos_machine1.cros',
+ 'fake_chromeos_machine2.cros'])
def test_get_default_remotes(self):
board_list = ['x86-alex', 'lumpy', 'stumpy', 'parrot', 'daisy', 'peach_pit',
@@ -224,7 +215,8 @@ class ExperimentFactoryTest(unittest.TestCase):
remotes = ef.GetDefaultRemotes(b)
self.assertEqual(len(remotes), 3)
-if __name__ == "__main__":
+
+if __name__ == '__main__':
FileUtils.Configure(True)
test_flag.SetTestMode(True)
unittest.main()
diff --git a/crosperf/experiment_file.py b/crosperf/experiment_file.py
index 3cb46dcc..7967855b 100644
--- a/crosperf/experiment_file.py
+++ b/crosperf/experiment_file.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -31,11 +30,11 @@ class ExperimentFile(object):
"""
# Field regex, e.g. "iterations: 3"
- _FIELD_VALUE_RE = re.compile(r"(\+)?\s*(\w+?)(?:\.(\S+))?\s*:\s*(.*)")
+ _FIELD_VALUE_RE = re.compile(r'(\+)?\s*(\w+?)(?:\.(\S+))?\s*:\s*(.*)')
# Open settings regex, e.g. "label {"
- _OPEN_SETTINGS_RE = re.compile(r"(?:([\w.-]+):)?\s*([\w.-]+)\s*{")
+ _OPEN_SETTINGS_RE = re.compile(r'(?:([\w.-]+):)?\s*([\w.-]+)\s*{')
# Close settings regex.
- _CLOSE_SETTINGS_RE = re.compile(r"}")
+ _CLOSE_SETTINGS_RE = re.compile(r'}')
def __init__(self, experiment_file, overrides=None):
"""Construct object from file-like experiment_file.
@@ -48,7 +47,7 @@ class ExperimentFile(object):
Exception: if invalid build type or description is invalid.
"""
self.all_settings = []
- self.global_settings = SettingsFactory().GetSettings("global", "global")
+ self.global_settings = SettingsFactory().GetSettings('global', 'global')
self.all_settings.append(self.global_settings)
self._Parse(experiment_file)
@@ -84,7 +83,7 @@ class ExperimentFile(object):
match = ExperimentFile._OPEN_SETTINGS_RE.match(line)
settings_type = match.group(1)
if settings_type is None:
- settings_type = ""
+ settings_type = ''
settings_name = match.group(2)
settings = SettingsFactory().GetSettings(settings_name, settings_type)
settings.SetParentSettings(self.global_settings)
@@ -100,7 +99,7 @@ class ExperimentFile(object):
elif ExperimentFile._CLOSE_SETTINGS_RE.match(line):
return settings
- raise Exception("Unexpected EOF while parsing settings block.")
+ raise Exception('Unexpected EOF while parsing settings block.')
def _Parse(self, experiment_file):
"""Parse experiment file and create settings."""
@@ -123,44 +122,44 @@ class ExperimentFile(object):
field = self._ParseField(reader)
self.global_settings.SetField(field[0], field[1], field[2])
else:
- raise Exception("Unexpected line.")
+ raise Exception('Unexpected line.')
except Exception, err:
- raise Exception("Line %d: %s\n==> %s" % (reader.LineNo(), str(err),
+ raise Exception('Line %d: %s\n==> %s' % (reader.LineNo(), str(err),
reader.CurrentLine(False)))
def Canonicalize(self):
"""Convert parsed experiment file back into an experiment file."""
- res = ""
- board = ""
+ res = ''
+ board = ''
for field_name in self.global_settings.fields:
field = self.global_settings.fields[field_name]
if field.assigned:
- res += "%s: %s\n" % (field.name, field.GetString())
- if field.name == "board":
+ res += '%s: %s\n' % (field.name, field.GetString())
+ if field.name == 'board':
board = field.GetString()
- res += "\n"
+ res += '\n'
for settings in self.all_settings:
- if settings.settings_type != "global":
- res += "%s: %s {\n" % (settings.settings_type, settings.name)
+ if settings.settings_type != 'global':
+ res += '%s: %s {\n' % (settings.settings_type, settings.name)
for field_name in settings.fields:
field = settings.fields[field_name]
if field.assigned:
- res += "\t%s: %s\n" % (field.name, field.GetString())
- if field.name == "chromeos_image":
- real_file = (os.path.realpath
- (os.path.expanduser(field.GetString())))
+ res += '\t%s: %s\n' % (field.name, field.GetString())
+ if field.name == 'chromeos_image':
+ real_file = (
+ os.path.realpath(os.path.expanduser(field.GetString())))
if real_file != field.GetString():
- res += "\t#actual_image: %s\n" % real_file
- if field.name == "build":
- chromeos_root_field = settings.fields["chromeos_root"]
+ res += '\t#actual_image: %s\n' % real_file
+ if field.name == 'build':
+ chromeos_root_field = settings.fields['chromeos_root']
if chromeos_root_field:
chromeos_root = chromeos_root_field.GetString()
value = field.GetString()
- xbuddy_path = settings.GetXbuddyPath (value, board, chromeos_root,
- "quiet")
- res += "\t#actual_image: %s\n" % xbuddy_path
- res += "}\n\n"
+ xbuddy_path = settings.GetXbuddyPath(value, board, chromeos_root,
+ 'quiet')
+ res += '\t#actual_image: %s\n' % xbuddy_path
+ res += '}\n\n'
return res
@@ -187,8 +186,8 @@ class ExperimentFileReader(object):
def _StripComment(self, line):
"""Strip comments starting with # from a line."""
- if "#" in line:
- line = line[:line.find("#")] + line[-1]
+ if '#' in line:
+ line = line[:line.find('#')] + line[-1]
return line
def LineNo(self):
diff --git a/crosperf/experiment_file_unittest.py b/crosperf/experiment_file_unittest.py
index 1adba31e..97779410 100755
--- a/crosperf/experiment_file_unittest.py
+++ b/crosperf/experiment_file_unittest.py
@@ -64,7 +64,7 @@ EXPERIMENT_FILE_3 = """
}
"""
-OUTPUT_FILE="""board: x86-alex
+OUTPUT_FILE = """board: x86-alex
remote: chromeos-alex3
perf_args: record -a -e cycles
@@ -82,39 +82,41 @@ label: image2 {
\tchromeos_image: /usr/local/google/cros_image2.bin
}\n\n"""
+
class ExperimentFileTest(unittest.TestCase):
+
def testLoadExperimentFile1(self):
input_file = StringIO.StringIO(EXPERIMENT_FILE_1)
experiment_file = ExperimentFile(input_file)
global_settings = experiment_file.GetGlobalSettings()
- self.assertEqual(global_settings.GetField("remote"), ["chromeos-alex3"])
- self.assertEqual(global_settings.GetField("perf_args"),
- "record -a -e cycles")
- benchmark_settings = experiment_file.GetSettings("benchmark")
+ self.assertEqual(global_settings.GetField('remote'), ['chromeos-alex3'])
+ self.assertEqual(
+ global_settings.GetField('perf_args'), 'record -a -e cycles')
+ benchmark_settings = experiment_file.GetSettings('benchmark')
self.assertEqual(len(benchmark_settings), 1)
- self.assertEqual(benchmark_settings[0].name, "PageCycler")
- self.assertEqual(benchmark_settings[0].GetField("iterations"), 3)
+ self.assertEqual(benchmark_settings[0].name, 'PageCycler')
+ self.assertEqual(benchmark_settings[0].GetField('iterations'), 3)
- label_settings = experiment_file.GetSettings("label")
+ label_settings = experiment_file.GetSettings('label')
self.assertEqual(len(label_settings), 2)
- self.assertEqual(label_settings[0].name, "image1")
- self.assertEqual(label_settings[0].GetField("chromeos_image"),
- "/usr/local/google/cros_image1.bin")
- self.assertEqual(label_settings[1].GetField("remote"), ["chromeos-lumpy1"])
- self.assertEqual(label_settings[0].GetField("remote"), ["chromeos-alex3"])
+ self.assertEqual(label_settings[0].name, 'image1')
+ self.assertEqual(label_settings[0].GetField('chromeos_image'),
+ '/usr/local/google/cros_image1.bin')
+ self.assertEqual(label_settings[1].GetField('remote'), ['chromeos-lumpy1'])
+ self.assertEqual(label_settings[0].GetField('remote'), ['chromeos-alex3'])
def testOverrideSetting(self):
input_file = StringIO.StringIO(EXPERIMENT_FILE_2)
experiment_file = ExperimentFile(input_file)
global_settings = experiment_file.GetGlobalSettings()
- self.assertEqual(global_settings.GetField("remote"), ["chromeos-alex3"])
+ self.assertEqual(global_settings.GetField('remote'), ['chromeos-alex3'])
- benchmark_settings = experiment_file.GetSettings("benchmark")
+ benchmark_settings = experiment_file.GetSettings('benchmark')
self.assertEqual(len(benchmark_settings), 2)
- self.assertEqual(benchmark_settings[0].name, "PageCycler")
- self.assertEqual(benchmark_settings[0].GetField("iterations"), 3)
- self.assertEqual(benchmark_settings[1].name, "AndroidBench")
- self.assertEqual(benchmark_settings[1].GetField("iterations"), 2)
+ self.assertEqual(benchmark_settings[0].name, 'PageCycler')
+ self.assertEqual(benchmark_settings[0].GetField('iterations'), 3)
+ self.assertEqual(benchmark_settings[1].name, 'AndroidBench')
+ self.assertEqual(benchmark_settings[1].GetField('iterations'), 2)
def testDuplicateLabel(self):
input_file = StringIO.StringIO(EXPERIMENT_FILE_3)
@@ -126,5 +128,6 @@ class ExperimentFileTest(unittest.TestCase):
res = experiment_file.Canonicalize()
self.assertEqual(res, OUTPUT_FILE)
-if __name__ == "__main__":
+
+if __name__ == '__main__':
unittest.main()
diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py
index 2f1e8668..2a654e69 100644
--- a/crosperf/experiment_runner.py
+++ b/crosperf/experiment_runner.py
@@ -1,5 +1,4 @@
# Copyright 2011-2015 Google Inc. All Rights Reserved.
-
"""The experiment runner module."""
import getpass
import os
@@ -30,7 +29,11 @@ class ExperimentRunner(object):
STATUS_TIME_DELAY = 30
THREAD_MONITOR_DELAY = 2
- def __init__(self, experiment, json_report, using_schedv2=False, log=None,
+ def __init__(self,
+ experiment,
+ json_report,
+ using_schedv2=False,
+ log=None,
cmd_exec=None):
self._experiment = experiment
self.l = log or logger.GetLogger(experiment.log_dir)
@@ -38,7 +41,7 @@ class ExperimentRunner(object):
self._terminated = False
self.json_report = json_report
self.locked_machines = []
- if experiment.log_level != "verbose":
+ if experiment.log_level != 'verbose':
self.STATUS_TIME_DELAY = 10
# Setting this to True will use crosperf sched v2 (feature in progress).
@@ -89,23 +92,22 @@ class ExperimentRunner(object):
else:
lock_mgr = afe_lock_machine.AFELockManager(
self._GetMachineList(),
- "",
+ '',
experiment.labels[0].chromeos_root,
None,
- log=self.l,
- )
+ log=self.l,)
for m in lock_mgr.machines:
if not lock_mgr.MachineIsKnown(m):
lock_mgr.AddLocalMachine(m)
- machine_states = lock_mgr.GetMachineStates("lock")
- lock_mgr.CheckMachineLocks(machine_states, "lock")
+ machine_states = lock_mgr.GetMachineStates('lock')
+ lock_mgr.CheckMachineLocks(machine_states, 'lock')
self.locked_machines = lock_mgr.UpdateMachines(True)
self._experiment.locked_machines = self.locked_machines
self._UpdateMachineList(self.locked_machines)
self._experiment.machine_manager.RemoveNonLockedMachines(
self.locked_machines)
if len(self.locked_machines) == 0:
- raise RuntimeError("Unable to lock any machines.")
+ raise RuntimeError('Unable to lock any machines.')
def _UnlockAllMachines(self, experiment):
"""Attempt to globally unlock all of the machines requested for run.
@@ -118,13 +120,12 @@ class ExperimentRunner(object):
lock_mgr = afe_lock_machine.AFELockManager(
self.locked_machines,
- "",
+ '',
experiment.labels[0].chromeos_root,
None,
- log=self.l,
- )
- machine_states = lock_mgr.GetMachineStates("unlock")
- lock_mgr.CheckMachineLocks(machine_states, "unlock")
+ log=self.l,)
+ machine_states = lock_mgr.GetMachineStates('unlock')
+ lock_mgr.CheckMachineLocks(machine_states, 'unlock')
lock_mgr.UpdateMachines(False)
def _ClearCacheEntries(self, experiment):
@@ -138,7 +139,7 @@ class ExperimentRunner(object):
br.benchmark.show_all_results, br.benchmark.run_local)
cache_dir = cache._GetCacheDirForWrite()
if os.path.exists(cache_dir):
- self.l.LogOutput("Removing cache dir: %s" % cache_dir)
+ self.l.LogOutput('Removing cache dir: %s' % cache_dir)
shutil.rmtree(cache_dir)
def _Run(self, experiment):
@@ -153,15 +154,15 @@ class ExperimentRunner(object):
status = ExperimentStatus(experiment)
experiment.Run()
last_status_time = 0
- last_status_string = ""
+ last_status_string = ''
try:
- if experiment.log_level != "verbose":
+ if experiment.log_level != 'verbose':
self.l.LogStartDots()
while not experiment.IsComplete():
if last_status_time + self.STATUS_TIME_DELAY < time.time():
last_status_time = time.time()
- border = "=============================="
- if experiment.log_level == "verbose":
+ border = '=============================='
+ if experiment.log_level == 'verbose':
self.l.LogOutput(border)
self.l.LogOutput(status.GetProgressString())
self.l.LogOutput(status.GetStatusString())
@@ -179,12 +180,12 @@ class ExperimentRunner(object):
time.sleep(self.THREAD_MONITOR_DELAY)
except KeyboardInterrupt:
self._terminated = True
- self.l.LogError("Ctrl-c pressed. Cleaning up...")
+ self.l.LogError('Ctrl-c pressed. Cleaning up...')
experiment.Terminate()
raise
except SystemExit:
self._terminated = True
- self.l.LogError("Unexpected exit. Cleaning up...")
+ self.l.LogError('Unexpected exit. Cleaning up...')
experiment.Terminate()
raise
finally:
@@ -201,28 +202,28 @@ class ExperimentRunner(object):
if not benchmark_run.cache_hit:
send_mail = True
break
- if (not send_mail and not experiment.email_to
- or config.GetConfig("no_email")):
+ if (not send_mail and not experiment.email_to or
+ config.GetConfig('no_email')):
return
label_names = []
for label in experiment.labels:
label_names.append(label.name)
- subject = "%s: %s" % (experiment.name, " vs. ".join(label_names))
+ subject = '%s: %s' % (experiment.name, ' vs. '.join(label_names))
text_report = TextResultsReport(experiment, True).GetReport()
- text_report += ("\nResults are stored in %s.\n" %
+ text_report += ('\nResults are stored in %s.\n' %
experiment.results_directory)
text_report = "<pre style='font-size: 13px'>%s</pre>" % text_report
html_report = HTMLResultsReport(experiment).GetReport()
- attachment = EmailSender.Attachment("report.html", html_report)
+ attachment = EmailSender.Attachment('report.html', html_report)
email_to = experiment.email_to or []
email_to.append(getpass.getuser())
EmailSender().SendEmail(email_to,
subject,
text_report,
attachments=[attachment],
- msg_type="html")
+ msg_type='html')
def _StoreResults(self, experiment):
if self._terminated:
@@ -230,32 +231,30 @@ class ExperimentRunner(object):
results_directory = experiment.results_directory
FileUtils().RmDir(results_directory)
FileUtils().MkDirP(results_directory)
- self.l.LogOutput("Storing experiment file in %s." % results_directory)
- experiment_file_path = os.path.join(results_directory,
- "experiment.exp")
+ self.l.LogOutput('Storing experiment file in %s.' % results_directory)
+ experiment_file_path = os.path.join(results_directory, 'experiment.exp')
FileUtils().WriteFile(experiment_file_path, experiment.experiment_file)
- self.l.LogOutput("Storing results report in %s." % results_directory)
- results_table_path = os.path.join(results_directory, "results.html")
+ self.l.LogOutput('Storing results report in %s.' % results_directory)
+ results_table_path = os.path.join(results_directory, 'results.html')
report = HTMLResultsReport(experiment).GetReport()
if self.json_report:
JSONResultsReport(experiment).GetReport(results_directory)
FileUtils().WriteFile(results_table_path, report)
- self.l.LogOutput("Storing email message body in %s." % results_directory)
- msg_file_path = os.path.join(results_directory, "msg_body.html")
+ self.l.LogOutput('Storing email message body in %s.' % results_directory)
+ msg_file_path = os.path.join(results_directory, 'msg_body.html')
text_report = TextResultsReport(experiment, True).GetReport()
- text_report += ("\nResults are stored in %s.\n" %
+ text_report += ('\nResults are stored in %s.\n' %
experiment.results_directory)
msg_body = "<pre style='font-size: 13px'>%s</pre>" % text_report
FileUtils().WriteFile(msg_file_path, msg_body)
- self.l.LogOutput("Storing results of each benchmark run.")
+ self.l.LogOutput('Storing results of each benchmark run.')
for benchmark_run in experiment.benchmark_runs:
if benchmark_run.result:
benchmark_run_name = filter(str.isalnum, benchmark_run.name)
- benchmark_run_path = os.path.join(results_directory,
- benchmark_run_name)
+ benchmark_run_path = os.path.join(results_directory, benchmark_run_name)
benchmark_run.result.CopyResultsTo(benchmark_run_path)
benchmark_run.result.CleanUp(benchmark_run.benchmark.rm_chroot_tmp)
@@ -281,10 +280,10 @@ class MockExperimentRunner(ExperimentRunner):
experiment.name)
def _PrintTable(self, experiment):
- self.l.LogOutput("Would print the experiment table.")
+ self.l.LogOutput('Would print the experiment table.')
def _Email(self, experiment):
- self.l.LogOutput("Would send result email.")
+ self.l.LogOutput('Would send result email.')
def _StoreResults(self, experiment):
- self.l.LogOutput("Would store the results.")
+ self.l.LogOutput('Would store the results.')
diff --git a/crosperf/experiment_runner_unittest.py b/crosperf/experiment_runner_unittest.py
index 46f50934..f665587e 100755
--- a/crosperf/experiment_runner_unittest.py
+++ b/crosperf/experiment_runner_unittest.py
@@ -44,6 +44,7 @@ EXPERIMENT_FILE_1 = """
}
"""
+
class FakeLogger(object):
def __init__(self):
@@ -85,6 +86,7 @@ class FakeLogger(object):
self.LogEndDotsCount = 0
self.LogAppendDotCount = 0
+
class ExperimentRunnerTest(unittest.TestCase):
run_counter = 0
@@ -95,33 +97,32 @@ class ExperimentRunnerTest(unittest.TestCase):
test_flag.SetTestMode(True)
experiment_file = ExperimentFile(StringIO.StringIO(EXPERIMENT_FILE_1))
experiment = ExperimentFactory().GetExperiment(experiment_file,
- working_directory="",
- log_dir="")
+ working_directory='',
+ log_dir='')
return experiment
- @mock.patch.object (machine_manager.MachineManager, 'AddMachine')
- @mock.patch.object (os.path, 'isfile')
+ @mock.patch.object(machine_manager.MachineManager, 'AddMachine')
+ @mock.patch.object(os.path, 'isfile')
def setUp(self, mock_isfile, mock_addmachine):
mock_isfile.return_value = True
self.exp = self.make_fake_experiment()
-
def test_init(self):
- er = experiment_runner.ExperimentRunner(self.exp, json_report=False,
+ er = experiment_runner.ExperimentRunner(self.exp,
+ json_report=False,
using_schedv2=False,
log=self.mock_logger,
cmd_exec=self.mock_cmd_exec)
- self.assertFalse (er._terminated)
- self.assertEqual (er.STATUS_TIME_DELAY, 10)
+ self.assertFalse(er._terminated)
+ self.assertEqual(er.STATUS_TIME_DELAY, 10)
- self.exp.log_level = "verbose"
- er = experiment_runner.ExperimentRunner(self.exp, json_report=False,
+ self.exp.log_level = 'verbose'
+ er = experiment_runner.ExperimentRunner(self.exp,
+ json_report=False,
using_schedv2=False,
log=self.mock_logger,
cmd_exec=self.mock_cmd_exec)
- self.assertEqual (er.STATUS_TIME_DELAY, 30)
-
-
+ self.assertEqual(er.STATUS_TIME_DELAY, 30)
@mock.patch.object(experiment_status.ExperimentStatus, 'GetStatusString')
@mock.patch.object(experiment_status.ExperimentStatus, 'GetProgressString')
@@ -150,13 +151,14 @@ class ExperimentRunnerTest(unittest.TestCase):
self.exp.IsComplete = FakeIsComplete
# Test 1: log_level == "quiet"
- self.exp.log_level = "quiet"
- er = experiment_runner.ExperimentRunner(self.exp, json_report=False,
+ self.exp.log_level = 'quiet'
+ er = experiment_runner.ExperimentRunner(self.exp,
+ json_report=False,
using_schedv2=False,
- log = self.mock_logger,
- cmd_exec =self.mock_cmd_exec)
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
er.STATUS_TIME_DELAY = 2
- mock_status_string.return_value = "Fake status string"
+ mock_status_string.return_value = 'Fake status string'
er._Run(self.exp)
self.assertEqual(self.run_count, 1)
self.assertTrue(self.is_complete_count > 0)
@@ -174,14 +176,15 @@ class ExperimentRunnerTest(unittest.TestCase):
# Test 2: log_level == "average"
self.mock_logger.Reset()
reset()
- self.exp.log_level = "average"
+ self.exp.log_level = 'average'
mock_status_string.call_count = 0
- er = experiment_runner.ExperimentRunner(self.exp, json_report=False,
+ er = experiment_runner.ExperimentRunner(self.exp,
+ json_report=False,
using_schedv2=False,
log=self.mock_logger,
cmd_exec=self.mock_cmd_exec)
er.STATUS_TIME_DELAY = 2
- mock_status_string.return_value = "Fake status string"
+ mock_status_string.return_value = 'Fake status string'
er._Run(self.exp)
self.assertEqual(self.run_count, 1)
self.assertTrue(self.is_complete_count > 0)
@@ -196,19 +199,19 @@ class ExperimentRunnerTest(unittest.TestCase):
'=============================='])
self.assertEqual(len(self.mock_logger.error_msgs), 0)
-
# Test 3: log_level == "verbose"
self.mock_logger.Reset()
reset()
- self.exp.log_level = "verbose"
+ self.exp.log_level = 'verbose'
mock_status_string.call_count = 0
- er = experiment_runner.ExperimentRunner(self.exp, json_report=False,
+ er = experiment_runner.ExperimentRunner(self.exp,
+ json_report=False,
using_schedv2=False,
log=self.mock_logger,
cmd_exec=self.mock_cmd_exec)
er.STATUS_TIME_DELAY = 2
- mock_status_string.return_value = "Fake status string"
- mock_progress_string.return_value = "Fake progress string"
+ mock_status_string.return_value = 'Fake status string'
+ mock_progress_string.return_value = 'Fake progress string'
er._Run(self.exp)
self.assertEqual(self.run_count, 1)
self.assertTrue(self.is_complete_count > 0)
@@ -219,28 +222,25 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(mock_progress_string.call_count, 2)
self.assertEqual(mock_status_string.call_count, 2)
self.assertEqual(self.mock_logger.output_msgs,
- ['==============================',
- 'Fake progress string', 'Fake status string',
- '==============================',
- '==============================',
- 'Fake progress string', 'Fake status string',
- '=============================='])
+ ['==============================', 'Fake progress string',
+ 'Fake status string', '==============================',
+ '==============================', 'Fake progress string',
+ 'Fake status string', '=============================='])
self.assertEqual(len(self.mock_logger.error_msgs), 0)
-
@mock.patch.object(TextResultsReport, 'GetReport')
def test_print_table(self, mock_report):
self.mock_logger.Reset()
- mock_report.return_value = "This is a fake experiment report."
- er = experiment_runner.ExperimentRunner(self.exp, json_report=False,
+ mock_report.return_value = 'This is a fake experiment report.'
+ er = experiment_runner.ExperimentRunner(self.exp,
+ json_report=False,
using_schedv2=False,
log=self.mock_logger,
cmd_exec=self.mock_cmd_exec)
er._PrintTable(self.exp)
self.assertEqual(mock_report.call_count, 1)
self.assertEqual(self.mock_logger.output_msgs,
- [ 'This is a fake experiment report.' ])
-
+ ['This is a fake experiment report.'])
@mock.patch.object(HTMLResultsReport, 'GetReport')
@mock.patch.object(TextResultsReport, 'GetReport')
@@ -250,14 +250,15 @@ class ExperimentRunnerTest(unittest.TestCase):
def test_email(self, mock_getuser, mock_emailer, mock_attachment,
mock_text_report, mock_html_report):
- mock_getuser.return_value = "john.smith@google.com"
- mock_text_report.return_value = "This is a fake text report."
- mock_html_report.return_value = "This is a fake html report."
+ mock_getuser.return_value = 'john.smith@google.com'
+ mock_text_report.return_value = 'This is a fake text report.'
+ mock_html_report.return_value = 'This is a fake html report.'
self.mock_logger.Reset()
- config.AddConfig("no_email", True)
- self.exp.email_to = ["jane.doe@google.com"]
- er = experiment_runner.ExperimentRunner(self.exp, json_report=False,
+ config.AddConfig('no_email', True)
+ self.exp.email_to = ['jane.doe@google.com']
+ er = experiment_runner.ExperimentRunner(self.exp,
+ json_report=False,
using_schedv2=False,
log=self.mock_logger,
cmd_exec=self.mock_cmd_exec)
@@ -271,7 +272,7 @@ class ExperimentRunnerTest(unittest.TestCase):
# Test 2. Config: email. exp.email_to set; cache hit. => send email
self.mock_logger.Reset()
- config.AddConfig("no_email", False)
+ config.AddConfig('no_email', False)
for r in self.exp.benchmark_runs:
r.cache_hit = True
er._Email(self.exp)
@@ -285,7 +286,7 @@ class ExperimentRunnerTest(unittest.TestCase):
(['john.smith@google.com', 'jane.doe@google.com'],
': image1 vs. image2',
"<pre style='font-size: 13px'>This is a fake text "
- "report.\nResults are stored in _results.\n</pre>"))
+ 'report.\nResults are stored in _results.\n</pre>'))
self.assertTrue(type(mock_emailer.call_args[1]) is dict)
self.assertEqual(len(mock_emailer.call_args[1]), 2)
self.assertTrue('attachments' in mock_emailer.call_args[1].keys())
@@ -301,7 +302,7 @@ class ExperimentRunnerTest(unittest.TestCase):
mock_attachment.reset_mock()
mock_text_report.reset_mock()
mock_html_report.reset_mock()
- config.AddConfig("no_email", False)
+ config.AddConfig('no_email', False)
for r in self.exp.benchmark_runs:
r.cache_hit = False
er._Email(self.exp)
@@ -315,7 +316,7 @@ class ExperimentRunnerTest(unittest.TestCase):
(['john.smith@google.com', 'jane.doe@google.com'],
': image1 vs. image2',
"<pre style='font-size: 13px'>This is a fake text "
- "report.\nResults are stored in _results.\n</pre>"))
+ 'report.\nResults are stored in _results.\n</pre>'))
self.assertTrue(type(mock_emailer.call_args[1]) is dict)
self.assertEqual(len(mock_emailer.call_args[1]), 2)
self.assertTrue('attachments' in mock_emailer.call_args[1].keys())
@@ -340,10 +341,9 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(mock_html_report.call_count, 1)
self.assertEqual(len(mock_emailer.call_args), 2)
self.assertEqual(mock_emailer.call_args[0],
- (['john.smith@google.com'],
- ': image1 vs. image2',
+ (['john.smith@google.com'], ': image1 vs. image2',
"<pre style='font-size: 13px'>This is a fake text "
- "report.\nResults are stored in _results.\n</pre>"))
+ 'report.\nResults are stored in _results.\n</pre>'))
self.assertTrue(type(mock_emailer.call_args[1]) is dict)
self.assertEqual(len(mock_emailer.call_args[1]), 2)
self.assertTrue('attachments' in mock_emailer.call_args[1].keys())
@@ -379,13 +379,14 @@ class ExperimentRunnerTest(unittest.TestCase):
mock_report, mock_writefile, mock_mkdir, mock_rmdir):
self.mock_logger.Reset()
- self.exp.results_directory='/usr/local/crosperf-results'
+ self.exp.results_directory = '/usr/local/crosperf-results'
bench_run = self.exp.benchmark_runs[5]
bench_path = '/usr/local/crosperf-results/' + filter(str.isalnum,
bench_run.name)
- self.assertEqual (len(self.exp.benchmark_runs), 6)
+ self.assertEqual(len(self.exp.benchmark_runs), 6)
- er = experiment_runner.ExperimentRunner(self.exp, json_report=False,
+ er = experiment_runner.ExperimentRunner(self.exp,
+ json_report=False,
using_schedv2=False,
log=self.mock_logger,
cmd_exec=self.mock_cmd_exec)
@@ -402,8 +403,8 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(self.mock_logger.LogOutputCount, 0)
# Test 2. _terminated is false; everything works properly.
- fake_result = Result(self.mock_logger, self.exp.labels[0], "average",
- "daisy1")
+ fake_result = Result(self.mock_logger, self.exp.labels[0], 'average',
+ 'daisy1')
for r in self.exp.benchmark_runs:
r.result = fake_result
er._terminated = False
@@ -413,7 +414,7 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(mock_copy.call_count, 6)
mock_copy.called_with(bench_path)
self.assertEqual(mock_writefile.call_count, 3)
- self.assertEqual (len(mock_writefile.call_args_list), 3)
+ self.assertEqual(len(mock_writefile.call_args_list), 3)
first_args = mock_writefile.call_args_list[0]
second_args = mock_writefile.call_args_list[1]
self.assertEqual(first_args[0][0],
@@ -425,12 +426,12 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(mock_rmdir.call_count, 1)
mock_rmdir.called_with('/usr/local/crosperf-results')
self.assertEqual(self.mock_logger.LogOutputCount, 4)
- self.assertEqual(self.mock_logger.output_msgs,
- ['Storing experiment file in /usr/local/crosperf-results.',
- 'Storing results report in /usr/local/crosperf-results.',
- 'Storing email message body in /usr/local/crosperf-results.',
- 'Storing results of each benchmark run.'])
-
+ self.assertEqual(
+ self.mock_logger.output_msgs,
+ ['Storing experiment file in /usr/local/crosperf-results.',
+ 'Storing results report in /usr/local/crosperf-results.',
+ 'Storing email message body in /usr/local/crosperf-results.',
+ 'Storing results of each benchmark run.'])
if __name__ == '__main__':
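
The tests above lean on stacked @mock.patch.object decorators to stub out report and status methods before each run. A minimal, self-contained sketch of that pattern follows; FakeReport and its GetReport method are invented purely for illustration and stand in for classes like TextResultsReport:

    import unittest
    import mock


    class FakeReport(object):
      # Hypothetical stand-in for the report classes patched above.
      def GetReport(self):
        return 'real report'


    class PatchObjectExample(unittest.TestCase):

      @mock.patch.object(FakeReport, 'GetReport')
      def test_report_is_stubbed(self, mock_get_report):
        # The decorator swaps FakeReport.GetReport for a mock during the
        # test and passes that mock in as an argument.
        mock_get_report.return_value = 'This is a fake experiment report.'
        self.assertEqual(FakeReport().GetReport(),
                         'This is a fake experiment report.')
        self.assertEqual(mock_get_report.call_count, 1)


    if __name__ == '__main__':
      unittest.main()
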
diff --git a/crosperf/experiment_status.py b/crosperf/experiment_status.py
index 93ada967..8cada078 100644
--- a/crosperf/experiment_status.py
+++ b/crosperf/experiment_status.py
@@ -1,5 +1,4 @@
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""The class to show the banner."""
from __future__ import print_function
@@ -19,14 +18,14 @@ class ExperimentStatus(object):
self.log_level = experiment.log_level
def _GetProgressBar(self, num_complete, num_total):
- ret = "Done: %s%%" % int(100.0 * num_complete / num_total)
+ ret = 'Done: %s%%' % int(100.0 * num_complete / num_total)
bar_length = 50
- done_char = ">"
- undone_char = " "
+ done_char = '>'
+ undone_char = ' '
num_complete_chars = bar_length * num_complete / num_total
num_undone_chars = bar_length - num_complete_chars
- ret += " [%s%s]" % (num_complete_chars * done_char, num_undone_chars *
- undone_char)
+ ret += ' [%s%s]' % (num_complete_chars * done_char,
+ num_undone_chars * undone_char)
return ret
def GetProgressString(self):
@@ -62,26 +61,25 @@ class ExperimentStatus(object):
# first long job, after a series of short jobs). For now, if that
# happens, we set the ETA to "Unknown."
#
- eta_seconds = (float(self.num_total - self.experiment.num_complete -1) *
- time_completed_jobs / self.experiment.num_run_complete
- + (time_completed_jobs / self.experiment.num_run_complete
- - (current_time - self.new_job_start_time)))
+ eta_seconds = (float(self.num_total - self.experiment.num_complete - 1) *
+ time_completed_jobs / self.experiment.num_run_complete +
+ (time_completed_jobs / self.experiment.num_run_complete -
+ (current_time - self.new_job_start_time)))
eta_seconds = int(eta_seconds)
if eta_seconds > 0:
eta = datetime.timedelta(seconds=eta_seconds)
else:
- eta = "Unknown"
+ eta = 'Unknown'
except ZeroDivisionError:
- eta = "Unknown"
+ eta = 'Unknown'
strings = []
- strings.append("Current time: %s Elapsed: %s ETA: %s" %
+ strings.append('Current time: %s Elapsed: %s ETA: %s' %
(datetime.datetime.now(),
- datetime.timedelta(seconds=int(elapsed_time)),
- eta))
+ datetime.timedelta(seconds=int(elapsed_time)), eta))
strings.append(self._GetProgressBar(self.experiment.num_complete,
self.num_total))
- return "\n".join(strings)
+ return '\n'.join(strings)
def GetStatusString(self):
"""Get the status string of all the benchmark_runs."""
@@ -93,26 +91,26 @@ class ExperimentStatus(object):
status_strings = []
for key, val in status_bins.items():
- if key == "RUNNING":
- status_strings.append("%s: %s" %
+ if key == 'RUNNING':
+ status_strings.append('%s: %s' %
(key, self._GetNamesAndIterations(val)))
else:
- status_strings.append("%s: %s" %
+ status_strings.append('%s: %s' %
(key, self._GetCompactNamesAndIterations(val)))
- thread_status = ""
- thread_status_format = "Thread Status: \n{}\n"
+ thread_status = ''
+ thread_status_format = 'Thread Status: \n{}\n'
if (self.experiment.schedv2() is None and
- self.experiment.log_level == "verbose"):
- # Add the machine manager status.
+ self.experiment.log_level == 'verbose'):
+ # Add the machine manager status.
thread_status = thread_status_format.format(
self.experiment.machine_manager.AsString())
elif self.experiment.schedv2():
- # In schedv2 mode, we always print out thread status.
- thread_status = thread_status_format.format(
- self.experiment.schedv2().threads_status_as_string())
+ # In schedv2 mode, we always print out thread status.
+ thread_status = thread_status_format.format(self.experiment.schedv2(
+ ).threads_status_as_string())
- result = "{}{}".format(thread_status, "\n".join(status_strings))
+ result = '{}{}'.format(thread_status, '\n'.join(status_strings))
return result
@@ -121,9 +119,9 @@ class ExperimentStatus(object):
t = time.time()
for benchmark_run in benchmark_runs:
t_last = benchmark_run.timeline.GetLastEventTime()
- elapsed = str(datetime.timedelta(seconds=int(t-t_last)))
+ elapsed = str(datetime.timedelta(seconds=int(t - t_last)))
strings.append("'{0}' {1}".format(benchmark_run.name, elapsed))
- return " %s (%s)" % (len(strings), ", ".join(strings))
+ return ' %s (%s)' % (len(strings), ', '.join(strings))
def _GetCompactNamesAndIterations(self, benchmark_runs):
output = ''
@@ -144,8 +142,8 @@ class ExperimentStatus(object):
benchmark_iterations[benchmark_name].append(benchmark_run.iteration)
for key, val in benchmark_iterations.items():
val.sort()
- iterations = ",".join(map(str, val))
- strings.append("{} [{}]".format(key, iterations))
- output += " " + label + ": " + ", ".join(strings) + "\n"
+ iterations = ','.join(map(str, val))
+ strings.append('{} [{}]'.format(key, iterations))
+ output += ' ' + label + ': ' + ', '.join(strings) + '\n'
- return " %s \n%s" % (len(benchmark_runs), output)
+ return ' %s \n%s' % (len(benchmark_runs), output)
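
As a quick sanity check on the ETA arithmetic reformatted above, here is a standalone sketch with invented numbers (10 jobs total, 4 complete, 400 seconds spent on the 4 completed runs, and the newest job started 30 seconds ago); none of these values come from a real experiment:

    import datetime

    num_total = 10                # hypothetical experiment totals
    num_complete = 4
    num_run_complete = 4
    time_completed_jobs = 400.0   # seconds spent on completed benchmark runs
    current_job_elapsed = 30.0    # seconds the newest job has been running

    avg_job_time = time_completed_jobs / num_run_complete  # 100 s per run
    # Remaining jobs other than the one in flight, plus the in-flight job's
    # estimated remaining time -- same shape as the GetProgressString()
    # computation above.
    eta_seconds = (float(num_total - num_complete - 1) * avg_job_time +
                   (avg_job_time - current_job_elapsed))
    print(datetime.timedelta(seconds=int(eta_seconds)))    # 0:09:30
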
diff --git a/crosperf/field.py b/crosperf/field.py
index b70fb557..e25ffe30 100644
--- a/crosperf/field.py
+++ b/crosperf/field.py
@@ -1,7 +1,7 @@
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""Module to represent a Field in an experiment file."""
+
class Field(object):
"""Class representing a Field in an experiment file."""
@@ -37,8 +37,13 @@ class Field(object):
class TextField(Field):
"""Class of text field."""
- def __init__(self, name, required=False, default="", inheritable=False,
- description=""):
+
+ def __init__(self,
+ name,
+ required=False,
+ default='',
+ inheritable=False,
+ description=''):
super(TextField, self).__init__(name, required, default, inheritable,
description)
@@ -48,15 +53,20 @@ class TextField(Field):
class BooleanField(Field):
"""Class of boolean field."""
- def __init__(self, name, required=False, default=False, inheritable=False,
- description=""):
+
+ def __init__(self,
+ name,
+ required=False,
+ default=False,
+ inheritable=False,
+ description=''):
super(BooleanField, self).__init__(name, required, default, inheritable,
description)
def _Parse(self, value):
- if value.lower() == "true":
+ if value.lower() == 'true':
return True
- elif value.lower() == "false":
+ elif value.lower() == 'false':
return False
raise Exception("Invalid value for '%s'. Must be true or false." %
self.name)
@@ -64,8 +74,13 @@ class BooleanField(Field):
class IntegerField(Field):
"""Class of integer field."""
- def __init__(self, name, required=False, default=0, inheritable=False,
- description=""):
+
+ def __init__(self,
+ name,
+ required=False,
+ default=0,
+ inheritable=False,
+ description=''):
super(IntegerField, self).__init__(name, required, default, inheritable,
description)
@@ -75,8 +90,13 @@ class IntegerField(Field):
class FloatField(Field):
"""Class of float field."""
- def __init__(self, name, required=False, default=0, inheritable=False,
- description=""):
+
+ def __init__(self,
+ name,
+ required=False,
+ default=0,
+ inheritable=False,
+ description=''):
super(FloatField, self).__init__(name, required, default, inheritable,
description)
@@ -86,8 +106,13 @@ class FloatField(Field):
class ListField(Field):
"""Class of list field."""
- def __init__(self, name, required=False, default=None, inheritable=False,
- description=""):
+
+ def __init__(self,
+ name,
+ required=False,
+ default=None,
+ inheritable=False,
+ description=''):
super(ListField, self).__init__(name, required, default, inheritable,
description)
@@ -95,7 +120,7 @@ class ListField(Field):
return value.split()
def GetString(self):
- return " ".join(self._value)
+ return ' '.join(self._value)
def Append(self, value):
v = self._Parse(value)
@@ -105,16 +130,23 @@ class ListField(Field):
self._value += v
self.assigned = True
+
class EnumField(Field):
"""Class of enum field."""
- def __init__(self, name, options, required=False, default="",
- inheritable=False, description=""):
+
+ def __init__(self,
+ name,
+ options,
+ required=False,
+ default='',
+ inheritable=False,
+ description=''):
super(EnumField, self).__init__(name, required, default, inheritable,
description)
self.options = options
def _Parse(self, value):
if value not in self.options:
- raise Exception("Invalid enum value for field '%s'. Must be one of (%s)"
- % (self.name, ", ".join(self.options)))
+ raise Exception("Invalid enum value for field '%s'. Must be one of (%s)" %
+ (self.name, ', '.join(self.options)))
return str(value)
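
The reflowed constructors above leave parsing behaviour untouched. As a small usage sketch (the field names and option values are made up, the parser is called directly only for illustration, and this assumes the crosperf directory is on the Python path), the subclasses differ mainly in what _Parse accepts:

    from field import BooleanField, EnumField, ListField

    rerun = BooleanField('rerun', description='Whether to rerun the benchmark.')
    print(rerun._Parse('True'))         # True; only 'true'/'false' are accepted

    level = EnumField('log_level', ['quiet', 'average', 'verbose'],
                      default='average')
    print(level._Parse('verbose'))      # 'verbose'
    # level._Parse('loud') would raise "Invalid enum value for field 'log_level'..."

    remotes = ListField('remote')
    print(remotes._Parse('dut1 dut2'))  # ['dut1', 'dut2'] (whitespace-separated)
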
diff --git a/crosperf/flag_test_unittest.py b/crosperf/flag_test_unittest.py
index f3678cfd..3a60cb83 100755
--- a/crosperf/flag_test_unittest.py
+++ b/crosperf/flag_test_unittest.py
@@ -6,6 +6,7 @@ import test_flag
import unittest
+
class FlagTestCase(unittest.TestCase):
def test_test_flag(self):
@@ -14,7 +15,6 @@ class FlagTestCase(unittest.TestCase):
self.assertTrue(type(test_flag._is_test) is list)
self.assertEqual(len(test_flag._is_test), 1)
-
# Verify that the getting the flag works and that the flag
# contains False, its starting value.
save_flag = test_flag.GetTestMode()
diff --git a/crosperf/help.py b/crosperf/help.py
index 81e39b55..e10035aa 100644
--- a/crosperf/help.py
+++ b/crosperf/help.py
@@ -1,5 +1,4 @@
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""Module to print help message."""
from __future__ import print_function
@@ -13,29 +12,30 @@ from settings_factory import LabelSettings
class Help(object):
"""The help class."""
+
def GetUsage(self):
return """%s [OPTIONS] EXPERIMENT_FILE""" % (sys.argv[0])
def _WrapLine(self, line):
- return "\n".join(textwrap.wrap(line, 80))
+ return '\n'.join(textwrap.wrap(line, 80))
def _GetFieldDescriptions(self, fields):
- res = ""
+ res = ''
for field_name in fields:
field = fields[field_name]
- res += "Field:\t\t%s\n" % field.name
- res += self._WrapLine("Description:\t%s" % field.description) + "\n"
- res += "Type:\t\t%s\n" % type(field).__name__.replace("Field", "")
- res += "Required:\t%s\n" % field.required
+ res += 'Field:\t\t%s\n' % field.name
+ res += self._WrapLine('Description:\t%s' % field.description) + '\n'
+ res += 'Type:\t\t%s\n' % type(field).__name__.replace('Field', '')
+ res += 'Required:\t%s\n' % field.required
if field.default:
- res += "Default:\t%s\n" % field.default
- res += "\n"
+ res += 'Default:\t%s\n' % field.default
+ res += '\n'
return res
def GetHelp(self):
- global_fields = self._GetFieldDescriptions(GlobalSettings("").fields)
- benchmark_fields = self._GetFieldDescriptions(BenchmarkSettings("").fields)
- label_fields = self._GetFieldDescriptions(LabelSettings("").fields)
+ global_fields = self._GetFieldDescriptions(GlobalSettings('').fields)
+ benchmark_fields = self._GetFieldDescriptions(BenchmarkSettings('').fields)
+ label_fields = self._GetFieldDescriptions(LabelSettings('').fields)
return """%s is a script for running performance experiments on
ChromeOS. It allows one to run ChromeOS Autotest benchmarks over
@@ -110,5 +110,5 @@ experiment file). Crosperf runs the experiment and caches the results
generates and displays a report based on the run, and emails the
report to the user. If the results were all read out of the cache,
then by default no email is generated.
-""" % (sys.argv[0], sys.argv[0], global_fields,
- benchmark_fields, label_fields, sys.argv[0])
+""" % (sys.argv[0], sys.argv[0], global_fields, benchmark_fields, label_fields,
+ sys.argv[0])
diff --git a/crosperf/image_checksummer.py b/crosperf/image_checksummer.py
index f4c02277..3571ad95 100644
--- a/crosperf/image_checksummer.py
+++ b/crosperf/image_checksummer.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright 2011 Google Inc. All Rights Reserved.
@@ -10,7 +9,9 @@ from cros_utils.file_utils import FileUtils
class ImageChecksummer(object):
+
class PerImageChecksummer(object):
+
def __init__(self, label, log_level):
self._lock = threading.Lock()
self.label = label
@@ -23,17 +24,17 @@ class ImageChecksummer(object):
logger.GetLogger().LogOutput("Acquiring checksum for '%s'." %
self.label.name)
self._checksum = None
- if self.label.image_type != "local":
- raise Exception("Called Checksum on non-local image!")
+ if self.label.image_type != 'local':
+ raise Exception('Called Checksum on non-local image!')
if self.label.chromeos_image:
if os.path.exists(self.label.chromeos_image):
self._checksum = FileUtils().Md5File(self.label.chromeos_image,
log_level=self.log_level)
- logger.GetLogger().LogOutput("Computed checksum is "
- ": %s" % self._checksum)
+ logger.GetLogger().LogOutput('Computed checksum is '
+ ': %s' % self._checksum)
if not self._checksum:
- raise Exception("Checksum computing error.")
- logger.GetLogger().LogOutput("Checksum is: %s" % self._checksum)
+ raise Exception('Checksum computing error.')
+ logger.GetLogger().LogOutput('Checksum is: %s' % self._checksum)
return self._checksum
_instance = None
@@ -43,23 +44,22 @@ class ImageChecksummer(object):
def __new__(cls, *args, **kwargs):
with cls._lock:
if not cls._instance:
- cls._instance = super(ImageChecksummer, cls).__new__(cls,
- *args, **kwargs)
+ cls._instance = super(ImageChecksummer, cls).__new__(cls, *args,
+ **kwargs)
return cls._instance
def Checksum(self, label, log_level):
- if label.image_type != "local":
- raise Exception("Attempt to call Checksum on non-local image.")
+ if label.image_type != 'local':
+ raise Exception('Attempt to call Checksum on non-local image.')
with self._lock:
if label.name not in self._per_image_checksummers:
- self._per_image_checksummers[label.name] = (ImageChecksummer.
- PerImageChecksummer(label,
- log_level))
+ self._per_image_checksummers[label.name] = (
+ ImageChecksummer.PerImageChecksummer(label, log_level))
checksummer = self._per_image_checksummers[label.name]
try:
return checksummer.Checksum()
except Exception, e:
- logger.GetLogger().LogError("Could not compute checksum of image in label"
- " '%s'."% label.name)
+ logger.GetLogger().LogError('Could not compute checksum of image in label'
+ " '%s'." % label.name)
raise e
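
ImageChecksummer keeps one instance process-wide by overriding __new__ under a class lock, and caches a PerImageChecksummer per label name. A tiny sketch of the sharing behaviour (assuming the crosperf tree and its cros_utils dependencies are importable; no real image is checksummed here):

    from image_checksummer import ImageChecksummer

    # Every construction returns the same object, so all callers share the
    # per-label checksummer cache.
    first = ImageChecksummer()
    second = ImageChecksummer()
    assert first is second

    # Checksum(label, log_level) is only valid for labels whose image_type
    # is 'local'; any other label makes it raise an exception.
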
diff --git a/crosperf/label.py b/crosperf/label.py
index a34416d2..b9fc9330 100644
--- a/crosperf/label.py
+++ b/crosperf/label.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""The label of benchamrks."""
from __future__ import print_function
@@ -16,15 +15,25 @@ from cros_utils import misc
class Label(object):
"""The label class."""
- def __init__(self, name, chromeos_image, chromeos_root, board, remote,
- image_args, cache_dir, cache_only, log_level, compiler,
+
+ def __init__(self,
+ name,
+ chromeos_image,
+ chromeos_root,
+ board,
+ remote,
+ image_args,
+ cache_dir,
+ cache_only,
+ log_level,
+ compiler,
chrome_src=None):
self.image_type = self._GetImageType(chromeos_image)
# Expand ~
chromeos_root = os.path.expanduser(chromeos_root)
- if self.image_type == "local":
+ if self.image_type == 'local':
chromeos_image = os.path.expanduser(chromeos_image)
self.name = name
@@ -35,11 +44,11 @@ class Label(object):
self.cache_dir = cache_dir
self.cache_only = cache_only
self.log_level = log_level
- self.chrome_version = ""
+ self.chrome_version = ''
self.compiler = compiler
if not chromeos_root:
- if self.image_type == "local":
+ if self.image_type == 'local':
chromeos_root = FileUtils().ChromeOSRootFromImage(chromeos_image)
if not chromeos_root:
raise Exception("No ChromeOS root given for label '%s' and could not "
@@ -48,22 +57,21 @@ class Label(object):
else:
chromeos_root = FileUtils().CanonicalizeChromeOSRoot(chromeos_root)
if not chromeos_root:
- raise Exception("Invalid ChromeOS root given for label '%s': '%s'."
- % (name, chromeos_root))
+ raise Exception("Invalid ChromeOS root given for label '%s': '%s'." %
+ (name, chromeos_root))
self.chromeos_root = chromeos_root
if not chrome_src:
self.chrome_src = os.path.join(
- self.chromeos_root,
- ".cache/distfiles/target/chrome-src-internal")
+ self.chromeos_root, '.cache/distfiles/target/chrome-src-internal')
if not os.path.exists(self.chrome_src):
self.chrome_src = os.path.join(self.chromeos_root,
- ".cache/distfiles/target/chrome-src")
+ '.cache/distfiles/target/chrome-src')
else:
chromeos_src = misc.CanonicalizePath(chrome_src)
if not chromeos_src:
- raise Exception("Invalid Chrome src given for label '%s': '%s'."
- % (name, chrome_src))
+ raise Exception("Invalid Chrome src given for label '%s': '%s'." %
+ (name, chrome_src))
self.chrome_src = chromeos_src
self._SetupChecksum()
@@ -72,19 +80,19 @@ class Label(object):
"""Compute label checksum only once."""
self.checksum = None
- if self.image_type == "local":
+ if self.image_type == 'local':
self.checksum = ImageChecksummer().Checksum(self, self.log_level)
- elif self.image_type == "trybot":
+ elif self.image_type == 'trybot':
self.checksum = hashlib.md5(self.chromeos_image).hexdigest()
def _GetImageType(self, chromeos_image):
image_type = None
- if chromeos_image.find("xbuddy://") < 0:
- image_type = "local"
- elif chromeos_image.find("trybot") >= 0:
- image_type = "trybot"
+ if chromeos_image.find('xbuddy://') < 0:
+ image_type = 'local'
+ elif chromeos_image.find('trybot') >= 0:
+ image_type = 'trybot'
else:
- image_type = "official"
+ image_type = 'official'
return image_type
def __hash__(self):
@@ -102,10 +110,21 @@ class Label(object):
return 'label[name="{}"]'.format(self.name)
+
class MockLabel(object):
"""The mock label class."""
- def __init__(self, name, chromeos_image, chromeos_root, board, remote,
- image_args, cache_dir, cache_only, log_level, compiler,
+
+ def __init__(self,
+ name,
+ chromeos_image,
+ chromeos_root,
+ board,
+ remote,
+ image_args,
+ cache_dir,
+ cache_only,
+ log_level,
+ compiler,
chrome_src=None):
self.name = name
self.chromeos_image = chromeos_image
@@ -114,7 +133,7 @@ class MockLabel(object):
self.cache_dir = cache_dir
self.cache_only = cache_only
if not chromeos_root:
- self.chromeos_root = "/tmp/chromeos_root"
+ self.chromeos_root = '/tmp/chromeos_root'
else:
self.chromeos_root = chromeos_root
self.image_args = image_args
@@ -123,14 +142,14 @@ class MockLabel(object):
self.checksum = ''
self.log_level = log_level
self.compiler = compiler
- self.chrome_version = "Fake Chrome Version 50"
+ self.chrome_version = 'Fake Chrome Version 50'
def _GetImageType(self, chromeos_image):
image_type = None
- if chromeos_image.find("xbuddy://") < 0:
- image_type = "local"
- elif chromeos_image.find("trybot") >= 0:
- image_type = "trybot"
+ if chromeos_image.find('xbuddy://') < 0:
+ image_type = 'local'
+ elif chromeos_image.find('trybot') >= 0:
+ image_type = 'trybot'
else:
- image_type = "official"
+ image_type = 'official'
return image_type
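
Both Label and MockLabel classify the image purely from the image string, as _GetImageType above shows. The standalone sketch below mirrors that decision with made-up image paths:

    def get_image_type(chromeos_image):
      # Mirrors Label._GetImageType: anything that is not an xbuddy URL is
      # treated as a local image file.
      if chromeos_image.find('xbuddy://') < 0:
        return 'local'
      elif chromeos_image.find('trybot') >= 0:
        return 'trybot'
      return 'official'

    print(get_image_type('/tmp/chromiumos_test_image.bin'))              # local
    print(get_image_type('xbuddy://remote/trybot-lumpy-release/R50-1'))  # trybot
    print(get_image_type('xbuddy://remote/lumpy-release/R50-1'))         # official
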
diff --git a/crosperf/machine_image_manager.py b/crosperf/machine_image_manager.py
index 31d0bd6b..3b96140e 100644
--- a/crosperf/machine_image_manager.py
+++ b/crosperf/machine_image_manager.py
@@ -1,9 +1,9 @@
-#!/usr/bin/python
# Copyright 2015 Google Inc. All Rights Reserved.
+
class MachineImageManager(object):
- """Management of allocating images to duts.
+ """Management of allocating images to duts.
* Data structure we have -
@@ -132,28 +132,28 @@ class MachineImageManager(object):
"""
- def __init__(self, labels, duts):
- self.labels_ = labels
- self.duts_ = duts
- self.n_labels_ = len(labels)
- self.n_duts_ = len(duts)
- self.dut_name_ordinal_ = dict()
- for idx, dut in enumerate(self.duts_):
- self.dut_name_ordinal_[dut.name] = idx
-
- # Generate initial matrix containg 'X' or ' '.
- self.matrix_ = [['X' if (l.remote and len(l.remote)) else ' ' \
- for d in range(self.n_duts_)] for l in self.labels_]
- for ol, l in enumerate(self.labels_):
- if l.remote:
- for r in l.remote:
- self.matrix_[ol][self.dut_name_ordinal_[r]] = ' '
-
- self.label_duts_ = [[] for _ in range(self.n_labels_)]
- self.allocate_log_ = []
-
- def compute_initial_allocation(self):
- """Compute the initial label-dut allocation.
+ def __init__(self, labels, duts):
+ self.labels_ = labels
+ self.duts_ = duts
+ self.n_labels_ = len(labels)
+ self.n_duts_ = len(duts)
+ self.dut_name_ordinal_ = dict()
+ for idx, dut in enumerate(self.duts_):
+ self.dut_name_ordinal_[dut.name] = idx
+
+    # Generate initial matrix containing 'X' or ' '.
+ self.matrix_ = [['X' if (l.remote and len(l.remote)) else ' ' \
+ for d in range(self.n_duts_)] for l in self.labels_]
+ for ol, l in enumerate(self.labels_):
+ if l.remote:
+ for r in l.remote:
+ self.matrix_[ol][self.dut_name_ordinal_[r]] = ' '
+
+ self.label_duts_ = [[] for _ in range(self.n_labels_)]
+ self.allocate_log_ = []
+
+ def compute_initial_allocation(self):
+ """Compute the initial label-dut allocation.
This method finds the most efficient way that every label gets imaged at
least once.
@@ -163,35 +163,35 @@ class MachineImageManager(object):
otherwise True.
"""
- if self.n_duts_ == 1:
- for i, v in self.matrix_vertical_generator(0):
- if v != 'X':
- self.matrix_[i][0] = 'Y'
- return
+ if self.n_duts_ == 1:
+ for i, v in self.matrix_vertical_generator(0):
+ if v != 'X':
+ self.matrix_[i][0] = 'Y'
+ return
- if self.n_labels_ == 1:
- for j, v in self.matrix_horizontal_generator(0):
- if v != 'X':
- self.matrix_[0][j] = 'Y'
- return
+ if self.n_labels_ == 1:
+ for j, v in self.matrix_horizontal_generator(0):
+ if v != 'X':
+ self.matrix_[0][j] = 'Y'
+ return
- if self.n_duts_ >= self.n_labels_:
- n = 1
- else:
- n = self.n_labels_ - self.n_duts_ + 1
- while n <= self.n_labels_:
- if self._compute_initial_allocation_internal(0, n):
- break
- n += 1
+ if self.n_duts_ >= self.n_labels_:
+ n = 1
+ else:
+ n = self.n_labels_ - self.n_duts_ + 1
+ while n <= self.n_labels_:
+ if self._compute_initial_allocation_internal(0, n):
+ break
+ n += 1
- return n <= self.n_labels_
+ return n <= self.n_labels_
- def _record_allocate_log(self, label_i, dut_j):
- self.allocate_log_.append((label_i, dut_j))
- self.label_duts_[label_i].append(dut_j)
+ def _record_allocate_log(self, label_i, dut_j):
+ self.allocate_log_.append((label_i, dut_j))
+ self.label_duts_[label_i].append(dut_j)
- def allocate(self, dut, schedv2=None):
- """Allocate a label for dut.
+ def allocate(self, dut, schedv2=None):
+ """Allocate a label for dut.
Arguments:
dut: the dut that asks for a new image.
@@ -202,101 +202,100 @@ class MachineImageManager(object):
a label to image onto the dut or None if no more available images for
the dut.
"""
- j = self.dut_name_ordinal_[dut.name]
- # 'can_' prefix means candidate label's.
- can_reimage_number = 999
- can_i = 999
- can_label = None
- can_pending_br_num = 0
- for i, v in self.matrix_vertical_generator(j):
- label = self.labels_[i]
-
- # 2 optimizations here regarding allocating label to dut.
- # Note schedv2 might be None in case we do not need this
- # optimization or we are in testing mode.
- if schedv2 is not None:
- pending_br_num = len(schedv2._label_brl_map[label])
- if pending_br_num == 0:
- # (A) - we have finished all br of this label,
- # apparently, we do not want to reimaeg dut to
- # this label.
- continue
- else:
- # In case we do not have a schedv2 instance, mark
- # pending_br_num as 0, so pending_br_num >=
- # can_pending_br_num is always True.
- pending_br_num = 0
-
- # For this time being, I just comment this out until we have a
- # better estimation how long each benchmarkrun takes.
- # if (pending_br_num <= 5 and
- # len(self.label_duts_[i]) >= 1):
- # # (B) this is heuristic - if there are just a few test cases
- # # (say <5) left undone for this label, and there is at least
- # # 1 other machine working on this lable, we probably not want
- # # to bother to reimage this dut to help with these 5 test
- # # cases
- # continue
-
- if v == 'Y':
- self.matrix_[i][j] = '_'
- self._record_allocate_log(i, j)
- return label
- if v == ' ':
- label_reimage_number = len(self.label_duts_[i])
- if ((can_label is None) or
- (label_reimage_number < can_reimage_number or
- (label_reimage_number == can_reimage_number and
- pending_br_num >= can_pending_br_num))):
- can_reimage_number = label_reimage_number
- can_i = i
- can_label = label
- can_pending_br_num = pending_br_num
-
- # All labels are marked either '_' (already taken) or 'X' (not
- # compatible), so return None to notify machine thread to quit.
- if can_label is None:
- return None
-
- # At this point, we don't find any 'Y' for the machine, so we go the
- # 'min' approach.
- self.matrix_[can_i][j] = '_'
- self._record_allocate_log(can_i, j)
- return can_label
-
- def matrix_vertical_generator(self, col):
- """Iterate matrix vertically at column 'col'.
+ j = self.dut_name_ordinal_[dut.name]
+ # 'can_' prefix means candidate label's.
+ can_reimage_number = 999
+ can_i = 999
+ can_label = None
+ can_pending_br_num = 0
+ for i, v in self.matrix_vertical_generator(j):
+ label = self.labels_[i]
+
+ # 2 optimizations here regarding allocating label to dut.
+ # Note schedv2 might be None in case we do not need this
+ # optimization or we are in testing mode.
+ if schedv2 is not None:
+ pending_br_num = len(schedv2._label_brl_map[label])
+ if pending_br_num == 0:
+          # (A) - We have finished all benchmark runs of this label,
+          # so we do not want to reimage the dut to
+          # this label.
+ continue
+ else:
+ # In case we do not have a schedv2 instance, mark
+ # pending_br_num as 0, so pending_br_num >=
+ # can_pending_br_num is always True.
+ pending_br_num = 0
+
+      # For the time being, this is commented out until we have a
+      # better estimate of how long each benchmark run takes.
+ # if (pending_br_num <= 5 and
+ # len(self.label_duts_[i]) >= 1):
+      #   # (B) This is a heuristic - if there are just a few test cases
+      #   # (say <5) left undone for this label, and there is at least
+      #   # 1 other machine working on this label, we probably do not want
+      #   # to bother to reimage this dut to help with these 5 test
+      #   # cases.
+ # continue
+
+ if v == 'Y':
+ self.matrix_[i][j] = '_'
+ self._record_allocate_log(i, j)
+ return label
+ if v == ' ':
+ label_reimage_number = len(self.label_duts_[i])
+ if ((can_label is None) or
+ (label_reimage_number < can_reimage_number or
+ (label_reimage_number == can_reimage_number and
+ pending_br_num >= can_pending_br_num))):
+ can_reimage_number = label_reimage_number
+ can_i = i
+ can_label = label
+ can_pending_br_num = pending_br_num
+
+ # All labels are marked either '_' (already taken) or 'X' (not
+ # compatible), so return None to notify machine thread to quit.
+ if can_label is None:
+ return None
+
+    # At this point, we have not found any 'Y' for the machine, so we fall
+    # back to the 'min' (fewest reimages so far) approach.
+ self.matrix_[can_i][j] = '_'
+ self._record_allocate_log(can_i, j)
+ return can_label
+
+ def matrix_vertical_generator(self, col):
+ """Iterate matrix vertically at column 'col'.
Yield row number i and value at matrix_[i][col].
"""
- for i, l in enumerate(self.labels_):
- yield i, self.matrix_[i][col]
+ for i, l in enumerate(self.labels_):
+ yield i, self.matrix_[i][col]
- def matrix_horizontal_generator(self, row):
- """Iterate matrix horizontally at row 'row'.
+ def matrix_horizontal_generator(self, row):
+ """Iterate matrix horizontally at row 'row'.
Yield col number j and value at matrix_[row][j].
"""
- for j, d in enumerate(self.duts_):
- yield j, self.matrix_[row][j]
-
-
- def _compute_initial_allocation_internal(self, level, N):
- """ Search matrix for d with N. """
-
- if level == self.n_labels_:
+ for j, d in enumerate(self.duts_):
+ yield j, self.matrix_[row][j]
+
+ def _compute_initial_allocation_internal(self, level, N):
+ """ Search matrix for d with N. """
+
+ if level == self.n_labels_:
+ return True
+
+ for j, v in self.matrix_horizontal_generator(level):
+ if v == ' ':
+ # Before we put a 'Y', we check how many Y column 'j' has.
+ # Note y[0] is row idx, y[1] is the cell value.
+ ny = reduce(lambda x, y: x + 1 if (y[1] == 'Y') else x,
+ self.matrix_vertical_generator(j), 0)
+ if ny < N:
+ self.matrix_[level][j] = 'Y'
+ if self._compute_initial_allocation_internal(level + 1, N):
return True
+ self.matrix_[level][j] = ' '
- for j, v in self.matrix_horizontal_generator(level):
- if v == ' ':
- # Before we put a 'Y', we check how many Y column 'j' has.
- # Note y[0] is row idx, y[1] is the cell value.
- ny = reduce(lambda x, y: x + 1 if (y[1] == 'Y') else x,
- self.matrix_vertical_generator(j), 0)
- if ny < N:
- self.matrix_[level][j] = 'Y'
- if self._compute_initial_allocation_internal(level + 1, N):
- return True
- self.matrix_[level][j] = ' '
-
- return False
+ return False
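
Since machine_image_manager.py is re-indented wholesale above, it may help to see the intended call sequence in one place. This sketch uses throwaway FakeLabel/FakeDut stand-ins (the manager only consults label.remote and dut.name), much like the MockLabel/MockDut classes in the unit tests that follow:

    from machine_image_manager import MachineImageManager

    class FakeLabel(object):
      def __init__(self, name, remotes=None):
        self.name, self.remote = name, remotes

    class FakeDut(object):
      def __init__(self, name):
        self.name = name

    labels = [FakeLabel('image_a'), FakeLabel('image_b')]
    duts = [FakeDut('dut_0'), FakeDut('dut_1')]

    mim = MachineImageManager(labels, duts)
    mim.compute_initial_allocation()   # plans one 'Y' (initial image) per label
    print(mim.matrix_)                 # [['Y', ' '], [' ', 'Y']]
    label = mim.allocate(duts[0])      # consumes the 'Y'; the cell becomes '_'
    print(label.name)                  # image_a
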
diff --git a/crosperf/machine_image_manager_unittest.py b/crosperf/machine_image_manager_unittest.py
index 60e8354a..220c4cf4 100755
--- a/crosperf/machine_image_manager_unittest.py
+++ b/crosperf/machine_image_manager_unittest.py
@@ -7,274 +7,231 @@ import unittest
from machine_image_manager import MachineImageManager
+
class MockLabel(object):
- def __init__(self, name, remotes=None):
- self.name = name
- self.remote = remotes
+ def __init__(self, name, remotes=None):
+ self.name = name
+ self.remote = remotes
- def __hash__(self):
- """Provide hash function for label.
+ def __hash__(self):
+ """Provide hash function for label.
This is required because Label object is used inside a dict as key.
"""
- return hash(self.name)
+ return hash(self.name)
- def __eq__(self, other):
- """Provide eq function for label.
+ def __eq__(self, other):
+ """Provide eq function for label.
This is required because Label object is used inside a dict as key.
"""
- return isinstance(other, MockLabel) and other.name == self.name
+ return isinstance(other, MockLabel) and other.name == self.name
+
class MockDut(object):
- def __init__(self, name, label=None):
- self.name = name
- self.label_ = label
+ def __init__(self, name, label=None):
+ self.name = name
+ self.label_ = label
class MachineImageManagerTester(unittest.TestCase):
- def gen_duts_by_name(self, *names):
- duts = []
- for n in names:
- duts.append(MockDut(n))
- return duts
-
- def print_matrix(self, matrix):
- for r in matrix:
- for v in r:
- print '{} '.format('.' if v == ' ' else v),
- print('')
-
- def create_labels_and_duts_from_pattern(self, pattern):
- labels = []
- duts = []
- for i, r in enumerate(pattern):
- l = MockLabel('l{}'.format(i), [])
- for j, v in enumerate(r.split()):
- if v == '.':
- l.remote.append('m{}'.format(j))
- if i == 0:
- duts.append(MockDut('m{}'.format(j)))
- labels.append(l)
- return labels, duts
-
- def check_matrix_against_pattern(self, matrix, pattern):
- for i, s in enumerate(pattern):
- for j, v in enumerate(s.split()):
- self.assertTrue(v == '.' and matrix[i][j] == ' ' or
- v == matrix[i][j])
-
- def pattern_based_test(self, input, output):
- labels, duts = self.create_labels_and_duts_from_pattern(input)
- mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.compute_initial_allocation())
- self.check_matrix_against_pattern(mim.matrix_, output)
- return mim
-
- def test_single_dut(self):
- labels = [MockLabel('l1'),
- MockLabel('l2'),
- MockLabel('l3')]
- dut = MockDut('m1')
- mim = MachineImageManager(labels, [dut])
- mim.compute_initial_allocation()
- self.assertTrue(mim.matrix_ == [['Y'], ['Y'], ['Y']])
-
- def test_single_label(self):
- labels = [MockLabel('l1')]
- duts = self.gen_duts_by_name('m1', 'm2', 'm3')
- mim = MachineImageManager(labels, duts)
- mim.compute_initial_allocation()
- self.assertTrue(mim.matrix_ == [['Y', 'Y', 'Y']])
-
- def test_case1(self):
- labels = [MockLabel('l1', ['m1', 'm2']),
- MockLabel('l2', ['m2', 'm3']),
- MockLabel('l3', ['m1'])]
- duts = [MockDut('m1'), MockDut('m2'), MockDut('m3')]
- mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.matrix_ == [[' ', ' ', 'X'],
- ['X', ' ', ' '],
- [' ', 'X', 'X']])
- mim.compute_initial_allocation()
- self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'],
- ['X', ' ', 'Y'],
- ['Y', 'X', 'X']])
-
- def test_case2(self):
- labels = [MockLabel('l1', ['m1', 'm2']),
- MockLabel('l2', ['m2', 'm3']),
- MockLabel('l3', ['m1'])]
- duts = [MockDut('m1'), MockDut('m2'), MockDut('m3')]
- mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.matrix_ == [[' ', ' ', 'X'],
- ['X', ' ', ' '],
- [' ', 'X', 'X']])
- mim.compute_initial_allocation()
- self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'],
- ['X', ' ', 'Y'],
- ['Y', 'X', 'X']])
-
- def test_case3(self):
- labels = [MockLabel('l1', ['m1', 'm2']),
- MockLabel('l2', ['m2', 'm3']),
- MockLabel('l3', ['m1'])]
- duts = [MockDut('m1', labels[0]), MockDut('m2'), MockDut('m3')]
- mim = MachineImageManager(labels, duts)
- mim.compute_initial_allocation()
- self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'],
- ['X', ' ', 'Y'],
- ['Y', 'X', 'X']])
-
- def test_case4(self):
- labels = [MockLabel('l1', ['m1', 'm2']),
- MockLabel('l2', ['m2', 'm3']),
- MockLabel('l3', ['m1'])]
- duts = [MockDut('m1'), MockDut('m2', labels[0]), MockDut('m3')]
- mim = MachineImageManager(labels, duts)
- mim.compute_initial_allocation()
- self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'],
- ['X', ' ', 'Y'],
- ['Y', 'X', 'X']])
-
- def test_case5(self):
- labels = [MockLabel('l1', ['m3']),
- MockLabel('l2', ['m3']),
- MockLabel('l3', ['m1'])]
- duts = self.gen_duts_by_name('m1', 'm2', 'm3')
- mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.compute_initial_allocation())
- self.assertTrue(mim.matrix_ == [['X', 'X', 'Y'],
- ['X', 'X', 'Y'],
- ['Y', 'X', 'X']])
-
- def test_2x2_with_allocation(self):
- labels = [MockLabel('l0'), MockLabel('l1')]
- duts = [MockDut('m0'), MockDut('m1')]
- mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.compute_initial_allocation())
- self.assertTrue(mim.allocate(duts[0]) == labels[0])
- self.assertTrue(mim.allocate(duts[0]) == labels[1])
- self.assertTrue(mim.allocate(duts[0]) is None)
- self.assertTrue(mim.matrix_[0][0] == '_')
- self.assertTrue(mim.matrix_[1][0] == '_')
- self.assertTrue(mim.allocate(duts[1]) == labels[1])
-
- def test_10x10_general(self):
- """Gen 10x10 matrix."""
- n = 10
- labels = []
- duts = []
- for i in range(n):
- labels.append(MockLabel('l{}'.format(i)))
- duts.append(MockDut('m{}'.format(i)))
- mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.compute_initial_allocation())
- for i in range(n):
- for j in range(n):
- if i == j:
- self.assertTrue(mim.matrix_[i][j] == 'Y')
- else:
- self.assertTrue(mim.matrix_[i][j] == ' ')
- self.assertTrue(mim.allocate(duts[3]).name == 'l3')
-
- def test_random_generated(self):
- n = 10
- labels = []
- duts = []
- for i in range(10):
- # generate 3-5 machines that is compatible with this label
- l = MockLabel('l{}'.format(i), [])
- r = random.random()
- for _ in range(4):
- t = int(r * 10) % n
- r *= 10
- l.remote.append('m{}'.format(t))
- labels.append(l)
- duts.append(MockDut('m{}'.format(i)))
- mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.compute_initial_allocation())
-
- def test_10x10_fully_random(self):
- input = ['X . . . X X . X X .',
- 'X X . X . X . X X .',
- 'X X X . . X . X . X',
- 'X . X X . . X X . X',
- 'X X X X . . . X . .',
- 'X X . X . X . . X .',
- '. X . X . X X X . .',
- '. X . X X . X X . .',
- 'X X . . . X X X . .',
- '. X X X X . . . . X']
- output = ['X Y . . X X . X X .',
- 'X X Y X . X . X X .',
- 'X X X Y . X . X . X',
- 'X . X X Y . X X . X',
- 'X X X X . Y . X . .',
- 'X X . X . X Y . X .',
- 'Y X . X . X X X . .',
- '. X . X X . X X Y .',
- 'X X . . . X X X . Y',
- '. X X X X . . Y . X']
- self.pattern_based_test(input, output)
-
- def test_10x10_fully_random2(self):
- input = ['X . X . . X . X X X',
- 'X X X X X X . . X .',
- 'X . X X X X X . . X',
- 'X X X . X . X X . .',
- '. X . X . X X X X X',
- 'X X X X X X X . . X',
- 'X . X X X X X . . X',
- 'X X X . X X X X . .',
- 'X X X . . . X X X X',
- '. X X . X X X . X X']
- output = ['X . X Y . X . X X X',
- 'X X X X X X Y . X .',
- 'X Y X X X X X . . X',
- 'X X X . X Y X X . .',
- '. X Y X . X X X X X',
- 'X X X X X X X Y . X',
- 'X . X X X X X . Y X',
- 'X X X . X X X X . Y',
- 'X X X . Y . X X X X',
- 'Y X X . X X X . X X']
- self.pattern_based_test(input, output)
-
- def test_3x4_with_allocation(self):
- input = ['X X . .',
- '. . X .',
- 'X . X .']
- output = ['X X Y .',
- 'Y . X .',
- 'X Y X .']
- mim = self.pattern_based_test(input, output)
- self.assertTrue(mim.allocate(mim.duts_[2]) == mim.labels_[0])
- self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[2])
- self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[1])
- self.assertTrue(mim.allocate(mim.duts_[1]) == mim.labels_[2])
- self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[1])
- self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[0])
- self.assertTrue(mim.allocate(mim.duts_[3]) is None)
- self.assertTrue(mim.allocate(mim.duts_[2]) is None)
- self.assertTrue(mim.allocate(mim.duts_[1]) == mim.labels_[1])
- self.assertTrue(mim.allocate(mim.duts_[1]) == None)
- self.assertTrue(mim.allocate(mim.duts_[0]) == None)
- self.assertTrue(mim.label_duts_[0] == [2, 3])
- self.assertTrue(mim.label_duts_[1] == [0, 3, 1])
- self.assertTrue(mim.label_duts_[2] == [3, 1])
- self.assertTrue(mim.allocate_log_ ==
- [(0, 2),
- (2, 3),
- (1, 0),
- (2, 1),
- (1, 3),
- (0, 3),
- (1, 1)])
-
- def test_cornercase_1(self):
- """This corner case is brought up by Caroline.
+ def gen_duts_by_name(self, *names):
+ duts = []
+ for n in names:
+ duts.append(MockDut(n))
+ return duts
+
+ def print_matrix(self, matrix):
+ for r in matrix:
+ for v in r:
+ print '{} '.format('.' if v == ' ' else v),
+ print('')
+
+ def create_labels_and_duts_from_pattern(self, pattern):
+ labels = []
+ duts = []
+ for i, r in enumerate(pattern):
+ l = MockLabel('l{}'.format(i), [])
+ for j, v in enumerate(r.split()):
+ if v == '.':
+ l.remote.append('m{}'.format(j))
+ if i == 0:
+ duts.append(MockDut('m{}'.format(j)))
+ labels.append(l)
+ return labels, duts
+
+ def check_matrix_against_pattern(self, matrix, pattern):
+ for i, s in enumerate(pattern):
+ for j, v in enumerate(s.split()):
+ self.assertTrue(v == '.' and matrix[i][j] == ' ' or v == matrix[i][j])
+
+ def pattern_based_test(self, input, output):
+ labels, duts = self.create_labels_and_duts_from_pattern(input)
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
+ self.check_matrix_against_pattern(mim.matrix_, output)
+ return mim
+
+ def test_single_dut(self):
+ labels = [MockLabel('l1'), MockLabel('l2'), MockLabel('l3')]
+ dut = MockDut('m1')
+ mim = MachineImageManager(labels, [dut])
+ mim.compute_initial_allocation()
+ self.assertTrue(mim.matrix_ == [['Y'], ['Y'], ['Y']])
+
+ def test_single_label(self):
+ labels = [MockLabel('l1')]
+ duts = self.gen_duts_by_name('m1', 'm2', 'm3')
+ mim = MachineImageManager(labels, duts)
+ mim.compute_initial_allocation()
+ self.assertTrue(mim.matrix_ == [['Y', 'Y', 'Y']])
+
+ def test_case1(self):
+ labels = [MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']),
+ MockLabel('l3', ['m1'])]
+ duts = [MockDut('m1'), MockDut('m2'), MockDut('m3')]
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.matrix_ == [[' ', ' ', 'X'], ['X', ' ', ' '], [' ', 'X',
+ 'X']])
+ mim.compute_initial_allocation()
+ self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X',
+ 'X']])
+
+ def test_case2(self):
+ labels = [MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']),
+ MockLabel('l3', ['m1'])]
+ duts = [MockDut('m1'), MockDut('m2'), MockDut('m3')]
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.matrix_ == [[' ', ' ', 'X'], ['X', ' ', ' '], [' ', 'X',
+ 'X']])
+ mim.compute_initial_allocation()
+ self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X',
+ 'X']])
+
+ def test_case3(self):
+ labels = [MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']),
+ MockLabel('l3', ['m1'])]
+ duts = [MockDut('m1', labels[0]), MockDut('m2'), MockDut('m3')]
+ mim = MachineImageManager(labels, duts)
+ mim.compute_initial_allocation()
+ self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X',
+ 'X']])
+
+ def test_case4(self):
+ labels = [MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']),
+ MockLabel('l3', ['m1'])]
+ duts = [MockDut('m1'), MockDut('m2', labels[0]), MockDut('m3')]
+ mim = MachineImageManager(labels, duts)
+ mim.compute_initial_allocation()
+ self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X',
+ 'X']])
+
+ def test_case5(self):
+ labels = [MockLabel('l1', ['m3']), MockLabel('l2', ['m3']),
+ MockLabel('l3', ['m1'])]
+ duts = self.gen_duts_by_name('m1', 'm2', 'm3')
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
+ self.assertTrue(mim.matrix_ == [['X', 'X', 'Y'], ['X', 'X', 'Y'], ['Y', 'X',
+ 'X']])
+
+ def test_2x2_with_allocation(self):
+ labels = [MockLabel('l0'), MockLabel('l1')]
+ duts = [MockDut('m0'), MockDut('m1')]
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
+ self.assertTrue(mim.allocate(duts[0]) == labels[0])
+ self.assertTrue(mim.allocate(duts[0]) == labels[1])
+ self.assertTrue(mim.allocate(duts[0]) is None)
+ self.assertTrue(mim.matrix_[0][0] == '_')
+ self.assertTrue(mim.matrix_[1][0] == '_')
+ self.assertTrue(mim.allocate(duts[1]) == labels[1])
+
+ def test_10x10_general(self):
+ """Gen 10x10 matrix."""
+ n = 10
+ labels = []
+ duts = []
+ for i in range(n):
+ labels.append(MockLabel('l{}'.format(i)))
+ duts.append(MockDut('m{}'.format(i)))
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
+ for i in range(n):
+ for j in range(n):
+ if i == j:
+ self.assertTrue(mim.matrix_[i][j] == 'Y')
+ else:
+ self.assertTrue(mim.matrix_[i][j] == ' ')
+ self.assertTrue(mim.allocate(duts[3]).name == 'l3')
+
+ def test_random_generated(self):
+ n = 10
+ labels = []
+ duts = []
+ for i in range(10):
+      # Generate 3-5 machines that are compatible with this label.
+ l = MockLabel('l{}'.format(i), [])
+ r = random.random()
+ for _ in range(4):
+ t = int(r * 10) % n
+ r *= 10
+ l.remote.append('m{}'.format(t))
+ labels.append(l)
+ duts.append(MockDut('m{}'.format(i)))
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
+
+ def test_10x10_fully_random(self):
+ input = ['X . . . X X . X X .', 'X X . X . X . X X .',
+ 'X X X . . X . X . X', 'X . X X . . X X . X',
+ 'X X X X . . . X . .', 'X X . X . X . . X .',
+ '. X . X . X X X . .', '. X . X X . X X . .',
+ 'X X . . . X X X . .', '. X X X X . . . . X']
+ output = ['X Y . . X X . X X .', 'X X Y X . X . X X .',
+ 'X X X Y . X . X . X', 'X . X X Y . X X . X',
+ 'X X X X . Y . X . .', 'X X . X . X Y . X .',
+ 'Y X . X . X X X . .', '. X . X X . X X Y .',
+ 'X X . . . X X X . Y', '. X X X X . . Y . X']
+ self.pattern_based_test(input, output)
+
+ def test_10x10_fully_random2(self):
+ input = ['X . X . . X . X X X', 'X X X X X X . . X .',
+ 'X . X X X X X . . X', 'X X X . X . X X . .',
+ '. X . X . X X X X X', 'X X X X X X X . . X',
+ 'X . X X X X X . . X', 'X X X . X X X X . .',
+ 'X X X . . . X X X X', '. X X . X X X . X X']
+ output = ['X . X Y . X . X X X', 'X X X X X X Y . X .',
+ 'X Y X X X X X . . X', 'X X X . X Y X X . .',
+ '. X Y X . X X X X X', 'X X X X X X X Y . X',
+ 'X . X X X X X . Y X', 'X X X . X X X X . Y',
+ 'X X X . Y . X X X X', 'Y X X . X X X . X X']
+ self.pattern_based_test(input, output)
+
+ def test_3x4_with_allocation(self):
+ input = ['X X . .', '. . X .', 'X . X .']
+ output = ['X X Y .', 'Y . X .', 'X Y X .']
+ mim = self.pattern_based_test(input, output)
+ self.assertTrue(mim.allocate(mim.duts_[2]) == mim.labels_[0])
+ self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[2])
+ self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[1])
+ self.assertTrue(mim.allocate(mim.duts_[1]) == mim.labels_[2])
+ self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[1])
+ self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[0])
+ self.assertTrue(mim.allocate(mim.duts_[3]) is None)
+ self.assertTrue(mim.allocate(mim.duts_[2]) is None)
+ self.assertTrue(mim.allocate(mim.duts_[1]) == mim.labels_[1])
+    self.assertTrue(mim.allocate(mim.duts_[1]) is None)
+    self.assertTrue(mim.allocate(mim.duts_[0]) is None)
+ self.assertTrue(mim.label_duts_[0] == [2, 3])
+ self.assertTrue(mim.label_duts_[1] == [0, 3, 1])
+ self.assertTrue(mim.label_duts_[2] == [3, 1])
+ self.assertTrue(mim.allocate_log_ == [(0, 2), (2, 3), (1, 0), (2, 1), (
+ 1, 3), (0, 3), (1, 1)])
+
+ def test_cornercase_1(self):
+ """This corner case is brought up by Caroline.
The description is -
@@ -310,19 +267,15 @@ class MachineImageManagerTester(unittest.TestCase):
"""
- input = ['. X X',
- '. X X',
- '. X X', ]
- output = ['Y X X',
- 'Y X X',
- 'Y X X', ]
- mim = self.pattern_based_test(input, output)
- self.assertTrue(mim.allocate(mim.duts_[1]) is None)
- self.assertTrue(mim.allocate(mim.duts_[2]) is None)
- self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[0])
- self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[1])
- self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[2])
- self.assertTrue(mim.allocate(mim.duts_[0]) is None)
+ input = ['. X X', '. X X', '. X X']
+ output = ['Y X X', 'Y X X', 'Y X X']
+ mim = self.pattern_based_test(input, output)
+ self.assertTrue(mim.allocate(mim.duts_[1]) is None)
+ self.assertTrue(mim.allocate(mim.duts_[2]) is None)
+ self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[0])
+ self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[1])
+ self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[2])
+ self.assertTrue(mim.allocate(mim.duts_[0]) is None)
if __name__ == '__main__':
diff --git a/crosperf/machine_manager.py b/crosperf/machine_manager.py
index 7bada0d1..86c63a20 100644
--- a/crosperf/machine_manager.py
+++ b/crosperf/machine_manager.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Machine Manager module."""
from __future__ import print_function
@@ -20,24 +19,30 @@ import test_flag
from cros_utils import command_executer
from cros_utils import logger
-CHECKSUM_FILE = "/usr/local/osimage_checksum_file"
+CHECKSUM_FILE = '/usr/local/osimage_checksum_file'
+
class BadChecksum(Exception):
"""Raised if all machines for a label don't have the same checksum."""
pass
+
class BadChecksumString(Exception):
"""Raised if all machines for a label don't have the same checksum string."""
pass
+
class MissingLocksDirectory(Exception):
"""Raised when cannot find/access the machine locks directory."""
+
class CrosCommandError(Exception):
"""Raised when an error occurs running command on DUT."""
+
class CrosMachine(object):
"""The machine class."""
+
def __init__(self, name, chromeos_root, log_level, cmd_exec=None):
self.name = name
self.image = None
@@ -71,7 +76,7 @@ class CrosMachine(object):
self.machine_id_checksum = self._GetMD5Checksum(self.machine_id)
def IsReachable(self):
- command = "ls"
+ command = 'ls'
ret = self.ce.CrosRunCommand(command,
machine=self.name,
chromeos_root=self.chromeos_root)
@@ -113,55 +118,63 @@ class CrosMachine(object):
def _GetMemoryInfo(self):
#TODO yunlian: when the machine in rebooting, it will not return
#meminfo, the assert does not catch it either
- command = "cat /proc/meminfo"
+ command = 'cat /proc/meminfo'
ret, self.meminfo, _ = self.ce.CrosRunCommandWOutput(
- command, machine=self.name, chromeos_root=self.chromeos_root)
- assert ret == 0, "Could not get meminfo from machine: %s" % self.name
+ command,
+ machine=self.name,
+ chromeos_root=self.chromeos_root)
+ assert ret == 0, 'Could not get meminfo from machine: %s' % self.name
if ret == 0:
self._ParseMemoryInfo()
def _GetCPUInfo(self):
- command = "cat /proc/cpuinfo"
+ command = 'cat /proc/cpuinfo'
ret, self.cpuinfo, _ = self.ce.CrosRunCommandWOutput(
- command, machine=self.name, chromeos_root=self.chromeos_root)
- assert ret == 0, "Could not get cpuinfo from machine: %s" % self.name
+ command,
+ machine=self.name,
+ chromeos_root=self.chromeos_root)
+ assert ret == 0, 'Could not get cpuinfo from machine: %s' % self.name
def _ComputeMachineChecksumString(self):
- self.checksum_string = ""
- exclude_lines_list = ["MHz", "BogoMIPS", "bogomips"]
+ self.checksum_string = ''
+ exclude_lines_list = ['MHz', 'BogoMIPS', 'bogomips']
for line in self.cpuinfo.splitlines():
if not any([e in line for e in exclude_lines_list]):
self.checksum_string += line
- self.checksum_string += " " + str(self.phys_kbytes)
+ self.checksum_string += ' ' + str(self.phys_kbytes)
def _GetMD5Checksum(self, ss):
if ss:
return hashlib.md5(ss).hexdigest()
else:
- return ""
+ return ''
def _GetMachineID(self):
- command = "dump_vpd_log --full --stdout"
+ command = 'dump_vpd_log --full --stdout'
_, if_out, _ = self.ce.CrosRunCommandWOutput(
- command, machine=self.name, chromeos_root=self.chromeos_root)
+ command,
+ machine=self.name,
+ chromeos_root=self.chromeos_root)
b = if_out.splitlines()
- a = [l for l in b if "Product" in l]
+ a = [l for l in b if 'Product' in l]
if len(a):
self.machine_id = a[0]
return
- command = "ifconfig"
+ command = 'ifconfig'
_, if_out, _ = self.ce.CrosRunCommandWOutput(
- command, machine=self.name, chromeos_root=self.chromeos_root)
+ command,
+ machine=self.name,
+ chromeos_root=self.chromeos_root)
b = if_out.splitlines()
- a = [l for l in b if "HWaddr" in l]
+ a = [l for l in b if 'HWaddr' in l]
if len(a):
- self.machine_id = "_".join(a)
+ self.machine_id = '_'.join(a)
return
- a = [l for l in b if "ether" in l]
+ a = [l for l in b if 'ether' in l]
if len(a):
- self.machine_id = "_".join(a)
+ self.machine_id = '_'.join(a)
return
- assert 0, "Could not get machine_id from machine: %s" % self.name
+ assert 0, 'Could not get machine_id from machine: %s' % self.name
def __str__(self):
l = []
@@ -170,7 +183,7 @@ class CrosMachine(object):
l.append(str(self.checksum))
l.append(str(self.locked))
l.append(str(self.released_time))
- return ", ".join(l)
+ return ', '.join(l)
class MachineManager(object):
@@ -184,8 +197,14 @@ class MachineManager(object):
multiple benchmark runs within the same experiment from trying to use the
same machine at the same time.
"""
- def __init__(self, chromeos_root, acquire_timeout, log_level, locks_dir,
- cmd_exec=None, lgr=None):
+
+ def __init__(self,
+ chromeos_root,
+ acquire_timeout,
+ log_level,
+ locks_dir,
+ cmd_exec=None,
+ lgr=None):
self._lock = threading.RLock()
self._all_machines = []
self._machines = []
@@ -202,8 +221,8 @@ class MachineManager(object):
self.logger = lgr or logger.GetLogger()
if self.locks_dir and not os.path.isdir(self.locks_dir):
- raise MissingLocksDirectory("Cannot access locks directory: %s"
- % self.locks_dir)
+ raise MissingLocksDirectory('Cannot access locks directory: %s' %
+ self.locks_dir)
self._initialized_machines = []
self.chromeos_root = chromeos_root
@@ -220,15 +239,17 @@ class MachineManager(object):
def GetChromeVersion(self, machine):
"""Get the version of Chrome running on the DUT."""
- cmd = "/opt/google/chrome/chrome --version"
+ cmd = '/opt/google/chrome/chrome --version'
ret, version, _ = self.ce.CrosRunCommandWOutput(
- cmd, machine=machine.name, chromeos_root=self.chromeos_root)
+ cmd,
+ machine=machine.name,
+ chromeos_root=self.chromeos_root)
if ret != 0:
- raise CrosCommandError("Couldn't get Chrome version from %s."
- % machine.name)
+ raise CrosCommandError("Couldn't get Chrome version from %s." %
+ machine.name)
if ret != 0:
- version = ""
+ version = ''
return version.rstrip()
def ImageMachine(self, machine, label):
@@ -239,41 +260,40 @@ class MachineManager(object):
chromeos_root = label.chromeos_root
if not chromeos_root:
chromeos_root = self.chromeos_root
- image_chromeos_args = [image_chromeos.__file__,
- "--no_lock",
- "--chromeos_root=%s" % chromeos_root,
- "--image=%s" % label.chromeos_image,
- "--image_args=%s" % label.image_args,
- "--remote=%s" % machine.name,
- "--logging_level=%s" % self.log_level]
+ image_chromeos_args = [image_chromeos.__file__, '--no_lock',
+ '--chromeos_root=%s' % chromeos_root,
+ '--image=%s' % label.chromeos_image,
+ '--image_args=%s' % label.image_args, '--remote=%s' %
+ machine.name, '--logging_level=%s' % self.log_level]
if label.board:
- image_chromeos_args.append("--board=%s" % label.board)
+ image_chromeos_args.append('--board=%s' % label.board)
# Currently can't image two machines at once.
# So we have to serialize on this lock.
save_ce_log_level = self.ce.log_level
- if self.log_level != "verbose":
- self.ce.log_level = "average"
+ if self.log_level != 'verbose':
+ self.ce.log_level = 'average'
with self.image_lock:
- if self.log_level != "verbose":
- self.logger.LogOutput("Pushing image onto machine.")
- self.logger.LogOutput("Running image_chromeos.DoImage with %s"
- % " ".join(image_chromeos_args))
+ if self.log_level != 'verbose':
+ self.logger.LogOutput('Pushing image onto machine.')
+ self.logger.LogOutput('Running image_chromeos.DoImage with %s' %
+ ' '.join(image_chromeos_args))
retval = 0
if not test_flag.GetTestMode():
retval = image_chromeos.DoImage(image_chromeos_args)
if retval:
- cmd = "reboot && exit"
- if self.log_level != "verbose":
- self.logger.LogOutput("reboot & exit.")
- self.ce.CrosRunCommand(cmd, machine=machine.name,
+ cmd = 'reboot && exit'
+ if self.log_level != 'verbose':
+ self.logger.LogOutput('reboot & exit.')
+ self.ce.CrosRunCommand(cmd,
+ machine=machine.name,
chromeos_root=self.chromeos_root)
time.sleep(60)
- if self.log_level != "verbose":
- self.logger.LogOutput("Pushing image onto machine.")
- self.logger.LogOutput("Running image_chromeos.DoImage with %s"
- % " ".join(image_chromeos_args))
+ if self.log_level != 'verbose':
+ self.logger.LogOutput('Pushing image onto machine.')
+ self.logger.LogOutput('Running image_chromeos.DoImage with %s' %
+ ' '.join(image_chromeos_args))
retval = image_chromeos.DoImage(image_chromeos_args)
if retval:
raise Exception("Could not image machine: '%s'." % machine.name)
@@ -305,7 +325,7 @@ class MachineManager(object):
common_checksum = cs
# Make sure this machine's checksum matches our 'common' checksum.
if cs != common_checksum:
- raise BadChecksum("Machine checksums do not match!")
+ raise BadChecksum('Machine checksums do not match!')
self.machine_checksum[label.name] = common_checksum
def ComputeCommonCheckSumString(self, label):
@@ -332,9 +352,10 @@ class MachineManager(object):
sys.argv[0])
if locked:
self._machines.append(cros_machine)
- command = "cat %s" % CHECKSUM_FILE
+ command = 'cat %s' % CHECKSUM_FILE
ret, out, _ = self.ce.CrosRunCommandWOutput(
- command, chromeos_root=self.chromeos_root,
+ command,
+ chromeos_root=self.chromeos_root,
machine=cros_machine.name)
if ret == 0:
cros_machine.checksum = out.strip()
@@ -345,27 +366,24 @@ class MachineManager(object):
def AddMachine(self, machine_name):
with self._lock:
for m in self._all_machines:
- assert m.name != machine_name, "Tried to double-add %s" % machine_name
+ assert m.name != machine_name, 'Tried to double-add %s' % machine_name
- if self.log_level != "verbose":
- self.logger.LogOutput("Setting up remote access to %s" % machine_name)
- self.logger.LogOutput(
- "Checking machine characteristics for %s" % machine_name)
+ if self.log_level != 'verbose':
+ self.logger.LogOutput('Setting up remote access to %s' % machine_name)
+ self.logger.LogOutput('Checking machine characteristics for %s' %
+ machine_name)
cm = CrosMachine(machine_name, self.chromeos_root, self.log_level)
if cm.machine_checksum:
self._all_machines.append(cm)
-
def RemoveMachine(self, machine_name):
with self._lock:
- self._machines = [m for m in self._machines
- if m.name != machine_name]
+ self._machines = [m for m in self._machines if m.name != machine_name]
if self.locks_dir:
res = file_lock_machine.Machine(machine_name,
self.locks_dir).Unlock(True)
if not res:
- self.logger.LogError("Could not unlock machine: '%s'."
- % machine_name)
+ self.logger.LogError("Could not unlock machine: '%s'." % machine_name)
def ForceSameImageToAllMachines(self, label):
machines = self.GetMachines(label)
@@ -396,21 +414,23 @@ class MachineManager(object):
machine_names = []
for machine in machines:
machine_names.append(machine.name)
- self.logger.LogFatal("Could not acquire any of the "
- "following machines: '%s'"
- % ", ".join(machine_names))
+ self.logger.LogFatal('Could not acquire any of the '
+ "following machines: '%s'" %
+ ', '.join(machine_names))
### for m in self._machines:
### if (m.locked and time.time() - m.released_time < 10 and
### m.checksum == image_checksum):
### return None
- for m in [machine for machine in self.GetAvailableMachines(label)
+ for m in [machine
+ for machine in self.GetAvailableMachines(label)
if not machine.locked]:
if image_checksum and (m.checksum == image_checksum):
m.locked = True
m.test_run = threading.current_thread()
return m
- for m in [machine for machine in self.GetAvailableMachines(label)
+ for m in [machine
+ for machine in self.GetAvailableMachines(label)
if not machine.locked]:
if not m.checksum:
m.locked = True
@@ -422,7 +442,8 @@ class MachineManager(object):
# the number of re-images.
# TODO(asharif): If we centralize the thread-scheduler, we won't need this
# code and can implement minimal reimaging code more cleanly.
- for m in [machine for machine in self.GetAvailableMachines(label)
+ for m in [machine
+ for machine in self.GetAvailableMachines(label)
if not machine.locked]:
if time.time() - m.released_time > 15:
# The release time gap is too large, so it is probably in the start
@@ -448,10 +469,10 @@ class MachineManager(object):
with self._lock:
for m in self._machines:
if machine.name == m.name:
- assert m.locked == True, "Tried to double-release %s" % m.name
+ assert m.locked == True, 'Tried to double-release %s' % m.name
m.released_time = time.time()
m.locked = False
- m.status = "Available"
+ m.status = 'Available'
break
def Cleanup(self):
@@ -461,40 +482,36 @@ class MachineManager(object):
res = file_lock_machine.Machine(m.name, self.locks_dir).Unlock(True)
if not res:
- self.logger.LogError("Could not unlock machine: '%s'."
- % m.name)
+ self.logger.LogError("Could not unlock machine: '%s'." % m.name)
def __str__(self):
with self._lock:
- l = ["MachineManager Status:"]
+ l = ['MachineManager Status:']
for m in self._machines:
l.append(str(m))
- return "\n".join(l)
+ return '\n'.join(l)
def AsString(self):
with self._lock:
- stringify_fmt = "%-30s %-10s %-4s %-25s %-32s"
- header = stringify_fmt % ("Machine", "Thread", "Lock", "Status",
- "Checksum")
+ stringify_fmt = '%-30s %-10s %-4s %-25s %-32s'
+ header = stringify_fmt % ('Machine', 'Thread', 'Lock', 'Status',
+ 'Checksum')
table = [header]
for m in self._machines:
if m.test_run:
test_name = m.test_run.name
test_status = m.test_run.timeline.GetLastEvent()
else:
- test_name = ""
- test_status = ""
+ test_name = ''
+ test_status = ''
try:
- machine_string = stringify_fmt % (m.name,
- test_name,
- m.locked,
- test_status,
- m.checksum)
+ machine_string = stringify_fmt % (m.name, test_name, m.locked,
+ test_status, m.checksum)
except ValueError:
- machine_string = ""
+ machine_string = ''
table.append(machine_string)
- return "Machine Status:\n%s" % "\n".join(table)
+ return 'Machine Status:\n%s' % '\n'.join(table)
def GetAllCPUInfo(self, labels):
"""Get cpuinfo for labels, merge them if their cpuinfo are the same."""
@@ -507,12 +524,12 @@ class MachineManager(object):
else:
dic[machine.cpuinfo].append(label.name)
break
- output = ""
+ output = ''
for key, v in dic.items():
- output += " ".join(v)
- output += "\n-------------------\n"
+ output += ' '.join(v)
+ output += '\n-------------------\n'
output += key
- output += "\n\n\n"
+ output += '\n\n\n'
return output
@@ -618,7 +635,7 @@ power management:
self.released_time = time.time()
self.test_run = None
self.chromeos_root = chromeos_root
- self.checksum_string = re.sub(r"\d", "", name)
+ self.checksum_string = re.sub(r'\d', '', name)
# In test, we assume "lumpy1" and "lumpy2" are the same machine.
self.machine_checksum = self._GetMD5Checksum(self.checksum_string)
self.log_level = log_level
@@ -635,25 +652,25 @@ power management:
def _GetCPUInfo(self):
self.cpuinfo = self.CPUINFO_STRING
+
class MockMachineManager(MachineManager):
"""Mock machine manager class."""
- def __init__(self, chromeos_root, acquire_timeout,
- log_level):
+
+ def __init__(self, chromeos_root, acquire_timeout, log_level):
super(MockMachineManager, self).__init__(
- chromeos_root, acquire_timeout,
- log_level,
+ chromeos_root, acquire_timeout, log_level,
file_lock_machine.Machine.LOCKS_DIR)
def _TryToLockMachine(self, cros_machine):
self._machines.append(cros_machine)
- cros_machine.checksum = ""
+ cros_machine.checksum = ''
def AddMachine(self, machine_name):
with self._lock:
for m in self._all_machines:
- assert m.name != machine_name, "Tried to double-add %s" % machine_name
+ assert m.name != machine_name, 'Tried to double-add %s' % machine_name
cm = MockCrosMachine(machine_name, self.chromeos_root, self.log_level)
- assert cm.machine_checksum, ("Could not find checksum for machine %s" %
+ assert cm.machine_checksum, ('Could not find checksum for machine %s' %
machine_name)
# In the original MachineManager, the test is 'if cm.machine_checksum:' - if a
# machine is unreachable, then its machine_checksum is None. Here we
@@ -663,7 +680,7 @@ class MockMachineManager(MachineManager):
self._all_machines.append(cm)
def GetChromeVersion(self, machine):
- return "Mock Chrome Version R50"
+ return 'Mock Chrome Version R50'
def AcquireMachine(self, label):
for machine in self._all_machines:
diff --git a/crosperf/machine_manager_unittest.py b/crosperf/machine_manager_unittest.py
index 7aed09d4..abbbaff7 100755
--- a/crosperf/machine_manager_unittest.py
+++ b/crosperf/machine_manager_unittest.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
# Copyright 2012 Google Inc. All Rights Reserved.
-
"""Unittest for machine_manager."""
import os.path
import time
@@ -22,35 +21,37 @@ from benchmark_run import MockBenchmarkRun
from cros_utils import command_executer
from cros_utils import logger
+
class MyMachineManager(machine_manager.MachineManager):
def __init__(self, chromeos_root):
- super(MyMachineManager, self).__init__(chromeos_root, 0, "average",
+ super(MyMachineManager, self).__init__(chromeos_root, 0, 'average',
file_lock_machine.Machine.LOCKS_DIR)
def _TryToLockMachine(self, cros_machine):
self._machines.append(cros_machine)
- cros_machine.checksum = ""
+ cros_machine.checksum = ''
def AddMachine(self, machine_name):
with self._lock:
for m in self._all_machines:
- assert m.name != machine_name, "Tried to double-add %s" % machine_name
+ assert m.name != machine_name, 'Tried to double-add %s' % machine_name
cm = machine_manager.MockCrosMachine(machine_name, self.chromeos_root,
- "average")
- assert cm.machine_checksum, ("Could not find checksum for machine %s" %
+ 'average')
+ assert cm.machine_checksum, ('Could not find checksum for machine %s' %
machine_name)
self._all_machines.append(cm)
-CHROMEOS_ROOT = "/tmp/chromeos-root"
-MACHINE_NAMES = ["lumpy1", "lumpy2", "lumpy3", "daisy1", "daisy2"]
-LABEL_LUMPY = label.MockLabel("lumpy", "lumpy_chromeos_image", CHROMEOS_ROOT,
- "lumpy",
- ["lumpy1", "lumpy2", "lumpy3", "lumpy4"],
- "", "", False, "average," "gcc", None)
-LABEL_MIX = label.MockLabel("mix", "chromeos_image", CHROMEOS_ROOT, "mix",
- ["daisy1", "daisy2", "lumpy3", "lumpy4"],
- "", "", False, "average", "gcc", None)
+
+CHROMEOS_ROOT = '/tmp/chromeos-root'
+MACHINE_NAMES = ['lumpy1', 'lumpy2', 'lumpy3', 'daisy1', 'daisy2']
+LABEL_LUMPY = label.MockLabel('lumpy', 'lumpy_chromeos_image', CHROMEOS_ROOT,
+ 'lumpy', ['lumpy1', 'lumpy2', 'lumpy3', 'lumpy4'],
+ '', '', False, 'average,'
+ 'gcc', None)
+LABEL_MIX = label.MockLabel('mix', 'chromeos_image', CHROMEOS_ROOT, 'mix',
+ ['daisy1', 'daisy2', 'lumpy3', 'lumpy4'], '', '',
+ False, 'average', 'gcc', None)
class MachineManagerTest(unittest.TestCase):
@@ -66,15 +67,14 @@ class MachineManagerTest(unittest.TestCase):
mock_daisy1 = mock.Mock(spec=machine_manager.CrosMachine)
mock_daisy2 = mock.Mock(spec=machine_manager.CrosMachine)
- @mock.patch.object (os.path, 'isdir')
+ @mock.patch.object(os.path, 'isdir')
def setUp(self, mock_isdir):
mock_isdir.return_value = True
- self.mm = machine_manager.MachineManager("/usr/local/chromeos", 0,
- "average",
- file_lock_machine.Machine.LOCKS_DIR,
- self.mock_cmd_exec,
- self.mock_logger)
+ self.mm = machine_manager.MachineManager(
+ '/usr/local/chromeos', 0, 'average',
+ file_lock_machine.Machine.LOCKS_DIR, self.mock_cmd_exec,
+ self.mock_logger)
self.mock_lumpy1.name = 'lumpy1'
self.mock_lumpy2.name = 'lumpy2'
@@ -94,25 +94,24 @@ class MachineManagerTest(unittest.TestCase):
self.mock_lumpy4.checksum_string = 'lumpy_checksum_str'
self.mock_daisy1.checksum_string = 'daisy_checksum_str'
self.mock_daisy2.checksum_string = 'daisy_checksum_str'
- self.mock_lumpy1.cpuinfo = "lumpy_cpu_info"
- self.mock_lumpy2.cpuinfo = "lumpy_cpu_info"
- self.mock_lumpy3.cpuinfo = "lumpy_cpu_info"
- self.mock_lumpy4.cpuinfo = "lumpy_cpu_info"
- self.mock_daisy1.cpuinfo = "daisy_cpu_info"
- self.mock_daisy2.cpuinfo = "daisy_cpu_info"
+ self.mock_lumpy1.cpuinfo = 'lumpy_cpu_info'
+ self.mock_lumpy2.cpuinfo = 'lumpy_cpu_info'
+ self.mock_lumpy3.cpuinfo = 'lumpy_cpu_info'
+ self.mock_lumpy4.cpuinfo = 'lumpy_cpu_info'
+ self.mock_daisy1.cpuinfo = 'daisy_cpu_info'
+ self.mock_daisy2.cpuinfo = 'daisy_cpu_info'
self.mm._all_machines.append(self.mock_daisy1)
self.mm._all_machines.append(self.mock_daisy2)
self.mm._all_machines.append(self.mock_lumpy1)
self.mm._all_machines.append(self.mock_lumpy2)
self.mm._all_machines.append(self.mock_lumpy3)
-
def testGetMachines(self):
manager = MyMachineManager(CHROMEOS_ROOT)
for m in MACHINE_NAMES:
manager.AddMachine(m)
names = [m.name for m in manager.GetMachines(LABEL_LUMPY)]
- self.assertEqual(names, ["lumpy1", "lumpy2", "lumpy3"])
+ self.assertEqual(names, ['lumpy1', 'lumpy2', 'lumpy3'])
def testGetAvailableMachines(self):
manager = MyMachineManager(CHROMEOS_ROOT)
@@ -122,17 +121,17 @@ class MachineManagerTest(unittest.TestCase):
if int(m.name[-1]) % 2:
manager._TryToLockMachine(m)
names = [m.name for m in manager.GetAvailableMachines(LABEL_LUMPY)]
- self.assertEqual(names, ["lumpy1", "lumpy3"])
+ self.assertEqual(names, ['lumpy1', 'lumpy3'])
@mock.patch.object(time, 'sleep')
@mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
@mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
@mock.patch.object(image_checksummer.ImageChecksummer, 'Checksum')
- def test_image_machine(self, mock_checksummer, mock_run_croscmd,
- mock_run_cmd, mock_sleep):
+ def test_image_machine(self, mock_checksummer, mock_run_croscmd, mock_run_cmd,
+ mock_sleep):
def FakeMD5Checksum(input_str):
- return "machine_fake_md5_checksum"
+ return 'machine_fake_md5_checksum'
self.fake_logger_count = 0
self.fake_logger_msgs = []
@@ -148,7 +147,7 @@ class MachineManagerTest(unittest.TestCase):
mock_run_croscmd.reset_mock()
mock_checksummer.reset_mock()
mock_sleep.reset_mock()
- machine.checksum = "fake_md5_checksum"
+ machine.checksum = 'fake_md5_checksum'
self.mm.checksum = None
self.mm.num_reimages = 0
@@ -158,20 +157,20 @@ class MachineManagerTest(unittest.TestCase):
self.mm.logger.LogOutput = FakeLogOutput
machine = self.mock_lumpy1
machine._GetMD5Checksum = FakeMD5Checksum
- machine.checksum = "fake_md5_checksum"
- mock_checksummer.return_value = "fake_md5_checksum"
- self.mock_cmd_exec.log_level = "verbose"
+ machine.checksum = 'fake_md5_checksum'
+ mock_checksummer.return_value = 'fake_md5_checksum'
+ self.mock_cmd_exec.log_level = 'verbose'
test_flag.SetTestMode(True)
# Test 1: label.image_type == "local"
- LABEL_LUMPY.image_type = "local"
+ LABEL_LUMPY.image_type = 'local'
self.mm.ImageMachine(machine, LABEL_LUMPY)
self.assertEqual(mock_run_cmd.call_count, 0)
self.assertEqual(mock_run_croscmd.call_count, 0)
# Test 2: label.image_type == "trybot"
ResetValues()
- LABEL_LUMPY.image_type = "trybot"
+ LABEL_LUMPY.image_type = 'trybot'
mock_run_cmd.return_value = 0
self.mm.ImageMachine(machine, LABEL_LUMPY)
self.assertEqual(mock_run_croscmd.call_count, 0)
@@ -180,7 +179,7 @@ class MachineManagerTest(unittest.TestCase):
# Test 3: label.image_type is neither local nor trybot; retval from
# RunCommand is 1, i.e. image_chromeos fails...
ResetValues()
- LABEL_LUMPY.image_type = "other"
+ LABEL_LUMPY.image_type = 'other'
mock_run_cmd.return_value = 1
try:
self.mm.ImageMachine(machine, LABEL_LUMPY)
@@ -195,10 +194,10 @@ class MachineManagerTest(unittest.TestCase):
self.assertEqual(image_call_args[1].split('/')[-1], 'image_chromeos.pyc')
image_call_args = image_call_args[2:]
self.assertEqual(image_call_args,
- [ '--chromeos_root=/tmp/chromeos-root',
- '--image=lumpy_chromeos_image',
- '--image_args=', '--remote=lumpy1',
- '--logging_level=average', '--board=lumpy'])
+ ['--chromeos_root=/tmp/chromeos-root',
+ '--image=lumpy_chromeos_image', '--image_args=',
+ '--remote=lumpy1', '--logging_level=average',
+ '--board=lumpy'])
self.assertEqual(mock_run_croscmd.call_args[0][0], 'reboot && exit')
# Test 4: Everything works properly. Trybot image type.
@@ -210,7 +209,6 @@ class MachineManagerTest(unittest.TestCase):
self.assertEqual(mock_run_croscmd.call_count, 0)
self.assertEqual(mock_sleep.call_count, 0)
-
def test_compute_common_checksum(self):
self.mm.machine_checksum = {}
@@ -219,8 +217,8 @@ class MachineManagerTest(unittest.TestCase):
self.assertEqual(len(self.mm.machine_checksum), 1)
self.mm.machine_checksum = {}
- self.assertRaises(machine_manager.BadChecksum, self.mm.ComputeCommonCheckSum, LABEL_MIX)
-
+ self.assertRaises(machine_manager.BadChecksum,
+ self.mm.ComputeCommonCheckSum, LABEL_MIX)
def test_compute_common_checksum_string(self):
self.mm.machine_checksum_string = {}
@@ -235,19 +233,18 @@ class MachineManagerTest(unittest.TestCase):
self.assertEqual(self.mm.machine_checksum_string['mix'],
'daisy_checksum_str')
-
@mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
def test_try_to_lock_machine(self, mock_cros_runcmd):
self.assertRaises(self.mm._TryToLockMachine, None)
- mock_cros_runcmd.return_value = [0, "false_lock_checksum", ""]
+ mock_cros_runcmd.return_value = [0, 'false_lock_checksum', '']
self.mock_cmd_exec.CrosRunCommand = mock_cros_runcmd
self.mm._machines = []
self.mm._TryToLockMachine(self.mock_lumpy1)
self.assertEqual(len(self.mm._machines), 1)
self.assertEqual(self.mm._machines[0], self.mock_lumpy1)
- self.assertEqual(self.mock_lumpy1.checksum, "false_lock_checksum")
+ self.assertEqual(self.mock_lumpy1.checksum, 'false_lock_checksum')
self.assertEqual(mock_cros_runcmd.call_count, 1)
cmd_str = mock_cros_runcmd.call_args[0][0]
self.assertEqual(cmd_str, 'cat /usr/local/osimage_checksum_file')
@@ -256,31 +253,28 @@ class MachineManagerTest(unittest.TestCase):
self.assertEqual(args_dict['machine'], self.mock_lumpy1.name)
self.assertEqual(args_dict['chromeos_root'], '/usr/local/chromeos')
-
@mock.patch.object(machine_manager, 'CrosMachine')
def test_add_machine(self, mock_machine):
mock_machine.machine_checksum = 'daisy123'
- self.assertEqual (len(self.mm._all_machines), 5)
+ self.assertEqual(len(self.mm._all_machines), 5)
self.mm.AddMachine('daisy3')
- self.assertEqual (len(self.mm._all_machines), 6)
+ self.assertEqual(len(self.mm._all_machines), 6)
self.assertRaises(Exception, self.mm.AddMachine, 'lumpy1')
-
def test_remove_machine(self):
self.mm._machines = self.mm._all_machines
self.assertTrue(self.mock_lumpy2 in self.mm._machines)
self.mm.RemoveMachine(self.mock_lumpy2.name)
self.assertFalse(self.mock_lumpy2 in self.mm._machines)
-
def test_force_same_image_to_all_machines(self):
self.image_log = []
def FakeImageMachine(machine, label_arg):
image = label_arg.chromeos_image
- self.image_log.append("Pushed %s onto %s" % (image, machine.name))
+ self.image_log.append('Pushed %s onto %s' % (image, machine.name))
def FakeSetUpChecksumInfo():
pass
@@ -299,17 +293,15 @@ class MachineManagerTest(unittest.TestCase):
self.assertEqual(self.image_log[2],
'Pushed lumpy_chromeos_image onto lumpy3')
-
-
@mock.patch.object(image_checksummer.ImageChecksummer, 'Checksum')
- @mock.patch.object(hashlib,'md5')
+ @mock.patch.object(hashlib, 'md5')
def test_acquire_machine(self, mock_md5, mock_checksum):
self.msgs = []
self.log_fatal_msgs = []
def FakeLock(machine):
- self.msgs.append("Tried to lock %s" % machine.name)
+ self.msgs.append('Tried to lock %s' % machine.name)
def FakeLogFatal(msg):
self.log_fatal_msgs.append(msg)
@@ -317,17 +309,17 @@ class MachineManagerTest(unittest.TestCase):
self.mm._TryToLockMachine = FakeLock
self.mm.logger.LogFatal = FakeLogFatal
- mock_md5.return_value = "123456"
- mock_checksum.return_value = "fake_md5_checksum"
+ mock_md5.return_value = '123456'
+ mock_checksum.return_value = 'fake_md5_checksum'
self.mm._machines = self.mm._all_machines
self.mock_lumpy1.locked = True
self.mock_lumpy2.locked = True
self.mock_lumpy3.locked = False
- self.mock_lumpy3.checksum = "fake_md5_checksum"
+ self.mock_lumpy3.checksum = 'fake_md5_checksum'
self.mock_daisy1.locked = True
self.mock_daisy2.locked = False
- self.mock_daisy2.checksum = "fake_md5_checksum"
+ self.mock_daisy2.checksum = 'fake_md5_checksum'
self.mock_lumpy1.released_time = time.time()
self.mock_lumpy2.released_time = time.time()
@@ -341,8 +333,7 @@ class MachineManagerTest(unittest.TestCase):
self.assertEqual(m, self.mock_lumpy1)
self.assertTrue(self.mock_lumpy1.locked)
self.assertEqual(mock_md5.call_count, 0)
- self.assertEqual(self.msgs, ['Tried to lock lumpy1',
- 'Tried to lock lumpy2',
+ self.assertEqual(self.msgs, ['Tried to lock lumpy1', 'Tried to lock lumpy2',
'Tried to lock lumpy3'])
# Test the second return statement (machine is unlocked, has no checksum)
@@ -358,7 +349,7 @@ class MachineManagerTest(unittest.TestCase):
# - checksums don't match
# - current time minus release time is > 20.
self.mock_lumpy1.locked = False
- self.mock_lumpy1.checksum = "123"
+ self.mock_lumpy1.checksum = '123'
self.mock_lumpy1.released_time = time.time() - 8
m = self.mm.AcquireMachine(LABEL_LUMPY)
self.assertEqual(m, self.mock_lumpy1)
@@ -370,8 +361,7 @@ class MachineManagerTest(unittest.TestCase):
# Restore values of mock_lumpy1, so other tests succeed.
self.mock_lumpy1.locked = save_locked
- self.mock_lumpy1.checksum = "123"
-
+ self.mock_lumpy1.checksum = '123'
def test_get_available_machines(self):
self.mm._machines = self.mm._all_machines
@@ -387,7 +377,6 @@ class MachineManagerTest(unittest.TestCase):
self.assertEqual(machine_list, [self.mock_lumpy1, self.mock_lumpy2,
self.mock_lumpy3])
-
def test_get_machines(self):
machine_list = self.mm.GetMachines()
self.assertEqual(machine_list, self.mm._all_machines)
@@ -400,7 +389,6 @@ class MachineManagerTest(unittest.TestCase):
self.assertEqual(machine_list, [self.mock_lumpy1, self.mock_lumpy2,
self.mock_lumpy3])
-
def test_release_machines(self):
self.mm._machines = [self.mock_lumpy1, self.mock_daisy2]
@@ -411,45 +399,49 @@ class MachineManagerTest(unittest.TestCase):
self.assertTrue(self.mock_lumpy1.locked)
self.mm.ReleaseMachine(self.mock_lumpy1)
self.assertFalse(self.mock_lumpy1.locked)
- self.assertEqual(self.mock_lumpy1.status, "Available")
+ self.assertEqual(self.mock_lumpy1.status, 'Available')
self.assertTrue(self.mock_daisy2.locked)
self.mm.ReleaseMachine(self.mock_daisy2)
self.assertFalse(self.mock_daisy2.locked)
- self.assertEqual(self.mock_daisy2.status, "Available")
+ self.assertEqual(self.mock_daisy2.status, 'Available')
# Test double-release...
self.assertRaises(AssertionError, self.mm.ReleaseMachine, self.mock_lumpy1)
-
def test_cleanup(self):
self.mock_logger.reset_mock()
self.mm.Cleanup()
self.assertEqual(self.mock_logger.call_count, 0)
- OUTPUT_STR = 'Machine Status:\nMachine Thread Lock Status Checksum \nlumpy1 test run True PENDING 123 \nlumpy2 test run False PENDING 123 \nlumpy3 test run False PENDING 123 \ndaisy1 test run False PENDING 678 \ndaisy2 test run True PENDING 678 '
+ OUTPUT_STR = ('Machine Status:\nMachine Thread '
+ 'Lock Status Checksum'
+ ' \nlumpy1 test '
+ 'run True PENDING 123'
+ ' \nlumpy2 '
+ 'test run False PENDING 123'
+ ' \nlumpy3 '
+ 'test run False PENDING 123'
+ ' \ndaisy1 '
+ 'test run False PENDING 678'
+ ' \ndaisy2 '
+ 'test run True PENDING 678'
+ ' ')
def test_as_string(self):
mock_logger = mock.Mock(spec=logger.Logger)
- bench = Benchmark("page_cycler.netsim.top_10", # name
- "page_cycler.netsim.top_10", # test_name
- "", # test_args
+ bench = Benchmark('page_cycler.netsim.top_10', # name
+ 'page_cycler.netsim.top_10', # test_name
+ '', # test_args
1, # iterations
False, # rm_chroot_tmp
- "", # perf_args
- suite="telemetry_Crosperf") # suite
-
- test_run = MockBenchmarkRun("test run",
- bench,
- LABEL_LUMPY,
- 1,
- [],
- self.mm,
- mock_logger,
- "verbose",
- "")
+ '', # perf_args
+ suite='telemetry_Crosperf') # suite
+
+ test_run = MockBenchmarkRun('test run', bench, LABEL_LUMPY, 1, [], self.mm,
+ mock_logger, 'verbose', '')
self.mm._machines = [self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3,
self.mock_daisy1, self.mock_daisy2]
@@ -466,16 +458,15 @@ class MachineManagerTest(unittest.TestCase):
self.mock_daisy1.locked = False
self.mock_daisy2.locked = True
- self.mock_lumpy1.checksum = "123"
- self.mock_lumpy2.checksum = "123"
- self.mock_lumpy3.checksum = "123"
- self.mock_daisy1.checksum = "678"
- self.mock_daisy2.checksum = "678"
+ self.mock_lumpy1.checksum = '123'
+ self.mock_lumpy2.checksum = '123'
+ self.mock_lumpy3.checksum = '123'
+ self.mock_daisy1.checksum = '678'
+ self.mock_daisy2.checksum = '678'
output = self.mm.AsString()
self.assertEqual(output, self.OUTPUT_STR)
-
def test_get_all_cpu_info(self):
info = self.mm.GetAllCPUInfo([LABEL_LUMPY, LABEL_MIX])
self.assertEqual(info,
@@ -483,7 +474,6 @@ class MachineManagerTest(unittest.TestCase):
'------------------\ndaisy_cpu_info\n\n\n')
-
MEMINFO_STRING = """MemTotal: 3990332 kB
MemFree: 2608396 kB
Buffers: 147168 kB
@@ -574,7 +564,35 @@ address sizes: 36 bits physical, 48 bits virtual
power management:
"""
-CHECKSUM_STRING = "processor: 0vendor_id: GenuineIntelcpu family: 6model: 42model name: Intel(R) Celeron(R) CPU 867 @ 1.30GHzstepping: 7microcode: 0x25cache size: 2048 KBphysical id: 0siblings: 2core id: 0cpu cores: 2apicid: 0initial apicid: 0fpu: yesfpu_exception: yescpuid level: 13wp: yesflags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer xsave lahf_lm arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpidclflush size: 64cache_alignment: 64address sizes: 36 bits physical, 48 bits virtualpower management:processor: 1vendor_id: GenuineIntelcpu family: 6model: 42model name: Intel(R) Celeron(R) CPU 867 @ 1.30GHzstepping: 7microcode: 0x25cache size: 2048 KBphysical id: 0siblings: 2core id: 1cpu cores: 2apicid: 2initial apicid: 2fpu: yesfpu_exception: yescpuid level: 13wp: yesflags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer xsave lahf_lm arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpidclflush size: 64cache_alignment: 64address sizes: 36 bits physical, 48 bits virtualpower management: 4194304"
+CHECKSUM_STRING = ('processor: 0vendor_id: GenuineIntelcpu family: 6model: '
+ '42model name: Intel(R) Celeron(R) CPU 867 @ '
+ '1.30GHzstepping: 7microcode: 0x25cache size: 2048 '
+ 'KBphysical id: 0siblings: 2core id: 0cpu cores: 2apicid: '
+ '0initial apicid: 0fpu: yesfpu_exception: yescpuid level: '
+ '13wp: yesflags: fpu vme de pse tsc msr pae mce cx8 apic sep'
+ ' mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse '
+ 'sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc '
+ 'arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc '
+ 'aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 '
+ 'ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt '
+ 'tsc_deadline_timer xsave lahf_lm arat epb xsaveopt pln pts '
+ 'dts tpr_shadow vnmi flexpriority ept vpidclflush size: '
+ '64cache_alignment: 64address sizes: 36 bits physical, 48 '
+ 'bits virtualpower management:processor: 1vendor_id: '
+ 'GenuineIntelcpu family: 6model: 42model name: Intel(R) '
+ 'Celeron(R) CPU 867 @ 1.30GHzstepping: 7microcode: 0x25cache'
+ ' size: 2048 KBphysical id: 0siblings: 2core id: 1cpu cores:'
+ ' 2apicid: 2initial apicid: 2fpu: yesfpu_exception: yescpuid'
+ ' level: 13wp: yesflags: fpu vme de pse tsc msr pae mce cx8 '
+ 'apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx '
+ 'fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm '
+ 'constant_tsc arch_perfmon pebs bts rep_good nopl xtopology '
+ 'nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl '
+ 'vmx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic '
+ 'popcnt tsc_deadline_timer xsave lahf_lm arat epb xsaveopt '
+ 'pln pts dts tpr_shadow vnmi flexpriority ept vpidclflush '
+ 'size: 64cache_alignment: 64address sizes: 36 bits physical,'
+ ' 48 bits virtualpower management: 4194304')
DUMP_VPD_STRING = """
"PBA_SN"="Pba.txt"
@@ -592,7 +610,6 @@ DUMP_VPD_STRING = """
"ActivateDate"="2013-38"
"""
-
IFCONFIG_STRING = """
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 172.17.129.247 netmask 255.255.254.0 broadcast 172.17.129.255
@@ -640,12 +657,11 @@ class CrosMachineTest(unittest.TestCase):
@mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
def test_init(self, mock_setup):
- cm = machine_manager.CrosMachine("daisy.cros", "/usr/local/chromeos",
- "average", self.mock_cmd_exec)
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
self.assertEqual(mock_setup.call_count, 1)
- self.assertEqual(cm.chromeos_root, "/usr/local/chromeos")
- self.assertEqual(cm.log_level, "average")
-
+ self.assertEqual(cm.chromeos_root, '/usr/local/chromeos')
+ self.assertEqual(cm.log_level, 'average')
@mock.patch.object(machine_manager.CrosMachine, 'IsReachable')
@mock.patch.object(machine_manager.CrosMachine, '_GetMemoryInfo')
@@ -661,19 +677,19 @@ class CrosMachineTest(unittest.TestCase):
# Test 1. Machine is not reachable; SetUpChecksumInfo is called via
# __init__.
mock_isreachable.return_value = False
- mock_md5sum.return_value = "md5_checksum"
- cm = machine_manager.CrosMachine("daisy.cros", "/usr/local/chromeos",
- "average", self.mock_cmd_exec)
- cm.checksum_string = "This is a checksum string."
- cm.machine_id = "machine_id1"
+ mock_md5sum.return_value = 'md5_checksum'
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
+ cm.checksum_string = 'This is a checksum string.'
+ cm.machine_id = 'machine_id1'
self.assertEqual(mock_isreachable.call_count, 1)
self.assertIsNone(cm.machine_checksum)
self.assertEqual(mock_meminfo.call_count, 0)
# Test 2. Machine is reachable. Call explicitly.
mock_isreachable.return_value = True
- cm.checksum_string = "This is a checksum string."
- cm.machine_id = "machine_id1"
+ cm.checksum_string = 'This is a checksum string.'
+ cm.machine_id = 'machine_id1'
cm.SetUpChecksumInfo()
self.assertEqual(mock_isreachable.call_count, 2)
self.assertEqual(mock_meminfo.call_count, 1)
@@ -681,19 +697,18 @@ class CrosMachineTest(unittest.TestCase):
self.assertEqual(mock_checkstring.call_count, 1)
self.assertEqual(mock_machineid.call_count, 1)
self.assertEqual(mock_md5sum.call_count, 2)
- self.assertEqual(cm.machine_checksum, "md5_checksum")
- self.assertEqual(cm.machine_id_checksum, "md5_checksum")
+ self.assertEqual(cm.machine_checksum, 'md5_checksum')
+ self.assertEqual(cm.machine_id_checksum, 'md5_checksum')
self.assertEqual(mock_md5sum.call_args_list[0][0][0],
- "This is a checksum string.")
- self.assertEqual(mock_md5sum.call_args_list[1][0][0],
- "machine_id1")
+ 'This is a checksum string.')
+ self.assertEqual(mock_md5sum.call_args_list[1][0][0], 'machine_id1')
@mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
- @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
def test_is_reachable(self, mock_setup, mock_run_cmd):
- cm = machine_manager.CrosMachine("daisy.cros", "/usr/local/chromeos",
- "average", self.mock_cmd_exec)
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
self.mock_cmd_exec.CrosRunCommand = mock_run_cmd
# Test 1. CrosRunCommand returns 1 (fail)
@@ -710,106 +725,103 @@ class CrosMachineTest(unittest.TestCase):
self.assertEqual(mock_run_cmd.call_count, 2)
first_args = mock_run_cmd.call_args_list[0]
second_args = mock_run_cmd.call_args_list[1]
- self.assertEqual (first_args[0], second_args[0])
- self.assertEqual (first_args[1], second_args[1])
- self.assertEqual (len(first_args[0]), 1)
- self.assertEqual (len(first_args[1]), 2)
- self.assertEqual (first_args[0][0], 'ls')
+ self.assertEqual(first_args[0], second_args[0])
+ self.assertEqual(first_args[1], second_args[1])
+ self.assertEqual(len(first_args[0]), 1)
+ self.assertEqual(len(first_args[1]), 2)
+ self.assertEqual(first_args[0][0], 'ls')
args_dict = first_args[1]
- self.assertEqual (args_dict['machine'], 'daisy.cros')
- self.assertEqual (args_dict['chromeos_root'], '/usr/local/chromeos')
-
+ self.assertEqual(args_dict['machine'], 'daisy.cros')
+ self.assertEqual(args_dict['chromeos_root'], '/usr/local/chromeos')
- @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
def test_parse_memory_info(self, mock_setup):
- cm = machine_manager.CrosMachine("daisy.cros", "/usr/local/chromeos",
- "average", self.mock_cmd_exec)
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
cm.meminfo = MEMINFO_STRING
cm._ParseMemoryInfo()
self.assertEqual(cm.phys_kbytes, 4194304)
-
@mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
- @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
def test_get_memory_info(self, mock_setup, mock_run_cmd):
- cm = machine_manager.CrosMachine("daisy.cros", "/usr/local/chromeos",
- "average", self.mock_cmd_exec)
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
self.mock_cmd_exec.CrosRunCommand = mock_run_cmd
- mock_run_cmd.return_value = [0, MEMINFO_STRING, ""]
+ mock_run_cmd.return_value = [0, MEMINFO_STRING, '']
cm._GetMemoryInfo()
self.assertEqual(mock_run_cmd.call_count, 1)
call_args = mock_run_cmd.call_args_list[0]
- self.assertEqual(call_args[0][0], "cat /proc/meminfo")
+ self.assertEqual(call_args[0][0], 'cat /proc/meminfo')
args_dict = call_args[1]
self.assertEqual(args_dict['machine'], 'daisy.cros')
self.assertEqual(args_dict['chromeos_root'], '/usr/local/chromeos')
self.assertEqual(cm.meminfo, MEMINFO_STRING)
self.assertEqual(cm.phys_kbytes, 4194304)
- mock_run_cmd.return_value = [1, MEMINFO_STRING, ""]
- self.assertRaises (cm._GetMemoryInfo)
-
+ mock_run_cmd.return_value = [1, MEMINFO_STRING, '']
+ self.assertRaises(cm._GetMemoryInfo)
@mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
- @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
def test_get_cpu_info(self, mock_setup, mock_run_cmd):
- cm = machine_manager.CrosMachine("daisy.cros", "/usr/local/chromeos",
- "average", self.mock_cmd_exec)
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
self.mock_cmd_exec.CrosRunCommand = mock_run_cmd
- mock_run_cmd.return_value = [0, CPUINFO_STRING, ""]
+ mock_run_cmd.return_value = [0, CPUINFO_STRING, '']
cm._GetCPUInfo()
self.assertEqual(mock_run_cmd.call_count, 1)
call_args = mock_run_cmd.call_args_list[0]
- self.assertEqual(call_args[0][0], "cat /proc/cpuinfo")
+ self.assertEqual(call_args[0][0], 'cat /proc/cpuinfo')
args_dict = call_args[1]
self.assertEqual(args_dict['machine'], 'daisy.cros')
self.assertEqual(args_dict['chromeos_root'], '/usr/local/chromeos')
self.assertEqual(cm.cpuinfo, CPUINFO_STRING)
-
- @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
def test_compute_machine_checksum_string(self, mock_setup):
- cm = machine_manager.CrosMachine("daisy.cros", "/usr/local/chromeos",
- "average", self.mock_cmd_exec)
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
cm.cpuinfo = CPUINFO_STRING
cm.meminfo = MEMINFO_STRING
cm._ParseMemoryInfo()
cm._ComputeMachineChecksumString()
self.assertEqual(cm.checksum_string, CHECKSUM_STRING)
-
- @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
def test_get_md5_checksum(self, mock_setup):
- cm = machine_manager.CrosMachine("daisy.cros", "/usr/local/chromeos",
- "average", self.mock_cmd_exec)
- temp_str = "abcde"
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
+ temp_str = 'abcde'
checksum_str = cm._GetMD5Checksum(temp_str)
- self.assertEqual(checksum_str, "ab56b4d92b40713acc5af89985d4b786")
+ self.assertEqual(checksum_str, 'ab56b4d92b40713acc5af89985d4b786')
- temp_str = ""
+ temp_str = ''
checksum_str = cm._GetMD5Checksum(temp_str)
- self.assertEqual(checksum_str, "")
-
+ self.assertEqual(checksum_str, '')
@mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
- @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
def test_get_machine_id(self, mock_setup, mock_run_cmd):
- cm = machine_manager.CrosMachine("daisy.cros", "/usr/local/chromeos",
- "average", self.mock_cmd_exec)
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
self.mock_cmd_exec.CrosRunCommand = mock_run_cmd
- mock_run_cmd.return_value = [0, DUMP_VPD_STRING, ""]
+ mock_run_cmd.return_value = [0, DUMP_VPD_STRING, '']
cm._GetMachineID()
self.assertEqual(cm.machine_id, '"Product_S/N"="HT4L91SC300208"')
- mock_run_cmd.return_value = [0, IFCONFIG_STRING, ""]
+ mock_run_cmd.return_value = [0, IFCONFIG_STRING, '']
cm._GetMachineID()
- self.assertEqual(cm.machine_id, " ether 00:50:b6:63:db:65 txqueuelen 1000 (Ethernet)_ ether e8:03:9a:9c:50:3d txqueuelen 1000 (Ethernet)_ ether 44:6d:57:20:4a:c5 txqueuelen 1000 (Ethernet)")
+ self.assertEqual(
+ cm.machine_id,
+ ' ether 00:50:b6:63:db:65 txqueuelen 1000 (Ethernet)_ '
+ 'ether e8:03:9a:9c:50:3d txqueuelen 1000 (Ethernet)_ ether '
+ '44:6d:57:20:4a:c5 txqueuelen 1000 (Ethernet)')
- mock_run_cmd.return_value = [0, "invalid hardware config", ""]
+ mock_run_cmd.return_value = [0, 'invalid hardware config', '']
self.assertRaises(cm._GetMachineID)
-
-if __name__ == "__main__":
+if __name__ == '__main__':
unittest.main()
diff --git a/crosperf/mock_instance.py b/crosperf/mock_instance.py
index 62ff2da4..b689565c 100644
--- a/crosperf/mock_instance.py
+++ b/crosperf/mock_instance.py
@@ -1,9 +1,7 @@
-#!/usr/bin/python
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""This contains some mock instances for testing."""
from benchmark import Benchmark
@@ -12,103 +10,114 @@ from label import MockLabel
from machine_manager import MockMachineManager
from results_cache import MockResultsCache
-perf_args = "record -a -e cycles"
-label1 = MockLabel("test1", "image1", "/tmp/test_benchmark_run",
- "x86-alex", "chromeos-alex1", image_args="",
- cache_dir="", cache_only=False, log_level="average",
- compiler="gcc")
-
-label2 = MockLabel("test2", "image2", "/tmp/test_benchmark_run_2",
- "x86-alex", "chromeos-alex2", image_args="",
- cache_dir="", cache_only=False, log_level="average",
- compiler="gcc")
-
-benchmark1 = Benchmark("benchmark1", "autotest_name_1",
- "autotest_args", 2, "", perf_args, "", "")
-
-benchmark2 = Benchmark("benchmark2", "autotest_name_2",
- "autotest_args", 2, "", perf_args, "", "")
-
+perf_args = 'record -a -e cycles'
+label1 = MockLabel('test1',
+ 'image1',
+ '/tmp/test_benchmark_run',
+ 'x86-alex',
+ 'chromeos-alex1',
+ image_args='',
+ cache_dir='',
+ cache_only=False,
+ log_level='average',
+ compiler='gcc')
+
+label2 = MockLabel('test2',
+ 'image2',
+ '/tmp/test_benchmark_run_2',
+ 'x86-alex',
+ 'chromeos-alex2',
+ image_args='',
+ cache_dir='',
+ cache_only=False,
+ log_level='average',
+ compiler='gcc')
+
+benchmark1 = Benchmark('benchmark1', 'autotest_name_1', 'autotest_args', 2, '',
+ perf_args, '', '')
+
+benchmark2 = Benchmark('benchmark2', 'autotest_name_2', 'autotest_args', 2, '',
+ perf_args, '', '')
keyval = {}
keyval[0] = {'': 'PASS',
- 'milliseconds_1': '1',
- 'milliseconds_2': '8',
- 'milliseconds_3': '9.2',
- 'test{1}': '2',
- 'test{2}': '4',
- 'ms_1': '2.1',
- 'total': '5',
- 'bool': 'True'}
+ 'milliseconds_1': '1',
+ 'milliseconds_2': '8',
+ 'milliseconds_3': '9.2',
+ 'test{1}': '2',
+ 'test{2}': '4',
+ 'ms_1': '2.1',
+ 'total': '5',
+ 'bool': 'True'}
keyval[1] = {'': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_2': '5',
- 'ms_1': '2.2',
- 'total': '6',
- 'test{1}': '3',
- 'test{2}': '4',
- 'bool': 'FALSE'}
+ 'milliseconds_1': '3',
+ 'milliseconds_2': '5',
+ 'ms_1': '2.2',
+ 'total': '6',
+ 'test{1}': '3',
+ 'test{2}': '4',
+ 'bool': 'FALSE'}
keyval[2] = {'': 'PASS',
- 'milliseconds_4': '30',
- 'milliseconds_5': '50',
- 'ms_1': '2.23',
- 'total': '6',
- 'test{1}': '5',
- 'test{2}': '4',
- 'bool': 'FALSE'}
+ 'milliseconds_4': '30',
+ 'milliseconds_5': '50',
+ 'ms_1': '2.23',
+ 'total': '6',
+ 'test{1}': '5',
+ 'test{2}': '4',
+ 'bool': 'FALSE'}
keyval[3] = {'': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_6': '7',
- 'ms_1': '2.3',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '6',
- 'bool': 'FALSE'}
+ 'milliseconds_1': '3',
+ 'milliseconds_6': '7',
+ 'ms_1': '2.3',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '6',
+ 'bool': 'FALSE'}
keyval[4] = {'': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2.3',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '6',
- 'bool': 'TRUE'}
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2.3',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '6',
+ 'bool': 'TRUE'}
keyval[5] = {'': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2.2',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '2',
- 'bool': 'TRUE'}
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2.2',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '2',
+ 'bool': 'TRUE'}
keyval[6] = {'': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '4',
- 'bool': 'TRUE'}
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '4',
+ 'bool': 'TRUE'}
keyval[7] = {'': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '1',
- 'total': '7',
- 'test{1}': '1',
- 'test{2}': '6',
- 'bool': 'TRUE'}
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '1',
+ 'total': '7',
+ 'test{1}': '1',
+ 'test{2}': '6',
+ 'bool': 'TRUE'}
keyval[8] = {'': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '3.3',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '8',
- 'bool': 'TRUE'}
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '3.3',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '8',
+ 'bool': 'TRUE'}
diff --git a/crosperf/perf_table.py b/crosperf/perf_table.py
index 7e21c83c..c996719d 100644
--- a/crosperf/perf_table.py
+++ b/crosperf/perf_table.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Copyright 2012 Google Inc. All Rights Reserved.
"""Parse perf report data for tabulator."""
@@ -11,8 +9,10 @@ from utils import perf_diff
def ParsePerfReport(perf_file):
"""It should return a dict."""
- return {"cycles": {"foo": 10, "bar": 20},
- "cache_miss": {"foo": 20, "bar": 10}}
+ return {'cycles': {'foo': 10,
+ 'bar': 20},
+ 'cache_miss': {'foo': 20,
+ 'bar': 10}}
class PerfTable(object):
@@ -37,12 +37,11 @@ class PerfTable(object):
def GenerateData(self):
for label in self._label_names:
for benchmark in self._experiment.benchmarks:
- for i in range(1, benchmark.iterations+1):
+ for i in range(1, benchmark.iterations + 1):
dir_name = label + benchmark.name + str(i)
dir_name = filter(str.isalnum, dir_name)
- perf_file = os.path.join(self._experiment.results_directory,
- dir_name,
- "perf.data.report.0")
+ perf_file = os.path.join(self._experiment.results_directory, dir_name,
+ 'perf.data.report.0')
if os.path.exists(perf_file):
self.ReadPerfReport(perf_file, label, benchmark.name, i - 1)
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index cdf14315..fc619738 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -1,9 +1,7 @@
-#!/usr/bin/python
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Module to deal with result cache."""
import getpass
@@ -24,12 +22,13 @@ from image_checksummer import ImageChecksummer
import results_report
import test_flag
-SCRATCH_DIR = os.path.expanduser("~/cros_scratch")
-RESULTS_FILE = "results.txt"
-MACHINE_FILE = "machine.txt"
-AUTOTEST_TARBALL = "autotest.tbz2"
-PERF_RESULTS_FILE = "perf-results.txt"
-CACHE_KEYS_FILE = "cache_keys.txt"
+SCRATCH_DIR = os.path.expanduser('~/cros_scratch')
+RESULTS_FILE = 'results.txt'
+MACHINE_FILE = 'machine.txt'
+AUTOTEST_TARBALL = 'autotest.tbz2'
+PERF_RESULTS_FILE = 'perf-results.txt'
+CACHE_KEYS_FILE = 'cache_keys.txt'
+
class Result(object):
""" This class manages what exactly is stored inside the cache without knowing
@@ -40,8 +39,9 @@ class Result(object):
def __init__(self, logger, label, log_level, machine, cmd_exec=None):
self._chromeos_root = label.chromeos_root
self._logger = logger
- self._ce = cmd_exec or command_executer.GetCommandExecuter(self._logger,
- log_level=log_level)
+ self._ce = cmd_exec or command_executer.GetCommandExecuter(
+ self._logger,
+ log_level=log_level)
self._temp_dir = None
self.label = label
self.results_dir = None
@@ -54,28 +54,26 @@ class Result(object):
file_index = 0
for file_to_copy in files_to_copy:
if not os.path.isdir(dest_dir):
- command = "mkdir -p %s" % dest_dir
+ command = 'mkdir -p %s' % dest_dir
self._ce.RunCommand(command)
dest_file = os.path.join(dest_dir,
- ("%s.%s" % (os.path.basename(file_to_copy),
+ ('%s.%s' % (os.path.basename(file_to_copy),
file_index)))
- ret = self._ce.CopyFiles(file_to_copy,
- dest_file,
- recursive=False)
+ ret = self._ce.CopyFiles(file_to_copy, dest_file, recursive=False)
if ret:
- raise Exception("Could not copy results file: %s" % file_to_copy)
+ raise Exception('Could not copy results file: %s' % file_to_copy)
def CopyResultsTo(self, dest_dir):
self._CopyFilesTo(dest_dir, self.perf_data_files)
self._CopyFilesTo(dest_dir, self.perf_report_files)
if len(self.perf_data_files) or len(self.perf_report_files):
- self._logger.LogOutput("Perf results files stored in %s." % dest_dir)
+ self._logger.LogOutput('Perf results files stored in %s.' % dest_dir)
def _GetNewKeyvals(self, keyvals_dict):
# Initialize 'units' dictionary.
units_dict = {}
for k in keyvals_dict:
- units_dict[k] = ""
+ units_dict[k] = ''
results_files = self._GetDataMeasurementsFiles()
for f in results_files:
# Make sure we can find the results file
@@ -85,26 +83,23 @@ class Result(object):
# Otherwise get the base filename and create the correct
# path for it.
f_dir, f_base = misc.GetRoot(f)
- data_filename = os.path.join(self._chromeos_root, "chroot/tmp",
+ data_filename = os.path.join(self._chromeos_root, 'chroot/tmp',
self._temp_dir, f_base)
if os.path.exists(data_filename):
- with open(data_filename, "r") as data_file:
+ with open(data_filename, 'r') as data_file:
lines = data_file.readlines()
for line in lines:
tmp_dict = json.loads(line)
- graph_name = tmp_dict["graph"]
- graph_str = (graph_name + "__") if graph_name else ""
- key = graph_str + tmp_dict["description"]
- keyvals_dict[key] = tmp_dict["value"]
- units_dict[key] = tmp_dict["units"]
+ graph_name = tmp_dict['graph']
+ graph_str = (graph_name + '__') if graph_name else ''
+ key = graph_str + tmp_dict['description']
+ keyvals_dict[key] = tmp_dict['value']
+ units_dict[key] = tmp_dict['units']
return keyvals_dict, units_dict
-
def _AppendTelemetryUnits(self, keyvals_dict, units_dict):
- """
- keyvals_dict is the dictionary of key-value pairs that is used for
- generating Crosperf reports.
+ """keyvals_dict is the dictionary of key-value pairs that is used for generating Crosperf reports.
units_dict is a dictionary of the units for the return values in
keyvals_dict. We need to associate the units with the return values,
@@ -119,31 +114,31 @@ class Result(object):
results_dict = {}
for k in keyvals_dict:
# We don't want these lines in our reports; they add no useful data.
- if k == "" or k == "telemetry_Crosperf":
+ if k == '' or k == 'telemetry_Crosperf':
continue
val = keyvals_dict[k]
units = units_dict[k]
- new_val = [ val, units ]
+ new_val = [val, units]
results_dict[k] = new_val
return results_dict
def _GetKeyvals(self, show_all):
- results_in_chroot = os.path.join(self._chromeos_root,
- "chroot", "tmp")
+ results_in_chroot = os.path.join(self._chromeos_root, 'chroot', 'tmp')
if not self._temp_dir:
self._temp_dir = tempfile.mkdtemp(dir=results_in_chroot)
- command = "cp -r {0}/* {1}".format(self.results_dir, self._temp_dir)
+ command = 'cp -r {0}/* {1}'.format(self.results_dir, self._temp_dir)
self._ce.RunCommand(command, print_to_console=False)
- command = ("python generate_test_report --no-color --csv %s" %
- (os.path.join("/tmp", os.path.basename(self._temp_dir))))
- _, out, _ = self._ce.ChrootRunCommandWOutput(
- self._chromeos_root, command, print_to_console=False)
+ command = ('python generate_test_report --no-color --csv %s' %
+ (os.path.join('/tmp', os.path.basename(self._temp_dir))))
+ _, out, _ = self._ce.ChrootRunCommandWOutput(self._chromeos_root,
+ command,
+ print_to_console=False)
keyvals_dict = {}
tmp_dir_in_chroot = misc.GetInsideChrootPath(self._chromeos_root,
self._temp_dir)
for line in out.splitlines():
- tokens = re.split("=|,", line)
+ tokens = re.split('=|,', line)
key = tokens[-2]
if key.startswith(tmp_dir_in_chroot):
key = key[len(tmp_dir_in_chroot) + 1:]
@@ -153,39 +148,37 @@ class Result(object):
# Check to see if there is a perf_measurements file and get the
# data from it if so.
keyvals_dict, units_dict = self._GetNewKeyvals(keyvals_dict)
- if self.suite == "telemetry_Crosperf":
+ if self.suite == 'telemetry_Crosperf':
# For telemetry_Crosperf results, append the units to the return
# results, for use in generating the reports.
- keyvals_dict = self._AppendTelemetryUnits(keyvals_dict,
- units_dict)
+ keyvals_dict = self._AppendTelemetryUnits(keyvals_dict, units_dict)
return keyvals_dict
def _GetResultsDir(self):
- mo = re.search(r"Results placed in (\S+)", self.out)
+ mo = re.search(r'Results placed in (\S+)', self.out)
if mo:
result = mo.group(1)
return result
- raise Exception("Could not find results directory.")
+ raise Exception('Could not find results directory.')
def _FindFilesInResultsDir(self, find_args):
if not self.results_dir:
return None
- command = "find %s %s" % (self.results_dir,
- find_args)
+ command = 'find %s %s' % (self.results_dir, find_args)
ret, out, _ = self._ce.RunCommandWOutput(command, print_to_console=False)
if ret:
- raise Exception("Could not run find command!")
+ raise Exception('Could not run find command!')
return out
def _GetPerfDataFiles(self):
- return self._FindFilesInResultsDir("-name perf.data").splitlines()
+ return self._FindFilesInResultsDir('-name perf.data').splitlines()
def _GetPerfReportFiles(self):
- return self._FindFilesInResultsDir("-name perf.data.report").splitlines()
+ return self._FindFilesInResultsDir('-name perf.data.report').splitlines()
def _GetDataMeasurementsFiles(self):
- return self._FindFilesInResultsDir("-name perf_measurements").splitlines()
+ return self._FindFilesInResultsDir('-name perf_measurements').splitlines()
def _GeneratePerfReportFiles(self):
perf_report_files = []
@@ -194,65 +187,57 @@ class Result(object):
# file.
chroot_perf_data_file = misc.GetInsideChrootPath(self._chromeos_root,
perf_data_file)
- perf_report_file = "%s.report" % perf_data_file
+ perf_report_file = '%s.report' % perf_data_file
if os.path.exists(perf_report_file):
- raise Exception("Perf report file already exists: %s" %
+ raise Exception('Perf report file already exists: %s' %
perf_report_file)
chroot_perf_report_file = misc.GetInsideChrootPath(self._chromeos_root,
perf_report_file)
- perf_path = os.path.join (self._chromeos_root,
- "chroot",
- "usr/bin/perf")
+ perf_path = os.path.join(self._chromeos_root, 'chroot', 'usr/bin/perf')
- perf_file = "/usr/sbin/perf"
+ perf_file = '/usr/sbin/perf'
if os.path.exists(perf_path):
- perf_file = "/usr/bin/perf"
+ perf_file = '/usr/bin/perf'
# The following is a hack, to use the perf.static binary that
# was given to us by Stephane Eranian, until he can figure out
# why "normal" perf cannot properly symbolize ChromeOS perf.data files.
# Get the directory containing the 'crosperf' script.
dirname, _ = misc.GetRoot(sys.argv[0])
- perf_path = os.path.join (dirname, "..", "perf.static")
+ perf_path = os.path.join(dirname, '..', 'perf.static')
if os.path.exists(perf_path):
# copy the executable into the chroot so that it can be found.
src_path = perf_path
- dst_path = os.path.join (self._chromeos_root, "chroot",
- "tmp/perf.static")
- command = "cp %s %s" % (src_path,dst_path)
- self._ce.RunCommand (command)
- perf_file = "/tmp/perf.static"
-
- command = ("%s report "
- "-n "
- "--symfs /build/%s "
- "--vmlinux /build/%s/usr/lib/debug/boot/vmlinux "
- "--kallsyms /build/%s/boot/System.map-* "
- "-i %s --stdio "
- "> %s" %
- (perf_file,
- self._board,
- self._board,
- self._board,
- chroot_perf_data_file,
- chroot_perf_report_file))
+ dst_path = os.path.join(self._chromeos_root, 'chroot',
+ 'tmp/perf.static')
+ command = 'cp %s %s' % (src_path, dst_path)
+ self._ce.RunCommand(command)
+ perf_file = '/tmp/perf.static'
+
+ command = ('%s report '
+ '-n '
+ '--symfs /build/%s '
+ '--vmlinux /build/%s/usr/lib/debug/boot/vmlinux '
+ '--kallsyms /build/%s/boot/System.map-* '
+ '-i %s --stdio '
+ '> %s' % (perf_file, self._board, self._board, self._board,
+ chroot_perf_data_file, chroot_perf_report_file))
self._ce.ChrootRunCommand(self._chromeos_root, command)
# Add a keyval to the dictionary for the events captured.
- perf_report_files.append(
- misc.GetOutsideChrootPath(self._chromeos_root,
- chroot_perf_report_file))
+ perf_report_files.append(misc.GetOutsideChrootPath(
+ self._chromeos_root, chroot_perf_report_file))
return perf_report_files
def _GatherPerfResults(self):
report_id = 0
for perf_report_file in self.perf_report_files:
- with open(perf_report_file, "r") as f:
+ with open(perf_report_file, 'r') as f:
report_contents = f.read()
- for group in re.findall(r"Events: (\S+) (\S+)", report_contents):
+ for group in re.findall(r'Events: (\S+) (\S+)', report_contents):
num_events = group[0]
event_name = group[1]
- key = "perf_%s_%s" % (report_id, event_name)
+ key = 'perf_%s_%s' % (report_id, event_name)
value = str(misc.UnitToNumber(num_events))
self.keyvals[key] = value
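
The _GatherPerfResults hunk above boils each generated perf report down to keyvals named perf_<report id>_<event>. As a rough standalone illustration (with made-up report contents, and without the misc.UnitToNumber conversion the real code applies to the count), the parsing step amounts to:

import re

def gather_perf_keyvals(report_texts):
  """A sketch of the loop in _GatherPerfResults above.

  'report_texts' stands in for the contents of each file in
  self.perf_report_files; the real code converts the event count with
  misc.UnitToNumber before storing it.
  """
  keyvals = {}
  for report_id, contents in enumerate(report_texts):
    for num_events, event_name in re.findall(r'Events: (\S+) (\S+)', contents):
      keyvals['perf_%s_%s' % (report_id, event_name)] = num_events
  return keyvals

print(gather_perf_keyvals(['Events: 10K cycles\nEvents: 2K instructions\n']))
# Prints {'perf_0_cycles': '10K', 'perf_0_instructions': '2K'} (dict order may vary).
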
@@ -279,7 +264,7 @@ class Result(object):
# cache hit or miss. It should process results agnostic of the cache hit
# state.
self.keyvals = self._GetKeyvals(show_all)
- self.keyvals["retval"] = self.retval
+ self.keyvals['retval'] = self.retval
# Generate report from all perf.data files.
# Now parse all perf report files and include them in keyvals.
self._GatherPerfResults()
@@ -288,21 +273,20 @@ class Result(object):
self.test_name = test
self.suite = suite
# Read in everything from the cache directory.
- with open(os.path.join(cache_dir, RESULTS_FILE), "r") as f:
+ with open(os.path.join(cache_dir, RESULTS_FILE), 'r') as f:
self.out = pickle.load(f)
self.err = pickle.load(f)
self.retval = pickle.load(f)
# Untar the tarball to a temporary directory
- self._temp_dir = tempfile.mkdtemp(dir=os.path.join(self._chromeos_root,
- "chroot", "tmp"))
+ self._temp_dir = tempfile.mkdtemp(
+ dir=os.path.join(self._chromeos_root, 'chroot', 'tmp'))
- command = ("cd %s && tar xf %s" %
- (self._temp_dir,
- os.path.join(cache_dir, AUTOTEST_TARBALL)))
+ command = ('cd %s && tar xf %s' %
+ (self._temp_dir, os.path.join(cache_dir, AUTOTEST_TARBALL)))
ret = self._ce.RunCommand(command, print_to_console=False)
if ret:
- raise Exception("Could not untar cached tarball")
+ raise Exception('Could not untar cached tarball')
self.results_dir = self._temp_dir
self.perf_data_files = self._GetPerfDataFiles()
self.perf_report_files = self._GetPerfReportFiles()
@@ -311,13 +295,13 @@ class Result(object):
def CleanUp(self, rm_chroot_tmp):
if rm_chroot_tmp and self.results_dir:
dirname, basename = misc.GetRoot(self.results_dir)
- if basename.find("test_that_results_") != -1:
- command = "rm -rf %s" % self.results_dir
+ if basename.find('test_that_results_') != -1:
+ command = 'rm -rf %s' % self.results_dir
else:
- command = "rm -rf %s" % dirname
+ command = 'rm -rf %s' % dirname
self._ce.RunCommand(command)
if self._temp_dir:
- command = "rm -rf %s" % self._temp_dir
+ command = 'rm -rf %s' % self._temp_dir
self._ce.RunCommand(command)
def StoreToCacheDir(self, cache_dir, machine_manager, key_list):
@@ -325,54 +309,62 @@ class Result(object):
temp_dir = tempfile.mkdtemp()
# Store to the temp directory.
- with open(os.path.join(temp_dir, RESULTS_FILE), "w") as f:
+ with open(os.path.join(temp_dir, RESULTS_FILE), 'w') as f:
pickle.dump(self.out, f)
pickle.dump(self.err, f)
pickle.dump(self.retval, f)
if not test_flag.GetTestMode():
- with open(os.path.join(temp_dir, CACHE_KEYS_FILE), "w") as f:
- f.write("%s\n" % self.label.name)
- f.write("%s\n" % self.label.chrome_version)
- f.write("%s\n" % self.machine.checksum_string)
+ with open(os.path.join(temp_dir, CACHE_KEYS_FILE), 'w') as f:
+ f.write('%s\n' % self.label.name)
+ f.write('%s\n' % self.label.chrome_version)
+ f.write('%s\n' % self.machine.checksum_string)
for k in key_list:
f.write(k)
- f.write("\n")
+ f.write('\n')
if self.results_dir:
tarball = os.path.join(temp_dir, AUTOTEST_TARBALL)
- command = ("cd %s && "
- "tar "
- "--exclude=var/spool "
- "--exclude=var/log "
- "-cjf %s ." % (self.results_dir, tarball))
+ command = ('cd %s && '
+ 'tar '
+ '--exclude=var/spool '
+ '--exclude=var/log '
+ '-cjf %s .' % (self.results_dir, tarball))
ret = self._ce.RunCommand(command)
if ret:
raise Exception("Couldn't store autotest output directory.")
# Store machine info.
# TODO(asharif): Make machine_manager a singleton, and don't pass it into
# this function.
- with open(os.path.join(temp_dir, MACHINE_FILE), "w") as f:
+ with open(os.path.join(temp_dir, MACHINE_FILE), 'w') as f:
f.write(machine_manager.machine_checksum_string[self.label.name])
if os.path.exists(cache_dir):
- command = "rm -rf {0}".format(cache_dir)
+ command = 'rm -rf {0}'.format(cache_dir)
self._ce.RunCommand(command)
- command = "mkdir -p {0} && ".format(os.path.dirname(cache_dir))
- command += "chmod g+x {0} && ".format(temp_dir)
- command += "mv {0} {1}".format(temp_dir, cache_dir)
+ command = 'mkdir -p {0} && '.format(os.path.dirname(cache_dir))
+ command += 'chmod g+x {0} && '.format(temp_dir)
+ command += 'mv {0} {1}'.format(temp_dir, cache_dir)
ret = self._ce.RunCommand(command)
if ret:
- command = "rm -rf {0}".format(temp_dir)
+ command = 'rm -rf {0}'.format(temp_dir)
self._ce.RunCommand(command)
- raise Exception("Could not move dir %s to dir %s" %
- (temp_dir, cache_dir))
+ raise Exception('Could not move dir %s to dir %s' % (temp_dir, cache_dir))
@classmethod
- def CreateFromRun(cls, logger, log_level, label, machine, out, err, retval,
- show_all, test, suite="telemetry_Crosperf"):
- if suite == "telemetry":
+ def CreateFromRun(cls,
+ logger,
+ log_level,
+ label,
+ machine,
+ out,
+ err,
+ retval,
+ show_all,
+ test,
+ suite='telemetry_Crosperf'):
+ if suite == 'telemetry':
result = TelemetryResult(logger, label, log_level, machine)
else:
result = cls(logger, label, log_level, machine)
@@ -380,9 +372,16 @@ class Result(object):
return result
@classmethod
- def CreateFromCacheHit(cls, logger, log_level, label, machine, cache_dir,
- show_all, test, suite="telemetry_Crosperf"):
- if suite == "telemetry":
+ def CreateFromCacheHit(cls,
+ logger,
+ log_level,
+ label,
+ machine,
+ cache_dir,
+ show_all,
+ test,
+ suite='telemetry_Crosperf'):
+ if suite == 'telemetry':
result = TelemetryResult(logger, label, log_level, machine)
else:
result = cls(logger, label, log_level, machine)
@@ -390,7 +389,7 @@ class Result(object):
result._PopulateFromCacheDir(cache_dir, show_all, test, suite)
except Exception as e:
- logger.LogError("Exception while using cache: %s" % e)
+ logger.LogError('Exception while using cache: %s' % e)
return None
return result
@@ -424,24 +423,24 @@ class TelemetryResult(Result):
self.keyvals = {}
if lines:
- if lines[0].startswith("JSON.stringify"):
+ if lines[0].startswith('JSON.stringify'):
lines = lines[1:]
if not lines:
return
- labels = lines[0].split(",")
+ labels = lines[0].split(',')
for line in lines[1:]:
- fields = line.split(",")
+ fields = line.split(',')
if len(fields) != len(labels):
continue
for i in range(1, len(labels)):
- key = "%s %s" % (fields[0], labels[i])
+ key = '%s %s' % (fields[0], labels[i])
value = fields[i]
self.keyvals[key] = value
- self.keyvals["retval"] = self.retval
+ self.keyvals['retval'] = self.retval
def _PopulateFromCacheDir(self, cache_dir):
- with open(os.path.join(cache_dir, RESULTS_FILE), "r") as f:
+ with open(os.path.join(cache_dir, RESULTS_FILE), 'r') as f:
self.out = pickle.load(f)
self.err = pickle.load(f)
self.retval = pickle.load(f)
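
The TelemetryResult._ProcessResults hunk above splits telemetry's CSV-style output into keyvals keyed by page name plus column label. A minimal sketch of that split, run on a made-up two-line CSV rather than real telemetry output:

def parse_telemetry_csv(lines, retval):
  """A sketch of the CSV handling in TelemetryResult._ProcessResults above.

  'lines' is the already-split telemetry output; the real code first drops a
  leading 'JSON.stringify' line before reading the header row.
  """
  keyvals = {}
  labels = lines[0].split(',')
  for line in lines[1:]:
    fields = line.split(',')
    if len(fields) != len(labels):
      continue
    for i in range(1, len(labels)):
      keyvals['%s %s' % (fields[0], labels[i])] = fields[i]
  keyvals['retval'] = retval
  return keyvals

print(parse_telemetry_csv(['page_name,Total (ms)', 'driver.html,656.5'], 0))
# Prints {'driver.html Total (ms)': '656.5', 'retval': 0}
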
@@ -474,17 +473,16 @@ class CacheConditions(object):
class ResultsCache(object):
-
""" This class manages the key of the cached runs without worrying about what
is exactly stored (value). The value generation is handled by the Results
class.
"""
CACHE_VERSION = 6
- def Init(self, chromeos_image, chromeos_root, test_name, iteration,
- test_args, profiler_args, machine_manager, machine, board,
- cache_conditions, logger_to_use, log_level, label, share_cache,
- suite, show_all_results, run_local):
+ def Init(self, chromeos_image, chromeos_root, test_name, iteration, test_args,
+ profiler_args, machine_manager, machine, board, cache_conditions,
+ logger_to_use, log_level, label, share_cache, suite,
+ show_all_results, run_local):
self.chromeos_image = chromeos_image
self.chromeos_root = chromeos_root
self.test_name = test_name
@@ -519,19 +517,17 @@ class ResultsCache(object):
def _GetCacheDirForWrite(self, get_keylist=False):
cache_path = self._FormCacheDir(self._GetCacheKeyList(False))[0]
if get_keylist:
- args_str = "%s_%s_%s" % (self.test_args,
- self.profiler_args,
+ args_str = '%s_%s_%s' % (self.test_args, self.profiler_args,
self.run_local)
version, image = results_report.ParseChromeosImage(
self.label.chromeos_image)
- keylist = [version, image, self.label.board,
- self.machine.name, self.test_name, str(self.iteration),
- args_str]
+ keylist = [version, image, self.label.board, self.machine.name,
+ self.test_name, str(self.iteration), args_str]
return cache_path, keylist
return cache_path
def _FormCacheDir(self, list_of_strings):
- cache_key = " ".join(list_of_strings)
+ cache_key = ' '.join(list_of_strings)
cache_dir = misc.GetFilenameFromString(cache_key)
if self.label.cache_dir:
cache_home = os.path.abspath(os.path.expanduser(self.label.cache_dir))
@@ -540,36 +536,36 @@ class ResultsCache(object):
cache_path = [os.path.join(SCRATCH_DIR, cache_dir)]
if len(self.share_cache):
- for path in [x.strip() for x in self.share_cache.split(",")]:
+ for path in [x.strip() for x in self.share_cache.split(',')]:
if os.path.exists(path):
cache_path.append(os.path.join(path, cache_dir))
else:
- self._logger.LogFatal("Unable to find shared cache: %s" % path)
+ self._logger.LogFatal('Unable to find shared cache: %s' % path)
return cache_path
def _GetCacheKeyList(self, read):
if read and CacheConditions.MACHINES_MATCH not in self.cache_conditions:
- machine_checksum = "*"
+ machine_checksum = '*'
else:
machine_checksum = self.machine_manager.machine_checksum[self.label.name]
if read and CacheConditions.CHECKSUMS_MATCH not in self.cache_conditions:
- checksum = "*"
- elif self.label.image_type == "trybot":
+ checksum = '*'
+ elif self.label.image_type == 'trybot':
checksum = hashlib.md5(self.label.chromeos_image).hexdigest()
- elif self.label.image_type == "official":
- checksum = "*"
+ elif self.label.image_type == 'official':
+ checksum = '*'
else:
checksum = ImageChecksummer().Checksum(self.label, self.log_level)
if read and CacheConditions.IMAGE_PATH_MATCH not in self.cache_conditions:
- image_path_checksum = "*"
+ image_path_checksum = '*'
else:
image_path_checksum = hashlib.md5(self.chromeos_image).hexdigest()
- machine_id_checksum = ""
+ machine_id_checksum = ''
if read and CacheConditions.SAME_MACHINE_MATCH not in self.cache_conditions:
- machine_id_checksum = "*"
+ machine_id_checksum = '*'
else:
if self.machine and self.machine.name in self.label.remote:
machine_id_checksum = self.machine.machine_id_checksum
@@ -579,23 +575,17 @@ class ResultsCache(object):
machine_id_checksum = machine.machine_id_checksum
break
- temp_test_args = "%s %s %s" % (self.test_args,
- self.profiler_args,
+ temp_test_args = '%s %s %s' % (self.test_args, self.profiler_args,
self.run_local)
- test_args_checksum = hashlib.md5(
- "".join(temp_test_args)).hexdigest()
- return (image_path_checksum,
- self.test_name, str(self.iteration),
- test_args_checksum,
- checksum,
- machine_checksum,
- machine_id_checksum,
+ test_args_checksum = hashlib.md5(''.join(temp_test_args)).hexdigest()
+ return (image_path_checksum, self.test_name, str(self.iteration),
+ test_args_checksum, checksum, machine_checksum, machine_id_checksum,
str(self.CACHE_VERSION))
def ReadResult(self):
if CacheConditions.FALSE in self.cache_conditions:
cache_dir = self._GetCacheDirForWrite()
- command = "rm -rf {0}".format(cache_dir)
+ command = 'rm -rf {0}'.format(cache_dir)
self._ce.RunCommand(command)
return None
cache_dir = self._GetCacheDirForRead()
@@ -607,15 +597,10 @@ class ResultsCache(object):
return None
if self.log_level == 'verbose':
- self._logger.LogOutput("Trying to read from cache dir: %s" % cache_dir)
- result = Result.CreateFromCacheHit(self._logger,
- self.log_level,
- self.label,
- self.machine,
- cache_dir,
- self.show_all,
- self.test_name,
- self.suite)
+ self._logger.LogOutput('Trying to read from cache dir: %s' % cache_dir)
+ result = Result.CreateFromCacheHit(self._logger, self.log_level, self.label,
+ self.machine, cache_dir, self.show_all,
+ self.test_name, self.suite)
if not result:
return None
@@ -631,6 +616,7 @@ class ResultsCache(object):
class MockResultsCache(ResultsCache):
+
def Init(self, *args):
pass
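
The cache pieces reformatted above fit together as follows: _GetCacheKeyList returns a tuple of checksums and identifiers, and _FormCacheDir joins them and flattens the result into a directory name under the scratch or shared cache directory. A rough sketch of that flow, using placeholder checksum values, a placeholder cache root standing in for SCRATCH_DIR, and a stand-in for misc.GetFilenameFromString:

import hashlib
import os
import re

def form_cache_dir(key_pieces, cache_home):
  """A sketch of _FormCacheDir above: join the key pieces into one string,
  then flatten that string into a directory name.

  The real code sanitizes with misc.GetFilenameFromString and also honors
  label.cache_dir and share_cache; the regex below is only a stand-in.
  """
  cache_key = ' '.join(key_pieces)
  dirname = re.sub(r'[^A-Za-z0-9_.*-]+', '_', cache_key)
  return os.path.join(cache_home, dirname)

# Placeholder pieces in the order _GetCacheKeyList returns them: image path
# checksum, test name, iteration, test-args checksum, image checksum,
# machine checksum, machine id checksum, cache version.
args_checksum = hashlib.md5(b'test_args profiler_args run_local').hexdigest()
key = ('deadbeef', 'sunspider', '1', args_checksum, '*', 'abc987', '*', '6')
print(form_cache_dir(key, '/tmp/crosperf_cache'))
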
diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py
index 790b4718..11746db7 100755
--- a/crosperf/results_cache_unittest.py
+++ b/crosperf/results_cache_unittest.py
@@ -120,12 +120,40 @@ INFO : Test results:
INFO : Elapsed time: 0m18s
"""
-
-keyvals = {'': 'PASS', 'b_stdio_putcgetc__0_': '0.100005711667', 'b_string_strstr___azbycxdwevfugthsirjqkplomn__': '0.0133123556667', 'b_malloc_thread_local__0_': '0.01138439', 'b_string_strlen__0_': '0.044893587', 'b_malloc_sparse__0_': '0.015053784', 'b_string_memset__0_': '0.00275405066667', 'platform_LibCBench': 'PASS', 'b_pthread_uselesslock__0_': '0.0294113346667', 'b_string_strchr__0_': '0.00456903', 'b_pthread_create_serial1__0_': '0.0291785246667', 'b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac__': '0.118360778', 'b_string_strstr___aaaaaaaaaaaaaacccccccccccc__': '0.0135694476667', 'b_pthread_createjoin_serial1__0_': '0.031907936', 'b_malloc_thread_stress__0_': '0.0367894733333', 'b_regex_search____a_b_c__d_b__': '0.00165455066667', 'b_malloc_bubble__0_': '0.015066374', 'b_malloc_big2__0_': '0.002951359', 'b_stdio_putcgetc_unlocked__0_': '0.0371443833333', 'b_pthread_createjoin_serial2__0_': '0.043485347', 'b_regex_search___a_25_b__': '0.0496191923333', 'b_utf8_bigbuf__0_': '0.0473772253333', 'b_malloc_big1__0_': '0.00375231466667', 'b_regex_compile____a_b_c__d_b__': '0.00529833933333', 'b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaac__': '0.068957325', 'b_malloc_tiny2__0_': '0.000581407333333', 'b_utf8_onebyone__0_': '0.130938538333', 'b_malloc_tiny1__0_': '0.000768474333333', 'b_string_strstr___abcdefghijklmnopqrstuvwxyz__': '0.0134553343333'}
-
+keyvals = {'': 'PASS',
+ 'b_stdio_putcgetc__0_': '0.100005711667',
+ 'b_string_strstr___azbycxdwevfugthsirjqkplomn__': '0.0133123556667',
+ 'b_malloc_thread_local__0_': '0.01138439',
+ 'b_string_strlen__0_': '0.044893587',
+ 'b_malloc_sparse__0_': '0.015053784',
+ 'b_string_memset__0_': '0.00275405066667',
+ 'platform_LibCBench': 'PASS',
+ 'b_pthread_uselesslock__0_': '0.0294113346667',
+ 'b_string_strchr__0_': '0.00456903',
+ 'b_pthread_create_serial1__0_': '0.0291785246667',
+ 'b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac__':
+ '0.118360778',
+ 'b_string_strstr___aaaaaaaaaaaaaacccccccccccc__': '0.0135694476667',
+ 'b_pthread_createjoin_serial1__0_': '0.031907936',
+ 'b_malloc_thread_stress__0_': '0.0367894733333',
+ 'b_regex_search____a_b_c__d_b__': '0.00165455066667',
+ 'b_malloc_bubble__0_': '0.015066374',
+ 'b_malloc_big2__0_': '0.002951359',
+ 'b_stdio_putcgetc_unlocked__0_': '0.0371443833333',
+ 'b_pthread_createjoin_serial2__0_': '0.043485347',
+ 'b_regex_search___a_25_b__': '0.0496191923333',
+ 'b_utf8_bigbuf__0_': '0.0473772253333',
+ 'b_malloc_big1__0_': '0.00375231466667',
+ 'b_regex_compile____a_b_c__d_b__': '0.00529833933333',
+ 'b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaac__': '0.068957325',
+ 'b_malloc_tiny2__0_': '0.000581407333333',
+ 'b_utf8_onebyone__0_': '0.130938538333',
+ 'b_malloc_tiny1__0_': '0.000768474333333',
+ 'b_string_strstr___abcdefghijklmnopqrstuvwxyz__': '0.0134553343333'}
TMP_DIR1 = '/tmp/tmpAbcXyz'
+
class MockResult(Result):
def __init__(self, logger, label, logging_level, machine):
@@ -141,24 +169,22 @@ class MockResult(Result):
class ResultTest(unittest.TestCase):
mock_label = MockLabel('mock_label', 'chromeos_image', '/tmp', 'lumpy',
- 'remote', 'image_args', 'cache_dir', 'average',
- 'gcc', None)
+ 'remote', 'image_args', 'cache_dir', 'average', 'gcc',
+ None)
mock_logger = mock.Mock(spec=logger.Logger)
mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
def testCreateFromRun(self):
result = MockResult.CreateFromRun(logger.GetLogger(), 'average',
- self.mock_label, 'remote1',
- OUTPUT, error, 0, True, 0)
+ self.mock_label, 'remote1', OUTPUT, error,
+ 0, True, 0)
self.assertEqual(result.keyvals, keyvals)
self.assertEqual(result.chroot_results_dir,
'/tmp/test_that.PO1234567/platform_LibCBench')
self.assertEqual(result.results_dir,
- '/tmp/chroot/tmp/test_that.PO1234567/platform_LibCBench')
+ '/tmp/chroot/tmp/test_that.PO1234567/platform_LibCBench')
self.assertEqual(result.retval, 0)
-
-
def setUp(self):
self.result = Result(self.mock_logger, self.mock_label, 'average',
self.mock_cmd_exec)
@@ -204,8 +230,7 @@ class ResultTest(unittest.TestCase):
mock_copyfiles.return_value = 1
self.assertRaises(Exception, self.result._CopyFilesTo, dest_dir, files)
-
- @mock.patch.object (Result, '_CopyFilesTo')
+ @mock.patch.object(Result, '_CopyFilesTo')
def test_copy_results_to(self, mock_CopyFilesTo):
perf_data_files = ['/tmp/perf.data.0', '/tmp/perf.data.1',
'/tmp/perf.data.2']
@@ -224,7 +249,6 @@ class ResultTest(unittest.TestCase):
self.assertEqual(mock_CopyFilesTo.call_args_list[1][0],
('/tmp/results/', perf_report_files))
-
def test_get_new_keyvals(self):
kv_dict = {}
@@ -235,51 +259,73 @@ class ResultTest(unittest.TestCase):
self.result._GetDataMeasurementsFiles = FakeGetDataMeasurementsFiles
kv_dict2, udict = self.result._GetNewKeyvals(kv_dict)
self.assertEqual(kv_dict2,
- {u'Box2D__Box2D': 4775, u'Mandreel__Mandreel': 6620,
- u'Gameboy__Gameboy': 9901, u'Crypto__Crypto': 8737,
- u'telemetry_page_measurement_results__num_errored': 0,
- u'telemetry_page_measurement_results__num_failed': 0,
- u'PdfJS__PdfJS': 6455, u'Total__Score': 7918,
- u'EarleyBoyer__EarleyBoyer': 14340,
- u'MandreelLatency__MandreelLatency': 5188,
- u'CodeLoad__CodeLoad': 6271, u'DeltaBlue__DeltaBlue': 14401,
- u'Typescript__Typescript': 9815,
- u'SplayLatency__SplayLatency': 7653, u'zlib__zlib': 16094,
- u'Richards__Richards': 10358, u'RegExp__RegExp': 1765,
- u'NavierStokes__NavierStokes': 9815, u'Splay__Splay': 4425,
- u'RayTrace__RayTrace': 16600})
- self.assertEqual(udict,
- {u'Box2D__Box2D': u'score', u'Mandreel__Mandreel': u'score',
- u'Gameboy__Gameboy': u'score', u'Crypto__Crypto': u'score',
- u'telemetry_page_measurement_results__num_errored': u'count',
- u'telemetry_page_measurement_results__num_failed': u'count',
- u'PdfJS__PdfJS': u'score', u'Total__Score': u'score',
- u'EarleyBoyer__EarleyBoyer': u'score',
- u'MandreelLatency__MandreelLatency': u'score',
- u'CodeLoad__CodeLoad': u'score',
- u'DeltaBlue__DeltaBlue': u'score',
- u'Typescript__Typescript': u'score',
- u'SplayLatency__SplayLatency': u'score', u'zlib__zlib': u'score',
- u'Richards__Richards': u'score', u'RegExp__RegExp': u'score',
- u'NavierStokes__NavierStokes': u'score',
- u'Splay__Splay': u'score', u'RayTrace__RayTrace': u'score'})
-
+ {u'Box2D__Box2D': 4775,
+ u'Mandreel__Mandreel': 6620,
+ u'Gameboy__Gameboy': 9901,
+ u'Crypto__Crypto': 8737,
+ u'telemetry_page_measurement_results__num_errored': 0,
+ u'telemetry_page_measurement_results__num_failed': 0,
+ u'PdfJS__PdfJS': 6455,
+ u'Total__Score': 7918,
+ u'EarleyBoyer__EarleyBoyer': 14340,
+ u'MandreelLatency__MandreelLatency': 5188,
+ u'CodeLoad__CodeLoad': 6271,
+ u'DeltaBlue__DeltaBlue': 14401,
+ u'Typescript__Typescript': 9815,
+ u'SplayLatency__SplayLatency': 7653,
+ u'zlib__zlib': 16094,
+ u'Richards__Richards': 10358,
+ u'RegExp__RegExp': 1765,
+ u'NavierStokes__NavierStokes': 9815,
+ u'Splay__Splay': 4425,
+ u'RayTrace__RayTrace': 16600})
+ self.assertEqual(
+ udict, {u'Box2D__Box2D': u'score',
+ u'Mandreel__Mandreel': u'score',
+ u'Gameboy__Gameboy': u'score',
+ u'Crypto__Crypto': u'score',
+ u'telemetry_page_measurement_results__num_errored': u'count',
+ u'telemetry_page_measurement_results__num_failed': u'count',
+ u'PdfJS__PdfJS': u'score',
+ u'Total__Score': u'score',
+ u'EarleyBoyer__EarleyBoyer': u'score',
+ u'MandreelLatency__MandreelLatency': u'score',
+ u'CodeLoad__CodeLoad': u'score',
+ u'DeltaBlue__DeltaBlue': u'score',
+ u'Typescript__Typescript': u'score',
+ u'SplayLatency__SplayLatency': u'score',
+ u'zlib__zlib': u'score',
+ u'Richards__Richards': u'score',
+ u'RegExp__RegExp': u'score',
+ u'NavierStokes__NavierStokes': u'score',
+ u'Splay__Splay': u'score',
+ u'RayTrace__RayTrace': u'score'})
def test_append_telemetry_units(self):
- kv_dict = {u'Box2D__Box2D': 4775, u'Mandreel__Mandreel': 6620,
- u'Gameboy__Gameboy': 9901, u'Crypto__Crypto': 8737,
- u'PdfJS__PdfJS': 6455, u'Total__Score': 7918,
+ kv_dict = {u'Box2D__Box2D': 4775,
+ u'Mandreel__Mandreel': 6620,
+ u'Gameboy__Gameboy': 9901,
+ u'Crypto__Crypto': 8737,
+ u'PdfJS__PdfJS': 6455,
+ u'Total__Score': 7918,
u'EarleyBoyer__EarleyBoyer': 14340,
u'MandreelLatency__MandreelLatency': 5188,
- u'CodeLoad__CodeLoad': 6271, u'DeltaBlue__DeltaBlue': 14401,
+ u'CodeLoad__CodeLoad': 6271,
+ u'DeltaBlue__DeltaBlue': 14401,
u'Typescript__Typescript': 9815,
- u'SplayLatency__SplayLatency': 7653, u'zlib__zlib': 16094,
- u'Richards__Richards': 10358, u'RegExp__RegExp': 1765,
- u'NavierStokes__NavierStokes': 9815, u'Splay__Splay': 4425,
+ u'SplayLatency__SplayLatency': 7653,
+ u'zlib__zlib': 16094,
+ u'Richards__Richards': 10358,
+ u'RegExp__RegExp': 1765,
+ u'NavierStokes__NavierStokes': 9815,
+ u'Splay__Splay': 4425,
u'RayTrace__RayTrace': 16600}
- units_dict = {u'Box2D__Box2D': u'score', u'Mandreel__Mandreel': u'score',
- u'Gameboy__Gameboy': u'score', u'Crypto__Crypto': u'score',
- u'PdfJS__PdfJS': u'score', u'Total__Score': u'score',
+ units_dict = {u'Box2D__Box2D': u'score',
+ u'Mandreel__Mandreel': u'score',
+ u'Gameboy__Gameboy': u'score',
+ u'Crypto__Crypto': u'score',
+ u'PdfJS__PdfJS': u'score',
+ u'Total__Score': u'score',
u'EarleyBoyer__EarleyBoyer': u'score',
u'MandreelLatency__MandreelLatency': u'score',
u'CodeLoad__CodeLoad': u'score',
@@ -287,9 +333,11 @@ class ResultTest(unittest.TestCase):
u'Typescript__Typescript': u'score',
u'SplayLatency__SplayLatency': u'score',
u'zlib__zlib': u'score',
- u'Richards__Richards': u'score', u'RegExp__RegExp': u'score',
+ u'Richards__Richards': u'score',
+ u'RegExp__RegExp': u'score',
u'NavierStokes__NavierStokes': u'score',
- u'Splay__Splay': u'score', u'RayTrace__RayTrace': u'score'}
+ u'Splay__Splay': u'score',
+ u'RayTrace__RayTrace': u'score'}
results_dict = self.result._AppendTelemetryUnits(kv_dict, units_dict)
self.assertEqual(results_dict,
@@ -312,11 +360,10 @@ class ResultTest(unittest.TestCase):
u'RayTrace__RayTrace': [16600, u'score'],
u'NavierStokes__NavierStokes': [9815, u'score']})
-
- @mock.patch.object (misc, 'GetInsideChrootPath')
- @mock.patch.object (tempfile, 'mkdtemp')
- @mock.patch.object (command_executer.CommandExecuter, 'RunCommand')
- @mock.patch.object (command_executer.CommandExecuter, 'ChrootRunCommand')
+ @mock.patch.object(misc, 'GetInsideChrootPath')
+ @mock.patch.object(tempfile, 'mkdtemp')
+ @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
+ @mock.patch.object(command_executer.CommandExecuter, 'ChrootRunCommand')
def test_get_keyvals(self, mock_chrootruncmd, mock_runcmd, mock_mkdtemp,
mock_getpath):
@@ -334,16 +381,14 @@ class ResultTest(unittest.TestCase):
def FakeGetNewKeyvals(kv_dict):
self.kv_dict = kv_dict
self.call_GetNewKeyvals = True
- return_kvdict = { 'first_time' : 680, 'Total' : 10}
- return_udict = { 'first_time' : 'ms', 'Total' : 'score'}
+ return_kvdict = {'first_time': 680, 'Total': 10}
+ return_udict = {'first_time': 'ms', 'Total': 'score'}
return return_kvdict, return_udict
-
mock_mkdtemp.return_value = TMP_DIR1
mock_chrootruncmd.return_value = ['',
('%s,PASS\n%s/telemetry_Crosperf,PASS\n')
- % (TMP_DIR1, TMP_DIR1),
- '']
+ % (TMP_DIR1, TMP_DIR1), '']
mock_getpath.return_value = TMP_DIR1
self.result._ce.ChrootRunCommand = mock_chrootruncmd
self.result._ce.RunCommand = mock_runcmd
@@ -354,26 +399,22 @@ class ResultTest(unittest.TestCase):
# Test 1. no self._temp_dir.
res = self.result._GetKeyvals(True)
self.assertTrue(self.call_GetNewKeyvals)
- self.assertEqual(self.kv_dict, { '': 'PASS', 'telemetry_Crosperf': 'PASS' })
+ self.assertEqual(self.kv_dict, {'': 'PASS', 'telemetry_Crosperf': 'PASS'})
self.assertEqual(mock_runcmd.call_count, 1)
self.assertEqual(mock_runcmd.call_args_list[0][0],
('cp -r /tmp/test_that_resultsNmq/* %s' % TMP_DIR1,))
self.assertEqual(mock_chrootruncmd.call_count, 1)
- self.assertEqual(mock_chrootruncmd.call_args_list[0][0],
- ('/tmp',
- ('python generate_test_report --no-color --csv %s') %
- TMP_DIR1))
+ self.assertEqual(mock_chrootruncmd.call_args_list[0][0], (
+ '/tmp', ('python generate_test_report --no-color --csv %s') % TMP_DIR1))
self.assertEqual(mock_getpath.call_count, 1)
self.assertEqual(mock_mkdtemp.call_count, 1)
self.assertEqual(res, {'Total': [10, 'score'], 'first_time': [680, 'ms']})
-
# Test 2. self._temp_dir
reset()
mock_chrootruncmd.return_value = ['',
('/tmp/tmpJCajRG,PASS\n/tmp/tmpJCajRG/'
- 'telemetry_Crosperf,PASS\n'),
- '']
+ 'telemetry_Crosperf,PASS\n'), '']
mock_getpath.return_value = '/tmp/tmpJCajRG'
self.result._temp_dir = '/tmp/tmpJCajRG'
res = self.result._GetKeyvals(True)
@@ -381,7 +422,7 @@ class ResultTest(unittest.TestCase):
self.assertEqual(mock_mkdtemp.call_count, 0)
self.assertEqual(mock_chrootruncmd.call_count, 1)
self.assertTrue(self.call_GetNewKeyvals)
- self.assertEqual(self.kv_dict, { '': 'PASS', 'telemetry_Crosperf': 'PASS' })
+ self.assertEqual(self.kv_dict, {'': 'PASS', 'telemetry_Crosperf': 'PASS'})
self.assertEqual(res, {'Total': [10, 'score'], 'first_time': [680, 'ms']})
# Test 3. suite != telemetry_Crosperf. Normally this would be for
@@ -392,8 +433,7 @@ class ResultTest(unittest.TestCase):
reset()
self.result.suite = ''
res = self.result._GetKeyvals(True)
- self.assertEqual(res, {'Total': 10, 'first_time': 680 })
-
+ self.assertEqual(res, {'Total': 10, 'first_time': 680})
def test_get_results_dir(self):
@@ -402,11 +442,9 @@ class ResultTest(unittest.TestCase):
self.result.out = OUTPUT
resdir = self.result._GetResultsDir()
- self.assertEqual(resdir,
- '/tmp/test_that.PO1234567/platform_LibCBench')
-
+ self.assertEqual(resdir, '/tmp/test_that.PO1234567/platform_LibCBench')
- @mock.patch.object (command_executer.CommandExecuter, 'RunCommand')
+ @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
def test_find_files_in_results_dir(self, mock_runcmd):
self.result.results_dir = None
@@ -427,9 +465,7 @@ class ResultTest(unittest.TestCase):
self.assertRaises(Exception, self.result._FindFilesInResultsDir,
'-name perf.data')
-
-
- @mock.patch.object (Result, '_FindFilesInResultsDir')
+ @mock.patch.object(Result, '_FindFilesInResultsDir')
def test_get_perf_data_files(self, mock_findfiles):
self.args = None
@@ -439,7 +475,6 @@ class ResultTest(unittest.TestCase):
self.assertEqual(res, ['line1', 'line1'])
self.assertEqual(mock_findfiles.call_args_list[0][0], ('-name perf.data',))
-
def test_get_perf_report_files(self):
self.args = None
@@ -452,7 +487,6 @@ class ResultTest(unittest.TestCase):
self.assertEqual(res, ['line1', 'line1'])
self.assertEqual(self.args, '-name perf.data.report')
-
def test_get_data_measurement_files(self):
self.args = None
@@ -465,9 +499,8 @@ class ResultTest(unittest.TestCase):
self.assertEqual(res, ['line1', 'line1'])
self.assertEqual(self.args, '-name perf_measurements')
-
- @mock.patch.object (misc, 'GetInsideChrootPath')
- @mock.patch.object (command_executer.CommandExecuter, 'ChrootRunCommand')
+ @mock.patch.object(misc, 'GetInsideChrootPath')
+ @mock.patch.object(command_executer.CommandExecuter, 'ChrootRunCommand')
def test_generate_perf_report_files(self, mock_chrootruncmd, mock_getpath):
fake_file = '/usr/chromeos/chroot/tmp/results/fake_file'
self.result.perf_data_files = ['/tmp/results/perf.data']
@@ -483,9 +516,7 @@ class ResultTest(unittest.TestCase):
'--kallsyms /build/lumpy/boot/System.map-* -i '
'%s --stdio > %s') % (fake_file, fake_file)))
-
-
- @mock.patch.object (misc, 'GetOutsideChrootPath')
+ @mock.patch.object(misc, 'GetOutsideChrootPath')
def test_populate_from_run(self, mock_getpath):
def FakeGetResultsDir():
@@ -527,9 +558,9 @@ class ResultTest(unittest.TestCase):
def FakeGetKeyvals(show_all):
if show_all:
- return { 'first_time' : 680, 'Total' : 10}
+ return {'first_time': 680, 'Total': 10}
else:
- return { 'Total' : 10}
+ return {'Total': 10}
def FakeGatherPerfResults():
self.callGatherPerfResults = True
@@ -543,18 +574,17 @@ class ResultTest(unittest.TestCase):
self.result._ProcessResults(True)
self.assertTrue(self.callGatherPerfResults)
self.assertEqual(len(self.result.keyvals), 3)
- self.assertEqual(self.result.keyvals,
- { 'first_time' : 680, 'Total' : 10, 'retval' : 0 })
+ self.assertEqual(self.result.keyvals, {'first_time': 680,
+ 'Total': 10,
+ 'retval': 0})
self.result.retval = 1
self.result._ProcessResults(False)
self.assertEqual(len(self.result.keyvals), 2)
- self.assertEqual(self.result.keyvals,
- { 'Total' : 10, 'retval' : 1 })
-
+ self.assertEqual(self.result.keyvals, {'Total': 10, 'retval': 1})
- @mock.patch.object (misc, 'GetInsideChrootPath')
- @mock.patch.object (command_executer.CommandExecuter, 'ChrootRunCommand')
+ @mock.patch.object(misc, 'GetInsideChrootPath')
+ @mock.patch.object(command_executer.CommandExecuter, 'ChrootRunCommand')
def test_populate_from_cache_dir(self, mock_runchrootcmd, mock_getpath):
def FakeMkdtemp(dir=''):
@@ -566,8 +596,7 @@ class ResultTest(unittest.TestCase):
self.result._ce.ChrootRunCommand = mock_runchrootcmd
mock_runchrootcmd.return_value = ['',
('%s,PASS\n%s/\telemetry_Crosperf,PASS\n')
- % (TMP_DIR1, TMP_DIR1),
- '']
+ % (TMP_DIR1, TMP_DIR1), '']
mock_getpath.return_value = TMP_DIR1
self.tmpdir = tempfile.mkdtemp()
save_real_mkdtemp = tempfile.mkdtemp
@@ -575,54 +604,47 @@ class ResultTest(unittest.TestCase):
self.result._PopulateFromCacheDir(cache_dir, True, 'sunspider',
'telemetry_Crosperf')
- self.assertEqual(self.result.keyvals,
- {u'Total__Total': [444.0, u'ms'],
- u'regexp-dna__regexp-dna': [16.2, u'ms'],
- u'telemetry_page_measurement_results__num_failed':
- [0, u'count'],
- u'telemetry_page_measurement_results__num_errored':
- [0, u'count'],
- u'string-fasta__string-fasta': [23.2, u'ms'],
- u'crypto-sha1__crypto-sha1': [11.6, u'ms'],
- u'bitops-3bit-bits-in-byte__bitops-3bit-bits-in-byte':
- [3.2, u'ms'],
- u'access-nsieve__access-nsieve': [7.9, u'ms'],
- u'bitops-nsieve-bits__bitops-nsieve-bits': [9.4, u'ms'],
- u'string-validate-input__string-validate-input':
- [19.3, u'ms'],
- u'3d-raytrace__3d-raytrace': [24.7, u'ms'],
- u'3d-cube__3d-cube': [28.0, u'ms'],
- u'string-unpack-code__string-unpack-code': [46.7, u'ms'],
- u'date-format-tofte__date-format-tofte': [26.3, u'ms'],
- u'math-partial-sums__math-partial-sums': [22.0, u'ms'],
- '\telemetry_Crosperf': ['PASS', ''],
- u'crypto-aes__crypto-aes': [15.2, u'ms'],
- u'bitops-bitwise-and__bitops-bitwise-and': [8.4, u'ms'],
- u'crypto-md5__crypto-md5': [10.5, u'ms'],
- u'string-tagcloud__string-tagcloud': [52.8, u'ms'],
- u'access-nbody__access-nbody': [8.5, u'ms'],
- 'retval': 0,
- u'math-spectral-norm__math-spectral-norm': [6.6, u'ms'],
- u'math-cordic__math-cordic': [8.7, u'ms'],
- u'access-binary-trees__access-binary-trees': [4.5, u'ms'],
- u'controlflow-recursive__controlflow-recursive':
- [4.4, u'ms'],
- u'access-fannkuch__access-fannkuch': [17.8, u'ms'],
- u'string-base64__string-base64': [16.0, u'ms'],
- u'date-format-xparb__date-format-xparb': [20.9, u'ms'],
- u'3d-morph__3d-morph': [22.1, u'ms'],
- u'bitops-bits-in-byte__bitops-bits-in-byte': [9.1, u'ms']
- })
-
+ self.assertEqual(
+ self.result.keyvals,
+ {u'Total__Total': [444.0, u'ms'],
+ u'regexp-dna__regexp-dna': [16.2, u'ms'],
+ u'telemetry_page_measurement_results__num_failed': [0, u'count'],
+ u'telemetry_page_measurement_results__num_errored': [0, u'count'],
+ u'string-fasta__string-fasta': [23.2, u'ms'],
+ u'crypto-sha1__crypto-sha1': [11.6, u'ms'],
+ u'bitops-3bit-bits-in-byte__bitops-3bit-bits-in-byte': [3.2, u'ms'],
+ u'access-nsieve__access-nsieve': [7.9, u'ms'],
+ u'bitops-nsieve-bits__bitops-nsieve-bits': [9.4, u'ms'],
+ u'string-validate-input__string-validate-input': [19.3, u'ms'],
+ u'3d-raytrace__3d-raytrace': [24.7, u'ms'],
+ u'3d-cube__3d-cube': [28.0, u'ms'],
+ u'string-unpack-code__string-unpack-code': [46.7, u'ms'],
+ u'date-format-tofte__date-format-tofte': [26.3, u'ms'],
+ u'math-partial-sums__math-partial-sums': [22.0, u'ms'],
+ '\telemetry_Crosperf': ['PASS', ''],
+ u'crypto-aes__crypto-aes': [15.2, u'ms'],
+ u'bitops-bitwise-and__bitops-bitwise-and': [8.4, u'ms'],
+ u'crypto-md5__crypto-md5': [10.5, u'ms'],
+ u'string-tagcloud__string-tagcloud': [52.8, u'ms'],
+ u'access-nbody__access-nbody': [8.5, u'ms'],
+ 'retval': 0,
+ u'math-spectral-norm__math-spectral-norm': [6.6, u'ms'],
+ u'math-cordic__math-cordic': [8.7, u'ms'],
+ u'access-binary-trees__access-binary-trees': [4.5, u'ms'],
+ u'controlflow-recursive__controlflow-recursive': [4.4, u'ms'],
+ u'access-fannkuch__access-fannkuch': [17.8, u'ms'],
+ u'string-base64__string-base64': [16.0, u'ms'],
+ u'date-format-xparb__date-format-xparb': [20.9, u'ms'],
+ u'3d-morph__3d-morph': [22.1, u'ms'],
+ u'bitops-bits-in-byte__bitops-bits-in-byte': [9.1, u'ms']})
# Clean up after test.
tempfile.mkdtemp = save_real_mkdtemp
command = 'rm -Rf %s' % self.tmpdir
self.result._ce.RunCommand(command)
-
- @mock.patch.object (misc, 'GetRoot')
- @mock.patch.object (command_executer.CommandExecuter, 'RunCommand')
+ @mock.patch.object(misc, 'GetRoot')
+ @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
def test_cleanup(self, mock_runcmd, mock_getroot):
# Test 1. 'rm_chroot_tmp' is True; self.results_dir exists;
@@ -673,9 +695,8 @@ class ResultTest(unittest.TestCase):
self.assertEqual(mock_getroot.call_count, 0)
self.assertEqual(mock_runcmd.call_count, 0)
-
- @mock.patch.object (misc, 'GetInsideChrootPath')
- @mock.patch.object (command_executer.CommandExecuter, 'ChrootRunCommand')
+ @mock.patch.object(misc, 'GetInsideChrootPath')
+ @mock.patch.object(command_executer.CommandExecuter, 'ChrootRunCommand')
def test_store_to_cache_dir(self, mock_chrootruncmd, mock_getpath):
def FakeMkdtemp(dir=''):
@@ -728,19 +749,102 @@ class ResultTest(unittest.TestCase):
self.result._ce.RunCommand(command)
-TELEMETRY_RESULT_KEYVALS = {'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html math-cordic (ms)': '11.4', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html access-nbody (ms)': '6.9', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html access-fannkuch (ms)': '26.3', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html math-spectral-norm (ms)': '6.3', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html bitops-nsieve-bits (ms)': '9.3', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html math-partial-sums (ms)': '32.8', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html regexp-dna (ms)': '16.1', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html 3d-cube (ms)': '42.7', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html crypto-md5 (ms)': '10.8', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html crypto-sha1 (ms)': '12.4', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html string-tagcloud (ms)': '47.2', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html string-fasta (ms)': '36.3', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html access-binary-trees (ms)': '7.3', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html date-format-xparb (ms)': '138.1', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html crypto-aes (ms)': '19.2', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html Total (ms)': '656.5', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html string-base64 (ms)': '17.5', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html string-validate-input (ms)': '24.8', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html 3d-raytrace (ms)': '28.7', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html controlflow-recursive (ms)': '5.3', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html bitops-bits-in-byte (ms)': '9.8', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html 3d-morph (ms)': '50.2', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html bitops-bitwise-and (ms)': '8.8', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html access-nsieve (ms)': '8.6', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html date-format-tofte (ms)': '31.2', 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html bitops-3bit-bits-in-byte (ms)': '3.5', 'retval': 0, 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html string-unpack-code (ms)': '45.0'}
+TELEMETRY_RESULT_KEYVALS = {
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'math-cordic (ms)':
+ '11.4',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'access-nbody (ms)':
+ '6.9',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'access-fannkuch (ms)':
+ '26.3',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'math-spectral-norm (ms)':
+ '6.3',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'bitops-nsieve-bits (ms)':
+ '9.3',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'math-partial-sums (ms)':
+ '32.8',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'regexp-dna (ms)':
+ '16.1',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ '3d-cube (ms)':
+ '42.7',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'crypto-md5 (ms)':
+ '10.8',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'crypto-sha1 (ms)':
+ '12.4',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'string-tagcloud (ms)':
+ '47.2',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'string-fasta (ms)':
+ '36.3',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'access-binary-trees (ms)':
+ '7.3',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'date-format-xparb (ms)':
+ '138.1',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'crypto-aes (ms)':
+ '19.2',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'Total (ms)':
+ '656.5',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'string-base64 (ms)':
+ '17.5',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'string-validate-input (ms)':
+ '24.8',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ '3d-raytrace (ms)':
+ '28.7',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'controlflow-recursive (ms)':
+ '5.3',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'bitops-bits-in-byte (ms)':
+ '9.8',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ '3d-morph (ms)':
+ '50.2',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'bitops-bitwise-and (ms)':
+ '8.8',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'access-nsieve (ms)':
+ '8.6',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'date-format-tofte (ms)':
+ '31.2',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'bitops-3bit-bits-in-byte (ms)':
+ '3.5',
+ 'retval': 0,
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'string-unpack-code (ms)':
+ '45.0'
+}
PURE_TELEMETRY_OUTPUT = """page_name,3d-cube (ms),3d-morph (ms),3d-raytrace (ms),Total (ms),access-binary-trees (ms),access-fannkuch (ms),access-nbody (ms),access-nsieve (ms),bitops-3bit-bits-in-byte (ms),bitops-bits-in-byte (ms),bitops-bitwise-and (ms),bitops-nsieve-bits (ms),controlflow-recursive (ms),crypto-aes (ms),crypto-md5 (ms),crypto-sha1 (ms),date-format-tofte (ms),date-format-xparb (ms),math-cordic (ms),math-partial-sums (ms),math-spectral-norm (ms),regexp-dna (ms),string-base64 (ms),string-fasta (ms),string-tagcloud (ms),string-unpack-code (ms),string-validate-input (ms)\r\nhttp://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html,42.7,50.2,28.7,656.5,7.3,26.3,6.9,8.6,3.5,9.8,8.8,9.3,5.3,19.2,10.8,12.4,31.2,138.1,11.4,32.8,6.3,16.1,17.5,36.3,47.2,45.0,24.8\r\n"""
+
class TelemetryResultTest(unittest.TestCase):
mock_logger = mock.Mock(spec=logger.Logger)
mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
mock_label = MockLabel('mock_label', 'chromeos_image', '/tmp', 'lumpy',
- 'remote', 'image_args', 'cache_dir', 'average',
- 'gcc', None)
- mock_machine = machine_manager.MockCrosMachine('falco.cros',
- '/tmp/chromeos',
+ 'remote', 'image_args', 'cache_dir', 'average', 'gcc',
+ None)
+ mock_machine = machine_manager.MockCrosMachine('falco.cros', '/tmp/chromeos',
'average')
def test_populate_from_run(self):
@@ -749,8 +853,8 @@ class TelemetryResultTest(unittest.TestCase):
self.callFakeProcessResults = True
self.callFakeProcessResults = False
- self.result = TelemetryResult(self.mock_logger, self.mock_label,
- 'average', self.mock_cmd_exec)
+ self.result = TelemetryResult(self.mock_logger, self.mock_label, 'average',
+ self.mock_cmd_exec)
self.result._ProcessResults = FakeProcessResults
self.result._PopulateFromRun(OUTPUT, error, 3, False, 'fake_test',
'telemetry_Crosperf')
@@ -759,11 +863,10 @@ class TelemetryResultTest(unittest.TestCase):
self.assertEqual(self.result.err, error)
self.assertEqual(self.result.retval, 3)
-
def test_populate_from_cache_dir_and_process_results(self):
- self.result = TelemetryResult(self.mock_logger, self.mock_label,
- 'average', self.mock_machine)
+ self.result = TelemetryResult(self.mock_logger, self.mock_label, 'average',
+ self.mock_machine)
current_path = os.getcwd()
cache_dir = os.path.join(current_path,
'test_cache/test_puretelemetry_input')
@@ -778,14 +881,14 @@ class ResultsCacheTest(unittest.TestCase):
mock_logger = mock.Mock(spec=logger.Logger)
mock_label = MockLabel('mock_label', 'chromeos_image', '/tmp', 'lumpy',
- 'remote', 'image_args', 'cache_dir', 'average',
- 'gcc', None)
+ 'remote', 'image_args', 'cache_dir', 'average', 'gcc',
+ None)
+
def setUp(self):
self.results_cache = ResultsCache()
mock_machine = machine_manager.MockCrosMachine('falco.cros',
- '/tmp/chromeos',
- 'average')
+ '/tmp/chromeos', 'average')
mock_mm = machine_manager.MockMachineManager('/tmp/chromeos_root', 0,
'average')
@@ -794,9 +897,9 @@ class ResultsCacheTest(unittest.TestCase):
self.results_cache.Init(self.mock_label.chromeos_image,
self.mock_label.chromeos_root,
'sunspider',
- 1, # benchmark_run.iteration,
- '', # benchmark_run.test_args,
- '', # benchmark_run.profiler_args,
+ 1, # benchmark_run.iteration,
+ '', # benchmark_run.test_args,
+ '', # benchmark_run.profiler_args,
mock_mm,
mock_machine,
self.mock_label.board,
@@ -805,24 +908,20 @@ class ResultsCacheTest(unittest.TestCase):
self.mock_logger,
'average',
self.mock_label,
- '', # benchmark_run.share_cache
+ '', # benchmark_run.share_cache
'telemetry_Crosperf',
- True, # benchmark_run.show_all_results
- False) # benchmark_run.run_local
-
+ True, # benchmark_run.show_all_results
+ False) # benchmark_run.run_local
- @mock.patch.object (image_checksummer.ImageChecksummer, 'Checksum')
+ @mock.patch.object(image_checksummer.ImageChecksummer, 'Checksum')
def test_get_cache_dir_for_write(self, mock_checksum):
def FakeGetMachines(label):
- m1 = machine_manager.MockCrosMachine('lumpy1.cros',
- self.results_cache.chromeos_root,
- 'average')
- m2 = machine_manager.MockCrosMachine('lumpy2.cros',
- self.results_cache.chromeos_root,
- 'average')
- return [m1, m2]
-
+ m1 = machine_manager.MockCrosMachine(
+ 'lumpy1.cros', self.results_cache.chromeos_root, 'average')
+ m2 = machine_manager.MockCrosMachine(
+ 'lumpy2.cros', self.results_cache.chromeos_root, 'average')
+ return [m1, m2]
mock_checksum.return_value = 'FakeImageChecksumabc123'
self.results_cache.machine_manager.GetMachines = FakeGetMachines
@@ -841,7 +940,6 @@ class ResultsCacheTest(unittest.TestCase):
'abc987__6')
self.assertEqual(result_path, comp_path)
-
def test_form_cache_dir(self):
# This is very similar to the previous test (_FormCacheDir is called
# from _GetCacheDirForWrite).
@@ -856,21 +954,17 @@ class ResultsCacheTest(unittest.TestCase):
comp_path = os.path.join(os.getcwd(), 'cache_dir', test_dirname)
self.assertEqual(path1, comp_path)
-
- @mock.patch.object (image_checksummer.ImageChecksummer, 'Checksum')
+ @mock.patch.object(image_checksummer.ImageChecksummer, 'Checksum')
def test_get_cache_key_list(self, mock_checksum):
# This tests the mechanism that generates the various pieces of the
# cache directory name, based on various conditions.
def FakeGetMachines(label):
- m1 = machine_manager.MockCrosMachine('lumpy1.cros',
- self.results_cache.chromeos_root,
- 'average')
- m2 = machine_manager.MockCrosMachine('lumpy2.cros',
- self.results_cache.chromeos_root,
- 'average')
- return [m1, m2]
-
+ m1 = machine_manager.MockCrosMachine(
+ 'lumpy1.cros', self.results_cache.chromeos_root, 'average')
+ m2 = machine_manager.MockCrosMachine(
+ 'lumpy2.cros', self.results_cache.chromeos_root, 'average')
+ return [m1, m2]
mock_checksum.return_value = 'FakeImageChecksumabc123'
self.results_cache.machine_manager.GetMachines = FakeGetMachines
@@ -879,7 +973,7 @@ class ResultsCacheTest(unittest.TestCase):
# Test 1. Generating cache name for reading (not writing).
key_list = self.results_cache._GetCacheKeyList(True)
- self.assertEqual(key_list[0], '*') # Machine checksum value, for read.
+ self.assertEqual(key_list[0], '*') # Machine checksum value, for read.
self.assertEqual(key_list[1], 'sunspider')
self.assertEqual(key_list[2], '1')
self.assertEqual(key_list[3], 'fda29412ceccb72977516c4785d08e2c')
@@ -929,13 +1023,13 @@ class ResultsCacheTest(unittest.TestCase):
self.assertEqual(key_list[4], 'FakeImageChecksumabc123')
self.assertEqual(key_list[5], 'FakeMachineChecksumabc987')
-
- @mock.patch.object (command_executer.CommandExecuter, 'RunCommand')
- @mock.patch.object (os.path, 'isdir')
- @mock.patch.object (Result, 'CreateFromCacheHit')
+ @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
+ @mock.patch.object(os.path, 'isdir')
+ @mock.patch.object(Result, 'CreateFromCacheHit')
def test_read_result(self, mock_create, mock_isdir, mock_runcmd):
self.fakeCacheReturnResult = None
+
def FakeGetCacheDirForRead():
return self.fakeCacheReturnResult
@@ -950,7 +1044,7 @@ class ResultsCacheTest(unittest.TestCase):
# Set up results_cache _GetCacheDirFor{Read,Write} to return
# self.fakeCacheReturnResult, which is initially None (see above).
# So initially, no cache dir is returned.
- self.results_cache._GetCacheDirForRead = FakeGetCacheDirForRead
+ self.results_cache._GetCacheDirForRead = FakeGetCacheDirForRead
self.results_cache._GetCacheDirForWrite = FakeGetCacheDirForWrite
mock_isdir.return_value = True
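
Much of the churn in this test file is whitespace inside stacked @mock.patch.object decorators; the pattern itself is unchanged: decorators apply bottom-up, so the innermost patch is handed to the test method as the first mock argument. A small self-contained example of that ordering, against a throwaway class rather than the crosperf classes:

import unittest
import mock  # Standalone package, as in the tests above; unittest.mock also works on Python 3.


class Thing(object):

  def a(self):
    return 'real a'

  def b(self):
    return 'real b'


class PatchOrderTest(unittest.TestCase):

  @mock.patch.object(Thing, 'a')
  @mock.patch.object(Thing, 'b')
  def test_order(self, mock_b, mock_a):
    # The decorator closest to the method patches 'b', and its mock arrives
    # first, mirroring mock_chrootruncmd/mock_runcmd in the tests above.
    mock_a.return_value = 'fake a'
    mock_b.return_value = 'fake b'
    t = Thing()
    self.assertEqual(t.a(), 'fake a')
    self.assertEqual(t.b(), 'fake b')


if __name__ == '__main__':
  unittest.main()
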
diff --git a/crosperf/results_organizer.py b/crosperf/results_organizer.py
index efd70c63..39554c41 100644
--- a/crosperf/results_organizer.py
+++ b/crosperf/results_organizer.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Parse data from benchmark_runs for tabulator."""
from __future__ import print_function
@@ -13,7 +12,8 @@ import sys
from cros_utils import misc
-TELEMETRY_RESULT_DEFAULTS_FILE = "default-telemetry-results.json"
+TELEMETRY_RESULT_DEFAULTS_FILE = 'default-telemetry-results.json'
+
class ResultOrganizer(object):
"""Create a dict from benchmark_runs.
@@ -30,11 +30,14 @@ class ResultOrganizer(object):
]}.
"""
- def __init__(self, benchmark_runs, labels, benchmarks=None,
+ def __init__(self,
+ benchmark_runs,
+ labels,
+ benchmarks=None,
json_report=False):
self.result = {}
self.labels = []
- self.prog = re.compile(r"(\w+)\{(\d+)\}")
+ self.prog = re.compile(r'(\w+)\{(\d+)\}')
self.benchmarks = benchmarks
if not self.benchmarks:
self.benchmarks = []
@@ -62,7 +65,7 @@ class ResultOrganizer(object):
if not show_all_results:
summary_list = self._GetSummaryResults(benchmark.test_name)
if len(summary_list) > 0:
- summary_list.append("retval")
+ summary_list.append('retval')
else:
# Did not find test_name in json file; therefore show everything.
show_all_results = True
@@ -77,7 +80,8 @@ class ResultOrganizer(object):
cur_dict['retval'] = 1
# TODO: This output should be sent via logger.
print("WARNING: Test '%s' appears to have succeeded but returned"
- " no results." % benchmark_name, file=sys.stderr)
+ ' no results.' % benchmark_name,
+ file=sys.stderr)
if json_report and benchmark_run.machine:
cur_dict['machine'] = benchmark_run.machine.name
cur_dict['machine_checksum'] = benchmark_run.machine.checksum
@@ -117,8 +121,7 @@ class ResultOrganizer(object):
for run in label:
for key in run:
if re.match(self.prog, key):
- max_dup = max(max_dup,
- int(re.search(self.prog, key).group(2)))
+ max_dup = max(max_dup, int(re.search(self.prog, key).group(2)))
return max_dup
def _GetNonDupLabel(self, max_dup, label):
@@ -134,7 +137,7 @@ class ResultOrganizer(object):
if re.match(self.prog, key):
new_key = re.search(self.prog, key).group(1)
index = int(re.search(self.prog, key).group(2))
- new_label[start_index+index][new_key] = str(value)
+ new_label[start_index + index][new_key] = str(value)
del new_run[key]
return new_label
@@ -144,4 +147,4 @@ class ResultOrganizer(object):
if benchmark.name == bench:
if not benchmark.iteration_adjusted:
benchmark.iteration_adjusted = True
- benchmark.iterations *= (max_dup +1)
+ benchmark.iterations *= (max_dup + 1)
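
The (\w+)\{(\d+)\} pattern reformatted above is how ResultOrganizer recognizes keys that came from duplicated runs (for example 'total{1}') and redistributes them into separate result dicts. A simplified sketch of that split on a made-up run dict; the real code instead writes the values into new_label[start_index + index]:

import re

DUP_KEY = re.compile(r'(\w+)\{(\d+)\}')

def split_duplicates(run):
  """A sketch of the duplicate-key handling in _GetMaxDup/_GetNonDupLabel.

  Keys like 'total{2}' belong to an extra iteration folded into one run; they
  are pulled out into their own dicts, keyed by the plain name.
  """
  base = {}
  extras = {}
  for key, value in run.items():
    match = DUP_KEY.match(key)
    if match:
      name, index = match.group(1), int(match.group(2))
      extras.setdefault(index, {})[name] = str(value)
    else:
      base[key] = value
  return base, extras

print(split_duplicates({'total': '5', 'total{1}': '6', 'ms_1{1}': '2.2'}))
# base keeps 'total'; extras[1] holds the '{1}' values with the suffix stripped.
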
diff --git a/crosperf/results_organizer_unittest.py b/crosperf/results_organizer_unittest.py
index c170f0a3..914ecc5e 100755
--- a/crosperf/results_organizer_unittest.py
+++ b/crosperf/results_organizer_unittest.py
@@ -3,7 +3,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Testing of ResultsOrganizer. We create some labels, benchmark_runs
and then create a ResultsOrganizer, after that, we compare the result of
ResultOrganizer"""
@@ -17,101 +16,88 @@ from results_organizer import ResultOrganizer
import mock_instance
result = {'benchmark1': [[{'': 'PASS',
- 'bool': 'True',
- 'milliseconds_1': '1',
- 'milliseconds_2': '8',
- 'milliseconds_3': '9.2',
- 'ms_1': '2.1',
- 'total': '5'},
- {'test': '2'},
- {'test': '4'},
- {'': 'PASS',
- 'bool': 'FALSE',
- 'milliseconds_1': '3',
- 'milliseconds_2': '5',
- 'ms_1': '2.2',
- 'total': '6'},
- {'test': '3'},
- {'test': '4'}],
- [{'': 'PASS',
- 'bool': 'FALSE',
- 'milliseconds_4': '30',
- 'milliseconds_5': '50',
- 'ms_1': '2.23',
- 'total': '6'},
- {'test': '5'},
- {'test': '4'},
- {'': 'PASS',
- 'bool': 'FALSE',
- 'milliseconds_1': '3',
- 'milliseconds_6': '7',
- 'ms_1': '2.3',
- 'total': '7'},
- {'test': '2'},
- {'test': '6'}]],
- 'benchmark2': [[{'': 'PASS',
- 'bool': 'TRUE',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2.3',
- 'total': '7'},
- {'test': '2'},
- {'test': '6'},
- {'': 'PASS',
- 'bool': 'TRUE',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2.2',
- 'total': '7'},
- {'test': '2'},
- {'test': '2'}],
- [{'': 'PASS',
- 'bool': 'TRUE',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2',
- 'total': '7'},
- {'test': '2'},
- {'test': '4'},
- {'': 'PASS',
- 'bool': 'TRUE',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '1',
- 'total': '7'},
- {'test': '1'},
- {'test': '6'}]]}
+ 'bool': 'True',
+ 'milliseconds_1': '1',
+ 'milliseconds_2': '8',
+ 'milliseconds_3': '9.2',
+ 'ms_1': '2.1',
+ 'total': '5'}, {'test': '2'}, {'test': '4'},
+ {'': 'PASS',
+ 'bool': 'FALSE',
+ 'milliseconds_1': '3',
+ 'milliseconds_2': '5',
+ 'ms_1': '2.2',
+ 'total': '6'}, {'test': '3'}, {'test': '4'}],
+ [{'': 'PASS',
+ 'bool': 'FALSE',
+ 'milliseconds_4': '30',
+ 'milliseconds_5': '50',
+ 'ms_1': '2.23',
+ 'total': '6'}, {'test': '5'}, {'test': '4'},
+ {'': 'PASS',
+ 'bool': 'FALSE',
+ 'milliseconds_1': '3',
+ 'milliseconds_6': '7',
+ 'ms_1': '2.3',
+ 'total': '7'}, {'test': '2'}, {'test': '6'}]],
+ 'benchmark2': [[{'': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2.3',
+ 'total': '7'}, {'test': '2'}, {'test': '6'},
+ {'': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2.2',
+ 'total': '7'}, {'test': '2'}, {'test': '2'}],
+ [{'': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2',
+ 'total': '7'}, {'test': '2'}, {'test': '4'},
+ {'': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '1',
+ 'total': '7'}, {'test': '1'}, {'test': '6'}]]}
+
class ResultOrganizerTest(unittest.TestCase):
+
def testResultOrganizer(self):
labels = [mock_instance.label1, mock_instance.label2]
benchmarks = [mock_instance.benchmark1, mock_instance.benchmark2]
- benchmark_runs = [None]*8
- benchmark_runs[0] = BenchmarkRun("b1", benchmarks[0],
- labels[0], 1, "", "", "", "average", "")
- benchmark_runs[1] = BenchmarkRun("b2", benchmarks[0],
- labels[0], 2, "", "", "", "average", "")
- benchmark_runs[2] = BenchmarkRun("b3", benchmarks[0],
- labels[1], 1, "", "", "", "average", "")
- benchmark_runs[3] = BenchmarkRun("b4", benchmarks[0],
- labels[1], 2, "", "", "", "average", "")
- benchmark_runs[4] = BenchmarkRun("b5", benchmarks[1],
- labels[0], 1, "", "", "", "average", "")
- benchmark_runs[5] = BenchmarkRun("b6", benchmarks[1],
- labels[0], 2, "", "", "", "average", "")
- benchmark_runs[6] = BenchmarkRun("b7", benchmarks[1],
- labels[1], 1, "", "", "", "average", "")
- benchmark_runs[7] = BenchmarkRun("b8", benchmarks[1],
- labels[1], 2, "", "", "", "average", "")
+ benchmark_runs = [None] * 8
+ benchmark_runs[0] = BenchmarkRun('b1', benchmarks[0], labels[0], 1, '', '',
+ '', 'average', '')
+ benchmark_runs[1] = BenchmarkRun('b2', benchmarks[0], labels[0], 2, '', '',
+ '', 'average', '')
+ benchmark_runs[2] = BenchmarkRun('b3', benchmarks[0], labels[1], 1, '', '',
+ '', 'average', '')
+ benchmark_runs[3] = BenchmarkRun('b4', benchmarks[0], labels[1], 2, '', '',
+ '', 'average', '')
+ benchmark_runs[4] = BenchmarkRun('b5', benchmarks[1], labels[0], 1, '', '',
+ '', 'average', '')
+ benchmark_runs[5] = BenchmarkRun('b6', benchmarks[1], labels[0], 2, '', '',
+ '', 'average', '')
+ benchmark_runs[6] = BenchmarkRun('b7', benchmarks[1], labels[1], 1, '', '',
+ '', 'average', '')
+ benchmark_runs[7] = BenchmarkRun('b8', benchmarks[1], labels[1], 2, '', '',
+ '', 'average', '')
i = 0
for b in benchmark_runs:
- b.result = Result("", b.label, "average", "machine")
+ b.result = Result('', b.label, 'average', 'machine')
b.result.keyvals = mock_instance.keyval[i]
i += 1
ro = ResultOrganizer(benchmark_runs, labels, benchmarks)
self.assertEqual(ro.result, result)
-if __name__ == "__main__":
+
+if __name__ == '__main__':
unittest.main()
diff --git a/crosperf/results_report.py b/crosperf/results_report.py
index 9734eb32..f5d71aee 100644
--- a/crosperf/results_report.py
+++ b/crosperf/results_report.py
@@ -1,7 +1,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""A module to handle the report format."""
from __future__ import print_function
@@ -63,8 +62,8 @@ def ParseChromeosImage(chromeos_image):
# chromeos_image should have been something like:
# <path>/<board-trybot-release>/<chromeos-version>/chromiumos_test_image.bin"
num_pieces = len(pieces)
- if pieces[num_pieces-1] == "chromiumos_test_image.bin":
- version = pieces[num_pieces-2]
+ if pieces[num_pieces - 1] == 'chromiumos_test_image.bin':
+ version = pieces[num_pieces - 2]
# Find last '.' in the version and chop it off (removing the .datatime
# piece from local builds).
loc = version.rfind('.')
@@ -79,6 +78,7 @@ def ParseChromeosImage(chromeos_image):
image = real_file
return version, image
+
class ResultsReport(object):
"""Class to handle the report format."""
MAX_COLOR_CODE = 255
@@ -100,54 +100,41 @@ class ResultsReport(object):
return labels
def GetFullTables(self, perf=False):
- columns = [Column(RawResult(),
- Format()),
- Column(MinResult(),
- Format()),
- Column(MaxResult(),
- Format()),
- Column(AmeanResult(),
- Format()),
- Column(StdResult(),
- Format(), "StdDev"),
- Column(CoeffVarResult(),
- CoeffVarFormat(), "StdDev/Mean"),
- Column(GmeanRatioResult(),
- RatioFormat(), "GmeanSpeedup"),
- Column(PValueResult(),
- PValueFormat(), "p-value")
- ]
+ columns = [Column(RawResult(), Format()), Column(
+ MinResult(), Format()), Column(MaxResult(),
+ Format()), Column(AmeanResult(),
+ Format()),
+ Column(StdResult(), Format(),
+ 'StdDev'), Column(CoeffVarResult(), CoeffVarFormat(),
+ 'StdDev/Mean'),
+ Column(GmeanRatioResult(), RatioFormat(),
+ 'GmeanSpeedup'), Column(PValueResult(), PValueFormat(),
+ 'p-value')]
if not perf:
- return self._GetTables(self.labels, self.benchmark_runs, columns,
- "full")
- return self._GetPerfTables(self.labels, columns, "full")
+ return self._GetTables(self.labels, self.benchmark_runs, columns, 'full')
+ return self._GetPerfTables(self.labels, columns, 'full')
def GetSummaryTables(self, perf=False):
- columns = [Column(AmeanResult(),
- Format()),
- Column(StdResult(),
- Format(), "StdDev"),
- Column(CoeffVarResult(),
- CoeffVarFormat(), "StdDev/Mean"),
- Column(GmeanRatioResult(),
- RatioFormat(), "GmeanSpeedup"),
- Column(PValueResult(),
- PValueFormat(), "p-value")
- ]
+ columns = [Column(AmeanResult(), Format()), Column(StdResult(), Format(),
+ 'StdDev'),
+ Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
+ Column(GmeanRatioResult(), RatioFormat(),
+ 'GmeanSpeedup'), Column(PValueResult(), PValueFormat(),
+ 'p-value')]
if not perf:
return self._GetTables(self.labels, self.benchmark_runs, columns,
- "summary")
- return self._GetPerfTables(self.labels, columns, "summary")
+ 'summary')
+ return self._GetPerfTables(self.labels, columns, 'summary')
def _ParseColumn(self, columns, iteration):
new_column = []
for column in columns:
- if column.result.__class__.__name__ != "RawResult":
- #TODO(asharif): tabulator should support full table natively.
+ if column.result.__class__.__name__ != 'RawResult':
+ #TODO(asharif): tabulator should support full table natively.
new_column.append(column)
else:
for i in range(iteration):
- cc = Column(LiteralResult(i), Format(), str(i+1))
+ cc = Column(LiteralResult(i), Format(), str(i + 1))
new_column.append(cc)
return new_column
@@ -159,12 +146,12 @@ class ResultsReport(object):
return True
def _GetTableHeader(self, benchmark):
- benchmark_info = ("Benchmark: {0}; Iterations: {1}"
+ benchmark_info = ('Benchmark: {0}; Iterations: {1}'
.format(benchmark.name, benchmark.iterations))
cell = Cell()
cell.string_value = benchmark_info
cell.header = True
- return [[cell]]
+ return [[cell]]
def _GetTables(self, labels, benchmark_runs, columns, table_type):
tables = []
@@ -179,10 +166,10 @@ class ResultsReport(object):
break
ben_table = self._GetTableHeader(benchmark)
- if self._AreAllRunsEmpty(runs):
+ if self._AreAllRunsEmpty(runs):
cell = Cell()
- cell.string_value = ("This benchmark contains no result."
- " Is the benchmark name valid?")
+ cell.string_value = ('This benchmark contains no result.'
+ ' Is the benchmark name valid?')
cell_table = [[cell]]
else:
tg = TableGenerator(runs, label_name)
@@ -214,7 +201,8 @@ class ResultsReport(object):
row_info = p_table.row_info[benchmark]
table = []
for event in benchmark_data:
- tg = TableGenerator(benchmark_data[event], label_names,
+ tg = TableGenerator(benchmark_data[event],
+ label_names,
sort=TableGenerator.SORT_BY_VALUES_DESC)
table = tg.GetTable(max(self.PERF_ROWS, row_info[event]))
parsed_columns = self._ParseColumn(columns, ben.iterations)
@@ -228,19 +216,19 @@ class ResultsReport(object):
return tables
def PrintTables(self, tables, out_to):
- output = ""
+ output = ''
if not tables:
return output
for table in tables:
- if out_to == "HTML":
+ if out_to == 'HTML':
tp = TablePrinter(table, TablePrinter.HTML)
- elif out_to == "PLAIN":
+ elif out_to == 'PLAIN':
tp = TablePrinter(table, TablePrinter.PLAIN)
- elif out_to == "CONSOLE":
+ elif out_to == 'CONSOLE':
tp = TablePrinter(table, TablePrinter.CONSOLE)
- elif out_to == "TSV":
+ elif out_to == 'TSV':
tp = TablePrinter(table, TablePrinter.TSV)
- elif out_to == "EMAIL":
+ elif out_to == 'EMAIL':
tp = TablePrinter(table, TablePrinter.EMAIL)
else:
pass
@@ -293,16 +281,21 @@ CPUInfo
def GetStatusTable(self):
"""Generate the status table by the tabulator."""
- table = [["", ""]]
- columns = [Column(LiteralResult(iteration=0), Format(), "Status"),
- Column(LiteralResult(iteration=1), Format(), "Failing Reason")]
+ table = [['', '']]
+ columns = [Column(
+ LiteralResult(iteration=0),
+ Format(),
+ 'Status'), Column(
+ LiteralResult(iteration=1),
+ Format(),
+ 'Failing Reason')]
for benchmark_run in self.benchmark_runs:
status = [benchmark_run.name, [benchmark_run.timeline.GetLastEvent(),
benchmark_run.failure_reason]]
table.append(status)
tf = TableFormatter(table, columns)
- cell_table = tf.GetCellTable("status")
+ cell_table = tf.GetCellTable('status')
return [cell_table]
def GetReport(self):
@@ -313,23 +306,20 @@ CPUInfo
if not perf_table:
perf_table = None
if not self.email:
- return self.TEXT % (self.experiment.name,
- self.PrintTables(summary_table, "CONSOLE"),
- self.experiment.machine_manager.num_reimages,
- self.PrintTables(status_table, "CONSOLE"),
- self.PrintTables(perf_table, "CONSOLE"),
- self.experiment.experiment_file,
- self.experiment.machine_manager.GetAllCPUInfo(
- self.experiment.labels))
-
- return self.TEXT % (self.experiment.name,
- self.PrintTables(summary_table, "EMAIL"),
- self.experiment.machine_manager.num_reimages,
- self.PrintTables(status_table, "EMAIL"),
- self.PrintTables(perf_table, "EMAIL"),
- self.experiment.experiment_file,
- self.experiment.machine_manager.GetAllCPUInfo(
- self.experiment.labels))
+ return self.TEXT % (
+ self.experiment.name, self.PrintTables(summary_table, 'CONSOLE'),
+ self.experiment.machine_manager.num_reimages,
+ self.PrintTables(status_table, 'CONSOLE'),
+ self.PrintTables(perf_table, 'CONSOLE'),
+ self.experiment.experiment_file,
+ self.experiment.machine_manager.GetAllCPUInfo(self.experiment.labels))
+
+ return self.TEXT % (
+ self.experiment.name, self.PrintTables(summary_table, 'EMAIL'),
+ self.experiment.machine_manager.num_reimages,
+ self.PrintTables(status_table, 'EMAIL'),
+ self.PrintTables(perf_table, 'EMAIL'), self.experiment.experiment_file,
+ self.experiment.machine_manager.GetAllCPUInfo(self.experiment.labels))
class HTMLResultsReport(ResultsReport):
@@ -489,11 +479,11 @@ pre {
</div>""" % (table, table, table)
def GetReport(self):
- chart_javascript = ""
+ chart_javascript = ''
charts = self._GetCharts(self.labels, self.benchmark_runs)
for chart in charts:
chart_javascript += chart.GetJavascript()
- chart_divs = ""
+ chart_divs = ''
for chart in charts:
chart_divs += chart.GetDiv()
@@ -501,30 +491,23 @@ pre {
full_table = self.GetFullTables()
perf_table = self.GetSummaryTables(perf=True)
if perf_table:
- perf_html = self.PERF_HTML % (
- self.PrintTables(perf_table, "HTML"),
- self.PrintTables(perf_table, "PLAIN"),
- self.PrintTables(perf_table, "TSV"),
- self._GetTabMenuHTML("perf")
- )
+ perf_html = self.PERF_HTML % (self.PrintTables(perf_table, 'HTML'),
+ self.PrintTables(perf_table, 'PLAIN'),
+ self.PrintTables(perf_table, 'TSV'),
+ self._GetTabMenuHTML('perf'))
perf_init = "switchTab('perf', 'html');"
else:
- perf_html = ""
- perf_init = ""
-
- return self.HTML % (perf_init,
- chart_javascript,
- self.PrintTables(summary_table, "HTML"),
- self.PrintTables(summary_table, "PLAIN"),
- self.PrintTables(summary_table, "TSV"),
- self._GetTabMenuHTML("summary"),
- perf_html,
- chart_divs,
- self.PrintTables(full_table, "HTML"),
- self.PrintTables(full_table, "PLAIN"),
- self.PrintTables(full_table, "TSV"),
- self._GetTabMenuHTML("full"),
- self.experiment.experiment_file)
+ perf_html = ''
+ perf_init = ''
+
+ return self.HTML % (
+ perf_init, chart_javascript, self.PrintTables(summary_table, 'HTML'),
+ self.PrintTables(summary_table, 'PLAIN'),
+ self.PrintTables(summary_table, 'TSV'), self._GetTabMenuHTML('summary'),
+ perf_html, chart_divs, self.PrintTables(full_table, 'HTML'),
+ self.PrintTables(full_table, 'PLAIN'),
+ self.PrintTables(full_table, 'TSV'), self._GetTabMenuHTML('full'),
+ self.experiment.experiment_file)
def _GetCharts(self, labels, benchmark_runs):
charts = []
@@ -534,32 +517,26 @@ pre {
runs = result[item]
tg = TableGenerator(runs, ro.labels)
table = tg.GetTable()
- columns = [Column(AmeanResult(),
- Format()),
- Column(MinResult(),
- Format()),
- Column(MaxResult(),
- Format())
- ]
+ columns = [Column(AmeanResult(), Format()), Column(MinResult(), Format()),
+ Column(MaxResult(), Format())]
tf = TableFormatter(table, columns)
- data_table = tf.GetCellTable("full")
+ data_table = tf.GetCellTable('full')
for i in range(2, len(data_table)):
cur_row_data = data_table[i]
test_key = cur_row_data[0].string_value
- title = "{0}: {1}".format(item, test_key.replace("/", ""))
+ title = '{0}: {1}'.format(item, test_key.replace('/', ''))
chart = ColumnChart(title, 300, 200)
- chart.AddColumn("Label", "string")
- chart.AddColumn("Average", "number")
- chart.AddColumn("Min", "number")
- chart.AddColumn("Max", "number")
- chart.AddSeries("Min", "line", "black")
- chart.AddSeries("Max", "line", "black")
+ chart.AddColumn('Label', 'string')
+ chart.AddColumn('Average', 'number')
+ chart.AddColumn('Min', 'number')
+ chart.AddColumn('Max', 'number')
+ chart.AddSeries('Min', 'line', 'black')
+ chart.AddSeries('Max', 'line', 'black')
cur_index = 1
for label in ro.labels:
- chart.AddRow([label, cur_row_data[cur_index].value,
- cur_row_data[cur_index + 1].value,
- cur_row_data[cur_index + 2].value])
+ chart.AddRow([label, cur_row_data[cur_index].value, cur_row_data[
+ cur_index + 1].value, cur_row_data[cur_index + 2].value])
if isinstance(cur_row_data[cur_index].value, str):
chart = None
break
@@ -568,8 +545,10 @@ pre {
charts.append(chart)
return charts
+
class JSONResultsReport(ResultsReport):
"""class to generate JASON report."""
+
def __init__(self, experiment, date=None, time=None):
super(JSONResultsReport, self).__init__(experiment)
self.ro = ResultOrganizer(experiment.benchmark_runs,
@@ -581,8 +560,8 @@ class JSONResultsReport(ResultsReport):
self.defaults = TelemetryDefaults()
if not self.date:
timestamp = datetime.datetime.strftime(datetime.datetime.now(),
- "%Y-%m-%d %H:%M:%S")
- date, time = timestamp.split(" ")
+ '%Y-%m-%d %H:%M:%S')
+ date, time = timestamp.split(' ')
self.date = date
self.time = time
@@ -653,9 +632,8 @@ class JSONResultsReport(ResultsReport):
json_results['detailed_results'] = detail_results
final_results.append(json_results)
- filename = "report_%s_%s_%s.%s.json" % (board, self.date,
- self.time.replace(':', '.'),
- compiler_string)
+ filename = 'report_%s_%s_%s.%s.json' % (
+ board, self.date, self.time.replace(':', '.'), compiler_string)
fullname = os.path.join(results_dir, filename)
- with open(fullname, "w") as fp:
+ with open(fullname, 'w') as fp:
json.dump(final_results, fp, indent=2)
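
For illustration, the report filename produced by the format string above, with made-up values:

board, date, time_str, compiler_string = 'daisy', '2015-12-16', '17:36:06', 'gcc'
filename = 'report_%s_%s_%s.%s.json' % (
    board, date, time_str.replace(':', '.'), compiler_string)
# filename == 'report_daisy_2015-12-16_17.36.06.gcc.json'
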
diff --git a/crosperf/results_sorter.py b/crosperf/results_sorter.py
index e2caa41e..1ebbb8b4 100644
--- a/crosperf/results_sorter.py
+++ b/crosperf/results_sorter.py
@@ -1,8 +1,10 @@
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""Module to sort the results."""
+
+
class ResultSorter(object):
"""Class to sort the results."""
+
def __init__(self, benchmark_runs):
self.table = {}
for benchmark_run in benchmark_runs:
diff --git a/crosperf/schedv2.py b/crosperf/schedv2.py
index b73d384f..3a31d93c 100644
--- a/crosperf/schedv2.py
+++ b/crosperf/schedv2.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright 2015 Google Inc. All Rights Reserved.
@@ -16,79 +15,79 @@ from cros_utils import logger
class DutWorker(Thread):
- """Working thread for a dut."""
-
- def __init__(self, dut, sched):
- super(DutWorker, self).__init__(name='DutWorker-{}'.format(dut.name))
- self._dut = dut
- self._sched = sched
- self._stat_num_br_run = 0
- self._stat_num_reimage = 0
- self._stat_annotation = ""
- self._logger = logger.GetLogger(self._sched._experiment.log_dir)
- self.daemon = True
- self._terminated = False
- self._active_br = None
- # Race condition accessing _active_br between _execute_benchmark_run and
- # _terminate, so lock it up.
- self._active_br_lock = Lock()
-
- def terminate(self):
- self._terminated = True
- with self._active_br_lock:
- if self._active_br is not None:
- # BenchmarkRun.Terminate() terminates any running testcase via
- # suite_runner.Terminate and updates timeline.
- self._active_br.Terminate()
-
- def run(self):
- """Do the "run-test->(optionally reimage)->run-test" chore.
+ """Working thread for a dut."""
+
+ def __init__(self, dut, sched):
+ super(DutWorker, self).__init__(name='DutWorker-{}'.format(dut.name))
+ self._dut = dut
+ self._sched = sched
+ self._stat_num_br_run = 0
+ self._stat_num_reimage = 0
+ self._stat_annotation = ''
+ self._logger = logger.GetLogger(self._sched._experiment.log_dir)
+ self.daemon = True
+ self._terminated = False
+ self._active_br = None
+ # Race condition accessing _active_br between _execute_benchmark_run and
+ # _terminate, so lock it up.
+ self._active_br_lock = Lock()
+
+ def terminate(self):
+ self._terminated = True
+ with self._active_br_lock:
+ if self._active_br is not None:
+ # BenchmarkRun.Terminate() terminates any running testcase via
+ # suite_runner.Terminate and updates timeline.
+ self._active_br.Terminate()
+
+ def run(self):
+ """Do the "run-test->(optionally reimage)->run-test" chore.
Note - 'br' below means 'benchmark_run'.
"""
- # Firstly, handle benchmarkruns that have cache hit.
- br = self._sched.get_cached_benchmark_run()
- while br:
- try:
- self._stat_annotation = 'finishing cached {}'.format(br)
- br.run()
- except:
- traceback.print_exc(file=sys.stdout)
- br = self._sched.get_cached_benchmark_run()
-
- # Secondly, handle benchmarkruns that needs to be run on dut.
- self._setup_dut_label()
- try:
- self._logger.LogOutput("{} started.".format(self))
- while not self._terminated:
- br = self._sched.get_benchmark_run(self._dut)
- if br is None:
- # No br left for this label. Considering reimaging.
- label = self._sched.allocate_label(self._dut)
- if label is None:
- # No br even for other labels. We are done.
- self._logger.LogOutput("ImageManager found no label "
- "for dut, stopping working "
- "thread {}.".format(self))
- break
- if self._reimage(label):
- # Reimage to run other br fails, dut is doomed, stop
- # this thread.
- self._logger.LogWarning("Re-image failed, dut "
- "in an unstable state, stopping "
- "working thread {}.".format(self))
- break
- else:
- # Execute the br.
- self._execute_benchmark_run(br)
- finally:
- self._stat_annotation = "finished"
- # Thread finishes. Notify scheduler that I'm done.
- self._sched.dut_worker_finished(self)
-
- def _reimage(self, label):
- """Reimage image to label.
+ # Firstly, handle benchmarkruns that have a cache hit.
+ br = self._sched.get_cached_benchmark_run()
+ while br:
+ try:
+ self._stat_annotation = 'finishing cached {}'.format(br)
+ br.run()
+ except:
+ traceback.print_exc(file=sys.stdout)
+ br = self._sched.get_cached_benchmark_run()
+
+ # Secondly, handle benchmarkruns that need to be run on the dut.
+ self._setup_dut_label()
+ try:
+ self._logger.LogOutput('{} started.'.format(self))
+ while not self._terminated:
+ br = self._sched.get_benchmark_run(self._dut)
+ if br is None:
+ # No br left for this label. Considering reimaging.
+ label = self._sched.allocate_label(self._dut)
+ if label is None:
+ # No br even for other labels. We are done.
+ self._logger.LogOutput('ImageManager found no label '
+ 'for dut, stopping working '
+ 'thread {}.'.format(self))
+ break
+ if self._reimage(label):
+ # Reimaging to run another br failed; dut is doomed, stop
+ # this thread.
+ self._logger.LogWarning('Re-image failed, dut '
+ 'in an unstable state, stopping '
+ 'working thread {}.'.format(self))
+ break
+ else:
+ # Execute the br.
+ self._execute_benchmark_run(br)
+ finally:
+ self._stat_annotation = 'finished'
+ # Thread finishes. Notify scheduler that I'm done.
+ self._sched.dut_worker_finished(self)
+
+ def _reimage(self, label):
+ """Reimage image to label.
Args:
label: the label to reimage onto the dut.
@@ -97,235 +96,233 @@ class DutWorker(Thread):
0 if successful, otherwise 1.
"""
- # Termination could happen anywhere, check it.
- if self._terminated:
- return 1
-
- self._logger.LogOutput('Reimaging {} using {}'.format(self, label))
- self._stat_num_reimage += 1
- self._stat_annotation = 'reimaging using "{}"'.format(label.name)
- try:
- # Note, only 1 reimage at any given time, this is guaranteed in
- # ImageMachine, so no sync needed below.
- retval = self._sched._experiment.machine_manager.ImageMachine(
- self._dut, label)
- if retval:
- return 1
- except:
- return 1
-
- self._dut.label = label
- return 0
-
- def _execute_benchmark_run(self, br):
- """Execute a single benchmark_run.
+ # Termination could happen anywhere, check it.
+ if self._terminated:
+ return 1
+
+ self._logger.LogOutput('Reimaging {} using {}'.format(self, label))
+ self._stat_num_reimage += 1
+ self._stat_annotation = 'reimaging using "{}"'.format(label.name)
+ try:
+ # Note, only 1 reimage at any given time, this is guaranteed in
+ # ImageMachine, so no sync needed below.
+ retval = self._sched._experiment.machine_manager.ImageMachine(self._dut,
+ label)
+ if retval:
+ return 1
+ except:
+ return 1
+
+ self._dut.label = label
+ return 0
+
+ def _execute_benchmark_run(self, br):
+ """Execute a single benchmark_run.
Note - this function never throws exceptions.
"""
- # Termination could happen anywhere, check it.
- if self._terminated:
- return
+ # Termination could happen anywhere, check it.
+ if self._terminated:
+ return
+
+ self._logger.LogOutput('{} started working on {}'.format(self, br))
+ self._stat_num_br_run += 1
+ self._stat_annotation = 'executing {}'.format(br)
+ # benchmark_run.run does not throw, but just play it safe here.
+ try:
+ assert br.owner_thread is None
+ br.owner_thread = self
+ with self._active_br_lock:
+ self._active_br = br
+ br.run()
+ finally:
+ self._sched._experiment.BenchmarkRunFinished(br)
+ with self._active_br_lock:
+ self._active_br = None
- self._logger.LogOutput('{} started working on {}'.format(self, br))
- self._stat_num_br_run += 1
- self._stat_annotation = 'executing {}'.format(br)
- # benchmark_run.run does not throws, but just play it safe here.
- try:
- assert br.owner_thread is None
- br.owner_thread = self
- with self._active_br_lock:
- self._active_br = br
- br.run()
- finally:
- self._sched._experiment.BenchmarkRunFinished(br)
- with self._active_br_lock:
- self._active_br = None
-
- def _setup_dut_label(self):
- """Try to match dut image with a certain experiment label.
+ def _setup_dut_label(self):
+ """Try to match dut image with a certain experiment label.
If such a match is found, we just skip reimaging and jump to executing
some benchmark_runs.
"""
- checksum_file = "/usr/local/osimage_checksum_file"
- try:
- rv, checksum, _ = command_executer.GetCommandExecuter().\
- CrosRunCommandWOutput(
- "cat " + checksum_file,
- chromeos_root=self._sched._labels[0].chromeos_root,
- machine=self._dut.name,
- print_to_console=False)
- if rv == 0:
- checksum = checksum.strip()
- for l in self._sched._labels:
- if l.checksum == checksum:
- self._logger.LogOutput(
- "Dut '{}' is pre-installed with '{}'".format(
- self._dut.name, l))
- self._dut.label = l
- return
- except:
- traceback.print_exc(file=sys.stdout)
- self._dut.label = None
-
- def __str__(self):
- return 'DutWorker[dut="{}", label="{}"]'.format(
- self._dut.name, self._dut.label.name if self._dut.label else "None")
-
- def dut(self):
- return self._dut
-
- def status_str(self):
- """Report thread status."""
-
- return ('Worker thread "{}", label="{}", benchmark_run={}, '
- 'reimage={}, now {}'.format(
- self._dut.name,
- 'None' if self._dut.label is None else self._dut.label.name,
- self._stat_num_br_run,
- self._stat_num_reimage,
- self._stat_annotation))
+ checksum_file = '/usr/local/osimage_checksum_file'
+ try:
+ rv, checksum, _ = command_executer.GetCommandExecuter().\
+ CrosRunCommandWOutput(
+ 'cat ' + checksum_file,
+ chromeos_root=self._sched._labels[0].chromeos_root,
+ machine=self._dut.name,
+ print_to_console=False)
+ if rv == 0:
+ checksum = checksum.strip()
+ for l in self._sched._labels:
+ if l.checksum == checksum:
+ self._logger.LogOutput("Dut '{}' is pre-installed with '{}'".format(
+ self._dut.name, l))
+ self._dut.label = l
+ return
+ except:
+ traceback.print_exc(file=sys.stdout)
+ self._dut.label = None
+
+ def __str__(self):
+ return 'DutWorker[dut="{}", label="{}"]'.format(
+ self._dut.name, self._dut.label.name if self._dut.label else 'None')
+
+ def dut(self):
+ return self._dut
+
+ def status_str(self):
+ """Report thread status."""
+
+ return ('Worker thread "{}", label="{}", benchmark_run={}, '
+ 'reimage={}, now {}'.format(
+ self._dut.name, 'None' if self._dut.label is None else
+ self._dut.label.name, self._stat_num_br_run,
+ self._stat_num_reimage, self._stat_annotation))
+
class BenchmarkRunCacheReader(Thread):
- """The thread to read cache for a list of benchmark_runs.
+ """The thread to read cache for a list of benchmark_runs.
On creation, each instance of this class is given a br_list, which is a
subset of experiment._benchmark_runs.
"""
- def __init__(self, schedv2, br_list):
- super(BenchmarkRunCacheReader, self).__init__()
- self._schedv2 = schedv2
- self._br_list = br_list
- self._logger = self._schedv2._logger
-
- def run(self):
- for br in self._br_list:
- try:
- br.ReadCache()
- if br.cache_hit:
- self._logger.LogOutput('Cache hit - {}'.format(br))
- with self._schedv2._lock_on('_cached_br_list'):
- self._schedv2._cached_br_list.append(br)
- else:
- self._logger.LogOutput('Cache not hit - {}'.format(br))
- except:
- traceback.print_exc(file=sys.stderr)
+ def __init__(self, schedv2, br_list):
+ super(BenchmarkRunCacheReader, self).__init__()
+ self._schedv2 = schedv2
+ self._br_list = br_list
+ self._logger = self._schedv2._logger
+
+ def run(self):
+ for br in self._br_list:
+ try:
+ br.ReadCache()
+ if br.cache_hit:
+ self._logger.LogOutput('Cache hit - {}'.format(br))
+ with self._schedv2._lock_on('_cached_br_list'):
+ self._schedv2._cached_br_list.append(br)
+ else:
+ self._logger.LogOutput('Cache not hit - {}'.format(br))
+ except:
+ traceback.print_exc(file=sys.stderr)
class Schedv2(object):
- """New scheduler for crosperf."""
+ """New scheduler for crosperf."""
- def __init__(self, experiment):
- self._experiment = experiment
- self._logger = logger.GetLogger(experiment.log_dir)
+ def __init__(self, experiment):
+ self._experiment = experiment
+ self._logger = logger.GetLogger(experiment.log_dir)
- # Create shortcuts to nested data structure. "_duts" points to a list of
- # locked machines. _labels points to a list of all labels.
- self._duts = self._experiment.machine_manager._all_machines
- self._labels = self._experiment.labels
+ # Create shortcuts to nested data structure. "_duts" points to a list of
+ # locked machines. _labels points to a list of all labels.
+ self._duts = self._experiment.machine_manager._all_machines
+ self._labels = self._experiment.labels
- # Bookkeeping for synchronization.
- self._workers_lock = Lock()
- self._lock_map = defaultdict(lambda: Lock())
+ # Bookkeeping for synchronization.
+ self._workers_lock = Lock()
+ self._lock_map = defaultdict(lambda: Lock())
- # Test mode flag
- self._in_test_mode = test_flag.GetTestMode()
+ # Test mode flag
+ self._in_test_mode = test_flag.GetTestMode()
- # Read benchmarkrun cache.
- self._read_br_cache()
+ # Read benchmarkrun cache.
+ self._read_br_cache()
- # Mapping from label to a list of benchmark_runs.
- self._label_brl_map = dict([(l, []) for l in self._labels])
- for br in self._experiment.benchmark_runs:
- assert br.label in self._label_brl_map
- # Only put no-cache-hit br into the map.
- if br not in self._cached_br_list:
- self._label_brl_map[br.label].append(br)
+ # Mapping from label to a list of benchmark_runs.
+ self._label_brl_map = dict([(l, []) for l in self._labels])
+ for br in self._experiment.benchmark_runs:
+ assert br.label in self._label_brl_map
+ # Only put no-cache-hit br into the map.
+ if br not in self._cached_br_list:
+ self._label_brl_map[br.label].append(br)
- # Use machine image manager to calculate initial label allocation.
- self._mim = MachineImageManager(self._labels, self._duts)
- self._mim.compute_initial_allocation()
+ # Use machine image manager to calculate initial label allocation.
+ self._mim = MachineImageManager(self._labels, self._duts)
+ self._mim.compute_initial_allocation()
- # Create worker thread, 1 per dut.
- self._active_workers = [DutWorker(dut, self) for dut in self._duts]
- self._finished_workers = []
+ # Create worker thread, 1 per dut.
+ self._active_workers = [DutWorker(dut, self) for dut in self._duts]
+ self._finished_workers = []
- # Termination flag.
- self._terminated = False
+ # Termination flag.
+ self._terminated = False
- def run_sched(self):
- """Start all dut worker threads and return immediately."""
+ def run_sched(self):
+ """Start all dut worker threads and return immediately."""
- [w.start() for w in self._active_workers]
+ [w.start() for w in self._active_workers]
- def _read_br_cache(self):
- """Use multi-threading to read cache for all benchmarkruns.
+ def _read_br_cache(self):
+ """Use multi-threading to read cache for all benchmarkruns.
We do this by first creating a few threads and then assigning each
thread a segment of all brs. Each thread will check cache status for
each br and put those with a cache hit into '_cached_br_list'."""
- self._cached_br_list = []
- n_benchmarkruns = len(self._experiment.benchmark_runs)
- if n_benchmarkruns <= 4:
- # Use single thread to read cache.
- self._logger.LogOutput(('Starting to read cache status for '
- '{} benchmark runs ...').format(n_benchmarkruns))
- BenchmarkRunCacheReader(self, self._experiment.benchmark_runs).run()
- return
-
- # Split benchmarkruns set into segments. Each segment will be handled by
- # a thread. Note, we use (x+3)/4 to mimic math.ceil(x/4).
- n_threads = max(2, min(20, (n_benchmarkruns + 3) / 4))
- self._logger.LogOutput(('Starting {} threads to read cache status for '
- '{} benchmark runs ...').format(
- n_threads, n_benchmarkruns))
- benchmarkruns_per_thread = (n_benchmarkruns + n_threads - 1) / n_threads
- benchmarkrun_segments = []
- for i in range(n_threads - 1):
- start = i * benchmarkruns_per_thread
- end = (i + 1) * benchmarkruns_per_thread
- benchmarkrun_segments.append(
- self._experiment.benchmark_runs[start : end])
- benchmarkrun_segments.append(self._experiment.benchmark_runs[
- (n_threads - 1) * benchmarkruns_per_thread:])
-
- # Assert: aggregation of benchmarkrun_segments equals to benchmark_runs.
- assert (sum([len(x) for x in benchmarkrun_segments]) == n_benchmarkruns)
-
- # Create and start all readers.
- cache_readers = [
- BenchmarkRunCacheReader(self, x) for x in benchmarkrun_segments]
-
- for x in cache_readers:
- x.start()
-
- # Wait till all readers finish.
- for x in cache_readers:
- x.join()
-
- # Summarize.
- self._logger.LogOutput(
- 'Total {} cache hit out of {} benchmark_runs.'.format(
- len(self._cached_br_list), n_benchmarkruns))
-
- def get_cached_benchmark_run(self):
- """Get a benchmark_run with 'cache hit'.
+ self._cached_br_list = []
+ n_benchmarkruns = len(self._experiment.benchmark_runs)
+ if n_benchmarkruns <= 4:
+ # Use single thread to read cache.
+ self._logger.LogOutput(('Starting to read cache status for '
+ '{} benchmark runs ...').format(n_benchmarkruns))
+ BenchmarkRunCacheReader(self, self._experiment.benchmark_runs).run()
+ return
+
+ # Split benchmarkruns set into segments. Each segment will be handled by
+ # a thread. Note, we use (x+3)/4 to mimic math.ceil(x/4).
+ n_threads = max(2, min(20, (n_benchmarkruns + 3) / 4))
+ self._logger.LogOutput(('Starting {} threads to read cache status for '
+ '{} benchmark runs ...').format(n_threads,
+ n_benchmarkruns))
+ benchmarkruns_per_thread = (n_benchmarkruns + n_threads - 1) / n_threads
+ benchmarkrun_segments = []
+ for i in range(n_threads - 1):
+ start = i * benchmarkruns_per_thread
+ end = (i + 1) * benchmarkruns_per_thread
+ benchmarkrun_segments.append(self._experiment.benchmark_runs[start:end])
+ benchmarkrun_segments.append(self._experiment.benchmark_runs[
+ (n_threads - 1) * benchmarkruns_per_thread:])
+
+ # Assert: aggregation of benchmarkrun_segments equals to benchmark_runs.
+ assert (sum([len(x) for x in benchmarkrun_segments]) == n_benchmarkruns)
+
+ # Create and start all readers.
+ cache_readers = [
+ BenchmarkRunCacheReader(self, x) for x in benchmarkrun_segments
+ ]
+
+ for x in cache_readers:
+ x.start()
+
+ # Wait till all readers finish.
+ for x in cache_readers:
+ x.join()
+
+ # Summarize.
+ self._logger.LogOutput(
+ 'Total {} cache hit out of {} benchmark_runs.'.format(
+ len(self._cached_br_list), n_benchmarkruns))
+
+ def get_cached_benchmark_run(self):
+ """Get a benchmark_run with 'cache hit'.
return:
The benchmark_run that has a cache hit, if any; otherwise None.
"""
- with self._lock_on('_cached_br_list'):
- if self._cached_br_list:
- return self._cached_br_list.pop()
- return None
+ with self._lock_on('_cached_br_list'):
+ if self._cached_br_list:
+ return self._cached_br_list.pop()
+ return None
- def get_benchmark_run(self, dut):
- """Get a benchmark_run (br) object for a certain dut.
+ def get_benchmark_run(self, dut):
+ """Get a benchmark_run (br) object for a certain dut.
Arguments:
dut: the dut for which a br is returned.
@@ -336,25 +333,25 @@ class Schedv2(object):
dut).
"""
- # If terminated, stop providing any br.
- if self._terminated:
- return None
+ # If terminated, stop providing any br.
+ if self._terminated:
+ return None
- # If dut bears an unrecognized label, return None.
- if dut.label is None:
- return None
+ # If dut bears an unrecognized label, return None.
+ if dut.label is None:
+ return None
- # If br list for the dut's label is empty (that means all brs for this
- # label have been done), return None.
- with self._lock_on(dut.label):
- brl = self._label_brl_map[dut.label]
- if not brl:
- return None
- # Return the first br.
- return brl.pop(0)
+ # If br list for the dut's label is empty (that means all brs for this
+ # label have been done), return None.
+ with self._lock_on(dut.label):
+ brl = self._label_brl_map[dut.label]
+ if not brl:
+ return None
+ # Return the first br.
+ return brl.pop(0)
- def allocate_label(self, dut):
- """Allocate a label to a dut.
+ def allocate_label(self, dut):
+ """Allocate a label to a dut.
The work is delegated to MachineImageManager.
@@ -368,48 +365,48 @@ class Schedv2(object):
The label or None.
"""
- if self._terminated:
- return None
+ if self._terminated:
+ return None
- return self._mim.allocate(dut, self)
+ return self._mim.allocate(dut, self)
- def dut_worker_finished(self, dut_worker):
- """Notify schedv2 that the dut_worker thread finished.
+ def dut_worker_finished(self, dut_worker):
+ """Notify schedv2 that the dut_worker thread finished.
Arguments:
dut_worker: the thread that is about to end."""
- self._logger.LogOutput("{} finished.".format(dut_worker))
- with self._workers_lock:
- self._active_workers.remove(dut_worker)
- self._finished_workers.append(dut_worker)
+ self._logger.LogOutput('{} finished.'.format(dut_worker))
+ with self._workers_lock:
+ self._active_workers.remove(dut_worker)
+ self._finished_workers.append(dut_worker)
- def is_complete(self):
- return len(self._active_workers) == 0
+ def is_complete(self):
+ return len(self._active_workers) == 0
- def _lock_on(self, object):
- return self._lock_map[object]
+ def _lock_on(self, object):
+ return self._lock_map[object]
- def terminate(self):
- """Mark flag so we stop providing br/reimages.
+ def terminate(self):
+ """Mark flag so we stop providing br/reimages.
Also terminate each DutWorker, so they refuse to execute br or reimage.
"""
- self._terminated = True
- for dut_worker in self._active_workers:
- dut_worker.terminate()
-
- def threads_status_as_string(self):
- """Report the dut worker threads status."""
-
- status = "{} active threads, {} finished threads.\n".format(
- len(self._active_workers), len(self._finished_workers))
- status += " Active threads:"
- for dw in self._active_workers:
- status += '\n ' + dw.status_str()
- if self._finished_workers:
- status += "\n Finished threads:"
- for dw in self._finished_workers:
- status += '\n ' + dw.status_str()
- return status
+ self._terminated = True
+ for dut_worker in self._active_workers:
+ dut_worker.terminate()
+
+ def threads_status_as_string(self):
+ """Report the dut worker threads status."""
+
+ status = '{} active threads, {} finished threads.\n'.format(
+ len(self._active_workers), len(self._finished_workers))
+ status += ' Active threads:'
+ for dw in self._active_workers:
+ status += '\n ' + dw.status_str()
+ if self._finished_workers:
+ status += '\n Finished threads:'
+ for dw in self._finished_workers:
+ status += '\n ' + dw.status_str()
+ return status
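
A standalone sketch of the segmentation arithmetic in _read_br_cache above, written with // in place of Python 2's truncating integer /; with 18 benchmark runs it reproduces the 4/4/4/4/2 split that the unit tests below expect:

n_benchmarkruns = 18
n_threads = max(2, min(20, (n_benchmarkruns + 3) // 4))      # ceil(18 / 4) == 5
per_thread = (n_benchmarkruns + n_threads - 1) // n_threads  # ceil(18 / 5) == 4
runs = list(range(n_benchmarkruns))
segments = [runs[i * per_thread:(i + 1) * per_thread]
            for i in range(n_threads - 1)]
segments.append(runs[(n_threads - 1) * per_thread:])
assert [len(s) for s in segments] == [4, 4, 4, 4, 2]
assert sum(len(s) for s in segments) == n_benchmarkruns
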
diff --git a/crosperf/schedv2_unittest.py b/crosperf/schedv2_unittest.py
index 3276cd0f..29ffcb41 100755
--- a/crosperf/schedv2_unittest.py
+++ b/crosperf/schedv2_unittest.py
@@ -20,7 +20,6 @@ from cros_utils.command_executer import CommandExecuter
from experiment_runner_unittest import FakeLogger
from schedv2 import Schedv2
-
EXPERIMENT_FILE_1 = """\
board: daisy
remote: chromeos-daisy1.cros chromeos-daisy2.cros
@@ -41,7 +40,6 @@ image2 {
}
"""
-
EXPERIMENT_FILE_WITH_FORMAT = """\
board: daisy
remote: chromeos-daisy1.cros chromeos-daisy2.cros
@@ -65,153 +63,152 @@ image2 {{
class Schedv2Test(unittest.TestCase):
- mock_logger = FakeLogger()
- mock_cmd_exec = mock.Mock(spec=CommandExecuter)
+ mock_logger = FakeLogger()
+ mock_cmd_exec = mock.Mock(spec=CommandExecuter)
- @mock.patch('benchmark_run.BenchmarkRun',
- new=benchmark_run.MockBenchmarkRun)
- def _make_fake_experiment(self, expstr):
- """Create fake experiment from string.
+ @mock.patch('benchmark_run.BenchmarkRun', new=benchmark_run.MockBenchmarkRun)
+ def _make_fake_experiment(self, expstr):
+ """Create fake experiment from string.
Note - we mock out BenchmarkRun in this step.
"""
- experiment_file = ExperimentFile(StringIO.StringIO(expstr))
- experiment = ExperimentFactory().GetExperiment(
- experiment_file, working_directory="", log_dir="")
- return experiment
-
- def test_remote(self):
- """Test that remotes in labels are aggregated into experiment.remote."""
-
- self.exp = self._make_fake_experiment(EXPERIMENT_FILE_1)
- self.exp.log_level = 'verbose'
- schedv2 = Schedv2(self.exp)
- self.assertIn('chromeos-daisy1.cros', self.exp.remote)
- self.assertIn('chromeos-daisy2.cros', self.exp.remote)
- self.assertIn('chromeos-daisy3.cros', self.exp.remote)
- self.assertIn('chromeos-daisy4.cros', self.exp.remote)
- self.assertIn('chromeos-daisy5.cros', self.exp.remote)
-
- def test_unreachable_remote(self):
- """Test unreachable remotes are removed from experiment remote and
+ experiment_file = ExperimentFile(StringIO.StringIO(expstr))
+ experiment = ExperimentFactory().GetExperiment(experiment_file,
+ working_directory='',
+ log_dir='')
+ return experiment
+
+ def test_remote(self):
+ """Test that remotes in labels are aggregated into experiment.remote."""
+
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_1)
+ self.exp.log_level = 'verbose'
+ schedv2 = Schedv2(self.exp)
+ self.assertIn('chromeos-daisy1.cros', self.exp.remote)
+ self.assertIn('chromeos-daisy2.cros', self.exp.remote)
+ self.assertIn('chromeos-daisy3.cros', self.exp.remote)
+ self.assertIn('chromeos-daisy4.cros', self.exp.remote)
+ self.assertIn('chromeos-daisy5.cros', self.exp.remote)
+
+ def test_unreachable_remote(self):
+ """Test unreachable remotes are removed from experiment remote and
label.remote."""
- def MockIsReachable(cm):
- return (cm.name != 'chromeos-daisy3.cros' and
- cm.name != 'chromeos-daisy5.cros')
-
- with mock.patch('machine_manager.MockCrosMachine.IsReachable',
- new=MockIsReachable) as f:
- self.exp = self._make_fake_experiment(EXPERIMENT_FILE_1)
- self.assertIn('chromeos-daisy1.cros', self.exp.remote)
- self.assertIn('chromeos-daisy2.cros', self.exp.remote)
- self.assertNotIn('chromeos-daisy3.cros', self.exp.remote)
- self.assertIn('chromeos-daisy4.cros', self.exp.remote)
- self.assertNotIn('chromeos-daisy5.cros', self.exp.remote)
-
- for l in self.exp.labels:
- if l.name == 'image2':
- self.assertNotIn('chromeos-daisy5.cros', l.remote)
- self.assertIn('chromeos-daisy4.cros', l.remote)
- elif l.name == 'image1':
- self.assertNotIn('chromeos-daisy3.cros', l.remote)
-
- @mock.patch('schedv2.BenchmarkRunCacheReader')
- def test_BenchmarkRunCacheReader_1(self, reader):
- """Test benchmarkrun set is split into 5 segments."""
-
- self.exp = self._make_fake_experiment(
- EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=9))
- schedv2 = Schedv2(self.exp)
- # We have 9 * 2 == 18 brs, we use 5 threads, each reading 4, 4, 4,
- # 4, 2 brs respectively.
- # Assert that BenchmarkRunCacheReader() is called 5 times.
- self.assertEquals(reader.call_count, 5)
- # reader.call_args_list[n] - nth call.
- # reader.call_args_list[n][0] - positioned args in nth call.
- # reader.call_args_list[n][0][1] - the 2nd arg in nth call,
- # that is 'br_list' in 'schedv2.BenchmarkRunCacheReader'.
- self.assertEquals(len(reader.call_args_list[0][0][1]), 4)
- self.assertEquals(len(reader.call_args_list[1][0][1]), 4)
- self.assertEquals(len(reader.call_args_list[2][0][1]), 4)
- self.assertEquals(len(reader.call_args_list[3][0][1]), 4)
- self.assertEquals(len(reader.call_args_list[4][0][1]), 2)
-
- @mock.patch('schedv2.BenchmarkRunCacheReader')
- def test_BenchmarkRunCacheReader_2(self, reader):
- """Test benchmarkrun set is split into 4 segments."""
-
- self.exp = self._make_fake_experiment(
- EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=8))
- schedv2 = Schedv2(self.exp)
- # We have 8 * 2 == 16 brs, we use 4 threads, each reading 4 brs.
- self.assertEquals(reader.call_count, 4)
- self.assertEquals(len(reader.call_args_list[0][0][1]), 4)
- self.assertEquals(len(reader.call_args_list[1][0][1]), 4)
- self.assertEquals(len(reader.call_args_list[2][0][1]), 4)
- self.assertEquals(len(reader.call_args_list[3][0][1]), 4)
-
- @mock.patch('schedv2.BenchmarkRunCacheReader')
- def test_BenchmarkRunCacheReader_3(self, reader):
- """Test benchmarkrun set is split into 2 segments."""
-
- self.exp = self._make_fake_experiment(
- EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=3))
- schedv2 = Schedv2(self.exp)
- # We have 3 * 2 == 6 brs, we use 2 threads.
- self.assertEquals(reader.call_count, 2)
- self.assertEquals(len(reader.call_args_list[0][0][1]), 3)
- self.assertEquals(len(reader.call_args_list[1][0][1]), 3)
-
- @mock.patch('schedv2.BenchmarkRunCacheReader')
- def test_BenchmarkRunCacheReader_4(self, reader):
- """Test benchmarkrun set is not splitted."""
-
- self.exp = self._make_fake_experiment(
- EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=1))
- schedv2 = Schedv2(self.exp)
- # We have 1 * 2 == 2 br, so only 1 instance.
- self.assertEquals(reader.call_count, 1)
- self.assertEquals(len(reader.call_args_list[0][0][1]), 2)
-
- def test_cachehit(self):
- """Test cache-hit and none-cache-hit brs are properly organized."""
-
- def MockReadCache(br):
- br.cache_hit = (br.label.name == 'image2')
-
- with mock.patch('benchmark_run.MockBenchmarkRun.ReadCache',
- new=MockReadCache) as f:
- # We have 2 * 30 brs, half of which are put into _cached_br_list.
- self.exp = self._make_fake_experiment(
- EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=30))
- schedv2 = Schedv2(self.exp)
- self.assertEquals(len(schedv2._cached_br_list), 30)
- # The non-cache-hit brs are put into Schedv2._label_brl_map.
- self.assertEquals(reduce(lambda a, x: a + len(x[1]),
- schedv2._label_brl_map.iteritems(), 0),
- 30)
-
- def test_nocachehit(self):
- """Test no cache-hit."""
-
- def MockReadCache(br):
- br.cache_hit = False
-
- with mock.patch('benchmark_run.MockBenchmarkRun.ReadCache',
- new=MockReadCache) as f:
- # We have 2 * 30 brs, none of which are put into _cached_br_list.
- self.exp = self._make_fake_experiment(
- EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=30))
- schedv2 = Schedv2(self.exp)
- self.assertEquals(len(schedv2._cached_br_list), 0)
- # The non-cache-hit brs are put into Schedv2._label_brl_map.
- self.assertEquals(reduce(lambda a, x: a + len(x[1]),
- schedv2._label_brl_map.iteritems(), 0),
- 60)
+ def MockIsReachable(cm):
+ return (cm.name != 'chromeos-daisy3.cros' and
+ cm.name != 'chromeos-daisy5.cros')
+
+ with mock.patch('machine_manager.MockCrosMachine.IsReachable',
+ new=MockIsReachable) as f:
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_1)
+ self.assertIn('chromeos-daisy1.cros', self.exp.remote)
+ self.assertIn('chromeos-daisy2.cros', self.exp.remote)
+ self.assertNotIn('chromeos-daisy3.cros', self.exp.remote)
+ self.assertIn('chromeos-daisy4.cros', self.exp.remote)
+ self.assertNotIn('chromeos-daisy5.cros', self.exp.remote)
+
+ for l in self.exp.labels:
+ if l.name == 'image2':
+ self.assertNotIn('chromeos-daisy5.cros', l.remote)
+ self.assertIn('chromeos-daisy4.cros', l.remote)
+ elif l.name == 'image1':
+ self.assertNotIn('chromeos-daisy3.cros', l.remote)
+
+ @mock.patch('schedv2.BenchmarkRunCacheReader')
+ def test_BenchmarkRunCacheReader_1(self, reader):
+ """Test benchmarkrun set is split into 5 segments."""
+
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
+ kraken_iterations=9))
+ schedv2 = Schedv2(self.exp)
+ # We have 9 * 2 == 18 brs, we use 5 threads, each reading 4, 4, 4,
+ # 4, 2 brs respectively.
+ # Assert that BenchmarkRunCacheReader() is called 5 times.
+ self.assertEquals(reader.call_count, 5)
+ # reader.call_args_list[n] - nth call.
+ # reader.call_args_list[n][0] - positioned args in nth call.
+ # reader.call_args_list[n][0][1] - the 2nd arg in nth call,
+ # that is 'br_list' in 'schedv2.BenchmarkRunCacheReader'.
+ self.assertEquals(len(reader.call_args_list[0][0][1]), 4)
+ self.assertEquals(len(reader.call_args_list[1][0][1]), 4)
+ self.assertEquals(len(reader.call_args_list[2][0][1]), 4)
+ self.assertEquals(len(reader.call_args_list[3][0][1]), 4)
+ self.assertEquals(len(reader.call_args_list[4][0][1]), 2)
+
+ @mock.patch('schedv2.BenchmarkRunCacheReader')
+ def test_BenchmarkRunCacheReader_2(self, reader):
+ """Test benchmarkrun set is split into 4 segments."""
+
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
+ kraken_iterations=8))
+ schedv2 = Schedv2(self.exp)
+ # We have 8 * 2 == 16 brs, we use 4 threads, each reading 4 brs.
+ self.assertEquals(reader.call_count, 4)
+ self.assertEquals(len(reader.call_args_list[0][0][1]), 4)
+ self.assertEquals(len(reader.call_args_list[1][0][1]), 4)
+ self.assertEquals(len(reader.call_args_list[2][0][1]), 4)
+ self.assertEquals(len(reader.call_args_list[3][0][1]), 4)
+
+ @mock.patch('schedv2.BenchmarkRunCacheReader')
+ def test_BenchmarkRunCacheReader_3(self, reader):
+ """Test benchmarkrun set is split into 2 segments."""
+
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
+ kraken_iterations=3))
+ schedv2 = Schedv2(self.exp)
+ # We have 3 * 2 == 6 brs, we use 2 threads.
+ self.assertEquals(reader.call_count, 2)
+ self.assertEquals(len(reader.call_args_list[0][0][1]), 3)
+ self.assertEquals(len(reader.call_args_list[1][0][1]), 3)
+
+ @mock.patch('schedv2.BenchmarkRunCacheReader')
+ def test_BenchmarkRunCacheReader_4(self, reader):
+ """Test benchmarkrun set is not splitted."""
+
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
+ kraken_iterations=1))
+ schedv2 = Schedv2(self.exp)
+ # We have 1 * 2 == 2 br, so only 1 instance.
+ self.assertEquals(reader.call_count, 1)
+ self.assertEquals(len(reader.call_args_list[0][0][1]), 2)
+
+ def test_cachehit(self):
+ """Test cache-hit and none-cache-hit brs are properly organized."""
+
+ def MockReadCache(br):
+ br.cache_hit = (br.label.name == 'image2')
+
+ with mock.patch('benchmark_run.MockBenchmarkRun.ReadCache',
+ new=MockReadCache) as f:
+ # We have 2 * 30 brs, half of which are put into _cached_br_list.
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
+ kraken_iterations=30))
+ schedv2 = Schedv2(self.exp)
+ self.assertEquals(len(schedv2._cached_br_list), 30)
+ # The non-cache-hit brs are put into Schedv2._label_brl_map.
+ self.assertEquals(
+ reduce(lambda a, x: a + len(x[1]), schedv2._label_brl_map.iteritems(),
+ 0), 30)
+
+ def test_nocachehit(self):
+ """Test no cache-hit."""
+
+ def MockReadCache(br):
+ br.cache_hit = False
+
+ with mock.patch('benchmark_run.MockBenchmarkRun.ReadCache',
+ new=MockReadCache) as f:
+ # We have 2 * 30 brs, none of which are put into _cached_br_list.
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
+ kraken_iterations=30))
+ schedv2 = Schedv2(self.exp)
+ self.assertEquals(len(schedv2._cached_br_list), 0)
+ # The non-cache-hit brs are put into Schedv2._label_brl_map.
+ self.assertEquals(
+ reduce(lambda a, x: a + len(x[1]), schedv2._label_brl_map.iteritems(),
+ 0), 60)
if __name__ == '__main__':
- test_flag.SetTestMode(True)
- unittest.main()
-
+ test_flag.SetTestMode(True)
+ unittest.main()
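
The call_args_list indexing that the comments above describe can be seen in a tiny standalone example (the mock object and its arguments are made up; only the standard mock API is used):

import mock

reader = mock.Mock()
reader('schedv2-instance', ['br1', 'br2', 'br3'])  # 1st call
reader('schedv2-instance', ['br4', 'br5'])         # 2nd call

# call_args_list[n] is the n-th call; [0] selects its positional-args tuple,
# so [n][0][1] is that call's second positional argument (the br_list).
assert len(reader.call_args_list[0][0][1]) == 3
assert len(reader.call_args_list[1][0][1]) == 2
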
diff --git a/crosperf/settings.py b/crosperf/settings.py
index 24613cf8..fe312c0f 100644
--- a/crosperf/settings.py
+++ b/crosperf/settings.py
@@ -1,5 +1,4 @@
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""Module to get the settings from experiment file."""
from __future__ import print_function
@@ -24,13 +23,13 @@ class Settings(object):
def AddField(self, field):
name = field.name
if name in self.fields:
- raise Exception("Field %s defined previously." % name)
+ raise Exception('Field %s defined previously.' % name)
self.fields[name] = field
def SetField(self, name, value, append=False):
if name not in self.fields:
- raise Exception("'%s' is not a valid field in '%s' settings"
- % (name, self.settings_type))
+ raise Exception("'%s' is not a valid field in '%s' settings" %
+ (name, self.settings_type))
if append:
self.fields[name].Append(value)
else:
@@ -64,17 +63,17 @@ class Settings(object):
"""Check that all required fields have been set."""
for name in self.fields:
if not self.fields[name].assigned and self.fields[name].required:
- raise Exception("Field %s is invalid." % name)
+ raise Exception('Field %s is invalid.' % name)
def GetXbuddyPath(self, path_str, board, chromeos_root, log_level):
- prefix = "remote"
+ prefix = 'remote'
l = logger.GetLogger()
- if path_str.find("trybot") < 0 and path_str.find(board) < 0:
- xbuddy_path = "%s/%s/%s" % (prefix, board, path_str)
+ if path_str.find('trybot') < 0 and path_str.find(board) < 0:
+ xbuddy_path = '%s/%s/%s' % (prefix, board, path_str)
else:
- xbuddy_path = "%s/%s" % (prefix, path_str)
+ xbuddy_path = '%s/%s' % (prefix, path_str)
image_downloader = ImageDownloader(l, log_level)
retval, image_path = image_downloader.Run(chromeos_root, xbuddy_path)
if retval != 0:
- raise Exception("Unable to find/download xbuddy image.")
+ raise Exception('Unable to find/download xbuddy image.')
return image_path
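
A sketch of the xbuddy path construction that GetXbuddyPath performs above, with the download step omitted (the helper name and example values are illustrative only):

def xbuddy_path(path_str, board, prefix='remote'):
  # Paths that already name 'trybot' or the board are used as-is under the
  # prefix; anything else is treated as relative to the board.
  if path_str.find('trybot') < 0 and path_str.find(board) < 0:
    return '%s/%s/%s' % (prefix, board, path_str)
  return '%s/%s' % (prefix, path_str)

print(xbuddy_path('latest-official', 'daisy'))
# -> remote/daisy/latest-official
print(xbuddy_path('trybot-daisy-release/R46-7390.0.0-b1234', 'daisy'))
# -> remote/trybot-daisy-release/R46-7390.0.0-b1234
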
diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py
index 885f7767..65cca80e 100644
--- a/crosperf/settings_factory.py
+++ b/crosperf/settings_factory.py
@@ -1,9 +1,7 @@
-#!/usr/bin/python
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Setting files for global, benchmark and labels."""
from field import BooleanField
@@ -15,158 +13,195 @@ from settings import Settings
class BenchmarkSettings(Settings):
+
def __init__(self, name):
- super(BenchmarkSettings, self).__init__(name, "benchmark")
- self.AddField(TextField("test_name",
- description="The name of the test to run."
- "Defaults to the name of the benchmark."))
- self.AddField(TextField("test_args",
- description="Arguments to be passed to the "
- "test."))
- self.AddField(IntegerField("iterations", default=1,
- description="Number of iterations to run the "
- "test."))
- self.AddField(TextField("suite", default="",
- description="The type of the benchmark"))
- self.AddField(IntegerField("retries", default=0,
- description="Number of times to retry a "
- "benchmark run."))
- self.AddField(BooleanField("run_local",
- description="Run benchmark harness on the DUT. "
- "Currently only compatible with the suite: "
- "telemetry_Crosperf.",
- required=False, default=True))
+ super(BenchmarkSettings, self).__init__(name, 'benchmark')
+ self.AddField(TextField('test_name',
+ description='The name of the test to run. '
+ 'Defaults to the name of the benchmark.'))
+ self.AddField(TextField('test_args',
+ description='Arguments to be passed to the '
+ 'test.'))
+ self.AddField(IntegerField('iterations',
+ default=1,
+ description='Number of iterations to run the '
+ 'test.'))
+ self.AddField(TextField('suite',
+ default='',
+ description='The type of the benchmark'))
+ self.AddField(IntegerField('retries',
+ default=0,
+ description='Number of times to retry a '
+ 'benchmark run.'))
+ self.AddField(BooleanField('run_local',
+ description='Run benchmark harness on the DUT. '
+ 'Currently only compatible with the suite: '
+ 'telemetry_Crosperf.',
+ required=False,
+ default=True))
class LabelSettings(Settings):
+
def __init__(self, name):
- super(LabelSettings, self).__init__(name, "label")
- self.AddField(TextField("chromeos_image", required=False,
- description="The path to the image to run tests "
+ super(LabelSettings, self).__init__(name, 'label')
+ self.AddField(TextField('chromeos_image',
+ required=False,
+ description='The path to the image to run tests '
"on, for local/custom-built images. See 'build' "
- "option for official or trybot images."))
- self.AddField(TextField("chromeos_root",
- description="The path to a chromeos checkout which "
- "contains a src/scripts directory. Defaults to "
- "the chromeos checkout which contains the "
- "chromeos_image."))
- self.AddField(ListField("remote", description=
- "A comma-separated list of ip's of chromeos"
- "devices to run experiments on."))
- self.AddField(TextField("image_args", required=False,
- default="",
- description="Extra arguments to pass to "
- "image_chromeos.py."))
- self.AddField(TextField("cache_dir", default="",
- description="The cache dir for this image."))
- self.AddField(TextField("compiler", default="gcc",
- description="The compiler used to build the "
- "ChromeOS image (gcc or llvm)."))
- self.AddField(TextField("chrome_src",
- description="The path to the source of chrome. "
- "This is used to run telemetry benchmarks. "
- "The default one is the src inside chroot.",
- required=False, default=""))
- self.AddField(TextField("build",
- description="The xbuddy specification for an "
- "official or trybot image to use for tests. "
+ 'option for official or trybot images.'))
+ self.AddField(TextField('chromeos_root',
+ description='The path to a chromeos checkout which '
+ 'contains a src/scripts directory. Defaults to '
+ 'the chromeos checkout which contains the '
+ 'chromeos_image.'))
+ self.AddField(
+ ListField('remote',
+ description="A comma-separated list of ip's of chromeos"
+ 'devices to run experiments on.'))
+ self.AddField(TextField('image_args',
+ required=False,
+ default='',
+ description='Extra arguments to pass to '
+ 'image_chromeos.py.'))
+ self.AddField(TextField('cache_dir',
+ default='',
+ description='The cache dir for this image.'))
+ self.AddField(TextField('compiler',
+ default='gcc',
+ description='The compiler used to build the '
+ 'ChromeOS image (gcc or llvm).'))
+ self.AddField(TextField('chrome_src',
+ description='The path to the source of chrome. '
+ 'This is used to run telemetry benchmarks. '
+ 'The default one is the src inside chroot.',
+ required=False,
+ default=''))
+ self.AddField(TextField('build',
+ description='The xbuddy specification for an '
+ 'official or trybot image to use for tests. '
"'/remote' is assumed, and the board is given "
"elsewhere, so omit the '/remote/<board>/' xbuddy"
- "prefix.",
- required=False, default=""))
+ 'prefix.',
+ required=False,
+ default=''))
class GlobalSettings(Settings):
+
def __init__(self, name):
- super(GlobalSettings, self).__init__(name, "global")
- self.AddField(TextField("name",
- description="The name of the experiment. Just an "
- "identifier."))
- self.AddField(TextField("board", description="The target "
- "board for running experiments on, e.g. x86-alex."))
- self.AddField(ListField("remote",
+ super(GlobalSettings, self).__init__(name, 'global')
+ self.AddField(TextField('name',
+ description='The name of the experiment. Just an '
+ 'identifier.'))
+ self.AddField(TextField('board',
+ description='The target '
+ 'board for running experiments on, e.g. x86-alex.'))
+ self.AddField(ListField('remote',
description="A comma-separated list of ip's of "
- "chromeos devices to run experiments on."))
- self.AddField(BooleanField("rerun_if_failed", description="Whether to "
- "re-run failed test runs or not.",
+ 'chromeos devices to run experiments on.'))
+ self.AddField(BooleanField('rerun_if_failed',
+ description='Whether to '
+ 're-run failed test runs or not.',
default=False))
- self.AddField(BooleanField("rm_chroot_tmp", default=False,
- description="Whether to remove the test_that"
- "result in the chroot"))
- self.AddField(ListField("email", description="Space-seperated"
- "list of email addresses to send email to."))
- self.AddField(BooleanField("rerun", description="Whether to ignore the "
- "cache and for tests to be re-run.",
+ self.AddField(BooleanField('rm_chroot_tmp',
+ default=False,
+ description='Whether to remove the test_that '
+ 'result in the chroot'))
+ self.AddField(ListField('email',
+ description='Space-separated '
+ 'list of email addresses to send email to.'))
+ self.AddField(BooleanField('rerun',
+ description='Whether to ignore the '
+ 'cache and force tests to be re-run.',
default=False))
- self.AddField(BooleanField("same_specs", default=True,
- description="Ensure cached runs are run on the "
- "same kind of devices which are specified as a "
- "remote."))
- self.AddField(BooleanField("same_machine", default=False,
- description="Ensure cached runs are run on the "
- "exact the same remote"))
- self.AddField(BooleanField("use_file_locks", default=False,
- description="Whether to use the file locks "
- "mechanism (deprecated) instead of the AFE "
- "server lock mechanism."))
- self.AddField(IntegerField("iterations", default=1,
- description="Number of iterations to run all "
- "tests."))
- self.AddField(TextField("chromeos_root",
- description="The path to a chromeos checkout which "
- "contains a src/scripts directory. Defaults to "
- "the chromeos checkout which contains the "
- "chromeos_image."))
- self.AddField(TextField("logging_level", default="average",
- description="The level of logging desired. "
+ self.AddField(BooleanField('same_specs',
+ default=True,
+ description='Ensure cached runs are run on the '
+ 'same kind of devices which are specified as a '
+ 'remote.'))
+ self.AddField(BooleanField('same_machine',
+ default=False,
+ description='Ensure cached runs are run on the '
+ 'exact same remote.'))
+ self.AddField(BooleanField('use_file_locks',
+ default=False,
+ description='Whether to use the file locks '
+ 'mechanism (deprecated) instead of the AFE '
+ 'server lock mechanism.'))
+ self.AddField(IntegerField('iterations',
+ default=1,
+ description='Number of iterations to run all '
+ 'tests.'))
+ self.AddField(TextField('chromeos_root',
+ description='The path to a chromeos checkout which '
+ 'contains a src/scripts directory. Defaults to '
+ 'the chromeos checkout which contains the '
+ 'chromeos_image.'))
+ self.AddField(TextField('logging_level',
+ default='average',
+ description='The level of logging desired. '
"Options are 'quiet', 'average', and 'verbose'."))
- self.AddField(IntegerField("acquire_timeout", default=0,
- description="Number of seconds to wait for "
- "machine before exit if all the machines in "
- "the experiment file are busy. Default is 0"))
- self.AddField(TextField("perf_args", default="",
- description="The optional profile command. It "
- "enables perf commands to record perforamance "
- "related counters. It must start with perf "
- "command record or stat followed by arguments."))
- self.AddField(TextField("cache_dir", default="",
- description="The abs path of cache dir. "
- "Default is /home/$(whoami)/cros_scratch."))
- self.AddField(BooleanField("cache_only", default=False,
- description="Whether to use only cached "
- "results (do not rerun failed tests)."))
- self.AddField(BooleanField("no_email", default=False,
- description="Whether to disable the email to "
- "user after crosperf finishes."))
- self.AddField(BooleanField("json_report", default=False,
- description="Whether to generate a json version"
- " of the report, for archiving."))
- self.AddField(BooleanField("show_all_results", default=False,
- description="When running Telemetry tests, "
- "whether to all the results, instead of just "
- "the default (summary) results."))
- self.AddField(TextField("share_cache", default="",
- description="Path to alternate cache whose data "
- "you want to use. It accepts multiples directories"
+ self.AddField(IntegerField('acquire_timeout',
+ default=0,
+                               description='Number of seconds to wait for a '
+                               'machine before exiting if all the machines in '
+                               'the experiment file are busy. Default is 0.'))
+ self.AddField(TextField('perf_args',
+ default='',
+ description='The optional profile command. It '
+                            'enables perf commands to record performance '
+ 'related counters. It must start with perf '
+ 'command record or stat followed by arguments.'))
+ self.AddField(TextField('cache_dir',
+ default='',
+                            description='The absolute path of the cache dir. '
+ 'Default is /home/$(whoami)/cros_scratch.'))
+ self.AddField(BooleanField('cache_only',
+ default=False,
+ description='Whether to use only cached '
+ 'results (do not rerun failed tests).'))
+ self.AddField(BooleanField('no_email',
+ default=False,
+ description='Whether to disable the email to '
+ 'user after crosperf finishes.'))
+ self.AddField(BooleanField('json_report',
+ default=False,
+ description='Whether to generate a json version'
+ ' of the report, for archiving.'))
+ self.AddField(BooleanField('show_all_results',
+ default=False,
+ description='When running Telemetry tests, '
+                               'whether to show all the results, instead of just '
+ 'the default (summary) results.'))
+ self.AddField(TextField('share_cache',
+ default='',
+ description='Path to alternate cache whose data '
+                            'you want to use. It accepts multiple directories'
" separated by a \",\""))
- self.AddField(TextField("results_dir", default="",
- description="The results dir"))
- self.AddField(TextField("locks_dir", default="",
- description="An alternate directory to use for "
- "storing/checking machine locks. Using this field "
- "automatically sets use_file_locks to True.\n"
- "WARNING: If you use your own locks directory, "
- "there is no guarantee that someone else might not "
- "hold a lock on the same machine in a different "
- "locks directory."))
- self.AddField(TextField("chrome_src",
- description="The path to the source of chrome. "
- "This is used to run telemetry benchmarks. "
- "The default one is the src inside chroot.",
- required=False, default=""))
- self.AddField(IntegerField("retries", default=0,
- description="Number of times to retry a "
- "benchmark run."))
+ self.AddField(TextField('results_dir',
+ default='',
+ description='The results dir'))
+ self.AddField(TextField('locks_dir',
+ default='',
+ description='An alternate directory to use for '
+ 'storing/checking machine locks. Using this field '
+ 'automatically sets use_file_locks to True.\n'
+ 'WARNING: If you use your own locks directory, '
+ 'there is no guarantee that someone else might not '
+ 'hold a lock on the same machine in a different '
+ 'locks directory.'))
+ self.AddField(TextField('chrome_src',
+ description='The path to the source of chrome. '
+ 'This is used to run telemetry benchmarks. '
+ 'The default one is the src inside chroot.',
+ required=False,
+ default=''))
+ self.AddField(IntegerField('retries',
+ default=0,
+ description='Number of times to retry a '
+ 'benchmark run.'))
+
class SettingsFactory(object):
"""Factory class for building different types of Settings objects.
@@ -177,11 +212,11 @@ class SettingsFactory(object):
"""
def GetSettings(self, name, settings_type):
- if settings_type == "label" or not settings_type:
+ if settings_type == 'label' or not settings_type:
return LabelSettings(name)
- if settings_type == "global":
+ if settings_type == 'global':
return GlobalSettings(name)
- if settings_type == "benchmark":
+ if settings_type == 'benchmark':
return BenchmarkSettings(name)
raise Exception("Invalid settings type: '%s'." % settings_type)
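
For orientation, a minimal sketch of the factory/field API that the reformatted GlobalSettings above exposes. It assumes the snippet is run from the crosperf directory so the modules import; the settings name 'experiment' and the field values are illustrative only, mirroring the unit tests further below.

    import settings_factory

    # Build a GlobalSettings object carrying all the fields declared above.
    gs = settings_factory.SettingsFactory().GetSettings('experiment', 'global')
    gs.SetField('iterations', 3)      # parsed and stored by IntegerField
    print(gs.GetField('iterations'))  # -> 3
    print(gs.GetField('cache_dir'))   # -> '' (the default declared above)
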
diff --git a/crosperf/settings_factory_unittest.py b/crosperf/settings_factory_unittest.py
index 4d3ee342..5538e8cc 100755
--- a/crosperf/settings_factory_unittest.py
+++ b/crosperf/settings_factory_unittest.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
-
"""Unittest for crosperf."""
import os
@@ -14,10 +13,11 @@ import settings
from cros_utils import command_executer
from cros_utils import logger
+
class BenchmarkSettingsTest(unittest.TestCase):
def test_init(self):
- res = settings_factory.BenchmarkSettings("b_settings")
+ res = settings_factory.BenchmarkSettings('b_settings')
self.assertIsNotNone(res)
self.assertEqual(len(res.fields), 6)
self.assertEqual(res.GetField('test_name'), '')
@@ -25,10 +25,11 @@ class BenchmarkSettingsTest(unittest.TestCase):
self.assertEqual(res.GetField('iterations'), 1)
self.assertEqual(res.GetField('suite'), '')
+
class LabelSettingsTest(unittest.TestCase):
def test_init(self):
- res = settings_factory.LabelSettings("l_settings")
+ res = settings_factory.LabelSettings('l_settings')
self.assertIsNotNone(res)
self.assertEqual(len(res.fields), 8)
self.assertEqual(res.GetField('chromeos_image'), '')
@@ -43,7 +44,7 @@ class LabelSettingsTest(unittest.TestCase):
class GlobalSettingsTest(unittest.TestCase):
def test_init(self):
- res = settings_factory.GlobalSettings("g_settings")
+ res = settings_factory.GlobalSettings('g_settings')
self.assertIsNotNone(res)
self.assertEqual(len(res.fields), 25)
self.assertEqual(res.GetField('name'), '')
@@ -72,24 +73,24 @@ class GlobalSettingsTest(unittest.TestCase):
class SettingsFactoryTest(unittest.TestCase):
def test_get_settings(self):
- self.assertRaises (Exception, settings_factory.SettingsFactory.GetSettings,
- 'global', 'bad_type')
-
+ self.assertRaises(Exception, settings_factory.SettingsFactory.GetSettings,
+ 'global', 'bad_type')
- l_settings = settings_factory.SettingsFactory().GetSettings ('label', 'label')
+ l_settings = settings_factory.SettingsFactory().GetSettings('label',
+ 'label')
self.assertIsInstance(l_settings, settings_factory.LabelSettings)
self.assertEqual(len(l_settings.fields), 8)
- b_settings = settings_factory.SettingsFactory().GetSettings ('benchmark',
- 'benchmark')
+ b_settings = settings_factory.SettingsFactory().GetSettings('benchmark',
+ 'benchmark')
self.assertIsInstance(b_settings, settings_factory.BenchmarkSettings)
self.assertEqual(len(b_settings.fields), 6)
- g_settings = settings_factory.SettingsFactory().GetSettings ('global',
- 'global')
+ g_settings = settings_factory.SettingsFactory().GetSettings('global',
+ 'global')
self.assertIsInstance(g_settings, settings_factory.GlobalSettings)
self.assertEqual(len(g_settings.fields), 25)
-if __name__ == "__main__":
+if __name__ == '__main__':
unittest.main()
diff --git a/crosperf/settings_unittest.py b/crosperf/settings_unittest.py
index 2ce5f582..e5ccfd46 100755
--- a/crosperf/settings_unittest.py
+++ b/crosperf/settings_unittest.py
@@ -1,7 +1,6 @@
#!/usr/bin/python2
#
# Copyright 2014 Google Inc. All Rights Reserved.
-
"""unittest for settings."""
from __future__ import print_function
@@ -18,6 +17,7 @@ import download_images
from cros_utils import logger
+
class TestSettings(unittest.TestCase):
"""setting test class."""
@@ -29,38 +29,42 @@ class TestSettings(unittest.TestCase):
self.assertEqual(self.settings.settings_type, 'global')
self.assertIsNone(self.settings.parent)
-
def test_set_parent_settings(self):
self.assertIsNone(self.settings.parent)
- settings_parent = {'fake_parent_entry' : 0}
+ settings_parent = {'fake_parent_entry': 0}
self.settings.SetParentSettings(settings_parent)
self.assertIsNotNone(self.settings.parent)
self.assertEqual(type(self.settings.parent), dict)
self.assertEqual(self.settings.parent, settings_parent)
-
def test_add_field(self):
self.assertEqual(self.settings.fields, {})
- self.settings.AddField(IntegerField("iterations", default=1, required=False,
- description="Number of iterations to "
- "run the test."))
+ self.settings.AddField(IntegerField('iterations',
+ default=1,
+ required=False,
+ description='Number of iterations to '
+ 'run the test.'))
self.assertEqual(len(self.settings.fields), 1)
# Adding the same field twice raises an exception.
- self.assertRaises(Exception, self.settings.AddField,
- (IntegerField("iterations", default=1, required=False,
- description="Number of iterations to run "
- "the test.")))
+ self.assertRaises(Exception,
+ self.settings.AddField,
+ (IntegerField('iterations',
+ default=1,
+ required=False,
+ description='Number of iterations to run '
+ 'the test.')))
res = self.settings.fields['iterations']
self.assertIsInstance(res, IntegerField)
self.assertEqual(res.Get(), 1)
-
def test_set_field(self):
self.assertEqual(self.settings.fields, {})
- self.settings.AddField(IntegerField(
- "iterations", default=1, required=False,
- description="Number of iterations to run the "
- "test."))
+ self.settings.AddField(
+ IntegerField('iterations',
+ default=1,
+ required=False,
+ description='Number of iterations to run the '
+ 'test.'))
res = self.settings.fields['iterations']
self.assertEqual(res.Get(), 1)
@@ -69,13 +73,15 @@ class TestSettings(unittest.TestCase):
self.assertEqual(res.Get(), 10)
# Setting a field that's not there raises an exception.
- self.assertRaises(Exception, self.settings.SetField,
- 'remote', 'lumpy1.cros')
-
- self.settings.AddField(ListField("remote", default=[], description=
- "A comma-separated list of ip's of "
- "chromeos devices to run "
- "experiments on."))
+ self.assertRaises(Exception, self.settings.SetField, 'remote',
+ 'lumpy1.cros')
+
+ self.settings.AddField(
+ ListField('remote',
+ default=[],
+ description="A comma-separated list of ip's of "
+ 'chromeos devices to run '
+ 'experiments on.'))
self.assertEqual(type(self.settings.fields), dict)
self.assertEqual(len(self.settings.fields), 2)
res = self.settings.fields['remote']
@@ -85,15 +91,15 @@ class TestSettings(unittest.TestCase):
res = self.settings.fields['remote']
self.assertEqual(res.Get(), ['lumpy1.cros', 'lumpy2.cros'])
-
def test_get_field(self):
# Getting a field that's not there raises an exception.
self.assertRaises(Exception, self.settings.GetField, 'iterations')
# Getting a required field that hasn't been assigned raises an exception.
- self.settings.AddField(IntegerField("iterations", required=True,
- description="Number of iterations to "
- "run the test."))
+ self.settings.AddField(IntegerField('iterations',
+ required=True,
+ description='Number of iterations to '
+ 'run the test.'))
self.assertIsNotNone(self.settings.fields['iterations'])
self.assertRaises(Exception, self.settings.GetField, 'iterations')
@@ -102,7 +108,6 @@ class TestSettings(unittest.TestCase):
res = self.settings.GetField('iterations')
self.assertEqual(res, 5)
-
def test_inherit(self):
parent_settings = settings_factory.SettingsFactory().GetSettings('global',
'global')
@@ -119,13 +124,12 @@ class TestSettings(unittest.TestCase):
label_settings.Inherit()
self.assertEqual(label_settings.GetField('chromeos_root'), '/tmp/chromeos')
-
def test_override(self):
- self.settings.AddField(ListField("email", default=[],
- description="Space-seperated"
- "list of email addresses to send "
- "email to."))
-
+ self.settings.AddField(ListField('email',
+ default=[],
+                                     description='Space-separated '
+ 'list of email addresses to send '
+ 'email to.'))
global_settings = settings_factory.SettingsFactory().GetSettings('global',
'global')
@@ -140,20 +144,23 @@ class TestSettings(unittest.TestCase):
res = self.settings.GetField('email')
self.assertEqual(res, ['john.doe@google.com', 'jane.smith@google.com'])
-
def test_validate(self):
- self.settings.AddField(IntegerField("iterations", required=True,
- description="Number of iterations "
- "to run the test."))
- self.settings.AddField(ListField("remote", default=[], required=True,
- description="A comma-separated list "
+ self.settings.AddField(IntegerField('iterations',
+ required=True,
+ description='Number of iterations '
+ 'to run the test.'))
+ self.settings.AddField(ListField('remote',
+ default=[],
+ required=True,
+ description='A comma-separated list '
"of ip's of chromeos "
- "devices to run experiments on."))
- self.settings.AddField(ListField("email", default=[],
- description="Space-seperated"
- "list of email addresses to "
- "send email to."))
+ 'devices to run experiments on.'))
+ self.settings.AddField(ListField('email',
+ default=[],
+                                     description='Space-separated '
+ 'list of email addresses to '
+ 'send email to.'))
# 'required' fields have not been assigned; should raise an exception.
self.assertRaises(Exception, self.settings.Validate)
@@ -167,7 +174,6 @@ class TestSettings(unittest.TestCase):
@mock.patch.object(download_images, 'ImageDownloader')
def test_get_xbuddy_path(self, mock_downloader, mock_run, mock_logger):
-
mock_run.return_value = [0, 'fake_xbuddy_translation']
mock_downloader.Run = mock_run
board = 'lumpy'
@@ -182,23 +188,21 @@ class TestSettings(unittest.TestCase):
self.assertEqual(mock_run.call_count, 1)
self.assertEqual(mock_run.call_args_list[0][0],
('/tmp/chromeos',
- 'remote/trybot-lumpy-paladin/R34-5417.0.0-b1506',))
-
+ 'remote/trybot-lumpy-paladin/R34-5417.0.0-b1506'))
mock_run.reset_mock()
self.settings.GetXbuddyPath(official_str, board, chromeos_root, log_level)
self.assertEqual(mock_run.call_count, 1)
self.assertEqual(mock_run.call_args_list[0][0],
('/tmp/chromeos',
- 'remote/lumpy-release/R34-5417.0.0',))
-
+ 'remote/lumpy-release/R34-5417.0.0'))
mock_run.reset_mock()
self.settings.GetXbuddyPath(xbuddy_str, board, chromeos_root, log_level)
self.assertEqual(mock_run.call_count, 1)
self.assertEqual(mock_run.call_args_list[0][0],
('/tmp/chromeos',
- 'remote/lumpy/latest-dev',))
+ 'remote/lumpy/latest-dev'))
mock_run.return_value = [1, 'fake_xbuddy_translation']
self.assertRaises(Exception, self.settings.GetXbuddyPath, xbuddy_str, board,
@@ -206,5 +210,6 @@ class TestSettings(unittest.TestCase):
if mock_logger:
return
-if __name__ == "__main__":
+
+if __name__ == '__main__':
unittest.main()
diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
index 4c94de20..48ef97a5 100644
--- a/crosperf/suite_runner.py
+++ b/crosperf/suite_runner.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -14,7 +13,8 @@ import test_flag
TEST_THAT_PATH = '/usr/bin/test_that'
CHROME_MOUNT_DIR = '/tmp/chrome_root'
-def GetProfilerArgs (profiler_args):
+
+def GetProfilerArgs(profiler_args):
# Remove "--" from in front of profiler args.
args_list = shlex.split(profiler_args)
new_list = []
@@ -27,68 +27,74 @@ def GetProfilerArgs (profiler_args):
# Remove "perf_options=" from middle of profiler args.
new_list = []
for arg in args_list:
- idx = arg.find("perf_options=")
+ idx = arg.find('perf_options=')
if idx != -1:
prefix = arg[0:idx]
- suffix = arg[idx + len("perf_options=") + 1 : -1]
+ suffix = arg[idx + len('perf_options=') + 1:-1]
new_arg = prefix + "'" + suffix + "'"
new_list.append(new_arg)
else:
new_list.append(arg)
args_list = new_list
- return " ".join(args_list)
+ return ' '.join(args_list)
class SuiteRunner(object):
""" This defines the interface from crosperf to test script.
"""
- def __init__(self, logger_to_use=None, log_level="verbose", cmd_exec=None,
+ def __init__(self,
+ logger_to_use=None,
+ log_level='verbose',
+ cmd_exec=None,
cmd_term=None):
self._logger = logger_to_use
self.log_level = log_level
- self._ce = cmd_exec or command_executer.GetCommandExecuter(self._logger,
- log_level=self.log_level)
+ self._ce = cmd_exec or command_executer.GetCommandExecuter(
+ self._logger,
+ log_level=self.log_level)
self._ct = cmd_term or command_executer.CommandTerminator()
def Run(self, machine, label, benchmark, test_args, profiler_args):
for i in range(0, benchmark.retries + 1):
self.PinGovernorExecutionFrequencies(machine, label.chromeos_root)
- if benchmark.suite == "telemetry":
+ if benchmark.suite == 'telemetry':
ret_tup = self.Telemetry_Run(machine, label, benchmark, profiler_args)
- elif benchmark.suite == "telemetry_Crosperf":
+ elif benchmark.suite == 'telemetry_Crosperf':
ret_tup = self.Telemetry_Crosperf_Run(machine, label, benchmark,
test_args, profiler_args)
else:
ret_tup = self.Test_That_Run(machine, label, benchmark, test_args,
profiler_args)
if ret_tup[0] != 0:
- self._logger.LogOutput("benchmark %s failed. Retries left: %s"
- % (benchmark.name, benchmark.retries - i))
+ self._logger.LogOutput('benchmark %s failed. Retries left: %s' %
+ (benchmark.name, benchmark.retries - i))
elif i > 0:
- self._logger.LogOutput("benchmark %s succeded after %s retries"
- % (benchmark.name, i))
+        self._logger.LogOutput('benchmark %s succeeded after %s retries' %
+ (benchmark.name, i))
break
else:
- self._logger.LogOutput("benchmark %s succeded on first try"
- % benchmark.name)
+        self._logger.LogOutput('benchmark %s succeeded on first try' %
+ benchmark.name)
break
return ret_tup
def GetHighestStaticFrequency(self, machine_name, chromeos_root):
""" Gets the highest static frequency for the specified machine
"""
- get_avail_freqs = ("cd /sys/devices/system/cpu/cpu0/cpufreq/; "
- "if [[ -e scaling_available_frequencies ]]; then "
- " cat scaling_available_frequencies; "
- "else "
- " cat scaling_max_freq ; "
- "fi")
+ get_avail_freqs = ('cd /sys/devices/system/cpu/cpu0/cpufreq/; '
+ 'if [[ -e scaling_available_frequencies ]]; then '
+ ' cat scaling_available_frequencies; '
+ 'else '
+ ' cat scaling_max_freq ; '
+ 'fi')
ret, freqs_str, _ = self._ce.CrosRunCommandWOutput(
- get_avail_freqs, machine=machine_name, chromeos_root=chromeos_root)
- self._logger.LogFatalIf(ret, "Could not get available frequencies "
- "from machine: %s" % machine_name)
+ get_avail_freqs,
+ machine=machine_name,
+ chromeos_root=chromeos_root)
+ self._logger.LogFatalIf(ret, 'Could not get available frequencies '
+ 'from machine: %s' % machine_name)
freqs = freqs_str.split()
# We need to make sure that the frequencies are sorted in decreasing
# order
@@ -99,7 +105,7 @@ class SuiteRunner(object):
if len(freqs) == 1:
return freqs[0]
# The dynamic frequency ends with a "1000". So, ignore it if found.
- if freqs[0].endswith("1000"):
+ if freqs[0].endswith('1000'):
return freqs[1]
else:
return freqs[0]
@@ -108,95 +114,93 @@ class SuiteRunner(object):
""" Set min and max frequencies to max static frequency
"""
highest_freq = self.GetHighestStaticFrequency(machine_name, chromeos_root)
- BASH_FOR = "for f in {list}; do {body}; done"
- CPUFREQ_DIRS = "/sys/devices/system/cpu/cpu*/cpufreq/"
- change_max_freq = BASH_FOR.format(list=CPUFREQ_DIRS + "scaling_max_freq",
- body="echo %s > $f" % highest_freq)
- change_min_freq = BASH_FOR.format(list=CPUFREQ_DIRS + "scaling_min_freq",
- body="echo %s > $f" % highest_freq)
- change_perf_gov = BASH_FOR.format(list=CPUFREQ_DIRS + "scaling_governor",
- body="echo performance > $f")
- if self.log_level == "average":
- self._logger.LogOutput("Pinning governor execution frequencies for %s"
- % machine_name)
- ret = self._ce.CrosRunCommand(" && ".join(("set -e ",
- change_max_freq,
- change_min_freq,
- change_perf_gov)),
+ BASH_FOR = 'for f in {list}; do {body}; done'
+ CPUFREQ_DIRS = '/sys/devices/system/cpu/cpu*/cpufreq/'
+ change_max_freq = BASH_FOR.format(list=CPUFREQ_DIRS + 'scaling_max_freq',
+ body='echo %s > $f' % highest_freq)
+ change_min_freq = BASH_FOR.format(list=CPUFREQ_DIRS + 'scaling_min_freq',
+ body='echo %s > $f' % highest_freq)
+ change_perf_gov = BASH_FOR.format(list=CPUFREQ_DIRS + 'scaling_governor',
+ body='echo performance > $f')
+ if self.log_level == 'average':
+ self._logger.LogOutput('Pinning governor execution frequencies for %s' %
+ machine_name)
+ ret = self._ce.CrosRunCommand(' && '.join((
+ 'set -e ', change_max_freq, change_min_freq, change_perf_gov)),
machine=machine_name,
chromeos_root=chromeos_root)
- self._logger.LogFatalIf(ret, "Could not pin frequencies on machine: %s"
- % machine_name)
+ self._logger.LogFatalIf(ret, 'Could not pin frequencies on machine: %s' %
+ machine_name)
def RebootMachine(self, machine_name, chromeos_root):
- command = "reboot && exit"
- self._ce.CrosRunCommand(command, machine=machine_name,
- chromeos_root=chromeos_root)
+ command = 'reboot && exit'
+ self._ce.CrosRunCommand(command,
+ machine=machine_name,
+ chromeos_root=chromeos_root)
time.sleep(60)
# Whenever we reboot the machine, we need to restore the governor settings.
self.PinGovernorExecutionFrequencies(machine_name, chromeos_root)
def Test_That_Run(self, machine, label, benchmark, test_args, profiler_args):
"""Run the test_that test.."""
- options = ""
+ options = ''
if label.board:
- options += " --board=%s" % label.board
+ options += ' --board=%s' % label.board
if test_args:
- options += " %s" % test_args
+ options += ' %s' % test_args
if profiler_args:
- self._logger.LogFatal("test_that does not support profiler.")
- command = "rm -rf /usr/local/autotest/results/*"
- self._ce.CrosRunCommand(command, machine=machine,
+ self._logger.LogFatal('test_that does not support profiler.')
+ command = 'rm -rf /usr/local/autotest/results/*'
+ self._ce.CrosRunCommand(command,
+ machine=machine,
chromeos_root=label.chromeos_root)
# We do this because some tests leave the machine in weird states.
# Rebooting between iterations has proven to help with this.
self.RebootMachine(machine, label.chromeos_root)
- command = (("%s --autotest_dir ~/trunk/src/third_party/autotest/files --fast "
- "%s %s %s") %
- (TEST_THAT_PATH, options, machine, benchmark.test_name))
- if self.log_level != "verbose":
- self._logger.LogOutput("Running test.")
- self._logger.LogOutput("CMD: %s" % command)
+ command = (
+ ('%s --autotest_dir ~/trunk/src/third_party/autotest/files --fast '
+ '%s %s %s') % (TEST_THAT_PATH, options, machine, benchmark.test_name))
+ if self.log_level != 'verbose':
+ self._logger.LogOutput('Running test.')
+ self._logger.LogOutput('CMD: %s' % command)
# Use --no-ns-pid so that cros_sdk does not create a different
# process namespace and we can kill process created easily by
# their process group.
- return self._ce.ChrootRunCommandWOutput(
- label.chromeos_root, command, command_terminator=self._ct,
- cros_sdk_options="--no-ns-pid")
-
- def RemoveTelemetryTempFile (self, machine, chromeos_root):
- filename = "telemetry@%s" % machine
- fullname = os.path.join (chromeos_root,
- "chroot",
- "tmp",
- filename)
+ return self._ce.ChrootRunCommandWOutput(label.chromeos_root,
+ command,
+ command_terminator=self._ct,
+ cros_sdk_options='--no-ns-pid')
+
+ def RemoveTelemetryTempFile(self, machine, chromeos_root):
+ filename = 'telemetry@%s' % machine
+ fullname = os.path.join(chromeos_root, 'chroot', 'tmp', filename)
if os.path.exists(fullname):
- os.remove(fullname)
+ os.remove(fullname)
- def Telemetry_Crosperf_Run (self, machine, label, benchmark, test_args,
- profiler_args):
+ def Telemetry_Crosperf_Run(self, machine, label, benchmark, test_args,
+ profiler_args):
if not os.path.isdir(label.chrome_src):
- self._logger.LogFatal("Cannot find chrome src dir to"
- " run telemetry: %s" % label.chrome_src)
+ self._logger.LogFatal('Cannot find chrome src dir to'
+ ' run telemetry: %s' % label.chrome_src)
# Check for and remove temporary file that may have been left by
# previous telemetry runs (and which might prevent this run from
# working).
- self.RemoveTelemetryTempFile (machine, label.chromeos_root)
+ self.RemoveTelemetryTempFile(machine, label.chromeos_root)
# For telemetry runs, we can use the autotest copy from the source
# location. No need to have one under /build/<board>.
autotest_dir_arg = '--autotest_dir ~/trunk/src/third_party/autotest/files'
- profiler_args = GetProfilerArgs (profiler_args)
- fast_arg = ""
+ profiler_args = GetProfilerArgs(profiler_args)
+ fast_arg = ''
if not profiler_args:
# --fast works unless we are doing profiling (autotest limitation).
# --fast avoids unnecessary copies of syslogs.
- fast_arg = "--fast"
- args_string = ""
+ fast_arg = '--fast'
+ args_string = ''
if test_args:
# Strip double quotes off args (so we can wrap them in single
# quotes, to pass through to Telemetry).
@@ -205,68 +209,62 @@ class SuiteRunner(object):
args_string = "test_args='%s'" % test_args
cmd = ('{} {} {} --board={} --args="{} run_local={} test={} '
- '{}" {} telemetry_Crosperf'.format(TEST_THAT_PATH,
- autotest_dir_arg,
- fast_arg,
- label.board,
- args_string,
- benchmark.run_local,
- benchmark.test_name,
- profiler_args,
- machine))
+ '{}" {} telemetry_Crosperf'.format(
+ TEST_THAT_PATH, autotest_dir_arg, fast_arg, label.board,
+ args_string, benchmark.run_local, benchmark.test_name,
+ profiler_args, machine))
# Use --no-ns-pid so that cros_sdk does not create a different
# process namespace and we can kill process created easily by their
# process group.
- chrome_root_options = ("--no-ns-pid "
- "--chrome_root={} --chrome_root_mount={} "
+ chrome_root_options = ('--no-ns-pid '
+ '--chrome_root={} --chrome_root_mount={} '
"FEATURES=\"-usersandbox\" "
- "CHROME_ROOT={}".format(label.chrome_src,
- CHROME_MOUNT_DIR,
- CHROME_MOUNT_DIR))
- if self.log_level != "verbose":
- self._logger.LogOutput("Running test.")
- self._logger.LogOutput("CMD: %s" % cmd)
+ 'CHROME_ROOT={}'.format(label.chrome_src,
+ CHROME_MOUNT_DIR,
+ CHROME_MOUNT_DIR))
+ if self.log_level != 'verbose':
+ self._logger.LogOutput('Running test.')
+ self._logger.LogOutput('CMD: %s' % cmd)
return self._ce.ChrootRunCommandWOutput(
- label.chromeos_root, cmd, command_terminator=self._ct,
+ label.chromeos_root,
+ cmd,
+ command_terminator=self._ct,
cros_sdk_options=chrome_root_options)
-
def Telemetry_Run(self, machine, label, benchmark, profiler_args):
- telemetry_run_path = ""
+ telemetry_run_path = ''
if not os.path.isdir(label.chrome_src):
- self._logger.LogFatal("Cannot find chrome src dir to"
- " run telemetry.")
+ self._logger.LogFatal('Cannot find chrome src dir to' ' run telemetry.')
else:
- telemetry_run_path = os.path.join(label.chrome_src, "src/tools/perf")
+ telemetry_run_path = os.path.join(label.chrome_src, 'src/tools/perf')
if not os.path.exists(telemetry_run_path):
- self._logger.LogFatal("Cannot find %s directory." % telemetry_run_path)
+ self._logger.LogFatal('Cannot find %s directory.' % telemetry_run_path)
if profiler_args:
- self._logger.LogFatal("Telemetry does not support the perf profiler.")
+ self._logger.LogFatal('Telemetry does not support the perf profiler.')
# Check for and remove temporary file that may have been left by
# previous telemetry runs (and which might prevent this run from
# working).
if not test_flag.GetTestMode():
- self.RemoveTelemetryTempFile (machine, label.chromeos_root)
-
- rsa_key = os.path.join(label.chromeos_root,
- "src/scripts/mod_for_test_scripts/ssh_keys/testing_rsa")
-
- cmd = ("cd {0} && "
- "./run_measurement "
- "--browser=cros-chrome "
- "--output-format=csv "
- "--remote={1} "
- "--identity {2} "
- "{3} {4}".format(telemetry_run_path, machine,
- rsa_key,
- benchmark.test_name,
- benchmark.test_args))
- if self.log_level != "verbose":
- self._logger.LogOutput("Running test.")
- self._logger.LogOutput("CMD: %s" % cmd)
+ self.RemoveTelemetryTempFile(machine, label.chromeos_root)
+
+ rsa_key = os.path.join(
+ label.chromeos_root,
+ 'src/scripts/mod_for_test_scripts/ssh_keys/testing_rsa')
+
+ cmd = ('cd {0} && '
+ './run_measurement '
+ '--browser=cros-chrome '
+ '--output-format=csv '
+ '--remote={1} '
+ '--identity {2} '
+ '{3} {4}'.format(telemetry_run_path, machine, rsa_key,
+ benchmark.test_name, benchmark.test_args))
+ if self.log_level != 'verbose':
+ self._logger.LogOutput('Running test.')
+ self._logger.LogOutput('CMD: %s' % cmd)
return self._ce.RunCommandWOutput(cmd, print_to_console=False)
def Terminate(self):
@@ -274,11 +272,12 @@ class SuiteRunner(object):
class MockSuiteRunner(object):
+
def __init__(self):
self._true = True
def Run(self, *_args):
if self._true:
- return [0, "", ""]
+ return [0, '', '']
else:
- return [0, "", ""]
+ return [0, '', '']
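
As a quick check on the reformatted GetProfilerArgs above, a hand-traced sketch of the transformation it performs. It assumes the crosperf modules are importable (e.g. when run from the crosperf directory); the expected output in the final comment is my own trace of the code, not a string quoted from the project's tests.

    from suite_runner import GetProfilerArgs

    # '--' prefixes are stripped and the value of perf_options= is re-quoted
    # with single quotes, as in the hunk above.
    args = ('--profiler=custom_perf '
            '--profiler_args=\'perf_options="record -a -e cycles"\'')
    print(GetProfilerArgs(args))
    # traced result: profiler=custom_perf profiler_args='record -a -e cycles'
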
diff --git a/crosperf/suite_runner_unittest.py b/crosperf/suite_runner_unittest.py
index d534f3a8..daff6c39 100755
--- a/crosperf/suite_runner_unittest.py
+++ b/crosperf/suite_runner_unittest.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
-
"""Unittest for machine_manager."""
import os.path
import time
@@ -30,38 +29,37 @@ class SuiteRunnerTest(unittest.TestCase):
mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
mock_cmd_term = mock.Mock(spec=command_executer.CommandTerminator)
mock_logger = mock.Mock(spec=logger.Logger)
- mock_label = label.MockLabel("lumpy", "lumpy_chromeos_image", "/tmp/chromeos",
- "lumpy", [ "lumpy1.cros", "lumpy.cros2" ],
- "", "", False, "average", "gcc", "")
- telemetry_crosperf_bench = Benchmark("b1_test", # name
- "octane", # test_name
- "", # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- "record -e cycles", # perf_args
- "telemetry_Crosperf", # suite
- True) # show_all_results
-
- test_that_bench = Benchmark("b2_test", # name
- "octane", # test_name
- "", # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- "record -e cycles") # perf_args
-
- telemetry_bench = Benchmark("b3_test", # name
- "octane", # test_name
- "", # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- "record -e cycles", # perf_args
- "telemetry", # suite
- False) # show_all_results
+ mock_label = label.MockLabel('lumpy', 'lumpy_chromeos_image', '/tmp/chromeos',
+ 'lumpy', ['lumpy1.cros', 'lumpy.cros2'], '', '',
+ False, 'average', 'gcc', '')
+ telemetry_crosperf_bench = Benchmark('b1_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles', # perf_args
+ 'telemetry_Crosperf', # suite
+ True) # show_all_results
+
+ test_that_bench = Benchmark('b2_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles') # perf_args
+
+ telemetry_bench = Benchmark('b3_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles', # perf_args
+ 'telemetry', # suite
+ False) # show_all_results
def setUp(self):
- self.runner = suite_runner.SuiteRunner(self.mock_logger, "verbose",
- self.mock_cmd_exec, self.mock_cmd_term)
-
+ self.runner = suite_runner.SuiteRunner(
+ self.mock_logger, 'verbose', self.mock_cmd_exec, self.mock_cmd_term)
def test_get_profiler_args(self):
input_str = ('--profiler=custom_perf --profiler_args=\'perf_options'
@@ -81,32 +79,29 @@ class SuiteRunnerTest(unittest.TestCase):
self.pin_governor_args = []
self.test_that_args = []
self.telemetry_run_args = []
- self.telemetry_crosperf_args = []
-
+ self.telemetry_crosperf_args = []
def FakePinGovernor(machine, chroot):
self.call_pin_governor = True
self.pin_governor_args = [machine, chroot]
-
def FakeTelemetryRun(machine, label, benchmark, profiler_args):
self.telemetry_run_args = [machine, label, benchmark, profiler_args]
self.call_telemetry_run = True
- return "Ran FakeTelemetryRun"
-
+ return 'Ran FakeTelemetryRun'
def FakeTelemetryCrosperfRun(machine, label, benchmark, test_args,
profiler_args):
self.telemetry_crosperf_args = [machine, label, benchmark, test_args,
profiler_args]
self.call_telemetry_crosperf_run = True
- return "Ran FakeTelemetryCrosperfRun"
-
+ return 'Ran FakeTelemetryCrosperfRun'
def FakeTestThatRun(machine, label, benchmark, test_args, profiler_args):
- self.test_that_args = [machine, label, benchmark, test_args, profiler_args]
+ self.test_that_args = [machine, label, benchmark, test_args, profiler_args
+ ]
self.call_test_that_run = True
- return "Ran FakeTestThatRun"
+ return 'Ran FakeTestThatRun'
self.runner.PinGovernorExecutionFrequencies = FakePinGovernor
self.runner.Telemetry_Run = FakeTelemetryRun
@@ -123,8 +118,9 @@ class SuiteRunnerTest(unittest.TestCase):
self.assertTrue(self.call_telemetry_run)
self.assertFalse(self.call_test_that_run)
self.assertFalse(self.call_telemetry_crosperf_run)
- self.assertEqual(self.telemetry_run_args,
- ['fake_machine', self.mock_label, self.telemetry_bench, ''])
+ self.assertEqual(
+ self.telemetry_run_args,
+ ['fake_machine', self.mock_label, self.telemetry_bench, ''])
reset()
res = self.runner.Run(machine, self.mock_label, self.test_that_bench,
@@ -133,13 +129,13 @@ class SuiteRunnerTest(unittest.TestCase):
self.assertFalse(self.call_telemetry_run)
self.assertTrue(self.call_test_that_run)
self.assertFalse(self.call_telemetry_crosperf_run)
- self.assertEqual(self.test_that_args,
- ['fake_machine', self.mock_label, self.test_that_bench, '',
- ''])
+ self.assertEqual(self.test_that_args, ['fake_machine', self.mock_label,
+ self.test_that_bench, '', ''])
reset()
- res = self.runner.Run(machine, self.mock_label, self.telemetry_crosperf_bench,
- test_args, profiler_args)
+ res = self.runner.Run(machine, self.mock_label,
+ self.telemetry_crosperf_bench, test_args,
+ profiler_args)
self.assertTrue(self.call_pin_governor)
self.assertFalse(self.call_telemetry_run)
self.assertFalse(self.call_test_that_run)
@@ -148,27 +144,23 @@ class SuiteRunnerTest(unittest.TestCase):
['fake_machine', self.mock_label,
self.telemetry_crosperf_bench, '', ''])
-
-
- @mock.patch.object (command_executer.CommandExecuter, 'CrosRunCommandWOutput')
+ @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
def test_get_highest_static_frequency(self, mock_cros_runcmd):
self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd
- mock_cros_runcmd.return_value = [ 0, '1666000 1333000 1000000', '']
- freq = self.runner.GetHighestStaticFrequency ('lumpy1.cros', '/tmp/chromeos')
+ mock_cros_runcmd.return_value = [0, '1666000 1333000 1000000', '']
+ freq = self.runner.GetHighestStaticFrequency('lumpy1.cros', '/tmp/chromeos')
self.assertEqual(freq, '1666000')
- mock_cros_runcmd.return_value = [ 0, '1333000', '']
- freq = self.runner.GetHighestStaticFrequency ('lumpy1.cros', '/tmp/chromeos')
+ mock_cros_runcmd.return_value = [0, '1333000', '']
+ freq = self.runner.GetHighestStaticFrequency('lumpy1.cros', '/tmp/chromeos')
self.assertEqual(freq, '1333000')
- mock_cros_runcmd.return_value = [ 0, '1661000 1333000 1000000', '']
- freq = self.runner.GetHighestStaticFrequency ('lumpy1.cros', '/tmp/chromeos')
+ mock_cros_runcmd.return_value = [0, '1661000 1333000 1000000', '']
+ freq = self.runner.GetHighestStaticFrequency('lumpy1.cros', '/tmp/chromeos')
self.assertEqual(freq, '1333000')
-
-
- @mock.patch.object (command_executer.CommandExecuter, 'CrosRunCommand')
+ @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
def test_pin_governor_execution_frequencies(self, mock_cros_runcmd):
def FakeGetHighestFreq(machine_name, chromeos_root):
@@ -179,11 +171,18 @@ class SuiteRunnerTest(unittest.TestCase):
self.runner.PinGovernorExecutionFrequencies('lumpy1.cros', '/tmp/chromeos')
self.assertEqual(mock_cros_runcmd.call_count, 1)
cmd = mock_cros_runcmd.call_args_list[0][0]
- self.assertEqual (cmd, ('set -e && for f in /sys/devices/system/cpu/cpu*/cpufreq/scaling_max_freq; do echo 1666000 > $f; done && for f in /sys/devices/system/cpu/cpu*/cpufreq/scaling_min_freq; do echo 1666000 > $f; done && for f in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor; do echo performance > $f; done',))
-
-
- @mock.patch.object (time, 'sleep')
- @mock.patch.object (command_executer.CommandExecuter, 'CrosRunCommand')
+ self.assertEqual(cmd, (
+ 'set -e && for f in '
+ '/sys/devices/system/cpu/cpu*/cpufreq/scaling_max_freq; do echo '
+ '1666000 > $f; done && for f in '
+ '/sys/devices/system/cpu/cpu*/cpufreq/scaling_min_freq; do echo '
+ '1666000 > $f; done && for f in '
+ '/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor; do echo '
+ 'performance > $f; done',
+ ))
+
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
def test_reboot_machine(self, mock_cros_runcmd, mock_sleep):
def FakePinGovernor(machine_name, chromeos_root):
@@ -197,16 +196,15 @@ class SuiteRunnerTest(unittest.TestCase):
self.assertEqual(mock_sleep.call_count, 1)
self.assertEqual(mock_sleep.call_args_list[0][0], (60,))
-
- @mock.patch.object (command_executer.CommandExecuter, 'CrosRunCommand')
- @mock.patch.object (command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput')
+ @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
+ @mock.patch.object(command_executer.CommandExecuter,
+ 'ChrootRunCommandWOutput')
def test_test_that_run(self, mock_chroot_runcmd, mock_cros_runcmd):
- def FakeRebootMachine (machine, chroot):
+ def FakeRebootMachine(machine, chroot):
pass
- def FakeLogMsg (fd, termfd, msg, flush):
+ def FakeLogMsg(fd, termfd, msg, flush):
pass
save_log_msg = self.real_logger._LogMsg
@@ -217,8 +215,7 @@ class SuiteRunnerTest(unittest.TestCase):
raised_exception = False
try:
self.runner.Test_That_Run('lumpy1.cros', self.mock_label,
- self.test_that_bench, '',
- 'record -a -e cycles')
+ self.test_that_bench, '', 'record -a -e cycles')
except:
raised_exception = True
self.assertTrue(raised_exception)
@@ -226,9 +223,8 @@ class SuiteRunnerTest(unittest.TestCase):
mock_chroot_runcmd.return_value = 0
self.mock_cmd_exec.ChrootRunCommandWOutput = mock_chroot_runcmd
self.mock_cmd_exec.CrosRunCommand = mock_cros_runcmd
- res = self.runner.Test_That_Run ('lumpy1.cros', self.mock_label,
- self.test_that_bench, '--iterations=2',
- '')
+ res = self.runner.Test_That_Run('lumpy1.cros', self.mock_label,
+ self.test_that_bench, '--iterations=2', '')
self.assertEqual(mock_cros_runcmd.call_count, 1)
self.assertEqual(mock_chroot_runcmd.call_count, 1)
self.assertEqual(res, 0)
@@ -245,10 +241,9 @@ class SuiteRunnerTest(unittest.TestCase):
self.assertEqual(args_dict['command_terminator'], self.mock_cmd_term)
self.real_logger._LogMsg = save_log_msg
-
- @mock.patch.object (os.path, 'isdir')
- @mock.patch.object (command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput')
+ @mock.patch.object(os.path, 'isdir')
+ @mock.patch.object(command_executer.CommandExecuter,
+ 'ChrootRunCommandWOutput')
def test_telemetry_crosperf_run(self, mock_chroot_runcmd, mock_isdir):
mock_isdir.return_value = True
@@ -256,9 +251,9 @@ class SuiteRunnerTest(unittest.TestCase):
self.mock_cmd_exec.ChrootRunCommandWOutput = mock_chroot_runcmd
profiler_args = ('--profiler=custom_perf --profiler_args=\'perf_options'
'="record -a -e cycles,instructions"\'')
- res = self.runner.Telemetry_Crosperf_Run ('lumpy1.cros', self.mock_label,
- self.telemetry_crosperf_bench,
- '', profiler_args)
+ res = self.runner.Telemetry_Crosperf_Run('lumpy1.cros', self.mock_label,
+ self.telemetry_crosperf_bench, '',
+ profiler_args)
self.assertEqual(res, 0)
self.assertEqual(mock_chroot_runcmd.call_count, 1)
args_list = mock_chroot_runcmd.call_args_list[0][0]
@@ -277,13 +272,12 @@ class SuiteRunnerTest(unittest.TestCase):
self.assertEqual(args_dict['command_terminator'], self.mock_cmd_term)
self.assertEqual(len(args_dict), 2)
-
- @mock.patch.object (os.path, 'isdir')
- @mock.patch.object (os.path, 'exists')
- @mock.patch.object (command_executer.CommandExecuter, 'RunCommandWOutput')
+ @mock.patch.object(os.path, 'isdir')
+ @mock.patch.object(os.path, 'exists')
+ @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
def test_telemetry_run(self, mock_runcmd, mock_exists, mock_isdir):
- def FakeLogMsg (fd, termfd, msg, flush):
+ def FakeLogMsg(fd, termfd, msg, flush):
pass
save_log_msg = self.real_logger._LogMsg
@@ -330,13 +324,14 @@ class SuiteRunnerTest(unittest.TestCase):
self.telemetry_bench, '')
self.assertEqual(res, 0)
self.assertEqual(mock_runcmd.call_count, 1)
- self.assertEqual(mock_runcmd.call_args_list[0][0],
- (('cd src/tools/perf && ./run_measurement '
- '--browser=cros-chrome --output-format=csv '
- '--remote=lumpy1.cros --identity /tmp/chromeos/src/scripts'
- '/mod_for_test_scripts/ssh_keys/testing_rsa octane '),))
+ self.assertEqual(mock_runcmd.call_args_list[0][0], (
+ ('cd src/tools/perf && ./run_measurement '
+ '--browser=cros-chrome --output-format=csv '
+ '--remote=lumpy1.cros --identity /tmp/chromeos/src/scripts'
+ '/mod_for_test_scripts/ssh_keys/testing_rsa octane '),))
self.real_logger._LogMsg = save_log_msg
-if __name__ == "__main__":
+
+if __name__ == '__main__':
unittest.main()
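
The long command asserted in test_pin_governor_execution_frequencies above is just three BASH_FOR loops joined with ' && '. The sketch below rebuilds it from the constants in the suite_runner.py hunk; 1666000 is the example frequency used by the test, and the result should match the asserted command up to whitespace.

    # Rebuild the governor-pinning shell command from the pieces shown above.
    BASH_FOR = 'for f in {list}; do {body}; done'
    CPUFREQ_DIRS = '/sys/devices/system/cpu/cpu*/cpufreq/'
    highest_freq = '1666000'
    change_max_freq = BASH_FOR.format(list=CPUFREQ_DIRS + 'scaling_max_freq',
                                      body='echo %s > $f' % highest_freq)
    change_min_freq = BASH_FOR.format(list=CPUFREQ_DIRS + 'scaling_min_freq',
                                      body='echo %s > $f' % highest_freq)
    change_perf_gov = BASH_FOR.format(list=CPUFREQ_DIRS + 'scaling_governor',
                                      body='echo performance > $f')
    print(' && '.join(('set -e ', change_max_freq, change_min_freq,
                       change_perf_gov)))
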
diff --git a/crosperf/test_flag.py b/crosperf/test_flag.py
index 06f2ae10..0305eea7 100644
--- a/crosperf/test_flag.py
+++ b/crosperf/test_flag.py
@@ -1,8 +1,6 @@
# Copyright 2011 Google Inc. All Rights Reserved.
-
"""A global variable for testing."""
-
_is_test = [False]
@@ -11,4 +9,4 @@ def SetTestMode(flag):
def GetTestMode():
- return _is_test[0]
+ return _is_test[0]
diff --git a/crosperf/translate_xbuddy.py b/crosperf/translate_xbuddy.py
index 57aa2167..a32854e1 100644
--- a/crosperf/translate_xbuddy.py
+++ b/crosperf/translate_xbuddy.py
@@ -9,14 +9,15 @@ if '/mnt/host/source/src/third_party/toolchain-utils/crosperf' in sys.path:
dev_path = os.path.expanduser('~/trunk/src/platform/dev')
sys.path.append(dev_path)
else:
- print ('This script can only be run from inside a ChromeOS chroot. Please '
- 'enter your chroot, go to ~/src/third_party/toolchain-utils/crosperf'
- ' and try again.')
+ print('This script can only be run from inside a ChromeOS chroot. Please '
+ 'enter your chroot, go to ~/src/third_party/toolchain-utils/crosperf'
+ ' and try again.')
sys.exit(0)
#pylint: disable=import-error
import xbuddy
+
def Main(xbuddy_string):
if not os.path.exists('./xbuddy_config.ini'):
config_path = os.path.expanduser('~/trunk/src/platform/dev/'
@@ -26,6 +27,7 @@ def Main(xbuddy_string):
build_id = x.Translate(os.path.split(xbuddy_string))
return build_id
-if __name__ == "__main__":
+
+if __name__ == '__main__':
print(Main(sys.argv[1]))
sys.exit(0)