author     Luis Lozano <llozano@chromium.org>    2015-12-15 13:49:30 -0800
committer  Luis Lozano <llozano@chromium.org>    2015-12-16 17:36:06 +0000
commit     f2a3ef46f75d2196a93d3ed27f4d1fcf22b54fbe (patch)
tree       185d243c7eed7c7a0db6f0e640746cadc1479ea9 /crosperf/benchmark_run_unittest.py
parent     2a66f70fef907c1cb15229cb58e5129cb620ac98 (diff)
download   toolchain-utils-f2a3ef46f75d2196a93d3ed27f4d1fcf22b54fbe.tar.gz
Run pyformat on all the toolchain-utils files.
This gets rid of a lot of lint issues. Ran by doing this:

  for f in *.py; do echo -n "$f " ; if [ -x $f ]; then pyformat -i --remove_trailing_comma --yapf --force_quote_type=double $f ; else pyformat -i --remove_shebang --remove_trailing_comma --yapf --force_quote_type=double $f ; fi ; done

BUG=chromium:567921
TEST=Ran simple crosperf run.

Change-Id: I59778835fdaa5f706d2e1765924389f9e97433d1
Reviewed-on: https://chrome-internal-review.googlesource.com/242031
Reviewed-by: Luis Lozano <llozano@chromium.org>
Commit-Queue: Luis Lozano <llozano@chromium.org>
Tested-by: Luis Lozano <llozano@chromium.org>
Reviewed-by: Yunlian Jiang <yunlian@google.com>
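Unrolled, the loop in the message reads as follows. This is a sketch, assuming pyformat (Google's YAPF-based formatter) is on PATH and the command is run from the toolchain-utils root; "$f" is quoted here for safety, otherwise the flags match the commit message verbatim.

  #!/bin/sh
  # Format every Python file in the current directory in place.
  for f in *.py; do
    echo -n "$f "                   # progress: print each file name
    if [ -x "$f" ]; then
      # Executable scripts: keep the shebang line intact.
      pyformat -i --remove_trailing_comma --yapf \
        --force_quote_type=double "$f"
    else
      # Plain modules: additionally strip any stray shebang.
      pyformat -i --remove_shebang --remove_trailing_comma --yapf \
        --force_quote_type=double "$f"
    fi
  done

The only difference between the two branches is --remove_shebang: files without the executable bit have no reason to carry a shebang line, so it is removed there.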
Diffstat (limited to 'crosperf/benchmark_run_unittest.py')
-rwxr-xr-x  crosperf/benchmark_run_unittest.py  352
1 file changed, 170 insertions, 182 deletions
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
index 920b7d71..744f89c1 100755
--- a/crosperf/benchmark_run_unittest.py
+++ b/crosperf/benchmark_run_unittest.py
@@ -3,7 +3,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Testing of benchmark_run."""
import mock
@@ -28,57 +27,60 @@ from results_cache import ResultsCache
class BenchmarkRunTest(unittest.TestCase):
- """
- Unit tests for the BenchmarkRun class and all of its methods.
+ """Unit tests for the BenchmarkRun class and all of its methods.
"""
def setUp(self):
- self.test_benchmark = Benchmark("page_cycler.netsim.top_10", # name
- "page_cycler.netsim.top_10", # test_name
- "", # test_args
+ self.test_benchmark = Benchmark('page_cycler.netsim.top_10', # name
+ 'page_cycler.netsim.top_10', # test_name
+ '', # test_args
1, # iterations
False, # rm_chroot_tmp
- "", # perf_args
- suite="telemetry_Crosperf") # suite
+ '', # perf_args
+ suite='telemetry_Crosperf') # suite
- self.test_label = MockLabel("test1", "image1", "/tmp/test_benchmark_run",
- "x86-alex", "chromeos2-row1-rack4-host9.cros",
- image_args="", cache_dir="", cache_only=False,
- log_level="average", compiler="gcc")
+ self.test_label = MockLabel('test1',
+ 'image1',
+ '/tmp/test_benchmark_run',
+ 'x86-alex',
+ 'chromeos2-row1-rack4-host9.cros',
+ image_args='',
+ cache_dir='',
+ cache_only=False,
+ log_level='average',
+ compiler='gcc')
+ self.test_cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
+ CacheConditions.CHECKSUMS_MATCH]
- self.test_cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
- CacheConditions.CHECKSUMS_MATCH]
-
- self.mock_logger = logger.GetLogger(log_dir="", mock=True)
+ self.mock_logger = logger.GetLogger(log_dir='', mock=True)
self.mock_machine_manager = mock.Mock(spec=MachineManager)
def testDryRun(self):
- my_label = MockLabel("test1", "image1", "/tmp/test_benchmark_run",
- "x86-alex", "chromeos2-row1-rack4-host9.cros",
- image_args="", cache_dir="", cache_only=False,
- log_level="average", compiler="gcc")
-
- logging_level = "average"
- m = MockMachineManager("/tmp/chromeos_root", 0, logging_level)
- m.AddMachine("chromeos2-row1-rack4-host9.cros")
- bench = Benchmark("page_cycler.netsim.top_10", # name
- "page_cycler.netsim.top_10", # test_name
- "", # test_args
+ my_label = MockLabel('test1',
+ 'image1',
+ '/tmp/test_benchmark_run',
+ 'x86-alex',
+ 'chromeos2-row1-rack4-host9.cros',
+ image_args='',
+ cache_dir='',
+ cache_only=False,
+ log_level='average',
+ compiler='gcc')
+
+ logging_level = 'average'
+ m = MockMachineManager('/tmp/chromeos_root', 0, logging_level)
+ m.AddMachine('chromeos2-row1-rack4-host9.cros')
+ bench = Benchmark('page_cycler.netsim.top_10', # name
+ 'page_cycler.netsim.top_10', # test_name
+ '', # test_args
1, # iterations
False, # rm_chroot_tmp
- "", # perf_args
- suite="telemetry_Crosperf") # suite
- b = benchmark_run.MockBenchmarkRun("test run",
- bench,
- my_label,
- 1,
- [],
- m,
- logger.GetLogger(),
- logging_level,
- "")
+ '', # perf_args
+ suite='telemetry_Crosperf') # suite
+ b = benchmark_run.MockBenchmarkRun('test run', bench, my_label, 1, [], m,
+ logger.GetLogger(), logging_level, '')
b.cache = MockResultsCache()
b.suite_runner = MockSuiteRunner()
b.start()
@@ -90,8 +92,7 @@ class BenchmarkRunTest(unittest.TestCase):
'log_level', 'share_cache']
arg_spec = inspect.getargspec(benchmark_run.BenchmarkRun.__init__)
self.assertEqual(len(arg_spec.args), len(args_list))
- self.assertEqual (arg_spec.args, args_list)
-
+ self.assertEqual(arg_spec.args, args_list)
def test_init(self):
# Nothing really worth testing here; just field assignments.
@@ -102,64 +103,63 @@ class BenchmarkRunTest(unittest.TestCase):
pass
def test_run(self):
- br = benchmark_run.BenchmarkRun("test_run", self.test_benchmark,
- self.test_label, 1, self.test_cache_conditions,
- self.mock_machine_manager,
- self.mock_logger,
- "average", "")
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
def MockLogOutput(msg, print_to_console=False):
- "Helper function for test_run."
+ 'Helper function for test_run.'
self.log_output.append(msg)
def MockLogError(msg, print_to_console=False):
- "Helper function for test_run."
+ 'Helper function for test_run.'
self.log_error.append(msg)
def MockRecordStatus(msg):
- "Helper function for test_run."
+ 'Helper function for test_run.'
self.status.append(msg)
def FakeReadCache():
- "Helper function for test_run."
+ 'Helper function for test_run.'
br.cache = mock.Mock(spec=ResultsCache)
self.called_ReadCache = True
return 0
def FakeReadCacheSucceed():
- "Helper function for test_run."
+ 'Helper function for test_run.'
br.cache = mock.Mock(spec=ResultsCache)
br.result = mock.Mock(spec=Result)
- br.result.out = "result.out stuff"
- br.result.err = "result.err stuff"
+ br.result.out = 'result.out stuff'
+ br.result.err = 'result.err stuff'
br.result.retval = 0
self.called_ReadCache = True
return 0
def FakeReadCacheException():
- "Helper function for test_run."
- raise Exception("This is an exception test; it is supposed to happen")
+ 'Helper function for test_run.'
+ raise Exception('This is an exception test; it is supposed to happen')
def FakeAcquireMachine():
- "Helper function for test_run."
- mock_machine = MockCrosMachine ('chromeos1-row3-rack5-host7.cros',
- 'chromeos', 'average')
+ 'Helper function for test_run.'
+ mock_machine = MockCrosMachine('chromeos1-row3-rack5-host7.cros',
+ 'chromeos', 'average')
return mock_machine
def FakeRunTest(_machine):
- "Helper function for test_run."
+ 'Helper function for test_run.'
mock_result = mock.Mock(spec=Result)
mock_result.retval = 0
return mock_result
def FakeRunTestFail(_machine):
- "Helper function for test_run."
+ 'Helper function for test_run.'
mock_result = mock.Mock(spec=Result)
mock_result.retval = 1
return mock_result
def ResetTestValues():
- "Helper function for test_run."
+ 'Helper function for test_run.'
self.log_output = []
self.log_error = []
self.status = []
@@ -177,53 +177,52 @@ class BenchmarkRunTest(unittest.TestCase):
# First test: No cache hit, all goes well.
ResetTestValues()
br.run()
- self.assertTrue (self.called_ReadCache)
- self.assertEqual (self.log_output,
- ['test_run: No cache hit.',
- 'Releasing machine: chromeos1-row3-rack5-host7.cros',
- 'Released machine: chromeos1-row3-rack5-host7.cros'])
- self.assertEqual (len(self.log_error), 0)
- self.assertEqual (self.status, ['WAITING', 'SUCCEEDED'])
+ self.assertTrue(self.called_ReadCache)
+ self.assertEqual(self.log_output,
+ ['test_run: No cache hit.',
+ 'Releasing machine: chromeos1-row3-rack5-host7.cros',
+ 'Released machine: chromeos1-row3-rack5-host7.cros'])
+ self.assertEqual(len(self.log_error), 0)
+ self.assertEqual(self.status, ['WAITING', 'SUCCEEDED'])
# Second test: No cached result found; test run was "terminated" for some
# reason.
ResetTestValues()
br.terminated = True
br.run()
- self.assertTrue (self.called_ReadCache)
- self.assertEqual (self.log_output,
- ['test_run: No cache hit.',
- 'Releasing machine: chromeos1-row3-rack5-host7.cros',
- 'Released machine: chromeos1-row3-rack5-host7.cros'])
- self.assertEqual (len(self.log_error), 0)
- self.assertEqual (self.status, ['WAITING'])
+ self.assertTrue(self.called_ReadCache)
+ self.assertEqual(self.log_output,
+ ['test_run: No cache hit.',
+ 'Releasing machine: chromeos1-row3-rack5-host7.cros',
+ 'Released machine: chromeos1-row3-rack5-host7.cros'])
+ self.assertEqual(len(self.log_error), 0)
+ self.assertEqual(self.status, ['WAITING'])
# Third test. No cached result found; RunTest failed for some reason.
ResetTestValues()
br.terminated = False
br.RunTest = FakeRunTestFail
br.run()
- self.assertTrue (self.called_ReadCache)
- self.assertEqual (self.log_output,
- ['test_run: No cache hit.',
- 'Releasing machine: chromeos1-row3-rack5-host7.cros',
- 'Released machine: chromeos1-row3-rack5-host7.cros'])
- self.assertEqual (len(self.log_error), 0)
- self.assertEqual (self.status, ['WAITING', 'FAILED'])
+ self.assertTrue(self.called_ReadCache)
+ self.assertEqual(self.log_output,
+ ['test_run: No cache hit.',
+ 'Releasing machine: chromeos1-row3-rack5-host7.cros',
+ 'Released machine: chromeos1-row3-rack5-host7.cros'])
+ self.assertEqual(len(self.log_error), 0)
+ self.assertEqual(self.status, ['WAITING', 'FAILED'])
# Fourth test: ReadCache found a cached result.
ResetTestValues()
br.RunTest = FakeRunTest
br.ReadCache = FakeReadCacheSucceed
br.run()
- self.assertTrue (self.called_ReadCache)
- self.assertEqual (self.log_output,
- ['test_run: Cache hit.',
- 'result.out stuff',
- 'Releasing machine: chromeos1-row3-rack5-host7.cros',
- 'Released machine: chromeos1-row3-rack5-host7.cros'])
- self.assertEqual (self.log_error, ['result.err stuff'])
- self.assertEqual (self.status, ['SUCCEEDED'])
+ self.assertTrue(self.called_ReadCache)
+ self.assertEqual(self.log_output,
+ ['test_run: Cache hit.', 'result.out stuff',
+ 'Releasing machine: chromeos1-row3-rack5-host7.cros',
+ 'Released machine: chromeos1-row3-rack5-host7.cros'])
+ self.assertEqual(self.log_error, ['result.err stuff'])
+ self.assertEqual(self.status, ['SUCCEEDED'])
# Fifth test: ReadCache generates an exception; does the try/finally block
# work?
@@ -231,183 +230,172 @@ class BenchmarkRunTest(unittest.TestCase):
br.ReadCache = FakeReadCacheException
br.machine = FakeAcquireMachine()
br.run()
- self.assertEqual (self.log_error,
- ["Benchmark run: 'test_run' failed: This is an exception test; it is supposed to happen"])
- self.assertEqual (self.status, ['FAILED'])
-
+ self.assertEqual(self.log_error, [
+ "Benchmark run: 'test_run' failed: This is an exception test; it is "
+ "supposed to happen"
+ ])
+ self.assertEqual(self.status, ['FAILED'])
def test_terminate_pass(self):
- br = benchmark_run.BenchmarkRun("test_run", self.test_benchmark,
- self.test_label, 1, self.test_cache_conditions,
- self.mock_machine_manager,
- self.mock_logger,
- "average", "")
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
def GetLastEventPassed():
- "Helper function for test_terminate_pass"
+ 'Helper function for test_terminate_pass'
return benchmark_run.STATUS_SUCCEEDED
def RecordStub(status):
- "Helper function for test_terminate_pass"
+ 'Helper function for test_terminate_pass'
self.status = status
self.status = benchmark_run.STATUS_SUCCEEDED
- self.assertFalse (br.terminated)
- self.assertFalse (br.suite_runner._ct.IsTerminated())
+ self.assertFalse(br.terminated)
+ self.assertFalse(br.suite_runner._ct.IsTerminated())
br.timeline.GetLastEvent = GetLastEventPassed
br.timeline.Record = RecordStub
br.Terminate()
- self.assertTrue (br.terminated)
- self.assertTrue (br.suite_runner._ct.IsTerminated())
- self.assertEqual (self.status, benchmark_run.STATUS_FAILED)
-
-
+ self.assertTrue(br.terminated)
+ self.assertTrue(br.suite_runner._ct.IsTerminated())
+ self.assertEqual(self.status, benchmark_run.STATUS_FAILED)
def test_terminate_fail(self):
- br = benchmark_run.BenchmarkRun("test_run", self.test_benchmark,
- self.test_label, 1, self.test_cache_conditions,
- self.mock_machine_manager,
- self.mock_logger,
- "average", "")
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
def GetLastEventFailed():
- "Helper function for test_terminate_fail"
+ 'Helper function for test_terminate_fail'
return benchmark_run.STATUS_FAILED
def RecordStub(status):
- "Helper function for test_terminate_fail"
+ 'Helper function for test_terminate_fail'
self.status = status
self.status = benchmark_run.STATUS_SUCCEEDED
- self.assertFalse (br.terminated)
- self.assertFalse (br.suite_runner._ct.IsTerminated())
+ self.assertFalse(br.terminated)
+ self.assertFalse(br.suite_runner._ct.IsTerminated())
br.timeline.GetLastEvent = GetLastEventFailed
br.timeline.Record = RecordStub
br.Terminate()
- self.assertTrue (br.terminated)
- self.assertTrue (br.suite_runner._ct.IsTerminated())
- self.assertEqual (self.status, benchmark_run.STATUS_SUCCEEDED)
-
+ self.assertTrue(br.terminated)
+ self.assertTrue(br.suite_runner._ct.IsTerminated())
+ self.assertEqual(self.status, benchmark_run.STATUS_SUCCEEDED)
def test_acquire_machine(self):
- br = benchmark_run.BenchmarkRun("test_run", self.test_benchmark,
- self.test_label, 1, self.test_cache_conditions,
- self.mock_machine_manager,
- self.mock_logger,
- "average", "")
-
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
br.terminated = True
- self.assertRaises (Exception, br.AcquireMachine)
+ self.assertRaises(Exception, br.AcquireMachine)
br.terminated = False
- mock_machine = MockCrosMachine ('chromeos1-row3-rack5-host7.cros',
- 'chromeos', 'average')
+ mock_machine = MockCrosMachine('chromeos1-row3-rack5-host7.cros',
+ 'chromeos', 'average')
self.mock_machine_manager.AcquireMachine.return_value = mock_machine
machine = br.AcquireMachine()
- self.assertEqual (machine.name, 'chromeos1-row3-rack5-host7.cros')
-
+ self.assertEqual(machine.name, 'chromeos1-row3-rack5-host7.cros')
def test_get_extra_autotest_args(self):
- br = benchmark_run.BenchmarkRun("test_run", self.test_benchmark,
- self.test_label, 1, self.test_cache_conditions,
- self.mock_machine_manager,
- self.mock_logger,
- "average", "")
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
def MockLogError(err_msg):
- "Helper function for test_get_extra_autotest_args"
+ 'Helper function for test_get_extra_autotest_args'
self.err_msg = err_msg
self.mock_logger.LogError = MockLogError
result = br._GetExtraAutotestArgs()
- self.assertEqual(result, "")
+ self.assertEqual(result, '')
- self.test_benchmark.perf_args = "record -e cycles"
+ self.test_benchmark.perf_args = 'record -e cycles'
result = br._GetExtraAutotestArgs()
- self.assertEqual(result,
-"--profiler=custom_perf --profiler_args='perf_options=\"record -a -e cycles\"'")
+ self.assertEqual(
+ result,
+ "--profiler=custom_perf --profiler_args='perf_options=\"record -a -e "
+ "cycles\"'")
- self.test_benchmark.suite = "telemetry"
+ self.test_benchmark.suite = 'telemetry'
result = br._GetExtraAutotestArgs()
- self.assertEqual(result, "")
- self.assertEqual(self.err_msg, "Telemetry does not support profiler.")
+ self.assertEqual(result, '')
+ self.assertEqual(self.err_msg, 'Telemetry does not support profiler.')
- self.test_benchmark.perf_args = "record -e cycles"
- self.test_benchmark.suite = "test_that"
+ self.test_benchmark.perf_args = 'record -e cycles'
+ self.test_benchmark.suite = 'test_that'
result = br._GetExtraAutotestArgs()
- self.assertEqual(result, "")
- self.assertEqual(self.err_msg, "test_that does not support profiler.")
+ self.assertEqual(result, '')
+ self.assertEqual(self.err_msg, 'test_that does not support profiler.')
- self.test_benchmark.perf_args = "junk args"
- self.test_benchmark.suite = "telemetry_Crosperf"
+ self.test_benchmark.perf_args = 'junk args'
+ self.test_benchmark.suite = 'telemetry_Crosperf'
self.assertRaises(Exception, br._GetExtraAutotestArgs)
-
@mock.patch.object(SuiteRunner, 'Run')
@mock.patch.object(Result, 'CreateFromRun')
def test_run_test(self, mock_result, mock_runner):
- br = benchmark_run.BenchmarkRun("test_run", self.test_benchmark,
- self.test_label, 1, self.test_cache_conditions,
- self.mock_machine_manager,
- self.mock_logger,
- "average", "")
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
self.status = []
+
def MockRecord(status):
self.status.append(status)
br.timeline.Record = MockRecord
- mock_machine = MockCrosMachine ('chromeos1-row3-rack5-host7.cros',
- 'chromeos', 'average')
- mock_runner.return_value = [0, "{'Score':100}", ""]
+ mock_machine = MockCrosMachine('chromeos1-row3-rack5-host7.cros',
+ 'chromeos', 'average')
+ mock_runner.return_value = [0, "{'Score':100}", '']
br.RunTest(mock_machine)
self.assertTrue(br.run_completed)
- self.assertEqual (self.status, [ benchmark_run.STATUS_IMAGING,
- benchmark_run.STATUS_RUNNING])
-
- self.assertEqual (br.machine_manager.ImageMachine.call_count, 1)
- br.machine_manager.ImageMachine.assert_called_with (mock_machine,
- self.test_label)
- self.assertEqual (mock_runner.call_count, 1)
- mock_runner.assert_called_with (mock_machine.name, br.label,
- br.benchmark, "", br.profiler_args)
-
- self.assertEqual (mock_result.call_count, 1)
- mock_result.assert_called_with (self.mock_logger, 'average',
- self.test_label, None, "{'Score':100}",
- "", 0, False, 'page_cycler.netsim.top_10',
- 'telemetry_Crosperf')
+ self.assertEqual(self.status, [benchmark_run.STATUS_IMAGING,
+ benchmark_run.STATUS_RUNNING])
+ self.assertEqual(br.machine_manager.ImageMachine.call_count, 1)
+ br.machine_manager.ImageMachine.assert_called_with(mock_machine,
+ self.test_label)
+ self.assertEqual(mock_runner.call_count, 1)
+ mock_runner.assert_called_with(mock_machine.name, br.label, br.benchmark,
+ '', br.profiler_args)
+ self.assertEqual(mock_result.call_count, 1)
+ mock_result.assert_called_with(
+ self.mock_logger, 'average', self.test_label, None, "{'Score':100}", '',
+ 0, False, 'page_cycler.netsim.top_10', 'telemetry_Crosperf')
def test_set_cache_conditions(self):
- br = benchmark_run.BenchmarkRun("test_run", self.test_benchmark,
- self.test_label, 1, self.test_cache_conditions,
- self.mock_machine_manager,
- self.mock_logger,
- "average", "")
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
+ 'average', '')
- phony_cache_conditions = [ 123, 456, True, False ]
+ phony_cache_conditions = [123, 456, True, False]
self.assertEqual(br.cache_conditions, self.test_cache_conditions)
- br.SetCacheConditions (phony_cache_conditions)
+ br.SetCacheConditions(phony_cache_conditions)
self.assertEqual(br.cache_conditions, phony_cache_conditions)
br.SetCacheConditions(self.test_cache_conditions)
self.assertEqual(br.cache_conditions, self.test_cache_conditions)
-if __name__ == "__main__":
+if __name__ == '__main__':
unittest.main()