Diffstat (limited to 'crosperf')
-rw-r--r--  crosperf/benchmark.py | 45
-rw-r--r--  crosperf/benchmark_run.py | 265
-rwxr-xr-x  crosperf/benchmark_run_unittest.py | 430
-rwxr-xr-x  crosperf/benchmark_unittest.py | 63
-rw-r--r--  crosperf/column_chart.py | 59
-rw-r--r--  crosperf/compare_machines.py | 64
-rw-r--r--  crosperf/config.py | 13
-rwxr-xr-x  crosperf/config_unittest.py | 50
-rwxr-xr-x  crosperf/crosperf | 2
-rwxr-xr-x  crosperf/crosperf.py | 144
-rwxr-xr-x  crosperf/crosperf_test.py | 44
-rwxr-xr-x  crosperf/crosperf_unittest.py | 66
-rw-r--r--  crosperf/default-telemetry-results.json | 174
-rw-r--r--  crosperf/default_remotes | 8
-rw-r--r--  crosperf/download_images.py | 286
-rwxr-xr-x  crosperf/download_images_buildid_test.py | 111
-rwxr-xr-x  crosperf/download_images_unittest.py | 251
-rw-r--r--  crosperf/experiment.py | 212
-rw-r--r--  crosperf/experiment_factory.py | 331
-rwxr-xr-x  crosperf/experiment_factory_unittest.py | 242
-rw-r--r--  crosperf/experiment_file.py | 205
-rwxr-xr-x  crosperf/experiment_file_unittest.py | 135
-rw-r--r--  crosperf/experiment_files/README | 34
-rw-r--r--  crosperf/experiment_files/aes_perf.exp | 21
-rw-r--r--  crosperf/experiment_files/bloat_perf.exp | 25
-rw-r--r--  crosperf/experiment_files/morejs_perf.exp | 25
-rw-r--r--  crosperf/experiment_files/non-telemetry-tests.exp | 31
-rw-r--r--  crosperf/experiment_files/official-image.exp | 41
-rw-r--r--  crosperf/experiment_files/page_cycler.exp | 28
-rw-r--r--  crosperf/experiment_files/page_cycler_perf.exp | 45
-rw-r--r--  crosperf/experiment_files/telemetry-crosperf-suites.exp | 54
-rw-r--r--  crosperf/experiment_files/telemetry-crosperf-with-external-chrome-src.exp | 31
-rw-r--r--  crosperf/experiment_files/telemetry-crosperf-with-profiler.exp | 35
-rw-r--r--  crosperf/experiment_files/telemetry-crosperf.exp | 32
-rw-r--r--  crosperf/experiment_files/telemetry-without-autotest.exp | 31
-rwxr-xr-x  crosperf/experiment_files/telemetry_perf_perf | 77
-rw-r--r--  crosperf/experiment_files/trybot-image.exp | 33
-rw-r--r--  crosperf/experiment_runner.py | 309
-rwxr-xr-x  crosperf/experiment_runner_unittest.py | 450
-rw-r--r--  crosperf/experiment_status.py | 145
-rw-r--r--  crosperf/field.py | 152
-rwxr-xr-x  crosperf/flag_test_unittest.py | 41
-rwxr-xr-x  crosperf/generate_report.py | 277
-rwxr-xr-x  crosperf/generate_report_unittest.py | 146
-rw-r--r--  crosperf/help.py | 114
-rw-r--r--  crosperf/image_checksummer.py | 69
-rw-r--r--  crosperf/label.py | 159
-rw-r--r--  crosperf/machine_image_manager.py | 304
-rwxr-xr-x  crosperf/machine_image_manager_unittest.py | 290
-rw-r--r--  crosperf/machine_manager.py | 709
-rwxr-xr-x  crosperf/machine_manager_unittest.py | 845
-rw-r--r--  crosperf/mock_instance.py | 143
-rw-r--r--  crosperf/perf_files/perf.data.report.0 | 734
-rw-r--r--  crosperf/results_cache.py | 758
-rwxr-xr-x  crosperf/results_cache_unittest.py | 1178
-rw-r--r--  crosperf/results_organizer.py | 192
-rwxr-xr-x  crosperf/results_organizer_unittest.py | 109
-rw-r--r--  crosperf/results_report.py | 691
-rw-r--r--  crosperf/results_report_templates.py | 196
-rwxr-xr-x  crosperf/results_report_unittest.py | 415
-rwxr-xr-x  crosperf/run_tests.sh | 32
-rw-r--r--  crosperf/schedv2.py | 439
-rwxr-xr-x  crosperf/schedv2_unittest.py | 221
-rw-r--r--  crosperf/settings.py | 81
-rw-r--r--  crosperf/settings_factory.py | 304
-rwxr-xr-x  crosperf/settings_factory_unittest.py | 97
-rwxr-xr-x  crosperf/settings_unittest.py | 229
-rw-r--r--  crosperf/suite_runner.py | 297
-rwxr-xr-x  crosperf/suite_runner_unittest.py | 351
-rw-r--r--  crosperf/test_cache/compare_output/autotest.tbz2 | bin 0 -> 847904 bytes
-rw-r--r--  crosperf/test_cache/compare_output/machine.txt | 1
-rw-r--r--  crosperf/test_cache/compare_output/results.txt | 6
-rw-r--r--  crosperf/test_cache/test_input/autotest.tbz2 | bin 0 -> 110940 bytes
-rw-r--r--  crosperf/test_cache/test_input/machine.txt | 1
-rw-r--r--  crosperf/test_cache/test_input/results.txt | 6
-rw-r--r--  crosperf/test_cache/test_puretelemetry_input/machine.txt | 1
-rw-r--r--  crosperf/test_cache/test_puretelemetry_input/results.txt | 6
-rw-r--r--  crosperf/test_flag.py | 12
-rw-r--r--  crosperf/translate_xbuddy.py | 33
-rw-r--r--  crosperf/unittest_keyval_file.txt | 20
80 files changed, 14306 insertions, 0 deletions
diff --git a/crosperf/benchmark.py b/crosperf/benchmark.py
new file mode 100644
index 00000000..a2a34bca
--- /dev/null
+++ b/crosperf/benchmark.py
@@ -0,0 +1,45 @@
+
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Define a type that wraps a Benchmark instance."""
+
+class Benchmark(object):
+ """Class representing a benchmark to be run.
+
+ Contains details of the benchmark suite, arguments to pass to the suite,
+ iterations to run the benchmark suite and so on. Note that the benchmark name
+ can be different from the test suite name. For example, you may want to have
+ two different benchmarks which run the same test_name with different
+ arguments.
+ """
+
+ def __init__(self,
+ name,
+ test_name,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite='',
+ show_all_results=False,
+ retries=0,
+ run_local=False):
+ self.name = name
+ # For telemetry, this is the benchmark name.
+ self.test_name = test_name
+ # For telemetry, this is the data.
+ self.test_args = test_args
+ self.iterations = iterations
+ self.perf_args = perf_args
+ self.rm_chroot_tmp = rm_chroot_tmp
+ self.iteration_adjusted = False
+ self.suite = suite
+ self.show_all_results = show_all_results
+ self.retries = retries
+ if self.suite == 'telemetry':
+ self.show_all_results = True
+ if run_local and self.suite != 'telemetry_Crosperf':
+ raise RuntimeError('run_local is only supported by telemetry_Crosperf.')
+ self.run_local = run_local
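
The class docstring above notes that several Benchmark objects may share one test_name. A minimal sketch of that, using the constructor defined above (the benchmark names are illustrative; 'record -e cycles' is the perf_args value used in the unit tests later in this change):

    from benchmark import Benchmark

    # Two benchmarks that run the same telemetry test, one plain and one
    # under 'perf record' (names are illustrative, not from the source).
    plain = Benchmark('octane_default', 'octane', '', 3, False, '',
                      suite='telemetry_Crosperf')
    profiled = Benchmark('octane_perf_record', 'octane', '', 3, False,
                         'record -e cycles', suite='telemetry_Crosperf')
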
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
new file mode 100644
index 00000000..e53187e2
--- /dev/null
+++ b/crosperf/benchmark_run.py
@@ -0,0 +1,265 @@
+
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Module of benchmark runs."""
+from __future__ import print_function
+
+import datetime
+import threading
+import time
+import traceback
+
+from cros_utils import command_executer
+from cros_utils import timeline
+
+from suite_runner import SuiteRunner
+from results_cache import MockResult
+from results_cache import MockResultsCache
+from results_cache import Result
+from results_cache import ResultsCache
+
+STATUS_FAILED = 'FAILED'
+STATUS_SUCCEEDED = 'SUCCEEDED'
+STATUS_IMAGING = 'IMAGING'
+STATUS_RUNNING = 'RUNNING'
+STATUS_WAITING = 'WAITING'
+STATUS_PENDING = 'PENDING'
+
+
+class BenchmarkRun(threading.Thread):
+ """The benchmarkrun class."""
+ def __init__(self, name, benchmark, label, iteration, cache_conditions,
+ machine_manager, logger_to_use, log_level, share_cache):
+ threading.Thread.__init__(self)
+ self.name = name
+ self._logger = logger_to_use
+ self.log_level = log_level
+ self.benchmark = benchmark
+ self.iteration = iteration
+ self.label = label
+ self.result = None
+ self.terminated = False
+ self.retval = None
+ self.run_completed = False
+ self.machine_manager = machine_manager
+ self.suite_runner = SuiteRunner(self._logger, self.log_level)
+ self.machine = None
+ self.cache_conditions = cache_conditions
+ self.runs_complete = 0
+ self.cache_hit = False
+ self.failure_reason = ''
+ self.test_args = benchmark.test_args
+ self.cache = None
+ self.profiler_args = self.GetExtraAutotestArgs()
+ self._ce = command_executer.GetCommandExecuter(self._logger,
+ log_level=self.log_level)
+ self.timeline = timeline.Timeline()
+ self.timeline.Record(STATUS_PENDING)
+ self.share_cache = share_cache
+ self.cache_has_been_read = False
+
+ # This is used by schedv2.
+ self.owner_thread = None
+
+ def ReadCache(self):
+ # Just use the first machine for running the cached version,
+ # without locking it.
+ self.cache = ResultsCache()
+ self.cache.Init(self.label.chromeos_image, self.label.chromeos_root,
+ self.benchmark.test_name, self.iteration, self.test_args,
+ self.profiler_args, self.machine_manager, self.machine,
+ self.label.board, self.cache_conditions, self._logger,
+ self.log_level, self.label, self.share_cache,
+ self.benchmark.suite, self.benchmark.show_all_results,
+ self.benchmark.run_local)
+
+ self.result = self.cache.ReadResult()
+ self.cache_hit = (self.result is not None)
+ self.cache_has_been_read = True
+
+ def run(self):
+ try:
+ if not self.cache_has_been_read:
+ self.ReadCache()
+
+ if self.result:
+ self._logger.LogOutput('%s: Cache hit.' % self.name)
+ self._logger.LogOutput(self.result.out, print_to_console=False)
+ self._logger.LogError(self.result.err, print_to_console=False)
+
+ elif self.label.cache_only:
+ self._logger.LogOutput('%s: No cache hit.' % self.name)
+ output = '%s: No cache hit.' % self.name
+ retval = 1
+ err = 'No cache hit.'
+ self.result = Result.CreateFromRun(
+ self._logger, self.log_level, self.label, self.machine, output, err,
+ retval, self.benchmark.test_name,
+ self.benchmark.suite)
+
+ else:
+ self._logger.LogOutput('%s: No cache hit.' % self.name)
+ self.timeline.Record(STATUS_WAITING)
+ # Try to acquire a machine now.
+ self.machine = self.AcquireMachine()
+ self.cache.machine = self.machine
+ self.result = self.RunTest(self.machine)
+
+ self.cache.remote = self.machine.name
+ self.label.chrome_version = self.machine_manager.GetChromeVersion(
+ self.machine)
+ self.cache.StoreResult(self.result)
+
+ if not self.label.chrome_version:
+ if self.machine:
+ self.label.chrome_version = self.machine_manager.GetChromeVersion(
+ self.machine)
+ elif self.result.chrome_version:
+ self.label.chrome_version = self.result.chrome_version
+
+ if self.terminated:
+ return
+
+ if not self.result.retval:
+ self.timeline.Record(STATUS_SUCCEEDED)
+ else:
+ if self.timeline.GetLastEvent() != STATUS_FAILED:
+ self.failure_reason = 'Return value of test suite was non-zero.'
+ self.timeline.Record(STATUS_FAILED)
+
+ except Exception, e:
+ self._logger.LogError("Benchmark run: '%s' failed: %s" % (self.name, e))
+ traceback.print_exc()
+ if self.timeline.GetLastEvent() != STATUS_FAILED:
+ self.timeline.Record(STATUS_FAILED)
+ self.failure_reason = str(e)
+ finally:
+ if self.owner_thread is not None:
+ # In schedv2 mode, we do not lock the machine locally, so this is a no-op.
+ pass
+ elif self.machine:
+ if not self.machine.IsReachable():
+ self._logger.LogOutput('Machine %s is not reachable, removing it.' %
+ self.machine.name)
+ self.machine_manager.RemoveMachine(self.machine.name)
+ self._logger.LogOutput('Releasing machine: %s' % self.machine.name)
+ self.machine_manager.ReleaseMachine(self.machine)
+ self._logger.LogOutput('Released machine: %s' % self.machine.name)
+
+ def Terminate(self):
+ self.terminated = True
+ self.suite_runner.Terminate()
+ if self.timeline.GetLastEvent() != STATUS_FAILED:
+ self.timeline.Record(STATUS_FAILED)
+ self.failure_reason = 'Thread terminated.'
+
+ def AcquireMachine(self):
+ if self.owner_thread is not None:
+ # No need to lock the machine locally; the owning DutWorker thread is
+ # responsible for running this benchmark run.
+ return self.owner_thread.dut()
+ while True:
+ machine = None
+ if self.terminated:
+ raise RuntimeError('Thread terminated while trying to acquire machine.')
+
+ machine = self.machine_manager.AcquireMachine(self.label)
+
+ if machine:
+ self._logger.LogOutput('%s: Machine %s acquired at %s' %
+ (self.name, machine.name,
+ datetime.datetime.now()))
+ break
+ time.sleep(10)
+ return machine
+
+ def GetExtraAutotestArgs(self):
+ if self.benchmark.perf_args and self.benchmark.suite == 'telemetry':
+ self._logger.LogError('Telemetry does not support profiler.')
+ self.benchmark.perf_args = ''
+
+ if self.benchmark.perf_args and self.benchmark.suite == 'test_that':
+ self._logger.LogError('test_that does not support profiler.')
+ self.benchmark.perf_args = ''
+
+ if self.benchmark.perf_args:
+ perf_args_list = self.benchmark.perf_args.split(' ')
+ perf_args_list = [perf_args_list[0]] + ['-a'] + perf_args_list[1:]
+ perf_args = ' '.join(perf_args_list)
+ if not perf_args_list[0] in ['record', 'stat']:
+ raise SyntaxError('perf_args must start with either record or stat')
+ extra_test_args = ['--profiler=custom_perf',
+ ("--profiler_args='perf_options=\"%s\"'" % perf_args)]
+ return ' '.join(extra_test_args)
+ else:
+ return ''
+
+ def RunTest(self, machine):
+ self.timeline.Record(STATUS_IMAGING)
+ if self.owner_thread is not None:
+ # In schedv2 mode, do not call ImageMachine here; schedv2 guarantees the
+ # machine already has the correct image.
+ pass
+ else:
+ self.machine_manager.ImageMachine(machine, self.label)
+ self.timeline.Record(STATUS_RUNNING)
+ retval, out, err = self.suite_runner.Run(machine.name, self.label,
+ self.benchmark, self.test_args,
+ self.profiler_args)
+ self.run_completed = True
+ return Result.CreateFromRun(self._logger, self.log_level, self.label,
+ self.machine, out, err, retval,
+ self.benchmark.test_name, self.benchmark.suite)
+
+ def SetCacheConditions(self, cache_conditions):
+ self.cache_conditions = cache_conditions
+
+ def logger(self):
+ """Return the logger, only used by unittest.
+
+ Returns:
+ self._logger
+ """
+
+ return self._logger
+
+ def __str__(self):
+ """For better debugging."""
+
+ return 'BenchmarkRun[name="{}"]'.format(self.name)
+
+
+class MockBenchmarkRun(BenchmarkRun):
+ """Inherited from BenchmarkRun."""
+
+ def ReadCache(self):
+ # Just use the first machine for running the cached version,
+ # without locking it.
+ self.cache = MockResultsCache()
+ self.cache.Init(self.label.chromeos_image, self.label.chromeos_root,
+ self.benchmark.test_name, self.iteration, self.test_args,
+ self.profiler_args, self.machine_manager, self.machine,
+ self.label.board, self.cache_conditions, self._logger,
+ self.log_level, self.label, self.share_cache,
+ self.benchmark.suite, self.benchmark.show_all_results,
+ self.benchmark.run_local)
+
+ self.result = self.cache.ReadResult()
+ self.cache_hit = (self.result is not None)
+
+ def RunTest(self, machine):
+ """Remove Result.CreateFromRun for testing."""
+ self.timeline.Record(STATUS_IMAGING)
+ self.machine_manager.ImageMachine(machine, self.label)
+ self.timeline.Record(STATUS_RUNNING)
+ [retval, out, err] = self.suite_runner.Run(machine.name, self.label,
+ self.benchmark, self.test_args,
+ self.profiler_args)
+ self.run_completed = True
+ rr = MockResult('logger', self.label, self.log_level, machine)
+ rr.out = out
+ rr.err = err
+ rr.retval = retval
+ return rr
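
GetExtraAutotestArgs above converts a perf_args string such as 'record -e cycles' into autotest profiler flags by inserting '-a' after the perf subcommand. A standalone sketch that mirrors that logic (the helper name below is hypothetical; the real code is the method above):

    def perf_args_to_profiler_args(perf_args):
      # Mirrors BenchmarkRun.GetExtraAutotestArgs for 'record'/'stat' args.
      parts = perf_args.split(' ')
      if parts[0] not in ['record', 'stat']:
        raise SyntaxError('perf_args must start with either record or stat')
      perf_options = ' '.join([parts[0]] + ['-a'] + parts[1:])
      return ' '.join(['--profiler=custom_perf',
                       "--profiler_args='perf_options=\"%s\"'" % perf_options])

    # perf_args_to_profiler_args('record -e cycles') returns:
    #   --profiler=custom_perf --profiler_args='perf_options="record -a -e cycles"'
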
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
new file mode 100755
index 00000000..9af66a33
--- /dev/null
+++ b/crosperf/benchmark_run_unittest.py
@@ -0,0 +1,430 @@
+#!/usr/bin/env python2
+
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Testing of benchmark_run."""
+
+from __future__ import print_function
+
+import mock
+import unittest
+import inspect
+
+from cros_utils import logger
+
+import benchmark_run
+
+from suite_runner import MockSuiteRunner
+from suite_runner import SuiteRunner
+from label import MockLabel
+from benchmark import Benchmark
+from machine_manager import MockMachineManager
+from machine_manager import MachineManager
+from machine_manager import MockCrosMachine
+from results_cache import MockResultsCache
+from results_cache import CacheConditions
+from results_cache import Result
+from results_cache import ResultsCache
+
+
+class BenchmarkRunTest(unittest.TestCase):
+ """Unit tests for the BenchmarkRun class and all of its methods."""
+
+ def setUp(self):
+ self.status = []
+ self.called_ReadCache = None
+ self.log_error = []
+ self.log_output = []
+ self.err_msg = None
+ self.test_benchmark = Benchmark(
+ 'page_cycler.netsim.top_10', # name
+ 'page_cycler.netsim.top_10', # test_name
+ '', # test_args
+ 1, # iterations
+ False, # rm_chroot_tmp
+ '', # perf_args
+ suite='telemetry_Crosperf') # suite
+
+ self.test_label = MockLabel(
+ 'test1',
+ 'image1',
+ 'autotest_dir',
+ '/tmp/test_benchmark_run',
+ 'x86-alex',
+ 'chromeos2-row1-rack4-host9.cros',
+ image_args='',
+ cache_dir='',
+ cache_only=False,
+ log_level='average',
+ compiler='gcc')
+
+ self.test_cache_conditions = [
+ CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH
+ ]
+
+ self.mock_logger = logger.GetLogger(log_dir='', mock=True)
+
+ self.mock_machine_manager = mock.Mock(spec=MachineManager)
+
+ def testDryRun(self):
+ my_label = MockLabel(
+ 'test1',
+ 'image1',
+ 'autotest_dir',
+ '/tmp/test_benchmark_run',
+ 'x86-alex',
+ 'chromeos2-row1-rack4-host9.cros',
+ image_args='',
+ cache_dir='',
+ cache_only=False,
+ log_level='average',
+ compiler='gcc')
+
+ logging_level = 'average'
+ m = MockMachineManager('/tmp/chromeos_root', 0, logging_level, '')
+ m.AddMachine('chromeos2-row1-rack4-host9.cros')
+ bench = Benchmark(
+ 'page_cycler.netsim.top_10', # name
+ 'page_cycler.netsim.top_10', # test_name
+ '', # test_args
+ 1, # iterations
+ False, # rm_chroot_tmp
+ '', # perf_args
+ suite='telemetry_Crosperf') # suite
+ b = benchmark_run.MockBenchmarkRun('test run', bench, my_label, 1, [], m,
+ logger.GetLogger(), logging_level, '')
+ b.cache = MockResultsCache()
+ b.suite_runner = MockSuiteRunner()
+ b.start()
+
+ # Make sure the arguments to BenchmarkRun.__init__ have not changed
+ # since the last time this test was updated:
+ args_list = [
+ 'self', 'name', 'benchmark', 'label', 'iteration', 'cache_conditions',
+ 'machine_manager', 'logger_to_use', 'log_level', 'share_cache'
+ ]
+ arg_spec = inspect.getargspec(benchmark_run.BenchmarkRun.__init__)
+ self.assertEqual(len(arg_spec.args), len(args_list))
+ self.assertEqual(arg_spec.args, args_list)
+
+ def test_init(self):
+ # Nothing really worth testing here; just field assignments.
+ pass
+
+ def test_read_cache(self):
+ # Nothing really worth testing here, either.
+ pass
+
+ def test_run(self):
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '')
+
+ def MockLogOutput(msg, print_to_console=False):
+ 'Helper function for test_run.'
+ del print_to_console
+ self.log_output.append(msg)
+
+ def MockLogError(msg, print_to_console=False):
+ 'Helper function for test_run.'
+ del print_to_console
+ self.log_error.append(msg)
+
+ def MockRecordStatus(msg):
+ 'Helper function for test_run.'
+ self.status.append(msg)
+
+ def FakeReadCache():
+ 'Helper function for test_run.'
+ br.cache = mock.Mock(spec=ResultsCache)
+ self.called_ReadCache = True
+ return 0
+
+ def FakeReadCacheSucceed():
+ 'Helper function for test_run.'
+ br.cache = mock.Mock(spec=ResultsCache)
+ br.result = mock.Mock(spec=Result)
+ br.result.out = 'result.out stuff'
+ br.result.err = 'result.err stuff'
+ br.result.retval = 0
+ self.called_ReadCache = True
+ return 0
+
+ def FakeReadCacheException():
+ 'Helper function for test_run.'
+ raise RuntimeError('This is an exception test; it is supposed to happen')
+
+ def FakeAcquireMachine():
+ 'Helper function for test_run.'
+ mock_machine = MockCrosMachine('chromeos1-row3-rack5-host7.cros',
+ 'chromeos', 'average')
+ return mock_machine
+
+ def FakeRunTest(_machine):
+ 'Helper function for test_run.'
+ mock_result = mock.Mock(spec=Result)
+ mock_result.retval = 0
+ return mock_result
+
+ def FakeRunTestFail(_machine):
+ 'Helper function for test_run.'
+ mock_result = mock.Mock(spec=Result)
+ mock_result.retval = 1
+ return mock_result
+
+ def ResetTestValues():
+ 'Helper function for test_run.'
+ self.log_output = []
+ self.log_error = []
+ self.status = []
+ br.result = None
+ self.called_ReadCache = False
+
+ # Assign all the fake functions to the appropriate objects.
+ br.logger().LogOutput = MockLogOutput
+ br.logger().LogError = MockLogError
+ br.timeline.Record = MockRecordStatus
+ br.ReadCache = FakeReadCache
+ br.RunTest = FakeRunTest
+ br.AcquireMachine = FakeAcquireMachine
+
+ # First test: No cache hit, all goes well.
+ ResetTestValues()
+ br.run()
+ self.assertTrue(self.called_ReadCache)
+ self.assertEqual(self.log_output, [
+ 'test_run: No cache hit.',
+ 'Releasing machine: chromeos1-row3-rack5-host7.cros',
+ 'Released machine: chromeos1-row3-rack5-host7.cros'
+ ])
+ self.assertEqual(len(self.log_error), 0)
+ self.assertEqual(self.status, ['WAITING', 'SUCCEEDED'])
+
+ # Second test: No cached result found; test run was "terminated" for some
+ # reason.
+ ResetTestValues()
+ br.terminated = True
+ br.run()
+ self.assertTrue(self.called_ReadCache)
+ self.assertEqual(self.log_output, [
+ 'test_run: No cache hit.',
+ 'Releasing machine: chromeos1-row3-rack5-host7.cros',
+ 'Released machine: chromeos1-row3-rack5-host7.cros'
+ ])
+ self.assertEqual(len(self.log_error), 0)
+ self.assertEqual(self.status, ['WAITING'])
+
+ # Third test. No cached result found; RunTest failed for some reason.
+ ResetTestValues()
+ br.terminated = False
+ br.RunTest = FakeRunTestFail
+ br.run()
+ self.assertTrue(self.called_ReadCache)
+ self.assertEqual(self.log_output, [
+ 'test_run: No cache hit.',
+ 'Releasing machine: chromeos1-row3-rack5-host7.cros',
+ 'Released machine: chromeos1-row3-rack5-host7.cros'
+ ])
+ self.assertEqual(len(self.log_error), 0)
+ self.assertEqual(self.status, ['WAITING', 'FAILED'])
+
+ # Fourth test: ReadCache found a cached result.
+ ResetTestValues()
+ br.RunTest = FakeRunTest
+ br.ReadCache = FakeReadCacheSucceed
+ br.run()
+ self.assertTrue(self.called_ReadCache)
+ self.assertEqual(self.log_output, [
+ 'test_run: Cache hit.', 'result.out stuff',
+ 'Releasing machine: chromeos1-row3-rack5-host7.cros',
+ 'Released machine: chromeos1-row3-rack5-host7.cros'
+ ])
+ self.assertEqual(self.log_error, ['result.err stuff'])
+ self.assertEqual(self.status, ['SUCCEEDED'])
+
+ # Fifth test: ReadCache generates an exception; does the try/finally block
+ # work?
+ ResetTestValues()
+ br.ReadCache = FakeReadCacheException
+ br.machine = FakeAcquireMachine()
+ br.run()
+ self.assertEqual(self.log_error, [
+ "Benchmark run: 'test_run' failed: This is an exception test; it is "
+ 'supposed to happen'
+ ])
+ self.assertEqual(self.status, ['FAILED'])
+
+ def test_terminate_pass(self):
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '')
+
+ def GetLastEventPassed():
+ 'Helper function for test_terminate_pass'
+ return benchmark_run.STATUS_SUCCEEDED
+
+ def RecordStub(status):
+ 'Helper function for test_terminate_pass'
+ self.status = status
+
+ self.status = benchmark_run.STATUS_SUCCEEDED
+ self.assertFalse(br.terminated)
+ self.assertFalse(br.suite_runner.CommandTerminator().IsTerminated())
+
+ br.timeline.GetLastEvent = GetLastEventPassed
+ br.timeline.Record = RecordStub
+
+ br.Terminate()
+
+ self.assertTrue(br.terminated)
+ self.assertTrue(br.suite_runner.CommandTerminator().IsTerminated())
+ self.assertEqual(self.status, benchmark_run.STATUS_FAILED)
+
+ def test_terminate_fail(self):
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '')
+
+ def GetLastEventFailed():
+ 'Helper function for test_terminate_fail'
+ return benchmark_run.STATUS_FAILED
+
+ def RecordStub(status):
+ 'Helper function for test_terminate_fail'
+ self.status = status
+
+ self.status = benchmark_run.STATUS_SUCCEEDED
+ self.assertFalse(br.terminated)
+ self.assertFalse(br.suite_runner.CommandTerminator().IsTerminated())
+
+ br.timeline.GetLastEvent = GetLastEventFailed
+ br.timeline.Record = RecordStub
+
+ br.Terminate()
+
+ self.assertTrue(br.terminated)
+ self.assertTrue(br.suite_runner.CommandTerminator().IsTerminated())
+ self.assertEqual(self.status, benchmark_run.STATUS_SUCCEEDED)
+
+ def test_acquire_machine(self):
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '')
+
+ br.terminated = True
+ self.assertRaises(Exception, br.AcquireMachine)
+
+ br.terminated = False
+ mock_machine = MockCrosMachine('chromeos1-row3-rack5-host7.cros',
+ 'chromeos', 'average')
+ self.mock_machine_manager.AcquireMachine.return_value = mock_machine
+
+ machine = br.AcquireMachine()
+ self.assertEqual(machine.name, 'chromeos1-row3-rack5-host7.cros')
+
+ def test_get_extra_autotest_args(self):
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '')
+
+ def MockLogError(err_msg):
+ 'Helper function for test_get_extra_autotest_args'
+ self.err_msg = err_msg
+
+ self.mock_logger.LogError = MockLogError
+
+ result = br.GetExtraAutotestArgs()
+ self.assertEqual(result, '')
+
+ self.test_benchmark.perf_args = 'record -e cycles'
+ result = br.GetExtraAutotestArgs()
+ self.assertEqual(
+ result,
+ "--profiler=custom_perf --profiler_args='perf_options=\"record -a -e "
+ "cycles\"'")
+
+ self.test_benchmark.suite = 'telemetry'
+ result = br.GetExtraAutotestArgs()
+ self.assertEqual(result, '')
+ self.assertEqual(self.err_msg, 'Telemetry does not support profiler.')
+
+ self.test_benchmark.perf_args = 'record -e cycles'
+ self.test_benchmark.suite = 'test_that'
+ result = br.GetExtraAutotestArgs()
+ self.assertEqual(result, '')
+ self.assertEqual(self.err_msg, 'test_that does not support profiler.')
+
+ self.test_benchmark.perf_args = 'junk args'
+ self.test_benchmark.suite = 'telemetry_Crosperf'
+ self.assertRaises(Exception, br.GetExtraAutotestArgs)
+
+ @mock.patch.object(SuiteRunner, 'Run')
+ @mock.patch.object(Result, 'CreateFromRun')
+ def test_run_test(self, mock_result, mock_runner):
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '')
+
+ self.status = []
+
+ def MockRecord(status):
+ self.status.append(status)
+
+ br.timeline.Record = MockRecord
+ mock_machine = MockCrosMachine('chromeos1-row3-rack5-host7.cros',
+ 'chromeos', 'average')
+ mock_runner.return_value = [0, "{'Score':100}", '']
+
+ br.RunTest(mock_machine)
+
+ self.assertTrue(br.run_completed)
+ self.assertEqual(
+ self.status,
+ [benchmark_run.STATUS_IMAGING, benchmark_run.STATUS_RUNNING])
+
+ self.assertEqual(br.machine_manager.ImageMachine.call_count, 1)
+ br.machine_manager.ImageMachine.assert_called_with(mock_machine,
+ self.test_label)
+ self.assertEqual(mock_runner.call_count, 1)
+ mock_runner.assert_called_with(mock_machine.name, br.label, br.benchmark,
+ '', br.profiler_args)
+
+ self.assertEqual(mock_result.call_count, 1)
+ mock_result.assert_called_with(self.mock_logger, 'average', self.test_label,
+ None, "{'Score':100}", '', 0,
+ 'page_cycler.netsim.top_10',
+ 'telemetry_Crosperf')
+
+ def test_set_cache_conditions(self):
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '')
+
+ phony_cache_conditions = [123, 456, True, False]
+
+ self.assertEqual(br.cache_conditions, self.test_cache_conditions)
+
+ br.SetCacheConditions(phony_cache_conditions)
+ self.assertEqual(br.cache_conditions, phony_cache_conditions)
+
+ br.SetCacheConditions(self.test_cache_conditions)
+ self.assertEqual(br.cache_conditions, self.test_cache_conditions)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/crosperf/benchmark_unittest.py b/crosperf/benchmark_unittest.py
new file mode 100755
index 00000000..320ede65
--- /dev/null
+++ b/crosperf/benchmark_unittest.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python2
+#
+# Copyright 2014 Google Inc. All Rights Reserved
+"""Unit tests for the Crosperf Benchmark class."""
+
+from __future__ import print_function
+
+import inspect
+from benchmark import Benchmark
+
+import unittest
+
+
+class BenchmarkTestCase(unittest.TestCase):
+ """Individual tests for the Benchmark class."""
+
+ def test_benchmark(self):
+ # Test creating a benchmark with all the fields filled out.
+ b1 = Benchmark('b1_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles', # perf_args
+ 'telemetry_Crosperf', # suite
+ True) # show_all_results
+ self.assertEqual(b1.suite, 'telemetry_Crosperf')
+
+ # Test creating a benchmark field with default fields left out.
+ b2 = Benchmark('b2_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles') # perf_args
+ self.assertEqual(b2.suite, '')
+ self.assertFalse(b2.show_all_results)
+
+ # Test explicitly creating 'suite=telemetry' and 'show_all_results=False'
+ # and see what happens.
+ b3 = Benchmark('b3_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles', # perf_args
+ 'telemetry', # suite
+ False) # show_all_results
+ self.assertTrue(b3.show_all_results)
+
+ # Check to see if the args to Benchmark have changed since the last time
+ # this test was updated.
+ args_list = ['self', 'name', 'test_name', 'test_args', 'iterations',
+ 'rm_chroot_tmp', 'perf_args', 'suite', 'show_all_results',
+ 'retries', 'run_local']
+ arg_spec = inspect.getargspec(Benchmark.__init__)
+ self.assertEqual(len(arg_spec.args), len(args_list))
+ for arg in args_list:
+ self.assertIn(arg, arg_spec.args)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/crosperf/column_chart.py b/crosperf/column_chart.py
new file mode 100644
index 00000000..7e6821d0
--- /dev/null
+++ b/crosperf/column_chart.py
@@ -0,0 +1,59 @@
+# Copyright 2011 Google Inc. All Rights Reserved.
+"""Module to draw column chart."""
+
+
+class ColumnChart(object):
+ """class to draw column chart."""
+
+ def __init__(self, title, width, height):
+ self.title = title
+ self.chart_div = filter(str.isalnum, title)
+ self.width = width
+ self.height = height
+ self.columns = []
+ self.rows = []
+ self.series = []
+
+ def AddSeries(self, column_name, series_type, color):
+ for i in range(len(self.columns)):
+ if column_name == self.columns[i][1]:
+ self.series.append((i - 1, series_type, color))
+ break
+
+ def AddColumn(self, name, column_type):
+ self.columns.append((column_type, name))
+
+ def AddRow(self, row):
+ self.rows.append(row)
+
+ def GetJavascript(self):
+ res = 'var data = new google.visualization.DataTable();\n'
+ for column in self.columns:
+ res += "data.addColumn('%s', '%s');\n" % column
+ res += 'data.addRows(%s);\n' % len(self.rows)
+ for row in range(len(self.rows)):
+ for column in range(len(self.columns)):
+ val = self.rows[row][column]
+ if isinstance(val, str):
+ val = "'%s'" % val
+ res += 'data.setValue(%s, %s, %s);\n' % (row, column, val)
+
+ series_javascript = ''
+ for series in self.series:
+ series_javascript += "%s: {type: '%s', color: '%s'}, " % series
+
+ chart_add_javascript = """
+var chart_%s = new google.visualization.ComboChart(
+ document.getElementById('%s'));
+chart_%s.draw(data, {width: %s, height: %s, title: '%s', legend: 'none',
+ seriesType: "bars", lineWidth: 0, pointSize: 5, series: {%s},
+ vAxis: {minValue: 0}})
+"""
+
+ res += chart_add_javascript % (self.chart_div, self.chart_div,
+ self.chart_div, self.width, self.height,
+ self.title, series_javascript)
+ return res
+
+ def GetDiv(self):
+ return "<div id='%s' class='chart'></div>" % self.chart_div
diff --git a/crosperf/compare_machines.py b/crosperf/compare_machines.py
new file mode 100644
index 00000000..0a61eeb9
--- /dev/null
+++ b/crosperf/compare_machines.py
@@ -0,0 +1,64 @@
+# Copyright 2014 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Module to compare two machines."""
+
+from __future__ import print_function
+
+import os.path
+import sys
+import argparse
+
+from machine_manager import CrosMachine
+
+
+def PrintUsage(msg):
+ print(msg)
+ print('Usage: ')
+ print('\n compare_machines.py --chromeos_root=/path/to/chroot/ '
+ 'machine1 machine2 ...')
+
+
+def Main(argv):
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--chromeos_root',
+ default='/path/to/chromeos',
+ dest='chromeos_root',
+ help='ChromeOS root checkout directory')
+ parser.add_argument('remotes', nargs=argparse.REMAINDER)
+
+ options = parser.parse_args(argv)
+
+ machine_list = options.remotes
+ if len(machine_list) < 2:
+ PrintUsage('ERROR: Must specify at least two machines.')
+ return 1
+ elif not os.path.exists(options.chromeos_root):
+ PrintUsage('Error: chromeos_root does not exist: %s' % options.chromeos_root)
+ return 1
+
+ chroot = options.chromeos_root
+ cros_machines = []
+ test_machine_checksum = None
+ for m in machine_list:
+ cm = CrosMachine(m, chroot, 'average')
+ cros_machines = cros_machines + [cm]
+ test_machine_checksum = cm.machine_checksum
+
+ ret = 0
+ for cm in cros_machines:
+ print('checksum for %s : %s' % (cm.name, cm.machine_checksum))
+ if cm.machine_checksum != test_machine_checksum:
+ ret = 1
+ print('Machine checksums do not all match')
+
+ if ret == 0:
+ print('Machines all match.')
+
+ return ret
+
+
+if __name__ == '__main__':
+ retval = Main(sys.argv[1:])
+ sys.exit(retval)
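
compare_machines.Main can be driven from Python as well as from the command line; a sketch with placeholder hostnames and chroot path (a real run needs an existing chromeos_root and reachable machines):

    import compare_machines

    # Placeholders; substitute a real checkout path and DUT hostnames.
    ret = compare_machines.Main(['--chromeos_root=/path/to/chromeos_root',
                                 'machine1.cros', 'machine2.cros'])
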
diff --git a/crosperf/config.py b/crosperf/config.py
new file mode 100644
index 00000000..76175660
--- /dev/null
+++ b/crosperf/config.py
@@ -0,0 +1,13 @@
+# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""A configure file."""
+config = {}
+
+
+def GetConfig(key):
+ return config.get(key)
+
+
+def AddConfig(key, value):
+ config[key] = value
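
config is a process-wide key/value store shared through the module; typical use looks like the following (the key names are illustrative):

    import config

    config.AddConfig('logs_dir', '/tmp/crosperf_logs')
    logs_dir = config.GetConfig('logs_dir')    # '/tmp/crosperf_logs'
    missing = config.GetConfig('no_such_key')  # None
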
diff --git a/crosperf/config_unittest.py b/crosperf/config_unittest.py
new file mode 100755
index 00000000..637dae9e
--- /dev/null
+++ b/crosperf/config_unittest.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python2
+#
+# Copyright 2014 Google Inc. All Rights Reserved.
+"""Unit tests for config.py"""
+
+from __future__ import print_function
+
+import config
+
+import unittest
+
+
+class ConfigTestCase(unittest.TestCase):
+ """Class for the config unit tests."""
+
+ def test_config(self):
+ # Verify that config exists, that it's a dictionary, and that it's
+ # empty.
+ self.assertTrue(type(config.config) is dict)
+ self.assertEqual(len(config.config), 0)
+
+ # Verify that attempting to get a non-existent key out of the
+ # dictionary returns None.
+ self.assertIsNone(config.GetConfig('rabbit'))
+ self.assertIsNone(config.GetConfig('key1'))
+
+ config.AddConfig('key1', 16)
+ config.AddConfig('key2', 32)
+ config.AddConfig('key3', 'third value')
+
+ # Verify that after 3 calls to AddConfig we have 3 values in the
+ # dictionary.
+ self.assertEqual(len(config.config), 3)
+
+ # Verify that GetConfig works and gets the expected values.
+ self.assertIs(config.GetConfig('key2'), 32)
+ self.assertIs(config.GetConfig('key3'), 'third value')
+ self.assertIs(config.GetConfig('key1'), 16)
+
+ # Re-set config.
+ config.config.clear()
+
+ # Verify that config exists, that it's a dictionary, and that it's
+ # empty.
+ self.assertTrue(type(config.config) is dict)
+ self.assertEqual(len(config.config), 0)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/crosperf/crosperf b/crosperf/crosperf
new file mode 100755
index 00000000..a29dcbfa
--- /dev/null
+++ b/crosperf/crosperf
@@ -0,0 +1,2 @@
+#!/bin/bash
+PYTHONPATH=$(dirname $0)/..:$PYTHONPATH exec python $(dirname $0)/crosperf.py "$@"
diff --git a/crosperf/crosperf.py b/crosperf/crosperf.py
new file mode 100755
index 00000000..b78c8b9e
--- /dev/null
+++ b/crosperf/crosperf.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python2
+
+# Copyright 2011 Google Inc. All Rights Reserved.
+"""The driver script for running performance benchmarks on ChromeOS."""
+
+from __future__ import print_function
+
+import atexit
+import argparse
+import os
+import signal
+import sys
+from experiment_runner import ExperimentRunner
+from experiment_runner import MockExperimentRunner
+from experiment_factory import ExperimentFactory
+from experiment_file import ExperimentFile
+from settings_factory import GlobalSettings
+
+# This import causes pylint to warn about "No name 'logger' in module
+# 'cros_utils'". I do not understand why. The import works fine in python.
+# pylint: disable=no-name-in-module
+from cros_utils import logger
+
+import test_flag
+
+
+def SetupParserOptions(parser):
+ """Add all options to the parser."""
+ parser.add_argument(
+ '--dry_run',
+ dest='dry_run',
+ help=('Parse the experiment file and '
+ 'show what will be done'),
+ action='store_true',
+ default=False)
+ # Allow each of the global fields to be overridden by passing in
+ # options. Add each global field as an option.
+ option_settings = GlobalSettings('')
+ for field_name in option_settings.fields:
+ field = option_settings.fields[field_name]
+ parser.add_argument(
+ '--%s' % field.name,
+ dest=field.name,
+ help=field.description,
+ action='store')
+
+
+def ConvertOptionsToSettings(options):
+ """Convert options passed in into global settings."""
+ option_settings = GlobalSettings('option_settings')
+ for option_name in options.__dict__:
+ if (options.__dict__[option_name] is not None and
+ option_name in option_settings.fields):
+ option_settings.SetField(option_name, options.__dict__[option_name])
+ return option_settings
+
+
+def Cleanup(experiment):
+ """Handler function which is registered to the atexit handler."""
+ experiment.Cleanup()
+
+
+def CallExitHandler(signum, _):
+ """Signal handler that transforms a signal into a call to exit.
+
+ This is useful because functionality registered by "atexit" will
+ be called. It also means you can "catch" the signal by catching
+ the SystemExit exception.
+ """
+ sys.exit(128 + signum)
+
+
+def RunCrosperf(argv):
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument(
+ '--noschedv2',
+ dest='noschedv2',
+ default=False,
+ action='store_true',
+ help=('Do not use new scheduler. '
+ 'Use original scheduler instead.'))
+ parser.add_argument(
+ '-l',
+ '--log_dir',
+ dest='log_dir',
+ default='',
+ help='The log_dir, default is under <crosperf_logs>/logs')
+
+ SetupParserOptions(parser)
+ options, args = parser.parse_known_args(argv)
+
+ # Convert the relevant options that are passed in into a settings
+ # object which will override settings in the experiment file.
+ option_settings = ConvertOptionsToSettings(options)
+ log_dir = os.path.abspath(os.path.expanduser(options.log_dir))
+ logger.GetLogger(log_dir)
+
+ if len(args) == 2:
+ experiment_filename = args[1]
+ else:
+ parser.error('Invalid number of arguments.')
+
+ working_directory = os.getcwd()
+ if options.dry_run:
+ test_flag.SetTestMode(True)
+
+ experiment_file = ExperimentFile(
+ open(experiment_filename, 'rb'), option_settings)
+ if not experiment_file.GetGlobalSettings().GetField('name'):
+ experiment_name = os.path.basename(experiment_filename)
+ experiment_file.GetGlobalSettings().SetField('name', experiment_name)
+ experiment = ExperimentFactory().GetExperiment(experiment_file,
+ working_directory, log_dir)
+
+ json_report = experiment_file.GetGlobalSettings().GetField('json_report')
+
+ signal.signal(signal.SIGTERM, CallExitHandler)
+ atexit.register(Cleanup, experiment)
+
+ if options.dry_run:
+ runner = MockExperimentRunner(experiment, json_report)
+ else:
+ runner = ExperimentRunner(
+ experiment, json_report, using_schedv2=(not options.noschedv2))
+
+ runner.Run()
+
+
+def Main(argv):
+ try:
+ RunCrosperf(argv)
+ except Exception as ex:
+ # Flush buffers before exiting to avoid out of order printing
+ sys.stdout.flush()
+ sys.stderr.flush()
+ print('Crosperf error: %s' % repr(ex))
+ sys.stdout.flush()
+ sys.stderr.flush()
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ Main(sys.argv)
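
SetupParserOptions above turns every GlobalSettings field into a command-line flag, and ConvertOptionsToSettings folds the flags that were actually passed back into a settings object that overrides the experiment file. A sketch of that round trip (the experiment file name is a placeholder; the --rerun flag mirrors the unit test later in this change):

    import argparse
    import crosperf

    parser = argparse.ArgumentParser()
    crosperf.SetupParserOptions(parser)
    # The experiment file stays in the unknown args, which RunCrosperf picks up.
    options, unknown = parser.parse_known_args(['my_experiment.exp',
                                                '--rerun=True'])
    overrides = crosperf.ConvertOptionsToSettings(options)
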
diff --git a/crosperf/crosperf_test.py b/crosperf/crosperf_test.py
new file mode 100755
index 00000000..085efafe
--- /dev/null
+++ b/crosperf/crosperf_test.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python2
+
+# Copyright 2011 Google Inc. All Rights Reserved.
+"""Test for crosperf."""
+
+from __future__ import print_function
+
+import os
+import tempfile
+import unittest
+import crosperf
+from cros_utils.file_utils import FileUtils
+
+EXPERIMENT_FILE_1 = """
+ board: x86-alex
+ remote: chromeos-alex3
+
+ benchmark: PageCycler {
+ iterations: 3
+ }
+
+ image1 {
+ chromeos_image: /usr/local/google/cros_image1.bin
+ }
+
+ image2 {
+ chromeos_image: /usr/local/google/cros_image2.bin
+ }
+ """
+
+
+class CrosPerfTest(unittest.TestCase):
+ """Class to test Crosperf."""
+
+ def testDryRun(self):
+ filehandle, filename = tempfile.mkstemp()
+ os.write(filehandle, EXPERIMENT_FILE_1)
+ crosperf.Main(['', filename, '--dry_run'])
+ os.remove(filename)
+
+
+if __name__ == '__main__':
+ FileUtils.Configure(True)
+ unittest.main()
diff --git a/crosperf/crosperf_unittest.py b/crosperf/crosperf_unittest.py
new file mode 100755
index 00000000..4a468967
--- /dev/null
+++ b/crosperf/crosperf_unittest.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python2
+#
+# Copyright 2014 Google Inc. All Rights Reserved.
+"""Unittest for crosperf."""
+
+from __future__ import print_function
+
+import argparse
+import StringIO
+
+import unittest
+
+import crosperf
+import settings_factory
+import experiment_file
+
+EXPERIMENT_FILE_1 = """
+ board: x86-alex
+ remote: chromeos-alex3
+ perf_args: record -a -e cycles
+ benchmark: PageCycler {
+ iterations: 3
+ }
+
+ image1 {
+ chromeos_image: /usr/local/google/cros_image1.bin
+ }
+
+ image2 {
+ remote: chromeos-lumpy1
+ chromeos_image: /usr/local/google/cros_image2.bin
+ }
+ """
+
+
+class CrosperfTest(unittest.TestCase):
+ """Crosperf test class."""
+
+ def setUp(self):
+ input_file = StringIO.StringIO(EXPERIMENT_FILE_1)
+ self.exp_file = experiment_file.ExperimentFile(input_file)
+
+ def test_convert_options_to_settings(self):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-l',
+ '--log_dir',
+ dest='log_dir',
+ default='',
+ help='The log_dir, default is under '
+ '<crosperf_logs>/logs')
+ crosperf.SetupParserOptions(parser)
+ argv = ['crosperf/crosperf.py', 'temp.exp', '--rerun=True']
+ options, _ = parser.parse_known_args(argv)
+ settings = crosperf.ConvertOptionsToSettings(options)
+ self.assertIsNotNone(settings)
+ self.assertIsInstance(settings, settings_factory.GlobalSettings)
+ self.assertEqual(len(settings.fields), 25)
+ self.assertTrue(settings.GetField('rerun'))
+ argv = ['crosperf/crosperf.py', 'temp.exp']
+ options, _ = parser.parse_known_args(argv)
+ settings = crosperf.ConvertOptionsToSettings(options)
+ self.assertFalse(settings.GetField('rerun'))
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/crosperf/default-telemetry-results.json b/crosperf/default-telemetry-results.json
new file mode 100644
index 00000000..7099ac7c
--- /dev/null
+++ b/crosperf/default-telemetry-results.json
@@ -0,0 +1,174 @@
+{
+ "peacekeeper.html": [
+ "Total__Score",
+ "workerContrast01__Score",
+ "workerContrast02__Score"
+ ],
+ "page_cycler_v2.intl_hi_ru": [
+ "cold_times__page_load_time",
+ "warm_times__page_load_time",
+ "pcv1-warm@@timeToOnload_avg__summary",
+ "pcv1-cold@@timeToOnload_avg__summary"
+ ],
+ "smoothness.tough_webgl_cases": [
+ "percentage_smooth__percentage_smooth",
+ "percentage_smooth__summary"
+ ],
+ "page_cycler_v2.intl_es_fr_pt-BR": [
+ "cold_times__page_load_time",
+ "warm_times__page_load_time",
+ "pcv1-warm@@timeToOnload_avg__summary",
+ "pcv1-cold@@timeToOnload_avg__summary"
+ ],
+ "dromaeo.jslibeventjquery": [
+ "jslib_event_jquery__jslib_event_jquery"
+ ],
+ "browsermark": [
+ "Score__Score"
+ ],
+ "smoothness.top_25": [
+ "frame_times__frame_times",
+ "mean_frame_time__mean_frame_time"
+ ],
+ "page_cycler_v2.morejs": [
+ "warm_times__page_load_time",
+ "cold_times__page_load_time",
+ "pcv1-warm@@timeToOnload_avg__summary",
+ "pcv1-cold@@timeToOnload_avg__summary"
+ ],
+ "page_cycler_v2.dhtml": [
+ "warm_times__page_load_time",
+ "cold_times__page_load_time",
+ "pcv1-warm@@timeToOnload_avg__summary",
+ "pcv1-cold@@timeToOnload_avg__summary"
+ ],
+ "page_cycler_v2.bloat": [
+ "warm_times__page_load_time",
+ "cold_times__page_load_time",
+ "pcv1-warm@@timeToOnload_avg__summary",
+ "pcv1-cold@@timeToOnload_avg__summary"
+ ],
+ "dromaeo.jslibstyleprototype": [
+ "jslib_style_prototype__jslib_style_prototype"
+ ],
+ "dromaeo.jslibstylejquery": [
+ "jslib_style_jquery__jslib_style_jquery"
+ ],
+ "dromaeo.jslibeventprototype": [
+ "jslib_event_prototype__jslib_event_prototype"
+ ],
+ "page_cycler_v2.moz": [
+ "warm_times__page_load_time",
+ "cold_times__page_load_time",
+ "pcv1-warm@@timeToOnload_avg__summary",
+ "pcv1-cold@@timeToOnload_avg__summary"
+ ],
+ "speedometer": [
+ "Total__Total",
+ "Total__summary"
+ ],
+ "octane": [
+ "Total__Score"
+ ],
+ "jsgamebench": [
+ "Score__Score"
+ ],
+ "page_cycler_v2.indexed_db.basic_insert": [
+ "warm_times__page_load_time",
+ "cold_times__page_load_time",
+ "pcv1-warm@@timeToOnload_avg__summary",
+ "pcv1-cold@@timeToOnload_avg__summary"
+ ],
+ "spaceport": [
+ "Score__Score"
+ ],
+ "dromaeo.jslibtraverseprototype": [
+ "jslib_traverse_prototype__jslib_traverse_prototype"
+ ],
+ "page_cycler_v2.netsim.top_10": [
+ "cold_times__page_load_time",
+ "warm_times__page_load_time",
+ "pcv1-warm@@timeToOnload_avg__summary",
+ "pcv1-cold@@timeToOnload_avg__summary"
+ ],
+ "robohornet_pro": [
+ "Total__Total",
+ "Total__summary"
+ ],
+ "dromaeo.domcoreattr": [
+ "dom_attr__dom_attr",
+ "dom__summary"
+ ],
+ "dromaeo.jslibattrprototype": [
+ "jslib_attr_prototype__jslib_attr_prototype"
+ ],
+ "sunspider": [
+ "Total__Total",
+ "Total__summary"
+ ],
+ "dromaeo.jslibattrjquery": [
+ "jslib_attr_jquery__jslib_attr_jquery"
+ ],
+ "page_cycler_v2.typical_25": [
+ "warm_times-page_load_time__warm_times-page_load_time",
+ "cold_times-page_load_time__cold_times-page_load_time",
+ "pcv1-warm@@timeToOnload_avg__summary",
+ "pcv1-cold@@timeToOnload_avg__summary"
+ ],
+ "dromaeo.domcoretraverse": [
+ "dom_traverse__dom_traverse",
+ "dom__summary"
+ ],
+ "dromaeo.domcoremodify": [
+ "dom_modify__dom_modify",
+ "dom__summary"
+ ],
+ "page_cycler_v2.intl_ar_fa_he": [
+ "warm_times__page_load_time",
+ "cold_times__page_load_time",
+ "pcv1-warm@@timeToOnload_avg__summary",
+ "pcv1-cold@@timeToOnload_avg__summary"
+ ],
+ "page_cycler_v2.intl_ja_zh": [
+ "warm_times__page_load_time",
+ "cold_times__page_load_time",
+ "pcv1-warm@@timeToOnload_avg__summary",
+ "pcv1-cold@@timeToOnload_avg__summary"
+ ],
+ "graphics_WebGLAquarium": [
+ "avg_fps_1000_fishes",
+ "avg_fps_1000_fishes__summary"
+ ],
+ "page_cycler_v2.intl_ko_th_vi": [
+ "warm_times__page_load_time",
+ "cold_times__page_load_time",
+ "pcv1-warm@@timeToOnload_avg__summary",
+ "pcv1-cold@@timeToOnload_avg__summary"
+ ],
+ "canvasmark": [
+ "Score__Score"
+ ],
+ "dromaeo.domcorequery": [
+ "dom_query__dom_query",
+ "dom__summary"
+ ],
+ "dromaeo.jslibtraversejquery": [
+ "jslib_traverse_jquery__jslib_traverse_jquery"
+ ],
+ "dromaeo.jslibmodifyprototype": [
+ "jslib_modify_prototype__jslib_modify_prototype"
+ ],
+ "page_cycler_v2.tough_layout_cases": [
+ "warm_times__page_load_time",
+ "cold_times__page_load_time",
+ "pcv1-warm@@timeToOnload_avg__summary",
+ "pcv1-cold@@timeToOnload_avg__summary"
+ ],
+ "kraken": [
+ "Total__Total",
+ "Total__summary"
+ ],
+ "dromaeo.jslibmodifyjquery": [
+ "jslib_modify_jquery__jslib_modify_jquery"
+ ]
+}
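
This JSON appears to map each telemetry benchmark to the result keys reported by default (i.e. when show_all_results is not set). A loading sketch under that assumption; the real consumer is the results-reporting code listed in the diffstat (results_organizer.py and friends), which is not shown in this diff:

    import json
    import os

    json_file = os.path.join(os.path.dirname(__file__),
                             'default-telemetry-results.json')
    with open(json_file) as f:
      default_results = json.load(f)
    # e.g. default_results['octane'] == ['Total__Score']
    keys_to_report = default_results.get('octane', [])
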
diff --git a/crosperf/default_remotes b/crosperf/default_remotes
new file mode 100644
index 00000000..619068f8
--- /dev/null
+++ b/crosperf/default_remotes
@@ -0,0 +1,8 @@
+x86-alex : chromeos2-row9-rack10-host1.cros chromeos2-row9-rack10-host3.cros chromeos2-row9-rack10-host5.cros
+lumpy : chromeos2-row9-rack9-host9.cros chromeos2-row9-rack9-host11.cros chromeos2-row9-rack9-host13.cros
+parrot : chromeos2-row9-rack9-host15.cros chromeos2-row9-rack9-host17.cros chromeos2-row9-rack9-host19.cros
+daisy : chromeos2-row9-rack9-host3.cros chromeos2-row9-rack9-host5.cros chromeos2-row9-rack9-host7.cros
+peach_pit : chromeos2-row9-rack10-host13.cros chromeos2-row9-rack10-host15.cros chromeos2-row9-rack10-host17.cros
+peppy : chromeos2-row9-rack10-host19.cros chromeos2-row9-rack10-host21.cros chromeos2-row9-rack9-host1.cros
+squawks : chromeos2-row9-rack10-host7.cros chromeos2-row9-rack10-host9.cros chromeos2-row9-rack10-host11.cros
+elm : chromeos2-row9-rack8-host19.cros chromeos2-row9-rack8-host21.cros
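
Each line of default_remotes maps a board name to its default lab machines, separated by whitespace. A parsing sketch under that assumption (the helper below is hypothetical; the real consumer, presumably experiment_factory.py from the diffstat, is not shown here):

    def parse_default_remotes(path):
      """Return {board: [machine, ...]} from 'board : host1 host2 ...' lines."""
      remotes = {}
      with open(path) as f:
        for line in f:
          board, _, machines = line.partition(':')
          if machines:
            remotes[board.strip()] = machines.split()
      return remotes
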
diff --git a/crosperf/download_images.py b/crosperf/download_images.py
new file mode 100644
index 00000000..8ceaa874
--- /dev/null
+++ b/crosperf/download_images.py
@@ -0,0 +1,286 @@
+# Copyright (c) 2014-2015 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Download images from Cloud Storage."""
+
+from __future__ import print_function
+
+import ast
+import os
+
+import test_flag
+
+from cros_utils import command_executer
+
+GS_UTIL = 'chromium/tools/depot_tools/gsutil.py'
+
+
+class MissingImage(Exception):
+ """Raised when the requested image does not exist in gs://"""
+
+
+class MissingFile(Exception):
+ """Raised when the requested file does not exist in gs://"""
+
+
+class RunCommandExceptionHandler(object):
+ """Handle Exceptions from calls to RunCommand"""
+
+ def __init__(self, logger_to_use, log_level, cmd_exec, command):
+ self.logger = logger_to_use
+ self.log_level = log_level
+ self.ce = cmd_exec
+ self.cleanup_command = command
+
+ def HandleException(self, _, e):
+ # Exception handler: run the specified cleanup command.
+ if self.log_level != 'verbose' and self.cleanup_command is not None:
+ self.logger.LogOutput('CMD: %s' % self.cleanup_command)
+ if self.cleanup_command is not None:
+ _ = self.ce.RunCommand(self.cleanup_command)
+ # Raise exception again
+ raise e
+
+
+class ImageDownloader(object):
+ """Download images from Cloud Storage."""
+
+ def __init__(self, logger_to_use=None, log_level='verbose', cmd_exec=None):
+ self._logger = logger_to_use
+ self.log_level = log_level
+ self._ce = cmd_exec or command_executer.GetCommandExecuter(
+ self._logger, log_level=self.log_level)
+
+ def GetBuildID(self, chromeos_root, xbuddy_label):
+ # Get the translation of the xbuddy_label into the real Google Storage
+ # image name.
+ command = ('cd ~/trunk/src/third_party/toolchain-utils/crosperf; '
+ "python translate_xbuddy.py '%s'" % xbuddy_label)
+ _, build_id_tuple_str, _ = self._ce.ChrootRunCommandWOutput(chromeos_root,
+ command)
+ if not build_id_tuple_str:
+ raise MissingImage("Unable to find image for '%s'" % xbuddy_label)
+
+ build_id_tuple = ast.literal_eval(build_id_tuple_str)
+ build_id = build_id_tuple[0]
+
+ return build_id
+
+ def DownloadImage(self, chromeos_root, build_id, image_name):
+ if self.log_level == 'average':
+ self._logger.LogOutput('Preparing to download %s image to local '
+ 'directory.' % build_id)
+
+ # Make sure the directory for downloading the image exists.
+ download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id)
+ image_path = os.path.join(download_path, 'chromiumos_test_image.bin')
+ if not os.path.exists(download_path):
+ os.makedirs(download_path)
+
+ # Check to see if the image has already been downloaded. If not,
+ # download the image.
+ if not os.path.exists(image_path):
+ gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
+ command = '%s cp %s %s' % (gsutil_cmd, image_name, download_path)
+
+ if self.log_level != 'verbose':
+ self._logger.LogOutput('CMD: %s' % command)
+ status = self._ce.RunCommand(command)
+ downloaded_image_name = os.path.join(download_path,
+ 'chromiumos_test_image.tar.xz')
+ if status != 0 or not os.path.exists(downloaded_image_name):
+ raise MissingImage('Cannot download image: %s.' % downloaded_image_name)
+
+ return image_path
+
+ def UncompressImage(self, chromeos_root, build_id):
+ # Check to see if the file has already been uncompressed, etc.
+ if os.path.exists(
+ os.path.join(chromeos_root, 'chroot/tmp', build_id,
+ 'chromiumos_test_image.bin')):
+ return
+
+ # Uncompress and untar the downloaded image.
+ download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id)
+ command = ('cd %s ; tar -Jxf chromiumos_test_image.tar.xz ' % download_path)
+ # Cleanup command for exception handler
+ clean_cmd = ('cd %s ; rm -f chromiumos_test_image.bin ' % download_path)
+ exception_handler = RunCommandExceptionHandler(self._logger, self.log_level,
+ self._ce, clean_cmd)
+ if self.log_level != 'verbose':
+ self._logger.LogOutput('CMD: %s' % command)
+ print('(Uncompressing and un-tarring may take a couple of minutes...'
+ 'please be patient.)')
+ retval = self._ce.RunCommand(
+ command, except_handler=exception_handler.HandleException)
+ if retval != 0:
+ if self.log_level != 'verbose':
+ self._logger.LogOutput('CMD: %s' % clean_cmd)
+ print('(Removing file chromiumos_test_image.bin.)')
+ # Remove partially uncompressed file
+ _ = self._ce.RunCommand(clean_cmd)
+ # Raise exception for failure to uncompress
+ raise MissingImage('Cannot uncompress image: %s.' % build_id)
+
+ # Remove compressed image
+ command = ('cd %s ; rm -f chromiumos_test_image.tar.xz; ' % download_path)
+ if self.log_level != 'verbose':
+ self._logger.LogOutput('CMD: %s' % command)
+ print('(Removing file chromiumos_test_image.tar.xz.)')
+ # Try removing the file; it is OK if this fails, but print a warning.
+ retval = self._ce.RunCommand(command)
+ if retval != 0:
+ print('(Warning: Could not remove file chromiumos_test_image.tar.xz .)')
+
+ def DownloadSingleAutotestFile(self, chromeos_root, build_id,
+ package_file_name):
+ # Verify if package files exist
+ status = 0
+ gs_package_name = ('gs://chromeos-image-archive/%s/%s' %
+ (build_id, package_file_name))
+ gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
+ if not test_flag.GetTestMode():
+ cmd = '%s ls %s' % (gsutil_cmd, gs_package_name)
+ status = self._ce.RunCommand(cmd)
+ if status != 0:
+ raise MissingFile('Cannot find autotest package file: %s.' %
+ package_file_name)
+
+ if self.log_level == 'average':
+ self._logger.LogOutput('Preparing to download %s package to local '
+ 'directory.' % package_file_name)
+
+ # Make sure the directory for downloading the package exists.
+ download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id)
+ package_path = os.path.join(download_path, package_file_name)
+ if not os.path.exists(download_path):
+ os.makedirs(download_path)
+
+ # Check to see if the package file has already been downloaded. If not,
+ # download it.
+ if not os.path.exists(package_path):
+ command = '%s cp %s %s' % (gsutil_cmd, gs_package_name, download_path)
+
+ if self.log_level != 'verbose':
+ self._logger.LogOutput('CMD: %s' % command)
+ status = self._ce.RunCommand(command)
+ if status != 0 or not os.path.exists(package_path):
+ raise MissingFile('Cannot download package: %s .' % package_path)
+
+ def UncompressSingleAutotestFile(self, chromeos_root, build_id,
+ package_file_name, uncompress_cmd):
+ # Uncompress file
+ download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id)
+ command = ('cd %s ; %s %s' %
+ (download_path, uncompress_cmd, package_file_name))
+
+ if self.log_level != 'verbose':
+ self._logger.LogOutput('CMD: %s' % command)
+ print('(Uncompressing autotest file %s .)' % package_file_name)
+ retval = self._ce.RunCommand(command)
+ if retval != 0:
+ raise MissingFile('Cannot uncompress file: %s.' % package_file_name)
+ # Remove uncompressed downloaded file
+ command = ('cd %s ; rm -f %s' % (download_path, package_file_name))
+ if self.log_level != 'verbose':
+ self._logger.LogOutput('CMD: %s' % command)
+ print('(Removing processed autotest file %s .)' % package_file_name)
+ # Try removing the file; it is OK if this fails, but print a warning.
+ retval = self._ce.RunCommand(command)
+ if retval != 0:
+ print('(Warning: Could not remove file %s .)' % package_file_name)
+
+ def VerifyAutotestFilesExist(self, chromeos_root, build_id, package_file):
+ # Quickly verify if the files are there
+ status = 0
+ gs_package_name = ('gs://chromeos-image-archive/%s/%s' %
+ (build_id, package_file))
+ gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
+ if not test_flag.GetTestMode():
+ cmd = '%s ls %s' % (gsutil_cmd, gs_package_name)
+ if self.log_level != 'verbose':
+ self._logger.LogOutput('CMD: %s' % cmd)
+ status = self._ce.RunCommand(cmd)
+ if status != 0:
+ print('(Warning: Could not find file %s )' % gs_package_name)
+ return 1
+ # Package exists on server
+ return 0
+
+ def DownloadAutotestFiles(self, chromeos_root, build_id):
+ # Download autotest package files (3 files)
+ autotest_packages_name = ('autotest_packages.tar')
+ autotest_server_package_name = ('autotest_server_package.tar.bz2')
+ autotest_control_files_name = ('control_files.tar')
+
+ download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id)
+ # Autotest directory path relative to the chroot.
+ autotest_rel_path = os.path.join('/tmp', build_id, 'autotest_files')
+ # Absolute path to which the files will be downloaded.
+ autotest_path = os.path.join(chromeos_root, 'chroot/tmp', build_id,
+ 'autotest_files')
+
+ if not os.path.exists(autotest_path):
+ # Quickly verify that the files are present on the server.
+ # If not, return the default autotest directory with a warning.
+ status = self.VerifyAutotestFilesExist(chromeos_root, build_id,
+ autotest_packages_name)
+ if status != 0:
+ default_autotest_dir = '~/trunk/src/third_party/autotest/files'
+ print('(Warning: Could not find autotest packages .)\n'
+ '(Warning: Defaulting autotest path to %s .)' %
+ default_autotest_dir)
+ return default_autotest_dir
+
+ # Files exist on server, download and uncompress them
+ self.DownloadSingleAutotestFile(chromeos_root, build_id,
+ autotest_packages_name)
+ self.DownloadSingleAutotestFile(chromeos_root, build_id,
+ autotest_server_package_name)
+ self.DownloadSingleAutotestFile(chromeos_root, build_id,
+ autotest_control_files_name)
+
+ self.UncompressSingleAutotestFile(chromeos_root, build_id,
+ autotest_packages_name, 'tar -xvf ')
+ self.UncompressSingleAutotestFile(chromeos_root, build_id,
+ autotest_server_package_name,
+ 'tar -jxvf ')
+ self.UncompressSingleAutotestFile(chromeos_root, build_id,
+ autotest_control_files_name,
+ 'tar -xvf ')
+ # Rename created autotest directory to autotest_files
+ command = ('cd %s ; mv autotest autotest_files' % download_path)
+ if self.log_level != 'verbose':
+ self._logger.LogOutput('CMD: %s' % command)
+ print('(Moving downloaded autotest files to autotest_files)')
+ retval = self._ce.RunCommand(command)
+ if retval != 0:
+ raise MissingFile('Could not create directory autotest_files')
+
+ return autotest_rel_path
+
+ def Run(self, chromeos_root, xbuddy_label, autotest_path):
+ build_id = self.GetBuildID(chromeos_root, xbuddy_label)
+ image_name = ('gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz'
+ % build_id)
+
+ # Verify that image exists for build_id, before attempting to
+ # download it.
+ status = 0
+ if not test_flag.GetTestMode():
+ gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
+ cmd = '%s ls %s' % (gsutil_cmd, image_name)
+ status = self._ce.RunCommand(cmd)
+ if status != 0:
+ raise MissingImage('Cannot find official image: %s.' % image_name)
+
+ image_path = self.DownloadImage(chromeos_root, build_id, image_name)
+ self.UncompressImage(chromeos_root, build_id)
+
+ if self.log_level != 'quiet':
+ self._logger.LogOutput('Using image from %s.' % image_path)
+
+ if autotest_path == '':
+ autotest_path = self.DownloadAutotestFiles(chromeos_root, build_id)
+
+ return image_path, autotest_path
diff --git a/crosperf/download_images_buildid_test.py b/crosperf/download_images_buildid_test.py
new file mode 100755
index 00000000..3e7f00c1
--- /dev/null
+++ b/crosperf/download_images_buildid_test.py
@@ -0,0 +1,111 @@
+#!/usr/bin/env python2
+#
+# Copyright 2014 Google Inc. All Rights Reserved
+"""Test translation of xbuddy names."""
+
+from __future__ import print_function
+
+import argparse
+import sys
+
+import download_images
+
+# On May 1, 2014:
+# latest         : lumpy-release/R34-5500.132.0
+# latest-beta    : lumpy-release/R35-5712.43.0
+# latest-official: lumpy-release/R36-5814.0.0
+# latest-dev     : lumpy-release/R36-5814.0.0
+# latest-canary  : lumpy-release/R36-5814.0.0
+
+
+class ImageDownloaderBuildIDTest(object):
+ """Test translation of xbuddy names."""
+
+ def __init__(self):
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ '-c',
+ '--chromeos_root',
+ dest='chromeos_root',
+ help='Directory containing ChromeOS root.')
+
+ options = parser.parse_known_args(sys.argv[1:])[0]
+ if options.chromeos_root is None:
+ self._usage(parser, '--chromeos_root must be set')
+ self.chromeos_root = options.chromeos_root
+ self.tests_passed = 0
+ self.tests_run = 0
+ self.tests_failed = 0
+
+ def _usage(self, parser, message):
+ print('ERROR: ' + message)
+ parser.print_help()
+ sys.exit(1)
+
+ def print_test_status(self):
+ print('----------------------------------------\n')
+ print('Tests attempted: %d' % self.tests_run)
+ print('Tests passed: %d' % self.tests_passed)
+ print('Tests failed: %d' % self.tests_failed)
+ print('\n----------------------------------------')
+
+ def assert_failure(self, msg):
+ print('Assert failure: %s' % msg)
+ self.print_test_status()
+ sys.exit(1)
+
+ def assertIsNotNone(self, arg, arg_name):
+ if arg is None:
+ self.tests_failed = self.tests_failed + 1
+ self.assert_failure('%s is None' % arg_name)
+
+ def assertNotEqual(self, arg1, arg2, arg1_name, arg2_name):
+ if arg1 == arg2:
+ self.tests_failed = self.tests_failed + 1
+ self.assert_failure('%s is unexpectedly equal to %s' % (arg1_name, arg2_name))
+
+ def assertEqual(self, arg1, arg2, arg1_name, arg2_name):
+ if arg1 != arg2:
+ self.tests_failed = self.tests_failed + 1
+ self.assert_failure('%s is not equal to %s' % (arg1_name, arg2_name))
+
+ def test_one_id(self, downloader, test_id, result_string, exact_match):
+ print("Translating '%s'" % test_id)
+ self.tests_run = self.tests_run + 1
+
+ result = downloader.GetBuildID(self.chromeos_root, test_id)
+ # Verify that we got a build id back.
+ self.assertIsNotNone(result, 'result')
+
+ # Verify that the result either contains or exactly matches the
+ # result_string, depending on the exact_match argument.
+ if exact_match:
+ self.assertEqual(result, result_string, 'result', result_string)
+ else:
+ self.assertNotEqual(result.find(result_string), -1, 'result.find', '-1')
+ self.tests_passed = self.tests_passed + 1
+
+ def test_get_build_id(self):
+ """Test that the actual translating of xbuddy names is working properly."""
+ downloader = download_images.ImageDownloader(log_level='quiet')
+
+ self.test_one_id(downloader, 'remote/lumpy/latest-dev', 'lumpy-release/R',
+ False)
+ self.test_one_id(downloader,
+ 'remote/trybot-lumpy-release-afdo-use/R35-5672.0.0-b86',
+ 'trybot-lumpy-release-afdo-use/R35-5672.0.0-b86', True)
+ self.test_one_id(downloader, 'remote/lumpy-release/R35-5672.0.0',
+ 'lumpy-release/R35-5672.0.0', True)
+ self.test_one_id(downloader, 'remote/lumpy/latest-dev', 'lumpy-release/R',
+ False)
+ self.test_one_id(downloader, 'remote/lumpy/latest-official',
+ 'lumpy-release/R', False)
+ self.test_one_id(downloader, 'remote/lumpy/latest-beta', 'lumpy-release/R',
+ False)
+
+ self.print_test_status()
+
+
+if __name__ == '__main__':
+ tester = ImageDownloaderBuildIDTest()
+ tester.test_get_build_id()
diff --git a/crosperf/download_images_unittest.py b/crosperf/download_images_unittest.py
new file mode 100755
index 00000000..7a4f3850
--- /dev/null
+++ b/crosperf/download_images_unittest.py
@@ -0,0 +1,251 @@
+#!/usr/bin/env python2
+#
+# Copyright 2014 Google Inc. All Rights Reserved
+"""Download image unittest."""
+
+from __future__ import print_function
+
+import os
+import mock
+import unittest
+
+import download_images
+from cros_utils import command_executer
+from cros_utils import logger
+
+import test_flag
+
+MOCK_LOGGER = logger.GetLogger(log_dir='', mock=True)
+
+
+class ImageDownloaderTestcase(unittest.TestCase):
+ """The image downloader test class."""
+
+ def __init__(self, *args, **kwargs):
+ super(ImageDownloaderTestcase, self).__init__(*args, **kwargs)
+ self.called_download_image = False
+ self.called_uncompress_image = False
+ self.called_get_build_id = False
+ self.called_download_autotest_files = False
+
+ @mock.patch.object(os, 'makedirs')
+ @mock.patch.object(os.path, 'exists')
+ def test_download_image(self, mock_path_exists, mock_mkdirs):
+
+ # Set mock and test values.
+ mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+ test_chroot = '/usr/local/home/chromeos'
+ test_build_id = 'lumpy-release/R36-5814.0.0'
+ image_path = ('gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz'
+ % test_build_id)
+
+ downloader = download_images.ImageDownloader(
+ logger_to_use=MOCK_LOGGER, cmd_exec=mock_cmd_exec)
+
+ # Set os.path.exists to always return False and run downloader
+ mock_path_exists.return_value = False
+ test_flag.SetTestMode(True)
+ self.assertRaises(download_images.MissingImage, downloader.DownloadImage,
+ test_chroot, test_build_id, image_path)
+
+ # Verify os.path.exists was called twice, with proper arguments.
+ self.assertEqual(mock_path_exists.call_count, 2)
+ mock_path_exists.assert_called_with(
+ '/usr/local/home/chromeos/chroot/tmp/lumpy-release/'
+ 'R36-5814.0.0/chromiumos_test_image.bin')
+ mock_path_exists.assert_any_call(
+ '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0')
+
+ # Verify we called os.makedirs
+ self.assertEqual(mock_mkdirs.call_count, 1)
+ mock_mkdirs.assert_called_with(
+ '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0')
+
+ # Verify we called RunCommand once, with proper arguments.
+ self.assertEqual(mock_cmd_exec.RunCommand.call_count, 1)
+ expected_args = (
+ '/usr/local/home/chromeos/chromium/tools/depot_tools/gsutil.py '
+ 'cp gs://chromeos-image-archive/lumpy-release/R36-5814.0.0/'
+ 'chromiumos_test_image.tar.xz '
+ '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0')
+
+ mock_cmd_exec.RunCommand.assert_called_with(expected_args)
+
+ # Reset the values in the mocks; set os.path.exists to always return True.
+ mock_path_exists.reset_mock()
+ mock_cmd_exec.reset_mock()
+ mock_path_exists.return_value = True
+
+ # Run downloader
+ downloader.DownloadImage(test_chroot, test_build_id, image_path)
+
+ # Verify os.path.exists was called twice, with proper arguments.
+ self.assertEqual(mock_path_exists.call_count, 2)
+ mock_path_exists.assert_called_with(
+ '/usr/local/home/chromeos/chroot/tmp/lumpy-release/'
+ 'R36-5814.0.0/chromiumos_test_image.bin')
+ mock_path_exists.assert_any_call(
+ '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0')
+
+ # Verify we made no RunCommand or ChrootRunCommand calls (since
+ # os.path.exists returned True, there was no work to be done).
+ self.assertEqual(mock_cmd_exec.RunCommand.call_count, 0)
+ self.assertEqual(mock_cmd_exec.ChrootRunCommand.call_count, 0)
+
+ @mock.patch.object(os.path, 'exists')
+ def test_uncompress_image(self, mock_path_exists):
+
+ # Set mock and test values.
+ mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+ test_chroot = '/usr/local/home/chromeos'
+ test_build_id = 'lumpy-release/R36-5814.0.0'
+
+ downloader = download_images.ImageDownloader(
+ logger_to_use=MOCK_LOGGER, cmd_exec=mock_cmd_exec)
+
+ # Set os.path.exists to always return False and run uncompress.
+ mock_path_exists.return_value = False
+ self.assertRaises(download_images.MissingImage, downloader.UncompressImage,
+ test_chroot, test_build_id)
+
+ # Verify os.path.exists was called once, with correct arguments.
+ self.assertEqual(mock_path_exists.call_count, 1)
+ mock_path_exists.assert_called_with(
+ '/usr/local/home/chromeos/chroot/tmp/lumpy-release/'
+ 'R36-5814.0.0/chromiumos_test_image.bin')
+
+ # Verify RunCommand was called twice with correct arguments.
+ self.assertEqual(mock_cmd_exec.RunCommand.call_count, 2)
+ # Call 1, should have 2 arguments
+ self.assertEqual(len(mock_cmd_exec.RunCommand.call_args_list[0]), 2)
+ actual_arg = mock_cmd_exec.RunCommand.call_args_list[0][0]
+ expected_arg = (
+ 'cd /usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0 ; '
+ 'tar -Jxf chromiumos_test_image.tar.xz ',)
+ self.assertEqual(expected_arg, actual_arg)
+ # 2nd arg must be exception handler
+ except_handler_string = 'RunCommandExceptionHandler.HandleException'
+ self.assertTrue(
+ except_handler_string in
+ repr(mock_cmd_exec.RunCommand.call_args_list[0][1]))
+
+ # Call 2, should have 2 arguments
+ self.assertEqual(len(mock_cmd_exec.RunCommand.call_args_list[1]), 2)
+ actual_arg = mock_cmd_exec.RunCommand.call_args_list[1][0]
+ expected_arg = (
+ 'cd /usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0 ; '
+ 'rm -f chromiumos_test_image.bin ',)
+ self.assertEqual(expected_arg, actual_arg)
+ # 2nd arg must be empty
+ self.assertTrue('{}' in repr(mock_cmd_exec.RunCommand.call_args_list[1][1]))
+
+ # Set os.path.exists to always return True and run uncompress.
+ mock_path_exists.reset_mock()
+ mock_cmd_exec.reset_mock()
+ mock_path_exists.return_value = True
+ downloader.UncompressImage(test_chroot, test_build_id)
+
+ # Verify os.path.exists was called once, with correct arguments.
+ self.assertEqual(mock_path_exists.call_count, 1)
+ mock_path_exists.assert_called_with(
+ '/usr/local/home/chromeos/chroot/tmp/lumpy-release/'
+ 'R36-5814.0.0/chromiumos_test_image.bin')
+
+ # Verify RunCommand was not called.
+ self.assertEqual(mock_cmd_exec.RunCommand.call_count, 0)
+
+ def test_run(self):
+
+ # Set test arguments
+ test_chroot = '/usr/local/home/chromeos'
+ test_build_id = 'remote/lumpy/latest-dev'
+ test_empty_autotest_path = ''
+ test_autotest_path = '/tmp/autotest'
+
+ # Set values to test/check.
+ self.called_download_image = False
+ self.called_uncompress_image = False
+ self.called_get_build_id = False
+ self.called_download_autotest_files = False
+
+ # Define fake stub functions for Run to call
+ def FakeGetBuildID(unused_root, unused_xbuddy_label):
+ self.called_get_build_id = True
+ return 'lumpy-release/R36-5814.0.0'
+
+ def GoodDownloadImage(root, build_id, image_path):
+ if root or build_id or image_path:
+ pass
+ self.called_download_image = True
+ return 'chromiumos_test_image.bin'
+
+ def BadDownloadImage(root, build_id, image_path):
+ if root or build_id or image_path:
+ pass
+ self.called_download_image = True
+ raise download_images.MissingImage('Could not download image')
+
+ def FakeUncompressImage(root, build_id):
+ if root or build_id:
+ pass
+ self.called_uncompress_image = True
+ return 0
+
+ def FakeDownloadAutotestFiles(root, build_id):
+ if root or build_id:
+ pass
+ self.called_download_autotest_files = True
+ return 'autotest'
+
+ # Initialize downloader
+ downloader = download_images.ImageDownloader(logger_to_use=MOCK_LOGGER)
+
+ # Set downloader to call fake stubs.
+ downloader.GetBuildID = FakeGetBuildID
+ downloader.UncompressImage = FakeUncompressImage
+ downloader.DownloadImage = GoodDownloadImage
+ downloader.DownloadAutotestFiles = FakeDownloadAutotestFiles
+
+ # Call Run.
+ image_path, autotest_path = downloader.Run(test_chroot, test_build_id,
+ test_empty_autotest_path)
+
+ # Make sure it called both DownloadImage and UncompressImage.
+ self.assertTrue(self.called_download_image)
+ self.assertTrue(self.called_uncompress_image)
+ # Make sure it called DownloadAutotestFiles
+ self.assertTrue(self.called_download_autotest_files)
+ # Make sure it returned the expected image path and autotest path.
+ self.assertEqual(image_path, 'chromiumos_test_image.bin')
+ self.assertEqual(autotest_path, 'autotest')
+
+ # Call Run with a non-empty autotest path
+ self.called_download_autotest_files = False
+
+ image_path, autotest_path = downloader.Run(test_chroot, test_build_id,
+ test_autotest_path)
+
+ # Verify that DownloadAutotestFiles was not called.
+ self.assertFalse(self.called_download_autotest_files)
+ # Make sure it returned the autotest path that was passed in.
+ self.assertEqual(autotest_path, test_autotest_path)
+
+ # Reset values; Now use fake stub that simulates DownloadImage failing.
+ self.called_download_image = False
+ self.called_uncompress_image = False
+ self.called_download_autotest_files = False
+ downloader.DownloadImage = BadDownloadImage
+
+ # Call Run again.
+ self.assertRaises(download_images.MissingImage, downloader.Run, test_chroot,
+ test_build_id, test_autotest_path)
+
+ # Verify that UncompressImage and DownloadAutotestFiles were not called,
+ # since DownloadImage "failed".
+ self.assertTrue(self.called_download_image)
+ self.assertFalse(self.called_uncompress_image)
+ self.assertFalse(self.called_download_autotest_files)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/crosperf/experiment.py b/crosperf/experiment.py
new file mode 100644
index 00000000..dbcde213
--- /dev/null
+++ b/crosperf/experiment.py
@@ -0,0 +1,212 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""The experiment setting module."""
+
+from __future__ import print_function
+
+import os
+import time
+
+import afe_lock_machine
+from threading import Lock
+
+from cros_utils import logger
+from cros_utils import misc
+
+import benchmark_run
+from machine_manager import BadChecksum
+from machine_manager import MachineManager
+from machine_manager import MockMachineManager
+import test_flag
+
+
+class Experiment(object):
+ """Class representing an Experiment to be run."""
+
+ def __init__(self, name, remote, working_directory, chromeos_root,
+ cache_conditions, labels, benchmarks, experiment_file, email_to,
+ acquire_timeout, log_dir, log_level, share_cache,
+ results_directory, locks_directory):
+ self.name = name
+ self.working_directory = working_directory
+ self.remote = remote
+ self.chromeos_root = chromeos_root
+ self.cache_conditions = cache_conditions
+ self.experiment_file = experiment_file
+ self.email_to = email_to
+ if not results_directory:
+ self.results_directory = os.path.join(self.working_directory,
+ self.name + '_results')
+ else:
+ self.results_directory = misc.CanonicalizePath(results_directory)
+ self.log_dir = log_dir
+ self.log_level = log_level
+ self.labels = labels
+ self.benchmarks = benchmarks
+ self.num_complete = 0
+ self.num_run_complete = 0
+ self.share_cache = share_cache
+ self.active_threads = []
+ # If locks_directory (self.locks_dir) is not blank, we will use the file
+ # locking mechanism; if it is blank then we will use the AFE server
+ # locking mechanism.
+ self.locks_dir = locks_directory
+ self.locked_machines = []
+
+ if not remote:
+ raise RuntimeError('No remote hosts specified')
+ if not self.benchmarks:
+ raise RuntimeError('No benchmarks specified')
+ if not self.labels:
+ raise RuntimeError('No labels specified')
+
+ # We need one chromeos_root to run the benchmarks in, but it doesn't
+ # matter where it is, unless the ABIs are different.
+ if not chromeos_root:
+ for label in self.labels:
+ if label.chromeos_root:
+ chromeos_root = label.chromeos_root
+ break
+ if not chromeos_root:
+ raise RuntimeError('No chromeos_root given and could not determine '
+ 'one from the image path.')
+
+ machine_manager_fn = MachineManager
+ if test_flag.GetTestMode():
+ machine_manager_fn = MockMachineManager
+ self.machine_manager = machine_manager_fn(chromeos_root, acquire_timeout,
+ log_level, locks_directory)
+ self.l = logger.GetLogger(log_dir)
+
+ for machine in self.remote:
+ # machine_manager.AddMachine only adds reachable machines.
+ self.machine_manager.AddMachine(machine)
+ # Now machine_manager._all_machines contains a list of reachable
+ # machines. This is a subset of self.remote. We make both lists the same.
+ self.remote = [m.name for m in self.machine_manager.GetAllMachines()]
+ if not self.remote:
+ raise RuntimeError('No machine available for running experiment.')
+
+ for label in labels:
+ # We filter out label remotes that are not reachable (not in
+ # self.remote). So each label.remote is a sublist of experiment.remote.
+ label.remote = [r for r in label.remote if r in self.remote]
+ try:
+ self.machine_manager.ComputeCommonCheckSum(label)
+ except BadChecksum:
+ # Force the same image onto all machines, then compute the checksum
+ # again. If the checksums still do not match, the second call raises
+ # BadChecksum, which is not caught here and will propagate.
+ self.machine_manager.ForceSameImageToAllMachines(label)
+ self.machine_manager.ComputeCommonCheckSum(label)
+
+ self.machine_manager.ComputeCommonCheckSumString(label)
+
+ self.start_time = None
+ self.benchmark_runs = self._GenerateBenchmarkRuns()
+
+ self._schedv2 = None
+ self._internal_counter_lock = Lock()
+
+ def set_schedv2(self, schedv2):
+ self._schedv2 = schedv2
+
+ def schedv2(self):
+ return self._schedv2
+
+ def _GenerateBenchmarkRuns(self):
+ """Generate benchmark runs from labels and benchmark defintions."""
+ benchmark_runs = []
+ for label in self.labels:
+ for benchmark in self.benchmarks:
+ for iteration in xrange(1, benchmark.iterations + 1):
+
+ benchmark_run_name = '%s: %s (%s)' % (label.name, benchmark.name,
+ iteration)
+ full_name = '%s_%s_%s' % (label.name, benchmark.name, iteration)
+ logger_to_use = logger.Logger(self.log_dir, 'run.%s' % (full_name),
+ True)
+ benchmark_runs.append(benchmark_run.BenchmarkRun(
+ benchmark_run_name, benchmark, label, iteration,
+ self.cache_conditions, self.machine_manager, logger_to_use,
+ self.log_level, self.share_cache))
+
+ return benchmark_runs
+
+ def Build(self):
+ pass
+
+ def Terminate(self):
+ if self._schedv2 is not None:
+ self._schedv2.terminate()
+ else:
+ for t in self.benchmark_runs:
+ if t.isAlive():
+ self.l.LogError("Terminating run: '%s'." % t.name)
+ t.Terminate()
+
+ def IsComplete(self):
+ if self._schedv2:
+ return self._schedv2.is_complete()
+ if self.active_threads:
+ for t in self.active_threads:
+ if t.isAlive():
+ t.join(0)
+ if not t.isAlive():
+ self.num_complete += 1
+ if not t.cache_hit:
+ self.num_run_complete += 1
+ self.active_threads.remove(t)
+ return False
+ return True
+
+ def BenchmarkRunFinished(self, br):
+ """Update internal counters after br finishes.
+
+ Note this is only used by schedv2 and is called by multiple threads.
+ Never throw any exception here.
+ """
+
+ assert self._schedv2 is not None
+ with self._internal_counter_lock:
+ self.num_complete += 1
+ if not br.cache_hit:
+ self.num_run_complete += 1
+
+ def Run(self):
+ self.start_time = time.time()
+ if self._schedv2 is not None:
+ self._schedv2.run_sched()
+ else:
+ self.active_threads = []
+ for run in self.benchmark_runs:
+ # Set threads to daemon so program exits when ctrl-c is pressed.
+ run.daemon = True
+ run.start()
+ self.active_threads.append(run)
+
+ def SetCacheConditions(self, cache_conditions):
+ for run in self.benchmark_runs:
+ run.SetCacheConditions(cache_conditions)
+
+ def Cleanup(self):
+ """Make sure all machines are unlocked."""
+ if self.locks_dir:
+ # We are using the file locks mechanism, so call machine_manager.Cleanup
+ # to unlock everything.
+ self.machine_manager.Cleanup()
+ else:
+ if test_flag.GetTestMode():
+ return
+
+ all_machines = self.locked_machines
+ if not all_machines:
+ return
+
+ # If we locked any machines earlier, make sure we unlock them now.
+ lock_mgr = afe_lock_machine.AFELockManager(
+ all_machines, '', self.labels[0].chromeos_root, None)
+ machine_states = lock_mgr.GetMachineStates('unlock')
+ for k, state in machine_states.iteritems():
+ if state['locked']:
+ lock_mgr.UpdateLockInAFE(False, k)
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
new file mode 100644
index 00000000..2278015b
--- /dev/null
+++ b/crosperf/experiment_factory.py
@@ -0,0 +1,331 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""A module to generate experiments."""
+
+from __future__ import print_function
+import os
+import re
+import socket
+
+from benchmark import Benchmark
+import config
+from experiment import Experiment
+from label import Label
+from label import MockLabel
+from results_cache import CacheConditions
+import test_flag
+import file_lock_machine
+
+# Users may want to run Telemetry tests either individually, or in
+# specified sets. Here we define sets of tests that users may want
+# to run together.
+
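+# A benchmark set is selected in an experiment file by using the set name as
+# the benchmark name, for example (an illustrative snippet, not a complete
+# experiment file):
+#
+#   benchmark: all_toolchain_perf {
+#     suite: telemetry_Crosperf
+#     iterations: 1
+#   }
+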
+telemetry_perfv2_tests = [
+ 'dromaeo.domcoreattr', 'dromaeo.domcoremodify', 'dromaeo.domcorequery',
+ 'dromaeo.domcoretraverse', 'kraken', 'octane', 'robohornet_pro', 'sunspider'
+]
+
+telemetry_pagecycler_tests = [
+ 'page_cycler_v2.intl_ar_fa_he',
+ 'page_cycler_v2.intl_es_fr_pt-BR',
+ 'page_cycler_v2.intl_hi_ru',
+ 'page_cycler_v2.intl_ja_zh',
+ 'page_cycler_v2.intl_ko_th_vi',
+ # 'page_cycler_v2.morejs',
+ # 'page_cycler_v2.moz',
+ # 'page_cycler_v2.netsim.top_10',
+ 'page_cycler_v2.tough_layout_cases',
+ 'page_cycler_v2.typical_25'
+]
+
+telemetry_toolchain_old_perf_tests = [
+ 'dromaeo.domcoremodify', 'page_cycler_v2.intl_es_fr_pt-BR',
+ 'page_cycler_v2.intl_hi_ru', 'page_cycler_v2.intl_ja_zh',
+ 'page_cycler_v2.intl_ko_th_vi', 'page_cycler_v2.netsim.top_10',
+ 'page_cycler_v2.typical_25', 'robohornet_pro', 'spaceport',
+ 'tab_switching.top_10'
+]
+telemetry_toolchain_perf_tests = [
+ 'octane',
+ 'kraken',
+ 'speedometer',
+ 'dromaeo.domcoreattr',
+ 'dromaeo.domcoremodify',
+ 'smoothness.tough_webgl_cases',
+]
+graphics_perf_tests = [
+ 'graphics_GLBench',
+ 'graphics_GLMark2',
+ 'graphics_SanAngeles',
+ 'graphics_WebGLAquarium',
+ 'graphics_WebGLPerformance',
+]
+telemetry_crosbolt_perf_tests = [
+ 'octane',
+ 'kraken',
+ 'speedometer',
+ 'jetstream',
+ 'startup.cold.blank_page',
+ 'smoothness.top_25_smooth',
+]
+crosbolt_perf_tests = [
+ 'graphics_WebGLAquarium',
+ 'video_PlaybackPerf.h264',
+ 'video_PlaybackPerf.vp9',
+ 'video_WebRtcPerf',
+ 'BootPerfServerCrosPerf',
+ 'power_Resume',
+ 'video_PlaybackPerf.h264',
+ 'build_RootFilesystemSize',
+# 'cheets_AntutuTest',
+# 'cheets_PerfBootServer',
+# 'cheets_CandyCrushTest',
+# 'cheets_LinpackTest',
+]
+
+
+class ExperimentFactory(object):
+ """Factory class for building an Experiment, given an ExperimentFile as input.
+
+ This factory is currently hardcoded to produce an experiment for running
+ ChromeOS benchmarks, but the idea is that in the future, other types
+ of experiments could be produced.
+ """
+
+ def AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
+ iterations, rm_chroot_tmp, perf_args, suite,
+ show_all_results, retries, run_local):
+ """Add all the tests in a set to the benchmarks list."""
+ for test_name in benchmark_list:
+ telemetry_benchmark = Benchmark(test_name, test_name, test_args,
+ iterations, rm_chroot_tmp, perf_args,
+ suite, show_all_results, retries,
+ run_local)
+ benchmarks.append(telemetry_benchmark)
+
+ def GetExperiment(self, experiment_file, working_directory, log_dir):
+ """Construct an experiment from an experiment file."""
+ global_settings = experiment_file.GetGlobalSettings()
+ experiment_name = global_settings.GetField('name')
+ board = global_settings.GetField('board')
+ remote = global_settings.GetField('remote')
+ # Remove any quotation marks (" or ') from the remote entries, in case
+ # the user added them to the remote string.
+ new_remote = []
+ if remote:
+ for i in remote:
+ c = re.sub('["\']', '', i)
+ new_remote.append(c)
+ remote = new_remote
+ chromeos_root = global_settings.GetField('chromeos_root')
+ rm_chroot_tmp = global_settings.GetField('rm_chroot_tmp')
+ perf_args = global_settings.GetField('perf_args')
+ acquire_timeout = global_settings.GetField('acquire_timeout')
+ cache_dir = global_settings.GetField('cache_dir')
+ cache_only = global_settings.GetField('cache_only')
+ config.AddConfig('no_email', global_settings.GetField('no_email'))
+ share_cache = global_settings.GetField('share_cache')
+ results_dir = global_settings.GetField('results_dir')
+ use_file_locks = global_settings.GetField('use_file_locks')
+ locks_dir = global_settings.GetField('locks_dir')
+ # If we pass a blank locks_dir to the Experiment, it will use the AFE server
+ # lock mechanism. So if the user specified use_file_locks, but did not
+ # specify a locks dir, set the locks dir to the default locks dir in
+ # file_lock_machine.
+ if use_file_locks and not locks_dir:
+ locks_dir = file_lock_machine.Machine.LOCKS_DIR
+ chrome_src = global_settings.GetField('chrome_src')
+ show_all_results = global_settings.GetField('show_all_results')
+ log_level = global_settings.GetField('logging_level')
+ if log_level not in ('quiet', 'average', 'verbose'):
+ log_level = 'verbose'
+ # Default cache hit conditions. The image checksum in the cache and the
+ # computed checksum of the image must match. Also a cache file must exist.
+ cache_conditions = [
+ CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH
+ ]
+ if global_settings.GetField('rerun_if_failed'):
+ cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
+ if global_settings.GetField('rerun'):
+ cache_conditions.append(CacheConditions.FALSE)
+ if global_settings.GetField('same_machine'):
+ cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
+ if global_settings.GetField('same_specs'):
+ cache_conditions.append(CacheConditions.MACHINES_MATCH)
+
+ # Construct benchmarks.
+ # Some fields are common with global settings. The values are
+ # inherited and/or merged with the global settings values.
+ benchmarks = []
+ all_benchmark_settings = experiment_file.GetSettings('benchmark')
+ for benchmark_settings in all_benchmark_settings:
+ benchmark_name = benchmark_settings.name
+ test_name = benchmark_settings.GetField('test_name')
+ if not test_name:
+ test_name = benchmark_name
+ test_args = benchmark_settings.GetField('test_args')
+ iterations = benchmark_settings.GetField('iterations')
+ suite = benchmark_settings.GetField('suite')
+ retries = benchmark_settings.GetField('retries')
+ run_local = benchmark_settings.GetField('run_local')
+
+ if suite == 'telemetry_Crosperf':
+ if test_name == 'all_perfv2':
+ self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests, test_args,
+ iterations, rm_chroot_tmp, perf_args, suite,
+ show_all_results, retries, run_local)
+ elif test_name == 'all_pagecyclers':
+ self.AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
+ test_args, iterations, rm_chroot_tmp,
+ perf_args, suite, show_all_results, retries,
+ run_local)
+ elif test_name == 'all_toolchain_perf':
+ self.AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
+ test_args, iterations, rm_chroot_tmp,
+ perf_args, suite, show_all_results, retries,
+ run_local)
+ # Add non-telemetry toolchain-perf benchmarks:
+ benchmarks.append(
+ Benchmark(
+ 'graphics_WebGLAquarium',
+ 'graphics_WebGLAquarium',
+ '',
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ '',
+ show_all_results,
+ retries,
+ run_local=False))
+ elif test_name == 'all_toolchain_perf_old':
+ self.AppendBenchmarkSet(benchmarks,
+ telemetry_toolchain_old_perf_tests, test_args,
+ iterations, rm_chroot_tmp, perf_args, suite,
+ show_all_results, retries, run_local)
+ else:
+ benchmark = Benchmark(test_name, test_name, test_args, iterations,
+ rm_chroot_tmp, perf_args, suite,
+ show_all_results, retries, run_local)
+ benchmarks.append(benchmark)
+ else:
+ if test_name == 'all_graphics_perf':
+ self.AppendBenchmarkSet(benchmarks,
+ graphics_perf_tests, '',
+ iterations, rm_chroot_tmp, perf_args, '',
+ show_all_results, retries, run_local=False)
+ elif test_name == 'all_crosbolt_perf':
+ self.AppendBenchmarkSet(benchmarks,
+ telemetry_crosbolt_perf_tests, test_args,
+ iterations, rm_chroot_tmp, perf_args,
+ 'telemetry_Crosperf', show_all_results,
+ retries, run_local)
+ self.AppendBenchmarkSet(benchmarks,
+ crosbolt_perf_tests, '',
+ iterations, rm_chroot_tmp, perf_args, '',
+ show_all_results, retries, run_local=False)
+ else:
+ # Add the single benchmark.
+ benchmark = Benchmark(
+ benchmark_name,
+ test_name,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local=False)
+ benchmarks.append(benchmark)
+
+ if not benchmarks:
+ raise RuntimeError('No benchmarks specified')
+
+ # Construct labels.
+ # Some fields are common with global settings. The values are
+ # inherited and/or merged with the global settings values.
+ labels = []
+ all_label_settings = experiment_file.GetSettings('label')
+ all_remote = list(remote)
+ for label_settings in all_label_settings:
+ label_name = label_settings.name
+ image = label_settings.GetField('chromeos_image')
+ autotest_path = label_settings.GetField('autotest_path')
+ chromeos_root = label_settings.GetField('chromeos_root')
+ my_remote = label_settings.GetField('remote')
+ compiler = label_settings.GetField('compiler')
+ new_remote = []
+ if my_remote:
+ for i in my_remote:
+ c = re.sub('["\']', '', i)
+ new_remote.append(c)
+ my_remote = new_remote
+ if image == '':
+ build = label_settings.GetField('build')
+ if len(build) == 0:
+ raise RuntimeError("Can not have empty 'build' field!")
+ image, autotest_path = label_settings.GetXbuddyPath(build,
+ autotest_path,
+ board,
+ chromeos_root,
+ log_level)
+
+ cache_dir = label_settings.GetField('cache_dir')
+ chrome_src = label_settings.GetField('chrome_src')
+
+ # TODO(yunlian): We should consolidate code in machine_manager.py
+ # to determine whether we are running from within google or not
+ if ('corp.google.com' in socket.gethostname() and
+ (not my_remote or my_remote == remote and
+ global_settings.GetField('board') != board)):
+ my_remote = self.GetDefaultRemotes(board)
+ if global_settings.GetField('same_machine') and len(my_remote) > 1:
+ raise RuntimeError('Only one remote is allowed when same_machine '
+ 'is turned on')
+ all_remote += my_remote
+ image_args = label_settings.GetField('image_args')
+ if test_flag.GetTestMode():
+ # pylint: disable=too-many-function-args
+ label = MockLabel(label_name, image, autotest_path, chromeos_root,
+ board, my_remote, image_args, cache_dir, cache_only,
+ log_level, compiler, chrome_src)
+ else:
+ label = Label(label_name, image, autotest_path, chromeos_root, board,
+ my_remote, image_args, cache_dir, cache_only, log_level,
+ compiler, chrome_src)
+ labels.append(label)
+
+ if not labels:
+ raise RuntimeError('No labels specified')
+
+ email = global_settings.GetField('email')
+ all_remote += list(set(my_remote))
+ all_remote = list(set(all_remote))
+ experiment = Experiment(experiment_name, all_remote, working_directory,
+ chromeos_root, cache_conditions, labels, benchmarks,
+ experiment_file.Canonicalize(), email,
+ acquire_timeout, log_dir, log_level, share_cache,
+ results_dir, locks_dir)
+
+ return experiment
+
+ def GetDefaultRemotes(self, board):
+ default_remotes_file = os.path.join(
+ os.path.dirname(__file__), 'default_remotes')
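+ # Each line of the default_remotes file is expected to map a board name
+ # to its machines, for example (the hostnames here are made up):
+ #   lumpy : chromeos-lumpy1.cros chromeos-lumpy2.cros chromeos-lumpy3.cros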
+ try:
+ with open(default_remotes_file) as f:
+ for line in f:
+ key, v = line.split(':')
+ if key.strip() == board:
+ remotes = v.strip().split()
+ if remotes:
+ return remotes
+ else:
+ raise RuntimeError('There is no remote for {0}'.format(board))
+ except IOError:
+ # TODO: rethrow instead of throwing different exception.
+ raise RuntimeError('IOError while reading file {0}'
+ .format(default_remotes_file))
+ else:
+ raise RuntimeError('There is no remote for {0}'.format(board))
diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py
new file mode 100755
index 00000000..02bfd0a1
--- /dev/null
+++ b/crosperf/experiment_factory_unittest.py
@@ -0,0 +1,242 @@
+#!/usr/bin/env python2
+
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Unit test for experiment_factory.py"""
+
+from __future__ import print_function
+
+import StringIO
+import socket
+import mock
+import unittest
+
+from cros_utils.file_utils import FileUtils
+
+from experiment_factory import ExperimentFactory
+from experiment_file import ExperimentFile
+import test_flag
+import benchmark
+import experiment_factory
+import settings_factory
+
+EXPERIMENT_FILE_1 = """
+ board: x86-alex
+ remote: chromeos-alex3
+
+ benchmark: PageCycler {
+ iterations: 3
+ }
+
+ image1 {
+ chromeos_image: /usr/local/google/cros_image1.bin
+ }
+
+ image2 {
+ chromeos_image: /usr/local/google/cros_image2.bin
+ }
+ """
+
+# pylint: disable=too-many-function-args
+
+
+class ExperimentFactoryTest(unittest.TestCase):
+ """Class for running experiment factory unittests."""
+
+ def setUp(self):
+ self.append_benchmark_call_args = []
+
+ def testLoadExperimentFile1(self):
+ experiment_file = ExperimentFile(StringIO.StringIO(EXPERIMENT_FILE_1))
+ exp = ExperimentFactory().GetExperiment(
+ experiment_file, working_directory='', log_dir='')
+ self.assertEqual(exp.remote, ['chromeos-alex3'])
+
+ self.assertEqual(len(exp.benchmarks), 1)
+ self.assertEqual(exp.benchmarks[0].name, 'PageCycler')
+ self.assertEqual(exp.benchmarks[0].test_name, 'PageCycler')
+ self.assertEqual(exp.benchmarks[0].iterations, 3)
+
+ self.assertEqual(len(exp.labels), 2)
+ self.assertEqual(exp.labels[0].chromeos_image,
+ '/usr/local/google/cros_image1.bin')
+ self.assertEqual(exp.labels[0].board, 'x86-alex')
+
+ def test_append_benchmark_set(self):
+ ef = ExperimentFactory()
+
+ bench_list = []
+ ef.AppendBenchmarkSet(bench_list, experiment_factory.telemetry_perfv2_tests,
+ '', 1, False, '', 'telemetry_Crosperf', False, 0,
+ False)
+ self.assertEqual(
+ len(bench_list), len(experiment_factory.telemetry_perfv2_tests))
+ self.assertTrue(type(bench_list[0]) is benchmark.Benchmark)
+
+ bench_list = []
+ ef.AppendBenchmarkSet(bench_list,
+ experiment_factory.telemetry_pagecycler_tests, '', 1,
+ False, '', 'telemetry_Crosperf', False, 0, False)
+ self.assertEqual(
+ len(bench_list), len(experiment_factory.telemetry_pagecycler_tests))
+ self.assertTrue(type(bench_list[0]) is benchmark.Benchmark)
+
+ bench_list = []
+ ef.AppendBenchmarkSet(bench_list,
+ experiment_factory.telemetry_toolchain_perf_tests, '',
+ 1, False, '', 'telemetry_Crosperf', False, 0, False)
+ self.assertEqual(
+ len(bench_list), len(experiment_factory.telemetry_toolchain_perf_tests))
+ self.assertTrue(type(bench_list[0]) is benchmark.Benchmark)
+
+ @mock.patch.object(socket, 'gethostname')
+ def test_get_experiment(self, mock_socket):
+
+ test_flag.SetTestMode(False)
+ self.append_benchmark_call_args = []
+
+ def FakeAppendBenchmarkSet(bench_list, set_list, args, iters, rm_ch,
+ perf_args, suite, show_all):
+ 'Helper function for test_get_experiment'
+ arg_list = [
+ bench_list, set_list, args, iters, rm_ch, perf_args, suite, show_all
+ ]
+ self.append_benchmark_call_args.append(arg_list)
+
+ def FakeGetDefaultRemotes(board):
+ if not board:
+ return []
+ return ['fake_chromeos_machine1.cros', 'fake_chromeos_machine2.cros']
+
+ def FakeGetXbuddyPath(build, autotest_dir, board, chroot, log_level):
+ autotest_path = autotest_dir
+ if not autotest_path:
+ autotest_path = 'fake_autotest_path'
+ if not build or not board or not chroot or not log_level:
+ return '', autotest_path
+ return 'fake_image_path', autotest_path
+
+ ef = ExperimentFactory()
+ ef.AppendBenchmarkSet = FakeAppendBenchmarkSet
+ ef.GetDefaultRemotes = FakeGetDefaultRemotes
+
+ label_settings = settings_factory.LabelSettings('image_label')
+ benchmark_settings = settings_factory.BenchmarkSettings('bench_test')
+ global_settings = settings_factory.GlobalSettings('test_name')
+
+ label_settings.GetXbuddyPath = FakeGetXbuddyPath
+
+ mock_experiment_file = ExperimentFile(StringIO.StringIO(''))
+ mock_experiment_file.all_settings = []
+
+ test_flag.SetTestMode(True)
+ # Basic test.
+ global_settings.SetField('name', 'unittest_test')
+ global_settings.SetField('board', 'lumpy')
+ global_settings.SetField('remote', '123.45.67.89 123.45.76.80')
+ benchmark_settings.SetField('test_name', 'kraken')
+ benchmark_settings.SetField('suite', 'telemetry_Crosperf')
+ benchmark_settings.SetField('iterations', 1)
+ label_settings.SetField(
+ 'chromeos_image',
+ 'chromeos/src/build/images/lumpy/latest/chromiumos_test_image.bin')
+ label_settings.SetField('chrome_src', '/usr/local/google/home/chrome-top')
+ label_settings.SetField('autotest_path', '/tmp/autotest')
+
+ mock_experiment_file.global_settings = global_settings
+ mock_experiment_file.all_settings.append(label_settings)
+ mock_experiment_file.all_settings.append(benchmark_settings)
+ mock_experiment_file.all_settings.append(global_settings)
+
+ mock_socket.return_value = ''
+
+ # First test. General test.
+ exp = ef.GetExperiment(mock_experiment_file, '', '')
+ self.assertEqual(exp.remote, ['123.45.67.89', '123.45.76.80'])
+ self.assertEqual(exp.cache_conditions, [0, 2, 1])
+ self.assertEqual(exp.log_level, 'average')
+
+ self.assertEqual(len(exp.benchmarks), 1)
+ self.assertEqual(exp.benchmarks[0].name, 'kraken')
+ self.assertEqual(exp.benchmarks[0].test_name, 'kraken')
+ self.assertEqual(exp.benchmarks[0].iterations, 1)
+ self.assertEqual(exp.benchmarks[0].suite, 'telemetry_Crosperf')
+ self.assertFalse(exp.benchmarks[0].show_all_results)
+
+ self.assertEqual(len(exp.labels), 1)
+ self.assertEqual(exp.labels[0].chromeos_image,
+ 'chromeos/src/build/images/lumpy/latest/'
+ 'chromiumos_test_image.bin')
+ self.assertEqual(exp.labels[0].autotest_path, '/tmp/autotest')
+ self.assertEqual(exp.labels[0].board, 'lumpy')
+
+ # Second test: Remotes listed in labels.
+ test_flag.SetTestMode(True)
+ label_settings.SetField('remote', 'chromeos1.cros chromeos2.cros')
+ exp = ef.GetExperiment(mock_experiment_file, '', '')
+ self.assertEqual(
+ exp.remote,
+ ['chromeos1.cros', 'chromeos2.cros', '123.45.67.89', '123.45.76.80'])
+
+ # Third test: Automatic fixing of bad logging_level param:
+ global_settings.SetField('logging_level', 'really loud!')
+ exp = ef.GetExperiment(mock_experiment_file, '', '')
+ self.assertEqual(exp.log_level, 'verbose')
+
+ # Fourth test: Setting cache conditions; only 1 remote with "same_machine"
+ global_settings.SetField('rerun_if_failed', 'true')
+ global_settings.SetField('rerun', 'true')
+ global_settings.SetField('same_machine', 'true')
+ global_settings.SetField('same_specs', 'true')
+
+ self.assertRaises(Exception, ef.GetExperiment, mock_experiment_file, '', '')
+ label_settings.SetField('remote', '')
+ global_settings.SetField('remote', '123.45.67.89')
+ exp = ef.GetExperiment(mock_experiment_file, '', '')
+ self.assertEqual(exp.cache_conditions, [0, 2, 3, 4, 6, 1])
+
+ # Fifth Test: Adding a second label; calling GetXbuddyPath; omitting all
+ # remotes (Call GetDefaultRemotes).
+ mock_socket.return_value = 'test.corp.google.com'
+ global_settings.SetField('remote', '')
+ global_settings.SetField('same_machine', 'false')
+
+ label_settings_2 = settings_factory.LabelSettings('official_image_label')
+ label_settings_2.SetField('chromeos_root', 'chromeos')
+ label_settings_2.SetField('build', 'official-dev')
+ label_settings_2.SetField('autotest_path', '')
+ label_settings_2.GetXbuddyPath = FakeGetXbuddyPath
+
+ mock_experiment_file.all_settings.append(label_settings_2)
+ exp = ef.GetExperiment(mock_experiment_file, '', '')
+ self.assertEqual(len(exp.labels), 2)
+ self.assertEqual(exp.labels[1].chromeos_image, 'fake_image_path')
+ self.assertEqual(exp.labels[1].autotest_path, 'fake_autotest_path')
+ self.assertEqual(
+ exp.remote,
+ ['fake_chromeos_machine1.cros', 'fake_chromeos_machine2.cros'])
+
+ def test_get_default_remotes(self):
+ board_list = [
+ 'x86-alex', 'lumpy', 'elm', 'parrot', 'daisy', 'peach_pit', 'peppy',
+ 'squawks'
+ ]
+
+ ef = ExperimentFactory()
+ self.assertRaises(Exception, ef.GetDefaultRemotes, 'bad-board')
+
+ # Verify that we have entries for every board, and that we get three
+ # machines back for each board.
+ for b in board_list:
+ remotes = ef.GetDefaultRemotes(b)
+ if b == 'elm':
+ self.assertEqual(len(remotes), 2)
+ else:
+ self.assertEqual(len(remotes), 3)
+
+
+if __name__ == '__main__':
+ FileUtils.Configure(True)
+ test_flag.SetTestMode(True)
+ unittest.main()
diff --git a/crosperf/experiment_file.py b/crosperf/experiment_file.py
new file mode 100644
index 00000000..016e9d86
--- /dev/null
+++ b/crosperf/experiment_file.py
@@ -0,0 +1,205 @@
+# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""The experiment file module. It manages the input file of crosperf."""
+
+from __future__ import print_function
+import os.path
+import re
+from settings_factory import SettingsFactory
+
+
+class ExperimentFile(object):
+ """Class for parsing the experiment file format.
+
+ The grammar for this format is:
+
+ experiment = { _FIELD_VALUE_RE | settings }
+ settings = _OPEN_SETTINGS_RE
+ { _FIELD_VALUE_RE }
+ _CLOSE_SETTINGS_RE
+
+ Where the regexes are terminals defined below. This results in a format
+ which looks something like:
+
+ field_name: value
+ settings_type: settings_name {
+ field_name: value
+ field_name: value
+ }
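+
+ For example (the field values here are only illustrative):
+
+ board: lumpy
+ remote: 123.45.67.89
+ benchmark: octane {
+ suite: telemetry_Crosperf
+ iterations: 3
+ }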
+ """
+
+ # Field regex, e.g. "iterations: 3"
+ _FIELD_VALUE_RE = re.compile(r'(\+)?\s*(\w+?)(?:\.(\S+))?\s*:\s*(.*)')
+ # Open settings regex, e.g. "label {"
+ _OPEN_SETTINGS_RE = re.compile(r'(?:([\w.-]+):)?\s*([\w.-]+)\s*{')
+ # Close settings regex.
+ _CLOSE_SETTINGS_RE = re.compile(r'}')
+
+ def __init__(self, experiment_file, overrides=None):
+ """Construct object from file-like experiment_file.
+
+ Args:
+ experiment_file: file-like object with text description of experiment.
+ overrides: A settings object that will override fields in other settings.
+
+ Raises:
+ Exception: if invalid build type or description is invalid.
+ """
+ self.all_settings = []
+ self.global_settings = SettingsFactory().GetSettings('global', 'global')
+ self.all_settings.append(self.global_settings)
+
+ self._Parse(experiment_file)
+
+ for settings in self.all_settings:
+ settings.Inherit()
+ settings.Validate()
+ if overrides:
+ settings.Override(overrides)
+
+ def GetSettings(self, settings_type):
+ """Return nested fields from the experiment file."""
+ res = []
+ for settings in self.all_settings:
+ if settings.settings_type == settings_type:
+ res.append(settings)
+ return res
+
+ def GetGlobalSettings(self):
+ """Return the global fields from the experiment file."""
+ return self.global_settings
+
+ def _ParseField(self, reader):
+ """Parse a key/value field."""
+ line = reader.CurrentLine().strip()
+ match = ExperimentFile._FIELD_VALUE_RE.match(line)
+ append, name, _, text_value = match.groups()
+ return (name, text_value, append)
+
+ def _ParseSettings(self, reader):
+ """Parse a settings block."""
+ line = reader.CurrentLine().strip()
+ match = ExperimentFile._OPEN_SETTINGS_RE.match(line)
+ settings_type = match.group(1)
+ if settings_type is None:
+ settings_type = ''
+ settings_name = match.group(2)
+ settings = SettingsFactory().GetSettings(settings_name, settings_type)
+ settings.SetParentSettings(self.global_settings)
+
+ while reader.NextLine():
+ line = reader.CurrentLine().strip()
+
+ if not line:
+ continue
+ elif ExperimentFile._FIELD_VALUE_RE.match(line):
+ field = self._ParseField(reader)
+ settings.SetField(field[0], field[1], field[2])
+ elif ExperimentFile._CLOSE_SETTINGS_RE.match(line):
+ return settings
+
+ raise EOFError('Unexpected EOF while parsing settings block.')
+
+ def _Parse(self, experiment_file):
+ """Parse experiment file and create settings."""
+ reader = ExperimentFileReader(experiment_file)
+ settings_names = {}
+ try:
+ while reader.NextLine():
+ line = reader.CurrentLine().strip()
+
+ if not line:
+ continue
+ elif ExperimentFile._OPEN_SETTINGS_RE.match(line):
+ new_settings = self._ParseSettings(reader)
+ if new_settings.name in settings_names:
+ raise SyntaxError("Duplicate settings name: '%s'." %
+ new_settings.name)
+ settings_names[new_settings.name] = True
+ self.all_settings.append(new_settings)
+ elif ExperimentFile._FIELD_VALUE_RE.match(line):
+ field = self._ParseField(reader)
+ self.global_settings.SetField(field[0], field[1], field[2])
+ else:
+ raise IOError('Unexpected line.')
+ except Exception, err:
+ raise RuntimeError('Line %d: %s\n==> %s' % (reader.LineNo(), str(err),
+ reader.CurrentLine(False)))
+
+ def Canonicalize(self):
+ """Convert parsed experiment file back into an experiment file."""
+ res = ''
+ board = ''
+ for field_name in self.global_settings.fields:
+ field = self.global_settings.fields[field_name]
+ if field.assigned:
+ res += '%s: %s\n' % (field.name, field.GetString())
+ if field.name == 'board':
+ board = field.GetString()
+ res += '\n'
+
+ for settings in self.all_settings:
+ if settings.settings_type != 'global':
+ res += '%s: %s {\n' % (settings.settings_type, settings.name)
+ for field_name in settings.fields:
+ field = settings.fields[field_name]
+ if field.assigned:
+ res += '\t%s: %s\n' % (field.name, field.GetString())
+ if field.name == 'chromeos_image':
+ real_file = (
+ os.path.realpath(os.path.expanduser(field.GetString())))
+ if real_file != field.GetString():
+ res += '\t#actual_image: %s\n' % real_file
+ if field.name == 'build':
+ chromeos_root_field = settings.fields['chromeos_root']
+ if chromeos_root_field:
+ chromeos_root = chromeos_root_field.GetString()
+ value = field.GetString()
+ autotest_field = settings.fields['autotest_path']
+ autotest_path = ''
+ if autotest_field.assigned:
+ autotest_path = autotest_field.GetString()
+ image_path, autotest_path = settings.GetXbuddyPath(value,
+ autotest_path,
+ board,
+ chromeos_root,
+ 'quiet')
+ res += '\t#actual_image: %s\n' % image_path
+ if not autotest_field.assigned:
+ res += '\t#actual_autotest_path: %s\n' % autotest_path
+
+ res += '}\n\n'
+
+ return res
+
+
+class ExperimentFileReader(object):
+ """Handle reading lines from an experiment file."""
+
+ def __init__(self, file_object):
+ self.file_object = file_object
+ self.current_line = None
+ self.current_line_no = 0
+
+ def CurrentLine(self, strip_comment=True):
+ """Return the next line from the file, without advancing the iterator."""
+ if strip_comment:
+ return self._StripComment(self.current_line)
+ return self.current_line
+
+ def NextLine(self, strip_comment=True):
+ """Advance the iterator and return the next line of the file."""
+ self.current_line_no += 1
+ self.current_line = self.file_object.readline()
+ return self.CurrentLine(strip_comment)
+
+ def _StripComment(self, line):
+ """Strip comments starting with # from a line."""
+ if '#' in line:
+ line = line[:line.find('#')] + line[-1]
+ return line
+
+ def LineNo(self):
+ """Return the current line number."""
+ return self.current_line_no
diff --git a/crosperf/experiment_file_unittest.py b/crosperf/experiment_file_unittest.py
new file mode 100755
index 00000000..ed1f176c
--- /dev/null
+++ b/crosperf/experiment_file_unittest.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python2
+
+# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""The unittest of experiment_file."""
+from __future__ import print_function
+import StringIO
+import unittest
+from experiment_file import ExperimentFile
+
+EXPERIMENT_FILE_1 = """
+ board: x86-alex
+ remote: chromeos-alex3
+ perf_args: record -a -e cycles
+ benchmark: PageCycler {
+ iterations: 3
+ }
+
+ image1 {
+ chromeos_image: /usr/local/google/cros_image1.bin
+ }
+
+ image2 {
+ remote: chromeos-lumpy1
+ chromeos_image: /usr/local/google/cros_image2.bin
+ }
+ """
+
+EXPERIMENT_FILE_2 = """
+ board: x86-alex
+ remote: chromeos-alex3
+ iterations: 3
+
+ benchmark: PageCycler {
+ }
+
+ benchmark: AndroidBench {
+ iterations: 2
+ }
+
+ image1 {
+ chromeos_image:/usr/local/google/cros_image1.bin
+ }
+
+ image2 {
+ chromeos_image: /usr/local/google/cros_image2.bin
+ }
+ """
+
+EXPERIMENT_FILE_3 = """
+ board: x86-alex
+ remote: chromeos-alex3
+ iterations: 3
+
+ benchmark: PageCycler {
+ }
+
+ image1 {
+ chromeos_image:/usr/local/google/cros_image1.bin
+ }
+
+ image1 {
+ chromeos_image: /usr/local/google/cros_image2.bin
+ }
+ """
+
+OUTPUT_FILE = """board: x86-alex
+remote: chromeos-alex3
+perf_args: record -a -e cycles
+
+benchmark: PageCycler {
+\titerations: 3
+}
+
+label: image1 {
+\tremote: chromeos-alex3
+\tchromeos_image: /usr/local/google/cros_image1.bin
+}
+
+label: image2 {
+\tremote: chromeos-lumpy1
+\tchromeos_image: /usr/local/google/cros_image2.bin
+}\n\n"""
+
+
+class ExperimentFileTest(unittest.TestCase):
+ """The main class for Experiment File test."""
+ def testLoadExperimentFile1(self):
+ input_file = StringIO.StringIO(EXPERIMENT_FILE_1)
+ experiment_file = ExperimentFile(input_file)
+ global_settings = experiment_file.GetGlobalSettings()
+ self.assertEqual(global_settings.GetField('remote'), ['chromeos-alex3'])
+ self.assertEqual(
+ global_settings.GetField('perf_args'), 'record -a -e cycles')
+ benchmark_settings = experiment_file.GetSettings('benchmark')
+ self.assertEqual(len(benchmark_settings), 1)
+ self.assertEqual(benchmark_settings[0].name, 'PageCycler')
+ self.assertEqual(benchmark_settings[0].GetField('iterations'), 3)
+
+ label_settings = experiment_file.GetSettings('label')
+ self.assertEqual(len(label_settings), 2)
+ self.assertEqual(label_settings[0].name, 'image1')
+ self.assertEqual(label_settings[0].GetField('chromeos_image'),
+ '/usr/local/google/cros_image1.bin')
+ self.assertEqual(label_settings[1].GetField('remote'), ['chromeos-lumpy1'])
+ self.assertEqual(label_settings[0].GetField('remote'), ['chromeos-alex3'])
+
+ def testOverrideSetting(self):
+ input_file = StringIO.StringIO(EXPERIMENT_FILE_2)
+ experiment_file = ExperimentFile(input_file)
+ global_settings = experiment_file.GetGlobalSettings()
+ self.assertEqual(global_settings.GetField('remote'), ['chromeos-alex3'])
+
+ benchmark_settings = experiment_file.GetSettings('benchmark')
+ self.assertEqual(len(benchmark_settings), 2)
+ self.assertEqual(benchmark_settings[0].name, 'PageCycler')
+ self.assertEqual(benchmark_settings[0].GetField('iterations'), 3)
+ self.assertEqual(benchmark_settings[1].name, 'AndroidBench')
+ self.assertEqual(benchmark_settings[1].GetField('iterations'), 2)
+
+ def testDuplicateLabel(self):
+ input_file = StringIO.StringIO(EXPERIMENT_FILE_3)
+ self.assertRaises(Exception, ExperimentFile, input_file)
+
+ def testCanonicalize(self):
+ input_file = StringIO.StringIO(EXPERIMENT_FILE_1)
+ experiment_file = ExperimentFile(input_file)
+ res = experiment_file.Canonicalize()
+ self.assertEqual(res, OUTPUT_FILE)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/crosperf/experiment_files/README b/crosperf/experiment_files/README
new file mode 100644
index 00000000..d9c96870
--- /dev/null
+++ b/crosperf/experiment_files/README
@@ -0,0 +1,34 @@
+To use these experiment files, replace the board, remote and images
+placeholders and run crosperf on them.
+
+Further information about crosperf:
+https://sites.google.com/a/google.com/chromeos-toolchain-team-home2/home/team-tools-and-scripts/crosperf-cros-image-performance-comparison-tool
+
+The final experiment file should look something like the following (but with
+different actual values for the fields):
+
+board: lumpy
+remote: 123.45.67.089
+
+# Add images you want to test:
+my_image {
+ chromeos_image: /usr/local/chromeos/src/build/images/lumpy/chromiumos_test_image.bin
+}
+
+vanilla_image {
+ chromeos_root: /usr/local/chromeos
+ build: lumpy-release/R35-5672.0.0
+}
+
+# Paste experiment benchmarks here. For example, page_cycler_v2.morejs is
+# pasted below.
+
+# This experiment just runs a short autotest which measures the performance of
+# Telemetry's page_cycler_v2.morejs. In addition, it profiles cycles while the
+# benchmark runs (see perf_args below).
+
+perf_args: record -e cycles
+
+benchmark: page_cycler_v2.morejs {
+ suite: telemetry_Crosperf
+ iterations: 1
+}
diff --git a/crosperf/experiment_files/aes_perf.exp b/crosperf/experiment_files/aes_perf.exp
new file mode 100644
index 00000000..063c74be
--- /dev/null
+++ b/crosperf/experiment_files/aes_perf.exp
@@ -0,0 +1,21 @@
+# This experiment just runs a short autotest which measures the performance of
+# aes encryption.
+#
+# You should replace all the placeholders, marked by angle-brackets, with the
+# appropriate actual values.
+
+name: aes_example
+board: <your-board-goes-here>
+
+# Note: You can specify multiple remotes, to run your tests in parallel on
+# multiple machines. e.g. "remote: test-machine-1.com test-machine-2.com
+# test-machine-3.com"
+remote: <your-remote-goes-here>
+
+benchmark: platform_AesThroughput {
+}
+
+# Replace the chromeos image below with the actual path to your test image.
+test_image {
+ chromeos_image:<path-to-your-chroot>/src/build/images/<board>/test-image/chromiumos_test_image.bin
+}
diff --git a/crosperf/experiment_files/bloat_perf.exp b/crosperf/experiment_files/bloat_perf.exp
new file mode 100644
index 00000000..14681778
--- /dev/null
+++ b/crosperf/experiment_files/bloat_perf.exp
@@ -0,0 +1,25 @@
+# This experiment just runs a short telemetry autotest which measures
+# the performance of the page_cycler_v2.bloat test.
+#
+# You should replace all the placeholders, marked by angle-brackets, with the
+# appropriate actual values.
+
+name: bloat_perf_example
+board: <your-board-goes-here>
+
+# Note: You can specify multiple remotes, to run your tests in parallel on
+# multiple machines. e.g. "remote: test-machine-1.com test-machine-2.com
+# test-machine-3.com"
+remote: <your-remote-goes-here>
+
+perf_args: record -e cycles
+
+benchmark: page_cycler_v2.bloat {
+ suite: telemetry_Crosperf
+ iterations: 1
+}
+
+# Replace the chromeos image below with the actual path to your test image.
+test_image {
+ chromeos_image:<path-to-your-chroot>/src/build/images/<board>/test-image/chromiumos_test_image.bin
+}
diff --git a/crosperf/experiment_files/morejs_perf.exp b/crosperf/experiment_files/morejs_perf.exp
new file mode 100644
index 00000000..ebc54753
--- /dev/null
+++ b/crosperf/experiment_files/morejs_perf.exp
@@ -0,0 +1,25 @@
+# This experiment just runs a short telemetry autotest which measures
+# the performance of the page_cycler_v2.morejs test.
+#
+# You should replace all the placeholders, marked by angle-brackets, with the
+# appropriate actual values.
+
+name: morejs_perf_example
+board: <your-board-goes-here>
+
+# Note: You can specify multiple remotes, to run your tests in parallel on
+# multiple machines. e.g. "remote: test-machine-1.com test-machine2.com
+# test-machine3.com"
+remote: <your-remote-goes-here>
+
+perf_args: record -e cycles
+
+benchmark: page_cycler_v2.morejs {
+ suite: telemetry_Crosperf
+ iterations: 1
+}
+
+# Replace the chromeos image below with the actual path to your test image.
+test_image {
+ chromeos_image:<path-to-your-chroot>/src/build/images/<board>/test-image/chromiumos_test_image.bin
+}
diff --git a/crosperf/experiment_files/non-telemetry-tests.exp b/crosperf/experiment_files/non-telemetry-tests.exp
new file mode 100644
index 00000000..0ad1fe5c
--- /dev/null
+++ b/crosperf/experiment_files/non-telemetry-tests.exp
@@ -0,0 +1,31 @@
+# This example experiment file shows how to run some basic non-Telemetry
+# autotest tests.
+#
+# You should replace all the placeholders, marked by angle-brackets,
+# with the appropriate actual values.
+
+name: non_telemetry_tests_example
+board: <your-board-goes-here>
+
+# Note: You can specify multiple remotes, to run your tests in parallel on
+# multiple machines. e.g. "remote: test-machine-1.com test-machine2.com
+# test-machine3.com"
+remote: <your-remote-goes-here>
+
+benchmark: BootPerfServer {
+ test_name: BootPerfServer
+ iterations: 1
+}
+
+benchmark: bvt {
+ test_name: suite:bvt
+}
+
+benchmark: login_LoginSuccess {
+ test_name: login_LoginSuccess
+}
+
+# Replace the chromeos image below with the actual path to your test image.
+test_image {
+ chromeos_image:<path-to-your-chroot>/src/build/images/<board>/test-image/chromiumos_test_image.bin
+}
diff --git a/crosperf/experiment_files/official-image.exp b/crosperf/experiment_files/official-image.exp
new file mode 100644
index 00000000..bce7d6a3
--- /dev/null
+++ b/crosperf/experiment_files/official-image.exp
@@ -0,0 +1,41 @@
+# This example experiment file shows how to run a basic test, using
+# official images.
+#
+# You should replace all the placeholders, marked by angle-brackets,
+# with the appropriate actual values.
+
+name: official_image_example
+
+board: <your-board-goes-here>
+
+# Note: You can specify multiple remotes, to run your tests in parallel on
+# multiple machines. e.g. "remote: test-machine-1.com test-machine2.com
+# test-machine3.com"
+remote: <your-remote-goes-here>
+
+benchmark: canvasmark {
+ suite:telemetry_Crosperf
+ iterations: 1
+}
+
+
+# Replace <path-to-your-chroot-goes-here> with the actual directory path
+# to the top of your ChromiumOS chroot.
+first_official_image {
+ chromeos_root:<path-to-your-chroot-goes-here>
+ # Replace "latest-official" with the appropriate xbuddy version alias
+ # for the official image you want to use (see
+ # http://www.chromium.org/chromium-os/how-tos-and-troubleshooting/using-the-dev-server/xbuddy-for-devserver#TOC-XBuddy-Paths
+ # for xbuddy syntax).
+ build: latest-official
+}
+
+second_official_image {
+ # Replace <path-to-your-chroot-goes-here> with actual path.
+ chromeos_root:<path-to-your-chroot-goes-here>
+ # Replace "lumpy-release/R35-5672.0.0" with the official image you want
+ # to use.
+ build:lumpy-release/R35-5672.0.0
+}
+
+
diff --git a/crosperf/experiment_files/page_cycler.exp b/crosperf/experiment_files/page_cycler.exp
new file mode 100644
index 00000000..6cb6166d
--- /dev/null
+++ b/crosperf/experiment_files/page_cycler.exp
@@ -0,0 +1,28 @@
+# This experiment file shows how to run all of the Telemetry
+# page_cycler tests.
+#
+# You should replace all the placeholders, marked by angle-brackets,
+# with the appropriate actual values.
+
+name: all_page_cyclers_example
+board: <your-board-goes-here>
+
+# Note: You can specify multiple remotes, to run your tests in
+# parallel on multiple machines. e.g. "remote: test-machine-1.com
+# test-machine2.com test-machine3.com"
+
+remote: <your-remote-goes-here>
+
+
+# NOTE: all_pagecyclers is a Crosperf alias that will cause all of the
+# Telemetry page_cycler benchmark tests to be run.
+benchmark: all_pagecyclers {
+ suite: telemetry_Crosperf
+ iterations: 2
+}
+
+# Replace the chromeos image below with the actual path to your test
+# image.
+test_image {
+ chromeos_image:<path-to-your-chroot>/src/build/images/<board>/test-image/chromiumos_test_image.bin
+}
diff --git a/crosperf/experiment_files/page_cycler_perf.exp b/crosperf/experiment_files/page_cycler_perf.exp
new file mode 100644
index 00000000..cd661737
--- /dev/null
+++ b/crosperf/experiment_files/page_cycler_perf.exp
@@ -0,0 +1,45 @@
+# This experiment profiles some of the Telemetry page cycler tests,
+# using 'perf' on the remotes to get performance profiles.
+#
+# You should replace all the placeholders, marked by angle-brackets,
+# with the appropriate actual values.
+
+name: page_cycler_perf_example
+board: <your-board-goes-here>
+
+# Note: You can specify multiple remotes, to run your tests in parallel on
+# multiple machines. e.g. "remote: test-machine-1.com test-machine2.com
+# test-machine3.com"
+remote: <your-remote-goes-here>
+
+perf_args: record -e cycles,instructions
+
+benchmark: page_cycler_v2.morejs {
+ suite: telemetry_Crosperf
+ iterations: 10
+}
+
+benchmark: page_cycler_v2.bloat {
+ suite: telemetry_Crosperf
+ iterations: 10
+}
+
+benchmark: page_cycler_v2.dhtml {
+ suite: telemetry_Crosperf
+ iterations: 10
+}
+
+benchmark: page_cycler_v2.intl_ar_fa_he {
+ suite: telemetry_Crosperf
+ iterations: 10
+}
+
+benchmark: page_cycler_v2.moz {
+ suite: telemetry_Crosperf
+ iterations: 10
+}
+
+# Replace the chromeos image below with the actual path to your test image.
+test_image {
+ chromeos_image:<path-to-your-chroot>/src/build/images/<board>/test-image/chromiumos_test_image.bin
+}
diff --git a/crosperf/experiment_files/telemetry-crosperf-suites.exp b/crosperf/experiment_files/telemetry-crosperf-suites.exp
new file mode 100644
index 00000000..2caa588d
--- /dev/null
+++ b/crosperf/experiment_files/telemetry-crosperf-suites.exp
@@ -0,0 +1,54 @@
+# This example experiment file shows how to invoke sets of tests (a
+# set is a group of tests that can be invoked by a single alias).
+# There are currently three sets defined for telemetry_Crosperf:
+# all_perfv2, all_pagecyclers, and all_toolchain_perf.
+#
+# You should replace all the placeholders, marked by angle-brackets,
+# with the appropriate actual values.
+
+
+name: telemetry_crosperf_suites_example
+board: <your-board-goes-here>
+
+# Note: You can specify multiple remotes, to run your tests in parallel on
+# multiple machines. e.g. "remote: test-machine-1.com test-machine2.com
+# test-machine3.com"
+remote: <your-remote-goes-here>
+
+# The example below will run all the benchmarks in the perf_v2 suite.
+# The exact list of benchmarks that will be run can be seen in
+# crosperf/experiment_factory.py
+benchmark: all_perfv2 {
+ suite:telemetry_Crosperf
+ iterations: 2
+}
+
+# The example below will run all the Telemetry page_cycler benchmarks.
+# The exact list of benchmarks that will be run can be seen in
+# crosperf/experiment_factory.py
+benchmark: all_pagecyclers {
+ suite:telemetry_Crosperf
+ iterations: 1
+}
+
+# The example below will run all the toolchain performance benchmarks.
+# The exact list of benchmarks that will be run can be seen in
+# crosperf/experiment_factory.py
+benchmark: all_toolchain_perf {
+ suite:telemetry_Crosperf
+ iterations: 1
+}
+
+# Replace the chromeos image below with the actual path to your test image.
+test_image_1 {
+ chromeos_image:<path-to-your-chroot>/src/build/images/<board>/test-image/chromiumos_test_image.bin
+}
+
+# Replace the chromeos image below with the actual path to your second
+# test image (if desired).
+new_image {
+ chromeos_image:<path-to-your-other-chroot-goes-here>/src/build/images/<board-goes-here>/latest/chromiumos_test_image.bin
+}
+
+
+
diff --git a/crosperf/experiment_files/telemetry-crosperf-with-external-chrome-src.exp b/crosperf/experiment_files/telemetry-crosperf-with-external-chrome-src.exp
new file mode 100644
index 00000000..517c13f1
--- /dev/null
+++ b/crosperf/experiment_files/telemetry-crosperf-with-external-chrome-src.exp
@@ -0,0 +1,31 @@
+# This example experiment file shows how to specify an external
+# chrome source tree (rather than using the one inside the chroot).
+# The Telemetry tests will be run from the external Chrome source
+# tree.
+#
+# You should replace all the placeholders, marked by angle-brackets,
+# with the appropriate actual values.
+
+name: telemetry_crosperf_external_src_example
+
+board: <your-board-goes-here>
+
+# Note: You can specify multiple remotes, to run your tests in parallel on
+# multiple machines. e.g. "remote: test-machine-1.com test-machine2.com
+# test-machine3.com"
+remote: <your-remote-goes-here>
+
+benchmark: octane {
+ suite: telemetry_Crosperf
+ iterations: 1
+}
+
+# Replace the chromeos image below with the actual path to your test image.
+test_image {
+ chromeos_image:<path-to-your-chroot>/src/build/images/<board>/test-image/chromiumos_test_image.bin
+ # Replace '/usr/local/google/chrome-top' with the path to the
+ # top of your Chrome source tree. From that directory
+ # "./src/tools/perf/run_benchmark" should be a valid file path.
+ chrome_src:/usr/local/google/chrome-top
+}
+
diff --git a/crosperf/experiment_files/telemetry-crosperf-with-profiler.exp b/crosperf/experiment_files/telemetry-crosperf-with-profiler.exp
new file mode 100644
index 00000000..4c2b88fc
--- /dev/null
+++ b/crosperf/experiment_files/telemetry-crosperf-with-profiler.exp
@@ -0,0 +1,35 @@
+# This example experiment file shows how to invoke the profiler (via
+# the perf_args above the benchmark).
+#
+# You should replace all the placeholders, marked by angle-brackets,
+# with the appropriate actual values.
+
+
+name: telemetry_crosperf_profiler_example
+
+board: <your-board-goes-here>
+
+# Note: You can specify multiple remotes, to run your tests in parallel on
+# multiple machines. e.g. "remote: test-machine-1.com test-machine2.com
+# test-machine3.com"
+remote: <your-remote-goes-here>
+
+# Below is the line that causes the profiler to run. Currently the
+# only profiler option is running 'perf' on the remote machine. If
+# you want you can replace 'record' with 'stat'. You would also need
+# to change the other args accordingly. Crosperf automatically
+# inserts a '-a' if you use 'record' for your perf_args. The results
+# of the perf run (perf.data and perf.report files) will be available
+# with the rest of the Crosperf results.
+perf_args: record -e cycles,instructions
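+# For comparison, an illustrative 'stat' variant of the line above might be:
+#   perf_args: stat -e cycles,instructions
+# (adjust the event list to whatever you want 'perf stat' to count).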
+
+benchmark: page_cycler_v2.dhtml {
+ suite: telemetry_Crosperf
+ iterations: 1
+}
+
+# Replace the chromeos image below with the actual path to your test image.
+test_image {
+ chromeos_image:<path-to-your-chroot>/src/build/images/<board>/test-image/chromiumos_test_image.bin
+}
+
diff --git a/crosperf/experiment_files/telemetry-crosperf.exp b/crosperf/experiment_files/telemetry-crosperf.exp
new file mode 100644
index 00000000..111001d4
--- /dev/null
+++ b/crosperf/experiment_files/telemetry-crosperf.exp
@@ -0,0 +1,32 @@
+# This example experiment file shows how to run a Telemetry test,
+# using autotest (via "suite: telemetry_Crosperf"). This runs the
+# Telemetry's "run_benchmark" for the specified test.
+#
+# You should replace all the placeholders, marked by angle-brackets,
+# with the appropriate actual values.
+
+name: basic_telemetry_crosperf_example
+board: <your-board-goes-here>
+
+# Note: You can specify multiple remotes, to run your tests in parallel on
+# multiple machines. e.g. "remote: test-machine-1.com test-machine2.com
+# test-machine3.com"
+remote: <your-remote-goes-here>
+
+# Replace "octane" below with the name of the Telemetry benchmark you
+# want to run.
+benchmark: octane {
+ suite: telemetry_Crosperf
+ iterations: 1
+}
+
+# NOTE: You must specify at least one image; you may specify more than one.
+# Replace <path-to-your-chroot> and <board> below.
+vanilla_image {
+ chromeos_image:<path-to-your-chroot>/src/build/images/<board>/vanilla-image/chromiumos_test_image.bin
+}
+
+# Replace the chromeos image below with the actual path to your test image.
+test_image {
+ chromeos_image:<path-to-your-chroot>/src/build/images/<board>/test-image/chromiumos_test_image.bin
+}
diff --git a/crosperf/experiment_files/telemetry-without-autotest.exp b/crosperf/experiment_files/telemetry-without-autotest.exp
new file mode 100644
index 00000000..ce3f207e
--- /dev/null
+++ b/crosperf/experiment_files/telemetry-without-autotest.exp
@@ -0,0 +1,31 @@
+# This example experiment file shows how to run a Telemetry test
+# directly, bypassing autotest. This runs the "run_measurement"
+# script. You need to supply both the name of the Telemetry test and
+# the page_set (via the test_args argument).
+#
+# You should replace all the placeholders, marked by angle-brackets,
+# with the appropriate actual values.
+
+name: telemetry_without_autotest_example
+board: <your-board-goes-here>
+
+# Note: You can specify multiple remotes, to run your tests in parallel on
+# multiple machines. e.g. "remote: test-machine-1.com test-machine2.com
+# test-machine3.com"
+remote: <your-remote-goes-here>
+
+# Replace "page_cycler_dhtml" below with the name of the Telemetry test
+# that you want run_measurement to run. Also replace the page set below
+# (in the test_args field) with the appropriate page set for your test.
+# N.B. The key to running telemetry without autotest is the 'suite' field.
+# Make sure your suite is 'telemetry', NOT 'telemetry_Crosperf'.
+benchmark: page_cycler_dhtml {
+ suite: telemetry
+ iterations: 1
+ test_args: ./page_sets/page_cycler/dhtml.json
+}
+
+# Replace the chromeos image below with the actual path to your test image.
+test_image {
+ chromeos_image:<path-to-your-chroot>/src/build/images/<board>/test-image/chromiumos_test_image.bin
+}
diff --git a/crosperf/experiment_files/telemetry_perf_perf b/crosperf/experiment_files/telemetry_perf_perf
new file mode 100755
index 00000000..acdf96d0
--- /dev/null
+++ b/crosperf/experiment_files/telemetry_perf_perf
@@ -0,0 +1,77 @@
+#!/bin/bash
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# Script for generating and running telemetry benchmarks via crosperf with
+# different perf command lines in order to measure the impact of the perf
+# commands on performance. Crosperf cannot run the same benchmark multiple
+# times, so this script runs crosperf multiple times instead. Unfortunately,
+# this means you must compare the results yourself.
+#
+# Perf will run for the entire benchmark run, so results should be interpreted
+# in that context. i.e, if this shows a 3% overhead for a particular perf
+# command, that overhead would only be seen during the 2 seconds of measurement
+# during a Chrome OS Wide Profiling collection.
+set -e
+
+board=xxx #<your-board-here>
+remote=xxx #<your-remote-here>
+iterations=5
+chromeos_root=~/chromiumos
+chrome_src=~/chromium
+
+
+function GenerateExperiment() {
+ local perf_args="${1:+perf_args: $1}"
+ local track="$2" # stable, beta, dev
+
+ cat <<_EOF
+$perf_args
+benchmark: page_cycler_v2.typical_25 {
+ suite: telemetry_Crosperf
+}
+
+$track {
+ build: latest-$track
+}
+_EOF
+}
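+# For example (illustrative), `GenerateExperiment "record -e cycles" stable`
+# emits an experiment file equivalent to:
+#   perf_args: record -e cycles
+#   benchmark: page_cycler_v2.typical_25 { suite: telemetry_Crosperf }
+#   stable { build: latest-stable }
+# which RunExperiment then feeds to crosperf.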
+
+function RunExperiment() {
+ local name="$1"
+ local perf_command="$2"
+ GenerateExperiment "$perf_command" "stable" > /tmp/crosperf.exp
+ ./crosperf /tmp/crosperf.exp \
+ --name telemetry_perf_perf_${name} \
+ --board="${board}" \
+ --remote="${remote}" \
+ --iterations="${iterations}" \
+ --chromeos_root="${chromeos_root}" \
+ --chrome_src="${chrome_src}" \
+ --rerun=true \
+ --use_file_locks=true \
+ --locks_dir=/tmp/crosperf.locks
+}
+
+if [ "$board" = "xxx" -o "$remote" = "xxx" ]; then
+ echo "Please set board and remote at the top of this script before running."
+  exit 1
+fi
+
+
+# Note that "-a" is automatically inserted in the perf command line.
+
+# Control: No profiling.
+RunExperiment 'control' ''
+# This is our baseline standard 'cycles' perf command.
+RunExperiment 'cycles.flat' \
+ 'record -e cycles -c 1000003'
+# Callgraph profiling.
+RunExperiment 'cycles.callgraph' \
+ 'record -g -e cycles -c 4000037'
+# Memory bandwidth profiling. As a perf stat command, we expect imperceptible
+# overhead.
+RunExperiment 'memory.bandwidth' \
+ 'stat -e cycles -e instructions -e uncore_imc/data_reads/ -e uncore_imc/data_writes/ -e cpu/event=0xD0,umask=0x11,name=MEM_UOPS_RETIRED-STLB_MISS_LOADS/ -e cpu/event=0xD0,umask=0x12,name=MEM_UOPS_RETIRED-STLB_MISS_STORES/'
+
diff --git a/crosperf/experiment_files/trybot-image.exp b/crosperf/experiment_files/trybot-image.exp
new file mode 100644
index 00000000..a261e08c
--- /dev/null
+++ b/crosperf/experiment_files/trybot-image.exp
@@ -0,0 +1,33 @@
+# This example experiment shows how to run a basic test, using a
+# (previously made) trybot image.
+
+#
+# You should replace all the placeholders, marked by angle-brackets,
+# with the appropriate actual values.
+
+name: trybot_example
+board: <your-board-goes-here>
+
+# Note: You can specify multiple remotes, to run your tests in parallel on
+# multiple machines. e.g. "remote: test-machine-1.com test-machine2.com
+# test-machine3.com"
+remote: <your-remote-goes-here>
+
+
+benchmark: canvasmark {
+ suite:telemetry_Crosperf
+ iterations: 1
+}
+
+
+# Replace <path-to-your-chroot-goes-here> with the actual directory path
+# to the top of your ChromiumOS chroot.
+trybot_image {
+ chromeos_root:<path-to-your-chroot-goes-here>
+ # Replace "trybot-lumpy-paladin/R34-5417.0.0-b1506" with the name of the
+ # trybot image that you wish to use. You can find this by going to the
+ # trybot build log, going to the 'Report' stage, and looking for 'Build
+ # Artifacts' at the bottom. You can extract the trybot image name from that.
+ build:trybot-lumpy-paladin/R34-5417.0.0-b1506
+}
+
diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py
new file mode 100644
index 00000000..b30c8bd5
--- /dev/null
+++ b/crosperf/experiment_runner.py
@@ -0,0 +1,309 @@
+# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""The experiment runner module."""
+from __future__ import print_function
+
+import getpass
+import os
+import shutil
+import time
+
+import afe_lock_machine
+import test_flag
+
+from cros_utils import command_executer
+from cros_utils import logger
+from cros_utils.email_sender import EmailSender
+from cros_utils.file_utils import FileUtils
+
+import config
+from experiment_status import ExperimentStatus
+from results_cache import CacheConditions
+from results_cache import ResultsCache
+from results_report import HTMLResultsReport
+from results_report import TextResultsReport
+from results_report import JSONResultsReport
+from schedv2 import Schedv2
+
+def _WriteJSONReportToFile(experiment, results_dir, json_report):
+ """Writes a JSON report to a file in results_dir."""
+ has_llvm = any('llvm' in l.compiler for l in experiment.labels)
+ compiler_string = 'llvm' if has_llvm else 'gcc'
+ board = experiment.labels[0].board
+ filename = 'report_%s_%s_%s.%s.json' % (
+ board, json_report.date, json_report.time.replace(':', '.'),
+ compiler_string)
+ fullname = os.path.join(results_dir, filename)
+ report_text = json_report.GetReport()
+ with open(fullname, 'w') as out_file:
+ out_file.write(report_text)
+
+
+class ExperimentRunner(object):
+ """ExperimentRunner Class."""
+
+ STATUS_TIME_DELAY = 30
+ THREAD_MONITOR_DELAY = 2
+
+ def __init__(self,
+ experiment,
+ json_report,
+ using_schedv2=False,
+ log=None,
+ cmd_exec=None):
+ self._experiment = experiment
+ self.l = log or logger.GetLogger(experiment.log_dir)
+ self._ce = cmd_exec or command_executer.GetCommandExecuter(self.l)
+ self._terminated = False
+ self.json_report = json_report
+ self.locked_machines = []
+ if experiment.log_level != 'verbose':
+ self.STATUS_TIME_DELAY = 10
+
+ # Setting this to True will use crosperf sched v2 (feature in progress).
+ self._using_schedv2 = using_schedv2
+
+ def _GetMachineList(self):
+ """Return a list of all requested machines.
+
+ Create a list of all the requested machines, both global requests and
+ label-specific requests, and return the list.
+ """
+ machines = self._experiment.remote
+    # Every label's remote list is a subset of experiment.remote.
+ for l in self._experiment.labels:
+ for r in l.remote:
+ assert r in machines
+ return machines
+
+ def _UpdateMachineList(self, locked_machines):
+ """Update machines lists to contain only locked machines.
+
+ Go through all the lists of requested machines, both global and
+ label-specific requests, and remove any machine that we were not
+ able to lock.
+
+ Args:
+ locked_machines: A list of the machines we successfully locked.
+ """
+    # Rebuild the lists instead of removing entries while iterating over
+    # them, which would skip elements.
+    self._experiment.remote = [
+        m for m in self._experiment.remote if m in locked_machines
+    ]
+
+    for l in self._experiment.labels:
+      l.remote = [m for m in l.remote if m in locked_machines]
+
+ def _LockAllMachines(self, experiment):
+ """Attempt to globally lock all of the machines requested for run.
+
+ This method will use the AFE server to globally lock all of the machines
+ requested for this crosperf run, to prevent any other crosperf runs from
+ being able to update/use the machines while this experiment is running.
+ """
+ if test_flag.GetTestMode():
+ self.locked_machines = self._GetMachineList()
+ self._experiment.locked_machines = self.locked_machines
+ else:
+ lock_mgr = afe_lock_machine.AFELockManager(
+ self._GetMachineList(),
+ '',
+ experiment.labels[0].chromeos_root,
+ None,
+ log=self.l,)
+ for m in lock_mgr.machines:
+ if not lock_mgr.MachineIsKnown(m):
+ lock_mgr.AddLocalMachine(m)
+ machine_states = lock_mgr.GetMachineStates('lock')
+ lock_mgr.CheckMachineLocks(machine_states, 'lock')
+ self.locked_machines = lock_mgr.UpdateMachines(True)
+ self._experiment.locked_machines = self.locked_machines
+ self._UpdateMachineList(self.locked_machines)
+ self._experiment.machine_manager.RemoveNonLockedMachines(
+ self.locked_machines)
+ if len(self.locked_machines) == 0:
+ raise RuntimeError('Unable to lock any machines.')
+
+ def _UnlockAllMachines(self, experiment):
+ """Attempt to globally unlock all of the machines requested for run.
+
+ The method will use the AFE server to globally unlock all of the machines
+ requested for this crosperf run.
+ """
+ if not self.locked_machines or test_flag.GetTestMode():
+ return
+
+ lock_mgr = afe_lock_machine.AFELockManager(
+ self.locked_machines,
+ '',
+ experiment.labels[0].chromeos_root,
+ None,
+ log=self.l,)
+ machine_states = lock_mgr.GetMachineStates('unlock')
+ lock_mgr.CheckMachineLocks(machine_states, 'unlock')
+ lock_mgr.UpdateMachines(False)
+
+ def _ClearCacheEntries(self, experiment):
+ for br in experiment.benchmark_runs:
+ cache = ResultsCache()
+ cache.Init(br.label.chromeos_image, br.label.chromeos_root,
+ br.benchmark.test_name, br.iteration, br.test_args,
+ br.profiler_args, br.machine_manager, br.machine,
+ br.label.board, br.cache_conditions, br._logger, br.log_level,
+ br.label, br.share_cache, br.benchmark.suite,
+ br.benchmark.show_all_results, br.benchmark.run_local)
+ cache_dir = cache.GetCacheDirForWrite()
+ if os.path.exists(cache_dir):
+ self.l.LogOutput('Removing cache dir: %s' % cache_dir)
+ shutil.rmtree(cache_dir)
+
+ def _Run(self, experiment):
+ try:
+ if not experiment.locks_dir:
+ self._LockAllMachines(experiment)
+ if self._using_schedv2:
+ schedv2 = Schedv2(experiment)
+ experiment.set_schedv2(schedv2)
+ if CacheConditions.FALSE in experiment.cache_conditions:
+ self._ClearCacheEntries(experiment)
+ status = ExperimentStatus(experiment)
+ experiment.Run()
+ last_status_time = 0
+ last_status_string = ''
+ try:
+ if experiment.log_level != 'verbose':
+ self.l.LogStartDots()
+ while not experiment.IsComplete():
+ if last_status_time + self.STATUS_TIME_DELAY < time.time():
+ last_status_time = time.time()
+ border = '=============================='
+ if experiment.log_level == 'verbose':
+ self.l.LogOutput(border)
+ self.l.LogOutput(status.GetProgressString())
+ self.l.LogOutput(status.GetStatusString())
+ self.l.LogOutput(border)
+ else:
+ current_status_string = status.GetStatusString()
+ if current_status_string != last_status_string:
+ self.l.LogEndDots()
+ self.l.LogOutput(border)
+ self.l.LogOutput(current_status_string)
+ self.l.LogOutput(border)
+ last_status_string = current_status_string
+ else:
+ self.l.LogAppendDot()
+ time.sleep(self.THREAD_MONITOR_DELAY)
+ except KeyboardInterrupt:
+ self._terminated = True
+ self.l.LogError('Ctrl-c pressed. Cleaning up...')
+ experiment.Terminate()
+ raise
+ except SystemExit:
+ self._terminated = True
+ self.l.LogError('Unexpected exit. Cleaning up...')
+ experiment.Terminate()
+ raise
+ finally:
+ if not experiment.locks_dir:
+ self._UnlockAllMachines(experiment)
+
+ def _PrintTable(self, experiment):
+ self.l.LogOutput(TextResultsReport.FromExperiment(experiment).GetReport())
+
+ def _Email(self, experiment):
+ # Only email by default if a new run was completed.
+ send_mail = False
+ for benchmark_run in experiment.benchmark_runs:
+ if not benchmark_run.cache_hit:
+ send_mail = True
+ break
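+    # Skip the email when nothing new was run and no recipients were
+    # explicitly requested, or when the 'no_email' config flag is set.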
+ if (not send_mail and not experiment.email_to or
+ config.GetConfig('no_email')):
+ return
+
+ label_names = []
+ for label in experiment.labels:
+ label_names.append(label.name)
+ subject = '%s: %s' % (experiment.name, ' vs. '.join(label_names))
+
+ text_report = TextResultsReport.FromExperiment(experiment, True).GetReport()
+ text_report += ('\nResults are stored in %s.\n' %
+ experiment.results_directory)
+ text_report = "<pre style='font-size: 13px'>%s</pre>" % text_report
+ html_report = HTMLResultsReport.FromExperiment(experiment).GetReport()
+ attachment = EmailSender.Attachment('report.html', html_report)
+ email_to = experiment.email_to or []
+ email_to.append(getpass.getuser())
+ EmailSender().SendEmail(email_to,
+ subject,
+ text_report,
+ attachments=[attachment],
+ msg_type='html')
+
+ def _StoreResults(self, experiment):
+ if self._terminated:
+ return
+ results_directory = experiment.results_directory
+ FileUtils().RmDir(results_directory)
+ FileUtils().MkDirP(results_directory)
+ self.l.LogOutput('Storing experiment file in %s.' % results_directory)
+ experiment_file_path = os.path.join(results_directory, 'experiment.exp')
+ FileUtils().WriteFile(experiment_file_path, experiment.experiment_file)
+
+ self.l.LogOutput('Storing results report in %s.' % results_directory)
+ results_table_path = os.path.join(results_directory, 'results.html')
+ report = HTMLResultsReport.FromExperiment(experiment).GetReport()
+ if self.json_report:
+ json_report = JSONResultsReport.FromExperiment(experiment,
+ json_args={'indent': 2})
+ _WriteJSONReportToFile(experiment, results_directory, json_report)
+
+ FileUtils().WriteFile(results_table_path, report)
+
+ self.l.LogOutput('Storing email message body in %s.' % results_directory)
+ msg_file_path = os.path.join(results_directory, 'msg_body.html')
+ text_report = TextResultsReport.FromExperiment(experiment, True).GetReport()
+ text_report += ('\nResults are stored in %s.\n' %
+ experiment.results_directory)
+ msg_body = "<pre style='font-size: 13px'>%s</pre>" % text_report
+ FileUtils().WriteFile(msg_file_path, msg_body)
+
+ self.l.LogOutput('Storing results of each benchmark run.')
+ for benchmark_run in experiment.benchmark_runs:
+ if benchmark_run.result:
+ benchmark_run_name = filter(str.isalnum, benchmark_run.name)
+ benchmark_run_path = os.path.join(results_directory, benchmark_run_name)
+ benchmark_run.result.CopyResultsTo(benchmark_run_path)
+ benchmark_run.result.CleanUp(benchmark_run.benchmark.rm_chroot_tmp)
+
+ def Run(self):
+ try:
+ self._Run(self._experiment)
+ finally:
+ # Always print the report at the end of the run.
+ self._PrintTable(self._experiment)
+ if not self._terminated:
+ self._StoreResults(self._experiment)
+ self._Email(self._experiment)
+
+
+class MockExperimentRunner(ExperimentRunner):
+ """Mocked ExperimentRunner for testing."""
+
+ def __init__(self, experiment, json_report):
+ super(MockExperimentRunner, self).__init__(experiment, json_report)
+
+ def _Run(self, experiment):
+ self.l.LogOutput("Would run the following experiment: '%s'." %
+ experiment.name)
+
+ def _PrintTable(self, experiment):
+ self.l.LogOutput('Would print the experiment table.')
+
+ def _Email(self, experiment):
+ self.l.LogOutput('Would send result email.')
+
+ def _StoreResults(self, experiment):
+ self.l.LogOutput('Would store the results.')
diff --git a/crosperf/experiment_runner_unittest.py b/crosperf/experiment_runner_unittest.py
new file mode 100755
index 00000000..38ac3874
--- /dev/null
+++ b/crosperf/experiment_runner_unittest.py
@@ -0,0 +1,450 @@
+#!/usr/bin/env python2
+#
+# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Tests for the experiment runner module."""
+
+from __future__ import print_function
+
+import StringIO
+import getpass
+import os
+
+import mock
+import unittest
+
+import experiment_runner
+import experiment_status
+import machine_manager
+import config
+import test_flag
+
+from experiment_factory import ExperimentFactory
+from experiment_file import ExperimentFile
+from results_cache import Result
+from results_report import HTMLResultsReport
+from results_report import TextResultsReport
+
+from cros_utils import command_executer
+from cros_utils.email_sender import EmailSender
+from cros_utils.file_utils import FileUtils
+
+EXPERIMENT_FILE_1 = """
+ board: parrot
+  remote: chromeos-parrot1.cros chromeos-parrot2.cros
+
+ benchmark: kraken {
+ suite: telemetry_Crosperf
+ iterations: 3
+ }
+
+ image1 {
+ chromeos_root: /usr/local/google/chromeos
+ chromeos_image: /usr/local/google/chromeos/src/build/images/parrot/latest/cros_image1.bin
+ }
+
+ image2 {
+    chromeos_image: /usr/local/google/chromeos/src/build/images/parrot/latest/cros_image2.bin
+ }
+ """
+
+# pylint: disable=protected-access
+
+
+class FakeLogger(object):
+ """Fake logger for tests."""
+
+ def __init__(self):
+ self.LogOutputCount = 0
+ self.LogErrorCount = 0
+ self.output_msgs = []
+ self.error_msgs = []
+ self.dot_count = 0
+ self.LogStartDotsCount = 0
+ self.LogEndDotsCount = 0
+ self.LogAppendDotCount = 0
+
+ def LogOutput(self, msg):
+ self.LogOutputCount += 1
+ self.output_msgs.append(msg)
+
+ def LogError(self, msg):
+ self.LogErrorCount += 1
+ self.error_msgs.append(msg)
+
+ def LogStartDots(self):
+ self.LogStartDotsCount += 1
+ self.dot_count += 1
+
+ def LogAppendDot(self):
+ self.LogAppendDotCount += 1
+ self.dot_count += 1
+
+ def LogEndDots(self):
+ self.LogEndDotsCount += 1
+
+ def Reset(self):
+ self.LogOutputCount = 0
+ self.LogErrorCount = 0
+ self.output_msgs = []
+ self.error_msgs = []
+ self.dot_count = 0
+ self.LogStartDotsCount = 0
+ self.LogEndDotsCount = 0
+ self.LogAppendDotCount = 0
+
+
+class ExperimentRunnerTest(unittest.TestCase):
+ """Test for experiment runner class."""
+
+ run_count = 0
+ is_complete_count = 0
+ mock_logger = FakeLogger()
+ mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+
+ def make_fake_experiment(self):
+ test_flag.SetTestMode(True)
+ experiment_file = ExperimentFile(StringIO.StringIO(EXPERIMENT_FILE_1))
+ experiment = ExperimentFactory().GetExperiment(experiment_file,
+ working_directory='',
+ log_dir='')
+ return experiment
+
+ @mock.patch.object(machine_manager.MachineManager, 'AddMachine')
+ @mock.patch.object(os.path, 'isfile')
+
+ # pylint: disable=arguments-differ
+ def setUp(self, mock_isfile, _mock_addmachine):
+ mock_isfile.return_value = True
+ self.exp = self.make_fake_experiment()
+
+ def test_init(self):
+ er = experiment_runner.ExperimentRunner(self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
+ self.assertFalse(er._terminated)
+ self.assertEqual(er.STATUS_TIME_DELAY, 10)
+
+ self.exp.log_level = 'verbose'
+ er = experiment_runner.ExperimentRunner(self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
+ self.assertEqual(er.STATUS_TIME_DELAY, 30)
+
+ @mock.patch.object(experiment_status.ExperimentStatus, 'GetStatusString')
+ @mock.patch.object(experiment_status.ExperimentStatus, 'GetProgressString')
+ def test_run(self, mock_progress_string, mock_status_string):
+
+ self.run_count = 0
+ self.is_complete_count = 0
+
+ def reset():
+ self.run_count = 0
+ self.is_complete_count = 0
+
+ def FakeRun():
+ self.run_count += 1
+ return 0
+
+ def FakeIsComplete():
+ self.is_complete_count += 1
+ if self.is_complete_count < 3:
+ return False
+ else:
+ return True
+
+ self.mock_logger.Reset()
+ self.exp.Run = FakeRun
+ self.exp.IsComplete = FakeIsComplete
+
+ # Test 1: log_level == "quiet"
+ self.exp.log_level = 'quiet'
+ er = experiment_runner.ExperimentRunner(self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
+ er.STATUS_TIME_DELAY = 2
+ mock_status_string.return_value = 'Fake status string'
+ er._Run(self.exp)
+ self.assertEqual(self.run_count, 1)
+ self.assertTrue(self.is_complete_count > 0)
+ self.assertEqual(self.mock_logger.LogStartDotsCount, 1)
+ self.assertEqual(self.mock_logger.LogAppendDotCount, 1)
+ self.assertEqual(self.mock_logger.LogEndDotsCount, 1)
+ self.assertEqual(self.mock_logger.dot_count, 2)
+ self.assertEqual(mock_progress_string.call_count, 0)
+ self.assertEqual(mock_status_string.call_count, 2)
+ self.assertEqual(self.mock_logger.output_msgs,
+ ['==============================', 'Fake status string',
+ '=============================='])
+ self.assertEqual(len(self.mock_logger.error_msgs), 0)
+
+ # Test 2: log_level == "average"
+ self.mock_logger.Reset()
+ reset()
+ self.exp.log_level = 'average'
+ mock_status_string.call_count = 0
+ er = experiment_runner.ExperimentRunner(self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
+ er.STATUS_TIME_DELAY = 2
+ mock_status_string.return_value = 'Fake status string'
+ er._Run(self.exp)
+ self.assertEqual(self.run_count, 1)
+ self.assertTrue(self.is_complete_count > 0)
+ self.assertEqual(self.mock_logger.LogStartDotsCount, 1)
+ self.assertEqual(self.mock_logger.LogAppendDotCount, 1)
+ self.assertEqual(self.mock_logger.LogEndDotsCount, 1)
+ self.assertEqual(self.mock_logger.dot_count, 2)
+ self.assertEqual(mock_progress_string.call_count, 0)
+ self.assertEqual(mock_status_string.call_count, 2)
+ self.assertEqual(self.mock_logger.output_msgs,
+ ['==============================', 'Fake status string',
+ '=============================='])
+ self.assertEqual(len(self.mock_logger.error_msgs), 0)
+
+ # Test 3: log_level == "verbose"
+ self.mock_logger.Reset()
+ reset()
+ self.exp.log_level = 'verbose'
+ mock_status_string.call_count = 0
+ er = experiment_runner.ExperimentRunner(self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
+ er.STATUS_TIME_DELAY = 2
+ mock_status_string.return_value = 'Fake status string'
+ mock_progress_string.return_value = 'Fake progress string'
+ er._Run(self.exp)
+ self.assertEqual(self.run_count, 1)
+ self.assertTrue(self.is_complete_count > 0)
+ self.assertEqual(self.mock_logger.LogStartDotsCount, 0)
+ self.assertEqual(self.mock_logger.LogAppendDotCount, 0)
+ self.assertEqual(self.mock_logger.LogEndDotsCount, 0)
+ self.assertEqual(self.mock_logger.dot_count, 0)
+ self.assertEqual(mock_progress_string.call_count, 2)
+ self.assertEqual(mock_status_string.call_count, 2)
+ self.assertEqual(self.mock_logger.output_msgs,
+ ['==============================', 'Fake progress string',
+ 'Fake status string', '==============================',
+ '==============================', 'Fake progress string',
+ 'Fake status string', '=============================='])
+ self.assertEqual(len(self.mock_logger.error_msgs), 0)
+
+ @mock.patch.object(TextResultsReport, 'GetReport')
+ def test_print_table(self, mock_report):
+ self.mock_logger.Reset()
+ mock_report.return_value = 'This is a fake experiment report.'
+ er = experiment_runner.ExperimentRunner(self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
+ er._PrintTable(self.exp)
+ self.assertEqual(mock_report.call_count, 1)
+ self.assertEqual(self.mock_logger.output_msgs,
+ ['This is a fake experiment report.'])
+
+ @mock.patch.object(HTMLResultsReport, 'GetReport')
+ @mock.patch.object(TextResultsReport, 'GetReport')
+ @mock.patch.object(EmailSender, 'Attachment')
+ @mock.patch.object(EmailSender, 'SendEmail')
+ @mock.patch.object(getpass, 'getuser')
+ def test_email(self, mock_getuser, mock_emailer, mock_attachment,
+ mock_text_report, mock_html_report):
+
+ mock_getuser.return_value = 'john.smith@google.com'
+ mock_text_report.return_value = 'This is a fake text report.'
+ mock_html_report.return_value = 'This is a fake html report.'
+
+ self.mock_logger.Reset()
+ config.AddConfig('no_email', True)
+ self.exp.email_to = ['jane.doe@google.com']
+ er = experiment_runner.ExperimentRunner(self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
+ # Test 1. Config:no_email; exp.email_to set ==> no email sent
+ er._Email(self.exp)
+ self.assertEqual(mock_getuser.call_count, 0)
+ self.assertEqual(mock_emailer.call_count, 0)
+ self.assertEqual(mock_attachment.call_count, 0)
+ self.assertEqual(mock_text_report.call_count, 0)
+ self.assertEqual(mock_html_report.call_count, 0)
+
+ # Test 2. Config: email. exp.email_to set; cache hit. => send email
+ self.mock_logger.Reset()
+ config.AddConfig('no_email', False)
+ for r in self.exp.benchmark_runs:
+ r.cache_hit = True
+ er._Email(self.exp)
+ self.assertEqual(mock_getuser.call_count, 1)
+ self.assertEqual(mock_emailer.call_count, 1)
+ self.assertEqual(mock_attachment.call_count, 1)
+ self.assertEqual(mock_text_report.call_count, 1)
+ self.assertEqual(mock_html_report.call_count, 1)
+ self.assertEqual(len(mock_emailer.call_args), 2)
+ self.assertEqual(mock_emailer.call_args[0],
+ (['jane.doe@google.com', 'john.smith@google.com'],
+ ': image1 vs. image2',
+ "<pre style='font-size: 13px'>This is a fake text "
+ 'report.\nResults are stored in _results.\n</pre>'))
+ self.assertTrue(type(mock_emailer.call_args[1]) is dict)
+ self.assertEqual(len(mock_emailer.call_args[1]), 2)
+ self.assertTrue('attachments' in mock_emailer.call_args[1].keys())
+ self.assertEqual(mock_emailer.call_args[1]['msg_type'], 'html')
+
+ mock_attachment.assert_called_with('report.html',
+ 'This is a fake html report.')
+
+    # Test 3. Config: email; exp.email_to set; no cache hit. => send email
+ self.mock_logger.Reset()
+ mock_getuser.reset_mock()
+ mock_emailer.reset_mock()
+ mock_attachment.reset_mock()
+ mock_text_report.reset_mock()
+ mock_html_report.reset_mock()
+ config.AddConfig('no_email', False)
+ for r in self.exp.benchmark_runs:
+ r.cache_hit = False
+ er._Email(self.exp)
+ self.assertEqual(mock_getuser.call_count, 1)
+ self.assertEqual(mock_emailer.call_count, 1)
+ self.assertEqual(mock_attachment.call_count, 1)
+ self.assertEqual(mock_text_report.call_count, 1)
+ self.assertEqual(mock_html_report.call_count, 1)
+ self.assertEqual(len(mock_emailer.call_args), 2)
+ self.assertEqual(mock_emailer.call_args[0],
+ (['jane.doe@google.com', 'john.smith@google.com',
+ 'john.smith@google.com'], ': image1 vs. image2',
+ "<pre style='font-size: 13px'>This is a fake text "
+ 'report.\nResults are stored in _results.\n</pre>'))
+ self.assertTrue(type(mock_emailer.call_args[1]) is dict)
+ self.assertEqual(len(mock_emailer.call_args[1]), 2)
+ self.assertTrue('attachments' in mock_emailer.call_args[1].keys())
+ self.assertEqual(mock_emailer.call_args[1]['msg_type'], 'html')
+
+ mock_attachment.assert_called_with('report.html',
+ 'This is a fake html report.')
+
+    # Test 4. Config: email; exp.email_to = None; no cache hit. => send email
+ self.mock_logger.Reset()
+ mock_getuser.reset_mock()
+ mock_emailer.reset_mock()
+ mock_attachment.reset_mock()
+ mock_text_report.reset_mock()
+ mock_html_report.reset_mock()
+ self.exp.email_to = []
+ er._Email(self.exp)
+ self.assertEqual(mock_getuser.call_count, 1)
+ self.assertEqual(mock_emailer.call_count, 1)
+ self.assertEqual(mock_attachment.call_count, 1)
+ self.assertEqual(mock_text_report.call_count, 1)
+ self.assertEqual(mock_html_report.call_count, 1)
+ self.assertEqual(len(mock_emailer.call_args), 2)
+ self.assertEqual(mock_emailer.call_args[0],
+ (['john.smith@google.com'], ': image1 vs. image2',
+ "<pre style='font-size: 13px'>This is a fake text "
+ 'report.\nResults are stored in _results.\n</pre>'))
+ self.assertTrue(type(mock_emailer.call_args[1]) is dict)
+ self.assertEqual(len(mock_emailer.call_args[1]), 2)
+ self.assertTrue('attachments' in mock_emailer.call_args[1].keys())
+ self.assertEqual(mock_emailer.call_args[1]['msg_type'], 'html')
+
+ mock_attachment.assert_called_with('report.html',
+ 'This is a fake html report.')
+
+    # Test 5. Config: email; exp.email_to = None; cache hit => no email sent
+ self.mock_logger.Reset()
+ mock_getuser.reset_mock()
+ mock_emailer.reset_mock()
+ mock_attachment.reset_mock()
+ mock_text_report.reset_mock()
+ mock_html_report.reset_mock()
+ for r in self.exp.benchmark_runs:
+ r.cache_hit = True
+ er._Email(self.exp)
+ self.assertEqual(mock_getuser.call_count, 0)
+ self.assertEqual(mock_emailer.call_count, 0)
+ self.assertEqual(mock_attachment.call_count, 0)
+ self.assertEqual(mock_text_report.call_count, 0)
+ self.assertEqual(mock_html_report.call_count, 0)
+
+ @mock.patch.object(FileUtils, 'RmDir')
+ @mock.patch.object(FileUtils, 'MkDirP')
+ @mock.patch.object(FileUtils, 'WriteFile')
+ @mock.patch.object(HTMLResultsReport, 'FromExperiment')
+ @mock.patch.object(TextResultsReport, 'FromExperiment')
+ @mock.patch.object(Result, 'CopyResultsTo')
+ @mock.patch.object(Result, 'CleanUp')
+ def test_store_results(self, mock_cleanup, mock_copy, _mock_text_report,
+ mock_report, mock_writefile, mock_mkdir, mock_rmdir):
+
+ self.mock_logger.Reset()
+ self.exp.results_directory = '/usr/local/crosperf-results'
+ bench_run = self.exp.benchmark_runs[5]
+ bench_path = '/usr/local/crosperf-results/' + filter(str.isalnum,
+ bench_run.name)
+ self.assertEqual(len(self.exp.benchmark_runs), 6)
+
+ er = experiment_runner.ExperimentRunner(self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec)
+
+ # Test 1. Make sure nothing is done if _terminated is true.
+ er._terminated = True
+ er._StoreResults(self.exp)
+ self.assertEqual(mock_cleanup.call_count, 0)
+ self.assertEqual(mock_copy.call_count, 0)
+ self.assertEqual(mock_report.call_count, 0)
+ self.assertEqual(mock_writefile.call_count, 0)
+ self.assertEqual(mock_mkdir.call_count, 0)
+ self.assertEqual(mock_rmdir.call_count, 0)
+ self.assertEqual(self.mock_logger.LogOutputCount, 0)
+
+ # Test 2. _terminated is false; everything works properly.
+ fake_result = Result(self.mock_logger, self.exp.labels[0], 'average',
+ 'daisy1')
+ for r in self.exp.benchmark_runs:
+ r.result = fake_result
+ er._terminated = False
+ er._StoreResults(self.exp)
+ self.assertEqual(mock_cleanup.call_count, 6)
+ mock_cleanup.called_with(bench_run.benchmark.rm_chroot_tmp)
+ self.assertEqual(mock_copy.call_count, 6)
+ mock_copy.called_with(bench_path)
+ self.assertEqual(mock_writefile.call_count, 3)
+ self.assertEqual(len(mock_writefile.call_args_list), 3)
+ first_args = mock_writefile.call_args_list[0]
+ second_args = mock_writefile.call_args_list[1]
+ self.assertEqual(first_args[0][0],
+ '/usr/local/crosperf-results/experiment.exp')
+ self.assertEqual(second_args[0][0],
+ '/usr/local/crosperf-results/results.html')
+ self.assertEqual(mock_mkdir.call_count, 1)
+ mock_mkdir.called_with('/usr/local/crosperf-results')
+ self.assertEqual(mock_rmdir.call_count, 1)
+ mock_rmdir.called_with('/usr/local/crosperf-results')
+ self.assertEqual(self.mock_logger.LogOutputCount, 4)
+ self.assertEqual(
+ self.mock_logger.output_msgs,
+ ['Storing experiment file in /usr/local/crosperf-results.',
+ 'Storing results report in /usr/local/crosperf-results.',
+ 'Storing email message body in /usr/local/crosperf-results.',
+ 'Storing results of each benchmark run.'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/crosperf/experiment_status.py b/crosperf/experiment_status.py
new file mode 100644
index 00000000..627db99e
--- /dev/null
+++ b/crosperf/experiment_status.py
@@ -0,0 +1,145 @@
+# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""The class to show the banner."""
+
+from __future__ import print_function
+
+import collections
+import datetime
+import time
+
+
+class ExperimentStatus(object):
+ """The status class."""
+
+ def __init__(self, experiment):
+ self.experiment = experiment
+ self.num_total = len(self.experiment.benchmark_runs)
+ self.completed = 0
+ self.new_job_start_time = time.time()
+ self.log_level = experiment.log_level
+
+ def _GetProgressBar(self, num_complete, num_total):
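+    # Builds a one-line progress banner. Illustrative example for 20 of 50
+    # jobs complete: 'Done: 40%' followed by a 50-character bar made of
+    # 20 '>' characters padded with spaces.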
+ ret = 'Done: %s%%' % int(100.0 * num_complete / num_total)
+ bar_length = 50
+ done_char = '>'
+ undone_char = ' '
+ num_complete_chars = bar_length * num_complete / num_total
+ num_undone_chars = bar_length - num_complete_chars
+ ret += ' [%s%s]' % (num_complete_chars * done_char,
+ num_undone_chars * undone_char)
+ return ret
+
+ def GetProgressString(self):
+ """Get the elapsed_time, ETA."""
+ current_time = time.time()
+ if self.experiment.start_time:
+ elapsed_time = current_time - self.experiment.start_time
+ else:
+ elapsed_time = 0
+ try:
+ if self.completed != self.experiment.num_complete:
+ self.completed = self.experiment.num_complete
+ self.new_job_start_time = current_time
+ time_completed_jobs = (elapsed_time -
+ (current_time - self.new_job_start_time))
+ # eta is calculated as:
+ # ETA = (num_jobs_not_yet_started * estimated_time_per_job)
+ # + time_left_for_current_job
+ #
+ # where
+ # num_jobs_not_yet_started = (num_total - num_complete - 1)
+ #
+ # estimated_time_per_job = time_completed_jobs / num_run_complete
+ #
+ # time_left_for_current_job = estimated_time_per_job -
+ # time_spent_so_far_on_current_job
+ #
+ # The biggest problem with this calculation is its assumption that
+ # all jobs have roughly the same running time (blatantly false!).
+ #
+ # ETA can come out negative if the time spent on the current job is
+ # greater than the estimated time per job (e.g. you're running the
+ # first long job, after a series of short jobs). For now, if that
+ # happens, we set the ETA to "Unknown."
+ #
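+      # Illustrative example (not from a real run): 10 jobs total, 4 runs
+      # complete, 400s elapsed, 20s spent on the current job. Then
+      # time_completed_jobs = 380s, the per-job estimate is 95s, and
+      # ETA = (10 - 4 - 1) * 95 + (95 - 20) = 550 seconds.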
+ eta_seconds = (float(self.num_total - self.experiment.num_complete - 1) *
+ time_completed_jobs / self.experiment.num_run_complete +
+ (time_completed_jobs / self.experiment.num_run_complete -
+ (current_time - self.new_job_start_time)))
+
+ eta_seconds = int(eta_seconds)
+ if eta_seconds > 0:
+ eta = datetime.timedelta(seconds=eta_seconds)
+ else:
+ eta = 'Unknown'
+ except ZeroDivisionError:
+ eta = 'Unknown'
+ strings = []
+ strings.append('Current time: %s Elapsed: %s ETA: %s' %
+ (datetime.datetime.now(),
+ datetime.timedelta(seconds=int(elapsed_time)), eta))
+ strings.append(self._GetProgressBar(self.experiment.num_complete,
+ self.num_total))
+ return '\n'.join(strings)
+
+ def GetStatusString(self):
+ """Get the status string of all the benchmark_runs."""
+ status_bins = collections.defaultdict(list)
+ for benchmark_run in self.experiment.benchmark_runs:
+ status_bins[benchmark_run.timeline.GetLastEvent()].append(benchmark_run)
+
+ status_strings = []
+ for key, val in status_bins.iteritems():
+ if key == 'RUNNING':
+ get_description = self._GetNamesAndIterations
+ else:
+ get_description = self._GetCompactNamesAndIterations
+ status_strings.append('%s: %s' % (key, get_description(val)))
+
+ thread_status = ''
+ thread_status_format = 'Thread Status: \n{}\n'
+ if (self.experiment.schedv2() is None and
+ self.experiment.log_level == 'verbose'):
+ # Add the machine manager status.
+ thread_status = thread_status_format.format(
+ self.experiment.machine_manager.AsString())
+ elif self.experiment.schedv2():
+ # In schedv2 mode, we always print out thread status.
+ thread_status = thread_status_format.format(self.experiment.schedv2(
+ ).threads_status_as_string())
+
+ result = '{}{}'.format(thread_status, '\n'.join(status_strings))
+
+ return result
+
+ def _GetNamesAndIterations(self, benchmark_runs):
+ strings = []
+ t = time.time()
+ for benchmark_run in benchmark_runs:
+ t_last = benchmark_run.timeline.GetLastEventTime()
+ elapsed = str(datetime.timedelta(seconds=int(t - t_last)))
+ strings.append("'{0}' {1}".format(benchmark_run.name, elapsed))
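+    # Illustrative result: " 2 ('octane' 0:05:31, 'kraken' 0:01:02)"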
+ return ' %s (%s)' % (len(strings), ', '.join(strings))
+
+ def _GetCompactNamesAndIterations(self, benchmark_runs):
+ grouped_benchmarks = collections.defaultdict(list)
+ for benchmark_run in benchmark_runs:
+ grouped_benchmarks[benchmark_run.label.name].append(benchmark_run)
+
+ output_segs = []
+ for label_name, label_runs in grouped_benchmarks.iteritems():
+ strings = []
+ benchmark_iterations = collections.defaultdict(list)
+ for benchmark_run in label_runs:
+ assert benchmark_run.label.name == label_name
+ benchmark_name = benchmark_run.benchmark.name
+ benchmark_iterations[benchmark_name].append(benchmark_run.iteration)
+ for key, val in benchmark_iterations.iteritems():
+ val.sort()
+ iterations = ','.join(map(str, val))
+ strings.append('{} [{}]'.format(key, iterations))
+ output_segs.append(' ' + label_name + ': ' + ', '.join(strings) + '\n')
+
+ return ' %s \n%s' % (len(benchmark_runs), ''.join(output_segs))
diff --git a/crosperf/field.py b/crosperf/field.py
new file mode 100644
index 00000000..bc92e2cc
--- /dev/null
+++ b/crosperf/field.py
@@ -0,0 +1,152 @@
+# Copyright 2011 Google Inc. All Rights Reserved.
+"""Module to represent a Field in an experiment file."""
+
+
+class Field(object):
+ """Class representing a Field in an experiment file."""
+
+ def __init__(self, name, required, default, inheritable, description):
+ self.name = name
+ self.required = required
+ self.assigned = False
+ self.default = default
+ self._value = default
+ self.inheritable = inheritable
+ self.description = description
+
+ def Set(self, value, parse=True):
+ if parse:
+ self._value = self._Parse(value)
+ else:
+ self._value = value
+ self.assigned = True
+
+ def Append(self, value):
+ self._value += self._Parse(value)
+ self.assigned = True
+
+ def _Parse(self, value):
+ return value
+
+ def Get(self):
+ return self._value
+
+ def GetString(self):
+ return str(self._value)
+
+
+class TextField(Field):
+ """Class of text field."""
+
+ def __init__(self,
+ name,
+ required=False,
+ default='',
+ inheritable=False,
+ description=''):
+ super(TextField, self).__init__(name, required, default, inheritable,
+ description)
+
+ def _Parse(self, value):
+ return str(value)
+
+
+class BooleanField(Field):
+ """Class of boolean field."""
+
+ def __init__(self,
+ name,
+ required=False,
+ default=False,
+ inheritable=False,
+ description=''):
+ super(BooleanField, self).__init__(name, required, default, inheritable,
+ description)
+
+ def _Parse(self, value):
+ if value.lower() == 'true':
+ return True
+ elif value.lower() == 'false':
+ return False
+ raise TypeError("Invalid value for '%s'. Must be true or false." %
+ self.name)
+
+
+class IntegerField(Field):
+ """Class of integer field."""
+
+ def __init__(self,
+ name,
+ required=False,
+ default=0,
+ inheritable=False,
+ description=''):
+ super(IntegerField, self).__init__(name, required, default, inheritable,
+ description)
+
+ def _Parse(self, value):
+ return int(value)
+
+
+class FloatField(Field):
+ """Class of float field."""
+
+ def __init__(self,
+ name,
+ required=False,
+ default=0,
+ inheritable=False,
+ description=''):
+ super(FloatField, self).__init__(name, required, default, inheritable,
+ description)
+
+ def _Parse(self, value):
+ return float(value)
+
+
+class ListField(Field):
+ """Class of list field."""
+
+ def __init__(self,
+ name,
+ required=False,
+ default=None,
+ inheritable=False,
+ description=''):
+ super(ListField, self).__init__(name, required, default, inheritable,
+ description)
+
+ def _Parse(self, value):
+ return value.split()
+
+ def GetString(self):
+ return ' '.join(self._value)
+
+ def Append(self, value):
+ v = self._Parse(value)
+ if not self._value:
+ self._value = v
+ else:
+ self._value += v
+ self.assigned = True
+
+
+class EnumField(Field):
+ """Class of enum field."""
+
+ def __init__(self,
+ name,
+ options,
+ required=False,
+ default='',
+ inheritable=False,
+ description=''):
+ super(EnumField, self).__init__(name, required, default, inheritable,
+ description)
+ self.options = options
+
+ def _Parse(self, value):
+ if value not in self.options:
+ raise TypeError("Invalid enum value for field '%s'. Must be one of (%s)" %
+ (self.name, ', '.join(self.options)))
+ return str(value)
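+
+# Illustrative usage only (these particular fields are examples, not
+# necessarily the ones crosperf defines):
+#   iterations = IntegerField('iterations', default=1)
+#   iterations.Set('3')   # parses the string and stores the integer 3
+#   suite = EnumField('suite', options=['telemetry_Crosperf', 'telemetry'])
+#   suite.Set('foo')      # raises TypeError: not one of the listed options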
diff --git a/crosperf/flag_test_unittest.py b/crosperf/flag_test_unittest.py
new file mode 100755
index 00000000..9f2a7136
--- /dev/null
+++ b/crosperf/flag_test_unittest.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python2
+#
+# Copyright 2014 Google Inc. All Rights Reserved.
+
+"""The unittest of flags."""
+
+from __future__ import print_function
+import test_flag
+
+import unittest
+
+
+class FlagTestCase(unittest.TestCase):
+ """The unittest class."""
+ def test_test_flag(self):
+ # Verify that test_flag.is_test exists, that it is a list,
+ # and that it contains 1 element.
+ self.assertTrue(type(test_flag.is_test) is list)
+ self.assertEqual(len(test_flag.is_test), 1)
+
+    # Verify that getting the flag works and that the flag
+ # contains False, its starting value.
+ save_flag = test_flag.GetTestMode()
+ self.assertFalse(save_flag)
+
+    # Verify that setting the flag to True, then getting it, works.
+ test_flag.SetTestMode(True)
+ self.assertTrue(test_flag.GetTestMode())
+
+ # Verify that setting the flag to False, then getting it, works.
+ test_flag.SetTestMode(save_flag)
+ self.assertFalse(test_flag.GetTestMode())
+
+ # Verify that test_flag.is_test still exists, that it still is a
+ # list, and that it still contains 1 element.
+ self.assertTrue(type(test_flag.is_test) is list)
+ self.assertEqual(len(test_flag.is_test), 1)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/crosperf/generate_report.py b/crosperf/generate_report.py
new file mode 100755
index 00000000..e0add994
--- /dev/null
+++ b/crosperf/generate_report.py
@@ -0,0 +1,277 @@
+#!/usr/bin/env python2
+#
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Given a specially-formatted JSON object, generates results report(s).
+
+The JSON object should look like:
+{"data": BenchmarkData, "platforms": BenchmarkPlatforms}
+
+BenchmarkPlatforms is a [str], each of which names a platform the benchmark
+ was run on (e.g. peppy, shamu, ...). Note that the order of this list
+ corresponds to the order of the PlatformData entries in BenchmarkData.
+
+BenchmarkData is a {str: [PlatformData]}. The str is the name of the benchmark,
+and a PlatformData is a set of data for a given platform. There must be one
+PlatformData for each benchmark, for each element in BenchmarkPlatforms.
+
+A PlatformData is a [{str: float}], where each str names a metric we recorded,
+and the float is the value for that metric. Each element is considered to be
+the metrics collected from an independent run of this benchmark. NOTE: Each
+PlatformData is expected to have a "retval" key, with the return value of
+the benchmark. If the benchmark is successful, said return value should be 0.
+Otherwise, this will break some of our JSON functionality.
+
+Putting it all together, a JSON object will end up looking like:
+ { "platforms": ["peppy", "peppy-new-crosstool"],
+ "data": {
+ "bench_draw_line": [
+ [{"time (ms)": 1.321, "memory (mb)": 128.1, "retval": 0},
+ {"time (ms)": 1.920, "memory (mb)": 128.4, "retval": 0}],
+ [{"time (ms)": 1.221, "memory (mb)": 124.3, "retval": 0},
+ {"time (ms)": 1.423, "memory (mb)": 123.9, "retval": 0}]
+ ]
+ }
+ }
+
+Which says that we ran a benchmark on platforms named peppy, and
+ peppy-new-crosstool.
+We ran one benchmark, named bench_draw_line.
+It was run twice on each platform.
+Peppy's runs took 1.321ms and 1.920ms, while peppy-new-crosstool's took 1.221ms
+ and 1.423ms. None of the runs failed to complete.
+"""
+
+from __future__ import division
+from __future__ import print_function
+
+import argparse
+import functools
+import json
+import os
+import sys
+import traceback
+
+from results_report import BenchmarkResults
+from results_report import HTMLResultsReport
+from results_report import JSONResultsReport
+from results_report import TextResultsReport
+
+
+def CountBenchmarks(benchmark_runs):
+ """Counts the number of iterations for each benchmark in benchmark_runs."""
+ # Example input for benchmark_runs:
+ # {"bench": [[run1, run2, run3], [run1, run2, run3, run4]]}
+ def _MaxLen(results):
+ return 0 if not results else max(len(r) for r in results)
+ return [(name, _MaxLen(results))
+ for name, results in benchmark_runs.iteritems()]
+
+
+def CutResultsInPlace(results, max_keys=50, complain_on_update=True):
+ """Limits the given benchmark results to max_keys keys in-place.
+
+ This takes the `data` field from the benchmark input, and mutates each
+ benchmark run to contain `max_keys` elements (ignoring special elements, like
+ "retval"). At the moment, it just selects the first `max_keys` keyvals,
+ alphabetically.
+
+ If complain_on_update is true, this will print a message noting that a
+ truncation occurred.
+
+ This returns the `results` object that was passed in, for convenience.
+
+ e.g.
+ >>> benchmark_data = {
+ ... "bench_draw_line": [
+ ... [{"time (ms)": 1.321, "memory (mb)": 128.1, "retval": 0},
+ ... {"time (ms)": 1.920, "memory (mb)": 128.4, "retval": 0}],
+ ... [{"time (ms)": 1.221, "memory (mb)": 124.3, "retval": 0},
+ ... {"time (ms)": 1.423, "memory (mb)": 123.9, "retval": 0}]
+ ... ]
+ ... }
+ >>> CutResultsInPlace(benchmark_data, max_keys=1, complain_on_update=False)
+ {
+ 'bench_draw_line': [
+ [{'memory (mb)': 128.1, 'retval': 0},
+ {'memory (mb)': 128.4, 'retval': 0}],
+ [{'memory (mb)': 124.3, 'retval': 0},
+ {'memory (mb)': 123.9, 'retval': 0}]
+ ]
+ }
+ """
+ actually_updated = False
+ for bench_results in results.itervalues():
+ for platform_results in bench_results:
+ for i, result in enumerate(platform_results):
+ # Keep the keys that come earliest when sorted alphabetically.
+ # Forcing alphabetical order is arbitrary, but necessary; otherwise,
+ # the keyvals we'd emit would depend on our iteration order through a
+ # map.
+ removable_keys = sorted(k for k in result if k != 'retval')
+ retained_keys = removable_keys[:max_keys]
+ platform_results[i] = {k: result[k] for k in retained_keys}
+ # retval needs to be passed through all of the time.
+ retval = result.get('retval')
+ if retval is not None:
+ platform_results[i]['retval'] = retval
+ actually_updated = actually_updated or \
+ len(retained_keys) != len(removable_keys)
+
+ if actually_updated and complain_on_update:
+ print("Warning: Some benchmark keyvals have been truncated.",
+ file=sys.stderr)
+ return results
+
+
+def _ConvertToASCII(obj):
+ """Convert an object loaded from JSON to ASCII; JSON gives us unicode."""
+
+ # Using something like `object_hook` is insufficient, since it only fires on
+ # actual JSON objects. `encoding` fails, too, since the default decoder always
+ # uses unicode() to decode strings.
+ if isinstance(obj, unicode):
+ return str(obj)
+ if isinstance(obj, dict):
+ return {_ConvertToASCII(k): _ConvertToASCII(v) for k, v in obj.iteritems()}
+ if isinstance(obj, list):
+ return [_ConvertToASCII(v) for v in obj]
+ return obj
+
+
+def _PositiveInt(s):
+ i = int(s)
+ if i < 0:
+ raise argparse.ArgumentTypeError('%d is not a positive integer.' % (i, ))
+ return i
+
+
+def _AccumulateActions(args):
+ """Given program arguments, determines what actions we want to run.
+
+ Returns [(ResultsReportCtor, str)], where ResultsReportCtor can construct a
+ ResultsReport, and the str is the file extension for the given report.
+ """
+ results = []
+ # The order of these is arbitrary.
+ if args.json:
+ results.append((JSONResultsReport, 'json'))
+ if args.text:
+ results.append((TextResultsReport, 'txt'))
+ if args.email:
+ email_ctor = functools.partial(TextResultsReport, email=True)
+ results.append((email_ctor, 'email'))
+ # We emit HTML if nothing else was specified.
+ if args.html or not results:
+ results.append((HTMLResultsReport, 'html'))
+ return results
+
+
+# Note: get_contents is a function, because it may be expensive (generating some
+# HTML reports takes O(seconds) on my machine, depending on the size of the
+# input data).
+def WriteFile(output_prefix, extension, get_contents, overwrite, verbose):
+ """Writes `contents` to a file named "${output_prefix}.${extension}".
+
+ get_contents should be a zero-args function that returns a string (of the
+ contents to write).
+ If output_prefix == '-', this writes to stdout.
+ If overwrite is False, this will not overwrite files.
+ """
+ if output_prefix == '-':
+ if verbose:
+ print('Writing %s report to stdout' % (extension, ), file=sys.stderr)
+ sys.stdout.write(get_contents())
+ return
+
+ file_name = '%s.%s' % (output_prefix, extension)
+ if not overwrite and os.path.exists(file_name):
+ raise IOError('Refusing to write %s -- it already exists' % (file_name, ))
+
+ with open(file_name, 'w') as out_file:
+ if verbose:
+ print('Writing %s report to %s' % (extension, file_name), file=sys.stderr)
+ out_file.write(get_contents())
+
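+# For example (values illustrative only): WriteFile('report', 'html',
+# lambda: '<html/>', overwrite=True, verbose=False) writes ./report.html,
+# while an output_prefix of '-' streams the report to stdout instead.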
+
+def RunActions(actions, benchmark_results, output_prefix, overwrite, verbose):
+ """Runs `actions`, returning True if all succeeded."""
+ failed = False
+
+ report_ctor = None # Make the linter happy
+ for report_ctor, extension in actions:
+ try:
+ get_contents = lambda: report_ctor(benchmark_results).GetReport()
+ WriteFile(output_prefix, extension, get_contents, overwrite, verbose)
+ except Exception:
+ # Complain and move along; we may have more actions that might complete
+ # successfully.
+ failed = True
+ traceback.print_exc()
+ return not failed
+
+
+def PickInputFile(input_name):
+ """Given program arguments, returns file to read for benchmark input."""
+ return sys.stdin if input_name == '-' else open(input_name)
+
+
+def _NoPerfReport(_label_name, _benchmark_name, _benchmark_iteration):
+ return {}
+
+
+def _ParseArgs(argv):
+ parser = argparse.ArgumentParser(description='Turns JSON into results '
+ 'report(s).')
+ parser.add_argument('-v', '--verbose', action='store_true',
+ help='Be a tiny bit more verbose.')
+ parser.add_argument('-f', '--force', action='store_true',
+ help='Overwrite existing results files.')
+ parser.add_argument('-o', '--output', default='report', type=str,
+ help='Prefix of the output filename (default: report). '
+ '- means stdout.')
+ parser.add_argument('-i', '--input', required=True, type=str,
+ help='Where to read the JSON from. - means stdin.')
+ parser.add_argument('-l', '--statistic-limit', default=0, type=_PositiveInt,
+ help='The maximum number of benchmark statistics to '
+ 'display from a single run. 0 implies unlimited.')
+ parser.add_argument('--json', action='store_true',
+ help='Output a JSON report.')
+ parser.add_argument('--text', action='store_true',
+ help='Output a text report.')
+ parser.add_argument('--email', action='store_true',
+ help='Output a text report suitable for email.')
+ parser.add_argument('--html', action='store_true',
+ help='Output an HTML report (this is the default if no '
+ 'other output format is specified).')
+ return parser.parse_args(argv)
+
+
+def Main(argv):
+ args = _ParseArgs(argv)
+ # JSON likes to load UTF-8; our results reporter *really* doesn't like
+ # UTF-8.
+ with PickInputFile(args.input) as in_file:
+ raw_results = _ConvertToASCII(json.load(in_file))
+
+ platform_names = raw_results['platforms']
+ results = raw_results['data']
+ if args.statistic_limit:
+ results = CutResultsInPlace(results, max_keys=args.statistic_limit)
+ benches = CountBenchmarks(results)
+ # In crosperf, a label is essentially a platform+configuration. So, a name of
+ # a label and a name of a platform are equivalent for our purposes.
+ bench_results = BenchmarkResults(label_names=platform_names,
+ benchmark_names_and_iterations=benches,
+ run_keyvals=results,
+ read_perf_report=_NoPerfReport)
+ actions = _AccumulateActions(args)
+ ok = RunActions(actions, bench_results, args.output, args.force,
+ args.verbose)
+ return 0 if ok else 1
+
+
+if __name__ == '__main__':
+ sys.exit(Main(sys.argv[1:]))
diff --git a/crosperf/generate_report_unittest.py b/crosperf/generate_report_unittest.py
new file mode 100755
index 00000000..a5d00635
--- /dev/null
+++ b/crosperf/generate_report_unittest.py
@@ -0,0 +1,146 @@
+#!/usr/bin/env python2
+#
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Test for generate_report.py."""
+
+from __future__ import division
+from __future__ import print_function
+
+from StringIO import StringIO
+
+import copy
+import json
+import mock
+import test_flag
+import unittest
+
+import generate_report
+import results_report
+
+class _ContextualStringIO(StringIO):
+ """StringIO that can be used in `with` statements."""
+ def __init__(self, *args):
+ StringIO.__init__(self, *args)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, _type, _value, _traceback):
+ pass
+
+
+class GenerateReportTests(unittest.TestCase):
+ """Tests for generate_report.py."""
+ def testCountBenchmarks(self):
+ runs = {
+ 'foo': [[{}, {}, {}], [{}, {}, {}, {}]],
+ 'bar': [],
+ 'baz': [[], [{}], [{}, {}, {}]]
+ }
+ results = generate_report.CountBenchmarks(runs)
+ expected_results = [('foo', 4), ('bar', 0), ('baz', 3)]
+ self.assertItemsEqual(expected_results, results)
+
+ def testCutResultsInPlace(self):
+ bench_data = {
+ 'foo': [[{'a': 1, 'b': 2, 'c': 3}, {'a': 3, 'b': 2.5, 'c': 1}]],
+ 'bar': [[{'d': 11, 'e': 12, 'f': 13}]],
+ 'baz': [[{'g': 12, 'h': 13}]],
+ 'qux': [[{'i': 11}]],
+ }
+ original_bench_data = copy.deepcopy(bench_data)
+
+ max_keys = 2
+ results = generate_report.CutResultsInPlace(bench_data, max_keys=max_keys,
+ complain_on_update=False)
+ # Cuts should be in-place.
+ self.assertIs(results, bench_data)
+ self.assertItemsEqual(original_bench_data.keys(), bench_data.keys())
+ for bench_name, original_runs in original_bench_data.iteritems():
+ bench_runs = bench_data[bench_name]
+ self.assertEquals(len(original_runs), len(bench_runs))
+ # Order of these sub-lists shouldn't have changed.
+ for original_list, new_list in zip(original_runs, bench_runs):
+ self.assertEqual(len(original_list), len(new_list))
+ for original_keyvals, sub_keyvals in zip(original_list, new_list):
+ # sub_keyvals must be a subset of original_keyvals
+ self.assertDictContainsSubset(sub_keyvals, original_keyvals)
+
+
+ def testCutResultsInPlaceLeavesRetval(self):
+ bench_data = {
+ 'foo': [[{'retval': 0, 'a': 1}]],
+ 'bar': [[{'retval': 1}]],
+ 'baz': [[{'RETVAL': 1}]],
+ }
+ results = generate_report.CutResultsInPlace(bench_data, max_keys=0,
+ complain_on_update=False)
+ # Just reach into results, assuming it otherwise outputs things
+ # sanely. If it doesn't, testCutResultsInPlace should give an indication as
+ # to what, exactly, is broken.
+ self.assertEqual(results['foo'][0][0].items(), [('retval', 0)])
+ self.assertEqual(results['bar'][0][0].items(), [('retval', 1)])
+ self.assertEqual(results['baz'][0][0].items(), [])
+
+ def _RunMainWithInput(self, args, input_obj):
+ assert '-i' not in args
+ args += ['-i', '-']
+ input_buf = _ContextualStringIO(json.dumps(input_obj))
+ with mock.patch('generate_report.PickInputFile', return_value=input_buf) \
+ as patched_pick:
+ result = generate_report.Main(args)
+ patched_pick.assert_called_once_with('-')
+ return result
+
+ @mock.patch('generate_report.RunActions')
+ def testMain(self, mock_run_actions):
+ # Email is left out because it's a bit more difficult to test, and it'll be
+ # mildly obvious if it's failing.
+ args = ['--json', '--html', '--text']
+ return_code = self._RunMainWithInput(args, {'platforms': [], 'data': {}})
+ self.assertEqual(0, return_code)
+ self.assertEqual(mock_run_actions.call_count, 1)
+ ctors = [ctor for ctor, _ in mock_run_actions.call_args[0][0]]
+ self.assertItemsEqual(ctors, [
+ results_report.JSONResultsReport,
+ results_report.TextResultsReport,
+ results_report.HTMLResultsReport,
+ ])
+
+ @mock.patch('generate_report.RunActions')
+ def testMainSelectsHTMLIfNoReportsGiven(self, mock_run_actions):
+ args = []
+ return_code = self._RunMainWithInput(args, {'platforms': [], 'data': {}})
+ self.assertEqual(0, return_code)
+ self.assertEqual(mock_run_actions.call_count, 1)
+ ctors = [ctor for ctor, _ in mock_run_actions.call_args[0][0]]
+ self.assertItemsEqual(ctors, [results_report.HTMLResultsReport])
+
+ # We only mock print_exc so we don't have exception info printed to stdout.
+ @mock.patch('generate_report.WriteFile', side_effect=ValueError('Oh noo'))
+ @mock.patch('traceback.print_exc')
+ def testRunActionsRunsAllActionsRegardlessOfExceptions(self, mock_print_exc,
+ mock_write_file):
+ actions = [(None, 'json'), (None, 'html'), (None, 'text'), (None, 'email')]
+ output_prefix = '-'
+ ok = generate_report.RunActions(actions, {}, output_prefix, overwrite=False,
+ verbose=False)
+ self.assertFalse(ok)
+ self.assertEqual(mock_write_file.call_count, len(actions))
+ self.assertEqual(mock_print_exc.call_count, len(actions))
+
+ @mock.patch('generate_report.WriteFile')
+ def testRunActionsReturnsTrueIfAllActionsSucceed(self, mock_write_file):
+ actions = [(None, 'json'), (None, 'html'), (None, 'text')]
+ output_prefix = '-'
+ ok = generate_report.RunActions(actions, {}, output_prefix, overwrite=False,
+ verbose=False)
+ self.assertEqual(mock_write_file.call_count, len(actions))
+ self.assertTrue(ok)
+
+
+if __name__ == '__main__':
+ test_flag.SetTestMode(True)
+ unittest.main()
diff --git a/crosperf/help.py b/crosperf/help.py
new file mode 100644
index 00000000..61ed8ea2
--- /dev/null
+++ b/crosperf/help.py
@@ -0,0 +1,114 @@
+# Copyright 2011 Google Inc. All Rights Reserved.
+"""Module to print help message."""
+
+from __future__ import print_function
+
+import sys
+import textwrap
+from settings_factory import BenchmarkSettings
+from settings_factory import GlobalSettings
+from settings_factory import LabelSettings
+
+
+class Help(object):
+ """The help class."""
+
+ def GetUsage(self):
+ return """%s [OPTIONS] EXPERIMENT_FILE""" % (sys.argv[0])
+
+ def _WrapLine(self, line):
+ return '\n'.join(textwrap.wrap(line, 80))
+
+ def _GetFieldDescriptions(self, fields):
+ res = ''
+ for field_name in fields:
+ field = fields[field_name]
+ res += 'Field:\t\t%s\n' % field.name
+ res += self._WrapLine('Description:\t%s' % field.description) + '\n'
+ res += 'Type:\t\t%s\n' % type(field).__name__.replace('Field', '')
+ res += 'Required:\t%s\n' % field.required
+ if field.default:
+ res += 'Default:\t%s\n' % field.default
+ res += '\n'
+ return res
+
+ def GetHelp(self):
+ global_fields = self._GetFieldDescriptions(GlobalSettings('').fields)
+ benchmark_fields = self._GetFieldDescriptions(BenchmarkSettings('').fields)
+ label_fields = self._GetFieldDescriptions(LabelSettings('').fields)
+
+ return """%s is a script for running performance experiments on
+ChromeOS. It allows one to run ChromeOS Autotest benchmarks over
+several images and compare the results to determine whether there
+is a performance difference.
+
+Comparing several images using %s is referred to as running an
+"experiment". An "experiment file" is a configuration file which holds
+all the information that describes the experiment and how it should be
+run. An example of a simple experiment file is below:
+
+--------------------------------- test.exp ---------------------------------
+name: my_experiment
+board: x86-alex
+remote: chromeos2-row1-rack4-host7.cros 172.18.122.132
+
+benchmark: page_cycler_v2.morejs {
+ suite: telemetry_Crosperf
+ iterations: 3
+}
+
+my_first_image {
+ chromeos_image: /usr/local/chromeos-1/chromiumos_image.bin
+}
+
+my_second_image {
+ chromeos_image: /usr/local/chromeos-2/chromiumos_image.bin
+}
+----------------------------------------------------------------------------
+
+This experiment file names the experiment "my_experiment". It will be
+run on the board x86-alex. Benchmarks will be run using two remote
+devices, one specified by its hostname and the other by its
+IP address. Benchmarks will be run in
+parallel across these devices. There is currently no way to specify
+which benchmark will run on each device.
+
+We define one "benchmark" that will be run, page_cycler_v2.morejs. This
+benchmark has two "fields", one which specifies that this benchmark is
+part of the telemetry_Crosperf suite (this is the common way to run
+most Telemetry benchmarks), and the other which specifies how many
+iterations it will run for.
+
+We specify one or more "labels" or images which will be compared. The
+page_cycler_v2.morejs benchmark will be run on each of these images 3
+times and a result table will be output which compares them for all
+the images specified.
+
+The full list of fields that can be specified in the experiment file
+is as follows:
+=================
+Global Fields
+=================
+%s
+=================
+Benchmark Fields
+=================
+%s
+=================
+Label Fields
+=================
+%s
+
+Note that global fields are overridden by label or benchmark fields, if
+they can be specified in both places. Fields that are specified as
+arguments override fields specified in experiment files.
+
+%s is invoked by passing it a path to an experiment file,
+as well as any options (in addition to those specified in the
+experiment file). Crosperf runs the experiment and caches the results
+(or reads the previously cached experiment results out of the cache),
+generates and displays a report based on the run, and emails the
+report to the user. If the results were all read out of the cache,
+then by default no email is generated.
+""" % (sys.argv[0], sys.argv[0], global_fields, benchmark_fields, label_fields,
+ sys.argv[0])
diff --git a/crosperf/image_checksummer.py b/crosperf/image_checksummer.py
new file mode 100644
index 00000000..e330084e
--- /dev/null
+++ b/crosperf/image_checksummer.py
@@ -0,0 +1,69 @@
+# Copyright 2011 Google Inc. All Rights Reserved.
+"""Compute image checksum."""
+
+from __future__ import print_function
+
+import os
+import threading
+
+from cros_utils import logger
+from cros_utils.file_utils import FileUtils
+
+
+class ImageChecksummer(object):
+ """Compute image checksum."""
+
+ class PerImageChecksummer(object):
+ """Compute checksum for an image."""
+
+ def __init__(self, label, log_level):
+ self._lock = threading.Lock()
+ self.label = label
+ self._checksum = None
+ self.log_level = log_level
+
+ def Checksum(self):
+ with self._lock:
+ if not self._checksum:
+ logger.GetLogger().LogOutput("Acquiring checksum for '%s'." %
+ self.label.name)
+ self._checksum = None
+ if self.label.image_type != 'local':
+ raise RuntimeError('Called Checksum on non-local image!')
+ if self.label.chromeos_image:
+ if os.path.exists(self.label.chromeos_image):
+ self._checksum = FileUtils().Md5File(
+ self.label.chromeos_image, log_level=self.log_level)
+ logger.GetLogger().LogOutput(
+ 'Computed checksum is: %s' % self._checksum)
+ if not self._checksum:
+ raise RuntimeError('Checksum computing error.')
+ logger.GetLogger().LogOutput('Checksum is: %s' % self._checksum)
+ return self._checksum
+
+ _instance = None
+ _lock = threading.Lock()
+ _per_image_checksummers = {}
+
+ def __new__(cls, *args, **kwargs):
+ with cls._lock:
+ if not cls._instance:
+ cls._instance = super(ImageChecksummer, cls).__new__(cls, *args,
+ **kwargs)
+ return cls._instance
+
+ def Checksum(self, label, log_level):
+ if label.image_type != 'local':
+ raise RuntimeError('Attempt to call Checksum on non-local image.')
+ with self._lock:
+ if label.name not in self._per_image_checksummers:
+ self._per_image_checksummers[label.name] = (
+ ImageChecksummer.PerImageChecksummer(label, log_level))
+ checksummer = self._per_image_checksummers[label.name]
+
+ try:
+ return checksummer.Checksum()
+ except:
+ logger.GetLogger().LogError('Could not compute checksum of image in label'
+ " '%s'." % label.name)
+ raise
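+
+
+# A minimal usage sketch (the `label` object is hypothetical here; in crosperf
+# it comes from label.py and must describe a 'local' image):
+#
+#   checksum = ImageChecksummer().Checksum(label, log_level='verbose')
+#
+# ImageChecksummer is a process-wide singleton; repeated calls for the same
+# label name reuse the cached PerImageChecksummer, so each image file is
+# MD5-hashed at most once per crosperf invocation.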
diff --git a/crosperf/label.py b/crosperf/label.py
new file mode 100644
index 00000000..d993c15c
--- /dev/null
+++ b/crosperf/label.py
@@ -0,0 +1,159 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""The label of benchamrks."""
+
+from __future__ import print_function
+
+import hashlib
+import os
+
+from image_checksummer import ImageChecksummer
+from cros_utils.file_utils import FileUtils
+from cros_utils import misc
+
+
+class Label(object):
+ """The label class."""
+
+ def __init__(self,
+ name,
+ chromeos_image,
+ autotest_path,
+ chromeos_root,
+ board,
+ remote,
+ image_args,
+ cache_dir,
+ cache_only,
+ log_level,
+ compiler,
+ chrome_src=None):
+
+ self.image_type = self._GetImageType(chromeos_image)
+
+ # Expand ~
+ chromeos_root = os.path.expanduser(chromeos_root)
+ if self.image_type == 'local':
+ chromeos_image = os.path.expanduser(chromeos_image)
+
+ self.name = name
+ self.chromeos_image = chromeos_image
+ self.autotest_path = autotest_path
+ self.board = board
+ self.remote = remote
+ self.image_args = image_args
+ self.cache_dir = cache_dir
+ self.cache_only = cache_only
+ self.log_level = log_level
+ self.chrome_version = ''
+ self.compiler = compiler
+
+ if not chromeos_root:
+ if self.image_type == 'local':
+ chromeos_root = FileUtils().ChromeOSRootFromImage(chromeos_image)
+ if not chromeos_root:
+ raise RuntimeError("No ChromeOS root given for label '%s' and could "
+ "not determine one from image path: '%s'." %
+ (name, chromeos_image))
+ else:
+ chromeos_root = FileUtils().CanonicalizeChromeOSRoot(chromeos_root)
+ if not chromeos_root:
+ raise RuntimeError("Invalid ChromeOS root given for label '%s': '%s'." %
+ (name, chromeos_root))
+
+ self.chromeos_root = chromeos_root
+ if not chrome_src:
+ self.chrome_src = os.path.join(
+ self.chromeos_root, '.cache/distfiles/target/chrome-src-internal')
+ if not os.path.exists(self.chrome_src):
+ self.chrome_src = os.path.join(self.chromeos_root,
+ '.cache/distfiles/target/chrome-src')
+ else:
+ chromeos_src = misc.CanonicalizePath(chrome_src)
+ if not chromeos_src:
+ raise RuntimeError("Invalid Chrome src given for label '%s': '%s'." %
+ (name, chrome_src))
+ self.chrome_src = chromeos_src
+
+ self._SetupChecksum()
+
+ def _SetupChecksum(self):
+ """Compute label checksum only once."""
+
+ self.checksum = None
+ if self.image_type == 'local':
+ self.checksum = ImageChecksummer().Checksum(self, self.log_level)
+ elif self.image_type == 'trybot':
+ self.checksum = hashlib.md5(self.chromeos_image).hexdigest()
+
+ def _GetImageType(self, chromeos_image):
+ image_type = None
+ if chromeos_image.find('xbuddy://') < 0:
+ image_type = 'local'
+ elif chromeos_image.find('trybot') >= 0:
+ image_type = 'trybot'
+ else:
+ image_type = 'official'
+ return image_type
+
+ def __hash__(self):
+ """Label objects are used in a map, so provide "hash" and "equal"."""
+
+ return hash(self.name)
+
+ def __eq__(self, other):
+ """Label objects are used in a map, so provide "hash" and "equal"."""
+
+ return isinstance(other, Label) and other.name == self.name
+
+ def __str__(self):
+ """For better debugging."""
+
+ return 'label[name="{}"]'.format(self.name)
+
+
+class MockLabel(object):
+ """The mock label class."""
+
+ def __init__(self,
+ name,
+ chromeos_image,
+ autotest_path,
+ chromeos_root,
+ board,
+ remote,
+ image_args,
+ cache_dir,
+ cache_only,
+ log_level,
+ compiler,
+ chrome_src=None):
+ self.name = name
+ self.chromeos_image = chromeos_image
+ self.autotest_path = autotest_path
+ self.board = board
+ self.remote = remote
+ self.cache_dir = cache_dir
+ self.cache_only = cache_only
+ if not chromeos_root:
+ self.chromeos_root = '/tmp/chromeos_root'
+ else:
+ self.chromeos_root = chromeos_root
+ self.image_args = image_args
+ self.chrome_src = chrome_src
+ self.image_type = self._GetImageType(chromeos_image)
+ self.checksum = ''
+ self.log_level = log_level
+ self.compiler = compiler
+ self.chrome_version = 'Fake Chrome Version 50'
+
+ def _GetImageType(self, chromeos_image):
+ image_type = None
+ if chromeos_image.find('xbuddy://') < 0:
+ image_type = 'local'
+ elif chromeos_image.find('trybot') >= 0:
+ image_type = 'trybot'
+ else:
+ image_type = 'official'
+ return image_type
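+
+
+# _GetImageType in both classes maps the image specification to a type, e.g.
+# (hypothetical paths):
+#   '/usr/local/chromeos-1/chromiumos_image.bin'        -> 'local'
+#   'xbuddy://remote/trybot-lumpy-paladin/R34-5417.0.0' -> 'trybot'
+#   'xbuddy://remote/lumpy-release/R34-5417.0.0'        -> 'official'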
diff --git a/crosperf/machine_image_manager.py b/crosperf/machine_image_manager.py
new file mode 100644
index 00000000..3cc464bb
--- /dev/null
+++ b/crosperf/machine_image_manager.py
@@ -0,0 +1,304 @@
+
+# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""MachineImageManager allocates images to duts."""
+
+class MachineImageManager(object):
+ """Management of allocating images to duts.
+
+ * Data structure we have -
+
+ duts_ - list of duts. For each dut, we assume the following 2 properties
+ exist - label_ (the current label the dut carries, or None if it has an
+ alien image) and name (a string).
+
+ labels_ - a list of labels. For each label, we assume these properties
+ exist - remote (a set/vector/list of dut names (not dut objects), with
+ each of which this image is compatible). remote may be None, which means
+ the image is universally compatible.
+
+ label_duts_ - for each label, we maintain a list of duts onto which the
+ label has been imaged. Note this is an array of lists. Each element of
+ each list is an integer, a dut ordinal. We index this array by label
+ ordinal.
+
+ allocate_log_ - a list of allocation records. For example, if we allocate
+ l1 to d1, then l2 to d2, then allocate_log_ would be [(1, 1), (2, 2)].
+ This is used for debugging/logging, etc. All tuples in the list are
+ integer pairs (label_ordinal, dut_ordinal).
+
+ n_duts_ - number of duts.
+
+ n_labels_ - number of labels.
+
+ dut_name_ordinal_ - mapping from dut name (a string) to an integer,
+ starting from 0, so that duts_[dut_name_ordinal_[a_dut.name]] == a_dut.
+
+ * Problem abstraction -
+
+ Assume we have the following matrix - label X machine (row X col). An 'X'
+ at (i, j) in the matrix means label Li and machine Mj are not compatible,
+ i.e. we cannot image Li onto Mj.
+
+ M1 M2 M3
+ L1 X
+
+ L2 X
+
+ L3 X X
+
+ We now try to find a way to fill 'Y's into the matrix so that -
+
+ a) - each row gets at least one 'Y'. This ensures that each label gets
+ imaged at least once, an obvious prerequisite.
+
+ b) - each column gets at most N 'Y's. This makes sure we can finish all
+ tests by re-imaging each machine at most N times. That said, we may
+ *OPTIONALLY* reimage some machines more than N times to *accelerate*
+ testing.
+
+ How to choose the initial N for b) -
+ If the number of duts (nd) is equal to or greater than the number of
+ labels (nl), we start from N == 1. Otherwise we start from N = nl - nd + 1.
+
+ We begin the search with this pre-defined N; if we fail to find a
+ solution for that N, we increase N by 1 and continue the search up to
+ N == nl. If even that fails, we give up.
+
+ Such a solution ensures a minimal number of reimages.
+
+ * Solution representation
+
+ The solution will be placed inside the matrix, like below
+
+ M1 M2 M3 M4
+ L1 X X Y
+
+ L2 Y X
+
+ L3 X Y X
+
+ * Allocation algorithm
+
+ When Mj asks for an image, we check column j, pick the first cell that
+ contains a 'Y', and mark the cell '_'. If no such 'Y' exists (like M4 in
+ the above solution matrix), we just pick the image with the minimal
+ reimage count.
+
+ After allocate for M3
+ M1 M2 M3 M4
+ L1 X X _
+
+ L2 Y X
+
+ L3 X Y X
+
+ After allocate for M4
+ M1 M2 M3 M4
+ L1 X X _
+
+ L2 Y X _
+
+ L3 X Y X
+
+ After allocate for M2
+ M1 M2 M3 M4
+ L1 X X _
+
+ L2 Y X _
+
+ L3 X _ X
+
+ After allocate for M1
+ M1 M2 M3 M4
+ L1 X X _
+
+ L2 _ X _
+
+ L3 X _ X
+
+ After allocate for M2
+ M1 M2 M3 M4
+ L1 X X _
+
+ L2 _ _ X _
+
+ L3 X _ X
+
+ If we try to allocate for M1 or M2 or M3 again, we get None.
+
+ * Special / common cases handled separately
+
+ If we have only 1 dut or only 1 label, that's simple enough.
+
+ """
+
+ def __init__(self, labels, duts):
+ self.labels_ = labels
+ self.duts_ = duts
+ self.n_labels_ = len(labels)
+ self.n_duts_ = len(duts)
+ self.dut_name_ordinal_ = dict()
+ for idx, dut in enumerate(self.duts_):
+ self.dut_name_ordinal_[dut.name] = idx
+
+ # Generate initial matrix containing 'X' or ' '.
+ self.matrix_ = [['X' if (l.remote and len(l.remote)) else ' ' \
+ for _ in range(self.n_duts_)] for l in self.labels_]
+ for ol, l in enumerate(self.labels_):
+ if l.remote:
+ for r in l.remote:
+ self.matrix_[ol][self.dut_name_ordinal_[r]] = ' '
+
+ self.label_duts_ = [[] for _ in range(self.n_labels_)]
+ self.allocate_log_ = []
+
+ def compute_initial_allocation(self):
+ """Compute the initial label-dut allocation.
+
+ This method finds the most efficient way that every label gets imaged at
+ least once.
+
+ Returns:
+ False only if some label could not be imaged onto any machine;
+ True otherwise.
+ """
+
+ if self.n_duts_ == 1:
+ for i, v in self.matrix_vertical_generator(0):
+ if v != 'X':
+ self.matrix_[i][0] = 'Y'
+ return True
+
+ if self.n_labels_ == 1:
+ for j, v in self.matrix_horizontal_generator(0):
+ if v != 'X':
+ self.matrix_[0][j] = 'Y'
+ return True
+
+ if self.n_duts_ >= self.n_labels_:
+ n = 1
+ else:
+ n = self.n_labels_ - self.n_duts_ + 1
+ while n <= self.n_labels_:
+ if self._compute_initial_allocation_internal(0, n):
+ break
+ n += 1
+
+ return n <= self.n_labels_
+
+ def _record_allocate_log(self, label_i, dut_j):
+ self.allocate_log_.append((label_i, dut_j))
+ self.label_duts_[label_i].append(dut_j)
+
+ def allocate(self, dut, schedv2=None):
+ """Allocate a label for dut.
+
+ Args:
+ dut: the dut that asks for a new image.
+ schedv2: the scheduling instance; we use its benchmark run
+ information to make a better allocation.
+
+ Returns:
+ a label to image onto the dut or None if no more available images for
+ the dut.
+ """
+ j = self.dut_name_ordinal_[dut.name]
+ # 'can_' prefix means candidate label's.
+ can_reimage_number = 999
+ can_i = 999
+ can_label = None
+ can_pending_br_num = 0
+ for i, v in self.matrix_vertical_generator(j):
+ label = self.labels_[i]
+
+ # 2 optimizations here regarding allocating label to dut.
+ # Note schedv2 might be None in case we do not need this
+ # optimization or we are in testing mode.
+ if schedv2 is not None:
+ pending_br_num = len(schedv2.get_label_map()[label])
+ if pending_br_num == 0:
+ # (A) - we have finished all benchmark runs of this label;
+ # apparently, we do not want to reimage the dut to
+ # this label.
+ continue
+ else:
+ # In case we do not have a schedv2 instance, mark
+ # pending_br_num as 0, so pending_br_num >=
+ # can_pending_br_num is always True.
+ pending_br_num = 0
+
+ # For the time being, this is commented out until we have a
+ # better estimate of how long each benchmark run takes.
+ # if (pending_br_num <= 5 and
+ # len(self.label_duts_[i]) >= 1):
+ # # (B) this is a heuristic - if there are just a few test cases
+ # # (say <5) left undone for this label, and there is at least
+ # # 1 other machine working on this label, we probably do not
+ # # want to bother to reimage this dut to help with these 5
+ # # test cases
+ # continue
+
+ if v == 'Y':
+ self.matrix_[i][j] = '_'
+ self._record_allocate_log(i, j)
+ return label
+ if v == ' ':
+ label_reimage_number = len(self.label_duts_[i])
+ if ((can_label is None) or
+ (label_reimage_number < can_reimage_number or
+ (label_reimage_number == can_reimage_number and
+ pending_br_num >= can_pending_br_num))):
+ can_reimage_number = label_reimage_number
+ can_i = i
+ can_label = label
+ can_pending_br_num = pending_br_num
+
+ # All labels are marked either '_' (already taken) or 'X' (not
+ # compatible), so return None to notify machine thread to quit.
+ if can_label is None:
+ return None
+
+ # At this point, we don't find any 'Y' for the machine, so we go the
+ # 'min' approach.
+ self.matrix_[can_i][j] = '_'
+ self._record_allocate_log(can_i, j)
+ return can_label
+
+ def matrix_vertical_generator(self, col):
+ """Iterate matrix vertically at column 'col'.
+
+ Yield row number i and value at matrix_[i][col].
+ """
+ for i, _ in enumerate(self.labels_):
+ yield i, self.matrix_[i][col]
+
+ def matrix_horizontal_generator(self, row):
+ """Iterate matrix horizontally at row 'row'.
+
+ Yield col number j and value at matrix_[row][j].
+ """
+ for j, _ in enumerate(self.duts_):
+ yield j, self.matrix_[row][j]
+
+ def _compute_initial_allocation_internal(self, level, N):
+ """Search matrix for d with N."""
+
+ if level == self.n_labels_:
+ return True
+
+ for j, v in self.matrix_horizontal_generator(level):
+ if v == ' ':
+ # Before we put a 'Y', we check how many Y column 'j' has.
+ # Note y[0] is row idx, y[1] is the cell value.
+ ny = reduce(lambda x, y: x + 1 if (y[1] == 'Y') else x,
+ self.matrix_vertical_generator(j), 0)
+ if ny < N:
+ self.matrix_[level][j] = 'Y'
+ if self._compute_initial_allocation_internal(level + 1, N):
+ return True
+ self.matrix_[level][j] = ' '
+
+ return False
diff --git a/crosperf/machine_image_manager_unittest.py b/crosperf/machine_image_manager_unittest.py
new file mode 100755
index 00000000..fe41dc09
--- /dev/null
+++ b/crosperf/machine_image_manager_unittest.py
@@ -0,0 +1,290 @@
+#!/usr/bin/env python2
+
+# Copyright 2015 Google Inc. All Rights Reserved.
+
+"""Unit tests for the MachineImageManager class."""
+
+from __future__ import print_function
+
+import random
+import unittest
+
+from machine_image_manager import MachineImageManager
+
+
+class MockLabel(object):
+ """Class for generating a mock Label."""
+
+ def __init__(self, name, remotes=None):
+ self.name = name
+ self.remote = remotes
+
+ def __hash__(self):
+ """Provide hash function for label.
+
+ This is required because Label object is used inside a dict as key.
+ """
+ return hash(self.name)
+
+ def __eq__(self, other):
+ """Provide eq function for label.
+
+ This is required because Label object is used inside a dict as key.
+ """
+ return isinstance(other, MockLabel) and other.name == self.name
+
+
+class MockDut(object):
+ """Class for creating a mock Device-Under-Test (DUT)."""
+
+ def __init__(self, name, label=None):
+ self.name = name
+ self.label_ = label
+
+
+class MachineImageManagerTester(unittest.TestCase):
+ """Class for testing MachineImageManager."""
+
+ def gen_duts_by_name(self, *names):
+ duts = []
+ for n in names:
+ duts.append(MockDut(n))
+ return duts
+
+ def print_matrix(self, matrix):
+ for r in matrix:
+ for v in r:
+ print('{} '.format('.' if v == ' ' else v), end='')
+ print('')
+
+ def create_labels_and_duts_from_pattern(self, pattern):
+ labels = []
+ duts = []
+ for i, r in enumerate(pattern):
+ l = MockLabel('l{}'.format(i), [])
+ for j, v in enumerate(r.split()):
+ if v == '.':
+ l.remote.append('m{}'.format(j))
+ if i == 0:
+ duts.append(MockDut('m{}'.format(j)))
+ labels.append(l)
+ return labels, duts
+
+ def check_matrix_against_pattern(self, matrix, pattern):
+ for i, s in enumerate(pattern):
+ for j, v in enumerate(s.split()):
+ self.assertTrue(v == '.' and matrix[i][j] == ' ' or v == matrix[i][j])
+
+ def pattern_based_test(self, inp, output):
+ labels, duts = self.create_labels_and_duts_from_pattern(inp)
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
+ self.check_matrix_against_pattern(mim.matrix_, output)
+ return mim
+
+ def test_single_dut(self):
+ labels = [MockLabel('l1'), MockLabel('l2'), MockLabel('l3')]
+ dut = MockDut('m1')
+ mim = MachineImageManager(labels, [dut])
+ mim.compute_initial_allocation()
+ self.assertTrue(mim.matrix_ == [['Y'], ['Y'], ['Y']])
+
+ def test_single_label(self):
+ labels = [MockLabel('l1')]
+ duts = self.gen_duts_by_name('m1', 'm2', 'm3')
+ mim = MachineImageManager(labels, duts)
+ mim.compute_initial_allocation()
+ self.assertTrue(mim.matrix_ == [['Y', 'Y', 'Y']])
+
+ def test_case1(self):
+ labels = [MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']),
+ MockLabel('l3', ['m1'])]
+ duts = [MockDut('m1'), MockDut('m2'), MockDut('m3')]
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.matrix_ == [[' ', ' ', 'X'], ['X', ' ', ' '], [' ', 'X',
+ 'X']])
+ mim.compute_initial_allocation()
+ self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X',
+ 'X']])
+
+ def test_case2(self):
+ labels = [MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']),
+ MockLabel('l3', ['m1'])]
+ duts = [MockDut('m1'), MockDut('m2'), MockDut('m3')]
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.matrix_ == [[' ', ' ', 'X'], ['X', ' ', ' '], [' ', 'X',
+ 'X']])
+ mim.compute_initial_allocation()
+ self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X',
+ 'X']])
+
+ def test_case3(self):
+ labels = [MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']),
+ MockLabel('l3', ['m1'])]
+ duts = [MockDut('m1', labels[0]), MockDut('m2'), MockDut('m3')]
+ mim = MachineImageManager(labels, duts)
+ mim.compute_initial_allocation()
+ self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X',
+ 'X']])
+
+ def test_case4(self):
+ labels = [MockLabel('l1', ['m1', 'm2']), MockLabel('l2', ['m2', 'm3']),
+ MockLabel('l3', ['m1'])]
+ duts = [MockDut('m1'), MockDut('m2', labels[0]), MockDut('m3')]
+ mim = MachineImageManager(labels, duts)
+ mim.compute_initial_allocation()
+ self.assertTrue(mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X',
+ 'X']])
+
+ def test_case5(self):
+ labels = [MockLabel('l1', ['m3']), MockLabel('l2', ['m3']),
+ MockLabel('l3', ['m1'])]
+ duts = self.gen_duts_by_name('m1', 'm2', 'm3')
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
+ self.assertTrue(mim.matrix_ == [['X', 'X', 'Y'], ['X', 'X', 'Y'], ['Y', 'X',
+ 'X']])
+
+ def test_2x2_with_allocation(self):
+ labels = [MockLabel('l0'), MockLabel('l1')]
+ duts = [MockDut('m0'), MockDut('m1')]
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
+ self.assertTrue(mim.allocate(duts[0]) == labels[0])
+ self.assertTrue(mim.allocate(duts[0]) == labels[1])
+ self.assertTrue(mim.allocate(duts[0]) is None)
+ self.assertTrue(mim.matrix_[0][0] == '_')
+ self.assertTrue(mim.matrix_[1][0] == '_')
+ self.assertTrue(mim.allocate(duts[1]) == labels[1])
+
+ def test_10x10_general(self):
+ """Gen 10x10 matrix."""
+ n = 10
+ labels = []
+ duts = []
+ for i in range(n):
+ labels.append(MockLabel('l{}'.format(i)))
+ duts.append(MockDut('m{}'.format(i)))
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
+ for i in range(n):
+ for j in range(n):
+ if i == j:
+ self.assertTrue(mim.matrix_[i][j] == 'Y')
+ else:
+ self.assertTrue(mim.matrix_[i][j] == ' ')
+ self.assertTrue(mim.allocate(duts[3]).name == 'l3')
+
+ def test_random_generated(self):
+ n = 10
+ labels = []
+ duts = []
+ for i in range(10):
+ # generate a few machine names (possibly repeated) compatible with this label
+ l = MockLabel('l{}'.format(i), [])
+ r = random.random()
+ for _ in range(4):
+ t = int(r * 10) % n
+ r *= 10
+ l.remote.append('m{}'.format(t))
+ labels.append(l)
+ duts.append(MockDut('m{}'.format(i)))
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
+
+ def test_10x10_fully_random(self):
+ inp = ['X . . . X X . X X .', 'X X . X . X . X X .',
+ 'X X X . . X . X . X', 'X . X X . . X X . X',
+ 'X X X X . . . X . .', 'X X . X . X . . X .',
+ '. X . X . X X X . .', '. X . X X . X X . .',
+ 'X X . . . X X X . .', '. X X X X . . . . X']
+ output = ['X Y . . X X . X X .', 'X X Y X . X . X X .',
+ 'X X X Y . X . X . X', 'X . X X Y . X X . X',
+ 'X X X X . Y . X . .', 'X X . X . X Y . X .',
+ 'Y X . X . X X X . .', '. X . X X . X X Y .',
+ 'X X . . . X X X . Y', '. X X X X . . Y . X']
+ self.pattern_based_test(inp, output)
+
+ def test_10x10_fully_random2(self):
+ inp = ['X . X . . X . X X X', 'X X X X X X . . X .',
+ 'X . X X X X X . . X', 'X X X . X . X X . .',
+ '. X . X . X X X X X', 'X X X X X X X . . X',
+ 'X . X X X X X . . X', 'X X X . X X X X . .',
+ 'X X X . . . X X X X', '. X X . X X X . X X']
+ output = ['X . X Y . X . X X X', 'X X X X X X Y . X .',
+ 'X Y X X X X X . . X', 'X X X . X Y X X . .',
+ '. X Y X . X X X X X', 'X X X X X X X Y . X',
+ 'X . X X X X X . Y X', 'X X X . X X X X . Y',
+ 'X X X . Y . X X X X', 'Y X X . X X X . X X']
+ self.pattern_based_test(inp, output)
+
+ def test_3x4_with_allocation(self):
+ inp = ['X X . .', '. . X .', 'X . X .']
+ output = ['X X Y .', 'Y . X .', 'X Y X .']
+ mim = self.pattern_based_test(inp, output)
+ self.assertTrue(mim.allocate(mim.duts_[2]) == mim.labels_[0])
+ self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[2])
+ self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[1])
+ self.assertTrue(mim.allocate(mim.duts_[1]) == mim.labels_[2])
+ self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[1])
+ self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[0])
+ self.assertTrue(mim.allocate(mim.duts_[3]) is None)
+ self.assertTrue(mim.allocate(mim.duts_[2]) is None)
+ self.assertTrue(mim.allocate(mim.duts_[1]) == mim.labels_[1])
+ self.assertTrue(mim.allocate(mim.duts_[1]) is None)
+ self.assertTrue(mim.allocate(mim.duts_[0]) is None)
+ self.assertTrue(mim.label_duts_[0] == [2, 3])
+ self.assertTrue(mim.label_duts_[1] == [0, 3, 1])
+ self.assertTrue(mim.label_duts_[2] == [3, 1])
+ self.assertTrue(mim.allocate_log_ == [(0, 2), (2, 3), (1, 0), (2, 1),
+ (1, 3), (0, 3), (1, 1)])
+
+ def test_cornercase_1(self):
+ """This corner case is brought up by Caroline.
+
+ The description is -
+
+ If you have multiple labels and multiple machines, (so we don't
+ automatically fall into the 1 dut or 1 label case), but all of the
+ labels specify the same 1 remote, then instead of assigning the same
+ machine to all the labels, your algorithm fails to assign any...
+
+ So the first step is to create an initial matrix like the one below; l0,
+ l1 and l2 all specify the same single remote - m0.
+
+ m0 m1 m2
+ l0 . X X
+
+ l1 . X X
+
+ l2 . X X
+
+ The search process will be like this -
+ a) try to find a solution with at most 1 'Y' per column (but ensure at
+ least 1 Y per row), fail
+ b) try to find a solution with at most 2 'Y's per column (but ensure at
+ least 1 Y per row), fail
+ c) try to find a solution with at most 3 'Y's per column (but ensure at
+ least 1 Y per row), succeed, so we end up having this solution
+
+ m0 m1 m2
+ l0 Y X X
+
+ l1 Y X X
+
+ l2 Y X X
+ """
+
+ inp = ['. X X', '. X X', '. X X']
+ output = ['Y X X', 'Y X X', 'Y X X']
+ mim = self.pattern_based_test(inp, output)
+ self.assertTrue(mim.allocate(mim.duts_[1]) is None)
+ self.assertTrue(mim.allocate(mim.duts_[2]) is None)
+ self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[0])
+ self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[1])
+ self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[2])
+ self.assertTrue(mim.allocate(mim.duts_[0]) is None)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/crosperf/machine_manager.py b/crosperf/machine_manager.py
new file mode 100644
index 00000000..2fdf141b
--- /dev/null
+++ b/crosperf/machine_manager.py
@@ -0,0 +1,709 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Machine Manager module."""
+
+from __future__ import print_function
+
+import collections
+import file_lock_machine
+import hashlib
+import image_chromeos
+import math
+import os.path
+import re
+import sys
+import threading
+import time
+
+import test_flag
+from cros_utils import command_executer
+from cros_utils import logger
+
+CHECKSUM_FILE = '/usr/local/osimage_checksum_file'
+
+
+class BadChecksum(Exception):
+ """Raised if all machines for a label don't have the same checksum."""
+ pass
+
+
+class BadChecksumString(Exception):
+ """Raised if all machines for a label don't have the same checksum string."""
+ pass
+
+
+class MissingLocksDirectory(Exception):
+ """Raised when cannot find/access the machine locks directory."""
+
+
+class CrosCommandError(Exception):
+ """Raised when an error occurs running command on DUT."""
+
+
+class CrosMachine(object):
+ """The machine class."""
+
+ def __init__(self, name, chromeos_root, log_level, cmd_exec=None):
+ self.name = name
+ self.image = None
+ # We relate a dut with a label if we reimage the dut using label or we
+ # detect at the very beginning that the dut is running this label.
+ self.label = None
+ self.checksum = None
+ self.locked = False
+ self.released_time = time.time()
+ self.test_run = None
+ self.chromeos_root = chromeos_root
+ self.log_level = log_level
+ self.cpuinfo = None
+ self.machine_id = None
+ self.checksum_string = None
+ self.meminfo = None
+ self.phys_kbytes = None
+ self.ce = cmd_exec or command_executer.GetCommandExecuter(
+ log_level=self.log_level)
+ self.SetUpChecksumInfo()
+
+ def SetUpChecksumInfo(self):
+ if not self.IsReachable():
+ self.machine_checksum = None
+ return
+ self._GetMemoryInfo()
+ self._GetCPUInfo()
+ self._ComputeMachineChecksumString()
+ self._GetMachineID()
+ self.machine_checksum = self._GetMD5Checksum(self.checksum_string)
+ self.machine_id_checksum = self._GetMD5Checksum(self.machine_id)
+
+ def IsReachable(self):
+ command = 'ls'
+ ret = self.ce.CrosRunCommand(command,
+ machine=self.name,
+ chromeos_root=self.chromeos_root)
+ if ret:
+ return False
+ return True
+
+ def _ParseMemoryInfo(self):
+ line = self.meminfo.splitlines()[0]
+ usable_kbytes = int(line.split()[1])
+ # This code is from src/third_party/test/files/client/bin/base_utils.py
+ # usable_kbytes is system's usable DRAM in kbytes,
+ # as reported by memtotal() from device /proc/meminfo memtotal
+ # after Linux deducts 1.5% to 9.5% for system table overhead
+ # Undo the unknown actual deduction by rounding up
+ # to next small multiple of a big power-of-two
+ # eg 12GB - 5.1% gets rounded back up to 12GB
+ mindeduct = 0.005 # 0.5 percent
+ maxdeduct = 0.095 # 9.5 percent
+ # deduction range 1.5% .. 9.5% supports physical mem sizes
+ # 6GB .. 12GB in steps of .5GB
+ # 12GB .. 24GB in steps of 1 GB
+ # 24GB .. 48GB in steps of 2 GB ...
+ # Finer granularity in physical mem sizes would require
+ # tighter spread between min and max possible deductions
+
+ # increase mem size by at least min deduction, without rounding
+ min_kbytes = int(usable_kbytes / (1.0 - mindeduct))
+ # increase mem size further by 2**n rounding, by 0..roundKb or more
+ round_kbytes = int(usable_kbytes / (1.0 - maxdeduct)) - min_kbytes
+ # find the least binary roundup 2**n that covers the worst-case round_kbytes
+ mod2n = 1 << int(math.ceil(math.log(round_kbytes, 2)))
+ # have round_kbytes <= mod2n < round_kbytes*2
+ # round min_kbytes up to next multiple of mod2n
+ phys_kbytes = min_kbytes + mod2n - 1
+ phys_kbytes -= phys_kbytes % mod2n # clear low bits
+ self.phys_kbytes = phys_kbytes
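+ # Worked example (assuming /proc/meminfo reported usable_kbytes = 11941183,
+ # i.e. roughly 12GB minus a ~5.1% deduction):
+ #   min_kbytes   = int(11941183 / 0.995) = 12001189
+ #   round_kbytes = int(11941183 / 0.905) - 12001189 = 1193488
+ #   mod2n        = 1 << 21 = 2097152
+ #   phys_kbytes  = 12001189 + 2097152 - 1 = 14098340, rounded down to
+ #                  14098340 - (14098340 % 2097152) = 12582912 kB, i.e. 12GB.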
+
+ def _GetMemoryInfo(self):
+ # TODO(yunlian): when the machine is rebooting, it will not return
+ # meminfo; the assert below does not catch that either.
+ command = 'cat /proc/meminfo'
+ ret, self.meminfo, _ = self.ce.CrosRunCommandWOutput(
+ command,
+ machine=self.name,
+ chromeos_root=self.chromeos_root)
+ assert ret == 0, 'Could not get meminfo from machine: %s' % self.name
+ if ret == 0:
+ self._ParseMemoryInfo()
+
+ def _GetCPUInfo(self):
+ command = 'cat /proc/cpuinfo'
+ ret, self.cpuinfo, _ = self.ce.CrosRunCommandWOutput(
+ command,
+ machine=self.name,
+ chromeos_root=self.chromeos_root)
+ assert ret == 0, 'Could not get cpuinfo from machine: %s' % self.name
+
+ def _ComputeMachineChecksumString(self):
+ self.checksum_string = ''
+ exclude_lines_list = ['MHz', 'BogoMIPS', 'bogomips']
+ for line in self.cpuinfo.splitlines():
+ if not any(e in line for e in exclude_lines_list):
+ self.checksum_string += line
+ self.checksum_string += ' ' + str(self.phys_kbytes)
+
+ def _GetMD5Checksum(self, ss):
+ if ss:
+ return hashlib.md5(ss).hexdigest()
+ else:
+ return ''
+
+ def _GetMachineID(self):
+ command = 'dump_vpd_log --full --stdout'
+ _, if_out, _ = self.ce.CrosRunCommandWOutput(
+ command,
+ machine=self.name,
+ chromeos_root=self.chromeos_root)
+ b = if_out.splitlines()
+ a = [l for l in b if 'Product' in l]
+ if len(a):
+ self.machine_id = a[0]
+ return
+ command = 'ifconfig'
+ _, if_out, _ = self.ce.CrosRunCommandWOutput(
+ command,
+ machine=self.name,
+ chromeos_root=self.chromeos_root)
+ b = if_out.splitlines()
+ a = [l for l in b if 'HWaddr' in l]
+ if len(a):
+ self.machine_id = '_'.join(a)
+ return
+ a = [l for l in b if 'ether' in l]
+ if len(a):
+ self.machine_id = '_'.join(a)
+ return
+ assert 0, 'Could not get machine_id from machine: %s' % self.name
+
+ def __str__(self):
+ l = []
+ l.append(self.name)
+ l.append(str(self.image))
+ l.append(str(self.checksum))
+ l.append(str(self.locked))
+ l.append(str(self.released_time))
+ return ', '.join(l)
+
+
+class MachineManager(object):
+ """Lock, image and unlock machines locally for benchmark runs.
+
+ This class contains methods and calls to lock, unlock and image
+ machines and distribute machines to each benchmark run. The assumption is
+ that all of the machines for the experiment have been globally locked
+ (using an AFE server) in the ExperimentRunner, but the machines still need
+ to be locally locked/unlocked (allocated to benchmark runs) to prevent
+ multiple benchmark runs within the same experiment from trying to use the
+ same machine at the same time.
+ """
+
+ def __init__(self,
+ chromeos_root,
+ acquire_timeout,
+ log_level,
+ locks_dir,
+ cmd_exec=None,
+ lgr=None):
+ self._lock = threading.RLock()
+ self._all_machines = []
+ self._machines = []
+ self.image_lock = threading.Lock()
+ self.num_reimages = 0
+ self.chromeos_root = None
+ self.machine_checksum = {}
+ self.machine_checksum_string = {}
+ self.acquire_timeout = acquire_timeout
+ self.log_level = log_level
+ self.locks_dir = locks_dir
+ self.ce = cmd_exec or command_executer.GetCommandExecuter(
+ log_level=self.log_level)
+ self.logger = lgr or logger.GetLogger()
+
+ if self.locks_dir and not os.path.isdir(self.locks_dir):
+ raise MissingLocksDirectory('Cannot access locks directory: %s' %
+ self.locks_dir)
+
+ self._initialized_machines = []
+ self.chromeos_root = chromeos_root
+
+ def RemoveNonLockedMachines(self, locked_machines):
+ for m in self._all_machines:
+ if m.name not in locked_machines:
+ self._all_machines.remove(m)
+
+ for m in self._machines:
+ if m.name not in locked_machines:
+ self._machines.remove(m)
+
+ def GetChromeVersion(self, machine):
+ """Get the version of Chrome running on the DUT."""
+
+ cmd = '/opt/google/chrome/chrome --version'
+ ret, version, _ = self.ce.CrosRunCommandWOutput(
+ cmd,
+ machine=machine.name,
+ chromeos_root=self.chromeos_root)
+ if ret != 0:
+ raise CrosCommandError("Couldn't get Chrome version from %s." %
+ machine.name)
+
+ if ret != 0:
+ version = ''
+ return version.rstrip()
+
+ def ImageMachine(self, machine, label):
+ checksum = label.checksum
+
+ if checksum and (machine.checksum == checksum):
+ return
+ chromeos_root = label.chromeos_root
+ if not chromeos_root:
+ chromeos_root = self.chromeos_root
+ image_chromeos_args = [image_chromeos.__file__, '--no_lock',
+ '--chromeos_root=%s' % chromeos_root,
+ '--image=%s' % label.chromeos_image,
+ '--image_args=%s' % label.image_args, '--remote=%s' %
+ machine.name, '--logging_level=%s' % self.log_level]
+ if label.board:
+ image_chromeos_args.append('--board=%s' % label.board)
+
+ # Currently we can't image two machines at once,
+ # so we have to serialize on this lock.
+ save_ce_log_level = self.ce.log_level
+ if self.log_level != 'verbose':
+ self.ce.log_level = 'average'
+
+ with self.image_lock:
+ if self.log_level != 'verbose':
+ self.logger.LogOutput('Pushing image onto machine.')
+ self.logger.LogOutput('Running image_chromeos.DoImage with %s' %
+ ' '.join(image_chromeos_args))
+ retval = 0
+ if not test_flag.GetTestMode():
+ retval = image_chromeos.DoImage(image_chromeos_args)
+ if retval:
+ cmd = 'reboot && exit'
+ if self.log_level != 'verbose':
+ self.logger.LogOutput('reboot & exit.')
+ self.ce.CrosRunCommand(cmd,
+ machine=machine.name,
+ chromeos_root=self.chromeos_root)
+ time.sleep(60)
+ if self.log_level != 'verbose':
+ self.logger.LogOutput('Pushing image onto machine.')
+ self.logger.LogOutput('Running image_chromeos.DoImage with %s' %
+ ' '.join(image_chromeos_args))
+ retval = image_chromeos.DoImage(image_chromeos_args)
+ if retval:
+ raise RuntimeError("Could not image machine: '%s'." % machine.name)
+ else:
+ self.num_reimages += 1
+ machine.checksum = checksum
+ machine.image = label.chromeos_image
+ machine.label = label
+
+ if not label.chrome_version:
+ label.chrome_version = self.GetChromeVersion(machine)
+
+ self.ce.log_level = save_ce_log_level
+ return retval
+
+ def ComputeCommonCheckSum(self, label):
+ # Since this is used for cache lookups before the machines have been
+ # compared/verified, check here to make sure they all have the same
+ # checksum (otherwise the cache lookup may not be valid).
+ common_checksum = None
+ for machine in self.GetMachines(label):
+ # Make sure the machine's checksums are calculated.
+ if not machine.machine_checksum:
+ machine.SetUpChecksumInfo()
+ cs = machine.machine_checksum
+ # If this is the first machine we've examined, initialize
+ # common_checksum.
+ if not common_checksum:
+ common_checksum = cs
+ # Make sure this machine's checksum matches our 'common' checksum.
+ if cs != common_checksum:
+ raise BadChecksum('Machine checksums do not match!')
+ self.machine_checksum[label.name] = common_checksum
+
+ def ComputeCommonCheckSumString(self, label):
+ # The assumption is that this function is only called AFTER
+ # ComputeCommonCheckSum, so there is no need to verify the machines
+ # are the same here. If this is ever changed, this function should be
+ # modified to verify that all the machines for a given label are the
+ # same.
+ for machine in self.GetMachines(label):
+ if machine.checksum_string:
+ self.machine_checksum_string[label.name] = machine.checksum_string
+ break
+
+ def _TryToLockMachine(self, cros_machine):
+ with self._lock:
+ assert cros_machine, "Machine can't be None"
+ for m in self._machines:
+ if m.name == cros_machine.name:
+ return
+ locked = True
+ if self.locks_dir:
+ locked = file_lock_machine.Machine(cros_machine.name,
+ self.locks_dir).Lock(True,
+ sys.argv[0])
+ if locked:
+ self._machines.append(cros_machine)
+ command = 'cat %s' % CHECKSUM_FILE
+ ret, out, _ = self.ce.CrosRunCommandWOutput(
+ command,
+ chromeos_root=self.chromeos_root,
+ machine=cros_machine.name)
+ if ret == 0:
+ cros_machine.checksum = out.strip()
+ elif self.locks_dir:
+ self.logger.LogOutput("Couldn't lock: %s" % cros_machine.name)
+
+ # This is called from single threaded mode.
+ def AddMachine(self, machine_name):
+ with self._lock:
+ for m in self._all_machines:
+ assert m.name != machine_name, 'Tried to double-add %s' % machine_name
+
+ if self.log_level != 'verbose':
+ self.logger.LogOutput('Setting up remote access to %s' % machine_name)
+ self.logger.LogOutput('Checking machine characteristics for %s' %
+ machine_name)
+ cm = CrosMachine(machine_name, self.chromeos_root, self.log_level)
+ if cm.machine_checksum:
+ self._all_machines.append(cm)
+
+ def RemoveMachine(self, machine_name):
+ with self._lock:
+ self._machines = [m for m in self._machines if m.name != machine_name]
+ if self.locks_dir:
+ res = file_lock_machine.Machine(machine_name,
+ self.locks_dir).Unlock(True)
+ if not res:
+ self.logger.LogError("Could not unlock machine: '%s'." % machine_name)
+
+ def ForceSameImageToAllMachines(self, label):
+ machines = self.GetMachines(label)
+ for m in machines:
+ self.ImageMachine(m, label)
+ m.SetUpChecksumInfo()
+
+ def AcquireMachine(self, label):
+ image_checksum = label.checksum
+ machines = self.GetMachines(label)
+ check_interval_time = 120
+ with self._lock:
+ # Lazily external lock machines
+ while self.acquire_timeout >= 0:
+ for m in machines:
+ new_machine = m not in self._all_machines
+ self._TryToLockMachine(m)
+ if new_machine:
+ m.released_time = time.time()
+ if self.GetAvailableMachines(label):
+ break
+ sleep_time = max(1, min(self.acquire_timeout, check_interval_time))
+ time.sleep(sleep_time)
+ self.acquire_timeout -= sleep_time
+
+ if self.acquire_timeout < 0:
+ self.logger.LogFatal('Could not acquire any of the '
+ "following machines: '%s'" %
+ ', '.join(machine.name for machine in machines))
+
+### for m in self._machines:
+### if (m.locked and time.time() - m.released_time < 10 and
+### m.checksum == image_checksum):
+### return None
+ unlocked_machines = [machine
+ for machine in self.GetAvailableMachines(label)
+ if not machine.locked]
+ for m in unlocked_machines:
+ if image_checksum and m.checksum == image_checksum:
+ m.locked = True
+ m.test_run = threading.current_thread()
+ return m
+ for m in unlocked_machines:
+ if not m.checksum:
+ m.locked = True
+ m.test_run = threading.current_thread()
+ return m
+      # This logic ensures that threads waiting on a machine are given
+      # preference for a machine whose checksum matches their image. This
+      # saves time when crosperf initially assigns the machines to threads by
+      # minimizing the number of re-images.
+      # TODO(asharif): If we centralize the thread-scheduler, we won't need
+      # this code and can implement minimal reimaging code more cleanly.
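+      # A machine idle for more than 15 seconds is treated as newly released
+      # (its timer is reset); one idle for 8-15 seconds has had no
+      # matching-image taker, so it is handed to this thread for reimaging.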
+ for m in unlocked_machines:
+ if time.time() - m.released_time > 15:
+          # The release-time gap is too large, so the machine is probably
+          # still in its startup stage; reset released_time.
+ m.released_time = time.time()
+ elif time.time() - m.released_time > 8:
+ m.locked = True
+ m.test_run = threading.current_thread()
+ return m
+ return None
+
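+  # Note: _machines holds only the machines this manager has locked via
+  # _TryToLockMachine, while _all_machines holds every machine added through
+  # AddMachine.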
+ def GetAvailableMachines(self, label=None):
+ if not label:
+ return self._machines
+ return [m for m in self._machines if m.name in label.remote]
+
+ def GetMachines(self, label=None):
+ if not label:
+ return self._all_machines
+ return [m for m in self._all_machines if m.name in label.remote]
+
+ def ReleaseMachine(self, machine):
+ with self._lock:
+ for m in self._machines:
+ if machine.name == m.name:
+ assert m.locked, 'Tried to double-release %s' % m.name
+ m.released_time = time.time()
+ m.locked = False
+ m.status = 'Available'
+ break
+
+ def Cleanup(self):
+ with self._lock:
+ # Unlock all machines (via file lock)
+ for m in self._machines:
+ res = file_lock_machine.Machine(m.name, self.locks_dir).Unlock(True)
+
+ if not res:
+ self.logger.LogError("Could not unlock machine: '%s'." % m.name)
+
+ def __str__(self):
+ with self._lock:
+ l = ['MachineManager Status:'] + [str(m) for m in self._machines]
+ return '\n'.join(l)
+
+ def AsString(self):
+ with self._lock:
+ stringify_fmt = '%-30s %-10s %-4s %-25s %-32s'
+ header = stringify_fmt % ('Machine', 'Thread', 'Lock', 'Status',
+ 'Checksum')
+ table = [header]
+ for m in self._machines:
+ if m.test_run:
+ test_name = m.test_run.name
+ test_status = m.test_run.timeline.GetLastEvent()
+ else:
+ test_name = ''
+ test_status = ''
+
+ try:
+ machine_string = stringify_fmt % (m.name, test_name, m.locked,
+ test_status, m.checksum)
+ except ValueError:
+ machine_string = ''
+ table.append(machine_string)
+ return 'Machine Status:\n%s' % '\n'.join(table)
+
+ def GetAllCPUInfo(self, labels):
+ """Get cpuinfo for labels, merge them if their cpuinfo are the same."""
+ dic = collections.defaultdict(list)
+ for label in labels:
+ for machine in self._all_machines:
+ if machine.name in label.remote:
+ dic[machine.cpuinfo].append(label.name)
+ break
+ output_segs = []
+ for key, v in dic.iteritems():
+ output = ' '.join(v)
+ output += '\n-------------------\n'
+ output += key
+ output += '\n\n\n'
+ output_segs.append(output)
+ return ''.join(output_segs)
+
+ def GetAllMachines(self):
+ return self._all_machines
+
+
+class MockCrosMachine(CrosMachine):
+ """Mock cros machine class."""
+ # pylint: disable=super-init-not-called
+
+ MEMINFO_STRING = """MemTotal: 3990332 kB
+MemFree: 2608396 kB
+Buffers: 147168 kB
+Cached: 811560 kB
+SwapCached: 0 kB
+Active: 503480 kB
+Inactive: 628572 kB
+Active(anon): 174532 kB
+Inactive(anon): 88576 kB
+Active(file): 328948 kB
+Inactive(file): 539996 kB
+Unevictable: 0 kB
+Mlocked: 0 kB
+SwapTotal: 5845212 kB
+SwapFree: 5845212 kB
+Dirty: 9384 kB
+Writeback: 0 kB
+AnonPages: 173408 kB
+Mapped: 146268 kB
+Shmem: 89676 kB
+Slab: 188260 kB
+SReclaimable: 169208 kB
+SUnreclaim: 19052 kB
+KernelStack: 2032 kB
+PageTables: 7120 kB
+NFS_Unstable: 0 kB
+Bounce: 0 kB
+WritebackTmp: 0 kB
+CommitLimit: 7840376 kB
+Committed_AS: 1082032 kB
+VmallocTotal: 34359738367 kB
+VmallocUsed: 364980 kB
+VmallocChunk: 34359369407 kB
+DirectMap4k: 45824 kB
+DirectMap2M: 4096000 kB
+"""
+
+ CPUINFO_STRING = """processor: 0
+vendor_id: GenuineIntel
+cpu family: 6
+model: 42
+model name: Intel(R) Celeron(R) CPU 867 @ 1.30GHz
+stepping: 7
+microcode: 0x25
+cpu MHz: 1300.000
+cache size: 2048 KB
+physical id: 0
+siblings: 2
+core id: 0
+cpu cores: 2
+apicid: 0
+initial apicid: 0
+fpu: yes
+fpu_exception: yes
+cpuid level: 13
+wp: yes
+flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer xsave lahf_lm arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid
+bogomips: 2594.17
+clflush size: 64
+cache_alignment: 64
+address sizes: 36 bits physical, 48 bits virtual
+power management:
+
+processor: 1
+vendor_id: GenuineIntel
+cpu family: 6
+model: 42
+model name: Intel(R) Celeron(R) CPU 867 @ 1.30GHz
+stepping: 7
+microcode: 0x25
+cpu MHz: 1300.000
+cache size: 2048 KB
+physical id: 0
+siblings: 2
+core id: 1
+cpu cores: 2
+apicid: 2
+initial apicid: 2
+fpu: yes
+fpu_exception: yes
+cpuid level: 13
+wp: yes
+flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer xsave lahf_lm arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid
+bogomips: 2594.17
+clflush size: 64
+cache_alignment: 64
+address sizes: 36 bits physical, 48 bits virtual
+power management:
+"""
+
+ def __init__(self, name, chromeos_root, log_level):
+ self.name = name
+ self.image = None
+ self.checksum = None
+ self.locked = False
+ self.released_time = time.time()
+ self.test_run = None
+ self.chromeos_root = chromeos_root
+ self.checksum_string = re.sub(r'\d', '', name)
+    # In tests, we assume "lumpy1" and "lumpy2" are the same machine.
+ self.machine_checksum = self._GetMD5Checksum(self.checksum_string)
+ self.log_level = log_level
+ self.label = None
+ self.ce = command_executer.GetCommandExecuter(log_level=self.log_level)
+ self._GetCPUInfo()
+
+ def IsReachable(self):
+ return True
+
+ def _GetMemoryInfo(self):
+ self.meminfo = self.MEMINFO_STRING
+ self._ParseMemoryInfo()
+
+ def _GetCPUInfo(self):
+ self.cpuinfo = self.CPUINFO_STRING
+
+
+class MockMachineManager(MachineManager):
+ """Mock machine manager class."""
+
+ def __init__(self, chromeos_root, acquire_timeout, log_level, locks_dir):
+ super(MockMachineManager, self).__init__(
+ chromeos_root, acquire_timeout, log_level, locks_dir)
+
+ def _TryToLockMachine(self, cros_machine):
+ self._machines.append(cros_machine)
+ cros_machine.checksum = ''
+
+ def AddMachine(self, machine_name):
+ with self._lock:
+ for m in self._all_machines:
+ assert m.name != machine_name, 'Tried to double-add %s' % machine_name
+ cm = MockCrosMachine(machine_name, self.chromeos_root, self.log_level)
+ assert cm.machine_checksum, ('Could not find checksum for machine %s' %
+ machine_name)
+      # In the original MachineManager, the check is 'if cm.machine_checksum:'
+      # - if a machine is unreachable, its machine_checksum is None. We cannot
+      # do that here because machine_checksum is always faked, so we test
+      # cm.IsReachable directly, which is properly mocked.
+ if cm.IsReachable():
+ self._all_machines.append(cm)
+
+ def GetChromeVersion(self, machine):
+ return 'Mock Chrome Version R50'
+
+ def AcquireMachine(self, label):
+ for machine in self._all_machines:
+ if not machine.locked:
+ machine.locked = True
+ return machine
+ return None
+
+ def ImageMachine(self, machine_name, label):
+ if machine_name or label:
+ return 0
+ return 1
+
+ def ReleaseMachine(self, machine):
+ machine.locked = False
+
+ def GetMachines(self, label=None):
+ return self._all_machines
+
+ def GetAvailableMachines(self, label=None):
+ return self._all_machines
+
+ def ForceSameImageToAllMachines(self, label=None):
+ return 0
+
+ def ComputeCommonCheckSum(self, label=None):
+ common_checksum = 12345
+ for machine in self.GetMachines(label):
+ machine.machine_checksum = common_checksum
+ self.machine_checksum[label.name] = common_checksum
+
+ def GetAllMachines(self):
+ return self._all_machines
diff --git a/crosperf/machine_manager_unittest.py b/crosperf/machine_manager_unittest.py
new file mode 100755
index 00000000..8652f171
--- /dev/null
+++ b/crosperf/machine_manager_unittest.py
@@ -0,0 +1,845 @@
+#!/usr/bin/env python2
+
+# Copyright 2012 Google Inc. All Rights Reserved.
+"""Unittest for machine_manager."""
+
+from __future__ import print_function
+
+import os.path
+import time
+import hashlib
+
+import mock
+import unittest
+
+import label
+import machine_manager
+import image_checksummer
+import test_flag
+
+from benchmark import Benchmark
+from benchmark_run import MockBenchmarkRun
+from cros_utils import command_executer
+from cros_utils import logger
+
+# pylint: disable=protected-access
+
+
+class MyMachineManager(machine_manager.MachineManager):
+ """Machine manager for test."""
+
+ def __init__(self, chromeos_root):
+ super(MyMachineManager, self).__init__(chromeos_root, 0, 'average', '')
+
+ def _TryToLockMachine(self, cros_machine):
+ self._machines.append(cros_machine)
+ cros_machine.checksum = ''
+
+ def AddMachine(self, machine_name):
+ with self._lock:
+ for m in self._all_machines:
+ assert m.name != machine_name, 'Tried to double-add %s' % machine_name
+ cm = machine_manager.MockCrosMachine(machine_name, self.chromeos_root,
+ 'average')
+ assert cm.machine_checksum, ('Could not find checksum for machine %s' %
+ machine_name)
+ self._all_machines.append(cm)
+
+
+CHROMEOS_ROOT = '/tmp/chromeos-root'
+MACHINE_NAMES = ['lumpy1', 'lumpy2', 'lumpy3', 'daisy1', 'daisy2']
+LABEL_LUMPY = label.MockLabel('lumpy', 'lumpy_chromeos_image', 'autotest_dir',
+                              CHROMEOS_ROOT, 'lumpy',
+                              ['lumpy1', 'lumpy2', 'lumpy3', 'lumpy4'], '', '',
+                              False, 'average', 'gcc', None)
+LABEL_MIX = label.MockLabel('mix', 'chromeos_image', 'autotest_dir',
+ CHROMEOS_ROOT, 'mix',
+ ['daisy1', 'daisy2', 'lumpy3', 'lumpy4'], '', '',
+ False, 'average', 'gcc', None)
+
+
+class MachineManagerTest(unittest.TestCase):
+ """Test for machine manager class."""
+
+ msgs = []
+ image_log = []
+ log_fatal_msgs = []
+ fake_logger_count = 0
+ fake_logger_msgs = []
+
+ mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+
+ mock_logger = mock.Mock(spec=logger.Logger)
+
+ mock_lumpy1 = mock.Mock(spec=machine_manager.CrosMachine)
+ mock_lumpy2 = mock.Mock(spec=machine_manager.CrosMachine)
+ mock_lumpy3 = mock.Mock(spec=machine_manager.CrosMachine)
+ mock_lumpy4 = mock.Mock(spec=machine_manager.CrosMachine)
+ mock_daisy1 = mock.Mock(spec=machine_manager.CrosMachine)
+ mock_daisy2 = mock.Mock(spec=machine_manager.CrosMachine)
+
+ @mock.patch.object(os.path, 'isdir')
+
+ # pylint: disable=arguments-differ
+ def setUp(self, mock_isdir):
+
+ mock_isdir.return_value = True
+ self.mm = machine_manager.MachineManager('/usr/local/chromeos', 0,
+ 'average', None,
+ self.mock_cmd_exec,
+ self.mock_logger)
+
+ self.mock_lumpy1.name = 'lumpy1'
+ self.mock_lumpy2.name = 'lumpy2'
+ self.mock_lumpy3.name = 'lumpy3'
+ self.mock_lumpy4.name = 'lumpy4'
+ self.mock_daisy1.name = 'daisy1'
+ self.mock_daisy2.name = 'daisy2'
+ self.mock_lumpy1.machine_checksum = 'lumpy123'
+ self.mock_lumpy2.machine_checksum = 'lumpy123'
+ self.mock_lumpy3.machine_checksum = 'lumpy123'
+ self.mock_lumpy4.machine_checksum = 'lumpy123'
+ self.mock_daisy1.machine_checksum = 'daisy12'
+ self.mock_daisy2.machine_checksum = 'daisy12'
+ self.mock_lumpy1.checksum_string = 'lumpy_checksum_str'
+ self.mock_lumpy2.checksum_string = 'lumpy_checksum_str'
+ self.mock_lumpy3.checksum_string = 'lumpy_checksum_str'
+ self.mock_lumpy4.checksum_string = 'lumpy_checksum_str'
+ self.mock_daisy1.checksum_string = 'daisy_checksum_str'
+ self.mock_daisy2.checksum_string = 'daisy_checksum_str'
+ self.mock_lumpy1.cpuinfo = 'lumpy_cpu_info'
+ self.mock_lumpy2.cpuinfo = 'lumpy_cpu_info'
+ self.mock_lumpy3.cpuinfo = 'lumpy_cpu_info'
+ self.mock_lumpy4.cpuinfo = 'lumpy_cpu_info'
+ self.mock_daisy1.cpuinfo = 'daisy_cpu_info'
+ self.mock_daisy2.cpuinfo = 'daisy_cpu_info'
+ self.mm._all_machines.append(self.mock_daisy1)
+ self.mm._all_machines.append(self.mock_daisy2)
+ self.mm._all_machines.append(self.mock_lumpy1)
+ self.mm._all_machines.append(self.mock_lumpy2)
+ self.mm._all_machines.append(self.mock_lumpy3)
+
+ def testGetMachines(self):
+ manager = MyMachineManager(CHROMEOS_ROOT)
+ for m in MACHINE_NAMES:
+ manager.AddMachine(m)
+ names = [m.name for m in manager.GetMachines(LABEL_LUMPY)]
+ self.assertEqual(names, ['lumpy1', 'lumpy2', 'lumpy3'])
+
+ def testGetAvailableMachines(self):
+ manager = MyMachineManager(CHROMEOS_ROOT)
+ for m in MACHINE_NAMES:
+ manager.AddMachine(m)
+ for m in manager._all_machines:
+ if int(m.name[-1]) % 2:
+ manager._TryToLockMachine(m)
+ names = [m.name for m in manager.GetAvailableMachines(LABEL_LUMPY)]
+ self.assertEqual(names, ['lumpy1', 'lumpy3'])
+
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
+ @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
+ @mock.patch.object(image_checksummer.ImageChecksummer, 'Checksum')
+ def test_image_machine(self, mock_checksummer, mock_run_croscmd, mock_run_cmd,
+ mock_sleep):
+
+ def FakeMD5Checksum(_input_str):
+ return 'machine_fake_md5_checksum'
+
+ self.fake_logger_count = 0
+ self.fake_logger_msgs = []
+
+ def FakeLogOutput(msg):
+ self.fake_logger_count += 1
+ self.fake_logger_msgs.append(msg)
+
+ def ResetValues():
+ self.fake_logger_count = 0
+ self.fake_logger_msgs = []
+ mock_run_cmd.reset_mock()
+ mock_run_croscmd.reset_mock()
+ mock_checksummer.reset_mock()
+ mock_sleep.reset_mock()
+ machine.checksum = 'fake_md5_checksum'
+ self.mm.checksum = None
+ self.mm.num_reimages = 0
+
+ self.mock_cmd_exec.CrosRunCommand = mock_run_croscmd
+ self.mock_cmd_exec.RunCommand = mock_run_cmd
+
+ self.mm.logger.LogOutput = FakeLogOutput
+ machine = self.mock_lumpy1
+ machine._GetMD5Checksum = FakeMD5Checksum
+ machine.checksum = 'fake_md5_checksum'
+ mock_checksummer.return_value = 'fake_md5_checksum'
+ self.mock_cmd_exec.log_level = 'verbose'
+
+ test_flag.SetTestMode(True)
+ # Test 1: label.image_type == "local"
+ LABEL_LUMPY.image_type = 'local'
+ self.mm.ImageMachine(machine, LABEL_LUMPY)
+ self.assertEqual(mock_run_cmd.call_count, 0)
+ self.assertEqual(mock_run_croscmd.call_count, 0)
+
+    # Test 2: label.image_type == "trybot"
+ ResetValues()
+ LABEL_LUMPY.image_type = 'trybot'
+ mock_run_cmd.return_value = 0
+ self.mm.ImageMachine(machine, LABEL_LUMPY)
+ self.assertEqual(mock_run_croscmd.call_count, 0)
+ self.assertEqual(mock_checksummer.call_count, 0)
+
+ # Test 3: label.image_type is neither local nor trybot; retval from
+ # RunCommand is 1, i.e. image_chromeos fails...
+ ResetValues()
+ LABEL_LUMPY.image_type = 'other'
+ mock_run_cmd.return_value = 1
+ try:
+ self.mm.ImageMachine(machine, LABEL_LUMPY)
+ except RuntimeError:
+ self.assertEqual(mock_checksummer.call_count, 0)
+ self.assertEqual(mock_run_cmd.call_count, 2)
+ self.assertEqual(mock_run_croscmd.call_count, 1)
+ self.assertEqual(mock_sleep.call_count, 1)
+ image_call_args_str = mock_run_cmd.call_args[0][0]
+ image_call_args = image_call_args_str.split(' ')
+ self.assertEqual(image_call_args[0], 'python')
+ self.assertEqual(image_call_args[1].split('/')[-1], 'image_chromeos.pyc')
+ image_call_args = image_call_args[2:]
+ self.assertEqual(image_call_args, [
+ '--chromeos_root=/tmp/chromeos-root', '--image=lumpy_chromeos_image',
+ '--image_args=', '--remote=lumpy1', '--logging_level=average',
+ '--board=lumpy'
+ ])
+ self.assertEqual(mock_run_croscmd.call_args[0][0], 'reboot && exit')
+
+ # Test 4: Everything works properly. Trybot image type.
+ ResetValues()
+ LABEL_LUMPY.image_type = 'trybot'
+ mock_run_cmd.return_value = 0
+ self.mm.ImageMachine(machine, LABEL_LUMPY)
+ self.assertEqual(mock_checksummer.call_count, 0)
+ self.assertEqual(mock_run_croscmd.call_count, 0)
+ self.assertEqual(mock_sleep.call_count, 0)
+
+ def test_compute_common_checksum(self):
+
+ self.mm.machine_checksum = {}
+ self.mm.ComputeCommonCheckSum(LABEL_LUMPY)
+ self.assertEqual(self.mm.machine_checksum['lumpy'], 'lumpy123')
+ self.assertEqual(len(self.mm.machine_checksum), 1)
+
+ self.mm.machine_checksum = {}
+ self.assertRaises(machine_manager.BadChecksum,
+ self.mm.ComputeCommonCheckSum, LABEL_MIX)
+
+ def test_compute_common_checksum_string(self):
+ self.mm.machine_checksum_string = {}
+ self.mm.ComputeCommonCheckSumString(LABEL_LUMPY)
+ self.assertEqual(len(self.mm.machine_checksum_string), 1)
+ self.assertEqual(self.mm.machine_checksum_string['lumpy'],
+ 'lumpy_checksum_str')
+
+ self.mm.machine_checksum_string = {}
+ self.mm.ComputeCommonCheckSumString(LABEL_MIX)
+ self.assertEqual(len(self.mm.machine_checksum_string), 1)
+ self.assertEqual(self.mm.machine_checksum_string['mix'],
+ 'daisy_checksum_str')
+
+ @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
+ def test_try_to_lock_machine(self, mock_cros_runcmd):
+ self.assertRaises(self.mm._TryToLockMachine, None)
+
+ mock_cros_runcmd.return_value = [0, 'false_lock_checksum', '']
+ self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd
+ self.mm._machines = []
+ self.mm._TryToLockMachine(self.mock_lumpy1)
+ self.assertEqual(len(self.mm._machines), 1)
+ self.assertEqual(self.mm._machines[0], self.mock_lumpy1)
+ self.assertEqual(self.mock_lumpy1.checksum, 'false_lock_checksum')
+ self.assertEqual(mock_cros_runcmd.call_count, 1)
+ cmd_str = mock_cros_runcmd.call_args[0][0]
+ self.assertEqual(cmd_str, 'cat /usr/local/osimage_checksum_file')
+ args_dict = mock_cros_runcmd.call_args[1]
+ self.assertEqual(len(args_dict), 2)
+ self.assertEqual(args_dict['machine'], self.mock_lumpy1.name)
+ self.assertEqual(args_dict['chromeos_root'], '/usr/local/chromeos')
+
+ @mock.patch.object(machine_manager, 'CrosMachine')
+ def test_add_machine(self, mock_machine):
+
+ mock_machine.machine_checksum = 'daisy123'
+ self.assertEqual(len(self.mm._all_machines), 5)
+ self.mm.AddMachine('daisy3')
+ self.assertEqual(len(self.mm._all_machines), 6)
+
+ self.assertRaises(Exception, self.mm.AddMachine, 'lumpy1')
+
+ def test_remove_machine(self):
+ self.mm._machines = self.mm._all_machines
+ self.assertTrue(self.mock_lumpy2 in self.mm._machines)
+ self.mm.RemoveMachine(self.mock_lumpy2.name)
+ self.assertFalse(self.mock_lumpy2 in self.mm._machines)
+
+ def test_force_same_image_to_all_machines(self):
+ self.image_log = []
+
+ def FakeImageMachine(machine, label_arg):
+ image = label_arg.chromeos_image
+ self.image_log.append('Pushed %s onto %s' % (image, machine.name))
+
+ def FakeSetUpChecksumInfo():
+ pass
+
+ self.mm.ImageMachine = FakeImageMachine
+ self.mock_lumpy1.SetUpChecksumInfo = FakeSetUpChecksumInfo
+ self.mock_lumpy2.SetUpChecksumInfo = FakeSetUpChecksumInfo
+ self.mock_lumpy3.SetUpChecksumInfo = FakeSetUpChecksumInfo
+
+ self.mm.ForceSameImageToAllMachines(LABEL_LUMPY)
+ self.assertEqual(len(self.image_log), 3)
+ self.assertEqual(self.image_log[0],
+ 'Pushed lumpy_chromeos_image onto lumpy1')
+ self.assertEqual(self.image_log[1],
+ 'Pushed lumpy_chromeos_image onto lumpy2')
+ self.assertEqual(self.image_log[2],
+ 'Pushed lumpy_chromeos_image onto lumpy3')
+
+ @mock.patch.object(image_checksummer.ImageChecksummer, 'Checksum')
+ @mock.patch.object(hashlib, 'md5')
+ def test_acquire_machine(self, mock_md5, mock_checksum):
+
+ self.msgs = []
+ self.log_fatal_msgs = []
+
+ def FakeLock(machine):
+ self.msgs.append('Tried to lock %s' % machine.name)
+
+ def FakeLogFatal(msg):
+ self.log_fatal_msgs.append(msg)
+
+ self.mm._TryToLockMachine = FakeLock
+ self.mm.logger.LogFatal = FakeLogFatal
+
+ mock_md5.return_value = '123456'
+ mock_checksum.return_value = 'fake_md5_checksum'
+
+ self.mm._machines = self.mm._all_machines
+ self.mock_lumpy1.locked = True
+ self.mock_lumpy2.locked = True
+ self.mock_lumpy3.locked = False
+ self.mock_lumpy3.checksum = 'fake_md5_checksum'
+ self.mock_daisy1.locked = True
+ self.mock_daisy2.locked = False
+ self.mock_daisy2.checksum = 'fake_md5_checksum'
+
+ self.mock_lumpy1.released_time = time.time()
+ self.mock_lumpy2.released_time = time.time()
+ self.mock_lumpy3.released_time = time.time()
+ self.mock_daisy1.released_time = time.time()
+ self.mock_daisy2.released_time = time.time()
+
+ # Test 1. Basic test. Acquire lumpy3.
+ self.mm.AcquireMachine(LABEL_LUMPY)
+ m = self.mock_lumpy1
+ self.assertEqual(m, self.mock_lumpy1)
+ self.assertTrue(self.mock_lumpy1.locked)
+ self.assertEqual(mock_md5.call_count, 0)
+ self.assertEqual(self.msgs, [
+ 'Tried to lock lumpy1', 'Tried to lock lumpy2', 'Tried to lock lumpy3'
+ ])
+
+    # Test the second return statement (machine is unlocked, has no checksum).
+ save_locked = self.mock_lumpy1.locked
+ self.mock_lumpy1.locked = False
+ self.mock_lumpy1.checksum = None
+ m = self.mm.AcquireMachine(LABEL_LUMPY)
+ self.assertEqual(m, self.mock_lumpy1)
+ self.assertTrue(self.mock_lumpy1.locked)
+
+ # Test the third return statement:
+ # - machine is unlocked
+ # - checksums don't match
+    # - current time minus release time is just over 8 seconds.
+ self.mock_lumpy1.locked = False
+ self.mock_lumpy1.checksum = '123'
+ self.mock_lumpy1.released_time = time.time() - 8
+ m = self.mm.AcquireMachine(LABEL_LUMPY)
+ self.assertEqual(m, self.mock_lumpy1)
+ self.assertTrue(self.mock_lumpy1.locked)
+
+ # Test all machines are already locked.
+ m = self.mm.AcquireMachine(LABEL_LUMPY)
+ self.assertIsNone(m)
+
+ # Restore values of mock_lumpy1, so other tests succeed.
+ self.mock_lumpy1.locked = save_locked
+ self.mock_lumpy1.checksum = '123'
+
+ def test_get_available_machines(self):
+ self.mm._machines = self.mm._all_machines
+
+ machine_list = self.mm.GetAvailableMachines()
+ self.assertEqual(machine_list, self.mm._all_machines)
+
+ machine_list = self.mm.GetAvailableMachines(LABEL_MIX)
+ self.assertEqual(machine_list,
+ [self.mock_daisy1, self.mock_daisy2, self.mock_lumpy3])
+
+ machine_list = self.mm.GetAvailableMachines(LABEL_LUMPY)
+ self.assertEqual(machine_list,
+ [self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3])
+
+ def test_get_machines(self):
+ machine_list = self.mm.GetMachines()
+ self.assertEqual(machine_list, self.mm._all_machines)
+
+ machine_list = self.mm.GetMachines(LABEL_MIX)
+ self.assertEqual(machine_list,
+ [self.mock_daisy1, self.mock_daisy2, self.mock_lumpy3])
+
+ machine_list = self.mm.GetMachines(LABEL_LUMPY)
+ self.assertEqual(machine_list,
+ [self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3])
+
+ def test_release_machines(self):
+
+ self.mm._machines = [self.mock_lumpy1, self.mock_daisy2]
+
+ self.mock_lumpy1.locked = True
+ self.mock_daisy2.locked = True
+
+ self.assertTrue(self.mock_lumpy1.locked)
+ self.mm.ReleaseMachine(self.mock_lumpy1)
+ self.assertFalse(self.mock_lumpy1.locked)
+ self.assertEqual(self.mock_lumpy1.status, 'Available')
+
+ self.assertTrue(self.mock_daisy2.locked)
+ self.mm.ReleaseMachine(self.mock_daisy2)
+ self.assertFalse(self.mock_daisy2.locked)
+ self.assertEqual(self.mock_daisy2.status, 'Available')
+
+    # Test double-release...
+ self.assertRaises(AssertionError, self.mm.ReleaseMachine, self.mock_lumpy1)
+
+ def test_cleanup(self):
+ self.mock_logger.reset_mock()
+ self.mm.Cleanup()
+ self.assertEqual(self.mock_logger.call_count, 0)
+
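+  # Expected AsString() output for the mock machines and MockBenchmarkRun
+  # configured in test_as_string below.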
+ OUTPUT_STR = ('Machine Status:\nMachine Thread '
+ 'Lock Status Checksum'
+ ' \nlumpy1 test '
+ 'run True PENDING 123'
+ ' \nlumpy2 '
+ 'test run False PENDING 123'
+ ' \nlumpy3 '
+ 'test run False PENDING 123'
+ ' \ndaisy1 '
+ 'test run False PENDING 678'
+ ' \ndaisy2 '
+ 'test run True PENDING 678'
+ ' ')
+
+ def test_as_string(self):
+
+ mock_logger = mock.Mock(spec=logger.Logger)
+
+ bench = Benchmark(
+ 'page_cycler_v2.netsim.top_10', # name
+ 'page_cycler_v2.netsim.top_10', # test_name
+ '', # test_args
+        1,  # iterations
+ False, # rm_chroot_tmp
+ '', # perf_args
+ suite='telemetry_Crosperf') # suite
+
+ test_run = MockBenchmarkRun('test run', bench, LABEL_LUMPY, 1, [], self.mm,
+ mock_logger, 'verbose', '')
+
+ self.mm._machines = [
+ self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3, self.mock_daisy1,
+ self.mock_daisy2
+ ]
+
+ self.mock_lumpy1.test_run = test_run
+ self.mock_lumpy2.test_run = test_run
+ self.mock_lumpy3.test_run = test_run
+ self.mock_daisy1.test_run = test_run
+ self.mock_daisy2.test_run = test_run
+
+ self.mock_lumpy1.locked = True
+ self.mock_lumpy2.locked = False
+ self.mock_lumpy3.locked = False
+ self.mock_daisy1.locked = False
+ self.mock_daisy2.locked = True
+
+ self.mock_lumpy1.checksum = '123'
+ self.mock_lumpy2.checksum = '123'
+ self.mock_lumpy3.checksum = '123'
+ self.mock_daisy1.checksum = '678'
+ self.mock_daisy2.checksum = '678'
+
+ output = self.mm.AsString()
+ self.assertEqual(output, self.OUTPUT_STR)
+
+ def test_get_all_cpu_info(self):
+ info = self.mm.GetAllCPUInfo([LABEL_LUMPY, LABEL_MIX])
+ self.assertEqual(info,
+ 'lumpy\n-------------------\nlumpy_cpu_info\n\n\nmix\n-'
+ '------------------\ndaisy_cpu_info\n\n\n')
+
+
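+# Raw /proc/meminfo and /proc/cpuinfo captures fed to the CrosMachine parsing
+# tests below.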
+MEMINFO_STRING = """MemTotal: 3990332 kB
+MemFree: 2608396 kB
+Buffers: 147168 kB
+Cached: 811560 kB
+SwapCached: 0 kB
+Active: 503480 kB
+Inactive: 628572 kB
+Active(anon): 174532 kB
+Inactive(anon): 88576 kB
+Active(file): 328948 kB
+Inactive(file): 539996 kB
+Unevictable: 0 kB
+Mlocked: 0 kB
+SwapTotal: 5845212 kB
+SwapFree: 5845212 kB
+Dirty: 9384 kB
+Writeback: 0 kB
+AnonPages: 173408 kB
+Mapped: 146268 kB
+Shmem: 89676 kB
+Slab: 188260 kB
+SReclaimable: 169208 kB
+SUnreclaim: 19052 kB
+KernelStack: 2032 kB
+PageTables: 7120 kB
+NFS_Unstable: 0 kB
+Bounce: 0 kB
+WritebackTmp: 0 kB
+CommitLimit: 7840376 kB
+Committed_AS: 1082032 kB
+VmallocTotal: 34359738367 kB
+VmallocUsed: 364980 kB
+VmallocChunk: 34359369407 kB
+DirectMap4k: 45824 kB
+DirectMap2M: 4096000 kB
+"""
+
+CPUINFO_STRING = """processor: 0
+vendor_id: GenuineIntel
+cpu family: 6
+model: 42
+model name: Intel(R) Celeron(R) CPU 867 @ 1.30GHz
+stepping: 7
+microcode: 0x25
+cpu MHz: 1300.000
+cache size: 2048 KB
+physical id: 0
+siblings: 2
+core id: 0
+cpu cores: 2
+apicid: 0
+initial apicid: 0
+fpu: yes
+fpu_exception: yes
+cpuid level: 13
+wp: yes
+flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer xsave lahf_lm arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid
+bogomips: 2594.17
+clflush size: 64
+cache_alignment: 64
+address sizes: 36 bits physical, 48 bits virtual
+power management:
+
+processor: 1
+vendor_id: GenuineIntel
+cpu family: 6
+model: 42
+model name: Intel(R) Celeron(R) CPU 867 @ 1.30GHz
+stepping: 7
+microcode: 0x25
+cpu MHz: 1300.000
+cache size: 2048 KB
+physical id: 0
+siblings: 2
+core id: 1
+cpu cores: 2
+apicid: 2
+initial apicid: 2
+fpu: yes
+fpu_exception: yes
+cpuid level: 13
+wp: yes
+flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer xsave lahf_lm arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid
+bogomips: 2594.17
+clflush size: 64
+cache_alignment: 64
+address sizes: 36 bits physical, 48 bits virtual
+power management:
+"""
+
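+# Expected checksum_string that _ComputeMachineChecksumString() builds from
+# CPUINFO_STRING above, with the parsed phys_kbytes appended at the end.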
+CHECKSUM_STRING = ('processor: 0vendor_id: GenuineIntelcpu family: 6model: '
+ '42model name: Intel(R) Celeron(R) CPU 867 @ '
+ '1.30GHzstepping: 7microcode: 0x25cache size: 2048 '
+ 'KBphysical id: 0siblings: 2core id: 0cpu cores: 2apicid: '
+ '0initial apicid: 0fpu: yesfpu_exception: yescpuid level: '
+ '13wp: yesflags: fpu vme de pse tsc msr pae mce cx8 apic sep'
+ ' mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse '
+ 'sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc '
+ 'arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc '
+ 'aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 '
+ 'ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt '
+ 'tsc_deadline_timer xsave lahf_lm arat epb xsaveopt pln pts '
+ 'dts tpr_shadow vnmi flexpriority ept vpidclflush size: '
+ '64cache_alignment: 64address sizes: 36 bits physical, 48 '
+ 'bits virtualpower management:processor: 1vendor_id: '
+ 'GenuineIntelcpu family: 6model: 42model name: Intel(R) '
+ 'Celeron(R) CPU 867 @ 1.30GHzstepping: 7microcode: 0x25cache'
+ ' size: 2048 KBphysical id: 0siblings: 2core id: 1cpu cores:'
+ ' 2apicid: 2initial apicid: 2fpu: yesfpu_exception: yescpuid'
+ ' level: 13wp: yesflags: fpu vme de pse tsc msr pae mce cx8 '
+ 'apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx '
+ 'fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm '
+ 'constant_tsc arch_perfmon pebs bts rep_good nopl xtopology '
+ 'nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl '
+ 'vmx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic '
+ 'popcnt tsc_deadline_timer xsave lahf_lm arat epb xsaveopt '
+ 'pln pts dts tpr_shadow vnmi flexpriority ept vpidclflush '
+ 'size: 64cache_alignment: 64address sizes: 36 bits physical,'
+ ' 48 bits virtualpower management: 4194304')
+
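+# Sample VPD dump; test_get_machine_id expects the "Product_S/N" entry to be
+# used as the machine id.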
+DUMP_VPD_STRING = """
+"PBA_SN"="Pba.txt"
+"Product_S/N"="HT4L91SC300208"
+"serial_number"="HT4L91SC300208Z"
+"System_UUID"="12153006-1755-4f66-b410-c43758a71127"
+"shipping_country"="US"
+"initial_locale"="en-US"
+"keyboard_layout"="xkb:us::eng"
+"initial_timezone"="America/Los_Angeles"
+"MACAddress"=""
+"System_UUID"="29dd9c61-7fa1-4c83-b89a-502e7eb08afe"
+"ubind_attribute"="0c433ce7585f486730b682bb05626a12ce2d896e9b57665387f8ce2ccfdcc56d2e2f1483"
+"gbind_attribute"="7e9a851324088e269319347c6abb8d1572ec31022fa07e28998229afe8acb45c35a89b9d"
+"ActivateDate"="2013-38"
+"""
+
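+# Sample ifconfig output; when no VPD serial is available, the "ether" (MAC
+# address) lines are used to build the machine id instead.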
+IFCONFIG_STRING = """
+eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
+ inet 172.17.129.247 netmask 255.255.254.0 broadcast 172.17.129.255
+ inet6 2620:0:1000:3002:143:fed4:3ff6:279d prefixlen 64 scopeid 0x0<global>
+ inet6 2620:0:1000:3002:4459:1399:1f02:9e4c prefixlen 64 scopeid 0x0<global>
+ inet6 2620:0:1000:3002:d9e4:87b:d4ec:9a0e prefixlen 64 scopeid 0x0<global>
+ inet6 2620:0:1000:3002:7d45:23f1:ea8a:9604 prefixlen 64 scopeid 0x0<global>
+ inet6 2620:0:1000:3002:250:b6ff:fe63:db65 prefixlen 64 scopeid 0x0<global>
+ inet6 fe80::250:b6ff:fe63:db65 prefixlen 64 scopeid 0x20<link>
+ ether 00:50:b6:63:db:65 txqueuelen 1000 (Ethernet)
+ RX packets 9817166 bytes 10865181708 (10.1 GiB)
+ RX errors 194 dropped 0 overruns 0 frame 194
+ TX packets 0 bytes 2265811903 (2.1 GiB)
+ TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
+
+eth1: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
+ ether e8:03:9a:9c:50:3d txqueuelen 1000 (Ethernet)
+ RX packets 0 bytes 0 (0.0 B)
+ RX errors 0 dropped 0 overruns 0 frame 0
+ TX packets 0 bytes 0 (0.0 B)
+ TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
+
+lo: flags=73<UP,LOOPBACK,RUNNING> mtu 16436
+ inet 127.0.0.1 netmask 255.0.0.0
+ inet6 ::1 prefixlen 128 scopeid 0x10<host>
+ loop txqueuelen 0 (Local Loopback)
+ RX packets 981004 bytes 1127468524 (1.0 GiB)
+ RX errors 0 dropped 0 overruns 0 frame 0
+ TX packets 981004 bytes 1127468524 (1.0 GiB)
+ TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
+
+wlan0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
+ ether 44:6d:57:20:4a:c5 txqueuelen 1000 (Ethernet)
+ RX packets 0 bytes 0 (0.0 B)
+ RX errors 0 dropped 0 overruns 0 frame 0
+ TX packets 0 bytes 0 (0.0 B)
+ TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
+"""
+
+
+class CrosMachineTest(unittest.TestCase):
+ """Test for CrosMachine class."""
+
+ mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+
+ @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
+ def test_init(self, mock_setup):
+
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
+ self.assertEqual(mock_setup.call_count, 1)
+ self.assertEqual(cm.chromeos_root, '/usr/local/chromeos')
+ self.assertEqual(cm.log_level, 'average')
+
+ @mock.patch.object(machine_manager.CrosMachine, 'IsReachable')
+ @mock.patch.object(machine_manager.CrosMachine, '_GetMemoryInfo')
+ @mock.patch.object(machine_manager.CrosMachine, '_GetCPUInfo')
+ @mock.patch.object(machine_manager.CrosMachine,
+ '_ComputeMachineChecksumString')
+ @mock.patch.object(machine_manager.CrosMachine, '_GetMachineID')
+ @mock.patch.object(machine_manager.CrosMachine, '_GetMD5Checksum')
+ def test_setup_checksum_info(self, mock_md5sum, mock_machineid,
+ mock_checkstring, mock_cpuinfo, mock_meminfo,
+ mock_isreachable):
+
+ # Test 1. Machine is not reachable; SetUpChecksumInfo is called via
+ # __init__.
+ mock_isreachable.return_value = False
+ mock_md5sum.return_value = 'md5_checksum'
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
+ cm.checksum_string = 'This is a checksum string.'
+ cm.machine_id = 'machine_id1'
+ self.assertEqual(mock_isreachable.call_count, 1)
+ self.assertIsNone(cm.machine_checksum)
+ self.assertEqual(mock_meminfo.call_count, 0)
+
+ # Test 2. Machine is reachable. Call explicitly.
+ mock_isreachable.return_value = True
+ cm.checksum_string = 'This is a checksum string.'
+ cm.machine_id = 'machine_id1'
+ cm.SetUpChecksumInfo()
+ self.assertEqual(mock_isreachable.call_count, 2)
+ self.assertEqual(mock_meminfo.call_count, 1)
+ self.assertEqual(mock_cpuinfo.call_count, 1)
+ self.assertEqual(mock_checkstring.call_count, 1)
+ self.assertEqual(mock_machineid.call_count, 1)
+ self.assertEqual(mock_md5sum.call_count, 2)
+ self.assertEqual(cm.machine_checksum, 'md5_checksum')
+ self.assertEqual(cm.machine_id_checksum, 'md5_checksum')
+ self.assertEqual(mock_md5sum.call_args_list[0][0][0],
+ 'This is a checksum string.')
+ self.assertEqual(mock_md5sum.call_args_list[1][0][0], 'machine_id1')
+
+ @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
+ @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
+ def test_is_reachable(self, mock_setup, mock_run_cmd):
+
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
+ self.mock_cmd_exec.CrosRunCommand = mock_run_cmd
+
+ # Test 1. CrosRunCommand returns 1 (fail)
+ mock_run_cmd.return_value = 1
+ result = cm.IsReachable()
+ self.assertFalse(result)
+ self.assertEqual(mock_setup.call_count, 1)
+ self.assertEqual(mock_run_cmd.call_count, 1)
+
+ # Test 2. CrosRunCommand returns 0 (success)
+ mock_run_cmd.return_value = 0
+ result = cm.IsReachable()
+ self.assertTrue(result)
+ self.assertEqual(mock_run_cmd.call_count, 2)
+ first_args = mock_run_cmd.call_args_list[0]
+ second_args = mock_run_cmd.call_args_list[1]
+ self.assertEqual(first_args[0], second_args[0])
+ self.assertEqual(first_args[1], second_args[1])
+ self.assertEqual(len(first_args[0]), 1)
+ self.assertEqual(len(first_args[1]), 2)
+ self.assertEqual(first_args[0][0], 'ls')
+ args_dict = first_args[1]
+ self.assertEqual(args_dict['machine'], 'daisy.cros')
+ self.assertEqual(args_dict['chromeos_root'], '/usr/local/chromeos')
+
+ @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
+ def test_parse_memory_info(self, _mock_setup):
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
+ cm.meminfo = MEMINFO_STRING
+ cm._ParseMemoryInfo()
+ self.assertEqual(cm.phys_kbytes, 4194304)
+
+ @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
+ @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
+ def test_get_memory_info(self, _mock_setup, mock_run_cmd):
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
+ self.mock_cmd_exec.CrosRunCommandWOutput = mock_run_cmd
+ mock_run_cmd.return_value = [0, MEMINFO_STRING, '']
+ cm._GetMemoryInfo()
+ self.assertEqual(mock_run_cmd.call_count, 1)
+ call_args = mock_run_cmd.call_args_list[0]
+ self.assertEqual(call_args[0][0], 'cat /proc/meminfo')
+ args_dict = call_args[1]
+ self.assertEqual(args_dict['machine'], 'daisy.cros')
+ self.assertEqual(args_dict['chromeos_root'], '/usr/local/chromeos')
+ self.assertEqual(cm.meminfo, MEMINFO_STRING)
+ self.assertEqual(cm.phys_kbytes, 4194304)
+
+ mock_run_cmd.return_value = [1, MEMINFO_STRING, '']
+ self.assertRaises(cm._GetMemoryInfo)
+
+ @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
+ @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
+ def test_get_cpu_info(self, _mock_setup, mock_run_cmd):
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
+ self.mock_cmd_exec.CrosRunCommandWOutput = mock_run_cmd
+ mock_run_cmd.return_value = [0, CPUINFO_STRING, '']
+ cm._GetCPUInfo()
+ self.assertEqual(mock_run_cmd.call_count, 1)
+ call_args = mock_run_cmd.call_args_list[0]
+ self.assertEqual(call_args[0][0], 'cat /proc/cpuinfo')
+ args_dict = call_args[1]
+ self.assertEqual(args_dict['machine'], 'daisy.cros')
+ self.assertEqual(args_dict['chromeos_root'], '/usr/local/chromeos')
+ self.assertEqual(cm.cpuinfo, CPUINFO_STRING)
+
+ @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
+ def test_compute_machine_checksum_string(self, _mock_setup):
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
+ cm.cpuinfo = CPUINFO_STRING
+ cm.meminfo = MEMINFO_STRING
+ cm._ParseMemoryInfo()
+ cm._ComputeMachineChecksumString()
+ self.assertEqual(cm.checksum_string, CHECKSUM_STRING)
+
+ @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
+ def test_get_md5_checksum(self, _mock_setup):
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
+ temp_str = 'abcde'
+ checksum_str = cm._GetMD5Checksum(temp_str)
+ self.assertEqual(checksum_str, 'ab56b4d92b40713acc5af89985d4b786')
+
+ temp_str = ''
+ checksum_str = cm._GetMD5Checksum(temp_str)
+ self.assertEqual(checksum_str, '')
+
+ @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
+ @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
+ def test_get_machine_id(self, _mock_setup, mock_run_cmd):
+ cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
+ 'average', self.mock_cmd_exec)
+ self.mock_cmd_exec.CrosRunCommandWOutput = mock_run_cmd
+ mock_run_cmd.return_value = [0, DUMP_VPD_STRING, '']
+
+ cm._GetMachineID()
+ self.assertEqual(cm.machine_id, '"Product_S/N"="HT4L91SC300208"')
+
+ mock_run_cmd.return_value = [0, IFCONFIG_STRING, '']
+ cm._GetMachineID()
+ self.assertEqual(
+ cm.machine_id,
+ ' ether 00:50:b6:63:db:65 txqueuelen 1000 (Ethernet)_ '
+ 'ether e8:03:9a:9c:50:3d txqueuelen 1000 (Ethernet)_ ether '
+ '44:6d:57:20:4a:c5 txqueuelen 1000 (Ethernet)')
+
+ mock_run_cmd.return_value = [0, 'invalid hardware config', '']
+ self.assertRaises(cm._GetMachineID)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/crosperf/mock_instance.py b/crosperf/mock_instance.py
new file mode 100644
index 00000000..758108fa
--- /dev/null
+++ b/crosperf/mock_instance.py
@@ -0,0 +1,143 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""This contains some mock instances for testing."""
+
+from __future__ import print_function
+
+from benchmark import Benchmark
+from label import MockLabel
+
+perf_args = 'record -a -e cycles'
+label1 = MockLabel(
+ 'test1',
+ 'image1',
+ 'autotest_dir',
+ '/tmp/test_benchmark_run',
+ 'x86-alex',
+ 'chromeos-alex1',
+ image_args='',
+ cache_dir='',
+ cache_only=False,
+ log_level='average',
+ compiler='gcc')
+
+label2 = MockLabel(
+ 'test2',
+ 'image2',
+ 'autotest_dir',
+ '/tmp/test_benchmark_run_2',
+ 'x86-alex',
+ 'chromeos-alex2',
+ image_args='',
+ cache_dir='',
+ cache_only=False,
+ log_level='average',
+ compiler='gcc')
+
+benchmark1 = Benchmark('benchmark1', 'autotest_name_1', 'autotest_args', 2, '',
+ perf_args, '', '')
+
+benchmark2 = Benchmark('benchmark2', 'autotest_name_2', 'autotest_args', 2, '',
+ perf_args, '', '')
+
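+# Fake autotest-style key/value result dictionaries used as canned test
+# results.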
+keyval = {}
+keyval[0] = {
+ '': 'PASS',
+ 'milliseconds_1': '1',
+ 'milliseconds_2': '8',
+ 'milliseconds_3': '9.2',
+ 'test{1}': '2',
+ 'test{2}': '4',
+ 'ms_1': '2.1',
+ 'total': '5',
+ 'bool': 'True'
+}
+
+keyval[1] = {
+ '': 'PASS',
+ 'milliseconds_1': '3',
+ 'milliseconds_2': '5',
+ 'ms_1': '2.2',
+ 'total': '6',
+ 'test{1}': '3',
+ 'test{2}': '4',
+ 'bool': 'FALSE'
+}
+
+keyval[2] = {
+ '': 'PASS',
+ 'milliseconds_4': '30',
+ 'milliseconds_5': '50',
+ 'ms_1': '2.23',
+ 'total': '6',
+ 'test{1}': '5',
+ 'test{2}': '4',
+ 'bool': 'FALSE'
+}
+
+keyval[3] = {
+ '': 'PASS',
+ 'milliseconds_1': '3',
+ 'milliseconds_6': '7',
+ 'ms_1': '2.3',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '6',
+ 'bool': 'FALSE'
+}
+
+keyval[4] = {
+ '': 'PASS',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2.3',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '6',
+ 'bool': 'TRUE'
+}
+
+keyval[5] = {
+ '': 'PASS',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2.2',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '2',
+ 'bool': 'TRUE'
+}
+
+keyval[6] = {
+ '': 'PASS',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '4',
+ 'bool': 'TRUE'
+}
+
+keyval[7] = {
+ '': 'PASS',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '1',
+ 'total': '7',
+ 'test{1}': '1',
+ 'test{2}': '6',
+ 'bool': 'TRUE'
+}
+
+keyval[8] = {
+ '': 'PASS',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '3.3',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '8',
+ 'bool': 'TRUE'
+}
diff --git a/crosperf/perf_files/perf.data.report.0 b/crosperf/perf_files/perf.data.report.0
new file mode 100644
index 00000000..910fdc44
--- /dev/null
+++ b/crosperf/perf_files/perf.data.report.0
@@ -0,0 +1,734 @@
+# To display the perf.data header info, please use --header/--header-only options.
+#
+# NOTE: this file has been manually cut into arbitrary tiny pieces. The original
+# was > 100,000 lines, and took Python a few seconds to run through. This one
+# takes almost no time, and should work just as well.
+#
+# Samples: 292K of event 'cycles'
+# Event count (approx.): 106521626675
+#
+# Overhead Samples Command Shared Object Symbol
+# ........ ............ ............... .............................. ......................
+#
+ 0.66% 3539 swapper [kernel.kallsyms] [k] 0xffffffffa4a1f1c9
+ 0.61% 1703 chrome [kernel.kallsyms] [k] 0xffffffffa4eca110
+ 0.50% 1402 chrome [kernel.kallsyms] [k] 0xffffffffa4beea47
+ 0.48% 1297 chrome perf-24199.map [.] 0x0000115bb6c35d7a
+ 0.47% 1286 chrome perf-24199.map [.] 0x0000115bb7ba9b54
+ 0.42% 1149 lsusb lsusb [.] 0x0000000000010e60
+ 0.37% 1029 chrome chrome [.] 0x0000000000e45a2b
+ 0.37% 991 chrome perf-24199.map [.] 0x0000115bb6c35d72
+ 0.28% 762 chrome perf-24199.map [.] 0x0000115bb6c35d76
+ 0.27% 735 chrome perf-24199.map [.] 0x0000115bb6aa463a
+ 0.22% 608 chrome perf-24199.map [.] 0x0000115bb7ba9ebf
+ 0.17% 468 chrome perf-24199.map [.] 0x0000115bb6a7afc3
+ 0.17% 503 chrome [kernel.kallsyms] [k] 0xffffffffa4bf4c3d
+ 0.17% 450 chrome perf-24199.map [.] 0x0000115bb6af7457
+ 0.16% 444 chrome perf-24199.map [.] 0x0000115bb7c6edd1
+ 0.16% 438 chrome perf-24199.map [.] 0x0000115bb7c6f93d
+ 0.15% 420 chrome perf-24199.map [.] 0x0000115bb6af744b
+ 0.15% 414 chrome perf-24199.map [.] 0x0000115bb7c6fa42
+ 0.15% 405 chrome perf-24199.map [.] 0x0000115bb6af7430
+ 0.15% 398 chrome perf-24199.map [.] 0x0000115bb6af7421
+ 0.15% 396 chrome perf-24199.map [.] 0x0000115bb6af7438
+ 0.15% 396 chrome perf-24199.map [.] 0x0000115bb6af742b
+ 0.14% 437 chrome chrome [.] 0x0000000005d10b64
+ 0.14% 385 chrome perf-24199.map [.] 0x0000115bb7c6f9e5
+ 0.14% 371 chrome perf-24199.map [.] 0x0000115bb6af7418
+ 0.14% 369 chrome perf-24199.map [.] 0x0000115bb6af73f9
+ 0.14% 369 chrome perf-24199.map [.] 0x0000115bb5d21648
+ 0.13% 363 chrome perf-24199.map [.] 0x0000115bb6af7428
+ 0.13% 358 chrome perf-24199.map [.] 0x0000115bb6b80e03
+ 0.13% 343 chrome perf-24199.map [.] 0x0000115bb6af73fc
+ 0.13% 344 chrome chrome [.] 0x0000000000e55b20
+ 0.12% 338 chrome chrome [.] 0x00000000011d1cb0
+ 0.12% 317 chrome perf-24199.map [.] 0x0000115bb6aa469c
+ 0.11% 311 chrome perf-24199.map [.] 0x0000115bb6af73f6
+ 0.11% 315 chrome chrome [.] 0x0000000000e48e65
+ 0.11% 310 chrome perf-24199.map [.] 0x0000115bb6af73dc
+ 0.11% 309 chrome perf-24199.map [.] 0x0000115bb6af73cc
+ 0.11% 303 chrome perf-24199.map [.] 0x0000115bb5d21662
+ 0.11% 302 chrome perf-24199.map [.] 0x0000115bb5d29f6a
+ 0.11% 295 chrome perf-24199.map [.] 0x0000115bb6af7382
+ 0.11% 295 chrome perf-24199.map [.] 0x0000115bb6c35d1d
+ 0.11% 294 chrome perf-24199.map [.] 0x0000115bb6c35d99
+ 0.11% 293 chrome perf-24199.map [.] 0x0000115bb6c35cec
+ 0.11% 292 chrome perf-24199.map [.] 0x0000115bb6af73bc
+ 0.10% 285 chrome chrome [.] 0x0000000000e46990
+ 0.10% 283 chrome perf-24199.map [.] 0x0000115bb6af7465
+ 0.10% 282 chrome perf-24199.map [.] 0x0000115bb6aa4699
+ 0.10% 276 chrome perf-24199.map [.] 0x0000115bb6c35d2e
+ 0.10% 274 chrome perf-24199.map [.] 0x0000115bb6c35d6e
+ 0.10% 273 chrome perf-24199.map [.] 0x0000115bb6af73f0
+ 0.10% 268 chrome perf-24199.map [.] 0x0000115bb7ba9ecb
+ 0.10% 266 chrome perf-24199.map [.] 0x0000115bb6af73a1
+ 0.10% 262 chrome perf-24199.map [.] 0x0000115bb6c35d57
+ 0.09% 286 chrome [kernel.kallsyms] [k] 0xffffffffa4bef022
+ 0.09% 256 chrome chrome [.] 0x0000000000e6fa2b
+ 0.09% 249 chrome perf-24199.map [.] 0x0000115bb6c35d47
+ 0.09% 248 chrome perf-24199.map [.] 0x0000115bb6af73e6
+ 0.09% 247 chrome perf-24199.map [.] 0x0000115bb6c35d8d
+ 0.09% 240 chrome perf-24199.map [.] 0x0000115bb6a7b6e7
+ 0.09% 240 chrome perf-24199.map [.] 0x0000115bb6c35d81
+ 0.09% 233 chrome perf-24199.map [.] 0x0000115bb7ba9e8c
+ 0.09% 233 chrome perf-24199.map [.] 0x0000115bb6c35d02
+ 0.08% 230 chrome perf-24199.map [.] 0x0000115bb5d09f68
+ 0.08% 228 chrome chrome [.] 0x0000000000e45adc
+ 0.08% 232 swapper [kernel.kallsyms] [k] 0xffffffffa4dccf94
+ 0.08% 222 chrome perf-24199.map [.] 0x0000115bb7bed938
+ 0.08% 222 chrome perf-24199.map [.] 0x0000115bb5d0a372
+ 0.08% 338 python [kernel.kallsyms] [k] 0xffffffffa4eca110
+ 0.08% 218 chrome perf-24199.map [.] 0x0000115bb7ba9b5d
+ 0.08% 215 chrome perf-24199.map [.] 0x0000115bb7ba9ea8
+ 0.08% 246 python [kernel.kallsyms] [k] 0xffffffffa4ad6f19
+ 0.08% 216 swapper [kernel.kallsyms] [k] 0xffffffffa4dccfa1
+ 0.08% 206 lsusb lsusb [.] 0x0000000000010e63
+ 0.08% 207 chrome chrome [.] 0x0000000000e4596c
+ 0.07% 204 chrome perf-24199.map [.] 0x0000115bb5d29dd4
+ 0.07% 202 chrome perf-24199.map [.] 0x0000115bb6b25330
+ 0.07% 199 chrome perf-24199.map [.] 0x0000115bb6b25338
+ 0.07% 198 chrome perf-24199.map [.] 0x0000115bb5d1726d
+ 0.07% 194 chrome perf-24199.map [.] 0x0000115bb6a7b07c
+ 0.07% 214 chrome chrome [.] 0x0000000005d10e5e
+ 0.07% 187 chrome perf-24199.map [.] 0x0000115bb7ba9b69
+ 0.07% 188 chrome perf-24199.map [.] 0x0000115bb5d1728e
+ 0.07% 187 chrome perf-24199.map [.] 0x0000115bb6b80dfe
+ 0.07% 179 chrome perf-24199.map [.] 0x0000115bb7bed940
+ 0.07% 179 chrome perf-24199.map [.] 0x0000115bb5d0a36e
+ 0.06% 176 chrome chrome [.] 0x0000000000e75fe4
+ 0.06% 181 chrome chrome [.] 0x00000000023fd480
+ 0.06% 172 chrome perf-24199.map [.] 0x0000115bb6af73e9
+ 0.06% 170 chrome perf-24199.map [.] 0x0000115bb6a7b6fe
+ 0.06% 177 swapper [kernel.kallsyms] [k] 0xffffffffa4dccf9b
+ 0.06% 168 chrome chrome [.] 0x0000000000e45aff
+ 0.06% 166 chrome perf-24199.map [.] 0x0000115bb6b25340
+ 0.06% 175 chrome [kernel.kallsyms] [k] 0xffffffffa4ac31c3
+ 0.06% 163 chrome chrome [.] 0x0000000000e4fcb8
+ 0.06% 160 chrome perf-24199.map [.] 0x0000115bb6a7afbb
+ 0.06% 160 chrome chrome [.] 0x0000000000e54d5c
+ 0.06% 156 chrome perf-24199.map [.] 0x0000115bb6a7af9f
+ 0.06% 157 chrome perf-24199.map [.] 0x0000115bb5d29daf
+ 0.06% 156 chrome perf-24199.map [.] 0x0000115bb5d21656
+ 0.06% 172 chrome chrome [.] 0x0000000005d10b5b
+ 0.06% 156 chrome perf-24199.map [.] 0x0000115bb6aa4662
+ 0.06% 155 chrome perf-24199.map [.] 0x0000115bb7bed932
+ 0.06% 155 chrome perf-24199.map [.] 0x0000115bb6b82327
+ 0.05% 149 chrome perf-24199.map [.] 0x0000115bb7ba9ede
+ 0.05% 146 chrome perf-24199.map [.] 0x0000115bb6aa45f8
+ 0.05% 145 chrome perf-24199.map [.] 0x0000115bb6aa460e
+ 0.05% 153 chrome chrome [.] 0x0000000000cb7030
+ 0.05% 142 chrome perf-24199.map [.] 0x0000115bb7ba9b18
+ 0.05% 143 chrome chrome [.] 0x0000000000f13e9c
+ 0.05% 143 chrome perf-24199.map [.] 0x0000115bb6b2530a
+ 0.05% 141 chrome chrome [.] 0x0000000000e18c45
+ 0.05% 138 chrome perf-24199.map [.] 0x0000115bb6ca5090
+ 0.05% 211 python [kernel.kallsyms] [k] 0xffffffffa4ae14fd
+ 0.05% 137 chrome perf-24199.map [.] 0x0000115bb6aa4692
+ 0.05% 137 chrome perf-24199.map [.] 0x0000115bb6aa4626
+ 0.05% 136 chrome perf-24199.map [.] 0x0000115bb7ba9ed2
+ 0.05% 196 python [kernel.kallsyms] [k] 0xffffffffa4beeac5
+ 0.05% 133 chrome perf-24199.map [.] 0x0000115bb6ca5109
+ 0.05% 132 chrome perf-24199.map [.] 0x0000115bb7ba9b42
+ 0.05% 132 chrome perf-24199.map [.] 0x0000115bb6b8230f
+ 0.05% 132 chrome perf-24199.map [.] 0x0000115bb5d215e5
+ 0.05% 131 chrome perf-24199.map [.] 0x0000115bb7c6fa0a
+ 0.05% 149 chrome libpthread-2.19.so [.] 0x000000000000b471
+ 0.05% 130 chrome perf-24199.map [.] 0x0000115bb6aa4678
+ 0.05% 133 chrome libc-2.19.so [.] 0x0000000000088b72
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6c4c692
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6a7b4bc
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb7bba146
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb7ba9e83
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb7ba9dde
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6c4c713
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6a7b197
+ 0.01% 16 keygen libfreebl3.so [.] 0x000000000005bc62
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6ae766b
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6c4c6ef
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6c4e9ef
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6c4c0ba
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6a78053
+ 0.01% 16 chrome chrome [.] 0x0000000000e73bb0
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6c36bee
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6c3979b
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6c4e93b
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6af73bf
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b814a7
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6a7b6cd
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6af73c5
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b8147d
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b8216b
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b80dc6
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6ba1724
+ 0.01% 16 chrome chrome [.] 0x000000000254788e
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b81ed9
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb5d27a01
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a09503
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a63d39
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a65090
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4dcd1c3
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a544b0
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a54f5b
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4ec8bec
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4c532e4
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4a00e4c
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4a63e67
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4ec855a
+ 0.00% 1 mtpd [kernel.kallsyms] [k] 0xffffffffa4a0cb13
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4a00dee
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a5d3a2
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a66eba
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4bea29e
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a545c4
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4a62fcf
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4cc8948
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4ec9b33
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4ec8911
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4a64bf8
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a00e4c
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a63d0c
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4bea29a
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a75623
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4a5d435
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4a546cf
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4bec12d
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a66db1
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4ec855b
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a6394d
+ 0.00% 1 dbus-daemon [kernel.kallsyms] [k] 0xffffffffa4ded832
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a638c4
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a1fc16
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a75810
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a92368
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a23893
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4a00e17
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a679aa
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a6e743
+ 0.00% 1 disks  [.] 0x00000000000e9fc7
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a55032
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a58dc9
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a6646c
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a65163
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4ec84f8
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4a54e31
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a63e17
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4ec8435
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4bf4d14
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a3909e
+ 0.00% 1 dbus-daemon [kernel.kallsyms] [k] 0xffffffffa4bef0f0
+ 0.00% 1 disks  [.] 0x0000000000082e08
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4dce5a5
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a66a5c
+ 0.00% 1 rsync [kernel.kallsyms] [k] 0xffffffffa4bbaa3c
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a1feea
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a5de3a
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a38e6b
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a2cb16
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a7a32f
+ 0.00% 5 perf [kernel.kallsyms] [k] 0xffffffffa4a13e63
+
+
+# Samples: 266K of event 'instructions'
+# Event count (approx.): 154627854320
+#
+# Overhead Samples Command Shared Object Symbol
+# ........ ............ ............... .......................... ......................
+#
+ 1.65% 2882 chrome perf-24199.map [.] 0x0000115bb6c35d7a
+ 0.67% 987 chrome perf-24199.map [.] 0x0000115bb7ba9b54
+ 0.51% 663 chrome perf-24199.map [.] 0x0000115bb6af7457
+ 0.45% 592 chrome perf-24199.map [.] 0x0000115bb6af744b
+ 0.45% 660 chrome perf-24199.map [.] 0x0000115bb7ba9ebf
+ 0.44% 581 chrome perf-24199.map [.] 0x0000115bb6af7438
+ 0.39% 576 chrome perf-24199.map [.] 0x0000115bb7c6f9e5
+ 0.37% 488 chrome perf-24199.map [.] 0x0000115bb6af7430
+ 0.34% 499 chrome perf-24199.map [.] 0x0000115bb7c6f93d
+ 0.33% 575 chrome perf-24199.map [.] 0x0000115bb6c35d81
+ 0.33% 573 chrome perf-24199.map [.] 0x0000115bb6c35d99
+ 0.32% 420 chrome perf-24199.map [.] 0x0000115bb6af742b
+ 0.30% 391 chrome perf-24199.map [.] 0x0000115bb6af7465
+ 0.29% 503 chrome perf-24199.map [.] 0x0000115bb6c35d76
+ 0.29% 377 chrome perf-24199.map [.] 0x0000115bb6af73f0
+ 0.28% 492 chrome perf-24199.map [.] 0x0000115bb6a7afc3
+ 0.28% 373 chrome perf-24199.map [.] 0x0000115bb6af7428
+ 0.27% 361 chrome perf-24199.map [.] 0x0000115bb6af7382
+ 0.27% 361 chrome perf-24199.map [.] 0x0000115bb6af73f9
+ 0.27% 360 chrome perf-24199.map [.] 0x0000115bb6af73a1
+ 0.27% 464 chrome perf-24199.map [.] 0x0000115bb6c35d8d
+ 0.24% 318 chrome perf-24199.map [.] 0x0000115bb6af7421
+ 0.24% 425 chrome perf-24199.map [.] 0x0000115bb6b80e03
+ 0.24% 314 chrome perf-24199.map [.] 0x0000115bb6af73f6
+ 0.24% 345 chrome perf-24199.map [.] 0x0000115bb7c6fa42
+ 0.23% 338 chrome perf-24199.map [.] 0x0000115bb7c6edd1
+ 0.21% 315 chrome perf-24199.map [.] 0x0000115bb7ba9ecb
+ 0.21% 279 chrome perf-24199.map [.] 0x0000115bb6af7418
+ 0.21% 277 chrome perf-24199.map [.] 0x0000115bb6af73bc
+ 0.21% 304 chrome perf-24199.map [.] 0x0000115bb7ba9ea8
+ 0.20% 534 chrome perf-24199.map [.] 0x0000115bb5d21648
+ 0.18% 238 chrome perf-24199.map [.] 0x0000115bb6af73e6
+ 0.17% 227 chrome perf-24199.map [.] 0x0000115bb6af73fc
+ 0.17% 241 chrome perf-24199.map [.] 0x0000115bb7ba9b5d
+ 0.16% 240 chrome perf-24199.map [.] 0x0000115bb7ba9b10
+ 0.16% 285 chrome perf-24199.map [.] 0x0000115bb6a7b07c
+ 0.15% 205 chrome perf-24199.map [.] 0x0000115bb6af73dc
+ 0.15% 290 chrome perf-24199.map [.] 0x0000115bb6aa460e
+ 0.15% 223 chrome perf-24199.map [.] 0x0000115bb7ba9b69
+ 0.15% 194 chrome perf-24199.map [.] 0x0000115bb6af73cc
+ 0.15% 191 chrome perf-24199.map [.] 0x0000115bb6af974c
+ 0.14% 185 chrome perf-24199.map [.] 0x0000115bb6af7461
+ 0.14% 204 chrome perf-24199.map [.] 0x0000115bb7bed940
+ 0.14% 368 chrome perf-24199.map [.] 0x0000115bb5d21662
+ 0.14% 230 chrome perf-24199.map [.] 0x0000115bb6a7b412
+ 0.14% 199 chrome perf-24199.map [.] 0x0000115bb7ba9ed2
+ 0.13% 197 chrome perf-24199.map [.] 0x0000115bb7ba9ede
+ 0.13% 255 chrome perf-24199.map [.] 0x0000115bb6aa463a
+ 0.13% 191 chrome perf-24199.map [.] 0x0000115bb7ba9e8c
+ 0.13% 187 chrome perf-24199.map [.] 0x0000115bb7c6f9cb
+ 0.13% 242 chrome perf-24199.map [.] 0x0000115bb6aa4678
+ 0.13% 165 chrome perf-24199.map [.] 0x0000115bb6af975e
+ 0.13% 222 chrome perf-24199.map [.] 0x0000115bb6b80dfe
+ 0.12% 163 chrome perf-24199.map [.] 0x0000115bb6af9746
+ 0.12% 234 chrome perf-24199.map [.] 0x0000115bb6aa4692
+ 0.12% 178 chrome perf-24199.map [.] 0x0000115bb7c6ed55
+ 0.12% 157 chrome perf-24199.map [.] 0x0000115bb6af96f4
+ 0.12% 154 chrome perf-24199.map [.] 0x0000115bb6af9737
+ 0.12% 173 chrome perf-24199.map [.] 0x0000115bb7c6fa73
+ 0.12% 171 chrome perf-24199.map [.] 0x0000115bb7c6fa5e
+ 0.12% 200 chrome perf-24199.map [.] 0x0000115bb6a7afbb
+ 0.12% 199 chrome perf-24199.map [.] 0x0000115bb6a7b6fe
+ 0.12% 169 chrome perf-24199.map [.] 0x0000115bb7c6f8f2
+ 0.11% 148 chrome perf-24199.map [.] 0x0000115bb6af737e
+ 0.11% 205 chrome libc-2.19.so [.] 0x0000000000088b72
+ 0.11% 212 chrome perf-24199.map [.] 0x0000115bb6aa469c
+ 0.11% 160 chrome perf-24199.map [.] 0x0000115bb7c6f8d0
+ 0.11% 204 chrome perf-24199.map [.] 0x0000115bb6aa4626
+ 0.11% 160 chrome perf-24199.map [.] 0x0000115bb7bed932
+ 0.11% 154 chrome perf-24199.map [.] 0x0000115bb7ba9b18
+ 0.11% 137 chrome perf-24199.map [.] 0x0000115bb6af972f
+ 0.10% 153 chrome perf-24199.map [.] 0x0000115bb7c6f9f9
+ 0.10% 136 chrome perf-24199.map [.] 0x0000115bb6af7394
+ 0.10% 238 chrome chrome [.] 0x0000000000e45adc
+ 0.10% 131 chrome perf-24199.map [.] 0x0000115bb6af977d
+ 0.10% 146 chrome perf-24199.map [.] 0x0000115bb7c6f907
+ 0.10% 171 chrome perf-24199.map [.] 0x0000115bb6a7b6e7
+ 0.10% 144 chrome perf-24199.map [.] 0x0000115bb7c6f9e1
+ 0.10% 128 chrome perf-24199.map [.] 0x0000115bb6af9732
+ 0.10% 256 chrome perf-24199.map [.] 0x0000115bb5d21656
+ 0.10% 142 chrome perf-24199.map [.] 0x0000115bb7c6f9b1
+ 0.10% 181 chrome perf-24199.map [.] 0x0000115bb6aa464e
+ 0.10% 147 chrome perf-24199.map [.] 0x0000115bb7bf5700
+ 0.10% 181 chrome perf-24199.map [.] 0x0000115bb6aa4662
+ 0.09% 161 chrome perf-24199.map [.] 0x0000115bb6a7af9f
+ 0.09% 136 chrome perf-24199.map [.] 0x0000115bb7c6f216
+ 0.09% 159 chrome perf-24199.map [.] 0x0000115bb6a7b377
+ 0.09% 228 chrome perf-24199.map [.] 0x0000115bb5d0a372
+ 0.09% 118 chrome perf-24199.map [.] 0x0000115bb6af9769
+ 0.09% 117 chrome perf-24199.map [.] 0x0000115bb6af96f2
+ 0.09% 336 chrome perf-24199.map [.] 0x0000115bb5d1726d
+ 0.09% 193 chrome chrome [.] 0x0000000000e76562
+ 0.09% 117 chrome perf-24199.map [.] 0x0000115bb6add6ed
+ 0.09% 219 chrome chrome [.] 0x0000000000e45a2b
+ 0.09% 148 chrome perf-24199.map [.] 0x0000115bb6a7afc9
+ 0.08% 111 chrome perf-24199.map [.] 0x0000115bb6af972a
+ 0.08% 158 chrome perf-24199.map [.] 0x0000115bb6aa45f8
+ 0.08% 145 chrome perf-24199.map [.] 0x0000115bb6a7afed
+ 0.08% 112 chrome perf-24199.map [.] 0x0000115bb6af73b8
+ 0.08% 107 chrome perf-24199.map [.] 0x0000115bb6af9707
+ 0.08% 118 chrome perf-24199.map [.] 0x0000115bb7ba9b3a
+ 0.08% 138 chrome perf-24199.map [.] 0x0000115bb6a785c9
+ 0.08% 117 chrome perf-24199.map [.] 0x0000115bb7c6ed3e
+ 0.08% 142 chrome perf-24199.map [.] 0x0000115bb6b81e10
+ 0.08% 106 chrome perf-24199.map [.] 0x0000115bb6af73e3
+ 0.08% 154 chrome chrome [.] 0x0000000000eb5472
+ 0.08% 116 chrome perf-24199.map [.] 0x0000115bb7bed9b6
+ 0.08% 287 chrome perf-24199.map [.] 0x0000115bb5d29f6a
+ 0.08% 199 chrome chrome [.] 0x0000000000e6fa2b
+ 0.08% 218 chrome chrome [.] 0x0000000000e55b20
+ 0.08% 110 chrome perf-24199.map [.] 0x0000115bb7c6f925
+ 0.07% 112 chrome perf-24199.map [.] 0x0000115bb7bb53c2
+ 0.07% 107 chrome perf-24199.map [.] 0x0000115bb7c6f92d
+ 0.07% 155 chrome perf-24199.map [.] 0x0000115bb5d2640e
+ 0.07% 127 chrome perf-24199.map [.] 0x0000115bb6c35d72
+ 0.07% 124 chrome perf-24199.map [.] 0x0000115bb6a78284
+ 0.07% 107 chrome perf-24199.map [.] 0x0000115bb7bed990
+ 0.07% 421 python [kernel.kallsyms] [k] 0xffffffffa4ae14fd
+ 0.07% 93 chrome perf-24199.map [.] 0x0000115bb6af73e9
+ 0.07% 104 chrome perf-24199.map [.] 0x0000115bb7bed938
+ 0.07% 158 chrome perf-24199.map [.] 0x0000115bb6b57933
+ 0.07% 100 chrome perf-24199.map [.] 0x0000115bb7c6f96b
+ 0.07% 123 chrome perf-24199.map [.] 0x0000115bb6b8230f
+ 0.07% 101 chrome perf-24199.map [.] 0x0000115bb7bed9bf
+ 0.07% 102 chrome perf-24199.map [.] 0x0000115bb7bb53df
+ 0.07% 144 chrome perf-24199.map [.] 0x0000115bb6b2530a
+ 0.07% 91 chrome perf-24199.map [.] 0x0000115bb6add73f
+ 0.07% 89 chrome perf-24199.map [.] 0x0000115bb6add762
+ 0.07% 98 chrome perf-24199.map [.] 0x0000115bb7bed926
+ 0.06% 85 chrome perf-24199.map [.] 0x0000115bb6af96a7
+ 0.06% 129 chrome perf-24199.map [.] 0x0000115bb6aa4699
+ 0.06% 112 chrome perf-24199.map [.] 0x0000115bb6c35d47
+ 0.06% 295 chrome chrome [.] 0x00000000011d1cb0
+ 0.06% 114 chrome perf-24199.map [.] 0x0000115bb6b822b1
+ 0.06% 94 chrome perf-24199.map [.] 0x0000115bb7bed99d
+ 0.06% 94 chrome perf-24199.map [.] 0x0000115bb7bb53f2
+ 0.06% 92 chrome perf-24199.map [.] 0x0000115bb7ba9b86
+ 0.06% 92 chrome perf-24199.map [.] 0x0000115bb7ba9b29
+ 0.06% 88 chrome perf-24199.map [.] 0x0000115bb6e14f87
+ 0.06% 80 chrome perf-24199.map [.] 0x0000115bb6af9722
+ 0.06% 109 chrome perf-24199.map [.] 0x0000115bb6b8238a
+ 0.06% 93 chrome perf-24199.map [.] 0x0000115bb7ba71a3
+ 0.06% 80 chrome perf-24199.map [.] 0x0000115bb6af747f
+ 0.06% 107 chrome perf-24199.map [.] 0x0000115bb6b80e22
+ 0.06% 104 chrome perf-24199.map [.] 0x0000115bb6a72466
+ 0.06% 78 chrome perf-24199.map [.] 0x0000115bb6add757
+ 0.06% 80 chrome perf-24199.map [.] 0x0000115bb6add745
+ 0.06% 102 chrome perf-24199.map [.] 0x0000115bb6c35cec
+ 0.06% 202 chrome chrome [.] 0x0000000000f13e9c
+ 0.06% 166 chrome chrome [.] 0x0000000000e46989
+ 0.06% 318 python [kernel.kallsyms] [k] 0xffffffffa4ad6f19
+ 0.06% 83 chrome perf-24199.map [.] 0x0000115bb7c6f97d
+ 0.06% 77 chrome perf-24199.map [.] 0x0000115bb6add730
+ 0.06% 82 chrome perf-24199.map [.] 0x0000115bb7c6f992
+ 0.06% 132 chrome perf-24199.map [.] 0x0000115bb5d417f2
+ 0.06% 76 chrome perf-24199.map [.] 0x0000115bb6add728
+ 0.06% 72 chrome perf-24199.map [.] 0x0000115bb6af9702
+ 0.06% 94 chrome perf-24199.map [.] 0x0000115bb6c4ea18
+ 0.06% 321 chrome [kernel.kallsyms] [k] 0xffffffffa4bf4c3d
+ 0.06% 139 chrome perf-24199.map [.] 0x0000115bb5d21666
+ 0.06% 72 chrome perf-24199.map [.] 0x0000115bb6af9684
+ 0.06% 78 chrome perf-24199.map [.] 0x0000115bb6add6a0
+ 0.05% 72 chrome perf-24199.map [.] 0x0000115bb6af73c8
+ 0.05% 137 chrome perf-24199.map [.] 0x0000115bb5d0a36e
+ 0.05% 97 chrome perf-24199.map [.] 0x0000115bb6b82327
+ 0.05% 70 chrome perf-24199.map [.] 0x0000115bb6b06516
+ 0.05% 137 chrome chrome [.] 0x0000000000e6fa1c
+ 0.05% 132 chrome perf-24199.map [.] 0x0000115bb5d21629
+ 0.05% 130 chrome chrome [.] 0x0000000000e54d5c
+ 0.05% 122 chrome chrome [.] 0x0000000000e48e5f
+ 0.05% 839 lsusb lsusb [.] 0x0000000000010e60
+ 0.05% 133 chrome perf-24199.map [.] 0x0000115bb5d215dd
+ 0.05% 130 chrome perf-24199.map [.] 0x0000115bb5d215c9
+ 0.05% 130 chrome perf-24199.map [.] 0x0000115bb78d3895
+ 0.05% 76 chrome perf-24199.map [.] 0x0000115bb7c6f174
+ 0.01% 46 chrome chrome [.] 0x0000000005d109a9
+ 0.01% 15 chrome perf-24199.map [.] 0x0000115bb6aa5665
+ 0.01% 17 chrome chrome [.] 0x0000000000ec6b13
+ 0.01% 18 chrome perf-24199.map [.] 0x0000115bb5d417ea
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6c3581e
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6af94fb
+ 0.01% 29 chrome libc-2.19.so [.] 0x000000000009a8d5
+ 0.01% 25 chrome chrome [.] 0x0000000000e57849
+ 0.01% 40 chrome chrome [.] 0x0000000005d1101d
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb6e1502e
+ 0.01% 20 chrome perf-24199.map [.] 0x0000115bb7c9f11d
+ 0.01% 18 chrome perf-24199.map [.] 0x0000115bb6b577c8
+ 0.01% 30 chrome [kernel.kallsyms] [k] 0xffffffffa4acff4a
+ 0.01% 38 python libpython2.7.so.1.0 [.] 0x000000000011ad14
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6b8221e
+ 0.01% 59 chrome i965_dri.so [.] 0x0000000000483802
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6a7d197
+ 0.01% 19 chrome perf-24199.map [.] 0x0000115bb6b51f6a
+ 0.01% 31 chrome libpthread-2.19.so [.] 0x0000000000009e71
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6c4eac4
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb7bea5c2
+ 0.01% 38 chrome chrome [.] 0x0000000000d3e821
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb7c78e2c
+ 0.01% 22 chrome perf-24199.map [.] 0x0000115bb5d67f0f
+ 0.01% 30 chrome chrome [.] 0x0000000000ec4b08
+ 0.01% 15 chrome perf-24199.map [.] 0x0000115bb5d2793b
+ 0.01% 28 chrome chrome [.] 0x0000000000f38669
+ 0.01% 43 chrome chrome [.] 0x0000000001bfb240
+ 0.01% 20 chrome perf-24199.map [.] 0x0000115bb5d09f8e
+ 0.01% 30 chrome perf-24199.map [.] 0x0000115bb5e3f9b2
+ 0.01% 18 chrome perf-24199.map [.] 0x0000115bb5d072d7
+ 0.01% 32 chrome ld-2.19.so [.] 0x000000000000bcc7
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6a786b6
+ 0.01% 36 chrome chrome [.] 0x00000000024366f0
+ 0.01% 17 chrome perf-24199.map [.] 0x0000115bb6b5a6bf
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6a78024
+ 0.01% 16 sshd sshd [.] 0x0000000000075c37
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6adec72
+ 0.01% 15 chrome perf-24199.map [.] 0x0000115bb6ba0a78
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6a7b373
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6c3256e
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb6e14f74
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6af95ac
+ 0.01% 26 python libpython2.7.so.1.0 [.] 0x0000000000095f32
+ 0.01% 19 chrome perf-24199.map [.] 0x0000115bb5d215e1
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6adeb55
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b2c7db
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb7ba9e3d
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6af5a62
+ 0.01% 25 chrome chrome [.] 0x0000000000e1654e
+ 0.01% 24 chrome perf-24199.map [.] 0x0000115bb6ca75f2
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb7c6ef16
+ 0.01% 17 chrome chrome [.] 0x0000000000ed5a9a
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6b810f4
+ 0.01% 30 chrome chrome [.] 0x0000000000e543e9
+ 0.01% 17 chrome perf-24199.map [.] 0x0000115bb6b5a5e2
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6ab0afa
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb7c6f24e
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b2ca81
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6aa471b
+ 0.01% 37 chrome chrome [.] 0x0000000000eb4d31
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb6e1f80f
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb7c6f5d9
+ 0.01% 31 chrome chrome [.] 0x0000000000e18aa9
+ 0.01% 18 chrome chrome [.] 0x0000000000e4907d
+ 0.01% 58 chrome i965_dri.so [.] 0x0000000000483806
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6a7b69b
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6b81edd
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb7c6f5b3
+ 0.01% 41 chrome chrome [.] 0x0000000000d1e411
+ 0.01% 17 chrome perf-24199.map [.] 0x0000115bb7b0a72f
+ 0.01% 23 lsof [kernel.kallsyms] [k] 0xffffffffa4bf4c3d
+ 0.01% 16 chrome chrome [.] 0x0000000000e7656e
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b24dbd
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6aa5672
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb7ba9d29
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6cb5bbd
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b2ca2a
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb6a72710
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6c35d84
+ 0.01% 19 chrome chrome [.] 0x0000000000e45ac6
+ 0.01% 38 python libpython2.7.so.1.0 [.] 0x00000000000a8c9e
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b2c989
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b2ca71
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6aa5666
+ 0.01% 20 chrome perf-24199.map [.] 0x0000115bb5d2165e
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6aa4729
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6b80feb
+ 0.01% 27 python libpython2.7.so.1.0 [.] 0x00000000000a8bf8
+ 0.01% 23 chrome chrome [.] 0x0000000000eea37a
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb7c6f25f
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb6e1fc56
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b315b4
+ 0.01% 24 chrome chrome [.] 0x0000000000f16081
+ 0.01% 26 chrome [kernel.kallsyms] [k] 0xffffffffa4ad9a67
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb5d2177a
+ 0.01% 30 chrome libc-2.19.so [.] 0x000000000009a991
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6a7b687
+ 0.01% 32 chrome chrome [.] 0x0000000000f9f3c0
+ 0.01% 25 chrome chrome [.] 0x0000000000f13e73
+ 0.01% 17 chrome chrome [.] 0x0000000000e48e41
+ 0.01% 25 chrome chrome [.] 0x0000000000f19e4e
+ 0.01% 17 chrome perf-24199.map [.] 0x0000115bb6b557dd
+ 0.01% 28 chrome chrome [.] 0x0000000000e18b99
+ 0.01% 17 chrome perf-24199.map [.] 0x0000115bb7c9f0c6
+ 0.01% 30 python libpython2.7.so.1.0 [.] 0x00000000000e8dc7
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6ad9163
+ 0.01% 17 chrome perf-24199.map [.] 0x0000115bb6b5c652
+ 0.01% 18 chrome chrome [.] 0x0000000000e45abc
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb6a7d0d7
+ 0.01% 37 chrome chrome [.] 0x0000000000ce74f6
+ 0.01% 15 chrome chrome [.] 0x0000000000f13df8
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6bc7d01
+ 0.01% 20 chrome perf-24199.map [.] 0x0000115bb5d265fa
+ 0.01% 38 chrome chrome [.] 0x00000000011dc830
+ 0.01% 27 chrome perf-24199.map [.] 0x0000115bb5d17263
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb6c36bc0
+ 0.01% 24 chrome chrome [.] 0x0000000000e18b9d
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6ad4877
+ 0.01% 27 chrome chrome [.] 0x0000000000f15f92
+ 0.01% 31 chrome chrome [.] 0x0000000000cf4525
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6aded45
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb6c36bee
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6af5ac8
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6aac55f
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb5d07a9c
+ 0.01% 15 chrome chrome [.] 0x0000000000e520df
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6b80f05
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6ac669f
+ 0.01% 29 chrome libc-2.19.so [.] 0x000000000008e2bb
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6b81f43
+ 0.01% 32 chrome ld-2.19.so [.] 0x000000000000bca3
+ 0.01% 23 chrome perf-24199.map [.] 0x0000115bb6ca738d
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6e1fb74
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6abcae7
+ 0.01% 33 chrome chrome [.] 0x0000000000e10fd9
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb7beaa06
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6e150b1
+ 0.01% 27 chrome perf-24199.map [.] 0x0000115bb7e1e828
+ 0.01% 23 chrome chrome [.] 0x0000000000f1608a
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb6a7b4f3
+ 0.01% 18 chrome perf-24199.map [.] 0x0000115bb6b57760
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb7bf5036
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6b814d1
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6ba4ea1
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6aaca3e
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb7bf5678
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6a7d202
+ 0.01% 24 chrome ld-2.19.so [.] 0x000000000000967a
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb706289d
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b252a0
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6e1fbb7
+ 0.01% 23 chrome chrome [.] 0x0000000000f15f6a
+ 0.01% 27 chrome chrome [.] 0x0000000000f19e57
+ 0.01% 20 chrome chrome [.] 0x0000000000e5752d
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6a9f27e
+ 0.01% 24 ps [kernel.kallsyms] [k] 0xffffffffa4bee3ef
+ 0.01% 18 chrome chrome [.] 0x0000000000ed5ad3
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb6c4e9e3
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b5a2e0
+ 0.01% 25 chrome chrome [.] 0x0000000000eb696b
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6b8213f
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb6c35d37
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb6a7b399
+ 0.01% 21 chrome chrome [.] 0x0000000000e4722f
+ 0.01% 20 chrome chrome [.] 0x0000000000dbec48
+ 0.01% 15 chrome perf-24199.map [.] 0x0000115bb6b358f7
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6b8215a
+ 0.01% 21 chrome libc-2.19.so [.] 0x000000000008d855
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6af5abf
+ 0.01% 36 chrome chrome [.] 0x0000000005d10df6
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b5c716
+ 0.01% 18 chrome chrome [.] 0x0000000000e45c20
+ 0.01% 26 chrome chrome [.] 0x0000000000f1606e
+ 0.01% 22 chrome chrome [.] 0x0000000000f1419e
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb7c8ad93
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb6a784f8
+ 0.01% 17 chrome perf-24199.map [.] 0x0000115bb7cdc597
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb6b810d3
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb7ba9bae
+ 0.01% 19 chrome chrome [.] 0x0000000000e48dd8
+ 0.01% 19 chrome chrome [.] 0x0000000000eb4de8
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6e41577
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb6b7d7f6
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb6b794d1
+ 0.01% 9 chrome perf-24199.map [.] 0x0000115bb6af95d5
+ 0.01% 17 chrome chrome [.] 0x0000000000e6fa00
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb7c8b961
+ 0.01% 15 chrome perf-24199.map [.] 0x0000115bb6b315c5
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a549b6
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a923d2
+ 0.00% 1 python [kernel.kallsyms] [k] 0xffffffffa4b03afa
+ 0.00% 1 tcsd [kernel.kallsyms] [k] 0xffffffffa4a5c294
+ 0.00% 1 kworker/0:1H [kernel.kallsyms] [k] 0xffffffffa4cef732
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a00c88
+ 0.00% 1 kworker/u:5 [kernel.kallsyms] [k] 0xffffffffa4a8ac8a
+ 0.00% 1 lsof [kernel.kallsyms] [k] 0xffffffffa4a51a55
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a3a27c
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a58dff
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a66016
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a92359
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a93347
+ 0.00% 1 sh libc-2.19.so [.] 0x0000000000082a06
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a38e4d
+ 0.00% 1 powerd  [.] 0x00000000000a0930
+ 0.00% 1 python [kernel.kallsyms] [k] 0xffffffffa4ec8d72
+ 0.00% 1 powerd [kernel.kallsyms] [k] 0xffffffffa4a408c8
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4ab35a2
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a5fda6
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a5fed2
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a543e8
+ 0.00% 1 dhcpcd  [.] 0x00000000000280a3
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a54db9
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a00e21
+ 0.00% 1 python [kernel.kallsyms] [k] 0xffffffffa4a65ea8
+ 0.00% 1 python [kernel.kallsyms] [k] 0xffffffffa4adde99
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a5655b
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a40550
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a54a89
+ 0.00% 1 python libpython2.7.so.1.0 [.] 0x000000000008f1be
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a64ade
+ 0.00% 1 python [kernel.kallsyms] [k] 0xffffffffa4bbaa30
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a00c06
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a66bfe
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a63bf9
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a5fe01
+ 0.00% 1 tcsd [kernel.kallsyms] [k] 0xffffffffa4a5f308
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4be56c1
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a40596
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a9381f
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a757c5
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a65ea3
+ 0.00% 1 rsyslogd [kernel.kallsyms] [k] 0xffffffffa4bee6b7
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a2d72a
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a63d00
+ 0.00% 1 chrome chrome [.] 0x0000000000cd3879
+ 0.00% 1 python libc-2.19.so [.] 0x00000000000e9fc7
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a58e03
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a65f0b
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4ec89d8
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4ec90d6
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a548c8
+ 0.00% 1 python [kernel.kallsyms] [k] 0xffffffffa4a38453
+ 0.00% 1 python [kernel.kallsyms] [k] 0xffffffffa4a66335
+ 0.00% 1 chrome chrome [.] 0x0000000000cc75d8
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a2058f
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4e0f293
+ 0.00% 1 kworker/u:0 [ath9k] [k] 0x0000000000008126
+ 0.00% 1 jbd2/sda1-8 [kernel.kallsyms] [k] 0xffffffffa4a08ab7
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a2388e
+ 0.00% 1 powerd  [.] 0x000000000007dca0
+ 0.00% 1 python [kernel.kallsyms] [k] 0xffffffffa4a2c97c
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a0659b
+ 0.00% 1 chrome chrome [.] 0x0000000000d0e3bd
+ 0.00% 1 cryptohomed  [.] 0x000000000001dcd0
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4bec0c0
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a62145
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4bf3318
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a5fd33
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a2d71a
+ 0.00% 1 kworker/u:0 [kernel.kallsyms] [k] 0xffffffffa4ad29f3
+ 0.00% 1 tcsd [kernel.kallsyms] [k] 0xffffffffa4a67997
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a548a8
+ 0.00% 1 python libc-2.19.so [.] 0x0000000000082bb3
+ 0.00% 1 dhcpcd  [.] 0x0000000000024f56
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4bf334e
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a65e88
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a41578
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a65a87
+ 0.00% 1 python [kernel.kallsyms] [k] 0xffffffffa4a66d95
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4ec89cf
+ 0.00% 1 powerd [kernel.kallsyms] [k] 0xffffffffa4c6a22e
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a91b84
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a4073d
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a408c2
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a66a84
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a61b9f
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4ec8c7f
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a40a1d
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4ec8b7c
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a6646c
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a3a6d4
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a41623
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a75d98
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4bf32fd
+ 0.00% 1 chrome chrome [.] 0x00000000022ad6a0
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a62152
+ 0.00% 1 dbus-daemon [kernel.kallsyms] [k] 0xffffffffa4b0d38b
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a58d66
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4ecb3ed
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a54eca
+ 0.00% 1 periodic_schedu [kernel.kallsyms] [k] 0xffffffffa4a92b1a
+ 0.00% 1 periodic_schedu [kernel.kallsyms] [k] 0xffffffffa4bec2e9
+ 0.00% 1 sh [kernel.kallsyms] [k] 0xffffffffa4a92a06
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a65b67
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a66ce5
+ 0.00% 1 python [kernel.kallsyms] [k] 0xffffffffa4b06121
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a54418
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a65cc5
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a547ee
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a64c99
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a5adda
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a923ac
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a91ec6
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a63269
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a62f53
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a621ff
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4a65d8b
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a5de41
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4a56f3d
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4be56c6
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a5d828
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a65ccf
+ 0.00% 1 sh [kernel.kallsyms] [k] 0xffffffffa4b06cb5
+ 0.00% 1 perf [kernel.kallsyms] [k] 0xffffffffa4ab0a38
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a1ff32
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a7aac4
+ 0.00% 8 swapper [kernel.kallsyms] [k] 0xffffffffa4a0ee03
+ 0.00% 4 perf [kernel.kallsyms] [k] 0xffffffffa4a0ee03
+
+#
+# (For a higher level overview, try: perf report --sort comm,dso)
+#
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
new file mode 100644
index 00000000..29e118e8
--- /dev/null
+++ b/crosperf/results_cache.py
@@ -0,0 +1,758 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Module to deal with result cache."""
+
+from __future__ import print_function
+
+import glob
+import hashlib
+import os
+import pickle
+import re
+import tempfile
+import json
+import sys
+
+from cros_utils import command_executer
+from cros_utils import misc
+
+from image_checksummer import ImageChecksummer
+
+import results_report
+import test_flag
+
+SCRATCH_DIR = os.path.expanduser('~/cros_scratch')
+RESULTS_FILE = 'results.txt'
+MACHINE_FILE = 'machine.txt'
+AUTOTEST_TARBALL = 'autotest.tbz2'
+PERF_RESULTS_FILE = 'perf-results.txt'
+CACHE_KEYS_FILE = 'cache_keys.txt'
+
+
+class Result(object):
+ """Class for holding the results of a single test run.
+
+ This class manages what exactly is stored inside the cache without knowing
+ what the key of the cache is. For runs with perf, it stores perf.data,
+ perf.report, etc. The key generation is handled by the ResultsCache class.
+ """
+
+ def __init__(self, logger, label, log_level, machine, cmd_exec=None):
+ self.chromeos_root = label.chromeos_root
+ self._logger = logger
+ self.ce = cmd_exec or command_executer.GetCommandExecuter(
+ self._logger, log_level=log_level)
+ self.temp_dir = None
+ self.label = label
+ self.results_dir = None
+ self.log_level = log_level
+ self.machine = machine
+ self.perf_data_files = []
+ self.perf_report_files = []
+ self.results_file = []
+ self.chrome_version = ''
+ self.err = None
+ self.chroot_results_dir = ''
+ self.test_name = ''
+ self.keyvals = None
+ self.board = None
+ self.suite = None
+ self.retval = None
+ self.out = None
+
+ def CopyFilesTo(self, dest_dir, files_to_copy):
+ file_index = 0
+ for file_to_copy in files_to_copy:
+ if not os.path.isdir(dest_dir):
+ command = 'mkdir -p %s' % dest_dir
+ self.ce.RunCommand(command)
+ dest_file = os.path.join(dest_dir,
+ ('%s.%s' % (os.path.basename(file_to_copy),
+ file_index)))
+ ret = self.ce.CopyFiles(file_to_copy, dest_file, recursive=False)
+ if ret:
+ raise IOError('Could not copy results file: %s' % file_to_copy)
+
+ def CopyResultsTo(self, dest_dir):
+ self.CopyFilesTo(dest_dir, self.perf_data_files)
+ self.CopyFilesTo(dest_dir, self.perf_report_files)
+ if len(self.perf_data_files) or len(self.perf_report_files):
+ self._logger.LogOutput('Perf results files stored in %s.' % dest_dir)
+
+ def GetNewKeyvals(self, keyvals_dict):
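+ # Measurement files come in two flavors: a results-chart.json file shaped
+ # roughly like {'charts': {<chart>: {<trace>: {'value': ..., 'units': ...}}}},
+ # or a perf_measurements file with one JSON object per line, e.g.
+ # {'graph': ..., 'description': ..., 'value': ..., 'units': ...}. (Shapes
+ # inferred from the parsing below; shown here only for illustration.)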
+ # Initialize 'units' dictionary.
+ units_dict = {}
+ for k in keyvals_dict:
+ units_dict[k] = ''
+ results_files = self.GetDataMeasurementsFiles()
+ for f in results_files:
+ # Make sure we can find the results file
+ if os.path.exists(f):
+ data_filename = f
+ else:
+ # Otherwise get the base filename and create the correct
+ # path for it.
+ _, f_base = misc.GetRoot(f)
+ data_filename = os.path.join(self.chromeos_root, 'chroot/tmp',
+ self.temp_dir, f_base)
+ if data_filename.find('.json') > 0:
+ raw_dict = dict()
+ if os.path.exists(data_filename):
+ with open(data_filename, 'r') as data_file:
+ raw_dict = json.load(data_file)
+
+ if 'charts' in raw_dict:
+ raw_dict = raw_dict['charts']
+ for k1 in raw_dict:
+ field_dict = raw_dict[k1]
+ for k2 in field_dict:
+ result_dict = field_dict[k2]
+ key = k1 + '__' + k2
+ if 'value' in result_dict:
+ keyvals_dict[key] = result_dict['value']
+ elif 'values' in result_dict:
+ values = result_dict['values']
+ if ('type' in result_dict and
+ result_dict['type'] == 'list_of_scalar_values' and values and
+ values != 'null'):
+ keyvals_dict[key] = sum(values) / float(len(values))
+ else:
+ keyvals_dict[key] = values
+ units_dict[key] = result_dict['units']
+ else:
+ if os.path.exists(data_filename):
+ with open(data_filename, 'r') as data_file:
+ lines = data_file.readlines()
+ for line in lines:
+ tmp_dict = json.loads(line)
+ graph_name = tmp_dict['graph']
+ graph_str = (graph_name + '__') if graph_name else ''
+ key = graph_str + tmp_dict['description']
+ keyvals_dict[key] = tmp_dict['value']
+ units_dict[key] = tmp_dict['units']
+
+ return keyvals_dict, units_dict
+
+ def AppendTelemetryUnits(self, keyvals_dict, units_dict):
+ """keyvals_dict is the dict of key-value used to generate Crosperf reports.
+
+ units_dict is a dictionary of the units for the return values in
+ keyvals_dict. We need to associate the units with the return values,
+ for Telemetry tests, so that we can include the units in the reports.
+ This function takes each value in keyvals_dict, finds the corresponding
+ unit in the units_dict, and replaces the old value with a list of the
+ old value and the units. This later gets properly parsed in the
+ ResultOrganizer class, for generating the reports.
+ """
+
+ results_dict = {}
+ for k in keyvals_dict:
+ # We don't want these lines in our reports; they add no useful data.
+ if k == '' or k == 'telemetry_Crosperf':
+ continue
+ val = keyvals_dict[k]
+ units = units_dict[k]
+ new_val = [val, units]
+ results_dict[k] = new_val
+ return results_dict
+
+ def GetKeyvals(self):
+ results_in_chroot = os.path.join(self.chromeos_root, 'chroot', 'tmp')
+ if not self.temp_dir:
+ self.temp_dir = tempfile.mkdtemp(dir=results_in_chroot)
+ command = 'cp -r {0}/* {1}'.format(self.results_dir, self.temp_dir)
+ self.ce.RunCommand(command, print_to_console=False)
+
+ command = ('python generate_test_report --no-color --csv %s' %
+ (os.path.join('/tmp', os.path.basename(self.temp_dir))))
+ _, out, _ = self.ce.ChrootRunCommandWOutput(
+ self.chromeos_root, command, print_to_console=False)
+ keyvals_dict = {}
+ tmp_dir_in_chroot = misc.GetInsideChrootPath(self.chromeos_root,
+ self.temp_dir)
+ for line in out.splitlines():
+ tokens = re.split('=|,', line)
+ key = tokens[-2]
+ if key.startswith(tmp_dir_in_chroot):
+ key = key[len(tmp_dir_in_chroot) + 1:]
+ value = tokens[-1]
+ keyvals_dict[key] = value
+
+ # Check to see if there is a perf_measurements file and get the
+ # data from it if so.
+ keyvals_dict, units_dict = self.GetNewKeyvals(keyvals_dict)
+ if self.suite == 'telemetry_Crosperf':
+ # For telemetry_Crosperf results, append the units to the return
+ # results, for use in generating the reports.
+ keyvals_dict = self.AppendTelemetryUnits(keyvals_dict, units_dict)
+ return keyvals_dict
+
+ def GetResultsDir(self):
+ mo = re.search(r'Results placed in (\S+)', self.out)
+ if mo:
+ result = mo.group(1)
+ return result
+ raise RuntimeError('Could not find results directory.')
+
+ def FindFilesInResultsDir(self, find_args):
+ if not self.results_dir:
+ return None
+
+ command = 'find %s %s' % (self.results_dir, find_args)
+ ret, out, _ = self.ce.RunCommandWOutput(command, print_to_console=False)
+ if ret:
+ raise RuntimeError('Could not run find command!')
+ return out
+
+ def GetResultsFile(self):
+ return self.FindFilesInResultsDir('-name results-chart.json').splitlines()
+
+ def GetPerfDataFiles(self):
+ return self.FindFilesInResultsDir('-name perf.data').splitlines()
+
+ def GetPerfReportFiles(self):
+ return self.FindFilesInResultsDir('-name perf.data.report').splitlines()
+
+ def GetDataMeasurementsFiles(self):
+ result = self.FindFilesInResultsDir('-name perf_measurements').splitlines()
+ if not result:
+ result = \
+ self.FindFilesInResultsDir('-name results-chart.json').splitlines()
+ return result
+
+ def GeneratePerfReportFiles(self):
+ perf_report_files = []
+ for perf_data_file in self.perf_data_files:
+ # Generate a perf.report and store it side-by-side with the perf.data
+ # file.
+ chroot_perf_data_file = misc.GetInsideChrootPath(self.chromeos_root,
+ perf_data_file)
+ perf_report_file = '%s.report' % perf_data_file
+ if os.path.exists(perf_report_file):
+ raise RuntimeError('Perf report file already exists: %s' %
+ perf_report_file)
+ chroot_perf_report_file = misc.GetInsideChrootPath(self.chromeos_root,
+ perf_report_file)
+ perf_path = os.path.join(self.chromeos_root, 'chroot', 'usr/bin/perf')
+
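+ # Prefer the perf binary installed under /usr/bin inside the chroot when
+ # it is present; otherwise fall back to /usr/sbin/perf.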
+ perf_file = '/usr/sbin/perf'
+ if os.path.exists(perf_path):
+ perf_file = '/usr/bin/perf'
+
+ command = ('%s report '
+ '-n '
+ '--symfs /build/%s '
+ '--vmlinux /build/%s/usr/lib/debug/boot/vmlinux '
+ '--kallsyms /build/%s/boot/System.map-* '
+ '-i %s --stdio '
+ '> %s' % (perf_file, self.board, self.board, self.board,
+ chroot_perf_data_file, chroot_perf_report_file))
+ self.ce.ChrootRunCommand(self.chromeos_root, command)
+
+ # Save the report path (outside the chroot); GatherPerfResults later adds
+ # a keyval for each event captured in the report.
+ perf_report_files.append(
+ misc.GetOutsideChrootPath(self.chromeos_root,
+ chroot_perf_report_file))
+ return perf_report_files
+
+ def GatherPerfResults(self):
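+ # Report header lines such as 'Events: 266K instructions' (illustrative)
+ # become keyvals like keyvals['perf_0_instructions'], with the count
+ # converted to a plain number by misc.UnitToNumber.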
+ report_id = 0
+ for perf_report_file in self.perf_report_files:
+ with open(perf_report_file, 'r') as f:
+ report_contents = f.read()
+ for group in re.findall(r'Events: (\S+) (\S+)', report_contents):
+ num_events = group[0]
+ event_name = group[1]
+ key = 'perf_%s_%s' % (report_id, event_name)
+ value = str(misc.UnitToNumber(num_events))
+ self.keyvals[key] = value
+
+ def PopulateFromRun(self, out, err, retval, test, suite):
+ self.board = self.label.board
+ self.out = out
+ self.err = err
+ self.retval = retval
+ self.test_name = test
+ self.suite = suite
+ self.chroot_results_dir = self.GetResultsDir()
+ self.results_dir = misc.GetOutsideChrootPath(self.chromeos_root,
+ self.chroot_results_dir)
+ self.results_file = self.GetResultsFile()
+ self.perf_data_files = self.GetPerfDataFiles()
+ # Include all perf.report data in table.
+ self.perf_report_files = self.GeneratePerfReportFiles()
+ # TODO(asharif): Do something similar with perf stat.
+
+ # Grab keyvals from the directory.
+ self.ProcessResults()
+
+ def ProcessJsonResults(self):
+ # Open and parse the json results file generated by telemetry/test_that.
+ if not self.results_file:
+ raise IOError('No results file found.')
+ filename = self.results_file[0]
+ if not filename.endswith('.json'):
+ raise IOError('Attempt to call json on non-json file: %s' % filename)
+
+ if not os.path.exists(filename):
+ return {}
+
+ keyvals = {}
+ with open(filename, 'r') as f:
+ raw_dict = json.load(f)
+ if 'charts' in raw_dict:
+ raw_dict = raw_dict['charts']
+ for k, field_dict in raw_dict.iteritems():
+ for item in field_dict:
+ keyname = k + '__' + item
+ value_dict = field_dict[item]
+ if 'value' in value_dict:
+ result = value_dict['value']
+ elif 'values' in value_dict:
+ values = value_dict['values']
+ if not values:
+ continue
+ if ('type' in value_dict and
+ value_dict['type'] == 'list_of_scalar_values' and
+ values != 'null'):
+ result = sum(values) / float(len(values))
+ else:
+ result = values
+ units = value_dict['units']
+ new_value = [result, units]
+ keyvals[keyname] = new_value
+ return keyvals
+
+ def ProcessResults(self, use_cache=False):
+ # Note that this function doesn't know anything about whether there is a
+ # cache hit or miss. It should process results agnostic of the cache hit
+ # state.
+ if self.results_file and self.results_file[0].find(
+ 'results-chart.json') != -1:
+ self.keyvals = self.ProcessJsonResults()
+ else:
+ if not use_cache:
+ print('\n ** WARNING **: Had to use deprecated output-method to '
+ 'collect results.\n')
+ self.keyvals = self.GetKeyvals()
+ self.keyvals['retval'] = self.retval
+ # Generate report from all perf.data files.
+ # Now parse all perf report files and include them in keyvals.
+ self.GatherPerfResults()
+
+ def GetChromeVersionFromCache(self, cache_dir):
+ # Read chrome_version from keys file, if present.
+ chrome_version = ''
+ keys_file = os.path.join(cache_dir, CACHE_KEYS_FILE)
+ if os.path.exists(keys_file):
+ with open(keys_file, 'r') as f:
+ lines = f.readlines()
+ for l in lines:
+ if l.startswith('Google Chrome '):
+ chrome_version = l
+ if chrome_version.endswith('\n'):
+ chrome_version = chrome_version[:-1]
+ break
+ return chrome_version
+
+ def PopulateFromCacheDir(self, cache_dir, test, suite):
+ self.test_name = test
+ self.suite = suite
+ # Read in everything from the cache directory.
+ with open(os.path.join(cache_dir, RESULTS_FILE), 'r') as f:
+ self.out = pickle.load(f)
+ self.err = pickle.load(f)
+ self.retval = pickle.load(f)
+
+ # Untar the tarball to a temporary directory
+ self.temp_dir = tempfile.mkdtemp(
+ dir=os.path.join(self.chromeos_root, 'chroot', 'tmp'))
+
+ command = ('cd %s && tar xf %s' %
+ (self.temp_dir, os.path.join(cache_dir, AUTOTEST_TARBALL)))
+ ret = self.ce.RunCommand(command, print_to_console=False)
+ if ret:
+ raise RuntimeError('Could not untar cached tarball')
+ self.results_dir = self.temp_dir
+ self.results_file = self.GetDataMeasurementsFiles()
+ self.perf_data_files = self.GetPerfDataFiles()
+ self.perf_report_files = self.GetPerfReportFiles()
+ self.chrome_version = self.GetChromeVersionFromCache(cache_dir)
+ self.ProcessResults(use_cache=True)
+
+ def CleanUp(self, rm_chroot_tmp):
+ if rm_chroot_tmp and self.results_dir:
+ dirname, basename = misc.GetRoot(self.results_dir)
+ if basename.find('test_that_results_') != -1:
+ command = 'rm -rf %s' % self.results_dir
+ else:
+ command = 'rm -rf %s' % dirname
+ self.ce.RunCommand(command)
+ if self.temp_dir:
+ command = 'rm -rf %s' % self.temp_dir
+ self.ce.RunCommand(command)
+
+ def StoreToCacheDir(self, cache_dir, machine_manager, key_list):
+ # Create the dir if it doesn't exist.
+ temp_dir = tempfile.mkdtemp()
+
+ # Store to the temp directory.
+ with open(os.path.join(temp_dir, RESULTS_FILE), 'w') as f:
+ pickle.dump(self.out, f)
+ pickle.dump(self.err, f)
+ pickle.dump(self.retval, f)
+
+ if not test_flag.GetTestMode():
+ with open(os.path.join(temp_dir, CACHE_KEYS_FILE), 'w') as f:
+ f.write('%s\n' % self.label.name)
+ f.write('%s\n' % self.label.chrome_version)
+ f.write('%s\n' % self.machine.checksum_string)
+ for k in key_list:
+ f.write(k)
+ f.write('\n')
+
+ if self.results_dir:
+ tarball = os.path.join(temp_dir, AUTOTEST_TARBALL)
+ command = ('cd %s && '
+ 'tar '
+ '--exclude=var/spool '
+ '--exclude=var/log '
+ '-cjf %s .' % (self.results_dir, tarball))
+ ret = self.ce.RunCommand(command)
+ if ret:
+ raise RuntimeError("Couldn't store autotest output directory.")
+ # Store machine info.
+ # TODO(asharif): Make machine_manager a singleton, and don't pass it into
+ # this function.
+ with open(os.path.join(temp_dir, MACHINE_FILE), 'w') as f:
+ f.write(machine_manager.machine_checksum_string[self.label.name])
+
+ if os.path.exists(cache_dir):
+ command = 'rm -rf {0}'.format(cache_dir)
+ self.ce.RunCommand(command)
+
+ command = 'mkdir -p {0} && '.format(os.path.dirname(cache_dir))
+ command += 'chmod g+x {0} && '.format(temp_dir)
+ command += 'mv {0} {1}'.format(temp_dir, cache_dir)
+ ret = self.ce.RunCommand(command)
+ if ret:
+ command = 'rm -rf {0}'.format(temp_dir)
+ self.ce.RunCommand(command)
+ raise RuntimeError('Could not move dir %s to dir %s' %
+ (temp_dir, cache_dir))
+
+ @classmethod
+ def CreateFromRun(cls,
+ logger,
+ log_level,
+ label,
+ machine,
+ out,
+ err,
+ retval,
+ test,
+ suite='telemetry_Crosperf'):
+ if suite == 'telemetry':
+ result = TelemetryResult(logger, label, log_level, machine)
+ else:
+ result = cls(logger, label, log_level, machine)
+ result.PopulateFromRun(out, err, retval, test, suite)
+ return result
+
+ @classmethod
+ def CreateFromCacheHit(cls,
+ logger,
+ log_level,
+ label,
+ machine,
+ cache_dir,
+ test,
+ suite='telemetry_Crosperf'):
+ if suite == 'telemetry':
+ result = TelemetryResult(logger, label, log_level, machine)
+ else:
+ result = cls(logger, label, log_level, machine)
+ try:
+ result.PopulateFromCacheDir(cache_dir, test, suite)
+
+ except RuntimeError as e:
+ logger.LogError('Exception while using cache: %s' % e)
+ return None
+ return result
+
+
+class TelemetryResult(Result):
+ """Class to hold the results of a single Telemetry run."""
+
+ def __init__(self, logger, label, log_level, machine, cmd_exec=None):
+ super(TelemetryResult, self).__init__(logger, label, log_level, machine,
+ cmd_exec)
+
+ def PopulateFromRun(self, out, err, retval, test, suite):
+ self.out = out
+ self.err = err
+ self.retval = retval
+
+ self.ProcessResults()
+
+ # pylint: disable=arguments-differ
+ def ProcessResults(self):
+ # The output is:
+ # url,average_commit_time (ms),...
+ # www.google.com,33.4,21.2,...
+ # We need to convert to this format:
+ # {"www.google.com:average_commit_time (ms)": "33.4",
+ # "www.google.com:...": "21.2"}
+ # Added note: Occasionally the output comes back
+ # with "JSON.stringify(window.automation.GetResults())" on
+ # the first line, and then the rest of the output as
+ # described above.
+
+ lines = self.out.splitlines()
+ self.keyvals = {}
+
+ if lines:
+ if lines[0].startswith('JSON.stringify'):
+ lines = lines[1:]
+
+ if not lines:
+ return
+ labels = lines[0].split(',')
+ for line in lines[1:]:
+ fields = line.split(',')
+ if len(fields) != len(labels):
+ continue
+ for i in xrange(1, len(labels)):
+ key = '%s %s' % (fields[0], labels[i])
+ value = fields[i]
+ self.keyvals[key] = value
+ self.keyvals['retval'] = self.retval
+
+ def PopulateFromCacheDir(self, cache_dir, test, suite):
+ self.test_name = test
+ self.suite = suite
+ with open(os.path.join(cache_dir, RESULTS_FILE), 'r') as f:
+ self.out = pickle.load(f)
+ self.err = pickle.load(f)
+ self.retval = pickle.load(f)
+
+ self.chrome_version = \
+ super(TelemetryResult, self).GetChromeVersionFromCache(cache_dir)
+ self.ProcessResults()
+
+
+class CacheConditions(object):
+ """Various Cache condition values, for export."""
+
+ # Cache hit only if the result file exists.
+ CACHE_FILE_EXISTS = 0
+
+ # Cache hit if the checksum of cpuinfo and totalmem of
+ # the cached result and the new run match.
+ MACHINES_MATCH = 1
+
+ # Cache hit if the image checksum of the cached result and the new run match.
+ CHECKSUMS_MATCH = 2
+
+ # Cache hit only if the cached result was successful.
+ RUN_SUCCEEDED = 3
+
+ # Never a cache hit.
+ FALSE = 4
+
+ # Cache hit if the image path matches the cached image path.
+ IMAGE_PATH_MATCH = 5
+
+ # Cache hit if the UUID of the hard disk matches the cached one.
+ SAME_MACHINE_MATCH = 6
+
+
+class ResultsCache(object):
+ """Class to handle the cache for storing/retrieving test run results.
+
+ This class manages the key of the cached runs without worrying about what
+ exactly is stored (the value). The value generation is handled by the
+ Result class.
+ """
+ CACHE_VERSION = 6
+
+ def __init__(self):
+ # Proper initialization happens in the Init function below.
+ self.chromeos_image = None
+ self.chromeos_root = None
+ self.test_name = None
+ self.iteration = None
+ self.test_args = None
+ self.profiler_args = None
+ self.board = None
+ self.cache_conditions = None
+ self.machine_manager = None
+ self.machine = None
+ self._logger = None
+ self.ce = None
+ self.label = None
+ self.share_cache = None
+ self.suite = None
+ self.log_level = None
+ self.show_all = None
+ self.run_local = None
+
+ def Init(self, chromeos_image, chromeos_root, test_name, iteration, test_args,
+ profiler_args, machine_manager, machine, board, cache_conditions,
+ logger_to_use, log_level, label, share_cache, suite,
+ show_all_results, run_local):
+ self.chromeos_image = chromeos_image
+ self.chromeos_root = chromeos_root
+ self.test_name = test_name
+ self.iteration = iteration
+ self.test_args = test_args
+ self.profiler_args = profiler_args
+ self.board = board
+ self.cache_conditions = cache_conditions
+ self.machine_manager = machine_manager
+ self.machine = machine
+ self._logger = logger_to_use
+ self.ce = command_executer.GetCommandExecuter(
+ self._logger, log_level=log_level)
+ self.label = label
+ self.share_cache = share_cache
+ self.suite = suite
+ self.log_level = log_level
+ self.show_all = show_all_results
+ self.run_local = run_local
+
+ def GetCacheDirForRead(self):
+ matching_dirs = []
+ for glob_path in self.FormCacheDir(self.GetCacheKeyList(True)):
+ matching_dirs += glob.glob(glob_path)
+
+ if matching_dirs:
+ # Cache file found.
+ return matching_dirs[0]
+ return None
+
+ def GetCacheDirForWrite(self, get_keylist=False):
+ cache_path = self.FormCacheDir(self.GetCacheKeyList(False))[0]
+ if get_keylist:
+ args_str = '%s_%s_%s' % (self.test_args, self.profiler_args,
+ self.run_local)
+ version, image = results_report.ParseChromeosImage(
+ self.label.chromeos_image)
+ keylist = [
+ version, image, self.label.board, self.machine.name, self.test_name,
+ str(self.iteration), args_str
+ ]
+ return cache_path, keylist
+ return cache_path
+
+ def FormCacheDir(self, list_of_strings):
+ cache_key = ' '.join(list_of_strings)
+ cache_dir = misc.GetFilenameFromString(cache_key)
+ if self.label.cache_dir:
+ cache_home = os.path.abspath(os.path.expanduser(self.label.cache_dir))
+ cache_path = [os.path.join(cache_home, cache_dir)]
+ else:
+ cache_path = [os.path.join(SCRATCH_DIR, cache_dir)]
+
+ if len(self.share_cache):
+ for path in [x.strip() for x in self.share_cache.split(',')]:
+ if os.path.exists(path):
+ cache_path.append(os.path.join(path, cache_dir))
+ else:
+ self._logger.LogFatal('Unable to find shared cache: %s' % path)
+
+ return cache_path
+
+ def GetCacheKeyList(self, read):
+ if read and CacheConditions.MACHINES_MATCH not in self.cache_conditions:
+ machine_checksum = '*'
+ else:
+ machine_checksum = self.machine_manager.machine_checksum[self.label.name]
+ if read and CacheConditions.CHECKSUMS_MATCH not in self.cache_conditions:
+ checksum = '*'
+ elif self.label.image_type == 'trybot':
+ checksum = hashlib.md5(self.label.chromeos_image).hexdigest()
+ elif self.label.image_type == 'official':
+ checksum = '*'
+ else:
+ checksum = ImageChecksummer().Checksum(self.label, self.log_level)
+
+ if read and CacheConditions.IMAGE_PATH_MATCH not in self.cache_conditions:
+ image_path_checksum = '*'
+ else:
+ image_path_checksum = hashlib.md5(self.chromeos_image).hexdigest()
+
+ machine_id_checksum = ''
+ if read and CacheConditions.SAME_MACHINE_MATCH not in self.cache_conditions:
+ machine_id_checksum = '*'
+ else:
+ if self.machine and self.machine.name in self.label.remote:
+ machine_id_checksum = self.machine.machine_id_checksum
+ else:
+ for machine in self.machine_manager.GetMachines(self.label):
+ if machine.name == self.label.remote[0]:
+ machine_id_checksum = machine.machine_id_checksum
+ break
+
+ temp_test_args = '%s %s %s' % (self.test_args, self.profiler_args,
+ self.run_local)
+ test_args_checksum = hashlib.md5(temp_test_args).hexdigest()
+ return (image_path_checksum, self.test_name, str(self.iteration),
+ test_args_checksum, checksum, machine_checksum, machine_id_checksum,
+ str(self.CACHE_VERSION))
+
+ def ReadResult(self):
+ if CacheConditions.FALSE in self.cache_conditions:
+ cache_dir = self.GetCacheDirForWrite()
+ command = 'rm -rf %s' % (cache_dir,)
+ self.ce.RunCommand(command)
+ return None
+ cache_dir = self.GetCacheDirForRead()
+
+ if not cache_dir:
+ return None
+
+ if not os.path.isdir(cache_dir):
+ return None
+
+ if self.log_level == 'verbose':
+ self._logger.LogOutput('Trying to read from cache dir: %s' % cache_dir)
+ result = Result.CreateFromCacheHit(self._logger, self.log_level, self.label,
+ self.machine, cache_dir, self.test_name,
+ self.suite)
+ if not result:
+ return None
+
+ if (result.retval == 0 or
+ CacheConditions.RUN_SUCCEEDED not in self.cache_conditions):
+ return result
+
+ return None
+
+ def StoreResult(self, result):
+ cache_dir, keylist = self.GetCacheDirForWrite(get_keylist=True)
+ result.StoreToCacheDir(cache_dir, self.machine_manager, keylist)
+
+
+class MockResultsCache(ResultsCache):
+ """Class for mock testing, corresponding to ResultsCache class."""
+
+ def Init(self, *args):
+ pass
+
+ def ReadResult(self):
+ return None
+
+ def StoreResult(self, result):
+ pass
+
+
+class MockResult(Result):
+ """Class for mock testing, corresponding to Result class."""
+
+ def PopulateFromRun(self, out, err, retval, test, suite):
+ self.out = out
+ self.err = err
+ self.retval = retval
diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py
new file mode 100755
index 00000000..9e97c9b1
--- /dev/null
+++ b/crosperf/results_cache_unittest.py
@@ -0,0 +1,1178 @@
+#!/usr/bin/env python2
+
+# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Module of result cache unittest."""
+
+from __future__ import print_function
+
+import mock
+import os
+import tempfile
+import unittest
+
+import image_checksummer
+import machine_manager
+import test_flag
+
+from label import MockLabel
+from results_cache import CacheConditions
+from results_cache import Result
+from results_cache import ResultsCache
+from results_cache import TelemetryResult
+from cros_utils import command_executer
+from cros_utils import logger
+from cros_utils import misc
+
+OUTPUT = """CMD (True): ./test_that.sh\
+ --remote=172.17.128.241 --board=lumpy LibCBench
+CMD (None): cd /usr/local/google/home/yunlian/gd/src/build/images/lumpy/latest/../../../../..; cros_sdk -- ./in_chroot_cmd6X7Cxu.sh
+Identity added: /tmp/test_that.PO1234567/autotest_key (/tmp/test_that.PO1234567/autotest_key)
+INFO : Using emerged autotests already installed at /build/lumpy/usr/local/autotest.
+
+INFO : Running the following control files 1 times:
+INFO : * 'client/site_tests/platform_LibCBench/control'
+
+INFO : Running client test client/site_tests/platform_LibCBench/control
+./server/autoserv -m 172.17.128.241 --ssh-port 22 -c client/site_tests/platform_LibCBench/control -r /tmp/test_that.PO1234567/platform_LibCBench --test-retry=0 --args
+ERROR:root:import statsd failed, no stats will be reported.
+14:20:22 INFO | Results placed in /tmp/test_that.PO1234567/platform_LibCBench
+14:20:22 INFO | Processing control file
+14:20:23 INFO | Starting master ssh connection '/usr/bin/ssh -a -x -N -o ControlMaster=yes -o ControlPath=/tmp/_autotmp_VIIP67ssh-master/socket -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=yes -o ConnectTimeout=30 -o ServerAliveInterval=180 -o ServerAliveCountMax=3 -o ConnectionAttempts=4 -o Protocol=2 -l root -p 22 172.17.128.241'
+14:20:23 ERROR| [stderr] Warning: Permanently added '172.17.128.241' (RSA) to the list of known hosts.
+14:20:23 INFO | INFO ---- ---- kernel=3.8.11 localtime=May 22 14:20:23 timestamp=1369257623
+14:20:23 INFO | Installing autotest on 172.17.128.241
+14:20:23 INFO | Using installation dir /usr/local/autotest
+14:20:23 WARNI| No job_repo_url for <remote host: 172.17.128.241>
+14:20:23 INFO | Could not install autotest using the packaging system: No repos to install an autotest client from. Trying other methods
+14:20:23 INFO | Installation of autotest completed
+14:20:24 WARNI| No job_repo_url for <remote host: 172.17.128.241>
+14:20:24 INFO | Executing /usr/local/autotest/bin/autotest /usr/local/autotest/control phase 0
+14:20:24 INFO | Entered autotestd_monitor.
+14:20:24 INFO | Finished launching tail subprocesses.
+14:20:24 INFO | Finished waiting on autotestd to start.
+14:20:26 INFO | START ---- ---- timestamp=1369257625 localtime=May 22 14:20:25
+14:20:26 INFO | START platform_LibCBench platform_LibCBench timestamp=1369257625 localtime=May 22 14:20:25
+14:20:30 INFO | GOOD platform_LibCBench platform_LibCBench timestamp=1369257630 localtime=May 22 14:20:30 completed successfully
+14:20:30 INFO | END GOOD platform_LibCBench platform_LibCBench timestamp=1369257630 localtime=May 22 14:20:30
+14:20:31 INFO | END GOOD ---- ---- timestamp=1369257630 localtime=May 22 14:20:30
+14:20:31 INFO | Got lock of exit_code_file.
+14:20:31 INFO | Released lock of exit_code_file and closed it.
+OUTPUT: ==============================
+OUTPUT: Current time: 2013-05-22 14:20:32.818831 Elapsed: 0:01:30 ETA: Unknown
+Done: 0% [ ]
+OUTPUT: Thread Status:
+RUNNING: 1 ('ttt: LibCBench (1)' 0:01:21)
+Machine Status:
+Machine Thread Lock Status Checksum
+172.17.128.241 ttt: LibCBench (1) True RUNNING 3ba9f2ecbb222f20887daea5583d86ba
+
+OUTPUT: ==============================
+14:20:33 INFO | Killing child processes.
+14:20:33 INFO | Client complete
+14:20:33 INFO | Finished processing control file
+14:20:33 INFO | Starting master ssh connection '/usr/bin/ssh -a -x -N -o ControlMaster=yes -o ControlPath=/tmp/_autotmp_aVJUgmssh-master/socket -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=yes -o ConnectTimeout=30 -o ServerAliveInterval=180 -o ServerAliveCountMax=3 -o ConnectionAttempts=4 -o Protocol=2 -l root -p 22 172.17.128.241'
+14:20:33 ERROR| [stderr] Warning: Permanently added '172.17.128.241' (RSA) to the list of known hosts.
+
+INFO : Test results:
+-------------------------------------------------------------------
+platform_LibCBench [ PASSED ]
+platform_LibCBench/platform_LibCBench [ PASSED ]
+platform_LibCBench/platform_LibCBench b_malloc_big1__0_ 0.00375231466667
+platform_LibCBench/platform_LibCBench b_malloc_big2__0_ 0.002951359
+platform_LibCBench/platform_LibCBench b_malloc_bubble__0_ 0.015066374
+platform_LibCBench/platform_LibCBench b_malloc_sparse__0_ 0.015053784
+platform_LibCBench/platform_LibCBench b_malloc_thread_local__0_ 0.01138439
+platform_LibCBench/platform_LibCBench b_malloc_thread_stress__0_ 0.0367894733333
+platform_LibCBench/platform_LibCBench b_malloc_tiny1__0_ 0.000768474333333
+platform_LibCBench/platform_LibCBench b_malloc_tiny2__0_ 0.000581407333333
+platform_LibCBench/platform_LibCBench b_pthread_create_serial1__0_ 0.0291785246667
+platform_LibCBench/platform_LibCBench b_pthread_createjoin_serial1__0_ 0.031907936
+platform_LibCBench/platform_LibCBench b_pthread_createjoin_serial2__0_ 0.043485347
+platform_LibCBench/platform_LibCBench b_pthread_uselesslock__0_ 0.0294113346667
+platform_LibCBench/platform_LibCBench b_regex_compile____a_b_c__d_b__ 0.00529833933333
+platform_LibCBench/platform_LibCBench b_regex_search____a_b_c__d_b__ 0.00165455066667
+platform_LibCBench/platform_LibCBench b_regex_search___a_25_b__ 0.0496191923333
+platform_LibCBench/platform_LibCBench b_stdio_putcgetc__0_ 0.100005711667
+platform_LibCBench/platform_LibCBench b_stdio_putcgetc_unlocked__0_ 0.0371443833333
+platform_LibCBench/platform_LibCBench b_string_memset__0_ 0.00275405066667
+platform_LibCBench/platform_LibCBench b_string_strchr__0_ 0.00456903
+platform_LibCBench/platform_LibCBench b_string_strlen__0_ 0.044893587
+platform_LibCBench/platform_LibCBench b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac__ 0.118360778
+platform_LibCBench/platform_LibCBench b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaac__ 0.068957325
+platform_LibCBench/platform_LibCBench b_string_strstr___aaaaaaaaaaaaaacccccccccccc__ 0.0135694476667
+platform_LibCBench/platform_LibCBench b_string_strstr___abcdefghijklmnopqrstuvwxyz__ 0.0134553343333
+platform_LibCBench/platform_LibCBench b_string_strstr___azbycxdwevfugthsirjqkplomn__ 0.0133123556667
+platform_LibCBench/platform_LibCBench b_utf8_bigbuf__0_ 0.0473772253333
+platform_LibCBench/platform_LibCBench b_utf8_onebyone__0_ 0.130938538333
+-------------------------------------------------------------------
+Total PASS: 2/2 (100%)
+
+INFO : Elapsed time: 0m16s
+"""
+
+error = """
+ERROR: Identity added: /tmp/test_that.Z4Ld/autotest_key (/tmp/test_that.Z4Ld/autotest_key)
+INFO : Using emerged autotests already installed at /build/lumpy/usr/local/autotest.
+INFO : Running the following control files 1 times:
+INFO : * 'client/site_tests/platform_LibCBench/control'
+INFO : Running client test client/site_tests/platform_LibCBench/control
+INFO : Test results:
+INFO : Elapsed time: 0m18s
+"""
+
+keyvals = {
+ '': 'PASS',
+ 'b_stdio_putcgetc__0_': '0.100005711667',
+ 'b_string_strstr___azbycxdwevfugthsirjqkplomn__': '0.0133123556667',
+ 'b_malloc_thread_local__0_': '0.01138439',
+ 'b_string_strlen__0_': '0.044893587',
+ 'b_malloc_sparse__0_': '0.015053784',
+ 'b_string_memset__0_': '0.00275405066667',
+ 'platform_LibCBench': 'PASS',
+ 'b_pthread_uselesslock__0_': '0.0294113346667',
+ 'b_string_strchr__0_': '0.00456903',
+ 'b_pthread_create_serial1__0_': '0.0291785246667',
+ 'b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac__': '0.118360778',
+ 'b_string_strstr___aaaaaaaaaaaaaacccccccccccc__': '0.0135694476667',
+ 'b_pthread_createjoin_serial1__0_': '0.031907936',
+ 'b_malloc_thread_stress__0_': '0.0367894733333',
+ 'b_regex_search____a_b_c__d_b__': '0.00165455066667',
+ 'b_malloc_bubble__0_': '0.015066374',
+ 'b_malloc_big2__0_': '0.002951359',
+ 'b_stdio_putcgetc_unlocked__0_': '0.0371443833333',
+ 'b_pthread_createjoin_serial2__0_': '0.043485347',
+ 'b_regex_search___a_25_b__': '0.0496191923333',
+ 'b_utf8_bigbuf__0_': '0.0473772253333',
+ 'b_malloc_big1__0_': '0.00375231466667',
+ 'b_regex_compile____a_b_c__d_b__': '0.00529833933333',
+ 'b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaac__': '0.068957325',
+ 'b_malloc_tiny2__0_': '0.000581407333333',
+ 'b_utf8_onebyone__0_': '0.130938538333',
+ 'b_malloc_tiny1__0_': '0.000768474333333',
+ 'b_string_strstr___abcdefghijklmnopqrstuvwxyz__': '0.0134553343333'
+}
+
+TMP_DIR1 = '/tmp/tmpAbcXyz'
+
+
+class MockResult(Result):
+ """Mock result class."""
+
+ def __init__(self, mylogger, label, logging_level, machine):
+ super(MockResult, self).__init__(mylogger, label, logging_level, machine)
+
+ def FindFilesInResultsDir(self, find_args):
+ return ''
+
+ # pylint: disable=arguments-differ
+ def GetKeyvals(self, temp=False):
+ if temp:
+ pass
+ return keyvals
+
+
+class ResultTest(unittest.TestCase):
+ """Result test class."""
+
+ def __init__(self, *args, **kwargs):
+ super(ResultTest, self).__init__(*args, **kwargs)
+ self.callFakeProcessResults = False
+ self.fakeCacheReturnResult = None
+ self.callGetResultsDir = False
+ self.callProcessResults = False
+ self.callGetPerfReportFiles = False
+ self.kv_dict = None
+ self.tmpdir = ''
+ self.callGetNewKeyvals = False
+ self.callGetResultsFile = False
+ self.callGetPerfDataFiles = False
+ self.args = None
+ self.callGatherPerfResults = False
+ self.mock_logger = mock.Mock(spec=logger.Logger)
+ self.mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+ self.mock_label = MockLabel('mock_label', 'chromeos_image', 'autotest_dir',
+ '/tmp', 'lumpy', 'remote', 'image_args',
+ 'cache_dir', 'average', 'gcc', None)
+
+ def testCreateFromRun(self):
+ result = MockResult.CreateFromRun(logger.GetLogger(), 'average',
+ self.mock_label, 'remote1', OUTPUT, error,
+ 0, True, 0)
+ self.assertEqual(result.keyvals, keyvals)
+ self.assertEqual(result.chroot_results_dir,
+ '/tmp/test_that.PO1234567/platform_LibCBench')
+ self.assertEqual(result.results_dir,
+ '/tmp/chroot/tmp/test_that.PO1234567/platform_LibCBench')
+ self.assertEqual(result.retval, 0)
+
+ def setUp(self):
+ self.result = Result(self.mock_logger, self.mock_label, 'average',
+ self.mock_cmd_exec)
+
+ @mock.patch.object(os.path, 'isdir')
+ @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
+ @mock.patch.object(command_executer.CommandExecuter, 'CopyFiles')
+ def test_copy_files_to(self, mock_copyfiles, mock_runcmd, mock_isdir):
+
+ files = ['src_file_1', 'src_file_2', 'src_file_3']
+ dest_dir = '/tmp/test'
+ self.mock_cmd_exec.RunCommand = mock_runcmd
+ self.mock_cmd_exec.CopyFiles = mock_copyfiles
+
+ mock_copyfiles.return_value = 0
+
+    # Test 1. dest_dir exists; CopyFiles returns 0.
+ mock_isdir.return_value = True
+ self.result.CopyFilesTo(dest_dir, files)
+ self.assertEqual(mock_runcmd.call_count, 0)
+ self.assertEqual(mock_copyfiles.call_count, 3)
+ first_args = mock_copyfiles.call_args_list[0][0]
+ second_args = mock_copyfiles.call_args_list[1][0]
+ third_args = mock_copyfiles.call_args_list[2][0]
+ self.assertEqual(first_args, ('src_file_1', '/tmp/test/src_file_1.0'))
+ self.assertEqual(second_args, ('src_file_2', '/tmp/test/src_file_2.0'))
+ self.assertEqual(third_args, ('src_file_3', '/tmp/test/src_file_3.0'))
+
+ mock_runcmd.reset_mock()
+ mock_copyfiles.reset_mock()
+    # Test 2. dest_dir does not exist; CopyFiles returns 0.
+ mock_isdir.return_value = False
+ self.result.CopyFilesTo(dest_dir, files)
+ self.assertEqual(mock_runcmd.call_count, 3)
+ self.assertEqual(mock_copyfiles.call_count, 3)
+ self.assertEqual(mock_runcmd.call_args_list[0],
+ mock_runcmd.call_args_list[1])
+ self.assertEqual(mock_runcmd.call_args_list[0],
+ mock_runcmd.call_args_list[2])
+ self.assertEqual(mock_runcmd.call_args_list[0][0], ('mkdir -p /tmp/test',))
+
+    # Test 3. CopyFiles returns 1 (fails).
+ mock_copyfiles.return_value = 1
+ self.assertRaises(Exception, self.result.CopyFilesTo, dest_dir, files)
+
+ @mock.patch.object(Result, 'CopyFilesTo')
+ def test_copy_results_to(self, mockCopyFilesTo):
+ perf_data_files = [
+ '/tmp/perf.data.0', '/tmp/perf.data.1', '/tmp/perf.data.2'
+ ]
+ perf_report_files = [
+ '/tmp/perf.report.0', '/tmp/perf.report.1', '/tmp/perf.report.2'
+ ]
+
+ self.result.perf_data_files = perf_data_files
+ self.result.perf_report_files = perf_report_files
+
+ self.result.CopyFilesTo = mockCopyFilesTo
+ self.result.CopyResultsTo('/tmp/results/')
+ self.assertEqual(mockCopyFilesTo.call_count, 2)
+ self.assertEqual(len(mockCopyFilesTo.call_args_list), 2)
+ self.assertEqual(mockCopyFilesTo.call_args_list[0][0],
+ ('/tmp/results/', perf_data_files))
+ self.assertEqual(mockCopyFilesTo.call_args_list[1][0],
+ ('/tmp/results/', perf_report_files))
+
+ def test_get_new_keyvals(self):
+ kv_dict = {}
+
+ def FakeGetDataMeasurementsFiles():
+ filename = os.path.join(os.getcwd(), 'unittest_keyval_file.txt')
+ return [filename]
+
+ self.result.GetDataMeasurementsFiles = FakeGetDataMeasurementsFiles
+ kv_dict2, udict = self.result.GetNewKeyvals(kv_dict)
+ self.assertEqual(kv_dict2, {
+ u'Box2D__Box2D': 4775,
+ u'Mandreel__Mandreel': 6620,
+ u'Gameboy__Gameboy': 9901,
+ u'Crypto__Crypto': 8737,
+ u'telemetry_page_measurement_results__num_errored': 0,
+ u'telemetry_page_measurement_results__num_failed': 0,
+ u'PdfJS__PdfJS': 6455,
+ u'Total__Score': 7918,
+ u'EarleyBoyer__EarleyBoyer': 14340,
+ u'MandreelLatency__MandreelLatency': 5188,
+ u'CodeLoad__CodeLoad': 6271,
+ u'DeltaBlue__DeltaBlue': 14401,
+ u'Typescript__Typescript': 9815,
+ u'SplayLatency__SplayLatency': 7653,
+ u'zlib__zlib': 16094,
+ u'Richards__Richards': 10358,
+ u'RegExp__RegExp': 1765,
+ u'NavierStokes__NavierStokes': 9815,
+ u'Splay__Splay': 4425,
+ u'RayTrace__RayTrace': 16600
+ })
+ self.assertEqual(udict, {
+ u'Box2D__Box2D': u'score',
+ u'Mandreel__Mandreel': u'score',
+ u'Gameboy__Gameboy': u'score',
+ u'Crypto__Crypto': u'score',
+ u'telemetry_page_measurement_results__num_errored': u'count',
+ u'telemetry_page_measurement_results__num_failed': u'count',
+ u'PdfJS__PdfJS': u'score',
+ u'Total__Score': u'score',
+ u'EarleyBoyer__EarleyBoyer': u'score',
+ u'MandreelLatency__MandreelLatency': u'score',
+ u'CodeLoad__CodeLoad': u'score',
+ u'DeltaBlue__DeltaBlue': u'score',
+ u'Typescript__Typescript': u'score',
+ u'SplayLatency__SplayLatency': u'score',
+ u'zlib__zlib': u'score',
+ u'Richards__Richards': u'score',
+ u'RegExp__RegExp': u'score',
+ u'NavierStokes__NavierStokes': u'score',
+ u'Splay__Splay': u'score',
+ u'RayTrace__RayTrace': u'score'
+ })
+
+ def test_append_telemetry_units(self):
+ kv_dict = {
+ u'Box2D__Box2D': 4775,
+ u'Mandreel__Mandreel': 6620,
+ u'Gameboy__Gameboy': 9901,
+ u'Crypto__Crypto': 8737,
+ u'PdfJS__PdfJS': 6455,
+ u'Total__Score': 7918,
+ u'EarleyBoyer__EarleyBoyer': 14340,
+ u'MandreelLatency__MandreelLatency': 5188,
+ u'CodeLoad__CodeLoad': 6271,
+ u'DeltaBlue__DeltaBlue': 14401,
+ u'Typescript__Typescript': 9815,
+ u'SplayLatency__SplayLatency': 7653,
+ u'zlib__zlib': 16094,
+ u'Richards__Richards': 10358,
+ u'RegExp__RegExp': 1765,
+ u'NavierStokes__NavierStokes': 9815,
+ u'Splay__Splay': 4425,
+ u'RayTrace__RayTrace': 16600
+ }
+ units_dict = {
+ u'Box2D__Box2D': u'score',
+ u'Mandreel__Mandreel': u'score',
+ u'Gameboy__Gameboy': u'score',
+ u'Crypto__Crypto': u'score',
+ u'PdfJS__PdfJS': u'score',
+ u'Total__Score': u'score',
+ u'EarleyBoyer__EarleyBoyer': u'score',
+ u'MandreelLatency__MandreelLatency': u'score',
+ u'CodeLoad__CodeLoad': u'score',
+ u'DeltaBlue__DeltaBlue': u'score',
+ u'Typescript__Typescript': u'score',
+ u'SplayLatency__SplayLatency': u'score',
+ u'zlib__zlib': u'score',
+ u'Richards__Richards': u'score',
+ u'RegExp__RegExp': u'score',
+ u'NavierStokes__NavierStokes': u'score',
+ u'Splay__Splay': u'score',
+ u'RayTrace__RayTrace': u'score'
+ }
+
+ results_dict = self.result.AppendTelemetryUnits(kv_dict, units_dict)
+ self.assertEqual(results_dict, {
+ u'Box2D__Box2D': [4775, u'score'],
+ u'Splay__Splay': [4425, u'score'],
+ u'Gameboy__Gameboy': [9901, u'score'],
+ u'Crypto__Crypto': [8737, u'score'],
+ u'PdfJS__PdfJS': [6455, u'score'],
+ u'Total__Score': [7918, u'score'],
+ u'EarleyBoyer__EarleyBoyer': [14340, u'score'],
+ u'MandreelLatency__MandreelLatency': [5188, u'score'],
+ u'DeltaBlue__DeltaBlue': [14401, u'score'],
+ u'SplayLatency__SplayLatency': [7653, u'score'],
+ u'Mandreel__Mandreel': [6620, u'score'],
+ u'Richards__Richards': [10358, u'score'],
+ u'zlib__zlib': [16094, u'score'],
+ u'CodeLoad__CodeLoad': [6271, u'score'],
+ u'Typescript__Typescript': [9815, u'score'],
+ u'RegExp__RegExp': [1765, u'score'],
+ u'RayTrace__RayTrace': [16600, u'score'],
+ u'NavierStokes__NavierStokes': [9815, u'score']
+ })
+
+ @mock.patch.object(misc, 'GetInsideChrootPath')
+ @mock.patch.object(tempfile, 'mkdtemp')
+ @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
+ @mock.patch.object(command_executer.CommandExecuter,
+ 'ChrootRunCommandWOutput')
+ def test_get_keyvals(self, mock_chrootruncmd, mock_runcmd, mock_mkdtemp,
+ mock_getpath):
+
+ self.kv_dict = {}
+ self.callGetNewKeyvals = False
+
+ def reset():
+ self.kv_dict = {}
+ self.callGetNewKeyvals = False
+ mock_chrootruncmd.reset_mock()
+ mock_runcmd.reset_mock()
+ mock_mkdtemp.reset_mock()
+ mock_getpath.reset_mock()
+
+ def FakeGetNewKeyvals(kv_dict):
+ self.kv_dict = kv_dict
+ self.callGetNewKeyvals = True
+ return_kvdict = {'first_time': 680, 'Total': 10}
+ return_udict = {'first_time': 'ms', 'Total': 'score'}
+ return return_kvdict, return_udict
+
+ mock_mkdtemp.return_value = TMP_DIR1
+ mock_chrootruncmd.return_value = [
+ '', ('%s,PASS\n%s/telemetry_Crosperf,PASS\n') % (TMP_DIR1, TMP_DIR1), ''
+ ]
+ mock_getpath.return_value = TMP_DIR1
+ self.result.ce.ChrootRunCommandWOutput = mock_chrootruncmd
+ self.result.ce.RunCommand = mock_runcmd
+ self.result.GetNewKeyvals = FakeGetNewKeyvals
+ self.result.suite = 'telemetry_Crosperf'
+ self.result.results_dir = '/tmp/test_that_resultsNmq'
+
+ # Test 1. no self.temp_dir.
+ res = self.result.GetKeyvals()
+ self.assertTrue(self.callGetNewKeyvals)
+ self.assertEqual(self.kv_dict, {'': 'PASS', 'telemetry_Crosperf': 'PASS'})
+ self.assertEqual(mock_runcmd.call_count, 1)
+ self.assertEqual(mock_runcmd.call_args_list[0][0],
+ ('cp -r /tmp/test_that_resultsNmq/* %s' % TMP_DIR1,))
+ self.assertEqual(mock_chrootruncmd.call_count, 1)
+ self.assertEqual(mock_chrootruncmd.call_args_list[0][0], (
+ '/tmp', ('python generate_test_report --no-color --csv %s') % TMP_DIR1))
+ self.assertEqual(mock_getpath.call_count, 1)
+ self.assertEqual(mock_mkdtemp.call_count, 1)
+ self.assertEqual(res, {'Total': [10, 'score'], 'first_time': [680, 'ms']})
+
+ # Test 2. self.temp_dir
+ reset()
+ mock_chrootruncmd.return_value = [
+ '', ('/tmp/tmpJCajRG,PASS\n/tmp/tmpJCajRG/'
+ 'telemetry_Crosperf,PASS\n'), ''
+ ]
+ mock_getpath.return_value = '/tmp/tmpJCajRG'
+ self.result.temp_dir = '/tmp/tmpJCajRG'
+ res = self.result.GetKeyvals()
+ self.assertEqual(mock_runcmd.call_count, 0)
+ self.assertEqual(mock_mkdtemp.call_count, 0)
+ self.assertEqual(mock_chrootruncmd.call_count, 1)
+ self.assertTrue(self.callGetNewKeyvals)
+ self.assertEqual(self.kv_dict, {'': 'PASS', 'telemetry_Crosperf': 'PASS'})
+ self.assertEqual(res, {'Total': [10, 'score'], 'first_time': [680, 'ms']})
+
+ # Test 3. suite != telemetry_Crosperf. Normally this would be for
+ # running non-Telemetry autotests, such as BootPerfServer. In this test
+ # case, the keyvals we have set up were returned from a Telemetry test run;
+ # so this pass is basically testing that we don't append the units to the
+ # test results (which we do for Telemetry autotest runs).
+ reset()
+ self.result.suite = ''
+ res = self.result.GetKeyvals()
+ self.assertEqual(res, {'Total': 10, 'first_time': 680})
+
+ def test_get_results_dir(self):
+
+ self.result.out = ''
+ self.assertRaises(Exception, self.result.GetResultsDir)
+
+ self.result.out = OUTPUT
+ resdir = self.result.GetResultsDir()
+ self.assertEqual(resdir, '/tmp/test_that.PO1234567/platform_LibCBench')
+
+ @mock.patch.object(command_executer.CommandExecuter, 'RunCommandGeneric')
+ def test_find_files_in_results_dir(self, mock_runcmd):
+
+ self.result.results_dir = None
+ res = self.result.FindFilesInResultsDir('-name perf.data')
+ self.assertIsNone(res)
+
+ self.result.ce.RunCommand = mock_runcmd
+ self.result.results_dir = '/tmp/test_results'
+ mock_runcmd.return_value = [0, '/tmp/test_results/perf.data', '']
+ res = self.result.FindFilesInResultsDir('-name perf.data')
+ self.assertEqual(mock_runcmd.call_count, 1)
+ self.assertEqual(mock_runcmd.call_args_list[0][0],
+ ('find /tmp/test_results -name perf.data',))
+ self.assertEqual(res, '/tmp/test_results/perf.data')
+
+ mock_runcmd.reset_mock()
+ mock_runcmd.return_value = [1, '', '']
+ self.assertRaises(Exception, self.result.FindFilesInResultsDir,
+ '-name perf.data')
+
+ @mock.patch.object(Result, 'FindFilesInResultsDir')
+ def test_get_perf_data_files(self, mock_findfiles):
+ self.args = None
+
+ mock_findfiles.return_value = 'line1\nline1\n'
+ self.result.FindFilesInResultsDir = mock_findfiles
+ res = self.result.GetPerfDataFiles()
+ self.assertEqual(res, ['line1', 'line1'])
+ self.assertEqual(mock_findfiles.call_args_list[0][0], ('-name perf.data',))
+
+ def test_get_perf_report_files(self):
+ self.args = None
+
+ def FakeFindFiles(find_args):
+ self.args = find_args
+ return 'line1\nline1\n'
+
+ self.result.FindFilesInResultsDir = FakeFindFiles
+ res = self.result.GetPerfReportFiles()
+ self.assertEqual(res, ['line1', 'line1'])
+ self.assertEqual(self.args, '-name perf.data.report')
+
+ def test_get_data_measurement_files(self):
+ self.args = None
+
+ def FakeFindFiles(find_args):
+ self.args = find_args
+ return 'line1\nline1\n'
+
+ self.result.FindFilesInResultsDir = FakeFindFiles
+ res = self.result.GetDataMeasurementsFiles()
+ self.assertEqual(res, ['line1', 'line1'])
+ self.assertEqual(self.args, '-name perf_measurements')
+
+ @mock.patch.object(misc, 'GetInsideChrootPath')
+ @mock.patch.object(command_executer.CommandExecuter, 'ChrootRunCommand')
+ def test_generate_perf_report_files(self, mock_chrootruncmd, mock_getpath):
+ fake_file = '/usr/chromeos/chroot/tmp/results/fake_file'
+ self.result.perf_data_files = ['/tmp/results/perf.data']
+ self.result.board = 'lumpy'
+ mock_getpath.return_value = fake_file
+ self.result.ce.ChrootRunCommand = mock_chrootruncmd
+ tmp = self.result.GeneratePerfReportFiles()
+ self.assertEqual(tmp, ['/tmp/chroot%s' % fake_file])
+ self.assertEqual(mock_chrootruncmd.call_args_list[0][0],
+ ('/tmp',
+ ('/usr/sbin/perf report -n --symfs /build/lumpy '
+ '--vmlinux /build/lumpy/usr/lib/debug/boot/vmlinux '
+ '--kallsyms /build/lumpy/boot/System.map-* -i '
+ '%s --stdio > %s') % (fake_file, fake_file)))
+
+ @mock.patch.object(misc, 'GetOutsideChrootPath')
+ def test_populate_from_run(self, mock_getpath):
+
+ def FakeGetResultsDir():
+ self.callGetResultsDir = True
+ return '/tmp/results_dir'
+
+ def FakeGetResultsFile():
+ self.callGetResultsFile = True
+ return []
+
+ def FakeGetPerfDataFiles():
+ self.callGetPerfDataFiles = True
+ return []
+
+ def FakeGetPerfReportFiles():
+ self.callGetPerfReportFiles = True
+ return []
+
+ def FakeProcessResults(show_results=False):
+ if show_results:
+ pass
+ self.callProcessResults = True
+
+ if mock_getpath:
+ pass
+ mock.get_path = '/tmp/chromeos/tmp/results_dir'
+ self.result.chromeos_root = '/tmp/chromeos'
+
+ self.callGetResultsDir = False
+ self.callGetResultsFile = False
+ self.callGetPerfDataFiles = False
+ self.callGetPerfReportFiles = False
+ self.callProcessResults = False
+
+ self.result.GetResultsDir = FakeGetResultsDir
+ self.result.GetResultsFile = FakeGetResultsFile
+ self.result.GetPerfDataFiles = FakeGetPerfDataFiles
+ self.result.GeneratePerfReportFiles = FakeGetPerfReportFiles
+ self.result.ProcessResults = FakeProcessResults
+
+ self.result.PopulateFromRun(OUTPUT, '', 0, 'test', 'telemetry_Crosperf')
+ self.assertTrue(self.callGetResultsDir)
+ self.assertTrue(self.callGetResultsFile)
+ self.assertTrue(self.callGetPerfDataFiles)
+ self.assertTrue(self.callGetPerfReportFiles)
+ self.assertTrue(self.callProcessResults)
+
+ def test_process_results(self):
+
+ def FakeGetKeyvals(show_all=False):
+ if show_all:
+ return {'first_time': 680, 'Total': 10}
+ else:
+ return {'Total': 10}
+
+ def FakeGatherPerfResults():
+ self.callGatherPerfResults = True
+
+ self.callGatherPerfResults = False
+
+ self.result.GetKeyvals = FakeGetKeyvals
+ self.result.GatherPerfResults = FakeGatherPerfResults
+
+ self.result.retval = 0
+ self.result.ProcessResults()
+ self.assertTrue(self.callGatherPerfResults)
+ self.assertEqual(len(self.result.keyvals), 2)
+ self.assertEqual(self.result.keyvals, {'Total': 10, 'retval': 0})
+
+ self.result.retval = 1
+ self.result.ProcessResults()
+ self.assertEqual(len(self.result.keyvals), 2)
+ self.assertEqual(self.result.keyvals, {'Total': 10, 'retval': 1})
+
+ @mock.patch.object(misc, 'GetInsideChrootPath')
+ @mock.patch.object(command_executer.CommandExecuter,
+ 'ChrootRunCommandWOutput')
+ def test_populate_from_cache_dir(self, mock_runchrootcmd, mock_getpath):
+
+ # pylint: disable=redefined-builtin
+ def FakeMkdtemp(dir=None):
+ if dir:
+ pass
+ return self.tmpdir
+
+ current_path = os.getcwd()
+ cache_dir = os.path.join(current_path, 'test_cache/test_input')
+ self.result.ce = command_executer.GetCommandExecuter(log_level='average')
+ self.result.ce.ChrootRunCommandWOutput = mock_runchrootcmd
+ mock_runchrootcmd.return_value = [
+ '', ('%s,PASS\n%s/\telemetry_Crosperf,PASS\n') % (TMP_DIR1, TMP_DIR1),
+ ''
+ ]
+ mock_getpath.return_value = TMP_DIR1
+ self.tmpdir = tempfile.mkdtemp()
+ save_real_mkdtemp = tempfile.mkdtemp
+ tempfile.mkdtemp = FakeMkdtemp
+
+ self.result.PopulateFromCacheDir(cache_dir, 'sunspider',
+ 'telemetry_Crosperf')
+ self.assertEqual(self.result.keyvals, {
+ u'Total__Total': [444.0, u'ms'],
+ u'regexp-dna__regexp-dna': [16.2, u'ms'],
+ u'telemetry_page_measurement_results__num_failed': [0, u'count'],
+ u'telemetry_page_measurement_results__num_errored': [0, u'count'],
+ u'string-fasta__string-fasta': [23.2, u'ms'],
+ u'crypto-sha1__crypto-sha1': [11.6, u'ms'],
+ u'bitops-3bit-bits-in-byte__bitops-3bit-bits-in-byte': [3.2, u'ms'],
+ u'access-nsieve__access-nsieve': [7.9, u'ms'],
+ u'bitops-nsieve-bits__bitops-nsieve-bits': [9.4, u'ms'],
+ u'string-validate-input__string-validate-input': [19.3, u'ms'],
+ u'3d-raytrace__3d-raytrace': [24.7, u'ms'],
+ u'3d-cube__3d-cube': [28.0, u'ms'],
+ u'string-unpack-code__string-unpack-code': [46.7, u'ms'],
+ u'date-format-tofte__date-format-tofte': [26.3, u'ms'],
+ u'math-partial-sums__math-partial-sums': [22.0, u'ms'],
+ '\telemetry_Crosperf': ['PASS', ''],
+ u'crypto-aes__crypto-aes': [15.2, u'ms'],
+ u'bitops-bitwise-and__bitops-bitwise-and': [8.4, u'ms'],
+ u'crypto-md5__crypto-md5': [10.5, u'ms'],
+ u'string-tagcloud__string-tagcloud': [52.8, u'ms'],
+ u'access-nbody__access-nbody': [8.5, u'ms'],
+ 'retval': 0,
+ u'math-spectral-norm__math-spectral-norm': [6.6, u'ms'],
+ u'math-cordic__math-cordic': [8.7, u'ms'],
+ u'access-binary-trees__access-binary-trees': [4.5, u'ms'],
+ u'controlflow-recursive__controlflow-recursive': [4.4, u'ms'],
+ u'access-fannkuch__access-fannkuch': [17.8, u'ms'],
+ u'string-base64__string-base64': [16.0, u'ms'],
+ u'date-format-xparb__date-format-xparb': [20.9, u'ms'],
+ u'3d-morph__3d-morph': [22.1, u'ms'],
+ u'bitops-bits-in-byte__bitops-bits-in-byte': [9.1, u'ms']
+ })
+
+ # Clean up after test.
+ tempfile.mkdtemp = save_real_mkdtemp
+ command = 'rm -Rf %s' % self.tmpdir
+ self.result.ce.RunCommand(command)
+
+ @mock.patch.object(misc, 'GetRoot')
+ @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
+ def test_cleanup(self, mock_runcmd, mock_getroot):
+
+ # Test 1. 'rm_chroot_tmp' is True; self.results_dir exists;
+ # self.temp_dir exists; results_dir name contains 'test_that_results_'.
+ mock_getroot.return_value = ['/tmp/tmp_AbcXyz', 'test_that_results_fake']
+ self.result.ce.RunCommand = mock_runcmd
+ self.result.results_dir = 'test_results_dir'
+ self.result.temp_dir = 'testtemp_dir'
+ self.result.CleanUp(True)
+ self.assertEqual(mock_getroot.call_count, 1)
+ self.assertEqual(mock_runcmd.call_count, 2)
+ self.assertEqual(mock_runcmd.call_args_list[0][0],
+ ('rm -rf test_results_dir',))
+ self.assertEqual(mock_runcmd.call_args_list[1][0], ('rm -rf testtemp_dir',))
+
+    # Test 2. Same as above, except that the results_dir name does not
+    # contain 'test_that_results_'.
+ mock_getroot.reset_mock()
+ mock_runcmd.reset_mock()
+ mock_getroot.return_value = ['/tmp/tmp_AbcXyz', 'other_results_fake']
+ self.result.ce.RunCommand = mock_runcmd
+ self.result.results_dir = 'test_results_dir'
+ self.result.temp_dir = 'testtemp_dir'
+ self.result.CleanUp(True)
+ self.assertEqual(mock_getroot.call_count, 1)
+ self.assertEqual(mock_runcmd.call_count, 2)
+ self.assertEqual(mock_runcmd.call_args_list[0][0],
+ ('rm -rf /tmp/tmp_AbcXyz',))
+ self.assertEqual(mock_runcmd.call_args_list[1][0], ('rm -rf testtemp_dir',))
+
+ # Test 3. mock_getroot returns nothing; 'rm_chroot_tmp' is False.
+ mock_getroot.reset_mock()
+ mock_runcmd.reset_mock()
+ self.result.CleanUp(False)
+ self.assertEqual(mock_getroot.call_count, 0)
+ self.assertEqual(mock_runcmd.call_count, 1)
+ self.assertEqual(mock_runcmd.call_args_list[0][0], ('rm -rf testtemp_dir',))
+
+ # Test 4. 'rm_chroot_tmp' is True, but result_dir & temp_dir are None.
+ mock_getroot.reset_mock()
+ mock_runcmd.reset_mock()
+ self.result.results_dir = None
+ self.result.temp_dir = None
+ self.result.CleanUp(True)
+ self.assertEqual(mock_getroot.call_count, 0)
+ self.assertEqual(mock_runcmd.call_count, 0)
+
+ @mock.patch.object(misc, 'GetInsideChrootPath')
+ @mock.patch.object(command_executer.CommandExecuter, 'ChrootRunCommand')
+ def test_store_to_cache_dir(self, mock_chrootruncmd, mock_getpath):
+
+ def FakeMkdtemp(directory=''):
+ if directory:
+ pass
+ return self.tmpdir
+
+ if mock_chrootruncmd or mock_getpath:
+ pass
+ current_path = os.getcwd()
+ cache_dir = os.path.join(current_path, 'test_cache/test_output')
+
+ self.result.ce = command_executer.GetCommandExecuter(log_level='average')
+ self.result.out = OUTPUT
+ self.result.err = error
+ self.result.retval = 0
+ self.tmpdir = tempfile.mkdtemp()
+ if not os.path.exists(self.tmpdir):
+ os.makedirs(self.tmpdir)
+ self.result.results_dir = os.path.join(os.getcwd(), 'test_cache')
+ save_real_mkdtemp = tempfile.mkdtemp
+ tempfile.mkdtemp = FakeMkdtemp
+
+ mock_mm = machine_manager.MockMachineManager('/tmp/chromeos_root', 0,
+ 'average', '')
+ mock_mm.machine_checksum_string['mock_label'] = 'fake_machine_checksum123'
+
+ mock_keylist = ['key1', 'key2', 'key3']
+ test_flag.SetTestMode(True)
+ self.result.StoreToCacheDir(cache_dir, mock_mm, mock_keylist)
+
+ # Check that the correct things were written to the 'cache'.
+ test_dir = os.path.join(os.getcwd(), 'test_cache/test_output')
+ base_dir = os.path.join(os.getcwd(), 'test_cache/compare_output')
+ self.assertTrue(os.path.exists(os.path.join(test_dir, 'autotest.tbz2')))
+ self.assertTrue(os.path.exists(os.path.join(test_dir, 'machine.txt')))
+ self.assertTrue(os.path.exists(os.path.join(test_dir, 'results.txt')))
+
+ f1 = os.path.join(test_dir, 'machine.txt')
+ f2 = os.path.join(base_dir, 'machine.txt')
+ cmd = 'diff %s %s' % (f1, f2)
+ [_, out, _] = self.result.ce.RunCommandWOutput(cmd)
+ self.assertEqual(len(out), 0)
+
+ f1 = os.path.join(test_dir, 'results.txt')
+ f2 = os.path.join(base_dir, 'results.txt')
+ cmd = 'diff %s %s' % (f1, f2)
+ [_, out, _] = self.result.ce.RunCommandWOutput(cmd)
+ self.assertEqual(len(out), 0)
+
+ # Clean up after test.
+ tempfile.mkdtemp = save_real_mkdtemp
+ command = 'rm %s/*' % test_dir
+ self.result.ce.RunCommand(command)
+
+
+TELEMETRY_RESULT_KEYVALS = {
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'math-cordic (ms)':
+ '11.4',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'access-nbody (ms)':
+ '6.9',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'access-fannkuch (ms)':
+ '26.3',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'math-spectral-norm (ms)':
+ '6.3',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'bitops-nsieve-bits (ms)':
+ '9.3',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'math-partial-sums (ms)':
+ '32.8',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'regexp-dna (ms)':
+ '16.1',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ '3d-cube (ms)':
+ '42.7',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'crypto-md5 (ms)':
+ '10.8',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'crypto-sha1 (ms)':
+ '12.4',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'string-tagcloud (ms)':
+ '47.2',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'string-fasta (ms)':
+ '36.3',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'access-binary-trees (ms)':
+ '7.3',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'date-format-xparb (ms)':
+ '138.1',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'crypto-aes (ms)':
+ '19.2',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'Total (ms)':
+ '656.5',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'string-base64 (ms)':
+ '17.5',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'string-validate-input (ms)':
+ '24.8',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ '3d-raytrace (ms)':
+ '28.7',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'controlflow-recursive (ms)':
+ '5.3',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'bitops-bits-in-byte (ms)':
+ '9.8',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ '3d-morph (ms)':
+ '50.2',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'bitops-bitwise-and (ms)':
+ '8.8',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'access-nsieve (ms)':
+ '8.6',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'date-format-tofte (ms)':
+ '31.2',
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'bitops-3bit-bits-in-byte (ms)':
+ '3.5',
+ 'retval':
+ 0,
+ 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
+ 'string-unpack-code (ms)':
+ '45.0'
+}
+
+PURE_TELEMETRY_OUTPUT = """
+page_name,3d-cube (ms),3d-morph (ms),3d-raytrace (ms),Total (ms),access-binary-trees (ms),access-fannkuch (ms),access-nbody (ms),access-nsieve (ms),bitops-3bit-bits-in-byte (ms),bitops-bits-in-byte (ms),bitops-bitwise-and (ms),bitops-nsieve-bits (ms),controlflow-recursive (ms),crypto-aes (ms),crypto-md5 (ms),crypto-sha1 (ms),date-format-tofte (ms),date-format-xparb (ms),math-cordic (ms),math-partial-sums (ms),math-spectral-norm (ms),regexp-dna (ms),string-base64 (ms),string-fasta (ms),string-tagcloud (ms),string-unpack-code (ms),string-validate-input (ms)\r\nhttp://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html,42.7,50.2,28.7,656.5,7.3,26.3,6.9,8.6,3.5,9.8,8.8,9.3,5.3,19.2,10.8,12.4,31.2,138.1,11.4,32.8,6.3,16.1,17.5,36.3,47.2,45.0,24.8\r
+"""
+
+
+class TelemetryResultTest(unittest.TestCase):
+ """Telemetry result test."""
+
+ def __init__(self, *args, **kwargs):
+ super(TelemetryResultTest, self).__init__(*args, **kwargs)
+ self.callFakeProcessResults = False
+ self.result = None
+ self.mock_logger = mock.Mock(spec=logger.Logger)
+ self.mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+ self.mock_label = MockLabel('mock_label', 'chromeos_image', 'autotest_dir',
+ '/tmp', 'lumpy', 'remote', 'image_args',
+ 'cache_dir', 'average', 'gcc', None)
+ self.mock_machine = machine_manager.MockCrosMachine('falco.cros',
+ '/tmp/chromeos',
+ 'average')
+
+ def test_populate_from_run(self):
+
+ def FakeProcessResults():
+ self.callFakeProcessResults = True
+
+ self.callFakeProcessResults = False
+ self.result = TelemetryResult(self.mock_logger, self.mock_label, 'average',
+ self.mock_cmd_exec)
+ self.result.ProcessResults = FakeProcessResults
+ self.result.PopulateFromRun(OUTPUT, error, 3, 'fake_test',
+ 'telemetry_Crosperf')
+ self.assertTrue(self.callFakeProcessResults)
+ self.assertEqual(self.result.out, OUTPUT)
+ self.assertEqual(self.result.err, error)
+ self.assertEqual(self.result.retval, 3)
+
+ def test_populate_from_cache_dir_and_process_results(self):
+
+ self.result = TelemetryResult(self.mock_logger, self.mock_label, 'average',
+ self.mock_machine)
+ current_path = os.getcwd()
+ cache_dir = os.path.join(current_path,
+ 'test_cache/test_puretelemetry_input')
+ self.result.PopulateFromCacheDir(cache_dir, '', '')
+ self.assertEqual(self.result.out.strip(), PURE_TELEMETRY_OUTPUT.strip())
+ self.assertEqual(self.result.err, '')
+ self.assertEqual(self.result.retval, 0)
+ self.assertEqual(self.result.keyvals, TELEMETRY_RESULT_KEYVALS)
+
+
+class ResultsCacheTest(unittest.TestCase):
+ """Resultcache test class."""
+
+ def __init__(self, *args, **kwargs):
+ super(ResultsCacheTest, self).__init__(*args, **kwargs)
+ self.fakeCacheReturnResult = None
+ self.mock_logger = mock.Mock(spec=logger.Logger)
+ self.mock_label = MockLabel('mock_label', 'chromeos_image', 'autotest_dir',
+ '/tmp', 'lumpy', 'remote', 'image_args',
+ 'cache_dir', 'average', 'gcc', None)
+
+ def setUp(self):
+ self.results_cache = ResultsCache()
+
+ mock_machine = machine_manager.MockCrosMachine('falco.cros',
+ '/tmp/chromeos', 'average')
+
+ mock_mm = machine_manager.MockMachineManager('/tmp/chromeos_root', 0,
+ 'average', '')
+ mock_mm.machine_checksum_string['mock_label'] = 'fake_machine_checksum123'
+
+ self.results_cache.Init(
+ self.mock_label.chromeos_image,
+ self.mock_label.chromeos_root,
+ 'sunspider',
+ 1, # benchmark_run.iteration,
+ '', # benchmark_run.test_args,
+ '', # benchmark_run.profiler_args,
+ mock_mm,
+ mock_machine,
+ self.mock_label.board,
+ [CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH],
+ self.mock_logger,
+ 'average',
+ self.mock_label,
+ '', # benchmark_run.share_cache
+ 'telemetry_Crosperf',
+ True, # benchmark_run.show_all_results
+ False) # benchmark_run.run_local
+
+ @mock.patch.object(image_checksummer.ImageChecksummer, 'Checksum')
+ def test_get_cache_dir_for_write(self, mock_checksum):
+
+ def FakeGetMachines(label):
+ if label:
+ pass
+ m1 = machine_manager.MockCrosMachine('lumpy1.cros',
+ self.results_cache.chromeos_root,
+ 'average')
+ m2 = machine_manager.MockCrosMachine('lumpy2.cros',
+ self.results_cache.chromeos_root,
+ 'average')
+ return [m1, m2]
+
+ mock_checksum.return_value = 'FakeImageChecksumabc123'
+ self.results_cache.machine_manager.GetMachines = FakeGetMachines
+ self.results_cache.machine_manager.machine_checksum['mock_label'] = \
+ 'FakeMachineChecksumabc987'
+ # Based on the label, benchmark and machines, get the directory in which
+ # to store the cache information for this test run.
+ result_path = self.results_cache.GetCacheDirForWrite()
+ # Verify that the returned directory is correct (since the label
+ # contained a cache_dir, named 'cache_dir', that's what is expected in
+ # the result, rather than '~/cros_scratch').
+ comp_path = os.path.join(os.getcwd(),
+ 'cache_dir/54524606abaae4fdf7b02f49f7ae7127_'
+ 'sunspider_1_fda29412ceccb72977516c4785d08e2c_'
+ 'FakeImageChecksumabc123_FakeMachineChecksum'
+ 'abc987__6')
+ self.assertEqual(result_path, comp_path)
+
+ def test_form_cache_dir(self):
+ # This is very similar to the previous test (FormCacheDir is called
+ # from GetCacheDirForWrite).
+ cache_key_list = ('54524606abaae4fdf7b02f49f7ae7127', 'sunspider', '1',
+ '7215ee9c7d9dc229d2921a40e899ec5f',
+ 'FakeImageChecksumabc123', '*', '*', '6')
+ path = self.results_cache.FormCacheDir(cache_key_list)
+ self.assertEqual(len(path), 1)
+ path1 = path[0]
+ test_dirname = ('54524606abaae4fdf7b02f49f7ae7127_sunspider_1_7215ee9'
+ 'c7d9dc229d2921a40e899ec5f_FakeImageChecksumabc123_*_*_6')
+ comp_path = os.path.join(os.getcwd(), 'cache_dir', test_dirname)
+ self.assertEqual(path1, comp_path)
+
+ @mock.patch.object(image_checksummer.ImageChecksummer, 'Checksum')
+ def test_get_cache_key_list(self, mock_checksum):
+ # This tests the mechanism that generates the various pieces of the
+ # cache directory name, based on various conditions.
+
+ def FakeGetMachines(label):
+ if label:
+ pass
+ m1 = machine_manager.MockCrosMachine('lumpy1.cros',
+ self.results_cache.chromeos_root,
+ 'average')
+ m2 = machine_manager.MockCrosMachine('lumpy2.cros',
+ self.results_cache.chromeos_root,
+ 'average')
+ return [m1, m2]
+
+ mock_checksum.return_value = 'FakeImageChecksumabc123'
+ self.results_cache.machine_manager.GetMachines = FakeGetMachines
+ self.results_cache.machine_manager.machine_checksum['mock_label'] = \
+ 'FakeMachineChecksumabc987'
+
+ # Test 1. Generating cache name for reading (not writing).
+ key_list = self.results_cache.GetCacheKeyList(True)
+ self.assertEqual(key_list[0], '*') # Machine checksum value, for read.
+ self.assertEqual(key_list[1], 'sunspider')
+ self.assertEqual(key_list[2], '1')
+ self.assertEqual(key_list[3], 'fda29412ceccb72977516c4785d08e2c')
+ self.assertEqual(key_list[4], 'FakeImageChecksumabc123')
+ self.assertEqual(key_list[5], '*')
+ self.assertEqual(key_list[6], '*')
+ self.assertEqual(key_list[7], '6')
+
+ # Test 2. Generating cache name for writing, with local image type.
+ key_list = self.results_cache.GetCacheKeyList(False)
+ self.assertEqual(key_list[0], '54524606abaae4fdf7b02f49f7ae7127')
+ self.assertEqual(key_list[1], 'sunspider')
+ self.assertEqual(key_list[2], '1')
+ self.assertEqual(key_list[3], 'fda29412ceccb72977516c4785d08e2c')
+ self.assertEqual(key_list[4], 'FakeImageChecksumabc123')
+ self.assertEqual(key_list[5], 'FakeMachineChecksumabc987')
+ self.assertEqual(key_list[6], '')
+ self.assertEqual(key_list[7], '6')
+
+ # Test 3. Generating cache name for writing, with trybot image type.
+ self.results_cache.label.image_type = 'trybot'
+ key_list = self.results_cache.GetCacheKeyList(False)
+ self.assertEqual(key_list[0], '54524606abaae4fdf7b02f49f7ae7127')
+ self.assertEqual(key_list[3], 'fda29412ceccb72977516c4785d08e2c')
+ self.assertEqual(key_list[4], '54524606abaae4fdf7b02f49f7ae7127')
+ self.assertEqual(key_list[5], 'FakeMachineChecksumabc987')
+
+ # Test 4. Generating cache name for writing, with official image type.
+ self.results_cache.label.image_type = 'official'
+ key_list = self.results_cache.GetCacheKeyList(False)
+ self.assertEqual(key_list[0], '54524606abaae4fdf7b02f49f7ae7127')
+ self.assertEqual(key_list[1], 'sunspider')
+ self.assertEqual(key_list[2], '1')
+ self.assertEqual(key_list[3], 'fda29412ceccb72977516c4785d08e2c')
+ self.assertEqual(key_list[4], '*')
+ self.assertEqual(key_list[5], 'FakeMachineChecksumabc987')
+ self.assertEqual(key_list[6], '')
+ self.assertEqual(key_list[7], '6')
+
+ # Test 5. Generating cache name for writing, with local image type, and
+ # specifying that the image path must match the cached image path.
+ self.results_cache.label.image_type = 'local'
+ self.results_cache.cache_conditions.append(CacheConditions.IMAGE_PATH_MATCH)
+ key_list = self.results_cache.GetCacheKeyList(False)
+ self.assertEqual(key_list[0], '54524606abaae4fdf7b02f49f7ae7127')
+ self.assertEqual(key_list[3], 'fda29412ceccb72977516c4785d08e2c')
+ self.assertEqual(key_list[4], 'FakeImageChecksumabc123')
+ self.assertEqual(key_list[5], 'FakeMachineChecksumabc987')
+
+ @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
+ @mock.patch.object(os.path, 'isdir')
+ @mock.patch.object(Result, 'CreateFromCacheHit')
+ def test_read_result(self, mock_create, mock_isdir, mock_runcmd):
+
+ self.fakeCacheReturnResult = None
+
+ def FakeGetCacheDirForRead():
+ return self.fakeCacheReturnResult
+
+ def FakeGetCacheDirForWrite():
+ return self.fakeCacheReturnResult
+
+ mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+ fake_result = Result(self.mock_logger, self.mock_label, 'average',
+ mock_cmd_exec)
+ fake_result.retval = 0
+
+ # Set up results_cache _GetCacheDirFor{Read,Write} to return
+ # self.fakeCacheReturnResult, which is initially None (see above).
+ # So initially, no cache dir is returned.
+ self.results_cache.GetCacheDirForRead = FakeGetCacheDirForRead
+ self.results_cache.GetCacheDirForWrite = FakeGetCacheDirForWrite
+
+ mock_isdir.return_value = True
+ save_cc = [
+ CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH
+ ]
+ self.results_cache.cache_conditions.append(CacheConditions.FALSE)
+
+    # Test 1. CacheConditions.FALSE, which means do not read from the cache
+    # (force re-running of the test). Result should be None.
+ res = self.results_cache.ReadResult()
+ self.assertIsNone(res)
+ self.assertEqual(mock_runcmd.call_count, 1)
+
+    # Test 2. Remove CacheConditions.FALSE. Result should still be None,
+ # because GetCacheDirForRead is returning None at the moment.
+ mock_runcmd.reset_mock()
+ self.results_cache.cache_conditions = save_cc
+ res = self.results_cache.ReadResult()
+ self.assertIsNone(res)
+ self.assertEqual(mock_runcmd.call_count, 0)
+
+ # Test 3. Now set up cache dir to be returned by GetCacheDirForRead.
+    # Since cache_dir is found, ReadResult will call Result.CreateFromCacheHit,
+    # which will actually call our mock_create and should return fake_result.
+ self.fakeCacheReturnResult = 'fake/cache/dir'
+ mock_create.return_value = fake_result
+ res = self.results_cache.ReadResult()
+ self.assertEqual(mock_runcmd.call_count, 0)
+ self.assertEqual(res, fake_result)
+
+ # Test 4. os.path.isdir(cache_dir) will now return false, so result
+ # should be None again (no cache found).
+ mock_isdir.return_value = False
+ res = self.results_cache.ReadResult()
+ self.assertEqual(mock_runcmd.call_count, 0)
+ self.assertIsNone(res)
+
+    # Test 5. os.path.isdir returns true, but mock_create now returns None
+    # (the call to CreateFromCacheHit returns None), so the overall result
+    # is None.
+ mock_isdir.return_value = True
+ mock_create.return_value = None
+ res = self.results_cache.ReadResult()
+ self.assertEqual(mock_runcmd.call_count, 0)
+ self.assertIsNone(res)
+
+ # Test 6. Everything works 'as expected', result should be fake_result.
+ mock_create.return_value = fake_result
+ res = self.results_cache.ReadResult()
+ self.assertEqual(mock_runcmd.call_count, 0)
+ self.assertEqual(res, fake_result)
+
+ # Test 7. The run failed; result should be None.
+ mock_create.return_value = fake_result
+ fake_result.retval = 1
+ self.results_cache.cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
+ res = self.results_cache.ReadResult()
+ self.assertEqual(mock_runcmd.call_count, 0)
+ self.assertIsNone(res)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/crosperf/results_organizer.py b/crosperf/results_organizer.py
new file mode 100644
index 00000000..097c744d
--- /dev/null
+++ b/crosperf/results_organizer.py
@@ -0,0 +1,192 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Parse data from benchmark_runs for tabulator."""
+
+from __future__ import print_function
+
+import errno
+import json
+import os
+import re
+import sys
+
+from cros_utils import misc
+
+_TELEMETRY_RESULT_DEFAULTS_FILE = 'default-telemetry-results.json'
+_DUP_KEY_REGEX = re.compile(r'(\w+)\{(\d+)\}')
+
+
+def _AdjustIteration(benchmarks, max_dup, bench):
+ """Adjust the interation numbers if they have keys like ABCD{i}."""
+ for benchmark in benchmarks:
+ if benchmark.name != bench or benchmark.iteration_adjusted:
+ continue
+ benchmark.iteration_adjusted = True
+ benchmark.iterations *= (max_dup + 1)
+
+
+def _GetMaxDup(data):
+ """Find the maximum i inside ABCD{i}.
+
+ data should be a [[[Key]]], where Key is a string that may look like
+ ABCD{i}.
+ """
+ max_dup = 0
+ for label in data:
+ for run in label:
+ for key in run:
+ match = _DUP_KEY_REGEX.match(key)
+ if match:
+ max_dup = max(max_dup, int(match.group(2)))
+ return max_dup
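+
+# Illustrative sketch (made-up keys): only the '{i}' suffix matters, so plain
+# keys contribute nothing to the maximum.
+#   _GetMaxDup([[['foo', 'bar{1}']], [['baz{3}', 'qux']]]) == 3
+#   _GetMaxDup([[['foo', 'bar']]]) == 0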
+
+
+def _Repeat(func, times):
+ """Returns the result of running func() n times."""
+ return [func() for _ in xrange(times)]
+
+
+def _GetNonDupLabel(max_dup, runs):
+ """Create new list for the runs of the same label.
+
+ Specifically, this will split out keys like foo{0}, foo{1} from one run into
+ their own runs. For example, given a run like:
+ {"foo": 1, "bar{0}": 2, "baz": 3, "qux{1}": 4, "pirate{0}": 5}
+
+ You'll get:
+ [{"foo": 1, "baz": 3}, {"bar": 2, "pirate": 5}, {"qux": 4}]
+
+ Hands back the lists of transformed runs, all concatenated together.
+ """
+ new_runs = []
+ for run in runs:
+ new_run = {}
+ added_runs = _Repeat(dict, max_dup)
+ for key, value in run.iteritems():
+ match = _DUP_KEY_REGEX.match(key)
+ if not match:
+ new_run[key] = value
+ else:
+ new_key, index_str = match.groups()
+ added_runs[int(index_str)-1][new_key] = str(value)
+ new_runs.append(new_run)
+ new_runs += added_runs
+ return new_runs
+
+
+def _DuplicatePass(result, benchmarks):
+ """Properly expands keys like `foo{1}` in `result`."""
+ for bench, data in result.iteritems():
+ max_dup = _GetMaxDup(data)
+ # If there's nothing to expand, there's nothing to do.
+ if not max_dup:
+ continue
+ for i, runs in enumerate(data):
+ data[i] = _GetNonDupLabel(max_dup, runs)
+ _AdjustIteration(benchmarks, max_dup, bench)
+
+
+def _ReadSummaryFile(filename):
+ """Reads the summary file at filename."""
+ dirname, _ = misc.GetRoot(filename)
+ fullname = os.path.join(dirname, _TELEMETRY_RESULT_DEFAULTS_FILE)
+ try:
+ # Slurp the summary file into a dictionary. The keys in the dictionary are
+ # the benchmark names. The value for a key is a list containing the names
+ # of all the result fields that should be returned in a 'default' report.
+ with open(fullname) as in_file:
+ return json.load(in_file)
+ except IOError as e:
+ # ENOENT means "no such file or directory"
+ if e.errno == errno.ENOENT:
+ return {}
+ raise
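+
+# A hypothetical default-telemetry-results.json entry, shown only to
+# illustrate the shape of the summary file (benchmark name -> list of result
+# keys reported by default):
+#   {"sunspider": ["Total__Total"], "octane": ["Total__Score"]}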
+
+
+def _MakeOrganizeResultOutline(benchmark_runs, labels):
+ """Creates the "outline" of the OrganizeResults result for a set of runs.
+
+ Report generation returns lists of different sizes, depending on the input
+ data. Depending on the order in which we iterate through said input data, we
+ may populate the Nth index of a list, then the N-1st, then the N+Mth, ...
+
+ It's cleaner to figure out the "skeleton"/"outline" ahead of time, so we don't
+ have to worry about resizing while computing results.
+ """
+ # Count how many iterations exist for each benchmark run.
+ # We can't simply count up, since we may be given an incomplete set of
+ # iterations (e.g. [r.iteration for r in benchmark_runs] == [1, 3])
+ iteration_count = {}
+ for run in benchmark_runs:
+ name = run.benchmark.name
+ old_iterations = iteration_count.get(name, -1)
+ # N.B. run.iteration starts at 1, not 0.
+ iteration_count[name] = max(old_iterations, run.iteration)
+
+ # Result structure: {benchmark_name: [[{key: val}]]}
+ result = {}
+ for run in benchmark_runs:
+ name = run.benchmark.name
+ num_iterations = iteration_count[name]
+    # The default parameter keeps cros lint quiet about num_iterations being
+    # defined inside a loop.
+ make_dicts = lambda n=num_iterations: _Repeat(dict, n)
+ result[name] = _Repeat(make_dicts, len(labels))
+ return result
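+
+# Illustrative sketch (hypothetical runs): given two labels and a benchmark
+# whose runs report iterations 1 and 3, _MakeOrganizeResultOutline pre-builds
+# one list of empty per-iteration dicts for each label:
+#   {'page_cycler': [[{}, {}, {}], [{}, {}, {}]]}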
+
+def OrganizeResults(benchmark_runs, labels, benchmarks=None, json_report=False):
+ """Create a dict from benchmark_runs.
+
+ The structure of the output dict is as follows:
+ {"benchmark_1":[
+ [{"key1":"v1", "key2":"v2"},{"key1":"v1", "key2","v2"}]
+ #one label
+ []
+ #the other label
+ ]
+ "benchmark_2":
+ [
+ ]}.
+ """
+ result = _MakeOrganizeResultOutline(benchmark_runs, labels)
+ label_names = [label.name for label in labels]
+ label_indices = {name: i for i, name in enumerate(label_names)}
+ summary_file = _ReadSummaryFile(sys.argv[0])
+ if benchmarks is None:
+ benchmarks = []
+
+ for benchmark_run in benchmark_runs:
+ if not benchmark_run.result:
+ continue
+ benchmark = benchmark_run.benchmark
+ label_index = label_indices[benchmark_run.label.name]
+ cur_label_list = result[benchmark.name][label_index]
+ cur_dict = cur_label_list[benchmark_run.iteration - 1]
+
+ show_all_results = json_report or benchmark.show_all_results
+ if not show_all_results:
+ summary_list = summary_file.get(benchmark.test_name)
+ if summary_list:
+ summary_list.append('retval')
+ else:
+ # Did not find test_name in json file; show everything.
+ show_all_results = True
+ for test_key in benchmark_run.result.keyvals:
+ if show_all_results or test_key in summary_list:
+ cur_dict[test_key] = benchmark_run.result.keyvals[test_key]
+ # Occasionally Telemetry tests will not fail but they will not return a
+ # result, either. Look for those cases, and force them to be a fail.
+ # (This can happen if, for example, the test has been disabled.)
+ if len(cur_dict) == 1 and cur_dict['retval'] == 0:
+ cur_dict['retval'] = 1
+ # TODO: This output should be sent via logger.
+ print("WARNING: Test '%s' appears to have succeeded but returned"
+ ' no results.' % benchmark.name,
+ file=sys.stderr)
+ if json_report and benchmark_run.machine:
+ cur_dict['machine'] = benchmark_run.machine.name
+ cur_dict['machine_checksum'] = benchmark_run.machine.checksum
+ cur_dict['machine_string'] = benchmark_run.machine.checksum_string
+ _DuplicatePass(result, benchmarks)
+ return result
diff --git a/crosperf/results_organizer_unittest.py b/crosperf/results_organizer_unittest.py
new file mode 100755
index 00000000..ccf02973
--- /dev/null
+++ b/crosperf/results_organizer_unittest.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python2
+
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Testing of ResultsOrganizer
+
+   We create some labels and benchmark_runs, then create a ResultsOrganizer;
+   after that, we compare the result of the ResultsOrganizer.
+ """
+
+from __future__ import print_function
+
+import unittest
+
+from benchmark_run import BenchmarkRun
+from results_cache import Result
+from results_organizer import OrganizeResults
+
+import mock_instance
+
+result = {'benchmark1': [[{'': 'PASS',
+ 'bool': 'True',
+ 'milliseconds_1': '1',
+ 'milliseconds_2': '8',
+ 'milliseconds_3': '9.2',
+ 'ms_1': '2.1',
+ 'total': '5'}, {'test': '2'}, {'test': '4'},
+ {'': 'PASS',
+ 'bool': 'FALSE',
+ 'milliseconds_1': '3',
+ 'milliseconds_2': '5',
+ 'ms_1': '2.2',
+ 'total': '6'}, {'test': '3'}, {'test': '4'}],
+ [{'': 'PASS',
+ 'bool': 'FALSE',
+ 'milliseconds_4': '30',
+ 'milliseconds_5': '50',
+ 'ms_1': '2.23',
+ 'total': '6'}, {'test': '5'}, {'test': '4'},
+ {'': 'PASS',
+ 'bool': 'FALSE',
+ 'milliseconds_1': '3',
+ 'milliseconds_6': '7',
+ 'ms_1': '2.3',
+ 'total': '7'}, {'test': '2'}, {'test': '6'}]],
+ 'benchmark2': [[{'': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2.3',
+ 'total': '7'}, {'test': '2'}, {'test': '6'},
+ {'': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2.2',
+ 'total': '7'}, {'test': '2'}, {'test': '2'}],
+ [{'': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2',
+ 'total': '7'}, {'test': '2'}, {'test': '4'},
+ {'': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '1',
+ 'total': '7'}, {'test': '1'}, {'test': '6'}]]}
+
+
+class ResultOrganizerTest(unittest.TestCase):
+ """Test result organizer."""
+
+ def testResultOrganizer(self):
+ labels = [mock_instance.label1, mock_instance.label2]
+ benchmarks = [mock_instance.benchmark1, mock_instance.benchmark2]
+ benchmark_runs = [None] * 8
+ benchmark_runs[0] = BenchmarkRun('b1', benchmarks[0], labels[0], 1, '', '',
+ '', 'average', '')
+ benchmark_runs[1] = BenchmarkRun('b2', benchmarks[0], labels[0], 2, '', '',
+ '', 'average', '')
+ benchmark_runs[2] = BenchmarkRun('b3', benchmarks[0], labels[1], 1, '', '',
+ '', 'average', '')
+ benchmark_runs[3] = BenchmarkRun('b4', benchmarks[0], labels[1], 2, '', '',
+ '', 'average', '')
+ benchmark_runs[4] = BenchmarkRun('b5', benchmarks[1], labels[0], 1, '', '',
+ '', 'average', '')
+ benchmark_runs[5] = BenchmarkRun('b6', benchmarks[1], labels[0], 2, '', '',
+ '', 'average', '')
+ benchmark_runs[6] = BenchmarkRun('b7', benchmarks[1], labels[1], 1, '', '',
+ '', 'average', '')
+ benchmark_runs[7] = BenchmarkRun('b8', benchmarks[1], labels[1], 2, '', '',
+ '', 'average', '')
+
+    for i, b in enumerate(benchmark_runs):
+      b.result = Result('', b.label, 'average', 'machine')
+      b.result.keyvals = mock_instance.keyval[i]
+
+ organized = OrganizeResults(benchmark_runs, labels, benchmarks)
+ self.assertEqual(organized, result)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/crosperf/results_report.py b/crosperf/results_report.py
new file mode 100644
index 00000000..7a465349
--- /dev/null
+++ b/crosperf/results_report.py
@@ -0,0 +1,691 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""A module to handle the report format."""
+from __future__ import print_function
+
+import datetime
+import functools
+import itertools
+import json
+import os
+import re
+
+from cros_utils.tabulator import AmeanResult
+from cros_utils.tabulator import Cell
+from cros_utils.tabulator import CoeffVarFormat
+from cros_utils.tabulator import CoeffVarResult
+from cros_utils.tabulator import Column
+from cros_utils.tabulator import Format
+from cros_utils.tabulator import GmeanRatioResult
+from cros_utils.tabulator import LiteralResult
+from cros_utils.tabulator import MaxResult
+from cros_utils.tabulator import MinResult
+from cros_utils.tabulator import PValueFormat
+from cros_utils.tabulator import PValueResult
+from cros_utils.tabulator import RatioFormat
+from cros_utils.tabulator import RawResult
+from cros_utils.tabulator import StdResult
+from cros_utils.tabulator import TableFormatter
+from cros_utils.tabulator import TableGenerator
+from cros_utils.tabulator import TablePrinter
+from update_telemetry_defaults import TelemetryDefaults
+
+from column_chart import ColumnChart
+from results_organizer import OrganizeResults
+
+import results_report_templates as templates
+
+
+def ParseChromeosImage(chromeos_image):
+ """Parse the chromeos_image string for the image and version.
+
+ The chromeos_image string will probably be in one of two formats:
+ 1: <path-to-chroot>/src/build/images/<board>/<ChromeOS-version>.<datetime>/ \
+ chromiumos_test_image.bin
+ 2: <path-to-chroot>/chroot/tmp/<buildbot-build>/<ChromeOS-version>/ \
+ chromiumos_test_image.bin
+
+ We parse these strings to find the 'chromeos_version' to store in the
+ json archive (without the .datatime bit in the first case); and also
+ the 'chromeos_image', which would be all of the first case, but only the
+ part after '/chroot/tmp' in the second case.
+
+ Args:
+ chromeos_image: string containing the path to the chromeos_image that
+ crosperf used for the test.
+
+ Returns:
+ version, image: The results of parsing the input string, as explained
+ above.
+ """
+ # Find the Chromeos Version, e.g. R45-2345.0.0.....
+ # chromeos_image should have been something like:
+ # <path>/<board-trybot-release>/<chromeos-version>/chromiumos_test_image.bin"
+ if chromeos_image.endswith('/chromiumos_test_image.bin'):
+ full_version = chromeos_image.split('/')[-2]
+ # Strip the date and time off of local builds (which have the format
+ # "R43-2345.0.0.date-and-time").
+ version, _ = os.path.splitext(full_version)
+ else:
+ version = ''
+
+ # Find the chromeos image. If it's somewhere in .../chroot/tmp/..., then
+ # it's an official image that got downloaded, so chop off the download path
+ # to make the official image name more clear.
+ official_image_path = '/chroot/tmp'
+ if official_image_path in chromeos_image:
+ image = chromeos_image.split(official_image_path, 1)[1]
+ else:
+ image = chromeos_image
+ return version, image
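+
+# Illustrative sketch (hypothetical local-build path): the trailing date/time
+# component is stripped from the version, and the whole path is kept as the
+# image because it does not contain '/chroot/tmp':
+#   ParseChromeosImage(
+#       '/home/user/chromeos/src/build/images/lumpy/'
+#       'R43-2345.0.0.2015_01_01_0000/chromiumos_test_image.bin')
+#   == ('R43-2345.0.0',
+#       '/home/user/chromeos/src/build/images/lumpy/'
+#       'R43-2345.0.0.2015_01_01_0000/chromiumos_test_image.bin')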
+
+
+def _AppendUntilLengthIs(gen, the_list, target_len):
+ """Appends to `list` until `list` is `target_len` elements long.
+
+ Uses `gen` to generate elements.
+ """
+ the_list.extend(gen() for _ in xrange(target_len - len(the_list)))
+ return the_list
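+
+# For example (hypothetical call), padding an empty list with fresh dicts:
+#   _AppendUntilLengthIs(dict, [], 3) == [{}, {}, {}]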
+
+
+def _FilterPerfReport(event_threshold, report):
+ """Filters out entries with `< event_threshold` percent in a perf report."""
+ def filter_dict(m):
+ return {fn_name: pct for fn_name, pct in m.iteritems()
+ if pct >= event_threshold}
+ return {event: filter_dict(m) for event, m in report.iteritems()}
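+
+# Illustrative sketch (made-up perf data): functions below the threshold are
+# dropped from every event:
+#   _FilterPerfReport(5.0, {'cycles': {'foo': 10.0, 'bar': 1.0}})
+#   == {'cycles': {'foo': 10.0}}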
+
+
+class _PerfTable(object):
+ """Generates dicts from a perf table.
+
+ Dicts look like:
+ {'benchmark_name': {'perf_event_name': [LabelData]}}
+ where LabelData is a list of perf dicts, each perf dict coming from the same
+ label.
+ Each perf dict looks like {'function_name': 0.10, ...} (where 0.10 is the
+ percentage of time spent in function_name).
+ """
+
+ def __init__(self, benchmark_names_and_iterations, label_names,
+ read_perf_report, event_threshold=None):
+ """Constructor.
+
+ read_perf_report is a function that takes a label name, benchmark name, and
+ benchmark iteration, and returns a dictionary describing the perf output for
+ that given run.
+ """
+ self.event_threshold = event_threshold
+ self._label_indices = {name: i for i, name in enumerate(label_names)}
+ self.perf_data = {}
+ for label in label_names:
+ for bench_name, bench_iterations in benchmark_names_and_iterations:
+ for i in xrange(bench_iterations):
+ report = read_perf_report(label, bench_name, i)
+ self._ProcessPerfReport(report, label, bench_name, i)
+
+ def _ProcessPerfReport(self, perf_report, label, benchmark_name, iteration):
+ """Add the data from one run to the dict."""
+ perf_of_run = perf_report
+ if self.event_threshold is not None:
+ perf_of_run = _FilterPerfReport(self.event_threshold, perf_report)
+ if benchmark_name not in self.perf_data:
+ self.perf_data[benchmark_name] = {event: [] for event in perf_of_run}
+ ben_data = self.perf_data[benchmark_name]
+ label_index = self._label_indices[label]
+ for event in ben_data:
+ _AppendUntilLengthIs(list, ben_data[event], label_index + 1)
+ data_for_label = ben_data[event][label_index]
+ _AppendUntilLengthIs(dict, data_for_label, iteration + 1)
+ data_for_label[iteration] = perf_of_run[event] if perf_of_run else {}
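+
+# A hedged sketch of how the resulting structure is indexed (names invented):
+#   p_table = _PerfTable([('octane', 2)], ['image1', 'image2'], read_report)
+#   p_table.perf_data['octane']['cycles'][label_index][iteration]
+#     -> {'function_name': percent_of_samples, ...}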
+
+
+def _GetResultsTableHeader(ben_name, iterations):
+ benchmark_info = ('Benchmark: {0}; Iterations: {1}'
+ .format(ben_name, iterations))
+ cell = Cell()
+ cell.string_value = benchmark_info
+ cell.header = True
+ return [[cell]]
+
+
+def _ParseColumn(columns, iteration):
+ new_column = []
+ for column in columns:
+ if column.result.__class__.__name__ != 'RawResult':
+ new_column.append(column)
+ else:
+ new_column.extend(Column(LiteralResult(i), Format(), str(i + 1))
+ for i in xrange(iteration))
+ return new_column
+
+
+def _GetTables(benchmark_results, columns, table_type):
+ iter_counts = benchmark_results.iter_counts
+ result = benchmark_results.run_keyvals
+ tables = []
+ for bench_name, runs in result.iteritems():
+ iterations = iter_counts[bench_name]
+ ben_table = _GetResultsTableHeader(bench_name, iterations)
+
+ all_runs_empty = all(not keyvals for label in runs for keyvals in label)
+ if all_runs_empty:
+ cell = Cell()
+ cell.string_value = ('This benchmark contains no result.'
+ ' Is the benchmark name valid?')
+ cell_table = [[cell]]
+ else:
+ table = TableGenerator(runs, benchmark_results.label_names).GetTable()
+ parsed_columns = _ParseColumn(columns, iterations)
+ tf = TableFormatter(table, parsed_columns)
+ cell_table = tf.GetCellTable(table_type)
+ tables.append(ben_table)
+ tables.append(cell_table)
+ return tables
+
+
+def _GetPerfTables(benchmark_results, columns, table_type):
+ p_table = _PerfTable(benchmark_results.benchmark_names_and_iterations,
+ benchmark_results.label_names,
+ benchmark_results.read_perf_report)
+
+ tables = []
+ for benchmark in p_table.perf_data:
+ iterations = benchmark_results.iter_counts[benchmark]
+ ben_table = _GetResultsTableHeader(benchmark, iterations)
+ tables.append(ben_table)
+ benchmark_data = p_table.perf_data[benchmark]
+ table = []
+ for event in benchmark_data:
+ tg = TableGenerator(benchmark_data[event],
+ benchmark_results.label_names,
+ sort=TableGenerator.SORT_BY_VALUES_DESC)
+ table = tg.GetTable(ResultsReport.PERF_ROWS)
+ parsed_columns = _ParseColumn(columns, iterations)
+ tf = TableFormatter(table, parsed_columns)
+ tf.GenerateCellTable(table_type)
+ tf.AddColumnName()
+ tf.AddLabelName()
+ tf.AddHeader(str(event))
+ table = tf.GetCellTable(table_type, headers=False)
+ tables.append(table)
+ return tables
+
+
+class ResultsReport(object):
+ """Class to handle the report format."""
+ MAX_COLOR_CODE = 255
+ PERF_ROWS = 5
+
+ def __init__(self, results):
+ self.benchmark_results = results
+
+ def _GetTablesWithColumns(self, columns, table_type, perf):
+ get_tables = _GetPerfTables if perf else _GetTables
+ return get_tables(self.benchmark_results, columns, table_type)
+
+ def GetFullTables(self, perf=False):
+ columns = [Column(RawResult(), Format()),
+ Column(MinResult(), Format()),
+ Column(MaxResult(), Format()),
+ Column(AmeanResult(), Format()),
+ Column(StdResult(), Format(), 'StdDev'),
+ Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
+ Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
+ Column(PValueResult(), PValueFormat(), 'p-value')]
+ return self._GetTablesWithColumns(columns, 'full', perf)
+
+ def GetSummaryTables(self, perf=False):
+ columns = [Column(AmeanResult(), Format()),
+ Column(StdResult(), Format(), 'StdDev'),
+ Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
+ Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
+ Column(PValueResult(), PValueFormat(), 'p-value')]
+ return self._GetTablesWithColumns(columns, 'summary', perf)
+
+
+def _PrintTable(tables, out_to):
+ # tables may be None.
+ if not tables:
+ return ''
+
+ if out_to == 'HTML':
+ out_type = TablePrinter.HTML
+ elif out_to == 'PLAIN':
+ out_type = TablePrinter.PLAIN
+ elif out_to == 'CONSOLE':
+ out_type = TablePrinter.CONSOLE
+ elif out_to == 'TSV':
+ out_type = TablePrinter.TSV
+ elif out_to == 'EMAIL':
+ out_type = TablePrinter.EMAIL
+ else:
+ raise ValueError('Invalid out_to value: %s' % (out_to,))
+
+ printers = (TablePrinter(table, out_type) for table in tables)
+ return ''.join(printer.Print() for printer in printers)
+
+
+class TextResultsReport(ResultsReport):
+ """Class to generate text result report."""
+
+ H1_STR = '==========================================='
+ H2_STR = '-------------------------------------------'
+
+ def __init__(self, results, email=False, experiment=None):
+ super(TextResultsReport, self).__init__(results)
+ self.email = email
+ self.experiment = experiment
+
+ @staticmethod
+ def _MakeTitle(title):
+ header_line = TextResultsReport.H1_STR
+ # '' at the end gives one newline.
+ return '\n'.join([header_line, title, header_line, ''])
+
+ @staticmethod
+ def _MakeSection(title, body):
+ header_line = TextResultsReport.H2_STR
+ # '\n' at the end gives us two newlines.
+ return '\n'.join([header_line, title, header_line, body, '\n'])
+
+ @staticmethod
+ def FromExperiment(experiment, email=False):
+ results = BenchmarkResults.FromExperiment(experiment)
+ return TextResultsReport(results, email, experiment)
+
+ def GetStatusTable(self):
+ """Generate the status table by the tabulator."""
+ table = [['', '']]
+ columns = [Column(LiteralResult(iteration=0), Format(), 'Status'),
+ Column(LiteralResult(iteration=1), Format(), 'Failing Reason')]
+
+ for benchmark_run in self.experiment.benchmark_runs:
+ status = [benchmark_run.name, [benchmark_run.timeline.GetLastEvent(),
+ benchmark_run.failure_reason]]
+ table.append(status)
+ cell_table = TableFormatter(table, columns).GetCellTable('status')
+ return [cell_table]
+
+ def GetReport(self):
+ """Generate the report for email and console."""
+ output_type = 'EMAIL' if self.email else 'CONSOLE'
+ experiment = self.experiment
+
+ sections = []
+ if experiment is not None:
+ title_contents = "Results report for '%s'" % (experiment.name, )
+ else:
+ title_contents = 'Results report'
+ sections.append(self._MakeTitle(title_contents))
+
+ summary_table = _PrintTable(self.GetSummaryTables(perf=False), output_type)
+ sections.append(self._MakeSection('Summary', summary_table))
+
+ if experiment is not None:
+ table = _PrintTable(self.GetStatusTable(), output_type)
+ sections.append(self._MakeSection('Benchmark Run Status', table))
+
+ perf_table = _PrintTable(self.GetSummaryTables(perf=True), output_type)
+ if perf_table:
+ sections.append(self._MakeSection('Perf Data', perf_table))
+
+ if experiment is not None:
+ experiment_file = experiment.experiment_file
+ sections.append(self._MakeSection('Experiment File', experiment_file))
+
+ cpu_info = experiment.machine_manager.GetAllCPUInfo(experiment.labels)
+ sections.append(self._MakeSection('CPUInfo', cpu_info))
+
+ return '\n'.join(sections)
+
+
+def _GetHTMLCharts(label_names, test_results):
+ charts = []
+ for item, runs in test_results.iteritems():
+ # Fun fact: label_names is actually *entirely* useless as a param, since we
+ # never add headers. We still need to pass it anyway.
+ table = TableGenerator(runs, label_names).GetTable()
+ columns = [Column(AmeanResult(), Format()), Column(MinResult(), Format()),
+ Column(MaxResult(), Format())]
+ tf = TableFormatter(table, columns)
+ data_table = tf.GetCellTable('full', headers=False)
+
+ for cur_row_data in data_table:
+ test_key = cur_row_data[0].string_value
+ title = '{0}: {1}'.format(item, test_key.replace('/', ''))
+ chart = ColumnChart(title, 300, 200)
+ chart.AddColumn('Label', 'string')
+ chart.AddColumn('Average', 'number')
+ chart.AddColumn('Min', 'number')
+ chart.AddColumn('Max', 'number')
+ chart.AddSeries('Min', 'line', 'black')
+ chart.AddSeries('Max', 'line', 'black')
+ cur_index = 1
+ for label in label_names:
+ chart.AddRow([label,
+ cur_row_data[cur_index].value,
+ cur_row_data[cur_index + 1].value,
+ cur_row_data[cur_index + 2].value])
+ if isinstance(cur_row_data[cur_index].value, str):
+ chart = None
+ break
+ cur_index += 3
+ if chart:
+ charts.append(chart)
+ return charts
+
+
+class HTMLResultsReport(ResultsReport):
+ """Class to generate html result report."""
+
+ def __init__(self, benchmark_results, experiment=None):
+ super(HTMLResultsReport, self).__init__(benchmark_results)
+ self.experiment = experiment
+
+ @staticmethod
+ def FromExperiment(experiment):
+ return HTMLResultsReport(BenchmarkResults.FromExperiment(experiment),
+ experiment=experiment)
+
+ def GetReport(self):
+ label_names = self.benchmark_results.label_names
+ test_results = self.benchmark_results.run_keyvals
+ charts = _GetHTMLCharts(label_names, test_results)
+ chart_javascript = ''.join(chart.GetJavascript() for chart in charts)
+ chart_divs = ''.join(chart.GetDiv() for chart in charts)
+
+ summary_table = self.GetSummaryTables()
+ full_table = self.GetFullTables()
+ perf_table = self.GetSummaryTables(perf=True)
+ experiment_file = ''
+ if self.experiment is not None:
+ experiment_file = self.experiment.experiment_file
+ # Use kwargs for sanity, and so that testing is a bit easier.
+ return templates.GenerateHTMLPage(perf_table=perf_table,
+ chart_js=chart_javascript,
+ summary_table=summary_table,
+ print_table=_PrintTable,
+ chart_divs=chart_divs,
+ full_table=full_table,
+ experiment_file=experiment_file)
+
+
+def ParseStandardPerfReport(report_data):
+ """Parses the output of `perf report`.
+
+ It'll parse the following:
+ {{garbage}}
+ # Samples: 1234M of event 'foo'
+
+ 1.23% command shared_object location function::name
+
+ 1.22% command shared_object location function2::name
+
+ # Samples: 999K of event 'bar'
+
+ 0.23% command shared_object location function3::name
+ {{etc.}}
+
+ Into:
+ {'foo': {'function::name': 1.23, 'function2::name': 1.22},
+ 'bar': {'function3::name': 0.23, etc.}}
+ """
+ # This function fails silently if it's handed a string (as opposed to a
+ # list of lines), so auto-split if we do happen to get a string.
+ if isinstance(report_data, basestring):
+ report_data = report_data.splitlines()
+
+ # Samples: N{K,M,G} of event 'event-name'
+ samples_regex = re.compile(r"#\s+Samples: \d+\S? of event '([^']+)'")
+
+ # We expect lines like:
+ # N.NN% command samples shared_object [location] symbol
+ #
+ # Note that we're looking at stripped lines, so there is no space at the
+ # start.
+ perf_regex = re.compile(r'^(\d+(?:.\d*)?)%' # N.NN%
+ r'\s*\d+' # samples count (ignored)
+ r'\s*\S+' # command (ignored)
+ r'\s*\S+' # shared_object (ignored)
+ r'\s*\[.\]' # location (ignored)
+ r'\s*(\S.+)' # function
+ )
+
+ stripped_lines = (l.strip() for l in report_data)
+ nonempty_lines = (l for l in stripped_lines if l)
+ # Ignore all lines before we see samples_regex
+ interesting_lines = itertools.dropwhile(lambda x: not samples_regex.match(x),
+ nonempty_lines)
+
+ first_sample_line = next(interesting_lines, None)
+ # Went through the entire file without finding a 'samples' header. Quit.
+ if first_sample_line is None:
+ return {}
+
+ sample_name = samples_regex.match(first_sample_line).group(1)
+ current_result = {}
+ results = {sample_name: current_result}
+ for line in interesting_lines:
+ samples_match = samples_regex.match(line)
+ if samples_match:
+ sample_name = samples_match.group(1)
+ current_result = {}
+ results[sample_name] = current_result
+ continue
+
+ match = perf_regex.match(line)
+ if not match:
+ continue
+ percentage_str, func_name = match.groups()
+ try:
+ percentage = float(percentage_str)
+ except ValueError:
+ # Couldn't parse it; try to be "resilient".
+ continue
+ current_result[func_name] = percentage
+ return results
+
+
+def _ReadExperimentPerfReport(results_directory, label_name, benchmark_name,
+ benchmark_iteration):
+ """Reads a perf report for the given benchmark. Returns {} on failure.
+
+ The result should be a map of maps; it should look like:
+ {perf_event_name: {function_name: pct_time_spent}}, e.g.
+ {'cpu_cycles': {'_malloc': 10.0, '_free': 0.3, ...}}
+ """
+ raw_dir_name = label_name + benchmark_name + str(benchmark_iteration + 1)
+ dir_name = ''.join(c for c in raw_dir_name if c.isalnum())
+ file_name = os.path.join(results_directory, dir_name, 'perf.data.report.0')
+ try:
+ with open(file_name) as in_file:
+ return ParseStandardPerfReport(in_file)
+ except IOError:
+ # Yes, we swallow any IO-related errors.
+ return {}
+
+
+# Split out so that testing (specifically: mocking) is easier
+def _ExperimentToKeyvals(experiment, for_json_report):
+ """Converts an experiment to keyvals."""
+ return OrganizeResults(experiment.benchmark_runs, experiment.labels,
+ json_report=for_json_report)
+
+
+class BenchmarkResults(object):
+ """The minimum set of fields that any ResultsReport will take."""
+ def __init__(self, label_names, benchmark_names_and_iterations, run_keyvals,
+ read_perf_report=None):
+ if read_perf_report is None:
+ def _NoPerfReport(*_args, **_kwargs):
+ return {}
+ read_perf_report = _NoPerfReport
+
+ self.label_names = label_names
+ self.benchmark_names_and_iterations = benchmark_names_and_iterations
+ self.iter_counts = dict(benchmark_names_and_iterations)
+ self.run_keyvals = run_keyvals
+ self.read_perf_report = read_perf_report
+
+ @staticmethod
+ def FromExperiment(experiment, for_json_report=False):
+ label_names = [label.name for label in experiment.labels]
+ benchmark_names_and_iterations = [(benchmark.name, benchmark.iterations)
+ for benchmark in experiment.benchmarks]
+ run_keyvals = _ExperimentToKeyvals(experiment, for_json_report)
+ read_perf_report = functools.partial(_ReadExperimentPerfReport,
+ experiment.results_directory)
+ return BenchmarkResults(label_names, benchmark_names_and_iterations,
+ run_keyvals, read_perf_report)
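+
+# Hedged sketch of standalone (non-experiment) use; the values are invented:
+#   results = BenchmarkResults(['label1'], [('bench1', 1)],
+#                              {'bench1': [[{'retval': 0, 'foo': 2.0}]]})
+#   print(TextResultsReport(results).GetReport())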
+
+
+def _GetElemByName(name, from_list):
+ """Gets an element from the given list by its name field.
+
+ Raises an error if it doesn't find exactly one match.
+ """
+ elems = [e for e in from_list if e.name == name]
+ if len(elems) != 1:
+ raise ValueError('Expected 1 item named %s, found %d' % (name, len(elems)))
+ return elems[0]
+
+
+def _Unlist(l):
+ """If l is a list, extracts the first element of l. Otherwise, returns l."""
+ return l[0] if isinstance(l, list) else l
+
+class JSONResultsReport(ResultsReport):
+ """Class that generates JSON reports for experiments."""
+
+ def __init__(self, benchmark_results, date=None, time=None, experiment=None,
+ json_args=None):
+ """Construct a JSONResultsReport.
+
+ json_args is the dict of arguments we pass to json.dumps in GetReport().
+ """
+ super(JSONResultsReport, self).__init__(benchmark_results)
+
+ defaults = TelemetryDefaults()
+ defaults.ReadDefaultsFile()
+ summary_field_defaults = defaults.GetDefault()
+ if summary_field_defaults is None:
+ summary_field_defaults = {}
+ self.summary_field_defaults = summary_field_defaults
+
+ if json_args is None:
+ json_args = {}
+ self.json_args = json_args
+
+ self.experiment = experiment
+ if not date:
+ timestamp = datetime.datetime.strftime(datetime.datetime.now(),
+ '%Y-%m-%d %H:%M:%S')
+ date, time = timestamp.split(' ')
+ self.date = date
+ self.time = time
+
+ @staticmethod
+ def FromExperiment(experiment, date=None, time=None, json_args=None):
+ benchmark_results = BenchmarkResults.FromExperiment(experiment,
+ for_json_report=True)
+ return JSONResultsReport(benchmark_results, date, time, experiment,
+ json_args)
+
+ def GetReportObjectIgnoringExperiment(self):
+ """Gets the JSON report object specifically for the output data.
+
+ Ignores any experiment-specific fields (e.g. board, machine checksum, ...).
+ """
+ benchmark_results = self.benchmark_results
+ label_names = benchmark_results.label_names
+ summary_field_defaults = self.summary_field_defaults
+ final_results = []
+ for test, test_results in benchmark_results.run_keyvals.iteritems():
+ for label_name, label_results in zip(label_names, test_results):
+ for iter_results in label_results:
+ passed = iter_results.get('retval') == 0
+ json_results = {
+ 'date': self.date,
+ 'time': self.time,
+ 'label': label_name,
+ 'test_name': test,
+ 'pass': passed,
+ }
+ final_results.append(json_results)
+
+ if not passed:
+ continue
+
+ # Get overall results.
+ summary_fields = summary_field_defaults.get(test)
+ if summary_fields is not None:
+ value = []
+ json_results['overall_result'] = value
+ for f in summary_fields:
+ v = iter_results.get(f)
+ if v is None:
+ continue
+ # New telemetry results format: sometimes we get a list of lists
+ # now.
+ v = _Unlist(_Unlist(v))
+ value.append((f, float(v)))
+
+ # Get detailed results.
+ detail_results = {}
+ json_results['detailed_results'] = detail_results
+ for k, v in iter_results.iteritems():
+ if k == 'retval' or k == 'PASS' or k == ['PASS'] or v == 'PASS':
+ continue
+
+ v = _Unlist(v)
+ if 'machine' in k:
+ json_results[k] = v
+ elif v is not None:
+ if isinstance(v, list):
+ detail_results[k] = [float(d) for d in v]
+ else:
+ detail_results[k] = float(v)
+ return final_results
+
+ def GetReportObject(self):
+ """Generate the JSON report, returning it as a python object."""
+ report_list = self.GetReportObjectIgnoringExperiment()
+ if self.experiment is not None:
+ self._AddExperimentSpecificFields(report_list)
+ return report_list
+
+ def _AddExperimentSpecificFields(self, report_list):
+ """Add experiment-specific data to the JSON report."""
+ board = self.experiment.labels[0].board
+ manager = self.experiment.machine_manager
+ for report in report_list:
+ label_name = report['label']
+ label = _GetElemByName(label_name, self.experiment.labels)
+
+ img_path = os.path.realpath(os.path.expanduser(label.chromeos_image))
+ ver, img = ParseChromeosImage(img_path)
+
+ report.update({
+ 'board': board,
+ 'chromeos_image': img,
+ 'chromeos_version': ver,
+ 'chrome_version': label.chrome_version,
+ 'compiler': label.compiler
+ })
+
+ if not report['pass']:
+ continue
+ if 'machine_checksum' not in report:
+ report['machine_checksum'] = manager.machine_checksum[label_name]
+ if 'machine_string' not in report:
+ report['machine_string'] = manager.machine_checksum_string[label_name]
+
+ def GetReport(self):
+ """Dump the results of self.GetReportObject() to a string as JSON."""
+ # This exists for consistency with the other GetReport methods.
+ # Specifically, they all return strings, so it's a bit awkward if the JSON
+ # results reporter returns an object.
+ return json.dumps(self.GetReportObject(), **self.json_args)
diff --git a/crosperf/results_report_templates.py b/crosperf/results_report_templates.py
new file mode 100644
index 00000000..827649fd
--- /dev/null
+++ b/crosperf/results_report_templates.py
@@ -0,0 +1,196 @@
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Text templates used by various parts of results_report."""
+from __future__ import print_function
+
+import cgi
+from string import Template
+
+_TabMenuTemplate = Template("""
+<div class='tab-menu'>
+ <a href="javascript:switchTab('$table_name', 'html')">HTML</a>
+ <a href="javascript:switchTab('$table_name', 'text')">Text</a>
+ <a href="javascript:switchTab('$table_name', 'tsv')">TSV</a>
+</div>""")
+
+def _GetTabMenuHTML(table_name):
+ # N.B. cgi.escape does some very basic HTML escaping. Nothing more.
+ escaped = cgi.escape(table_name, quote=True)
+ return _TabMenuTemplate.substitute(table_name=escaped)
+
+
+_ExperimentFileHTML = """
+<div class='results-section'>
+ <div class='results-section-title'>Experiment File</div>
+ <div class='results-section-content'>
+ <pre>%s</pre>
+</div>
+"""
+
+def _GetExperimentFileHTML(experiment_file_text):
+ if not experiment_file_text:
+ return ''
+ return _ExperimentFileHTML % (cgi.escape(experiment_file_text), )
+
+
+_ResultsSectionHTML = Template("""
+<div class='results-section'>
+ <div class='results-section-title'>$sect_name</div>
+ <div class='results-section-content'>
+ <div id='${short_name}-html'>$html_table</div>
+ <div id='${short_name}-text'><pre>$text_table</pre></div>
+ <div id='${short_name}-tsv'><pre>$tsv_table</pre></div>
+ </div>
+ $tab_menu
+</div>
+""")
+
+def _GetResultsSectionHTML(print_table, table_name, data):
+ first_word = table_name.strip().split()[0]
+ short_name = first_word.lower()
+ return _ResultsSectionHTML.substitute(sect_name=table_name,
+ html_table=print_table(data, 'HTML'),
+ text_table=print_table(data, 'PLAIN'),
+ tsv_table=print_table(data, 'TSV'),
+ tab_menu=_GetTabMenuHTML(short_name),
+ short_name=short_name)
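+
+# For example, the section titled 'Summary Table' gets short_name 'summary',
+# which is what the switchTab() calls in _MainHTML below refer to.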
+
+
+
+_MainHTML = Template("""
+<html>
+<head>
+ <style type="text/css">
+ body {
+ font-family: "Lucida Sans Unicode", "Lucida Grande", Sans-Serif;
+ font-size: 12px;
+ }
+
+ pre {
+ margin: 10px;
+ color: #039;
+ font-size: 14px;
+ }
+
+ .chart {
+ display: inline;
+ }
+
+ .hidden {
+ visibility: hidden;
+ }
+
+ .results-section {
+ border: 1px solid #b9c9fe;
+ margin: 10px;
+ }
+
+ .results-section-title {
+ background-color: #b9c9fe;
+ color: #039;
+ padding: 7px;
+ font-size: 14px;
+ width: 200px;
+ }
+
+ .results-section-content {
+ margin: 10px;
+ padding: 10px;
+ overflow:auto;
+ }
+
+ #box-table-a {
+ font-size: 12px;
+ width: 480px;
+ text-align: left;
+ border-collapse: collapse;
+ }
+
+ #box-table-a th {
+ padding: 6px;
+ background: #b9c9fe;
+ border-right: 1px solid #fff;
+ border-bottom: 1px solid #fff;
+ color: #039;
+ text-align: center;
+ }
+
+ #box-table-a td {
+ padding: 4px;
+ background: #e8edff;
+ border-bottom: 1px solid #fff;
+ border-right: 1px solid #fff;
+ color: #669;
+ border-top: 1px solid transparent;
+ }
+
+ #box-table-a tr:hover td {
+ background: #d0dafd;
+ color: #339;
+ }
+
+ </style>
+ <script type='text/javascript' src='https://www.google.com/jsapi'></script>
+ <script type='text/javascript'>
+ google.load('visualization', '1', {packages:['corechart']});
+ google.setOnLoadCallback(init);
+ function init() {
+ switchTab('summary', 'html');
+ ${perf_init};
+ switchTab('full', 'html');
+ drawTable();
+ }
+ function drawTable() {
+ ${chart_js};
+ }
+ function switchTab(table, tab) {
+ document.getElementById(table + '-html').style.display = 'none';
+ document.getElementById(table + '-text').style.display = 'none';
+ document.getElementById(table + '-tsv').style.display = 'none';
+ document.getElementById(table + '-' + tab).style.display = 'block';
+ }
+ </script>
+</head>
+
+<body>
+ $summary_table
+ $perf_html
+ <div class='results-section'>
+ <div class='results-section-title'>Charts</div>
+ <div class='results-section-content'>$chart_divs</div>
+ </div>
+ $full_table
+ $experiment_file
+</body>
+</html>
+""")
+
+# It's a bit ugly that we take some HTML things, and some non-HTML things, but I
+# need to balance prettiness with time spent making things pretty.
+def GenerateHTMLPage(perf_table, chart_js, summary_table, print_table,
+ chart_divs, full_table, experiment_file):
+ """Generates a crosperf HTML page from the given arguments.
+
+ print_table is a two-arg function called like: print_table(t, f)
+ t is one of [summary_table, perf_table, full_table]; it's the table we want
+ to format.
+ f is one of ['TSV', 'HTML', 'PLAIN']; it's the type of format we want.
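+
+ For example (illustrative): the summary section is rendered with
+ print_table(summary_table, 'HTML'), print_table(summary_table, 'PLAIN'), and
+ print_table(summary_table, 'TSV').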
+ """
+ summary_table_html = _GetResultsSectionHTML(print_table, 'Summary Table',
+ summary_table)
+ if perf_table:
+ perf_html = _GetResultsSectionHTML(print_table, 'Perf Table', perf_table)
+ perf_init = "switchTab('perf', 'html')"
+ else:
+ perf_html = ''
+ perf_init = ''
+
+ full_table_html = _GetResultsSectionHTML(print_table, 'Full Table',
+ full_table)
+ experiment_file_html = _GetExperimentFileHTML(experiment_file)
+ return _MainHTML.substitute(perf_init=perf_init, chart_js=chart_js,
+ summary_table=summary_table_html,
+ perf_html=perf_html, chart_divs=chart_divs,
+ full_table=full_table_html,
+ experiment_file=experiment_file_html)
diff --git a/crosperf/results_report_unittest.py b/crosperf/results_report_unittest.py
new file mode 100755
index 00000000..ed5c74fa
--- /dev/null
+++ b/crosperf/results_report_unittest.py
@@ -0,0 +1,415 @@
+#!/usr/bin/env python2
+#
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unittest for the results reporter."""
+
+from __future__ import division
+from __future__ import print_function
+
+from StringIO import StringIO
+
+import collections
+import mock
+import os
+import test_flag
+import unittest
+
+from benchmark_run import MockBenchmarkRun
+from cros_utils import logger
+from experiment_factory import ExperimentFactory
+from experiment_file import ExperimentFile
+from machine_manager import MockCrosMachine
+from machine_manager import MockMachineManager
+from results_cache import MockResult
+from results_report import BenchmarkResults
+from results_report import HTMLResultsReport
+from results_report import JSONResultsReport
+from results_report import ParseChromeosImage
+from results_report import ParseStandardPerfReport
+from results_report import TextResultsReport
+
+
+class FreeFunctionsTest(unittest.TestCase):
+ """Tests for any free functions in results_report."""
+
+ def testParseChromeosImage(self):
+ # N.B. the cases with blank versions aren't explicitly supported by
+ # ParseChromeosImage. I'm not sure if they need to be supported, but the
+ # goal of this was to capture existing functionality as much as possible.
+ base_case = '/my/chroot/src/build/images/x86-generic/R01-1.0.date-time' \
+ '/chromiumos_test_image.bin'
+ self.assertEqual(ParseChromeosImage(base_case), ('R01-1.0', base_case))
+
+ dir_base_case = os.path.dirname(base_case)
+ self.assertEqual(ParseChromeosImage(dir_base_case), ('', dir_base_case))
+
+ buildbot_case = '/my/chroot/chroot/tmp/buildbot-build/R02-1.0.date-time' \
+ '/chromiumos_test_image.bin'
+ buildbot_img = buildbot_case.split('/chroot/tmp')[1]
+
+ self.assertEqual(ParseChromeosImage(buildbot_case),
+ ('R02-1.0', buildbot_img))
+ self.assertEqual(ParseChromeosImage(os.path.dirname(buildbot_case)),
+ ('', os.path.dirname(buildbot_img)))
+
+ # Ensure we don't act completely insanely given a few mildly insane paths.
+ fun_case = '/chromiumos_test_image.bin'
+ self.assertEqual(ParseChromeosImage(fun_case), ('', fun_case))
+
+ fun_case2 = 'chromiumos_test_image.bin'
+ self.assertEqual(ParseChromeosImage(fun_case2), ('', fun_case2))
+
+
+# There are many ways for this to be done better, but the linter complains
+# about all of them (that I can think of, at least).
+_fake_path_number = [0]
+def FakePath(ext):
+ """Makes a unique path that shouldn't exist on the host system.
+
+ Each call returns a different path, so if said path finds its way into an
+ error message, it may be easier to track it to its source.
+ """
+ _fake_path_number[0] += 1
+ prefix = '/tmp/should/not/exist/%d/' % (_fake_path_number[0], )
+ return os.path.join(prefix, ext)
+
+
+def MakeMockExperiment(compiler='gcc'):
+ """Mocks an experiment using the given compiler."""
+ mock_experiment_file = StringIO("""
+ board: x86-alex
+ remote: 127.0.0.1
+ perf_args: record -a -e cycles
+ benchmark: PageCycler {
+ iterations: 3
+ }
+
+ image1 {
+ chromeos_image: %s
+ }
+
+ image2 {
+ remote: 127.0.0.2
+ chromeos_image: %s
+ }
+ """ % (FakePath('cros_image1.bin'), FakePath('cros_image2.bin')))
+ efile = ExperimentFile(mock_experiment_file)
+ experiment = ExperimentFactory().GetExperiment(efile,
+ FakePath('working_directory'),
+ FakePath('log_dir'))
+ for label in experiment.labels:
+ label.compiler = compiler
+ return experiment
+
+
+def _InjectSuccesses(experiment, how_many, keyvals, for_benchmark=0,
+ label=None):
+ """Injects successful experiment runs (for each label) into the experiment."""
+ # Defensive copy of keyvals, so if it's modified, we'll know.
+ keyvals = dict(keyvals)
+ num_configs = len(experiment.benchmarks) * len(experiment.labels)
+ num_runs = len(experiment.benchmark_runs) // num_configs
+
+ # TODO(gbiv): Centralize the mocking of these, maybe? (It's also done in
+ # benchmark_run_unittest)
+ bench = experiment.benchmarks[for_benchmark]
+ cache_conditions = []
+ log_level = 'average'
+ share_cache = ''
+ locks_dir = ''
+ log = logger.GetLogger()
+ machine_manager = MockMachineManager(FakePath('chromeos_root'), 0,
+ log_level, locks_dir)
+ machine_manager.AddMachine('testing_machine')
+ machine = next(m for m in machine_manager.GetMachines()
+ if m.name == 'testing_machine')
+ for label in experiment.labels:
+ def MakeSuccessfulRun(n):
+ run = MockBenchmarkRun('mock_success%d' % (n, ), bench, label,
+ 1 + n + num_runs, cache_conditions,
+ machine_manager, log, log_level, share_cache)
+ mock_result = MockResult(log, label, log_level, machine)
+ mock_result.keyvals = keyvals
+ run.result = mock_result
+ return run
+
+ experiment.benchmark_runs.extend(MakeSuccessfulRun(n)
+ for n in xrange(how_many))
+ return experiment
+
+
+class TextResultsReportTest(unittest.TestCase):
+ """Tests that the output of a text report contains the things we pass in.
+
+ At the moment, this doesn't care deeply about the format in which said
+ things are displayed. It just cares that they're present.
+ """
+
+ def _checkReport(self, email):
+ num_success = 2
+ success_keyvals = {'retval': 0, 'machine': 'some bot', 'a_float': 3.96}
+ experiment = _InjectSuccesses(MakeMockExperiment(), num_success,
+ success_keyvals)
+ text_report = TextResultsReport.FromExperiment(experiment, email=email) \
+ .GetReport()
+ self.assertIn(str(success_keyvals['a_float']), text_report)
+ self.assertIn(success_keyvals['machine'], text_report)
+ self.assertIn(MockCrosMachine.CPUINFO_STRING, text_report)
+ return text_report
+
+
+ def testOutput(self):
+ email_report = self._checkReport(email=True)
+ text_report = self._checkReport(email=False)
+
+ # Ensure that the reports are somehow different. Otherwise, having the
+ # distinction is useless.
+ self.assertNotEqual(email_report, text_report)
+
+
+class HTMLResultsReportTest(unittest.TestCase):
+ """Tests that the output of a HTML report contains the things we pass in.
+
+ At the moment, this doesn't care deeply about the format in which said
+ things are displayed. It just cares that they're present.
+ """
+
+ _TestOutput = collections.namedtuple('TestOutput', ['summary_table',
+ 'perf_html',
+ 'chart_js',
+ 'charts',
+ 'full_table',
+ 'experiment_file'])
+
+ @staticmethod
+ def _GetTestOutput(perf_table, chart_js, summary_table, print_table,
+ chart_divs, full_table, experiment_file):
+ # N.B. Currently we don't check chart_js; it's just passed through because
+ # cros lint complains otherwise.
+ summary_table = print_table(summary_table, 'HTML')
+ perf_html = print_table(perf_table, 'HTML')
+ full_table = print_table(full_table, 'HTML')
+ return HTMLResultsReportTest._TestOutput(summary_table=summary_table,
+ perf_html=perf_html,
+ chart_js=chart_js,
+ charts=chart_divs,
+ full_table=full_table,
+ experiment_file=experiment_file)
+
+ def _GetOutput(self, experiment=None, benchmark_results=None):
+ with mock.patch('results_report_templates.GenerateHTMLPage') as standin:
+ if experiment is not None:
+ HTMLResultsReport.FromExperiment(experiment).GetReport()
+ else:
+ HTMLResultsReport(benchmark_results).GetReport()
+ mod_mock = standin
+ self.assertEquals(mod_mock.call_count, 1)
+ # call_args[0] is positional args, call_args[1] is kwargs.
+ self.assertEquals(mod_mock.call_args[0], tuple())
+ fmt_args = mod_mock.call_args[1]
+ return self._GetTestOutput(**fmt_args)
+
+ def testNoSuccessOutput(self):
+ output = self._GetOutput(MakeMockExperiment())
+ self.assertIn('no result', output.summary_table)
+ self.assertIn('no result', output.full_table)
+ self.assertEqual(output.charts, '')
+ self.assertNotEqual(output.experiment_file, '')
+
+ def testSuccessfulOutput(self):
+ num_success = 2
+ success_keyvals = {'retval': 0, 'a_float': 3.96}
+ output = self._GetOutput(_InjectSuccesses(MakeMockExperiment(), num_success,
+ success_keyvals))
+
+ self.assertNotIn('no result', output.summary_table)
+ #self.assertIn(success_keyvals['machine'], output.summary_table)
+ self.assertIn('a_float', output.summary_table)
+ self.assertIn(str(success_keyvals['a_float']), output.summary_table)
+ self.assertIn('a_float', output.full_table)
+ # The _ in a_float is filtered out when we're generating HTML.
+ self.assertIn('afloat', output.charts)
+ # And make sure we have our experiment file...
+ self.assertNotEqual(output.experiment_file, '')
+
+ def testBenchmarkResultFailure(self):
+ labels = ['label1']
+ benchmark_names_and_iterations = [('bench1', 1)]
+ benchmark_keyvals = {'bench1': [[]]}
+ results = BenchmarkResults(labels, benchmark_names_and_iterations,
+ benchmark_keyvals)
+ output = self._GetOutput(benchmark_results=results)
+ self.assertIn('no result', output.summary_table)
+ self.assertEqual(output.charts, '')
+ self.assertEqual(output.experiment_file, '')
+
+ def testBenchmarkResultSuccess(self):
+ labels = ['label1']
+ benchmark_names_and_iterations = [('bench1', 1)]
+ benchmark_keyvals = {'bench1': [[{'retval': 1, 'foo': 2.0}]]}
+ results = BenchmarkResults(labels, benchmark_names_and_iterations,
+ benchmark_keyvals)
+ output = self._GetOutput(benchmark_results=results)
+ self.assertNotIn('no result', output.summary_table)
+ self.assertIn('bench1', output.summary_table)
+ self.assertIn('bench1', output.full_table)
+ self.assertNotEqual(output.charts, '')
+ self.assertEqual(output.experiment_file, '')
+
+
+class JSONResultsReportTest(unittest.TestCase):
+ """Tests JSONResultsReport."""
+
+ REQUIRED_REPORT_KEYS = ('date', 'time', 'label', 'test_name', 'pass')
+ EXPERIMENT_REPORT_KEYS = ('board', 'chromeos_image', 'chromeos_version',
+ 'chrome_version', 'compiler')
+
+ @staticmethod
+ def _GetRequiredKeys(is_experiment):
+ required_keys = JSONResultsReportTest.REQUIRED_REPORT_KEYS
+ if is_experiment:
+ required_keys += JSONResultsReportTest.EXPERIMENT_REPORT_KEYS
+ return required_keys
+
+ def _CheckRequiredKeys(self, test_output, is_experiment):
+ required_keys = self._GetRequiredKeys(is_experiment)
+ for output in test_output:
+ for key in required_keys:
+ self.assertIn(key, output)
+
+ def testAllFailedJSONReportOutput(self):
+ experiment = MakeMockExperiment()
+ results = JSONResultsReport.FromExperiment(experiment).GetReportObject()
+ self._CheckRequiredKeys(results, is_experiment=True)
+ # Nothing succeeded; we don't send anything more than what's required.
+ required_keys = self._GetRequiredKeys(is_experiment=True)
+ for result in results:
+ self.assertItemsEqual(result.iterkeys(), required_keys)
+
+ def testJSONReportOutputWithSuccesses(self):
+ success_keyvals = {
+ 'retval': 0,
+ 'a_float': '2.3',
+ 'many_floats': [['1.0', '2.0'], ['3.0']],
+ 'machine': "i'm a pirate"
+ }
+
+ # 2 is arbitrary.
+ num_success = 2
+ experiment = _InjectSuccesses(MakeMockExperiment(), num_success,
+ success_keyvals)
+ results = JSONResultsReport.FromExperiment(experiment).GetReportObject()
+ self._CheckRequiredKeys(results, is_experiment=True)
+
+ num_passes = num_success * len(experiment.labels)
+ non_failures = [r for r in results if r['pass']]
+ self.assertEqual(num_passes, len(non_failures))
+
+ # TODO(gbiv): ...Is the 3.0 *actually* meant to be dropped?
+ expected_detailed = {'a_float': 2.3, 'many_floats': [1.0, 2.0]}
+ for pass_ in non_failures:
+ self.assertIn('detailed_results', pass_)
+ self.assertDictEqual(expected_detailed, pass_['detailed_results'])
+ self.assertIn('machine', pass_)
+ self.assertEqual(success_keyvals['machine'], pass_['machine'])
+
+ def testFailedJSONReportOutputWithoutExperiment(self):
+ labels = ['label1']
+ benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2),
+ ('bench3', 1), ('bench4', 0)]
+ benchmark_keyvals = {
+ 'bench1': [[{'retval': 1, 'foo': 2.0}]],
+ 'bench2': [[{'retval': 1, 'foo': 4.0}, {'retval': -1, 'bar': 999}]],
+ # lack of retval is considered a failure.
+ 'bench3': [[{}]],
+ 'bench4': [[]]
+ }
+ bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
+ benchmark_keyvals)
+ results = JSONResultsReport(bench_results).GetReportObject()
+ self._CheckRequiredKeys(results, is_experiment=False)
+ self.assertFalse(any(r['pass'] for r in results))
+
+ def testJSONGetReportObeysJSONSettings(self):
+ labels = ['label1']
+ benchmark_names_and_iterations = [('bench1', 1)]
+ # These can be anything, really. So long as they're distinctive.
+ separators = (',\t\n\t', ':\t\n\t')
+ benchmark_keyvals = {'bench1': [[{'retval': 0, 'foo': 2.0}]]}
+ bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
+ benchmark_keyvals)
+ reporter = JSONResultsReport(bench_results,
+ json_args={'separators': separators})
+ result_str = reporter.GetReport()
+ self.assertIn(separators[0], result_str)
+ self.assertIn(separators[1], result_str)
+
+ def testSuccessfulJSONReportOutputWithoutExperiment(self):
+ labels = ['label1']
+ benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2)]
+ benchmark_keyvals = {
+ 'bench1': [[{'retval': 0, 'foo': 2.0}]],
+ 'bench2': [[{'retval': 0, 'foo': 4.0}, {'retval': 0, 'bar': 999}]]
+ }
+ bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
+ benchmark_keyvals)
+ results = JSONResultsReport(bench_results).GetReportObject()
+ self._CheckRequiredKeys(results, is_experiment=False)
+ self.assertTrue(all(r['pass'] for r in results))
+ # Enforce that the results have *some* deterministic order.
+ keyfn = lambda r: (r['test_name'], r['detailed_results'].get('foo', 5.0))
+ sorted_results = sorted(results, key=keyfn)
+ detailed_results = [r['detailed_results'] for r in sorted_results]
+ bench1, bench2_foo, bench2_bar = detailed_results
+ self.assertEqual(bench1['foo'], 2.0)
+ self.assertEqual(bench2_foo['foo'], 4.0)
+ self.assertEqual(bench2_bar['bar'], 999)
+ self.assertNotIn('bar', bench1)
+ self.assertNotIn('bar', bench2_foo)
+ self.assertNotIn('foo', bench2_bar)
+
+
+class PerfReportParserTest(unittest.TestCase):
+ """Tests for the perf report parser in results_report."""
+ @staticmethod
+ def _ReadRealPerfReport():
+ my_dir = os.path.dirname(os.path.realpath(__file__))
+ with open(os.path.join(my_dir, 'perf_files/perf.data.report.0')) as f:
+ return f.read()
+
+ def testParserParsesRealWorldPerfReport(self):
+ report = ParseStandardPerfReport(self._ReadRealPerfReport())
+ self.assertItemsEqual(['cycles', 'instructions'], report.keys())
+
+ # Arbitrarily selected known percentages from the perf report.
+ known_cycles_percentages = {
+ '0xffffffffa4a1f1c9': 0.66,
+ '0x0000115bb7ba9b54': 0.47,
+ '0x0000000000082e08': 0.00,
+ '0xffffffffa4a13e63': 0.00,
+ }
+ report_cycles = report['cycles']
+ self.assertEqual(len(report_cycles), 214)
+ for k, v in known_cycles_percentages.iteritems():
+ self.assertIn(k, report_cycles)
+ self.assertEqual(v, report_cycles[k])
+
+ known_instructions_percentages = {
+ '0x0000115bb6c35d7a': 1.65,
+ '0x0000115bb7ba9b54': 0.67,
+ '0x0000000000024f56': 0.00,
+ '0xffffffffa4a0ee03': 0.00,
+ }
+ report_instructions = report['instructions']
+ self.assertEqual(len(report_instructions), 492)
+ for k, v in known_instructions_percentages.iteritems():
+ self.assertIn(k, report_instructions)
+ self.assertEqual(v, report_instructions[k])
+
+
+if __name__ == '__main__':
+ test_flag.SetTestMode(True)
+ unittest.main()
diff --git a/crosperf/run_tests.sh b/crosperf/run_tests.sh
new file mode 100755
index 00000000..78a2b9fd
--- /dev/null
+++ b/crosperf/run_tests.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+#
+# Copyright 2011 Google Inc. All Rights Reserved.
+# Author: raymes@google.com (Raymes Khoury)
+
+# Make sure the base toolchain-utils directory is in our PYTHONPATH before
+# trying to run this script.
+export PYTHONPATH+=":.."
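+
+# Usage sketch (assumes it is run from this crosperf/ directory):
+#   cd crosperf && ./run_tests.sh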
+
+num_tests=0
+num_failed=0
+
+for test in $(find . -name \*test.py); do
+ echo RUNNING: ${test}
+ ((num_tests++))
+ if ! ./${test} ; then
+ echo
+ echo "*** Test Failed! (${test}) ***"
+ echo
+ ((num_failed++))
+ fi
+done
+
+echo
+
+if [ ${num_failed} -eq 0 ] ; then
+ echo "ALL TESTS PASSED (${num_tests} ran)"
+ exit 0
+fi
+
+echo "${num_failed} TESTS FAILED (out of ${num_tests})"
+exit 1
diff --git a/crosperf/schedv2.py b/crosperf/schedv2.py
new file mode 100644
index 00000000..90fe83a3
--- /dev/null
+++ b/crosperf/schedv2.py
@@ -0,0 +1,439 @@
+# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Module to optimize the scheduling of benchmark_run tasks."""
+
+
+from __future__ import print_function
+
+import sys
+import test_flag
+import traceback
+
+from collections import defaultdict
+from machine_image_manager import MachineImageManager
+from threading import Lock
+from threading import Thread
+from cros_utils import command_executer
+from cros_utils import logger
+
+
+class DutWorker(Thread):
+ """Working thread for a dut."""
+
+ def __init__(self, dut, sched):
+ super(DutWorker, self).__init__(name='DutWorker-{}'.format(dut.name))
+ self._dut = dut
+ self._sched = sched
+ self._stat_num_br_run = 0
+ self._stat_num_reimage = 0
+ self._stat_annotation = ''
+ self._logger = logger.GetLogger(self._sched.get_experiment().log_dir)
+ self.daemon = True
+ self._terminated = False
+ self._active_br = None
+ # Race condition accessing _active_br between _execute_benchmark_run and
+ # _terminate, so lock it up.
+ self._active_br_lock = Lock()
+
+ def terminate(self):
+ self._terminated = True
+ with self._active_br_lock:
+ if self._active_br is not None:
+ # BenchmarkRun.Terminate() terminates any running testcase via
+ # suite_runner.Terminate and updates timeline.
+ self._active_br.Terminate()
+
+ def run(self):
+ """Do the "run-test->(optionally reimage)->run-test" chore.
+
+ Note - 'br' below means 'benchmark_run'.
+ """
+
+ # First, handle benchmark_runs that have a cache hit.
+ br = self._sched.get_cached_benchmark_run()
+ while br:
+ try:
+ self._stat_annotation = 'finishing cached {}'.format(br)
+ br.run()
+ except RuntimeError:
+ traceback.print_exc(file=sys.stdout)
+ br = self._sched.get_cached_benchmark_run()
+
+ # Second, handle benchmark_runs that need to be run on the dut.
+ self._setup_dut_label()
+ try:
+ self._logger.LogOutput('{} started.'.format(self))
+ while not self._terminated:
+ br = self._sched.get_benchmark_run(self._dut)
+ if br is None:
+ # No br left for this label. Consider reimaging.
+ label = self._sched.allocate_label(self._dut)
+ if label is None:
+ # No br even for other labels. We are done.
+ self._logger.LogOutput('ImageManager found no label '
+ 'for dut, stopping working '
+ 'thread {}.'.format(self))
+ break
+ if self._reimage(label):
+ # Reimaging to run other brs failed; the dut is in a bad state, so
+ # stop this thread.
+ self._logger.LogWarning('Re-image failed, dut '
+ 'in an unstable state, stopping '
+ 'working thread {}.'.format(self))
+ break
+ else:
+ # Execute the br.
+ self._execute_benchmark_run(br)
+ finally:
+ self._stat_annotation = 'finished'
+ # Thread finishes. Notify scheduler that I'm done.
+ self._sched.dut_worker_finished(self)
+
+ def _reimage(self, label):
+ """Reimage image to label.
+
+ Args:
+ label: the label to reimage onto the dut.
+
+ Returns:
+ 0 if successful, otherwise 1.
+ """
+
+ # Termination could happen anywhere, check it.
+ if self._terminated:
+ return 1
+
+ self._logger.LogOutput('Reimaging {} using {}'.format(self, label))
+ self._stat_num_reimage += 1
+ self._stat_annotation = 'reimaging using "{}"'.format(label.name)
+ try:
+ # Note, only 1 reimage at any given time, this is guaranteed in
+ # ImageMachine, so no sync needed below.
+ retval = self._sched.get_experiment().machine_manager.ImageMachine(
+ self._dut,
+ label)
+
+ if retval:
+ return 1
+ except RuntimeError:
+ return 1
+
+ self._dut.label = label
+ return 0
+
+ def _execute_benchmark_run(self, br):
+ """Execute a single benchmark_run.
+
+ Note - this function never throws exceptions.
+ """
+
+ # Termination could happen anywhere, check it.
+ if self._terminated:
+ return
+
+ self._logger.LogOutput('{} started working on {}'.format(self, br))
+ self._stat_num_br_run += 1
+ self._stat_annotation = 'executing {}'.format(br)
+ # benchmark_run.run does not throw, but just play it safe here.
+ try:
+ assert br.owner_thread is None
+ br.owner_thread = self
+ with self._active_br_lock:
+ self._active_br = br
+ br.run()
+ finally:
+ self._sched.get_experiment().BenchmarkRunFinished(br)
+ with self._active_br_lock:
+ self._active_br = None
+
+ def _setup_dut_label(self):
+ """Try to match dut image with a certain experiment label.
+
+ If such a match is found, we skip reimaging and jump straight to executing
+ some benchmark_runs.
+ """
+
+ checksum_file = '/usr/local/osimage_checksum_file'
+ try:
+ rv, checksum, _ = command_executer.GetCommandExecuter().\
+ CrosRunCommandWOutput(
+ 'cat ' + checksum_file,
+ chromeos_root=self._sched.get_labels(0).chromeos_root,
+ machine=self._dut.name,
+ print_to_console=False)
+ if rv == 0:
+ checksum = checksum.strip()
+ for l in self._sched.get_labels():
+ if l.checksum == checksum:
+ self._logger.LogOutput("Dut '{}' is pre-installed with '{}'".format(
+ self._dut.name, l))
+ self._dut.label = l
+ return
+ except RuntimeError:
+ traceback.print_exc(file=sys.stdout)
+ self._dut.label = None
+
+ def __str__(self):
+ return 'DutWorker[dut="{}", label="{}"]'.format(
+ self._dut.name, self._dut.label.name if self._dut.label else 'None')
+
+ def dut(self):
+ return self._dut
+
+ def status_str(self):
+ """Report thread status."""
+
+ return ('Worker thread "{}", label="{}", benchmark_run={}, '
+ 'reimage={}, now {}'.format(
+ self._dut.name, 'None' if self._dut.label is None else
+ self._dut.label.name, self._stat_num_br_run,
+ self._stat_num_reimage, self._stat_annotation))
+
+
+class BenchmarkRunCacheReader(Thread):
+ """The thread to read cache for a list of benchmark_runs.
+
+ On creation, each instance of this class is given a br_list, which is a
+ subset of experiment._benchmark_runs.
+ """
+
+ def __init__(self, schedv2, br_list):
+ super(BenchmarkRunCacheReader, self).__init__()
+ self._schedv2 = schedv2
+ self._br_list = br_list
+ self._logger = self._schedv2.get_logger()
+
+ def run(self):
+ for br in self._br_list:
+ try:
+ br.ReadCache()
+ if br.cache_hit:
+ self._logger.LogOutput('Cache hit - {}'.format(br))
+ with self._schedv2.lock_on('_cached_br_list'):
+ self._schedv2.get_cached_run_list().append(br)
+ else:
+ self._logger.LogOutput('Cache not hit - {}'.format(br))
+ except RuntimeError:
+ traceback.print_exc(file=sys.stderr)
+
+
+class Schedv2(object):
+ """New scheduler for crosperf."""
+
+ def __init__(self, experiment):
+ self._experiment = experiment
+ self._logger = logger.GetLogger(experiment.log_dir)
+
+ # Create shortcuts to nested data structure. "_duts" points to a list of
+ # locked machines. _labels points to a list of all labels.
+ self._duts = self._experiment.machine_manager.GetMachines()
+ self._labels = self._experiment.labels
+
+ # Bookkeeping for synchronization.
+ self._workers_lock = Lock()
+ # pylint: disable=unnecessary-lambda
+ self._lock_map = defaultdict(lambda: Lock())
+
+ # Test mode flag
+ self._in_test_mode = test_flag.GetTestMode()
+
+ # Read benchmarkrun cache.
+ self._read_br_cache()
+
+ # Mapping from label to a list of benchmark_runs.
+ self._label_brl_map = dict((l, []) for l in self._labels)
+ for br in self._experiment.benchmark_runs:
+ assert br.label in self._label_brl_map
+ # Only put no-cache-hit br into the map.
+ if br not in self._cached_br_list:
+ self._label_brl_map[br.label].append(br)
+
+ # Use machine image manager to calculate initial label allocation.
+ self._mim = MachineImageManager(self._labels, self._duts)
+ self._mim.compute_initial_allocation()
+
+ # Create worker thread, 1 per dut.
+ self._active_workers = [DutWorker(dut, self) for dut in self._duts]
+ self._finished_workers = []
+
+ # Termination flag.
+ self._terminated = False
+
+ def run_sched(self):
+ """Start all dut worker threads and return immediately."""
+
+ for w in self._active_workers:
+ w.start()
+
+ def _read_br_cache(self):
+ """Use multi-threading to read cache for all benchmarkruns.
+
+ We do this by first creating a few threads, then assigning each thread a
+ segment of all brs. Each thread checks the cache status for each of its brs
+ and puts those with a cache hit into '_cached_br_list'.
+ """
+
+ self._cached_br_list = []
+ n_benchmarkruns = len(self._experiment.benchmark_runs)
+ if n_benchmarkruns <= 4:
+ # Use single thread to read cache.
+ self._logger.LogOutput(('Starting to read cache status for '
+ '{} benchmark runs ...').format(n_benchmarkruns))
+ BenchmarkRunCacheReader(self, self._experiment.benchmark_runs).run()
+ return
+
+ # Split benchmarkruns set into segments. Each segment will be handled by
+ # a thread. Note, we use (x+3)/4 to mimic math.ceil(x/4).
+ n_threads = max(2, min(20, (n_benchmarkruns + 3) / 4))
+ self._logger.LogOutput(('Starting {} threads to read cache status for '
+ '{} benchmark runs ...').format(n_threads,
+ n_benchmarkruns))
+ benchmarkruns_per_thread = (n_benchmarkruns + n_threads - 1) / n_threads
+ benchmarkrun_segments = []
+ for i in range(n_threads - 1):
+ start = i * benchmarkruns_per_thread
+ end = (i + 1) * benchmarkruns_per_thread
+ benchmarkrun_segments.append(self._experiment.benchmark_runs[start:end])
+ benchmarkrun_segments.append(self._experiment.benchmark_runs[
+ (n_threads - 1) * benchmarkruns_per_thread:])
+
+ # Assert: aggregation of benchmarkrun_segments equals to benchmark_runs.
+ assert sum(len(x) for x in benchmarkrun_segments) == n_benchmarkruns
+
+ # Create and start all readers.
+ cache_readers = [
+ BenchmarkRunCacheReader(self, x) for x in benchmarkrun_segments
+ ]
+
+ for x in cache_readers:
+ x.start()
+
+ # Wait till all readers finish.
+ for x in cache_readers:
+ x.join()
+
+ # Summarize.
+ self._logger.LogOutput(
+ 'Total {} cache hit out of {} benchmark_runs.'.format(
+ len(self._cached_br_list), n_benchmarkruns))
+
+ def get_cached_run_list(self):
+ return self._cached_br_list
+
+ def get_label_map(self):
+ return self._label_brl_map
+
+ def get_experiment(self):
+ return self._experiment
+
+ def get_labels(self, i=None):
+ if i is None:
+ return self._labels
+ return self._labels[i]
+
+ def get_logger(self):
+ return self._logger
+
+ def get_cached_benchmark_run(self):
+ """Get a benchmark_run with 'cache hit'.
+
+ Returns:
+ A benchmark_run that has a cache hit, if any; otherwise None.
+ """
+
+ with self.lock_on('_cached_br_list'):
+ if self._cached_br_list:
+ return self._cached_br_list.pop()
+ return None
+
+ def get_benchmark_run(self, dut):
+ """Get a benchmark_run (br) object for a certain dut.
+
+ Args:
+ dut: the dut for which a br is returned.
+
+ Returns:
+ A br with its label matching that of the dut. If no such br could be
+ found, return None (this usually means a reimage is required for the
+ dut).
+ """
+
+ # If terminated, stop providing any br.
+ if self._terminated:
+ return None
+
+ # If dut bears an unrecognized label, return None.
+ if dut.label is None:
+ return None
+
+ # If br list for the dut's label is empty (that means all brs for this
+ # label have been done), return None.
+ with self.lock_on(dut.label):
+ brl = self._label_brl_map[dut.label]
+ if not brl:
+ return None
+ # Return the first br.
+ return brl.pop(0)
+
+ def allocate_label(self, dut):
+ """Allocate a label to a dut.
+
+ The work is delegated to MachineImageManager.
+
+ The dut_worker calling this method is responsible for reimaging the dut to
+ the returned label.
+
+ Args:
+ dut: the dut for which a label is to be allocated.
+
+ Returns:
+ The label or None.
+ """
+
+ if self._terminated:
+ return None
+
+ return self._mim.allocate(dut, self)
+
+ def dut_worker_finished(self, dut_worker):
+ """Notify schedv2 that the dut_worker thread finished.
+
+ Args:
+ dut_worker: the thread that is about to end.
+ """
+
+ self._logger.LogOutput('{} finished.'.format(dut_worker))
+ with self._workers_lock:
+ self._active_workers.remove(dut_worker)
+ self._finished_workers.append(dut_worker)
+
+ def is_complete(self):
+ return len(self._active_workers) == 0
+
+ def lock_on(self, my_object):
+ return self._lock_map[my_object]
+
+ def terminate(self):
+ """Mark flag so we stop providing br/reimages.
+
+ Also terminate each DutWorker, so they refuse to execute br or reimage.
+ """
+
+ self._terminated = True
+ for dut_worker in self._active_workers:
+ dut_worker.terminate()
+
+ def threads_status_as_string(self):
+ """Report the dut worker threads status."""
+
+ status = '{} active threads, {} finished threads.\n'.format(
+ len(self._active_workers), len(self._finished_workers))
+ status += ' Active threads:'
+ for dw in self._active_workers:
+ status += '\n ' + dw.status_str()
+ if self._finished_workers:
+ status += '\n Finished threads:'
+ for dw in self._finished_workers:
+ status += '\n ' + dw.status_str()
+ return status
diff --git a/crosperf/schedv2_unittest.py b/crosperf/schedv2_unittest.py
new file mode 100755
index 00000000..be0fde4b
--- /dev/null
+++ b/crosperf/schedv2_unittest.py
@@ -0,0 +1,221 @@
+#!/usr/bin/env python2
+
+# Copyright 2015 Google Inc. All Rights Reserved.
+"""This contains the unit tests for the new Crosperf task scheduler."""
+
+from __future__ import print_function
+
+import mock
+import unittest
+import StringIO
+
+import benchmark_run
+import test_flag
+from experiment_factory import ExperimentFactory
+from experiment_file import ExperimentFile
+from cros_utils.command_executer import CommandExecuter
+from experiment_runner_unittest import FakeLogger
+from schedv2 import Schedv2
+
+EXPERIMENT_FILE_1 = """\
+board: daisy
+remote: chromeos-daisy1.cros chromeos-daisy2.cros
+
+benchmark: kraken {
+ suite: telemetry_Crosperf
+ iterations: 3
+}
+
+image1 {
+ chromeos_image: /chromeos/src/build/images/daisy/latest/cros_image1.bin
+ remote: chromeos-daisy3.cros
+}
+
+image2 {
+ chromeos_image: /chromeos/src/build/imaages/daisy/latest/cros_image2.bin
+ remote: chromeos-daisy4.cros chromeos-daisy5.cros
+}
+"""
+
+EXPERIMENT_FILE_WITH_FORMAT = """\
+board: daisy
+remote: chromeos-daisy1.cros chromeos-daisy2.cros
+
+benchmark: kraken {{
+ suite: telemetry_Crosperf
+ iterations: {kraken_iterations}
+}}
+
+image1 {{
+ chromeos_image: /chromeos/src/build/images/daisy/latest/cros_image1.bin
+ remote: chromeos-daisy3.cros
+}}
+
+image2 {{
+ chromeos_image: /chromeos/src/build/imaages/daisy/latest/cros_image2.bin
+ remote: chromeos-daisy4.cros chromeos-daisy5.cros
+}}
+"""
+
+
+class Schedv2Test(unittest.TestCase):
+ """Class for setting up and running the unit tests."""
+
+ def setUp(self):
+ self.exp = None
+
+ mock_logger = FakeLogger()
+ mock_cmd_exec = mock.Mock(spec=CommandExecuter)
+
+ @mock.patch('benchmark_run.BenchmarkRun', new=benchmark_run.MockBenchmarkRun)
+ def _make_fake_experiment(self, expstr):
+ """Create fake experiment from string.
+
+ Note - we mock out BenchmarkRun in this step.
+ """
+ experiment_file = ExperimentFile(StringIO.StringIO(expstr))
+ experiment = ExperimentFactory().GetExperiment(experiment_file,
+ working_directory='',
+ log_dir='')
+ return experiment
+
+ def test_remote(self):
+ """Test that remotes in labels are aggregated into experiment.remote."""
+
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_1)
+ self.exp.log_level = 'verbose'
+ my_schedv2 = Schedv2(self.exp)
+ self.assertFalse(my_schedv2.is_complete())
+ self.assertIn('chromeos-daisy1.cros', self.exp.remote)
+ self.assertIn('chromeos-daisy2.cros', self.exp.remote)
+ self.assertIn('chromeos-daisy3.cros', self.exp.remote)
+ self.assertIn('chromeos-daisy4.cros', self.exp.remote)
+ self.assertIn('chromeos-daisy5.cros', self.exp.remote)
+
+ def test_unreachable_remote(self):
+ """Test unreachable remotes are removed from experiment and label."""
+
+ def MockIsReachable(cm):
+ return (cm.name != 'chromeos-daisy3.cros' and
+ cm.name != 'chromeos-daisy5.cros')
+
+ with mock.patch('machine_manager.MockCrosMachine.IsReachable',
+ new=MockIsReachable):
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_1)
+ self.assertIn('chromeos-daisy1.cros', self.exp.remote)
+ self.assertIn('chromeos-daisy2.cros', self.exp.remote)
+ self.assertNotIn('chromeos-daisy3.cros', self.exp.remote)
+ self.assertIn('chromeos-daisy4.cros', self.exp.remote)
+ self.assertNotIn('chromeos-daisy5.cros', self.exp.remote)
+
+ for l in self.exp.labels:
+ if l.name == 'image2':
+ self.assertNotIn('chromeos-daisy5.cros', l.remote)
+ self.assertIn('chromeos-daisy4.cros', l.remote)
+ elif l.name == 'image1':
+ self.assertNotIn('chromeos-daisy3.cros', l.remote)
+
+ @mock.patch('schedv2.BenchmarkRunCacheReader')
+ def test_BenchmarkRunCacheReader_1(self, reader):
+ """Test benchmarkrun set is split into 5 segments."""
+
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
+ kraken_iterations=9))
+ my_schedv2 = Schedv2(self.exp)
+ self.assertFalse(my_schedv2.is_complete())
+ # We have 9 * 2 == 18 brs, we use 5 threads, each reading 4, 4, 4,
+ # 4, 2 brs respectively.
+ # Assert that BenchmarkRunCacheReader() is called 5 times.
+ self.assertEquals(reader.call_count, 5)
+ # reader.call_args_list[n] - nth call.
+ # reader.call_args_list[n][0] - positioned args in nth call.
+ # reader.call_args_list[n][0][1] - the 2nd arg in nth call,
+ # that is 'br_list' in 'schedv2.BenchmarkRunCacheReader'.
+ self.assertEquals(len(reader.call_args_list[0][0][1]), 4)
+ self.assertEquals(len(reader.call_args_list[1][0][1]), 4)
+ self.assertEquals(len(reader.call_args_list[2][0][1]), 4)
+ self.assertEquals(len(reader.call_args_list[3][0][1]), 4)
+ self.assertEquals(len(reader.call_args_list[4][0][1]), 2)
+
+ @mock.patch('schedv2.BenchmarkRunCacheReader')
+ def test_BenchmarkRunCacheReader_2(self, reader):
+ """Test benchmarkrun set is split into 4 segments."""
+
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
+ kraken_iterations=8))
+ my_schedv2 = Schedv2(self.exp)
+ self.assertFalse(my_schedv2.is_complete())
+ # We have 8 * 2 == 16 brs, we use 4 threads, each reading 4 brs.
+ self.assertEquals(reader.call_count, 4)
+ self.assertEquals(len(reader.call_args_list[0][0][1]), 4)
+ self.assertEquals(len(reader.call_args_list[1][0][1]), 4)
+ self.assertEquals(len(reader.call_args_list[2][0][1]), 4)
+ self.assertEquals(len(reader.call_args_list[3][0][1]), 4)
+
+ @mock.patch('schedv2.BenchmarkRunCacheReader')
+ def test_BenchmarkRunCacheReader_3(self, reader):
+ """Test benchmarkrun set is split into 2 segments."""
+
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
+ kraken_iterations=3))
+ my_schedv2 = Schedv2(self.exp)
+ self.assertFalse(my_schedv2.is_complete())
+ # We have 3 * 2 == 6 brs, we use 2 threads.
+ self.assertEquals(reader.call_count, 2)
+ self.assertEquals(len(reader.call_args_list[0][0][1]), 3)
+ self.assertEquals(len(reader.call_args_list[1][0][1]), 3)
+
+ @mock.patch('schedv2.BenchmarkRunCacheReader')
+ def test_BenchmarkRunCacheReader_4(self, reader):
+ """Test benchmarkrun set is not splitted."""
+
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
+ kraken_iterations=1))
+ my_schedv2 = Schedv2(self.exp)
+ self.assertFalse(my_schedv2.is_complete())
+ # We have 1 * 2 == 2 br, so only 1 instance.
+ self.assertEquals(reader.call_count, 1)
+ self.assertEquals(len(reader.call_args_list[0][0][1]), 2)
+
+ def test_cachehit(self):
+ """Test cache-hit and none-cache-hit brs are properly organized."""
+
+ def MockReadCache(br):
+ br.cache_hit = (br.label.name == 'image2')
+
+ with mock.patch('benchmark_run.MockBenchmarkRun.ReadCache',
+ new=MockReadCache):
+ # We have 2 * 30 brs, half of which are put into _cached_br_list.
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
+ kraken_iterations=30))
+ my_schedv2 = Schedv2(self.exp)
+ self.assertEquals(len(my_schedv2.get_cached_run_list()), 30)
+ # The non-cache-hit brs are put into Schedv2._label_brl_map.
+ self.assertEquals(
+ reduce(lambda a, x: a + len(x[1]),
+ my_schedv2.get_label_map().iteritems(),
+ 0), 30)
+
+ def test_nocachehit(self):
+ """Test no cache-hit."""
+
+ def MockReadCache(br):
+ br.cache_hit = False
+
+ with mock.patch('benchmark_run.MockBenchmarkRun.ReadCache',
+ new=MockReadCache):
+ # We have 2 * 30 brs, none of which are put into _cached_br_list.
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_WITH_FORMAT.format(
+ kraken_iterations=30))
+ my_schedv2 = Schedv2(self.exp)
+ self.assertEquals(len(my_schedv2.get_cached_run_list()), 0)
+ # The non-cache-hit brs are put into Schedv2._label_brl_map.
+ self.assertEquals(
+ reduce(lambda a, x: a + len(x[1]),
+ my_schedv2.get_label_map().iteritems(),
+ 0), 60)
+
+
+if __name__ == '__main__':
+ test_flag.SetTestMode(True)
+ unittest.main()
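A note on the mock bookkeeping used in the BenchmarkRunCacheReader tests above: mock records each call as an (args, kwargs) pair, so reader.call_args_list[n][0][1] is simply the second positional argument of the nth call. A minimal standalone sketch of that access pattern (independent of crosperf; the names are illustrative):

    import mock

    fake_reader = mock.Mock()
    fake_reader('scheduler', [1, 2, 3, 4])   # first recorded call
    fake_reader('scheduler', [5, 6])         # second recorded call

    args, kwargs = fake_reader.call_args_list[0]   # (positional args, keyword args) of call 0
    assert fake_reader.call_count == 2
    assert len(fake_reader.call_args_list[0][0][1]) == 4   # [n][0][1] == 2nd positional arg
    assert len(fake_reader.call_args_list[1][0][1]) == 2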
diff --git a/crosperf/settings.py b/crosperf/settings.py
new file mode 100644
index 00000000..8d5a25fd
--- /dev/null
+++ b/crosperf/settings.py
@@ -0,0 +1,81 @@
+# Copyright 2011 Google Inc. All Rights Reserved.
+"""Module to get the settings from experiment file."""
+
+from __future__ import print_function
+
+from cros_utils import logger
+from cros_utils import misc
+from download_images import ImageDownloader
+
+
+class Settings(object):
+ """Class representing settings (a set of fields) from an experiment file."""
+
+ def __init__(self, name, settings_type):
+ self.name = name
+ self.settings_type = settings_type
+ self.fields = {}
+ self.parent = None
+
+ def SetParentSettings(self, settings):
+ """Set the parent settings which these settings can inherit from."""
+ self.parent = settings
+
+ def AddField(self, field):
+ name = field.name
+ if name in self.fields:
+ raise SyntaxError('Field %s defined previously.' % name)
+ self.fields[name] = field
+
+ def SetField(self, name, value, append=False):
+ if name not in self.fields:
+ raise SyntaxError("'%s' is not a valid field in '%s' settings" %
+ (name, self.settings_type))
+ if append:
+ self.fields[name].Append(value)
+ else:
+ self.fields[name].Set(value)
+
+ def GetField(self, name):
+ """Get the value of a field with a given name."""
+ if name not in self.fields:
+ raise SyntaxError("Field '%s' not a valid field in '%s' settings." %
+ (name, self.name))
+ field = self.fields[name]
+ if not field.assigned and field.required:
+ raise SyntaxError("Required field '%s' not defined in '%s' settings." %
+ (name, self.name))
+ return self.fields[name].Get()
+
+ def Inherit(self):
+ """Inherit any unset values from the parent settings."""
+ for name in self.fields:
+ if (not self.fields[name].assigned and self.parent and
+ name in self.parent.fields and self.parent.fields[name].assigned):
+ self.fields[name].Set(self.parent.GetField(name), parse=False)
+
+ def Override(self, settings):
+ """Override settings with settings from a different object."""
+ for name in settings.fields:
+ if name in self.fields and settings.fields[name].assigned:
+ self.fields[name].Set(settings.GetField(name), parse=False)
+
+ def Validate(self):
+ """Check that all required fields have been set."""
+ for name in self.fields:
+ if not self.fields[name].assigned and self.fields[name].required:
+ raise SyntaxError("Required field '%s' has not been set." % name)
+
+ def GetXbuddyPath(self, path_str, autotest_path, board, chromeos_root,
+ log_level):
+ prefix = 'remote'
+ l = logger.GetLogger()
+ if (path_str.find('trybot') < 0 and path_str.find('toolchain') < 0 and
+ path_str.find(board) < 0):
+ xbuddy_path = '%s/%s/%s' % (prefix, board, path_str)
+ else:
+ xbuddy_path = '%s/%s' % (prefix, path_str)
+ image_downloader = ImageDownloader(l, log_level)
+ image_and_autotest_path = image_downloader.Run(
+ misc.CanonicalizePath(chromeos_root), xbuddy_path, autotest_path)
+ return image_and_autotest_path
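The Settings class above is the glue between parsed experiment-file blocks and the typed Field objects from field.py. A minimal sketch of how AddField, SetField, and Inherit compose (field names are illustrative, and it assumes field.py's TextField accepts the same keyword arguments used in settings_factory.py below):

    from field import TextField
    from settings import Settings

    parent = Settings('global', 'global')
    parent.AddField(TextField('chromeos_root', default='',
                              description='Path to the chromeos checkout.'))
    parent.SetField('chromeos_root', '/tmp/chromeos')

    child = Settings('image1', 'label')
    child.AddField(TextField('chromeos_root', default='',
                             description='Path to the chromeos checkout.'))
    child.SetParentSettings(parent)
    child.Inherit()   # unassigned child fields copy assigned parent values
    assert child.GetField('chromeos_root') == '/tmp/chromeos'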
diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py
new file mode 100644
index 00000000..e42d82a9
--- /dev/null
+++ b/crosperf/settings_factory.py
@@ -0,0 +1,304 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Setting files for global, benchmark and labels."""
+
+from __future__ import print_function
+
+from field import BooleanField
+from field import IntegerField
+from field import ListField
+from field import TextField
+from settings import Settings
+
+
+class BenchmarkSettings(Settings):
+ """Settings used to configure individual benchmarks."""
+
+ def __init__(self, name):
+ super(BenchmarkSettings, self).__init__(name, 'benchmark')
+ self.AddField(
+ TextField(
+ 'test_name',
+ description='The name of the test to run. '
+ 'Defaults to the name of the benchmark.'))
+ self.AddField(
+ TextField(
+ 'test_args', description='Arguments to be passed to the '
+ 'test.'))
+ self.AddField(
+ IntegerField(
+ 'iterations',
+ default=1,
+ description='Number of iterations to run the '
+ 'test.'))
+ self.AddField(
+ TextField(
+ 'suite', default='', description='The type of the benchmark.'))
+ self.AddField(
+ IntegerField(
+ 'retries',
+ default=0,
+ description='Number of times to retry a '
+ 'benchmark run.'))
+ self.AddField(
+ BooleanField(
+ 'run_local',
+ description='Run benchmark harness on the DUT. '
+ 'Currently only compatible with the suite: '
+ 'telemetry_Crosperf.',
+ required=False,
+ default=True))
+
+
+class LabelSettings(Settings):
+ """Settings for each label."""
+
+ def __init__(self, name):
+ super(LabelSettings, self).__init__(name, 'label')
+ self.AddField(
+ TextField(
+ 'chromeos_image',
+ required=False,
+ description='The path to the image to run tests '
+ 'on, for local/custom-built images. See the '
+ "'build' option for official or trybot images."))
+ self.AddField(
+ TextField(
+ 'autotest_path',
+ required=False,
+ description='Autotest directory path, relative to the chroot, '
+ 'containing the autotest files needed to run tests on this image.'
+ ))
+ self.AddField(
+ TextField(
+ 'chromeos_root',
+ description='The path to a chromeos checkout which '
+ 'contains a src/scripts directory. Defaults to '
+ 'the chromeos checkout which contains the '
+ 'chromeos_image.'))
+ self.AddField(
+ ListField(
+ 'remote',
+ description='A comma-separated list of IPs of chromeos '
+ 'devices to run experiments on.'))
+ self.AddField(
+ TextField(
+ 'image_args',
+ required=False,
+ default='',
+ description='Extra arguments to pass to '
+ 'image_chromeos.py.'))
+ self.AddField(
+ TextField(
+ 'cache_dir',
+ default='',
+ description='The cache dir for this image.'))
+ self.AddField(
+ TextField(
+ 'compiler',
+ default='gcc',
+ description='The compiler used to build the '
+ 'ChromeOS image (gcc or llvm).'))
+ self.AddField(
+ TextField(
+ 'chrome_src',
+ description='The path to the source of chrome. '
+ 'This is used to run telemetry benchmarks. '
+ 'The default one is the src inside chroot.',
+ required=False,
+ default=''))
+ self.AddField(
+ TextField(
+ 'build',
+ description='The xbuddy specification for an '
+ 'official or trybot image to use for tests. '
+ "'/remote' is assumed, and the board is given "
+ "elsewhere, so omit the '/remote/<board>/' xbuddy "
+ 'prefix.',
+ required=False,
+ default=''))
+
+
+class GlobalSettings(Settings):
+ """Settings that apply per-experiment."""
+
+ def __init__(self, name):
+ super(GlobalSettings, self).__init__(name, 'global')
+ self.AddField(
+ TextField(
+ 'name',
+ description='The name of the experiment. Just an '
+ 'identifier.'))
+ self.AddField(
+ TextField(
+ 'board',
+ description='The target board for running '
+ 'experiments on, e.g. x86-alex.'))
+ self.AddField(
+ ListField(
+ 'remote',
+ description='A comma-separated list of IPs of '
+ 'chromeos devices to run experiments on.'))
+ self.AddField(
+ BooleanField(
+ 'rerun_if_failed',
+ description='Whether to re-run failed test runs '
+ 'or not.',
+ default=False))
+ self.AddField(
+ BooleanField(
+ 'rm_chroot_tmp',
+ default=False,
+ description='Whether to remove the test_that '
+ 'result in the chroot.'))
+ self.AddField(
+ ListField(
+ 'email',
+ description='Space-separated list of email '
+ 'addresses to send email to.'))
+ self.AddField(
+ BooleanField(
+ 'rerun',
+ description='Whether to ignore the cache and '
+ 'force tests to be re-run.',
+ default=False))
+ self.AddField(
+ BooleanField(
+ 'same_specs',
+ default=True,
+ description='Ensure cached runs are run on the '
+ 'same kind of devices which are specified as a '
+ 'remote.'))
+ self.AddField(
+ BooleanField(
+ 'same_machine',
+ default=False,
+ description='Ensure cached runs are run on the '
+ 'same remote.'))
+ self.AddField(
+ BooleanField(
+ 'use_file_locks',
+ default=False,
+ description='Whether to use the file locks '
+ 'mechanism (deprecated) instead of the AFE '
+ 'server lock mechanism.'))
+ self.AddField(
+ IntegerField(
+ 'iterations',
+ default=1,
+ description='Number of iterations to run all '
+ 'tests.'))
+ self.AddField(
+ TextField(
+ 'chromeos_root',
+ description='The path to a chromeos checkout which '
+ 'contains a src/scripts directory. Defaults to '
+ 'the chromeos checkout which contains the '
+ 'chromeos_image.'))
+ self.AddField(
+ TextField(
+ 'logging_level',
+ default='average',
+ description='The level of logging desired. '
+ "Options are 'quiet', 'average', and 'verbose'."))
+ self.AddField(
+ IntegerField(
+ 'acquire_timeout',
+ default=0,
+ description='Number of seconds to wait for a '
+ 'machine before exiting if all the machines in '
+ 'the experiment file are busy. Default is 0.'))
+ self.AddField(
+ TextField(
+ 'perf_args',
+ default='',
+ description='The optional profile command. It '
+ 'enables perf commands to record performance '
+ 'related counters. It must start with perf '
+ 'command record or stat followed by arguments.'))
+ self.AddField(
+ TextField(
+ 'cache_dir',
+ default='',
+ description='The abs path of cache dir. '
+ 'Default is /home/$(whoami)/cros_scratch.'))
+ self.AddField(
+ BooleanField(
+ 'cache_only',
+ default=False,
+ description='Whether to use only cached '
+ 'results (do not rerun failed tests).'))
+ self.AddField(
+ BooleanField(
+ 'no_email',
+ default=False,
+ description='Whether to disable the email to '
+ 'user after crosperf finishes.'))
+ self.AddField(
+ BooleanField(
+ 'json_report',
+ default=False,
+ description='Whether to generate a json version '
+ 'of the report, for archiving.'))
+ self.AddField(
+ BooleanField(
+ 'show_all_results',
+ default=False,
+ description='When running Telemetry tests, '
+ 'whether to show all the results, instead of just '
+ 'the default (summary) results.'))
+ self.AddField(
+ TextField(
+ 'share_cache',
+ default='',
+ description='Path to alternate cache whose data '
+ 'you want to use. It accepts multiple directories '
+ 'separated by a ",".'))
+ self.AddField(
+ TextField(
+ 'results_dir', default='', description='The results dir.'))
+ self.AddField(
+ TextField(
+ 'locks_dir',
+ default='',
+ description='An alternate directory to use for '
+ 'storing/checking machine locks. Using this field '
+ 'automatically sets use_file_locks to True.\n'
+ 'WARNING: If you use your own locks directory, '
+ 'there is no guarantee that someone else might not '
+ 'hold a lock on the same machine in a different '
+ 'locks directory.'))
+ self.AddField(
+ TextField(
+ 'chrome_src',
+ description='The path to the source of chrome. '
+ 'This is used to run telemetry benchmarks. '
+ 'The default one is the src inside chroot.',
+ required=False,
+ default=''))
+ self.AddField(
+ IntegerField(
+ 'retries',
+ default=0,
+ description='Number of times to retry a '
+ 'benchmark run.'))
+
+
+class SettingsFactory(object):
+ """Factory class for building different types of Settings objects.
+
+ This factory is currently hardcoded to produce settings for ChromeOS
+ experiment files. The idea is that in the future, other types
+ of settings could be produced.
+ """
+
+ def GetSettings(self, name, settings_type):
+ if settings_type == 'label' or not settings_type:
+ return LabelSettings(name)
+ if settings_type == 'global':
+ return GlobalSettings(name)
+ if settings_type == 'benchmark':
+ return BenchmarkSettings(name)
+
+ raise TypeError("Invalid settings type: '%s'." % settings_type)
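A short usage sketch of the factory, mirroring what the experiment-file parser does when it reaches a settings block (values are illustrative; field parsing behaves as the unit tests below assume):

    from settings_factory import SettingsFactory

    factory = SettingsFactory()

    bench = factory.GetSettings('kraken', 'benchmark')
    bench.SetField('suite', 'telemetry_Crosperf')
    bench.SetField('iterations', 3)
    assert bench.GetField('iterations') == 3
    assert bench.GetField('run_local')    # unset fields fall back to their defaults

    label = factory.GetSettings('image1', 'label')   # an empty type also maps to label
    glob = factory.GetSettings('global', 'global')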
diff --git a/crosperf/settings_factory_unittest.py b/crosperf/settings_factory_unittest.py
new file mode 100755
index 00000000..127bfd3c
--- /dev/null
+++ b/crosperf/settings_factory_unittest.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python2
+#
+# Copyright 2014 Google Inc. All Rights Reserved.
+"""Unittest for crosperf."""
+
+from __future__ import print_function
+
+import unittest
+
+import settings_factory
+
+
+class BenchmarkSettingsTest(unittest.TestCase):
+ """Class to test benchmark settings."""
+
+ def test_init(self):
+ res = settings_factory.BenchmarkSettings('b_settings')
+ self.assertIsNotNone(res)
+ self.assertEqual(len(res.fields), 6)
+ self.assertEqual(res.GetField('test_name'), '')
+ self.assertEqual(res.GetField('test_args'), '')
+ self.assertEqual(res.GetField('iterations'), 1)
+ self.assertEqual(res.GetField('suite'), '')
+
+
+class LabelSettingsTest(unittest.TestCase):
+ """Class to test label settings."""
+
+ def test_init(self):
+ res = settings_factory.LabelSettings('l_settings')
+ self.assertIsNotNone(res)
+ self.assertEqual(len(res.fields), 9)
+ self.assertEqual(res.GetField('chromeos_image'), '')
+ self.assertEqual(res.GetField('autotest_path'), '')
+ self.assertEqual(res.GetField('chromeos_root'), '')
+ self.assertEqual(res.GetField('remote'), None)
+ self.assertEqual(res.GetField('image_args'), '')
+ self.assertEqual(res.GetField('cache_dir'), '')
+ self.assertEqual(res.GetField('chrome_src'), '')
+ self.assertEqual(res.GetField('build'), '')
+
+
+class GlobalSettingsTest(unittest.TestCase):
+ """Class to test global settings."""
+
+ def test_init(self):
+ res = settings_factory.GlobalSettings('g_settings')
+ self.assertIsNotNone(res)
+ self.assertEqual(len(res.fields), 25)
+ self.assertEqual(res.GetField('name'), '')
+ self.assertEqual(res.GetField('board'), '')
+ self.assertEqual(res.GetField('remote'), None)
+ self.assertEqual(res.GetField('rerun_if_failed'), False)
+ self.assertEqual(res.GetField('rm_chroot_tmp'), False)
+ self.assertEqual(res.GetField('email'), None)
+ self.assertEqual(res.GetField('rerun'), False)
+ self.assertEqual(res.GetField('same_specs'), True)
+ self.assertEqual(res.GetField('same_machine'), False)
+ self.assertEqual(res.GetField('iterations'), 1)
+ self.assertEqual(res.GetField('chromeos_root'), '')
+ self.assertEqual(res.GetField('logging_level'), 'average')
+ self.assertEqual(res.GetField('acquire_timeout'), 0)
+ self.assertEqual(res.GetField('perf_args'), '')
+ self.assertEqual(res.GetField('cache_dir'), '')
+ self.assertEqual(res.GetField('cache_only'), False)
+ self.assertEqual(res.GetField('no_email'), False)
+ self.assertEqual(res.GetField('show_all_results'), False)
+ self.assertEqual(res.GetField('share_cache'), '')
+ self.assertEqual(res.GetField('results_dir'), '')
+ self.assertEqual(res.GetField('chrome_src'), '')
+
+
+class SettingsFactoryTest(unittest.TestCase):
+ """Class to test SettingsFactory."""
+
+ def test_get_settings(self):
+ self.assertRaises(Exception, settings_factory.SettingsFactory.GetSettings,
+ 'global', 'bad_type')
+
+ l_settings = settings_factory.SettingsFactory().GetSettings('label',
+ 'label')
+ self.assertIsInstance(l_settings, settings_factory.LabelSettings)
+ self.assertEqual(len(l_settings.fields), 9)
+
+ b_settings = settings_factory.SettingsFactory().GetSettings('benchmark',
+ 'benchmark')
+ self.assertIsInstance(b_settings, settings_factory.BenchmarkSettings)
+ self.assertEqual(len(b_settings.fields), 6)
+
+ g_settings = settings_factory.SettingsFactory().GetSettings('global',
+ 'global')
+ self.assertIsInstance(g_settings, settings_factory.GlobalSettings)
+ self.assertEqual(len(g_settings.fields), 25)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/crosperf/settings_unittest.py b/crosperf/settings_unittest.py
new file mode 100755
index 00000000..f1062f0d
--- /dev/null
+++ b/crosperf/settings_unittest.py
@@ -0,0 +1,229 @@
+#!/usr/bin/env python2
+#
+# Copyright 2014 Google Inc. All Rights Reserved.
+"""unittest for settings."""
+
+from __future__ import print_function
+
+import mock
+import unittest
+
+import settings
+import settings_factory
+
+from field import IntegerField
+from field import ListField
+import download_images
+
+from cros_utils import logger
+
+
+class TestSettings(unittest.TestCase):
+ """setting test class."""
+
+ def setUp(self):
+ self.settings = settings.Settings('global_name', 'global')
+
+ def test_init(self):
+ self.assertEqual(self.settings.name, 'global_name')
+ self.assertEqual(self.settings.settings_type, 'global')
+ self.assertIsNone(self.settings.parent)
+
+ def test_set_parent_settings(self):
+ self.assertIsNone(self.settings.parent)
+ settings_parent = {'fake_parent_entry': 0}
+ self.settings.SetParentSettings(settings_parent)
+ self.assertIsNotNone(self.settings.parent)
+ self.assertEqual(type(self.settings.parent), dict)
+ self.assertEqual(self.settings.parent, settings_parent)
+
+ def test_add_field(self):
+ self.assertEqual(self.settings.fields, {})
+ self.settings.AddField(
+ IntegerField(
+ 'iterations',
+ default=1,
+ required=False,
+ description='Number of iterations to '
+ 'run the test.'))
+ self.assertEqual(len(self.settings.fields), 1)
+ # Adding the same field twice raises an exception.
+ self.assertRaises(
+ Exception,
+ self.settings.AddField, (IntegerField(
+ 'iterations',
+ default=1,
+ required=False,
+ description='Number of iterations to run '
+ 'the test.')))
+ res = self.settings.fields['iterations']
+ self.assertIsInstance(res, IntegerField)
+ self.assertEqual(res.Get(), 1)
+
+ def test_set_field(self):
+ self.assertEqual(self.settings.fields, {})
+ self.settings.AddField(
+ IntegerField(
+ 'iterations',
+ default=1,
+ required=False,
+ description='Number of iterations to run the '
+ 'test.'))
+ res = self.settings.fields['iterations']
+ self.assertEqual(res.Get(), 1)
+
+ self.settings.SetField('iterations', 10)
+ res = self.settings.fields['iterations']
+ self.assertEqual(res.Get(), 10)
+
+ # Setting a field that's not there raises an exception.
+ self.assertRaises(Exception, self.settings.SetField, 'remote',
+ 'lumpy1.cros')
+
+ self.settings.AddField(
+ ListField(
+ 'remote',
+ default=[],
+ description="A comma-separated list of ip's of "
+ 'chromeos devices to run '
+ 'experiments on.'))
+ self.assertEqual(type(self.settings.fields), dict)
+ self.assertEqual(len(self.settings.fields), 2)
+ res = self.settings.fields['remote']
+ self.assertEqual(res.Get(), [])
+ self.settings.SetField('remote', 'lumpy1.cros', append=True)
+ self.settings.SetField('remote', 'lumpy2.cros', append=True)
+ res = self.settings.fields['remote']
+ self.assertEqual(res.Get(), ['lumpy1.cros', 'lumpy2.cros'])
+
+ def test_get_field(self):
+ # Getting a field that's not there raises an exception.
+ self.assertRaises(Exception, self.settings.GetField, 'iterations')
+
+ # Getting a required field that hasn't been assigned raises an exception.
+ self.settings.AddField(
+ IntegerField(
+ 'iterations',
+ required=True,
+ description='Number of iterations to '
+ 'run the test.'))
+ self.assertIsNotNone(self.settings.fields['iterations'])
+ self.assertRaises(Exception, self.settings.GetField, 'iterations')
+
+ # Set the value, then get it.
+ self.settings.SetField('iterations', 5)
+ res = self.settings.GetField('iterations')
+ self.assertEqual(res, 5)
+
+ def test_inherit(self):
+ parent_settings = settings_factory.SettingsFactory().GetSettings('global',
+ 'global')
+ label_settings = settings_factory.SettingsFactory().GetSettings('label',
+ 'label')
+ self.assertEqual(parent_settings.GetField('chromeos_root'), '')
+ self.assertEqual(label_settings.GetField('chromeos_root'), '')
+ self.assertIsNone(label_settings.parent)
+
+ parent_settings.SetField('chromeos_root', '/tmp/chromeos')
+ label_settings.SetParentSettings(parent_settings)
+ self.assertEqual(parent_settings.GetField('chromeos_root'), '/tmp/chromeos')
+ self.assertEqual(label_settings.GetField('chromeos_root'), '')
+ label_settings.Inherit()
+ self.assertEqual(label_settings.GetField('chromeos_root'), '/tmp/chromeos')
+
+ def test_override(self):
+ self.settings.AddField(
+ ListField(
+ 'email',
+ default=[],
+ description='Space-separated '
+ 'list of email addresses to send '
+ 'email to.'))
+
+ global_settings = settings_factory.SettingsFactory().GetSettings('global',
+ 'global')
+
+ global_settings.SetField('email', 'john.doe@google.com', append=True)
+ global_settings.SetField('email', 'jane.smith@google.com', append=True)
+
+ res = self.settings.GetField('email')
+ self.assertEqual(res, [])
+
+ self.settings.Override(global_settings)
+ res = self.settings.GetField('email')
+ self.assertEqual(res, ['john.doe@google.com', 'jane.smith@google.com'])
+
+ def test_validate(self):
+
+ self.settings.AddField(
+ IntegerField(
+ 'iterations',
+ required=True,
+ description='Number of iterations '
+ 'to run the test.'))
+ self.settings.AddField(
+ ListField(
+ 'remote',
+ default=[],
+ required=True,
+ description='A comma-separated list '
+ "of ip's of chromeos "
+ 'devices to run experiments on.'))
+ self.settings.AddField(
+ ListField(
+ 'email',
+ default=[],
+ description='Space-separated '
+ 'list of email addresses to '
+ 'send email to.'))
+
+ # 'required' fields have not been assigned; should raise an exception.
+ self.assertRaises(Exception, self.settings.Validate)
+ self.settings.SetField('iterations', 2)
+ self.settings.SetField('remote', 'x86-alex.cros', append=True)
+ # Should run without exception now.
+ self.settings.Validate()
+
+ @mock.patch.object(logger, 'GetLogger')
+ @mock.patch.object(download_images.ImageDownloader, 'Run')
+ @mock.patch.object(download_images, 'ImageDownloader')
+ def test_get_xbuddy_path(self, mock_downloader, mock_run, mock_logger):
+
+ mock_run.return_value = 'fake_xbuddy_translation'
+ mock_downloader.Run = mock_run
+ board = 'lumpy'
+ chromeos_root = '/tmp/chromeos'
+ log_level = 'average'
+
+ trybot_str = 'trybot-lumpy-paladin/R34-5417.0.0-b1506'
+ official_str = 'lumpy-release/R34-5417.0.0'
+ xbuddy_str = 'latest-dev'
+ autotest_path = ''
+
+ self.settings.GetXbuddyPath(trybot_str, autotest_path, board, chromeos_root,
+ log_level)
+ self.assertEqual(mock_run.call_count, 1)
+ self.assertEqual(mock_run.call_args_list[0][0],
+ ('/tmp/chromeos',
+ 'remote/trybot-lumpy-paladin/R34-5417.0.0-b1506', ''))
+
+ mock_run.reset_mock()
+ self.settings.GetXbuddyPath(official_str, autotest_path, board,
+ chromeos_root, log_level)
+ self.assertEqual(mock_run.call_count, 1)
+ self.assertEqual(mock_run.call_args_list[0][0],
+ ('/tmp/chromeos', 'remote/lumpy-release/R34-5417.0.0', ''))
+
+ mock_run.reset_mock()
+ self.settings.GetXbuddyPath(xbuddy_str, autotest_path, board, chromeos_root,
+ log_level)
+ self.assertEqual(mock_run.call_count, 1)
+ self.assertEqual(mock_run.call_args_list[0][0],
+ ('/tmp/chromeos', 'remote/lumpy/latest-dev', ''))
+
+ if mock_logger:
+ return
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
new file mode 100644
index 00000000..678113a7
--- /dev/null
+++ b/crosperf/suite_runner.py
@@ -0,0 +1,297 @@
+# Copyright (c) 2013~2015 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""SuiteRunner defines the interface from crosperf to test script."""
+
+from __future__ import print_function
+
+import os
+import time
+import shlex
+
+from cros_utils import command_executer
+import test_flag
+
+TEST_THAT_PATH = '/usr/bin/test_that'
+AUTOTEST_DIR = '~/trunk/src/third_party/autotest/files'
+CHROME_MOUNT_DIR = '/tmp/chrome_root'
+
+
+def GetProfilerArgs(profiler_args):
+ # Remove "--" from in front of profiler args.
+ args_list = shlex.split(profiler_args)
+ new_list = []
+ for arg in args_list:
+ if arg[0:2] == '--':
+ arg = arg[2:]
+ new_list.append(arg)
+ args_list = new_list
+
+ # Remove "perf_options=" from middle of profiler args.
+ new_list = []
+ for arg in args_list:
+ idx = arg.find('perf_options=')
+ if idx != -1:
+ prefix = arg[0:idx]
+ suffix = arg[idx + len('perf_options=') + 1:-1]
+ new_arg = prefix + "'" + suffix + "'"
+ new_list.append(new_arg)
+ else:
+ new_list.append(arg)
+ args_list = new_list
+
+ return ' '.join(args_list)
+
+
+class SuiteRunner(object):
+ """This defines the interface from crosperf to test script."""
+
+ def __init__(self,
+ logger_to_use=None,
+ log_level='verbose',
+ cmd_exec=None,
+ cmd_term=None):
+ self.logger = logger_to_use
+ self.log_level = log_level
+ self._ce = cmd_exec or command_executer.GetCommandExecuter(
+ self.logger, log_level=self.log_level)
+ self._ct = cmd_term or command_executer.CommandTerminator()
+
+ def Run(self, machine, label, benchmark, test_args, profiler_args):
+ for i in range(0, benchmark.retries + 1):
+ self.PinGovernorExecutionFrequencies(machine, label.chromeos_root)
+ if benchmark.suite == 'telemetry':
+ self.DecreaseWaitTime(machine, label.chromeos_root)
+ ret_tup = self.Telemetry_Run(machine, label, benchmark, profiler_args)
+ elif benchmark.suite == 'telemetry_Crosperf':
+ self.DecreaseWaitTime(machine, label.chromeos_root)
+ ret_tup = self.Telemetry_Crosperf_Run(machine, label, benchmark,
+ test_args, profiler_args)
+ else:
+ ret_tup = self.Test_That_Run(machine, label, benchmark, test_args,
+ profiler_args)
+ if ret_tup[0] != 0:
+ self.logger.LogOutput('benchmark %s failed. Retries left: %s' %
+ (benchmark.name, benchmark.retries - i))
+ elif i > 0:
+ self.logger.LogOutput('benchmark %s succeeded after %s retries' %
+ (benchmark.name, i))
+ break
+ else:
+ self.logger.LogOutput('benchmark %s succeeded on first try' %
+ benchmark.name)
+ break
+ return ret_tup
+
+ def PinGovernorExecutionFrequencies(self, machine_name, chromeos_root):
+ """Set min and max frequencies to max static frequency."""
+ # pyformat: disable
+ set_cpu_freq = (
+ 'set -e && '
+ 'for f in /sys/devices/system/cpu/cpu*/cpufreq; do '
+ 'cd $f; '
+ 'val=0; '
+ 'if [[ -e scaling_available_frequencies ]]; then '
+ # pylint: disable=line-too-long
+ ' val=`cat scaling_available_frequencies | tr " " "\\n" | sort -n -b -r`; '
+ 'else '
+ ' val=`cat scaling_max_freq | tr " " "\\n" | sort -n -b -r`; fi ; '
+ 'set -- $val; '
+ 'highest=$1; '
+ 'if [[ $# -gt 1 ]]; then '
+ ' case $highest in *1000) highest=$2;; esac; '
+ 'fi ;'
+ 'echo $highest > scaling_max_freq; '
+ 'echo $highest > scaling_min_freq; '
+ 'echo performance > scaling_governor; '
+ 'done'
+ )
+ # pyformat: enable
+ if self.log_level == 'average':
+ self.logger.LogOutput('Pinning governor execution frequencies for %s' %
+ machine_name)
+ ret = self._ce.CrosRunCommand(
+ set_cpu_freq, machine=machine_name, chromeos_root=chromeos_root)
+ self.logger.LogFatalIf(ret, 'Could not pin frequencies on machine: %s' %
+ machine_name)
+
+ def DecreaseWaitTime(self, machine_name, chromeos_root):
+ """Change the ten seconds wait time for pagecycler to two seconds."""
+ FILE = '/usr/local/telemetry/src/tools/perf/page_sets/page_cycler_story.py'
+ ret = self._ce.CrosRunCommand(
+ 'ls ' + FILE, machine=machine_name, chromeos_root=chromeos_root)
+ self.logger.LogFatalIf(ret, 'Could not find {} on machine: {}'.format(
+ FILE, machine_name))
+
+ if not ret:
+ sed_command = 'sed -i "s/_TTI_WAIT_TIME = 10/_TTI_WAIT_TIME = 2/g" '
+ ret = self._ce.CrosRunCommand(
+ sed_command + FILE, machine=machine_name, chromeos_root=chromeos_root)
+ self.logger.LogFatalIf(ret, 'Could not modify {} on machine: {}'.format(
+ FILE, machine_name))
+
+ def RebootMachine(self, machine_name, chromeos_root):
+ command = 'reboot && exit'
+ self._ce.CrosRunCommand(
+ command, machine=machine_name, chromeos_root=chromeos_root)
+ time.sleep(60)
+ # Whenever we reboot the machine, we need to restore the governor settings.
+ self.PinGovernorExecutionFrequencies(machine_name, chromeos_root)
+
+ def Test_That_Run(self, machine, label, benchmark, test_args, profiler_args):
+ """Run the test_that test.."""
+ options = ''
+ if label.board:
+ options += ' --board=%s' % label.board
+ if test_args:
+ options += ' %s' % test_args
+ if profiler_args:
+ self.logger.LogFatal('test_that does not support profiler.')
+ command = 'rm -rf /usr/local/autotest/results/*'
+ self._ce.CrosRunCommand(
+ command, machine=machine, chromeos_root=label.chromeos_root)
+
+ # We do this because some tests leave the machine in weird states.
+ # Rebooting between iterations has proven to help with this.
+ self.RebootMachine(machine, label.chromeos_root)
+
+ autotest_dir = AUTOTEST_DIR
+ if label.autotest_path != '':
+ autotest_dir = label.autotest_path
+
+ autotest_dir_arg = '--autotest_dir %s' % autotest_dir
+ # For non-telemetry tests, specify an autotest directory only if the
+ # specified directory is different from default (crosbug.com/679001).
+ if autotest_dir == AUTOTEST_DIR:
+ autotest_dir_arg = ''
+
+ command = (('%s %s --fast '
+ '%s %s %s') % (TEST_THAT_PATH, autotest_dir_arg, options,
+ machine, benchmark.test_name))
+ if self.log_level != 'verbose':
+ self.logger.LogOutput('Running test.')
+ self.logger.LogOutput('CMD: %s' % command)
+ # Use --no-ns-pid so that cros_sdk does not create a different
+ # process namespace and we can easily kill the processes it
+ # creates via their process group.
+ return self._ce.ChrootRunCommandWOutput(
+ label.chromeos_root,
+ command,
+ command_terminator=self._ct,
+ cros_sdk_options='--no-ns-pid')
+
+ def RemoveTelemetryTempFile(self, machine, chromeos_root):
+ filename = 'telemetry@%s' % machine
+ fullname = os.path.join(chromeos_root, 'chroot', 'tmp', filename)
+ if os.path.exists(fullname):
+ os.remove(fullname)
+
+ def Telemetry_Crosperf_Run(self, machine, label, benchmark, test_args,
+ profiler_args):
+ if not os.path.isdir(label.chrome_src):
+ self.logger.LogFatal('Cannot find chrome src dir to'
+ ' run telemetry: %s' % label.chrome_src)
+
+ # Check for and remove temporary file that may have been left by
+ # previous telemetry runs (and which might prevent this run from
+ # working).
+ self.RemoveTelemetryTempFile(machine, label.chromeos_root)
+
+ # For telemetry runs, we can use the autotest copy from the source
+ # location. No need to have one under /build/<board>.
+ autotest_dir_arg = '--autotest_dir %s' % AUTOTEST_DIR
+ if label.autotest_path != '':
+ autotest_dir_arg = '--autotest_dir %s' % label.autotest_path
+
+ profiler_args = GetProfilerArgs(profiler_args)
+ fast_arg = ''
+ if not profiler_args:
+ # --fast works unless we are doing profiling (autotest limitation).
+ # --fast avoids unnecessary copies of syslogs.
+ fast_arg = '--fast'
+ args_string = ''
+ if test_args:
+ # Strip double quotes off args (so we can wrap them in single
+ # quotes, to pass through to Telemetry).
+ if test_args[0] == '"' and test_args[-1] == '"':
+ test_args = test_args[1:-1]
+ args_string = "test_args='%s'" % test_args
+
+ cmd = ('{} {} {} --board={} --args="{} run_local={} test={} '
+ '{}" {} telemetry_Crosperf'.format(TEST_THAT_PATH, autotest_dir_arg,
+ fast_arg, label.board,
+ args_string, benchmark.run_local,
+ benchmark.test_name,
+ profiler_args, machine))
+
+ # Use --no-ns-pid so that cros_sdk does not create a different
+ # process namespace and we can easily kill the processes it creates
+ # via their process group.
+ chrome_root_options = ('--no-ns-pid '
+ '--chrome_root={} --chrome_root_mount={} '
+ "FEATURES=\"-usersandbox\" "
+ 'CHROME_ROOT={}'.format(label.chrome_src,
+ CHROME_MOUNT_DIR,
+ CHROME_MOUNT_DIR))
+ if self.log_level != 'verbose':
+ self.logger.LogOutput('Running test.')
+ self.logger.LogOutput('CMD: %s' % cmd)
+ return self._ce.ChrootRunCommandWOutput(
+ label.chromeos_root,
+ cmd,
+ command_terminator=self._ct,
+ cros_sdk_options=chrome_root_options)
+
+ def Telemetry_Run(self, machine, label, benchmark, profiler_args):
+ telemetry_run_path = ''
+ if not os.path.isdir(label.chrome_src):
+ self.logger.LogFatal('Cannot find chrome src dir to' ' run telemetry.')
+ else:
+ telemetry_run_path = os.path.join(label.chrome_src, 'src/tools/perf')
+ if not os.path.exists(telemetry_run_path):
+ self.logger.LogFatal('Cannot find %s directory.' % telemetry_run_path)
+
+ if profiler_args:
+ self.logger.LogFatal('Telemetry does not support the perf profiler.')
+
+ # Check for and remove temporary file that may have been left by
+ # previous telemetry runs (and which might prevent this run from
+ # working).
+ if not test_flag.GetTestMode():
+ self.RemoveTelemetryTempFile(machine, label.chromeos_root)
+
+ rsa_key = os.path.join(
+ label.chromeos_root,
+ 'src/scripts/mod_for_test_scripts/ssh_keys/testing_rsa')
+
+ cmd = ('cd {0} && '
+ './run_measurement '
+ '--browser=cros-chrome '
+ '--output-format=csv '
+ '--remote={1} '
+ '--identity {2} '
+ '{3} {4}'.format(telemetry_run_path, machine, rsa_key,
+ benchmark.test_name, benchmark.test_args))
+ if self.log_level != 'verbose':
+ self.logger.LogOutput('Running test.')
+ self.logger.LogOutput('CMD: %s' % cmd)
+ return self._ce.RunCommandWOutput(cmd, print_to_console=False)
+
+ def CommandTerminator(self):
+ return self._ct
+
+ def Terminate(self):
+ self._ct.Terminate()
+
+
+class MockSuiteRunner(object):
+ """Mock suite runner for test."""
+
+ def __init__(self):
+ self._true = True
+
+ def Run(self, *_args):
+ if self._true:
+ return [0, '', '']
+ else:
+ return [0, '', '']
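GetProfilerArgs above rewrites crosperf-style profiler flags into the form telemetry_Crosperf expects inside its --args string: the leading '--' is stripped and the perf_options value is re-quoted with single quotes. A quick illustration, using the same input/output pair asserted in suite_runner_unittest.py below:

    from suite_runner import GetProfilerArgs

    raw = ('--profiler=custom_perf '
           '--profiler_args=\'perf_options="record -a -e cycles,instructions"\'')
    print(GetProfilerArgs(raw))
    # profiler=custom_perf profiler_args='record -a -e cycles,instructions'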
diff --git a/crosperf/suite_runner_unittest.py b/crosperf/suite_runner_unittest.py
new file mode 100755
index 00000000..fd8de661
--- /dev/null
+++ b/crosperf/suite_runner_unittest.py
@@ -0,0 +1,351 @@
+#!/usr/bin/env python2
+#
+# Copyright 2014 Google Inc. All Rights Reserved.
+"""Unittest for suite_runner."""
+
+from __future__ import print_function
+
+import os.path
+import time
+
+import mock
+import unittest
+
+import suite_runner
+import label
+import test_flag
+
+from benchmark import Benchmark
+
+from cros_utils import command_executer
+from cros_utils import logger
+
+
+class SuiteRunnerTest(unittest.TestCase):
+ """Class of SuiteRunner test."""
+ real_logger = logger.GetLogger()
+
+ mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+ mock_cmd_term = mock.Mock(spec=command_executer.CommandTerminator)
+ mock_logger = mock.Mock(spec=logger.Logger)
+ mock_label = label.MockLabel('lumpy', 'lumpy_chromeos_image', '',
+ '/tmp/chromeos', 'lumpy',
+ ['lumpy1.cros', 'lumpy.cros2'], '', '', False,
+ 'average', 'gcc', '')
+ telemetry_crosperf_bench = Benchmark(
+ 'b1_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles', # perf_args
+ 'telemetry_Crosperf', # suite
+ True) # show_all_results
+
+ test_that_bench = Benchmark(
+ 'b2_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles') # perf_args
+
+ telemetry_bench = Benchmark(
+ 'b3_test', # name
+ 'octane', # test_name
+ '', # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ 'record -e cycles', # perf_args
+ 'telemetry', # suite
+ False) # show_all_results
+
+ def __init__(self, *args, **kwargs):
+ super(SuiteRunnerTest, self).__init__(*args, **kwargs)
+ self.call_test_that_run = False
+ self.pin_governor_args = []
+ self.test_that_args = []
+ self.telemetry_run_args = []
+ self.telemetry_crosperf_args = []
+ self.call_telemetry_crosperf_run = False
+ self.call_pin_governor = False
+ self.call_telemetry_run = False
+
+ def setUp(self):
+ self.runner = suite_runner.SuiteRunner(self.mock_logger, 'verbose',
+ self.mock_cmd_exec,
+ self.mock_cmd_term)
+
+ def test_get_profiler_args(self):
+ input_str = ('--profiler=custom_perf --profiler_args=\'perf_options'
+ '="record -a -e cycles,instructions"\'')
+ output_str = ("profiler=custom_perf profiler_args='record -a -e "
+ "cycles,instructions'")
+ res = suite_runner.GetProfilerArgs(input_str)
+ self.assertEqual(res, output_str)
+
+ def test_run(self):
+
+ def reset():
+ self.call_pin_governor = False
+ self.call_test_that_run = False
+ self.call_telemetry_run = False
+ self.call_telemetry_crosperf_run = False
+ self.pin_governor_args = []
+ self.test_that_args = []
+ self.telemetry_run_args = []
+ self.telemetry_crosperf_args = []
+
+ def FakePinGovernor(machine, chroot):
+ self.call_pin_governor = True
+ self.pin_governor_args = [machine, chroot]
+
+ def FakeTelemetryRun(machine, test_label, benchmark, profiler_args):
+ self.telemetry_run_args = [machine, test_label, benchmark, profiler_args]
+ self.call_telemetry_run = True
+ return 'Ran FakeTelemetryRun'
+
+ def FakeTelemetryCrosperfRun(machine, test_label, benchmark, test_args,
+ profiler_args):
+ self.telemetry_crosperf_args = [
+ machine, test_label, benchmark, test_args, profiler_args
+ ]
+ self.call_telemetry_crosperf_run = True
+ return 'Ran FakeTelemetryCrosperfRun'
+
+ def FakeTestThatRun(machine, test_label, benchmark, test_args,
+ profiler_args):
+ self.test_that_args = [
+ machine, test_label, benchmark, test_args, profiler_args
+ ]
+ self.call_test_that_run = True
+ return 'Ran FakeTestThatRun'
+
+ self.runner.PinGovernorExecutionFrequencies = FakePinGovernor
+ self.runner.Telemetry_Run = FakeTelemetryRun
+ self.runner.Telemetry_Crosperf_Run = FakeTelemetryCrosperfRun
+ self.runner.Test_That_Run = FakeTestThatRun
+
+ machine = 'fake_machine'
+ test_args = ''
+ profiler_args = ''
+ reset()
+ self.runner.Run(machine, self.mock_label, self.telemetry_bench, test_args,
+ profiler_args)
+ self.assertTrue(self.call_pin_governor)
+ self.assertTrue(self.call_telemetry_run)
+ self.assertFalse(self.call_test_that_run)
+ self.assertFalse(self.call_telemetry_crosperf_run)
+ self.assertEqual(
+ self.telemetry_run_args,
+ ['fake_machine', self.mock_label, self.telemetry_bench, ''])
+
+ reset()
+ self.runner.Run(machine, self.mock_label, self.test_that_bench, test_args,
+ profiler_args)
+ self.assertTrue(self.call_pin_governor)
+ self.assertFalse(self.call_telemetry_run)
+ self.assertTrue(self.call_test_that_run)
+ self.assertFalse(self.call_telemetry_crosperf_run)
+ self.assertEqual(
+ self.test_that_args,
+ ['fake_machine', self.mock_label, self.test_that_bench, '', ''])
+
+ reset()
+ self.runner.Run(machine, self.mock_label, self.telemetry_crosperf_bench,
+ test_args, profiler_args)
+ self.assertTrue(self.call_pin_governor)
+ self.assertFalse(self.call_telemetry_run)
+ self.assertFalse(self.call_test_that_run)
+ self.assertTrue(self.call_telemetry_crosperf_run)
+ self.assertEqual(self.telemetry_crosperf_args, [
+ 'fake_machine', self.mock_label, self.telemetry_crosperf_bench, '', ''
+ ])
+
+ @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
+ def test_pin_governor_execution_frequencies(self, mock_cros_runcmd):
+ self.mock_cmd_exec.CrosRunCommand = mock_cros_runcmd
+ self.runner.PinGovernorExecutionFrequencies('lumpy1.cros', '/tmp/chromeos')
+ self.assertEqual(mock_cros_runcmd.call_count, 1)
+ cmd = mock_cros_runcmd.call_args_list[0][0]
+ # pyformat: disable
+ set_cpu_cmd = (
+ 'set -e && '
+ 'for f in /sys/devices/system/cpu/cpu*/cpufreq; do '
+ 'cd $f; '
+ 'val=0; '
+ 'if [[ -e scaling_available_frequencies ]]; then '
+ # pylint: disable=line-too-long
+ ' val=`cat scaling_available_frequencies | tr " " "\\n" | sort -n -b -r`; '
+ 'else '
+ ' val=`cat scaling_max_freq | tr " " "\\n" | sort -n -b -r`; fi ; '
+ 'set -- $val; '
+ 'highest=$1; '
+ 'if [[ $# -gt 1 ]]; then '
+ ' case $highest in *1000) highest=$2;; esac; '
+ 'fi ;'
+ 'echo $highest > scaling_max_freq; '
+ 'echo $highest > scaling_min_freq; '
+ 'echo performance > scaling_governor; '
+ 'done'
+ )
+ # pyformat: enable
+ self.assertEqual(cmd, (set_cpu_cmd,))
+
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
+ def test_reboot_machine(self, mock_cros_runcmd, mock_sleep):
+
+ def FakePinGovernor(machine_name, chromeos_root):
+ if machine_name or chromeos_root:
+ pass
+
+ self.mock_cmd_exec.CrosRunCommand = mock_cros_runcmd
+ self.runner.PinGovernorExecutionFrequencies = FakePinGovernor
+ self.runner.RebootMachine('lumpy1.cros', '/tmp/chromeos')
+ self.assertEqual(mock_cros_runcmd.call_count, 1)
+ self.assertEqual(mock_cros_runcmd.call_args_list[0][0], ('reboot && exit',))
+ self.assertEqual(mock_sleep.call_count, 1)
+ self.assertEqual(mock_sleep.call_args_list[0][0], (60,))
+
+ @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
+ @mock.patch.object(command_executer.CommandExecuter,
+ 'ChrootRunCommandWOutput')
+ def test_test_that_run(self, mock_chroot_runcmd, mock_cros_runcmd):
+
+ def FakeRebootMachine(machine, chroot):
+ if machine or chroot:
+ pass
+
+ def FakeLogMsg(fd, termfd, msg, flush=True):
+ if fd or termfd or msg or flush:
+ pass
+
+ save_log_msg = self.real_logger.LogMsg
+ self.real_logger.LogMsg = FakeLogMsg
+ self.runner.logger = self.real_logger
+ self.runner.RebootMachine = FakeRebootMachine
+
+ raised_exception = False
+ try:
+ self.runner.Test_That_Run('lumpy1.cros', self.mock_label,
+ self.test_that_bench, '', 'record -a -e cycles')
+ except SystemExit:
+ raised_exception = True
+ self.assertTrue(raised_exception)
+
+ mock_chroot_runcmd.return_value = 0
+ self.mock_cmd_exec.ChrootRunCommandWOutput = mock_chroot_runcmd
+ self.mock_cmd_exec.CrosRunCommand = mock_cros_runcmd
+ res = self.runner.Test_That_Run('lumpy1.cros', self.mock_label,
+ self.test_that_bench, '--iterations=2', '')
+ self.assertEqual(mock_cros_runcmd.call_count, 1)
+ self.assertEqual(mock_chroot_runcmd.call_count, 1)
+ self.assertEqual(res, 0)
+ self.assertEqual(mock_cros_runcmd.call_args_list[0][0],
+ ('rm -rf /usr/local/autotest/results/*',))
+ args_list = mock_chroot_runcmd.call_args_list[0][0]
+ args_dict = mock_chroot_runcmd.call_args_list[0][1]
+ self.assertEqual(len(args_list), 2)
+ self.assertEqual(args_list[0], '/tmp/chromeos')
+ self.assertEqual(args_list[1], ('/usr/bin/test_that '
+ '--fast --board=lumpy '
+ '--iterations=2 lumpy1.cros octane'))
+ self.assertEqual(args_dict['command_terminator'], self.mock_cmd_term)
+ self.real_logger.LogMsg = save_log_msg
+
+ @mock.patch.object(os.path, 'isdir')
+ @mock.patch.object(command_executer.CommandExecuter,
+ 'ChrootRunCommandWOutput')
+ def test_telemetry_crosperf_run(self, mock_chroot_runcmd, mock_isdir):
+
+ mock_isdir.return_value = True
+ mock_chroot_runcmd.return_value = 0
+ self.mock_cmd_exec.ChrootRunCommandWOutput = mock_chroot_runcmd
+ profiler_args = ('--profiler=custom_perf --profiler_args=\'perf_options'
+ '="record -a -e cycles,instructions"\'')
+ res = self.runner.Telemetry_Crosperf_Run('lumpy1.cros', self.mock_label,
+ self.telemetry_crosperf_bench, '',
+ profiler_args)
+ self.assertEqual(res, 0)
+ self.assertEqual(mock_chroot_runcmd.call_count, 1)
+ args_list = mock_chroot_runcmd.call_args_list[0][0]
+ args_dict = mock_chroot_runcmd.call_args_list[0][1]
+ self.assertEqual(args_list[0], '/tmp/chromeos')
+ self.assertEqual(args_list[1],
+ ('/usr/bin/test_that --autotest_dir '
+ '~/trunk/src/third_party/autotest/files '
+ ' --board=lumpy --args=" run_local=False test=octane '
+ 'profiler=custom_perf profiler_args=\'record -a -e '
+ 'cycles,instructions\'" lumpy1.cros telemetry_Crosperf'))
+ self.assertEqual(args_dict['cros_sdk_options'],
+ ('--no-ns-pid --chrome_root= '
+ '--chrome_root_mount=/tmp/chrome_root '
+ 'FEATURES="-usersandbox" CHROME_ROOT=/tmp/chrome_root'))
+ self.assertEqual(args_dict['command_terminator'], self.mock_cmd_term)
+ self.assertEqual(len(args_dict), 2)
+
+ @mock.patch.object(os.path, 'isdir')
+ @mock.patch.object(os.path, 'exists')
+ @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
+ def test_telemetry_run(self, mock_runcmd, mock_exists, mock_isdir):
+
+ def FakeLogMsg(fd, termfd, msg, flush=True):
+ if fd or termfd or msg or flush:
+ pass
+
+ save_log_msg = self.real_logger.LogMsg
+ self.real_logger.LogMsg = FakeLogMsg
+ mock_runcmd.return_value = 0
+
+ self.mock_cmd_exec.RunCommandWOutput = mock_runcmd
+ self.runner.logger = self.real_logger
+
+ profiler_args = ('--profiler=custom_perf --profiler_args=\'perf_options'
+ '="record -a -e cycles,instructions"\'')
+
+ raises_exception = False
+ mock_isdir.return_value = False
+ try:
+ self.runner.Telemetry_Run('lumpy1.cros', self.mock_label,
+ self.telemetry_bench, '')
+ except SystemExit:
+ raises_exception = True
+ self.assertTrue(raises_exception)
+
+ raises_exception = False
+ mock_isdir.return_value = True
+ mock_exists.return_value = False
+ try:
+ self.runner.Telemetry_Run('lumpy1.cros', self.mock_label,
+ self.telemetry_bench, '')
+ except SystemExit:
+ raises_exception = True
+ self.assertTrue(raises_exception)
+
+ raises_exception = False
+ mock_isdir.return_value = True
+ mock_exists.return_value = True
+ try:
+ self.runner.Telemetry_Run('lumpy1.cros', self.mock_label,
+ self.telemetry_bench, profiler_args)
+ except SystemExit:
+ raises_exception = True
+ self.assertTrue(raises_exception)
+
+ test_flag.SetTestMode(True)
+ res = self.runner.Telemetry_Run('lumpy1.cros', self.mock_label,
+ self.telemetry_bench, '')
+ self.assertEqual(res, 0)
+ self.assertEqual(mock_runcmd.call_count, 1)
+ self.assertEqual(mock_runcmd.call_args_list[0][0], (
+ ('cd src/tools/perf && ./run_measurement '
+ '--browser=cros-chrome --output-format=csv '
+ '--remote=lumpy1.cros --identity /tmp/chromeos/src/scripts'
+ '/mod_for_test_scripts/ssh_keys/testing_rsa octane '),))
+
+ self.real_logger.LogMsg = save_log_msg
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/crosperf/test_cache/compare_output/autotest.tbz2 b/crosperf/test_cache/compare_output/autotest.tbz2
new file mode 100644
index 00000000..066dd9ac
--- /dev/null
+++ b/crosperf/test_cache/compare_output/autotest.tbz2
Binary files differ
diff --git a/crosperf/test_cache/compare_output/machine.txt b/crosperf/test_cache/compare_output/machine.txt
new file mode 100644
index 00000000..a82af3aa
--- /dev/null
+++ b/crosperf/test_cache/compare_output/machine.txt
@@ -0,0 +1 @@
+fake_machine_checksum123 \ No newline at end of file
diff --git a/crosperf/test_cache/compare_output/results.txt b/crosperf/test_cache/compare_output/results.txt
new file mode 100644
index 00000000..db6803ce
--- /dev/null
+++ b/crosperf/test_cache/compare_output/results.txt
@@ -0,0 +1,6 @@
+S"CMD (True): ./test_that.sh --remote=172.17.128.241 --board=lumpy LibCBench\nCMD (None): cd /usr/local/google/home/yunlian/gd/src/build/images/lumpy/latest/../../../../..; cros_sdk -- ./in_chroot_cmd6X7Cxu.sh\nIdentity added: /tmp/test_that.PO1234567/autotest_key (/tmp/test_that.PO1234567/autotest_key)\nINFO : Using emerged autotests already installed at /build/lumpy/usr/local/autotest.\n\nINFO : Running the following control files 1 times:\nINFO : * 'client/site_tests/platform_LibCBench/control'\n\nINFO : Running client test client/site_tests/platform_LibCBench/control\n./server/autoserv -m 172.17.128.241 --ssh-port 22 -c client/site_tests/platform_LibCBench/control -r /tmp/test_that.PO1234567/platform_LibCBench --test-retry=0 --args \nERROR:root:import statsd failed, no stats will be reported.\n14:20:22 INFO | Results placed in /tmp/test_that.PO1234567/platform_LibCBench\n14:20:22 INFO | Processing control file\n14:20:23 INFO | Starting master ssh connection '/usr/bin/ssh -a -x -N -o ControlMaster=yes -o ControlPath=/tmp/_autotmp_VIIP67ssh-master/socket -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=yes -o ConnectTimeout=30 -o ServerAliveInterval=180 -o ServerAliveCountMax=3 -o ConnectionAttempts=4 -o Protocol=2 -l root -p 22 172.17.128.241'\n14:20:23 ERROR| [stderr] Warning: Permanently added '172.17.128.241' (RSA) to the list of known hosts.\n14:20:23 INFO | INFO\t----\t----\tkernel=3.8.11\tlocaltime=May 22 14:20:23\ttimestamp=1369257623\t\n14:20:23 INFO | Installing autotest on 172.17.128.241\n14:20:23 INFO | Using installation dir /usr/local/autotest\n14:20:23 WARNI| No job_repo_url for <remote host: 172.17.128.241>\n14:20:23 INFO | Could not install autotest using the packaging system: No repos to install an autotest client from. 
Trying other methods\n14:20:23 INFO | Installation of autotest completed\n14:20:24 WARNI| No job_repo_url for <remote host: 172.17.128.241>\n14:20:24 INFO | Executing /usr/local/autotest/bin/autotest /usr/local/autotest/control phase 0\n14:20:24 INFO | Entered autotestd_monitor.\n14:20:24 INFO | Finished launching tail subprocesses.\n14:20:24 INFO | Finished waiting on autotestd to start.\n14:20:26 INFO | START\t----\t----\ttimestamp=1369257625\tlocaltime=May 22 14:20:25\t\n14:20:26 INFO | \tSTART\tplatform_LibCBench\tplatform_LibCBench\ttimestamp=1369257625\tlocaltime=May 22 14:20:25\t\n14:20:30 INFO | \t\tGOOD\tplatform_LibCBench\tplatform_LibCBench\ttimestamp=1369257630\tlocaltime=May 22 14:20:30\tcompleted successfully\n14:20:30 INFO | \tEND GOOD\tplatform_LibCBench\tplatform_LibCBench\ttimestamp=1369257630\tlocaltime=May 22 14:20:30\t\n14:20:31 INFO | END GOOD\t----\t----\ttimestamp=1369257630\tlocaltime=May 22 14:20:30\t\n14:20:31 INFO | Got lock of exit_code_file.\n14:20:31 INFO | Released lock of exit_code_file and closed it.\nOUTPUT: ==============================\nOUTPUT: Current time: 2013-05-22 14:20:32.818831 Elapsed: 0:01:30 ETA: Unknown\nDone: 0% [ ]\nOUTPUT: Thread Status:\nRUNNING: 1 ('ttt: LibCBench (1)' 0:01:21)\nMachine Status:\nMachine Thread Lock Status Checksum \n172.17.128.241 ttt: LibCBench (1) True RUNNING 3ba9f2ecbb222f20887daea5583d86ba\n\nOUTPUT: ==============================\n14:20:33 INFO | Killing child processes.\n14:20:33 INFO | Client complete\n14:20:33 INFO | Finished processing control file\n14:20:33 INFO | Starting master ssh connection '/usr/bin/ssh -a -x -N -o ControlMaster=yes -o ControlPath=/tmp/_autotmp_aVJUgmssh-master/socket -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=yes -o ConnectTimeout=30 -o ServerAliveInterval=180 -o ServerAliveCountMax=3 -o ConnectionAttempts=4 -o Protocol=2 -l root -p 22 172.17.128.241'\n14:20:33 ERROR| [stderr] Warning: Permanently added '172.17.128.241' (RSA) to the list of known hosts.\n\nINFO : Test results:\n-------------------------------------------------------------------\nplatform_LibCBench [ PASSED ]\nplatform_LibCBench/platform_LibCBench [ PASSED ]\nplatform_LibCBench/platform_LibCBench b_malloc_big1__0_ 0.00375231466667\nplatform_LibCBench/platform_LibCBench b_malloc_big2__0_ 0.002951359\nplatform_LibCBench/platform_LibCBench b_malloc_bubble__0_ 0.015066374\nplatform_LibCBench/platform_LibCBench b_malloc_sparse__0_ 0.015053784\nplatform_LibCBench/platform_LibCBench b_malloc_thread_local__0_ 0.01138439\nplatform_LibCBench/platform_LibCBench b_malloc_thread_stress__0_ 0.0367894733333\nplatform_LibCBench/platform_LibCBench b_malloc_tiny1__0_ 0.000768474333333\nplatform_LibCBench/platform_LibCBench b_malloc_tiny2__0_ 0.000581407333333\nplatform_LibCBench/platform_LibCBench b_pthread_create_serial1__0_ 0.0291785246667\nplatform_LibCBench/platform_LibCBench b_pthread_createjoin_serial1__0_ 0.031907936\nplatform_LibCBench/platform_LibCBench b_pthread_createjoin_serial2__0_ 0.043485347\nplatform_LibCBench/platform_LibCBench b_pthread_uselesslock__0_ 0.0294113346667\nplatform_LibCBench/platform_LibCBench b_regex_compile____a_b_c__d_b__ 0.00529833933333\nplatform_LibCBench/platform_LibCBench b_regex_search____a_b_c__d_b__ 0.00165455066667\nplatform_LibCBench/platform_LibCBench b_regex_search___a_25_b__ 0.0496191923333\nplatform_LibCBench/platform_LibCBench b_stdio_putcgetc__0_ 0.100005711667\nplatform_LibCBench/platform_LibCBench b_stdio_putcgetc_unlocked__0_ 
0.0371443833333\nplatform_LibCBench/platform_LibCBench b_string_memset__0_ 0.00275405066667\nplatform_LibCBench/platform_LibCBench b_string_strchr__0_ 0.00456903\nplatform_LibCBench/platform_LibCBench b_string_strlen__0_ 0.044893587\nplatform_LibCBench/platform_LibCBench b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac__ 0.118360778\nplatform_LibCBench/platform_LibCBench b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaac__ 0.068957325\nplatform_LibCBench/platform_LibCBench b_string_strstr___aaaaaaaaaaaaaacccccccccccc__ 0.0135694476667\nplatform_LibCBench/platform_LibCBench b_string_strstr___abcdefghijklmnopqrstuvwxyz__ 0.0134553343333\nplatform_LibCBench/platform_LibCBench b_string_strstr___azbycxdwevfugthsirjqkplomn__ 0.0133123556667\nplatform_LibCBench/platform_LibCBench b_utf8_bigbuf__0_ 0.0473772253333\nplatform_LibCBench/platform_LibCBench b_utf8_onebyone__0_ 0.130938538333\n-------------------------------------------------------------------\nTotal PASS: 2/2 (100%)\n\nINFO : Elapsed time: 0m16s \n"
+p0
+.S"\nERROR: Identity added: /tmp/test_that.Z4Ld/autotest_key (/tmp/test_that.Z4Ld/autotest_key)\nINFO : Using emerged autotests already installed at /build/lumpy/usr/local/autotest.\nINFO : Running the following control files 1 times:\nINFO : * 'client/site_tests/platform_LibCBench/control'\nINFO : Running client test client/site_tests/platform_LibCBench/control\nINFO : Test results:\nINFO : Elapsed time: 0m18s\n"
+p0
+.I0
+. \ No newline at end of file
diff --git a/crosperf/test_cache/test_input/autotest.tbz2 b/crosperf/test_cache/test_input/autotest.tbz2
new file mode 100644
index 00000000..6ddbc6bf
--- /dev/null
+++ b/crosperf/test_cache/test_input/autotest.tbz2
Binary files differ
diff --git a/crosperf/test_cache/test_input/machine.txt b/crosperf/test_cache/test_input/machine.txt
new file mode 100644
index 00000000..9bd78434
--- /dev/null
+++ b/crosperf/test_cache/test_input/machine.txt
@@ -0,0 +1 @@
+processor : 0vendor_id : GenuineIntelcpu family : 6model : 42model name : Intel(R) Celeron(R) CPU 867 @ 1.30GHzstepping : 7microcode : 0x25cache size : 2048 KBphysical id : 0siblings : 2core id : 0cpu cores : 2apicid : 0initial apicid : 0fpu : yesfpu_exception : yescpuid level : 13wp : yesflags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer xsave lahf_lm arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpidclflush size : 64cache_alignment : 64address sizes : 36 bits physical, 48 bits virtualpower management:processor : 1vendor_id : GenuineIntelcpu family : 6model : 42model name : Intel(R) Celeron(R) CPU 867 @ 1.30GHzstepping : 7microcode : 0x25cache size : 2048 KBphysical id : 0siblings : 2core id : 1cpu cores : 2apicid : 2initial apicid : 2fpu : yesfpu_exception : yescpuid level : 13wp : yesflags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer xsave lahf_lm arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpidclflush size : 64cache_alignment : 64address sizes : 36 bits physical, 48 bits virtualpower management: 4194304 \ No newline at end of file
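For context: the machine.txt fixture above is a flattened /proc/cpuinfo dump with what looks like the total memory in KB (4194304) appended; crosperf appears to fingerprint machines by hashing such a string so that identically configured devices compare equal. A minimal sketch of that idea only; the md5 scheme, the helper name, and the lack of field filtering are assumptions, not taken from this change:

import hashlib

def MachineChecksum(machine_info_path):
  # Hypothetical helper: hash the flattened cpuinfo+memory text so two
  # machines with the same hardware description yield the same digest.
  with open(machine_info_path) as f:
    return hashlib.md5(f.read().encode('utf-8')).hexdigest()

# Run from the repository root against the fixture added above.
print(MachineChecksum('crosperf/test_cache/test_input/machine.txt'))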
diff --git a/crosperf/test_cache/test_input/results.txt b/crosperf/test_cache/test_input/results.txt
new file mode 100644
index 00000000..33ba6ab7
--- /dev/null
+++ b/crosperf/test_cache/test_input/results.txt
@@ -0,0 +1,6 @@
+S"11:22:08 INFO | Running autotest_quickmerge step.\n11:22:08 INFO | quickmerge| 11:22:08: INFO: RunCommand: sudo -- /usr/bin/python2.7 /mnt/host/source/chromite/bin/autotest_quickmerge '--board=lumpy'\n11:22:08 INFO | quickmerge| 11:22:08: INFO: RunCommand: find /build/lumpy/usr/local/build/autotest/ -path /build/lumpy/usr/local/build/autotest/ExternalSource -prune -o -path /build/lumpy/usr/local/build/autotest/logs -prune -o -path /build/lumpy/usr/local/build/autotest/results -prune -o -path /build/lumpy/usr/local/build/autotest/site-packages -prune -o -printf '%T@\\n'\n11:22:22 INFO | quickmerge| 11:22:22: INFO: RunCommand: find /mnt/host/source/src/third_party/autotest/files/ -path /mnt/host/source/src/third_party/autotest/files/ExternalSource -prune -o -path /mnt/host/source/src/third_party/autotest/files/logs -prune -o -path /mnt/host/source/src/third_party/autotest/files/results -prune -o -path /mnt/host/source/src/third_party/autotest/files/site-packages -prune -o -printf '%T@\\n'\n11:22:32 INFO | quickmerge| 11:22:32: INFO: The sysroot appears to be newer than the source tree, doing nothing and exiting now.\n11:22:32 INFO | Re-running test_that script in /build/lumpy/usr/local/build/autotest copy of autotest.\n11:22:33 INFO | Began logging to /tmp/test_that_results_zZZfQa\nAdding labels [u'cros-version:ad_hoc_build', u'board:lumpy'] to host chromeos2-row2-rack4-host11.cros\n13:22:33 INFO | Fetching suite for job named telemetry_Crosperf...\n13:22:43 INFO | Scheduling suite for job named telemetry_Crosperf...\n13:22:43 INFO | ... scheduled 1 job(s).\n13:22:43 INFO | autoserv| DEBUG:root:import statsd failed, no stats will be reported.\n13:22:43 INFO | autoserv| Results placed in /tmp/test_that_results_zZZfQa/results-1-telemetry_Crosperf\n13:22:43 INFO | autoserv| Logged pid 25397 to /tmp/test_that_results_zZZfQa/results-1-telemetry_Crosperf/.autoserv_execute\n13:22:43 INFO | autoserv| I am PID 25397\n13:22:43 INFO | autoserv| Not checking if job_repo_url contains autotest packages on ['chromeos2-row2-rack4-host11.cros']\n13:22:43 INFO | autoserv| Processing control file\n13:22:44 INFO | autoserv| START\ttelemetry_Crosperf\ttelemetry_Crosperf\ttimestamp=1401301364\tlocaltime=May 28 11:22:44\n13:22:44 INFO | autoserv| Starting master ssh connection '/usr/bin/ssh -a -x -N -o ControlMaster=yes -o ControlPath=/tmp/_autotmp_HsB3vQssh-master/socket -o StrictHostKeyChecking=no -o UserKnownHostsFile=/tmp/tmpxFy6lj -o BatchMode=yes -o ConnectTimeout=30 -o ServerAliveInterval=300 -l root -p 22 chromeos2-row2-rack4-host11.cros'\n13:22:45 INFO | autoserv| Starting master ssh connection '/usr/bin/ssh -a -x -N -o ControlMaster=yes -o ControlPath=/tmp/_autotmp_YTu9wYssh-master/socket -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=yes -o ConnectTimeout=30 -o ServerAliveInterval=180 -o ServerAliveCountMax=3 -o ConnectionAttempts=4 -o Protocol=2 -l root -p 22 chromeos2-row2-rack4-host11.cros'\n13:22:45 INFO | autoserv| Installing autotest on chromeos2-row2-rack4-host11.cros\n13:22:45 INFO | autoserv| Using installation dir /tmp/sysinfo/autoserv-MxOMOw\n13:22:46 INFO | autoserv| No job_repo_url for <remote host: chromeos2-row2-rack4-host11.cros>\n13:22:46 INFO | autoserv| Could not install autotest using the packaging system: No repos to install an autotest client from. 
Trying other methods\n13:22:47 INFO | autoserv| Installation of autotest completed\n13:22:47 INFO | autoserv| Installing updated global_config.ini.\n13:22:48 INFO | autoserv| No job_repo_url for <remote host: chromeos2-row2-rack4-host11.cros>\n13:22:48 INFO | autoserv| Executing /tmp/sysinfo/autoserv-MxOMOw/bin/autotest /tmp/sysinfo/autoserv-MxOMOw/control phase 0\n13:22:48 INFO | autoserv| Entered autotestd_monitor.\n13:22:48 INFO | autoserv| Finished launching tail subprocesses.\n13:22:48 INFO | autoserv| Finished waiting on autotestd to start.\n13:22:48 INFO | autoserv| START\t----\t----\ttimestamp=1401301368\tlocaltime=May 28 11:22:48\n13:22:48 INFO | autoserv| GOOD\t----\tsysinfo.before\ttimestamp=1401301368\tlocaltime=May 28 11:22:48\n13:22:48 INFO | autoserv| END GOOD\t----\t----\ttimestamp=1401301368\tlocaltime=May 28 11:22:48\n13:22:48 INFO | autoserv| Got lock of exit_code_file.\n13:22:48 INFO | autoserv| Released lock of exit_code_file and closed it.\n13:22:50 INFO | autoserv| Killing child processes.\n13:22:50 INFO | autoserv| Client complete\n13:22:52 INFO | autoserv| No job_repo_url for <remote host: chromeos2-row2-rack4-host11.cros>\n13:22:52 INFO | autoserv| Executing /tmp/sysinfo/autoserv-MxOMOw/bin/autotest /tmp/sysinfo/autoserv-MxOMOw/control phase 0\n13:22:53 INFO | autoserv| Entered autotestd_monitor.\n13:22:53 INFO | autoserv| Finished launching tail subprocesses.\n13:22:53 INFO | autoserv| Finished waiting on autotestd to start.\n13:22:53 INFO | autoserv| START\t----\t----\ttimestamp=1401301373\tlocaltime=May 28 11:22:53\n13:22:53 INFO | autoserv| GOOD\t----\tsysinfo.iteration.before\ttimestamp=1401301373\tlocaltime=May 28 11:22:53\n13:22:53 INFO | autoserv| END GOOD\t----\t----\ttimestamp=1401301373\tlocaltime=May 28 11:22:53\n13:22:53 INFO | autoserv| Got lock of exit_code_file.\n13:22:53 INFO | autoserv| Released lock of exit_code_file and closed it.\n13:22:55 INFO | autoserv| Killing child processes.\n13:22:55 INFO | autoserv| Client complete\n13:22:55 INFO | autoserv| Using Chrome source tree at /tmp/chrome_root\n13:22:55 INFO | autoserv| CMD: /tmp/chrome_root/src/tools/perf/run_benchmark --browser=cros-chrome --remote=chromeos2-row2-rack4-host11.cros sunspider\n13:23:35 INFO | autoserv| Telemetry completed with exit code: 0.\n13:23:35 INFO | autoserv| stdout:Pages: [http___www.webkit.org_perf_sunspider-1.0.2_sunspider-1.0.2_driver.html]\n13:23:35 INFO | autoserv| RESULT 3d-cube: 3d-cube= [28,28,28,28,31,26,28,28,28,27] ms\n13:23:35 INFO | autoserv| Avg 3d-cube: 28.000000ms\n13:23:35 INFO | autoserv| Sd 3d-cube: 1.247219ms\n13:23:35 INFO | autoserv| RESULT 3d-morph: 3d-morph= [23,22,22,22,22,22,22,22,22,22] ms\n13:23:35 INFO | autoserv| Avg 3d-morph: 22.100000ms\n13:23:35 INFO | autoserv| Sd 3d-morph: 0.316228ms\n13:23:35 INFO | autoserv| RESULT 3d-raytrace: 3d-raytrace= [26,23,24,25,25,25,26,24,24,25] ms\n13:23:35 INFO | autoserv| Avg 3d-raytrace: 24.700000ms\n13:23:35 INFO | autoserv| Sd 3d-raytrace: 0.948683ms\n13:23:35 INFO | autoserv| *RESULT Total: Total= [443,440,440,447,451,435,441,449,449,445] ms\n13:23:35 INFO | autoserv| Avg Total: 444.000000ms\n13:23:35 INFO | autoserv| Sd Total: 5.077182ms\n13:23:35 INFO | autoserv| RESULT access-binary-trees: access-binary-trees= [4,3,5,6,5,5,3,5,5,4] ms\n13:23:35 INFO | autoserv| Avg access-binary-trees: 4.500000ms\n13:23:35 INFO | autoserv| Sd access-binary-trees: 0.971825ms\n13:23:35 INFO | autoserv| RESULT access-fannkuch: access-fannkuch= [19,18,17,18,17,18,18,18,17,18] ms\n13:23:35 INFO | autoserv| Avg 
access-fannkuch: 17.800000ms\n13:23:35 INFO | autoserv| Sd access-fannkuch: 0.632456ms\n13:23:35 INFO | autoserv| RESULT access-nbody: access-nbody= [7,9,8,7,12,8,9,10,8,7] ms\n13:23:35 INFO | autoserv| Avg access-nbody: 8.500000ms\n13:23:35 INFO | autoserv| Sd access-nbody: 1.581139ms\n13:23:35 INFO | autoserv| RESULT access-nsieve: access-nsieve= [9,8,8,8,8,7,8,7,8,8] ms\n13:23:35 INFO | autoserv| Avg access-nsieve: 7.900000ms\n13:23:35 INFO | autoserv| Sd access-nsieve: 0.567646ms\n13:23:35 INFO | autoserv| RESULT bitops-3bit-bits-in-byte: bitops-3bit-bits-in-byte= [3,3,3,3,3,3,3,4,4,3] ms\n13:23:35 INFO | autoserv| Avg bitops-3bit-bits-in-byte: 3.200000ms\n13:23:35 INFO | autoserv| Sd bitops-3bit-bits-in-byte: 0.421637ms\n13:23:35 INFO | autoserv| RESULT bitops-bits-in-byte: bitops-bits-in-byte= [9,9,9,9,9,9,9,9,9,10] ms\n13:23:35 INFO | autoserv| Avg bitops-bits-in-byte: 9.100000ms\n13:23:35 INFO | autoserv| Sd bitops-bits-in-byte: 0.316228ms\n13:23:35 INFO | autoserv| RESULT bitops-bitwise-and: bitops-bitwise-and= [8,8,7,9,8,9,8,8,9,10] ms\n13:23:35 INFO | autoserv| Avg bitops-bitwise-and: 8.400000ms\n13:23:35 INFO | autoserv| Sd bitops-bitwise-and: 0.843274ms\n13:23:35 INFO | autoserv| RESULT bitops-nsieve-bits: bitops-nsieve-bits= [9,9,9,9,9,9,9,11,11,9] ms\n13:23:35 INFO | autoserv| Avg bitops-nsieve-bits: 9.400000ms\n13:23:35 INFO | autoserv| Sd bitops-nsieve-bits: 0.843274ms\n13:23:35 INFO | autoserv| RESULT controlflow-recursive: controlflow-recursive= [5,5,5,4,4,4,5,4,4,4] ms\n13:23:35 INFO | autoserv| Avg controlflow-recursive: 4.400000ms\n13:23:35 INFO | autoserv| Sd controlflow-recursive: 0.516398ms\n13:23:35 INFO | autoserv| RESULT crypto-aes: crypto-aes= [14,16,15,16,15,14,17,14,15,16] ms\n13:23:35 INFO | autoserv| Avg crypto-aes: 15.200000ms\n13:23:35 INFO | autoserv| Sd crypto-aes: 1.032796ms\n13:23:35 INFO | autoserv| RESULT crypto-md5: crypto-md5= [10,11,11,11,10,10,11,10,10,11] ms\n13:23:35 INFO | autoserv| Avg crypto-md5: 10.500000ms\n13:23:35 INFO | autoserv| Sd crypto-md5: 0.527046ms\n13:23:35 INFO | autoserv| RESULT crypto-sha1: crypto-sha1= [11,11,12,12,12,12,12,10,13,11] ms\n13:23:35 INFO | autoserv| Avg crypto-sha1: 11.600000ms\n13:23:35 INFO | autoserv| Sd crypto-sha1: 0.843274ms\n13:23:35 INFO | autoserv| RESULT date-format-tofte: date-format-tofte= [28,25,25,26,26,27,26,28,27,25] ms\n13:23:35 INFO | autoserv| Avg date-format-tofte: 26.300000ms\n13:23:35 INFO | autoserv| Sd date-format-tofte: 1.159502ms\n13:23:35 INFO | autoserv| RESULT date-format-xparb: date-format-xparb= [21,22,21,21,21,20,20,20,21,22] ms\n13:23:35 INFO | autoserv| Avg date-format-xparb: 20.900000ms\n13:23:35 INFO | autoserv| Sd date-format-xparb: 0.737865ms\n13:23:35 INFO | autoserv| RESULT math-cordic: math-cordic= [8,8,8,9,9,9,9,9,9,9] ms\n13:23:35 INFO | autoserv| Avg math-cordic: 8.700000ms\n13:23:35 INFO | autoserv| Sd math-cordic: 0.483046ms\n13:23:35 INFO | autoserv| RESULT math-partial-sums: math-partial-sums= [22,22,22,21,23,20,20,23,25,22] ms\n13:23:35 INFO | autoserv| Avg math-partial-sums: 22.000000ms\n13:23:35 INFO | autoserv| Sd math-partial-sums: 1.490712ms\n13:23:35 INFO | autoserv| RESULT math-spectral-norm: math-spectral-norm= [6,7,6,7,7,6,7,6,7,7] ms\n13:23:35 INFO | autoserv| Avg math-spectral-norm: 6.600000ms\n13:23:35 INFO | autoserv| Sd math-spectral-norm: 0.516398ms\n13:23:35 INFO | autoserv| RESULT regexp-dna: regexp-dna= [16,16,17,16,16,16,16,16,17,16] ms\n13:23:35 INFO | autoserv| Avg regexp-dna: 16.200000ms\n13:23:35 INFO | autoserv| Sd regexp-dna: 
0.421637ms\n13:23:35 INFO | autoserv| RESULT string-base64: string-base64= [17,16,16,16,17,16,16,16,14,16] ms\n13:23:35 INFO | autoserv| Avg string-base64: 16.000000ms\n13:23:35 INFO | autoserv| Sd string-base64: 0.816497ms\n13:23:35 INFO | autoserv| RESULT string-fasta: string-fasta= [23,22,23,24,23,23,23,25,23,23] ms\n13:23:35 INFO | autoserv| Avg string-fasta: 23.200000ms\n13:23:35 INFO | autoserv| Sd string-fasta: 0.788811ms\n13:23:35 INFO | autoserv| RESULT string-tagcloud: string-tagcloud= [53,52,54,53,53,52,51,54,53,53] ms\n13:23:35 INFO | autoserv| Avg string-tagcloud: 52.800000ms\n13:23:35 INFO | autoserv| Sd string-tagcloud: 0.918937ms\n13:23:35 INFO | autoserv| RESULT string-unpack-code: string-unpack-code= [46,47,46,48,47,46,46,47,47,47] ms\n13:23:35 INFO | autoserv| Avg string-unpack-code: 46.700000ms\n13:23:35 INFO | autoserv| Sd string-unpack-code: 0.674949ms\n13:23:35 INFO | autoserv| RESULT string-validate-input: string-validate-input= [18,20,19,19,19,19,19,21,19,20] ms\n13:23:35 INFO | autoserv| Avg string-validate-input: 19.300000ms\n13:23:35 INFO | autoserv| Sd string-validate-input: 0.823273ms\n13:23:35 INFO | autoserv| RESULT telemetry_page_measurement_results: num_failed= 0 count\n13:23:35 INFO | autoserv| RESULT telemetry_page_measurement_results: num_errored= 0 count\n13:23:35 INFO | autoserv| \n13:23:35 INFO | autoserv| View result at file:///tmp/chrome_root/src/tools/perf/results.html\n13:23:35 INFO | autoserv| \n13:23:35 INFO | autoserv| stderr:\n13:23:35 INFO | autoserv| No job_repo_url for <remote host: chromeos2-row2-rack4-host11.cros>\n13:23:35 INFO | autoserv| Executing /tmp/sysinfo/autoserv-MxOMOw/bin/autotest /tmp/sysinfo/autoserv-MxOMOw/control phase 0\n13:23:36 INFO | autoserv| Entered autotestd_monitor.\n13:23:36 INFO | autoserv| Finished launching tail subprocesses.\n13:23:36 INFO | autoserv| Finished waiting on autotestd to start.\n13:23:37 INFO | autoserv| START\t----\t----\ttimestamp=1401301417\tlocaltime=May 28 11:23:37\n13:23:37 INFO | autoserv| GOOD\t----\tsysinfo.iteration.after\ttimestamp=1401301417\tlocaltime=May 28 11:23:37\n13:23:37 INFO | autoserv| END GOOD\t----\t----\ttimestamp=1401301417\tlocaltime=May 28 11:23:37\n13:23:37 INFO | autoserv| Got lock of exit_code_file.\n13:23:37 INFO | autoserv| Released lock of exit_code_file and closed it.\n13:23:39 INFO | autoserv| Killing child processes.\n13:23:39 INFO | autoserv| Client complete\n13:23:39 INFO | autoserv| No job_repo_url for <remote host: chromeos2-row2-rack4-host11.cros>\n13:23:40 INFO | autoserv| Executing /tmp/sysinfo/autoserv-MxOMOw/bin/autotest /tmp/sysinfo/autoserv-MxOMOw/control phase 0\n13:23:40 INFO | autoserv| Entered autotestd_monitor.\n13:23:40 INFO | autoserv| Finished launching tail subprocesses.\n13:23:40 INFO | autoserv| Finished waiting on autotestd to start.\n13:23:40 INFO | autoserv| START\t----\t----\ttimestamp=1401301420\tlocaltime=May 28 11:23:40\n13:23:40 INFO | autoserv| GOOD\t----\tsysinfo.after\ttimestamp=1401301420\tlocaltime=May 28 11:23:40\n13:23:40 INFO | autoserv| END GOOD\t----\t----\ttimestamp=1401301420\tlocaltime=May 28 11:23:40\n13:23:40 INFO | autoserv| Got lock of exit_code_file.\n13:23:40 INFO | autoserv| Released lock of exit_code_file and closed it.\n13:23:42 INFO | autoserv| Killing child processes.\n13:23:42 INFO | autoserv| Client complete\n13:23:44 INFO | autoserv| GOOD\ttelemetry_Crosperf\ttelemetry_Crosperf\ttimestamp=1401301424\tlocaltime=May 28 11:23:44\tcompleted successfully\n13:23:44 INFO | autoserv| END 
GOOD\ttelemetry_Crosperf\ttelemetry_Crosperf\ttimestamp=1401301424\tlocaltime=May 28 11:23:44\n13:23:44 INFO | autoserv| Finished processing control file\n13:23:44 INFO | autoserv| Starting master ssh connection '/usr/bin/ssh -a -x -N -o ControlMaster=yes -o ControlPath=/tmp/_autotmp_UyjlWMssh-master/socket -o StrictHostKeyChecking=no -o UserKnownHostsFile=/tmp/tmpCvMigR -o BatchMode=yes -o ConnectTimeout=30 -o ServerAliveInterval=300 -l root -p 22 chromeos2-row2-rack4-host11.cros'\n13:23:45 INFO | autoserv| Starting master ssh connection '/usr/bin/ssh -a -x -N -o ControlMaster=yes -o ControlPath=/tmp/_autotmp_w_KGTassh-master/socket -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=yes -o ConnectTimeout=30 -o ServerAliveInterval=180 -o ServerAliveCountMax=3 -o ConnectionAttempts=4 -o Protocol=2 -l root -p 22 chromeos2-row2-rack4-host11.cros'\n-----------------------------------------------------------------------------------------\n/tmp/test_that_results_zZZfQa/results-1-telemetry_Crosperf [ PASSED ]\n/tmp/test_that_results_zZZfQa/results-1-telemetry_Crosperf/telemetry_Crosperf [ PASSED ]\n-----------------------------------------------------------------------------------------\nTotal PASS: 2/2 (100%)\n\n13:23:47 INFO | Finished running tests. Results can be found in /tmp/test_that_results_zZZfQa\n"
+p0
+.S'INFO:root:Identity added: /tmp/test_that_results_PPRMIh/testing_rsa (/tmp/test_that_results_PPRMIh/testing_rsa)\nINFO:root:Identity added: /tmp/test_that_results_zZZfQa/testing_rsa (/tmp/test_that_results_zZZfQa/testing_rsa)\n'
+p0
+.I0
+. \ No newline at end of file
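The S'...'/p0/I0/'.' tokens in results.txt above are protocol-0 pickle records. The fixture appears to hold three consecutive dumps (the captured stdout log, the stderr text, and the return code), so three loads read it back; this layout is inferred from the fixture itself, not documented by the change:

import pickle

# Path relative to the repository root.
with open('crosperf/test_cache/test_input/results.txt', 'rb') as f:
  out = pickle.load(f)     # the long autoserv/test_that log shown above
  err = pickle.load(f)     # the short 'Identity added: ...' stderr text
  retval = pickle.load(f)  # 0, matching the trailing I0 record
print(retval)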
diff --git a/crosperf/test_cache/test_puretelemetry_input/machine.txt b/crosperf/test_cache/test_puretelemetry_input/machine.txt
new file mode 100644
index 00000000..9bd78434
--- /dev/null
+++ b/crosperf/test_cache/test_puretelemetry_input/machine.txt
@@ -0,0 +1 @@
+processor : 0vendor_id : GenuineIntelcpu family : 6model : 42model name : Intel(R) Celeron(R) CPU 867 @ 1.30GHzstepping : 7microcode : 0x25cache size : 2048 KBphysical id : 0siblings : 2core id : 0cpu cores : 2apicid : 0initial apicid : 0fpu : yesfpu_exception : yescpuid level : 13wp : yesflags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer xsave lahf_lm arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpidclflush size : 64cache_alignment : 64address sizes : 36 bits physical, 48 bits virtualpower management:processor : 1vendor_id : GenuineIntelcpu family : 6model : 42model name : Intel(R) Celeron(R) CPU 867 @ 1.30GHzstepping : 7microcode : 0x25cache size : 2048 KBphysical id : 0siblings : 2core id : 1cpu cores : 2apicid : 2initial apicid : 2fpu : yesfpu_exception : yescpuid level : 13wp : yesflags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer xsave lahf_lm arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpidclflush size : 64cache_alignment : 64address sizes : 36 bits physical, 48 bits virtualpower management: 4194304 \ No newline at end of file
diff --git a/crosperf/test_cache/test_puretelemetry_input/results.txt b/crosperf/test_cache/test_puretelemetry_input/results.txt
new file mode 100644
index 00000000..497d1cf3
--- /dev/null
+++ b/crosperf/test_cache/test_puretelemetry_input/results.txt
@@ -0,0 +1,6 @@
+S'page_name,3d-cube (ms),3d-morph (ms),3d-raytrace (ms),Total (ms),access-binary-trees (ms),access-fannkuch (ms),access-nbody (ms),access-nsieve (ms),bitops-3bit-bits-in-byte (ms),bitops-bits-in-byte (ms),bitops-bitwise-and (ms),bitops-nsieve-bits (ms),controlflow-recursive (ms),crypto-aes (ms),crypto-md5 (ms),crypto-sha1 (ms),date-format-tofte (ms),date-format-xparb (ms),math-cordic (ms),math-partial-sums (ms),math-spectral-norm (ms),regexp-dna (ms),string-base64 (ms),string-fasta (ms),string-tagcloud (ms),string-unpack-code (ms),string-validate-input (ms)\r\nhttp://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html,42.7,50.2,28.7,656.5,7.3,26.3,6.9,8.6,3.5,9.8,8.8,9.3,5.3,19.2,10.8,12.4,31.2,138.1,11.4,32.8,6.3,16.1,17.5,36.3,47.2,45.0,24.8\r\n'
+p0
+.S''
+p0
+.I0
+. \ No newline at end of file
diff --git a/crosperf/test_flag.py b/crosperf/test_flag.py
new file mode 100644
index 00000000..70918693
--- /dev/null
+++ b/crosperf/test_flag.py
@@ -0,0 +1,12 @@
+# Copyright 2011 Google Inc. All Rights Reserved.
+"""A global variable for testing."""
+
+is_test = [False]
+
+
+def SetTestMode(flag):
+  is_test[0] = flag
+
+
+def GetTestMode():
+  return is_test[0]
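For context, a minimal sketch of how a crosperf module might consult this flag to skip real work while unit tests run; MaybeRunCommand is an illustrative helper, not part of this change, and the snippet assumes it is run from the crosperf/ directory so test_flag is importable:

import subprocess

import test_flag

def MaybeRunCommand(cmd):
  # Unit tests call test_flag.SetTestMode(True), so no real command runs.
  if test_flag.GetTestMode():
    return 0
  return subprocess.call(cmd, shell=True)

test_flag.SetTestMode(True)
assert MaybeRunCommand('echo hello') == 0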
diff --git a/crosperf/translate_xbuddy.py b/crosperf/translate_xbuddy.py
new file mode 100644
index 00000000..a32854e1
--- /dev/null
+++ b/crosperf/translate_xbuddy.py
@@ -0,0 +1,33 @@
+"""Module to translate the xbuddy config."""
+
+from __future__ import print_function
+
+import os
+import sys
+
+if '/mnt/host/source/src/third_party/toolchain-utils/crosperf' in sys.path:
+  dev_path = os.path.expanduser('~/trunk/src/platform/dev')
+  sys.path.append(dev_path)
+else:
+  print('This script can only be run from inside a ChromeOS chroot. Please '
+        'enter your chroot, go to ~/src/third_party/toolchain-utils/crosperf'
+        ' and try again.')
+  sys.exit(1)
+
+#pylint: disable=import-error
+import xbuddy
+
+
+def Main(xbuddy_string):
+  if not os.path.exists('./xbuddy_config.ini'):
+    config_path = os.path.expanduser('~/trunk/src/platform/dev/'
+                                     'xbuddy_config.ini')
+    os.symlink(config_path, './xbuddy_config.ini')
+  x = xbuddy.XBuddy(manage_builds=False, static_dir='/tmp/devserver/static')
+  build_id = x.Translate(os.path.split(xbuddy_string))
+  return build_id
+
+
+if __name__ == '__main__':
+  print(Main(sys.argv[1]))
+  sys.exit(0)
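A hedged invocation sketch, runnable only inside the ChromeOS chroot as the guard above requires; the xbuddy spec shown is illustrative, and the shape of the returned build id depends on the devserver's xbuddy module rather than on this change:

import translate_xbuddy

# 'remote/lumpy/latest-official' is an example xbuddy spec, not taken from
# this change; Main() returns whatever xbuddy.Translate() produces for it.
build_id = translate_xbuddy.Main('remote/lumpy/latest-official')
print(build_id)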
diff --git a/crosperf/unittest_keyval_file.txt b/crosperf/unittest_keyval_file.txt
new file mode 100644
index 00000000..cc76398e
--- /dev/null
+++ b/crosperf/unittest_keyval_file.txt
@@ -0,0 +1,20 @@
+{"description": "Box2D", "graph": "Box2D", "higher_is_better": true, "units": "score", "value": 4775}
+{"description": "CodeLoad", "graph": "CodeLoad", "higher_is_better": true, "units": "score", "value": 6271}
+{"description": "Crypto", "graph": "Crypto", "higher_is_better": true, "units": "score", "value": 8737}
+{"description": "DeltaBlue", "graph": "DeltaBlue", "higher_is_better": true, "units": "score", "value": 14401}
+{"description": "EarleyBoyer", "graph": "EarleyBoyer", "higher_is_better": true, "units": "score", "value": 14340}
+{"description": "Gameboy", "graph": "Gameboy", "higher_is_better": true, "units": "score", "value": 9901}
+{"description": "Mandreel", "graph": "Mandreel", "higher_is_better": true, "units": "score", "value": 6620}
+{"description": "MandreelLatency", "graph": "MandreelLatency", "higher_is_better": true, "units": "score", "value": 5188}
+{"description": "NavierStokes", "graph": "NavierStokes", "higher_is_better": true, "units": "score", "value": 9815}
+{"description": "PdfJS", "graph": "PdfJS", "higher_is_better": true, "units": "score", "value": 6455}
+{"description": "RayTrace", "graph": "RayTrace", "higher_is_better": true, "units": "score", "value": 16600}
+{"description": "RegExp", "graph": "RegExp", "higher_is_better": true, "units": "score", "value": 1765}
+{"description": "Richards", "graph": "Richards", "higher_is_better": true, "units": "score", "value": 10358}
+{"description": "Splay", "graph": "Splay", "higher_is_better": true, "units": "score", "value": 4425}
+{"description": "SplayLatency", "graph": "SplayLatency", "higher_is_better": true, "units": "score", "value": 7653}
+{"description": "Typescript", "graph": "Typescript", "higher_is_better": true, "units": "score", "value": 9815}
+{"description": "zlib", "graph": "zlib", "higher_is_better": true, "units": "score", "value": 16094}
+{"description": "Score", "graph": "Total", "higher_is_better": true, "units": "score", "value": 7918}
+{"description": "num_failed", "graph": "telemetry_page_measurement_results", "higher_is_better": true, "units": "count", "value": 0}
+{"description": "num_errored", "graph": "telemetry_page_measurement_results", "higher_is_better": true, "units": "count", "value": 0}