Diffstat (limited to 'crosperf')
-rw-r--r--  crosperf/benchmark.py  128
-rw-r--r--  crosperf/benchmark_run.py  566
-rwxr-xr-x  crosperf/benchmark_run_unittest.py  939
-rwxr-xr-x  crosperf/benchmark_unittest.py  112
-rw-r--r--  crosperf/column_chart.py  100
-rw-r--r--  crosperf/compare_machines.py  102
-rw-r--r--  crosperf/config.py  6
-rwxr-xr-x  crosperf/config_unittest.py  59
-rwxr-xr-x  crosperf/crosperf  2
-rwxr-xr-x  crosperf/crosperf.py  221
-rwxr-xr-x  crosperf/crosperf_autolock.py  508
-rwxr-xr-x  crosperf/crosperf_unittest.py  102
-rw-r--r--  crosperf/default_remotes  14
-rw-r--r--  crosperf/download_images.py  672
-rwxr-xr-x  crosperf/download_images_buildid_test.py  202
-rwxr-xr-x  crosperf/download_images_unittest.py  555
-rw-r--r--  crosperf/experiment.py  460
-rw-r--r--  crosperf/experiment_factory.py  1064
-rwxr-xr-x  crosperf/experiment_factory_unittest.py  828
-rw-r--r--  crosperf/experiment_file.py  415
-rwxr-xr-x  crosperf/experiment_file_unittest.py  203
-rwxr-xr-x  crosperf/experiment_files/telemetry_perf_perf  5
-rw-r--r--  crosperf/experiment_runner.py  691
-rwxr-xr-x  crosperf/experiment_runner_unittest.py  966
-rw-r--r--  crosperf/experiment_status.py  290
-rw-r--r--  crosperf/field.py  247
-rwxr-xr-x  crosperf/flag_test_unittest.py  47
-rwxr-xr-x  crosperf/generate_report.py  405
-rwxr-xr-x  crosperf/generate_report_unittest.py  294
-rw-r--r--  crosperf/help.py  70
-rw-r--r--  crosperf/image_checksummer.py  113
-rw-r--r--  crosperf/label.py  357
-rw-r--r--  crosperf/machine_image_manager.py  344
-rwxr-xr-x  crosperf/machine_image_manager_unittest.py  524
-rw-r--r--  crosperf/machine_manager.py  1254
-rwxr-xr-x  crosperf/machine_manager_unittest.py  1456
-rw-r--r--  crosperf/mock_instance.py  234
-rw-r--r--  crosperf/results_cache.py  2997
-rwxr-xr-x  crosperf/results_cache_unittest.py  3750
-rw-r--r--  crosperf/results_organizer.py  374
-rwxr-xr-x  crosperf/results_organizer_unittest.py  275
-rw-r--r--  crosperf/results_report.py  1474
-rw-r--r--  crosperf/results_report_templates.py  129
-rwxr-xr-x  crosperf/results_report_unittest.py  819
-rwxr-xr-x  crosperf/run_tests.sh  2
-rw-r--r--  crosperf/schedv2.py  798
-rwxr-xr-x  crosperf/schedv2_unittest.py  336
-rw-r--r--  crosperf/settings.py  153
-rw-r--r--  crosperf/settings_factory.py  932
-rwxr-xr-x  crosperf/settings_factory_unittest.py  186
-rwxr-xr-x  crosperf/settings_unittest.py  499
-rw-r--r--  crosperf/suite_runner.py  700
-rwxr-xr-x  crosperf/suite_runner_unittest.py  673
-rw-r--r--  crosperf/test_flag.py  6
-rwxr-xr-x  crosperf/translate_xbuddy.py  45
55 files changed, 15419 insertions, 13284 deletions
diff --git a/crosperf/benchmark.py b/crosperf/benchmark.py
index 0413b593..f9de0cf3 100644
--- a/crosperf/benchmark.py
+++ b/crosperf/benchmark.py
@@ -1,87 +1,91 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Define a type that wraps a Benchmark instance."""
-from __future__ import division
-from __future__ import print_function
import math
+
# FIXME(denik): Fix the import in chroot.
# pylint: disable=import-error
from scipy import stats
+
# See crbug.com/673558 for how these are estimated.
_estimated_stddev = {
- 'octane': 0.015,
- 'kraken': 0.019,
- 'speedometer': 0.007,
- 'speedometer2': 0.006,
- 'dromaeo.domcoreattr': 0.023,
- 'dromaeo.domcoremodify': 0.011,
- 'graphics_WebGLAquarium': 0.008,
- 'page_cycler_v2.typical_25': 0.021,
- 'loading.desktop': 0.021, # Copied from page_cycler initially
+ "octane": 0.015,
+ "kraken": 0.019,
+ "speedometer": 0.007,
+ "speedometer2": 0.006,
+ "dromaeo.domcoreattr": 0.023,
+ "dromaeo.domcoremodify": 0.011,
+ "graphics_WebGLAquarium": 0.008,
+ "page_cycler_v2.typical_25": 0.021,
+ "loading.desktop": 0.021, # Copied from page_cycler initially
}
# Get #samples needed to guarantee a given confidence interval, assuming the
# samples follow normal distribution.
def _samples(b):
- # TODO: Make this an option
- # CI = (0.9, 0.02), i.e., 90% chance that |sample mean - true mean| < 2%.
- p = 0.9
- e = 0.02
- if b not in _estimated_stddev:
- return 1
- d = _estimated_stddev[b]
- # Get at least 2 samples so as to calculate standard deviation, which is
- # needed in T-test for p-value.
- n = int(math.ceil((stats.norm.isf((1 - p) / 2) * d / e)**2))
- return n if n > 1 else 2
+ # TODO: Make this an option
+ # CI = (0.9, 0.02), i.e., 90% chance that |sample mean - true mean| < 2%.
+ p = 0.9
+ e = 0.02
+ if b not in _estimated_stddev:
+ return 1
+ d = _estimated_stddev[b]
+ # Get at least 2 samples so as to calculate standard deviation, which is
+ # needed in T-test for p-value.
+ n = int(math.ceil((stats.norm.isf((1 - p) / 2) * d / e) ** 2))
+ return n if n > 1 else 2
class Benchmark(object):
- """Class representing a benchmark to be run.
+ """Class representing a benchmark to be run.
- Contains details of the benchmark suite, arguments to pass to the suite,
- iterations to run the benchmark suite and so on. Note that the benchmark name
- can be different to the test suite name. For example, you may want to have
- two different benchmarks which run the same test_name with different
- arguments.
- """
+ Contains details of the benchmark suite, arguments to pass to the suite,
+ iterations to run the benchmark suite and so on. Note that the benchmark name
+ can be different to the test suite name. For example, you may want to have
+ two different benchmarks which run the same test_name with different
+ arguments.
+ """
- def __init__(self,
- name,
- test_name,
- test_args,
- iterations,
- rm_chroot_tmp,
- perf_args,
- suite='',
- show_all_results=False,
- retries=0,
- run_local=False,
- cwp_dso='',
- weight=0):
- self.name = name
- # For telemetry, this is the benchmark name.
- self.test_name = test_name
- # For telemetry, this is the data.
- self.test_args = test_args
- self.iterations = iterations if iterations > 0 else _samples(name)
- self.perf_args = perf_args
- self.rm_chroot_tmp = rm_chroot_tmp
- self.iteration_adjusted = False
- self.suite = suite
- self.show_all_results = show_all_results
- self.retries = retries
- if self.suite == 'telemetry':
- self.show_all_results = True
- if run_local and self.suite != 'telemetry_Crosperf':
- raise RuntimeError('run_local is only supported by telemetry_Crosperf.')
- self.run_local = run_local
- self.cwp_dso = cwp_dso
- self.weight = weight
+ def __init__(
+ self,
+ name,
+ test_name,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite="",
+ show_all_results=False,
+ retries=0,
+ run_local=False,
+ cwp_dso="",
+ weight=0,
+ ):
+ self.name = name
+ # For telemetry, this is the benchmark name.
+ self.test_name = test_name
+ # For telemetry, this is the data.
+ self.test_args = test_args
+ self.iterations = iterations if iterations > 0 else _samples(name)
+ self.perf_args = perf_args
+ self.rm_chroot_tmp = rm_chroot_tmp
+ self.iteration_adjusted = False
+ self.suite = suite
+ self.show_all_results = show_all_results
+ self.retries = retries
+ if self.suite == "telemetry":
+ self.show_all_results = True
+ if run_local and self.suite != "telemetry_Crosperf":
+ raise RuntimeError(
+ "run_local is only supported by telemetry_Crosperf."
+ )
+ self.run_local = run_local
+ self.cwp_dso = cwp_dso
+ self.weight = weight
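
The sample-count heuristic above is the normal-approximation confidence interval solved for n: n = ceil((z * stddev / e)^2) with z = norm.isf((1 - p) / 2), clamped to at least 2 so a standard deviation can later be computed. A small standalone sketch of that calculation, assuming scipy is available; the helper name check_samples and the trimmed stddev table are illustrative, not part of crosperf:

import math

from scipy import stats

# Relative standard deviations copied from the _estimated_stddev table above.
ESTIMATED_STDDEV = {
    "octane": 0.015,
    "speedometer2": 0.006,
    "dromaeo.domcoreattr": 0.023,
}


def check_samples(benchmark, p=0.9, e=0.02):
    """Mirror of _samples(): n = ceil((z * stddev / e) ** 2), minimum of 2."""
    if benchmark not in ESTIMATED_STDDEV:
        return 1
    z = stats.norm.isf((1 - p) / 2)  # ~1.645 for a 90% two-sided interval
    n = int(math.ceil((z * ESTIMATED_STDDEV[benchmark] / e) ** 2))
    return n if n > 1 else 2


# High-variance "dromaeo.domcoreattr" (stddev 0.023) needs 4 iterations;
# low-variance "speedometer2" (stddev 0.006) falls back to the minimum of 2.
print(check_samples("dromaeo.domcoreattr"))  # 4
print(check_samples("speedometer2"))  # 2
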
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
index b5912c11..84797d1c 100644
--- a/crosperf/benchmark_run.py
+++ b/crosperf/benchmark_run.py
@@ -1,10 +1,9 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module of benchmark runs."""
-from __future__ import print_function
import datetime
import threading
@@ -13,254 +12,339 @@ import traceback
from cros_utils import command_executer
from cros_utils import timeline
-
-from suite_runner import SuiteRunner
from results_cache import MockResult
from results_cache import MockResultsCache
from results_cache import Result
from results_cache import ResultsCache
+from suite_runner import SuiteRunner
+
-STATUS_FAILED = 'FAILED'
-STATUS_SUCCEEDED = 'SUCCEEDED'
-STATUS_IMAGING = 'IMAGING'
-STATUS_RUNNING = 'RUNNING'
-STATUS_WAITING = 'WAITING'
-STATUS_PENDING = 'PENDING'
+STATUS_FAILED = "FAILED"
+STATUS_SUCCEEDED = "SUCCEEDED"
+STATUS_IMAGING = "IMAGING"
+STATUS_RUNNING = "RUNNING"
+STATUS_WAITING = "WAITING"
+STATUS_PENDING = "PENDING"
class BenchmarkRun(threading.Thread):
- """The benchmarkrun class."""
-
- def __init__(self, name, benchmark, label, iteration, cache_conditions,
- machine_manager, logger_to_use, log_level, share_cache,
- dut_config):
- threading.Thread.__init__(self)
- self.name = name
- self._logger = logger_to_use
- self.log_level = log_level
- self.benchmark = benchmark
- self.iteration = iteration
- self.label = label
- self.result = None
- self.terminated = False
- self.retval = None
- self.run_completed = False
- self.machine_manager = machine_manager
- self.suite_runner = SuiteRunner(dut_config, self._logger, self.log_level)
- self.machine = None
- self.cache_conditions = cache_conditions
- self.runs_complete = 0
- self.cache_hit = False
- self.failure_reason = ''
- self.test_args = benchmark.test_args
- self.cache = None
- self.profiler_args = self.GetExtraAutotestArgs()
- self._ce = command_executer.GetCommandExecuter(
- self._logger, log_level=self.log_level)
- self.timeline = timeline.Timeline()
- self.timeline.Record(STATUS_PENDING)
- self.share_cache = share_cache
- self.cache_has_been_read = False
-
- # This is used by schedv2.
- self.owner_thread = None
-
- def ReadCache(self):
- # Just use the first machine for running the cached version,
- # without locking it.
- self.cache = ResultsCache()
- self.cache.Init(self.label.chromeos_image, self.label.chromeos_root,
- self.benchmark.test_name, self.iteration, self.test_args,
- self.profiler_args, self.machine_manager, self.machine,
- self.label.board, self.cache_conditions, self._logger,
- self.log_level, self.label, self.share_cache,
- self.benchmark.suite, self.benchmark.show_all_results,
- self.benchmark.run_local, self.benchmark.cwp_dso)
-
- self.result = self.cache.ReadResult()
- self.cache_hit = (self.result is not None)
- self.cache_has_been_read = True
-
- def run(self):
- try:
- if not self.cache_has_been_read:
- self.ReadCache()
-
- if self.result:
- self._logger.LogOutput('%s: Cache hit.' % self.name)
- self._logger.LogOutput(self.result.out, print_to_console=False)
- self._logger.LogError(self.result.err, print_to_console=False)
-
- elif self.label.cache_only:
- self._logger.LogOutput('%s: No cache hit.' % self.name)
- output = '%s: No Cache hit.' % self.name
- retval = 1
- err = 'No cache hit.'
- self.result = Result.CreateFromRun(
- self._logger, self.log_level, self.label, self.machine, output, err,
- retval, self.benchmark.test_name, self.benchmark.suite,
- self.benchmark.cwp_dso)
-
- else:
- self._logger.LogOutput('%s: No cache hit.' % self.name)
- self.timeline.Record(STATUS_WAITING)
- # Try to acquire a machine now.
- self.machine = self.AcquireMachine()
- self.cache.machine = self.machine
- self.result = self.RunTest(self.machine)
-
- self.cache.remote = self.machine.name
- self.label.chrome_version = self.machine_manager.GetChromeVersion(
- self.machine)
- self.cache.StoreResult(self.result)
-
- if not self.label.chrome_version:
- if self.machine:
- self.label.chrome_version = self.machine_manager.GetChromeVersion(
- self.machine)
- elif self.result.chrome_version:
- self.label.chrome_version = self.result.chrome_version
-
- if self.terminated:
- return
-
- if not self.result.retval:
- self.timeline.Record(STATUS_SUCCEEDED)
- else:
+ """The benchmarkrun class."""
+
+ def __init__(
+ self,
+ name,
+ benchmark,
+ label,
+ iteration,
+ cache_conditions,
+ machine_manager,
+ logger_to_use,
+ log_level,
+ share_cache,
+ dut_config,
+ ):
+ threading.Thread.__init__(self)
+ self.name = name
+ self._logger = logger_to_use
+ self.log_level = log_level
+ self.benchmark = benchmark
+ self.iteration = iteration
+ self.label = label
+ self.result = None
+ self.terminated = False
+ self.retval = None
+ self.run_completed = False
+ self.machine_manager = machine_manager
+ self.suite_runner = SuiteRunner(
+ dut_config, self._logger, self.log_level
+ )
+ self.machine = None
+ self.cache_conditions = cache_conditions
+ self.runs_complete = 0
+ self.cache_hit = False
+ self.failure_reason = ""
+ self.test_args = benchmark.test_args
+ self.cache = None
+ self.profiler_args = self.GetExtraAutotestArgs()
+ self._ce = command_executer.GetCommandExecuter(
+ self._logger, log_level=self.log_level
+ )
+ self.timeline = timeline.Timeline()
+ self.timeline.Record(STATUS_PENDING)
+ self.share_cache = share_cache
+ self.cache_has_been_read = False
+
+ # This is used by schedv2.
+ self.owner_thread = None
+
+ def ReadCache(self):
+ # Just use the first machine for running the cached version,
+ # without locking it.
+ self.cache = ResultsCache()
+ self.cache.Init(
+ self.label.chromeos_image,
+ self.label.chromeos_root,
+ self.benchmark.test_name,
+ self.iteration,
+ self.test_args,
+ self.profiler_args,
+ self.machine_manager,
+ self.machine,
+ self.label.board,
+ self.cache_conditions,
+ self._logger,
+ self.log_level,
+ self.label,
+ self.share_cache,
+ self.benchmark.suite,
+ self.benchmark.show_all_results,
+ self.benchmark.run_local,
+ self.benchmark.cwp_dso,
+ )
+
+ self.result = self.cache.ReadResult()
+ self.cache_hit = self.result is not None
+ self.cache_has_been_read = True
+
+ def run(self):
+ try:
+ if not self.cache_has_been_read:
+ self.ReadCache()
+
+ if self.result:
+ self._logger.LogOutput("%s: Cache hit." % self.name)
+ self._logger.LogOutput(self.result.out, print_to_console=False)
+ self._logger.LogError(self.result.err, print_to_console=False)
+
+ elif self.label.cache_only:
+ self._logger.LogOutput("%s: No cache hit." % self.name)
+ output = "%s: No Cache hit." % self.name
+ retval = 1
+ err = "No cache hit."
+ self.result = Result.CreateFromRun(
+ self._logger,
+ self.log_level,
+ self.label,
+ self.machine,
+ output,
+ err,
+ retval,
+ self.benchmark.test_name,
+ self.benchmark.suite,
+ self.benchmark.cwp_dso,
+ )
+
+ else:
+ self._logger.LogOutput("%s: No cache hit." % self.name)
+ self.timeline.Record(STATUS_WAITING)
+ # Try to acquire a machine now.
+ self.machine = self.AcquireMachine()
+ self.cache.machine = self.machine
+ self.result = self.RunTest(self.machine)
+
+ self.cache.remote = self.machine.name
+ self.label.chrome_version = (
+ self.machine_manager.GetChromeVersion(self.machine)
+ )
+ self.cache.StoreResult(self.result)
+
+ if not self.label.chrome_version:
+ if self.machine:
+ self.label.chrome_version = (
+ self.machine_manager.GetChromeVersion(self.machine)
+ )
+ elif self.result.chrome_version:
+ self.label.chrome_version = self.result.chrome_version
+
+ if self.terminated:
+ return
+
+ if not self.result.retval:
+ self.timeline.Record(STATUS_SUCCEEDED)
+ else:
+ if self.timeline.GetLastEvent() != STATUS_FAILED:
+ self.failure_reason = (
+ "Return value of test suite was non-zero."
+ )
+ self.timeline.Record(STATUS_FAILED)
+
+ except Exception as e:
+ self._logger.LogError(
+ "Benchmark run: '%s' failed: %s" % (self.name, e)
+ )
+ traceback.print_exc()
+ if self.timeline.GetLastEvent() != STATUS_FAILED:
+ self.timeline.Record(STATUS_FAILED)
+ self.failure_reason = str(e)
+ finally:
+ if self.owner_thread is not None:
+ # In schedv2 mode, we do not lock machine locally. So noop here.
+ pass
+ elif self.machine:
+ if not self.machine.IsReachable():
+ self._logger.LogOutput(
+ "Machine %s is not reachable, removing it."
+ % self.machine.name
+ )
+ self.machine_manager.RemoveMachine(self.machine.name)
+ self._logger.LogOutput(
+ "Releasing machine: %s" % self.machine.name
+ )
+ self.machine_manager.ReleaseMachine(self.machine)
+ self._logger.LogOutput(
+ "Released machine: %s" % self.machine.name
+ )
+
+ def Terminate(self):
+ self.terminated = True
+ self.suite_runner.Terminate()
if self.timeline.GetLastEvent() != STATUS_FAILED:
- self.failure_reason = 'Return value of test suite was non-zero.'
- self.timeline.Record(STATUS_FAILED)
-
- except Exception as e:
- self._logger.LogError("Benchmark run: '%s' failed: %s" % (self.name, e))
- traceback.print_exc()
- if self.timeline.GetLastEvent() != STATUS_FAILED:
- self.timeline.Record(STATUS_FAILED)
- self.failure_reason = str(e)
- finally:
- if self.owner_thread is not None:
- # In schedv2 mode, we do not lock machine locally. So noop here.
- pass
- elif self.machine:
- if not self.machine.IsReachable():
- self._logger.LogOutput(
- 'Machine %s is not reachable, removing it.' % self.machine.name)
- self.machine_manager.RemoveMachine(self.machine.name)
- self._logger.LogOutput('Releasing machine: %s' % self.machine.name)
- self.machine_manager.ReleaseMachine(self.machine)
- self._logger.LogOutput('Released machine: %s' % self.machine.name)
-
- def Terminate(self):
- self.terminated = True
- self.suite_runner.Terminate()
- if self.timeline.GetLastEvent() != STATUS_FAILED:
- self.timeline.Record(STATUS_FAILED)
- self.failure_reason = 'Thread terminated.'
-
- def AcquireMachine(self):
- if self.owner_thread is not None:
- # No need to lock machine locally, DutWorker, which is a thread, is
- # responsible for running br.
- return self.owner_thread.dut()
- while True:
- machine = None
- if self.terminated:
- raise RuntimeError('Thread terminated while trying to acquire machine.')
-
- machine = self.machine_manager.AcquireMachine(self.label)
-
- if machine:
- self._logger.LogOutput(
- '%s: Machine %s acquired at %s' % (self.name, machine.name,
- datetime.datetime.now()))
- break
- time.sleep(10)
- return machine
-
- def GetExtraAutotestArgs(self):
- if (self.benchmark.perf_args and
- self.benchmark.suite != 'telemetry_Crosperf'):
- self._logger.LogError(
- 'Non-telemetry benchmark does not support profiler.')
- self.benchmark.perf_args = ''
-
- if self.benchmark.perf_args:
- perf_args_list = self.benchmark.perf_args.split(' ')
- perf_args_list = [perf_args_list[0]] + ['-a'] + perf_args_list[1:]
- perf_args = ' '.join(perf_args_list)
- if not perf_args_list[0] in ['record', 'stat']:
- raise SyntaxError('perf_args must start with either record or stat')
- extra_test_args = [
- '--profiler=custom_perf',
- ('--profiler_args=\'perf_options="%s"\'' % perf_args)
- ]
- return ' '.join(extra_test_args)
- else:
- return ''
-
- def RunTest(self, machine):
- self.timeline.Record(STATUS_IMAGING)
- if self.owner_thread is not None:
- # In schedv2 mode, do not even call ImageMachine. Machine image is
-      # guaranteed.
- pass
- else:
- self.machine_manager.ImageMachine(machine, self.label)
- self.timeline.Record(STATUS_RUNNING)
- retval, out, err = self.suite_runner.Run(
- machine, self.label, self.benchmark, self.test_args, self.profiler_args)
- self.run_completed = True
- return Result.CreateFromRun(self._logger, self.log_level, self.label,
- self.machine, out, err, retval,
- self.benchmark.test_name, self.benchmark.suite,
- self.benchmark.cwp_dso)
-
- def SetCacheConditions(self, cache_conditions):
- self.cache_conditions = cache_conditions
-
- def logger(self):
- """Return the logger, only used by unittest.
-
- Returns:
- self._logger
- """
-
- return self._logger
-
- def __str__(self):
- """For better debugging."""
-
- return 'BenchmarkRun[name="{}"]'.format(self.name)
+ self.timeline.Record(STATUS_FAILED)
+ self.failure_reason = "Thread terminated."
+
+ def AcquireMachine(self):
+ if self.owner_thread is not None:
+ # No need to lock machine locally, DutWorker, which is a thread, is
+ # responsible for running br.
+ return self.owner_thread.dut()
+ while True:
+ machine = None
+ if self.terminated:
+ raise RuntimeError(
+ "Thread terminated while trying to acquire machine."
+ )
+
+ machine = self.machine_manager.AcquireMachine(self.label)
+
+ if machine:
+ self._logger.LogOutput(
+ "%s: Machine %s acquired at %s"
+ % (self.name, machine.name, datetime.datetime.now())
+ )
+ break
+ time.sleep(10)
+ return machine
+
+ def GetExtraAutotestArgs(self):
+ if (
+ self.benchmark.perf_args
+ and self.benchmark.suite != "telemetry_Crosperf"
+ ):
+ self._logger.LogError(
+ "Non-telemetry benchmark does not support profiler."
+ )
+ self.benchmark.perf_args = ""
+
+ if self.benchmark.perf_args:
+ perf_args_list = self.benchmark.perf_args.split(" ")
+ perf_args_list = [perf_args_list[0]] + ["-a"] + perf_args_list[1:]
+ perf_args = " ".join(perf_args_list)
+ if not perf_args_list[0] in ["record", "stat"]:
+ raise SyntaxError(
+ "perf_args must start with either record or stat"
+ )
+ extra_test_args = [
+ "--profiler=custom_perf",
+ ("--profiler_args='perf_options=\"%s\"'" % perf_args),
+ ]
+ return " ".join(extra_test_args)
+ else:
+ return ""
+
+ def RunTest(self, machine):
+ self.timeline.Record(STATUS_IMAGING)
+ if self.owner_thread is not None:
+ # In schedv2 mode, do not even call ImageMachine. Machine image is
+            # guaranteed.
+ pass
+ else:
+ self.machine_manager.ImageMachine(machine, self.label)
+ self.timeline.Record(STATUS_RUNNING)
+ retval, out, err = self.suite_runner.Run(
+ machine,
+ self.label,
+ self.benchmark,
+ self.test_args,
+ self.profiler_args,
+ )
+ self.run_completed = True
+ return Result.CreateFromRun(
+ self._logger,
+ self.log_level,
+ self.label,
+ self.machine,
+ out,
+ err,
+ retval,
+ self.benchmark.test_name,
+ self.benchmark.suite,
+ self.benchmark.cwp_dso,
+ )
+
+ def SetCacheConditions(self, cache_conditions):
+ self.cache_conditions = cache_conditions
+
+ def logger(self):
+ """Return the logger, only used by unittest.
+
+ Returns:
+ self._logger
+ """
+
+ return self._logger
+
+ def __str__(self):
+ """For better debugging."""
+
+ return 'BenchmarkRun[name="{}"]'.format(self.name)
class MockBenchmarkRun(BenchmarkRun):
- """Inherited from BenchmarkRun."""
-
- def ReadCache(self):
- # Just use the first machine for running the cached version,
- # without locking it.
- self.cache = MockResultsCache()
- self.cache.Init(self.label.chromeos_image, self.label.chromeos_root,
- self.benchmark.test_name, self.iteration, self.test_args,
- self.profiler_args, self.machine_manager, self.machine,
- self.label.board, self.cache_conditions, self._logger,
- self.log_level, self.label, self.share_cache,
- self.benchmark.suite, self.benchmark.show_all_results,
- self.benchmark.run_local, self.benchmark.cwp_dso)
-
- self.result = self.cache.ReadResult()
- self.cache_hit = (self.result is not None)
-
- def RunTest(self, machine):
- """Remove Result.CreateFromRun for testing."""
- self.timeline.Record(STATUS_IMAGING)
- self.machine_manager.ImageMachine(machine, self.label)
- self.timeline.Record(STATUS_RUNNING)
- [retval, out, err] = self.suite_runner.Run(
- machine, self.label, self.benchmark, self.test_args, self.profiler_args)
- self.run_completed = True
- rr = MockResult('logger', self.label, self.log_level, machine)
- rr.out = out
- rr.err = err
- rr.retval = retval
- return rr
+ """Inherited from BenchmarkRun."""
+
+ def ReadCache(self):
+ # Just use the first machine for running the cached version,
+ # without locking it.
+ self.cache = MockResultsCache()
+ self.cache.Init(
+ self.label.chromeos_image,
+ self.label.chromeos_root,
+ self.benchmark.test_name,
+ self.iteration,
+ self.test_args,
+ self.profiler_args,
+ self.machine_manager,
+ self.machine,
+ self.label.board,
+ self.cache_conditions,
+ self._logger,
+ self.log_level,
+ self.label,
+ self.share_cache,
+ self.benchmark.suite,
+ self.benchmark.show_all_results,
+ self.benchmark.run_local,
+ self.benchmark.cwp_dso,
+ )
+
+ self.result = self.cache.ReadResult()
+ self.cache_hit = self.result is not None
+
+ def RunTest(self, machine):
+ """Remove Result.CreateFromRun for testing."""
+ self.timeline.Record(STATUS_IMAGING)
+ self.machine_manager.ImageMachine(machine, self.label)
+ self.timeline.Record(STATUS_RUNNING)
+ [retval, out, err] = self.suite_runner.Run(
+ machine,
+ self.label,
+ self.benchmark,
+ self.test_args,
+ self.profiler_args,
+ )
+ self.run_completed = True
+ rr = MockResult("logger", self.label, self.log_level, machine)
+ rr.out = out
+ rr.err = err
+ rr.retval = retval
+ return rr
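
The profiler plumbing in GetExtraAutotestArgs() is easiest to see with a concrete input: the first perf token must be "record" or "stat", a system-wide "-a" flag is spliced in right after it, and the result is wrapped in Autotest profiler arguments. A minimal standalone sketch of that string handling (build_profiler_args is an illustrative name, not a crosperf function; the suite check and error logging are omitted):

def build_profiler_args(perf_args):
    """Mirror of the perf_args handling in BenchmarkRun.GetExtraAutotestArgs."""
    if not perf_args:
        return ""
    tokens = perf_args.split(" ")
    if tokens[0] not in ("record", "stat"):
        raise SyntaxError("perf_args must start with either record or stat")
    # Splice in "-a" (system-wide collection) right after the perf subcommand.
    rebuilt = " ".join([tokens[0]] + ["-a"] + tokens[1:])
    return " ".join(
        [
            "--profiler=custom_perf",
            "--profiler_args='perf_options=\"%s\"'" % rebuilt,
        ]
    )


# "record -e cycles" becomes:
# --profiler=custom_perf --profiler_args='perf_options="record -a -e cycles"'
print(build_profiler_args("record -e cycles"))
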
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
index 9d815b80..0013e19b 100755
--- a/crosperf/benchmark_run_unittest.py
+++ b/crosperf/benchmark_run_unittest.py
@@ -1,442 +1,545 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Testing of benchmark_run."""
-from __future__ import print_function
import inspect
import unittest
import unittest.mock as mock
+from benchmark import Benchmark
import benchmark_run
-
from cros_utils import logger
-from suite_runner import MockSuiteRunner
-from suite_runner import SuiteRunner
from label import MockLabel
-from benchmark import Benchmark
-from machine_manager import MockMachineManager
from machine_manager import MachineManager
from machine_manager import MockCrosMachine
-from results_cache import MockResultsCache
+from machine_manager import MockMachineManager
from results_cache import CacheConditions
+from results_cache import MockResultsCache
from results_cache import Result
from results_cache import ResultsCache
+from suite_runner import MockSuiteRunner
+from suite_runner import SuiteRunner
class BenchmarkRunTest(unittest.TestCase):
- """Unit tests for the BenchmarkRun class and all of its methods."""
-
- def setUp(self):
- self.status = []
- self.called_ReadCache = None
- self.log_error = []
- self.log_output = []
- self.err_msg = None
- self.test_benchmark = Benchmark(
- 'page_cycler.netsim.top_10', # name
- 'page_cycler.netsim.top_10', # test_name
- '', # test_args
- 1, # iterations
- False, # rm_chroot_tmp
- '', # perf_args
- suite='telemetry_Crosperf') # suite
-
- self.test_label = MockLabel(
- 'test1',
- 'build',
- 'image1',
- 'autotest_dir',
- 'debug_dir',
- '/tmp/test_benchmark_run',
- 'x86-alex',
- 'chromeos2-row1-rack4-host9.cros',
- image_args='',
- cache_dir='',
- cache_only=False,
- log_level='average',
- compiler='gcc',
- crosfleet=False)
-
- self.test_cache_conditions = [
- CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH
- ]
-
- self.mock_logger = logger.GetLogger(log_dir='', mock=True)
-
- self.mock_machine_manager = mock.Mock(spec=MachineManager)
-
- def testDryRun(self):
- my_label = MockLabel(
- 'test1',
- 'build',
- 'image1',
- 'autotest_dir',
- 'debug_dir',
- '/tmp/test_benchmark_run',
- 'x86-alex',
- 'chromeos2-row1-rack4-host9.cros',
- image_args='',
- cache_dir='',
- cache_only=False,
- log_level='average',
- compiler='gcc',
- crosfleet=False)
-
- logging_level = 'average'
- m = MockMachineManager('/tmp/chromeos_root', 0, logging_level, '')
- m.AddMachine('chromeos2-row1-rack4-host9.cros')
- bench = Benchmark(
- 'page_cycler.netsim.top_10', # name
- 'page_cycler.netsim.top_10', # test_name
- '', # test_args
- 1, # iterations
- False, # rm_chroot_tmp
- '', # perf_args
- suite='telemetry_Crosperf') # suite
- dut_conf = {
- 'cooldown_time': 5,
- 'cooldown_temp': 45,
- 'governor': 'powersave',
- 'cpu_usage': 'big_only',
- 'cpu_freq_pct': 80,
- }
- b = benchmark_run.MockBenchmarkRun('test run', bench, my_label, 1, [], m,
- logger.GetLogger(), logging_level, '',
- dut_conf)
- b.cache = MockResultsCache()
- b.suite_runner = MockSuiteRunner()
- b.start()
-
- # Make sure the arguments to BenchmarkRun.__init__ have not changed
- # since the last time this test was updated:
- args_list = [
- 'self', 'name', 'benchmark', 'label', 'iteration', 'cache_conditions',
- 'machine_manager', 'logger_to_use', 'log_level', 'share_cache',
- 'dut_config'
- ]
- arg_spec = inspect.getfullargspec(benchmark_run.BenchmarkRun.__init__)
- self.assertEqual(len(arg_spec.args), len(args_list))
- self.assertEqual(arg_spec.args, args_list)
-
- def test_init(self):
- # Nothing really worth testing here; just field assignments.
- pass
-
- def test_read_cache(self):
- # Nothing really worth testing here, either.
- pass
-
- def test_run(self):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
-
- def MockLogOutput(msg, print_to_console=False):
- """Helper function for test_run."""
- del print_to_console
- self.log_output.append(msg)
-
- def MockLogError(msg, print_to_console=False):
- """Helper function for test_run."""
- del print_to_console
- self.log_error.append(msg)
-
- def MockRecordStatus(msg):
- """Helper function for test_run."""
- self.status.append(msg)
-
- def FakeReadCache():
- """Helper function for test_run."""
- br.cache = mock.Mock(spec=ResultsCache)
- self.called_ReadCache = True
- return 0
-
- def FakeReadCacheSucceed():
- """Helper function for test_run."""
- br.cache = mock.Mock(spec=ResultsCache)
- br.result = mock.Mock(spec=Result)
- br.result.out = 'result.out stuff'
- br.result.err = 'result.err stuff'
- br.result.retval = 0
- self.called_ReadCache = True
- return 0
-
- def FakeReadCacheException():
- """Helper function for test_run."""
- raise RuntimeError('This is an exception test; it is supposed to happen')
-
- def FakeAcquireMachine():
- """Helper function for test_run."""
- mock_machine = MockCrosMachine('chromeos1-row3-rack5-host7.cros',
- 'chromeos', 'average')
- return mock_machine
-
- def FakeRunTest(_machine):
- """Helper function for test_run."""
- mock_result = mock.Mock(spec=Result)
- mock_result.retval = 0
- return mock_result
-
- def FakeRunTestFail(_machine):
- """Helper function for test_run."""
- mock_result = mock.Mock(spec=Result)
- mock_result.retval = 1
- return mock_result
-
- def ResetTestValues():
- """Helper function for test_run."""
- self.log_output = []
- self.log_error = []
- self.status = []
- br.result = None
- self.called_ReadCache = False
-
- # Assign all the fake functions to the appropriate objects.
- br.logger().LogOutput = MockLogOutput
- br.logger().LogError = MockLogError
- br.timeline.Record = MockRecordStatus
- br.ReadCache = FakeReadCache
- br.RunTest = FakeRunTest
- br.AcquireMachine = FakeAcquireMachine
-
- # First test: No cache hit, all goes well.
- ResetTestValues()
- br.run()
- self.assertTrue(self.called_ReadCache)
- self.assertEqual(self.log_output, [
- 'test_run: No cache hit.',
- 'Releasing machine: chromeos1-row3-rack5-host7.cros',
- 'Released machine: chromeos1-row3-rack5-host7.cros'
- ])
- self.assertEqual(len(self.log_error), 0)
- self.assertEqual(self.status, ['WAITING', 'SUCCEEDED'])
-
- # Second test: No cached result found; test run was "terminated" for some
- # reason.
- ResetTestValues()
- br.terminated = True
- br.run()
- self.assertTrue(self.called_ReadCache)
- self.assertEqual(self.log_output, [
- 'test_run: No cache hit.',
- 'Releasing machine: chromeos1-row3-rack5-host7.cros',
- 'Released machine: chromeos1-row3-rack5-host7.cros'
- ])
- self.assertEqual(len(self.log_error), 0)
- self.assertEqual(self.status, ['WAITING'])
-
- # Third test. No cached result found; RunTest failed for some reason.
- ResetTestValues()
- br.terminated = False
- br.RunTest = FakeRunTestFail
- br.run()
- self.assertTrue(self.called_ReadCache)
- self.assertEqual(self.log_output, [
- 'test_run: No cache hit.',
- 'Releasing machine: chromeos1-row3-rack5-host7.cros',
- 'Released machine: chromeos1-row3-rack5-host7.cros'
- ])
- self.assertEqual(len(self.log_error), 0)
- self.assertEqual(self.status, ['WAITING', 'FAILED'])
-
- # Fourth test: ReadCache found a cached result.
- ResetTestValues()
- br.RunTest = FakeRunTest
- br.ReadCache = FakeReadCacheSucceed
- br.run()
- self.assertTrue(self.called_ReadCache)
- self.assertEqual(self.log_output, [
- 'test_run: Cache hit.', 'result.out stuff',
- 'Releasing machine: chromeos1-row3-rack5-host7.cros',
- 'Released machine: chromeos1-row3-rack5-host7.cros'
- ])
- self.assertEqual(self.log_error, ['result.err stuff'])
- self.assertEqual(self.status, ['SUCCEEDED'])
-
- # Fifth test: ReadCache generates an exception; does the try/finally block
- # work?
- ResetTestValues()
- br.ReadCache = FakeReadCacheException
- br.machine = FakeAcquireMachine()
- br.run()
- self.assertEqual(self.log_error, [
- "Benchmark run: 'test_run' failed: This is an exception test; it is "
- 'supposed to happen'
- ])
- self.assertEqual(self.status, ['FAILED'])
-
- def test_terminate_pass(self):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
-
- def GetLastEventPassed():
- """Helper function for test_terminate_pass"""
- return benchmark_run.STATUS_SUCCEEDED
-
- def RecordStub(status):
- """Helper function for test_terminate_pass"""
- self.status = status
-
- self.status = benchmark_run.STATUS_SUCCEEDED
- self.assertFalse(br.terminated)
- self.assertFalse(br.suite_runner.CommandTerminator().IsTerminated())
-
- br.timeline.GetLastEvent = GetLastEventPassed
- br.timeline.Record = RecordStub
-
- br.Terminate()
-
- self.assertTrue(br.terminated)
- self.assertTrue(br.suite_runner.CommandTerminator().IsTerminated())
- self.assertEqual(self.status, benchmark_run.STATUS_FAILED)
-
- def test_terminate_fail(self):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
-
- def GetLastEventFailed():
- """Helper function for test_terminate_fail"""
- return benchmark_run.STATUS_FAILED
-
- def RecordStub(status):
- """Helper function for test_terminate_fail"""
- self.status = status
-
- self.status = benchmark_run.STATUS_SUCCEEDED
- self.assertFalse(br.terminated)
- self.assertFalse(br.suite_runner.CommandTerminator().IsTerminated())
-
- br.timeline.GetLastEvent = GetLastEventFailed
- br.timeline.Record = RecordStub
-
- br.Terminate()
-
- self.assertTrue(br.terminated)
- self.assertTrue(br.suite_runner.CommandTerminator().IsTerminated())
- self.assertEqual(self.status, benchmark_run.STATUS_SUCCEEDED)
-
- def test_acquire_machine(self):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
-
- br.terminated = True
- self.assertRaises(Exception, br.AcquireMachine)
-
- br.terminated = False
- mock_machine = MockCrosMachine('chromeos1-row3-rack5-host7.cros',
- 'chromeos', 'average')
- self.mock_machine_manager.AcquireMachine.return_value = mock_machine
-
- machine = br.AcquireMachine()
- self.assertEqual(machine.name, 'chromeos1-row3-rack5-host7.cros')
-
- def test_get_extra_autotest_args(self):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
-
- def MockLogError(err_msg):
- """Helper function for test_get_extra_autotest_args"""
- self.err_msg = err_msg
-
- self.mock_logger.LogError = MockLogError
-
- result = br.GetExtraAutotestArgs()
- self.assertEqual(result, '')
-
- self.test_benchmark.perf_args = 'record -e cycles'
- result = br.GetExtraAutotestArgs()
- self.assertEqual(
- result,
- '--profiler=custom_perf --profiler_args=\'perf_options="record -a -e '
- 'cycles"\'')
-
- self.test_benchmark.perf_args = 'record -e cycles'
- self.test_benchmark.suite = 'test_that'
- result = br.GetExtraAutotestArgs()
- self.assertEqual(result, '')
- self.assertEqual(self.err_msg,
- 'Non-telemetry benchmark does not support profiler.')
-
- self.test_benchmark.perf_args = 'junk args'
- self.test_benchmark.suite = 'telemetry_Crosperf'
- self.assertRaises(Exception, br.GetExtraAutotestArgs)
-
- @mock.patch.object(SuiteRunner, 'Run')
- @mock.patch.object(Result, 'CreateFromRun')
- def test_run_test(self, mock_result, mock_runner):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
-
- self.status = []
-
- def MockRecord(status):
- self.status.append(status)
-
- br.timeline.Record = MockRecord
- mock_machine = MockCrosMachine('chromeos1-row3-rack5-host7.cros',
- 'chromeos', 'average')
- mock_runner.return_value = [0, "{'Score':100}", '']
-
- br.RunTest(mock_machine)
-
- self.assertTrue(br.run_completed)
- self.assertEqual(
- self.status,
- [benchmark_run.STATUS_IMAGING, benchmark_run.STATUS_RUNNING])
-
- self.assertEqual(br.machine_manager.ImageMachine.call_count, 1)
- br.machine_manager.ImageMachine.assert_called_with(mock_machine,
- self.test_label)
- self.assertEqual(mock_runner.call_count, 1)
- mock_runner.assert_called_with(mock_machine, br.label, br.benchmark, '',
- br.profiler_args)
-
- self.assertEqual(mock_result.call_count, 1)
- mock_result.assert_called_with(self.mock_logger, 'average', self.test_label,
- None, "{'Score':100}", '', 0,
- 'page_cycler.netsim.top_10',
- 'telemetry_Crosperf', '')
-
- def test_set_cache_conditions(self):
- br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
- self.test_label, 1,
- self.test_cache_conditions,
- self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
-
- phony_cache_conditions = [123, 456, True, False]
-
- self.assertEqual(br.cache_conditions, self.test_cache_conditions)
-
- br.SetCacheConditions(phony_cache_conditions)
- self.assertEqual(br.cache_conditions, phony_cache_conditions)
-
- br.SetCacheConditions(self.test_cache_conditions)
- self.assertEqual(br.cache_conditions, self.test_cache_conditions)
-
-
-if __name__ == '__main__':
- unittest.main()
+ """Unit tests for the BenchmarkRun class and all of its methods."""
+
+ def setUp(self):
+ self.status = []
+ self.called_ReadCache = None
+ self.log_error = []
+ self.log_output = []
+ self.err_msg = None
+ self.test_benchmark = Benchmark(
+ "page_cycler.netsim.top_10", # name
+ "page_cycler.netsim.top_10", # test_name
+ "", # test_args
+ 1, # iterations
+ False, # rm_chroot_tmp
+ "", # perf_args
+ suite="telemetry_Crosperf",
+ ) # suite
+
+ self.test_label = MockLabel(
+ "test1",
+ "build",
+ "image1",
+ "autotest_dir",
+ "debug_dir",
+ "/tmp/test_benchmark_run",
+ "x86-alex",
+ "chromeos2-row1-rack4-host9.cros",
+ image_args="",
+ cache_dir="",
+ cache_only=False,
+ log_level="average",
+ compiler="gcc",
+ crosfleet=False,
+ )
+
+ self.test_cache_conditions = [
+ CacheConditions.CACHE_FILE_EXISTS,
+ CacheConditions.CHECKSUMS_MATCH,
+ ]
+
+ self.mock_logger = logger.GetLogger(log_dir="", mock=True)
+
+ self.mock_machine_manager = mock.Mock(spec=MachineManager)
+
+ def testDryRun(self):
+ my_label = MockLabel(
+ "test1",
+ "build",
+ "image1",
+ "autotest_dir",
+ "debug_dir",
+ "/tmp/test_benchmark_run",
+ "x86-alex",
+ "chromeos2-row1-rack4-host9.cros",
+ image_args="",
+ cache_dir="",
+ cache_only=False,
+ log_level="average",
+ compiler="gcc",
+ crosfleet=False,
+ )
+
+ logging_level = "average"
+ m = MockMachineManager("/tmp/chromeos_root", 0, logging_level, "")
+ m.AddMachine("chromeos2-row1-rack4-host9.cros")
+ bench = Benchmark(
+ "page_cycler.netsim.top_10", # name
+ "page_cycler.netsim.top_10", # test_name
+ "", # test_args
+ 1, # iterations
+ False, # rm_chroot_tmp
+ "", # perf_args
+ suite="telemetry_Crosperf",
+ ) # suite
+ dut_conf = {
+ "cooldown_time": 5,
+ "cooldown_temp": 45,
+ "governor": "powersave",
+ "cpu_usage": "big_only",
+ "cpu_freq_pct": 80,
+ }
+ b = benchmark_run.MockBenchmarkRun(
+ "test run",
+ bench,
+ my_label,
+ 1,
+ [],
+ m,
+ logger.GetLogger(),
+ logging_level,
+ "",
+ dut_conf,
+ )
+ b.cache = MockResultsCache()
+ b.suite_runner = MockSuiteRunner()
+ b.start()
+
+ # Make sure the arguments to BenchmarkRun.__init__ have not changed
+ # since the last time this test was updated:
+ args_list = [
+ "self",
+ "name",
+ "benchmark",
+ "label",
+ "iteration",
+ "cache_conditions",
+ "machine_manager",
+ "logger_to_use",
+ "log_level",
+ "share_cache",
+ "dut_config",
+ ]
+ arg_spec = inspect.getfullargspec(benchmark_run.BenchmarkRun.__init__)
+ self.assertEqual(len(arg_spec.args), len(args_list))
+ self.assertEqual(arg_spec.args, args_list)
+
+ def test_init(self):
+ # Nothing really worth testing here; just field assignments.
+ pass
+
+ def test_read_cache(self):
+ # Nothing really worth testing here, either.
+ pass
+
+ def test_run(self):
+ br = benchmark_run.BenchmarkRun(
+ "test_run",
+ self.test_benchmark,
+ self.test_label,
+ 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager,
+ self.mock_logger,
+ "average",
+ "",
+ {},
+ )
+
+ def MockLogOutput(msg, print_to_console=False):
+ """Helper function for test_run."""
+ del print_to_console
+ self.log_output.append(msg)
+
+ def MockLogError(msg, print_to_console=False):
+ """Helper function for test_run."""
+ del print_to_console
+ self.log_error.append(msg)
+
+ def MockRecordStatus(msg):
+ """Helper function for test_run."""
+ self.status.append(msg)
+
+ def FakeReadCache():
+ """Helper function for test_run."""
+ br.cache = mock.Mock(spec=ResultsCache)
+ self.called_ReadCache = True
+ return 0
+
+ def FakeReadCacheSucceed():
+ """Helper function for test_run."""
+ br.cache = mock.Mock(spec=ResultsCache)
+ br.result = mock.Mock(spec=Result)
+ br.result.out = "result.out stuff"
+ br.result.err = "result.err stuff"
+ br.result.retval = 0
+ self.called_ReadCache = True
+ return 0
+
+ def FakeReadCacheException():
+ """Helper function for test_run."""
+ raise RuntimeError(
+ "This is an exception test; it is supposed to happen"
+ )
+
+ def FakeAcquireMachine():
+ """Helper function for test_run."""
+ mock_machine = MockCrosMachine(
+ "chromeos1-row3-rack5-host7.cros", "chromeos", "average"
+ )
+ return mock_machine
+
+ def FakeRunTest(_machine):
+ """Helper function for test_run."""
+ mock_result = mock.Mock(spec=Result)
+ mock_result.retval = 0
+ return mock_result
+
+ def FakeRunTestFail(_machine):
+ """Helper function for test_run."""
+ mock_result = mock.Mock(spec=Result)
+ mock_result.retval = 1
+ return mock_result
+
+ def ResetTestValues():
+ """Helper function for test_run."""
+ self.log_output = []
+ self.log_error = []
+ self.status = []
+ br.result = None
+ self.called_ReadCache = False
+
+ # Assign all the fake functions to the appropriate objects.
+ br.logger().LogOutput = MockLogOutput
+ br.logger().LogError = MockLogError
+ br.timeline.Record = MockRecordStatus
+ br.ReadCache = FakeReadCache
+ br.RunTest = FakeRunTest
+ br.AcquireMachine = FakeAcquireMachine
+
+ # First test: No cache hit, all goes well.
+ ResetTestValues()
+ br.run()
+ self.assertTrue(self.called_ReadCache)
+ self.assertEqual(
+ self.log_output,
+ [
+ "test_run: No cache hit.",
+ "Releasing machine: chromeos1-row3-rack5-host7.cros",
+ "Released machine: chromeos1-row3-rack5-host7.cros",
+ ],
+ )
+ self.assertEqual(len(self.log_error), 0)
+ self.assertEqual(self.status, ["WAITING", "SUCCEEDED"])
+
+ # Second test: No cached result found; test run was "terminated" for some
+ # reason.
+ ResetTestValues()
+ br.terminated = True
+ br.run()
+ self.assertTrue(self.called_ReadCache)
+ self.assertEqual(
+ self.log_output,
+ [
+ "test_run: No cache hit.",
+ "Releasing machine: chromeos1-row3-rack5-host7.cros",
+ "Released machine: chromeos1-row3-rack5-host7.cros",
+ ],
+ )
+ self.assertEqual(len(self.log_error), 0)
+ self.assertEqual(self.status, ["WAITING"])
+
+ # Third test. No cached result found; RunTest failed for some reason.
+ ResetTestValues()
+ br.terminated = False
+ br.RunTest = FakeRunTestFail
+ br.run()
+ self.assertTrue(self.called_ReadCache)
+ self.assertEqual(
+ self.log_output,
+ [
+ "test_run: No cache hit.",
+ "Releasing machine: chromeos1-row3-rack5-host7.cros",
+ "Released machine: chromeos1-row3-rack5-host7.cros",
+ ],
+ )
+ self.assertEqual(len(self.log_error), 0)
+ self.assertEqual(self.status, ["WAITING", "FAILED"])
+
+ # Fourth test: ReadCache found a cached result.
+ ResetTestValues()
+ br.RunTest = FakeRunTest
+ br.ReadCache = FakeReadCacheSucceed
+ br.run()
+ self.assertTrue(self.called_ReadCache)
+ self.assertEqual(
+ self.log_output,
+ [
+ "test_run: Cache hit.",
+ "result.out stuff",
+ "Releasing machine: chromeos1-row3-rack5-host7.cros",
+ "Released machine: chromeos1-row3-rack5-host7.cros",
+ ],
+ )
+ self.assertEqual(self.log_error, ["result.err stuff"])
+ self.assertEqual(self.status, ["SUCCEEDED"])
+
+ # Fifth test: ReadCache generates an exception; does the try/finally block
+ # work?
+ ResetTestValues()
+ br.ReadCache = FakeReadCacheException
+ br.machine = FakeAcquireMachine()
+ br.run()
+ self.assertEqual(
+ self.log_error,
+ [
+ "Benchmark run: 'test_run' failed: This is an exception test; it is "
+ "supposed to happen"
+ ],
+ )
+ self.assertEqual(self.status, ["FAILED"])
+
+ def test_terminate_pass(self):
+ br = benchmark_run.BenchmarkRun(
+ "test_run",
+ self.test_benchmark,
+ self.test_label,
+ 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager,
+ self.mock_logger,
+ "average",
+ "",
+ {},
+ )
+
+ def GetLastEventPassed():
+ """Helper function for test_terminate_pass"""
+ return benchmark_run.STATUS_SUCCEEDED
+
+ def RecordStub(status):
+ """Helper function for test_terminate_pass"""
+ self.status = status
+
+ self.status = benchmark_run.STATUS_SUCCEEDED
+ self.assertFalse(br.terminated)
+ self.assertFalse(br.suite_runner.CommandTerminator().IsTerminated())
+
+ br.timeline.GetLastEvent = GetLastEventPassed
+ br.timeline.Record = RecordStub
+
+ br.Terminate()
+
+ self.assertTrue(br.terminated)
+ self.assertTrue(br.suite_runner.CommandTerminator().IsTerminated())
+ self.assertEqual(self.status, benchmark_run.STATUS_FAILED)
+
+ def test_terminate_fail(self):
+ br = benchmark_run.BenchmarkRun(
+ "test_run",
+ self.test_benchmark,
+ self.test_label,
+ 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager,
+ self.mock_logger,
+ "average",
+ "",
+ {},
+ )
+
+ def GetLastEventFailed():
+ """Helper function for test_terminate_fail"""
+ return benchmark_run.STATUS_FAILED
+
+ def RecordStub(status):
+ """Helper function for test_terminate_fail"""
+ self.status = status
+
+ self.status = benchmark_run.STATUS_SUCCEEDED
+ self.assertFalse(br.terminated)
+ self.assertFalse(br.suite_runner.CommandTerminator().IsTerminated())
+
+ br.timeline.GetLastEvent = GetLastEventFailed
+ br.timeline.Record = RecordStub
+
+ br.Terminate()
+
+ self.assertTrue(br.terminated)
+ self.assertTrue(br.suite_runner.CommandTerminator().IsTerminated())
+ self.assertEqual(self.status, benchmark_run.STATUS_SUCCEEDED)
+
+ def test_acquire_machine(self):
+ br = benchmark_run.BenchmarkRun(
+ "test_run",
+ self.test_benchmark,
+ self.test_label,
+ 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager,
+ self.mock_logger,
+ "average",
+ "",
+ {},
+ )
+
+ br.terminated = True
+ self.assertRaises(Exception, br.AcquireMachine)
+
+ br.terminated = False
+ mock_machine = MockCrosMachine(
+ "chromeos1-row3-rack5-host7.cros", "chromeos", "average"
+ )
+ self.mock_machine_manager.AcquireMachine.return_value = mock_machine
+
+ machine = br.AcquireMachine()
+ self.assertEqual(machine.name, "chromeos1-row3-rack5-host7.cros")
+
+ def test_get_extra_autotest_args(self):
+ br = benchmark_run.BenchmarkRun(
+ "test_run",
+ self.test_benchmark,
+ self.test_label,
+ 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager,
+ self.mock_logger,
+ "average",
+ "",
+ {},
+ )
+
+ def MockLogError(err_msg):
+ """Helper function for test_get_extra_autotest_args"""
+ self.err_msg = err_msg
+
+ self.mock_logger.LogError = MockLogError
+
+ result = br.GetExtraAutotestArgs()
+ self.assertEqual(result, "")
+
+ self.test_benchmark.perf_args = "record -e cycles"
+ result = br.GetExtraAutotestArgs()
+ self.assertEqual(
+ result,
+ "--profiler=custom_perf --profiler_args='perf_options=\"record -a -e "
+ "cycles\"'",
+ )
+
+ self.test_benchmark.perf_args = "record -e cycles"
+ self.test_benchmark.suite = "test_that"
+ result = br.GetExtraAutotestArgs()
+ self.assertEqual(result, "")
+ self.assertEqual(
+ self.err_msg, "Non-telemetry benchmark does not support profiler."
+ )
+
+ self.test_benchmark.perf_args = "junk args"
+ self.test_benchmark.suite = "telemetry_Crosperf"
+ self.assertRaises(Exception, br.GetExtraAutotestArgs)
+
+ @mock.patch.object(SuiteRunner, "Run")
+ @mock.patch.object(Result, "CreateFromRun")
+ def test_run_test(self, mock_result, mock_runner):
+ br = benchmark_run.BenchmarkRun(
+ "test_run",
+ self.test_benchmark,
+ self.test_label,
+ 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager,
+ self.mock_logger,
+ "average",
+ "",
+ {},
+ )
+
+ self.status = []
+
+ def MockRecord(status):
+ self.status.append(status)
+
+ br.timeline.Record = MockRecord
+ mock_machine = MockCrosMachine(
+ "chromeos1-row3-rack5-host7.cros", "chromeos", "average"
+ )
+ mock_runner.return_value = [0, "{'Score':100}", ""]
+
+ br.RunTest(mock_machine)
+
+ self.assertTrue(br.run_completed)
+ self.assertEqual(
+ self.status,
+ [benchmark_run.STATUS_IMAGING, benchmark_run.STATUS_RUNNING],
+ )
+
+ self.assertEqual(br.machine_manager.ImageMachine.call_count, 1)
+ br.machine_manager.ImageMachine.assert_called_with(
+ mock_machine, self.test_label
+ )
+ self.assertEqual(mock_runner.call_count, 1)
+ mock_runner.assert_called_with(
+ mock_machine, br.label, br.benchmark, "", br.profiler_args
+ )
+
+ self.assertEqual(mock_result.call_count, 1)
+ mock_result.assert_called_with(
+ self.mock_logger,
+ "average",
+ self.test_label,
+ None,
+ "{'Score':100}",
+ "",
+ 0,
+ "page_cycler.netsim.top_10",
+ "telemetry_Crosperf",
+ "",
+ )
+
+ def test_set_cache_conditions(self):
+ br = benchmark_run.BenchmarkRun(
+ "test_run",
+ self.test_benchmark,
+ self.test_label,
+ 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager,
+ self.mock_logger,
+ "average",
+ "",
+ {},
+ )
+
+ phony_cache_conditions = [123, 456, True, False]
+
+ self.assertEqual(br.cache_conditions, self.test_cache_conditions)
+
+ br.SetCacheConditions(phony_cache_conditions)
+ self.assertEqual(br.cache_conditions, phony_cache_conditions)
+
+ br.SetCacheConditions(self.test_cache_conditions)
+ self.assertEqual(br.cache_conditions, self.test_cache_conditions)
+
+
+if __name__ == "__main__":
+ unittest.main()
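
testDryRun above pins the BenchmarkRun constructor signature with inspect.getfullargspec so that tests which build the object positionally cannot drift out of sync silently. A minimal sketch of the same guard pattern, using a hypothetical Widget class rather than crosperf code:

import inspect
import unittest


class Widget:
    """Stand-in class; only its constructor signature matters here."""

    def __init__(self, name, label, iteration):
        self.name = name
        self.label = label
        self.iteration = iteration


class WidgetSignatureTest(unittest.TestCase):
    def test_ctor_signature_unchanged(self):
        # Fails loudly if an argument is added, removed, or renamed without
        # updating the callers that construct Widget positionally.
        expected = ["self", "name", "label", "iteration"]
        arg_spec = inspect.getfullargspec(Widget.__init__)
        self.assertEqual(arg_spec.args, expected)


if __name__ == "__main__":
    unittest.main()
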
diff --git a/crosperf/benchmark_unittest.py b/crosperf/benchmark_unittest.py
index 70508b19..bb23bdbb 100755
--- a/crosperf/benchmark_unittest.py
+++ b/crosperf/benchmark_unittest.py
@@ -1,13 +1,12 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
-# Copyright 2014 The Chromium OS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the Crosperf Benchmark class."""
-from __future__ import print_function
import inspect
import unittest
@@ -16,57 +15,70 @@ from benchmark import Benchmark
class BenchmarkTestCase(unittest.TestCase):
- """Individual tests for the Benchmark class."""
+ """Individual tests for the Benchmark class."""
- def test_benchmark(self):
- # Test creating a benchmark with all the fields filled out.
- b1 = Benchmark(
- 'b1_test', # name
- 'octane', # test_name
- '', # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- 'record -e cycles', # perf_args
- 'telemetry_Crosperf', # suite
- True) # show_all_results
- self.assertTrue(b1.suite, 'telemetry_Crosperf')
+ def test_benchmark(self):
+ # Test creating a benchmark with all the fields filled out.
+ b1 = Benchmark(
+ "b1_test", # name
+ "octane", # test_name
+ "", # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ "record -e cycles", # perf_args
+ "telemetry_Crosperf", # suite
+ True,
+ ) # show_all_results
+ self.assertTrue(b1.suite, "telemetry_Crosperf")
- # Test creating a benchmark field with default fields left out.
- b2 = Benchmark(
- 'b2_test', # name
- 'octane', # test_name
- '', # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- 'record -e cycles') # perf_args
- self.assertEqual(b2.suite, '')
- self.assertFalse(b2.show_all_results)
+ # Test creating a benchmark field with default fields left out.
+ b2 = Benchmark(
+ "b2_test", # name
+ "octane", # test_name
+ "", # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ "record -e cycles",
+ ) # perf_args
+ self.assertEqual(b2.suite, "")
+ self.assertFalse(b2.show_all_results)
- # Test explicitly creating 'suite=Telemetry' and 'show_all_results=False"
- # and see what happens.
- b3 = Benchmark(
- 'b3_test', # name
- 'octane', # test_name
- '', # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- 'record -e cycles', # perf_args
- 'telemetry', # suite
- False) # show_all_results
- self.assertTrue(b3.show_all_results)
+ # Test explicitly creating 'suite=Telemetry' and 'show_all_results=False"
+ # and see what happens.
+ b3 = Benchmark(
+ "b3_test", # name
+ "octane", # test_name
+ "", # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ "record -e cycles", # perf_args
+ "telemetry", # suite
+ False,
+ ) # show_all_results
+ self.assertTrue(b3.show_all_results)
- # Check to see if the args to Benchmark have changed since the last time
- # this test was updated.
- args_list = [
- 'self', 'name', 'test_name', 'test_args', 'iterations', 'rm_chroot_tmp',
- 'perf_args', 'suite', 'show_all_results', 'retries', 'run_local',
- 'cwp_dso', 'weight'
- ]
- arg_spec = inspect.getfullargspec(Benchmark.__init__)
- self.assertEqual(len(arg_spec.args), len(args_list))
- for arg in args_list:
- self.assertIn(arg, arg_spec.args)
+ # Check to see if the args to Benchmark have changed since the last time
+ # this test was updated.
+ args_list = [
+ "self",
+ "name",
+ "test_name",
+ "test_args",
+ "iterations",
+ "rm_chroot_tmp",
+ "perf_args",
+ "suite",
+ "show_all_results",
+ "retries",
+ "run_local",
+ "cwp_dso",
+ "weight",
+ ]
+ arg_spec = inspect.getfullargspec(Benchmark.__init__)
+ self.assertEqual(len(arg_spec.args), len(args_list))
+ for arg in args_list:
+ self.assertIn(arg, arg_spec.args)
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/crosperf/column_chart.py b/crosperf/column_chart.py
index 400979ee..6ed99bf0 100644
--- a/crosperf/column_chart.py
+++ b/crosperf/column_chart.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -7,46 +7,46 @@
class ColumnChart(object):
- """class to draw column chart."""
-
- def __init__(self, title, width, height):
- self.title = title
- self.chart_div = ''.join(t for t in title if t.isalnum())
- self.width = width
- self.height = height
- self.columns = []
- self.rows = []
- self.series = []
-
- def AddSeries(self, column_name, series_type, color):
- for i in range(len(self.columns)):
- if column_name == self.columns[i][1]:
- self.series.append((i - 1, series_type, color))
- break
-
- def AddColumn(self, name, column_type):
- self.columns.append((column_type, name))
-
- def AddRow(self, row):
- self.rows.append(row)
-
- def GetJavascript(self):
- res = 'var data = new google.visualization.DataTable();\n'
- for column in self.columns:
- res += "data.addColumn('%s', '%s');\n" % column
- res += 'data.addRows(%s);\n' % len(self.rows)
- for row in range(len(self.rows)):
- for column in range(len(self.columns)):
- val = self.rows[row][column]
- if isinstance(val, str):
- val = "'%s'" % val
- res += 'data.setValue(%s, %s, %s);\n' % (row, column, val)
-
- series_javascript = ''
- for series in self.series:
- series_javascript += "%s: {type: '%s', color: '%s'}, " % series
-
- chart_add_javascript = """
+ """class to draw column chart."""
+
+ def __init__(self, title, width, height):
+ self.title = title
+ self.chart_div = "".join(t for t in title if t.isalnum())
+ self.width = width
+ self.height = height
+ self.columns = []
+ self.rows = []
+ self.series = []
+
+ def AddSeries(self, column_name, series_type, color):
+ for i in range(len(self.columns)):
+ if column_name == self.columns[i][1]:
+ self.series.append((i - 1, series_type, color))
+ break
+
+ def AddColumn(self, name, column_type):
+ self.columns.append((column_type, name))
+
+ def AddRow(self, row):
+ self.rows.append(row)
+
+ def GetJavascript(self):
+ res = "var data = new google.visualization.DataTable();\n"
+ for column in self.columns:
+ res += "data.addColumn('%s', '%s');\n" % column
+ res += "data.addRows(%s);\n" % len(self.rows)
+ for row in range(len(self.rows)):
+ for column in range(len(self.columns)):
+ val = self.rows[row][column]
+ if isinstance(val, str):
+ val = "'%s'" % val
+ res += "data.setValue(%s, %s, %s);\n" % (row, column, val)
+
+ series_javascript = ""
+ for series in self.series:
+ series_javascript += "%s: {type: '%s', color: '%s'}, " % series
+
+ chart_add_javascript = """
var chart_%s = new google.visualization.ComboChart(
document.getElementById('%s'));
chart_%s.draw(data, {width: %s, height: %s, title: '%s', legend: 'none',
@@ -54,10 +54,16 @@ chart_%s.draw(data, {width: %s, height: %s, title: '%s', legend: 'none',
vAxis: {minValue: 0}})
"""
- res += chart_add_javascript % (self.chart_div, self.chart_div,
- self.chart_div, self.width, self.height,
- self.title, series_javascript)
- return res
+ res += chart_add_javascript % (
+ self.chart_div,
+ self.chart_div,
+ self.chart_div,
+ self.width,
+ self.height,
+ self.title,
+ series_javascript,
+ )
+ return res
- def GetDiv(self):
- return "<div id='%s' class='chart'></div>" % self.chart_div
+ def GetDiv(self):
+ return "<div id='%s' class='chart'></div>" % self.chart_div
diff --git a/crosperf/compare_machines.py b/crosperf/compare_machines.py
index c73f8756..756753a2 100644
--- a/crosperf/compare_machines.py
+++ b/crosperf/compare_machines.py
@@ -1,67 +1,71 @@
# -*- coding: utf-8 -*-
-# Copyright 2014 The Chromium OS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module to compare two machines."""
-from __future__ import print_function
+import argparse
import os.path
import sys
-import argparse
from machine_manager import CrosMachine
def PrintUsage(msg):
- print(msg)
- print('Usage: ')
- print('\n compare_machines.py --chromeos_root=/path/to/chroot/ '
- 'machine1 machine2 ...')
+ print(msg)
+ print("Usage: ")
+ print(
+ "\n compare_machines.py --chromeos_root=/path/to/chroot/ "
+ "machine1 machine2 ..."
+ )
def Main(argv):
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '--chromeos_root',
- default='/path/to/chromeos',
- dest='chromeos_root',
- help='ChromeOS root checkout directory')
- parser.add_argument('remotes', nargs=argparse.REMAINDER)
-
- options = parser.parse_args(argv)
-
- machine_list = options.remotes
- if len(machine_list) < 2:
- PrintUsage('ERROR: Must specify at least two machines.')
- return 1
- elif not os.path.exists(options.chromeos_root):
- PrintUsage('Error: chromeos_root does not exist %s' % options.chromeos_root)
- return 1
-
- chroot = options.chromeos_root
- cros_machines = []
- test_machine_checksum = None
- for m in machine_list:
- cm = CrosMachine(m, chroot, 'average')
- cros_machines = cros_machines + [cm]
- test_machine_checksum = cm.machine_checksum
-
- ret = 0
- for cm in cros_machines:
- print('checksum for %s : %s' % (cm.name, cm.machine_checksum))
- if cm.machine_checksum != test_machine_checksum:
- ret = 1
- print('Machine checksums do not all match')
-
- if ret == 0:
- print('Machines all match.')
-
- return ret
-
-
-if __name__ == '__main__':
- retval = Main(sys.argv[1:])
- sys.exit(retval)
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--chromeos_root",
+ default="/path/to/chromeos",
+ dest="chromeos_root",
+ help="ChromeOS root checkout directory",
+ )
+ parser.add_argument("remotes", nargs=argparse.REMAINDER)
+
+ options = parser.parse_args(argv)
+
+ machine_list = options.remotes
+ if len(machine_list) < 2:
+ PrintUsage("ERROR: Must specify at least two machines.")
+ return 1
+ elif not os.path.exists(options.chromeos_root):
+ PrintUsage(
+ "Error: chromeos_root does not exist %s" % options.chromeos_root
+ )
+ return 1
+
+ chroot = options.chromeos_root
+ cros_machines = []
+ test_machine_checksum = None
+ for m in machine_list:
+ cm = CrosMachine(m, chroot, "average")
+ cros_machines = cros_machines + [cm]
+ test_machine_checksum = cm.machine_checksum
+
+ ret = 0
+ for cm in cros_machines:
+ print("checksum for %s : %s" % (cm.name, cm.machine_checksum))
+ if cm.machine_checksum != test_machine_checksum:
+ ret = 1
+ print("Machine checksums do not all match")
+
+ if ret == 0:
+ print("Machines all match.")
+
+ return ret
+
+
+if __name__ == "__main__":
+ retval = Main(sys.argv[1:])
+ sys.exit(retval)
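compare_machines.Main can also be called directly from Python; a hedged sketch, with the chroot path and DUT hostnames below as placeholders:

    import sys
    from compare_machines import Main

    # Returns 0 only if every DUT reports the same machine checksum.
    sys.exit(Main([
        "--chromeos_root=/path/to/chromeos",
        "dut-hostname-1.cros",
        "dut-hostname-2.cros",
    ]))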
diff --git a/crosperf/config.py b/crosperf/config.py
index 61ad9c1a..c2a7fe5d 100644
--- a/crosperf/config.py
+++ b/crosperf/config.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -8,8 +8,8 @@ config = {}
def GetConfig(key):
- return config.get(key)
+ return config.get(key)
def AddConfig(key, value):
- config[key] = value
+ config[key] = value
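The config module is just a process-global key/value store; a quick usage sketch (the keys below are arbitrary examples):

    import config

    config.AddConfig("logs_dir", "/tmp/crosperf_logs")
    assert config.GetConfig("logs_dir") == "/tmp/crosperf_logs"
    assert config.GetConfig("missing_key") is None  # unknown keys return None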
diff --git a/crosperf/config_unittest.py b/crosperf/config_unittest.py
index 208f44dc..fdff7ea6 100755
--- a/crosperf/config_unittest.py
+++ b/crosperf/config_unittest.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2014 The Chromium OS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for config.py"""
-from __future__ import print_function
import unittest
@@ -14,40 +13,40 @@ import config
class ConfigTestCase(unittest.TestCase):
- """Class for the config unit tests."""
+ """Class for the config unit tests."""
- def test_config(self):
- # Verify that config exists, that it's a dictionary, and that it's
- # empty.
- self.assertTrue(isinstance(config.config, dict))
- self.assertEqual(len(config.config), 0)
+ def test_config(self):
+ # Verify that config exists, that it's a dictionary, and that it's
+ # empty.
+ self.assertTrue(isinstance(config.config, dict))
+ self.assertEqual(len(config.config), 0)
- # Verify that attempting to get a non-existant key out of the
- # dictionary returns None.
- self.assertIsNone(config.GetConfig('rabbit'))
- self.assertIsNone(config.GetConfig('key1'))
+        # Verify that attempting to get a non-existent key out of the
+ # dictionary returns None.
+ self.assertIsNone(config.GetConfig("rabbit"))
+ self.assertIsNone(config.GetConfig("key1"))
- config.AddConfig('key1', 16)
- config.AddConfig('key2', 32)
- config.AddConfig('key3', 'third value')
+ config.AddConfig("key1", 16)
+ config.AddConfig("key2", 32)
+ config.AddConfig("key3", "third value")
- # Verify that after 3 calls to AddConfig we have 3 values in the
- # dictionary.
- self.assertEqual(len(config.config), 3)
+ # Verify that after 3 calls to AddConfig we have 3 values in the
+ # dictionary.
+ self.assertEqual(len(config.config), 3)
- # Verify that GetConfig works and gets the expected values.
- self.assertIs(config.GetConfig('key2'), 32)
- self.assertIs(config.GetConfig('key3'), 'third value')
- self.assertIs(config.GetConfig('key1'), 16)
+ # Verify that GetConfig works and gets the expected values.
+ self.assertIs(config.GetConfig("key2"), 32)
+ self.assertIs(config.GetConfig("key3"), "third value")
+ self.assertIs(config.GetConfig("key1"), 16)
- # Re-set config.
- config.config.clear()
+ # Re-set config.
+ config.config.clear()
- # Verify that config exists, that it's a dictionary, and that it's
- # empty.
- self.assertTrue(isinstance(config.config, dict))
- self.assertEqual(len(config.config), 0)
+ # Verify that config exists, that it's a dictionary, and that it's
+ # empty.
+ self.assertTrue(isinstance(config.config, dict))
+ self.assertEqual(len(config.config), 0)
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/crosperf/crosperf b/crosperf/crosperf
index c98f2dd4..9a7bde0a 100755
--- a/crosperf/crosperf
+++ b/crosperf/crosperf
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
diff --git a/crosperf/crosperf.py b/crosperf/crosperf.py
index f195b13a..aace2c80 100755
--- a/crosperf/crosperf.py
+++ b/crosperf/crosperf.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The driver script for running performance benchmarks on ChromeOS."""
-from __future__ import print_function
import argparse
import atexit
@@ -14,139 +13,145 @@ import os
import signal
import sys
-from experiment_runner import ExperimentRunner
-from experiment_runner import MockExperimentRunner
-from experiment_factory import ExperimentFactory
-from experiment_file import ExperimentFile
-from settings_factory import GlobalSettings
-
# This import causes pylint to warn about "No name 'logger' in module
# 'cros_utils'". I do not understand why. The import works fine in python.
# pylint: disable=no-name-in-module
from cros_utils import logger
-
+from experiment_factory import ExperimentFactory
+from experiment_file import ExperimentFile
+from experiment_runner import ExperimentRunner
+from experiment_runner import MockExperimentRunner
+from settings_factory import GlobalSettings
import test_flag
+
HAS_FAILURE = 1
ALL_FAILED = 2
def SetupParserOptions(parser):
- """Add all options to the parser."""
- parser.add_argument(
- '--dry_run',
- dest='dry_run',
- help=('Parse the experiment file and '
- 'show what will be done'),
- action='store_true',
- default=False)
- # Allow each of the global fields to be overridden by passing in
- # options. Add each global field as an option.
- option_settings = GlobalSettings('')
- for field_name in option_settings.fields:
- field = option_settings.fields[field_name]
+ """Add all options to the parser."""
parser.add_argument(
- '--%s' % field.name,
- dest=field.name,
- help=field.description,
- action='store')
+ "--dry_run",
+ dest="dry_run",
+ help=("Parse the experiment file and " "show what will be done"),
+ action="store_true",
+ default=False,
+ )
+ # Allow each of the global fields to be overridden by passing in
+ # options. Add each global field as an option.
+ option_settings = GlobalSettings("")
+ for field_name in option_settings.fields:
+ field = option_settings.fields[field_name]
+ parser.add_argument(
+ "--%s" % field.name,
+ dest=field.name,
+ help=field.description,
+ action="store",
+ )
def ConvertOptionsToSettings(options):
- """Convert options passed in into global settings."""
- option_settings = GlobalSettings('option_settings')
- for option_name in options.__dict__:
- if (options.__dict__[option_name] is not None and
- option_name in option_settings.fields):
- option_settings.SetField(option_name, options.__dict__[option_name])
- return option_settings
+ """Convert options passed in into global settings."""
+ option_settings = GlobalSettings("option_settings")
+ for option_name in options.__dict__:
+ if (
+ options.__dict__[option_name] is not None
+ and option_name in option_settings.fields
+ ):
+ option_settings.SetField(option_name, options.__dict__[option_name])
+ return option_settings
def Cleanup(experiment):
- """Handler function which is registered to the atexit handler."""
- experiment.Cleanup()
+ """Handler function which is registered to the atexit handler."""
+ experiment.Cleanup()
def CallExitHandler(signum, _):
- """Signal handler that transforms a signal into a call to exit.
+ """Signal handler that transforms a signal into a call to exit.
- This is useful because functionality registered by "atexit" will
- be called. It also means you can "catch" the signal by catching
- the SystemExit exception.
- """
- sys.exit(128 + signum)
+ This is useful because functionality registered by "atexit" will
+ be called. It also means you can "catch" the signal by catching
+ the SystemExit exception.
+ """
+ sys.exit(128 + signum)
def RunCrosperf(argv):
- parser = argparse.ArgumentParser()
-
- parser.add_argument(
- '--noschedv2',
- dest='noschedv2',
- default=False,
- action='store_true',
- help=('Do not use new scheduler. '
- 'Use original scheduler instead.'))
- parser.add_argument(
- '-l',
- '--log_dir',
- dest='log_dir',
- default='',
- help='The log_dir, default is under <crosperf_logs>/logs')
-
- SetupParserOptions(parser)
- options, args = parser.parse_known_args(argv)
-
- # Convert the relevant options that are passed in into a settings
- # object which will override settings in the experiment file.
- option_settings = ConvertOptionsToSettings(options)
- log_dir = os.path.abspath(os.path.expanduser(options.log_dir))
- logger.GetLogger(log_dir)
-
- if len(args) == 2:
- experiment_filename = args[1]
- else:
- parser.error('Invalid number arguments.')
-
- working_directory = os.getcwd()
- if options.dry_run:
- test_flag.SetTestMode(True)
-
- experiment_file = ExperimentFile(
- open(experiment_filename, encoding='utf-8'), option_settings)
- if not experiment_file.GetGlobalSettings().GetField('name'):
- experiment_name = os.path.basename(experiment_filename)
- experiment_file.GetGlobalSettings().SetField('name', experiment_name)
- experiment = ExperimentFactory().GetExperiment(experiment_file,
- working_directory, log_dir)
-
- json_report = experiment_file.GetGlobalSettings().GetField('json_report')
-
- signal.signal(signal.SIGTERM, CallExitHandler)
- atexit.register(Cleanup, experiment)
-
- if options.dry_run:
- runner = MockExperimentRunner(experiment, json_report)
- else:
- runner = ExperimentRunner(
- experiment, json_report, using_schedv2=(not options.noschedv2))
-
- ret = runner.Run()
- if ret == HAS_FAILURE:
- raise RuntimeError('One or more benchmarks failed.')
- if ret == ALL_FAILED:
- raise RuntimeError('All benchmarks failed to run.')
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument(
+ "--noschedv2",
+ dest="noschedv2",
+ default=False,
+ action="store_true",
+ help=("Do not use new scheduler. " "Use original scheduler instead."),
+ )
+ parser.add_argument(
+ "-l",
+ "--log_dir",
+ dest="log_dir",
+ default="",
+ help="The log_dir, default is under <crosperf_logs>/logs",
+ )
+
+ SetupParserOptions(parser)
+ options, args = parser.parse_known_args(argv)
+
+ # Convert the relevant options that are passed in into a settings
+ # object which will override settings in the experiment file.
+ option_settings = ConvertOptionsToSettings(options)
+ log_dir = os.path.abspath(os.path.expanduser(options.log_dir))
+ logger.GetLogger(log_dir)
+
+ if len(args) == 2:
+ experiment_filename = args[1]
+ else:
+        parser.error("Invalid number of arguments.")
+
+ working_directory = os.getcwd()
+ if options.dry_run:
+ test_flag.SetTestMode(True)
+
+ experiment_file = ExperimentFile(
+ open(experiment_filename, encoding="utf-8"), option_settings
+ )
+ if not experiment_file.GetGlobalSettings().GetField("name"):
+ experiment_name = os.path.basename(experiment_filename)
+ experiment_file.GetGlobalSettings().SetField("name", experiment_name)
+ experiment = ExperimentFactory().GetExperiment(
+ experiment_file, working_directory, log_dir
+ )
+
+ json_report = experiment_file.GetGlobalSettings().GetField("json_report")
+
+ signal.signal(signal.SIGTERM, CallExitHandler)
+ atexit.register(Cleanup, experiment)
+
+ if options.dry_run:
+ runner = MockExperimentRunner(experiment, json_report)
+ else:
+ runner = ExperimentRunner(
+ experiment, json_report, using_schedv2=(not options.noschedv2)
+ )
+
+ ret = runner.Run()
+ if ret == HAS_FAILURE:
+ raise RuntimeError("One or more benchmarks failed.")
+ if ret == ALL_FAILED:
+ raise RuntimeError("All benchmarks failed to run.")
def Main(argv):
- try:
- RunCrosperf(argv)
- except Exception:
- # Flush buffers before exiting to avoid out of order printing
- sys.stdout.flush()
- # Raise exception prints out traceback
- raise
+ try:
+ RunCrosperf(argv)
+ except Exception:
+ # Flush buffers before exiting to avoid out of order printing
+ sys.stdout.flush()
+ # Raise exception prints out traceback
+ raise
-if __name__ == '__main__':
- Main(sys.argv)
+if __name__ == "__main__":
+ Main(sys.argv)
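For reference, RunCrosperf still expects exactly one positional experiment file plus optional flag overrides for any GlobalSettings field; a hedged sketch of a programmatic invocation ("example.exp" and the log directory are placeholders):

    import crosperf

    # Roughly equivalent to: crosperf example.exp --dry_run --log_dir=/tmp/crosperf_logs
    crosperf.Main([
        "crosperf",       # argv[0]; only args[1], the experiment file, is read
        "example.exp",
        "--dry_run",      # parse the experiment and show what would be done
        "--log_dir=/tmp/crosperf_logs",
    ])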
diff --git a/crosperf/crosperf_autolock.py b/crosperf/crosperf_autolock.py
index b593fa9c..011f01e3 100755
--- a/crosperf/crosperf_autolock.py
+++ b/crosperf/crosperf_autolock.py
@@ -1,19 +1,20 @@
#!/usr/bin/env python3
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
+# Copyright 2021 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper script to automatically lock devices for crosperf."""
-import os
-import sys
import argparse
-import subprocess
import contextlib
-import json
-from typing import Optional, Any
import dataclasses
+import json
+import os
+import subprocess
+import sys
+from typing import Any, Dict, List, Optional, Tuple
+
# Have to do sys.path hackery because crosperf relies on PYTHONPATH
# modifications.
@@ -21,261 +22,292 @@ PARENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(PARENT_DIR)
-def main(sys_args: list[str]) -> Optional[str]:
- """Run crosperf_autolock. Returns error msg or None"""
- args, leftover_args = parse_args(sys_args)
- fleet_params = [
- CrosfleetParams(board=args.board,
- pool=args.pool,
- lease_time=args.lease_time)
- for _ in range(args.num_leases)
- ]
- if not fleet_params:
- return ('No board names identified. If you want to use'
- ' a known host, just use crosperf directly.')
- try:
- _run_crosperf(fleet_params, args.dut_lock_timeout, leftover_args)
- except BoardLockError as e:
- _eprint('ERROR:', e)
- _eprint('May need to login to crosfleet? Run "crosfleet login"')
- _eprint('The leases may also be successful later on. '
- 'Check with "crosfleet dut leases"')
- return 'crosperf_autolock failed'
- except BoardReleaseError as e:
- _eprint('ERROR:', e)
- _eprint('May need to re-run "crosfleet dut abandon"')
- return 'crosperf_autolock failed'
- return None
-
-
-def parse_args(args: list[str]) -> tuple[Any, list]:
- """Parse the CLI arguments."""
- parser = argparse.ArgumentParser(
- 'crosperf_autolock',
- description='Wrapper around crosperf'
- ' to autolock DUTs from crosfleet.',
- formatter_class=argparse.ArgumentDefaultsHelpFormatter)
- parser.add_argument('--board',
- type=str,
- help='Space or comma separated list of boards to lock',
- required=True,
- default=argparse.SUPPRESS)
- parser.add_argument('--num-leases',
- type=int,
- help='Number of boards to lock.',
- metavar='NUM',
- default=1)
- parser.add_argument('--pool',
- type=str,
- help='Pool to pull from.',
- default='DUT_POOL_QUOTA')
- parser.add_argument('--dut-lock-timeout',
- type=float,
- metavar='SEC',
- help='Number of seconds we want to try to lease a board'
- ' from crosfleet. This option does NOT change the'
- ' lease length.',
- default=600)
- parser.add_argument('--lease-time',
- type=int,
- metavar='MIN',
- help='Number of minutes to lock the board. Max is 1440.',
- default=1440)
- parser.epilog = (
- 'For more detailed flags, you have to read the args taken by the'
- ' crosperf executable. Args are passed transparently to crosperf.')
- return parser.parse_known_args(args)
+def main(sys_args: List[str]) -> Optional[str]:
+ """Run crosperf_autolock. Returns error msg or None"""
+ args, leftover_args = parse_args(sys_args)
+ fleet_params = [
+ CrosfleetParams(
+ board=args.board, pool=args.pool, lease_time=args.lease_time
+ )
+ for _ in range(args.num_leases)
+ ]
+ if not fleet_params:
+ return (
+ "No board names identified. If you want to use"
+ " a known host, just use crosperf directly."
+ )
+ try:
+ _run_crosperf(fleet_params, args.dut_lock_timeout, leftover_args)
+ except BoardLockError as e:
+ _eprint("ERROR:", e)
+ _eprint('May need to login to crosfleet? Run "crosfleet login"')
+ _eprint(
+ "The leases may also be successful later on. "
+ 'Check with "crosfleet dut leases"'
+ )
+ return "crosperf_autolock failed"
+ except BoardReleaseError as e:
+ _eprint("ERROR:", e)
+ _eprint('May need to re-run "crosfleet dut abandon"')
+ return "crosperf_autolock failed"
+ return None
+
+
+def parse_args(args: List[str]) -> Tuple[Any, List]:
+ """Parse the CLI arguments."""
+ parser = argparse.ArgumentParser(
+ "crosperf_autolock",
+ description="Wrapper around crosperf"
+ " to autolock DUTs from crosfleet.",
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ )
+ parser.add_argument(
+ "--board",
+ type=str,
+ help="Space or comma separated list of boards to lock",
+ required=True,
+ default=argparse.SUPPRESS,
+ )
+ parser.add_argument(
+ "--num-leases",
+ type=int,
+ help="Number of boards to lock.",
+ metavar="NUM",
+ default=1,
+ )
+ parser.add_argument(
+ "--pool", type=str, help="Pool to pull from.", default="DUT_POOL_QUOTA"
+ )
+ parser.add_argument(
+ "--dut-lock-timeout",
+ type=float,
+ metavar="SEC",
+ help="Number of seconds we want to try to lease a board"
+ " from crosfleet. This option does NOT change the"
+ " lease length.",
+ default=600,
+ )
+ parser.add_argument(
+ "--lease-time",
+ type=int,
+ metavar="MIN",
+ help="Number of minutes to lock the board. Max is 1440.",
+ default=1440,
+ )
+ parser.epilog = (
+ "For more detailed flags, you have to read the args taken by the"
+ " crosperf executable. Args are passed transparently to crosperf."
+ )
+ return parser.parse_known_args(args)
class BoardLockError(Exception):
- """Error to indicate failure to lock a board."""
+ """Error to indicate failure to lock a board."""
- def __init__(self, msg: str):
- self.msg = 'BoardLockError: ' + msg
- super().__init__(self.msg)
+ def __init__(self, msg: str):
+ self.msg = "BoardLockError: " + msg
+ super().__init__(self.msg)
class BoardReleaseError(Exception):
- """Error to indicate failure to release a board."""
+ """Error to indicate failure to release a board."""
- def __init__(self, msg: str):
- self.msg = 'BoardReleaseError: ' + msg
- super().__init__(self.msg)
+ def __init__(self, msg: str):
+ self.msg = "BoardReleaseError: " + msg
+ super().__init__(self.msg)
@dataclasses.dataclass(frozen=True)
class CrosfleetParams:
- """Dataclass to hold all crosfleet parameterizations."""
- board: str
- pool: str
- lease_time: int
+ """Dataclass to hold all crosfleet parameterizations."""
+
+ board: str
+ pool: str
+ lease_time: int
def _eprint(*msg, **kwargs):
- print(*msg, file=sys.stderr, **kwargs)
-
-
-def _run_crosperf(crosfleet_params: list[CrosfleetParams], lock_timeout: float,
- leftover_args: list[str]):
- """Autolock devices and run crosperf with leftover arguments.
-
- Raises:
- BoardLockError: When board was unable to be locked.
- BoardReleaseError: When board was unable to be released.
- """
- if not crosfleet_params:
- raise ValueError('No crosfleet params given; cannot call crosfleet.')
-
- # We'll assume all the boards are the same type, which seems to be the case
- # in experiments that actually get used.
- passed_board_arg = crosfleet_params[0].board
- with contextlib.ExitStack() as stack:
- dut_hostnames = []
- for param in crosfleet_params:
- print(
- f'Sent lock request for {param.board} for {param.lease_time} minutes'
- '\nIf this fails, you may need to run "crosfleet dut abandon <...>"')
- # May raise BoardLockError, abandoning previous DUTs.
- dut_hostname = stack.enter_context(
- crosfleet_machine_ctx(
- param.board,
- param.lease_time,
- lock_timeout,
- {'label-pool': param.pool},
- ))
- if dut_hostname:
- print(f'Locked {param.board} machine: {dut_hostname}')
- dut_hostnames.append(dut_hostname)
-
- # We import crosperf late, because this import is extremely slow.
- # We don't want the user to wait several seconds just to get
- # help info.
- import crosperf
- for dut_hostname in dut_hostnames:
- crosperf.Main([
- sys.argv[0],
- '--no_lock',
- 'True',
- '--remote',
- dut_hostname,
- '--board',
- passed_board_arg,
- ] + leftover_args)
+ print(*msg, file=sys.stderr, **kwargs)
+
+
+def _run_crosperf(
+ crosfleet_params: List[CrosfleetParams],
+ lock_timeout: float,
+ leftover_args: List[str],
+):
+ """Autolock devices and run crosperf with leftover arguments.
+
+ Raises:
+ BoardLockError: When board was unable to be locked.
+ BoardReleaseError: When board was unable to be released.
+ """
+ if not crosfleet_params:
+ raise ValueError("No crosfleet params given; cannot call crosfleet.")
+
+ # We'll assume all the boards are the same type, which seems to be the case
+ # in experiments that actually get used.
+ passed_board_arg = crosfleet_params[0].board
+ with contextlib.ExitStack() as stack:
+ dut_hostnames = []
+ for param in crosfleet_params:
+ print(
+ f"Sent lock request for {param.board} for {param.lease_time} minutes"
+ '\nIf this fails, you may need to run "crosfleet dut abandon <...>"'
+ )
+ # May raise BoardLockError, abandoning previous DUTs.
+ dut_hostname = stack.enter_context(
+ crosfleet_machine_ctx(
+ param.board,
+ param.lease_time,
+ lock_timeout,
+ {"label-pool": param.pool},
+ )
+ )
+ if dut_hostname:
+ print(f"Locked {param.board} machine: {dut_hostname}")
+ dut_hostnames.append(dut_hostname)
+
+ # We import crosperf late, because this import is extremely slow.
+ # We don't want the user to wait several seconds just to get
+ # help info.
+ import crosperf
+
+ for dut_hostname in dut_hostnames:
+ crosperf.Main(
+ [
+ sys.argv[0],
+ "--no_lock",
+ "True",
+ "--remote",
+ dut_hostname,
+ "--board",
+ passed_board_arg,
+ ]
+ + leftover_args
+ )
@contextlib.contextmanager
-def crosfleet_machine_ctx(board: str,
- lease_minutes: int,
- lock_timeout: float,
- dims: dict[str, Any],
- abandon_timeout: float = 120.0) -> Any:
- """Acquire dut from crosfleet, and release once it leaves the context.
-
- Args:
- board: Board type to lease.
- lease_minutes: Length of lease, in minutes.
- lock_timeout: How long to wait for a lock until quitting.
- dims: Dictionary of dimension arguments to pass to crosfleet's '-dims'
- abandon_timeout (optional): How long to wait for releasing until quitting.
-
- Yields:
- A string representing the crosfleet DUT hostname.
-
- Raises:
- BoardLockError: When board was unable to be locked.
- BoardReleaseError: When board was unable to be released.
- """
- # This lock may raise an exception, but if it does, we can't release
- # the DUT anyways as we won't have the dut_hostname.
- dut_hostname = crosfleet_autolock(board, lease_minutes, dims, lock_timeout)
- try:
- yield dut_hostname
- finally:
- if dut_hostname:
- crosfleet_release(dut_hostname, abandon_timeout)
-
-
-def crosfleet_autolock(board: str, lease_minutes: int, dims: dict[str, Any],
- timeout_sec: float) -> str:
- """Lock a device using crosfleet, paramaterized by the board type.
-
- Args:
- board: Board of the DUT we want to lock.
- lease_minutes: Number of minutes we're trying to lease the DUT for.
- dims: Dictionary of dimension arguments to pass to crosfleet's '-dims'
- timeout_sec: Number of seconds to try to lease the DUT. Default 120s.
-
- Returns:
- The hostname of the board, or empty string if it couldn't be parsed.
-
- Raises:
- BoardLockError: When board was unable to be locked.
- """
- crosfleet_cmd_args = [
- 'crosfleet',
- 'dut',
- 'lease',
- '-json',
- '-reason="crosperf autolock"',
- f'-board={board}',
- f'-minutes={lease_minutes}',
- ]
- if dims:
- dims_arg = ','.join('{}={}'.format(k, v) for k, v in dims.items())
- crosfleet_cmd_args.extend(['-dims', f'{dims_arg}'])
-
- try:
- output = subprocess.check_output(crosfleet_cmd_args,
- timeout=timeout_sec,
- encoding='utf-8')
- except subprocess.CalledProcessError as e:
- raise BoardLockError(
- f'crosfleet dut lease failed with exit code: {e.returncode}')
- except subprocess.TimeoutExpired as e:
- raise BoardLockError(f'crosfleet dut lease timed out after {timeout_sec}s;'
- ' please abandon the dut manually.')
-
- try:
- json_obj = json.loads(output)
- dut_hostname = json_obj['DUT']['Hostname']
- if not isinstance(dut_hostname, str):
- raise TypeError('dut_hostname was not a string')
- except (json.JSONDecodeError, IndexError, KeyError, TypeError) as e:
- raise BoardLockError(
- f'crosfleet dut lease output was parsed incorrectly: {e!r};'
- f' observed output was {output}')
- return _maybe_append_suffix(dut_hostname)
+def crosfleet_machine_ctx(
+ board: str,
+ lease_minutes: int,
+ lock_timeout: float,
+ dims: Dict[str, Any],
+ abandon_timeout: float = 120.0,
+) -> Any:
+ """Acquire dut from crosfleet, and release once it leaves the context.
+
+ Args:
+ board: Board type to lease.
+ lease_minutes: Length of lease, in minutes.
+ lock_timeout: How long to wait for a lock until quitting.
+ dims: Dictionary of dimension arguments to pass to crosfleet's '-dims'
+ abandon_timeout: How long to wait for releasing until quitting.
+
+ Yields:
+ A string representing the crosfleet DUT hostname.
+
+ Raises:
+ BoardLockError: When board was unable to be locked.
+ BoardReleaseError: When board was unable to be released.
+ """
+ # This lock may raise an exception, but if it does, we can't release
+ # the DUT anyways as we won't have the dut_hostname.
+ dut_hostname = crosfleet_autolock(board, lease_minutes, dims, lock_timeout)
+ try:
+ yield dut_hostname
+ finally:
+ if dut_hostname:
+ crosfleet_release(dut_hostname, abandon_timeout)
+
+
+def crosfleet_autolock(
+ board: str, lease_minutes: int, dims: Dict[str, Any], timeout_sec: float
+) -> str:
+ """Lock a device using crosfleet, paramaterized by the board type.
+
+ Args:
+ board: Board of the DUT we want to lock.
+ lease_minutes: Number of minutes we're trying to lease the DUT for.
+ dims: Dictionary of dimension arguments to pass to crosfleet's '-dims'
+        timeout_sec: Number of seconds to try to lease the DUT.
+
+ Returns:
+ The hostname of the board, or empty string if it couldn't be parsed.
+
+ Raises:
+ BoardLockError: When board was unable to be locked.
+ """
+ crosfleet_cmd_args = [
+ "crosfleet",
+ "dut",
+ "lease",
+ "-json",
+ '-reason="crosperf autolock"',
+ f"-board={board}",
+ f"-minutes={lease_minutes}",
+ ]
+ if dims:
+ dims_arg = ",".join(f"{k}={v}" for k, v in dims.items())
+ crosfleet_cmd_args.extend(["-dims", f"{dims_arg}"])
+
+ try:
+ output = subprocess.check_output(
+ crosfleet_cmd_args, timeout=timeout_sec, encoding="utf-8"
+ )
+ except subprocess.CalledProcessError as e:
+ raise BoardLockError(
+ f"crosfleet dut lease failed with exit code: {e.returncode}"
+ )
+ except subprocess.TimeoutExpired as e:
+ raise BoardLockError(
+ f"crosfleet dut lease timed out after {timeout_sec}s;"
+ " please abandon the dut manually."
+ )
+
+ try:
+ json_obj = json.loads(output)
+ dut_hostname = json_obj["DUT"]["Hostname"]
+ if not isinstance(dut_hostname, str):
+ raise TypeError("dut_hostname was not a string")
+ except (json.JSONDecodeError, IndexError, KeyError, TypeError) as e:
+ raise BoardLockError(
+ f"crosfleet dut lease output was parsed incorrectly: {e!r};"
+ f" observed output was {output}"
+ )
+ return _maybe_append_suffix(dut_hostname)
def crosfleet_release(dut_hostname: str, timeout_sec: float = 120.0):
- """Release a crosfleet device.
-
- Consider using the context managed crosfleet_machine_context
-
- Args:
- dut_hostname: Name of the device we want to release.
- timeout_sec: Number of seconds to try to release the DUT. Default is 120s.
-
- Raises:
- BoardReleaseError: Potentially failed to abandon the lease.
- """
- crosfleet_cmd_args = [
- 'crosfleet',
- 'dut',
- 'abandon',
- dut_hostname,
- ]
- exit_code = subprocess.call(crosfleet_cmd_args, timeout=timeout_sec)
- if exit_code != 0:
- raise BoardReleaseError(
- f'"crosfleet dut abandon" had exit code {exit_code}')
+ """Release a crosfleet device.
+
+    Consider using the context-managed crosfleet_machine_ctx instead.
+
+ Args:
+ dut_hostname: Name of the device we want to release.
+ timeout_sec: Number of seconds to try to release the DUT. Default is 120s.
+
+ Raises:
+ BoardReleaseError: Potentially failed to abandon the lease.
+ """
+ crosfleet_cmd_args = [
+ "crosfleet",
+ "dut",
+ "abandon",
+ dut_hostname,
+ ]
+ exit_code = subprocess.call(crosfleet_cmd_args, timeout=timeout_sec)
+ if exit_code != 0:
+ raise BoardReleaseError(
+ f'"crosfleet dut abandon" had exit code {exit_code}'
+ )
def _maybe_append_suffix(hostname: str) -> str:
- if hostname.endswith('.cros') or '.cros.' in hostname:
- return hostname
- return hostname + '.cros'
+ if hostname.endswith(".cros") or ".cros." in hostname:
+ return hostname
+ return hostname + ".cros"
-if __name__ == '__main__':
- sys.exit(main(sys.argv[1:]))
+if __name__ == "__main__":
+ sys.exit(main(sys.argv[1:]))
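crosperf_autolock.main can likewise be driven from Python; a hedged sketch based on the parser above (the board name, lease count, and trailing experiment file are placeholders, and unrecognized arguments fall through to crosperf itself):

    import sys
    import crosperf_autolock

    err = crosperf_autolock.main([
        "--board=octopus",   # hypothetical board
        "--num-leases=2",
        "--lease-time=240",
        "example.exp",       # forwarded to crosperf untouched
    ])
    sys.exit(err)            # main() returns an error message or None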
diff --git a/crosperf/crosperf_unittest.py b/crosperf/crosperf_unittest.py
index 774159ff..7b52f2e0 100755
--- a/crosperf/crosperf_unittest.py
+++ b/crosperf/crosperf_unittest.py
@@ -1,14 +1,12 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
-# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for crosperf."""
-from __future__ import division
-from __future__ import print_function
import argparse
import io
@@ -17,8 +15,9 @@ import unittest
import unittest.mock as mock
import crosperf
-import settings_factory
import experiment_file
+import settings_factory
+
EXPERIMENT_FILE_1 = """
board: x86-alex
@@ -41,50 +40,51 @@ EXPERIMENT_FILE_1 = """
class CrosperfTest(unittest.TestCase):
- """Crosperf test class."""
-
- def setUp(self):
- input_file = io.StringIO(EXPERIMENT_FILE_1)
- self.exp_file = experiment_file.ExperimentFile(input_file)
-
- def testDryRun(self):
- with tempfile.NamedTemporaryFile('w', encoding='utf-8') as f:
- f.write(EXPERIMENT_FILE_1)
- f.flush()
- crosperf.Main(['', f.name, '--dry_run'])
-
- def testConvertOptionsToSettings(self):
- parser = argparse.ArgumentParser()
- parser.add_argument('-l',
- '--log_dir',
- dest='log_dir',
- default='',
- help='The log_dir, default is under '
- '<crosperf_logs>/logs')
- crosperf.SetupParserOptions(parser)
- argv = ['crosperf/crosperf.py', 'temp.exp', '--rerun=True']
- options, _ = parser.parse_known_args(argv)
- settings = crosperf.ConvertOptionsToSettings(options)
- self.assertIsNotNone(settings)
- self.assertIsInstance(settings, settings_factory.GlobalSettings)
- self.assertEqual(len(settings.fields), 40)
- self.assertTrue(settings.GetField('rerun'))
- argv = ['crosperf/crosperf.py', 'temp.exp']
- options, _ = parser.parse_known_args(argv)
- settings = crosperf.ConvertOptionsToSettings(options)
- self.assertFalse(settings.GetField('rerun'))
-
- def testExceptionPrintTraceback(self):
- """Test the main function can print traceback in exception."""
-
- def mock_RunCrosperf(*_args, **_kwargs):
- return 10 / 0
-
- with mock.patch('crosperf.RunCrosperf', new=mock_RunCrosperf):
- with self.assertRaises(ZeroDivisionError) as context:
- crosperf.Main([])
- self.assertEqual('division by zero', str(context.exception))
-
-
-if __name__ == '__main__':
- unittest.main()
+ """Crosperf test class."""
+
+ def setUp(self):
+ input_file = io.StringIO(EXPERIMENT_FILE_1)
+ self.exp_file = experiment_file.ExperimentFile(input_file)
+
+ def testDryRun(self):
+ with tempfile.NamedTemporaryFile("w", encoding="utf-8") as f:
+ f.write(EXPERIMENT_FILE_1)
+ f.flush()
+ crosperf.Main(["", f.name, "--dry_run"])
+
+ def testConvertOptionsToSettings(self):
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "-l",
+ "--log_dir",
+ dest="log_dir",
+ default="",
+ help="The log_dir, default is under " "<crosperf_logs>/logs",
+ )
+ crosperf.SetupParserOptions(parser)
+ argv = ["crosperf/crosperf.py", "temp.exp", "--rerun=True"]
+ options, _ = parser.parse_known_args(argv)
+ settings = crosperf.ConvertOptionsToSettings(options)
+ self.assertIsNotNone(settings)
+ self.assertIsInstance(settings, settings_factory.GlobalSettings)
+ self.assertEqual(len(settings.fields), 40)
+ self.assertTrue(settings.GetField("rerun"))
+ argv = ["crosperf/crosperf.py", "temp.exp"]
+ options, _ = parser.parse_known_args(argv)
+ settings = crosperf.ConvertOptionsToSettings(options)
+ self.assertFalse(settings.GetField("rerun"))
+
+ def testExceptionPrintTraceback(self):
+ """Test the main function can print traceback in exception."""
+
+ def mock_RunCrosperf(*_args, **_kwargs):
+ return 10 / 0
+
+ with mock.patch("crosperf.RunCrosperf", new=mock_RunCrosperf):
+ with self.assertRaises(ZeroDivisionError) as context:
+ crosperf.Main([])
+ self.assertEqual("division by zero", str(context.exception))
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/crosperf/default_remotes b/crosperf/default_remotes
index faecb833..714385e7 100644
--- a/crosperf/default_remotes
+++ b/crosperf/default_remotes
@@ -1,8 +1,6 @@
-bob : chromeos6-row4-rack13-host6.cros
-chell : chromeos2-row1-rack10-host2.cros chromeos2-row1-rack10-host4.cros
-coral : chromeos6-row5-rack6-host1.cros chromeos6-row5-rack6-host3.cros chromeos6-row5-rack6-host5.cros
-elm : chromeos6-row14-rack15-host21.cros
-kefka : chromeos6-row6-rack22-host2.cros chromeos6-row6-rack22-host3.cros chromeos6-row11-rack22-host7.cros
-nautilus : chromeos6-row5-rack10-host1.cros chromeos6-row5-rack10-host3.cros
-snappy : chromeos6-row3-rack20-host1.cros chromeos6-row3-rack20-host3.cros
-veyron_tiger : chromeos6-row3-rack7-host1.cros
+bob : chromeos8-row12-rack16-host2
+chell : chromeos2-row1-rack10-host2 chromeos2-row1-rack10-host4
+coral : chromeos6-row5-rack6-host1 chromeos6-row5-rack6-host3 chromeos6-row5-rack6-host5
+elm : chromeos6-row14-rack15-host21
+nautilus : chromeos6-row5-rack10-host1 chromeos6-row5-rack10-host3
+snappy : chromeos8-row12-rack17-host1 chromeos8-row12-rack17-host2
diff --git a/crosperf/download_images.py b/crosperf/download_images.py
index 8e1bad11..9a46280d 100644
--- a/crosperf/download_images.py
+++ b/crosperf/download_images.py
@@ -1,327 +1,399 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2014-2015 The Chromium OS Authors. All rights reserved.
+# Copyright 2014-2015 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Download images from Cloud Storage."""
-from __future__ import print_function
import ast
import os
+from cros_utils import command_executer
import test_flag
-from cros_utils import command_executer
-GS_UTIL = 'src/chromium/depot_tools/gsutil.py'
+GS_UTIL = "src/chromium/depot_tools/gsutil.py"
class MissingImage(Exception):
- """Raised when the requested image does not exist in gs://"""
+ """Raised when the requested image does not exist in gs://"""
class MissingFile(Exception):
- """Raised when the requested file does not exist in gs://"""
+ """Raised when the requested file does not exist in gs://"""
class RunCommandExceptionHandler(object):
- """Handle Exceptions from calls to RunCommand"""
+ """Handle Exceptions from calls to RunCommand"""
- def __init__(self, logger_to_use, log_level, cmd_exec, command):
- self.logger = logger_to_use
- self.log_level = log_level
- self.ce = cmd_exec
- self.cleanup_command = command
+ def __init__(self, logger_to_use, log_level, cmd_exec, command):
+ self.logger = logger_to_use
+ self.log_level = log_level
+ self.ce = cmd_exec
+ self.cleanup_command = command
- def HandleException(self, _, e):
- # Exception handler, Run specified command
- if self.log_level != 'verbose' and self.cleanup_command is not None:
- self.logger.LogOutput('CMD: %s' % self.cleanup_command)
- if self.cleanup_command is not None:
- _ = self.ce.RunCommand(self.cleanup_command)
- # Raise exception again
- raise e
+ def HandleException(self, _, e):
+        # Exception handler: run the specified cleanup command
+ if self.log_level != "verbose" and self.cleanup_command is not None:
+ self.logger.LogOutput("CMD: %s" % self.cleanup_command)
+ if self.cleanup_command is not None:
+ _ = self.ce.RunCommand(self.cleanup_command)
+ # Raise exception again
+ raise e
class ImageDownloader(object):
- """Download images from Cloud Storage."""
-
- def __init__(self, logger_to_use=None, log_level='verbose', cmd_exec=None):
- self._logger = logger_to_use
- self.log_level = log_level
- self._ce = cmd_exec or command_executer.GetCommandExecuter(
- self._logger, log_level=self.log_level)
-
- def GetBuildID(self, chromeos_root, xbuddy_label):
- # Get the translation of the xbuddy_label into the real Google Storage
- # image name.
- command = ('cd /mnt/host/source/src/third_party/toolchain-utils/crosperf; '
- "./translate_xbuddy.py '%s'" % xbuddy_label)
- _, build_id_tuple_str, _ = self._ce.ChrootRunCommandWOutput(
- chromeos_root, command)
- if not build_id_tuple_str:
- raise MissingImage("Unable to find image for '%s'" % xbuddy_label)
-
- build_id_tuple = ast.literal_eval(build_id_tuple_str)
- build_id = build_id_tuple[0]
-
- return build_id
-
- def DownloadImage(self, chromeos_root, build_id, image_name):
- if self.log_level == 'average':
- self._logger.LogOutput('Preparing to download %s image to local '
- 'directory.' % build_id)
-
- # Make sure the directory for downloading the image exists.
- download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id)
- image_path = os.path.join(download_path, 'chromiumos_test_image.bin')
- if not os.path.exists(download_path):
- os.makedirs(download_path)
-
- # Check to see if the image has already been downloaded. If not,
- # download the image.
- if not os.path.exists(image_path):
- gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
- command = '%s cp %s %s' % (gsutil_cmd, image_name, download_path)
-
- if self.log_level != 'verbose':
- self._logger.LogOutput('CMD: %s' % command)
- status = self._ce.RunCommand(command)
- downloaded_image_name = os.path.join(download_path,
- 'chromiumos_test_image.tar.xz')
- if status != 0 or not os.path.exists(downloaded_image_name):
- raise MissingImage('Cannot download image: %s.' % downloaded_image_name)
-
- return image_path
-
- def UncompressImage(self, chromeos_root, build_id):
- # Check to see if the file has already been uncompresssed, etc.
- if os.path.exists(
- os.path.join(chromeos_root, 'chroot/tmp', build_id,
- 'chromiumos_test_image.bin')):
- return
-
- # Uncompress and untar the downloaded image.
- download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id)
- command = ('cd %s ; tar -Jxf chromiumos_test_image.tar.xz ' % download_path)
- # Cleanup command for exception handler
- clean_cmd = ('cd %s ; rm -f chromiumos_test_image.bin ' % download_path)
- exception_handler = RunCommandExceptionHandler(self._logger, self.log_level,
- self._ce, clean_cmd)
- if self.log_level != 'verbose':
- self._logger.LogOutput('CMD: %s' % command)
- print('(Uncompressing and un-tarring may take a couple of minutes...'
- 'please be patient.)')
- retval = self._ce.RunCommand(
- command, except_handler=exception_handler.HandleException)
- if retval != 0:
- if self.log_level != 'verbose':
- self._logger.LogOutput('CMD: %s' % clean_cmd)
- print('(Removing file chromiumos_test_image.bin.)')
- # Remove partially uncompressed file
- _ = self._ce.RunCommand(clean_cmd)
- # Raise exception for failure to uncompress
- raise MissingImage('Cannot uncompress image: %s.' % build_id)
-
- # Remove compressed image
- command = ('cd %s ; rm -f chromiumos_test_image.tar.xz; ' % download_path)
- if self.log_level != 'verbose':
- self._logger.LogOutput('CMD: %s' % command)
- print('(Removing file chromiumos_test_image.tar.xz.)')
- # try removing file, its ok to have an error, print if encountered
- retval = self._ce.RunCommand(command)
- if retval != 0:
- print('(Warning: Could not remove file chromiumos_test_image.tar.xz .)')
-
- def DownloadSingleFile(self, chromeos_root, build_id, package_file_name):
- # Verify if package files exist
- status = 0
- gs_package_name = ('gs://chromeos-image-archive/%s/%s' %
- (build_id, package_file_name))
- gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
- if not test_flag.GetTestMode():
- cmd = '%s ls %s' % (gsutil_cmd, gs_package_name)
- status = self._ce.RunCommand(cmd)
- if status != 0:
- raise MissingFile('Cannot find package file: %s.' % package_file_name)
-
- if self.log_level == 'average':
- self._logger.LogOutput('Preparing to download %s package to local '
- 'directory.' % package_file_name)
-
- # Make sure the directory for downloading the package exists.
- download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id)
- package_path = os.path.join(download_path, package_file_name)
- if not os.path.exists(download_path):
- os.makedirs(download_path)
-
- # Check to see if the package file has already been downloaded. If not,
- # download it.
- if not os.path.exists(package_path):
- command = '%s cp %s %s' % (gsutil_cmd, gs_package_name, download_path)
-
- if self.log_level != 'verbose':
- self._logger.LogOutput('CMD: %s' % command)
- status = self._ce.RunCommand(command)
- if status != 0 or not os.path.exists(package_path):
- raise MissingFile('Cannot download package: %s .' % package_path)
-
- def UncompressSingleFile(self, chromeos_root, build_id, package_file_name,
- uncompress_cmd):
- # Uncompress file
- download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id)
- command = ('cd %s ; %s %s' %
- (download_path, uncompress_cmd, package_file_name))
-
- if self.log_level != 'verbose':
- self._logger.LogOutput('CMD: %s' % command)
- print('(Uncompressing file %s .)' % package_file_name)
- retval = self._ce.RunCommand(command)
- if retval != 0:
- raise MissingFile('Cannot uncompress file: %s.' % package_file_name)
- # Remove uncompressed downloaded file
- command = ('cd %s ; rm -f %s' % (download_path, package_file_name))
- if self.log_level != 'verbose':
- self._logger.LogOutput('CMD: %s' % command)
- print('(Removing processed file %s .)' % package_file_name)
- # try removing file, its ok to have an error, print if encountered
- retval = self._ce.RunCommand(command)
- if retval != 0:
- print('(Warning: Could not remove file %s .)' % package_file_name)
-
- def VerifyFileExists(self, chromeos_root, build_id, package_file):
- # Quickly verify if the files are there
- status = 0
- gs_package_name = ('gs://chromeos-image-archive/%s/%s' %
- (build_id, package_file))
- gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
- if not test_flag.GetTestMode():
- cmd = '%s ls %s' % (gsutil_cmd, gs_package_name)
- if self.log_level != 'verbose':
- self._logger.LogOutput('CMD: %s' % cmd)
- status = self._ce.RunCommand(cmd)
- if status != 0:
- print('(Warning: Could not find file %s )' % gs_package_name)
- return 1
- # Package exists on server
- return 0
-
- def DownloadAutotestFiles(self, chromeos_root, build_id):
- # Download autest package files (3 files)
- autotest_packages_name = ('autotest_packages.tar')
- autotest_server_package_name = ('autotest_server_package.tar.bz2')
- autotest_control_files_name = ('control_files.tar')
-
- download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id)
- # Autotest directory relative path wrt chroot
- autotest_rel_path = os.path.join('/tmp', build_id, 'autotest_files')
- # Absolute Path to download files
- autotest_path = os.path.join(chromeos_root, 'chroot/tmp', build_id,
- 'autotest_files')
-
- if not os.path.exists(autotest_path):
- # Quickly verify if the files are present on server
- # If not, just exit with warning
- status = self.VerifyFileExists(chromeos_root, build_id,
- autotest_packages_name)
- if status != 0:
- default_autotest_dir = '/mnt/host/source/src/third_party/autotest/files'
- print('(Warning: Could not find autotest packages .)\n'
- '(Warning: Defaulting autotest path to %s .' %
- default_autotest_dir)
- return default_autotest_dir
-
- # Files exist on server, download and uncompress them
- self.DownloadSingleFile(chromeos_root, build_id, autotest_packages_name)
- self.DownloadSingleFile(chromeos_root, build_id,
- autotest_server_package_name)
- self.DownloadSingleFile(chromeos_root, build_id,
- autotest_control_files_name)
-
- self.UncompressSingleFile(chromeos_root, build_id, autotest_packages_name,
- 'tar -xf ')
- self.UncompressSingleFile(chromeos_root, build_id,
- autotest_server_package_name, 'tar -jxf ')
- self.UncompressSingleFile(chromeos_root, build_id,
- autotest_control_files_name, 'tar -xf ')
- # Rename created autotest directory to autotest_files
- command = ('cd %s ; mv autotest autotest_files' % download_path)
- if self.log_level != 'verbose':
- self._logger.LogOutput('CMD: %s' % command)
- print('(Moving downloaded autotest files to autotest_files)')
- retval = self._ce.RunCommand(command)
- if retval != 0:
- raise MissingFile('Could not create directory autotest_files')
-
- return autotest_rel_path
-
- def DownloadDebugFile(self, chromeos_root, build_id):
- # Download autest package files (3 files)
- debug_archive_name = 'debug.tgz'
-
- download_path = os.path.join(chromeos_root, 'chroot/tmp', build_id)
- # Debug directory relative path wrt chroot
- debug_rel_path = os.path.join('/tmp', build_id, 'debug_files')
- # Debug path to download files
- debug_path = os.path.join(chromeos_root, 'chroot/tmp', build_id,
- 'debug_files')
-
- if not os.path.exists(debug_path):
- # Quickly verify if the file is present on server
- # If not, just exit with warning
- status = self.VerifyFileExists(chromeos_root, build_id,
- debug_archive_name)
- if status != 0:
- self._logger.LogOutput('WARNING: Could not find debug archive on gs')
- return ''
-
- # File exists on server, download and uncompress it
- self.DownloadSingleFile(chromeos_root, build_id, debug_archive_name)
-
- self.UncompressSingleFile(chromeos_root, build_id, debug_archive_name,
- 'tar -xf ')
- # Extract and move debug files into the proper location.
- debug_dir = 'debug_files/usr/lib'
- command = ('cd %s ; mkdir -p %s; mv debug %s' %
- (download_path, debug_dir, debug_dir))
- if self.log_level != 'verbose':
- self._logger.LogOutput('CMD: %s' % command)
- print('Moving downloaded debug files to %s' % debug_dir)
- retval = self._ce.RunCommand(command)
- if retval != 0:
- raise MissingFile('Could not create directory %s' %
- os.path.join(debug_dir, 'debug'))
-
- return debug_rel_path
-
- def Run(self, chromeos_root, xbuddy_label, autotest_path, debug_path,
- download_debug):
- build_id = self.GetBuildID(chromeos_root, xbuddy_label)
- image_name = (
- 'gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz' %
- build_id)
-
- # Verify that image exists for build_id, before attempting to
- # download it.
- status = 0
- if not test_flag.GetTestMode():
- gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
- cmd = '%s ls %s' % (gsutil_cmd, image_name)
- status = self._ce.RunCommand(cmd)
- if status != 0:
- raise MissingImage('Cannot find official image: %s.' % image_name)
-
- image_path = self.DownloadImage(chromeos_root, build_id, image_name)
- self.UncompressImage(chromeos_root, build_id)
-
- if self.log_level != 'quiet':
- self._logger.LogOutput('Using image from %s.' % image_path)
-
- if autotest_path == '':
- autotest_path = self.DownloadAutotestFiles(chromeos_root, build_id)
-
- if debug_path == '' and download_debug:
- debug_path = self.DownloadDebugFile(chromeos_root, build_id)
-
- return image_path, autotest_path, debug_path
+ """Download images from Cloud Storage."""
+
+ def __init__(self, logger_to_use=None, log_level="verbose", cmd_exec=None):
+ self._logger = logger_to_use
+ self.log_level = log_level
+ self._ce = cmd_exec or command_executer.GetCommandExecuter(
+ self._logger, log_level=self.log_level
+ )
+
+ def GetBuildID(self, chromeos_root, xbuddy_label):
+ # Get the translation of the xbuddy_label into the real Google Storage
+ # image name.
+ command = (
+ "cd /mnt/host/source/src/third_party/toolchain-utils/crosperf; "
+ "./translate_xbuddy.py '%s'" % xbuddy_label
+ )
+ _, build_id_tuple_str, _ = self._ce.ChrootRunCommandWOutput(
+ chromeos_root, command
+ )
+ if not build_id_tuple_str:
+ raise MissingImage("Unable to find image for '%s'" % xbuddy_label)
+
+ build_id_tuple = ast.literal_eval(build_id_tuple_str)
+ build_id = build_id_tuple[0]
+
+ return build_id
+
+ def DownloadImage(self, chromeos_root, build_id, image_name):
+ if self.log_level == "average":
+ self._logger.LogOutput(
+ "Preparing to download %s image to local "
+ "directory." % build_id
+ )
+
+ # Make sure the directory for downloading the image exists.
+ download_path = os.path.join(chromeos_root, "chroot/tmp", build_id)
+ image_path = os.path.join(download_path, "chromiumos_test_image.bin")
+ if not os.path.exists(download_path):
+ os.makedirs(download_path)
+
+ # Check to see if the image has already been downloaded. If not,
+ # download the image.
+ if not os.path.exists(image_path):
+ gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
+ command = "%s cp %s %s" % (gsutil_cmd, image_name, download_path)
+
+ if self.log_level != "verbose":
+ self._logger.LogOutput("CMD: %s" % command)
+ status = self._ce.RunCommand(command)
+ downloaded_image_name = os.path.join(
+ download_path, "chromiumos_test_image.tar.xz"
+ )
+ if status != 0 or not os.path.exists(downloaded_image_name):
+ raise MissingImage(
+ "Cannot download image: %s." % downloaded_image_name
+ )
+
+ return image_path
+
+ def UncompressImage(self, chromeos_root, build_id):
+        # Check to see if the file has already been uncompressed, etc.
+ if os.path.exists(
+ os.path.join(
+ chromeos_root,
+ "chroot/tmp",
+ build_id,
+ "chromiumos_test_image.bin",
+ )
+ ):
+ return
+
+ # Uncompress and untar the downloaded image.
+ download_path = os.path.join(chromeos_root, "chroot/tmp", build_id)
+ command = (
+ "cd %s ; tar -Jxf chromiumos_test_image.tar.xz " % download_path
+ )
+ # Cleanup command for exception handler
+ clean_cmd = "cd %s ; rm -f chromiumos_test_image.bin " % download_path
+ exception_handler = RunCommandExceptionHandler(
+ self._logger, self.log_level, self._ce, clean_cmd
+ )
+ if self.log_level != "verbose":
+ self._logger.LogOutput("CMD: %s" % command)
+ print(
+ "(Uncompressing and un-tarring may take a couple of minutes..."
+ "please be patient.)"
+ )
+ retval = self._ce.RunCommand(
+ command, except_handler=exception_handler.HandleException
+ )
+ if retval != 0:
+ if self.log_level != "verbose":
+ self._logger.LogOutput("CMD: %s" % clean_cmd)
+ print("(Removing file chromiumos_test_image.bin.)")
+ # Remove partially uncompressed file
+ _ = self._ce.RunCommand(clean_cmd)
+ # Raise exception for failure to uncompress
+ raise MissingImage("Cannot uncompress image: %s." % build_id)
+
+ # Remove compressed image
+ command = "cd %s ; rm -f chromiumos_test_image.tar.xz; " % download_path
+ if self.log_level != "verbose":
+ self._logger.LogOutput("CMD: %s" % command)
+ print("(Removing file chromiumos_test_image.tar.xz.)")
+        # Try removing file; it's ok to have an error, print if encountered
+ retval = self._ce.RunCommand(command)
+ if retval != 0:
+ print(
+ "(Warning: Could not remove file chromiumos_test_image.tar.xz .)"
+ )
+
+ def DownloadSingleFile(self, chromeos_root, build_id, package_file_name):
+ # Verify if package files exist
+ status = 0
+ gs_package_name = "gs://chromeos-image-archive/%s/%s" % (
+ build_id,
+ package_file_name,
+ )
+ gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
+ if not test_flag.GetTestMode():
+ cmd = "%s ls %s" % (gsutil_cmd, gs_package_name)
+ status = self._ce.RunCommand(cmd)
+ if status != 0:
+ raise MissingFile(
+ "Cannot find package file: %s." % package_file_name
+ )
+
+ if self.log_level == "average":
+ self._logger.LogOutput(
+ "Preparing to download %s package to local "
+ "directory." % package_file_name
+ )
+
+ # Make sure the directory for downloading the package exists.
+ download_path = os.path.join(chromeos_root, "chroot/tmp", build_id)
+ package_path = os.path.join(download_path, package_file_name)
+ if not os.path.exists(download_path):
+ os.makedirs(download_path)
+
+ # Check to see if the package file has already been downloaded. If not,
+ # download it.
+ if not os.path.exists(package_path):
+ command = "%s cp %s %s" % (
+ gsutil_cmd,
+ gs_package_name,
+ download_path,
+ )
+
+ if self.log_level != "verbose":
+ self._logger.LogOutput("CMD: %s" % command)
+ status = self._ce.RunCommand(command)
+ if status != 0 or not os.path.exists(package_path):
+ raise MissingFile(
+ "Cannot download package: %s ." % package_path
+ )
+
+ def UncompressSingleFile(
+ self, chromeos_root, build_id, package_file_name, uncompress_cmd
+ ):
+ # Uncompress file
+ download_path = os.path.join(chromeos_root, "chroot/tmp", build_id)
+ command = "cd %s ; %s %s" % (
+ download_path,
+ uncompress_cmd,
+ package_file_name,
+ )
+
+ if self.log_level != "verbose":
+ self._logger.LogOutput("CMD: %s" % command)
+ print("(Uncompressing file %s .)" % package_file_name)
+ retval = self._ce.RunCommand(command)
+ if retval != 0:
+ raise MissingFile("Cannot uncompress file: %s." % package_file_name)
+ # Remove uncompressed downloaded file
+ command = "cd %s ; rm -f %s" % (download_path, package_file_name)
+ if self.log_level != "verbose":
+ self._logger.LogOutput("CMD: %s" % command)
+ print("(Removing processed file %s .)" % package_file_name)
+        # Try removing file; it's ok to have an error, print if encountered
+ retval = self._ce.RunCommand(command)
+ if retval != 0:
+ print("(Warning: Could not remove file %s .)" % package_file_name)
+
+ def VerifyFileExists(self, chromeos_root, build_id, package_file):
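+        """Return 0 if the package file exists in the GS archive, 1 otherwise."""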
+ # Quickly verify if the files are there
+ status = 0
+ gs_package_name = "gs://chromeos-image-archive/%s/%s" % (
+ build_id,
+ package_file,
+ )
+ gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
+ if not test_flag.GetTestMode():
+ cmd = "%s ls %s" % (gsutil_cmd, gs_package_name)
+ if self.log_level != "verbose":
+ self._logger.LogOutput("CMD: %s" % cmd)
+ status = self._ce.RunCommand(cmd)
+ if status != 0:
+ print("(Warning: Could not find file %s )" % gs_package_name)
+ return 1
+ # Package exists on server
+ return 0
+
+ def DownloadAutotestFiles(self, chromeos_root, build_id):
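+        """Download and unpack the autotest tarballs; return the autotest path to use inside the chroot."""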
+        # Download autotest package files (3 files)
+ autotest_packages_name = "autotest_packages.tar"
+ autotest_server_package_name = "autotest_server_package.tar.bz2"
+ autotest_control_files_name = "control_files.tar"
+
+ download_path = os.path.join(chromeos_root, "chroot/tmp", build_id)
+ # Autotest directory relative path wrt chroot
+ autotest_rel_path = os.path.join("/tmp", build_id, "autotest_files")
+        # Absolute path where the files are downloaded
+ autotest_path = os.path.join(
+ chromeos_root, "chroot/tmp", build_id, "autotest_files"
+ )
+
+ if not os.path.exists(autotest_path):
+            # Quickly verify that the files are present on the server.
+            # If not, fall back to the default autotest directory with a warning.
+ status = self.VerifyFileExists(
+ chromeos_root, build_id, autotest_packages_name
+ )
+ if status != 0:
+ default_autotest_dir = (
+ "/mnt/host/source/src/third_party/autotest/files"
+ )
+ print(
+ "(Warning: Could not find autotest packages .)\n"
+ "(Warning: Defaulting autotest path to %s ."
+ % default_autotest_dir
+ )
+ return default_autotest_dir
+
+ # Files exist on server, download and uncompress them
+ self.DownloadSingleFile(
+ chromeos_root, build_id, autotest_packages_name
+ )
+ self.DownloadSingleFile(
+ chromeos_root, build_id, autotest_server_package_name
+ )
+ self.DownloadSingleFile(
+ chromeos_root, build_id, autotest_control_files_name
+ )
+
+ self.UncompressSingleFile(
+ chromeos_root, build_id, autotest_packages_name, "tar -xf "
+ )
+ self.UncompressSingleFile(
+ chromeos_root,
+ build_id,
+ autotest_server_package_name,
+ "tar -jxf ",
+ )
+ self.UncompressSingleFile(
+ chromeos_root, build_id, autotest_control_files_name, "tar -xf "
+ )
+ # Rename created autotest directory to autotest_files
+ command = "cd %s ; mv autotest autotest_files" % download_path
+ if self.log_level != "verbose":
+ self._logger.LogOutput("CMD: %s" % command)
+ print("(Moving downloaded autotest files to autotest_files)")
+ retval = self._ce.RunCommand(command)
+ if retval != 0:
+ raise MissingFile("Could not create directory autotest_files")
+
+ return autotest_rel_path
+
+ def DownloadDebugFile(self, chromeos_root, build_id):
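+        """Download and unpack debug.tgz; return the debug path inside the chroot, or an empty string."""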
+        # Download the debug symbols archive (debug.tgz)
+ debug_archive_name = "debug.tgz"
+
+ download_path = os.path.join(chromeos_root, "chroot/tmp", build_id)
+ # Debug directory relative path wrt chroot
+ debug_rel_path = os.path.join("/tmp", build_id, "debug_files")
+        # Absolute path where the debug files are downloaded
+ debug_path = os.path.join(
+ chromeos_root, "chroot/tmp", build_id, "debug_files"
+ )
+
+ if not os.path.exists(debug_path):
+            # Quickly verify that the file is present on the server.
+            # If not, return an empty path with a warning.
+ status = self.VerifyFileExists(
+ chromeos_root, build_id, debug_archive_name
+ )
+ if status != 0:
+ self._logger.LogOutput(
+ "WARNING: Could not find debug archive on gs"
+ )
+ return ""
+
+ # File exists on server, download and uncompress it
+ self.DownloadSingleFile(chromeos_root, build_id, debug_archive_name)
+
+ self.UncompressSingleFile(
+ chromeos_root, build_id, debug_archive_name, "tar -xf "
+ )
+ # Extract and move debug files into the proper location.
+ debug_dir = "debug_files/usr/lib"
+ command = "cd %s ; mkdir -p %s; mv debug %s" % (
+ download_path,
+ debug_dir,
+ debug_dir,
+ )
+ if self.log_level != "verbose":
+ self._logger.LogOutput("CMD: %s" % command)
+ print("Moving downloaded debug files to %s" % debug_dir)
+ retval = self._ce.RunCommand(command)
+ if retval != 0:
+ raise MissingFile(
+ "Could not create directory %s"
+ % os.path.join(debug_dir, "debug")
+ )
+
+ return debug_rel_path
+
+ def Run(
+ self,
+ chromeos_root,
+ xbuddy_label,
+ autotest_path,
+ debug_path,
+ download_debug,
+ ):
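+        """Download the test image for an xbuddy label, plus autotest and debug files when needed."""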
+ build_id = self.GetBuildID(chromeos_root, xbuddy_label)
+ image_name = (
+ "gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz"
+ % build_id
+ )
+
+ # Verify that image exists for build_id, before attempting to
+ # download it.
+ status = 0
+ if not test_flag.GetTestMode():
+ gsutil_cmd = os.path.join(chromeos_root, GS_UTIL)
+ cmd = "%s ls %s" % (gsutil_cmd, image_name)
+ status = self._ce.RunCommand(cmd)
+ if status != 0:
+ raise MissingImage("Cannot find official image: %s." % image_name)
+
+ image_path = self.DownloadImage(chromeos_root, build_id, image_name)
+ self.UncompressImage(chromeos_root, build_id)
+
+ if self.log_level != "quiet":
+ self._logger.LogOutput("Using image from %s." % image_path)
+
+ if autotest_path == "":
+ autotest_path = self.DownloadAutotestFiles(chromeos_root, build_id)
+
+ if debug_path == "" and download_debug:
+ debug_path = self.DownloadDebugFile(chromeos_root, build_id)
+
+ return image_path, autotest_path, debug_path
diff --git a/crosperf/download_images_buildid_test.py b/crosperf/download_images_buildid_test.py
index fc37f2c1..20dd13c5 100755
--- a/crosperf/download_images_buildid_test.py
+++ b/crosperf/download_images_buildid_test.py
@@ -1,18 +1,18 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2014 The Chromium OS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test translation of xbuddy names."""
-from __future__ import print_function
import argparse
import sys
import download_images
+
# On May 1, 2014:
# latest : lumpy-release/R34-5500.132.0
# latest-beta : lumpy-release/R35-5712.43.0
@@ -22,93 +22,111 @@ import download_images
class ImageDownloaderBuildIDTest(object):
- """Test translation of xbuddy names."""
-
- def __init__(self):
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '-c',
- '--chromeos_root',
- dest='chromeos_root',
- help='Directory containing ChromeOS root.')
-
- options = parser.parse_known_args(sys.argv[1:])[0]
- if options.chromeos_root is None:
- self._usage(parser, '--chromeos_root must be set')
- self.chromeos_root = options.chromeos_root
- self.tests_passed = 0
- self.tests_run = 0
- self.tests_failed = 0
-
- def _usage(self, parser, message):
- print('ERROR: ' + message)
- parser.print_help()
- sys.exit(0)
-
- def print_test_status(self):
- print('----------------------------------------\n')
- print('Tests attempted: %d' % self.tests_run)
- print('Tests passed: %d' % self.tests_passed)
- print('Tests failed: %d' % self.tests_failed)
- print('\n----------------------------------------')
-
- def assert_failure(self, msg):
- print('Assert failure: %s' % msg)
- self.print_test_status()
- sys.exit(1)
-
- def assertIsNotNone(self, arg, arg_name):
- if arg is None:
- self.tests_failed = self.tests_failed + 1
- self.assert_failure('%s is not None' % arg_name)
-
- def assertNotEqual(self, arg1, arg2, arg1_name, arg2_name):
- if arg1 == arg2:
- self.tests_failed = self.tests_failed + 1
- self.assert_failure('%s is not NotEqual to %s' % (arg1_name, arg2_name))
-
- def assertEqual(self, arg1, arg2, arg1_name, arg2_name):
- if arg1 != arg2:
- self.tests_failed = self.tests_failed + 1
- self.assert_failure('%s is not Equal to %s' % (arg1_name, arg2_name))
-
- def test_one_id(self, downloader, test_id, result_string, exact_match):
- print("Translating '%s'" % test_id)
- self.tests_run = self.tests_run + 1
-
- result = downloader.GetBuildID(self.chromeos_root, test_id)
- # Verify that we got a build id back.
- self.assertIsNotNone(result, 'result')
-
- # Verify that the result either contains or exactly matches the
- # result_string, depending on the exact_match argument.
- if exact_match:
- self.assertEqual(result, result_string, 'result', result_string)
- else:
- self.assertNotEqual(result.find(result_string), -1, 'result.find', '-1')
- self.tests_passed = self.tests_passed + 1
-
- def test_get_build_id(self):
- """Test that the actual translating of xbuddy names is working properly."""
- downloader = download_images.ImageDownloader(log_level='quiet')
-
- self.test_one_id(downloader, 'remote/lumpy/latest-dev', 'lumpy-release/R',
- False)
- self.test_one_id(downloader,
- 'remote/trybot-lumpy-release-afdo-use/R35-5672.0.0-b86',
- 'trybot-lumpy-release-afdo-use/R35-5672.0.0-b86', True)
- self.test_one_id(downloader, 'remote/lumpy-release/R35-5672.0.0',
- 'lumpy-release/R35-5672.0.0', True)
- self.test_one_id(downloader, 'remote/lumpy/latest-dev', 'lumpy-release/R',
- False)
- self.test_one_id(downloader, 'remote/lumpy/latest-official',
- 'lumpy-release/R', False)
- self.test_one_id(downloader, 'remote/lumpy/latest-beta', 'lumpy-release/R',
- False)
-
- self.print_test_status()
-
-
-if __name__ == '__main__':
- tester = ImageDownloaderBuildIDTest()
- tester.test_get_build_id()
+ """Test translation of xbuddy names."""
+
+ def __init__(self):
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "-c",
+ "--chromeos_root",
+ dest="chromeos_root",
+ help="Directory containing ChromeOS root.",
+ )
+
+ options = parser.parse_known_args(sys.argv[1:])[0]
+ if options.chromeos_root is None:
+ self._usage(parser, "--chromeos_root must be set")
+ self.chromeos_root = options.chromeos_root
+ self.tests_passed = 0
+ self.tests_run = 0
+ self.tests_failed = 0
+
+ def _usage(self, parser, message):
+ print("ERROR: " + message)
+ parser.print_help()
+ sys.exit(0)
+
+ def print_test_status(self):
+ print("----------------------------------------\n")
+ print("Tests attempted: %d" % self.tests_run)
+ print("Tests passed: %d" % self.tests_passed)
+ print("Tests failed: %d" % self.tests_failed)
+ print("\n----------------------------------------")
+
+ def assert_failure(self, msg):
+ print("Assert failure: %s" % msg)
+ self.print_test_status()
+ sys.exit(1)
+
+ def assertIsNotNone(self, arg, arg_name):
+ if arg is None:
+ self.tests_failed = self.tests_failed + 1
+ self.assert_failure("%s is not None" % arg_name)
+
+ def assertNotEqual(self, arg1, arg2, arg1_name, arg2_name):
+ if arg1 == arg2:
+ self.tests_failed = self.tests_failed + 1
+ self.assert_failure(
+ "%s is not NotEqual to %s" % (arg1_name, arg2_name)
+ )
+
+ def assertEqual(self, arg1, arg2, arg1_name, arg2_name):
+ if arg1 != arg2:
+ self.tests_failed = self.tests_failed + 1
+ self.assert_failure(
+ "%s is not Equal to %s" % (arg1_name, arg2_name)
+ )
+
+ def test_one_id(self, downloader, test_id, result_string, exact_match):
+ print("Translating '%s'" % test_id)
+ self.tests_run = self.tests_run + 1
+
+ result = downloader.GetBuildID(self.chromeos_root, test_id)
+ # Verify that we got a build id back.
+ self.assertIsNotNone(result, "result")
+
+ # Verify that the result either contains or exactly matches the
+ # result_string, depending on the exact_match argument.
+ if exact_match:
+ self.assertEqual(result, result_string, "result", result_string)
+ else:
+ self.assertNotEqual(
+ result.find(result_string), -1, "result.find", "-1"
+ )
+ self.tests_passed = self.tests_passed + 1
+
+ def test_get_build_id(self):
+ """Test that the actual translating of xbuddy names is working properly."""
+ downloader = download_images.ImageDownloader(log_level="quiet")
+
+ self.test_one_id(
+ downloader, "remote/lumpy/latest-dev", "lumpy-release/R", False
+ )
+ self.test_one_id(
+ downloader,
+ "remote/trybot-lumpy-release-afdo-use/R35-5672.0.0-b86",
+ "trybot-lumpy-release-afdo-use/R35-5672.0.0-b86",
+ True,
+ )
+ self.test_one_id(
+ downloader,
+ "remote/lumpy-release/R35-5672.0.0",
+ "lumpy-release/R35-5672.0.0",
+ True,
+ )
+ self.test_one_id(
+ downloader, "remote/lumpy/latest-dev", "lumpy-release/R", False
+ )
+ self.test_one_id(
+ downloader, "remote/lumpy/latest-official", "lumpy-release/R", False
+ )
+ self.test_one_id(
+ downloader, "remote/lumpy/latest-beta", "lumpy-release/R", False
+ )
+
+ self.print_test_status()
+
+
+if __name__ == "__main__":
+ tester = ImageDownloaderBuildIDTest()
+ tester.test_get_build_id()
diff --git a/crosperf/download_images_unittest.py b/crosperf/download_images_unittest.py
index 62b8d891..6a640f80 100755
--- a/crosperf/download_images_unittest.py
+++ b/crosperf/download_images_unittest.py
@@ -1,277 +1,316 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Download image unittest."""
-from __future__ import print_function
import os
import unittest
import unittest.mock as mock
-import download_images
from cros_utils import command_executer
from cros_utils import logger
-
+import download_images
import test_flag
-MOCK_LOGGER = logger.GetLogger(log_dir='', mock=True)
+
+MOCK_LOGGER = logger.GetLogger(log_dir="", mock=True)
class ImageDownloaderTestcast(unittest.TestCase):
- """The image downloader test class."""
-
- def __init__(self, *args, **kwargs):
- super(ImageDownloaderTestcast, self).__init__(*args, **kwargs)
- self.called_download_image = False
- self.called_uncompress_image = False
- self.called_get_build_id = False
- self.called_download_autotest_files = False
- self.called_download_debug_file = False
-
- @mock.patch.object(os, 'makedirs')
- @mock.patch.object(os.path, 'exists')
- def test_download_image(self, mock_path_exists, mock_mkdirs):
-
- # Set mock and test values.
- mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
- test_chroot = '/usr/local/home/chromeos'
- test_build_id = 'lumpy-release/R36-5814.0.0'
- image_path = ('gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz'
- % test_build_id)
-
- downloader = download_images.ImageDownloader(
- logger_to_use=MOCK_LOGGER, cmd_exec=mock_cmd_exec)
-
- # Set os.path.exists to always return False and run downloader
- mock_path_exists.return_value = False
- test_flag.SetTestMode(True)
- self.assertRaises(download_images.MissingImage, downloader.DownloadImage,
- test_chroot, test_build_id, image_path)
-
- # Verify os.path.exists was called twice, with proper arguments.
- self.assertEqual(mock_path_exists.call_count, 2)
- mock_path_exists.assert_called_with(
- '/usr/local/home/chromeos/chroot/tmp/lumpy-release/'
- 'R36-5814.0.0/chromiumos_test_image.bin')
- mock_path_exists.assert_any_call(
- '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0')
-
- # Verify we called os.mkdirs
- self.assertEqual(mock_mkdirs.call_count, 1)
- mock_mkdirs.assert_called_with(
- '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0')
-
- # Verify we called RunCommand once, with proper arguments.
- self.assertEqual(mock_cmd_exec.RunCommand.call_count, 1)
- expected_args = (
- '/usr/local/home/chromeos/src/chromium/depot_tools/gsutil.py '
- 'cp gs://chromeos-image-archive/lumpy-release/R36-5814.0.0/'
- 'chromiumos_test_image.tar.xz '
- '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0')
-
- mock_cmd_exec.RunCommand.assert_called_with(expected_args)
-
- # Reset the velues in the mocks; set os.path.exists to always return True.
- mock_path_exists.reset_mock()
- mock_cmd_exec.reset_mock()
- mock_path_exists.return_value = True
-
- # Run downloader
- downloader.DownloadImage(test_chroot, test_build_id, image_path)
-
- # Verify os.path.exists was called twice, with proper arguments.
- self.assertEqual(mock_path_exists.call_count, 2)
- mock_path_exists.assert_called_with(
- '/usr/local/home/chromeos/chroot/tmp/lumpy-release/'
- 'R36-5814.0.0/chromiumos_test_image.bin')
- mock_path_exists.assert_any_call(
- '/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0')
-
- # Verify we made no RunCommand or ChrootRunCommand calls (since
- # os.path.exists returned True, there was no work do be done).
- self.assertEqual(mock_cmd_exec.RunCommand.call_count, 0)
- self.assertEqual(mock_cmd_exec.ChrootRunCommand.call_count, 0)
-
- @mock.patch.object(os.path, 'exists')
- def test_uncompress_image(self, mock_path_exists):
-
- # set mock and test values.
- mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
- test_chroot = '/usr/local/home/chromeos'
- test_build_id = 'lumpy-release/R36-5814.0.0'
-
- downloader = download_images.ImageDownloader(
- logger_to_use=MOCK_LOGGER, cmd_exec=mock_cmd_exec)
-
- # Set os.path.exists to always return False and run uncompress.
- mock_path_exists.return_value = False
- self.assertRaises(download_images.MissingImage, downloader.UncompressImage,
- test_chroot, test_build_id)
-
- # Verify os.path.exists was called once, with correct arguments.
- self.assertEqual(mock_path_exists.call_count, 1)
- mock_path_exists.assert_called_with(
- '/usr/local/home/chromeos/chroot/tmp/lumpy-release/'
- 'R36-5814.0.0/chromiumos_test_image.bin')
-
- # Verify RunCommand was called twice with correct arguments.
- self.assertEqual(mock_cmd_exec.RunCommand.call_count, 2)
- # Call 1, should have 2 arguments
- self.assertEqual(len(mock_cmd_exec.RunCommand.call_args_list[0]), 2)
- actual_arg = mock_cmd_exec.RunCommand.call_args_list[0][0]
- expected_arg = (
- 'cd /usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0 ; '
- 'tar -Jxf chromiumos_test_image.tar.xz ',)
- self.assertEqual(expected_arg, actual_arg)
- # 2nd arg must be exception handler
- except_handler_string = 'RunCommandExceptionHandler.HandleException'
- self.assertTrue(
- except_handler_string in repr(mock_cmd_exec.RunCommand.call_args_list[0]
- [1]))
-
- # Call 2, should have 2 arguments
- self.assertEqual(len(mock_cmd_exec.RunCommand.call_args_list[1]), 2)
- actual_arg = mock_cmd_exec.RunCommand.call_args_list[1][0]
- expected_arg = (
- 'cd /usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0 ; '
- 'rm -f chromiumos_test_image.bin ',)
- self.assertEqual(expected_arg, actual_arg)
- # 2nd arg must be empty
- self.assertTrue('{}' in repr(mock_cmd_exec.RunCommand.call_args_list[1][1]))
-
- # Set os.path.exists to always return True and run uncompress.
- mock_path_exists.reset_mock()
- mock_cmd_exec.reset_mock()
- mock_path_exists.return_value = True
- downloader.UncompressImage(test_chroot, test_build_id)
-
- # Verify os.path.exists was called once, with correct arguments.
- self.assertEqual(mock_path_exists.call_count, 1)
- mock_path_exists.assert_called_with(
- '/usr/local/home/chromeos/chroot/tmp/lumpy-release/'
- 'R36-5814.0.0/chromiumos_test_image.bin')
-
- # Verify RunCommand was not called.
- self.assertEqual(mock_cmd_exec.RunCommand.call_count, 0)
-
- def test_run(self):
-
- # Set test arguments
- test_chroot = '/usr/local/home/chromeos'
- test_build_id = 'remote/lumpy/latest-dev'
- test_empty_autotest_path = ''
- test_empty_debug_path = ''
- test_autotest_path = '/tmp/autotest'
- test_debug_path = '/tmp/debug'
- download_debug = True
-
- # Set values to test/check.
- self.called_download_image = False
- self.called_uncompress_image = False
- self.called_get_build_id = False
- self.called_download_autotest_files = False
- self.called_download_debug_file = False
-
- # Define fake stub functions for Run to call
- def FakeGetBuildID(unused_root, unused_xbuddy_label):
- self.called_get_build_id = True
- return 'lumpy-release/R36-5814.0.0'
-
- def GoodDownloadImage(root, build_id, image_path):
- if root or build_id or image_path:
- pass
- self.called_download_image = True
- return 'chromiumos_test_image.bin'
-
- def BadDownloadImage(root, build_id, image_path):
- if root or build_id or image_path:
- pass
- self.called_download_image = True
- raise download_images.MissingImage('Could not download image')
-
- def FakeUncompressImage(root, build_id):
- if root or build_id:
- pass
- self.called_uncompress_image = True
- return 0
-
- def FakeDownloadAutotestFiles(root, build_id):
- if root or build_id:
- pass
- self.called_download_autotest_files = True
- return 'autotest'
-
- def FakeDownloadDebugFile(root, build_id):
- if root or build_id:
- pass
- self.called_download_debug_file = True
- return 'debug'
-
- # Initialize downloader
- downloader = download_images.ImageDownloader(logger_to_use=MOCK_LOGGER)
-
- # Set downloader to call fake stubs.
- downloader.GetBuildID = FakeGetBuildID
- downloader.UncompressImage = FakeUncompressImage
- downloader.DownloadImage = GoodDownloadImage
- downloader.DownloadAutotestFiles = FakeDownloadAutotestFiles
- downloader.DownloadDebugFile = FakeDownloadDebugFile
-
- # Call Run.
- image_path, autotest_path, debug_path = downloader.Run(
- test_chroot, test_build_id, test_empty_autotest_path,
- test_empty_debug_path, download_debug)
-
- # Make sure it called both _DownloadImage and _UncompressImage
- self.assertTrue(self.called_download_image)
- self.assertTrue(self.called_uncompress_image)
- # Make sure it called DownloadAutotestFiles
- self.assertTrue(self.called_download_autotest_files)
- # Make sure it called DownloadDebugFile
- self.assertTrue(self.called_download_debug_file)
- # Make sure it returned an image and autotest path returned from this call
- self.assertTrue(image_path == 'chromiumos_test_image.bin')
- self.assertTrue(autotest_path == 'autotest')
- self.assertTrue(debug_path == 'debug')
-
- # Call Run with a non-empty autotest and debug path
- self.called_download_autotest_files = False
- self.called_download_debug_file = False
-
- image_path, autotest_path, debug_path = downloader.Run(
- test_chroot, test_build_id, test_autotest_path, test_debug_path,
- download_debug)
-
- # Verify that downloadAutotestFiles was not called
- self.assertFalse(self.called_download_autotest_files)
- # Make sure it returned the specified autotest path returned from this call
- self.assertTrue(autotest_path == test_autotest_path)
- # Make sure it returned the specified debug path returned from this call
- self.assertTrue(debug_path == test_debug_path)
-
- # Reset values; Now use fake stub that simulates DownloadImage failing.
- self.called_download_image = False
- self.called_uncompress_image = False
- self.called_download_autotest_files = False
- self.called_download_debug_file = False
- downloader.DownloadImage = BadDownloadImage
-
- # Call Run again.
- self.assertRaises(download_images.MissingImage, downloader.Run, test_chroot,
- test_autotest_path, test_debug_path, test_build_id,
- download_debug)
-
- # Verify that UncompressImage and downloadAutotestFiles were not called,
- # since _DownloadImage "failed"
- self.assertTrue(self.called_download_image)
- self.assertFalse(self.called_uncompress_image)
- self.assertFalse(self.called_download_autotest_files)
- self.assertFalse(self.called_download_debug_file)
-
-
-if __name__ == '__main__':
- unittest.main()
+ """The image downloader test class."""
+
+ def __init__(self, *args, **kwargs):
+ super(ImageDownloaderTestcast, self).__init__(*args, **kwargs)
+ self.called_download_image = False
+ self.called_uncompress_image = False
+ self.called_get_build_id = False
+ self.called_download_autotest_files = False
+ self.called_download_debug_file = False
+
+ @mock.patch.object(os, "makedirs")
+ @mock.patch.object(os.path, "exists")
+ def test_download_image(self, mock_path_exists, mock_mkdirs):
+
+ # Set mock and test values.
+ mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+ test_chroot = "/usr/local/home/chromeos"
+ test_build_id = "lumpy-release/R36-5814.0.0"
+ image_path = (
+ "gs://chromeos-image-archive/%s/chromiumos_test_image.tar.xz"
+ % test_build_id
+ )
+
+ downloader = download_images.ImageDownloader(
+ logger_to_use=MOCK_LOGGER, cmd_exec=mock_cmd_exec
+ )
+
+ # Set os.path.exists to always return False and run downloader
+ mock_path_exists.return_value = False
+ test_flag.SetTestMode(True)
+ self.assertRaises(
+ download_images.MissingImage,
+ downloader.DownloadImage,
+ test_chroot,
+ test_build_id,
+ image_path,
+ )
+
+ # Verify os.path.exists was called twice, with proper arguments.
+ self.assertEqual(mock_path_exists.call_count, 2)
+ mock_path_exists.assert_called_with(
+ "/usr/local/home/chromeos/chroot/tmp/lumpy-release/"
+ "R36-5814.0.0/chromiumos_test_image.bin"
+ )
+ mock_path_exists.assert_any_call(
+ "/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0"
+ )
+
+ # Verify we called os.mkdirs
+ self.assertEqual(mock_mkdirs.call_count, 1)
+ mock_mkdirs.assert_called_with(
+ "/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0"
+ )
+
+ # Verify we called RunCommand once, with proper arguments.
+ self.assertEqual(mock_cmd_exec.RunCommand.call_count, 1)
+ expected_args = (
+ "/usr/local/home/chromeos/src/chromium/depot_tools/gsutil.py "
+ "cp gs://chromeos-image-archive/lumpy-release/R36-5814.0.0/"
+ "chromiumos_test_image.tar.xz "
+ "/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0"
+ )
+
+ mock_cmd_exec.RunCommand.assert_called_with(expected_args)
+
+        # Reset the values in the mocks; set os.path.exists to always return True.
+ mock_path_exists.reset_mock()
+ mock_cmd_exec.reset_mock()
+ mock_path_exists.return_value = True
+
+ # Run downloader
+ downloader.DownloadImage(test_chroot, test_build_id, image_path)
+
+ # Verify os.path.exists was called twice, with proper arguments.
+ self.assertEqual(mock_path_exists.call_count, 2)
+ mock_path_exists.assert_called_with(
+ "/usr/local/home/chromeos/chroot/tmp/lumpy-release/"
+ "R36-5814.0.0/chromiumos_test_image.bin"
+ )
+ mock_path_exists.assert_any_call(
+ "/usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0"
+ )
+
+ # Verify we made no RunCommand or ChrootRunCommand calls (since
+        # os.path.exists returned True, there was no work to be done).
+ self.assertEqual(mock_cmd_exec.RunCommand.call_count, 0)
+ self.assertEqual(mock_cmd_exec.ChrootRunCommand.call_count, 0)
+
+ @mock.patch.object(os.path, "exists")
+ def test_uncompress_image(self, mock_path_exists):
+
+ # set mock and test values.
+ mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+ test_chroot = "/usr/local/home/chromeos"
+ test_build_id = "lumpy-release/R36-5814.0.0"
+
+ downloader = download_images.ImageDownloader(
+ logger_to_use=MOCK_LOGGER, cmd_exec=mock_cmd_exec
+ )
+
+ # Set os.path.exists to always return False and run uncompress.
+ mock_path_exists.return_value = False
+ self.assertRaises(
+ download_images.MissingImage,
+ downloader.UncompressImage,
+ test_chroot,
+ test_build_id,
+ )
+
+ # Verify os.path.exists was called once, with correct arguments.
+ self.assertEqual(mock_path_exists.call_count, 1)
+ mock_path_exists.assert_called_with(
+ "/usr/local/home/chromeos/chroot/tmp/lumpy-release/"
+ "R36-5814.0.0/chromiumos_test_image.bin"
+ )
+
+ # Verify RunCommand was called twice with correct arguments.
+ self.assertEqual(mock_cmd_exec.RunCommand.call_count, 2)
+ # Call 1, should have 2 arguments
+ self.assertEqual(len(mock_cmd_exec.RunCommand.call_args_list[0]), 2)
+ actual_arg = mock_cmd_exec.RunCommand.call_args_list[0][0]
+ expected_arg = (
+ "cd /usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0 ; "
+ "tar -Jxf chromiumos_test_image.tar.xz ",
+ )
+ self.assertEqual(expected_arg, actual_arg)
+ # 2nd arg must be exception handler
+ except_handler_string = "RunCommandExceptionHandler.HandleException"
+ self.assertTrue(
+ except_handler_string
+ in repr(mock_cmd_exec.RunCommand.call_args_list[0][1])
+ )
+
+ # Call 2, should have 2 arguments
+ self.assertEqual(len(mock_cmd_exec.RunCommand.call_args_list[1]), 2)
+ actual_arg = mock_cmd_exec.RunCommand.call_args_list[1][0]
+ expected_arg = (
+ "cd /usr/local/home/chromeos/chroot/tmp/lumpy-release/R36-5814.0.0 ; "
+ "rm -f chromiumos_test_image.bin ",
+ )
+ self.assertEqual(expected_arg, actual_arg)
+ # 2nd arg must be empty
+ self.assertTrue(
+ "{}" in repr(mock_cmd_exec.RunCommand.call_args_list[1][1])
+ )
+
+ # Set os.path.exists to always return True and run uncompress.
+ mock_path_exists.reset_mock()
+ mock_cmd_exec.reset_mock()
+ mock_path_exists.return_value = True
+ downloader.UncompressImage(test_chroot, test_build_id)
+
+ # Verify os.path.exists was called once, with correct arguments.
+ self.assertEqual(mock_path_exists.call_count, 1)
+ mock_path_exists.assert_called_with(
+ "/usr/local/home/chromeos/chroot/tmp/lumpy-release/"
+ "R36-5814.0.0/chromiumos_test_image.bin"
+ )
+
+ # Verify RunCommand was not called.
+ self.assertEqual(mock_cmd_exec.RunCommand.call_count, 0)
+
+ def test_run(self):
+
+ # Set test arguments
+ test_chroot = "/usr/local/home/chromeos"
+ test_build_id = "remote/lumpy/latest-dev"
+ test_empty_autotest_path = ""
+ test_empty_debug_path = ""
+ test_autotest_path = "/tmp/autotest"
+ test_debug_path = "/tmp/debug"
+ download_debug = True
+
+ # Set values to test/check.
+ self.called_download_image = False
+ self.called_uncompress_image = False
+ self.called_get_build_id = False
+ self.called_download_autotest_files = False
+ self.called_download_debug_file = False
+
+ # Define fake stub functions for Run to call
+ def FakeGetBuildID(unused_root, unused_xbuddy_label):
+ self.called_get_build_id = True
+ return "lumpy-release/R36-5814.0.0"
+
+ def GoodDownloadImage(root, build_id, image_path):
+ if root or build_id or image_path:
+ pass
+ self.called_download_image = True
+ return "chromiumos_test_image.bin"
+
+ def BadDownloadImage(root, build_id, image_path):
+ if root or build_id or image_path:
+ pass
+ self.called_download_image = True
+ raise download_images.MissingImage("Could not download image")
+
+ def FakeUncompressImage(root, build_id):
+ if root or build_id:
+ pass
+ self.called_uncompress_image = True
+ return 0
+
+ def FakeDownloadAutotestFiles(root, build_id):
+ if root or build_id:
+ pass
+ self.called_download_autotest_files = True
+ return "autotest"
+
+ def FakeDownloadDebugFile(root, build_id):
+ if root or build_id:
+ pass
+ self.called_download_debug_file = True
+ return "debug"
+
+ # Initialize downloader
+ downloader = download_images.ImageDownloader(logger_to_use=MOCK_LOGGER)
+
+ # Set downloader to call fake stubs.
+ downloader.GetBuildID = FakeGetBuildID
+ downloader.UncompressImage = FakeUncompressImage
+ downloader.DownloadImage = GoodDownloadImage
+ downloader.DownloadAutotestFiles = FakeDownloadAutotestFiles
+ downloader.DownloadDebugFile = FakeDownloadDebugFile
+
+ # Call Run.
+ image_path, autotest_path, debug_path = downloader.Run(
+ test_chroot,
+ test_build_id,
+ test_empty_autotest_path,
+ test_empty_debug_path,
+ download_debug,
+ )
+
+ # Make sure it called both _DownloadImage and _UncompressImage
+ self.assertTrue(self.called_download_image)
+ self.assertTrue(self.called_uncompress_image)
+ # Make sure it called DownloadAutotestFiles
+ self.assertTrue(self.called_download_autotest_files)
+ # Make sure it called DownloadDebugFile
+ self.assertTrue(self.called_download_debug_file)
+        # Make sure it returned the image, autotest, and debug paths from this call
+ self.assertTrue(image_path == "chromiumos_test_image.bin")
+ self.assertTrue(autotest_path == "autotest")
+ self.assertTrue(debug_path == "debug")
+
+ # Call Run with a non-empty autotest and debug path
+ self.called_download_autotest_files = False
+ self.called_download_debug_file = False
+
+ image_path, autotest_path, debug_path = downloader.Run(
+ test_chroot,
+ test_build_id,
+ test_autotest_path,
+ test_debug_path,
+ download_debug,
+ )
+
+ # Verify that downloadAutotestFiles was not called
+ self.assertFalse(self.called_download_autotest_files)
+        # Make sure it returned the specified autotest path from this call
+ self.assertTrue(autotest_path == test_autotest_path)
+        # Make sure it returned the specified debug path from this call
+ self.assertTrue(debug_path == test_debug_path)
+
+ # Reset values; Now use fake stub that simulates DownloadImage failing.
+ self.called_download_image = False
+ self.called_uncompress_image = False
+ self.called_download_autotest_files = False
+ self.called_download_debug_file = False
+ downloader.DownloadImage = BadDownloadImage
+
+ # Call Run again.
+ self.assertRaises(
+ download_images.MissingImage,
+ downloader.Run,
+ test_chroot,
+            test_build_id,
+            test_autotest_path,
+            test_debug_path,
+ download_debug,
+ )
+
+ # Verify that UncompressImage and downloadAutotestFiles were not called,
+ # since _DownloadImage "failed"
+ self.assertTrue(self.called_download_image)
+ self.assertFalse(self.called_uncompress_image)
+ self.assertFalse(self.called_download_autotest_files)
+ self.assertFalse(self.called_download_debug_file)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/crosperf/experiment.py b/crosperf/experiment.py
index e919f6ee..9973f7e9 100644
--- a/crosperf/experiment.py
+++ b/crosperf/experiment.py
@@ -1,21 +1,18 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The experiment setting module."""
-from __future__ import print_function
import os
-import time
-
from threading import Lock
+import time
+import benchmark_run
from cros_utils import logger
from cros_utils import misc
-
-import benchmark_run
from machine_manager import BadChecksum
from machine_manager import MachineManager
from machine_manager import MockMachineManager
@@ -23,208 +20,249 @@ import test_flag
class Experiment(object):
- """Class representing an Experiment to be run."""
-
- def __init__(self, name, remote, working_directory, chromeos_root,
- cache_conditions, labels, benchmarks, experiment_file, email_to,
- acquire_timeout, log_dir, log_level, share_cache,
- results_directory, compress_results, locks_directory, cwp_dso,
- ignore_min_max, crosfleet, dut_config, no_lock: bool):
- self.name = name
- self.working_directory = working_directory
- self.remote = remote
- self.chromeos_root = chromeos_root
- self.cache_conditions = cache_conditions
- self.experiment_file = experiment_file
- self.email_to = email_to
- if not results_directory:
- self.results_directory = os.path.join(self.working_directory,
- self.name + '_results')
- else:
- self.results_directory = misc.CanonicalizePath(results_directory)
- self.compress_results = compress_results
- self.log_dir = log_dir
- self.log_level = log_level
- self.labels = labels
- self.benchmarks = benchmarks
- self.num_complete = 0
- self.num_run_complete = 0
- self.share_cache = share_cache
- self.active_threads = []
- self.locks_dir = locks_directory
- self.locked_machines = []
- self.lock_mgr = None
- self.cwp_dso = cwp_dso
- self.ignore_min_max = ignore_min_max
- self.crosfleet = crosfleet
- self.no_lock = no_lock
- self.l = logger.GetLogger(log_dir)
-
- if not self.benchmarks:
- raise RuntimeError('No benchmarks specified')
- if not self.labels:
- raise RuntimeError('No labels specified')
- if not remote and not self.crosfleet:
- raise RuntimeError('No remote hosts specified')
-
- # We need one chromeos_root to run the benchmarks in, but it doesn't
- # matter where it is, unless the ABIs are different.
- if not chromeos_root:
- for label in self.labels:
- if label.chromeos_root:
- chromeos_root = label.chromeos_root
- break
- if not chromeos_root:
- raise RuntimeError('No chromeos_root given and could not determine '
- 'one from the image path.')
-
- machine_manager_fn = MachineManager
- if test_flag.GetTestMode():
- machine_manager_fn = MockMachineManager
- self.machine_manager = machine_manager_fn(chromeos_root, acquire_timeout,
- log_level, locks_directory)
- self.l = logger.GetLogger(log_dir)
-
- for machine in self.remote:
- # machine_manager.AddMachine only adds reachable machines.
- self.machine_manager.AddMachine(machine)
- # Now machine_manager._all_machines contains a list of reachable
- # machines. This is a subset of self.remote. We make both lists the same.
- self.remote = [m.name for m in self.machine_manager.GetAllMachines()]
- if not self.remote:
- raise RuntimeError('No machine available for running experiment.')
-
- # Initialize checksums for all machines, ignore errors at this time.
- # The checksum will be double checked, and image will be flashed after
- # duts are locked/leased.
- self.SetCheckSums()
-
- self.start_time = None
- self.benchmark_runs = self._GenerateBenchmarkRuns(dut_config)
-
- self._schedv2 = None
- self._internal_counter_lock = Lock()
-
- def set_schedv2(self, schedv2):
- self._schedv2 = schedv2
-
- def schedv2(self):
- return self._schedv2
-
- def _GenerateBenchmarkRuns(self, dut_config):
- """Generate benchmark runs from labels and benchmark defintions."""
- benchmark_runs = []
- for label in self.labels:
- for benchmark in self.benchmarks:
- for iteration in range(1, benchmark.iterations + 1):
-
- benchmark_run_name = '%s: %s (%s)' % (label.name, benchmark.name,
- iteration)
- full_name = '%s_%s_%s' % (label.name, benchmark.name, iteration)
- logger_to_use = logger.Logger(self.log_dir, 'run.%s' % (full_name),
- True)
- benchmark_runs.append(
- benchmark_run.BenchmarkRun(benchmark_run_name, benchmark, label,
- iteration, self.cache_conditions,
- self.machine_manager, logger_to_use,
- self.log_level, self.share_cache,
- dut_config))
-
- return benchmark_runs
-
- def SetCheckSums(self, forceSameImage=False):
- for label in self.labels:
- # We filter out label remotes that are not reachable (not in
- # self.remote). So each label.remote is a sublist of experiment.remote.
- label.remote = [r for r in label.remote if r in self.remote]
- try:
- self.machine_manager.ComputeCommonCheckSum(label)
- except BadChecksum:
- # Force same image on all machines, then we do checksum again. No
- # bailout if checksums still do not match.
- # TODO (zhizhouy): Need to figure out how flashing image will influence
- # the new checksum.
- if forceSameImage:
- self.machine_manager.ForceSameImageToAllMachines(label)
- self.machine_manager.ComputeCommonCheckSum(label)
-
- self.machine_manager.ComputeCommonCheckSumString(label)
-
- def Build(self):
- pass
-
- def Terminate(self):
- if self._schedv2 is not None:
- self._schedv2.terminate()
- else:
- for t in self.benchmark_runs:
- if t.isAlive():
- self.l.LogError("Terminating run: '%s'." % t.name)
- t.Terminate()
-
- def IsComplete(self):
- if self._schedv2:
- return self._schedv2.is_complete()
- if self.active_threads:
- for t in self.active_threads:
- if t.isAlive():
- t.join(0)
- if not t.isAlive():
- self.num_complete += 1
- if not t.cache_hit:
- self.num_run_complete += 1
- self.active_threads.remove(t)
- return False
- return True
-
- def BenchmarkRunFinished(self, br):
- """Update internal counters after br finishes.
-
- Note this is only used by schedv2 and is called by multiple threads.
- Never throw any exception here.
- """
-
- assert self._schedv2 is not None
- with self._internal_counter_lock:
- self.num_complete += 1
- if not br.cache_hit:
- self.num_run_complete += 1
-
- def Run(self):
- self.start_time = time.time()
- if self._schedv2 is not None:
- self._schedv2.run_sched()
- else:
- self.active_threads = []
- for run in self.benchmark_runs:
- # Set threads to daemon so program exits when ctrl-c is pressed.
- run.daemon = True
- run.start()
- self.active_threads.append(run)
-
- def SetCacheConditions(self, cache_conditions):
- for run in self.benchmark_runs:
- run.SetCacheConditions(cache_conditions)
-
- def Cleanup(self):
- """Make sure all machines are unlocked."""
- if self.locks_dir:
- # We are using the file locks mechanism, so call machine_manager.Cleanup
- # to unlock everything.
- self.machine_manager.Cleanup()
-
- if test_flag.GetTestMode() or not self.locked_machines:
- return
-
- # If we locked any machines earlier, make sure we unlock them now.
- if self.lock_mgr:
- machine_states = self.lock_mgr.GetMachineStates('unlock')
- self.lock_mgr.CheckMachineLocks(machine_states, 'unlock')
- unlocked_machines = self.lock_mgr.UpdateMachines(False)
- failed_machines = [
- m for m in self.locked_machines if m not in unlocked_machines
- ]
- if failed_machines:
- raise RuntimeError('These machines are not unlocked correctly: %s' %
- failed_machines)
- self.lock_mgr = None
+ """Class representing an Experiment to be run."""
+
+ def __init__(
+ self,
+ name,
+ remote,
+ working_directory,
+ chromeos_root,
+ cache_conditions,
+ labels,
+ benchmarks,
+ experiment_file,
+ email_to,
+ acquire_timeout,
+ log_dir,
+ log_level,
+ share_cache,
+ results_directory,
+ compress_results,
+ locks_directory,
+ cwp_dso,
+ ignore_min_max,
+ crosfleet,
+ dut_config,
+ no_lock: bool,
+ ):
+ self.name = name
+ self.working_directory = working_directory
+ self.remote = remote
+ self.chromeos_root = chromeos_root
+ self.cache_conditions = cache_conditions
+ self.experiment_file = experiment_file
+ self.email_to = email_to
+ if not results_directory:
+ self.results_directory = os.path.join(
+ self.working_directory, self.name + "_results"
+ )
+ else:
+ self.results_directory = misc.CanonicalizePath(results_directory)
+ self.compress_results = compress_results
+ self.log_dir = log_dir
+ self.log_level = log_level
+ self.labels = labels
+ self.benchmarks = benchmarks
+ self.num_complete = 0
+ self.num_run_complete = 0
+ self.share_cache = share_cache
+ self.active_threads = []
+ self.locks_dir = locks_directory
+ self.locked_machines = []
+ self.lock_mgr = None
+ self.cwp_dso = cwp_dso
+ self.ignore_min_max = ignore_min_max
+ self.crosfleet = crosfleet
+ self.no_lock = no_lock
+ self.l = logger.GetLogger(log_dir)
+
+ if not self.benchmarks:
+ raise RuntimeError("No benchmarks specified")
+ if not self.labels:
+ raise RuntimeError("No labels specified")
+ if not remote and not self.crosfleet:
+ raise RuntimeError("No remote hosts specified")
+
+ # We need one chromeos_root to run the benchmarks in, but it doesn't
+ # matter where it is, unless the ABIs are different.
+ if not chromeos_root:
+ for label in self.labels:
+ if label.chromeos_root:
+ chromeos_root = label.chromeos_root
+ break
+ if not chromeos_root:
+ raise RuntimeError(
+ "No chromeos_root given and could not determine "
+ "one from the image path."
+ )
+
+ machine_manager_fn = MachineManager
+ if test_flag.GetTestMode():
+ machine_manager_fn = MockMachineManager
+ self.machine_manager = machine_manager_fn(
+ chromeos_root, acquire_timeout, log_level, locks_directory
+ )
+ self.l = logger.GetLogger(log_dir)
+
+ for machine in self.remote:
+ # machine_manager.AddMachine only adds reachable machines.
+ self.machine_manager.AddMachine(machine)
+ # Now machine_manager._all_machines contains a list of reachable
+ # machines. This is a subset of self.remote. We make both lists the same.
+ self.remote = [m.name for m in self.machine_manager.GetAllMachines()]
+ if not self.remote:
+ raise RuntimeError("No machine available for running experiment.")
+
+ # Initialize checksums for all machines, ignore errors at this time.
+ # The checksum will be double checked, and image will be flashed after
+ # duts are locked/leased.
+ self.SetCheckSums()
+
+ self.start_time = None
+ self.benchmark_runs = self._GenerateBenchmarkRuns(dut_config)
+
+ self._schedv2 = None
+ self._internal_counter_lock = Lock()
+
+ def set_schedv2(self, schedv2):
+ self._schedv2 = schedv2
+
+ def schedv2(self):
+ return self._schedv2
+
+ def _GenerateBenchmarkRuns(self, dut_config):
+ """Generate benchmark runs from labels and benchmark defintions."""
+ benchmark_runs = []
+ for label in self.labels:
+ for benchmark in self.benchmarks:
+ for iteration in range(1, benchmark.iterations + 1):
+
+ benchmark_run_name = "%s: %s (%s)" % (
+ label.name,
+ benchmark.name,
+ iteration,
+ )
+ full_name = "%s_%s_%s" % (
+ label.name,
+ benchmark.name,
+ iteration,
+ )
+ logger_to_use = logger.Logger(
+ self.log_dir, "run.%s" % (full_name), True
+ )
+ benchmark_runs.append(
+ benchmark_run.BenchmarkRun(
+ benchmark_run_name,
+ benchmark,
+ label,
+ iteration,
+ self.cache_conditions,
+ self.machine_manager,
+ logger_to_use,
+ self.log_level,
+ self.share_cache,
+ dut_config,
+ )
+ )
+
+ return benchmark_runs
+
+ def SetCheckSums(self, forceSameImage=False):
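+        """Compute a common image checksum for each label across its reachable remotes."""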
+ for label in self.labels:
+ # We filter out label remotes that are not reachable (not in
+ # self.remote). So each label.remote is a sublist of experiment.remote.
+ label.remote = [r for r in label.remote if r in self.remote]
+ try:
+ self.machine_manager.ComputeCommonCheckSum(label)
+ except BadChecksum:
+ # Force same image on all machines, then we do checksum again. No
+ # bailout if checksums still do not match.
+                # TODO (zhizhouy): Need to figure out how flashing the image will influence
+ # the new checksum.
+ if forceSameImage:
+ self.machine_manager.ForceSameImageToAllMachines(label)
+ self.machine_manager.ComputeCommonCheckSum(label)
+
+ self.machine_manager.ComputeCommonCheckSumString(label)
+
+ def Build(self):
+ pass
+
+ def Terminate(self):
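+        """Terminate any benchmark runs that are still alive."""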
+ if self._schedv2 is not None:
+ self._schedv2.terminate()
+ else:
+ for t in self.benchmark_runs:
+ if t.isAlive():
+ self.l.LogError("Terminating run: '%s'." % t.name)
+ t.Terminate()
+
+ def IsComplete(self):
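+        """Return True once all benchmark runs have finished; update completion counters."""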
+ if self._schedv2:
+ return self._schedv2.is_complete()
+ if self.active_threads:
+ for t in self.active_threads:
+ if t.isAlive():
+ t.join(0)
+ if not t.isAlive():
+ self.num_complete += 1
+ if not t.cache_hit:
+ self.num_run_complete += 1
+ self.active_threads.remove(t)
+ return False
+ return True
+
+ def BenchmarkRunFinished(self, br):
+ """Update internal counters after br finishes.
+
+ Note this is only used by schedv2 and is called by multiple threads.
+ Never throw any exception here.
+ """
+
+ assert self._schedv2 is not None
+ with self._internal_counter_lock:
+ self.num_complete += 1
+ if not br.cache_hit:
+ self.num_run_complete += 1
+
+ def Run(self):
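+        """Start the experiment, either via schedv2 or one daemon thread per benchmark run."""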
+ self.start_time = time.time()
+ if self._schedv2 is not None:
+ self._schedv2.run_sched()
+ else:
+ self.active_threads = []
+ for run in self.benchmark_runs:
+ # Set threads to daemon so program exits when ctrl-c is pressed.
+ run.daemon = True
+ run.start()
+ self.active_threads.append(run)
+
+ def SetCacheConditions(self, cache_conditions):
+ for run in self.benchmark_runs:
+ run.SetCacheConditions(cache_conditions)
+
+ def Cleanup(self):
+ """Make sure all machines are unlocked."""
+ if self.locks_dir:
+ # We are using the file locks mechanism, so call machine_manager.Cleanup
+ # to unlock everything.
+ self.machine_manager.Cleanup()
+
+ if test_flag.GetTestMode() or not self.locked_machines:
+ return
+
+ # If we locked any machines earlier, make sure we unlock them now.
+ if self.lock_mgr:
+ machine_states = self.lock_mgr.GetMachineStates("unlock")
+ self.lock_mgr.CheckMachineLocks(machine_states, "unlock")
+ unlocked_machines = self.lock_mgr.UpdateMachines(False)
+ failed_machines = [
+ m for m in self.locked_machines if m not in unlocked_machines
+ ]
+ if failed_machines:
+ raise RuntimeError(
+ "These machines are not unlocked correctly: %s"
+ % failed_machines
+ )
+ self.lock_mgr = None
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index a9594a20..c71981ab 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -1,81 +1,87 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module to generate experiments."""
-from __future__ import print_function
+
import os
import re
import socket
import sys
from benchmark import Benchmark
-import config
-from cros_utils import logger
from cros_utils import command_executer
+from cros_utils import logger
from experiment import Experiment
+import file_lock_machine
from label import Label
from label import MockLabel
from results_cache import CacheConditions
import test_flag
-import file_lock_machine
+
+import config
+
# Users may want to run Telemetry tests either individually, or in
# specified sets. Here we define sets of tests that users may want
# to run together.
telemetry_perfv2_tests = [
- 'kraken',
- 'octane',
+ "kraken",
+ "octane",
]
telemetry_pagecycler_tests = [
- 'page_cycler_v2.intl_ar_fa_he',
- 'page_cycler_v2.intl_es_fr_pt-BR',
- 'page_cycler_v2.intl_hi_ru',
- 'page_cycler_v2.intl_ja_zh',
- 'page_cycler_v2.intl_ko_th_vi',
- 'page_cycler_v2.typical_25',
+ "page_cycler_v2.intl_ar_fa_he",
+ "page_cycler_v2.intl_es_fr_pt-BR",
+ "page_cycler_v2.intl_hi_ru",
+ "page_cycler_v2.intl_ja_zh",
+ "page_cycler_v2.intl_ko_th_vi",
+ "page_cycler_v2.typical_25",
]
telemetry_toolchain_old_perf_tests = [
- 'page_cycler_v2.intl_es_fr_pt-BR',
- 'page_cycler_v2.intl_hi_ru',
- 'page_cycler_v2.intl_ja_zh',
- 'page_cycler_v2.intl_ko_th_vi',
- 'page_cycler_v2.netsim.top_10',
- 'page_cycler_v2.typical_25',
- 'spaceport',
- 'tab_switching.top_10',
+ "page_cycler_v2.intl_es_fr_pt-BR",
+ "page_cycler_v2.intl_hi_ru",
+ "page_cycler_v2.intl_ja_zh",
+ "page_cycler_v2.intl_ko_th_vi",
+ "page_cycler_v2.netsim.top_10",
+ "page_cycler_v2.typical_25",
+ "spaceport",
+ "tab_switching.top_10",
]
telemetry_toolchain_perf_tests = [
- 'octane', 'kraken', 'speedometer', 'speedometer2', 'jetstream2'
+ "octane",
+ "kraken",
+ "speedometer",
+ "speedometer2",
+ "jetstream2",
]
graphics_perf_tests = [
- 'graphics_GLBench',
- 'graphics_GLMark2',
- 'graphics_SanAngeles',
- 'graphics_WebGLAquarium',
- 'graphics_WebGLPerformance',
+ "graphics_GLBench",
+ "graphics_GLMark2",
+ "graphics_SanAngeles",
+ "graphics_WebGLAquarium",
+ "graphics_WebGLPerformance",
]
# TODO: disable rendering.desktop by default as the benchmark is
# currently in a bad state
# page_cycler_v2.typical_25 is deprecated and the recommend replacement is
# loading.desktop@@typical (crbug.com/916340)
telemetry_crosbolt_perf_tests = [
- 'octane',
- 'kraken',
- 'speedometer2',
- 'jetstream',
- 'loading.desktop',
+ "octane",
+ "kraken",
+ "speedometer2",
+ "jetstream",
+ "loading.desktop",
# 'rendering.desktop',
]
crosbolt_perf_tests = [
- 'graphics_WebGLAquarium',
- 'tast.video.PlaybackPerfVP91080P30FPS',
+ "graphics_WebGLAquarium",
+ "tast.video.PlaybackPerfVP91080P30FPS",
]
# 'cheets_AntutuTest',
@@ -85,424 +91,582 @@ crosbolt_perf_tests = [
# ]
dso_list = [
- 'all',
- 'chrome',
- 'kallsyms',
+ "all",
+ "chrome",
+ "kallsyms",
]
class ExperimentFactory(object):
- """Factory class for building an Experiment, given an ExperimentFile as input.
-
- This factory is currently hardcoded to produce an experiment for running
- ChromeOS benchmarks, but the idea is that in the future, other types
- of experiments could be produced.
- """
-
- def AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
- iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results, retries, run_local, cwp_dso,
- weight):
- """Add all the tests in a set to the benchmarks list."""
- for test_name in benchmark_list:
- telemetry_benchmark = Benchmark(test_name, test_name, test_args,
- iterations, rm_chroot_tmp, perf_args,
- suite, show_all_results, retries,
- run_local, cwp_dso, weight)
- benchmarks.append(telemetry_benchmark)
-
- def GetExperiment(self, experiment_file, working_directory, log_dir):
- """Construct an experiment from an experiment file."""
- global_settings = experiment_file.GetGlobalSettings()
- experiment_name = global_settings.GetField('name')
- board = global_settings.GetField('board')
- chromeos_root = global_settings.GetField('chromeos_root')
- log_level = global_settings.GetField('logging_level')
- if log_level not in ('quiet', 'average', 'verbose'):
- log_level = 'verbose'
-
- crosfleet = global_settings.GetField('crosfleet')
- no_lock = bool(global_settings.GetField('no_lock'))
- # Check whether crosfleet tool is installed correctly for crosfleet mode.
- if crosfleet and not self.CheckCrosfleetTool(chromeos_root, log_level):
- sys.exit(0)
-
- remote = global_settings.GetField('remote')
- # This is used to remove the ",' from the remote if user
- # add them to the remote string.
- new_remote = []
- if remote:
- for i in remote:
- c = re.sub('["\']', '', i)
- new_remote.append(c)
- remote = new_remote
- rm_chroot_tmp = global_settings.GetField('rm_chroot_tmp')
- perf_args = global_settings.GetField('perf_args')
- download_debug = global_settings.GetField('download_debug')
- # Do not download debug symbols when perf_args is not specified.
- if not perf_args and download_debug:
- download_debug = False
- acquire_timeout = global_settings.GetField('acquire_timeout')
- cache_dir = global_settings.GetField('cache_dir')
- cache_only = global_settings.GetField('cache_only')
- config.AddConfig('no_email', global_settings.GetField('no_email'))
- share_cache = global_settings.GetField('share_cache')
- results_dir = global_settings.GetField('results_dir')
- compress_results = global_settings.GetField('compress_results')
- # Warn user that option use_file_locks is deprecated.
- use_file_locks = global_settings.GetField('use_file_locks')
- if use_file_locks:
- l = logger.GetLogger()
- l.LogWarning('Option use_file_locks is deprecated, please remove it '
- 'from your experiment settings.')
- locks_dir = global_settings.GetField('locks_dir')
- # If not specified, set the locks dir to the default locks dir in
- # file_lock_machine.
- if not locks_dir:
- locks_dir = file_lock_machine.Machine.LOCKS_DIR
- if not os.path.exists(locks_dir):
- raise RuntimeError('Cannot access default lock directory. '
- 'Please run prodaccess or specify a local directory')
- chrome_src = global_settings.GetField('chrome_src')
- show_all_results = global_settings.GetField('show_all_results')
- cwp_dso = global_settings.GetField('cwp_dso')
- if cwp_dso and not cwp_dso in dso_list:
- raise RuntimeError('The DSO specified is not supported')
- ignore_min_max = global_settings.GetField('ignore_min_max')
- dut_config = {
- 'enable_aslr': global_settings.GetField('enable_aslr'),
- 'intel_pstate': global_settings.GetField('intel_pstate'),
- 'cooldown_time': global_settings.GetField('cooldown_time'),
- 'cooldown_temp': global_settings.GetField('cooldown_temp'),
- 'governor': global_settings.GetField('governor'),
- 'cpu_usage': global_settings.GetField('cpu_usage'),
- 'cpu_freq_pct': global_settings.GetField('cpu_freq_pct'),
- 'turbostat': global_settings.GetField('turbostat'),
- 'top_interval': global_settings.GetField('top_interval'),
- }
-
- # Default cache hit conditions. The image checksum in the cache and the
- # computed checksum of the image must match. Also a cache file must exist.
- cache_conditions = [
- CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH
- ]
- if global_settings.GetField('rerun_if_failed'):
- cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
- if global_settings.GetField('rerun'):
- cache_conditions.append(CacheConditions.FALSE)
- if global_settings.GetField('same_machine'):
- cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
- if global_settings.GetField('same_specs'):
- cache_conditions.append(CacheConditions.MACHINES_MATCH)
-
- # Construct benchmarks.
- # Some fields are common with global settings. The values are
- # inherited and/or merged with the global settings values.
- benchmarks = []
- all_benchmark_settings = experiment_file.GetSettings('benchmark')
-
- # Check if there is duplicated benchmark name
- benchmark_names = {}
- # Check if in cwp_dso mode, all benchmarks should have same iterations
- cwp_dso_iterations = 0
-
- for benchmark_settings in all_benchmark_settings:
- benchmark_name = benchmark_settings.name
- test_name = benchmark_settings.GetField('test_name')
- if not test_name:
- test_name = benchmark_name
- test_args = benchmark_settings.GetField('test_args')
-
- # Rename benchmark name if 'story-filter' or 'story-tag-filter' specified
- # in test_args. Make sure these two tags only appear once.
- story_count = 0
- for arg in test_args.split():
- if '--story-filter=' in arg or '--story-tag-filter=' in arg:
- story_count += 1
- if story_count > 1:
- raise RuntimeError('Only one story or story-tag filter allowed in '
- 'a single benchmark run')
- # Rename benchmark name with an extension of 'story'-option
- benchmark_name = '%s@@%s' % (benchmark_name, arg.split('=')[-1])
-
- # Check for duplicated benchmark name after renaming
- if not benchmark_name in benchmark_names:
- benchmark_names[benchmark_name] = True
- else:
- raise SyntaxError("Duplicate benchmark name: '%s'." % benchmark_name)
-
- iterations = benchmark_settings.GetField('iterations')
- if cwp_dso:
- if cwp_dso_iterations not in (0, iterations):
- raise RuntimeError('Iterations of each benchmark run are not the '
- 'same')
- cwp_dso_iterations = iterations
-
- suite = benchmark_settings.GetField('suite')
- retries = benchmark_settings.GetField('retries')
- run_local = benchmark_settings.GetField('run_local')
- weight = benchmark_settings.GetField('weight')
- if weight:
- if not cwp_dso:
- raise RuntimeError('Weight can only be set when DSO specified')
- if suite != 'telemetry_Crosperf':
- raise RuntimeError('CWP approximation weight only works with '
- 'telemetry_Crosperf suite')
- if run_local:
- raise RuntimeError('run_local must be set to False to use CWP '
- 'approximation')
- if weight < 0:
- raise RuntimeError('Weight should be a float >=0')
- elif cwp_dso:
- raise RuntimeError('With DSO specified, each benchmark should have a '
- 'weight')
-
- if suite == 'telemetry_Crosperf':
- if test_name == 'all_perfv2':
- self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests,
- test_args, iterations, rm_chroot_tmp,
- perf_args, suite, show_all_results, retries,
- run_local, cwp_dso, weight)
- elif test_name == 'all_pagecyclers':
- self.AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
- test_args, iterations, rm_chroot_tmp,
- perf_args, suite, show_all_results, retries,
- run_local, cwp_dso, weight)
- elif test_name == 'all_crosbolt_perf':
- self.AppendBenchmarkSet(benchmarks, telemetry_crosbolt_perf_tests,
- test_args, iterations, rm_chroot_tmp,
- perf_args, 'telemetry_Crosperf',
- show_all_results, retries, run_local,
- cwp_dso, weight)
- self.AppendBenchmarkSet(benchmarks,
- crosbolt_perf_tests,
- '',
- iterations,
- rm_chroot_tmp,
- perf_args,
- '',
- show_all_results,
- retries,
- run_local=False,
- cwp_dso=cwp_dso,
- weight=weight)
- elif test_name == 'all_toolchain_perf':
- self.AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
- test_args, iterations, rm_chroot_tmp,
- perf_args, suite, show_all_results, retries,
- run_local, cwp_dso, weight)
- # Add non-telemetry toolchain-perf benchmarks:
-
- # Tast test platform.ReportDiskUsage for image size.
- benchmarks.append(
- Benchmark(
- 'platform.ReportDiskUsage',
- 'platform.ReportDiskUsage',
- '',
- 1, # This is not a performance benchmark, only run once.
- rm_chroot_tmp,
- '',
- 'tast', # Specify the suite to be 'tast'
- show_all_results,
- retries))
-
- # TODO: crbug.com/1057755 Do not enable graphics_WebGLAquarium until
- # it gets fixed.
- #
- # benchmarks.append(
- # Benchmark(
- # 'graphics_WebGLAquarium',
- # 'graphics_WebGLAquarium',
- # '',
- # iterations,
- # rm_chroot_tmp,
- # perf_args,
- # 'crosperf_Wrapper', # Use client wrapper in Autotest
- # show_all_results,
- # retries,
- # run_local=False,
- # cwp_dso=cwp_dso,
- # weight=weight))
- elif test_name == 'all_toolchain_perf_old':
- self.AppendBenchmarkSet(benchmarks,
- telemetry_toolchain_old_perf_tests,
- test_args, iterations, rm_chroot_tmp,
- perf_args, suite, show_all_results, retries,
- run_local, cwp_dso, weight)
- else:
- benchmark = Benchmark(benchmark_name, test_name, test_args,
- iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results, retries, run_local, cwp_dso,
- weight)
- benchmarks.append(benchmark)
- else:
- if test_name == 'all_graphics_perf':
- self.AppendBenchmarkSet(benchmarks,
- graphics_perf_tests,
- '',
- iterations,
- rm_chroot_tmp,
- perf_args,
- '',
- show_all_results,
- retries,
- run_local=False,
- cwp_dso=cwp_dso,
- weight=weight)
- else:
- # Add the single benchmark.
- benchmark = Benchmark(benchmark_name,
- test_name,
- test_args,
- iterations,
- rm_chroot_tmp,
- perf_args,
- suite,
- show_all_results,
- retries,
- run_local=False,
- cwp_dso=cwp_dso,
- weight=weight)
- benchmarks.append(benchmark)
-
- if not benchmarks:
- raise RuntimeError('No benchmarks specified')
-
- # Construct labels.
- # Some fields are common with global settings. The values are
- # inherited and/or merged with the global settings values.
- labels = []
- all_label_settings = experiment_file.GetSettings('label')
- all_remote = list(remote)
- for label_settings in all_label_settings:
- label_name = label_settings.name
- image = label_settings.GetField('chromeos_image')
- build = label_settings.GetField('build')
- autotest_path = label_settings.GetField('autotest_path')
- debug_path = label_settings.GetField('debug_path')
- chromeos_root = label_settings.GetField('chromeos_root')
- my_remote = label_settings.GetField('remote')
- compiler = label_settings.GetField('compiler')
- new_remote = []
- if my_remote:
- for i in my_remote:
- c = re.sub('["\']', '', i)
- new_remote.append(c)
- my_remote = new_remote
-
- if image:
- if crosfleet:
- raise RuntimeError(
- 'In crosfleet mode, local image should not be used.')
- if build:
- raise RuntimeError('Image path and build are provided at the same '
- 'time, please use only one of them.')
- else:
- if not build:
- raise RuntimeError("Can not have empty 'build' field!")
- image, autotest_path, debug_path = label_settings.GetXbuddyPath(
- build, autotest_path, debug_path, board, chromeos_root, log_level,
- download_debug)
-
- cache_dir = label_settings.GetField('cache_dir')
- chrome_src = label_settings.GetField('chrome_src')
-
- # TODO(yunlian): We should consolidate code in machine_manager.py
- # to derermine whether we are running from within google or not
- if ('corp.google.com' in socket.gethostname() and not my_remote
- and not crosfleet):
- my_remote = self.GetDefaultRemotes(board)
- if global_settings.GetField('same_machine') and len(my_remote) > 1:
- raise RuntimeError('Only one remote is allowed when same_machine '
- 'is turned on')
- all_remote += my_remote
- image_args = label_settings.GetField('image_args')
- if test_flag.GetTestMode():
- # pylint: disable=too-many-function-args
- label = MockLabel(label_name, build, image, autotest_path, debug_path,
- chromeos_root, board, my_remote, image_args,
- cache_dir, cache_only, log_level, compiler,
- crosfleet, chrome_src)
- else:
- label = Label(label_name, build, image, autotest_path, debug_path,
- chromeos_root, board, my_remote, image_args, cache_dir,
- cache_only, log_level, compiler, crosfleet, chrome_src)
- labels.append(label)
-
- if not labels:
- raise RuntimeError('No labels specified')
-
- email = global_settings.GetField('email')
- all_remote += list(set(my_remote))
- all_remote = list(set(all_remote))
- if crosfleet:
- for remote in all_remote:
- self.CheckRemotesInCrosfleet(remote)
- experiment = Experiment(experiment_name,
- all_remote,
- working_directory,
- chromeos_root,
- cache_conditions,
- labels,
- benchmarks,
- experiment_file.Canonicalize(),
- email,
- acquire_timeout,
- log_dir,
- log_level,
- share_cache,
- results_dir,
- compress_results,
- locks_dir,
- cwp_dso,
- ignore_min_max,
- crosfleet,
- dut_config,
- no_lock=no_lock)
-
- return experiment
-
- def GetDefaultRemotes(self, board):
- default_remotes_file = os.path.join(os.path.dirname(__file__),
- 'default_remotes')
- try:
- with open(default_remotes_file) as f:
- for line in f:
- key, v = line.split(':')
- if key.strip() == board:
- remotes = v.strip().split()
- if remotes:
- return remotes
+ """Factory class for building an Experiment, given an ExperimentFile as input.
+
+ This factory is currently hardcoded to produce an experiment for running
+ ChromeOS benchmarks, but the idea is that in the future, other types
+ of experiments could be produced.
+ """
+
+ def AppendBenchmarkSet(
+ self,
+ benchmarks,
+ benchmark_list,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local,
+ cwp_dso,
+ weight,
+ ):
+ """Add all the tests in a set to the benchmarks list."""
+ for test_name in benchmark_list:
+ telemetry_benchmark = Benchmark(
+ test_name,
+ test_name,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local,
+ cwp_dso,
+ weight,
+ )
+ benchmarks.append(telemetry_benchmark)
+
+ def GetExperiment(self, experiment_file, working_directory, log_dir):
+ """Construct an experiment from an experiment file."""
+ global_settings = experiment_file.GetGlobalSettings()
+ experiment_name = global_settings.GetField("name")
+ board = global_settings.GetField("board")
+ chromeos_root = global_settings.GetField("chromeos_root")
+ log_level = global_settings.GetField("logging_level")
+ if log_level not in ("quiet", "average", "verbose"):
+ log_level = "verbose"
+
+ crosfleet = global_settings.GetField("crosfleet")
+ no_lock = bool(global_settings.GetField("no_lock"))
+ # Check whether crosfleet tool is installed correctly for crosfleet mode.
+ if crosfleet and not self.CheckCrosfleetTool(chromeos_root, log_level):
+ sys.exit(0)
+
+ remote = global_settings.GetField("remote")
+        # Remove any quote characters (" or ') that the user may have
+        # added to the remote names.
+ new_remote = []
+ if remote:
+ for i in remote:
+ c = re.sub("[\"']", "", i)
+ new_remote.append(c)
+ remote = new_remote
+ rm_chroot_tmp = global_settings.GetField("rm_chroot_tmp")
+ perf_args = global_settings.GetField("perf_args")
+ download_debug = global_settings.GetField("download_debug")
+ # Do not download debug symbols when perf_args is not specified.
+ if not perf_args and download_debug:
+ download_debug = False
+ acquire_timeout = global_settings.GetField("acquire_timeout")
+ cache_dir = global_settings.GetField("cache_dir")
+ cache_only = global_settings.GetField("cache_only")
+ config.AddConfig("no_email", global_settings.GetField("no_email"))
+ share_cache = global_settings.GetField("share_cache")
+ results_dir = global_settings.GetField("results_dir")
+ compress_results = global_settings.GetField("compress_results")
+ # Warn user that option use_file_locks is deprecated.
+ use_file_locks = global_settings.GetField("use_file_locks")
+ if use_file_locks:
+ l = logger.GetLogger()
+ l.LogWarning(
+ "Option use_file_locks is deprecated, please remove it "
+ "from your experiment settings."
+ )
+ locks_dir = global_settings.GetField("locks_dir")
+ # If not specified, set the locks dir to the default locks dir in
+ # file_lock_machine.
+ if not locks_dir:
+ locks_dir = file_lock_machine.Machine.LOCKS_DIR
+ if not os.path.exists(locks_dir):
+ raise RuntimeError(
+ "Cannot access default lock directory. "
+ "Please run prodaccess or specify a local directory"
+ )
+ chrome_src = global_settings.GetField("chrome_src")
+ show_all_results = global_settings.GetField("show_all_results")
+ cwp_dso = global_settings.GetField("cwp_dso")
+ if cwp_dso and not cwp_dso in dso_list:
+ raise RuntimeError("The DSO specified is not supported")
+ ignore_min_max = global_settings.GetField("ignore_min_max")
+ dut_config = {
+ "enable_aslr": global_settings.GetField("enable_aslr"),
+ "intel_pstate": global_settings.GetField("intel_pstate"),
+ "cooldown_time": global_settings.GetField("cooldown_time"),
+ "cooldown_temp": global_settings.GetField("cooldown_temp"),
+ "governor": global_settings.GetField("governor"),
+ "cpu_usage": global_settings.GetField("cpu_usage"),
+ "cpu_freq_pct": global_settings.GetField("cpu_freq_pct"),
+ "turbostat": global_settings.GetField("turbostat"),
+ "top_interval": global_settings.GetField("top_interval"),
+ }
+
+ # Default cache hit conditions. The image checksum in the cache and the
+ # computed checksum of the image must match. Also a cache file must exist.
+ cache_conditions = [
+ CacheConditions.CACHE_FILE_EXISTS,
+ CacheConditions.CHECKSUMS_MATCH,
+ ]
+ if global_settings.GetField("rerun_if_failed"):
+ cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
+ if global_settings.GetField("rerun"):
+ cache_conditions.append(CacheConditions.FALSE)
+ if global_settings.GetField("same_machine"):
+ cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
+ if global_settings.GetField("same_specs"):
+ cache_conditions.append(CacheConditions.MACHINES_MATCH)
+
+ # Construct benchmarks.
+ # Some fields are common with global settings. The values are
+ # inherited and/or merged with the global settings values.
+ benchmarks = []
+ all_benchmark_settings = experiment_file.GetSettings("benchmark")
+
+        # Check for duplicated benchmark names.
+ benchmark_names = {}
+        # In cwp_dso mode, all benchmarks must have the same iteration count.
+ cwp_dso_iterations = 0
+
+ for benchmark_settings in all_benchmark_settings:
+ benchmark_name = benchmark_settings.name
+ test_name = benchmark_settings.GetField("test_name")
+ if not test_name:
+ test_name = benchmark_name
+ test_args = benchmark_settings.GetField("test_args")
+
+            # Rename the benchmark if a 'story-filter' or 'story-tag-filter' is
+            # given in test_args. At most one such filter is allowed per benchmark.
+ story_count = 0
+ for arg in test_args.split():
+ if "--story-filter=" in arg or "--story-tag-filter=" in arg:
+ story_count += 1
+ if story_count > 1:
+ raise RuntimeError(
+ "Only one story or story-tag filter allowed in "
+ "a single benchmark run"
+ )
+ # Rename benchmark name with an extension of 'story'-option
+ benchmark_name = "%s@@%s" % (
+ benchmark_name,
+ arg.split("=")[-1],
+ )
+
+ # Check for duplicated benchmark name after renaming
+ if not benchmark_name in benchmark_names:
+ benchmark_names[benchmark_name] = True
+ else:
+ raise SyntaxError(
+ "Duplicate benchmark name: '%s'." % benchmark_name
+ )
+
+ iterations = benchmark_settings.GetField("iterations")
+ if cwp_dso:
+ if cwp_dso_iterations not in (0, iterations):
+ raise RuntimeError(
+ "Iterations of each benchmark run are not the " "same"
+ )
+ cwp_dso_iterations = iterations
+
+ suite = benchmark_settings.GetField("suite")
+ retries = benchmark_settings.GetField("retries")
+ run_local = benchmark_settings.GetField("run_local")
+ weight = benchmark_settings.GetField("weight")
+ if weight:
+ if not cwp_dso:
+ raise RuntimeError(
+ "Weight can only be set when DSO specified"
+ )
+ if suite != "telemetry_Crosperf":
+ raise RuntimeError(
+ "CWP approximation weight only works with "
+ "telemetry_Crosperf suite"
+ )
+ if run_local:
+ raise RuntimeError(
+ "run_local must be set to False to use CWP "
+ "approximation"
+ )
+ if weight < 0:
+ raise RuntimeError("Weight should be a float >=0")
+ elif cwp_dso:
+ raise RuntimeError(
+ "With DSO specified, each benchmark should have a " "weight"
+ )
+
+ if suite == "telemetry_Crosperf":
+ if test_name == "all_perfv2":
+ self.AppendBenchmarkSet(
+ benchmarks,
+ telemetry_perfv2_tests,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local,
+ cwp_dso,
+ weight,
+ )
+ elif test_name == "all_pagecyclers":
+ self.AppendBenchmarkSet(
+ benchmarks,
+ telemetry_pagecycler_tests,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local,
+ cwp_dso,
+ weight,
+ )
+ elif test_name == "all_crosbolt_perf":
+ self.AppendBenchmarkSet(
+ benchmarks,
+ telemetry_crosbolt_perf_tests,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ "telemetry_Crosperf",
+ show_all_results,
+ retries,
+ run_local,
+ cwp_dso,
+ weight,
+ )
+ self.AppendBenchmarkSet(
+ benchmarks,
+ crosbolt_perf_tests,
+ "",
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ "",
+ show_all_results,
+ retries,
+ run_local=False,
+ cwp_dso=cwp_dso,
+ weight=weight,
+ )
+ elif test_name == "all_toolchain_perf":
+ self.AppendBenchmarkSet(
+ benchmarks,
+ telemetry_toolchain_perf_tests,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local,
+ cwp_dso,
+ weight,
+ )
+ # Add non-telemetry toolchain-perf benchmarks:
+
+ # Tast test platform.ReportDiskUsage for image size.
+ benchmarks.append(
+ Benchmark(
+ "platform.ReportDiskUsage",
+ "platform.ReportDiskUsage",
+ "",
+ 1, # This is not a performance benchmark, only run once.
+ rm_chroot_tmp,
+ "",
+ "tast", # Specify the suite to be 'tast'
+ show_all_results,
+ retries,
+ )
+ )
+
+ # TODO: crbug.com/1057755 Do not enable graphics_WebGLAquarium until
+ # it gets fixed.
+ #
+ # benchmarks.append(
+ # Benchmark(
+ # 'graphics_WebGLAquarium',
+ # 'graphics_WebGLAquarium',
+ # '',
+ # iterations,
+ # rm_chroot_tmp,
+ # perf_args,
+ # 'crosperf_Wrapper', # Use client wrapper in Autotest
+ # show_all_results,
+ # retries,
+ # run_local=False,
+ # cwp_dso=cwp_dso,
+ # weight=weight))
+ elif test_name == "all_toolchain_perf_old":
+ self.AppendBenchmarkSet(
+ benchmarks,
+ telemetry_toolchain_old_perf_tests,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local,
+ cwp_dso,
+ weight,
+ )
+ else:
+ benchmark = Benchmark(
+ benchmark_name,
+ test_name,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local,
+ cwp_dso,
+ weight,
+ )
+ benchmarks.append(benchmark)
else:
- raise RuntimeError('There is no remote for {0}'.format(board))
- except IOError:
- # TODO: rethrow instead of throwing different exception.
- raise RuntimeError(
- 'IOError while reading file {0}'.format(default_remotes_file))
- else:
- raise RuntimeError('There is no remote for {0}'.format(board))
-
- def CheckRemotesInCrosfleet(self, remote):
- # TODO: (AI:zhizhouy) need to check whether a remote is a local or lab
- # machine. If not lab machine, raise an error.
- pass
-
- def CheckCrosfleetTool(self, chromeos_root, log_level):
- CROSFLEET_PATH = 'crosfleet'
- if os.path.exists(CROSFLEET_PATH):
- return True
- l = logger.GetLogger()
- l.LogOutput('Crosfleet tool not installed, trying to install it.')
- ce = command_executer.GetCommandExecuter(l, log_level=log_level)
- setup_lab_tools = os.path.join(chromeos_root, 'chromeos-admin',
- 'lab-tools', 'setup_lab_tools')
- cmd = '%s' % setup_lab_tools
- status = ce.RunCommand(cmd)
- if status != 0:
- raise RuntimeError(
- 'Crosfleet tool not installed correctly, please try to '
- 'manually install it from %s' % setup_lab_tools)
- l.LogOutput('Crosfleet is installed at %s, please login before first use. '
- 'Login by running "crosfleet login" and follow instructions.' %
- CROSFLEET_PATH)
- return False
+ if test_name == "all_graphics_perf":
+ self.AppendBenchmarkSet(
+ benchmarks,
+ graphics_perf_tests,
+ "",
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ "",
+ show_all_results,
+ retries,
+ run_local=False,
+ cwp_dso=cwp_dso,
+ weight=weight,
+ )
+ else:
+ # Add the single benchmark.
+ benchmark = Benchmark(
+ benchmark_name,
+ test_name,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local=False,
+ cwp_dso=cwp_dso,
+ weight=weight,
+ )
+ benchmarks.append(benchmark)
+
+ if not benchmarks:
+ raise RuntimeError("No benchmarks specified")
+
+ # Construct labels.
+ # Some fields are common with global settings. The values are
+ # inherited and/or merged with the global settings values.
+ labels = []
+ all_label_settings = experiment_file.GetSettings("label")
+ all_remote = list(remote)
+ for label_settings in all_label_settings:
+ label_name = label_settings.name
+ image = label_settings.GetField("chromeos_image")
+ build = label_settings.GetField("build")
+ autotest_path = label_settings.GetField("autotest_path")
+ debug_path = label_settings.GetField("debug_path")
+ chromeos_root = label_settings.GetField("chromeos_root")
+ my_remote = label_settings.GetField("remote")
+ compiler = label_settings.GetField("compiler")
+ new_remote = []
+ if my_remote:
+ for i in my_remote:
+ c = re.sub("[\"']", "", i)
+ new_remote.append(c)
+ my_remote = new_remote
+
+ if image:
+ if crosfleet:
+ raise RuntimeError(
+ "In crosfleet mode, local image should not be used."
+ )
+ if build:
+ raise RuntimeError(
+ "Image path and build are provided at the same "
+ "time, please use only one of them."
+ )
+ else:
+ if not build:
+ raise RuntimeError("Can not have empty 'build' field!")
+ image, autotest_path, debug_path = label_settings.GetXbuddyPath(
+ build,
+ autotest_path,
+ debug_path,
+ board,
+ chromeos_root,
+ log_level,
+ download_debug,
+ )
+
+ cache_dir = label_settings.GetField("cache_dir")
+ chrome_src = label_settings.GetField("chrome_src")
+
+ # TODO(yunlian): We should consolidate code in machine_manager.py
+            # to determine whether we are running from within Google or not
+ if (
+ "corp.google.com" in socket.gethostname()
+ and not my_remote
+ and not crosfleet
+ ):
+ my_remote = self.GetDefaultRemotes(board)
+ if global_settings.GetField("same_machine") and len(my_remote) > 1:
+ raise RuntimeError(
+ "Only one remote is allowed when same_machine "
+ "is turned on"
+ )
+ all_remote += my_remote
+ image_args = label_settings.GetField("image_args")
+ if test_flag.GetTestMode():
+ # pylint: disable=too-many-function-args
+ label = MockLabel(
+ label_name,
+ build,
+ image,
+ autotest_path,
+ debug_path,
+ chromeos_root,
+ board,
+ my_remote,
+ image_args,
+ cache_dir,
+ cache_only,
+ log_level,
+ compiler,
+ crosfleet,
+ chrome_src,
+ )
+ else:
+ label = Label(
+ label_name,
+ build,
+ image,
+ autotest_path,
+ debug_path,
+ chromeos_root,
+ board,
+ my_remote,
+ image_args,
+ cache_dir,
+ cache_only,
+ log_level,
+ compiler,
+ crosfleet,
+ chrome_src,
+ )
+ labels.append(label)
+
+ if not labels:
+ raise RuntimeError("No labels specified")
+
+ email = global_settings.GetField("email")
+ all_remote += list(set(my_remote))
+ all_remote = list(set(all_remote))
+ if crosfleet:
+ for remote in all_remote:
+ self.CheckRemotesInCrosfleet(remote)
+ experiment = Experiment(
+ experiment_name,
+ all_remote,
+ working_directory,
+ chromeos_root,
+ cache_conditions,
+ labels,
+ benchmarks,
+ experiment_file.Canonicalize(),
+ email,
+ acquire_timeout,
+ log_dir,
+ log_level,
+ share_cache,
+ results_dir,
+ compress_results,
+ locks_dir,
+ cwp_dso,
+ ignore_min_max,
+ crosfleet,
+ dut_config,
+ no_lock=no_lock,
+ )
+
+ return experiment
+
+ def GetDefaultRemotes(self, board):
+ default_remotes_file = os.path.join(
+ os.path.dirname(__file__), "default_remotes"
+ )
+ try:
+ with open(default_remotes_file) as f:
+ for line in f:
+ key, v = line.split(":")
+ if key.strip() == board:
+ remotes = v.strip().split()
+ if remotes:
+ return remotes
+ else:
+ raise RuntimeError(
+ "There is no remote for {0}".format(board)
+ )
+ except IOError:
+ # TODO: rethrow instead of throwing different exception.
+ raise RuntimeError(
+ "IOError while reading file {0}".format(default_remotes_file)
+ )
+ else:
+ raise RuntimeError("There is no remote for {0}".format(board))
+
+ def CheckRemotesInCrosfleet(self, remote):
+ # TODO: (AI:zhizhouy) need to check whether a remote is a local or lab
+ # machine. If not lab machine, raise an error.
+ pass
+
+ def CheckCrosfleetTool(self, chromeos_root, log_level):
+ CROSFLEET_PATH = "crosfleet"
+ if os.path.exists(CROSFLEET_PATH):
+ return True
+ l = logger.GetLogger()
+ l.LogOutput("Crosfleet tool not installed, trying to install it.")
+ ce = command_executer.GetCommandExecuter(l, log_level=log_level)
+ setup_lab_tools = os.path.join(
+ chromeos_root, "chromeos-admin", "lab-tools", "setup_lab_tools"
+ )
+ cmd = "%s" % setup_lab_tools
+ status = ce.RunCommand(cmd)
+ if status != 0:
+ raise RuntimeError(
+ "Crosfleet tool not installed correctly, please try to "
+ "manually install it from %s" % setup_lab_tools
+ )
+ l.LogOutput(
+ "Crosfleet is installed at %s, please login before first use. "
+ 'Login by running "crosfleet login" and follow instructions.'
+ % CROSFLEET_PATH
+ )
+ return False
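
The story-filter handling in GetExperiment above is the subtlest piece of the benchmark loop: at most one --story-filter / --story-tag-filter argument is allowed, and the filter value is folded into the benchmark name so filtered runs do not collide. Below is a minimal, self-contained sketch of that renaming and duplicate check; the rename_benchmark helper and the sample arguments are illustrative only and are not part of crosperf.

```python
# Illustrative sketch of the story-filter renaming performed in
# ExperimentFactory.GetExperiment (not crosperf code itself).
def rename_benchmark(benchmark_name, test_args, seen_names):
    """Fold a story filter into the benchmark name and reject duplicates."""
    story_count = 0
    for arg in test_args.split():
        if "--story-filter=" in arg or "--story-tag-filter=" in arg:
            story_count += 1
            if story_count > 1:
                raise RuntimeError(
                    "Only one story or story-tag filter allowed in "
                    "a single benchmark run"
                )
            # The filter value becomes a suffix, e.g. webrtc@@datachannel.
            benchmark_name = "%s@@%s" % (benchmark_name, arg.split("=")[-1])
    if benchmark_name in seen_names:
        raise SyntaxError("Duplicate benchmark name: '%s'." % benchmark_name)
    seen_names.add(benchmark_name)
    return benchmark_name


seen = set()
print(rename_benchmark("webrtc", "--story-filter=datachannel", seen))
# webrtc@@datachannel
```

The unit tests that follow exercise exactly this behavior: the renamed 'webrtc@@datachannel' benchmark in testLoadExperimentFile1 and the SyntaxError in testDuplecateBenchmark.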
diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py
index 9637c108..0541bb9b 100755
--- a/crosperf/experiment_factory_unittest.py
+++ b/crosperf/experiment_factory_unittest.py
@@ -1,13 +1,12 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit test for experiment_factory.py"""
-from __future__ import print_function
import io
import os
@@ -15,15 +14,15 @@ import socket
import unittest
import unittest.mock as mock
+import benchmark
from cros_utils import command_executer
from cros_utils.file_utils import FileUtils
-
-from experiment_file import ExperimentFile
-import test_flag
-import benchmark
import experiment_factory
from experiment_factory import ExperimentFactory
+from experiment_file import ExperimentFile
import settings_factory
+import test_flag
+
EXPERIMENT_FILE_1 = """
board: x86-alex
@@ -78,371 +77,454 @@ EXPERIMENT_FILE_2 = """
class ExperimentFactoryTest(unittest.TestCase):
- """Class for running experiment factory unittests."""
- def setUp(self):
- self.append_benchmark_call_args = []
-
- def testLoadExperimentFile1(self):
- experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_1))
- exp = ExperimentFactory().GetExperiment(experiment_file,
- working_directory='',
- log_dir='')
- self.assertEqual(exp.remote, ['chromeos-alex3'])
-
- self.assertEqual(len(exp.benchmarks), 2)
- self.assertEqual(exp.benchmarks[0].name, 'PageCycler')
- self.assertEqual(exp.benchmarks[0].test_name, 'PageCycler')
- self.assertEqual(exp.benchmarks[0].iterations, 3)
- self.assertEqual(exp.benchmarks[1].name, 'webrtc@@datachannel')
- self.assertEqual(exp.benchmarks[1].test_name, 'webrtc')
- self.assertEqual(exp.benchmarks[1].iterations, 1)
-
- self.assertEqual(len(exp.labels), 2)
- self.assertEqual(exp.labels[0].chromeos_image,
- '/usr/local/google/cros_image1.bin')
- self.assertEqual(exp.labels[0].board, 'x86-alex')
-
- def testLoadExperimentFile2CWP(self):
- experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_2))
- exp = ExperimentFactory().GetExperiment(experiment_file,
- working_directory='',
- log_dir='')
- self.assertEqual(exp.cwp_dso, 'kallsyms')
- self.assertEqual(len(exp.benchmarks), 2)
- self.assertEqual(exp.benchmarks[0].weight, 0.8)
- self.assertEqual(exp.benchmarks[1].weight, 0.2)
-
- def testDuplecateBenchmark(self):
- mock_experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_1))
- mock_experiment_file.all_settings = []
- benchmark_settings1 = settings_factory.BenchmarkSettings('name')
- mock_experiment_file.all_settings.append(benchmark_settings1)
- benchmark_settings2 = settings_factory.BenchmarkSettings('name')
- mock_experiment_file.all_settings.append(benchmark_settings2)
-
- with self.assertRaises(SyntaxError):
- ef = ExperimentFactory()
- ef.GetExperiment(mock_experiment_file, '', '')
-
- def testCWPExceptions(self):
- mock_experiment_file = ExperimentFile(io.StringIO(''))
- mock_experiment_file.all_settings = []
- global_settings = settings_factory.GlobalSettings('test_name')
- global_settings.SetField('locks_dir', '/tmp')
-
- # Test 1: DSO type not supported
- global_settings.SetField('cwp_dso', 'test')
- self.assertEqual(global_settings.GetField('cwp_dso'), 'test')
- mock_experiment_file.global_settings = global_settings
- with self.assertRaises(RuntimeError) as msg:
- ef = ExperimentFactory()
- ef.GetExperiment(mock_experiment_file, '', '')
- self.assertEqual('The DSO specified is not supported', str(msg.exception))
-
- # Test 2: No weight after DSO specified
- global_settings.SetField('cwp_dso', 'kallsyms')
- mock_experiment_file.global_settings = global_settings
- benchmark_settings = settings_factory.BenchmarkSettings('name')
- mock_experiment_file.all_settings.append(benchmark_settings)
- with self.assertRaises(RuntimeError) as msg:
- ef = ExperimentFactory()
- ef.GetExperiment(mock_experiment_file, '', '')
- self.assertEqual('With DSO specified, each benchmark should have a weight',
- str(msg.exception))
-
- # Test 3: Weight is set, but no dso specified
- global_settings.SetField('cwp_dso', '')
- mock_experiment_file.global_settings = global_settings
- benchmark_settings = settings_factory.BenchmarkSettings('name')
- benchmark_settings.SetField('weight', '0.8')
- mock_experiment_file.all_settings = []
- mock_experiment_file.all_settings.append(benchmark_settings)
- with self.assertRaises(RuntimeError) as msg:
- ef = ExperimentFactory()
- ef.GetExperiment(mock_experiment_file, '', '')
- self.assertEqual('Weight can only be set when DSO specified',
- str(msg.exception))
-
- # Test 4: cwp_dso only works for telemetry_Crosperf benchmarks
- global_settings.SetField('cwp_dso', 'kallsyms')
- mock_experiment_file.global_settings = global_settings
- benchmark_settings = settings_factory.BenchmarkSettings('name')
- benchmark_settings.SetField('weight', '0.8')
- mock_experiment_file.all_settings = []
- mock_experiment_file.all_settings.append(benchmark_settings)
- with self.assertRaises(RuntimeError) as msg:
- ef = ExperimentFactory()
- ef.GetExperiment(mock_experiment_file, '', '')
- self.assertEqual(
- 'CWP approximation weight only works with '
- 'telemetry_Crosperf suite', str(msg.exception))
-
- # Test 5: cwp_dso does not work for local run
- benchmark_settings = settings_factory.BenchmarkSettings('name')
- benchmark_settings.SetField('weight', '0.8')
- benchmark_settings.SetField('suite', 'telemetry_Crosperf')
- benchmark_settings.SetField('run_local', 'True')
- mock_experiment_file.all_settings = []
- mock_experiment_file.all_settings.append(benchmark_settings)
- with self.assertRaises(RuntimeError) as msg:
- ef = ExperimentFactory()
- ef.GetExperiment(mock_experiment_file, '', '')
- self.assertEqual('run_local must be set to False to use CWP approximation',
- str(msg.exception))
-
- # Test 6: weight should be float >=0
- benchmark_settings = settings_factory.BenchmarkSettings('name')
- benchmark_settings.SetField('weight', '-1.2')
- benchmark_settings.SetField('suite', 'telemetry_Crosperf')
- benchmark_settings.SetField('run_local', 'False')
- mock_experiment_file.all_settings = []
- mock_experiment_file.all_settings.append(benchmark_settings)
- with self.assertRaises(RuntimeError) as msg:
- ef = ExperimentFactory()
- ef.GetExperiment(mock_experiment_file, '', '')
- self.assertEqual('Weight should be a float >=0', str(msg.exception))
-
- # Test 7: more than one story tag in test_args
- benchmark_settings = settings_factory.BenchmarkSettings('name')
- benchmark_settings.SetField('test_args',
- '--story-filter=a --story-tag-filter=b')
- benchmark_settings.SetField('weight', '1.2')
- benchmark_settings.SetField('suite', 'telemetry_Crosperf')
- mock_experiment_file.all_settings = []
- mock_experiment_file.all_settings.append(benchmark_settings)
- with self.assertRaises(RuntimeError) as msg:
- ef = ExperimentFactory()
- ef.GetExperiment(mock_experiment_file, '', '')
- self.assertEqual(
- 'Only one story or story-tag filter allowed in a single '
- 'benchmark run', str(msg.exception))
-
- # Test 8: Iterations of each benchmark run are not same in cwp mode
- mock_experiment_file.all_settings = []
- benchmark_settings = settings_factory.BenchmarkSettings('name1')
- benchmark_settings.SetField('iterations', '4')
- benchmark_settings.SetField('weight', '1.2')
- benchmark_settings.SetField('suite', 'telemetry_Crosperf')
- benchmark_settings.SetField('run_local', 'False')
- mock_experiment_file.all_settings.append(benchmark_settings)
- benchmark_settings = settings_factory.BenchmarkSettings('name2')
- benchmark_settings.SetField('iterations', '3')
- benchmark_settings.SetField('weight', '1.2')
- benchmark_settings.SetField('suite', 'telemetry_Crosperf')
- benchmark_settings.SetField('run_local', 'False')
- mock_experiment_file.all_settings.append(benchmark_settings)
- with self.assertRaises(RuntimeError) as msg:
- ef = ExperimentFactory()
- ef.GetExperiment(mock_experiment_file, '', '')
- self.assertEqual('Iterations of each benchmark run are not the same',
- str(msg.exception))
-
- def test_append_benchmark_set(self):
- ef = ExperimentFactory()
-
- bench_list = []
- ef.AppendBenchmarkSet(bench_list,
- experiment_factory.telemetry_perfv2_tests, '', 1,
- False, '', 'telemetry_Crosperf', False, 0, False, '',
- 0)
- self.assertEqual(len(bench_list),
- len(experiment_factory.telemetry_perfv2_tests))
- self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
-
- bench_list = []
- ef.AppendBenchmarkSet(bench_list,
- experiment_factory.telemetry_pagecycler_tests, '', 1,
- False, '', 'telemetry_Crosperf', False, 0, False, '',
- 0)
- self.assertEqual(len(bench_list),
- len(experiment_factory.telemetry_pagecycler_tests))
- self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
-
- bench_list = []
- ef.AppendBenchmarkSet(bench_list,
- experiment_factory.telemetry_toolchain_perf_tests,
- '', 1, False, '', 'telemetry_Crosperf', False, 0,
- False, '', 0)
- self.assertEqual(len(bench_list),
- len(experiment_factory.telemetry_toolchain_perf_tests))
- self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
-
- @mock.patch.object(socket, 'gethostname')
- def test_get_experiment(self, mock_socket):
-
- test_flag.SetTestMode(False)
- self.append_benchmark_call_args = []
-
- def FakeAppendBenchmarkSet(bench_list, set_list, args, iters, rm_ch,
- perf_args, suite, show_all):
- 'Helper function for test_get_experiment'
- arg_list = [
- bench_list, set_list, args, iters, rm_ch, perf_args, suite, show_all
- ]
- self.append_benchmark_call_args.append(arg_list)
-
- def FakeGetDefaultRemotes(board):
- if not board:
- return []
- return ['fake_chromeos_machine1.cros', 'fake_chromeos_machine2.cros']
-
- def FakeGetXbuddyPath(build, autotest_dir, debug_dir, board, chroot,
- log_level, perf_args):
- autotest_path = autotest_dir
- if not autotest_path:
- autotest_path = 'fake_autotest_path'
- debug_path = debug_dir
- if not debug_path and perf_args:
- debug_path = 'fake_debug_path'
- if not build or not board or not chroot or not log_level:
- return '', autotest_path, debug_path
- return 'fake_image_path', autotest_path, debug_path
-
- ef = ExperimentFactory()
- ef.AppendBenchmarkSet = FakeAppendBenchmarkSet
- ef.GetDefaultRemotes = FakeGetDefaultRemotes
-
- label_settings = settings_factory.LabelSettings('image_label')
- benchmark_settings = settings_factory.BenchmarkSettings('bench_test')
- global_settings = settings_factory.GlobalSettings('test_name')
-
- label_settings.GetXbuddyPath = FakeGetXbuddyPath
-
- mock_experiment_file = ExperimentFile(io.StringIO(''))
- mock_experiment_file.all_settings = []
-
- test_flag.SetTestMode(True)
- # Basic test.
- global_settings.SetField('name', 'unittest_test')
- global_settings.SetField('board', 'lumpy')
- global_settings.SetField('locks_dir', '/tmp')
- global_settings.SetField('remote', '123.45.67.89 123.45.76.80')
- benchmark_settings.SetField('test_name', 'kraken')
- benchmark_settings.SetField('suite', 'telemetry_Crosperf')
- benchmark_settings.SetField('iterations', 1)
- label_settings.SetField(
- 'chromeos_image',
- 'chromeos/src/build/images/lumpy/latest/chromiumos_test_image.bin')
- label_settings.SetField('chrome_src', '/usr/local/google/home/chrome-top')
- label_settings.SetField('autotest_path', '/tmp/autotest')
-
- mock_experiment_file.global_settings = global_settings
- mock_experiment_file.all_settings.append(label_settings)
- mock_experiment_file.all_settings.append(benchmark_settings)
- mock_experiment_file.all_settings.append(global_settings)
-
- mock_socket.return_value = ''
-
- # First test. General test.
- exp = ef.GetExperiment(mock_experiment_file, '', '')
- self.assertCountEqual(exp.remote, ['123.45.67.89', '123.45.76.80'])
- self.assertEqual(exp.cache_conditions, [0, 2, 1])
- self.assertEqual(exp.log_level, 'average')
-
- self.assertEqual(len(exp.benchmarks), 1)
- self.assertEqual(exp.benchmarks[0].name, 'bench_test')
- self.assertEqual(exp.benchmarks[0].test_name, 'kraken')
- self.assertEqual(exp.benchmarks[0].iterations, 1)
- self.assertEqual(exp.benchmarks[0].suite, 'telemetry_Crosperf')
- self.assertFalse(exp.benchmarks[0].show_all_results)
-
- self.assertEqual(len(exp.labels), 1)
- self.assertEqual(
- exp.labels[0].chromeos_image, 'chromeos/src/build/images/lumpy/latest/'
- 'chromiumos_test_image.bin')
- self.assertEqual(exp.labels[0].autotest_path, '/tmp/autotest')
- self.assertEqual(exp.labels[0].board, 'lumpy')
-
- # Second test: Remotes listed in labels.
+ """Class for running experiment factory unittests."""
+
+ def setUp(self):
+ self.append_benchmark_call_args = []
+
+ def testLoadExperimentFile1(self):
+ experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_1))
+ exp = ExperimentFactory().GetExperiment(
+ experiment_file, working_directory="", log_dir=""
+ )
+ self.assertEqual(exp.remote, ["chromeos-alex3"])
+
+ self.assertEqual(len(exp.benchmarks), 2)
+ self.assertEqual(exp.benchmarks[0].name, "PageCycler")
+ self.assertEqual(exp.benchmarks[0].test_name, "PageCycler")
+ self.assertEqual(exp.benchmarks[0].iterations, 3)
+ self.assertEqual(exp.benchmarks[1].name, "webrtc@@datachannel")
+ self.assertEqual(exp.benchmarks[1].test_name, "webrtc")
+ self.assertEqual(exp.benchmarks[1].iterations, 1)
+
+ self.assertEqual(len(exp.labels), 2)
+ self.assertEqual(
+ exp.labels[0].chromeos_image, "/usr/local/google/cros_image1.bin"
+ )
+ self.assertEqual(exp.labels[0].board, "x86-alex")
+
+ def testLoadExperimentFile2CWP(self):
+ experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_2))
+ exp = ExperimentFactory().GetExperiment(
+ experiment_file, working_directory="", log_dir=""
+ )
+ self.assertEqual(exp.cwp_dso, "kallsyms")
+ self.assertEqual(len(exp.benchmarks), 2)
+ self.assertEqual(exp.benchmarks[0].weight, 0.8)
+ self.assertEqual(exp.benchmarks[1].weight, 0.2)
+
+ def testDuplecateBenchmark(self):
+ mock_experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_1))
+ mock_experiment_file.all_settings = []
+ benchmark_settings1 = settings_factory.BenchmarkSettings("name")
+ mock_experiment_file.all_settings.append(benchmark_settings1)
+ benchmark_settings2 = settings_factory.BenchmarkSettings("name")
+ mock_experiment_file.all_settings.append(benchmark_settings2)
+
+ with self.assertRaises(SyntaxError):
+ ef = ExperimentFactory()
+ ef.GetExperiment(mock_experiment_file, "", "")
+
+ def testCWPExceptions(self):
+ mock_experiment_file = ExperimentFile(io.StringIO(""))
+ mock_experiment_file.all_settings = []
+ global_settings = settings_factory.GlobalSettings("test_name")
+ global_settings.SetField("locks_dir", "/tmp")
+
+ # Test 1: DSO type not supported
+ global_settings.SetField("cwp_dso", "test")
+ self.assertEqual(global_settings.GetField("cwp_dso"), "test")
+ mock_experiment_file.global_settings = global_settings
+ with self.assertRaises(RuntimeError) as msg:
+ ef = ExperimentFactory()
+ ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertEqual(
+ "The DSO specified is not supported", str(msg.exception)
+ )
+
+ # Test 2: No weight after DSO specified
+ global_settings.SetField("cwp_dso", "kallsyms")
+ mock_experiment_file.global_settings = global_settings
+ benchmark_settings = settings_factory.BenchmarkSettings("name")
+ mock_experiment_file.all_settings.append(benchmark_settings)
+ with self.assertRaises(RuntimeError) as msg:
+ ef = ExperimentFactory()
+ ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertEqual(
+ "With DSO specified, each benchmark should have a weight",
+ str(msg.exception),
+ )
+
+ # Test 3: Weight is set, but no dso specified
+ global_settings.SetField("cwp_dso", "")
+ mock_experiment_file.global_settings = global_settings
+ benchmark_settings = settings_factory.BenchmarkSettings("name")
+ benchmark_settings.SetField("weight", "0.8")
+ mock_experiment_file.all_settings = []
+ mock_experiment_file.all_settings.append(benchmark_settings)
+ with self.assertRaises(RuntimeError) as msg:
+ ef = ExperimentFactory()
+ ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertEqual(
+ "Weight can only be set when DSO specified", str(msg.exception)
+ )
+
+ # Test 4: cwp_dso only works for telemetry_Crosperf benchmarks
+ global_settings.SetField("cwp_dso", "kallsyms")
+ mock_experiment_file.global_settings = global_settings
+ benchmark_settings = settings_factory.BenchmarkSettings("name")
+ benchmark_settings.SetField("weight", "0.8")
+ mock_experiment_file.all_settings = []
+ mock_experiment_file.all_settings.append(benchmark_settings)
+ with self.assertRaises(RuntimeError) as msg:
+ ef = ExperimentFactory()
+ ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertEqual(
+ "CWP approximation weight only works with "
+ "telemetry_Crosperf suite",
+ str(msg.exception),
+ )
+
+ # Test 5: cwp_dso does not work for local run
+ benchmark_settings = settings_factory.BenchmarkSettings("name")
+ benchmark_settings.SetField("weight", "0.8")
+ benchmark_settings.SetField("suite", "telemetry_Crosperf")
+ benchmark_settings.SetField("run_local", "True")
+ mock_experiment_file.all_settings = []
+ mock_experiment_file.all_settings.append(benchmark_settings)
+ with self.assertRaises(RuntimeError) as msg:
+ ef = ExperimentFactory()
+ ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertEqual(
+ "run_local must be set to False to use CWP approximation",
+ str(msg.exception),
+ )
+
+ # Test 6: weight should be float >=0
+ benchmark_settings = settings_factory.BenchmarkSettings("name")
+ benchmark_settings.SetField("weight", "-1.2")
+ benchmark_settings.SetField("suite", "telemetry_Crosperf")
+ benchmark_settings.SetField("run_local", "False")
+ mock_experiment_file.all_settings = []
+ mock_experiment_file.all_settings.append(benchmark_settings)
+ with self.assertRaises(RuntimeError) as msg:
+ ef = ExperimentFactory()
+ ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertEqual("Weight should be a float >=0", str(msg.exception))
+
+ # Test 7: more than one story tag in test_args
+ benchmark_settings = settings_factory.BenchmarkSettings("name")
+ benchmark_settings.SetField(
+ "test_args", "--story-filter=a --story-tag-filter=b"
+ )
+ benchmark_settings.SetField("weight", "1.2")
+ benchmark_settings.SetField("suite", "telemetry_Crosperf")
+ mock_experiment_file.all_settings = []
+ mock_experiment_file.all_settings.append(benchmark_settings)
+ with self.assertRaises(RuntimeError) as msg:
+ ef = ExperimentFactory()
+ ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertEqual(
+ "Only one story or story-tag filter allowed in a single "
+ "benchmark run",
+ str(msg.exception),
+ )
+
+ # Test 8: Iterations of each benchmark run are not same in cwp mode
+ mock_experiment_file.all_settings = []
+ benchmark_settings = settings_factory.BenchmarkSettings("name1")
+ benchmark_settings.SetField("iterations", "4")
+ benchmark_settings.SetField("weight", "1.2")
+ benchmark_settings.SetField("suite", "telemetry_Crosperf")
+ benchmark_settings.SetField("run_local", "False")
+ mock_experiment_file.all_settings.append(benchmark_settings)
+ benchmark_settings = settings_factory.BenchmarkSettings("name2")
+ benchmark_settings.SetField("iterations", "3")
+ benchmark_settings.SetField("weight", "1.2")
+ benchmark_settings.SetField("suite", "telemetry_Crosperf")
+ benchmark_settings.SetField("run_local", "False")
+ mock_experiment_file.all_settings.append(benchmark_settings)
+ with self.assertRaises(RuntimeError) as msg:
+ ef = ExperimentFactory()
+ ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertEqual(
+ "Iterations of each benchmark run are not the same",
+ str(msg.exception),
+ )
+
+ def test_append_benchmark_set(self):
+ ef = ExperimentFactory()
+
+ bench_list = []
+ ef.AppendBenchmarkSet(
+ bench_list,
+ experiment_factory.telemetry_perfv2_tests,
+ "",
+ 1,
+ False,
+ "",
+ "telemetry_Crosperf",
+ False,
+ 0,
+ False,
+ "",
+ 0,
+ )
+ self.assertEqual(
+ len(bench_list), len(experiment_factory.telemetry_perfv2_tests)
+ )
+ self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
+
+ bench_list = []
+ ef.AppendBenchmarkSet(
+ bench_list,
+ experiment_factory.telemetry_pagecycler_tests,
+ "",
+ 1,
+ False,
+ "",
+ "telemetry_Crosperf",
+ False,
+ 0,
+ False,
+ "",
+ 0,
+ )
+ self.assertEqual(
+ len(bench_list), len(experiment_factory.telemetry_pagecycler_tests)
+ )
+ self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
+
+ bench_list = []
+ ef.AppendBenchmarkSet(
+ bench_list,
+ experiment_factory.telemetry_toolchain_perf_tests,
+ "",
+ 1,
+ False,
+ "",
+ "telemetry_Crosperf",
+ False,
+ 0,
+ False,
+ "",
+ 0,
+ )
+ self.assertEqual(
+ len(bench_list),
+ len(experiment_factory.telemetry_toolchain_perf_tests),
+ )
+ self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
+
+ @mock.patch.object(socket, "gethostname")
+ def test_get_experiment(self, mock_socket):
+
+ test_flag.SetTestMode(False)
+ self.append_benchmark_call_args = []
+
+ def FakeAppendBenchmarkSet(
+ bench_list, set_list, args, iters, rm_ch, perf_args, suite, show_all
+ ):
+ "Helper function for test_get_experiment"
+ arg_list = [
+ bench_list,
+ set_list,
+ args,
+ iters,
+ rm_ch,
+ perf_args,
+ suite,
+ show_all,
+ ]
+ self.append_benchmark_call_args.append(arg_list)
+
+ def FakeGetDefaultRemotes(board):
+ if not board:
+ return []
+ return [
+ "fake_chromeos_machine1.cros",
+ "fake_chromeos_machine2.cros",
+ ]
+
+ def FakeGetXbuddyPath(
+ build, autotest_dir, debug_dir, board, chroot, log_level, perf_args
+ ):
+ autotest_path = autotest_dir
+ if not autotest_path:
+ autotest_path = "fake_autotest_path"
+ debug_path = debug_dir
+ if not debug_path and perf_args:
+ debug_path = "fake_debug_path"
+ if not build or not board or not chroot or not log_level:
+ return "", autotest_path, debug_path
+ return "fake_image_path", autotest_path, debug_path
+
+ ef = ExperimentFactory()
+ ef.AppendBenchmarkSet = FakeAppendBenchmarkSet
+ ef.GetDefaultRemotes = FakeGetDefaultRemotes
+
+ label_settings = settings_factory.LabelSettings("image_label")
+ benchmark_settings = settings_factory.BenchmarkSettings("bench_test")
+ global_settings = settings_factory.GlobalSettings("test_name")
+
+ label_settings.GetXbuddyPath = FakeGetXbuddyPath
+
+ mock_experiment_file = ExperimentFile(io.StringIO(""))
+ mock_experiment_file.all_settings = []
+
+ test_flag.SetTestMode(True)
+ # Basic test.
+ global_settings.SetField("name", "unittest_test")
+ global_settings.SetField("board", "lumpy")
+ global_settings.SetField("locks_dir", "/tmp")
+ global_settings.SetField("remote", "123.45.67.89 123.45.76.80")
+ benchmark_settings.SetField("test_name", "kraken")
+ benchmark_settings.SetField("suite", "telemetry_Crosperf")
+ benchmark_settings.SetField("iterations", 1)
+ label_settings.SetField(
+ "chromeos_image",
+ "chromeos/src/build/images/lumpy/latest/chromiumos_test_image.bin",
+ )
+ label_settings.SetField(
+ "chrome_src", "/usr/local/google/home/chrome-top"
+ )
+ label_settings.SetField("autotest_path", "/tmp/autotest")
+
+ mock_experiment_file.global_settings = global_settings
+ mock_experiment_file.all_settings.append(label_settings)
+ mock_experiment_file.all_settings.append(benchmark_settings)
+ mock_experiment_file.all_settings.append(global_settings)
+
+ mock_socket.return_value = ""
+
+ # First test. General test.
+ exp = ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertCountEqual(exp.remote, ["123.45.67.89", "123.45.76.80"])
+ self.assertEqual(exp.cache_conditions, [0, 2, 1])
+ self.assertEqual(exp.log_level, "average")
+
+ self.assertEqual(len(exp.benchmarks), 1)
+ self.assertEqual(exp.benchmarks[0].name, "bench_test")
+ self.assertEqual(exp.benchmarks[0].test_name, "kraken")
+ self.assertEqual(exp.benchmarks[0].iterations, 1)
+ self.assertEqual(exp.benchmarks[0].suite, "telemetry_Crosperf")
+ self.assertFalse(exp.benchmarks[0].show_all_results)
+
+ self.assertEqual(len(exp.labels), 1)
+ self.assertEqual(
+ exp.labels[0].chromeos_image,
+ "chromeos/src/build/images/lumpy/latest/"
+ "chromiumos_test_image.bin",
+ )
+ self.assertEqual(exp.labels[0].autotest_path, "/tmp/autotest")
+ self.assertEqual(exp.labels[0].board, "lumpy")
+
+ # Second test: Remotes listed in labels.
+ test_flag.SetTestMode(True)
+ label_settings.SetField("remote", "chromeos1.cros chromeos2.cros")
+ exp = ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertCountEqual(
+ exp.remote,
+ [
+ "123.45.67.89",
+ "123.45.76.80",
+ "chromeos1.cros",
+ "chromeos2.cros",
+ ],
+ )
+
+ # Third test: Automatic fixing of bad logging_level param:
+ global_settings.SetField("logging_level", "really loud!")
+ exp = ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertEqual(exp.log_level, "verbose")
+
+ # Fourth test: Setting cache conditions; only 1 remote with "same_machine"
+ global_settings.SetField("rerun_if_failed", "true")
+ global_settings.SetField("rerun", "true")
+ global_settings.SetField("same_machine", "true")
+ global_settings.SetField("same_specs", "true")
+
+ self.assertRaises(
+ Exception, ef.GetExperiment, mock_experiment_file, "", ""
+ )
+ label_settings.SetField("remote", "")
+ global_settings.SetField("remote", "123.45.67.89")
+ exp = ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertEqual(exp.cache_conditions, [0, 2, 3, 4, 6, 1])
+
+ # Fifth Test: Adding a second label; calling GetXbuddyPath; omitting all
+ # remotes (Call GetDefaultRemotes).
+ mock_socket.return_value = "test.corp.google.com"
+ global_settings.SetField("remote", "")
+ global_settings.SetField("same_machine", "false")
+
+ label_settings_2 = settings_factory.LabelSettings(
+ "official_image_label"
+ )
+ label_settings_2.SetField("chromeos_root", "chromeos")
+ label_settings_2.SetField("build", "official-dev")
+ label_settings_2.SetField("autotest_path", "")
+ label_settings_2.GetXbuddyPath = FakeGetXbuddyPath
+
+ mock_experiment_file.all_settings.append(label_settings_2)
+ exp = ef.GetExperiment(mock_experiment_file, "", "")
+ self.assertEqual(len(exp.labels), 2)
+ self.assertEqual(exp.labels[1].chromeos_image, "fake_image_path")
+ self.assertEqual(exp.labels[1].autotest_path, "fake_autotest_path")
+ self.assertCountEqual(
+ exp.remote,
+ ["fake_chromeos_machine1.cros", "fake_chromeos_machine2.cros"],
+ )
+
+ def test_get_default_remotes(self):
+ board_list = [
+ "bob",
+ "chell",
+ "coral",
+ "elm",
+ "nautilus",
+ "snappy",
+ ]
+
+ ef = ExperimentFactory()
+ self.assertRaises(Exception, ef.GetDefaultRemotes, "bad-board")
+
+ # Verify that we have entries for every board
+ for b in board_list:
+ remotes = ef.GetDefaultRemotes(b)
+ self.assertGreaterEqual(len(remotes), 1)
+
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommand")
+ @mock.patch.object(os.path, "exists")
+ def test_check_crosfleet_tool(self, mock_exists, mock_runcmd):
+ ef = ExperimentFactory()
+ chromeos_root = "/tmp/chromeos"
+ log_level = "average"
+
+ mock_exists.return_value = True
+ ret = ef.CheckCrosfleetTool(chromeos_root, log_level)
+ self.assertTrue(ret)
+
+ mock_exists.return_value = False
+ mock_runcmd.return_value = 1
+ with self.assertRaises(RuntimeError) as err:
+ ef.CheckCrosfleetTool(chromeos_root, log_level)
+ self.assertEqual(mock_runcmd.call_count, 1)
+ self.assertEqual(
+ str(err.exception),
+ "Crosfleet tool not installed "
+ "correctly, please try to manually install it from "
+ "/tmp/chromeos/chromeos-admin/lab-tools/setup_lab_tools",
+ )
+
+ mock_runcmd.return_value = 0
+ mock_runcmd.call_count = 0
+ ret = ef.CheckCrosfleetTool(chromeos_root, log_level)
+ self.assertEqual(mock_runcmd.call_count, 1)
+ self.assertFalse(ret)
+
+
+if __name__ == "__main__":
+ FileUtils.Configure(True)
test_flag.SetTestMode(True)
- label_settings.SetField('remote', 'chromeos1.cros chromeos2.cros')
- exp = ef.GetExperiment(mock_experiment_file, '', '')
- self.assertCountEqual(
- exp.remote,
- ['123.45.67.89', '123.45.76.80', 'chromeos1.cros', 'chromeos2.cros'])
-
- # Third test: Automatic fixing of bad logging_level param:
- global_settings.SetField('logging_level', 'really loud!')
- exp = ef.GetExperiment(mock_experiment_file, '', '')
- self.assertEqual(exp.log_level, 'verbose')
-
- # Fourth test: Setting cache conditions; only 1 remote with "same_machine"
- global_settings.SetField('rerun_if_failed', 'true')
- global_settings.SetField('rerun', 'true')
- global_settings.SetField('same_machine', 'true')
- global_settings.SetField('same_specs', 'true')
-
- self.assertRaises(Exception, ef.GetExperiment, mock_experiment_file, '',
- '')
- label_settings.SetField('remote', '')
- global_settings.SetField('remote', '123.45.67.89')
- exp = ef.GetExperiment(mock_experiment_file, '', '')
- self.assertEqual(exp.cache_conditions, [0, 2, 3, 4, 6, 1])
-
- # Fifth Test: Adding a second label; calling GetXbuddyPath; omitting all
- # remotes (Call GetDefaultRemotes).
- mock_socket.return_value = 'test.corp.google.com'
- global_settings.SetField('remote', '')
- global_settings.SetField('same_machine', 'false')
-
- label_settings_2 = settings_factory.LabelSettings('official_image_label')
- label_settings_2.SetField('chromeos_root', 'chromeos')
- label_settings_2.SetField('build', 'official-dev')
- label_settings_2.SetField('autotest_path', '')
- label_settings_2.GetXbuddyPath = FakeGetXbuddyPath
-
- mock_experiment_file.all_settings.append(label_settings_2)
- exp = ef.GetExperiment(mock_experiment_file, '', '')
- self.assertEqual(len(exp.labels), 2)
- self.assertEqual(exp.labels[1].chromeos_image, 'fake_image_path')
- self.assertEqual(exp.labels[1].autotest_path, 'fake_autotest_path')
- self.assertCountEqual(
- exp.remote,
- ['fake_chromeos_machine1.cros', 'fake_chromeos_machine2.cros'])
-
- def test_get_default_remotes(self):
- board_list = [
- 'bob', 'chell', 'coral', 'elm', 'kefka', 'nautilus', 'snappy',
- 'veyron_tiger'
- ]
-
- ef = ExperimentFactory()
- self.assertRaises(Exception, ef.GetDefaultRemotes, 'bad-board')
-
- # Verify that we have entries for every board
- for b in board_list:
- remotes = ef.GetDefaultRemotes(b)
- self.assertGreaterEqual(len(remotes), 1)
-
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
- @mock.patch.object(os.path, 'exists')
- def test_check_crosfleet_tool(self, mock_exists, mock_runcmd):
- ef = ExperimentFactory()
- chromeos_root = '/tmp/chromeos'
- log_level = 'average'
-
- mock_exists.return_value = True
- ret = ef.CheckCrosfleetTool(chromeos_root, log_level)
- self.assertTrue(ret)
-
- mock_exists.return_value = False
- mock_runcmd.return_value = 1
- with self.assertRaises(RuntimeError) as err:
- ef.CheckCrosfleetTool(chromeos_root, log_level)
- self.assertEqual(mock_runcmd.call_count, 1)
- self.assertEqual(
- str(err.exception), 'Crosfleet tool not installed '
- 'correctly, please try to manually install it from '
- '/tmp/chromeos/chromeos-admin/lab-tools/setup_lab_tools')
-
- mock_runcmd.return_value = 0
- mock_runcmd.call_count = 0
- ret = ef.CheckCrosfleetTool(chromeos_root, log_level)
- self.assertEqual(mock_runcmd.call_count, 1)
- self.assertFalse(ret)
-
-
-if __name__ == '__main__':
- FileUtils.Configure(True)
- test_flag.SetTestMode(True)
- unittest.main()
+ unittest.main()
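
The CWP tests above also double as a usage recipe: an ExperimentFile can be built entirely in memory and handed to ExperimentFactory. Here is a standalone sketch of that pattern, reduced to the unsupported-DSO case from Test 1; it assumes the crosperf modules are importable (run from inside the crosperf directory, as the unit tests are), and the settings name "example" is arbitrary.

```python
# Standalone sketch of the in-memory pattern used by testCWPExceptions above.
# Assumes the crosperf directory is on sys.path / PYTHONPATH.
import io

import settings_factory
from experiment_factory import ExperimentFactory
from experiment_file import ExperimentFile

mock_experiment_file = ExperimentFile(io.StringIO(""))
mock_experiment_file.all_settings = []

global_settings = settings_factory.GlobalSettings("example")
global_settings.SetField("locks_dir", "/tmp")  # avoid the default-lock-dir check
global_settings.SetField("cwp_dso", "test")    # unsupported DSO, mirrors Test 1
mock_experiment_file.global_settings = global_settings

try:
    ExperimentFactory().GetExperiment(mock_experiment_file, "", "")
except RuntimeError as err:
    print(err)  # The DSO specified is not supported
```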
diff --git a/crosperf/experiment_file.py b/crosperf/experiment_file.py
index d2831bda..70852a22 100644
--- a/crosperf/experiment_file.py
+++ b/crosperf/experiment_file.py
@@ -1,220 +1,241 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The experiment file module. It manages the input file of crosperf."""
-from __future__ import print_function
+
import os.path
import re
+
from settings_factory import SettingsFactory
class ExperimentFile(object):
- """Class for parsing the experiment file format.
+ """Class for parsing the experiment file format.
- The grammar for this format is:
+ The grammar for this format is:
- experiment = { _FIELD_VALUE_RE | settings }
- settings = _OPEN_SETTINGS_RE
- { _FIELD_VALUE_RE }
- _CLOSE_SETTINGS_RE
+ experiment = { _FIELD_VALUE_RE | settings }
+ settings = _OPEN_SETTINGS_RE
+ { _FIELD_VALUE_RE }
+ _CLOSE_SETTINGS_RE
- Where the regexes are terminals defined below. This results in an format
- which looks something like:
+    Where the regexes are terminals defined below. This results in a format
+ which looks something like:
- field_name: value
- settings_type: settings_name {
- field_name: value
field_name: value
- }
- """
-
- # Field regex, e.g. "iterations: 3"
- _FIELD_VALUE_RE = re.compile(r'(\+)?\s*(\w+?)(?:\.(\S+))?\s*:\s*(.*)')
- # Open settings regex, e.g. "label {"
- _OPEN_SETTINGS_RE = re.compile(r'(?:([\w.-]+):)?\s*([\w.-]+)\s*{')
- # Close settings regex.
- _CLOSE_SETTINGS_RE = re.compile(r'}')
-
- def __init__(self, experiment_file, overrides=None):
- """Construct object from file-like experiment_file.
-
- Args:
- experiment_file: file-like object with text description of experiment.
- overrides: A settings object that will override fields in other settings.
-
- Raises:
- Exception: if invalid build type or description is invalid.
+ settings_type: settings_name {
+ field_name: value
+ field_name: value
+ }
"""
- self.all_settings = []
- self.global_settings = SettingsFactory().GetSettings('global', 'global')
- self.all_settings.append(self.global_settings)
-
- self._Parse(experiment_file)
-
- for settings in self.all_settings:
- settings.Inherit()
- settings.Validate()
- if overrides:
- settings.Override(overrides)
-
- def GetSettings(self, settings_type):
- """Return nested fields from the experiment file."""
- res = []
- for settings in self.all_settings:
- if settings.settings_type == settings_type:
- res.append(settings)
- return res
-
- def GetGlobalSettings(self):
- """Return the global fields from the experiment file."""
- return self.global_settings
-
- def _ParseField(self, reader):
- """Parse a key/value field."""
- line = reader.CurrentLine().strip()
- match = ExperimentFile._FIELD_VALUE_RE.match(line)
- append, name, _, text_value = match.groups()
- return (name, text_value, append)
-
- def _ParseSettings(self, reader):
- """Parse a settings block."""
- line = reader.CurrentLine().strip()
- match = ExperimentFile._OPEN_SETTINGS_RE.match(line)
- settings_type = match.group(1)
- if settings_type is None:
- settings_type = ''
- settings_name = match.group(2)
- settings = SettingsFactory().GetSettings(settings_name, settings_type)
- settings.SetParentSettings(self.global_settings)
-
- while reader.NextLine():
- line = reader.CurrentLine().strip()
-
- if not line:
- continue
-
- if ExperimentFile._FIELD_VALUE_RE.match(line):
- field = self._ParseField(reader)
- settings.SetField(field[0], field[1], field[2])
- elif ExperimentFile._CLOSE_SETTINGS_RE.match(line):
- return settings, settings_type
-
- raise EOFError('Unexpected EOF while parsing settings block.')
-
- def _Parse(self, experiment_file):
- """Parse experiment file and create settings."""
- reader = ExperimentFileReader(experiment_file)
- settings_names = {}
- try:
- while reader.NextLine():
+
+ # Field regex, e.g. "iterations: 3"
+ _FIELD_VALUE_RE = re.compile(r"(\+)?\s*(\w+?)(?:\.(\S+))?\s*:\s*(.*)")
+ # Open settings regex, e.g. "label {"
+ _OPEN_SETTINGS_RE = re.compile(r"(?:([\w.-]+):)?\s*([\w.-]+)\s*{")
+ # Close settings regex.
+ _CLOSE_SETTINGS_RE = re.compile(r"}")
+
+ def __init__(self, experiment_file, overrides=None):
+ """Construct object from file-like experiment_file.
+
+ Args:
+ experiment_file: file-like object with text description of experiment.
+ overrides: A settings object that will override fields in other settings.
+
+ Raises:
+            Exception: if the build type or description is invalid.
+ """
+ self.all_settings = []
+ self.global_settings = SettingsFactory().GetSettings("global", "global")
+ self.all_settings.append(self.global_settings)
+
+ self._Parse(experiment_file)
+
+ for settings in self.all_settings:
+ settings.Inherit()
+ settings.Validate()
+ if overrides:
+ settings.Override(overrides)
+
+ def GetSettings(self, settings_type):
+ """Return nested fields from the experiment file."""
+ res = []
+ for settings in self.all_settings:
+ if settings.settings_type == settings_type:
+ res.append(settings)
+ return res
+
+ def GetGlobalSettings(self):
+ """Return the global fields from the experiment file."""
+ return self.global_settings
+
+ def _ParseField(self, reader):
+ """Parse a key/value field."""
line = reader.CurrentLine().strip()
+ match = ExperimentFile._FIELD_VALUE_RE.match(line)
+ append, name, _, text_value = match.groups()
+ return (name, text_value, append)
- if not line:
- continue
-
- if ExperimentFile._OPEN_SETTINGS_RE.match(line):
- new_settings, settings_type = self._ParseSettings(reader)
- # We will allow benchmarks with duplicated settings name for now.
- # Further decision will be made when parsing benchmark details in
- # ExperimentFactory.GetExperiment().
- if settings_type != 'benchmark':
- if new_settings.name in settings_names:
- raise SyntaxError(
- "Duplicate settings name: '%s'." % new_settings.name)
- settings_names[new_settings.name] = True
- self.all_settings.append(new_settings)
- elif ExperimentFile._FIELD_VALUE_RE.match(line):
- field = self._ParseField(reader)
- self.global_settings.SetField(field[0], field[1], field[2])
- else:
- raise IOError('Unexpected line.')
- except Exception as err:
- raise RuntimeError('Line %d: %s\n==> %s' % (reader.LineNo(), str(err),
- reader.CurrentLine(False)))
-
- def Canonicalize(self):
- """Convert parsed experiment file back into an experiment file."""
- res = ''
- board = ''
- for field_name in self.global_settings.fields:
- field = self.global_settings.fields[field_name]
- if field.assigned:
- res += '%s: %s\n' % (field.name, field.GetString())
- if field.name == 'board':
- board = field.GetString()
- res += '\n'
-
- for settings in self.all_settings:
- if settings.settings_type != 'global':
- res += '%s: %s {\n' % (settings.settings_type, settings.name)
- for field_name in settings.fields:
- field = settings.fields[field_name]
- if field.assigned:
- res += '\t%s: %s\n' % (field.name, field.GetString())
- if field.name == 'chromeos_image':
- real_file = (
- os.path.realpath(os.path.expanduser(field.GetString())))
- if real_file != field.GetString():
- res += '\t#actual_image: %s\n' % real_file
- if field.name == 'build':
- chromeos_root_field = settings.fields['chromeos_root']
- if chromeos_root_field:
- chromeos_root = chromeos_root_field.GetString()
- value = field.GetString()
- autotest_field = settings.fields['autotest_path']
- autotest_path = ''
- if autotest_field.assigned:
- autotest_path = autotest_field.GetString()
- debug_field = settings.fields['debug_path']
- debug_path = ''
- if debug_field.assigned:
- debug_path = autotest_field.GetString()
- # Do not download the debug symbols since this function is for
- # canonicalizing experiment file.
- downlad_debug = False
- image_path, autotest_path, debug_path = settings.GetXbuddyPath(
- value, autotest_path, debug_path, board, chromeos_root,
- 'quiet', downlad_debug)
- res += '\t#actual_image: %s\n' % image_path
- if not autotest_field.assigned:
- res += '\t#actual_autotest_path: %s\n' % autotest_path
- if not debug_field.assigned:
- res += '\t#actual_debug_path: %s\n' % debug_path
-
- res += '}\n\n'
-
- return res
+ def _ParseSettings(self, reader):
+ """Parse a settings block."""
+ line = reader.CurrentLine().strip()
+ match = ExperimentFile._OPEN_SETTINGS_RE.match(line)
+ settings_type = match.group(1)
+ if settings_type is None:
+ settings_type = ""
+ settings_name = match.group(2)
+ settings = SettingsFactory().GetSettings(settings_name, settings_type)
+ settings.SetParentSettings(self.global_settings)
+
+ while reader.NextLine():
+ line = reader.CurrentLine().strip()
+
+ if not line:
+ continue
+
+ if ExperimentFile._FIELD_VALUE_RE.match(line):
+ field = self._ParseField(reader)
+ settings.SetField(field[0], field[1], field[2])
+ elif ExperimentFile._CLOSE_SETTINGS_RE.match(line):
+ return settings, settings_type
+
+ raise EOFError("Unexpected EOF while parsing settings block.")
+
+ def _Parse(self, experiment_file):
+ """Parse experiment file and create settings."""
+ reader = ExperimentFileReader(experiment_file)
+ settings_names = {}
+ try:
+ while reader.NextLine():
+ line = reader.CurrentLine().strip()
+
+ if not line:
+ continue
+
+ if ExperimentFile._OPEN_SETTINGS_RE.match(line):
+ new_settings, settings_type = self._ParseSettings(reader)
+ # We will allow benchmarks with duplicated settings name for now.
+ # Further decision will be made when parsing benchmark details in
+ # ExperimentFactory.GetExperiment().
+ if settings_type != "benchmark":
+ if new_settings.name in settings_names:
+ raise SyntaxError(
+ "Duplicate settings name: '%s'."
+ % new_settings.name
+ )
+ settings_names[new_settings.name] = True
+ self.all_settings.append(new_settings)
+ elif ExperimentFile._FIELD_VALUE_RE.match(line):
+ field = self._ParseField(reader)
+ self.global_settings.SetField(field[0], field[1], field[2])
+ else:
+ raise IOError("Unexpected line.")
+ except Exception as err:
+ raise RuntimeError(
+ "Line %d: %s\n==> %s"
+ % (reader.LineNo(), str(err), reader.CurrentLine(False))
+ )
+
+ def Canonicalize(self):
+ """Convert parsed experiment file back into an experiment file."""
+ res = ""
+ board = ""
+ for field_name in self.global_settings.fields:
+ field = self.global_settings.fields[field_name]
+ if field.assigned:
+ res += "%s: %s\n" % (field.name, field.GetString())
+ if field.name == "board":
+ board = field.GetString()
+ res += "\n"
+
+ for settings in self.all_settings:
+ if settings.settings_type != "global":
+ res += "%s: %s {\n" % (settings.settings_type, settings.name)
+ for field_name in settings.fields:
+ field = settings.fields[field_name]
+ if field.assigned:
+ res += "\t%s: %s\n" % (field.name, field.GetString())
+ if field.name == "chromeos_image":
+ real_file = os.path.realpath(
+ os.path.expanduser(field.GetString())
+ )
+ if real_file != field.GetString():
+ res += "\t#actual_image: %s\n" % real_file
+ if field.name == "build":
+ chromeos_root_field = settings.fields[
+ "chromeos_root"
+ ]
+ if chromeos_root_field:
+ chromeos_root = chromeos_root_field.GetString()
+ value = field.GetString()
+ autotest_field = settings.fields["autotest_path"]
+ autotest_path = ""
+ if autotest_field.assigned:
+ autotest_path = autotest_field.GetString()
+ debug_field = settings.fields["debug_path"]
+ debug_path = ""
+ if debug_field.assigned:
+                                    debug_path = debug_field.GetString()
+ # Do not download the debug symbols since this function is for
+                                # canonicalizing the experiment file.
+                                download_debug = False
+ (
+ image_path,
+ autotest_path,
+ debug_path,
+ ) = settings.GetXbuddyPath(
+ value,
+ autotest_path,
+ debug_path,
+ board,
+ chromeos_root,
+ "quiet",
+                                    download_debug,
+ )
+ res += "\t#actual_image: %s\n" % image_path
+ if not autotest_field.assigned:
+ res += (
+ "\t#actual_autotest_path: %s\n"
+ % autotest_path
+ )
+ if not debug_field.assigned:
+ res += "\t#actual_debug_path: %s\n" % debug_path
+
+ res += "}\n\n"
+
+ return res
class ExperimentFileReader(object):
- """Handle reading lines from an experiment file."""
-
- def __init__(self, file_object):
- self.file_object = file_object
- self.current_line = None
- self.current_line_no = 0
-
- def CurrentLine(self, strip_comment=True):
- """Return the next line from the file, without advancing the iterator."""
- if strip_comment:
- return self._StripComment(self.current_line)
- return self.current_line
-
- def NextLine(self, strip_comment=True):
- """Advance the iterator and return the next line of the file."""
- self.current_line_no += 1
- self.current_line = self.file_object.readline()
- return self.CurrentLine(strip_comment)
-
- def _StripComment(self, line):
- """Strip comments starting with # from a line."""
- if '#' in line:
- line = line[:line.find('#')] + line[-1]
- return line
-
- def LineNo(self):
- """Return the current line number."""
- return self.current_line_no
+ """Handle reading lines from an experiment file."""
+
+ def __init__(self, file_object):
+ self.file_object = file_object
+ self.current_line = None
+ self.current_line_no = 0
+
+ def CurrentLine(self, strip_comment=True):
+        """Return the current line, without advancing the iterator."""
+ if strip_comment:
+ return self._StripComment(self.current_line)
+ return self.current_line
+
+ def NextLine(self, strip_comment=True):
+ """Advance the iterator and return the next line of the file."""
+ self.current_line_no += 1
+ self.current_line = self.file_object.readline()
+ return self.CurrentLine(strip_comment)
+
+ def _StripComment(self, line):
+ """Strip comments starting with # from a line."""
+ if "#" in line:
+ line = line[: line.find("#")] + line[-1]
+ return line
+
+ def LineNo(self):
+ """Return the current line number."""
+ return self.current_line_no
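
For readers skimming the reformatted ExperimentFile class above, here is a minimal usage sketch (editorial, not part of the diff). It feeds a small experiment description, mirroring the fields exercised by the unit tests below, through the parser and regenerates it with Canonicalize(). The field values are illustrative only; which fields are accepted or required is defined by settings_factory.

import io

from experiment_file import ExperimentFile

# A small experiment description following the grammar documented in the
# ExperimentFile docstring: global fields first, then settings blocks.
EXAMPLE = """
board: x86-alex
remote: chromeos-alex3

benchmark: PageCycler {
    iterations: 3
}

label: image1 {
    chromeos_image: /usr/local/google/cros_image1.bin  # inline comments are stripped
}
"""

experiment_file = ExperimentFile(io.StringIO(EXAMPLE))
print(experiment_file.GetGlobalSettings().GetField("remote"))      # ['chromeos-alex3']
print([b.name for b in experiment_file.GetSettings("benchmark")])  # ['PageCycler']
print(experiment_file.Canonicalize())  # regenerated experiment file text
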
diff --git a/crosperf/experiment_file_unittest.py b/crosperf/experiment_file_unittest.py
index 0d4e1e67..5c09ee06 100755
--- a/crosperf/experiment_file_unittest.py
+++ b/crosperf/experiment_file_unittest.py
@@ -1,18 +1,18 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The unittest of experiment_file."""
-from __future__ import print_function
import io
import unittest
from experiment_file import ExperimentFile
+
EXPERIMENT_FILE_1 = """
board: x86-alex
remote: chromeos-alex3
@@ -158,94 +158,111 @@ label: image2 {
class ExperimentFileTest(unittest.TestCase):
- """The main class for Experiment File test."""
-
- def testLoadExperimentFile1(self):
- input_file = io.StringIO(EXPERIMENT_FILE_1)
- experiment_file = ExperimentFile(input_file)
- global_settings = experiment_file.GetGlobalSettings()
- self.assertEqual(global_settings.GetField('remote'), ['chromeos-alex3'])
- self.assertEqual(
- global_settings.GetField('perf_args'), 'record -a -e cycles')
- benchmark_settings = experiment_file.GetSettings('benchmark')
- self.assertEqual(len(benchmark_settings), 1)
- self.assertEqual(benchmark_settings[0].name, 'PageCycler')
- self.assertEqual(benchmark_settings[0].GetField('iterations'), 3)
-
- label_settings = experiment_file.GetSettings('label')
- self.assertEqual(len(label_settings), 2)
- self.assertEqual(label_settings[0].name, 'image1')
- self.assertEqual(label_settings[0].GetField('chromeos_image'),
- '/usr/local/google/cros_image1.bin')
- self.assertEqual(label_settings[1].GetField('remote'), ['chromeos-lumpy1'])
- self.assertEqual(label_settings[0].GetField('remote'), ['chromeos-alex3'])
-
- def testOverrideSetting(self):
- input_file = io.StringIO(EXPERIMENT_FILE_2)
- experiment_file = ExperimentFile(input_file)
- global_settings = experiment_file.GetGlobalSettings()
- self.assertEqual(global_settings.GetField('remote'), ['chromeos-alex3'])
-
- benchmark_settings = experiment_file.GetSettings('benchmark')
- self.assertEqual(len(benchmark_settings), 2)
- self.assertEqual(benchmark_settings[0].name, 'PageCycler')
- self.assertEqual(benchmark_settings[0].GetField('iterations'), 3)
- self.assertEqual(benchmark_settings[1].name, 'AndroidBench')
- self.assertEqual(benchmark_settings[1].GetField('iterations'), 2)
-
- def testDuplicateLabel(self):
- input_file = io.StringIO(EXPERIMENT_FILE_3)
- self.assertRaises(Exception, ExperimentFile, input_file)
-
- def testDuplicateBenchmark(self):
- input_file = io.StringIO(EXPERIMENT_FILE_4)
- experiment_file = ExperimentFile(input_file)
- benchmark_settings = experiment_file.GetSettings('benchmark')
- self.assertEqual(benchmark_settings[0].name, 'webrtc')
- self.assertEqual(benchmark_settings[0].GetField('test_args'),
- '--story-filter=datachannel')
- self.assertEqual(benchmark_settings[1].name, 'webrtc')
- self.assertEqual(benchmark_settings[1].GetField('test_args'),
- '--story-tag-filter=smoothness')
-
- def testCanonicalize(self):
- input_file = io.StringIO(EXPERIMENT_FILE_1)
- experiment_file = ExperimentFile(input_file)
- res = experiment_file.Canonicalize()
- self.assertEqual(res, OUTPUT_FILE)
-
- def testLoadDutConfigExperimentFile_Good(self):
- input_file = io.StringIO(DUT_CONFIG_EXPERIMENT_FILE_GOOD)
- experiment_file = ExperimentFile(input_file)
- global_settings = experiment_file.GetGlobalSettings()
- self.assertEqual(global_settings.GetField('turbostat'), False)
- self.assertEqual(global_settings.GetField('intel_pstate'), 'no_hwp')
- self.assertEqual(global_settings.GetField('governor'), 'powersave')
- self.assertEqual(global_settings.GetField('cpu_usage'), 'exclusive_cores')
- self.assertEqual(global_settings.GetField('cpu_freq_pct'), 50)
- self.assertEqual(global_settings.GetField('cooldown_time'), 5)
- self.assertEqual(global_settings.GetField('cooldown_temp'), 38)
- self.assertEqual(global_settings.GetField('top_interval'), 5)
-
- def testLoadDutConfigExperimentFile_WrongGovernor(self):
- input_file = io.StringIO(DUT_CONFIG_EXPERIMENT_FILE_BAD_GOV)
- with self.assertRaises(RuntimeError) as msg:
- ExperimentFile(input_file)
- self.assertRegex(str(msg.exception), 'governor: misspelled_governor')
- self.assertRegex(
- str(msg.exception), "Invalid enum value for field 'governor'."
- r' Must be one of \(performance, powersave, userspace, ondemand,'
- r' conservative, schedutils, sched, interactive\)')
-
- def testLoadDutConfigExperimentFile_WrongCpuUsage(self):
- input_file = io.StringIO(DUT_CONFIG_EXPERIMENT_FILE_BAD_CPUUSE)
- with self.assertRaises(RuntimeError) as msg:
- ExperimentFile(input_file)
- self.assertRegex(str(msg.exception), 'cpu_usage: unknown')
- self.assertRegex(
- str(msg.exception), "Invalid enum value for field 'cpu_usage'."
- r' Must be one of \(all, big_only, little_only, exclusive_cores\)')
-
-
-if __name__ == '__main__':
- unittest.main()
+ """The main class for Experiment File test."""
+
+ def testLoadExperimentFile1(self):
+ input_file = io.StringIO(EXPERIMENT_FILE_1)
+ experiment_file = ExperimentFile(input_file)
+ global_settings = experiment_file.GetGlobalSettings()
+ self.assertEqual(global_settings.GetField("remote"), ["chromeos-alex3"])
+ self.assertEqual(
+ global_settings.GetField("perf_args"), "record -a -e cycles"
+ )
+ benchmark_settings = experiment_file.GetSettings("benchmark")
+ self.assertEqual(len(benchmark_settings), 1)
+ self.assertEqual(benchmark_settings[0].name, "PageCycler")
+ self.assertEqual(benchmark_settings[0].GetField("iterations"), 3)
+
+ label_settings = experiment_file.GetSettings("label")
+ self.assertEqual(len(label_settings), 2)
+ self.assertEqual(label_settings[0].name, "image1")
+ self.assertEqual(
+ label_settings[0].GetField("chromeos_image"),
+ "/usr/local/google/cros_image1.bin",
+ )
+ self.assertEqual(
+ label_settings[1].GetField("remote"), ["chromeos-lumpy1"]
+ )
+ self.assertEqual(
+ label_settings[0].GetField("remote"), ["chromeos-alex3"]
+ )
+
+ def testOverrideSetting(self):
+ input_file = io.StringIO(EXPERIMENT_FILE_2)
+ experiment_file = ExperimentFile(input_file)
+ global_settings = experiment_file.GetGlobalSettings()
+ self.assertEqual(global_settings.GetField("remote"), ["chromeos-alex3"])
+
+ benchmark_settings = experiment_file.GetSettings("benchmark")
+ self.assertEqual(len(benchmark_settings), 2)
+ self.assertEqual(benchmark_settings[0].name, "PageCycler")
+ self.assertEqual(benchmark_settings[0].GetField("iterations"), 3)
+ self.assertEqual(benchmark_settings[1].name, "AndroidBench")
+ self.assertEqual(benchmark_settings[1].GetField("iterations"), 2)
+
+ def testDuplicateLabel(self):
+ input_file = io.StringIO(EXPERIMENT_FILE_3)
+ self.assertRaises(Exception, ExperimentFile, input_file)
+
+ def testDuplicateBenchmark(self):
+ input_file = io.StringIO(EXPERIMENT_FILE_4)
+ experiment_file = ExperimentFile(input_file)
+ benchmark_settings = experiment_file.GetSettings("benchmark")
+ self.assertEqual(benchmark_settings[0].name, "webrtc")
+ self.assertEqual(
+ benchmark_settings[0].GetField("test_args"),
+ "--story-filter=datachannel",
+ )
+ self.assertEqual(benchmark_settings[1].name, "webrtc")
+ self.assertEqual(
+ benchmark_settings[1].GetField("test_args"),
+ "--story-tag-filter=smoothness",
+ )
+
+ def testCanonicalize(self):
+ input_file = io.StringIO(EXPERIMENT_FILE_1)
+ experiment_file = ExperimentFile(input_file)
+ res = experiment_file.Canonicalize()
+ self.assertEqual(res, OUTPUT_FILE)
+
+ def testLoadDutConfigExperimentFile_Good(self):
+ input_file = io.StringIO(DUT_CONFIG_EXPERIMENT_FILE_GOOD)
+ experiment_file = ExperimentFile(input_file)
+ global_settings = experiment_file.GetGlobalSettings()
+ self.assertEqual(global_settings.GetField("turbostat"), False)
+ self.assertEqual(global_settings.GetField("intel_pstate"), "no_hwp")
+ self.assertEqual(global_settings.GetField("governor"), "powersave")
+ self.assertEqual(
+ global_settings.GetField("cpu_usage"), "exclusive_cores"
+ )
+ self.assertEqual(global_settings.GetField("cpu_freq_pct"), 50)
+ self.assertEqual(global_settings.GetField("cooldown_time"), 5)
+ self.assertEqual(global_settings.GetField("cooldown_temp"), 38)
+ self.assertEqual(global_settings.GetField("top_interval"), 5)
+
+ def testLoadDutConfigExperimentFile_WrongGovernor(self):
+ input_file = io.StringIO(DUT_CONFIG_EXPERIMENT_FILE_BAD_GOV)
+ with self.assertRaises(RuntimeError) as msg:
+ ExperimentFile(input_file)
+ self.assertRegex(str(msg.exception), "governor: misspelled_governor")
+ self.assertRegex(
+ str(msg.exception),
+ "Invalid enum value for field 'governor'."
+ r" Must be one of \(performance, powersave, userspace, ondemand,"
+ r" conservative, schedutils, sched, interactive\)",
+ )
+
+ def testLoadDutConfigExperimentFile_WrongCpuUsage(self):
+ input_file = io.StringIO(DUT_CONFIG_EXPERIMENT_FILE_BAD_CPUUSE)
+ with self.assertRaises(RuntimeError) as msg:
+ ExperimentFile(input_file)
+ self.assertRegex(str(msg.exception), "cpu_usage: unknown")
+ self.assertRegex(
+ str(msg.exception),
+ "Invalid enum value for field 'cpu_usage'."
+ r" Must be one of \(all, big_only, little_only, exclusive_cores\)",
+ )
+
+
+if __name__ == "__main__":
+ unittest.main()
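
One behavior the tests above rely on implicitly is the comment handling in ExperimentFileReader (defined in experiment_file.py above): everything from '#' onward is dropped while the line's final character (normally the trailing newline) is kept, and LineNo() counts lines as they are read, which is what the "Line %d:" RuntimeError messages report. A minimal sketch, separate from the diff:

import io

from experiment_file import ExperimentFileReader

reader = ExperimentFileReader(io.StringIO("iterations: 3  # three runs\n"))
print(repr(reader.NextLine()))          # 'iterations: 3  \n' (comment stripped)
print(repr(reader.CurrentLine(False)))  # 'iterations: 3  # three runs\n'
print(reader.LineNo())                  # 1
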
diff --git a/crosperf/experiment_files/telemetry_perf_perf b/crosperf/experiment_files/telemetry_perf_perf
index acdf96d0..e46fdc2a 100755
--- a/crosperf/experiment_files/telemetry_perf_perf
+++ b/crosperf/experiment_files/telemetry_perf_perf
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Copyright 2016 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
@@ -12,7 +12,7 @@
# Perf will run for the entire benchmark run, so results should be interpreted
# in that context. i.e, if this shows a 3% overhead for a particular perf
# command, that overhead would only be seen during the 2 seconds of measurement
-# during a Chrome OS Wide Profiling collection.
+# during a ChromeOS Wide Profiling collection.
set -e
board=xxx #<you-board-here>
@@ -74,4 +74,3 @@ RunExperiment 'cycles.callgraph' \
# overhead.
RunExperiment 'memory.bandwidth' \
'stat -e cycles -e instructions -e uncore_imc/data_reads/ -e uncore_imc/data_writes/ -e cpu/event=0xD0,umask=0x11,name=MEM_UOPS_RETIRED-STLB_MISS_LOADS/ -e cpu/event=0xD0,umask=0x12,name=MEM_UOPS_RETIRED-STLB_MISS_STORES/'
-
diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py
index 6daef780..1f78dcc0 100644
--- a/crosperf/experiment_runner.py
+++ b/crosperf/experiment_runner.py
@@ -1,363 +1,402 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The experiment runner module."""
-from __future__ import print_function
import getpass
import os
import shutil
import time
-import lock_machine
-import test_flag
-
from cros_utils import command_executer
from cros_utils import logger
from cros_utils.email_sender import EmailSender
from cros_utils.file_utils import FileUtils
-
-import config
from experiment_status import ExperimentStatus
+import lock_machine
from results_cache import CacheConditions
from results_cache import ResultsCache
from results_report import HTMLResultsReport
-from results_report import TextResultsReport
from results_report import JSONResultsReport
+from results_report import TextResultsReport
from schedv2 import Schedv2
+import test_flag
+
+import config
def _WriteJSONReportToFile(experiment, results_dir, json_report):
- """Writes a JSON report to a file in results_dir."""
- has_llvm = any('llvm' in l.compiler for l in experiment.labels)
- compiler_string = 'llvm' if has_llvm else 'gcc'
- board = experiment.labels[0].board
- filename = 'report_%s_%s_%s.%s.json' % (board, json_report.date,
- json_report.time.replace(
- ':', '.'), compiler_string)
- fullname = os.path.join(results_dir, filename)
- report_text = json_report.GetReport()
- with open(fullname, 'w') as out_file:
- out_file.write(report_text)
+ """Writes a JSON report to a file in results_dir."""
+ has_llvm = any("llvm" in l.compiler for l in experiment.labels)
+ compiler_string = "llvm" if has_llvm else "gcc"
+ board = experiment.labels[0].board
+ filename = "report_%s_%s_%s.%s.json" % (
+ board,
+ json_report.date,
+ json_report.time.replace(":", "."),
+ compiler_string,
+ )
+ fullname = os.path.join(results_dir, filename)
+ report_text = json_report.GetReport()
+ with open(fullname, "w") as out_file:
+ out_file.write(report_text)
class ExperimentRunner(object):
- """ExperimentRunner Class."""
-
- STATUS_TIME_DELAY = 30
- THREAD_MONITOR_DELAY = 2
-
- SUCCEEDED = 0
- HAS_FAILURE = 1
- ALL_FAILED = 2
-
- def __init__(self,
- experiment,
- json_report,
- using_schedv2=False,
- log=None,
- cmd_exec=None):
- self._experiment = experiment
- self.l = log or logger.GetLogger(experiment.log_dir)
- self._ce = cmd_exec or command_executer.GetCommandExecuter(self.l)
- self._terminated = False
- self.json_report = json_report
- self.locked_machines = []
- if experiment.log_level != 'verbose':
- self.STATUS_TIME_DELAY = 10
-
- # Setting this to True will use crosperf sched v2 (feature in progress).
- self._using_schedv2 = using_schedv2
-
- def _GetMachineList(self):
- """Return a list of all requested machines.
-
- Create a list of all the requested machines, both global requests and
- label-specific requests, and return the list.
- """
- machines = self._experiment.remote
- # All Label.remote is a sublist of experiment.remote.
- for l in self._experiment.labels:
- for r in l.remote:
- assert r in machines
- return machines
-
- def _UpdateMachineList(self, locked_machines):
- """Update machines lists to contain only locked machines.
-
- Go through all the lists of requested machines, both global and
- label-specific requests, and remove any machine that we were not
- able to lock.
-
- Args:
- locked_machines: A list of the machines we successfully locked.
- """
- for m in self._experiment.remote:
- if m not in locked_machines:
- self._experiment.remote.remove(m)
-
- for l in self._experiment.labels:
- for m in l.remote:
- if m not in locked_machines:
- l.remote.remove(m)
-
- def _GetMachineType(self, lock_mgr, machine):
- """Get where is the machine from.
-
- Returns:
- The location of the machine: local or crosfleet
- """
- # We assume that lab machine always starts with chromeos*, and local
- # machines are ip address.
- if 'chromeos' in machine:
- if lock_mgr.CheckMachineInCrosfleet(machine):
- return 'crosfleet'
- else:
- raise RuntimeError('Lab machine not in Crosfleet.')
- return 'local'
-
- def _LockAllMachines(self, experiment):
- """Attempt to globally lock all of the machines requested for run.
-
- This method tries to lock all machines requested for this crosperf run
- in three different modes automatically, to prevent any other crosperf runs
- from being able to update/use the machines while this experiment is
- running:
- - Crosfleet machines: Use crosfleet lease-dut mechanism to lease
- - Local machines: Use file lock mechanism to lock
- """
- if test_flag.GetTestMode():
- self.locked_machines = self._GetMachineList()
- experiment.locked_machines = self.locked_machines
- else:
- experiment.lock_mgr = lock_machine.LockManager(
- self._GetMachineList(),
- '',
- experiment.labels[0].chromeos_root,
- experiment.locks_dir,
- log=self.l,
- )
- for m in experiment.lock_mgr.machines:
- machine_type = self._GetMachineType(experiment.lock_mgr, m)
- if machine_type == 'local':
- experiment.lock_mgr.AddMachineToLocal(m)
- elif machine_type == 'crosfleet':
- experiment.lock_mgr.AddMachineToCrosfleet(m)
- machine_states = experiment.lock_mgr.GetMachineStates('lock')
- experiment.lock_mgr.CheckMachineLocks(machine_states, 'lock')
- self.locked_machines = experiment.lock_mgr.UpdateMachines(True)
- experiment.locked_machines = self.locked_machines
- self._UpdateMachineList(self.locked_machines)
- experiment.machine_manager.RemoveNonLockedMachines(self.locked_machines)
- if not self.locked_machines:
- raise RuntimeError('Unable to lock any machines.')
-
- def _ClearCacheEntries(self, experiment):
- for br in experiment.benchmark_runs:
- cache = ResultsCache()
- cache.Init(br.label.chromeos_image, br.label.chromeos_root,
- br.benchmark.test_name, br.iteration, br.test_args,
- br.profiler_args, br.machine_manager, br.machine,
- br.label.board, br.cache_conditions, br.logger(),
- br.log_level, br.label, br.share_cache, br.benchmark.suite,
- br.benchmark.show_all_results, br.benchmark.run_local,
- br.benchmark.cwp_dso)
- cache_dir = cache.GetCacheDirForWrite()
- if os.path.exists(cache_dir):
- self.l.LogOutput('Removing cache dir: %s' % cache_dir)
- shutil.rmtree(cache_dir)
-
- def _Run(self, experiment):
- try:
- # We should not lease machines if tests are launched via `crosfleet
- # create-test`. This is because leasing DUT in crosfleet will create a
- # no-op task on the DUT and new test created will be hanging there.
- # TODO(zhizhouy): Need to check whether machine is ready or not before
- # assigning a test to it.
- if not experiment.no_lock and not experiment.crosfleet:
- self._LockAllMachines(experiment)
- # Calculate all checksums of avaiable/locked machines, to ensure same
- # label has same machines for testing
- experiment.SetCheckSums(forceSameImage=True)
- if self._using_schedv2:
- schedv2 = Schedv2(experiment)
- experiment.set_schedv2(schedv2)
- if CacheConditions.FALSE in experiment.cache_conditions:
- self._ClearCacheEntries(experiment)
- status = ExperimentStatus(experiment)
- experiment.Run()
- last_status_time = 0
- last_status_string = ''
- try:
- if experiment.log_level != 'verbose':
- self.l.LogStartDots()
- while not experiment.IsComplete():
- if last_status_time + self.STATUS_TIME_DELAY < time.time():
- last_status_time = time.time()
- border = '=============================='
- if experiment.log_level == 'verbose':
- self.l.LogOutput(border)
- self.l.LogOutput(status.GetProgressString())
- self.l.LogOutput(status.GetStatusString())
- self.l.LogOutput(border)
+ """ExperimentRunner Class."""
+
+ STATUS_TIME_DELAY = 30
+ THREAD_MONITOR_DELAY = 2
+
+ SUCCEEDED = 0
+ HAS_FAILURE = 1
+ ALL_FAILED = 2
+
+ def __init__(
+ self,
+ experiment,
+ json_report,
+ using_schedv2=False,
+ log=None,
+ cmd_exec=None,
+ ):
+ self._experiment = experiment
+ self.l = log or logger.GetLogger(experiment.log_dir)
+ self._ce = cmd_exec or command_executer.GetCommandExecuter(self.l)
+ self._terminated = False
+ self.json_report = json_report
+ self.locked_machines = []
+ if experiment.log_level != "verbose":
+ self.STATUS_TIME_DELAY = 10
+
+ # Setting this to True will use crosperf sched v2 (feature in progress).
+ self._using_schedv2 = using_schedv2
+
+ def _GetMachineList(self):
+ """Return a list of all requested machines.
+
+ Create a list of all the requested machines, both global requests and
+ label-specific requests, and return the list.
+ """
+ machines = self._experiment.remote
+ # All Label.remote is a sublist of experiment.remote.
+ for l in self._experiment.labels:
+ for r in l.remote:
+ assert r in machines
+ return machines
+
+ def _UpdateMachineList(self, locked_machines):
+ """Update machines lists to contain only locked machines.
+
+ Go through all the lists of requested machines, both global and
+ label-specific requests, and remove any machine that we were not
+ able to lock.
+
+ Args:
+ locked_machines: A list of the machines we successfully locked.
+ """
+ for m in self._experiment.remote:
+ if m not in locked_machines:
+ self._experiment.remote.remove(m)
+
+ for l in self._experiment.labels:
+ for m in l.remote:
+ if m not in locked_machines:
+ l.remote.remove(m)
+
+ def _GetMachineType(self, lock_mgr, machine):
+        """Determine where the machine comes from.
+
+ Returns:
+ The location of the machine: local or crosfleet
+ """
+        # We assume that lab machine names always start with chromeos*, and
+        # local machines are specified by IP address.
+ if "chromeos" in machine:
+ if lock_mgr.CheckMachineInCrosfleet(machine):
+ return "crosfleet"
else:
- current_status_string = status.GetStatusString()
- if current_status_string != last_status_string:
- self.l.LogEndDots()
- self.l.LogOutput(border)
- self.l.LogOutput(current_status_string)
- self.l.LogOutput(border)
- last_status_string = current_status_string
- else:
- self.l.LogAppendDot()
- time.sleep(self.THREAD_MONITOR_DELAY)
- except KeyboardInterrupt:
- self._terminated = True
- self.l.LogError('Ctrl-c pressed. Cleaning up...')
- experiment.Terminate()
- raise
- except SystemExit:
- self._terminated = True
- self.l.LogError('Unexpected exit. Cleaning up...')
- experiment.Terminate()
- raise
- finally:
- experiment.Cleanup()
-
- def _PrintTable(self, experiment):
- self.l.LogOutput(TextResultsReport.FromExperiment(experiment).GetReport())
-
- def _Email(self, experiment):
- # Only email by default if a new run was completed.
- send_mail = False
- for benchmark_run in experiment.benchmark_runs:
- if not benchmark_run.cache_hit:
- send_mail = True
- break
- if (not send_mail and not experiment.email_to
- or config.GetConfig('no_email')):
- return
-
- label_names = []
- for label in experiment.labels:
- label_names.append(label.name)
- subject = '%s: %s' % (experiment.name, ' vs. '.join(label_names))
-
- text_report = TextResultsReport.FromExperiment(experiment,
- True).GetReport()
- text_report += ('\nResults are stored in %s.\n' %
- experiment.results_directory)
- text_report = "<pre style='font-size: 13px'>%s</pre>" % text_report
- html_report = HTMLResultsReport.FromExperiment(experiment).GetReport()
- attachment = EmailSender.Attachment('report.html', html_report)
- email_to = experiment.email_to or []
- email_to.append(getpass.getuser())
- EmailSender().SendEmail(email_to,
- subject,
- text_report,
- attachments=[attachment],
- msg_type='html')
-
- def _StoreResults(self, experiment):
- if self._terminated:
- return self.ALL_FAILED
-
- results_directory = experiment.results_directory
- FileUtils().RmDir(results_directory)
- FileUtils().MkDirP(results_directory)
- self.l.LogOutput('Storing experiment file in %s.' % results_directory)
- experiment_file_path = os.path.join(results_directory, 'experiment.exp')
- FileUtils().WriteFile(experiment_file_path, experiment.experiment_file)
-
- has_failure = False
- all_failed = True
-
- topstats_file = os.path.join(results_directory, 'topstats.log')
- self.l.LogOutput('Storing top statistics of each benchmark run into %s.' %
- topstats_file)
- with open(topstats_file, 'w') as top_fd:
- for benchmark_run in experiment.benchmark_runs:
- if benchmark_run.result:
- # FIXME: Pylint has a bug suggesting the following change, which
- # should be fixed in pylint 2.0. Resolve this after pylint >= 2.0.
- # Bug: https://github.com/PyCQA/pylint/issues/1984
- # pylint: disable=simplifiable-if-statement
- if benchmark_run.result.retval:
- has_failure = True
- else:
- all_failed = False
- # Header with benchmark run name.
- top_fd.write('%s\n' % str(benchmark_run))
- # Formatted string with top statistics.
- top_fd.write(benchmark_run.result.FormatStringTopCommands())
- top_fd.write('\n\n')
-
- if all_failed:
- return self.ALL_FAILED
-
- self.l.LogOutput('Storing results of each benchmark run.')
- for benchmark_run in experiment.benchmark_runs:
- if benchmark_run.result:
- benchmark_run_name = ''.join(ch for ch in benchmark_run.name
- if ch.isalnum())
- benchmark_run_path = os.path.join(results_directory,
- benchmark_run_name)
- if experiment.compress_results:
- benchmark_run.result.CompressResultsTo(benchmark_run_path)
+ raise RuntimeError("Lab machine not in Crosfleet.")
+ return "local"
+
+ def _LockAllMachines(self, experiment):
+ """Attempt to globally lock all of the machines requested for run.
+
+ This method tries to lock all machines requested for this crosperf run
+        in two different modes automatically, to prevent any other crosperf runs
+ from being able to update/use the machines while this experiment is
+ running:
+ - Crosfleet machines: Use crosfleet lease-dut mechanism to lease
+ - Local machines: Use file lock mechanism to lock
+ """
+ if test_flag.GetTestMode():
+ self.locked_machines = self._GetMachineList()
+ experiment.locked_machines = self.locked_machines
else:
- benchmark_run.result.CopyResultsTo(benchmark_run_path)
- benchmark_run.result.CleanUp(benchmark_run.benchmark.rm_chroot_tmp)
-
- self.l.LogOutput('Storing results report in %s.' % results_directory)
- results_table_path = os.path.join(results_directory, 'results.html')
- report = HTMLResultsReport.FromExperiment(experiment).GetReport()
- if self.json_report:
- json_report = JSONResultsReport.FromExperiment(experiment,
- json_args={'indent': 2})
- _WriteJSONReportToFile(experiment, results_directory, json_report)
-
- FileUtils().WriteFile(results_table_path, report)
-
- self.l.LogOutput('Storing email message body in %s.' % results_directory)
- msg_file_path = os.path.join(results_directory, 'msg_body.html')
- text_report = TextResultsReport.FromExperiment(experiment,
- True).GetReport()
- text_report += ('\nResults are stored in %s.\n' %
- experiment.results_directory)
- msg_body = "<pre style='font-size: 13px'>%s</pre>" % text_report
- FileUtils().WriteFile(msg_file_path, msg_body)
-
- return self.SUCCEEDED if not has_failure else self.HAS_FAILURE
-
- def Run(self):
- try:
- self._Run(self._experiment)
- finally:
- # Always print the report at the end of the run.
- self._PrintTable(self._experiment)
- ret = self._StoreResults(self._experiment)
- if ret != self.ALL_FAILED:
- self._Email(self._experiment)
- return ret
+ experiment.lock_mgr = lock_machine.LockManager(
+ self._GetMachineList(),
+ "",
+ experiment.labels[0].chromeos_root,
+ experiment.locks_dir,
+ log=self.l,
+ )
+ for m in experiment.lock_mgr.machines:
+ machine_type = self._GetMachineType(experiment.lock_mgr, m)
+ if machine_type == "local":
+ experiment.lock_mgr.AddMachineToLocal(m)
+ elif machine_type == "crosfleet":
+ experiment.lock_mgr.AddMachineToCrosfleet(m)
+ machine_states = experiment.lock_mgr.GetMachineStates("lock")
+ experiment.lock_mgr.CheckMachineLocks(machine_states, "lock")
+ self.locked_machines = experiment.lock_mgr.UpdateMachines(True)
+ experiment.locked_machines = self.locked_machines
+ self._UpdateMachineList(self.locked_machines)
+ experiment.machine_manager.RemoveNonLockedMachines(
+ self.locked_machines
+ )
+ if not self.locked_machines:
+ raise RuntimeError("Unable to lock any machines.")
+
+ def _ClearCacheEntries(self, experiment):
+ for br in experiment.benchmark_runs:
+ cache = ResultsCache()
+ cache.Init(
+ br.label.chromeos_image,
+ br.label.chromeos_root,
+ br.benchmark.test_name,
+ br.iteration,
+ br.test_args,
+ br.profiler_args,
+ br.machine_manager,
+ br.machine,
+ br.label.board,
+ br.cache_conditions,
+ br.logger(),
+ br.log_level,
+ br.label,
+ br.share_cache,
+ br.benchmark.suite,
+ br.benchmark.show_all_results,
+ br.benchmark.run_local,
+ br.benchmark.cwp_dso,
+ )
+ cache_dir = cache.GetCacheDirForWrite()
+ if os.path.exists(cache_dir):
+ self.l.LogOutput("Removing cache dir: %s" % cache_dir)
+ shutil.rmtree(cache_dir)
+
+ def _Run(self, experiment):
+ try:
+ # We should not lease machines if tests are launched via `crosfleet
+            # create-test`. Leasing a DUT in crosfleet creates a no-op task on
+            # the DUT, so any newly created test would hang behind it.
+ # TODO(zhizhouy): Need to check whether machine is ready or not before
+ # assigning a test to it.
+ if not experiment.no_lock and not experiment.crosfleet:
+ self._LockAllMachines(experiment)
+                # Calculate checksums of all available/locked machines, to
+                # ensure the same label uses the same machines for testing.
+ experiment.SetCheckSums(forceSameImage=True)
+ if self._using_schedv2:
+ schedv2 = Schedv2(experiment)
+ experiment.set_schedv2(schedv2)
+ if CacheConditions.FALSE in experiment.cache_conditions:
+ self._ClearCacheEntries(experiment)
+ status = ExperimentStatus(experiment)
+ experiment.Run()
+ last_status_time = 0
+ last_status_string = ""
+ try:
+ if experiment.log_level != "verbose":
+ self.l.LogStartDots()
+ while not experiment.IsComplete():
+ if last_status_time + self.STATUS_TIME_DELAY < time.time():
+ last_status_time = time.time()
+ border = "=============================="
+ if experiment.log_level == "verbose":
+ self.l.LogOutput(border)
+ self.l.LogOutput(status.GetProgressString())
+ self.l.LogOutput(status.GetStatusString())
+ self.l.LogOutput(border)
+ else:
+ current_status_string = status.GetStatusString()
+ if current_status_string != last_status_string:
+ self.l.LogEndDots()
+ self.l.LogOutput(border)
+ self.l.LogOutput(current_status_string)
+ self.l.LogOutput(border)
+ last_status_string = current_status_string
+ else:
+ self.l.LogAppendDot()
+ time.sleep(self.THREAD_MONITOR_DELAY)
+ except KeyboardInterrupt:
+ self._terminated = True
+ self.l.LogError("Ctrl-c pressed. Cleaning up...")
+ experiment.Terminate()
+ raise
+ except SystemExit:
+ self._terminated = True
+ self.l.LogError("Unexpected exit. Cleaning up...")
+ experiment.Terminate()
+ raise
+ finally:
+ experiment.Cleanup()
+
+ def _PrintTable(self, experiment):
+ self.l.LogOutput(
+ TextResultsReport.FromExperiment(experiment).GetReport()
+ )
+
+ def _Email(self, experiment):
+ # Only email by default if a new run was completed.
+ send_mail = False
+ for benchmark_run in experiment.benchmark_runs:
+ if not benchmark_run.cache_hit:
+ send_mail = True
+ break
+ if (
+ not send_mail
+ and not experiment.email_to
+ or config.GetConfig("no_email")
+ ):
+ return
+
+ label_names = []
+ for label in experiment.labels:
+ label_names.append(label.name)
+ subject = "%s: %s" % (experiment.name, " vs. ".join(label_names))
+
+ text_report = TextResultsReport.FromExperiment(
+ experiment, True
+ ).GetReport()
+ text_report += (
+ "\nResults are stored in %s.\n" % experiment.results_directory
+ )
+ text_report = "<pre style='font-size: 13px'>%s</pre>" % text_report
+ html_report = HTMLResultsReport.FromExperiment(experiment).GetReport()
+ attachment = EmailSender.Attachment("report.html", html_report)
+ email_to = experiment.email_to or []
+ email_to.append(getpass.getuser())
+ EmailSender().SendEmail(
+ email_to,
+ subject,
+ text_report,
+ attachments=[attachment],
+ msg_type="html",
+ )
+
+ def _StoreResults(self, experiment):
+ if self._terminated:
+ return self.ALL_FAILED
+
+ results_directory = experiment.results_directory
+ FileUtils().RmDir(results_directory)
+ FileUtils().MkDirP(results_directory)
+ self.l.LogOutput("Storing experiment file in %s." % results_directory)
+ experiment_file_path = os.path.join(results_directory, "experiment.exp")
+ FileUtils().WriteFile(experiment_file_path, experiment.experiment_file)
+
+ has_failure = False
+ all_failed = True
+
+ topstats_file = os.path.join(results_directory, "topstats.log")
+ self.l.LogOutput(
+ "Storing top statistics of each benchmark run into %s."
+ % topstats_file
+ )
+ with open(topstats_file, "w") as top_fd:
+ for benchmark_run in experiment.benchmark_runs:
+ if benchmark_run.result:
+ # FIXME: Pylint has a bug suggesting the following change, which
+ # should be fixed in pylint 2.0. Resolve this after pylint >= 2.0.
+ # Bug: https://github.com/PyCQA/pylint/issues/1984
+ # pylint: disable=simplifiable-if-statement
+ if benchmark_run.result.retval:
+ has_failure = True
+ else:
+ all_failed = False
+ # Header with benchmark run name.
+ top_fd.write("%s\n" % str(benchmark_run))
+ # Formatted string with top statistics.
+ top_fd.write(benchmark_run.result.FormatStringTopCommands())
+ top_fd.write("\n\n")
+
+ if all_failed:
+ return self.ALL_FAILED
+
+ self.l.LogOutput("Storing results of each benchmark run.")
+ for benchmark_run in experiment.benchmark_runs:
+ if benchmark_run.result:
+ benchmark_run_name = "".join(
+ ch for ch in benchmark_run.name if ch.isalnum()
+ )
+ benchmark_run_path = os.path.join(
+ results_directory, benchmark_run_name
+ )
+ if experiment.compress_results:
+ benchmark_run.result.CompressResultsTo(benchmark_run_path)
+ else:
+ benchmark_run.result.CopyResultsTo(benchmark_run_path)
+ benchmark_run.result.CleanUp(
+ benchmark_run.benchmark.rm_chroot_tmp
+ )
+
+ self.l.LogOutput("Storing results report in %s." % results_directory)
+ results_table_path = os.path.join(results_directory, "results.html")
+ report = HTMLResultsReport.FromExperiment(experiment).GetReport()
+ if self.json_report:
+ json_report = JSONResultsReport.FromExperiment(
+ experiment, json_args={"indent": 2}
+ )
+ _WriteJSONReportToFile(experiment, results_directory, json_report)
+
+ FileUtils().WriteFile(results_table_path, report)
+
+ self.l.LogOutput(
+ "Storing email message body in %s." % results_directory
+ )
+ msg_file_path = os.path.join(results_directory, "msg_body.html")
+ text_report = TextResultsReport.FromExperiment(
+ experiment, True
+ ).GetReport()
+ text_report += (
+ "\nResults are stored in %s.\n" % experiment.results_directory
+ )
+ msg_body = "<pre style='font-size: 13px'>%s</pre>" % text_report
+ FileUtils().WriteFile(msg_file_path, msg_body)
+
+ return self.SUCCEEDED if not has_failure else self.HAS_FAILURE
+
+ def Run(self):
+ try:
+ self._Run(self._experiment)
+ finally:
+ # Always print the report at the end of the run.
+ self._PrintTable(self._experiment)
+ ret = self._StoreResults(self._experiment)
+ if ret != self.ALL_FAILED:
+ self._Email(self._experiment)
+ return ret
class MockExperimentRunner(ExperimentRunner):
- """Mocked ExperimentRunner for testing."""
+ """Mocked ExperimentRunner for testing."""
- def __init__(self, experiment, json_report):
- super(MockExperimentRunner, self).__init__(experiment, json_report)
+ def __init__(self, experiment, json_report):
+ super(MockExperimentRunner, self).__init__(experiment, json_report)
- def _Run(self, experiment):
- self.l.LogOutput("Would run the following experiment: '%s'." %
- experiment.name)
+ def _Run(self, experiment):
+ self.l.LogOutput(
+ "Would run the following experiment: '%s'." % experiment.name
+ )
- def _PrintTable(self, experiment):
- self.l.LogOutput('Would print the experiment table.')
+ def _PrintTable(self, experiment):
+ self.l.LogOutput("Would print the experiment table.")
- def _Email(self, experiment):
- self.l.LogOutput('Would send result email.')
+ def _Email(self, experiment):
+ self.l.LogOutput("Would send result email.")
- def _StoreResults(self, experiment):
- self.l.LogOutput('Would store the results.')
+ def _StoreResults(self, experiment):
+ self.l.LogOutput("Would store the results.")
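
As a side note on the reformatted _WriteJSONReportToFile above, the report filename it builds follows the pattern report_<board>_<date>_<time>.<compiler>.json, with ':' in the time replaced by '.'. A hedged sketch with made-up values (the real ones come from experiment.labels and the JSONResultsReport object):

# All values below are hypothetical; in the runner they come from
# experiment.labels[0].board, json_report.date, json_report.time, and the
# labels' compiler strings ("llvm" if any label's compiler mentions llvm,
# otherwise "gcc").
board = "parrot"
date = "2022-01-31"
time_of_day = "14:30:05"
compiler_string = "llvm"

filename = "report_%s_%s_%s.%s.json" % (
    board,
    date,
    time_of_day.replace(":", "."),
    compiler_string,
)
print(filename)  # report_parrot_2022-01-31_14.30.05.llvm.json
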
diff --git a/crosperf/experiment_runner_unittest.py b/crosperf/experiment_runner_unittest.py
index 31d02e71..a9a12630 100755
--- a/crosperf/experiment_runner_unittest.py
+++ b/crosperf/experiment_runner_unittest.py
@@ -1,37 +1,35 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
-# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for the experiment runner module."""
-from __future__ import print_function
import getpass
import io
import os
import time
-
import unittest
import unittest.mock as mock
+from cros_utils import command_executer
+from cros_utils.email_sender import EmailSender
+from cros_utils.file_utils import FileUtils
+from experiment_factory import ExperimentFactory
+from experiment_file import ExperimentFile
import experiment_runner
import experiment_status
import machine_manager
-import config
-import test_flag
-
-from experiment_factory import ExperimentFactory
-from experiment_file import ExperimentFile
from results_cache import Result
from results_report import HTMLResultsReport
from results_report import TextResultsReport
+import test_flag
+
+import config
-from cros_utils import command_executer
-from cros_utils.email_sender import EmailSender
-from cros_utils.file_utils import FileUtils
EXPERIMENT_FILE_1 = """
board: parrot
@@ -57,445 +55,513 @@ EXPERIMENT_FILE_1 = """
class FakeLogger(object):
- """Fake logger for tests."""
-
- def __init__(self):
- self.LogOutputCount = 0
- self.LogErrorCount = 0
- self.output_msgs = []
- self.error_msgs = []
- self.dot_count = 0
- self.LogStartDotsCount = 0
- self.LogEndDotsCount = 0
- self.LogAppendDotCount = 0
-
- def LogOutput(self, msg):
- self.LogOutputCount += 1
- self.output_msgs.append(msg)
-
- def LogError(self, msg):
- self.LogErrorCount += 1
- self.error_msgs.append(msg)
-
- def LogStartDots(self):
- self.LogStartDotsCount += 1
- self.dot_count += 1
-
- def LogAppendDot(self):
- self.LogAppendDotCount += 1
- self.dot_count += 1
-
- def LogEndDots(self):
- self.LogEndDotsCount += 1
-
- def Reset(self):
- self.LogOutputCount = 0
- self.LogErrorCount = 0
- self.output_msgs = []
- self.error_msgs = []
- self.dot_count = 0
- self.LogStartDotsCount = 0
- self.LogEndDotsCount = 0
- self.LogAppendDotCount = 0
+ """Fake logger for tests."""
+
+ def __init__(self):
+ self.LogOutputCount = 0
+ self.LogErrorCount = 0
+ self.output_msgs = []
+ self.error_msgs = []
+ self.dot_count = 0
+ self.LogStartDotsCount = 0
+ self.LogEndDotsCount = 0
+ self.LogAppendDotCount = 0
+
+ def LogOutput(self, msg):
+ self.LogOutputCount += 1
+ self.output_msgs.append(msg)
+
+ def LogError(self, msg):
+ self.LogErrorCount += 1
+ self.error_msgs.append(msg)
+
+ def LogStartDots(self):
+ self.LogStartDotsCount += 1
+ self.dot_count += 1
+
+ def LogAppendDot(self):
+ self.LogAppendDotCount += 1
+ self.dot_count += 1
+
+ def LogEndDots(self):
+ self.LogEndDotsCount += 1
+
+ def Reset(self):
+ self.LogOutputCount = 0
+ self.LogErrorCount = 0
+ self.output_msgs = []
+ self.error_msgs = []
+ self.dot_count = 0
+ self.LogStartDotsCount = 0
+ self.LogEndDotsCount = 0
+ self.LogAppendDotCount = 0
class ExperimentRunnerTest(unittest.TestCase):
- """Test for experiment runner class."""
-
- run_count = 0
- is_complete_count = 0
- mock_logger = FakeLogger()
- mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
-
- def make_fake_experiment(self):
- test_flag.SetTestMode(True)
- experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_1))
- experiment = ExperimentFactory().GetExperiment(
- experiment_file, working_directory='', log_dir='')
- return experiment
-
- @mock.patch.object(machine_manager.MachineManager, 'AddMachine')
- @mock.patch.object(os.path, 'isfile')
-
- # pylint: disable=arguments-differ
- def setUp(self, mock_isfile, _mock_addmachine):
- mock_isfile.return_value = True
- self.exp = self.make_fake_experiment()
-
- def test_init(self):
- er = experiment_runner.ExperimentRunner(
- self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
- self.assertFalse(er._terminated)
- self.assertEqual(er.STATUS_TIME_DELAY, 10)
-
- self.exp.log_level = 'verbose'
- er = experiment_runner.ExperimentRunner(
- self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
- self.assertEqual(er.STATUS_TIME_DELAY, 30)
-
- @mock.patch.object(time, 'time')
- @mock.patch.object(time, 'sleep')
- @mock.patch.object(experiment_status.ExperimentStatus, 'GetStatusString')
- @mock.patch.object(experiment_status.ExperimentStatus, 'GetProgressString')
- def test_run(self, mock_progress_string, mock_status_string, mock_sleep,
- mock_time):
-
- self.run_count = 0
- self.is_complete_count = 0
- mock_sleep.return_value = None
- # pylint: disable=range-builtin-not-iterating
- mock_time.side_effect = range(1, 50, 1)
-
- def reset():
- self.run_count = 0
- self.is_complete_count = 0
-
- def FakeRun():
- self.run_count += 1
- return 0
-
- def FakeIsComplete():
- self.is_complete_count += 1
- if self.is_complete_count < 6:
- return False
- else:
- return True
-
- self.mock_logger.Reset()
- self.exp.Run = FakeRun
- self.exp.IsComplete = FakeIsComplete
-
- # Test 1: log_level == "quiet"
- self.exp.log_level = 'quiet'
- er = experiment_runner.ExperimentRunner(
- self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
- er.STATUS_TIME_DELAY = 2
- mock_status_string.return_value = 'Fake status string'
- er._Run(self.exp)
- self.assertEqual(self.run_count, 1)
- self.assertTrue(self.is_complete_count > 0)
- self.assertEqual(self.mock_logger.LogStartDotsCount, 1)
- self.assertEqual(self.mock_logger.LogAppendDotCount, 1)
- self.assertEqual(self.mock_logger.LogEndDotsCount, 1)
- self.assertEqual(self.mock_logger.dot_count, 2)
- self.assertEqual(mock_progress_string.call_count, 0)
- self.assertEqual(mock_status_string.call_count, 2)
- self.assertEqual(self.mock_logger.output_msgs, [
- '==============================', 'Fake status string',
- '=============================='
- ])
- self.assertEqual(len(self.mock_logger.error_msgs), 0)
-
- # Test 2: log_level == "average"
- self.mock_logger.Reset()
- reset()
- self.exp.log_level = 'average'
- mock_status_string.call_count = 0
- er = experiment_runner.ExperimentRunner(
- self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
- er.STATUS_TIME_DELAY = 2
- mock_status_string.return_value = 'Fake status string'
- er._Run(self.exp)
- self.assertEqual(self.run_count, 1)
- self.assertTrue(self.is_complete_count > 0)
- self.assertEqual(self.mock_logger.LogStartDotsCount, 1)
- self.assertEqual(self.mock_logger.LogAppendDotCount, 1)
- self.assertEqual(self.mock_logger.LogEndDotsCount, 1)
- self.assertEqual(self.mock_logger.dot_count, 2)
- self.assertEqual(mock_progress_string.call_count, 0)
- self.assertEqual(mock_status_string.call_count, 2)
- self.assertEqual(self.mock_logger.output_msgs, [
- '==============================', 'Fake status string',
- '=============================='
- ])
- self.assertEqual(len(self.mock_logger.error_msgs), 0)
-
- # Test 3: log_level == "verbose"
- self.mock_logger.Reset()
- reset()
- self.exp.log_level = 'verbose'
- mock_status_string.call_count = 0
- er = experiment_runner.ExperimentRunner(
- self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
- er.STATUS_TIME_DELAY = 2
- mock_status_string.return_value = 'Fake status string'
- mock_progress_string.return_value = 'Fake progress string'
- er._Run(self.exp)
- self.assertEqual(self.run_count, 1)
- self.assertTrue(self.is_complete_count > 0)
- self.assertEqual(self.mock_logger.LogStartDotsCount, 0)
- self.assertEqual(self.mock_logger.LogAppendDotCount, 0)
- self.assertEqual(self.mock_logger.LogEndDotsCount, 0)
- self.assertEqual(self.mock_logger.dot_count, 0)
- self.assertEqual(mock_progress_string.call_count, 2)
- self.assertEqual(mock_status_string.call_count, 2)
- self.assertEqual(self.mock_logger.output_msgs, [
- '==============================', 'Fake progress string',
- 'Fake status string', '==============================',
- '==============================', 'Fake progress string',
- 'Fake status string', '=============================='
- ])
- self.assertEqual(len(self.mock_logger.error_msgs), 0)
-
- @mock.patch.object(TextResultsReport, 'GetReport')
- def test_print_table(self, mock_report):
- self.mock_logger.Reset()
- mock_report.return_value = 'This is a fake experiment report.'
- er = experiment_runner.ExperimentRunner(
- self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
- er._PrintTable(self.exp)
- self.assertEqual(mock_report.call_count, 1)
- self.assertEqual(self.mock_logger.output_msgs,
- ['This is a fake experiment report.'])
-
- @mock.patch.object(HTMLResultsReport, 'GetReport')
- @mock.patch.object(TextResultsReport, 'GetReport')
- @mock.patch.object(EmailSender, 'Attachment')
- @mock.patch.object(EmailSender, 'SendEmail')
- @mock.patch.object(getpass, 'getuser')
- def test_email(self, mock_getuser, mock_emailer, mock_attachment,
- mock_text_report, mock_html_report):
-
- mock_getuser.return_value = 'john.smith@google.com'
- mock_text_report.return_value = 'This is a fake text report.'
- mock_html_report.return_value = 'This is a fake html report.'
-
- self.mock_logger.Reset()
- config.AddConfig('no_email', True)
- self.exp.email_to = ['jane.doe@google.com']
- er = experiment_runner.ExperimentRunner(
- self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
- # Test 1. Config:no_email; exp.email_to set ==> no email sent
- er._Email(self.exp)
- self.assertEqual(mock_getuser.call_count, 0)
- self.assertEqual(mock_emailer.call_count, 0)
- self.assertEqual(mock_attachment.call_count, 0)
- self.assertEqual(mock_text_report.call_count, 0)
- self.assertEqual(mock_html_report.call_count, 0)
-
- # Test 2. Config: email. exp.email_to set; cache hit. => send email
- self.mock_logger.Reset()
- config.AddConfig('no_email', False)
- for r in self.exp.benchmark_runs:
- r.cache_hit = True
- er._Email(self.exp)
- self.assertEqual(mock_getuser.call_count, 1)
- self.assertEqual(mock_emailer.call_count, 1)
- self.assertEqual(mock_attachment.call_count, 1)
- self.assertEqual(mock_text_report.call_count, 1)
- self.assertEqual(mock_html_report.call_count, 1)
- self.assertEqual(len(mock_emailer.call_args), 2)
- self.assertEqual(mock_emailer.call_args[0],
- (['jane.doe@google.com', 'john.smith@google.com'
- ], ': image1 vs. image2',
- "<pre style='font-size: 13px'>This is a fake text "
- 'report.\nResults are stored in _results.\n</pre>'))
- self.assertTrue(isinstance(mock_emailer.call_args[1], dict))
- self.assertEqual(len(mock_emailer.call_args[1]), 2)
- self.assertTrue('attachments' in mock_emailer.call_args[1].keys())
- self.assertEqual(mock_emailer.call_args[1]['msg_type'], 'html')
-
- mock_attachment.assert_called_with('report.html',
- 'This is a fake html report.')
-
- # Test 3. Config: email; exp.mail_to set; no cache hit. => send email
- self.mock_logger.Reset()
- mock_getuser.reset_mock()
- mock_emailer.reset_mock()
- mock_attachment.reset_mock()
- mock_text_report.reset_mock()
- mock_html_report.reset_mock()
- config.AddConfig('no_email', False)
- for r in self.exp.benchmark_runs:
- r.cache_hit = False
- er._Email(self.exp)
- self.assertEqual(mock_getuser.call_count, 1)
- self.assertEqual(mock_emailer.call_count, 1)
- self.assertEqual(mock_attachment.call_count, 1)
- self.assertEqual(mock_text_report.call_count, 1)
- self.assertEqual(mock_html_report.call_count, 1)
- self.assertEqual(len(mock_emailer.call_args), 2)
- self.assertEqual(mock_emailer.call_args[0],
- ([
- 'jane.doe@google.com', 'john.smith@google.com',
- 'john.smith@google.com'
- ], ': image1 vs. image2',
- "<pre style='font-size: 13px'>This is a fake text "
- 'report.\nResults are stored in _results.\n</pre>'))
- self.assertTrue(isinstance(mock_emailer.call_args[1], dict))
- self.assertEqual(len(mock_emailer.call_args[1]), 2)
- self.assertTrue('attachments' in mock_emailer.call_args[1].keys())
- self.assertEqual(mock_emailer.call_args[1]['msg_type'], 'html')
-
- mock_attachment.assert_called_with('report.html',
- 'This is a fake html report.')
-
- # Test 4. Config: email; exp.mail_to = None; no cache hit. => send email
- self.mock_logger.Reset()
- mock_getuser.reset_mock()
- mock_emailer.reset_mock()
- mock_attachment.reset_mock()
- mock_text_report.reset_mock()
- mock_html_report.reset_mock()
- self.exp.email_to = []
- er._Email(self.exp)
- self.assertEqual(mock_getuser.call_count, 1)
- self.assertEqual(mock_emailer.call_count, 1)
- self.assertEqual(mock_attachment.call_count, 1)
- self.assertEqual(mock_text_report.call_count, 1)
- self.assertEqual(mock_html_report.call_count, 1)
- self.assertEqual(len(mock_emailer.call_args), 2)
- self.assertEqual(mock_emailer.call_args[0],
- (['john.smith@google.com'], ': image1 vs. image2',
- "<pre style='font-size: 13px'>This is a fake text "
- 'report.\nResults are stored in _results.\n</pre>'))
- self.assertTrue(isinstance(mock_emailer.call_args[1], dict))
- self.assertEqual(len(mock_emailer.call_args[1]), 2)
- self.assertTrue('attachments' in mock_emailer.call_args[1].keys())
- self.assertEqual(mock_emailer.call_args[1]['msg_type'], 'html')
-
- mock_attachment.assert_called_with('report.html',
- 'This is a fake html report.')
-
- # Test 5. Config: email; exp.mail_to = None; cache hit => no email sent
- self.mock_logger.Reset()
- mock_getuser.reset_mock()
- mock_emailer.reset_mock()
- mock_attachment.reset_mock()
- mock_text_report.reset_mock()
- mock_html_report.reset_mock()
- for r in self.exp.benchmark_runs:
- r.cache_hit = True
- er._Email(self.exp)
- self.assertEqual(mock_getuser.call_count, 0)
- self.assertEqual(mock_emailer.call_count, 0)
- self.assertEqual(mock_attachment.call_count, 0)
- self.assertEqual(mock_text_report.call_count, 0)
- self.assertEqual(mock_html_report.call_count, 0)
-
- @mock.patch.object(FileUtils, 'RmDir')
- @mock.patch.object(FileUtils, 'MkDirP')
- @mock.patch.object(FileUtils, 'WriteFile')
- @mock.patch.object(HTMLResultsReport, 'FromExperiment')
- @mock.patch.object(TextResultsReport, 'FromExperiment')
- @mock.patch.object(Result, 'CompressResultsTo')
- @mock.patch.object(Result, 'CopyResultsTo')
- @mock.patch.object(Result, 'CleanUp')
- @mock.patch.object(Result, 'FormatStringTopCommands')
- @mock.patch('builtins.open', new_callable=mock.mock_open)
- def test_store_results(self, mock_open, mock_top_commands, mock_cleanup,
- mock_copy, mock_compress, _mock_text_report,
- mock_report, mock_writefile, mock_mkdir, mock_rmdir):
-
- self.mock_logger.Reset()
- self.exp.results_directory = '/usr/local/crosperf-results'
- bench_run = self.exp.benchmark_runs[5]
- bench_path = '/usr/local/crosperf-results/' + ''.join(
- ch for ch in bench_run.name if ch.isalnum())
- self.assertEqual(len(self.exp.benchmark_runs), 6)
-
- er = experiment_runner.ExperimentRunner(
- self.exp,
- json_report=False,
- using_schedv2=False,
- log=self.mock_logger,
- cmd_exec=self.mock_cmd_exec)
-
- # Test 1. Make sure nothing is done if _terminated is true.
- er._terminated = True
- er._StoreResults(self.exp)
- self.assertEqual(mock_cleanup.call_count, 0)
- self.assertEqual(mock_copy.call_count, 0)
- self.assertEqual(mock_compress.call_count, 0)
- self.assertEqual(mock_report.call_count, 0)
- self.assertEqual(mock_writefile.call_count, 0)
- self.assertEqual(mock_mkdir.call_count, 0)
- self.assertEqual(mock_rmdir.call_count, 0)
- self.assertEqual(self.mock_logger.LogOutputCount, 0)
- self.assertEqual(mock_open.call_count, 0)
- self.assertEqual(mock_top_commands.call_count, 0)
-
- # Test 2. _terminated is false; everything works properly.
- fake_result = Result(self.mock_logger, self.exp.labels[0], 'average',
- 'daisy1')
- for r in self.exp.benchmark_runs:
- r.result = fake_result
- er._terminated = False
- self.exp.compress_results = False
- er._StoreResults(self.exp)
- self.assertEqual(mock_cleanup.call_count, 6)
- mock_cleanup.assert_called_with(bench_run.benchmark.rm_chroot_tmp)
- self.assertEqual(mock_copy.call_count, 6)
- mock_copy.assert_called_with(bench_path)
- self.assertEqual(mock_writefile.call_count, 3)
- self.assertEqual(len(mock_writefile.call_args_list), 3)
- first_args = mock_writefile.call_args_list[0]
- second_args = mock_writefile.call_args_list[1]
- self.assertEqual(first_args[0][0],
- '/usr/local/crosperf-results/experiment.exp')
- self.assertEqual(second_args[0][0],
- '/usr/local/crosperf-results/results.html')
- self.assertEqual(mock_mkdir.call_count, 1)
- mock_mkdir.assert_called_with('/usr/local/crosperf-results')
- self.assertEqual(mock_rmdir.call_count, 1)
- mock_rmdir.assert_called_with('/usr/local/crosperf-results')
- self.assertEqual(self.mock_logger.LogOutputCount, 5)
- self.assertEqual(self.mock_logger.output_msgs, [
- 'Storing experiment file in /usr/local/crosperf-results.',
- 'Storing top statistics of each benchmark run into'
- ' /usr/local/crosperf-results/topstats.log.',
- 'Storing results of each benchmark run.',
- 'Storing results report in /usr/local/crosperf-results.',
- 'Storing email message body in /usr/local/crosperf-results.',
- ])
- self.assertEqual(mock_open.call_count, 1)
- # Check write to a topstats.log file.
- mock_open.assert_called_with('/usr/local/crosperf-results/topstats.log',
- 'w')
- mock_open().write.assert_called()
-
- # Check top calls with no arguments.
- topcalls = [mock.call()] * 6
- self.assertEqual(mock_top_commands.call_args_list, topcalls)
-
- # Test 3. Test compress_results.
- self.exp.compress_results = True
- mock_copy.call_count = 0
- mock_compress.call_count = 0
- er._StoreResults(self.exp)
- self.assertEqual(mock_copy.call_count, 0)
- mock_copy.assert_called_with(bench_path)
- self.assertEqual(mock_compress.call_count, 6)
- mock_compress.assert_called_with(bench_path)
-
-
-if __name__ == '__main__':
- unittest.main()
+ """Test for experiment runner class."""
+
+ run_count = 0
+ is_complete_count = 0
+ mock_logger = FakeLogger()
+ mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+
+ def make_fake_experiment(self):
+ test_flag.SetTestMode(True)
+ experiment_file = ExperimentFile(io.StringIO(EXPERIMENT_FILE_1))
+ experiment = ExperimentFactory().GetExperiment(
+ experiment_file, working_directory="", log_dir=""
+ )
+ return experiment
+
+ @mock.patch.object(machine_manager.MachineManager, "AddMachine")
+ @mock.patch.object(os.path, "isfile")
+
+ # pylint: disable=arguments-differ
+ def setUp(self, mock_isfile, _mock_addmachine):
+ mock_isfile.return_value = True
+ self.exp = self.make_fake_experiment()
+
+ def test_init(self):
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec,
+ )
+ self.assertFalse(er._terminated)
+ self.assertEqual(er.STATUS_TIME_DELAY, 10)
+
+ self.exp.log_level = "verbose"
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec,
+ )
+ self.assertEqual(er.STATUS_TIME_DELAY, 30)
+
+ @mock.patch.object(time, "time")
+ @mock.patch.object(time, "sleep")
+ @mock.patch.object(experiment_status.ExperimentStatus, "GetStatusString")
+ @mock.patch.object(experiment_status.ExperimentStatus, "GetProgressString")
+ def test_run(
+ self, mock_progress_string, mock_status_string, mock_sleep, mock_time
+ ):
+
+ self.run_count = 0
+ self.is_complete_count = 0
+ mock_sleep.return_value = None
+ # pylint: disable=range-builtin-not-iterating
+ mock_time.side_effect = range(1, 50, 1)
+
+ def reset():
+ self.run_count = 0
+ self.is_complete_count = 0
+
+ def FakeRun():
+ self.run_count += 1
+ return 0
+
+ def FakeIsComplete():
+ self.is_complete_count += 1
+ if self.is_complete_count < 6:
+ return False
+ else:
+ return True
+
+ self.mock_logger.Reset()
+ self.exp.Run = FakeRun
+ self.exp.IsComplete = FakeIsComplete
+
+ # Test 1: log_level == "quiet"
+ self.exp.log_level = "quiet"
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec,
+ )
+ er.STATUS_TIME_DELAY = 2
+ mock_status_string.return_value = "Fake status string"
+ er._Run(self.exp)
+ self.assertEqual(self.run_count, 1)
+ self.assertTrue(self.is_complete_count > 0)
+ self.assertEqual(self.mock_logger.LogStartDotsCount, 1)
+ self.assertEqual(self.mock_logger.LogAppendDotCount, 1)
+ self.assertEqual(self.mock_logger.LogEndDotsCount, 1)
+ self.assertEqual(self.mock_logger.dot_count, 2)
+ self.assertEqual(mock_progress_string.call_count, 0)
+ self.assertEqual(mock_status_string.call_count, 2)
+ self.assertEqual(
+ self.mock_logger.output_msgs,
+ [
+ "==============================",
+ "Fake status string",
+ "==============================",
+ ],
+ )
+ self.assertEqual(len(self.mock_logger.error_msgs), 0)
+
+ # Test 2: log_level == "average"
+ self.mock_logger.Reset()
+ reset()
+ self.exp.log_level = "average"
+ mock_status_string.call_count = 0
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec,
+ )
+ er.STATUS_TIME_DELAY = 2
+ mock_status_string.return_value = "Fake status string"
+ er._Run(self.exp)
+ self.assertEqual(self.run_count, 1)
+ self.assertTrue(self.is_complete_count > 0)
+ self.assertEqual(self.mock_logger.LogStartDotsCount, 1)
+ self.assertEqual(self.mock_logger.LogAppendDotCount, 1)
+ self.assertEqual(self.mock_logger.LogEndDotsCount, 1)
+ self.assertEqual(self.mock_logger.dot_count, 2)
+ self.assertEqual(mock_progress_string.call_count, 0)
+ self.assertEqual(mock_status_string.call_count, 2)
+ self.assertEqual(
+ self.mock_logger.output_msgs,
+ [
+ "==============================",
+ "Fake status string",
+ "==============================",
+ ],
+ )
+ self.assertEqual(len(self.mock_logger.error_msgs), 0)
+
+ # Test 3: log_level == "verbose"
+ self.mock_logger.Reset()
+ reset()
+ self.exp.log_level = "verbose"
+ mock_status_string.call_count = 0
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec,
+ )
+ er.STATUS_TIME_DELAY = 2
+ mock_status_string.return_value = "Fake status string"
+ mock_progress_string.return_value = "Fake progress string"
+ er._Run(self.exp)
+ self.assertEqual(self.run_count, 1)
+ self.assertTrue(self.is_complete_count > 0)
+ self.assertEqual(self.mock_logger.LogStartDotsCount, 0)
+ self.assertEqual(self.mock_logger.LogAppendDotCount, 0)
+ self.assertEqual(self.mock_logger.LogEndDotsCount, 0)
+ self.assertEqual(self.mock_logger.dot_count, 0)
+ self.assertEqual(mock_progress_string.call_count, 2)
+ self.assertEqual(mock_status_string.call_count, 2)
+ self.assertEqual(
+ self.mock_logger.output_msgs,
+ [
+ "==============================",
+ "Fake progress string",
+ "Fake status string",
+ "==============================",
+ "==============================",
+ "Fake progress string",
+ "Fake status string",
+ "==============================",
+ ],
+ )
+ self.assertEqual(len(self.mock_logger.error_msgs), 0)
+
+ @mock.patch.object(TextResultsReport, "GetReport")
+ def test_print_table(self, mock_report):
+ self.mock_logger.Reset()
+ mock_report.return_value = "This is a fake experiment report."
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec,
+ )
+ er._PrintTable(self.exp)
+ self.assertEqual(mock_report.call_count, 1)
+ self.assertEqual(
+ self.mock_logger.output_msgs, ["This is a fake experiment report."]
+ )
+
+ @mock.patch.object(HTMLResultsReport, "GetReport")
+ @mock.patch.object(TextResultsReport, "GetReport")
+ @mock.patch.object(EmailSender, "Attachment")
+ @mock.patch.object(EmailSender, "SendEmail")
+ @mock.patch.object(getpass, "getuser")
+ def test_email(
+ self,
+ mock_getuser,
+ mock_emailer,
+ mock_attachment,
+ mock_text_report,
+ mock_html_report,
+ ):
+
+ mock_getuser.return_value = "john.smith@google.com"
+ mock_text_report.return_value = "This is a fake text report."
+ mock_html_report.return_value = "This is a fake html report."
+
+ self.mock_logger.Reset()
+ config.AddConfig("no_email", True)
+ self.exp.email_to = ["jane.doe@google.com"]
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec,
+ )
+        # Test 1. Config: no_email; exp.email_to set => no email sent
+ er._Email(self.exp)
+ self.assertEqual(mock_getuser.call_count, 0)
+ self.assertEqual(mock_emailer.call_count, 0)
+ self.assertEqual(mock_attachment.call_count, 0)
+ self.assertEqual(mock_text_report.call_count, 0)
+ self.assertEqual(mock_html_report.call_count, 0)
+
+ # Test 2. Config: email. exp.email_to set; cache hit. => send email
+ self.mock_logger.Reset()
+ config.AddConfig("no_email", False)
+ for r in self.exp.benchmark_runs:
+ r.cache_hit = True
+ er._Email(self.exp)
+ self.assertEqual(mock_getuser.call_count, 1)
+ self.assertEqual(mock_emailer.call_count, 1)
+ self.assertEqual(mock_attachment.call_count, 1)
+ self.assertEqual(mock_text_report.call_count, 1)
+ self.assertEqual(mock_html_report.call_count, 1)
+ self.assertEqual(len(mock_emailer.call_args), 2)
+ self.assertEqual(
+ mock_emailer.call_args[0],
+ (
+ ["jane.doe@google.com", "john.smith@google.com"],
+ ": image1 vs. image2",
+ "<pre style='font-size: 13px'>This is a fake text "
+ "report.\nResults are stored in _results.\n</pre>",
+ ),
+ )
+ self.assertTrue(isinstance(mock_emailer.call_args[1], dict))
+ self.assertEqual(len(mock_emailer.call_args[1]), 2)
+ self.assertTrue("attachments" in mock_emailer.call_args[1].keys())
+ self.assertEqual(mock_emailer.call_args[1]["msg_type"], "html")
+
+ mock_attachment.assert_called_with(
+ "report.html", "This is a fake html report."
+ )
+
+        # Test 3. Config: email; exp.email_to set; no cache hit. => send email
+ self.mock_logger.Reset()
+ mock_getuser.reset_mock()
+ mock_emailer.reset_mock()
+ mock_attachment.reset_mock()
+ mock_text_report.reset_mock()
+ mock_html_report.reset_mock()
+ config.AddConfig("no_email", False)
+ for r in self.exp.benchmark_runs:
+ r.cache_hit = False
+ er._Email(self.exp)
+ self.assertEqual(mock_getuser.call_count, 1)
+ self.assertEqual(mock_emailer.call_count, 1)
+ self.assertEqual(mock_attachment.call_count, 1)
+ self.assertEqual(mock_text_report.call_count, 1)
+ self.assertEqual(mock_html_report.call_count, 1)
+ self.assertEqual(len(mock_emailer.call_args), 2)
+ self.assertEqual(
+ mock_emailer.call_args[0],
+ (
+ [
+ "jane.doe@google.com",
+ "john.smith@google.com",
+ "john.smith@google.com",
+ ],
+ ": image1 vs. image2",
+ "<pre style='font-size: 13px'>This is a fake text "
+ "report.\nResults are stored in _results.\n</pre>",
+ ),
+ )
+ self.assertTrue(isinstance(mock_emailer.call_args[1], dict))
+ self.assertEqual(len(mock_emailer.call_args[1]), 2)
+ self.assertTrue("attachments" in mock_emailer.call_args[1].keys())
+ self.assertEqual(mock_emailer.call_args[1]["msg_type"], "html")
+
+ mock_attachment.assert_called_with(
+ "report.html", "This is a fake html report."
+ )
+
+        # Test 4. Config: email; exp.email_to empty; no cache hit. => send email
+ self.mock_logger.Reset()
+ mock_getuser.reset_mock()
+ mock_emailer.reset_mock()
+ mock_attachment.reset_mock()
+ mock_text_report.reset_mock()
+ mock_html_report.reset_mock()
+ self.exp.email_to = []
+ er._Email(self.exp)
+ self.assertEqual(mock_getuser.call_count, 1)
+ self.assertEqual(mock_emailer.call_count, 1)
+ self.assertEqual(mock_attachment.call_count, 1)
+ self.assertEqual(mock_text_report.call_count, 1)
+ self.assertEqual(mock_html_report.call_count, 1)
+ self.assertEqual(len(mock_emailer.call_args), 2)
+ self.assertEqual(
+ mock_emailer.call_args[0],
+ (
+ ["john.smith@google.com"],
+ ": image1 vs. image2",
+ "<pre style='font-size: 13px'>This is a fake text "
+ "report.\nResults are stored in _results.\n</pre>",
+ ),
+ )
+ self.assertTrue(isinstance(mock_emailer.call_args[1], dict))
+ self.assertEqual(len(mock_emailer.call_args[1]), 2)
+ self.assertTrue("attachments" in mock_emailer.call_args[1].keys())
+ self.assertEqual(mock_emailer.call_args[1]["msg_type"], "html")
+
+ mock_attachment.assert_called_with(
+ "report.html", "This is a fake html report."
+ )
+
+        # Test 5. Config: email; exp.email_to empty; cache hit => no email sent
+ self.mock_logger.Reset()
+ mock_getuser.reset_mock()
+ mock_emailer.reset_mock()
+ mock_attachment.reset_mock()
+ mock_text_report.reset_mock()
+ mock_html_report.reset_mock()
+ for r in self.exp.benchmark_runs:
+ r.cache_hit = True
+ er._Email(self.exp)
+ self.assertEqual(mock_getuser.call_count, 0)
+ self.assertEqual(mock_emailer.call_count, 0)
+ self.assertEqual(mock_attachment.call_count, 0)
+ self.assertEqual(mock_text_report.call_count, 0)
+ self.assertEqual(mock_html_report.call_count, 0)
+
+ @mock.patch.object(FileUtils, "RmDir")
+ @mock.patch.object(FileUtils, "MkDirP")
+ @mock.patch.object(FileUtils, "WriteFile")
+ @mock.patch.object(HTMLResultsReport, "FromExperiment")
+ @mock.patch.object(TextResultsReport, "FromExperiment")
+ @mock.patch.object(Result, "CompressResultsTo")
+ @mock.patch.object(Result, "CopyResultsTo")
+ @mock.patch.object(Result, "CleanUp")
+ @mock.patch.object(Result, "FormatStringTopCommands")
+ @mock.patch("builtins.open", new_callable=mock.mock_open)
+ def test_store_results(
+ self,
+ mock_open,
+ mock_top_commands,
+ mock_cleanup,
+ mock_copy,
+ mock_compress,
+ _mock_text_report,
+ mock_report,
+ mock_writefile,
+ mock_mkdir,
+ mock_rmdir,
+ ):
+
+ self.mock_logger.Reset()
+ self.exp.results_directory = "/usr/local/crosperf-results"
+ bench_run = self.exp.benchmark_runs[5]
+ bench_path = "/usr/local/crosperf-results/" + "".join(
+ ch for ch in bench_run.name if ch.isalnum()
+ )
+ self.assertEqual(len(self.exp.benchmark_runs), 6)
+
+ er = experiment_runner.ExperimentRunner(
+ self.exp,
+ json_report=False,
+ using_schedv2=False,
+ log=self.mock_logger,
+ cmd_exec=self.mock_cmd_exec,
+ )
+
+ # Test 1. Make sure nothing is done if _terminated is true.
+ er._terminated = True
+ er._StoreResults(self.exp)
+ self.assertEqual(mock_cleanup.call_count, 0)
+ self.assertEqual(mock_copy.call_count, 0)
+ self.assertEqual(mock_compress.call_count, 0)
+ self.assertEqual(mock_report.call_count, 0)
+ self.assertEqual(mock_writefile.call_count, 0)
+ self.assertEqual(mock_mkdir.call_count, 0)
+ self.assertEqual(mock_rmdir.call_count, 0)
+ self.assertEqual(self.mock_logger.LogOutputCount, 0)
+ self.assertEqual(mock_open.call_count, 0)
+ self.assertEqual(mock_top_commands.call_count, 0)
+
+ # Test 2. _terminated is false; everything works properly.
+ fake_result = Result(
+ self.mock_logger, self.exp.labels[0], "average", "daisy1"
+ )
+ for r in self.exp.benchmark_runs:
+ r.result = fake_result
+ er._terminated = False
+ self.exp.compress_results = False
+ er._StoreResults(self.exp)
+ self.assertEqual(mock_cleanup.call_count, 6)
+ mock_cleanup.assert_called_with(bench_run.benchmark.rm_chroot_tmp)
+ self.assertEqual(mock_copy.call_count, 6)
+ mock_copy.assert_called_with(bench_path)
+ self.assertEqual(mock_writefile.call_count, 3)
+ self.assertEqual(len(mock_writefile.call_args_list), 3)
+ first_args = mock_writefile.call_args_list[0]
+ second_args = mock_writefile.call_args_list[1]
+ self.assertEqual(
+ first_args[0][0], "/usr/local/crosperf-results/experiment.exp"
+ )
+ self.assertEqual(
+ second_args[0][0], "/usr/local/crosperf-results/results.html"
+ )
+ self.assertEqual(mock_mkdir.call_count, 1)
+ mock_mkdir.assert_called_with("/usr/local/crosperf-results")
+ self.assertEqual(mock_rmdir.call_count, 1)
+ mock_rmdir.assert_called_with("/usr/local/crosperf-results")
+ self.assertEqual(self.mock_logger.LogOutputCount, 5)
+ self.assertEqual(
+ self.mock_logger.output_msgs,
+ [
+ "Storing experiment file in /usr/local/crosperf-results.",
+ "Storing top statistics of each benchmark run into"
+ " /usr/local/crosperf-results/topstats.log.",
+ "Storing results of each benchmark run.",
+ "Storing results report in /usr/local/crosperf-results.",
+ "Storing email message body in /usr/local/crosperf-results.",
+ ],
+ )
+ self.assertEqual(mock_open.call_count, 1)
+ # Check write to a topstats.log file.
+ mock_open.assert_called_with(
+ "/usr/local/crosperf-results/topstats.log", "w"
+ )
+ mock_open().write.assert_called()
+
+ # Check top calls with no arguments.
+ topcalls = [mock.call()] * 6
+ self.assertEqual(mock_top_commands.call_args_list, topcalls)
+
+ # Test 3. Test compress_results.
+ self.exp.compress_results = True
+ mock_copy.call_count = 0
+ mock_compress.call_count = 0
+ er._StoreResults(self.exp)
+ self.assertEqual(mock_copy.call_count, 0)
+ mock_copy.assert_called_with(bench_path)
+ self.assertEqual(mock_compress.call_count, 6)
+ mock_compress.assert_called_with(bench_path)
+
+
+if __name__ == "__main__":
+ unittest.main()
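
The tests above lean on two unittest.mock idioms that are easy to misread: stacked @mock.patch.object decorators are passed to the test method bottom-up (the decorator closest to the def becomes the first mock argument), and call_args is an (args, kwargs) pair for the most recent call, which is why the assertions index call_args[0] and call_args[1]. The sketch below shows both idioms plus the mock_open pattern used in test_store_results. It is editorial, not part of this patch, and Mailer is a hypothetical stand-in rather than the real EmailSender.

import getpass
import unittest
from unittest import mock


class Mailer:
    """Hypothetical stand-in used only for this sketch."""

    def send(self, to, subject, body, msg_type="plain"):
        raise RuntimeError("should never run under the patch below")


class PatchPatternTest(unittest.TestCase):
    @mock.patch.object(Mailer, "send")
    @mock.patch.object(getpass, "getuser")
    def test_send(self, mock_getuser, mock_send):
        # Stacked decorators are injected bottom-up: the decorator closest to
        # the function ("getuser") becomes the first mock argument.
        mock_getuser.return_value = "john.smith@google.com"
        self.assertEqual(getpass.getuser(), "john.smith@google.com")

        Mailer().send(["jane@example.com"], "subject", "body", msg_type="html")
        self.assertEqual(mock_send.call_count, 1)
        args, kwargs = mock_send.call_args
        self.assertEqual(args[0], ["jane@example.com"])
        self.assertEqual(kwargs["msg_type"], "html")

    @mock.patch("builtins.open", new_callable=mock.mock_open)
    def test_mock_open(self, mocked_open):
        # mock_open records both the open() call and the writes on the handle.
        with open("/tmp/topstats.log", "w") as log:
            log.write("hello")
        mocked_open.assert_called_with("/tmp/topstats.log", "w")
        mocked_open().write.assert_called_with("hello")


if __name__ == "__main__":
    unittest.main()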
diff --git a/crosperf/experiment_status.py b/crosperf/experiment_status.py
index 2ac47c74..fa6b1eec 100644
--- a/crosperf/experiment_status.py
+++ b/crosperf/experiment_status.py
@@ -1,12 +1,10 @@
# -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The class to show the banner."""
-from __future__ import division
-from __future__ import print_function
import collections
import datetime
@@ -14,136 +12,156 @@ import time
class ExperimentStatus(object):
- """The status class."""
-
- def __init__(self, experiment):
- self.experiment = experiment
- self.num_total = len(self.experiment.benchmark_runs)
- self.completed = 0
- self.new_job_start_time = time.time()
- self.log_level = experiment.log_level
-
- def _GetProgressBar(self, num_complete, num_total):
- ret = 'Done: %s%%' % int(100.0 * num_complete / num_total)
- bar_length = 50
- done_char = '>'
- undone_char = ' '
- num_complete_chars = bar_length * num_complete // num_total
- num_undone_chars = bar_length - num_complete_chars
- ret += ' [%s%s]' % (num_complete_chars * done_char,
- num_undone_chars * undone_char)
- return ret
-
- def GetProgressString(self):
- """Get the elapsed_time, ETA."""
- current_time = time.time()
- if self.experiment.start_time:
- elapsed_time = current_time - self.experiment.start_time
- else:
- elapsed_time = 0
- try:
- if self.completed != self.experiment.num_complete:
- self.completed = self.experiment.num_complete
- self.new_job_start_time = current_time
- time_completed_jobs = (
- elapsed_time - (current_time - self.new_job_start_time))
- # eta is calculated as:
- # ETA = (num_jobs_not_yet_started * estimated_time_per_job)
- # + time_left_for_current_job
- #
- # where
- # num_jobs_not_yet_started = (num_total - num_complete - 1)
- #
- # estimated_time_per_job = time_completed_jobs / num_run_complete
- #
- # time_left_for_current_job = estimated_time_per_job -
- # time_spent_so_far_on_current_job
- #
- # The biggest problem with this calculation is its assumption that
- # all jobs have roughly the same running time (blatantly false!).
- #
- # ETA can come out negative if the time spent on the current job is
- # greater than the estimated time per job (e.g. you're running the
- # first long job, after a series of short jobs). For now, if that
- # happens, we set the ETA to "Unknown."
- #
- eta_seconds = (
- float(self.num_total - self.experiment.num_complete - 1) *
- time_completed_jobs / self.experiment.num_run_complete +
- (time_completed_jobs / self.experiment.num_run_complete -
- (current_time - self.new_job_start_time)))
-
- eta_seconds = int(eta_seconds)
- if eta_seconds > 0:
- eta = datetime.timedelta(seconds=eta_seconds)
- else:
- eta = 'Unknown'
- except ZeroDivisionError:
- eta = 'Unknown'
- strings = []
- strings.append('Current time: %s Elapsed: %s ETA: %s' %
- (datetime.datetime.now(),
- datetime.timedelta(seconds=int(elapsed_time)), eta))
- strings.append(
- self._GetProgressBar(self.experiment.num_complete, self.num_total))
- return '\n'.join(strings)
-
- def GetStatusString(self):
- """Get the status string of all the benchmark_runs."""
- status_bins = collections.defaultdict(list)
- for benchmark_run in self.experiment.benchmark_runs:
- status_bins[benchmark_run.timeline.GetLastEvent()].append(benchmark_run)
-
- status_strings = []
- for key, val in status_bins.items():
- if key == 'RUNNING':
- get_description = self._GetNamesAndIterations
- else:
- get_description = self._GetCompactNamesAndIterations
- status_strings.append('%s: %s' % (key, get_description(val)))
-
- thread_status = ''
- thread_status_format = 'Thread Status: \n{}\n'
- if (self.experiment.schedv2() is None and
- self.experiment.log_level == 'verbose'):
- # Add the machine manager status.
- thread_status = thread_status_format.format(
- self.experiment.machine_manager.AsString())
- elif self.experiment.schedv2():
- # In schedv2 mode, we always print out thread status.
- thread_status = thread_status_format.format(
- self.experiment.schedv2().threads_status_as_string())
-
- result = '{}{}'.format(thread_status, '\n'.join(status_strings))
-
- return result
-
- def _GetNamesAndIterations(self, benchmark_runs):
- strings = []
- t = time.time()
- for benchmark_run in benchmark_runs:
- t_last = benchmark_run.timeline.GetLastEventTime()
- elapsed = str(datetime.timedelta(seconds=int(t - t_last)))
- strings.append("'{0}' {1}".format(benchmark_run.name, elapsed))
- return ' %s (%s)' % (len(strings), ', '.join(strings))
-
- def _GetCompactNamesAndIterations(self, benchmark_runs):
- grouped_benchmarks = collections.defaultdict(list)
- for benchmark_run in benchmark_runs:
- grouped_benchmarks[benchmark_run.label.name].append(benchmark_run)
-
- output_segs = []
- for label_name, label_runs in grouped_benchmarks.items():
- strings = []
- benchmark_iterations = collections.defaultdict(list)
- for benchmark_run in label_runs:
- assert benchmark_run.label.name == label_name
- benchmark_name = benchmark_run.benchmark.name
- benchmark_iterations[benchmark_name].append(benchmark_run.iteration)
- for key, val in benchmark_iterations.items():
- val.sort()
- iterations = ','.join(str(v) for v in val)
- strings.append('{} [{}]'.format(key, iterations))
- output_segs.append(' ' + label_name + ': ' + ', '.join(strings) + '\n')
-
- return ' %s \n%s' % (len(benchmark_runs), ''.join(output_segs))
+ """The status class."""
+
+ def __init__(self, experiment):
+ self.experiment = experiment
+ self.num_total = len(self.experiment.benchmark_runs)
+ self.completed = 0
+ self.new_job_start_time = time.time()
+ self.log_level = experiment.log_level
+
+ def _GetProgressBar(self, num_complete, num_total):
+ ret = "Done: %s%%" % int(100.0 * num_complete / num_total)
+ bar_length = 50
+ done_char = ">"
+ undone_char = " "
+ num_complete_chars = bar_length * num_complete // num_total
+ num_undone_chars = bar_length - num_complete_chars
+ ret += " [%s%s]" % (
+ num_complete_chars * done_char,
+ num_undone_chars * undone_char,
+ )
+ return ret
+
+ def GetProgressString(self):
+ """Get the elapsed_time, ETA."""
+ current_time = time.time()
+ if self.experiment.start_time:
+ elapsed_time = current_time - self.experiment.start_time
+ else:
+ elapsed_time = 0
+ try:
+ if self.completed != self.experiment.num_complete:
+ self.completed = self.experiment.num_complete
+ self.new_job_start_time = current_time
+ time_completed_jobs = elapsed_time - (
+ current_time - self.new_job_start_time
+ )
+ # eta is calculated as:
+ # ETA = (num_jobs_not_yet_started * estimated_time_per_job)
+ # + time_left_for_current_job
+ #
+ # where
+ # num_jobs_not_yet_started = (num_total - num_complete - 1)
+ #
+ # estimated_time_per_job = time_completed_jobs / num_run_complete
+ #
+ # time_left_for_current_job = estimated_time_per_job -
+ # time_spent_so_far_on_current_job
+ #
+ # The biggest problem with this calculation is its assumption that
+ # all jobs have roughly the same running time (blatantly false!).
+ #
+ # ETA can come out negative if the time spent on the current job is
+ # greater than the estimated time per job (e.g. you're running the
+ # first long job, after a series of short jobs). For now, if that
+ # happens, we set the ETA to "Unknown."
+ #
+ eta_seconds = float(
+ self.num_total - self.experiment.num_complete - 1
+ ) * time_completed_jobs / self.experiment.num_run_complete + (
+ time_completed_jobs / self.experiment.num_run_complete
+ - (current_time - self.new_job_start_time)
+ )
+
+ eta_seconds = int(eta_seconds)
+ if eta_seconds > 0:
+ eta = datetime.timedelta(seconds=eta_seconds)
+ else:
+ eta = "Unknown"
+ except ZeroDivisionError:
+ eta = "Unknown"
+ strings = []
+ strings.append(
+ "Current time: %s Elapsed: %s ETA: %s"
+ % (
+ datetime.datetime.now(),
+ datetime.timedelta(seconds=int(elapsed_time)),
+ eta,
+ )
+ )
+ strings.append(
+ self._GetProgressBar(self.experiment.num_complete, self.num_total)
+ )
+ return "\n".join(strings)
+
+ def GetStatusString(self):
+ """Get the status string of all the benchmark_runs."""
+ status_bins = collections.defaultdict(list)
+ for benchmark_run in self.experiment.benchmark_runs:
+ status_bins[benchmark_run.timeline.GetLastEvent()].append(
+ benchmark_run
+ )
+
+ status_strings = []
+ for key, val in status_bins.items():
+ if key == "RUNNING":
+ get_description = self._GetNamesAndIterations
+ else:
+ get_description = self._GetCompactNamesAndIterations
+ status_strings.append("%s: %s" % (key, get_description(val)))
+
+ thread_status = ""
+ thread_status_format = "Thread Status: \n{}\n"
+ if (
+ self.experiment.schedv2() is None
+ and self.experiment.log_level == "verbose"
+ ):
+ # Add the machine manager status.
+ thread_status = thread_status_format.format(
+ self.experiment.machine_manager.AsString()
+ )
+ elif self.experiment.schedv2():
+ # In schedv2 mode, we always print out thread status.
+ thread_status = thread_status_format.format(
+ self.experiment.schedv2().threads_status_as_string()
+ )
+
+ result = "{}{}".format(thread_status, "\n".join(status_strings))
+
+ return result
+
+ def _GetNamesAndIterations(self, benchmark_runs):
+ strings = []
+ t = time.time()
+ for benchmark_run in benchmark_runs:
+ t_last = benchmark_run.timeline.GetLastEventTime()
+ elapsed = str(datetime.timedelta(seconds=int(t - t_last)))
+ strings.append("'{0}' {1}".format(benchmark_run.name, elapsed))
+ return " %s (%s)" % (len(strings), ", ".join(strings))
+
+ def _GetCompactNamesAndIterations(self, benchmark_runs):
+ grouped_benchmarks = collections.defaultdict(list)
+ for benchmark_run in benchmark_runs:
+ grouped_benchmarks[benchmark_run.label.name].append(benchmark_run)
+
+ output_segs = []
+ for label_name, label_runs in grouped_benchmarks.items():
+ strings = []
+ benchmark_iterations = collections.defaultdict(list)
+ for benchmark_run in label_runs:
+ assert benchmark_run.label.name == label_name
+ benchmark_name = benchmark_run.benchmark.name
+ benchmark_iterations[benchmark_name].append(
+ benchmark_run.iteration
+ )
+ for key, val in benchmark_iterations.items():
+ val.sort()
+ iterations = ",".join(str(v) for v in val)
+ strings.append("{} [{}]".format(key, iterations))
+ output_segs.append(
+ " " + label_name + ": " + ", ".join(strings) + "\n"
+ )
+
+ return " %s \n%s" % (len(benchmark_runs), "".join(output_segs))
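
The long comment in GetProgressString above spells out the ETA formula; the small sketch below is one way to see the arithmetic in isolation. It is not part of this patch: the parameter names are simplified stand-ins for the experiment attributes (num_complete, num_run_complete and the timing bookkeeping), and the numbers in the example are invented.

import datetime


def estimate_eta(num_total, num_complete, num_run_complete,
                 time_completed_jobs, time_on_current_job):
    # Estimated time per job, derived from the jobs that already finished.
    time_per_job = time_completed_jobs / num_run_complete
    jobs_not_started = num_total - num_complete - 1
    eta_seconds = int(
        jobs_not_started * time_per_job + (time_per_job - time_on_current_job)
    )
    # A non-positive ETA means the current job already exceeded the estimate.
    if eta_seconds > 0:
        return datetime.timedelta(seconds=eta_seconds)
    return "Unknown"


if __name__ == "__main__":
    # 10 jobs total, 4 done in 1200s, 300s spent so far on the current job:
    # 300s per job, 5 jobs not yet started, 0s left on the current one.
    print(estimate_eta(10, 4, 4, 1200.0, 300.0))  # -> 0:25:00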
diff --git a/crosperf/field.py b/crosperf/field.py
index f6300f9f..6b5ea110 100644
--- a/crosperf/field.py
+++ b/crosperf/field.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -7,150 +7,161 @@
class Field(object):
- """Class representing a Field in an experiment file."""
+ """Class representing a Field in an experiment file."""
- def __init__(self, name, required, default, inheritable, description):
- self.name = name
- self.required = required
- self.assigned = False
- self.default = default
- self._value = default
- self.inheritable = inheritable
- self.description = description
+ def __init__(self, name, required, default, inheritable, description):
+ self.name = name
+ self.required = required
+ self.assigned = False
+ self.default = default
+ self._value = default
+ self.inheritable = inheritable
+ self.description = description
- def Set(self, value, parse=True):
- if parse:
- self._value = self._Parse(value)
- else:
- self._value = value
- self.assigned = True
+ def Set(self, value, parse=True):
+ if parse:
+ self._value = self._Parse(value)
+ else:
+ self._value = value
+ self.assigned = True
- def Append(self, value):
- self._value += self._Parse(value)
- self.assigned = True
+ def Append(self, value):
+ self._value += self._Parse(value)
+ self.assigned = True
- def _Parse(self, value):
- return value
+ def _Parse(self, value):
+ return value
- def Get(self):
- return self._value
+ def Get(self):
+ return self._value
- def GetString(self):
- return str(self._value)
+ def GetString(self):
+ return str(self._value)
class TextField(Field):
- """Class of text field."""
+ """Class of text field."""
- def __init__(self,
- name,
- required=False,
- default='',
- inheritable=False,
- description=''):
- super(TextField, self).__init__(name, required, default, inheritable,
- description)
+ def __init__(
+ self,
+ name,
+ required=False,
+ default="",
+ inheritable=False,
+ description="",
+ ):
+ super(TextField, self).__init__(
+ name, required, default, inheritable, description
+ )
- def _Parse(self, value):
- return str(value)
+ def _Parse(self, value):
+ return str(value)
class BooleanField(Field):
- """Class of boolean field."""
-
- def __init__(self,
- name,
- required=False,
- default=False,
- inheritable=False,
- description=''):
- super(BooleanField, self).__init__(name, required, default, inheritable,
- description)
-
- def _Parse(self, value):
- if value.lower() == 'true':
- return True
- elif value.lower() == 'false':
- return False
- raise TypeError(
- "Invalid value for '%s'. Must be true or false." % self.name)
+ """Class of boolean field."""
+
+ def __init__(
+ self,
+ name,
+ required=False,
+ default=False,
+ inheritable=False,
+ description="",
+ ):
+ super(BooleanField, self).__init__(
+ name, required, default, inheritable, description
+ )
+
+ def _Parse(self, value):
+ if value.lower() == "true":
+ return True
+ elif value.lower() == "false":
+ return False
+ raise TypeError(
+ "Invalid value for '%s'. Must be true or false." % self.name
+ )
class IntegerField(Field):
- """Class of integer field."""
+ """Class of integer field."""
- def __init__(self,
- name,
- required=False,
- default=0,
- inheritable=False,
- description=''):
- super(IntegerField, self).__init__(name, required, default, inheritable,
- description)
+ def __init__(
+ self, name, required=False, default=0, inheritable=False, description=""
+ ):
+ super(IntegerField, self).__init__(
+ name, required, default, inheritable, description
+ )
- def _Parse(self, value):
- return int(value)
+ def _Parse(self, value):
+ return int(value)
class FloatField(Field):
- """Class of float field."""
+ """Class of float field."""
- def __init__(self,
- name,
- required=False,
- default=0,
- inheritable=False,
- description=''):
- super(FloatField, self).__init__(name, required, default, inheritable,
- description)
+ def __init__(
+ self, name, required=False, default=0, inheritable=False, description=""
+ ):
+ super(FloatField, self).__init__(
+ name, required, default, inheritable, description
+ )
- def _Parse(self, value):
- return float(value)
+ def _Parse(self, value):
+ return float(value)
class ListField(Field):
- """Class of list field."""
-
- def __init__(self,
- name,
- required=False,
- default=None,
- inheritable=False,
- description=''):
- super(ListField, self).__init__(name, required, default, inheritable,
- description)
-
- def _Parse(self, value):
- return value.split()
-
- def GetString(self):
- return ' '.join(self._value)
-
- def Append(self, value):
- v = self._Parse(value)
- if not self._value:
- self._value = v
- else:
- self._value += v
- self.assigned = True
+ """Class of list field."""
+
+ def __init__(
+ self,
+ name,
+ required=False,
+ default=None,
+ inheritable=False,
+ description="",
+ ):
+ super(ListField, self).__init__(
+ name, required, default, inheritable, description
+ )
+
+ def _Parse(self, value):
+ return value.split()
+
+ def GetString(self):
+ return " ".join(self._value)
+
+ def Append(self, value):
+ v = self._Parse(value)
+ if not self._value:
+ self._value = v
+ else:
+ self._value += v
+ self.assigned = True
class EnumField(Field):
- """Class of enum field."""
-
- def __init__(self,
- name,
- options,
- required=False,
- default='',
- inheritable=False,
- description=''):
- super(EnumField, self).__init__(name, required, default, inheritable,
- description)
- self.options = options
-
- def _Parse(self, value):
- if value not in self.options:
- raise TypeError("Invalid enum value for field '%s'. Must be one of (%s)" %
- (self.name, ', '.join(self.options)))
- return str(value)
+ """Class of enum field."""
+
+ def __init__(
+ self,
+ name,
+ options,
+ required=False,
+ default="",
+ inheritable=False,
+ description="",
+ ):
+ super(EnumField, self).__init__(
+ name, required, default, inheritable, description
+ )
+ self.options = options
+
+ def _Parse(self, value):
+ if value not in self.options:
+ raise TypeError(
+ "Invalid enum value for field '%s'. Must be one of (%s)"
+ % (self.name, ", ".join(self.options))
+ )
+ return str(value)
diff --git a/crosperf/flag_test_unittest.py b/crosperf/flag_test_unittest.py
index 1e77c8a5..024849cb 100755
--- a/crosperf/flag_test_unittest.py
+++ b/crosperf/flag_test_unittest.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2014 The Chromium OS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The unittest of flags."""
-from __future__ import print_function
import unittest
@@ -14,32 +13,32 @@ import test_flag
class FlagTestCase(unittest.TestCase):
- """The unittest class."""
+ """The unittest class."""
- def test_test_flag(self):
- # Verify that test_flag.is_test exists, that it is a list,
- # and that it contains 1 element.
- self.assertTrue(isinstance(test_flag.is_test, list))
- self.assertEqual(len(test_flag.is_test), 1)
+ def test_test_flag(self):
+ # Verify that test_flag.is_test exists, that it is a list,
+ # and that it contains 1 element.
+ self.assertTrue(isinstance(test_flag.is_test, list))
+ self.assertEqual(len(test_flag.is_test), 1)
- # Verify that the getting the flag works and that the flag
- # contains False, its starting value.
- save_flag = test_flag.GetTestMode()
- self.assertFalse(save_flag)
+        # Verify that getting the flag works and that the flag
+ # contains False, its starting value.
+ save_flag = test_flag.GetTestMode()
+ self.assertFalse(save_flag)
- # Verify that setting the flat to True, then getting it, works.
- test_flag.SetTestMode(True)
- self.assertTrue(test_flag.GetTestMode())
+        # Verify that setting the flag to True, then getting it, works.
+ test_flag.SetTestMode(True)
+ self.assertTrue(test_flag.GetTestMode())
- # Verify that setting the flag to False, then getting it, works.
- test_flag.SetTestMode(save_flag)
- self.assertFalse(test_flag.GetTestMode())
+ # Verify that setting the flag to False, then getting it, works.
+ test_flag.SetTestMode(save_flag)
+ self.assertFalse(test_flag.GetTestMode())
- # Verify that test_flag.is_test still exists, that it still is a
- # list, and that it still contains 1 element.
- self.assertTrue(isinstance(test_flag.is_test, list))
- self.assertEqual(len(test_flag.is_test), 1)
+ # Verify that test_flag.is_test still exists, that it still is a
+ # list, and that it still contains 1 element.
+ self.assertTrue(isinstance(test_flag.is_test, list))
+ self.assertEqual(len(test_flag.is_test), 1)
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/crosperf/generate_report.py b/crosperf/generate_report.py
index bae365dc..55c13212 100755
--- a/crosperf/generate_report.py
+++ b/crosperf/generate_report.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Copyright 2016 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -44,8 +44,6 @@ Peppy's runs took 1.321ms and 1.920ms, while peppy-new-crosstool's took 1.221ms
and 1.423ms. None of the runs failed to complete.
"""
-from __future__ import division
-from __future__ import print_function
import argparse
import functools
@@ -61,223 +59,248 @@ from results_report import TextResultsReport
def CountBenchmarks(benchmark_runs):
- """Counts the number of iterations for each benchmark in benchmark_runs."""
+ """Counts the number of iterations for each benchmark in benchmark_runs."""
- # Example input for benchmark_runs:
- # {"bench": [[run1, run2, run3], [run1, run2, run3, run4]]}
- def _MaxLen(results):
- return 0 if not results else max(len(r) for r in results)
+ # Example input for benchmark_runs:
+ # {"bench": [[run1, run2, run3], [run1, run2, run3, run4]]}
+ def _MaxLen(results):
+ return 0 if not results else max(len(r) for r in results)
- return [(name, _MaxLen(results)) for name, results in benchmark_runs.items()]
+ return [
+ (name, _MaxLen(results)) for name, results in benchmark_runs.items()
+ ]
def CutResultsInPlace(results, max_keys=50, complain_on_update=True):
- """Limits the given benchmark results to max_keys keys in-place.
-
- This takes the `data` field from the benchmark input, and mutates each
- benchmark run to contain `max_keys` elements (ignoring special elements, like
- "retval"). At the moment, it just selects the first `max_keys` keyvals,
- alphabetically.
-
- If complain_on_update is true, this will print a message noting that a
- truncation occurred.
-
- This returns the `results` object that was passed in, for convenience.
-
- e.g.
- >>> benchmark_data = {
- ... "bench_draw_line": [
- ... [{"time (ms)": 1.321, "memory (mb)": 128.1, "retval": 0},
- ... {"time (ms)": 1.920, "memory (mb)": 128.4, "retval": 0}],
- ... [{"time (ms)": 1.221, "memory (mb)": 124.3, "retval": 0},
- ... {"time (ms)": 1.423, "memory (mb)": 123.9, "retval": 0}]
- ... ]
- ... }
- >>> CutResultsInPlace(benchmark_data, max_keys=1, complain_on_update=False)
- {
- 'bench_draw_line': [
- [{'memory (mb)': 128.1, 'retval': 0},
- {'memory (mb)': 128.4, 'retval': 0}],
- [{'memory (mb)': 124.3, 'retval': 0},
- {'memory (mb)': 123.9, 'retval': 0}]
- ]
- }
- """
- actually_updated = False
- for bench_results in results.values():
- for platform_results in bench_results:
- for i, result in enumerate(platform_results):
- # Keep the keys that come earliest when sorted alphabetically.
- # Forcing alphabetical order is arbitrary, but necessary; otherwise,
- # the keyvals we'd emit would depend on our iteration order through a
- # map.
- removable_keys = sorted(k for k in result if k != 'retval')
- retained_keys = removable_keys[:max_keys]
- platform_results[i] = {k: result[k] for k in retained_keys}
- # retval needs to be passed through all of the time.
- retval = result.get('retval')
- if retval is not None:
- platform_results[i]['retval'] = retval
- actually_updated = actually_updated or \
- len(retained_keys) != len(removable_keys)
-
- if actually_updated and complain_on_update:
- print(
- 'Warning: Some benchmark keyvals have been truncated.', file=sys.stderr)
- return results
+ """Limits the given benchmark results to max_keys keys in-place.
+
+ This takes the `data` field from the benchmark input, and mutates each
+ benchmark run to contain `max_keys` elements (ignoring special elements, like
+ "retval"). At the moment, it just selects the first `max_keys` keyvals,
+ alphabetically.
+
+ If complain_on_update is true, this will print a message noting that a
+ truncation occurred.
+
+ This returns the `results` object that was passed in, for convenience.
+
+ e.g.
+ >>> benchmark_data = {
+ ... "bench_draw_line": [
+ ... [{"time (ms)": 1.321, "memory (mb)": 128.1, "retval": 0},
+ ... {"time (ms)": 1.920, "memory (mb)": 128.4, "retval": 0}],
+ ... [{"time (ms)": 1.221, "memory (mb)": 124.3, "retval": 0},
+ ... {"time (ms)": 1.423, "memory (mb)": 123.9, "retval": 0}]
+ ... ]
+ ... }
+ >>> CutResultsInPlace(benchmark_data, max_keys=1, complain_on_update=False)
+ {
+ 'bench_draw_line': [
+ [{'memory (mb)': 128.1, 'retval': 0},
+ {'memory (mb)': 128.4, 'retval': 0}],
+ [{'memory (mb)': 124.3, 'retval': 0},
+ {'memory (mb)': 123.9, 'retval': 0}]
+ ]
+ }
+ """
+ actually_updated = False
+ for bench_results in results.values():
+ for platform_results in bench_results:
+ for i, result in enumerate(platform_results):
+ # Keep the keys that come earliest when sorted alphabetically.
+ # Forcing alphabetical order is arbitrary, but necessary; otherwise,
+ # the keyvals we'd emit would depend on our iteration order through a
+ # map.
+ removable_keys = sorted(k for k in result if k != "retval")
+ retained_keys = removable_keys[:max_keys]
+ platform_results[i] = {k: result[k] for k in retained_keys}
+ # retval needs to be passed through all of the time.
+ retval = result.get("retval")
+ if retval is not None:
+ platform_results[i]["retval"] = retval
+ actually_updated = actually_updated or len(
+ retained_keys
+ ) != len(removable_keys)
+
+ if actually_updated and complain_on_update:
+ print(
+ "Warning: Some benchmark keyvals have been truncated.",
+ file=sys.stderr,
+ )
+ return results
def _PositiveInt(s):
- i = int(s)
- if i < 0:
- raise argparse.ArgumentTypeError('%d is not a positive integer.' % (i,))
- return i
+ i = int(s)
+ if i < 0:
+ raise argparse.ArgumentTypeError("%d is not a positive integer." % (i,))
+ return i
def _AccumulateActions(args):
- """Given program arguments, determines what actions we want to run.
-
- Returns [(ResultsReportCtor, str)], where ResultsReportCtor can construct a
- ResultsReport, and the str is the file extension for the given report.
- """
- results = []
- # The order of these is arbitrary.
- if args.json:
- results.append((JSONResultsReport, 'json'))
- if args.text:
- results.append((TextResultsReport, 'txt'))
- if args.email:
- email_ctor = functools.partial(TextResultsReport, email=True)
- results.append((email_ctor, 'email'))
- # We emit HTML if nothing else was specified.
- if args.html or not results:
- results.append((HTMLResultsReport, 'html'))
- return results
+ """Given program arguments, determines what actions we want to run.
+
+ Returns [(ResultsReportCtor, str)], where ResultsReportCtor can construct a
+ ResultsReport, and the str is the file extension for the given report.
+ """
+ results = []
+ # The order of these is arbitrary.
+ if args.json:
+ results.append((JSONResultsReport, "json"))
+ if args.text:
+ results.append((TextResultsReport, "txt"))
+ if args.email:
+ email_ctor = functools.partial(TextResultsReport, email=True)
+ results.append((email_ctor, "email"))
+ # We emit HTML if nothing else was specified.
+ if args.html or not results:
+ results.append((HTMLResultsReport, "html"))
+ return results
# Note: get_contents is a function, because it may be expensive (generating some
# HTML reports takes O(seconds) on my machine, depending on the size of the
# input data).
def WriteFile(output_prefix, extension, get_contents, overwrite, verbose):
- """Writes `contents` to a file named "${output_prefix}.${extension}".
-
- get_contents should be a zero-args function that returns a string (of the
- contents to write).
- If output_prefix == '-', this writes to stdout.
- If overwrite is False, this will not overwrite files.
- """
- if output_prefix == '-':
- if verbose:
- print('Writing %s report to stdout' % (extension,), file=sys.stderr)
- sys.stdout.write(get_contents())
- return
-
- file_name = '%s.%s' % (output_prefix, extension)
- if not overwrite and os.path.exists(file_name):
- raise IOError('Refusing to write %s -- it already exists' % (file_name,))
-
- with open(file_name, 'w') as out_file:
- if verbose:
- print('Writing %s report to %s' % (extension, file_name), file=sys.stderr)
- out_file.write(get_contents())
+ """Writes `contents` to a file named "${output_prefix}.${extension}".
+
+ get_contents should be a zero-args function that returns a string (of the
+ contents to write).
+ If output_prefix == '-', this writes to stdout.
+ If overwrite is False, this will not overwrite files.
+ """
+ if output_prefix == "-":
+ if verbose:
+ print("Writing %s report to stdout" % (extension,), file=sys.stderr)
+ sys.stdout.write(get_contents())
+ return
+
+ file_name = "%s.%s" % (output_prefix, extension)
+ if not overwrite and os.path.exists(file_name):
+ raise IOError(
+ "Refusing to write %s -- it already exists" % (file_name,)
+ )
+
+ with open(file_name, "w") as out_file:
+ if verbose:
+ print(
+ "Writing %s report to %s" % (extension, file_name),
+ file=sys.stderr,
+ )
+ out_file.write(get_contents())
def RunActions(actions, benchmark_results, output_prefix, overwrite, verbose):
- """Runs `actions`, returning True if all succeeded."""
- failed = False
-
- report_ctor = None # Make the linter happy
- for report_ctor, extension in actions:
- try:
- get_contents = lambda: report_ctor(benchmark_results).GetReport()
- WriteFile(output_prefix, extension, get_contents, overwrite, verbose)
- except Exception:
- # Complain and move along; we may have more actions that might complete
- # successfully.
- failed = True
- traceback.print_exc()
- return not failed
+ """Runs `actions`, returning True if all succeeded."""
+ failed = False
+
+ report_ctor = None # Make the linter happy
+ for report_ctor, extension in actions:
+ try:
+ get_contents = lambda: report_ctor(benchmark_results).GetReport()
+ WriteFile(
+ output_prefix, extension, get_contents, overwrite, verbose
+ )
+ except Exception:
+ # Complain and move along; we may have more actions that might complete
+ # successfully.
+ failed = True
+ traceback.print_exc()
+ return not failed
def PickInputFile(input_name):
- """Given program arguments, returns file to read for benchmark input."""
- return sys.stdin if input_name == '-' else open(input_name)
+ """Given program arguments, returns file to read for benchmark input."""
+ return sys.stdin if input_name == "-" else open(input_name)
def _NoPerfReport(_label_name, _benchmark_name, _benchmark_iteration):
- return {}
+ return {}
def _ParseArgs(argv):
- parser = argparse.ArgumentParser(description='Turns JSON into results '
- 'report(s).')
- parser.add_argument(
- '-v',
- '--verbose',
- action='store_true',
- help='Be a tiny bit more verbose.')
- parser.add_argument(
- '-f',
- '--force',
- action='store_true',
- help='Overwrite existing results files.')
- parser.add_argument(
- '-o',
- '--output',
- default='report',
- type=str,
- help='Prefix of the output filename (default: report). '
- '- means stdout.')
- parser.add_argument(
- '-i',
- '--input',
- required=True,
- type=str,
- help='Where to read the JSON from. - means stdin.')
- parser.add_argument(
- '-l',
- '--statistic-limit',
- default=0,
- type=_PositiveInt,
- help='The maximum number of benchmark statistics to '
- 'display from a single run. 0 implies unlimited.')
- parser.add_argument(
- '--json', action='store_true', help='Output a JSON report.')
- parser.add_argument(
- '--text', action='store_true', help='Output a text report.')
- parser.add_argument(
- '--email',
- action='store_true',
- help='Output a text report suitable for email.')
- parser.add_argument(
- '--html',
- action='store_true',
- help='Output an HTML report (this is the default if no '
- 'other output format is specified).')
- return parser.parse_args(argv)
+ parser = argparse.ArgumentParser(
+ description="Turns JSON into results " "report(s)."
+ )
+ parser.add_argument(
+ "-v",
+ "--verbose",
+ action="store_true",
+ help="Be a tiny bit more verbose.",
+ )
+ parser.add_argument(
+ "-f",
+ "--force",
+ action="store_true",
+ help="Overwrite existing results files.",
+ )
+ parser.add_argument(
+ "-o",
+ "--output",
+ default="report",
+ type=str,
+ help="Prefix of the output filename (default: report). "
+ "- means stdout.",
+ )
+ parser.add_argument(
+ "-i",
+ "--input",
+ required=True,
+ type=str,
+ help="Where to read the JSON from. - means stdin.",
+ )
+ parser.add_argument(
+ "-l",
+ "--statistic-limit",
+ default=0,
+ type=_PositiveInt,
+ help="The maximum number of benchmark statistics to "
+ "display from a single run. 0 implies unlimited.",
+ )
+ parser.add_argument(
+ "--json", action="store_true", help="Output a JSON report."
+ )
+ parser.add_argument(
+ "--text", action="store_true", help="Output a text report."
+ )
+ parser.add_argument(
+ "--email",
+ action="store_true",
+ help="Output a text report suitable for email.",
+ )
+ parser.add_argument(
+ "--html",
+ action="store_true",
+ help="Output an HTML report (this is the default if no "
+ "other output format is specified).",
+ )
+ return parser.parse_args(argv)
def Main(argv):
- args = _ParseArgs(argv)
- with PickInputFile(args.input) as in_file:
- raw_results = json.load(in_file)
-
- platform_names = raw_results['platforms']
- results = raw_results['data']
- if args.statistic_limit:
- results = CutResultsInPlace(results, max_keys=args.statistic_limit)
- benches = CountBenchmarks(results)
- # In crosperf, a label is essentially a platform+configuration. So, a name of
- # a label and a name of a platform are equivalent for our purposes.
- bench_results = BenchmarkResults(
- label_names=platform_names,
- benchmark_names_and_iterations=benches,
- run_keyvals=results,
- read_perf_report=_NoPerfReport)
- actions = _AccumulateActions(args)
- ok = RunActions(actions, bench_results, args.output, args.force, args.verbose)
- return 0 if ok else 1
-
-
-if __name__ == '__main__':
- sys.exit(Main(sys.argv[1:]))
+ args = _ParseArgs(argv)
+ with PickInputFile(args.input) as in_file:
+ raw_results = json.load(in_file)
+
+ platform_names = raw_results["platforms"]
+ results = raw_results["data"]
+ if args.statistic_limit:
+ results = CutResultsInPlace(results, max_keys=args.statistic_limit)
+ benches = CountBenchmarks(results)
+ # In crosperf, a label is essentially a platform+configuration. So, a name of
+ # a label and a name of a platform are equivalent for our purposes.
+ bench_results = BenchmarkResults(
+ label_names=platform_names,
+ benchmark_names_and_iterations=benches,
+ run_keyvals=results,
+ read_perf_report=_NoPerfReport,
+ )
+ actions = _AccumulateActions(args)
+ ok = RunActions(
+ actions, bench_results, args.output, args.force, args.verbose
+ )
+ return 0 if ok else 1
+
+
+if __name__ == "__main__":
+ sys.exit(Main(sys.argv[1:]))
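
To make Main()'s input contract concrete, the sketch below builds the JSON shape it consumes ("platforms" plus "data", keyed by benchmark with one list of keyval dicts per platform) and exercises the two helpers shown in this hunk. It is not part of this patch; it assumes crosperf/ is on the import path, and the benchmark name and numbers are illustrative only.

import generate_report

raw = {
    "platforms": ["peppy", "peppy-new-crosstool"],
    "data": {
        "bench_draw_line": [
            [{"time (ms)": 1.321, "retval": 0},
             {"time (ms)": 1.920, "retval": 0}],
            [{"time (ms)": 1.221, "retval": 0},
             {"time (ms)": 1.423, "retval": 0}],
        ],
    },
}

# One entry per benchmark: (name, max iterations seen on any platform).
print(generate_report.CountBenchmarks(raw["data"]))  # [('bench_draw_line', 2)]

# Truncation keeps "retval" plus at most max_keys other keyvals per run.
trimmed = generate_report.CutResultsInPlace(
    raw["data"], max_keys=0, complain_on_update=False
)
print(trimmed["bench_draw_line"][0])  # [{'retval': 0}, {'retval': 0}]

From a shell, the same data saved to a file would be consumed with something like ./generate_report.py -i results.json --text --json, using the flags defined in _ParseArgs above.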
diff --git a/crosperf/generate_report_unittest.py b/crosperf/generate_report_unittest.py
index 8c3510a9..86bbc164 100755
--- a/crosperf/generate_report_unittest.py
+++ b/crosperf/generate_report_unittest.py
@@ -1,13 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Copyright 2016 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test for generate_report.py."""
-from __future__ import division
-from __future__ import print_function
import copy
import json
@@ -18,161 +16,159 @@ import generate_report
import results_report
import test_flag
+
# pylint: disable=deprecated-module
try:
- from StringIO import StringIO # for Python 2
+ from StringIO import StringIO # for Python 2
except ImportError:
- from io import StringIO # for Python 3
+ from io import StringIO # for Python 3
class _ContextualStringIO(StringIO):
- """StringIO that can be used in `with` statements."""
+ """StringIO that can be used in `with` statements."""
- def __init__(self, *args):
- StringIO.__init__(self, *args)
+ def __init__(self, *args):
+ StringIO.__init__(self, *args)
- def __enter__(self):
- return self
+ def __enter__(self):
+ return self
- def __exit__(self, _type, _value, _traceback):
- pass
+ def __exit__(self, _type, _value, _traceback):
+ pass
class GenerateReportTests(unittest.TestCase):
- """Tests for generate_report.py."""
-
- def testCountBenchmarks(self):
- runs = {
- 'foo': [[{}, {}, {}], [{}, {}, {}, {}]],
- 'bar': [],
- 'baz': [[], [{}], [{}, {}, {}]]
- }
- results = generate_report.CountBenchmarks(runs)
- expected_results = [('foo', 4), ('bar', 0), ('baz', 3)]
- self.assertCountEqual(expected_results, results)
-
- def testCutResultsInPlace(self):
- bench_data = {
- 'foo': [[{
- 'a': 1,
- 'b': 2,
- 'c': 3
- }, {
- 'a': 3,
- 'b': 2.5,
- 'c': 1
- }]],
- 'bar': [[{
- 'd': 11,
- 'e': 12,
- 'f': 13
- }]],
- 'baz': [[{
- 'g': 12,
- 'h': 13
- }]],
- 'qux': [[{
- 'i': 11
- }]],
- }
- original_bench_data = copy.deepcopy(bench_data)
-
- max_keys = 2
- results = generate_report.CutResultsInPlace(
- bench_data, max_keys=max_keys, complain_on_update=False)
- # Cuts should be in-place.
- self.assertIs(results, bench_data)
- self.assertCountEqual(
- list(original_bench_data.keys()), list(bench_data.keys()))
- for bench_name, original_runs in original_bench_data.items():
- bench_runs = bench_data[bench_name]
- self.assertEqual(len(original_runs), len(bench_runs))
- # Order of these sub-lists shouldn't have changed.
- for original_list, new_list in zip(original_runs, bench_runs):
- self.assertEqual(len(original_list), len(new_list))
- for original_keyvals, sub_keyvals in zip(original_list, new_list):
- # sub_keyvals must be a subset of original_keyvals
- self.assertDictContainsSubset(sub_keyvals, original_keyvals)
-
- def testCutResultsInPlaceLeavesRetval(self):
- bench_data = {
- 'foo': [[{
- 'retval': 0,
- 'a': 1
- }]],
- 'bar': [[{
- 'retval': 1
- }]],
- 'baz': [[{
- 'RETVAL': 1
- }]],
- }
- results = generate_report.CutResultsInPlace(
- bench_data, max_keys=0, complain_on_update=False)
- # Just reach into results assuming we know it otherwise outputs things in
- # the expected way. If it doesn't, testCutResultsInPlace should give an
- # indication as to what, exactly, is broken.
- self.assertEqual(list(results['foo'][0][0].items()), [('retval', 0)])
- self.assertEqual(list(results['bar'][0][0].items()), [('retval', 1)])
- self.assertEqual(list(results['baz'][0][0].items()), [])
-
- def _RunMainWithInput(self, args, input_obj):
- assert '-i' not in args
- args += ['-i', '-']
- input_buf = _ContextualStringIO(json.dumps(input_obj))
- with mock.patch('generate_report.PickInputFile', return_value=input_buf) \
- as patched_pick:
- result = generate_report.Main(args)
- patched_pick.assert_called_once_with('-')
- return result
-
- @mock.patch('generate_report.RunActions')
- def testMain(self, mock_run_actions):
- # Email is left out because it's a bit more difficult to test, and it'll be
- # mildly obvious if it's failing.
- args = ['--json', '--html', '--text']
- return_code = self._RunMainWithInput(args, {'platforms': [], 'data': {}})
- self.assertEqual(0, return_code)
- self.assertEqual(mock_run_actions.call_count, 1)
- ctors = [ctor for ctor, _ in mock_run_actions.call_args[0][0]]
- self.assertEqual(ctors, [
- results_report.JSONResultsReport,
- results_report.TextResultsReport,
- results_report.HTMLResultsReport,
- ])
-
- @mock.patch('generate_report.RunActions')
- def testMainSelectsHTMLIfNoReportsGiven(self, mock_run_actions):
- args = []
- return_code = self._RunMainWithInput(args, {'platforms': [], 'data': {}})
- self.assertEqual(0, return_code)
- self.assertEqual(mock_run_actions.call_count, 1)
- ctors = [ctor for ctor, _ in mock_run_actions.call_args[0][0]]
- self.assertEqual(ctors, [results_report.HTMLResultsReport])
-
- # We only mock print_exc so we don't have exception info printed to stdout.
- @mock.patch('generate_report.WriteFile', side_effect=ValueError('Oh noo'))
- @mock.patch('traceback.print_exc')
- def testRunActionsRunsAllActionsRegardlessOfExceptions(
- self, mock_print_exc, mock_write_file):
- actions = [(None, 'json'), (None, 'html'), (None, 'text'), (None, 'email')]
- output_prefix = '-'
- ok = generate_report.RunActions(
- actions, {}, output_prefix, overwrite=False, verbose=False)
- self.assertFalse(ok)
- self.assertEqual(mock_write_file.call_count, len(actions))
- self.assertEqual(mock_print_exc.call_count, len(actions))
-
- @mock.patch('generate_report.WriteFile')
- def testRunActionsReturnsTrueIfAllActionsSucceed(self, mock_write_file):
- actions = [(None, 'json'), (None, 'html'), (None, 'text')]
- output_prefix = '-'
- ok = generate_report.RunActions(
- actions, {}, output_prefix, overwrite=False, verbose=False)
- self.assertEqual(mock_write_file.call_count, len(actions))
- self.assertTrue(ok)
-
-
-if __name__ == '__main__':
- test_flag.SetTestMode(True)
- unittest.main()
+ """Tests for generate_report.py."""
+
+ def testCountBenchmarks(self):
+ runs = {
+ "foo": [[{}, {}, {}], [{}, {}, {}, {}]],
+ "bar": [],
+ "baz": [[], [{}], [{}, {}, {}]],
+ }
+ results = generate_report.CountBenchmarks(runs)
+ expected_results = [("foo", 4), ("bar", 0), ("baz", 3)]
+ self.assertCountEqual(expected_results, results)
+
+ def testCutResultsInPlace(self):
+ bench_data = {
+ "foo": [[{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 2.5, "c": 1}]],
+ "bar": [[{"d": 11, "e": 12, "f": 13}]],
+ "baz": [[{"g": 12, "h": 13}]],
+ "qux": [[{"i": 11}]],
+ }
+ original_bench_data = copy.deepcopy(bench_data)
+
+ max_keys = 2
+ results = generate_report.CutResultsInPlace(
+ bench_data, max_keys=max_keys, complain_on_update=False
+ )
+ # Cuts should be in-place.
+ self.assertIs(results, bench_data)
+ self.assertCountEqual(
+ list(original_bench_data.keys()), list(bench_data.keys())
+ )
+ for bench_name, original_runs in original_bench_data.items():
+ bench_runs = bench_data[bench_name]
+ self.assertEqual(len(original_runs), len(bench_runs))
+ # Order of these sub-lists shouldn't have changed.
+ for original_list, new_list in zip(original_runs, bench_runs):
+ self.assertEqual(len(original_list), len(new_list))
+ for original_keyvals, sub_keyvals in zip(
+ original_list, new_list
+ ):
+ # sub_keyvals must be a subset of original_keyvals
+ self.assertDictContainsSubset(sub_keyvals, original_keyvals)
+
+ def testCutResultsInPlaceLeavesRetval(self):
+ bench_data = {
+ "foo": [[{"retval": 0, "a": 1}]],
+ "bar": [[{"retval": 1}]],
+ "baz": [[{"RETVAL": 1}]],
+ }
+ results = generate_report.CutResultsInPlace(
+ bench_data, max_keys=0, complain_on_update=False
+ )
+        # Just reach into results, assuming it otherwise outputs things in the
+        # expected way. If it doesn't, testCutResultsInPlace should give an
+        # indication as to what, exactly, is broken.
+ self.assertEqual(list(results["foo"][0][0].items()), [("retval", 0)])
+ self.assertEqual(list(results["bar"][0][0].items()), [("retval", 1)])
+ self.assertEqual(list(results["baz"][0][0].items()), [])
+
+ def _RunMainWithInput(self, args, input_obj):
+ assert "-i" not in args
+ args += ["-i", "-"]
+ input_buf = _ContextualStringIO(json.dumps(input_obj))
+ with mock.patch(
+ "generate_report.PickInputFile", return_value=input_buf
+ ) as patched_pick:
+ result = generate_report.Main(args)
+ patched_pick.assert_called_once_with("-")
+ return result
+
+ @mock.patch("generate_report.RunActions")
+ def testMain(self, mock_run_actions):
+ # Email is left out because it's a bit more difficult to test, and it'll be
+ # mildly obvious if it's failing.
+ args = ["--json", "--html", "--text"]
+ return_code = self._RunMainWithInput(
+ args, {"platforms": [], "data": {}}
+ )
+ self.assertEqual(0, return_code)
+ self.assertEqual(mock_run_actions.call_count, 1)
+ ctors = [ctor for ctor, _ in mock_run_actions.call_args[0][0]]
+ self.assertEqual(
+ ctors,
+ [
+ results_report.JSONResultsReport,
+ results_report.TextResultsReport,
+ results_report.HTMLResultsReport,
+ ],
+ )
+
+ @mock.patch("generate_report.RunActions")
+ def testMainSelectsHTMLIfNoReportsGiven(self, mock_run_actions):
+ args = []
+ return_code = self._RunMainWithInput(
+ args, {"platforms": [], "data": {}}
+ )
+ self.assertEqual(0, return_code)
+ self.assertEqual(mock_run_actions.call_count, 1)
+ ctors = [ctor for ctor, _ in mock_run_actions.call_args[0][0]]
+ self.assertEqual(ctors, [results_report.HTMLResultsReport])
+
+ # We only mock print_exc so we don't have exception info printed to stdout.
+ @mock.patch("generate_report.WriteFile", side_effect=ValueError("Oh noo"))
+ @mock.patch("traceback.print_exc")
+ def testRunActionsRunsAllActionsRegardlessOfExceptions(
+ self, mock_print_exc, mock_write_file
+ ):
+ actions = [
+ (None, "json"),
+ (None, "html"),
+ (None, "text"),
+ (None, "email"),
+ ]
+ output_prefix = "-"
+ ok = generate_report.RunActions(
+ actions, {}, output_prefix, overwrite=False, verbose=False
+ )
+ self.assertFalse(ok)
+ self.assertEqual(mock_write_file.call_count, len(actions))
+ self.assertEqual(mock_print_exc.call_count, len(actions))
+
+ @mock.patch("generate_report.WriteFile")
+ def testRunActionsReturnsTrueIfAllActionsSucceed(self, mock_write_file):
+ actions = [(None, "json"), (None, "html"), (None, "text")]
+ output_prefix = "-"
+ ok = generate_report.RunActions(
+ actions, {}, output_prefix, overwrite=False, verbose=False
+ )
+ self.assertEqual(mock_write_file.call_count, len(actions))
+ self.assertTrue(ok)
+
+
+if __name__ == "__main__":
+ test_flag.SetTestMode(True)
+ unittest.main()
diff --git a/crosperf/help.py b/crosperf/help.py
index 4409b770..db95fc6c 100644
--- a/crosperf/help.py
+++ b/crosperf/help.py
@@ -1,47 +1,49 @@
# -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module to print help message."""
-from __future__ import print_function
import sys
import textwrap
+
from settings_factory import BenchmarkSettings
from settings_factory import GlobalSettings
from settings_factory import LabelSettings
class Help(object):
- """The help class."""
-
- def GetUsage(self):
- return """%s [OPTIONS] EXPERIMENT_FILE""" % (sys.argv[0])
-
- def _WrapLine(self, line):
- return '\n'.join(textwrap.wrap(line, 80))
-
- def _GetFieldDescriptions(self, fields):
- res = ''
- for field_name in fields:
- field = fields[field_name]
- res += 'Field:\t\t%s\n' % field.name
- res += self._WrapLine('Description:\t%s' % field.description) + '\n'
- res += 'Type:\t\t%s\n' % type(field).__name__.replace('Field', '')
- res += 'Required:\t%s\n' % field.required
- if field.default:
- res += 'Default:\t%s\n' % field.default
- res += '\n'
- return res
-
- def GetHelp(self):
- global_fields = self._GetFieldDescriptions(GlobalSettings('').fields)
- benchmark_fields = self._GetFieldDescriptions(BenchmarkSettings('').fields)
- label_fields = self._GetFieldDescriptions(LabelSettings('').fields)
-
- return """%s is a script for running performance experiments on
+ """The help class."""
+
+ def GetUsage(self):
+ return """%s [OPTIONS] EXPERIMENT_FILE""" % (sys.argv[0])
+
+ def _WrapLine(self, line):
+ return "\n".join(textwrap.wrap(line, 80))
+
+ def _GetFieldDescriptions(self, fields):
+ res = ""
+ for field_name in fields:
+ field = fields[field_name]
+ res += "Field:\t\t%s\n" % field.name
+ res += self._WrapLine("Description:\t%s" % field.description) + "\n"
+ res += "Type:\t\t%s\n" % type(field).__name__.replace("Field", "")
+ res += "Required:\t%s\n" % field.required
+ if field.default:
+ res += "Default:\t%s\n" % field.default
+ res += "\n"
+ return res
+
+ def GetHelp(self):
+ global_fields = self._GetFieldDescriptions(GlobalSettings("").fields)
+ benchmark_fields = self._GetFieldDescriptions(
+ BenchmarkSettings("").fields
+ )
+ label_fields = self._GetFieldDescriptions(LabelSettings("").fields)
+
+ return """%s is a script for running performance experiments on
ChromeOS. It allows one to run ChromeOS Autotest benchmarks over
several images and compare the results to determine whether there
is a performance difference.
@@ -114,5 +116,11 @@ experiment file). Crosperf runs the experiment and caches the results
generates and displays a report based on the run, and emails the
report to the user. If the results were all read out of the cache,
then by default no email is generated.
-""" % (sys.argv[0], sys.argv[0], global_fields, benchmark_fields, label_fields,
- sys.argv[0])
+""" % (
+ sys.argv[0],
+ sys.argv[0],
+ global_fields,
+ benchmark_fields,
+ label_fields,
+ sys.argv[0],
+ )
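
Each entry emitted by _GetFieldDescriptions() above follows a fixed template; rendered for a hypothetical boolean field it looks roughly like this (the field name, description, and default are made up):

sample_entry = (
    "Field:\t\tcache_only\n"
    "Description:\tRead results only from the cache.\n"
    "Type:\t\tBoolean\n"
    "Required:\tFalse\n"
    "Default:\tFalse\n"
)
print(sample_entry)
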
diff --git a/crosperf/image_checksummer.py b/crosperf/image_checksummer.py
index 8ac5be25..87664e9d 100644
--- a/crosperf/image_checksummer.py
+++ b/crosperf/image_checksummer.py
@@ -1,11 +1,10 @@
# -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Compute image checksum."""
-from __future__ import print_function
import os
import threading
@@ -15,59 +14,71 @@ from cros_utils.file_utils import FileUtils
class ImageChecksummer(object):
- """Compute image checksum."""
+ """Compute image checksum."""
- class PerImageChecksummer(object):
- """Compute checksum for an image."""
+ class PerImageChecksummer(object):
+ """Compute checksum for an image."""
- def __init__(self, label, log_level):
- self._lock = threading.Lock()
- self.label = label
- self._checksum = None
- self.log_level = log_level
+ def __init__(self, label, log_level):
+ self._lock = threading.Lock()
+ self.label = label
+ self._checksum = None
+ self.log_level = log_level
- def Checksum(self):
- with self._lock:
- if not self._checksum:
- logger.GetLogger().LogOutput(
- "Acquiring checksum for '%s'." % self.label.name)
- self._checksum = None
- if self.label.image_type != 'local':
- raise RuntimeError('Called Checksum on non-local image!')
- if self.label.chromeos_image:
- if os.path.exists(self.label.chromeos_image):
- self._checksum = FileUtils().Md5File(
- self.label.chromeos_image, log_level=self.log_level)
- logger.GetLogger().LogOutput('Computed checksum is '
- ': %s' % self._checksum)
- if not self._checksum:
- raise RuntimeError('Checksum computing error.')
- logger.GetLogger().LogOutput('Checksum is: %s' % self._checksum)
- return self._checksum
+ def Checksum(self):
+ with self._lock:
+ if not self._checksum:
+ logger.GetLogger().LogOutput(
+ "Acquiring checksum for '%s'." % self.label.name
+ )
+ self._checksum = None
+ if self.label.image_type != "local":
+ raise RuntimeError(
+ "Called Checksum on non-local image!"
+ )
+ if self.label.chromeos_image:
+ if os.path.exists(self.label.chromeos_image):
+ self._checksum = FileUtils().Md5File(
+ self.label.chromeos_image,
+ log_level=self.log_level,
+ )
+ logger.GetLogger().LogOutput(
+                            "Computed checksum is: %s" % self._checksum
+ )
+ if not self._checksum:
+ raise RuntimeError("Checksum computing error.")
+ logger.GetLogger().LogOutput(
+ "Checksum is: %s" % self._checksum
+ )
+ return self._checksum
- _instance = None
- _lock = threading.Lock()
- _per_image_checksummers = {}
+ _instance = None
+ _lock = threading.Lock()
+ _per_image_checksummers = {}
- def __new__(cls, *args, **kwargs):
- with cls._lock:
- if not cls._instance:
- cls._instance = super(ImageChecksummer, cls).__new__(
- cls, *args, **kwargs)
- return cls._instance
+ def __new__(cls, *args, **kwargs):
+ with cls._lock:
+ if not cls._instance:
+ cls._instance = super(ImageChecksummer, cls).__new__(
+ cls, *args, **kwargs
+ )
+ return cls._instance
- def Checksum(self, label, log_level):
- if label.image_type != 'local':
- raise RuntimeError('Attempt to call Checksum on non-local image.')
- with self._lock:
- if label.name not in self._per_image_checksummers:
- self._per_image_checksummers[label.name] = (
- ImageChecksummer.PerImageChecksummer(label, log_level))
- checksummer = self._per_image_checksummers[label.name]
+ def Checksum(self, label, log_level):
+ if label.image_type != "local":
+ raise RuntimeError("Attempt to call Checksum on non-local image.")
+ with self._lock:
+ if label.name not in self._per_image_checksummers:
+ self._per_image_checksummers[
+ label.name
+ ] = ImageChecksummer.PerImageChecksummer(label, log_level)
+ checksummer = self._per_image_checksummers[label.name]
- try:
- return checksummer.Checksum()
- except:
- logger.GetLogger().LogError('Could not compute checksum of image in label'
- " '%s'." % label.name)
- raise
+ try:
+ return checksummer.Checksum()
+ except:
+ logger.GetLogger().LogError(
+ "Could not compute checksum of image in label"
+ " '%s'." % label.name
+ )
+ raise
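
A rough usage sketch of the checksummer above (the names, path, and log level are made up): ImageChecksummer is a process-wide singleton keyed by label name, so any label-like object with name, image_type == "local", and chromeos_image pointing at an existing image file gets its MD5 computed once and served from the per-label cache afterwards.

import types

from image_checksummer import ImageChecksummer

# Stand-in for a crosperf Label; the image path is assumed to exist.
fake_label = types.SimpleNamespace(
    name="my_label",
    image_type="local",
    chromeos_image="/tmp/chromiumos_test_image.bin",
)
md5 = ImageChecksummer().Checksum(fake_label, log_level="quiet")
# A second call returns the cached value without re-reading the image.
assert ImageChecksummer().Checksum(fake_label, log_level="quiet") == md5
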
diff --git a/crosperf/label.py b/crosperf/label.py
index 30bf5f8c..9aeff562 100644
--- a/crosperf/label.py
+++ b/crosperf/label.py
@@ -1,188 +1,203 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The label of benchamrks."""
-from __future__ import print_function
import hashlib
import os
-from image_checksummer import ImageChecksummer
-from cros_utils.file_utils import FileUtils
from cros_utils import misc
+from cros_utils.file_utils import FileUtils
+from image_checksummer import ImageChecksummer
class Label(object):
- """The label class."""
-
- def __init__(self,
- name,
- build,
- chromeos_image,
- autotest_path,
- debug_path,
- chromeos_root,
- board,
- remote,
- image_args,
- cache_dir,
- cache_only,
- log_level,
- compiler,
- crosfleet=False,
- chrome_src=None):
-
- self.image_type = self._GetImageType(chromeos_image)
-
- # Expand ~
- chromeos_root = os.path.expanduser(chromeos_root)
- if self.image_type == 'local':
- chromeos_image = os.path.expanduser(chromeos_image)
-
- self.name = name
- self.build = build
- self.chromeos_image = chromeos_image
- self.autotest_path = autotest_path
- self.debug_path = debug_path
- self.board = board
- self.remote = remote
- self.image_args = image_args
- self.cache_dir = cache_dir
- self.cache_only = cache_only
- self.log_level = log_level
- self.chrome_version = ''
- self.compiler = compiler
- self.crosfleet = crosfleet
-
- if not chromeos_root:
- if self.image_type == 'local':
- chromeos_root = FileUtils().ChromeOSRootFromImage(chromeos_image)
- if not chromeos_root:
- raise RuntimeError("No ChromeOS root given for label '%s' and could "
- "not determine one from image path: '%s'." %
- (name, chromeos_image))
- else:
- chromeos_root = FileUtils().CanonicalizeChromeOSRoot(chromeos_root)
- if not chromeos_root:
- raise RuntimeError("Invalid ChromeOS root given for label '%s': '%s'." %
- (name, chromeos_root))
-
- self.chromeos_root = chromeos_root
- if not chrome_src:
- # Old and new chroots may have different chrome src locations.
- # The path also depends on the chrome build flags.
- # Give priority to chrome-src-internal.
- chrome_src_rel_paths = [
- '.cache/distfiles/target/chrome-src-internal',
- '.cache/distfiles/chrome-src-internal',
- '.cache/distfiles/target/chrome-src',
- '.cache/distfiles/chrome-src',
- ]
- for chrome_src_rel_path in chrome_src_rel_paths:
- chrome_src_abs_path = os.path.join(self.chromeos_root,
- chrome_src_rel_path)
- if os.path.exists(chrome_src_abs_path):
- chrome_src = chrome_src_abs_path
- break
- if not chrome_src:
- raise RuntimeError('Can not find location of Chrome sources.\n'
- f'Checked paths: {chrome_src_rel_paths}')
- else:
- chrome_src = misc.CanonicalizePath(chrome_src)
- # Make sure the path exists.
- if not os.path.exists(chrome_src):
- raise RuntimeError("Invalid Chrome src given for label '%s': '%s'." %
- (name, chrome_src))
- self.chrome_src = chrome_src
-
- self._SetupChecksum()
-
- def _SetupChecksum(self):
- """Compute label checksum only once."""
-
- self.checksum = None
- if self.image_type == 'local':
- self.checksum = ImageChecksummer().Checksum(self, self.log_level)
- elif self.image_type == 'trybot':
- self.checksum = hashlib.md5(
- self.chromeos_image.encode('utf-8')).hexdigest()
-
- def _GetImageType(self, chromeos_image):
- image_type = None
- if chromeos_image.find('xbuddy://') < 0:
- image_type = 'local'
- elif chromeos_image.find('trybot') >= 0:
- image_type = 'trybot'
- else:
- image_type = 'official'
- return image_type
-
- def __hash__(self):
- """Label objects are used in a map, so provide "hash" and "equal"."""
-
- return hash(self.name)
-
- def __eq__(self, other):
- """Label objects are used in a map, so provide "hash" and "equal"."""
-
- return isinstance(other, Label) and other.name == self.name
-
- def __str__(self):
- """For better debugging."""
-
- return 'label[name="{}"]'.format(self.name)
+ """The label class."""
+
+ def __init__(
+ self,
+ name,
+ build,
+ chromeos_image,
+ autotest_path,
+ debug_path,
+ chromeos_root,
+ board,
+ remote,
+ image_args,
+ cache_dir,
+ cache_only,
+ log_level,
+ compiler,
+ crosfleet=False,
+ chrome_src=None,
+ ):
+
+ self.image_type = self._GetImageType(chromeos_image)
+
+ # Expand ~
+ chromeos_root = os.path.expanduser(chromeos_root)
+ if self.image_type == "local":
+ chromeos_image = os.path.expanduser(chromeos_image)
+
+ self.name = name
+ self.build = build
+ self.chromeos_image = chromeos_image
+ self.autotest_path = autotest_path
+ self.debug_path = debug_path
+ self.board = board
+ self.remote = remote
+ self.image_args = image_args
+ self.cache_dir = cache_dir
+ self.cache_only = cache_only
+ self.log_level = log_level
+ self.chrome_version = ""
+ self.compiler = compiler
+ self.crosfleet = crosfleet
+
+ if not chromeos_root:
+ if self.image_type == "local":
+ chromeos_root = FileUtils().ChromeOSRootFromImage(
+ chromeos_image
+ )
+ if not chromeos_root:
+ raise RuntimeError(
+ "No ChromeOS root given for label '%s' and could "
+ "not determine one from image path: '%s'."
+ % (name, chromeos_image)
+ )
+ else:
+ chromeos_root = FileUtils().CanonicalizeChromeOSRoot(chromeos_root)
+ if not chromeos_root:
+ raise RuntimeError(
+ "Invalid ChromeOS root given for label '%s': '%s'."
+ % (name, chromeos_root)
+ )
+
+ self.chromeos_root = chromeos_root
+ if not chrome_src:
+ # Old and new chroots may have different chrome src locations.
+ # The path also depends on the chrome build flags.
+ # Give priority to chrome-src-internal.
+ chrome_src_rel_paths = [
+ ".cache/distfiles/target/chrome-src-internal",
+ ".cache/distfiles/chrome-src-internal",
+ ".cache/distfiles/target/chrome-src",
+ ".cache/distfiles/chrome-src",
+ ]
+ for chrome_src_rel_path in chrome_src_rel_paths:
+ chrome_src_abs_path = os.path.join(
+ self.chromeos_root, chrome_src_rel_path
+ )
+ if os.path.exists(chrome_src_abs_path):
+ chrome_src = chrome_src_abs_path
+ break
+ if not chrome_src:
+ raise RuntimeError(
+                    "Cannot find the location of Chrome sources.\n"
+ f"Checked paths: {chrome_src_rel_paths}"
+ )
+ else:
+ chrome_src = misc.CanonicalizePath(chrome_src)
+ # Make sure the path exists.
+ if not os.path.exists(chrome_src):
+ raise RuntimeError(
+ "Invalid Chrome src given for label '%s': '%s'."
+ % (name, chrome_src)
+ )
+ self.chrome_src = chrome_src
+
+ self._SetupChecksum()
+
+ def _SetupChecksum(self):
+ """Compute label checksum only once."""
+
+ self.checksum = None
+ if self.image_type == "local":
+ self.checksum = ImageChecksummer().Checksum(self, self.log_level)
+ elif self.image_type == "trybot":
+ self.checksum = hashlib.md5(
+ self.chromeos_image.encode("utf-8")
+ ).hexdigest()
+
+ def _GetImageType(self, chromeos_image):
+ image_type = None
+ if chromeos_image.find("xbuddy://") < 0:
+ image_type = "local"
+ elif chromeos_image.find("trybot") >= 0:
+ image_type = "trybot"
+ else:
+ image_type = "official"
+ return image_type
+
+ def __hash__(self):
+ """Label objects are used in a map, so provide "hash" and "equal"."""
+
+ return hash(self.name)
+
+ def __eq__(self, other):
+ """Label objects are used in a map, so provide "hash" and "equal"."""
+
+ return isinstance(other, Label) and other.name == self.name
+
+ def __str__(self):
+ """For better debugging."""
+
+ return 'label[name="{}"]'.format(self.name)
class MockLabel(object):
- """The mock label class."""
-
- def __init__(self,
- name,
- build,
- chromeos_image,
- autotest_path,
- debug_path,
- chromeos_root,
- board,
- remote,
- image_args,
- cache_dir,
- cache_only,
- log_level,
- compiler,
- crosfleet=False,
- chrome_src=None):
- self.name = name
- self.build = build
- self.chromeos_image = chromeos_image
- self.autotest_path = autotest_path
- self.debug_path = debug_path
- self.board = board
- self.remote = remote
- self.cache_dir = cache_dir
- self.cache_only = cache_only
- if not chromeos_root:
- self.chromeos_root = '/tmp/chromeos_root'
- else:
- self.chromeos_root = chromeos_root
- self.image_args = image_args
- self.chrome_src = chrome_src
- self.image_type = self._GetImageType(chromeos_image)
- self.checksum = ''
- self.log_level = log_level
- self.compiler = compiler
- self.crosfleet = crosfleet
- self.chrome_version = 'Fake Chrome Version 50'
-
- def _GetImageType(self, chromeos_image):
- image_type = None
- if chromeos_image.find('xbuddy://') < 0:
- image_type = 'local'
- elif chromeos_image.find('trybot') >= 0:
- image_type = 'trybot'
- else:
- image_type = 'official'
- return image_type
+ """The mock label class."""
+
+ def __init__(
+ self,
+ name,
+ build,
+ chromeos_image,
+ autotest_path,
+ debug_path,
+ chromeos_root,
+ board,
+ remote,
+ image_args,
+ cache_dir,
+ cache_only,
+ log_level,
+ compiler,
+ crosfleet=False,
+ chrome_src=None,
+ ):
+ self.name = name
+ self.build = build
+ self.chromeos_image = chromeos_image
+ self.autotest_path = autotest_path
+ self.debug_path = debug_path
+ self.board = board
+ self.remote = remote
+ self.cache_dir = cache_dir
+ self.cache_only = cache_only
+ if not chromeos_root:
+ self.chromeos_root = "/tmp/chromeos_root"
+ else:
+ self.chromeos_root = chromeos_root
+ self.image_args = image_args
+ self.chrome_src = chrome_src
+ self.image_type = self._GetImageType(chromeos_image)
+ self.checksum = ""
+ self.log_level = log_level
+ self.compiler = compiler
+ self.crosfleet = crosfleet
+ self.chrome_version = "Fake Chrome Version 50"
+
+ def _GetImageType(self, chromeos_image):
+ image_type = None
+ if chromeos_image.find("xbuddy://") < 0:
+ image_type = "local"
+ elif chromeos_image.find("trybot") >= 0:
+ image_type = "trybot"
+ else:
+ image_type = "official"
+ return image_type
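
The image-type classification shared by Label and MockLabel above reduces to two substring checks; the sketch below, using made-up image specs, restates that rule:

def classify(chromeos_image):
    # Mirrors _GetImageType: no "xbuddy://" means a local image file, an
    # xbuddy spec containing "trybot" is a trybot image, and anything else
    # is treated as an official image.
    if "xbuddy://" not in chromeos_image:
        return "local"
    if "trybot" in chromeos_image:
        return "trybot"
    return "official"

print(classify("/home/me/chromiumos_test_image.bin"))              # local
print(classify("xbuddy://remote/trybot-lumpy-paladin/R40-1.0.0"))  # trybot
print(classify("xbuddy://remote/lumpy-release/R40-6457.107.0"))    # official
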
diff --git a/crosperf/machine_image_manager.py b/crosperf/machine_image_manager.py
index ffdd6436..74379bff 100644
--- a/crosperf/machine_image_manager.py
+++ b/crosperf/machine_image_manager.py
@@ -1,17 +1,16 @@
# -*- coding: utf-8 -*-
-# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Copyright 2015 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""MachineImageManager allocates images to duts."""
-from __future__ import print_function
import functools
class MachineImageManager(object):
- """Management of allocating images to duts.
+ """Management of allocating images to duts.
* Data structure we have -
@@ -137,173 +136,180 @@ class MachineImageManager(object):
   * Special / common case to handle separately
     If we have only 1 dut or only 1 label, that's simple enough.
- """
-
- def __init__(self, labels, duts):
- self.labels_ = labels
- self.duts_ = duts
- self.n_labels_ = len(labels)
- self.n_duts_ = len(duts)
- self.dut_name_ordinal_ = dict()
- for idx, dut in enumerate(self.duts_):
- self.dut_name_ordinal_[dut.name] = idx
-
- # Generate initial matrix containg 'X' or ' '.
- self.matrix_ = [['X' if l.remote else ' '
- for _ in range(self.n_duts_)]
- for l in self.labels_]
- for ol, l in enumerate(self.labels_):
- if l.remote:
- for r in l.remote:
- self.matrix_[ol][self.dut_name_ordinal_[r]] = ' '
-
- self.label_duts_ = [[] for _ in range(self.n_labels_)]
- self.allocate_log_ = []
-
- def compute_initial_allocation(self):
- """Compute the initial label-dut allocation.
-
- This method finds the most efficient way that every label gets imaged at
- least once.
-
- Returns:
- False, only if not all labels could be imaged to a certain machine,
- otherwise True.
"""
- if self.n_duts_ == 1:
- for i, v in self.matrix_vertical_generator(0):
- if v != 'X':
- self.matrix_[i][0] = 'Y'
- return
-
- if self.n_labels_ == 1:
- for j, v in self.matrix_horizontal_generator(0):
- if v != 'X':
- self.matrix_[0][j] = 'Y'
- return
-
- if self.n_duts_ >= self.n_labels_:
- n = 1
- else:
- n = self.n_labels_ - self.n_duts_ + 1
- while n <= self.n_labels_:
- if self._compute_initial_allocation_internal(0, n):
- break
- n += 1
-
- return n <= self.n_labels_
-
- def _record_allocate_log(self, label_i, dut_j):
- self.allocate_log_.append((label_i, dut_j))
- self.label_duts_[label_i].append(dut_j)
-
- def allocate(self, dut, schedv2=None):
- """Allocate a label for dut.
-
- Args:
- dut: the dut that asks for a new image.
- schedv2: the scheduling instance, we need the benchmark run
- information with schedv2 for a better allocation.
-
- Returns:
- a label to image onto the dut or None if no more available images for
- the dut.
- """
- j = self.dut_name_ordinal_[dut.name]
- # 'can_' prefix means candidate label's.
- can_reimage_number = 999
- can_i = 999
- can_label = None
- can_pending_br_num = 0
- for i, v in self.matrix_vertical_generator(j):
- label = self.labels_[i]
-
- # 2 optimizations here regarding allocating label to dut.
- # Note schedv2 might be None in case we do not need this
- # optimization or we are in testing mode.
- if schedv2 is not None:
- pending_br_num = len(schedv2.get_label_map()[label])
- if pending_br_num == 0:
- # (A) - we have finished all br of this label,
- # apparently, we do not want to reimaeg dut to
- # this label.
- continue
- else:
- # In case we do not have a schedv2 instance, mark
- # pending_br_num as 0, so pending_br_num >=
- # can_pending_br_num is always True.
- pending_br_num = 0
-
- # For this time being, I just comment this out until we have a
- # better estimation how long each benchmarkrun takes.
- # if (pending_br_num <= 5 and
- # len(self.label_duts_[i]) >= 1):
- # # (B) this is heuristic - if there are just a few test cases
- # # (say <5) left undone for this label, and there is at least
- # # 1 other machine working on this lable, we probably not want
- # # to bother to reimage this dut to help with these 5 test
- # # cases
- # continue
-
- if v == 'Y':
- self.matrix_[i][j] = '_'
- self._record_allocate_log(i, j)
- return label
- if v == ' ':
- label_reimage_number = len(self.label_duts_[i])
- if ((can_label is None) or
- (label_reimage_number < can_reimage_number or
- (label_reimage_number == can_reimage_number and
- pending_br_num >= can_pending_br_num))):
- can_reimage_number = label_reimage_number
- can_i = i
- can_label = label
- can_pending_br_num = pending_br_num
-
- # All labels are marked either '_' (already taken) or 'X' (not
- # compatible), so return None to notify machine thread to quit.
- if can_label is None:
- return None
-
- # At this point, we don't find any 'Y' for the machine, so we go the
- # 'min' approach.
- self.matrix_[can_i][j] = '_'
- self._record_allocate_log(can_i, j)
- return can_label
-
- def matrix_vertical_generator(self, col):
- """Iterate matrix vertically at column 'col'.
-
- Yield row number i and value at matrix_[i][col].
- """
- for i, _ in enumerate(self.labels_):
- yield i, self.matrix_[i][col]
-
- def matrix_horizontal_generator(self, row):
- """Iterate matrix horizontally at row 'row'.
-
- Yield col number j and value at matrix_[row][j].
- """
- for j, _ in enumerate(self.duts_):
- yield j, self.matrix_[row][j]
-
- def _compute_initial_allocation_internal(self, level, N):
- """Search matrix for d with N."""
-
- if level == self.n_labels_:
- return True
-
- for j, v in self.matrix_horizontal_generator(level):
- if v == ' ':
- # Before we put a 'Y', we check how many Y column 'j' has.
- # Note y[0] is row idx, y[1] is the cell value.
- ny = functools.reduce(lambda x, y: x + 1 if (y[1] == 'Y') else x,
- self.matrix_vertical_generator(j), 0)
- if ny < N:
- self.matrix_[level][j] = 'Y'
- if self._compute_initial_allocation_internal(level + 1, N):
+ def __init__(self, labels, duts):
+ self.labels_ = labels
+ self.duts_ = duts
+ self.n_labels_ = len(labels)
+ self.n_duts_ = len(duts)
+ self.dut_name_ordinal_ = dict()
+ for idx, dut in enumerate(self.duts_):
+ self.dut_name_ordinal_[dut.name] = idx
+
+        # Generate initial matrix containing 'X' or ' '.
+ self.matrix_ = [
+ ["X" if l.remote else " " for _ in range(self.n_duts_)]
+ for l in self.labels_
+ ]
+ for ol, l in enumerate(self.labels_):
+ if l.remote:
+ for r in l.remote:
+ self.matrix_[ol][self.dut_name_ordinal_[r]] = " "
+
+ self.label_duts_ = [[] for _ in range(self.n_labels_)]
+ self.allocate_log_ = []
+
+ def compute_initial_allocation(self):
+ """Compute the initial label-dut allocation.
+
+ This method finds the most efficient way that every label gets imaged at
+ least once.
+
+ Returns:
+            False only if some label could not be assigned to any machine;
+            True otherwise.
+ """
+
+ if self.n_duts_ == 1:
+ for i, v in self.matrix_vertical_generator(0):
+ if v != "X":
+ self.matrix_[i][0] = "Y"
+ return
+
+ if self.n_labels_ == 1:
+ for j, v in self.matrix_horizontal_generator(0):
+ if v != "X":
+ self.matrix_[0][j] = "Y"
+ return
+
+ if self.n_duts_ >= self.n_labels_:
+ n = 1
+ else:
+ n = self.n_labels_ - self.n_duts_ + 1
+ while n <= self.n_labels_:
+ if self._compute_initial_allocation_internal(0, n):
+ break
+ n += 1
+
+ return n <= self.n_labels_
+
+ def _record_allocate_log(self, label_i, dut_j):
+ self.allocate_log_.append((label_i, dut_j))
+ self.label_duts_[label_i].append(dut_j)
+
+ def allocate(self, dut, schedv2=None):
+ """Allocate a label for dut.
+
+ Args:
+ dut: the dut that asks for a new image.
+ schedv2: the scheduling instance, we need the benchmark run
+ information with schedv2 for a better allocation.
+
+ Returns:
+ a label to image onto the dut or None if no more available images for
+ the dut.
+ """
+ j = self.dut_name_ordinal_[dut.name]
+        # The 'can_' prefix marks the current best candidate label's fields.
+ can_reimage_number = 999
+ can_i = 999
+ can_label = None
+ can_pending_br_num = 0
+ for i, v in self.matrix_vertical_generator(j):
+ label = self.labels_[i]
+
+ # 2 optimizations here regarding allocating label to dut.
+ # Note schedv2 might be None in case we do not need this
+ # optimization or we are in testing mode.
+ if schedv2 is not None:
+ pending_br_num = len(schedv2.get_label_map()[label])
+ if pending_br_num == 0:
+                    # (A) - we have finished all benchmark runs of this
+                    # label, so we do not want to reimage the dut to
+                    # this label.
+ continue
+ else:
+ # In case we do not have a schedv2 instance, mark
+ # pending_br_num as 0, so pending_br_num >=
+ # can_pending_br_num is always True.
+ pending_br_num = 0
+
+            # For the time being, I just comment this out until we have a
+            # better estimate of how long each benchmark run takes.
+ # if (pending_br_num <= 5 and
+ # len(self.label_duts_[i]) >= 1):
+            #   # (B) this is a heuristic - if there are just a few test
+            #   # cases (say <5) left undone for this label, and there is at
+            #   # least 1 other machine working on this label, we probably do
+            #   # not want to bother to reimage this dut to help with these 5
+            #   # test cases
+ # continue
+
+ if v == "Y":
+ self.matrix_[i][j] = "_"
+ self._record_allocate_log(i, j)
+ return label
+ if v == " ":
+ label_reimage_number = len(self.label_duts_[i])
+ if (can_label is None) or (
+ label_reimage_number < can_reimage_number
+ or (
+ label_reimage_number == can_reimage_number
+ and pending_br_num >= can_pending_br_num
+ )
+ ):
+ can_reimage_number = label_reimage_number
+ can_i = i
+ can_label = label
+ can_pending_br_num = pending_br_num
+
+ # All labels are marked either '_' (already taken) or 'X' (not
+ # compatible), so return None to notify machine thread to quit.
+ if can_label is None:
+ return None
+
+        # At this point we didn't find any 'Y' for the machine, so we fall
+        # back to the 'min' approach and pick the least-reimaged label.
+ self.matrix_[can_i][j] = "_"
+ self._record_allocate_log(can_i, j)
+ return can_label
+
+ def matrix_vertical_generator(self, col):
+ """Iterate matrix vertically at column 'col'.
+
+ Yield row number i and value at matrix_[i][col].
+ """
+ for i, _ in enumerate(self.labels_):
+ yield i, self.matrix_[i][col]
+
+ def matrix_horizontal_generator(self, row):
+ """Iterate matrix horizontally at row 'row'.
+
+ Yield col number j and value at matrix_[row][j].
+ """
+ for j, _ in enumerate(self.duts_):
+ yield j, self.matrix_[row][j]
+
+ def _compute_initial_allocation_internal(self, level, N):
+        """Try to assign one 'Y' per label with at most N 'Y's per dut."""
+
+ if level == self.n_labels_:
return True
- self.matrix_[level][j] = ' '
- return False
+ for j, v in self.matrix_horizontal_generator(level):
+ if v == " ":
+ # Before we put a 'Y', we check how many Y column 'j' has.
+ # Note y[0] is row idx, y[1] is the cell value.
+ ny = functools.reduce(
+ lambda x, y: x + 1 if (y[1] == "Y") else x,
+ self.matrix_vertical_generator(j),
+ 0,
+ )
+ if ny < N:
+ self.matrix_[level][j] = "Y"
+ if self._compute_initial_allocation_internal(level + 1, N):
+ return True
+ self.matrix_[level][j] = " "
+
+ return False
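
To make the matrix bookkeeping above concrete: 'X' marks a dut a label cannot use, ' ' an allowed pairing, 'Y' a cell chosen by the initial-allocation pass, and '_' a pairing already handed out by allocate(). A small sketch in the spirit of the unit tests that follow, with made-up label and dut names:

from machine_image_manager import MachineImageManager

class FakeLabel(object):
    def __init__(self, name, remotes=None):
        self.name = name
        self.remote = remotes

class FakeDut(object):
    def __init__(self, name):
        self.name = name

labels = [FakeLabel("l1", ["m1", "m2"]), FakeLabel("l2", ["m2"])]
duts = [FakeDut("m1"), FakeDut("m2")]
mim = MachineImageManager(labels, duts)
mim.compute_initial_allocation()
print(mim.matrix_)                 # [['Y', ' '], ['X', 'Y']]
print(mim.allocate(duts[0]).name)  # 'l1'; that cell becomes '_'
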
diff --git a/crosperf/machine_image_manager_unittest.py b/crosperf/machine_image_manager_unittest.py
index fbbca7b6..1ea63b1c 100755
--- a/crosperf/machine_image_manager_unittest.py
+++ b/crosperf/machine_image_manager_unittest.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Copyright 2015 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the MachineImageManager class."""
-from __future__ import print_function
import random
import unittest
@@ -15,251 +14,282 @@ from machine_image_manager import MachineImageManager
class MockLabel(object):
- """Class for generating a mock Label."""
+ """Class for generating a mock Label."""
- def __init__(self, name, remotes=None):
- self.name = name
- self.remote = remotes
+ def __init__(self, name, remotes=None):
+ self.name = name
+ self.remote = remotes
- def __hash__(self):
- """Provide hash function for label.
+ def __hash__(self):
+ """Provide hash function for label.
- This is required because Label object is used inside a dict as key.
- """
- return hash(self.name)
+ This is required because Label object is used inside a dict as key.
+ """
+ return hash(self.name)
- def __eq__(self, other):
- """Provide eq function for label.
+ def __eq__(self, other):
+ """Provide eq function for label.
- This is required because Label object is used inside a dict as key.
- """
- return isinstance(other, MockLabel) and other.name == self.name
+ This is required because Label object is used inside a dict as key.
+ """
+ return isinstance(other, MockLabel) and other.name == self.name
class MockDut(object):
- """Class for creating a mock Device-Under-Test (DUT)."""
+ """Class for creating a mock Device-Under-Test (DUT)."""
- def __init__(self, name, label=None):
- self.name = name
- self.label_ = label
+ def __init__(self, name, label=None):
+ self.name = name
+ self.label_ = label
class MachineImageManagerTester(unittest.TestCase):
- """Class for testing MachineImageManager."""
-
- def gen_duts_by_name(self, *names):
- duts = []
- for n in names:
- duts.append(MockDut(n))
- return duts
-
- def create_labels_and_duts_from_pattern(self, pattern):
- labels = []
- duts = []
- for i, r in enumerate(pattern):
- l = MockLabel('l{}'.format(i), [])
- for j, v in enumerate(r.split()):
- if v == '.':
- l.remote.append('m{}'.format(j))
- if i == 0:
- duts.append(MockDut('m{}'.format(j)))
- labels.append(l)
- return labels, duts
-
- def check_matrix_against_pattern(self, matrix, pattern):
- for i, s in enumerate(pattern):
- for j, v in enumerate(s.split()):
- self.assertTrue(v == '.' and matrix[i][j] == ' ' or v == matrix[i][j])
-
- def pattern_based_test(self, inp, output):
- labels, duts = self.create_labels_and_duts_from_pattern(inp)
- mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.compute_initial_allocation())
- self.check_matrix_against_pattern(mim.matrix_, output)
- return mim
-
- def test_single_dut(self):
- labels = [MockLabel('l1'), MockLabel('l2'), MockLabel('l3')]
- dut = MockDut('m1')
- mim = MachineImageManager(labels, [dut])
- mim.compute_initial_allocation()
- self.assertTrue(mim.matrix_ == [['Y'], ['Y'], ['Y']])
-
- def test_single_label(self):
- labels = [MockLabel('l1')]
- duts = self.gen_duts_by_name('m1', 'm2', 'm3')
- mim = MachineImageManager(labels, duts)
- mim.compute_initial_allocation()
- self.assertTrue(mim.matrix_ == [['Y', 'Y', 'Y']])
-
- def test_case1(self):
- labels = [
- MockLabel('l1', ['m1', 'm2']),
- MockLabel('l2', ['m2', 'm3']),
- MockLabel('l3', ['m1'])
- ]
- duts = [MockDut('m1'), MockDut('m2'), MockDut('m3')]
- mim = MachineImageManager(labels, duts)
- self.assertTrue(
- mim.matrix_ == [[' ', ' ', 'X'], ['X', ' ', ' '], [' ', 'X', 'X']])
- mim.compute_initial_allocation()
- self.assertTrue(
- mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X', 'X']])
-
- def test_case2(self):
- labels = [
- MockLabel('l1', ['m1', 'm2']),
- MockLabel('l2', ['m2', 'm3']),
- MockLabel('l3', ['m1'])
- ]
- duts = [MockDut('m1'), MockDut('m2'), MockDut('m3')]
- mim = MachineImageManager(labels, duts)
- self.assertTrue(
- mim.matrix_ == [[' ', ' ', 'X'], ['X', ' ', ' '], [' ', 'X', 'X']])
- mim.compute_initial_allocation()
- self.assertTrue(
- mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X', 'X']])
-
- def test_case3(self):
- labels = [
- MockLabel('l1', ['m1', 'm2']),
- MockLabel('l2', ['m2', 'm3']),
- MockLabel('l3', ['m1'])
- ]
- duts = [MockDut('m1', labels[0]), MockDut('m2'), MockDut('m3')]
- mim = MachineImageManager(labels, duts)
- mim.compute_initial_allocation()
- self.assertTrue(
- mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X', 'X']])
-
- def test_case4(self):
- labels = [
- MockLabel('l1', ['m1', 'm2']),
- MockLabel('l2', ['m2', 'm3']),
- MockLabel('l3', ['m1'])
- ]
- duts = [MockDut('m1'), MockDut('m2', labels[0]), MockDut('m3')]
- mim = MachineImageManager(labels, duts)
- mim.compute_initial_allocation()
- self.assertTrue(
- mim.matrix_ == [[' ', 'Y', 'X'], ['X', ' ', 'Y'], ['Y', 'X', 'X']])
-
- def test_case5(self):
- labels = [
- MockLabel('l1', ['m3']),
- MockLabel('l2', ['m3']),
- MockLabel('l3', ['m1'])
- ]
- duts = self.gen_duts_by_name('m1', 'm2', 'm3')
- mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.compute_initial_allocation())
- self.assertTrue(
- mim.matrix_ == [['X', 'X', 'Y'], ['X', 'X', 'Y'], ['Y', 'X', 'X']])
-
- def test_2x2_with_allocation(self):
- labels = [MockLabel('l0'), MockLabel('l1')]
- duts = [MockDut('m0'), MockDut('m1')]
- mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.compute_initial_allocation())
- self.assertTrue(mim.allocate(duts[0]) == labels[0])
- self.assertTrue(mim.allocate(duts[0]) == labels[1])
- self.assertTrue(mim.allocate(duts[0]) is None)
- self.assertTrue(mim.matrix_[0][0] == '_')
- self.assertTrue(mim.matrix_[1][0] == '_')
- self.assertTrue(mim.allocate(duts[1]) == labels[1])
-
- def test_10x10_general(self):
- """Gen 10x10 matrix."""
- n = 10
- labels = []
- duts = []
- for i in range(n):
- labels.append(MockLabel('l{}'.format(i)))
- duts.append(MockDut('m{}'.format(i)))
- mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.compute_initial_allocation())
- for i in range(n):
- for j in range(n):
- if i == j:
- self.assertTrue(mim.matrix_[i][j] == 'Y')
- else:
- self.assertTrue(mim.matrix_[i][j] == ' ')
- self.assertTrue(mim.allocate(duts[3]).name == 'l3')
-
- def test_random_generated(self):
- n = 10
- labels = []
- duts = []
- for i in range(10):
- # generate 3-5 machines that is compatible with this label
- l = MockLabel('l{}'.format(i), [])
- r = random.random()
- for _ in range(4):
- t = int(r * 10) % n
- r *= 10
- l.remote.append('m{}'.format(t))
- labels.append(l)
- duts.append(MockDut('m{}'.format(i)))
- mim = MachineImageManager(labels, duts)
- self.assertTrue(mim.compute_initial_allocation())
-
- def test_10x10_fully_random(self):
- inp = [
- 'X . . . X X . X X .', 'X X . X . X . X X .',
- 'X X X . . X . X . X', 'X . X X . . X X . X',
- 'X X X X . . . X . .', 'X X . X . X . . X .',
- '. X . X . X X X . .', '. X . X X . X X . .',
- 'X X . . . X X X . .', '. X X X X . . . . X'
- ]
- output = [
- 'X Y . . X X . X X .', 'X X Y X . X . X X .',
- 'X X X Y . X . X . X', 'X . X X Y . X X . X',
- 'X X X X . Y . X . .', 'X X . X . X Y . X .',
- 'Y X . X . X X X . .', '. X . X X . X X Y .',
- 'X X . . . X X X . Y', '. X X X X . . Y . X'
- ]
- self.pattern_based_test(inp, output)
-
- def test_10x10_fully_random2(self):
- inp = [
- 'X . X . . X . X X X', 'X X X X X X . . X .',
- 'X . X X X X X . . X', 'X X X . X . X X . .',
- '. X . X . X X X X X', 'X X X X X X X . . X',
- 'X . X X X X X . . X', 'X X X . X X X X . .',
- 'X X X . . . X X X X', '. X X . X X X . X X'
- ]
- output = [
- 'X . X Y . X . X X X', 'X X X X X X Y . X .',
- 'X Y X X X X X . . X', 'X X X . X Y X X . .',
- '. X Y X . X X X X X', 'X X X X X X X Y . X',
- 'X . X X X X X . Y X', 'X X X . X X X X . Y',
- 'X X X . Y . X X X X', 'Y X X . X X X . X X'
- ]
- self.pattern_based_test(inp, output)
-
- def test_3x4_with_allocation(self):
- inp = ['X X . .', '. . X .', 'X . X .']
- output = ['X X Y .', 'Y . X .', 'X Y X .']
- mim = self.pattern_based_test(inp, output)
- self.assertTrue(mim.allocate(mim.duts_[2]) == mim.labels_[0])
- self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[2])
- self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[1])
- self.assertTrue(mim.allocate(mim.duts_[1]) == mim.labels_[2])
- self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[1])
- self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[0])
- self.assertTrue(mim.allocate(mim.duts_[3]) is None)
- self.assertTrue(mim.allocate(mim.duts_[2]) is None)
- self.assertTrue(mim.allocate(mim.duts_[1]) == mim.labels_[1])
- self.assertTrue(mim.allocate(mim.duts_[1]) is None)
- self.assertTrue(mim.allocate(mim.duts_[0]) is None)
- self.assertTrue(mim.label_duts_[0] == [2, 3])
- self.assertTrue(mim.label_duts_[1] == [0, 3, 1])
- self.assertTrue(mim.label_duts_[2] == [3, 1])
- self.assertListEqual(mim.allocate_log_, [(0, 2), (2, 3), (1, 0), (2, 1),
- (1, 3), (0, 3), (1, 1)])
-
- def test_cornercase_1(self):
- """This corner case is brought up by Caroline.
+ """Class for testing MachineImageManager."""
+
+ def gen_duts_by_name(self, *names):
+ duts = []
+ for n in names:
+ duts.append(MockDut(n))
+ return duts
+
+ def create_labels_and_duts_from_pattern(self, pattern):
+ labels = []
+ duts = []
+ for i, r in enumerate(pattern):
+ l = MockLabel("l{}".format(i), [])
+ for j, v in enumerate(r.split()):
+ if v == ".":
+ l.remote.append("m{}".format(j))
+ if i == 0:
+ duts.append(MockDut("m{}".format(j)))
+ labels.append(l)
+ return labels, duts
+
+ def check_matrix_against_pattern(self, matrix, pattern):
+ for i, s in enumerate(pattern):
+ for j, v in enumerate(s.split()):
+ self.assertTrue(
+ v == "." and matrix[i][j] == " " or v == matrix[i][j]
+ )
+
+ def pattern_based_test(self, inp, output):
+ labels, duts = self.create_labels_and_duts_from_pattern(inp)
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
+ self.check_matrix_against_pattern(mim.matrix_, output)
+ return mim
+
+ def test_single_dut(self):
+ labels = [MockLabel("l1"), MockLabel("l2"), MockLabel("l3")]
+ dut = MockDut("m1")
+ mim = MachineImageManager(labels, [dut])
+ mim.compute_initial_allocation()
+ self.assertTrue(mim.matrix_ == [["Y"], ["Y"], ["Y"]])
+
+ def test_single_label(self):
+ labels = [MockLabel("l1")]
+ duts = self.gen_duts_by_name("m1", "m2", "m3")
+ mim = MachineImageManager(labels, duts)
+ mim.compute_initial_allocation()
+ self.assertTrue(mim.matrix_ == [["Y", "Y", "Y"]])
+
+ def test_case1(self):
+ labels = [
+ MockLabel("l1", ["m1", "m2"]),
+ MockLabel("l2", ["m2", "m3"]),
+ MockLabel("l3", ["m1"]),
+ ]
+ duts = [MockDut("m1"), MockDut("m2"), MockDut("m3")]
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(
+ mim.matrix_ == [[" ", " ", "X"], ["X", " ", " "], [" ", "X", "X"]]
+ )
+ mim.compute_initial_allocation()
+ self.assertTrue(
+ mim.matrix_ == [[" ", "Y", "X"], ["X", " ", "Y"], ["Y", "X", "X"]]
+ )
+
+ def test_case2(self):
+ labels = [
+ MockLabel("l1", ["m1", "m2"]),
+ MockLabel("l2", ["m2", "m3"]),
+ MockLabel("l3", ["m1"]),
+ ]
+ duts = [MockDut("m1"), MockDut("m2"), MockDut("m3")]
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(
+ mim.matrix_ == [[" ", " ", "X"], ["X", " ", " "], [" ", "X", "X"]]
+ )
+ mim.compute_initial_allocation()
+ self.assertTrue(
+ mim.matrix_ == [[" ", "Y", "X"], ["X", " ", "Y"], ["Y", "X", "X"]]
+ )
+
+ def test_case3(self):
+ labels = [
+ MockLabel("l1", ["m1", "m2"]),
+ MockLabel("l2", ["m2", "m3"]),
+ MockLabel("l3", ["m1"]),
+ ]
+ duts = [MockDut("m1", labels[0]), MockDut("m2"), MockDut("m3")]
+ mim = MachineImageManager(labels, duts)
+ mim.compute_initial_allocation()
+ self.assertTrue(
+ mim.matrix_ == [[" ", "Y", "X"], ["X", " ", "Y"], ["Y", "X", "X"]]
+ )
+
+ def test_case4(self):
+ labels = [
+ MockLabel("l1", ["m1", "m2"]),
+ MockLabel("l2", ["m2", "m3"]),
+ MockLabel("l3", ["m1"]),
+ ]
+ duts = [MockDut("m1"), MockDut("m2", labels[0]), MockDut("m3")]
+ mim = MachineImageManager(labels, duts)
+ mim.compute_initial_allocation()
+ self.assertTrue(
+ mim.matrix_ == [[" ", "Y", "X"], ["X", " ", "Y"], ["Y", "X", "X"]]
+ )
+
+ def test_case5(self):
+ labels = [
+ MockLabel("l1", ["m3"]),
+ MockLabel("l2", ["m3"]),
+ MockLabel("l3", ["m1"]),
+ ]
+ duts = self.gen_duts_by_name("m1", "m2", "m3")
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
+ self.assertTrue(
+ mim.matrix_ == [["X", "X", "Y"], ["X", "X", "Y"], ["Y", "X", "X"]]
+ )
+
+ def test_2x2_with_allocation(self):
+ labels = [MockLabel("l0"), MockLabel("l1")]
+ duts = [MockDut("m0"), MockDut("m1")]
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
+ self.assertTrue(mim.allocate(duts[0]) == labels[0])
+ self.assertTrue(mim.allocate(duts[0]) == labels[1])
+ self.assertTrue(mim.allocate(duts[0]) is None)
+ self.assertTrue(mim.matrix_[0][0] == "_")
+ self.assertTrue(mim.matrix_[1][0] == "_")
+ self.assertTrue(mim.allocate(duts[1]) == labels[1])
+
+ def test_10x10_general(self):
+ """Gen 10x10 matrix."""
+ n = 10
+ labels = []
+ duts = []
+ for i in range(n):
+ labels.append(MockLabel("l{}".format(i)))
+ duts.append(MockDut("m{}".format(i)))
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
+ for i in range(n):
+ for j in range(n):
+ if i == j:
+ self.assertTrue(mim.matrix_[i][j] == "Y")
+ else:
+ self.assertTrue(mim.matrix_[i][j] == " ")
+ self.assertTrue(mim.allocate(duts[3]).name == "l3")
+
+ def test_random_generated(self):
+ n = 10
+ labels = []
+ duts = []
+ for i in range(10):
+            # generate 3-5 machines that are compatible with this label
+ l = MockLabel("l{}".format(i), [])
+ r = random.random()
+ for _ in range(4):
+ t = int(r * 10) % n
+ r *= 10
+ l.remote.append("m{}".format(t))
+ labels.append(l)
+ duts.append(MockDut("m{}".format(i)))
+ mim = MachineImageManager(labels, duts)
+ self.assertTrue(mim.compute_initial_allocation())
+
+ def test_10x10_fully_random(self):
+ inp = [
+ "X . . . X X . X X .",
+ "X X . X . X . X X .",
+ "X X X . . X . X . X",
+ "X . X X . . X X . X",
+ "X X X X . . . X . .",
+ "X X . X . X . . X .",
+ ". X . X . X X X . .",
+ ". X . X X . X X . .",
+ "X X . . . X X X . .",
+ ". X X X X . . . . X",
+ ]
+ output = [
+ "X Y . . X X . X X .",
+ "X X Y X . X . X X .",
+ "X X X Y . X . X . X",
+ "X . X X Y . X X . X",
+ "X X X X . Y . X . .",
+ "X X . X . X Y . X .",
+ "Y X . X . X X X . .",
+ ". X . X X . X X Y .",
+ "X X . . . X X X . Y",
+ ". X X X X . . Y . X",
+ ]
+ self.pattern_based_test(inp, output)
+
+ def test_10x10_fully_random2(self):
+ inp = [
+ "X . X . . X . X X X",
+ "X X X X X X . . X .",
+ "X . X X X X X . . X",
+ "X X X . X . X X . .",
+ ". X . X . X X X X X",
+ "X X X X X X X . . X",
+ "X . X X X X X . . X",
+ "X X X . X X X X . .",
+ "X X X . . . X X X X",
+ ". X X . X X X . X X",
+ ]
+ output = [
+ "X . X Y . X . X X X",
+ "X X X X X X Y . X .",
+ "X Y X X X X X . . X",
+ "X X X . X Y X X . .",
+ ". X Y X . X X X X X",
+ "X X X X X X X Y . X",
+ "X . X X X X X . Y X",
+ "X X X . X X X X . Y",
+ "X X X . Y . X X X X",
+ "Y X X . X X X . X X",
+ ]
+ self.pattern_based_test(inp, output)
+
+ def test_3x4_with_allocation(self):
+ inp = ["X X . .", ". . X .", "X . X ."]
+ output = ["X X Y .", "Y . X .", "X Y X ."]
+ mim = self.pattern_based_test(inp, output)
+ self.assertTrue(mim.allocate(mim.duts_[2]) == mim.labels_[0])
+ self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[2])
+ self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[1])
+ self.assertTrue(mim.allocate(mim.duts_[1]) == mim.labels_[2])
+ self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[1])
+ self.assertTrue(mim.allocate(mim.duts_[3]) == mim.labels_[0])
+ self.assertTrue(mim.allocate(mim.duts_[3]) is None)
+ self.assertTrue(mim.allocate(mim.duts_[2]) is None)
+ self.assertTrue(mim.allocate(mim.duts_[1]) == mim.labels_[1])
+ self.assertTrue(mim.allocate(mim.duts_[1]) is None)
+ self.assertTrue(mim.allocate(mim.duts_[0]) is None)
+ self.assertTrue(mim.label_duts_[0] == [2, 3])
+ self.assertTrue(mim.label_duts_[1] == [0, 3, 1])
+ self.assertTrue(mim.label_duts_[2] == [3, 1])
+ self.assertListEqual(
+ mim.allocate_log_,
+ [(0, 2), (2, 3), (1, 0), (2, 1), (1, 3), (0, 3), (1, 1)],
+ )
+
+ def test_cornercase_1(self):
+ """This corner case is brought up by Caroline.
The description is -
@@ -292,18 +322,18 @@ class MachineImageManagerTester(unittest.TestCase):
l1 Y X X
l2 Y X X
- """
+ """
- inp = ['. X X', '. X X', '. X X']
- output = ['Y X X', 'Y X X', 'Y X X']
- mim = self.pattern_based_test(inp, output)
- self.assertTrue(mim.allocate(mim.duts_[1]) is None)
- self.assertTrue(mim.allocate(mim.duts_[2]) is None)
- self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[0])
- self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[1])
- self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[2])
- self.assertTrue(mim.allocate(mim.duts_[0]) is None)
+ inp = [". X X", ". X X", ". X X"]
+ output = ["Y X X", "Y X X", "Y X X"]
+ mim = self.pattern_based_test(inp, output)
+ self.assertTrue(mim.allocate(mim.duts_[1]) is None)
+ self.assertTrue(mim.allocate(mim.duts_[2]) is None)
+ self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[0])
+ self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[1])
+ self.assertTrue(mim.allocate(mim.duts_[0]) == mim.labels_[2])
+ self.assertTrue(mim.allocate(mim.duts_[0]) is None)
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/crosperf/machine_manager.py b/crosperf/machine_manager.py
index aaf09bf5..ffb0b5e6 100644
--- a/crosperf/machine_manager.py
+++ b/crosperf/machine_manager.py
@@ -1,12 +1,10 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Machine Manager module."""
-from __future__ import division
-from __future__ import print_function
import collections
import hashlib
@@ -17,538 +15,600 @@ import sys
import threading
import time
+from cros_utils import command_executer
+from cros_utils import logger
import file_lock_machine
import image_chromeos
import test_flag
-from cros_utils import command_executer
-from cros_utils import logger
-CHECKSUM_FILE = '/usr/local/osimage_checksum_file'
+
+CHECKSUM_FILE = "/usr/local/osimage_checksum_file"
class BadChecksum(Exception):
- """Raised if all machines for a label don't have the same checksum."""
+ """Raised if all machines for a label don't have the same checksum."""
class BadChecksumString(Exception):
- """Raised if all machines for a label don't have the same checksum string."""
+ """Raised if all machines for a label don't have the same checksum string."""
class MissingLocksDirectory(Exception):
- """Raised when cannot find/access the machine locks directory."""
+ """Raised when cannot find/access the machine locks directory."""
class CrosCommandError(Exception):
- """Raised when an error occurs running command on DUT."""
+ """Raised when an error occurs running command on DUT."""
class CrosMachine(object):
- """The machine class."""
-
- def __init__(self, name, chromeos_root, log_level, cmd_exec=None):
- self.name = name
- self.image = None
- # We relate a dut with a label if we reimage the dut using label or we
- # detect at the very beginning that the dut is running this label.
- self.label = None
- self.checksum = None
- self.locked = False
- self.released_time = time.time()
- self.test_run = None
- self.chromeos_root = chromeos_root
- self.log_level = log_level
- self.cpuinfo = None
- self.machine_id = None
- self.checksum_string = None
- self.meminfo = None
- self.phys_kbytes = None
- self.cooldown_wait_time = 0
- self.ce = cmd_exec or command_executer.GetCommandExecuter(
- log_level=self.log_level)
- self.SetUpChecksumInfo()
-
- def SetUpChecksumInfo(self):
- if not self.IsReachable():
- self.machine_checksum = None
- return
- self._GetMemoryInfo()
- self._GetCPUInfo()
- self._ComputeMachineChecksumString()
- self._GetMachineID()
- self.machine_checksum = self._GetMD5Checksum(self.checksum_string)
- self.machine_id_checksum = self._GetMD5Checksum(self.machine_id)
-
- def IsReachable(self):
- command = 'ls'
- ret = self.ce.CrosRunCommand(
- command, machine=self.name, chromeos_root=self.chromeos_root)
- if ret:
- return False
- return True
-
- def AddCooldownWaitTime(self, wait_time):
- self.cooldown_wait_time += wait_time
-
- def GetCooldownWaitTime(self):
- return self.cooldown_wait_time
-
- def _ParseMemoryInfo(self):
- line = self.meminfo.splitlines()[0]
- usable_kbytes = int(line.split()[1])
- # This code is from src/third_party/test/files/client/bin/base_utils.py
- # usable_kbytes is system's usable DRAM in kbytes,
- # as reported by memtotal() from device /proc/meminfo memtotal
- # after Linux deducts 1.5% to 9.5% for system table overhead
- # Undo the unknown actual deduction by rounding up
- # to next small multiple of a big power-of-two
- # eg 12GB - 5.1% gets rounded back up to 12GB
- mindeduct = 0.005 # 0.5 percent
- maxdeduct = 0.095 # 9.5 percent
- # deduction range 1.5% .. 9.5% supports physical mem sizes
- # 6GB .. 12GB in steps of .5GB
- # 12GB .. 24GB in steps of 1 GB
- # 24GB .. 48GB in steps of 2 GB ...
- # Finer granularity in physical mem sizes would require
- # tighter spread between min and max possible deductions
-
- # increase mem size by at least min deduction, without rounding
- min_kbytes = int(usable_kbytes / (1.0 - mindeduct))
- # increase mem size further by 2**n rounding, by 0..roundKb or more
- round_kbytes = int(usable_kbytes / (1.0 - maxdeduct)) - min_kbytes
- # find least binary roundup 2**n that covers worst-cast roundKb
- mod2n = 1 << int(math.ceil(math.log(round_kbytes, 2)))
- # have round_kbytes <= mod2n < round_kbytes*2
- # round min_kbytes up to next multiple of mod2n
- phys_kbytes = min_kbytes + mod2n - 1
- phys_kbytes -= phys_kbytes % mod2n # clear low bits
- self.phys_kbytes = phys_kbytes
-
- def _GetMemoryInfo(self):
- # TODO yunlian: when the machine in rebooting, it will not return
- # meminfo, the assert does not catch it either
- command = 'cat /proc/meminfo'
- ret, self.meminfo, _ = self.ce.CrosRunCommandWOutput(
- command, machine=self.name, chromeos_root=self.chromeos_root)
- assert ret == 0, 'Could not get meminfo from machine: %s' % self.name
- if ret == 0:
- self._ParseMemoryInfo()
-
- def _GetCPUInfo(self):
- command = 'cat /proc/cpuinfo'
- ret, self.cpuinfo, _ = self.ce.CrosRunCommandWOutput(
- command, machine=self.name, chromeos_root=self.chromeos_root)
- assert ret == 0, 'Could not get cpuinfo from machine: %s' % self.name
-
- def _ComputeMachineChecksumString(self):
- self.checksum_string = ''
- # Some lines from cpuinfo have to be excluded because they are not
- # persistent across DUTs.
- # MHz, BogoMIPS are dynamically changing values.
- # core id, apicid are identifiers assigned on startup
- # and may differ on the same type of machine.
- exclude_lines_list = ['MHz', 'BogoMIPS', 'bogomips', 'core id', 'apicid']
- for line in self.cpuinfo.splitlines():
- if not any(e in line for e in exclude_lines_list):
- self.checksum_string += line
- self.checksum_string += ' ' + str(self.phys_kbytes)
-
- def _GetMD5Checksum(self, ss):
- if ss:
- return hashlib.md5(ss.encode('utf-8')).hexdigest()
- return ''
-
- def _GetMachineID(self):
- command = 'dump_vpd_log --full --stdout'
- _, if_out, _ = self.ce.CrosRunCommandWOutput(
- command, machine=self.name, chromeos_root=self.chromeos_root)
- b = if_out.splitlines()
- a = [l for l in b if 'Product' in l]
- if a:
- self.machine_id = a[0]
- return
- command = 'ifconfig'
- _, if_out, _ = self.ce.CrosRunCommandWOutput(
- command, machine=self.name, chromeos_root=self.chromeos_root)
- b = if_out.splitlines()
- a = [l for l in b if 'HWaddr' in l]
- if a:
- self.machine_id = '_'.join(a)
- return
- a = [l for l in b if 'ether' in l]
- if a:
- self.machine_id = '_'.join(a)
- return
- assert 0, 'Could not get machine_id from machine: %s' % self.name
-
- def __str__(self):
- l = []
- l.append(self.name)
- l.append(str(self.image))
- l.append(str(self.checksum))
- l.append(str(self.locked))
- l.append(str(self.released_time))
- return ', '.join(l)
+ """The machine class."""
+
+ def __init__(self, name, chromeos_root, log_level, cmd_exec=None):
+ self.name = name
+ self.image = None
+        # We relate a dut with a label if we reimage the dut using the label
+        # or we detect at the very beginning that the dut is running this label.
+ self.label = None
+ self.checksum = None
+ self.locked = False
+ self.released_time = time.time()
+ self.test_run = None
+ self.chromeos_root = chromeos_root
+ self.log_level = log_level
+ self.cpuinfo = None
+ self.machine_id = None
+ self.checksum_string = None
+ self.meminfo = None
+ self.phys_kbytes = None
+ self.cooldown_wait_time = 0
+ self.ce = cmd_exec or command_executer.GetCommandExecuter(
+ log_level=self.log_level
+ )
+ self.SetUpChecksumInfo()
+
+ def SetUpChecksumInfo(self):
+ if not self.IsReachable():
+ self.machine_checksum = None
+ return
+ self._GetMemoryInfo()
+ self._GetCPUInfo()
+ self._ComputeMachineChecksumString()
+ self._GetMachineID()
+ self.machine_checksum = self._GetMD5Checksum(self.checksum_string)
+ self.machine_id_checksum = self._GetMD5Checksum(self.machine_id)
+
+ def IsReachable(self):
+ command = "ls"
+ ret = self.ce.CrosRunCommand(
+ command, machine=self.name, chromeos_root=self.chromeos_root
+ )
+ if ret:
+ return False
+ return True
+
+ def AddCooldownWaitTime(self, wait_time):
+ self.cooldown_wait_time += wait_time
+
+ def GetCooldownWaitTime(self):
+ return self.cooldown_wait_time
+
+ def _ParseMemoryInfo(self):
+ line = self.meminfo.splitlines()[0]
+ usable_kbytes = int(line.split()[1])
+ # This code is from src/third_party/test/files/client/bin/base_utils.py
+ # usable_kbytes is system's usable DRAM in kbytes,
+ # as reported by memtotal() from device /proc/meminfo memtotal
+ # after Linux deducts 1.5% to 9.5% for system table overhead
+ # Undo the unknown actual deduction by rounding up
+ # to next small multiple of a big power-of-two
+ # eg 12GB - 5.1% gets rounded back up to 12GB
+ mindeduct = 0.005 # 0.5 percent
+ maxdeduct = 0.095 # 9.5 percent
+ # deduction range 1.5% .. 9.5% supports physical mem sizes
+ # 6GB .. 12GB in steps of .5GB
+ # 12GB .. 24GB in steps of 1 GB
+ # 24GB .. 48GB in steps of 2 GB ...
+ # Finer granularity in physical mem sizes would require
+ # tighter spread between min and max possible deductions
+
+ # increase mem size by at least min deduction, without rounding
+ min_kbytes = int(usable_kbytes / (1.0 - mindeduct))
+ # increase mem size further by 2**n rounding, by 0..roundKb or more
+ round_kbytes = int(usable_kbytes / (1.0 - maxdeduct)) - min_kbytes
+        # find least binary roundup 2**n that covers worst-case roundKb
+ mod2n = 1 << int(math.ceil(math.log(round_kbytes, 2)))
+ # have round_kbytes <= mod2n < round_kbytes*2
+ # round min_kbytes up to next multiple of mod2n
+ phys_kbytes = min_kbytes + mod2n - 1
+ phys_kbytes -= phys_kbytes % mod2n # clear low bits
+ self.phys_kbytes = phys_kbytes
+
+ def _GetMemoryInfo(self):
+        # TODO(yunlian): when the machine is rebooting, it will not return
+        # meminfo, and the assert does not catch it either.
+ command = "cat /proc/meminfo"
+ ret, self.meminfo, _ = self.ce.CrosRunCommandWOutput(
+ command, machine=self.name, chromeos_root=self.chromeos_root
+ )
+ assert ret == 0, "Could not get meminfo from machine: %s" % self.name
+ if ret == 0:
+ self._ParseMemoryInfo()
+
+ def _GetCPUInfo(self):
+ command = "cat /proc/cpuinfo"
+ ret, self.cpuinfo, _ = self.ce.CrosRunCommandWOutput(
+ command, machine=self.name, chromeos_root=self.chromeos_root
+ )
+ assert ret == 0, "Could not get cpuinfo from machine: %s" % self.name
+
+ def _ComputeMachineChecksumString(self):
+ self.checksum_string = ""
+ # Some lines from cpuinfo have to be excluded because they are not
+ # persistent across DUTs.
+ # MHz, BogoMIPS are dynamically changing values.
+ # core id, apicid are identifiers assigned on startup
+ # and may differ on the same type of machine.
+ exclude_lines_list = [
+ "MHz",
+ "BogoMIPS",
+ "bogomips",
+ "core id",
+ "apicid",
+ ]
+ for line in self.cpuinfo.splitlines():
+ if not any(e in line for e in exclude_lines_list):
+ self.checksum_string += line
+ self.checksum_string += " " + str(self.phys_kbytes)
+
+ def _GetMD5Checksum(self, ss):
+ if ss:
+ return hashlib.md5(ss.encode("utf-8")).hexdigest()
+ return ""
+
+ def _GetMachineID(self):
+ command = "dump_vpd_log --full --stdout"
+ _, if_out, _ = self.ce.CrosRunCommandWOutput(
+ command, machine=self.name, chromeos_root=self.chromeos_root
+ )
+ b = if_out.splitlines()
+ a = [l for l in b if "Product" in l]
+ if a:
+ self.machine_id = a[0]
+ return
+ command = "ifconfig"
+ _, if_out, _ = self.ce.CrosRunCommandWOutput(
+ command, machine=self.name, chromeos_root=self.chromeos_root
+ )
+ b = if_out.splitlines()
+ a = [l for l in b if "HWaddr" in l]
+ if a:
+ self.machine_id = "_".join(a)
+ return
+ a = [l for l in b if "ether" in l]
+ if a:
+ self.machine_id = "_".join(a)
+ return
+ assert 0, "Could not get machine_id from machine: %s" % self.name
+
+ def __str__(self):
+ l = []
+ l.append(self.name)
+ l.append(str(self.image))
+ l.append(str(self.checksum))
+ l.append(str(self.locked))
+ l.append(str(self.released_time))
+ return ", ".join(l)
class MachineManager(object):
- """Lock, image and unlock machines locally for benchmark runs.
-
- This class contains methods and calls to lock, unlock and image
- machines and distribute machines to each benchmark run. The assumption is
- that all of the machines for the experiment have been globally locked
- in the ExperimentRunner, but the machines still need to be locally
- locked/unlocked (allocated to benchmark runs) to prevent multiple benchmark
- runs within the same experiment from trying to use the same machine at the
- same time.
- """
-
- def __init__(self,
- chromeos_root,
- acquire_timeout,
- log_level,
- locks_dir,
- cmd_exec=None,
- lgr=None):
- self._lock = threading.RLock()
- self._all_machines = []
- self._machines = []
- self.image_lock = threading.Lock()
- self.num_reimages = 0
- self.chromeos_root = None
- self.machine_checksum = {}
- self.machine_checksum_string = {}
- self.acquire_timeout = acquire_timeout
- self.log_level = log_level
- self.locks_dir = locks_dir
- self.ce = cmd_exec or command_executer.GetCommandExecuter(
- log_level=self.log_level)
- self.logger = lgr or logger.GetLogger()
-
- if self.locks_dir and not os.path.isdir(self.locks_dir):
- raise MissingLocksDirectory('Cannot access locks directory: %s' %
- self.locks_dir)
-
- self._initialized_machines = []
- self.chromeos_root = chromeos_root
-
- def RemoveNonLockedMachines(self, locked_machines):
- for m in self._all_machines:
- if m.name not in locked_machines:
- self._all_machines.remove(m)
-
- for m in self._machines:
- if m.name not in locked_machines:
- self._machines.remove(m)
-
- def GetChromeVersion(self, machine):
- """Get the version of Chrome running on the DUT."""
-
- cmd = '/opt/google/chrome/chrome --version'
- ret, version, _ = self.ce.CrosRunCommandWOutput(
- cmd, machine=machine.name, chromeos_root=self.chromeos_root)
- if ret != 0:
- raise CrosCommandError("Couldn't get Chrome version from %s." %
- machine.name)
-
- if ret != 0:
- version = ''
- return version.rstrip()
-
- def ImageMachine(self, machine, label):
- checksum = label.checksum
-
- if checksum and (machine.checksum == checksum):
- return
- chromeos_root = label.chromeos_root
- if not chromeos_root:
- chromeos_root = self.chromeos_root
- image_chromeos_args = [
- image_chromeos.__file__, '--no_lock',
- '--chromeos_root=%s' % chromeos_root,
- '--image=%s' % label.chromeos_image,
- '--image_args=%s' % label.image_args,
- '--remote=%s' % machine.name,
- '--logging_level=%s' % self.log_level
- ]
- if label.board:
- image_chromeos_args.append('--board=%s' % label.board)
-
- # Currently can't image two machines at once.
- # So have to serialized on this lock.
- save_ce_log_level = self.ce.log_level
- if self.log_level != 'verbose':
- self.ce.log_level = 'average'
-
- with self.image_lock:
- if self.log_level != 'verbose':
- self.logger.LogOutput('Pushing image onto machine.')
- self.logger.LogOutput('Running image_chromeos.DoImage with %s' %
- ' '.join(image_chromeos_args))
- retval = 0
- if not test_flag.GetTestMode():
- retval = image_chromeos.DoImage(image_chromeos_args)
- if retval:
- cmd = 'reboot && exit'
- if self.log_level != 'verbose':
- self.logger.LogOutput('reboot & exit.')
- self.ce.CrosRunCommand(
- cmd, machine=machine.name, chromeos_root=self.chromeos_root)
- time.sleep(60)
- if self.log_level != 'verbose':
- self.logger.LogOutput('Pushing image onto machine.')
- self.logger.LogOutput('Running image_chromeos.DoImage with %s' %
- ' '.join(image_chromeos_args))
- retval = image_chromeos.DoImage(image_chromeos_args)
- if retval:
- raise RuntimeError("Could not image machine: '%s'." % machine.name)
-
- self.num_reimages += 1
- machine.checksum = checksum
- machine.image = label.chromeos_image
- machine.label = label
-
- if not label.chrome_version:
- label.chrome_version = self.GetChromeVersion(machine)
-
- self.ce.log_level = save_ce_log_level
- return retval
-
- def ComputeCommonCheckSum(self, label):
- # Since this is used for cache lookups before the machines have been
- # compared/verified, check here to make sure they all have the same
- # checksum (otherwise the cache lookup may not be valid).
- base = None
- for machine in self.GetMachines(label):
- # Make sure the machine's checksums are calculated.
- if not machine.machine_checksum:
- machine.SetUpChecksumInfo()
- # Use the first machine as the basis for comparison.
- if not base:
- base = machine
- # Make sure this machine's checksum matches our 'common' checksum.
- if base.machine_checksum != machine.machine_checksum:
- # Found a difference. Fatal error.
- # Extract non-matching part and report it.
- for mismatch_index in range(len(base.checksum_string)):
- if (mismatch_index >= len(machine.checksum_string) or
- base.checksum_string[mismatch_index] !=
- machine.checksum_string[mismatch_index]):
- break
- # We want to show some context after the mismatch.
- end_ind = mismatch_index + 8
- # Print a mismatching string.
- raise BadChecksum(
- 'Machine checksums do not match!\n'
- 'Diff:\n'
- f'{base.name}: {base.checksum_string[:end_ind]}\n'
- f'{machine.name}: {machine.checksum_string[:end_ind]}\n'
- '\nCheck for matching /proc/cpuinfo and /proc/meminfo on DUTs.\n')
- self.machine_checksum[label.name] = base.machine_checksum
-
- def ComputeCommonCheckSumString(self, label):
- # The assumption is that this function is only called AFTER
- # ComputeCommonCheckSum, so there is no need to verify the machines
- # are the same here. If this is ever changed, this function should be
- # modified to verify that all the machines for a given label are the
- # same.
- for machine in self.GetMachines(label):
- if machine.checksum_string:
- self.machine_checksum_string[label.name] = machine.checksum_string
- break
-
- def _TryToLockMachine(self, cros_machine):
- with self._lock:
- assert cros_machine, "Machine can't be None"
- for m in self._machines:
- if m.name == cros_machine.name:
- return
- locked = True
- if self.locks_dir:
- locked = file_lock_machine.Machine(cros_machine.name,
- self.locks_dir).Lock(
- True, sys.argv[0])
- if locked:
- self._machines.append(cros_machine)
- command = 'cat %s' % CHECKSUM_FILE
- ret, out, _ = self.ce.CrosRunCommandWOutput(
- command,
- chromeos_root=self.chromeos_root,
- machine=cros_machine.name)
- if ret == 0:
- cros_machine.checksum = out.strip()
- elif self.locks_dir:
- self.logger.LogOutput("Couldn't lock: %s" % cros_machine.name)
-
- # This is called from single threaded mode.
- def AddMachine(self, machine_name):
- with self._lock:
- for m in self._all_machines:
- assert m.name != machine_name, 'Tried to double-add %s' % machine_name
-
- if self.log_level != 'verbose':
- self.logger.LogOutput('Setting up remote access to %s' % machine_name)
- self.logger.LogOutput('Checking machine characteristics for %s' %
- machine_name)
- cm = CrosMachine(machine_name, self.chromeos_root, self.log_level)
- if cm.machine_checksum:
- self._all_machines.append(cm)
-
- def RemoveMachine(self, machine_name):
- with self._lock:
- self._machines = [m for m in self._machines if m.name != machine_name]
- if self.locks_dir:
- res = file_lock_machine.Machine(machine_name,
- self.locks_dir).Unlock(True)
- if not res:
- self.logger.LogError("Could not unlock machine: '%s'." % machine_name)
-
- def ForceSameImageToAllMachines(self, label):
- machines = self.GetMachines(label)
- for m in machines:
- self.ImageMachine(m, label)
- m.SetUpChecksumInfo()
-
- def AcquireMachine(self, label):
- image_checksum = label.checksum
- machines = self.GetMachines(label)
- check_interval_time = 120
- with self._lock:
- # Lazily external lock machines
- while self.acquire_timeout >= 0:
+ """Lock, image and unlock machines locally for benchmark runs.
+
+ This class contains methods and calls to lock, unlock and image
+ machines and distribute machines to each benchmark run. The assumption is
+ that all of the machines for the experiment have been globally locked
+ in the ExperimentRunner, but the machines still need to be locally
+ locked/unlocked (allocated to benchmark runs) to prevent multiple benchmark
+ runs within the same experiment from trying to use the same machine at the
+ same time.
+ """
+
+ def __init__(
+ self,
+ chromeos_root,
+ acquire_timeout,
+ log_level,
+ locks_dir,
+ cmd_exec=None,
+ lgr=None,
+ ):
+ self._lock = threading.RLock()
+ self._all_machines = []
+ self._machines = []
+ self.image_lock = threading.Lock()
+ self.num_reimages = 0
+ self.chromeos_root = None
+ self.machine_checksum = {}
+ self.machine_checksum_string = {}
+ self.acquire_timeout = acquire_timeout
+ self.log_level = log_level
+ self.locks_dir = locks_dir
+ self.ce = cmd_exec or command_executer.GetCommandExecuter(
+ log_level=self.log_level
+ )
+ self.logger = lgr or logger.GetLogger()
+
+ if self.locks_dir and not os.path.isdir(self.locks_dir):
+ raise MissingLocksDirectory(
+ "Cannot access locks directory: %s" % self.locks_dir
+ )
+
+ self._initialized_machines = []
+ self.chromeos_root = chromeos_root
+
+ def RemoveNonLockedMachines(self, locked_machines):
+ for m in self._all_machines:
+ if m.name not in locked_machines:
+ self._all_machines.remove(m)
+
+ for m in self._machines:
+ if m.name not in locked_machines:
+ self._machines.remove(m)
+
+ def GetChromeVersion(self, machine):
+ """Get the version of Chrome running on the DUT."""
+
+ cmd = "/opt/google/chrome/chrome --version"
+ ret, version, _ = self.ce.CrosRunCommandWOutput(
+ cmd, machine=machine.name, chromeos_root=self.chromeos_root
+ )
+ if ret != 0:
+ raise CrosCommandError(
+ "Couldn't get Chrome version from %s." % machine.name
+ )
+
+ if ret != 0:
+ version = ""
+ return version.rstrip()
+
+ def ImageMachine(self, machine, label):
+ checksum = label.checksum
+
+ if checksum and (machine.checksum == checksum):
+ return
+ chromeos_root = label.chromeos_root
+ if not chromeos_root:
+ chromeos_root = self.chromeos_root
+ image_chromeos_args = [
+ image_chromeos.__file__,
+ "--no_lock",
+ "--chromeos_root=%s" % chromeos_root,
+ "--image=%s" % label.chromeos_image,
+ "--image_args=%s" % label.image_args,
+ "--remote=%s" % machine.name,
+ "--logging_level=%s" % self.log_level,
+ ]
+ if label.board:
+ image_chromeos_args.append("--board=%s" % label.board)
+
+ # Currently can't image two machines at once.
+        # So we have to serialize on this lock.
+ save_ce_log_level = self.ce.log_level
+ if self.log_level != "verbose":
+ self.ce.log_level = "average"
+
+ with self.image_lock:
+ if self.log_level != "verbose":
+ self.logger.LogOutput("Pushing image onto machine.")
+ self.logger.LogOutput(
+ "Running image_chromeos.DoImage with %s"
+ % " ".join(image_chromeos_args)
+ )
+ retval = 0
+ if not test_flag.GetTestMode():
+ retval = image_chromeos.DoImage(image_chromeos_args)
+ if retval:
+ cmd = "reboot && exit"
+ if self.log_level != "verbose":
+ self.logger.LogOutput("reboot & exit.")
+ self.ce.CrosRunCommand(
+ cmd, machine=machine.name, chromeos_root=self.chromeos_root
+ )
+ time.sleep(60)
+ if self.log_level != "verbose":
+ self.logger.LogOutput("Pushing image onto machine.")
+ self.logger.LogOutput(
+ "Running image_chromeos.DoImage with %s"
+ % " ".join(image_chromeos_args)
+ )
+ retval = image_chromeos.DoImage(image_chromeos_args)
+ if retval:
+ raise RuntimeError(
+ "Could not image machine: '%s'." % machine.name
+ )
+
+ self.num_reimages += 1
+ machine.checksum = checksum
+ machine.image = label.chromeos_image
+ machine.label = label
+
+ if not label.chrome_version:
+ label.chrome_version = self.GetChromeVersion(machine)
+
+ self.ce.log_level = save_ce_log_level
+ return retval
+
+ def ComputeCommonCheckSum(self, label):
+ # Since this is used for cache lookups before the machines have been
+ # compared/verified, check here to make sure they all have the same
+ # checksum (otherwise the cache lookup may not be valid).
+ base = None
+ for machine in self.GetMachines(label):
+ # Make sure the machine's checksums are calculated.
+ if not machine.machine_checksum:
+ machine.SetUpChecksumInfo()
+ # Use the first machine as the basis for comparison.
+ if not base:
+ base = machine
+ # Make sure this machine's checksum matches our 'common' checksum.
+ if base.machine_checksum != machine.machine_checksum:
+ # Found a difference. Fatal error.
+ # Extract non-matching part and report it.
+ for mismatch_index in range(len(base.checksum_string)):
+ if (
+ mismatch_index >= len(machine.checksum_string)
+ or base.checksum_string[mismatch_index]
+ != machine.checksum_string[mismatch_index]
+ ):
+ break
+ # We want to show some context after the mismatch.
+ end_ind = mismatch_index + 8
+                # Raise with the mismatching prefixes for context.
+ raise BadChecksum(
+ "Machine checksums do not match!\n"
+ "Diff:\n"
+ f"{base.name}: {base.checksum_string[:end_ind]}\n"
+ f"{machine.name}: {machine.checksum_string[:end_ind]}\n"
+ "\nCheck for matching /proc/cpuinfo and /proc/meminfo on DUTs.\n"
+ )
+ self.machine_checksum[label.name] = base.machine_checksum
+
+ def ComputeCommonCheckSumString(self, label):
+ # The assumption is that this function is only called AFTER
+ # ComputeCommonCheckSum, so there is no need to verify the machines
+ # are the same here. If this is ever changed, this function should be
+ # modified to verify that all the machines for a given label are the
+ # same.
+ for machine in self.GetMachines(label):
+ if machine.checksum_string:
+ self.machine_checksum_string[
+ label.name
+ ] = machine.checksum_string
+ break
+
+ def _TryToLockMachine(self, cros_machine):
+ with self._lock:
+ assert cros_machine, "Machine can't be None"
+ for m in self._machines:
+ if m.name == cros_machine.name:
+ return
+ locked = True
+ if self.locks_dir:
+ locked = file_lock_machine.Machine(
+ cros_machine.name, self.locks_dir
+ ).Lock(True, sys.argv[0])
+ if locked:
+ self._machines.append(cros_machine)
+ command = "cat %s" % CHECKSUM_FILE
+ ret, out, _ = self.ce.CrosRunCommandWOutput(
+ command,
+ chromeos_root=self.chromeos_root,
+ machine=cros_machine.name,
+ )
+ if ret == 0:
+ cros_machine.checksum = out.strip()
+ elif self.locks_dir:
+ self.logger.LogOutput("Couldn't lock: %s" % cros_machine.name)
+
+ # This is called from single threaded mode.
+ def AddMachine(self, machine_name):
+ with self._lock:
+ for m in self._all_machines:
+ assert m.name != machine_name, (
+ "Tried to double-add %s" % machine_name
+ )
+
+ if self.log_level != "verbose":
+ self.logger.LogOutput(
+ "Setting up remote access to %s" % machine_name
+ )
+ self.logger.LogOutput(
+ "Checking machine characteristics for %s" % machine_name
+ )
+ cm = CrosMachine(machine_name, self.chromeos_root, self.log_level)
+ if cm.machine_checksum:
+ self._all_machines.append(cm)
+
+ def RemoveMachine(self, machine_name):
+ with self._lock:
+ self._machines = [
+ m for m in self._machines if m.name != machine_name
+ ]
+ if self.locks_dir:
+ res = file_lock_machine.Machine(
+ machine_name, self.locks_dir
+ ).Unlock(True)
+ if not res:
+ self.logger.LogError(
+ "Could not unlock machine: '%s'." % machine_name
+ )
+
+ def ForceSameImageToAllMachines(self, label):
+ machines = self.GetMachines(label)
for m in machines:
- new_machine = m not in self._all_machines
- self._TryToLockMachine(m)
- if new_machine:
- m.released_time = time.time()
- if self.GetAvailableMachines(label):
- break
- sleep_time = max(1, min(self.acquire_timeout, check_interval_time))
- time.sleep(sleep_time)
- self.acquire_timeout -= sleep_time
-
- if self.acquire_timeout < 0:
- self.logger.LogFatal('Could not acquire any of the '
- "following machines: '%s'" %
- ', '.join(machine.name for machine in machines))
-
-
-### for m in self._machines:
-### if (m.locked and time.time() - m.released_time < 10 and
-### m.checksum == image_checksum):
-### return None
- unlocked_machines = [
- machine for machine in self.GetAvailableMachines(label)
- if not machine.locked
- ]
- for m in unlocked_machines:
- if image_checksum and m.checksum == image_checksum:
- m.locked = True
- m.test_run = threading.current_thread()
- return m
- for m in unlocked_machines:
- if not m.checksum:
- m.locked = True
- m.test_run = threading.current_thread()
- return m
- # This logic ensures that threads waiting on a machine will get a machine
- # with a checksum equal to their image over other threads. This saves time
- # when crosperf initially assigns the machines to threads by minimizing
- # the number of re-images.
- # TODO(asharif): If we centralize the thread-scheduler, we wont need this
- # code and can implement minimal reimaging code more cleanly.
- for m in unlocked_machines:
- if time.time() - m.released_time > 15:
- # The release time gap is too large, so it is probably in the start
- # stage, we need to reset the released_time.
- m.released_time = time.time()
- elif time.time() - m.released_time > 8:
- m.locked = True
- m.test_run = threading.current_thread()
- return m
- return None
-
- def GetAvailableMachines(self, label=None):
- if not label:
- return self._machines
- return [m for m in self._machines if m.name in label.remote]
-
- def GetMachines(self, label=None):
- if not label:
- return self._all_machines
- return [m for m in self._all_machines if m.name in label.remote]
-
- def ReleaseMachine(self, machine):
- with self._lock:
- for m in self._machines:
- if machine.name == m.name:
- assert m.locked, 'Tried to double-release %s' % m.name
- m.released_time = time.time()
- m.locked = False
- m.status = 'Available'
- break
-
- def Cleanup(self):
- with self._lock:
- # Unlock all machines (via file lock)
- for m in self._machines:
- res = file_lock_machine.Machine(m.name, self.locks_dir).Unlock(True)
-
- if not res:
- self.logger.LogError("Could not unlock machine: '%s'." % m.name)
-
- def __str__(self):
- with self._lock:
- l = ['MachineManager Status:'] + [str(m) for m in self._machines]
- return '\n'.join(l)
-
- def AsString(self):
- with self._lock:
- stringify_fmt = '%-30s %-10s %-4s %-25s %-32s'
- header = stringify_fmt % ('Machine', 'Thread', 'Lock', 'Status',
- 'Checksum')
- table = [header]
- for m in self._machines:
- if m.test_run:
- test_name = m.test_run.name
- test_status = m.test_run.timeline.GetLastEvent()
- else:
- test_name = ''
- test_status = ''
-
- try:
- machine_string = stringify_fmt % (m.name, test_name, m.locked,
- test_status, m.checksum)
- except ValueError:
- machine_string = ''
- table.append(machine_string)
- return 'Machine Status:\n%s' % '\n'.join(table)
-
- def GetAllCPUInfo(self, labels):
- """Get cpuinfo for labels, merge them if their cpuinfo are the same."""
- dic = collections.defaultdict(list)
- for label in labels:
- for machine in self._all_machines:
- if machine.name in label.remote:
- dic[machine.cpuinfo].append(label.name)
- break
- output_segs = []
- for key, v in dic.items():
- output = ' '.join(v)
- output += '\n-------------------\n'
- output += key
- output += '\n\n\n'
- output_segs.append(output)
- return ''.join(output_segs)
-
- def GetAllMachines(self):
- return self._all_machines
+ self.ImageMachine(m, label)
+ m.SetUpChecksumInfo()
+
+ def AcquireMachine(self, label):
+ image_checksum = label.checksum
+ machines = self.GetMachines(label)
+ check_interval_time = 120
+ with self._lock:
+            # Lazily take the external locks on machines.
+ while self.acquire_timeout >= 0:
+ for m in machines:
+ new_machine = m not in self._all_machines
+ self._TryToLockMachine(m)
+ if new_machine:
+ m.released_time = time.time()
+ if self.GetAvailableMachines(label):
+ break
+ sleep_time = max(
+ 1, min(self.acquire_timeout, check_interval_time)
+ )
+ time.sleep(sleep_time)
+ self.acquire_timeout -= sleep_time
+
+ if self.acquire_timeout < 0:
+ self.logger.LogFatal(
+ "Could not acquire any of the "
+ "following machines: '%s'"
+ % ", ".join(machine.name for machine in machines)
+ )
+
+ ### for m in self._machines:
+ ### if (m.locked and time.time() - m.released_time < 10 and
+ ### m.checksum == image_checksum):
+ ### return None
+ unlocked_machines = [
+ machine
+ for machine in self.GetAvailableMachines(label)
+ if not machine.locked
+ ]
+ for m in unlocked_machines:
+ if image_checksum and m.checksum == image_checksum:
+ m.locked = True
+ m.test_run = threading.current_thread()
+ return m
+ for m in unlocked_machines:
+ if not m.checksum:
+ m.locked = True
+ m.test_run = threading.current_thread()
+ return m
+            # This logic ensures that a waiting thread preferentially gets a
+            # machine whose checksum already matches its image. This saves time
+ # when crosperf initially assigns the machines to threads by minimizing
+ # the number of re-images.
+            # TODO(asharif): If we centralize the thread-scheduler, we won't need this
+ # code and can implement minimal reimaging code more cleanly.
+ for m in unlocked_machines:
+ if time.time() - m.released_time > 15:
+                    # The release time gap is too large, so this machine is
+                    # probably still in the startup stage; reset released_time.
+ m.released_time = time.time()
+ elif time.time() - m.released_time > 8:
+ m.locked = True
+ m.test_run = threading.current_thread()
+ return m
+ return None
+
+ def GetAvailableMachines(self, label=None):
+ if not label:
+ return self._machines
+ return [m for m in self._machines if m.name in label.remote]
+
+ def GetMachines(self, label=None):
+ if not label:
+ return self._all_machines
+ return [m for m in self._all_machines if m.name in label.remote]
+
+ def ReleaseMachine(self, machine):
+ with self._lock:
+ for m in self._machines:
+ if machine.name == m.name:
+ assert m.locked, "Tried to double-release %s" % m.name
+ m.released_time = time.time()
+ m.locked = False
+ m.status = "Available"
+ break
+
+ def Cleanup(self):
+ with self._lock:
+ # Unlock all machines (via file lock)
+ for m in self._machines:
+ res = file_lock_machine.Machine(m.name, self.locks_dir).Unlock(
+ True
+ )
+
+ if not res:
+ self.logger.LogError(
+ "Could not unlock machine: '%s'." % m.name
+ )
+
+ def __str__(self):
+ with self._lock:
+ l = ["MachineManager Status:"] + [str(m) for m in self._machines]
+ return "\n".join(l)
+
+ def AsString(self):
+ with self._lock:
+ stringify_fmt = "%-30s %-10s %-4s %-25s %-32s"
+ header = stringify_fmt % (
+ "Machine",
+ "Thread",
+ "Lock",
+ "Status",
+ "Checksum",
+ )
+ table = [header]
+ for m in self._machines:
+ if m.test_run:
+ test_name = m.test_run.name
+ test_status = m.test_run.timeline.GetLastEvent()
+ else:
+ test_name = ""
+ test_status = ""
+
+ try:
+ machine_string = stringify_fmt % (
+ m.name,
+ test_name,
+ m.locked,
+ test_status,
+ m.checksum,
+ )
+ except ValueError:
+ machine_string = ""
+ table.append(machine_string)
+ return "Machine Status:\n%s" % "\n".join(table)
+
+ def GetAllCPUInfo(self, labels):
+ """Get cpuinfo for labels, merge them if their cpuinfo are the same."""
+ dic = collections.defaultdict(list)
+ for label in labels:
+ for machine in self._all_machines:
+ if machine.name in label.remote:
+ dic[machine.cpuinfo].append(label.name)
+ break
+ output_segs = []
+ for key, v in dic.items():
+ output = " ".join(v)
+ output += "\n-------------------\n"
+ output += key
+ output += "\n\n\n"
+ output_segs.append(output)
+ return "".join(output_segs)
+
+ def GetAllMachines(self):
+ return self._all_machines
class MockCrosMachine(CrosMachine):
- """Mock cros machine class."""
- # pylint: disable=super-init-not-called
+ """Mock cros machine class."""
+
+ # pylint: disable=super-init-not-called
- MEMINFO_STRING = """MemTotal: 3990332 kB
+ MEMINFO_STRING = """MemTotal: 3990332 kB
MemFree: 2608396 kB
Buffers: 147168 kB
Cached: 811560 kB
@@ -585,7 +645,7 @@ DirectMap4k: 45824 kB
DirectMap2M: 4096000 kB
"""
- CPUINFO_STRING = """processor: 0
+ CPUINFO_STRING = """processor: 0
vendor_id: GenuineIntel
cpu family: 6
model: 42
@@ -638,91 +698,97 @@ address sizes: 36 bits physical, 48 bits virtual
power management:
"""
- def __init__(self, name, chromeos_root, log_level):
- self.name = name
- self.image = None
- self.checksum = None
- self.locked = False
- self.released_time = time.time()
- self.test_run = None
- self.chromeos_root = chromeos_root
- self.checksum_string = re.sub(r'\d', '', name)
- # In test, we assume "lumpy1", "lumpy2" are the same machine.
- self.machine_checksum = self._GetMD5Checksum(self.checksum_string)
- self.log_level = log_level
- self.label = None
- self.cooldown_wait_time = 0
- self.ce = command_executer.GetCommandExecuter(log_level=self.log_level)
- self._GetCPUInfo()
-
- def IsReachable(self):
- return True
-
- def _GetMemoryInfo(self):
- self.meminfo = self.MEMINFO_STRING
- self._ParseMemoryInfo()
-
- def _GetCPUInfo(self):
- self.cpuinfo = self.CPUINFO_STRING
+ def __init__(self, name, chromeos_root, log_level):
+ self.name = name
+ self.image = None
+ self.checksum = None
+ self.locked = False
+ self.released_time = time.time()
+ self.test_run = None
+ self.chromeos_root = chromeos_root
+ self.checksum_string = re.sub(r"\d", "", name)
+ # In test, we assume "lumpy1", "lumpy2" are the same machine.
+ self.machine_checksum = self._GetMD5Checksum(self.checksum_string)
+ self.log_level = log_level
+ self.label = None
+ self.cooldown_wait_time = 0
+ self.ce = command_executer.GetCommandExecuter(log_level=self.log_level)
+ self._GetCPUInfo()
+
+ def IsReachable(self):
+ return True
+
+ def _GetMemoryInfo(self):
+ self.meminfo = self.MEMINFO_STRING
+ self._ParseMemoryInfo()
+
+ def _GetCPUInfo(self):
+ self.cpuinfo = self.CPUINFO_STRING
class MockMachineManager(MachineManager):
- """Mock machine manager class."""
-
- def __init__(self, chromeos_root, acquire_timeout, log_level, locks_dir):
- super(MockMachineManager, self).__init__(chromeos_root, acquire_timeout,
- log_level, locks_dir)
-
- def _TryToLockMachine(self, cros_machine):
- self._machines.append(cros_machine)
- cros_machine.checksum = ''
-
- def AddMachine(self, machine_name):
- with self._lock:
- for m in self._all_machines:
- assert m.name != machine_name, 'Tried to double-add %s' % machine_name
- cm = MockCrosMachine(machine_name, self.chromeos_root, self.log_level)
- assert cm.machine_checksum, ('Could not find checksum for machine %s' %
- machine_name)
- # In Original MachineManager, the test is 'if cm.machine_checksum:' - if a
- # machine is unreachable, then its machine_checksum is None. Here we
- # cannot do this, because machine_checksum is always faked, so we directly
- # test cm.IsReachable, which is properly mocked.
- if cm.IsReachable():
- self._all_machines.append(cm)
-
- def GetChromeVersion(self, machine):
- return 'Mock Chrome Version R50'
-
- def AcquireMachine(self, label):
- for machine in self._all_machines:
- if not machine.locked:
- machine.locked = True
- return machine
- return None
-
- def ImageMachine(self, machine, label):
- if machine or label:
- return 0
- return 1
-
- def ReleaseMachine(self, machine):
- machine.locked = False
-
- def GetMachines(self, label=None):
- return self._all_machines
-
- def GetAvailableMachines(self, label=None):
- return self._all_machines
-
- def ForceSameImageToAllMachines(self, label=None):
- return 0
-
- def ComputeCommonCheckSum(self, label=None):
- common_checksum = 12345
- for machine in self.GetMachines(label):
- machine.machine_checksum = common_checksum
- self.machine_checksum[label.name] = common_checksum
-
- def GetAllMachines(self):
- return self._all_machines
+ """Mock machine manager class."""
+
+ def __init__(self, chromeos_root, acquire_timeout, log_level, locks_dir):
+ super(MockMachineManager, self).__init__(
+ chromeos_root, acquire_timeout, log_level, locks_dir
+ )
+
+ def _TryToLockMachine(self, cros_machine):
+ self._machines.append(cros_machine)
+ cros_machine.checksum = ""
+
+ def AddMachine(self, machine_name):
+ with self._lock:
+ for m in self._all_machines:
+ assert m.name != machine_name, (
+ "Tried to double-add %s" % machine_name
+ )
+ cm = MockCrosMachine(
+ machine_name, self.chromeos_root, self.log_level
+ )
+ assert cm.machine_checksum, (
+ "Could not find checksum for machine %s" % machine_name
+ )
+            # In the original MachineManager, the test is 'if cm.machine_checksum:' - if a
+ # machine is unreachable, then its machine_checksum is None. Here we
+ # cannot do this, because machine_checksum is always faked, so we directly
+ # test cm.IsReachable, which is properly mocked.
+ if cm.IsReachable():
+ self._all_machines.append(cm)
+
+ def GetChromeVersion(self, machine):
+ return "Mock Chrome Version R50"
+
+ def AcquireMachine(self, label):
+ for machine in self._all_machines:
+ if not machine.locked:
+ machine.locked = True
+ return machine
+ return None
+
+ def ImageMachine(self, machine, label):
+ if machine or label:
+ return 0
+ return 1
+
+ def ReleaseMachine(self, machine):
+ machine.locked = False
+
+ def GetMachines(self, label=None):
+ return self._all_machines
+
+ def GetAvailableMachines(self, label=None):
+ return self._all_machines
+
+ def ForceSameImageToAllMachines(self, label=None):
+ return 0
+
+ def ComputeCommonCheckSum(self, label=None):
+ common_checksum = 12345
+ for machine in self.GetMachines(label):
+ machine.machine_checksum = common_checksum
+ self.machine_checksum[label.name] = common_checksum
+
+ def GetAllMachines(self):
+ return self._all_machines
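
For readers of the re-indented CrosMachine._ParseMemoryInfo above, the memory rounding it performs can be exercised in isolation. The sketch below is not part of the patch; it merely restates that arithmetic under the same 0.5%/9.5% deduction bounds. Fed the MemTotal value from the mock meminfo in this module (3990332 kB), it rounds up to 4194304 kB, i.e. 4 GiB.

# Standalone restatement of the rounding logic in CrosMachine._ParseMemoryInfo;
# illustrative only, using the same deduction bounds as the code above.
import math


def round_up_phys_kbytes(usable_kbytes):
    mindeduct = 0.005  # minimum overhead deduction to undo (0.5%)
    maxdeduct = 0.095  # maximum overhead deduction to cover (9.5%)
    # Undo at least the minimum deduction, without rounding.
    min_kbytes = int(usable_kbytes / (1.0 - mindeduct))
    # Then round up by the smallest power of two that covers the
    # worst-case remaining deduction.
    round_kbytes = int(usable_kbytes / (1.0 - maxdeduct)) - min_kbytes
    mod2n = 1 << int(math.ceil(math.log(round_kbytes, 2)))
    phys_kbytes = min_kbytes + mod2n - 1
    phys_kbytes -= phys_kbytes % mod2n  # clear low bits
    return phys_kbytes


if __name__ == "__main__":
    # MemTotal from MockCrosMachine.MEMINFO_STRING.
    print(round_up_phys_kbytes(3990332))  # -> 4194304 kB (4 GiB)
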
diff --git a/crosperf/machine_manager_unittest.py b/crosperf/machine_manager_unittest.py
index f47cc881..6324a227 100755
--- a/crosperf/machine_manager_unittest.py
+++ b/crosperf/machine_manager_unittest.py
@@ -1,493 +1,574 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Copyright 2012 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for machine_manager."""
-from __future__ import print_function
+import hashlib
import os.path
import time
-import hashlib
import unittest
import unittest.mock as mock
-import label
-import machine_manager
-import image_checksummer
-import test_flag
-
from benchmark import Benchmark
from benchmark_run import MockBenchmarkRun
from cros_utils import command_executer
from cros_utils import logger
+import image_checksummer
+import label
+import machine_manager
+import test_flag
+
# pylint: disable=protected-access
class MyMachineManager(machine_manager.MachineManager):
- """Machine manager for test."""
-
- def __init__(self, chromeos_root):
- super(MyMachineManager, self).__init__(chromeos_root, 0, 'average', '')
-
- def _TryToLockMachine(self, cros_machine):
- self._machines.append(cros_machine)
- cros_machine.checksum = ''
-
- def AddMachine(self, machine_name):
- with self._lock:
- for m in self._all_machines:
- assert m.name != machine_name, 'Tried to double-add %s' % machine_name
- cm = machine_manager.MockCrosMachine(machine_name, self.chromeos_root,
- 'average')
- assert cm.machine_checksum, ('Could not find checksum for machine %s' %
- machine_name)
- self._all_machines.append(cm)
-
-
-CHROMEOS_ROOT = '/tmp/chromeos-root'
-MACHINE_NAMES = ['lumpy1', 'lumpy2', 'lumpy3', 'daisy1', 'daisy2']
-LABEL_LUMPY = label.MockLabel('lumpy', 'build', 'lumpy_chromeos_image',
- 'autotest_dir', 'debug_dir', CHROMEOS_ROOT,
- 'lumpy', ['lumpy1', 'lumpy2', 'lumpy3', 'lumpy4'],
- '', '', False, 'average', 'gcc', False, None)
-LABEL_MIX = label.MockLabel('mix', 'build', 'chromeos_image', 'autotest_dir',
- 'debug_dir', CHROMEOS_ROOT, 'mix',
- ['daisy1', 'daisy2', 'lumpy3', 'lumpy4'], '', '',
- False, 'average', 'gcc', False, None)
+ """Machine manager for test."""
+
+ def __init__(self, chromeos_root):
+ super(MyMachineManager, self).__init__(chromeos_root, 0, "average", "")
+
+ def _TryToLockMachine(self, cros_machine):
+ self._machines.append(cros_machine)
+ cros_machine.checksum = ""
+
+ def AddMachine(self, machine_name):
+ with self._lock:
+ for m in self._all_machines:
+ assert m.name != machine_name, (
+ "Tried to double-add %s" % machine_name
+ )
+ cm = machine_manager.MockCrosMachine(
+ machine_name, self.chromeos_root, "average"
+ )
+ assert cm.machine_checksum, (
+ "Could not find checksum for machine %s" % machine_name
+ )
+ self._all_machines.append(cm)
+
+
+CHROMEOS_ROOT = "/tmp/chromeos-root"
+MACHINE_NAMES = ["lumpy1", "lumpy2", "lumpy3", "daisy1", "daisy2"]
+LABEL_LUMPY = label.MockLabel(
+ "lumpy",
+ "build",
+ "lumpy_chromeos_image",
+ "autotest_dir",
+ "debug_dir",
+ CHROMEOS_ROOT,
+ "lumpy",
+ ["lumpy1", "lumpy2", "lumpy3", "lumpy4"],
+ "",
+ "",
+ False,
+ "average",
+ "gcc",
+ False,
+ None,
+)
+LABEL_MIX = label.MockLabel(
+ "mix",
+ "build",
+ "chromeos_image",
+ "autotest_dir",
+ "debug_dir",
+ CHROMEOS_ROOT,
+ "mix",
+ ["daisy1", "daisy2", "lumpy3", "lumpy4"],
+ "",
+ "",
+ False,
+ "average",
+ "gcc",
+ False,
+ None,
+)
class MachineManagerTest(unittest.TestCase):
- """Test for machine manager class."""
-
- msgs = []
- image_log = []
- log_fatal_msgs = []
- fake_logger_count = 0
- fake_logger_msgs = []
-
- mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
-
- mock_logger = mock.Mock(spec=logger.Logger)
-
- mock_lumpy1 = mock.Mock(spec=machine_manager.CrosMachine)
- mock_lumpy2 = mock.Mock(spec=machine_manager.CrosMachine)
- mock_lumpy3 = mock.Mock(spec=machine_manager.CrosMachine)
- mock_lumpy4 = mock.Mock(spec=machine_manager.CrosMachine)
- mock_daisy1 = mock.Mock(spec=machine_manager.CrosMachine)
- mock_daisy2 = mock.Mock(spec=machine_manager.CrosMachine)
-
- @mock.patch.object(os.path, 'isdir')
-
- # pylint: disable=arguments-differ
- def setUp(self, mock_isdir):
-
- mock_isdir.return_value = True
- self.mm = machine_manager.MachineManager('/usr/local/chromeos', 0,
- 'average', None,
- self.mock_cmd_exec,
- self.mock_logger)
-
- self.mock_lumpy1.name = 'lumpy1'
- self.mock_lumpy2.name = 'lumpy2'
- self.mock_lumpy3.name = 'lumpy3'
- self.mock_lumpy4.name = 'lumpy4'
- self.mock_daisy1.name = 'daisy1'
- self.mock_daisy2.name = 'daisy2'
- self.mock_lumpy1.machine_checksum = 'lumpy123'
- self.mock_lumpy2.machine_checksum = 'lumpy123'
- self.mock_lumpy3.machine_checksum = 'lumpy123'
- self.mock_lumpy4.machine_checksum = 'lumpy123'
- self.mock_daisy1.machine_checksum = 'daisy12'
- self.mock_daisy2.machine_checksum = 'daisy12'
- self.mock_lumpy1.checksum_string = 'lumpy_checksum_str'
- self.mock_lumpy2.checksum_string = 'lumpy_checksum_str'
- self.mock_lumpy3.checksum_string = 'lumpy_checksum_str'
- self.mock_lumpy4.checksum_string = 'lumpy_checksum_str'
- self.mock_daisy1.checksum_string = 'daisy_checksum_str'
- self.mock_daisy2.checksum_string = 'daisy_checksum_str'
- self.mock_lumpy1.cpuinfo = 'lumpy_cpu_info'
- self.mock_lumpy2.cpuinfo = 'lumpy_cpu_info'
- self.mock_lumpy3.cpuinfo = 'lumpy_cpu_info'
- self.mock_lumpy4.cpuinfo = 'lumpy_cpu_info'
- self.mock_daisy1.cpuinfo = 'daisy_cpu_info'
- self.mock_daisy2.cpuinfo = 'daisy_cpu_info'
- self.mm._all_machines.append(self.mock_daisy1)
- self.mm._all_machines.append(self.mock_daisy2)
- self.mm._all_machines.append(self.mock_lumpy1)
- self.mm._all_machines.append(self.mock_lumpy2)
- self.mm._all_machines.append(self.mock_lumpy3)
-
- def testGetMachines(self):
- manager = MyMachineManager(CHROMEOS_ROOT)
- for m in MACHINE_NAMES:
- manager.AddMachine(m)
- names = [m.name for m in manager.GetMachines(LABEL_LUMPY)]
- self.assertEqual(names, ['lumpy1', 'lumpy2', 'lumpy3'])
-
- def testGetAvailableMachines(self):
- manager = MyMachineManager(CHROMEOS_ROOT)
- for m in MACHINE_NAMES:
- manager.AddMachine(m)
- for m in manager._all_machines:
- if int(m.name[-1]) % 2:
- manager._TryToLockMachine(m)
- names = [m.name for m in manager.GetAvailableMachines(LABEL_LUMPY)]
- self.assertEqual(names, ['lumpy1', 'lumpy3'])
-
- @mock.patch.object(time, 'sleep')
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
- @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
- @mock.patch.object(image_checksummer.ImageChecksummer, 'Checksum')
- def test_image_machine(self, mock_checksummer, mock_run_croscmd, mock_run_cmd,
- mock_sleep):
-
- def FakeMD5Checksum(_input_str):
- return 'machine_fake_md5_checksum'
-
- self.fake_logger_count = 0
- self.fake_logger_msgs = []
-
- def FakeLogOutput(msg):
- self.fake_logger_count += 1
- self.fake_logger_msgs.append(msg)
-
- def ResetValues():
- self.fake_logger_count = 0
- self.fake_logger_msgs = []
- mock_run_cmd.reset_mock()
- mock_run_croscmd.reset_mock()
- mock_checksummer.reset_mock()
- mock_sleep.reset_mock()
- machine.checksum = 'fake_md5_checksum'
- self.mm.checksum = None
- self.mm.num_reimages = 0
-
- self.mock_cmd_exec.CrosRunCommand = mock_run_croscmd
- self.mock_cmd_exec.RunCommand = mock_run_cmd
-
- self.mm.logger.LogOutput = FakeLogOutput
- machine = self.mock_lumpy1
- machine._GetMD5Checksum = FakeMD5Checksum
- machine.checksum = 'fake_md5_checksum'
- mock_checksummer.return_value = 'fake_md5_checksum'
- self.mock_cmd_exec.log_level = 'verbose'
-
- test_flag.SetTestMode(True)
- # Test 1: label.image_type == "local"
- LABEL_LUMPY.image_type = 'local'
- self.mm.ImageMachine(machine, LABEL_LUMPY)
- self.assertEqual(mock_run_cmd.call_count, 0)
- self.assertEqual(mock_run_croscmd.call_count, 0)
-
- # Test 2: label.image_type == "trybot"
- ResetValues()
- LABEL_LUMPY.image_type = 'trybot'
- mock_run_cmd.return_value = 0
- self.mm.ImageMachine(machine, LABEL_LUMPY)
- self.assertEqual(mock_run_croscmd.call_count, 0)
- self.assertEqual(mock_checksummer.call_count, 0)
-
- # Test 3: label.image_type is neither local nor trybot; retval from
- # RunCommand is 1, i.e. image_chromeos fails...
- ResetValues()
- LABEL_LUMPY.image_type = 'other'
- mock_run_cmd.return_value = 1
- try:
- self.mm.ImageMachine(machine, LABEL_LUMPY)
- except RuntimeError:
- self.assertEqual(mock_checksummer.call_count, 0)
- self.assertEqual(mock_run_cmd.call_count, 2)
- self.assertEqual(mock_run_croscmd.call_count, 1)
- self.assertEqual(mock_sleep.call_count, 1)
- image_call_args_str = mock_run_cmd.call_args[0][0]
- image_call_args = image_call_args_str.split(' ')
- self.assertEqual(image_call_args[0], 'python')
- self.assertEqual(image_call_args[1].split('/')[-1], 'image_chromeos.pyc')
- image_call_args = image_call_args[2:]
- self.assertEqual(image_call_args, [
- '--chromeos_root=/tmp/chromeos-root', '--image=lumpy_chromeos_image',
- '--image_args=', '--remote=lumpy1', '--logging_level=average',
- '--board=lumpy'
- ])
- self.assertEqual(mock_run_croscmd.call_args[0][0], 'reboot && exit')
-
- # Test 4: Everything works properly. Trybot image type.
- ResetValues()
- LABEL_LUMPY.image_type = 'trybot'
- mock_run_cmd.return_value = 0
- self.mm.ImageMachine(machine, LABEL_LUMPY)
- self.assertEqual(mock_checksummer.call_count, 0)
- self.assertEqual(mock_run_croscmd.call_count, 0)
- self.assertEqual(mock_sleep.call_count, 0)
-
- def test_compute_common_checksum(self):
- self.mm.machine_checksum = {}
- self.mm.ComputeCommonCheckSum(LABEL_LUMPY)
- self.assertEqual(self.mm.machine_checksum['lumpy'], 'lumpy123')
- self.assertEqual(len(self.mm.machine_checksum), 1)
-
- self.mm.machine_checksum = {}
- self.assertRaisesRegex(machine_manager.BadChecksum, r'daisy.*\n.*lumpy',
- self.mm.ComputeCommonCheckSum, LABEL_MIX)
-
- def test_compute_common_checksum_string(self):
- self.mm.machine_checksum_string = {}
- self.mm.ComputeCommonCheckSumString(LABEL_LUMPY)
- self.assertEqual(len(self.mm.machine_checksum_string), 1)
- self.assertEqual(self.mm.machine_checksum_string['lumpy'],
- 'lumpy_checksum_str')
-
- self.mm.machine_checksum_string = {}
- self.mm.ComputeCommonCheckSumString(LABEL_MIX)
- self.assertEqual(len(self.mm.machine_checksum_string), 1)
- self.assertEqual(self.mm.machine_checksum_string['mix'],
- 'daisy_checksum_str')
-
- @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
- def test_try_to_lock_machine(self, mock_cros_runcmd):
- mock_cros_runcmd.return_value = [0, 'false_lock_checksum', '']
- self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd
- self.mm._machines = []
- self.mm._TryToLockMachine(self.mock_lumpy1)
- self.assertEqual(len(self.mm._machines), 1)
- self.assertEqual(self.mm._machines[0], self.mock_lumpy1)
- self.assertEqual(self.mock_lumpy1.checksum, 'false_lock_checksum')
- self.assertEqual(mock_cros_runcmd.call_count, 1)
- cmd_str = mock_cros_runcmd.call_args[0][0]
- self.assertEqual(cmd_str, 'cat /usr/local/osimage_checksum_file')
- args_dict = mock_cros_runcmd.call_args[1]
- self.assertEqual(len(args_dict), 2)
- self.assertEqual(args_dict['machine'], self.mock_lumpy1.name)
- self.assertEqual(args_dict['chromeos_root'], '/usr/local/chromeos')
-
- @mock.patch.object(machine_manager, 'CrosMachine')
- def test_add_machine(self, mock_machine):
-
- mock_machine.machine_checksum = 'daisy123'
- self.assertEqual(len(self.mm._all_machines), 5)
- self.mm.AddMachine('daisy3')
- self.assertEqual(len(self.mm._all_machines), 6)
-
- self.assertRaises(Exception, self.mm.AddMachine, 'lumpy1')
-
- def test_remove_machine(self):
- self.mm._machines = self.mm._all_machines
- self.assertTrue(self.mock_lumpy2 in self.mm._machines)
- self.mm.RemoveMachine(self.mock_lumpy2.name)
- self.assertFalse(self.mock_lumpy2 in self.mm._machines)
-
- def test_force_same_image_to_all_machines(self):
- self.image_log = []
-
- def FakeImageMachine(machine, label_arg):
- image = label_arg.chromeos_image
- self.image_log.append('Pushed %s onto %s' % (image, machine.name))
-
- def FakeSetUpChecksumInfo():
- pass
-
- self.mm.ImageMachine = FakeImageMachine
- self.mock_lumpy1.SetUpChecksumInfo = FakeSetUpChecksumInfo
- self.mock_lumpy2.SetUpChecksumInfo = FakeSetUpChecksumInfo
- self.mock_lumpy3.SetUpChecksumInfo = FakeSetUpChecksumInfo
-
- self.mm.ForceSameImageToAllMachines(LABEL_LUMPY)
- self.assertEqual(len(self.image_log), 3)
- self.assertEqual(self.image_log[0],
- 'Pushed lumpy_chromeos_image onto lumpy1')
- self.assertEqual(self.image_log[1],
- 'Pushed lumpy_chromeos_image onto lumpy2')
- self.assertEqual(self.image_log[2],
- 'Pushed lumpy_chromeos_image onto lumpy3')
-
- @mock.patch.object(image_checksummer.ImageChecksummer, 'Checksum')
- @mock.patch.object(hashlib, 'md5')
- def test_acquire_machine(self, mock_md5, mock_checksum):
-
- self.msgs = []
- self.log_fatal_msgs = []
-
- def FakeLock(machine):
- self.msgs.append('Tried to lock %s' % machine.name)
-
- def FakeLogFatal(msg):
- self.log_fatal_msgs.append(msg)
-
- self.mm._TryToLockMachine = FakeLock
- self.mm.logger.LogFatal = FakeLogFatal
-
- mock_md5.return_value = '123456'
- mock_checksum.return_value = 'fake_md5_checksum'
-
- self.mm._machines = self.mm._all_machines
- self.mock_lumpy1.locked = True
- self.mock_lumpy2.locked = True
- self.mock_lumpy3.locked = False
- self.mock_lumpy3.checksum = 'fake_md5_checksum'
- self.mock_daisy1.locked = True
- self.mock_daisy2.locked = False
- self.mock_daisy2.checksum = 'fake_md5_checksum'
-
- self.mock_lumpy1.released_time = time.time()
- self.mock_lumpy2.released_time = time.time()
- self.mock_lumpy3.released_time = time.time()
- self.mock_daisy1.released_time = time.time()
- self.mock_daisy2.released_time = time.time()
-
- # Test 1. Basic test. Acquire lumpy3.
- self.mm.AcquireMachine(LABEL_LUMPY)
- m = self.mock_lumpy1
- self.assertEqual(m, self.mock_lumpy1)
- self.assertTrue(self.mock_lumpy1.locked)
- self.assertEqual(mock_md5.call_count, 0)
- self.assertEqual(self.msgs, [
- 'Tried to lock lumpy1', 'Tried to lock lumpy2', 'Tried to lock lumpy3'
- ])
-
- # Test the second return statment (machine is unlocked, has no checksum)
- save_locked = self.mock_lumpy1.locked
- self.mock_lumpy1.locked = False
- self.mock_lumpy1.checksum = None
- m = self.mm.AcquireMachine(LABEL_LUMPY)
- self.assertEqual(m, self.mock_lumpy1)
- self.assertTrue(self.mock_lumpy1.locked)
-
- # Test the third return statement:
- # - machine is unlocked
- # - checksums don't match
- # - current time minus release time is > 20.
- self.mock_lumpy1.locked = False
- self.mock_lumpy1.checksum = '123'
- self.mock_lumpy1.released_time = time.time() - 8
- m = self.mm.AcquireMachine(LABEL_LUMPY)
- self.assertEqual(m, self.mock_lumpy1)
- self.assertTrue(self.mock_lumpy1.locked)
-
- # Test all machines are already locked.
- m = self.mm.AcquireMachine(LABEL_LUMPY)
- self.assertIsNone(m)
-
- # Restore values of mock_lumpy1, so other tests succeed.
- self.mock_lumpy1.locked = save_locked
- self.mock_lumpy1.checksum = '123'
-
- def test_get_available_machines(self):
- self.mm._machines = self.mm._all_machines
-
- machine_list = self.mm.GetAvailableMachines()
- self.assertEqual(machine_list, self.mm._all_machines)
-
- machine_list = self.mm.GetAvailableMachines(LABEL_MIX)
- self.assertEqual(machine_list,
- [self.mock_daisy1, self.mock_daisy2, self.mock_lumpy3])
-
- machine_list = self.mm.GetAvailableMachines(LABEL_LUMPY)
- self.assertEqual(machine_list,
- [self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3])
-
- def test_get_machines(self):
- machine_list = self.mm.GetMachines()
- self.assertEqual(machine_list, self.mm._all_machines)
-
- machine_list = self.mm.GetMachines(LABEL_MIX)
- self.assertEqual(machine_list,
- [self.mock_daisy1, self.mock_daisy2, self.mock_lumpy3])
-
- machine_list = self.mm.GetMachines(LABEL_LUMPY)
- self.assertEqual(machine_list,
- [self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3])
-
- def test_release_machines(self):
-
- self.mm._machines = [self.mock_lumpy1, self.mock_daisy2]
-
- self.mock_lumpy1.locked = True
- self.mock_daisy2.locked = True
-
- self.assertTrue(self.mock_lumpy1.locked)
- self.mm.ReleaseMachine(self.mock_lumpy1)
- self.assertFalse(self.mock_lumpy1.locked)
- self.assertEqual(self.mock_lumpy1.status, 'Available')
-
- self.assertTrue(self.mock_daisy2.locked)
- self.mm.ReleaseMachine(self.mock_daisy2)
- self.assertFalse(self.mock_daisy2.locked)
- self.assertEqual(self.mock_daisy2.status, 'Available')
-
- # Test double-relase...
- self.assertRaises(AssertionError, self.mm.ReleaseMachine, self.mock_lumpy1)
-
- def test_cleanup(self):
- self.mock_logger.reset_mock()
- self.mm.Cleanup()
- self.assertEqual(self.mock_logger.call_count, 0)
-
- OUTPUT_STR = ('Machine Status:\nMachine Thread '
- 'Lock Status Checksum'
- ' \nlumpy1 test '
- 'run True PENDING 123'
- ' \nlumpy2 '
- 'test run False PENDING 123'
- ' \nlumpy3 '
- 'test run False PENDING 123'
- ' \ndaisy1 '
- 'test run False PENDING 678'
- ' \ndaisy2 '
- 'test run True PENDING 678'
- ' ')
-
- def test_as_string(self):
+ """Test for machine manager class."""
+
+ msgs = []
+ image_log = []
+ log_fatal_msgs = []
+ fake_logger_count = 0
+ fake_logger_msgs = []
+
+ mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
mock_logger = mock.Mock(spec=logger.Logger)
- bench = Benchmark(
- 'page_cycler_v2.netsim.top_10', # name
- 'page_cycler_v2.netsim.top_10', # test_name
- '', # test_args
- 1, # iteratins
- False, # rm_chroot_tmp
- '', # perf_args
- suite='telemetry_Crosperf') # suite
-
- test_run = MockBenchmarkRun('test run', bench, LABEL_LUMPY, 1, [], self.mm,
- mock_logger, 'verbose', '', {})
-
- self.mm._machines = [
- self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3, self.mock_daisy1,
- self.mock_daisy2
- ]
-
- self.mock_lumpy1.test_run = test_run
- self.mock_lumpy2.test_run = test_run
- self.mock_lumpy3.test_run = test_run
- self.mock_daisy1.test_run = test_run
- self.mock_daisy2.test_run = test_run
-
- self.mock_lumpy1.locked = True
- self.mock_lumpy2.locked = False
- self.mock_lumpy3.locked = False
- self.mock_daisy1.locked = False
- self.mock_daisy2.locked = True
-
- self.mock_lumpy1.checksum = '123'
- self.mock_lumpy2.checksum = '123'
- self.mock_lumpy3.checksum = '123'
- self.mock_daisy1.checksum = '678'
- self.mock_daisy2.checksum = '678'
-
- output = self.mm.AsString()
- self.assertEqual(output, self.OUTPUT_STR)
-
- def test_get_all_cpu_info(self):
- info = self.mm.GetAllCPUInfo([LABEL_LUMPY, LABEL_MIX])
- self.assertEqual(
- info, 'lumpy\n-------------------\nlumpy_cpu_info\n\n\nmix\n-'
- '------------------\ndaisy_cpu_info\n\n\n')
+ mock_lumpy1 = mock.Mock(spec=machine_manager.CrosMachine)
+ mock_lumpy2 = mock.Mock(spec=machine_manager.CrosMachine)
+ mock_lumpy3 = mock.Mock(spec=machine_manager.CrosMachine)
+ mock_lumpy4 = mock.Mock(spec=machine_manager.CrosMachine)
+ mock_daisy1 = mock.Mock(spec=machine_manager.CrosMachine)
+ mock_daisy2 = mock.Mock(spec=machine_manager.CrosMachine)
+
+ @mock.patch.object(os.path, "isdir")
+
+ # pylint: disable=arguments-differ
+ def setUp(self, mock_isdir):
+
+ mock_isdir.return_value = True
+ self.mm = machine_manager.MachineManager(
+ "/usr/local/chromeos",
+ 0,
+ "average",
+ None,
+ self.mock_cmd_exec,
+ self.mock_logger,
+ )
+
+ self.mock_lumpy1.name = "lumpy1"
+ self.mock_lumpy2.name = "lumpy2"
+ self.mock_lumpy3.name = "lumpy3"
+ self.mock_lumpy4.name = "lumpy4"
+ self.mock_daisy1.name = "daisy1"
+ self.mock_daisy2.name = "daisy2"
+ self.mock_lumpy1.machine_checksum = "lumpy123"
+ self.mock_lumpy2.machine_checksum = "lumpy123"
+ self.mock_lumpy3.machine_checksum = "lumpy123"
+ self.mock_lumpy4.machine_checksum = "lumpy123"
+ self.mock_daisy1.machine_checksum = "daisy12"
+ self.mock_daisy2.machine_checksum = "daisy12"
+ self.mock_lumpy1.checksum_string = "lumpy_checksum_str"
+ self.mock_lumpy2.checksum_string = "lumpy_checksum_str"
+ self.mock_lumpy3.checksum_string = "lumpy_checksum_str"
+ self.mock_lumpy4.checksum_string = "lumpy_checksum_str"
+ self.mock_daisy1.checksum_string = "daisy_checksum_str"
+ self.mock_daisy2.checksum_string = "daisy_checksum_str"
+ self.mock_lumpy1.cpuinfo = "lumpy_cpu_info"
+ self.mock_lumpy2.cpuinfo = "lumpy_cpu_info"
+ self.mock_lumpy3.cpuinfo = "lumpy_cpu_info"
+ self.mock_lumpy4.cpuinfo = "lumpy_cpu_info"
+ self.mock_daisy1.cpuinfo = "daisy_cpu_info"
+ self.mock_daisy2.cpuinfo = "daisy_cpu_info"
+ self.mm._all_machines.append(self.mock_daisy1)
+ self.mm._all_machines.append(self.mock_daisy2)
+ self.mm._all_machines.append(self.mock_lumpy1)
+ self.mm._all_machines.append(self.mock_lumpy2)
+ self.mm._all_machines.append(self.mock_lumpy3)
+
+ def testGetMachines(self):
+ manager = MyMachineManager(CHROMEOS_ROOT)
+ for m in MACHINE_NAMES:
+ manager.AddMachine(m)
+ names = [m.name for m in manager.GetMachines(LABEL_LUMPY)]
+ self.assertEqual(names, ["lumpy1", "lumpy2", "lumpy3"])
+
+ def testGetAvailableMachines(self):
+ manager = MyMachineManager(CHROMEOS_ROOT)
+ for m in MACHINE_NAMES:
+ manager.AddMachine(m)
+ for m in manager._all_machines:
+ if int(m.name[-1]) % 2:
+ manager._TryToLockMachine(m)
+ names = [m.name for m in manager.GetAvailableMachines(LABEL_LUMPY)]
+ self.assertEqual(names, ["lumpy1", "lumpy3"])
+
+ @mock.patch.object(time, "sleep")
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommand")
+ @mock.patch.object(command_executer.CommandExecuter, "CrosRunCommand")
+ @mock.patch.object(image_checksummer.ImageChecksummer, "Checksum")
+ def test_image_machine(
+ self, mock_checksummer, mock_run_croscmd, mock_run_cmd, mock_sleep
+ ):
+ def FakeMD5Checksum(_input_str):
+ return "machine_fake_md5_checksum"
+
+ self.fake_logger_count = 0
+ self.fake_logger_msgs = []
+
+ def FakeLogOutput(msg):
+ self.fake_logger_count += 1
+ self.fake_logger_msgs.append(msg)
+
+ def ResetValues():
+ self.fake_logger_count = 0
+ self.fake_logger_msgs = []
+ mock_run_cmd.reset_mock()
+ mock_run_croscmd.reset_mock()
+ mock_checksummer.reset_mock()
+ mock_sleep.reset_mock()
+ machine.checksum = "fake_md5_checksum"
+ self.mm.checksum = None
+ self.mm.num_reimages = 0
+
+ self.mock_cmd_exec.CrosRunCommand = mock_run_croscmd
+ self.mock_cmd_exec.RunCommand = mock_run_cmd
+
+ self.mm.logger.LogOutput = FakeLogOutput
+ machine = self.mock_lumpy1
+ machine._GetMD5Checksum = FakeMD5Checksum
+ machine.checksum = "fake_md5_checksum"
+ mock_checksummer.return_value = "fake_md5_checksum"
+ self.mock_cmd_exec.log_level = "verbose"
+
+ test_flag.SetTestMode(True)
+ # Test 1: label.image_type == "local"
+ LABEL_LUMPY.image_type = "local"
+ self.mm.ImageMachine(machine, LABEL_LUMPY)
+ self.assertEqual(mock_run_cmd.call_count, 0)
+ self.assertEqual(mock_run_croscmd.call_count, 0)
+
+ # Test 2: label.image_type == "trybot"
+ ResetValues()
+ LABEL_LUMPY.image_type = "trybot"
+ mock_run_cmd.return_value = 0
+ self.mm.ImageMachine(machine, LABEL_LUMPY)
+ self.assertEqual(mock_run_croscmd.call_count, 0)
+ self.assertEqual(mock_checksummer.call_count, 0)
+
+ # Test 3: label.image_type is neither local nor trybot; retval from
+ # RunCommand is 1, i.e. image_chromeos fails...
+ ResetValues()
+ LABEL_LUMPY.image_type = "other"
+ mock_run_cmd.return_value = 1
+ try:
+ self.mm.ImageMachine(machine, LABEL_LUMPY)
+ except RuntimeError:
+ self.assertEqual(mock_checksummer.call_count, 0)
+ self.assertEqual(mock_run_cmd.call_count, 2)
+ self.assertEqual(mock_run_croscmd.call_count, 1)
+ self.assertEqual(mock_sleep.call_count, 1)
+ image_call_args_str = mock_run_cmd.call_args[0][0]
+ image_call_args = image_call_args_str.split(" ")
+ self.assertEqual(image_call_args[0], "python")
+ self.assertEqual(
+ image_call_args[1].split("/")[-1], "image_chromeos.pyc"
+ )
+ image_call_args = image_call_args[2:]
+ self.assertEqual(
+ image_call_args,
+ [
+ "--chromeos_root=/tmp/chromeos-root",
+ "--image=lumpy_chromeos_image",
+ "--image_args=",
+ "--remote=lumpy1",
+ "--logging_level=average",
+ "--board=lumpy",
+ ],
+ )
+ self.assertEqual(mock_run_croscmd.call_args[0][0], "reboot && exit")
+
+ # Test 4: Everything works properly. Trybot image type.
+ ResetValues()
+ LABEL_LUMPY.image_type = "trybot"
+ mock_run_cmd.return_value = 0
+ self.mm.ImageMachine(machine, LABEL_LUMPY)
+ self.assertEqual(mock_checksummer.call_count, 0)
+ self.assertEqual(mock_run_croscmd.call_count, 0)
+ self.assertEqual(mock_sleep.call_count, 0)
+
+ def test_compute_common_checksum(self):
+ self.mm.machine_checksum = {}
+ self.mm.ComputeCommonCheckSum(LABEL_LUMPY)
+ self.assertEqual(self.mm.machine_checksum["lumpy"], "lumpy123")
+ self.assertEqual(len(self.mm.machine_checksum), 1)
+
+ self.mm.machine_checksum = {}
+ self.assertRaisesRegex(
+ machine_manager.BadChecksum,
+ r"daisy.*\n.*lumpy",
+ self.mm.ComputeCommonCheckSum,
+ LABEL_MIX,
+ )
+
+ def test_compute_common_checksum_string(self):
+ self.mm.machine_checksum_string = {}
+ self.mm.ComputeCommonCheckSumString(LABEL_LUMPY)
+ self.assertEqual(len(self.mm.machine_checksum_string), 1)
+ self.assertEqual(
+ self.mm.machine_checksum_string["lumpy"], "lumpy_checksum_str"
+ )
+
+ self.mm.machine_checksum_string = {}
+ self.mm.ComputeCommonCheckSumString(LABEL_MIX)
+ self.assertEqual(len(self.mm.machine_checksum_string), 1)
+ self.assertEqual(
+ self.mm.machine_checksum_string["mix"], "daisy_checksum_str"
+ )
+
+ @mock.patch.object(
+ command_executer.CommandExecuter, "CrosRunCommandWOutput"
+ )
+ def test_try_to_lock_machine(self, mock_cros_runcmd):
+ mock_cros_runcmd.return_value = [0, "false_lock_checksum", ""]
+ self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd
+ self.mm._machines = []
+ self.mm._TryToLockMachine(self.mock_lumpy1)
+ self.assertEqual(len(self.mm._machines), 1)
+ self.assertEqual(self.mm._machines[0], self.mock_lumpy1)
+ self.assertEqual(self.mock_lumpy1.checksum, "false_lock_checksum")
+ self.assertEqual(mock_cros_runcmd.call_count, 1)
+ cmd_str = mock_cros_runcmd.call_args[0][0]
+ self.assertEqual(cmd_str, "cat /usr/local/osimage_checksum_file")
+ args_dict = mock_cros_runcmd.call_args[1]
+ self.assertEqual(len(args_dict), 2)
+ self.assertEqual(args_dict["machine"], self.mock_lumpy1.name)
+ self.assertEqual(args_dict["chromeos_root"], "/usr/local/chromeos")
+
+ @mock.patch.object(machine_manager, "CrosMachine")
+ def test_add_machine(self, mock_machine):
+
+ mock_machine.machine_checksum = "daisy123"
+ self.assertEqual(len(self.mm._all_machines), 5)
+ self.mm.AddMachine("daisy3")
+ self.assertEqual(len(self.mm._all_machines), 6)
+
+ self.assertRaises(Exception, self.mm.AddMachine, "lumpy1")
+
+ def test_remove_machine(self):
+ self.mm._machines = self.mm._all_machines
+ self.assertTrue(self.mock_lumpy2 in self.mm._machines)
+ self.mm.RemoveMachine(self.mock_lumpy2.name)
+ self.assertFalse(self.mock_lumpy2 in self.mm._machines)
+
+ def test_force_same_image_to_all_machines(self):
+ self.image_log = []
+
+ def FakeImageMachine(machine, label_arg):
+ image = label_arg.chromeos_image
+ self.image_log.append("Pushed %s onto %s" % (image, machine.name))
+
+ def FakeSetUpChecksumInfo():
+ pass
+
+ self.mm.ImageMachine = FakeImageMachine
+ self.mock_lumpy1.SetUpChecksumInfo = FakeSetUpChecksumInfo
+ self.mock_lumpy2.SetUpChecksumInfo = FakeSetUpChecksumInfo
+ self.mock_lumpy3.SetUpChecksumInfo = FakeSetUpChecksumInfo
+
+ self.mm.ForceSameImageToAllMachines(LABEL_LUMPY)
+ self.assertEqual(len(self.image_log), 3)
+ self.assertEqual(
+ self.image_log[0], "Pushed lumpy_chromeos_image onto lumpy1"
+ )
+ self.assertEqual(
+ self.image_log[1], "Pushed lumpy_chromeos_image onto lumpy2"
+ )
+ self.assertEqual(
+ self.image_log[2], "Pushed lumpy_chromeos_image onto lumpy3"
+ )
+
+ @mock.patch.object(image_checksummer.ImageChecksummer, "Checksum")
+ @mock.patch.object(hashlib, "md5")
+ def test_acquire_machine(self, mock_md5, mock_checksum):
+
+ self.msgs = []
+ self.log_fatal_msgs = []
+
+ def FakeLock(machine):
+ self.msgs.append("Tried to lock %s" % machine.name)
+
+ def FakeLogFatal(msg):
+ self.log_fatal_msgs.append(msg)
+
+ self.mm._TryToLockMachine = FakeLock
+ self.mm.logger.LogFatal = FakeLogFatal
+
+ mock_md5.return_value = "123456"
+ mock_checksum.return_value = "fake_md5_checksum"
+
+ self.mm._machines = self.mm._all_machines
+ self.mock_lumpy1.locked = True
+ self.mock_lumpy2.locked = True
+ self.mock_lumpy3.locked = False
+ self.mock_lumpy3.checksum = "fake_md5_checksum"
+ self.mock_daisy1.locked = True
+ self.mock_daisy2.locked = False
+ self.mock_daisy2.checksum = "fake_md5_checksum"
+
+ self.mock_lumpy1.released_time = time.time()
+ self.mock_lumpy2.released_time = time.time()
+ self.mock_lumpy3.released_time = time.time()
+ self.mock_daisy1.released_time = time.time()
+ self.mock_daisy2.released_time = time.time()
+
+ # Test 1. Basic test. Acquire lumpy3.
+ self.mm.AcquireMachine(LABEL_LUMPY)
+ m = self.mock_lumpy1
+ self.assertEqual(m, self.mock_lumpy1)
+ self.assertTrue(self.mock_lumpy1.locked)
+ self.assertEqual(mock_md5.call_count, 0)
+ self.assertEqual(
+ self.msgs,
+ [
+ "Tried to lock lumpy1",
+ "Tried to lock lumpy2",
+ "Tried to lock lumpy3",
+ ],
+ )
+
+        # Test the second return statement (machine is unlocked, has no checksum)
+ save_locked = self.mock_lumpy1.locked
+ self.mock_lumpy1.locked = False
+ self.mock_lumpy1.checksum = None
+ m = self.mm.AcquireMachine(LABEL_LUMPY)
+ self.assertEqual(m, self.mock_lumpy1)
+ self.assertTrue(self.mock_lumpy1.locked)
+
+ # Test the third return statement:
+ # - machine is unlocked
+ # - checksums don't match
+ # - current time minus release time is > 20.
+ self.mock_lumpy1.locked = False
+ self.mock_lumpy1.checksum = "123"
+ self.mock_lumpy1.released_time = time.time() - 8
+ m = self.mm.AcquireMachine(LABEL_LUMPY)
+ self.assertEqual(m, self.mock_lumpy1)
+ self.assertTrue(self.mock_lumpy1.locked)
+
+ # Test all machines are already locked.
+ m = self.mm.AcquireMachine(LABEL_LUMPY)
+ self.assertIsNone(m)
+
+ # Restore values of mock_lumpy1, so other tests succeed.
+ self.mock_lumpy1.locked = save_locked
+ self.mock_lumpy1.checksum = "123"
+
+ def test_get_available_machines(self):
+ self.mm._machines = self.mm._all_machines
+
+ machine_list = self.mm.GetAvailableMachines()
+ self.assertEqual(machine_list, self.mm._all_machines)
+
+ machine_list = self.mm.GetAvailableMachines(LABEL_MIX)
+ self.assertEqual(
+ machine_list, [self.mock_daisy1, self.mock_daisy2, self.mock_lumpy3]
+ )
+
+ machine_list = self.mm.GetAvailableMachines(LABEL_LUMPY)
+ self.assertEqual(
+ machine_list, [self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3]
+ )
+
+ def test_get_machines(self):
+ machine_list = self.mm.GetMachines()
+ self.assertEqual(machine_list, self.mm._all_machines)
+
+ machine_list = self.mm.GetMachines(LABEL_MIX)
+ self.assertEqual(
+ machine_list, [self.mock_daisy1, self.mock_daisy2, self.mock_lumpy3]
+ )
+
+ machine_list = self.mm.GetMachines(LABEL_LUMPY)
+ self.assertEqual(
+ machine_list, [self.mock_lumpy1, self.mock_lumpy2, self.mock_lumpy3]
+ )
+
+ def test_release_machines(self):
+
+ self.mm._machines = [self.mock_lumpy1, self.mock_daisy2]
+
+ self.mock_lumpy1.locked = True
+ self.mock_daisy2.locked = True
+
+ self.assertTrue(self.mock_lumpy1.locked)
+ self.mm.ReleaseMachine(self.mock_lumpy1)
+ self.assertFalse(self.mock_lumpy1.locked)
+ self.assertEqual(self.mock_lumpy1.status, "Available")
+
+ self.assertTrue(self.mock_daisy2.locked)
+ self.mm.ReleaseMachine(self.mock_daisy2)
+ self.assertFalse(self.mock_daisy2.locked)
+ self.assertEqual(self.mock_daisy2.status, "Available")
+
+        # Test double-release...
+ self.assertRaises(
+ AssertionError, self.mm.ReleaseMachine, self.mock_lumpy1
+ )
+
+ def test_cleanup(self):
+ self.mock_logger.reset_mock()
+ self.mm.Cleanup()
+ self.assertEqual(self.mock_logger.call_count, 0)
+
+ OUTPUT_STR = (
+ "Machine Status:\nMachine Thread "
+ "Lock Status Checksum"
+ " \nlumpy1 test "
+ "run True PENDING 123"
+ " \nlumpy2 "
+ "test run False PENDING 123"
+ " \nlumpy3 "
+ "test run False PENDING 123"
+ " \ndaisy1 "
+ "test run False PENDING 678"
+ " \ndaisy2 "
+ "test run True PENDING 678"
+ " "
+ )
+
+ def test_as_string(self):
+
+ mock_logger = mock.Mock(spec=logger.Logger)
+
+ bench = Benchmark(
+ "page_cycler_v2.netsim.top_10", # name
+ "page_cycler_v2.netsim.top_10", # test_name
+ "", # test_args
+            1, # iterations
+ False, # rm_chroot_tmp
+ "", # perf_args
+            suite="telemetry_Crosperf", # suite
+        )
+
+ test_run = MockBenchmarkRun(
+ "test run",
+ bench,
+ LABEL_LUMPY,
+ 1,
+ [],
+ self.mm,
+ mock_logger,
+ "verbose",
+ "",
+ {},
+ )
+
+ self.mm._machines = [
+ self.mock_lumpy1,
+ self.mock_lumpy2,
+ self.mock_lumpy3,
+ self.mock_daisy1,
+ self.mock_daisy2,
+ ]
+
+ self.mock_lumpy1.test_run = test_run
+ self.mock_lumpy2.test_run = test_run
+ self.mock_lumpy3.test_run = test_run
+ self.mock_daisy1.test_run = test_run
+ self.mock_daisy2.test_run = test_run
+
+ self.mock_lumpy1.locked = True
+ self.mock_lumpy2.locked = False
+ self.mock_lumpy3.locked = False
+ self.mock_daisy1.locked = False
+ self.mock_daisy2.locked = True
+
+ self.mock_lumpy1.checksum = "123"
+ self.mock_lumpy2.checksum = "123"
+ self.mock_lumpy3.checksum = "123"
+ self.mock_daisy1.checksum = "678"
+ self.mock_daisy2.checksum = "678"
+
+ output = self.mm.AsString()
+ self.assertEqual(output, self.OUTPUT_STR)
+
+ def test_get_all_cpu_info(self):
+ info = self.mm.GetAllCPUInfo([LABEL_LUMPY, LABEL_MIX])
+ self.assertEqual(
+ info,
+ "lumpy\n-------------------\nlumpy_cpu_info\n\n\nmix\n-"
+ "------------------\ndaisy_cpu_info\n\n\n",
+ )
MEMINFO_STRING = """MemTotal: 3990332 kB
@@ -580,35 +661,37 @@ address sizes: 36 bits physical, 48 bits virtual
power management:
"""
-CHECKSUM_STRING = ('processor: 0vendor_id: GenuineIntelcpu family: 6model: '
- '42model name: Intel(R) Celeron(R) CPU 867 @ '
- '1.30GHzstepping: 7microcode: 0x25cache size: 2048 '
- 'KBphysical id: 0siblings: 2cpu cores: 2'
- 'fpu: yesfpu_exception: yescpuid level: '
- '13wp: yesflags: fpu vme de pse tsc msr pae mce cx8 apic sep'
- ' mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse '
- 'sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc '
- 'arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc '
- 'aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 '
- 'ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt '
- 'tsc_deadline_timer xsave lahf_lm arat epb xsaveopt pln pts '
- 'dts tpr_shadow vnmi flexpriority ept vpidclflush size: '
- '64cache_alignment: 64address sizes: 36 bits physical, 48 '
- 'bits virtualpower management:processor: 1vendor_id: '
- 'GenuineIntelcpu family: 6model: 42model name: Intel(R) '
- 'Celeron(R) CPU 867 @ 1.30GHzstepping: 7microcode: 0x25cache'
- ' size: 2048 KBphysical id: 0siblings: 2cpu cores:'
- ' 2fpu: yesfpu_exception: yescpuid'
- ' level: 13wp: yesflags: fpu vme de pse tsc msr pae mce cx8 '
- 'apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx '
- 'fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm '
- 'constant_tsc arch_perfmon pebs bts rep_good nopl xtopology '
- 'nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl '
- 'vmx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic '
- 'popcnt tsc_deadline_timer xsave lahf_lm arat epb xsaveopt '
- 'pln pts dts tpr_shadow vnmi flexpriority ept vpidclflush '
- 'size: 64cache_alignment: 64address sizes: 36 bits physical,'
- ' 48 bits virtualpower management: 4194304')
+CHECKSUM_STRING = (
+ "processor: 0vendor_id: GenuineIntelcpu family: 6model: "
+ "42model name: Intel(R) Celeron(R) CPU 867 @ "
+ "1.30GHzstepping: 7microcode: 0x25cache size: 2048 "
+ "KBphysical id: 0siblings: 2cpu cores: 2"
+ "fpu: yesfpu_exception: yescpuid level: "
+ "13wp: yesflags: fpu vme de pse tsc msr pae mce cx8 apic sep"
+ " mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse "
+ "sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc "
+ "arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc "
+ "aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 "
+ "ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt "
+ "tsc_deadline_timer xsave lahf_lm arat epb xsaveopt pln pts "
+ "dts tpr_shadow vnmi flexpriority ept vpidclflush size: "
+ "64cache_alignment: 64address sizes: 36 bits physical, 48 "
+ "bits virtualpower management:processor: 1vendor_id: "
+ "GenuineIntelcpu family: 6model: 42model name: Intel(R) "
+ "Celeron(R) CPU 867 @ 1.30GHzstepping: 7microcode: 0x25cache"
+ " size: 2048 KBphysical id: 0siblings: 2cpu cores:"
+ " 2fpu: yesfpu_exception: yescpuid"
+ " level: 13wp: yesflags: fpu vme de pse tsc msr pae mce cx8 "
+ "apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx "
+ "fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm "
+ "constant_tsc arch_perfmon pebs bts rep_good nopl xtopology "
+ "nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl "
+ "vmx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic "
+ "popcnt tsc_deadline_timer xsave lahf_lm arat epb xsaveopt "
+ "pln pts dts tpr_shadow vnmi flexpriority ept vpidclflush "
+ "size: 64cache_alignment: 64address sizes: 36 bits physical,"
+ " 48 bits virtualpower management: 4194304"
+)
DUMP_VPD_STRING = """
"PBA_SN"="Pba.txt"
@@ -667,187 +750,212 @@ wlan0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
class CrosMachineTest(unittest.TestCase):
- """Test for CrosMachine class."""
-
- mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
-
- @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
- def test_init(self, mock_setup):
-
- cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
- 'average', self.mock_cmd_exec)
- self.assertEqual(mock_setup.call_count, 1)
- self.assertEqual(cm.chromeos_root, '/usr/local/chromeos')
- self.assertEqual(cm.log_level, 'average')
-
- @mock.patch.object(machine_manager.CrosMachine, 'IsReachable')
- @mock.patch.object(machine_manager.CrosMachine, '_GetMemoryInfo')
- @mock.patch.object(machine_manager.CrosMachine, '_GetCPUInfo')
- @mock.patch.object(machine_manager.CrosMachine,
- '_ComputeMachineChecksumString')
- @mock.patch.object(machine_manager.CrosMachine, '_GetMachineID')
- @mock.patch.object(machine_manager.CrosMachine, '_GetMD5Checksum')
- def test_setup_checksum_info(self, mock_md5sum, mock_machineid,
- mock_checkstring, mock_cpuinfo, mock_meminfo,
- mock_isreachable):
-
- # Test 1. Machine is not reachable; SetUpChecksumInfo is called via
- # __init__.
- mock_isreachable.return_value = False
- mock_md5sum.return_value = 'md5_checksum'
- cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
- 'average', self.mock_cmd_exec)
- cm.checksum_string = 'This is a checksum string.'
- cm.machine_id = 'machine_id1'
- self.assertEqual(mock_isreachable.call_count, 1)
- self.assertIsNone(cm.machine_checksum)
- self.assertEqual(mock_meminfo.call_count, 0)
-
- # Test 2. Machine is reachable. Call explicitly.
- mock_isreachable.return_value = True
- cm.checksum_string = 'This is a checksum string.'
- cm.machine_id = 'machine_id1'
- cm.SetUpChecksumInfo()
- self.assertEqual(mock_isreachable.call_count, 2)
- self.assertEqual(mock_meminfo.call_count, 1)
- self.assertEqual(mock_cpuinfo.call_count, 1)
- self.assertEqual(mock_checkstring.call_count, 1)
- self.assertEqual(mock_machineid.call_count, 1)
- self.assertEqual(mock_md5sum.call_count, 2)
- self.assertEqual(cm.machine_checksum, 'md5_checksum')
- self.assertEqual(cm.machine_id_checksum, 'md5_checksum')
- self.assertEqual(mock_md5sum.call_args_list[0][0][0],
- 'This is a checksum string.')
- self.assertEqual(mock_md5sum.call_args_list[1][0][0], 'machine_id1')
-
- @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
- @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
- def test_is_reachable(self, mock_setup, mock_run_cmd):
-
- cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
- 'average', self.mock_cmd_exec)
- self.mock_cmd_exec.CrosRunCommand = mock_run_cmd
-
- # Test 1. CrosRunCommand returns 1 (fail)
- mock_run_cmd.return_value = 1
- result = cm.IsReachable()
- self.assertFalse(result)
- self.assertEqual(mock_setup.call_count, 1)
- self.assertEqual(mock_run_cmd.call_count, 1)
-
- # Test 2. CrosRunCommand returns 0 (success)
- mock_run_cmd.return_value = 0
- result = cm.IsReachable()
- self.assertTrue(result)
- self.assertEqual(mock_run_cmd.call_count, 2)
- first_args = mock_run_cmd.call_args_list[0]
- second_args = mock_run_cmd.call_args_list[1]
- self.assertEqual(first_args[0], second_args[0])
- self.assertEqual(first_args[1], second_args[1])
- self.assertEqual(len(first_args[0]), 1)
- self.assertEqual(len(first_args[1]), 2)
- self.assertEqual(first_args[0][0], 'ls')
- args_dict = first_args[1]
- self.assertEqual(args_dict['machine'], 'daisy.cros')
- self.assertEqual(args_dict['chromeos_root'], '/usr/local/chromeos')
-
- @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
- def test_parse_memory_info(self, _mock_setup):
- cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
- 'average', self.mock_cmd_exec)
- cm.meminfo = MEMINFO_STRING
- cm._ParseMemoryInfo()
- self.assertEqual(cm.phys_kbytes, 4194304)
-
- @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
- @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
- def test_get_memory_info(self, _mock_setup, mock_run_cmd):
- cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
- 'average', self.mock_cmd_exec)
- self.mock_cmd_exec.CrosRunCommandWOutput = mock_run_cmd
- mock_run_cmd.return_value = [0, MEMINFO_STRING, '']
- cm._GetMemoryInfo()
- self.assertEqual(mock_run_cmd.call_count, 1)
- call_args = mock_run_cmd.call_args_list[0]
- self.assertEqual(call_args[0][0], 'cat /proc/meminfo')
- args_dict = call_args[1]
- self.assertEqual(args_dict['machine'], 'daisy.cros')
- self.assertEqual(args_dict['chromeos_root'], '/usr/local/chromeos')
- self.assertEqual(cm.meminfo, MEMINFO_STRING)
- self.assertEqual(cm.phys_kbytes, 4194304)
-
- mock_run_cmd.return_value = [1, MEMINFO_STRING, '']
- self.assertRaises(Exception, cm._GetMemoryInfo)
-
- @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
- @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
- def test_get_cpu_info(self, _mock_setup, mock_run_cmd):
- cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
- 'average', self.mock_cmd_exec)
- self.mock_cmd_exec.CrosRunCommandWOutput = mock_run_cmd
- mock_run_cmd.return_value = [0, CPUINFO_STRING, '']
- cm._GetCPUInfo()
- self.assertEqual(mock_run_cmd.call_count, 1)
- call_args = mock_run_cmd.call_args_list[0]
- self.assertEqual(call_args[0][0], 'cat /proc/cpuinfo')
- args_dict = call_args[1]
- self.assertEqual(args_dict['machine'], 'daisy.cros')
- self.assertEqual(args_dict['chromeos_root'], '/usr/local/chromeos')
- self.assertEqual(cm.cpuinfo, CPUINFO_STRING)
-
- @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
- def test_compute_machine_checksum_string(self, _mock_setup):
- cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
- 'average', self.mock_cmd_exec)
- cm.cpuinfo = CPUINFO_STRING
- cm.meminfo = MEMINFO_STRING
- cm._ParseMemoryInfo()
- cm._ComputeMachineChecksumString()
- self.assertEqual(cm.checksum_string, CHECKSUM_STRING)
-
- @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
- def test_get_md5_checksum(self, _mock_setup):
- cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
- 'average', self.mock_cmd_exec)
- temp_str = 'abcde'
- checksum_str = cm._GetMD5Checksum(temp_str)
- self.assertEqual(checksum_str, 'ab56b4d92b40713acc5af89985d4b786')
-
- temp_str = ''
- checksum_str = cm._GetMD5Checksum(temp_str)
- self.assertEqual(checksum_str, '')
-
- @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
- @mock.patch.object(machine_manager.CrosMachine, 'SetUpChecksumInfo')
- def test_get_machine_id(self, _mock_setup, mock_run_cmd):
- cm = machine_manager.CrosMachine('daisy.cros', '/usr/local/chromeos',
- 'average', self.mock_cmd_exec)
- self.mock_cmd_exec.CrosRunCommandWOutput = mock_run_cmd
- mock_run_cmd.return_value = [0, DUMP_VPD_STRING, '']
-
- cm._GetMachineID()
- self.assertEqual(cm.machine_id, '"Product_S/N"="HT4L91SC300208"')
-
- mock_run_cmd.return_value = [0, IFCONFIG_STRING, '']
- cm._GetMachineID()
- self.assertEqual(
- cm.machine_id,
- ' ether 00:50:b6:63:db:65 txqueuelen 1000 (Ethernet)_ '
- 'ether e8:03:9a:9c:50:3d txqueuelen 1000 (Ethernet)_ ether '
- '44:6d:57:20:4a:c5 txqueuelen 1000 (Ethernet)')
-
- mock_run_cmd.return_value = [0, 'invalid hardware config', '']
- self.assertRaises(Exception, cm._GetMachineID)
-
- def test_add_cooldown_waittime(self):
- cm = machine_manager.CrosMachine('1.2.3.4.cros', '/usr/local/chromeos',
- 'average')
- self.assertEqual(cm.GetCooldownWaitTime(), 0)
- cm.AddCooldownWaitTime(250)
- self.assertEqual(cm.GetCooldownWaitTime(), 250)
- cm.AddCooldownWaitTime(1)
- self.assertEqual(cm.GetCooldownWaitTime(), 251)
-
-
-if __name__ == '__main__':
- unittest.main()
+ """Test for CrosMachine class."""
+
+ mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+
+ @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ def test_init(self, mock_setup):
+
+ cm = machine_manager.CrosMachine(
+ "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec
+ )
+ self.assertEqual(mock_setup.call_count, 1)
+ self.assertEqual(cm.chromeos_root, "/usr/local/chromeos")
+ self.assertEqual(cm.log_level, "average")
+
+ @mock.patch.object(machine_manager.CrosMachine, "IsReachable")
+ @mock.patch.object(machine_manager.CrosMachine, "_GetMemoryInfo")
+ @mock.patch.object(machine_manager.CrosMachine, "_GetCPUInfo")
+ @mock.patch.object(
+ machine_manager.CrosMachine, "_ComputeMachineChecksumString"
+ )
+ @mock.patch.object(machine_manager.CrosMachine, "_GetMachineID")
+ @mock.patch.object(machine_manager.CrosMachine, "_GetMD5Checksum")
+ def test_setup_checksum_info(
+ self,
+ mock_md5sum,
+ mock_machineid,
+ mock_checkstring,
+ mock_cpuinfo,
+ mock_meminfo,
+ mock_isreachable,
+ ):
+
+ # Test 1. Machine is not reachable; SetUpChecksumInfo is called via
+ # __init__.
+ mock_isreachable.return_value = False
+ mock_md5sum.return_value = "md5_checksum"
+ cm = machine_manager.CrosMachine(
+ "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec
+ )
+ cm.checksum_string = "This is a checksum string."
+ cm.machine_id = "machine_id1"
+ self.assertEqual(mock_isreachable.call_count, 1)
+ self.assertIsNone(cm.machine_checksum)
+ self.assertEqual(mock_meminfo.call_count, 0)
+
+ # Test 2. Machine is reachable. Call explicitly.
+ mock_isreachable.return_value = True
+ cm.checksum_string = "This is a checksum string."
+ cm.machine_id = "machine_id1"
+ cm.SetUpChecksumInfo()
+ self.assertEqual(mock_isreachable.call_count, 2)
+ self.assertEqual(mock_meminfo.call_count, 1)
+ self.assertEqual(mock_cpuinfo.call_count, 1)
+ self.assertEqual(mock_checkstring.call_count, 1)
+ self.assertEqual(mock_machineid.call_count, 1)
+ self.assertEqual(mock_md5sum.call_count, 2)
+ self.assertEqual(cm.machine_checksum, "md5_checksum")
+ self.assertEqual(cm.machine_id_checksum, "md5_checksum")
+ self.assertEqual(
+ mock_md5sum.call_args_list[0][0][0], "This is a checksum string."
+ )
+ self.assertEqual(mock_md5sum.call_args_list[1][0][0], "machine_id1")
+
+ @mock.patch.object(command_executer.CommandExecuter, "CrosRunCommand")
+ @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ def test_is_reachable(self, mock_setup, mock_run_cmd):
+
+ cm = machine_manager.CrosMachine(
+ "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec
+ )
+ self.mock_cmd_exec.CrosRunCommand = mock_run_cmd
+
+ # Test 1. CrosRunCommand returns 1 (fail)
+ mock_run_cmd.return_value = 1
+ result = cm.IsReachable()
+ self.assertFalse(result)
+ self.assertEqual(mock_setup.call_count, 1)
+ self.assertEqual(mock_run_cmd.call_count, 1)
+
+ # Test 2. CrosRunCommand returns 0 (success)
+ mock_run_cmd.return_value = 0
+ result = cm.IsReachable()
+ self.assertTrue(result)
+ self.assertEqual(mock_run_cmd.call_count, 2)
+ first_args = mock_run_cmd.call_args_list[0]
+ second_args = mock_run_cmd.call_args_list[1]
+ self.assertEqual(first_args[0], second_args[0])
+ self.assertEqual(first_args[1], second_args[1])
+ self.assertEqual(len(first_args[0]), 1)
+ self.assertEqual(len(first_args[1]), 2)
+ self.assertEqual(first_args[0][0], "ls")
+ args_dict = first_args[1]
+ self.assertEqual(args_dict["machine"], "daisy.cros")
+ self.assertEqual(args_dict["chromeos_root"], "/usr/local/chromeos")
+
+ @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ def test_parse_memory_info(self, _mock_setup):
+ cm = machine_manager.CrosMachine(
+ "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec
+ )
+ cm.meminfo = MEMINFO_STRING
+ cm._ParseMemoryInfo()
+ self.assertEqual(cm.phys_kbytes, 4194304)
+
+ @mock.patch.object(
+ command_executer.CommandExecuter, "CrosRunCommandWOutput"
+ )
+ @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ def test_get_memory_info(self, _mock_setup, mock_run_cmd):
+ cm = machine_manager.CrosMachine(
+ "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec
+ )
+ self.mock_cmd_exec.CrosRunCommandWOutput = mock_run_cmd
+ mock_run_cmd.return_value = [0, MEMINFO_STRING, ""]
+ cm._GetMemoryInfo()
+ self.assertEqual(mock_run_cmd.call_count, 1)
+ call_args = mock_run_cmd.call_args_list[0]
+ self.assertEqual(call_args[0][0], "cat /proc/meminfo")
+ args_dict = call_args[1]
+ self.assertEqual(args_dict["machine"], "daisy.cros")
+ self.assertEqual(args_dict["chromeos_root"], "/usr/local/chromeos")
+ self.assertEqual(cm.meminfo, MEMINFO_STRING)
+ self.assertEqual(cm.phys_kbytes, 4194304)
+
+ mock_run_cmd.return_value = [1, MEMINFO_STRING, ""]
+ self.assertRaises(Exception, cm._GetMemoryInfo)
+
+ @mock.patch.object(
+ command_executer.CommandExecuter, "CrosRunCommandWOutput"
+ )
+ @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ def test_get_cpu_info(self, _mock_setup, mock_run_cmd):
+ cm = machine_manager.CrosMachine(
+ "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec
+ )
+ self.mock_cmd_exec.CrosRunCommandWOutput = mock_run_cmd
+ mock_run_cmd.return_value = [0, CPUINFO_STRING, ""]
+ cm._GetCPUInfo()
+ self.assertEqual(mock_run_cmd.call_count, 1)
+ call_args = mock_run_cmd.call_args_list[0]
+ self.assertEqual(call_args[0][0], "cat /proc/cpuinfo")
+ args_dict = call_args[1]
+ self.assertEqual(args_dict["machine"], "daisy.cros")
+ self.assertEqual(args_dict["chromeos_root"], "/usr/local/chromeos")
+ self.assertEqual(cm.cpuinfo, CPUINFO_STRING)
+
+ @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ def test_compute_machine_checksum_string(self, _mock_setup):
+ cm = machine_manager.CrosMachine(
+ "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec
+ )
+ cm.cpuinfo = CPUINFO_STRING
+ cm.meminfo = MEMINFO_STRING
+ cm._ParseMemoryInfo()
+ cm._ComputeMachineChecksumString()
+ self.assertEqual(cm.checksum_string, CHECKSUM_STRING)
+
+ @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ def test_get_md5_checksum(self, _mock_setup):
+ cm = machine_manager.CrosMachine(
+ "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec
+ )
+ temp_str = "abcde"
+ checksum_str = cm._GetMD5Checksum(temp_str)
+ self.assertEqual(checksum_str, "ab56b4d92b40713acc5af89985d4b786")
+
+ temp_str = ""
+ checksum_str = cm._GetMD5Checksum(temp_str)
+ self.assertEqual(checksum_str, "")
+
+ @mock.patch.object(
+ command_executer.CommandExecuter, "CrosRunCommandWOutput"
+ )
+ @mock.patch.object(machine_manager.CrosMachine, "SetUpChecksumInfo")
+ def test_get_machine_id(self, _mock_setup, mock_run_cmd):
+ cm = machine_manager.CrosMachine(
+ "daisy.cros", "/usr/local/chromeos", "average", self.mock_cmd_exec
+ )
+ self.mock_cmd_exec.CrosRunCommandWOutput = mock_run_cmd
+ mock_run_cmd.return_value = [0, DUMP_VPD_STRING, ""]
+
+ cm._GetMachineID()
+ self.assertEqual(cm.machine_id, '"Product_S/N"="HT4L91SC300208"')
+
+ mock_run_cmd.return_value = [0, IFCONFIG_STRING, ""]
+ cm._GetMachineID()
+ self.assertEqual(
+ cm.machine_id,
+ " ether 00:50:b6:63:db:65 txqueuelen 1000 (Ethernet)_ "
+ "ether e8:03:9a:9c:50:3d txqueuelen 1000 (Ethernet)_ ether "
+ "44:6d:57:20:4a:c5 txqueuelen 1000 (Ethernet)",
+ )
+
+ mock_run_cmd.return_value = [0, "invalid hardware config", ""]
+ self.assertRaises(Exception, cm._GetMachineID)
+
+ def test_add_cooldown_waittime(self):
+ cm = machine_manager.CrosMachine(
+ "1.2.3.4.cros", "/usr/local/chromeos", "average"
+ )
+ self.assertEqual(cm.GetCooldownWaitTime(), 0)
+ cm.AddCooldownWaitTime(250)
+ self.assertEqual(cm.GetCooldownWaitTime(), 250)
+ cm.AddCooldownWaitTime(1)
+ self.assertEqual(cm.GetCooldownWaitTime(), 251)
+
+
+if __name__ == "__main__":
+ unittest.main()
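
The MD5 expectations asserted in test_get_md5_checksum above (for example, "abcde" hashing to ab56b4d92b40713acc5af89985d4b786, and the empty string mapping to "") follow from a plain MD5 hex digest over the machine description string. A minimal sketch of such a helper, assuming CrosMachine._GetMD5Checksum behaves this way; the helper name below is illustrative and not part of crosperf:

import hashlib


def get_md5_checksum(text):
    # Assumed behavior: empty input yields an empty string rather than the
    # digest of b"".
    if not text:
        return ""
    return hashlib.md5(text.encode("utf-8")).hexdigest()


assert get_md5_checksum("abcde") == "ab56b4d92b40713acc5af89985d4b786"
assert get_md5_checksum("") == ""
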
diff --git a/crosperf/mock_instance.py b/crosperf/mock_instance.py
index f44ed87c..4a3f9a72 100644
--- a/crosperf/mock_instance.py
+++ b/crosperf/mock_instance.py
@@ -1,153 +1,171 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This contains some mock instances for testing."""
-from __future__ import print_function
from benchmark import Benchmark
from label import MockLabel
-perf_args = 'record -a -e cycles'
+
+perf_args = "record -a -e cycles"
label1 = MockLabel(
- 'test1',
- 'build1',
- 'image1',
- 'autotest_dir',
- 'debug_dir',
- '/tmp/test_benchmark_run',
- 'x86-alex',
- 'chromeos-alex1',
- image_args='',
- cache_dir='',
+ "test1",
+ "build1",
+ "image1",
+ "autotest_dir",
+ "debug_dir",
+ "/tmp/test_benchmark_run",
+ "x86-alex",
+ "chromeos-alex1",
+ image_args="",
+ cache_dir="",
cache_only=False,
- log_level='average',
- compiler='gcc',
+ log_level="average",
+ compiler="gcc",
crosfleet=False,
- chrome_src=None)
+ chrome_src=None,
+)
label2 = MockLabel(
- 'test2',
- 'build2',
- 'image2',
- 'autotest_dir',
- 'debug_dir',
- '/tmp/test_benchmark_run_2',
- 'x86-alex',
- 'chromeos-alex2',
- image_args='',
- cache_dir='',
+ "test2",
+ "build2",
+ "image2",
+ "autotest_dir",
+ "debug_dir",
+ "/tmp/test_benchmark_run_2",
+ "x86-alex",
+ "chromeos-alex2",
+ image_args="",
+ cache_dir="",
cache_only=False,
- log_level='average',
- compiler='gcc',
+ log_level="average",
+ compiler="gcc",
crosfleet=False,
- chrome_src=None)
-
-benchmark1 = Benchmark('benchmark1', 'autotest_name_1', 'autotest_args', 2, '',
- perf_args, 'telemetry_Crosperf', '')
-
-benchmark2 = Benchmark('benchmark2', 'autotest_name_2', 'autotest_args', 2, '',
- perf_args, 'telemetry_Crosperf', '')
+ chrome_src=None,
+)
+
+benchmark1 = Benchmark(
+ "benchmark1",
+ "autotest_name_1",
+ "autotest_args",
+ 2,
+ "",
+ perf_args,
+ "telemetry_Crosperf",
+ "",
+)
+
+benchmark2 = Benchmark(
+ "benchmark2",
+ "autotest_name_2",
+ "autotest_args",
+ 2,
+ "",
+ perf_args,
+ "telemetry_Crosperf",
+ "",
+)
keyval = {}
keyval[0] = {
- '': 'PASS',
- 'milliseconds_1': '1',
- 'milliseconds_2': '8',
- 'milliseconds_3': '9.2',
- 'test{1}': '2',
- 'test{2}': '4',
- 'ms_1': '2.1',
- 'total': '5',
- 'bool': 'True'
+ "": "PASS",
+ "milliseconds_1": "1",
+ "milliseconds_2": "8",
+ "milliseconds_3": "9.2",
+ "test{1}": "2",
+ "test{2}": "4",
+ "ms_1": "2.1",
+ "total": "5",
+ "bool": "True",
}
keyval[1] = {
- '': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_2': '5',
- 'ms_1': '2.2',
- 'total': '6',
- 'test{1}': '3',
- 'test{2}': '4',
- 'bool': 'FALSE'
+ "": "PASS",
+ "milliseconds_1": "3",
+ "milliseconds_2": "5",
+ "ms_1": "2.2",
+ "total": "6",
+ "test{1}": "3",
+ "test{2}": "4",
+ "bool": "FALSE",
}
keyval[2] = {
- '': 'PASS',
- 'milliseconds_4': '30',
- 'milliseconds_5': '50',
- 'ms_1': '2.23',
- 'total': '6',
- 'test{1}': '5',
- 'test{2}': '4',
- 'bool': 'FALSE'
+ "": "PASS",
+ "milliseconds_4": "30",
+ "milliseconds_5": "50",
+ "ms_1": "2.23",
+ "total": "6",
+ "test{1}": "5",
+ "test{2}": "4",
+ "bool": "FALSE",
}
keyval[3] = {
- '': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_6': '7',
- 'ms_1': '2.3',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '6',
- 'bool': 'FALSE'
+ "": "PASS",
+ "milliseconds_1": "3",
+ "milliseconds_6": "7",
+ "ms_1": "2.3",
+ "total": "7",
+ "test{1}": "2",
+ "test{2}": "6",
+ "bool": "FALSE",
}
keyval[4] = {
- '': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2.3',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '6',
- 'bool': 'TRUE'
+ "": "PASS",
+ "milliseconds_1": "3",
+ "milliseconds_8": "6",
+ "ms_1": "2.3",
+ "total": "7",
+ "test{1}": "2",
+ "test{2}": "6",
+ "bool": "TRUE",
}
keyval[5] = {
- '': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2.2',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '2',
- 'bool': 'TRUE'
+ "": "PASS",
+ "milliseconds_1": "3",
+ "milliseconds_8": "6",
+ "ms_1": "2.2",
+ "total": "7",
+ "test{1}": "2",
+ "test{2}": "2",
+ "bool": "TRUE",
}
keyval[6] = {
- '': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '4',
- 'bool': 'TRUE'
+ "": "PASS",
+ "milliseconds_1": "3",
+ "milliseconds_8": "6",
+ "ms_1": "2",
+ "total": "7",
+ "test{1}": "2",
+ "test{2}": "4",
+ "bool": "TRUE",
}
keyval[7] = {
- '': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '1',
- 'total': '7',
- 'test{1}': '1',
- 'test{2}': '6',
- 'bool': 'TRUE'
+ "": "PASS",
+ "milliseconds_1": "3",
+ "milliseconds_8": "6",
+ "ms_1": "1",
+ "total": "7",
+ "test{1}": "1",
+ "test{2}": "6",
+ "bool": "TRUE",
}
keyval[8] = {
- '': 'PASS',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '3.3',
- 'total': '7',
- 'test{1}': '2',
- 'test{2}': '8',
- 'bool': 'TRUE'
+ "": "PASS",
+ "milliseconds_1": "3",
+ "milliseconds_8": "6",
+ "ms_1": "3.3",
+ "total": "7",
+ "test{1}": "2",
+ "test{2}": "8",
+ "bool": "TRUE",
}
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index 5525858c..043da990 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -1,12 +1,10 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module to deal with result cache."""
-from __future__ import division
-from __future__ import print_function
import collections
import glob
@@ -20,642 +18,729 @@ import tempfile
from cros_utils import command_executer
from cros_utils import misc
-
from image_checksummer import ImageChecksummer
-
import results_report
import test_flag
-SCRATCH_DIR = os.path.expanduser('~/cros_scratch')
-RESULTS_FILE = 'results.pickle'
-MACHINE_FILE = 'machine.txt'
-AUTOTEST_TARBALL = 'autotest.tbz2'
-RESULTS_TARBALL = 'results.tbz2'
-PERF_RESULTS_FILE = 'perf-results.txt'
-CACHE_KEYS_FILE = 'cache_keys.txt'
+
+SCRATCH_DIR = os.path.expanduser("~/cros_scratch")
+RESULTS_FILE = "results.pickle"
+MACHINE_FILE = "machine.txt"
+AUTOTEST_TARBALL = "autotest.tbz2"
+RESULTS_TARBALL = "results.tbz2"
+PERF_RESULTS_FILE = "perf-results.txt"
+CACHE_KEYS_FILE = "cache_keys.txt"
class PidVerificationError(Exception):
- """Error of perf PID verification in per-process mode."""
+ """Error of perf PID verification in per-process mode."""
class PerfDataReadError(Exception):
- """Error of reading a perf.data header."""
+ """Error of reading a perf.data header."""
class Result(object):
- """Class for holding the results of a single test run.
-
- This class manages what exactly is stored inside the cache without knowing
- what the key of the cache is. For runs with perf, it stores perf.data,
- perf.report, etc. The key generation is handled by the ResultsCache class.
- """
-
- def __init__(self, logger, label, log_level, machine, cmd_exec=None):
- self.chromeos_root = label.chromeos_root
- self._logger = logger
- self.ce = cmd_exec or command_executer.GetCommandExecuter(
- self._logger, log_level=log_level)
- self.temp_dir = None
- self.label = label
- self.results_dir = None
- self.log_level = log_level
- self.machine = machine
- self.perf_data_files = []
- self.perf_report_files = []
- self.results_file = []
- self.turbostat_log_file = ''
- self.cpustats_log_file = ''
- self.cpuinfo_file = ''
- self.top_log_file = ''
- self.wait_time_log_file = ''
- self.chrome_version = ''
- self.err = None
- self.chroot_results_dir = ''
- self.test_name = ''
- self.keyvals = None
- self.board = None
- self.suite = None
- self.cwp_dso = ''
- self.retval = None
- self.out = None
- self.top_cmds = []
-
- def GetTopCmds(self):
- """Get the list of top commands consuming CPU on the machine."""
- return self.top_cmds
-
- def FormatStringTopCommands(self):
- """Get formatted string of top commands.
-
- Get the formatted string with top commands consuming CPU on DUT machine.
- Number of "non-chrome" processes in the list is limited to 5.
- """
- format_list = [
- 'Top commands with highest CPU usage:',
- # Header.
- '%20s %9s %6s %s' % ('COMMAND', 'AVG CPU%', 'COUNT', 'HIGHEST 5'),
- '-' * 50,
- ]
- if self.top_cmds:
- # After switching to top processes we have to expand the list since there
- # will be a lot of 'chrome' processes (up to 10, sometimes more) in the
- # top.
- # Let's limit the list size by the number of non-chrome processes.
- limit_of_non_chrome_procs = 5
- num_of_non_chrome_procs = 0
- for topcmd in self.top_cmds:
- print_line = '%20s %9.2f %6s %s' % (
- topcmd['cmd'], topcmd['cpu_use_avg'], topcmd['count'],
- topcmd['top5_cpu_use'])
- format_list.append(print_line)
- if not topcmd['cmd'].startswith('chrome'):
- num_of_non_chrome_procs += 1
- if num_of_non_chrome_procs >= limit_of_non_chrome_procs:
- break
- else:
- format_list.append('[NO DATA FROM THE TOP LOG]')
- format_list.append('-' * 50)
- return '\n'.join(format_list)
-
- def CopyFilesTo(self, dest_dir, files_to_copy):
- file_index = 0
- for file_to_copy in files_to_copy:
- if not os.path.isdir(dest_dir):
- command = 'mkdir -p %s' % dest_dir
- self.ce.RunCommand(command)
- dest_file = os.path.join(
- dest_dir, ('%s.%s' % (os.path.basename(file_to_copy), file_index)))
- ret = self.ce.CopyFiles(file_to_copy, dest_file, recursive=False)
- if ret:
- raise IOError('Could not copy results file: %s' % file_to_copy)
- file_index += 1
-
- def CopyResultsTo(self, dest_dir):
- self.CopyFilesTo(dest_dir, self.results_file)
- self.CopyFilesTo(dest_dir, self.perf_data_files)
- self.CopyFilesTo(dest_dir, self.perf_report_files)
- extra_files = []
- if self.top_log_file:
- extra_files.append(self.top_log_file)
- if self.cpuinfo_file:
- extra_files.append(self.cpuinfo_file)
- if extra_files:
- self.CopyFilesTo(dest_dir, extra_files)
- if self.results_file or self.perf_data_files or self.perf_report_files:
- self._logger.LogOutput('Results files stored in %s.' % dest_dir)
-
- def CompressResultsTo(self, dest_dir):
- tarball = os.path.join(self.results_dir, RESULTS_TARBALL)
- # Test_that runs hold all output under TEST_NAME_HASHTAG/results/,
- # while tast runs hold output under TEST_NAME/.
- # Both ensure to be unique.
- result_dir_name = self.test_name if self.suite == 'tast' else 'results'
- results_dir = self.FindFilesInResultsDir('-name %s' %
- result_dir_name).split('\n')[0]
-
- if not results_dir:
- self._logger.LogOutput('WARNING: No results dir matching %r found' %
- result_dir_name)
- return
-
- self.CreateTarball(results_dir, tarball)
- self.CopyFilesTo(dest_dir, [tarball])
- if results_dir:
- self._logger.LogOutput('Results files compressed into %s.' % dest_dir)
-
- def GetNewKeyvals(self, keyvals_dict):
- # Initialize 'units' dictionary.
- units_dict = {}
- for k in keyvals_dict:
- units_dict[k] = ''
- results_files = self.GetDataMeasurementsFiles()
- for f in results_files:
- # Make sure we can find the results file
- if os.path.exists(f):
- data_filename = f
- else:
- # Otherwise get the base filename and create the correct
- # path for it.
- _, f_base = misc.GetRoot(f)
- data_filename = os.path.join(self.chromeos_root, 'chroot/tmp',
- self.temp_dir, f_base)
- if data_filename.find('.json') > 0:
- raw_dict = dict()
- if os.path.exists(data_filename):
- with open(data_filename, 'r') as data_file:
- raw_dict = json.load(data_file)
-
- if 'charts' in raw_dict:
- raw_dict = raw_dict['charts']
- for k1 in raw_dict:
- field_dict = raw_dict[k1]
- for k2 in field_dict:
- result_dict = field_dict[k2]
- key = k1 + '__' + k2
- if 'value' in result_dict:
- keyvals_dict[key] = result_dict['value']
- elif 'values' in result_dict:
- values = result_dict['values']
- if ('type' in result_dict
- and result_dict['type'] == 'list_of_scalar_values' and values
- and values != 'null'):
- keyvals_dict[key] = sum(values) / float(len(values))
- else:
- keyvals_dict[key] = values
- units_dict[key] = result_dict['units']
- else:
- if os.path.exists(data_filename):
- with open(data_filename, 'r') as data_file:
- lines = data_file.readlines()
- for line in lines:
- tmp_dict = json.loads(line)
- graph_name = tmp_dict['graph']
- graph_str = (graph_name + '__') if graph_name else ''
- key = graph_str + tmp_dict['description']
- keyvals_dict[key] = tmp_dict['value']
- units_dict[key] = tmp_dict['units']
-
- return keyvals_dict, units_dict
-
- def AppendTelemetryUnits(self, keyvals_dict, units_dict):
- """keyvals_dict is the dict of key-value used to generate Crosperf reports.
-
- units_dict is a dictionary of the units for the return values in
- keyvals_dict. We need to associate the units with the return values,
- for Telemetry tests, so that we can include the units in the reports.
- This function takes each value in keyvals_dict, finds the corresponding
- unit in the units_dict, and replaces the old value with a list of the
- old value and the units. This later gets properly parsed in the
- ResultOrganizer class, for generating the reports.
- """
+ """Class for holding the results of a single test run.
- results_dict = {}
- for k in keyvals_dict:
- # We don't want these lines in our reports; they add no useful data.
- if not k or k == 'telemetry_Crosperf':
- continue
- val = keyvals_dict[k]
- units = units_dict[k]
- new_val = [val, units]
- results_dict[k] = new_val
- return results_dict
-
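# Illustrative sketch (not part of the module): the unit-pairing step that the
# AppendTelemetryUnits docstring above describes turns each keyval into a
# [value, units] pair, skipping empty keys and the 'telemetry_Crosperf' entry.
def append_units(keyvals_dict, units_dict):
    results = {}
    for key, val in keyvals_dict.items():
        if not key or key == "telemetry_Crosperf":
            continue
        results[key] = [val, units_dict.get(key, "")]
    return results


# append_units({"sub__metric": 12.5}, {"sub__metric": "ms"})
# -> {"sub__metric": [12.5, "ms"]}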
- def GetKeyvals(self):
- results_in_chroot = os.path.join(self.chromeos_root, 'chroot', 'tmp')
- if not self.temp_dir:
- self.temp_dir = tempfile.mkdtemp(dir=results_in_chroot)
- command = f'cp -r {self.results_dir}/* {self.temp_dir}'
- self.ce.RunCommand(command, print_to_console=False)
-
- command = ('./generate_test_report --no-color --csv %s' %
- (os.path.join('/tmp', os.path.basename(self.temp_dir))))
- _, out, _ = self.ce.ChrootRunCommandWOutput(self.chromeos_root,
- command,
- print_to_console=False)
- keyvals_dict = {}
- tmp_dir_in_chroot = misc.GetInsideChrootPath(self.chromeos_root,
- self.temp_dir)
- for line in out.splitlines():
- tokens = re.split('=|,', line)
- key = tokens[-2]
- if key.startswith(tmp_dir_in_chroot):
- key = key[len(tmp_dir_in_chroot) + 1:]
- value = tokens[-1]
- keyvals_dict[key] = value
-
- # Check to see if there is a perf_measurements file and get the
- # data from it if so.
- keyvals_dict, units_dict = self.GetNewKeyvals(keyvals_dict)
- if self.suite == 'telemetry_Crosperf':
- # For telemtry_Crosperf results, append the units to the return
- # results, for use in generating the reports.
- keyvals_dict = self.AppendTelemetryUnits(keyvals_dict, units_dict)
- return keyvals_dict
-
- def GetSamples(self):
- actual_samples = 0
- for perf_data_file in self.perf_data_files:
- chroot_perf_data_file = misc.GetInsideChrootPath(self.chromeos_root,
- perf_data_file)
- perf_path = os.path.join(self.chromeos_root, 'chroot', 'usr/bin/perf')
- perf_file = '/usr/sbin/perf'
- if os.path.exists(perf_path):
- perf_file = '/usr/bin/perf'
-
- # For each perf.data, we want to collect sample count for specific DSO.
- # We specify exact match for known DSO type, and every sample for `all`.
- exact_match = ''
- if self.cwp_dso == 'all':
- exact_match = '""'
- elif self.cwp_dso == 'chrome':
- exact_match = '" chrome "'
- elif self.cwp_dso == 'kallsyms':
- exact_match = '"[kernel.kallsyms]"'
- else:
- # This will need to be updated once there are more DSO types supported,
- # if user want an exact match for the field they want.
- exact_match = '"%s"' % self.cwp_dso
-
- command = ('%s report -n -s dso -i %s 2> /dev/null | grep %s' %
- (perf_file, chroot_perf_data_file, exact_match))
- _, result, _ = self.ce.ChrootRunCommandWOutput(self.chromeos_root,
- command)
- # Accumulate the sample count for all matched fields.
- # Each line looks like this:
- # 45.42% 237210 chrome
- # And we want the second number which is the sample count.
- samples = 0
- try:
- for line in result.split('\n'):
- attr = line.split()
- if len(attr) == 3 and '%' in attr[0]:
- samples += int(attr[1])
- except:
- raise RuntimeError('Cannot parse perf dso result')
-
- actual_samples += samples
-
- # Remove idle cycles from the accumulated sample count.
- perf_report_file = f'{perf_data_file}.report'
- if not os.path.exists(perf_report_file):
- raise RuntimeError(f'Missing perf report file: {perf_report_file}')
-
- idle_functions = {
- '[kernel.kallsyms]':
- ('intel_idle', 'arch_cpu_idle', 'intel_idle', 'cpu_startup_entry',
- 'default_idle', 'cpu_idle_loop', 'do_idle'),
- }
- idle_samples = 0
-
- with open(perf_report_file) as f:
- try:
- for line in f:
- line = line.strip()
- if not line or line[0] == '#':
- continue
- # Each line has the following fields,
- # pylint: disable=line-too-long
- # Overhead Samples Command Shared Object Symbol
- # pylint: disable=line-too-long
- # 1.48% 60 swapper [kernel.kallsyms] [k] intel_idle
- # pylint: disable=line-too-long
- # 0.00% 1 shill libshill-net.so [.] std::__1::vector<unsigned char, std::__1::allocator<unsigned char> >::vector<unsigned char const*>
- _, samples, _, dso, _, function = line.split(None, 5)
-
- if dso in idle_functions and function in idle_functions[dso]:
- if self.log_level != 'verbose':
- self._logger.LogOutput('Removing %s samples from %s in %s' %
- (samples, function, dso))
- idle_samples += int(samples)
- except:
- raise RuntimeError('Cannot parse perf report')
- actual_samples -= idle_samples
- return [actual_samples, u'samples']
-
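# Illustrative sketch (not part of the module): the per-DSO sample counting in
# GetSamples above sums the second field of each matching line emitted by
# `perf report -n -s dso`, e.g. "45.42%  237210  chrome".
def sum_dso_samples(report_text):
    samples = 0
    for line in report_text.split("\n"):
        attr = line.split()
        if len(attr) == 3 and "%" in attr[0]:
            samples += int(attr[1])
    return samples


# sum_dso_samples("45.42%  237210  chrome\n10.01%  52300  chrome") -> 289510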
- def GetResultsDir(self):
- if self.suite == 'tast':
- mo = re.search(r'Writing results to (\S+)', self.out)
- else:
- mo = re.search(r'Results placed in (\S+)', self.out)
- if mo:
- result = mo.group(1)
- return result
- raise RuntimeError('Could not find results directory.')
-
- def FindFilesInResultsDir(self, find_args):
- if not self.results_dir:
- return ''
-
- command = 'find %s %s' % (self.results_dir, find_args)
- ret, out, _ = self.ce.RunCommandWOutput(command, print_to_console=False)
- if ret:
- raise RuntimeError('Could not run find command!')
- return out
-
- def GetResultsFile(self):
- if self.suite == 'telemetry_Crosperf':
- return self.FindFilesInResultsDir('-name histograms.json').splitlines()
- return self.FindFilesInResultsDir('-name results-chart.json').splitlines()
-
- def GetPerfDataFiles(self):
- return self.FindFilesInResultsDir('-name perf.data').splitlines()
-
- def GetPerfReportFiles(self):
- return self.FindFilesInResultsDir('-name perf.data.report').splitlines()
-
- def GetDataMeasurementsFiles(self):
- result = self.FindFilesInResultsDir('-name perf_measurements').splitlines()
- if not result:
- if self.suite == 'telemetry_Crosperf':
- result = (
- self.FindFilesInResultsDir('-name histograms.json').splitlines())
- else:
- result = (self.FindFilesInResultsDir(
- '-name results-chart.json').splitlines())
- return result
-
- def GetTurbostatFile(self):
- """Get turbostat log path string."""
- return self.FindFilesInResultsDir('-name turbostat.log').split('\n')[0]
-
- def GetCpustatsFile(self):
- """Get cpustats log path string."""
- return self.FindFilesInResultsDir('-name cpustats.log').split('\n')[0]
-
- def GetCpuinfoFile(self):
- """Get cpustats log path string."""
- return self.FindFilesInResultsDir('-name cpuinfo.log').split('\n')[0]
-
- def GetTopFile(self):
- """Get cpustats log path string."""
- return self.FindFilesInResultsDir('-name top.log').split('\n')[0]
-
- def GetWaitTimeFile(self):
- """Get wait time log path string."""
- return self.FindFilesInResultsDir('-name wait_time.log').split('\n')[0]
-
- def _CheckDebugPath(self, option, path):
- relative_path = path[1:]
- out_chroot_path = os.path.join(self.chromeos_root, 'chroot', relative_path)
- if os.path.exists(out_chroot_path):
- if option == 'kallsyms':
- path = os.path.join(path, 'System.map-*')
- return '--' + option + ' ' + path
- else:
- print('** WARNING **: --%s option not applied, %s does not exist' %
- (option, out_chroot_path))
- return ''
-
- def GeneratePerfReportFiles(self):
- perf_report_files = []
- for perf_data_file in self.perf_data_files:
- # Generate a perf.report and store it side-by-side with the perf.data
- # file.
- chroot_perf_data_file = misc.GetInsideChrootPath(self.chromeos_root,
- perf_data_file)
- perf_report_file = '%s.report' % perf_data_file
- if os.path.exists(perf_report_file):
- raise RuntimeError('Perf report file already exists: %s' %
- perf_report_file)
- chroot_perf_report_file = misc.GetInsideChrootPath(
- self.chromeos_root, perf_report_file)
- perf_path = os.path.join(self.chromeos_root, 'chroot', 'usr/bin/perf')
-
- perf_file = '/usr/sbin/perf'
- if os.path.exists(perf_path):
- perf_file = '/usr/bin/perf'
-
- debug_path = self.label.debug_path
-
- if debug_path:
- symfs = '--symfs ' + debug_path
- vmlinux = '--vmlinux ' + os.path.join(debug_path, 'usr', 'lib',
- 'debug', 'boot', 'vmlinux')
- kallsyms = ''
- print('** WARNING **: --kallsyms option not applied, no System.map-* '
- 'for downloaded image.')
- else:
- if self.label.image_type != 'local':
- print('** WARNING **: Using local debug info in /build, this may '
- 'not match the downloaded image.')
- build_path = os.path.join('/build', self.board)
- symfs = self._CheckDebugPath('symfs', build_path)
- vmlinux_path = os.path.join(build_path, 'usr/lib/debug/boot/vmlinux')
- vmlinux = self._CheckDebugPath('vmlinux', vmlinux_path)
- kallsyms_path = os.path.join(build_path, 'boot')
- kallsyms = self._CheckDebugPath('kallsyms', kallsyms_path)
-
- command = ('%s report -n %s %s %s -i %s --stdio > %s' %
- (perf_file, symfs, vmlinux, kallsyms, chroot_perf_data_file,
- chroot_perf_report_file))
- if self.log_level != 'verbose':
- self._logger.LogOutput('Generating perf report...\nCMD: %s' % command)
- exit_code = self.ce.ChrootRunCommand(self.chromeos_root, command)
- if exit_code == 0:
- if self.log_level != 'verbose':
- self._logger.LogOutput('Perf report generated successfully.')
- else:
- raise RuntimeError('Perf report not generated correctly. CMD: %s' %
- command)
-
- # Add a keyval to the dictionary for the events captured.
- perf_report_files.append(
- misc.GetOutsideChrootPath(self.chromeos_root,
- chroot_perf_report_file))
- return perf_report_files
-
- def GatherPerfResults(self):
- report_id = 0
- for perf_report_file in self.perf_report_files:
- with open(perf_report_file, 'r') as f:
- report_contents = f.read()
- for group in re.findall(r'Events: (\S+) (\S+)', report_contents):
- num_events = group[0]
- event_name = group[1]
- key = 'perf_%s_%s' % (report_id, event_name)
- value = str(misc.UnitToNumber(num_events))
- self.keyvals[key] = value
-
- def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso):
- self.board = self.label.board
- self.out = out
- self.err = err
- self.retval = retval
- self.test_name = test
- self.suite = suite
- self.cwp_dso = cwp_dso
- self.chroot_results_dir = self.GetResultsDir()
- self.results_dir = misc.GetOutsideChrootPath(self.chromeos_root,
- self.chroot_results_dir)
- self.results_file = self.GetResultsFile()
- self.perf_data_files = self.GetPerfDataFiles()
- # Include all perf.report data in table.
- self.perf_report_files = self.GeneratePerfReportFiles()
- self.turbostat_log_file = self.GetTurbostatFile()
- self.cpustats_log_file = self.GetCpustatsFile()
- self.cpuinfo_file = self.GetCpuinfoFile()
- self.top_log_file = self.GetTopFile()
- self.wait_time_log_file = self.GetWaitTimeFile()
- # TODO(asharif): Do something similar with perf stat.
-
- # Grab keyvals from the directory.
- self.ProcessResults()
-
- def ProcessChartResults(self):
- # Open and parse the json results file generated by telemetry/test_that.
- if not self.results_file:
- raise IOError('No results file found.')
- filename = self.results_file[0]
- if not filename.endswith('.json'):
- raise IOError('Attempt to call json on non-json file: %s' % filename)
- if not os.path.exists(filename):
- raise IOError('%s does not exist' % filename)
-
- keyvals = {}
- with open(filename, 'r') as f:
- raw_dict = json.load(f)
- if 'charts' in raw_dict:
- raw_dict = raw_dict['charts']
- for k, field_dict in raw_dict.items():
- for item in field_dict:
- keyname = k + '__' + item
- value_dict = field_dict[item]
- if 'value' in value_dict:
- result = value_dict['value']
- elif 'values' in value_dict:
- values = value_dict['values']
- if not values:
- continue
- if ('type' in value_dict
- and value_dict['type'] == 'list_of_scalar_values'
- and values != 'null'):
- result = sum(values) / float(len(values))
- else:
- result = values
- else:
- continue
- units = value_dict['units']
- new_value = [result, units]
- keyvals[keyname] = new_value
- return keyvals
-
- def ProcessTurbostatResults(self):
- """Given turbostat_log_file non-null parse cpu stats from file.
-
- Returns:
- Dictionary of 'cpufreq', 'cputemp' where each
- includes dictionary 'all': [list_of_values]
-
- Example of the output of turbostat_log.
- ----------------------
- CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
- - 329 12.13 2723 2393 10975 77
- 0 336 12.41 2715 2393 6328 77
- 2 323 11.86 2731 2393 4647 69
- CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
- - 1940 67.46 2884 2393 39920 83
- 0 1827 63.70 2877 2393 21184 83
- """
- cpustats = {}
- read_data = ''
- with open(self.turbostat_log_file) as f:
- read_data = f.readlines()
-
- if not read_data:
- self._logger.LogOutput('WARNING: Turbostat output file is empty.')
- return {}
-
- # First line always contains the header.
- stats = read_data[0].split()
-
- # Mandatory parameters.
- if 'CPU' not in stats:
- self._logger.LogOutput(
- 'WARNING: Missing data for CPU# in Turbostat output.')
- return {}
- if 'Bzy_MHz' not in stats:
- self._logger.LogOutput(
- 'WARNING: Missing data for Bzy_MHz in Turbostat output.')
- return {}
- cpu_index = stats.index('CPU')
- cpufreq_index = stats.index('Bzy_MHz')
- cpufreq = cpustats.setdefault('cpufreq', {'all': []})
-
- # Optional parameters.
- cputemp_index = -1
- if 'CoreTmp' in stats:
- cputemp_index = stats.index('CoreTmp')
- cputemp = cpustats.setdefault('cputemp', {'all': []})
-
- # Parse data starting from the second line ignoring repeating headers.
- for st in read_data[1:]:
- # Data represented by int or float separated by spaces.
- numbers = st.split()
- if not all(word.replace('.', '', 1).isdigit() for word in numbers[1:]):
- # Skip the line if data mismatch.
- continue
- if numbers[cpu_index] != '-':
- # Ignore Core-specific statistics which starts with Core number.
- # Combined statistics for all core has "-" CPU identifier.
- continue
-
- cpufreq['all'].append(int(numbers[cpufreq_index]))
- if cputemp_index != -1:
- cputemp['all'].append(int(numbers[cputemp_index]))
- return cpustats
-
- def ProcessTopResults(self):
- """Given self.top_log_file process top log data.
-
- Returns:
- List of dictionaries with the following keyvals:
- 'cmd': command name (string),
- 'cpu_use_avg': average cpu usage (float),
- 'count': number of occurrences (int),
- 'top5_cpu_use': up to 5 highest cpu usages (descending list of floats)
-
- Example of the top log:
- PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
- 4102 chronos 12 -8 3454472 238300 118188 R 41.8 6.1 0:08.37 chrome
- 375 root 0 -20 0 0 0 S 5.9 0.0 0:00.17 kworker
- 617 syslog 20 0 25332 8372 7888 S 5.9 0.2 0:00.77 systemd
-
- PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
- 5745 chronos 20 0 5438580 139328 67988 R 122.8 3.6 0:04.26 chrome
- 912 root -51 0 0 0 0 S 2.0 0.0 0:01.04 irq/cro
- 121 root 20 0 0 0 0 S 1.0 0.0 0:00.45 spi5
+ This class manages what exactly is stored inside the cache without knowing
+ what the key of the cache is. For runs with perf, it stores perf.data,
+ perf.report, etc. The key generation is handled by the ResultsCache class.
"""
- all_data = ''
- with open(self.top_log_file) as f:
- all_data = f.read()
- if not all_data:
- self._logger.LogOutput('WARNING: Top log file is empty.')
- return []
-
- top_line_regex = re.compile(
- r"""
+ def __init__(self, logger, label, log_level, machine, cmd_exec=None):
+ self.chromeos_root = label.chromeos_root
+ self._logger = logger
+ self.ce = cmd_exec or command_executer.GetCommandExecuter(
+ self._logger, log_level=log_level
+ )
+ self.temp_dir = None
+ self.label = label
+ self.results_dir = None
+ self.log_level = log_level
+ self.machine = machine
+ self.perf_data_files = []
+ self.perf_report_files = []
+ self.results_file = []
+ self.turbostat_log_file = ""
+ self.cpustats_log_file = ""
+ self.cpuinfo_file = ""
+ self.top_log_file = ""
+ self.wait_time_log_file = ""
+ self.chrome_version = ""
+ self.err = None
+ self.chroot_results_dir = ""
+ self.test_name = ""
+ self.keyvals = None
+ self.board = None
+ self.suite = None
+ self.cwp_dso = ""
+ self.retval = None
+ self.out = None
+ self.top_cmds = []
+
+ def GetTopCmds(self):
+ """Get the list of top commands consuming CPU on the machine."""
+ return self.top_cmds
+
+ def FormatStringTopCommands(self):
+ """Get formatted string of top commands.
+
+ Get the formatted string with top commands consuming CPU on the DUT
+ machine. The number of "non-chrome" processes in the list is limited to 5.
+ """
+ format_list = [
+ "Top commands with highest CPU usage:",
+ # Header.
+ "%20s %9s %6s %s" % ("COMMAND", "AVG CPU%", "COUNT", "HIGHEST 5"),
+ "-" * 50,
+ ]
+ if self.top_cmds:
+ # After switching to per-process top statistics there will be many
+ # 'chrome' processes (up to 10, sometimes more) at the top of the list,
+ # so we limit the list size by the number of non-chrome processes.
+ limit_of_non_chrome_procs = 5
+ num_of_non_chrome_procs = 0
+ for topcmd in self.top_cmds:
+ print_line = "%20s %9.2f %6s %s" % (
+ topcmd["cmd"],
+ topcmd["cpu_use_avg"],
+ topcmd["count"],
+ topcmd["top5_cpu_use"],
+ )
+ format_list.append(print_line)
+ if not topcmd["cmd"].startswith("chrome"):
+ num_of_non_chrome_procs += 1
+ if num_of_non_chrome_procs >= limit_of_non_chrome_procs:
+ break
+ else:
+ format_list.append("[NO DATA FROM THE TOP LOG]")
+ format_list.append("-" * 50)
+ return "\n".join(format_list)
+
+ def CopyFilesTo(self, dest_dir, files_to_copy):
+ file_index = 0
+ for file_to_copy in files_to_copy:
+ if not os.path.isdir(dest_dir):
+ command = "mkdir -p %s" % dest_dir
+ self.ce.RunCommand(command)
+ dest_file = os.path.join(
+ dest_dir,
+ ("%s.%s" % (os.path.basename(file_to_copy), file_index)),
+ )
+ ret = self.ce.CopyFiles(file_to_copy, dest_file, recursive=False)
+ if ret:
+ raise IOError("Could not copy results file: %s" % file_to_copy)
+ file_index += 1
+
+ def CopyResultsTo(self, dest_dir):
+ self.CopyFilesTo(dest_dir, self.results_file)
+ self.CopyFilesTo(dest_dir, self.perf_data_files)
+ self.CopyFilesTo(dest_dir, self.perf_report_files)
+ extra_files = []
+ if self.top_log_file:
+ extra_files.append(self.top_log_file)
+ if self.cpuinfo_file:
+ extra_files.append(self.cpuinfo_file)
+ if extra_files:
+ self.CopyFilesTo(dest_dir, extra_files)
+ if self.results_file or self.perf_data_files or self.perf_report_files:
+ self._logger.LogOutput("Results files stored in %s." % dest_dir)
+
+ def CompressResultsTo(self, dest_dir):
+ tarball = os.path.join(self.results_dir, RESULTS_TARBALL)
+ # Test_that runs hold all output under TEST_NAME_HASHTAG/results/,
+ # while tast runs hold output under TEST_NAME/.
+ # Both are guaranteed to be unique.
+ result_dir_name = self.test_name if self.suite == "tast" else "results"
+ results_dir = self.FindFilesInResultsDir(
+ "-name %s" % result_dir_name
+ ).split("\n")[0]
+
+ if not results_dir:
+ self._logger.LogOutput(
+ "WARNING: No results dir matching %r found" % result_dir_name
+ )
+ return
+
+ self.CreateTarball(results_dir, tarball)
+ self.CopyFilesTo(dest_dir, [tarball])
+ if results_dir:
+ self._logger.LogOutput(
+ "Results files compressed into %s." % dest_dir
+ )
+
+ def GetNewKeyvals(self, keyvals_dict):
+ # Initialize 'units' dictionary.
+ units_dict = {}
+ for k in keyvals_dict:
+ units_dict[k] = ""
+ results_files = self.GetDataMeasurementsFiles()
+ for f in results_files:
+ # Make sure we can find the results file
+ if os.path.exists(f):
+ data_filename = f
+ else:
+ # Otherwise get the base filename and create the correct
+ # path for it.
+ _, f_base = misc.GetRoot(f)
+ data_filename = os.path.join(
+ self.chromeos_root, "chroot/tmp", self.temp_dir, f_base
+ )
+ if data_filename.find(".json") > 0:
+ raw_dict = dict()
+ if os.path.exists(data_filename):
+ with open(data_filename, "r") as data_file:
+ raw_dict = json.load(data_file)
+
+ if "charts" in raw_dict:
+ raw_dict = raw_dict["charts"]
+ for k1 in raw_dict:
+ field_dict = raw_dict[k1]
+ for k2 in field_dict:
+ result_dict = field_dict[k2]
+ key = k1 + "__" + k2
+ if "value" in result_dict:
+ keyvals_dict[key] = result_dict["value"]
+ elif "values" in result_dict:
+ values = result_dict["values"]
+ if (
+ "type" in result_dict
+ and result_dict["type"]
+ == "list_of_scalar_values"
+ and values
+ and values != "null"
+ ):
+ keyvals_dict[key] = sum(values) / float(
+ len(values)
+ )
+ else:
+ keyvals_dict[key] = values
+ units_dict[key] = result_dict["units"]
+ else:
+ if os.path.exists(data_filename):
+ with open(data_filename, "r") as data_file:
+ lines = data_file.readlines()
+ for line in lines:
+ tmp_dict = json.loads(line)
+ graph_name = tmp_dict["graph"]
+ graph_str = (
+ (graph_name + "__") if graph_name else ""
+ )
+ key = graph_str + tmp_dict["description"]
+ keyvals_dict[key] = tmp_dict["value"]
+ units_dict[key] = tmp_dict["units"]
+
+ return keyvals_dict, units_dict
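+
+ # For illustration (hypothetical line): a perf_measurements entry such as
+ # {"graph": "Total", "description": "Score", "value": 432, "units": "ms"}
+ # would roughly produce keyvals_dict["Total__Score"] = 432 and
+ # units_dict["Total__Score"] = "ms".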
+
+ def AppendTelemetryUnits(self, keyvals_dict, units_dict):
+ """keyvals_dict is the dict of key-value used to generate Crosperf reports.
+
+ units_dict is a dictionary of the units for the return values in
+ keyvals_dict. We need to associate the units with the return values,
+ for Telemetry tests, so that we can include the units in the reports.
+ This function takes each value in keyvals_dict, finds the corresponding
+ unit in the units_dict, and replaces the old value with a list of the
+ old value and the units. This later gets properly parsed in the
+ ResultOrganizer class, for generating the reports.
+ """
+
+ results_dict = {}
+ for k in keyvals_dict:
+ # We don't want these lines in our reports; they add no useful data.
+ if not k or k == "telemetry_Crosperf":
+ continue
+ val = keyvals_dict[k]
+ units = units_dict[k]
+ new_val = [val, units]
+ results_dict[k] = new_val
+ return results_dict
+
+ def GetKeyvals(self):
+ results_in_chroot = os.path.join(self.chromeos_root, "chroot", "tmp")
+ if not self.temp_dir:
+ self.temp_dir = tempfile.mkdtemp(dir=results_in_chroot)
+ command = f"cp -r {self.results_dir}/* {self.temp_dir}"
+ self.ce.RunCommand(command, print_to_console=False)
+
+ command = "./generate_test_report --no-color --csv %s" % (
+ os.path.join("/tmp", os.path.basename(self.temp_dir))
+ )
+ _, out, _ = self.ce.ChrootRunCommandWOutput(
+ self.chromeos_root, command, print_to_console=False
+ )
+ keyvals_dict = {}
+ tmp_dir_in_chroot = misc.GetInsideChrootPath(
+ self.chromeos_root, self.temp_dir
+ )
+ for line in out.splitlines():
+ tokens = re.split("=|,", line)
+ key = tokens[-2]
+ if key.startswith(tmp_dir_in_chroot):
+ key = key[len(tmp_dir_in_chroot) + 1 :]
+ value = tokens[-1]
+ keyvals_dict[key] = value
+
+ # Check to see if there is a perf_measurements file and get the
+ # data from it if so.
+ keyvals_dict, units_dict = self.GetNewKeyvals(keyvals_dict)
+ if self.suite == "telemetry_Crosperf":
+ # For telemetry_Crosperf results, append the units to the return
+ # results, for use in generating the reports.
+ keyvals_dict = self.AppendTelemetryUnits(keyvals_dict, units_dict)
+ return keyvals_dict
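+
+ # For illustration (hypothetical report line): given a line of the form
+ # "<tmp_dir>/benchmark/score=42", the split on "=" and "," yields the key
+ # "<tmp_dir>/benchmark/score" and value "42"; the temp dir prefix is then
+ # stripped, giving roughly keyvals_dict["benchmark/score"] = "42".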
+
+ def GetSamples(self):
+ actual_samples = 0
+ for perf_data_file in self.perf_data_files:
+ chroot_perf_data_file = misc.GetInsideChrootPath(
+ self.chromeos_root, perf_data_file
+ )
+ perf_path = os.path.join(
+ self.chromeos_root, "chroot", "usr/bin/perf"
+ )
+ perf_file = "/usr/sbin/perf"
+ if os.path.exists(perf_path):
+ perf_file = "/usr/bin/perf"
+
+ # For each perf.data, we want to collect the sample count for a specific
+ # DSO. We specify an exact match for known DSO types, and count every
+ # sample for `all`.
+ exact_match = ""
+ if self.cwp_dso == "all":
+ exact_match = '""'
+ elif self.cwp_dso == "chrome":
+ exact_match = '" chrome "'
+ elif self.cwp_dso == "kallsyms":
+ exact_match = '"[kernel.kallsyms]"'
+ else:
+ # This will need to be updated once more DSO types are supported,
+ # if users want an exact match for a specific field.
+ exact_match = '"%s"' % self.cwp_dso
+
+ command = "%s report -n -s dso -i %s 2> /dev/null | grep %s" % (
+ perf_file,
+ chroot_perf_data_file,
+ exact_match,
+ )
+ _, result, _ = self.ce.ChrootRunCommandWOutput(
+ self.chromeos_root, command
+ )
+ # Accumulate the sample count for all matched fields.
+ # Each line looks like this:
+ # 45.42% 237210 chrome
+ # And we want the second number which is the sample count.
+ samples = 0
+ try:
+ for line in result.split("\n"):
+ attr = line.split()
+ if len(attr) == 3 and "%" in attr[0]:
+ samples += int(attr[1])
+ except:
+ raise RuntimeError("Cannot parse perf dso result")
+
+ actual_samples += samples
+
+ # Remove idle cycles from the accumulated sample count.
+ perf_report_file = f"{perf_data_file}.report"
+ if not os.path.exists(perf_report_file):
+ raise RuntimeError(
+ f"Missing perf report file: {perf_report_file}"
+ )
+
+ idle_functions = {
+ "[kernel.kallsyms]": (
+ "intel_idle",
+ "arch_cpu_idle",
+ "intel_idle",
+ "cpu_startup_entry",
+ "default_idle",
+ "cpu_idle_loop",
+ "do_idle",
+ ),
+ }
+ idle_samples = 0
+
+ with open(perf_report_file) as f:
+ try:
+ for line in f:
+ line = line.strip()
+ if not line or line[0] == "#":
+ continue
+ # Each line has the following fields,
+ # pylint: disable=line-too-long
+ # Overhead Samples Command Shared Object Symbol
+ # pylint: disable=line-too-long
+ # 1.48% 60 swapper [kernel.kallsyms] [k] intel_idle
+ # pylint: disable=line-too-long
+ # 0.00% 1 shill libshill-net.so [.] std::__1::vector<unsigned char, std::__1::allocator<unsigned char> >::vector<unsigned char const*>
+ _, samples, _, dso, _, function = line.split(None, 5)
+
+ if (
+ dso in idle_functions
+ and function in idle_functions[dso]
+ ):
+ if self.log_level != "verbose":
+ self._logger.LogOutput(
+ "Removing %s samples from %s in %s"
+ % (samples, function, dso)
+ )
+ idle_samples += int(samples)
+ except:
+ raise RuntimeError("Cannot parse perf report")
+ actual_samples -= idle_samples
+ return [actual_samples, "samples"]
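+
+ # For illustration: a grep output line such as "45.42% 237210 chrome"
+ # contributes 237210 samples; samples attributed to the idle functions
+ # above are then subtracted, and the result is returned roughly as
+ # [<net sample count>, "samples"].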
+
+ def GetResultsDir(self):
+ if self.suite == "tast":
+ mo = re.search(r"Writing results to (\S+)", self.out)
+ else:
+ mo = re.search(r"Results placed in (\S+)", self.out)
+ if mo:
+ result = mo.group(1)
+ return result
+ raise RuntimeError("Could not find results directory.")
+
+ def FindFilesInResultsDir(self, find_args):
+ if not self.results_dir:
+ return ""
+
+ command = "find %s %s" % (self.results_dir, find_args)
+ ret, out, _ = self.ce.RunCommandWOutput(command, print_to_console=False)
+ if ret:
+ raise RuntimeError("Could not run find command!")
+ return out
+
+ def GetResultsFile(self):
+ if self.suite == "telemetry_Crosperf":
+ return self.FindFilesInResultsDir(
+ "-name histograms.json"
+ ).splitlines()
+ return self.FindFilesInResultsDir(
+ "-name results-chart.json"
+ ).splitlines()
+
+ def GetPerfDataFiles(self):
+ return self.FindFilesInResultsDir("-name perf.data").splitlines()
+
+ def GetPerfReportFiles(self):
+ return self.FindFilesInResultsDir("-name perf.data.report").splitlines()
+
+ def GetDataMeasurementsFiles(self):
+ result = self.FindFilesInResultsDir(
+ "-name perf_measurements"
+ ).splitlines()
+ if not result:
+ if self.suite == "telemetry_Crosperf":
+ result = self.FindFilesInResultsDir(
+ "-name histograms.json"
+ ).splitlines()
+ else:
+ result = self.FindFilesInResultsDir(
+ "-name results-chart.json"
+ ).splitlines()
+ return result
+
+ def GetTurbostatFile(self):
+ """Get turbostat log path string."""
+ return self.FindFilesInResultsDir("-name turbostat.log").split("\n")[0]
+
+ def GetCpustatsFile(self):
+ """Get cpustats log path string."""
+ return self.FindFilesInResultsDir("-name cpustats.log").split("\n")[0]
+
+ def GetCpuinfoFile(self):
+ """Get cpustats log path string."""
+ return self.FindFilesInResultsDir("-name cpuinfo.log").split("\n")[0]
+
+ def GetTopFile(self):
+ """Get cpustats log path string."""
+ return self.FindFilesInResultsDir("-name top.log").split("\n")[0]
+
+ def GetWaitTimeFile(self):
+ """Get wait time log path string."""
+ return self.FindFilesInResultsDir("-name wait_time.log").split("\n")[0]
+
+ def _CheckDebugPath(self, option, path):
+ relative_path = path[1:]
+ out_chroot_path = os.path.join(
+ self.chromeos_root, "chroot", relative_path
+ )
+ if os.path.exists(out_chroot_path):
+ if option == "kallsyms":
+ path = os.path.join(path, "System.map-*")
+ return "--" + option + " " + path
+ else:
+ print(
+ "** WARNING **: --%s option not applied, %s does not exist"
+ % (option, out_chroot_path)
+ )
+ return ""
+
+ def GeneratePerfReportFiles(self):
+ perf_report_files = []
+ for perf_data_file in self.perf_data_files:
+ # Generate a perf.report and store it side-by-side with the perf.data
+ # file.
+ chroot_perf_data_file = misc.GetInsideChrootPath(
+ self.chromeos_root, perf_data_file
+ )
+ perf_report_file = "%s.report" % perf_data_file
+ if os.path.exists(perf_report_file):
+ raise RuntimeError(
+ "Perf report file already exists: %s" % perf_report_file
+ )
+ chroot_perf_report_file = misc.GetInsideChrootPath(
+ self.chromeos_root, perf_report_file
+ )
+ perf_path = os.path.join(
+ self.chromeos_root, "chroot", "usr/bin/perf"
+ )
+
+ perf_file = "/usr/sbin/perf"
+ if os.path.exists(perf_path):
+ perf_file = "/usr/bin/perf"
+
+ debug_path = self.label.debug_path
+
+ if debug_path:
+ symfs = "--symfs " + debug_path
+ vmlinux = "--vmlinux " + os.path.join(
+ debug_path, "usr", "lib", "debug", "boot", "vmlinux"
+ )
+ kallsyms = ""
+ print(
+ "** WARNING **: --kallsyms option not applied, no System.map-* "
+ "for downloaded image."
+ )
+ else:
+ if self.label.image_type != "local":
+ print(
+ "** WARNING **: Using local debug info in /build, this may "
+ "not match the downloaded image."
+ )
+ build_path = os.path.join("/build", self.board)
+ symfs = self._CheckDebugPath("symfs", build_path)
+ vmlinux_path = os.path.join(
+ build_path, "usr/lib/debug/boot/vmlinux"
+ )
+ vmlinux = self._CheckDebugPath("vmlinux", vmlinux_path)
+ kallsyms_path = os.path.join(build_path, "boot")
+ kallsyms = self._CheckDebugPath("kallsyms", kallsyms_path)
+
+ command = "%s report -n %s %s %s -i %s --stdio > %s" % (
+ perf_file,
+ symfs,
+ vmlinux,
+ kallsyms,
+ chroot_perf_data_file,
+ chroot_perf_report_file,
+ )
+ if self.log_level != "verbose":
+ self._logger.LogOutput(
+ "Generating perf report...\nCMD: %s" % command
+ )
+ exit_code = self.ce.ChrootRunCommand(self.chromeos_root, command)
+ if exit_code == 0:
+ if self.log_level != "verbose":
+ self._logger.LogOutput(
+ "Perf report generated successfully."
+ )
+ else:
+ raise RuntimeError(
+ "Perf report not generated correctly. CMD: %s" % command
+ )
+
+ # Add a keyval to the dictionary for the events captured.
+ perf_report_files.append(
+ misc.GetOutsideChrootPath(
+ self.chromeos_root, chroot_perf_report_file
+ )
+ )
+ return perf_report_files
+
+ def GatherPerfResults(self):
+ report_id = 0
+ for perf_report_file in self.perf_report_files:
+ with open(perf_report_file, "r") as f:
+ report_contents = f.read()
+ for group in re.findall(
+ r"Events: (\S+) (\S+)", report_contents
+ ):
+ num_events = group[0]
+ event_name = group[1]
+ key = "perf_%s_%s" % (report_id, event_name)
+ value = str(misc.UnitToNumber(num_events))
+ self.keyvals[key] = value
+
+ def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso):
+ self.board = self.label.board
+ self.out = out
+ self.err = err
+ self.retval = retval
+ self.test_name = test
+ self.suite = suite
+ self.cwp_dso = cwp_dso
+ self.chroot_results_dir = self.GetResultsDir()
+ self.results_dir = misc.GetOutsideChrootPath(
+ self.chromeos_root, self.chroot_results_dir
+ )
+ self.results_file = self.GetResultsFile()
+ self.perf_data_files = self.GetPerfDataFiles()
+ # Include all perf.report data in table.
+ self.perf_report_files = self.GeneratePerfReportFiles()
+ self.turbostat_log_file = self.GetTurbostatFile()
+ self.cpustats_log_file = self.GetCpustatsFile()
+ self.cpuinfo_file = self.GetCpuinfoFile()
+ self.top_log_file = self.GetTopFile()
+ self.wait_time_log_file = self.GetWaitTimeFile()
+ # TODO(asharif): Do something similar with perf stat.
+
+ # Grab keyvals from the directory.
+ self.ProcessResults()
+
+ def ProcessChartResults(self):
+ # Open and parse the json results file generated by telemetry/test_that.
+ if not self.results_file:
+ raise IOError("No results file found.")
+ filename = self.results_file[0]
+ if not filename.endswith(".json"):
+ raise IOError(
+ "Attempt to call json on non-json file: %s" % filename
+ )
+ if not os.path.exists(filename):
+ raise IOError("%s does not exist" % filename)
+
+ keyvals = {}
+ with open(filename, "r") as f:
+ raw_dict = json.load(f)
+ if "charts" in raw_dict:
+ raw_dict = raw_dict["charts"]
+ for k, field_dict in raw_dict.items():
+ for item in field_dict:
+ keyname = k + "__" + item
+ value_dict = field_dict[item]
+ if "value" in value_dict:
+ result = value_dict["value"]
+ elif "values" in value_dict:
+ values = value_dict["values"]
+ if not values:
+ continue
+ if (
+ "type" in value_dict
+ and value_dict["type"] == "list_of_scalar_values"
+ and values != "null"
+ ):
+ result = sum(values) / float(len(values))
+ else:
+ result = values
+ else:
+ continue
+ units = value_dict["units"]
+ new_value = [result, units]
+ keyvals[keyname] = new_value
+ return keyvals
+
+ def ProcessTurbostatResults(self):
+ """Given turbostat_log_file non-null parse cpu stats from file.
+
+ Returns:
+ Dictionary of 'cpufreq', 'cputemp' where each
+ includes dictionary 'all': [list_of_values]
+
+ Example of the output of turbostat_log.
+ ----------------------
+ CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
+ - 329 12.13 2723 2393 10975 77
+ 0 336 12.41 2715 2393 6328 77
+ 2 323 11.86 2731 2393 4647 69
+ CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
+ - 1940 67.46 2884 2393 39920 83
+ 0 1827 63.70 2877 2393 21184 83
+ """
+ cpustats = {}
+ read_data = ""
+ with open(self.turbostat_log_file) as f:
+ read_data = f.readlines()
+
+ if not read_data:
+ self._logger.LogOutput("WARNING: Turbostat output file is empty.")
+ return {}
+
+ # First line always contains the header.
+ stats = read_data[0].split()
+
+ # Mandatory parameters.
+ if "CPU" not in stats:
+ self._logger.LogOutput(
+ "WARNING: Missing data for CPU# in Turbostat output."
+ )
+ return {}
+ if "Bzy_MHz" not in stats:
+ self._logger.LogOutput(
+ "WARNING: Missing data for Bzy_MHz in Turbostat output."
+ )
+ return {}
+ cpu_index = stats.index("CPU")
+ cpufreq_index = stats.index("Bzy_MHz")
+ cpufreq = cpustats.setdefault("cpufreq", {"all": []})
+
+ # Optional parameters.
+ cputemp_index = -1
+ if "CoreTmp" in stats:
+ cputemp_index = stats.index("CoreTmp")
+ cputemp = cpustats.setdefault("cputemp", {"all": []})
+
+ # Parse data starting from the second line ignoring repeating headers.
+ for st in read_data[1:]:
+ # Data represented by int or float separated by spaces.
+ numbers = st.split()
+ if not all(
+ word.replace(".", "", 1).isdigit() for word in numbers[1:]
+ ):
+ # Skip the line if data mismatch.
+ continue
+ if numbers[cpu_index] != "-":
+ # Ignore core-specific statistics, which start with the core number.
+ # Combined statistics for all cores use the "-" CPU identifier.
+ continue
+
+ cpufreq["all"].append(int(numbers[cpufreq_index]))
+ if cputemp_index != -1:
+ cputemp["all"].append(int(numbers[cputemp_index]))
+ return cpustats
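+
+ # For illustration: given the sample turbostat log in the docstring above,
+ # only the combined "-" rows are kept, so the method returns roughly
+ # {"cpufreq": {"all": [2723, 2884]}, "cputemp": {"all": [77, 83]}}.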
+
+ def ProcessTopResults(self):
+ """Given self.top_log_file process top log data.
+
+ Returns:
+ List of dictionaries with the following keyvals:
+ 'cmd': command name (string),
+ 'cpu_use_avg': average cpu usage (float),
+ 'count': number of occurrences (int),
+ 'top5_cpu_use': up to 5 highest cpu usages (descending list of floats)
+
+ Example of the top log:
+ PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
+ 4102 chronos 12 -8 3454472 238300 118188 R 41.8 6.1 0:08.37 chrome
+ 375 root 0 -20 0 0 0 S 5.9 0.0 0:00.17 kworker
+ 617 syslog 20 0 25332 8372 7888 S 5.9 0.2 0:00.77 systemd
+
+ PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
+ 5745 chronos 20 0 5438580 139328 67988 R 122.8 3.6 0:04.26 chrome
+ 912 root -51 0 0 0 0 S 2.0 0.0 0:01.04 irq/cro
+ 121 root 20 0 0 0 0 S 1.0 0.0 0:00.45 spi5
+ """
+ all_data = ""
+ with open(self.top_log_file) as f:
+ all_data = f.read()
+
+ if not all_data:
+ self._logger.LogOutput("WARNING: Top log file is empty.")
+ return []
+
+ top_line_regex = re.compile(
+ r"""
^\s*(?P<pid>\d+)\s+ # Group 1: PID
\S+\s+\S+\s+-?\d+\s+ # Ignore: user, prio, nice
\d+\s+\d+\s+\d+\s+ # Ignore: virt/res/shared mem
@@ -663,814 +748,922 @@ class Result(object):
(?P<cpu_use>\d+\.\d+)\s+ # Group 2: CPU usage
\d+\.\d+\s+\d+:\d+\.\d+\s+ # Ignore: mem usage, time
(?P<cmd>\S+)$ # Group 3: command
- """, re.VERBOSE)
- # Page represents top log data per one measurement within time interval
- # 'top_interval'.
- # Pages separated by empty line.
- pages = all_data.split('\n\n')
- # Snapshots are structured representation of the pages.
- snapshots = []
- for page in pages:
- if not page:
- continue
-
- # Snapshot list will contain all processes (command duplicates are
- # allowed).
- snapshot = []
- for line in page.splitlines():
- match = top_line_regex.match(line)
- if match:
- # Top line is valid, collect data.
- process = {
- # NOTE: One command may be represented by multiple processes.
- 'cmd': match.group('cmd'),
- 'pid': match.group('pid'),
- 'cpu_use': float(match.group('cpu_use')),
- }
-
- # Filter out processes with 0 CPU usage and top command.
- if process['cpu_use'] > 0 and process['cmd'] != 'top':
- snapshot.append(process)
-
- # If page contained meaningful data add snapshot to the list.
- if snapshot:
- snapshots.append(snapshot)
-
- # Define threshold of CPU usage when Chrome is busy, i.e. benchmark is
- # running.
- # Ideally it should be 100% but it will be hardly reachable with 1 core.
- # Statistics on DUT with 2-6 cores shows that chrome load of 100%, 95% and
- # 90% equally occurs in 72-74% of all top log snapshots.
- # Further decreasing of load threshold leads to a shifting percent of
- # "high load" snapshots which might include snapshots when benchmark is
- # not running.
- # On 1-core DUT 90% chrome cpu load occurs in 55%, 95% in 33% and 100% in 2%
- # of snapshots accordingly.
- # Threshold of "high load" is reduced to 70% (from 90) when we switched to
- # topstats per process. From experiment data the rest 20% are distributed
- # among other chrome processes.
- CHROME_HIGH_CPU_LOAD = 70
- # Number of snapshots where chrome is heavily used.
- high_load_snapshots = 0
- # Total CPU use per process in ALL active snapshots.
- cmd_total_cpu_use = collections.defaultdict(float)
- # Top CPU usages per command.
- cmd_top5_cpu_use = collections.defaultdict(list)
- # List of Top Commands to be returned.
- topcmds = []
-
- for snapshot_processes in snapshots:
- # CPU usage per command, per PID in one snapshot.
- cmd_cpu_use_per_snapshot = collections.defaultdict(dict)
- for process in snapshot_processes:
- cmd = process['cmd']
- cpu_use = process['cpu_use']
- pid = process['pid']
- cmd_cpu_use_per_snapshot[cmd][pid] = cpu_use
-
- # Chrome processes, pid: cpu_usage.
- chrome_processes = cmd_cpu_use_per_snapshot.get('chrome', {})
- chrome_cpu_use_list = chrome_processes.values()
-
- if chrome_cpu_use_list and max(
- chrome_cpu_use_list) > CHROME_HIGH_CPU_LOAD:
- # CPU usage of any of the "chrome" processes exceeds "High load"
- # threshold which means DUT is busy running a benchmark.
- high_load_snapshots += 1
- for cmd, cpu_use_per_pid in cmd_cpu_use_per_snapshot.items():
- for pid, cpu_use in cpu_use_per_pid.items():
- # Append PID to the name of the command.
- cmd_with_pid = cmd + '-' + pid
- cmd_total_cpu_use[cmd_with_pid] += cpu_use
-
- # Add cpu_use into command top cpu usages, sorted in descending
- # order.
- heapq.heappush(cmd_top5_cpu_use[cmd_with_pid], round(cpu_use, 1))
-
- for consumer, usage in sorted(cmd_total_cpu_use.items(),
- key=lambda x: x[1],
- reverse=True):
- # Iterate through commands by descending order of total CPU usage.
- topcmd = {
- 'cmd': consumer,
- 'cpu_use_avg': usage / high_load_snapshots,
- 'count': len(cmd_top5_cpu_use[consumer]),
- 'top5_cpu_use': heapq.nlargest(5, cmd_top5_cpu_use[consumer]),
- }
- topcmds.append(topcmd)
-
- return topcmds
-
- def ProcessCpustatsResults(self):
- """Given cpustats_log_file non-null parse cpu data from file.
-
- Returns:
- Dictionary of 'cpufreq', 'cputemp' where each
- includes dictionary of parameter: [list_of_values]
-
- Example of cpustats.log output.
- ----------------------
- /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_cur_freq 1512000
- /sys/devices/system/cpu/cpu2/cpufreq/cpuinfo_cur_freq 2016000
- little-cpu 41234
- big-cpu 51234
-
- If cores share the same policy their frequencies may always match
- on some devices.
- To make report concise we should eliminate redundancy in the output.
- Function removes cpuN data if it duplicates data from other cores.
- """
-
- cpustats = {}
- read_data = ''
- with open(self.cpustats_log_file) as f:
- read_data = f.readlines()
-
- if not read_data:
- self._logger.LogOutput('WARNING: Cpustats output file is empty.')
- return {}
-
- cpufreq_regex = re.compile(r'^[/\S]+/(cpu\d+)/[/\S]+\s+(\d+)$')
- cputemp_regex = re.compile(r'^([^/\s]+)\s+(\d+)$')
-
- for st in read_data:
- match = cpufreq_regex.match(st)
- if match:
- cpu = match.group(1)
- # CPU frequency comes in kHz.
- freq_khz = int(match.group(2))
- freq_mhz = freq_khz / 1000
- # cpufreq represents a dictionary with CPU frequency-related
- # data from cpustats.log.
- cpufreq = cpustats.setdefault('cpufreq', {})
- cpu_n_freq = cpufreq.setdefault(cpu, [])
- cpu_n_freq.append(freq_mhz)
- else:
- match = cputemp_regex.match(st)
- if match:
- therm_type = match.group(1)
- # The value is int, uCelsius unit.
- temp_uc = float(match.group(2))
- # Round to XX.X float.
- temp_c = round(temp_uc / 1000, 1)
- # cputemp represents a dictionary with temperature measurements
- # from cpustats.log.
- cputemp = cpustats.setdefault('cputemp', {})
- therm_type = cputemp.setdefault(therm_type, [])
- therm_type.append(temp_c)
-
- # Remove duplicate statistics from cpustats.
- pruned_stats = {}
- for cpukey, cpuparam in cpustats.items():
- # Copy 'cpufreq' and 'cputemp'.
- pruned_params = pruned_stats.setdefault(cpukey, {})
- for paramkey, paramvalue in sorted(cpuparam.items()):
- # paramvalue is list of all measured data.
- if paramvalue not in pruned_params.values():
- pruned_params[paramkey] = paramvalue
-
- return pruned_stats
-
- def ProcessHistogramsResults(self):
- # Open and parse the json results file generated by telemetry/test_that.
- if not self.results_file:
- raise IOError('No results file found.')
- filename = self.results_file[0]
- if not filename.endswith('.json'):
- raise IOError('Attempt to call json on non-json file: %s' % filename)
- if not os.path.exists(filename):
- raise IOError('%s does not exist' % filename)
-
- keyvals = {}
- with open(filename) as f:
- histograms = json.load(f)
- value_map = {}
- # Gets generic set values.
- for obj in histograms:
- if 'type' in obj and obj['type'] == 'GenericSet':
- value_map[obj['guid']] = obj['values']
-
- for obj in histograms:
- if 'name' not in obj or 'sampleValues' not in obj:
- continue
- metric_name = obj['name']
- vals = obj['sampleValues']
- if isinstance(vals, list):
- # Remove None elements from the list
- vals = [val for val in vals if val is not None]
- if vals:
+ """,
+ re.VERBOSE,
+ )
+ # Page represents top log data per one measurement within time interval
+ # 'top_interval'.
+ # Pages separated by empty line.
+ pages = all_data.split("\n\n")
+ # Snapshots are structured representation of the pages.
+ snapshots = []
+ for page in pages:
+ if not page:
+ continue
+
+ # Snapshot list will contain all processes (command duplicates are
+ # allowed).
+ snapshot = []
+ for line in page.splitlines():
+ match = top_line_regex.match(line)
+ if match:
+ # Top line is valid, collect data.
+ process = {
+ # NOTE: One command may be represented by multiple processes.
+ "cmd": match.group("cmd"),
+ "pid": match.group("pid"),
+ "cpu_use": float(match.group("cpu_use")),
+ }
+
+ # Filter out processes with 0 CPU usage and top command.
+ if process["cpu_use"] > 0 and process["cmd"] != "top":
+ snapshot.append(process)
+
+ # If page contained meaningful data add snapshot to the list.
+ if snapshot:
+ snapshots.append(snapshot)
+
+ # Define the threshold of CPU usage at which Chrome is considered busy,
+ # i.e. the benchmark is running.
+ # Ideally it should be 100%, but that is hardly reachable with 1 core.
+ # Statistics on DUTs with 2-6 cores show that chrome loads of 100%, 95%
+ # and 90% each occur in 72-74% of all top log snapshots.
+ # Decreasing the load threshold further shifts the percentage of
+ # "high load" snapshots, which might then include snapshots taken when
+ # the benchmark is not running.
+ # On a 1-core DUT, a 90% chrome cpu load occurs in 55%, 95% in 33% and
+ # 100% in 2% of snapshots respectively.
+ # The "high load" threshold was reduced to 70% (from 90%) when we switched
+ # to per-process topstats. Experiment data shows the remaining 20% is
+ # distributed among other chrome processes.
+ CHROME_HIGH_CPU_LOAD = 70
+ # Number of snapshots where chrome is heavily used.
+ high_load_snapshots = 0
+ # Total CPU use per process in ALL active snapshots.
+ cmd_total_cpu_use = collections.defaultdict(float)
+ # Top CPU usages per command.
+ cmd_top5_cpu_use = collections.defaultdict(list)
+ # List of Top Commands to be returned.
+ topcmds = []
+
+ for snapshot_processes in snapshots:
+ # CPU usage per command, per PID in one snapshot.
+ cmd_cpu_use_per_snapshot = collections.defaultdict(dict)
+ for process in snapshot_processes:
+ cmd = process["cmd"]
+ cpu_use = process["cpu_use"]
+ pid = process["pid"]
+ cmd_cpu_use_per_snapshot[cmd][pid] = cpu_use
+
+ # Chrome processes, pid: cpu_usage.
+ chrome_processes = cmd_cpu_use_per_snapshot.get("chrome", {})
+ chrome_cpu_use_list = chrome_processes.values()
+
+ if (
+ chrome_cpu_use_list
+ and max(chrome_cpu_use_list) > CHROME_HIGH_CPU_LOAD
+ ):
+ # CPU usage of any of the "chrome" processes exceeds "High load"
+ # threshold which means DUT is busy running a benchmark.
+ high_load_snapshots += 1
+ for cmd, cpu_use_per_pid in cmd_cpu_use_per_snapshot.items():
+ for pid, cpu_use in cpu_use_per_pid.items():
+ # Append PID to the name of the command.
+ cmd_with_pid = cmd + "-" + pid
+ cmd_total_cpu_use[cmd_with_pid] += cpu_use
+
+ # Add cpu_use into command top cpu usages, sorted in descending
+ # order.
+ heapq.heappush(
+ cmd_top5_cpu_use[cmd_with_pid], round(cpu_use, 1)
+ )
+
+ for consumer, usage in sorted(
+ cmd_total_cpu_use.items(), key=lambda x: x[1], reverse=True
+ ):
+ # Iterate through commands by descending order of total CPU usage.
+ topcmd = {
+ "cmd": consumer,
+ "cpu_use_avg": usage / high_load_snapshots,
+ "count": len(cmd_top5_cpu_use[consumer]),
+ "top5_cpu_use": heapq.nlargest(5, cmd_top5_cpu_use[consumer]),
+ }
+ topcmds.append(topcmd)
+
+ return topcmds
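+
+ # For illustration: in the docstring example only the second snapshot
+ # exceeds CHROME_HIGH_CPU_LOAD, so the result starts roughly with
+ # {"cmd": "chrome-5745", "cpu_use_avg": 122.8, "count": 1,
+ # "top5_cpu_use": [122.8]}, followed by the smaller "irq/cro-912" and
+ # "spi5-121" entries.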
+
+ def ProcessCpustatsResults(self):
+ """Given cpustats_log_file non-null parse cpu data from file.
+
+ Returns:
+ Dictionary of 'cpufreq', 'cputemp' where each
+ includes dictionary of parameter: [list_of_values]
+
+ Example of cpustats.log output.
+ ----------------------
+ /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_cur_freq 1512000
+ /sys/devices/system/cpu/cpu2/cpufreq/cpuinfo_cur_freq 2016000
+ little-cpu 41234
+ big-cpu 51234
+
+ If cores share the same policy, their frequencies may always match
+ on some devices.
+ To make the report concise we eliminate redundancy in the output:
+ the function removes cpuN data if it duplicates data from other cores.
+ """
+
+ cpustats = {}
+ read_data = ""
+ with open(self.cpustats_log_file) as f:
+ read_data = f.readlines()
+
+ if not read_data:
+ self._logger.LogOutput("WARNING: Cpustats output file is empty.")
+ return {}
+
+ cpufreq_regex = re.compile(r"^[/\S]+/(cpu\d+)/[/\S]+\s+(\d+)$")
+ cputemp_regex = re.compile(r"^([^/\s]+)\s+(\d+)$")
+
+ for st in read_data:
+ match = cpufreq_regex.match(st)
+ if match:
+ cpu = match.group(1)
+ # CPU frequency comes in kHz.
+ freq_khz = int(match.group(2))
+ freq_mhz = freq_khz / 1000
+ # cpufreq represents a dictionary with CPU frequency-related
+ # data from cpustats.log.
+ cpufreq = cpustats.setdefault("cpufreq", {})
+ cpu_n_freq = cpufreq.setdefault(cpu, [])
+ cpu_n_freq.append(freq_mhz)
+ else:
+ match = cputemp_regex.match(st)
+ if match:
+ therm_type = match.group(1)
+ # The value is an int in milli-Celsius.
+ temp_uc = float(match.group(2))
+ # Round to XX.X float.
+ temp_c = round(temp_uc / 1000, 1)
+ # cputemp represents a dictionary with temperature measurements
+ # from cpustats.log.
+ cputemp = cpustats.setdefault("cputemp", {})
+ therm_type = cputemp.setdefault(therm_type, [])
+ therm_type.append(temp_c)
+
+ # Remove duplicate statistics from cpustats.
+ pruned_stats = {}
+ for cpukey, cpuparam in cpustats.items():
+ # Copy 'cpufreq' and 'cputemp'.
+ pruned_params = pruned_stats.setdefault(cpukey, {})
+ for paramkey, paramvalue in sorted(cpuparam.items()):
+ # paramvalue is list of all measured data.
+ if paramvalue not in pruned_params.values():
+ pruned_params[paramkey] = paramvalue
+
+ return pruned_stats
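+
+ # For illustration: the cpustats.log example in the docstring above would
+ # yield roughly
+ # {"cpufreq": {"cpu0": [1512.0], "cpu2": [2016.0]},
+ # "cputemp": {"little-cpu": [41.2], "big-cpu": [51.2]}};
+ # cpuN entries with identical value lists would be pruned as duplicates.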
+
+ def ProcessHistogramsResults(self):
+ # Open and parse the json results file generated by telemetry/test_that.
+ if not self.results_file:
+ raise IOError("No results file found.")
+ filename = self.results_file[0]
+ if not filename.endswith(".json"):
+ raise IOError(
+ "Attempt to call json on non-json file: %s" % filename
+ )
+ if not os.path.exists(filename):
+ raise IOError("%s does not exist" % filename)
+
+ keyvals = {}
+ with open(filename) as f:
+ histograms = json.load(f)
+ value_map = {}
+ # Gets generic set values.
+ for obj in histograms:
+ if "type" in obj and obj["type"] == "GenericSet":
+ value_map[obj["guid"]] = obj["values"]
+
+ for obj in histograms:
+ if "name" not in obj or "sampleValues" not in obj:
+ continue
+ metric_name = obj["name"]
+ vals = obj["sampleValues"]
+ if isinstance(vals, list):
+ # Remove None elements from the list
+ vals = [val for val in vals if val is not None]
+ if vals:
+ result = float(sum(vals)) / len(vals)
+ else:
+ result = 0
+ else:
+ result = vals
+ unit = obj["unit"]
+ diagnostics = obj["diagnostics"]
+ # for summaries of benchmarks
+ key = metric_name
+ if key not in keyvals:
+ keyvals[key] = [[result], unit]
+ else:
+ keyvals[key][0].append(result)
+ # TODO: do we need summaries of stories?
+ # for summaries of story tags
+ if "storyTags" in diagnostics:
+ guid = diagnostics["storyTags"]
+ if guid not in value_map:
+ raise RuntimeError(
+ "Unrecognized storyTags in %s " % (obj)
+ )
+ for story_tag in value_map[guid]:
+ key = metric_name + "__" + story_tag
+ if key not in keyvals:
+ keyvals[key] = [[result], unit]
+ else:
+ keyvals[key][0].append(result)
+ # calculate summary
+ for key in keyvals:
+ vals = keyvals[key][0]
+ unit = keyvals[key][1]
result = float(sum(vals)) / len(vals)
- else:
- result = 0
- else:
- result = vals
- unit = obj['unit']
- diagnostics = obj['diagnostics']
- # for summaries of benchmarks
- key = metric_name
- if key not in keyvals:
- keyvals[key] = [[result], unit]
+ keyvals[key] = [result, unit]
+ return keyvals
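+
+ # For illustration (hypothetical metric): a histogram named
+ # "total:duration" with sampleValues [10, 20] and unit "ms" is averaged to
+ # keyvals["total:duration"] == [15.0, "ms"]; additional
+ # "<metric>__<storyTag>" entries appear when storyTags diagnostics exist.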
+
+ def ReadPidFromPerfData(self):
+ """Read PIDs from perf.data files.
+
+ Extract PID from perf.data if "perf record" was running per process,
+ i.e. with "-p <PID>" and no "-a".
+
+ Returns:
+ pids: list of PIDs.
+
+ Raises:
+ PerfDataReadError when perf.data header reading fails.
+ """
+ cmd = ["/usr/bin/perf", "report", "--header-only", "-i"]
+ pids = []
+
+ for perf_data_path in self.perf_data_files:
+ perf_data_path_in_chroot = misc.GetInsideChrootPath(
+ self.chromeos_root, perf_data_path
+ )
+ path_str = " ".join(cmd + [perf_data_path_in_chroot])
+ status, output, _ = self.ce.ChrootRunCommandWOutput(
+ self.chromeos_root, path_str
+ )
+ if status:
+ # Error of reading a perf.data profile is fatal.
+ raise PerfDataReadError(
+ f"Failed to read perf.data profile: {path_str}"
+ )
+
+ # Pattern to search a line with "perf record" command line:
+ # # cmdline : /usr/bin/perf record -e instructions -p 123"
+ cmdline_regex = re.compile(
+ r"^\#\scmdline\s:\s+(?P<cmd>.*perf\s+record\s+.*)$"
+ )
+ # Pattern to search for a PID in a command line.
+ pid_regex = re.compile(r"^.*\s-p\s(?P<pid>\d+)\s*.*$")
+ for line in output.splitlines():
+ cmd_match = cmdline_regex.match(line)
+ if cmd_match:
+ # Found a perf command line.
+ cmdline = cmd_match.group("cmd")
+ # '-a' is a system-wide mode argument.
+ if "-a" not in cmdline.split():
+ # It can be that perf was attached to PID and was still running in
+ # system-wide mode.
+ # We filter out this case here since it's not per-process.
+ pid_match = pid_regex.match(cmdline)
+ if pid_match:
+ pids.append(pid_match.group("pid"))
+ # Stop the search and move to the next perf.data file.
+ break
+ else:
+ # cmdline wasn't found in the header. It's a fatal error.
+ raise PerfDataReadError(
+ f"Perf command line is not found in {path_str}"
+ )
+ return pids
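+
+ # For illustration: a header containing the cmdline shown above,
+ # "# cmdline : /usr/bin/perf record -e instructions -p 123", has no "-a"
+ # flag, so "123" is appended to pids; a system-wide profile (recorded with
+ # "-a") contributes nothing.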
+
+ def VerifyPerfDataPID(self):
+ """Verify PIDs in per-process perf.data profiles.
+
+ Check that at least one top process is profiled if perf was running in
+ per-process mode.
+
+ Raises:
+ PidVerificationError if PID verification of per-process perf.data profiles
+ fail.
+ """
+ perf_data_pids = self.ReadPidFromPerfData()
+ if not perf_data_pids:
+ # In system-wide mode there are no PIDs.
+ self._logger.LogOutput("System-wide perf mode. Skip verification.")
+ return
+
+ # PIDs will be present only in per-process profiles.
+ # In this case we need to verify that profiles are collected on the
+ # hottest processes.
+ top_processes = [top_cmd["cmd"] for top_cmd in self.top_cmds]
+ # top_process structure: <cmd>-<pid>
+ top_pids = [top_process.split("-")[-1] for top_process in top_processes]
+ for top_pid in top_pids:
+ if top_pid in perf_data_pids:
+ self._logger.LogOutput(
+ "PID verification passed! "
+ f"Top process {top_pid} is profiled."
+ )
+ return
+ raise PidVerificationError(
+ f"top processes {top_processes} are missing in perf.data traces with"
+ f" PID: {perf_data_pids}."
+ )
+
+ def ProcessResults(self, use_cache=False):
+ # Note that this function doesn't know anything about whether there is a
+ # cache hit or miss. It should process results agnostic of the cache hit
+ # state.
+ if (
+ self.results_file
+ and self.suite == "telemetry_Crosperf"
+ and "histograms.json" in self.results_file[0]
+ ):
+ self.keyvals = self.ProcessHistogramsResults()
+ elif (
+ self.results_file
+ and self.suite != "telemetry_Crosperf"
+ and "results-chart.json" in self.results_file[0]
+ ):
+ self.keyvals = self.ProcessChartResults()
else:
- keyvals[key][0].append(result)
- # TODO: do we need summaries of stories?
- # for summaries of story tags
- if 'storyTags' in diagnostics:
- guid = diagnostics['storyTags']
- if guid not in value_map:
- raise RuntimeError('Unrecognized storyTags in %s ' % (obj))
- for story_tag in value_map[guid]:
- key = metric_name + '__' + story_tag
- if key not in keyvals:
- keyvals[key] = [[result], unit]
+ if not use_cache:
+ print(
+ "\n ** WARNING **: Had to use deprecated output-method to "
+ "collect results.\n"
+ )
+ self.keyvals = self.GetKeyvals()
+ self.keyvals["retval"] = self.retval
+ # If we are in CWP approximation mode, we want to collect DSO samples
+ # for each perf.data file
+ if self.cwp_dso and self.retval == 0:
+ self.keyvals["samples"] = self.GetSamples()
+ # If the samples count collected from perf file is 0, we will treat
+ # it as a failed run.
+ if self.keyvals["samples"][0] == 0:
+ del self.keyvals["samples"]
+ self.keyvals["retval"] = 1
+ # Generate report from all perf.data files.
+ # Now parse all perf report files and include them in keyvals.
+ self.GatherPerfResults()
+
+ cpustats = {}
+ # Turbostat output has higher priority of processing.
+ if self.turbostat_log_file:
+ cpustats = self.ProcessTurbostatResults()
+ # Process cpustats output only if turbostat has no data.
+ if not cpustats and self.cpustats_log_file:
+ cpustats = self.ProcessCpustatsResults()
+ if self.top_log_file:
+ self.top_cmds = self.ProcessTopResults()
+ # Verify that PID in non system-wide perf.data and top_cmds are matching.
+ if self.perf_data_files and self.top_cmds:
+ self.VerifyPerfDataPID()
+ if self.wait_time_log_file:
+ with open(self.wait_time_log_file) as f:
+ wait_time = f.readline().strip()
+ try:
+ wait_time = float(wait_time)
+ except ValueError:
+ raise ValueError("Wait time in log file is not a number.")
+ # This is for accumulating wait time for telemetry_Crosperf runs only,
+ # for test_that runs, please refer to suite_runner.
+ self.machine.AddCooldownWaitTime(wait_time)
+
+ for param_key, param in cpustats.items():
+ for param_type, param_values in param.items():
+ val_avg = sum(param_values) / len(param_values)
+ val_min = min(param_values)
+ val_max = max(param_values)
+ # Average data is always included.
+ self.keyvals["_".join([param_key, param_type, "avg"])] = val_avg
+ # Insert min/max results only if they deviate
+ # from average.
+ if val_min != val_avg:
+ self.keyvals[
+ "_".join([param_key, param_type, "min"])
+ ] = val_min
+ if val_max != val_avg:
+ self.keyvals[
+ "_".join([param_key, param_type, "max"])
+ ] = val_max
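+
+ # For illustration (hypothetical values): a cpustats entry
+ # {"cpufreq": {"all": [2000, 2100]}} becomes
+ # keyvals["cpufreq_all_avg"] = 2050.0, with "cpufreq_all_min" and
+ # "cpufreq_all_max" added only because they differ from the average.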
+
+ def GetChromeVersionFromCache(self, cache_dir):
+ # Read chrome_version from keys file, if present.
+ chrome_version = ""
+ keys_file = os.path.join(cache_dir, CACHE_KEYS_FILE)
+ if os.path.exists(keys_file):
+ with open(keys_file, "r") as f:
+ lines = f.readlines()
+ for l in lines:
+ if l.startswith("Google Chrome "):
+ chrome_version = l
+ if chrome_version.endswith("\n"):
+ chrome_version = chrome_version[:-1]
+ break
+ return chrome_version
+
+ def PopulateFromCacheDir(self, cache_dir, test, suite, cwp_dso):
+ self.test_name = test
+ self.suite = suite
+ self.cwp_dso = cwp_dso
+ # Read in everything from the cache directory.
+ with open(os.path.join(cache_dir, RESULTS_FILE), "rb") as f:
+ self.out = pickle.load(f)
+ self.err = pickle.load(f)
+ self.retval = pickle.load(f)
+
+ # Untar the tarball to a temporary directory
+ self.temp_dir = tempfile.mkdtemp(
+ dir=os.path.join(self.chromeos_root, "chroot", "tmp")
+ )
+
+ command = "cd %s && tar xf %s" % (
+ self.temp_dir,
+ os.path.join(cache_dir, AUTOTEST_TARBALL),
+ )
+ ret = self.ce.RunCommand(command, print_to_console=False)
+ if ret:
+ raise RuntimeError("Could not untar cached tarball")
+ self.results_dir = self.temp_dir
+ self.results_file = self.GetDataMeasurementsFiles()
+ self.perf_data_files = self.GetPerfDataFiles()
+ self.perf_report_files = self.GetPerfReportFiles()
+ self.chrome_version = self.GetChromeVersionFromCache(cache_dir)
+ self.ProcessResults(use_cache=True)
+
+ def CleanUp(self, rm_chroot_tmp):
+ if rm_chroot_tmp and self.results_dir:
+ dirname, basename = misc.GetRoot(self.results_dir)
+ if basename.find("test_that_results_") != -1:
+ command = "rm -rf %s" % self.results_dir
else:
- keyvals[key][0].append(result)
- # calculate summary
- for key in keyvals:
- vals = keyvals[key][0]
- unit = keyvals[key][1]
- result = float(sum(vals)) / len(vals)
- keyvals[key] = [result, unit]
- return keyvals
-
- def ReadPidFromPerfData(self):
- """Read PIDs from perf.data files.
-
- Extract PID from perf.data if "perf record" was running per process,
- i.e. with "-p <PID>" and no "-a".
-
- Returns:
- pids: list of PIDs.
-
- Raises:
- PerfDataReadError when perf.data header reading fails.
- """
- cmd = ['/usr/bin/perf', 'report', '--header-only', '-i']
- pids = []
-
- for perf_data_path in self.perf_data_files:
- perf_data_path_in_chroot = misc.GetInsideChrootPath(
- self.chromeos_root, perf_data_path)
- path_str = ' '.join(cmd + [perf_data_path_in_chroot])
- status, output, _ = self.ce.ChrootRunCommandWOutput(
- self.chromeos_root, path_str)
- if status:
- # Error of reading a perf.data profile is fatal.
- raise PerfDataReadError(
- f'Failed to read perf.data profile: {path_str}')
-
- # Pattern to search a line with "perf record" command line:
- # # cmdline : /usr/bin/perf record -e instructions -p 123"
- cmdline_regex = re.compile(
- r'^\#\scmdline\s:\s+(?P<cmd>.*perf\s+record\s+.*)$')
- # Pattern to search PID in a command line.
- pid_regex = re.compile(r'^.*\s-p\s(?P<pid>\d+)\s*.*$')
- for line in output.splitlines():
- cmd_match = cmdline_regex.match(line)
- if cmd_match:
- # Found a perf command line.
- cmdline = cmd_match.group('cmd')
- # '-a' is a system-wide mode argument.
- if '-a' not in cmdline.split():
- # It can be that perf was attached to PID and was still running in
- # system-wide mode.
- # We filter out this case here since it's not per-process.
- pid_match = pid_regex.match(cmdline)
- if pid_match:
- pids.append(pid_match.group('pid'))
- # Stop the search and move to the next perf.data file.
- break
- else:
- # cmdline wasn't found in the header. It's a fatal error.
- raise PerfDataReadError(
- f'Perf command line is not found in {path_str}')
- return pids
-
- def VerifyPerfDataPID(self):
- """Verify PIDs in per-process perf.data profiles.
-
- Check that at list one top process is profiled if perf was running in
- per-process mode.
-
- Raises:
- PidVerificationError if PID verification of per-process perf.data profiles
- fail.
- """
- perf_data_pids = self.ReadPidFromPerfData()
- if not perf_data_pids:
- # In system-wide mode there are no PIDs.
- self._logger.LogOutput('System-wide perf mode. Skip verification.')
- return
-
- # PIDs will be present only in per-process profiles.
- # In this case we need to verify that profiles are collected on the
- # hottest processes.
- top_processes = [top_cmd['cmd'] for top_cmd in self.top_cmds]
- # top_process structure: <cmd>-<pid>
- top_pids = [top_process.split('-')[-1] for top_process in top_processes]
- for top_pid in top_pids:
- if top_pid in perf_data_pids:
- self._logger.LogOutput('PID verification passed! '
- f'Top process {top_pid} is profiled.')
- return
- raise PidVerificationError(
- f'top processes {top_processes} are missing in perf.data traces with'
- f' PID: {perf_data_pids}.')
-
- def ProcessResults(self, use_cache=False):
- # Note that this function doesn't know anything about whether there is a
- # cache hit or miss. It should process results agnostic of the cache hit
- # state.
- if (self.results_file and self.suite == 'telemetry_Crosperf'
- and 'histograms.json' in self.results_file[0]):
- self.keyvals = self.ProcessHistogramsResults()
- elif (self.results_file and self.suite != 'telemetry_Crosperf'
- and 'results-chart.json' in self.results_file[0]):
- self.keyvals = self.ProcessChartResults()
- else:
- if not use_cache:
- print('\n ** WARNING **: Had to use deprecated output-method to '
- 'collect results.\n')
- self.keyvals = self.GetKeyvals()
- self.keyvals['retval'] = self.retval
- # If we are in CWP approximation mode, we want to collect DSO samples
- # for each perf.data file
- if self.cwp_dso and self.retval == 0:
- self.keyvals['samples'] = self.GetSamples()
- # If the samples count collected from perf file is 0, we will treat
- # it as a failed run.
- if self.keyvals['samples'][0] == 0:
- del self.keyvals['samples']
- self.keyvals['retval'] = 1
- # Generate report from all perf.data files.
- # Now parse all perf report files and include them in keyvals.
- self.GatherPerfResults()
-
- cpustats = {}
- # Turbostat output has higher priority of processing.
- if self.turbostat_log_file:
- cpustats = self.ProcessTurbostatResults()
- # Process cpustats output only if turbostat has no data.
- if not cpustats and self.cpustats_log_file:
- cpustats = self.ProcessCpustatsResults()
- if self.top_log_file:
- self.top_cmds = self.ProcessTopResults()
- # Verify that PID in non system-wide perf.data and top_cmds are matching.
- if self.perf_data_files and self.top_cmds:
- self.VerifyPerfDataPID()
- if self.wait_time_log_file:
- with open(self.wait_time_log_file) as f:
- wait_time = f.readline().strip()
+ command = "rm -rf %s" % dirname
+ self.ce.RunCommand(command)
+ if self.temp_dir:
+ command = "rm -rf %s" % self.temp_dir
+ self.ce.RunCommand(command)
+
+ def CreateTarball(self, results_dir, tarball):
+ if not results_dir.strip():
+ raise ValueError(
+ "Refusing to `tar` an empty results_dir: %r" % results_dir
+ )
+
+ ret = self.ce.RunCommand(
+ "cd %s && "
+ "tar "
+ "--exclude=var/spool "
+ "--exclude=var/log "
+ "-cjf %s ." % (results_dir, tarball)
+ )
+ if ret:
+ raise RuntimeError("Couldn't compress test output directory.")
+
+ def StoreToCacheDir(self, cache_dir, machine_manager, key_list):
+ # Create the dir if it doesn't exist.
+ temp_dir = tempfile.mkdtemp()
+
+ # Store to the temp directory.
+ with open(os.path.join(temp_dir, RESULTS_FILE), "wb") as f:
+ pickle.dump(self.out, f)
+ pickle.dump(self.err, f)
+ pickle.dump(self.retval, f)
+
+ if not test_flag.GetTestMode():
+ with open(os.path.join(temp_dir, CACHE_KEYS_FILE), "w") as f:
+ f.write("%s\n" % self.label.name)
+ f.write("%s\n" % self.label.chrome_version)
+ f.write("%s\n" % self.machine.checksum_string)
+ for k in key_list:
+ f.write(k)
+ f.write("\n")
+
+ if self.results_dir:
+ tarball = os.path.join(temp_dir, AUTOTEST_TARBALL)
+ self.CreateTarball(self.results_dir, tarball)
+
+ # Store machine info.
+ # TODO(asharif): Make machine_manager a singleton, and don't pass it into
+ # this function.
+ with open(os.path.join(temp_dir, MACHINE_FILE), "w") as f:
+ f.write(machine_manager.machine_checksum_string[self.label.name])
+
+ if os.path.exists(cache_dir):
+ command = f"rm -rf {cache_dir}"
+ self.ce.RunCommand(command)
+
+ parent_dir = os.path.dirname(cache_dir)
+ command = f"mkdir -p {parent_dir} && "
+ command += f"chmod g+x {temp_dir} && "
+ command += f"mv {temp_dir} {cache_dir}"
+ ret = self.ce.RunCommand(command)
+ if ret:
+ command = f"rm -rf {temp_dir}"
+ self.ce.RunCommand(command)
+ raise RuntimeError(
+ "Could not move dir %s to dir %s" % (temp_dir, cache_dir)
+ )
+
+ @classmethod
+ def CreateFromRun(
+ cls,
+ logger,
+ log_level,
+ label,
+ machine,
+ out,
+ err,
+ retval,
+ test,
+ suite="telemetry_Crosperf",
+ cwp_dso="",
+ ):
+ if suite == "telemetry":
+ result = TelemetryResult(logger, label, log_level, machine)
+ else:
+ result = cls(logger, label, log_level, machine)
+ result.PopulateFromRun(out, err, retval, test, suite, cwp_dso)
+ return result
+
+ @classmethod
+ def CreateFromCacheHit(
+ cls,
+ logger,
+ log_level,
+ label,
+ machine,
+ cache_dir,
+ test,
+ suite="telemetry_Crosperf",
+ cwp_dso="",
+ ):
+ if suite == "telemetry":
+ result = TelemetryResult(logger, label, log_level, machine)
+ else:
+ result = cls(logger, label, log_level, machine)
try:
- wait_time = float(wait_time)
- except ValueError:
- raise ValueError('Wait time in log file is not a number.')
- # This is for accumulating wait time for telemtry_Crosperf runs only,
- # for test_that runs, please refer to suite_runner.
- self.machine.AddCooldownWaitTime(wait_time)
-
- for param_key, param in cpustats.items():
- for param_type, param_values in param.items():
- val_avg = sum(param_values) / len(param_values)
- val_min = min(param_values)
- val_max = max(param_values)
- # Average data is always included.
- self.keyvals['_'.join([param_key, param_type, 'avg'])] = val_avg
- # Insert min/max results only if they deviate
- # from average.
- if val_min != val_avg:
- self.keyvals['_'.join([param_key, param_type, 'min'])] = val_min
- if val_max != val_avg:
- self.keyvals['_'.join([param_key, param_type, 'max'])] = val_max
-
- def GetChromeVersionFromCache(self, cache_dir):
- # Read chrome_version from keys file, if present.
- chrome_version = ''
- keys_file = os.path.join(cache_dir, CACHE_KEYS_FILE)
- if os.path.exists(keys_file):
- with open(keys_file, 'r') as f:
- lines = f.readlines()
- for l in lines:
- if l.startswith('Google Chrome '):
- chrome_version = l
- if chrome_version.endswith('\n'):
- chrome_version = chrome_version[:-1]
- break
- return chrome_version
-
- def PopulateFromCacheDir(self, cache_dir, test, suite, cwp_dso):
- self.test_name = test
- self.suite = suite
- self.cwp_dso = cwp_dso
- # Read in everything from the cache directory.
- with open(os.path.join(cache_dir, RESULTS_FILE), 'rb') as f:
- self.out = pickle.load(f)
- self.err = pickle.load(f)
- self.retval = pickle.load(f)
-
- # Untar the tarball to a temporary directory
- self.temp_dir = tempfile.mkdtemp(
- dir=os.path.join(self.chromeos_root, 'chroot', 'tmp'))
-
- command = ('cd %s && tar xf %s' %
- (self.temp_dir, os.path.join(cache_dir, AUTOTEST_TARBALL)))
- ret = self.ce.RunCommand(command, print_to_console=False)
- if ret:
- raise RuntimeError('Could not untar cached tarball')
- self.results_dir = self.temp_dir
- self.results_file = self.GetDataMeasurementsFiles()
- self.perf_data_files = self.GetPerfDataFiles()
- self.perf_report_files = self.GetPerfReportFiles()
- self.chrome_version = self.GetChromeVersionFromCache(cache_dir)
- self.ProcessResults(use_cache=True)
-
- def CleanUp(self, rm_chroot_tmp):
- if rm_chroot_tmp and self.results_dir:
- dirname, basename = misc.GetRoot(self.results_dir)
- if basename.find('test_that_results_') != -1:
- command = 'rm -rf %s' % self.results_dir
- else:
- command = 'rm -rf %s' % dirname
- self.ce.RunCommand(command)
- if self.temp_dir:
- command = 'rm -rf %s' % self.temp_dir
- self.ce.RunCommand(command)
-
- def CreateTarball(self, results_dir, tarball):
- if not results_dir.strip():
- raise ValueError('Refusing to `tar` an empty results_dir: %r' %
- results_dir)
-
- ret = self.ce.RunCommand('cd %s && '
- 'tar '
- '--exclude=var/spool '
- '--exclude=var/log '
- '-cjf %s .' % (results_dir, tarball))
- if ret:
- raise RuntimeError("Couldn't compress test output directory.")
-
- def StoreToCacheDir(self, cache_dir, machine_manager, key_list):
- # Create the dir if it doesn't exist.
- temp_dir = tempfile.mkdtemp()
-
- # Store to the temp directory.
- with open(os.path.join(temp_dir, RESULTS_FILE), 'wb') as f:
- pickle.dump(self.out, f)
- pickle.dump(self.err, f)
- pickle.dump(self.retval, f)
-
- if not test_flag.GetTestMode():
- with open(os.path.join(temp_dir, CACHE_KEYS_FILE), 'w') as f:
- f.write('%s\n' % self.label.name)
- f.write('%s\n' % self.label.chrome_version)
- f.write('%s\n' % self.machine.checksum_string)
- for k in key_list:
- f.write(k)
- f.write('\n')
-
- if self.results_dir:
- tarball = os.path.join(temp_dir, AUTOTEST_TARBALL)
- self.CreateTarball(self.results_dir, tarball)
-
- # Store machine info.
- # TODO(asharif): Make machine_manager a singleton, and don't pass it into
- # this function.
- with open(os.path.join(temp_dir, MACHINE_FILE), 'w') as f:
- f.write(machine_manager.machine_checksum_string[self.label.name])
-
- if os.path.exists(cache_dir):
- command = f'rm -rf {cache_dir}'
- self.ce.RunCommand(command)
-
- parent_dir = os.path.dirname(cache_dir)
- command = f'mkdir -p {parent_dir} && '
- command += f'chmod g+x {temp_dir} && '
- command += f'mv {temp_dir} {cache_dir}'
- ret = self.ce.RunCommand(command)
- if ret:
- command = f'rm -rf {temp_dir}'
- self.ce.RunCommand(command)
- raise RuntimeError('Could not move dir %s to dir %s' %
- (temp_dir, cache_dir))
-
- @classmethod
- def CreateFromRun(cls,
- logger,
- log_level,
- label,
- machine,
- out,
- err,
- retval,
- test,
- suite='telemetry_Crosperf',
- cwp_dso=''):
- if suite == 'telemetry':
- result = TelemetryResult(logger, label, log_level, machine)
- else:
- result = cls(logger, label, log_level, machine)
- result.PopulateFromRun(out, err, retval, test, suite, cwp_dso)
- return result
-
- @classmethod
- def CreateFromCacheHit(cls,
- logger,
- log_level,
- label,
- machine,
- cache_dir,
- test,
- suite='telemetry_Crosperf',
- cwp_dso=''):
- if suite == 'telemetry':
- result = TelemetryResult(logger, label, log_level, machine)
- else:
- result = cls(logger, label, log_level, machine)
- try:
- result.PopulateFromCacheDir(cache_dir, test, suite, cwp_dso)
-
- except RuntimeError as e:
- logger.LogError('Exception while using cache: %s' % e)
- return None
- return result
+ result.PopulateFromCacheDir(cache_dir, test, suite, cwp_dso)
+
+ except RuntimeError as e:
+ logger.LogError("Exception while using cache: %s" % e)
+ return None
+ return result
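For orientation, the StoreToCacheDir/PopulateFromCacheDir pair above defines the on-disk layout of a cache entry: a directory containing a pickle file with the run's stdout, stderr and return value (dumped in that order), a plain-text keys file (skipped in test mode), an optional bzip2 tarball of the autotest results directory, and a file holding the machine checksum. The sketch below shows how such an entry could be read back by hand; the file names are stand-ins for the RESULTS_FILE/CACHE_KEYS_FILE constants defined earlier in results_cache.py, not the canonical values.

# Minimal sketch of reading a crosperf cache entry back by hand.
# RESULTS_FILE / CACHE_KEYS_FILE below are illustrative stand-ins for the
# module-level constants defined earlier in results_cache.py.
import os
import pickle

RESULTS_FILE = "results.pickle"     # assumption, not the canonical name
CACHE_KEYS_FILE = "cache_keys.txt"  # assumption, not the canonical name


def inspect_cache_entry(cache_dir):
    """Return (out, err, retval, keys) stored in a cache directory."""
    with open(os.path.join(cache_dir, RESULTS_FILE), "rb") as f:
        # Unpickle in the same order StoreToCacheDir dumped them.
        out = pickle.load(f)
        err = pickle.load(f)
        retval = pickle.load(f)
    keys = []
    keys_path = os.path.join(cache_dir, CACHE_KEYS_FILE)
    if os.path.exists(keys_path):
        with open(keys_path) as f:
            keys = [line.rstrip("\n") for line in f]
    return out, err, retval, keys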
class TelemetryResult(Result):
- """Class to hold the results of a single Telemetry run."""
-
- def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso):
- self.out = out
- self.err = err
- self.retval = retval
-
- self.ProcessResults()
-
- # pylint: disable=arguments-differ
- def ProcessResults(self):
- # The output is:
- # url,average_commit_time (ms),...
- # www.google.com,33.4,21.2,...
- # We need to convert to this format:
- # {"www.google.com:average_commit_time (ms)": "33.4",
- # "www.google.com:...": "21.2"}
- # Added note: Occasionally the output comes back
- # with "JSON.stringify(window.automation.GetResults())" on
- # the first line, and then the rest of the output as
- # described above.
-
- lines = self.out.splitlines()
- self.keyvals = {}
-
- if lines:
- if lines[0].startswith('JSON.stringify'):
- lines = lines[1:]
-
- if not lines:
- return
- labels = lines[0].split(',')
- for line in lines[1:]:
- fields = line.split(',')
- if len(fields) != len(labels):
- continue
- for i in range(1, len(labels)):
- key = '%s %s' % (fields[0], labels[i])
- value = fields[i]
- self.keyvals[key] = value
- self.keyvals['retval'] = self.retval
-
- def PopulateFromCacheDir(self, cache_dir, test, suite, cwp_dso):
- self.test_name = test
- self.suite = suite
- self.cwp_dso = cwp_dso
- with open(os.path.join(cache_dir, RESULTS_FILE), 'rb') as f:
- self.out = pickle.load(f)
- self.err = pickle.load(f)
- self.retval = pickle.load(f)
-
- self.chrome_version = (super(TelemetryResult,
- self).GetChromeVersionFromCache(cache_dir))
- self.ProcessResults()
+ """Class to hold the results of a single Telemetry run."""
+
+ def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso):
+ self.out = out
+ self.err = err
+ self.retval = retval
+
+ self.ProcessResults()
+
+ # pylint: disable=arguments-differ
+ def ProcessResults(self):
+ # The output is:
+ # url,average_commit_time (ms),...
+ # www.google.com,33.4,21.2,...
+ # We need to convert to this format:
+        # {"www.google.com average_commit_time (ms)": "33.4",
+        #  "www.google.com ...": "21.2"}
+ # Added note: Occasionally the output comes back
+ # with "JSON.stringify(window.automation.GetResults())" on
+ # the first line, and then the rest of the output as
+ # described above.
+
+ lines = self.out.splitlines()
+ self.keyvals = {}
+
+ if lines:
+ if lines[0].startswith("JSON.stringify"):
+ lines = lines[1:]
+
+ if not lines:
+ return
+ labels = lines[0].split(",")
+ for line in lines[1:]:
+ fields = line.split(",")
+ if len(fields) != len(labels):
+ continue
+ for i in range(1, len(labels)):
+ key = "%s %s" % (fields[0], labels[i])
+ value = fields[i]
+ self.keyvals[key] = value
+ self.keyvals["retval"] = self.retval
+
+ def PopulateFromCacheDir(self, cache_dir, test, suite, cwp_dso):
+ self.test_name = test
+ self.suite = suite
+ self.cwp_dso = cwp_dso
+ with open(os.path.join(cache_dir, RESULTS_FILE), "rb") as f:
+ self.out = pickle.load(f)
+ self.err = pickle.load(f)
+ self.retval = pickle.load(f)
+
+ self.chrome_version = super(
+ TelemetryResult, self
+ ).GetChromeVersionFromCache(cache_dir)
+ self.ProcessResults()
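TelemetryResult.ProcessResults above turns the CSV-shaped stdout into keyvals whose keys join the page name and the metric label with a space. A standalone restatement of that parse, useful for sanity-checking fixtures (illustration only; the real method also records self.retval):

def parse_telemetry_csv(out, retval=0):
    # Standalone restatement of TelemetryResult.ProcessResults' CSV parse.
    lines = out.splitlines()
    keyvals = {}
    if lines and lines[0].startswith("JSON.stringify"):
        lines = lines[1:]
    if not lines:
        return keyvals
    labels = lines[0].split(",")
    for line in lines[1:]:
        fields = line.split(",")
        if len(fields) != len(labels):
            continue  # skip malformed rows
        for i in range(1, len(labels)):
            # Key is "<page> <metric>"; values stay strings.
            keyvals["%s %s" % (fields[0], labels[i])] = fields[i]
    keyvals["retval"] = retval
    return keyvals

# Example:
#   parse_telemetry_csv("url,average_commit_time (ms)\nwww.google.com,33.4")
#   -> {"www.google.com average_commit_time (ms)": "33.4", "retval": 0}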
class CacheConditions(object):
- """Various Cache condition values, for export."""
+ """Various Cache condition values, for export."""
- # Cache hit only if the result file exists.
- CACHE_FILE_EXISTS = 0
+ # Cache hit only if the result file exists.
+ CACHE_FILE_EXISTS = 0
- # Cache hit if the checksum of cpuinfo and totalmem of
- # the cached result and the new run match.
- MACHINES_MATCH = 1
+ # Cache hit if the checksum of cpuinfo and totalmem of
+ # the cached result and the new run match.
+ MACHINES_MATCH = 1
- # Cache hit if the image checksum of the cached result and the new run match.
- CHECKSUMS_MATCH = 2
+ # Cache hit if the image checksum of the cached result and the new run match.
+ CHECKSUMS_MATCH = 2
- # Cache hit only if the cached result was successful
- RUN_SUCCEEDED = 3
+ # Cache hit only if the cached result was successful
+ RUN_SUCCEEDED = 3
- # Never a cache hit.
- FALSE = 4
+ # Never a cache hit.
+ FALSE = 4
- # Cache hit if the image path matches the cached image path.
- IMAGE_PATH_MATCH = 5
+ # Cache hit if the image path matches the cached image path.
+ IMAGE_PATH_MATCH = 5
- # Cache hit if the uuid of hard disk mataches the cached one
+    # Cache hit if the uuid of the hard disk matches the cached one
- SAME_MACHINE_MATCH = 6
+ SAME_MACHINE_MATCH = 6
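Each CacheConditions value pins down one component of the cache key on reads; when a condition is absent, GetCacheKeyList below substitutes a '*' glob for the corresponding checksum. RUN_SUCCEEDED is different (it filters out failed runs after the hit, in ReadResult), and FALSE forces a miss and deletes any existing entry. A rough sketch of that mapping, illustrative only:

def relaxed_fields(cache_conditions):
    # Which key fields become '*' wildcards when reading, given the active
    # conditions. Mirrors the branches in GetCacheKeyList; names are strings
    # here only to keep the sketch self-contained.
    relaxed = []
    if "MACHINES_MATCH" not in cache_conditions:
        relaxed.append("machine_checksum")     # cpuinfo + totalmem
    if "CHECKSUMS_MATCH" not in cache_conditions:
        relaxed.append("image_checksum")       # unless image_type forces one
    if "IMAGE_PATH_MATCH" not in cache_conditions:
        relaxed.append("image_path_checksum")  # md5 of the image path
    if "SAME_MACHINE_MATCH" not in cache_conditions:
        relaxed.append("machine_id_checksum")  # disk uuid
    return relaxed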
class ResultsCache(object):
- """Class to handle the cache for storing/retrieving test run results.
-
- This class manages the key of the cached runs without worrying about what
- is exactly stored (value). The value generation is handled by the Results
- class.
- """
- CACHE_VERSION = 6
-
- def __init__(self):
- # Proper initialization happens in the Init function below.
- self.chromeos_image = None
- self.chromeos_root = None
- self.test_name = None
- self.iteration = None
- self.test_args = None
- self.profiler_args = None
- self.board = None
- self.cache_conditions = None
- self.machine_manager = None
- self.machine = None
- self._logger = None
- self.ce = None
- self.label = None
- self.share_cache = None
- self.suite = None
- self.log_level = None
- self.show_all = None
- self.run_local = None
- self.cwp_dso = None
-
- def Init(self, chromeos_image, chromeos_root, test_name, iteration,
- test_args, profiler_args, machine_manager, machine, board,
- cache_conditions, logger_to_use, log_level, label, share_cache,
- suite, show_all_results, run_local, cwp_dso):
- self.chromeos_image = chromeos_image
- self.chromeos_root = chromeos_root
- self.test_name = test_name
- self.iteration = iteration
- self.test_args = test_args
- self.profiler_args = profiler_args
- self.board = board
- self.cache_conditions = cache_conditions
- self.machine_manager = machine_manager
- self.machine = machine
- self._logger = logger_to_use
- self.ce = command_executer.GetCommandExecuter(self._logger,
- log_level=log_level)
- self.label = label
- self.share_cache = share_cache
- self.suite = suite
- self.log_level = log_level
- self.show_all = show_all_results
- self.run_local = run_local
- self.cwp_dso = cwp_dso
-
- def GetCacheDirForRead(self):
- matching_dirs = []
- for glob_path in self.FormCacheDir(self.GetCacheKeyList(True)):
- matching_dirs += glob.glob(glob_path)
-
- if matching_dirs:
- # Cache file found.
- return matching_dirs[0]
- return None
-
- def GetCacheDirForWrite(self, get_keylist=False):
- cache_path = self.FormCacheDir(self.GetCacheKeyList(False))[0]
- if get_keylist:
- args_str = '%s_%s_%s' % (self.test_args, self.profiler_args,
- self.run_local)
- version, image = results_report.ParseChromeosImage(
- self.label.chromeos_image)
- keylist = [
- version, image, self.label.board, self.machine.name, self.test_name,
- str(self.iteration), args_str
- ]
- return cache_path, keylist
- return cache_path
-
- def FormCacheDir(self, list_of_strings):
- cache_key = ' '.join(list_of_strings)
- cache_dir = misc.GetFilenameFromString(cache_key)
- if self.label.cache_dir:
- cache_home = os.path.abspath(os.path.expanduser(self.label.cache_dir))
- cache_path = [os.path.join(cache_home, cache_dir)]
- else:
- cache_path = [os.path.join(SCRATCH_DIR, cache_dir)]
-
- if self.share_cache:
- for path in [x.strip() for x in self.share_cache.split(',')]:
- if os.path.exists(path):
- cache_path.append(os.path.join(path, cache_dir))
+ """Class to handle the cache for storing/retrieving test run results.
+
+ This class manages the key of the cached runs without worrying about what
+ is exactly stored (value). The value generation is handled by the Results
+ class.
+ """
+
+ CACHE_VERSION = 6
+
+ def __init__(self):
+ # Proper initialization happens in the Init function below.
+ self.chromeos_image = None
+ self.chromeos_root = None
+ self.test_name = None
+ self.iteration = None
+ self.test_args = None
+ self.profiler_args = None
+ self.board = None
+ self.cache_conditions = None
+ self.machine_manager = None
+ self.machine = None
+ self._logger = None
+ self.ce = None
+ self.label = None
+ self.share_cache = None
+ self.suite = None
+ self.log_level = None
+ self.show_all = None
+ self.run_local = None
+ self.cwp_dso = None
+
+ def Init(
+ self,
+ chromeos_image,
+ chromeos_root,
+ test_name,
+ iteration,
+ test_args,
+ profiler_args,
+ machine_manager,
+ machine,
+ board,
+ cache_conditions,
+ logger_to_use,
+ log_level,
+ label,
+ share_cache,
+ suite,
+ show_all_results,
+ run_local,
+ cwp_dso,
+ ):
+ self.chromeos_image = chromeos_image
+ self.chromeos_root = chromeos_root
+ self.test_name = test_name
+ self.iteration = iteration
+ self.test_args = test_args
+ self.profiler_args = profiler_args
+ self.board = board
+ self.cache_conditions = cache_conditions
+ self.machine_manager = machine_manager
+ self.machine = machine
+ self._logger = logger_to_use
+ self.ce = command_executer.GetCommandExecuter(
+ self._logger, log_level=log_level
+ )
+ self.label = label
+ self.share_cache = share_cache
+ self.suite = suite
+ self.log_level = log_level
+ self.show_all = show_all_results
+ self.run_local = run_local
+ self.cwp_dso = cwp_dso
+
+ def GetCacheDirForRead(self):
+ matching_dirs = []
+ for glob_path in self.FormCacheDir(self.GetCacheKeyList(True)):
+ matching_dirs += glob.glob(glob_path)
+
+ if matching_dirs:
+ # Cache file found.
+ return matching_dirs[0]
+ return None
+
+ def GetCacheDirForWrite(self, get_keylist=False):
+ cache_path = self.FormCacheDir(self.GetCacheKeyList(False))[0]
+ if get_keylist:
+ args_str = "%s_%s_%s" % (
+ self.test_args,
+ self.profiler_args,
+ self.run_local,
+ )
+ version, image = results_report.ParseChromeosImage(
+ self.label.chromeos_image
+ )
+ keylist = [
+ version,
+ image,
+ self.label.board,
+ self.machine.name,
+ self.test_name,
+ str(self.iteration),
+ args_str,
+ ]
+ return cache_path, keylist
+ return cache_path
+
+ def FormCacheDir(self, list_of_strings):
+ cache_key = " ".join(list_of_strings)
+ cache_dir = misc.GetFilenameFromString(cache_key)
+ if self.label.cache_dir:
+ cache_home = os.path.abspath(
+ os.path.expanduser(self.label.cache_dir)
+ )
+ cache_path = [os.path.join(cache_home, cache_dir)]
+ else:
+ cache_path = [os.path.join(SCRATCH_DIR, cache_dir)]
+
+ if self.share_cache:
+ for path in [x.strip() for x in self.share_cache.split(",")]:
+ if os.path.exists(path):
+ cache_path.append(os.path.join(path, cache_dir))
+ else:
+ self._logger.LogFatal(
+ "Unable to find shared cache: %s" % path
+ )
+
+ return cache_path
+
+ def GetCacheKeyList(self, read):
+ if read and CacheConditions.MACHINES_MATCH not in self.cache_conditions:
+ machine_checksum = "*"
+ else:
+ machine_checksum = self.machine_manager.machine_checksum[
+ self.label.name
+ ]
+ if (
+ read
+ and CacheConditions.CHECKSUMS_MATCH not in self.cache_conditions
+ ):
+ checksum = "*"
+ elif self.label.image_type == "trybot":
+ checksum = hashlib.md5(
+ self.label.chromeos_image.encode("utf-8")
+ ).hexdigest()
+ elif self.label.image_type == "official":
+ checksum = "*"
else:
- self._logger.LogFatal('Unable to find shared cache: %s' % path)
-
- return cache_path
-
- def GetCacheKeyList(self, read):
- if read and CacheConditions.MACHINES_MATCH not in self.cache_conditions:
- machine_checksum = '*'
- else:
- machine_checksum = self.machine_manager.machine_checksum[self.label.name]
- if read and CacheConditions.CHECKSUMS_MATCH not in self.cache_conditions:
- checksum = '*'
- elif self.label.image_type == 'trybot':
- checksum = hashlib.md5(
- self.label.chromeos_image.encode('utf-8')).hexdigest()
- elif self.label.image_type == 'official':
- checksum = '*'
- else:
- checksum = ImageChecksummer().Checksum(self.label, self.log_level)
-
- if read and CacheConditions.IMAGE_PATH_MATCH not in self.cache_conditions:
- image_path_checksum = '*'
- else:
- image_path_checksum = hashlib.md5(
- self.chromeos_image.encode('utf-8')).hexdigest()
-
- machine_id_checksum = ''
- if read and CacheConditions.SAME_MACHINE_MATCH not in self.cache_conditions:
- machine_id_checksum = '*'
- else:
- if self.machine and self.machine.name in self.label.remote:
- machine_id_checksum = self.machine.machine_id_checksum
- else:
- for machine in self.machine_manager.GetMachines(self.label):
- if machine.name == self.label.remote[0]:
- machine_id_checksum = machine.machine_id_checksum
- break
-
- temp_test_args = '%s %s %s' % (self.test_args, self.profiler_args,
- self.run_local)
- test_args_checksum = hashlib.md5(
- temp_test_args.encode('utf-8')).hexdigest()
- return (image_path_checksum, self.test_name, str(self.iteration),
- test_args_checksum, checksum, machine_checksum,
- machine_id_checksum, str(self.CACHE_VERSION))
-
- def ReadResult(self):
- if CacheConditions.FALSE in self.cache_conditions:
- cache_dir = self.GetCacheDirForWrite()
- command = 'rm -rf %s' % (cache_dir, )
- self.ce.RunCommand(command)
- return None
- cache_dir = self.GetCacheDirForRead()
-
- if not cache_dir:
- return None
-
- if not os.path.isdir(cache_dir):
- return None
-
- if self.log_level == 'verbose':
- self._logger.LogOutput('Trying to read from cache dir: %s' % cache_dir)
- result = Result.CreateFromCacheHit(self._logger, self.log_level,
- self.label, self.machine, cache_dir,
- self.test_name, self.suite,
- self.cwp_dso)
- if not result:
- return None
-
- if (result.retval == 0
- or CacheConditions.RUN_SUCCEEDED not in self.cache_conditions):
- return result
-
- return None
-
- def StoreResult(self, result):
- cache_dir, keylist = self.GetCacheDirForWrite(get_keylist=True)
- result.StoreToCacheDir(cache_dir, self.machine_manager, keylist)
+ checksum = ImageChecksummer().Checksum(self.label, self.log_level)
+
+ if (
+ read
+ and CacheConditions.IMAGE_PATH_MATCH not in self.cache_conditions
+ ):
+ image_path_checksum = "*"
+ else:
+ image_path_checksum = hashlib.md5(
+ self.chromeos_image.encode("utf-8")
+ ).hexdigest()
+
+ machine_id_checksum = ""
+ if (
+ read
+ and CacheConditions.SAME_MACHINE_MATCH not in self.cache_conditions
+ ):
+ machine_id_checksum = "*"
+ else:
+ if self.machine and self.machine.name in self.label.remote:
+ machine_id_checksum = self.machine.machine_id_checksum
+ else:
+ for machine in self.machine_manager.GetMachines(self.label):
+ if machine.name == self.label.remote[0]:
+ machine_id_checksum = machine.machine_id_checksum
+ break
+
+ temp_test_args = "%s %s %s" % (
+ self.test_args,
+ self.profiler_args,
+ self.run_local,
+ )
+ test_args_checksum = hashlib.md5(
+ temp_test_args.encode("utf-8")
+ ).hexdigest()
+ return (
+ image_path_checksum,
+ self.test_name,
+ str(self.iteration),
+ test_args_checksum,
+ checksum,
+ machine_checksum,
+ machine_id_checksum,
+ str(self.CACHE_VERSION),
+ )
+
+ def ReadResult(self):
+ if CacheConditions.FALSE in self.cache_conditions:
+ cache_dir = self.GetCacheDirForWrite()
+ command = "rm -rf %s" % (cache_dir,)
+ self.ce.RunCommand(command)
+ return None
+ cache_dir = self.GetCacheDirForRead()
+
+ if not cache_dir:
+ return None
+
+ if not os.path.isdir(cache_dir):
+ return None
+
+ if self.log_level == "verbose":
+ self._logger.LogOutput(
+ "Trying to read from cache dir: %s" % cache_dir
+ )
+ result = Result.CreateFromCacheHit(
+ self._logger,
+ self.log_level,
+ self.label,
+ self.machine,
+ cache_dir,
+ self.test_name,
+ self.suite,
+ self.cwp_dso,
+ )
+ if not result:
+ return None
+
+ if (
+ result.retval == 0
+ or CacheConditions.RUN_SUCCEEDED not in self.cache_conditions
+ ):
+ return result
+
+ return None
+
+ def StoreResult(self, result):
+ cache_dir, keylist = self.GetCacheDirForWrite(get_keylist=True)
+ result.StoreToCacheDir(cache_dir, self.machine_manager, keylist)
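Putting GetCacheKeyList and FormCacheDir together: the key is an eight-part tuple (md5 of the image path, test name, iteration, md5 of the test/profiler/run_local args, image checksum, machine checksum, machine-id checksum, CACHE_VERSION); FormCacheDir joins it with spaces, sanitizes it into a single directory name under label.cache_dir or SCRATCH_DIR, and GetCacheDirForRead globs that pattern so any '*' components act as wildcards. A hedged sketch of the same flow, with stand-ins for SCRATCH_DIR and misc.GetFilenameFromString:

import glob
import hashlib
import os

SCRATCH_DIR = os.path.expanduser("~/cros_scratch")  # assumption; set elsewhere


def _filename_from_string(s):
    # Stand-in for misc.GetFilenameFromString; keeps the name filesystem-safe
    # while preserving '*' so glob wildcards still work.
    return "".join(c if c.isalnum() or c in "._-*" else "_" for c in s)


def cache_glob(image_path, test_name, iteration, args_str, image_checksum,
               machine_checksum, machine_id_checksum, cache_version=6):
    key = (
        hashlib.md5(image_path.encode("utf-8")).hexdigest(),
        test_name,
        str(iteration),
        hashlib.md5(args_str.encode("utf-8")).hexdigest(),
        image_checksum,       # '*' when CHECKSUMS_MATCH is relaxed
        machine_checksum,     # '*' when MACHINES_MATCH is relaxed
        machine_id_checksum,  # '*' when SAME_MACHINE_MATCH is relaxed
        str(cache_version),
    )
    pattern = os.path.join(SCRATCH_DIR, _filename_from_string(" ".join(key)))
    return glob.glob(pattern)  # the first match, if any, is the cache hit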
class MockResultsCache(ResultsCache):
- """Class for mock testing, corresponding to ResultsCache class."""
+ """Class for mock testing, corresponding to ResultsCache class."""
- # FIXME: pylint complains about this mock init method, we should probably
- # replace all Mock classes in Crosperf with simple Mock.mock().
- # pylint: disable=arguments-differ
- def Init(self, *args):
- pass
+ # FIXME: pylint complains about this mock init method, we should probably
+ # replace all Mock classes in Crosperf with simple Mock.mock().
+ # pylint: disable=arguments-differ
+ def Init(self, *args):
+ pass
- def ReadResult(self):
- return None
+ def ReadResult(self):
+ return None
- def StoreResult(self, result):
- pass
+ def StoreResult(self, result):
+ pass
class MockResult(Result):
- """Class for mock testing, corresponding to Result class."""
+ """Class for mock testing, corresponding to Result class."""
- def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso):
- self.out = out
- self.err = err
- self.retval = retval
+ def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso):
+ self.out = out
+ self.err = err
+ self.retval = retval
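Taken together, a caller of this module typically initializes a ResultsCache, asks it for a cached Result, and only runs the benchmark (and stores the new Result) on a miss. The sketch below shows only that shape; run_benchmark is a hypothetical stand-in for the real suite runner and the argument lists are abbreviated.

# Hypothetical caller flow for ResultsCache/Result.
from results_cache import Result


def get_result(cache, logger_, log_level, label, machine, test, suite,
               cwp_dso, run_benchmark):
    cached = cache.ReadResult()
    if cached is not None:
        return cached, True               # cache hit
    out, err, retval = run_benchmark()    # stand-in for the real suite runner
    result = Result.CreateFromRun(
        logger_, log_level, label, machine, out, err, retval, test,
        suite=suite, cwp_dso=cwp_dso)
    cache.StoreResult(result)
    return result, False                  # cache miss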
diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py
index d6953eed..cad149e0 100755
--- a/crosperf/results_cache_unittest.py
+++ b/crosperf/results_cache_unittest.py
@@ -1,13 +1,12 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module of result cache unittest."""
-from __future__ import print_function
import io
import os
@@ -17,20 +16,20 @@ import tempfile
import unittest
import unittest.mock as mock
+from cros_utils import command_executer
+from cros_utils import logger
+from cros_utils import misc
import image_checksummer
-import machine_manager
-import test_flag
-
from label import MockLabel
+import machine_manager
from results_cache import CacheConditions
from results_cache import PerfDataReadError
from results_cache import PidVerificationError
from results_cache import Result
from results_cache import ResultsCache
from results_cache import TelemetryResult
-from cros_utils import command_executer
-from cros_utils import logger
-from cros_utils import misc
+import test_flag
+
# The following hardcoded string has blocked words replaced, and thus
# is not representative of a true crosperf output.
@@ -133,35 +132,35 @@ INFO : Elapsed time: 0m18s
"""
keyvals = {
- '': 'PASS',
- 'b_stdio_putcgetc__0_': '0.100005711667',
- 'b_string_strstr___azbycxdwevfugthsirjqkplomn__': '0.0133123556667',
- 'b_malloc_thread_local__0_': '0.01138439',
- 'b_string_strlen__0_': '0.044893587',
- 'b_malloc_sparse__0_': '0.015053784',
- 'b_string_memset__0_': '0.00275405066667',
- 'platform_LibCBench': 'PASS',
- 'b_pthread_uselesslock__0_': '0.0294113346667',
- 'b_string_strchr__0_': '0.00456903',
- 'b_pthread_create_serial1__0_': '0.0291785246667',
- 'b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac__': '0.118360778',
- 'b_string_strstr___aaaaaaaaaaaaaacccccccccccc__': '0.0135694476667',
- 'b_pthread_createjoin_serial1__0_': '0.031907936',
- 'b_malloc_thread_stress__0_': '0.0367894733333',
- 'b_regex_search____a_b_c__d_b__': '0.00165455066667',
- 'b_malloc_bubble__0_': '0.015066374',
- 'b_malloc_big2__0_': '0.002951359',
- 'b_stdio_putcgetc_unlocked__0_': '0.0371443833333',
- 'b_pthread_createjoin_serial2__0_': '0.043485347',
- 'b_regex_search___a_25_b__': '0.0496191923333',
- 'b_utf8_bigbuf__0_': '0.0473772253333',
- 'b_malloc_big1__0_': '0.00375231466667',
- 'b_regex_compile____a_b_c__d_b__': '0.00529833933333',
- 'b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaac__': '0.068957325',
- 'b_malloc_tiny2__0_': '0.000581407333333',
- 'b_utf8_onebyone__0_': '0.130938538333',
- 'b_malloc_tiny1__0_': '0.000768474333333',
- 'b_string_strstr___abcdefghijklmnopqrstuvwxyz__': '0.0134553343333'
+ "": "PASS",
+ "b_stdio_putcgetc__0_": "0.100005711667",
+ "b_string_strstr___azbycxdwevfugthsirjqkplomn__": "0.0133123556667",
+ "b_malloc_thread_local__0_": "0.01138439",
+ "b_string_strlen__0_": "0.044893587",
+ "b_malloc_sparse__0_": "0.015053784",
+ "b_string_memset__0_": "0.00275405066667",
+ "platform_LibCBench": "PASS",
+ "b_pthread_uselesslock__0_": "0.0294113346667",
+ "b_string_strchr__0_": "0.00456903",
+ "b_pthread_create_serial1__0_": "0.0291785246667",
+ "b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac__": "0.118360778",
+ "b_string_strstr___aaaaaaaaaaaaaacccccccccccc__": "0.0135694476667",
+ "b_pthread_createjoin_serial1__0_": "0.031907936",
+ "b_malloc_thread_stress__0_": "0.0367894733333",
+ "b_regex_search____a_b_c__d_b__": "0.00165455066667",
+ "b_malloc_bubble__0_": "0.015066374",
+ "b_malloc_big2__0_": "0.002951359",
+ "b_stdio_putcgetc_unlocked__0_": "0.0371443833333",
+ "b_pthread_createjoin_serial2__0_": "0.043485347",
+ "b_regex_search___a_25_b__": "0.0496191923333",
+ "b_utf8_bigbuf__0_": "0.0473772253333",
+ "b_malloc_big1__0_": "0.00375231466667",
+ "b_regex_compile____a_b_c__d_b__": "0.00529833933333",
+ "b_string_strstr___aaaaaaaaaaaaaaaaaaaaaaaaac__": "0.068957325",
+ "b_malloc_tiny2__0_": "0.000581407333333",
+ "b_utf8_onebyone__0_": "0.130938538333",
+ "b_malloc_tiny1__0_": "0.000768474333333",
+ "b_string_strstr___abcdefghijklmnopqrstuvwxyz__": "0.0134553343333",
}
PERF_DATA_HEADER = """
@@ -192,8 +191,7 @@ PERF_DATA_HEADER = """
#
"""
-TURBOSTAT_LOG_OUTPUT = (
- """CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
+TURBOSTAT_LOG_OUTPUT = """CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
- 329 12.13 2723 2393 10975 77
0 336 12.41 2715 2393 6328 77
2 323 11.86 2731 2393 4647 69
@@ -221,17 +219,13 @@ CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
- 843 29.83 2832 2393 28161 47
0 827 29.35 2826 2393 16093 47
2 858 30.31 2838 2393 12068 46
-""")
+"""
TURBOSTAT_DATA = {
- 'cpufreq': {
- 'all': [2723, 2884, 2927, 2937, 2932, 2933, 2832]
- },
- 'cputemp': {
- 'all': [77, 83, 84, 72, 75, 46, 47]
- },
+ "cpufreq": {"all": [2723, 2884, 2927, 2937, 2932, 2933, 2832]},
+ "cputemp": {"all": [77, 83, 84, 72, 75, 46, 47]},
}
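The TURBOSTAT_LOG_OUTPUT / TURBOSTAT_DATA pair documents what ProcessTurbostatResults is expected to extract: each aggregate row (CPU column '-') contributes one Bzy_MHz reading to cpufreq['all'] and one CoreTmp reading to cputemp['all'], so the first aggregate row above yields 2723 MHz and 77 degrees and the last yields 2832 MHz and 47 degrees. A small sketch of that extraction, inferred from the fixtures rather than copied from the implementation:

def parse_turbostat(log_text):
    # Collect Bzy_MHz and CoreTmp from the aggregate ('-') rows only.
    # Inferred from the unit-test fixtures; the real parser is
    # Result.ProcessTurbostatResults.
    freqs, temps = [], []
    header = None
    for line in log_text.splitlines():
        fields = line.split()
        if not fields:
            continue
        if fields[0] == "CPU":
            header = fields               # column names for later lookups
            continue
        if header and fields[0] == "-":   # per-interval summary row
            freqs.append(int(fields[header.index("Bzy_MHz")]))
            temps.append(int(fields[header.index("CoreTmp")]))
    return {"cpufreq": {"all": freqs}, "cputemp": {"all": temps}}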
-TOP_LOG = ("""
+TOP_LOG = """
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
4102 chronos 12 -8 3454472 238300 118188 R 41.8 6.1 0:08.37 chrome
4204 chronos 12 -8 2492716 205728 179016 S 11.8 5.3 0:03.89 chrome
@@ -253,58 +247,58 @@ TOP_LOG = ("""
5713 chronos 20 0 5178652 103120 50372 S 17.8 2.6 0:01.13 chrome
7 root 20 0 0 0 0 S 1.0 0.0 0:00.73 rcu_preempt
855 root 20 0 0 0 0 S 1.0 0.0 0:00.01 kworker/4:2
-""")
+"""
TOP_DATA = [
{
- 'cmd': 'chrome-5745',
- 'cpu_use_avg': 115.35,
- 'count': 2,
- 'top5_cpu_use': [122.8, 107.9],
+ "cmd": "chrome-5745",
+ "cpu_use_avg": 115.35,
+ "count": 2,
+ "top5_cpu_use": [122.8, 107.9],
},
{
- 'cmd': 'chrome-5713',
- 'cpu_use_avg': 8.9,
- 'count': 1,
- 'top5_cpu_use': [17.8]
+ "cmd": "chrome-5713",
+ "cpu_use_avg": 8.9,
+ "count": 1,
+ "top5_cpu_use": [17.8],
},
{
- 'cmd': 'irq/cros-ec-912',
- 'cpu_use_avg': 1.0,
- 'count': 1,
- 'top5_cpu_use': [2.0],
+ "cmd": "irq/cros-ec-912",
+ "cpu_use_avg": 1.0,
+ "count": 1,
+ "top5_cpu_use": [2.0],
},
{
- 'cmd': 'chrome-5205',
- 'cpu_use_avg': 0.5,
- 'count': 1,
- 'top5_cpu_use': [1.0]
+ "cmd": "chrome-5205",
+ "cpu_use_avg": 0.5,
+ "count": 1,
+ "top5_cpu_use": [1.0],
},
{
- 'cmd': 'spi5-121',
- 'cpu_use_avg': 0.5,
- 'count': 1,
- 'top5_cpu_use': [1.0],
+ "cmd": "spi5-121",
+ "cpu_use_avg": 0.5,
+ "count": 1,
+ "top5_cpu_use": [1.0],
},
{
- 'cmd': 'sshd-4811',
- 'cpu_use_avg': 0.5,
- 'count': 1,
- 'top5_cpu_use': [1.0],
+ "cmd": "sshd-4811",
+ "cpu_use_avg": 0.5,
+ "count": 1,
+ "top5_cpu_use": [1.0],
},
{
- 'cmd': 'rcu_preempt-7',
- 'cpu_use_avg': 0.5,
- 'count': 1,
- 'top5_cpu_use': [1.0],
+ "cmd": "rcu_preempt-7",
+ "cpu_use_avg": 0.5,
+ "count": 1,
+ "top5_cpu_use": [1.0],
},
{
- 'cmd': 'kworker/4:2-855',
- 'cpu_use_avg': 0.5,
- 'count': 1,
- 'top5_cpu_use': [1.0],
+ "cmd": "kworker/4:2-855",
+ "cpu_use_avg": 0.5,
+ "count": 1,
+ "top5_cpu_use": [1.0],
},
]
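The TOP_LOG / TOP_DATA pair likewise pins down how ProcessTopResults aggregates snapshots: processes are keyed as '<cmd>-<pid>', 'count' is the number of snapshots in which the PID was seen, 'top5_cpu_use' keeps its highest %CPU readings, and 'cpu_use_avg' averages the readings over all snapshots, so chrome-5745 gets (122.8 + 107.9) / 2 = 115.35 while chrome-5713, seen once, gets 17.8 / 2 = 8.9. A compact restatement of that arithmetic, inferred from the fixtures:

def summarize_top(readings_by_process, num_snapshots):
    # readings_by_process: {'cmd-pid': [%CPU readings, one per sighting]}.
    # Inferred from TOP_DATA; the real logic is Result.ProcessTopResults.
    summary = []
    for cmd, readings in readings_by_process.items():
        summary.append({
            "cmd": cmd,
            "count": len(readings),
            "cpu_use_avg": sum(readings) / num_snapshots,
            "top5_cpu_use": sorted(readings, reverse=True)[:5],
        })
    # Highest average first, matching the ordering of TOP_DATA.
    return sorted(summary, key=lambda e: e["cpu_use_avg"], reverse=True)

# Example: summarize_top({"chrome-5745": [122.8, 107.9],
#                         "chrome-5713": [17.8]}, num_snapshots=2)
# yields cpu_use_avg 115.35 for chrome-5745 and 8.9 for chrome-5713.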
-TOP_OUTPUT = (""" COMMAND AVG CPU% SEEN HIGHEST 5
+TOP_OUTPUT = """ COMMAND AVG CPU% SEEN HIGHEST 5
chrome 128.250000 6 [122.8, 107.9, 17.8, 5.0, 2.0]
irq/230-cros-ec 1.000000 1 [2.0]
sshd 0.500000 1 [1.0]
@@ -312,9 +306,9 @@ TOP_OUTPUT = (""" COMMAND AVG CPU% SEEN HIGHEST 5
spi5 0.500000 1 [1.0]
rcu_preempt 0.500000 1 [1.0]
kworker/4:2 0.500000 1 [1.0]
-""")
+"""
-CPUSTATS_UNIQ_OUTPUT = ("""
+CPUSTATS_UNIQ_OUTPUT = """
/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_cur_freq 1512000
/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq 1512000
/sys/devices/system/cpu/cpu3/cpufreq/cpuinfo_cur_freq 2016000
@@ -327,20 +321,20 @@ big-cpu 51234
soc-thermal 45456
little-cpu 42555
big-cpu 61724
-""")
+"""
CPUSTATS_UNIQ_DATA = {
- 'cpufreq': {
- 'cpu0': [1512, 1500],
- 'cpu1': [1512, 1600],
- 'cpu3': [2016, 2012]
+ "cpufreq": {
+ "cpu0": [1512, 1500],
+ "cpu1": [1512, 1600],
+ "cpu3": [2016, 2012],
+ },
+ "cputemp": {
+ "soc-thermal": [44.4, 45.5],
+ "little-cpu": [41.2, 42.6],
+ "big-cpu": [51.2, 61.7],
},
- 'cputemp': {
- 'soc-thermal': [44.4, 45.5],
- 'little-cpu': [41.2, 42.6],
- 'big-cpu': [51.2, 61.7]
- }
}
-CPUSTATS_DUPL_OUTPUT = ("""
+CPUSTATS_DUPL_OUTPUT = """
/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_cur_freq 1512000
/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq 1512000
/sys/devices/system/cpu/cpu2/cpufreq/cpuinfo_cur_freq 1512000
@@ -353,17 +347,14 @@ CPUSTATS_DUPL_OUTPUT = ("""
/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq 1614000
/sys/devices/system/cpu/cpu2/cpufreq/cpuinfo_cur_freq 1614000
/sys/devices/system/cpu/cpu3/cpufreq/cpuinfo_cur_freq 1982000
-""")
+"""
CPUSTATS_DUPL_DATA = {
- 'cpufreq': {
- 'cpu0': [1512, 1500, 1614],
- 'cpu3': [2016, 2016, 1982]
- },
+ "cpufreq": {"cpu0": [1512, 1500, 1614], "cpu3": [2016, 2016, 1982]},
}
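The CPUSTATS_* fixtures capture the unit handling expected of ProcessCpustatsResults: cpuinfo_cur_freq values arrive in kHz and are divided by 1000 (1512000 becomes 1512 MHz), thermal readings arrive in millidegrees and are likewise scaled down (51234 becomes 51.2), and cores whose frequency series duplicates an earlier core's (cpu1/cpu2 in the _DUPL fixture) are dropped so only distinct series remain. A sketch of the frequency side of that conversion, inferred from the paired input/output data:

def parse_cpufreq(log_text):
    # kHz -> MHz plus de-duplication of identical per-core series.
    # Inferred from CPUSTATS_UNIQ_* / CPUSTATS_DUPL_*; the real parser is
    # Result.ProcessCpustatsResults.
    series = {}
    for line in log_text.splitlines():
        fields = line.split()
        if len(fields) == 2 and "cpufreq/cpuinfo_cur_freq" in fields[0]:
            cpu = fields[0].split("/")[-3]  # e.g. 'cpu0'
            series.setdefault(cpu, []).append(int(fields[1]) // 1000)
    deduped, seen = {}, set()
    for cpu in sorted(series):
        values = tuple(series[cpu])
        if values not in seen:              # keep the first core of each group
            seen.add(values)
            deduped[cpu] = series[cpu]
    return deduped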
-TMP_DIR1 = '/tmp/tmpAbcXyz'
+TMP_DIR1 = "/tmp/tmpAbcXyz"
-HISTOGRAMSET = ("""
+HISTOGRAMSET = """
[
{
"values": [
@@ -435,1427 +426,1557 @@ HISTOGRAMSET = ("""
}
]
-""")
+"""
# pylint: enable=line-too-long
class MockResult(Result):
- """Mock result class."""
- def __init__(self, mylogger, label, logging_level, machine):
- super(MockResult, self).__init__(mylogger, label, logging_level, machine)
+ """Mock result class."""
+
+ def __init__(self, mylogger, label, logging_level, machine):
+ super(MockResult, self).__init__(
+ mylogger, label, logging_level, machine
+ )
- def FindFilesInResultsDir(self, find_args):
- return ''
+ def FindFilesInResultsDir(self, find_args):
+ return ""
- # pylint: disable=arguments-differ
- def GetKeyvals(self, temp=False):
- if temp:
- pass
- return keyvals
+ # pylint: disable=arguments-differ
+ def GetKeyvals(self, temp=False):
+ if temp:
+ pass
+ return keyvals
class ResultTest(unittest.TestCase):
- """Result test class."""
- def __init__(self, *args, **kwargs):
- super(ResultTest, self).__init__(*args, **kwargs)
- self.callFakeProcessResults = False
- self.fakeCacheReturnResult = None
- self.callGetResultsDir = False
- self.callProcessResults = False
- self.callGetPerfReportFiles = False
- self.kv_dict = None
- self.tmpdir = ''
- self.callGetNewKeyvals = False
- self.callGetResultsFile = False
- self.callGetPerfDataFiles = False
- self.callGetTurbostatFile = False
- self.callGetCpustatsFile = False
- self.callGetTopFile = False
- self.callGetCpuinfoFile = False
- self.callGetWaitTimeFile = False
- self.args = None
- self.callGatherPerfResults = False
- self.mock_logger = mock.Mock(spec=logger.Logger)
- self.mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
- self.mock_label = MockLabel('mock_label', 'build', 'chromeos_image',
- 'autotest_dir', 'debug_dir', '/tmp', 'lumpy',
- 'remote', 'image_args', 'cache_dir', 'average',
- 'gcc', False, None)
-
- def testCreateFromRun(self):
- result = MockResult.CreateFromRun(logger.GetLogger(), 'average',
- self.mock_label, 'remote1', OUTPUT,
- error, 0, True)
- self.assertEqual(result.keyvals, keyvals)
- self.assertEqual(result.chroot_results_dir,
- '/tmp/test_that.PO1234567/platform_LibCBench')
- self.assertEqual(result.results_dir,
- '/tmp/chroot/tmp/test_that.PO1234567/platform_LibCBench')
- self.assertEqual(result.retval, 0)
-
- def setUp(self):
- self.result = Result(self.mock_logger, self.mock_label, 'average',
- self.mock_cmd_exec)
- self.result.chromeos_root = '/tmp/chromeos'
-
- @mock.patch.object(os.path, 'isdir')
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
- @mock.patch.object(command_executer.CommandExecuter, 'CopyFiles')
- def test_copy_files_to(self, mock_copyfiles, mock_runcmd, mock_isdir):
-
- files = ['src_file_1', 'src_file_2', 'src_file_3']
- dest_dir = '/tmp/test'
- self.mock_cmd_exec.RunCommand = mock_runcmd
- self.mock_cmd_exec.CopyFiles = mock_copyfiles
-
- mock_copyfiles.return_value = 0
-
- # test 1. dest_dir exists; CopyFiles returns 0.
- mock_isdir.return_value = True
- self.result.CopyFilesTo(dest_dir, files)
- self.assertEqual(mock_runcmd.call_count, 0)
- self.assertEqual(mock_copyfiles.call_count, 3)
- first_args = mock_copyfiles.call_args_list[0][0]
- second_args = mock_copyfiles.call_args_list[1][0]
- third_args = mock_copyfiles.call_args_list[2][0]
- self.assertEqual(first_args, ('src_file_1', '/tmp/test/src_file_1.0'))
- self.assertEqual(second_args, ('src_file_2', '/tmp/test/src_file_2.1'))
- self.assertEqual(third_args, ('src_file_3', '/tmp/test/src_file_3.2'))
-
- mock_runcmd.reset_mock()
- mock_copyfiles.reset_mock()
- # test 2. dest_dir does not exist; CopyFiles returns 0.
- mock_isdir.return_value = False
- self.result.CopyFilesTo(dest_dir, files)
- self.assertEqual(mock_runcmd.call_count, 3)
- self.assertEqual(mock_copyfiles.call_count, 3)
- self.assertEqual(mock_runcmd.call_args_list[0],
- mock_runcmd.call_args_list[1])
- self.assertEqual(mock_runcmd.call_args_list[0],
- mock_runcmd.call_args_list[2])
- self.assertEqual(mock_runcmd.call_args_list[0][0],
- ('mkdir -p /tmp/test', ))
-
- # test 3. CopyFiles returns 1 (fails).
- mock_copyfiles.return_value = 1
- self.assertRaises(Exception, self.result.CopyFilesTo, dest_dir, files)
-
- @mock.patch.object(Result, 'CopyFilesTo')
- def test_copy_results_to(self, mockCopyFilesTo):
- results_file = [
- '/tmp/result.json.0', '/tmp/result.json.1', '/tmp/result.json.2'
- ]
- perf_data_files = [
- '/tmp/perf.data.0', '/tmp/perf.data.1', '/tmp/perf.data.2'
- ]
- perf_report_files = [
- '/tmp/perf.report.0', '/tmp/perf.report.1', '/tmp/perf.report.2'
- ]
-
- self.result.results_file = results_file
- self.result.perf_data_files = perf_data_files
- self.result.perf_report_files = perf_report_files
-
- self.result.CopyFilesTo = mockCopyFilesTo
- self.result.CopyResultsTo('/tmp/results/')
- self.assertEqual(mockCopyFilesTo.call_count, 3)
- self.assertEqual(len(mockCopyFilesTo.call_args_list), 3)
- self.assertEqual(mockCopyFilesTo.call_args_list[0][0],
- ('/tmp/results/', results_file))
- self.assertEqual(mockCopyFilesTo.call_args_list[1][0],
- ('/tmp/results/', perf_data_files))
- self.assertEqual(mockCopyFilesTo.call_args_list[2][0],
- ('/tmp/results/', perf_report_files))
-
- def test_get_new_keyvals(self):
- kv_dict = {}
-
- def FakeGetDataMeasurementsFiles():
- filename = os.path.join(os.getcwd(), 'unittest_keyval_file.txt')
- return [filename]
-
- self.result.GetDataMeasurementsFiles = FakeGetDataMeasurementsFiles
- kv_dict2, udict = self.result.GetNewKeyvals(kv_dict)
- self.assertEqual(
- kv_dict2, {
- u'Box2D__Box2D': 4775,
- u'Mandreel__Mandreel': 6620,
- u'Gameboy__Gameboy': 9901,
- u'Crypto__Crypto': 8737,
- u'telemetry_page_measurement_results__num_errored': 0,
- u'telemetry_page_measurement_results__num_failed': 0,
- u'PdfJS__PdfJS': 6455,
- u'Total__Score': 7918,
- u'EarleyBoyer__EarleyBoyer': 14340,
- u'MandreelLatency__MandreelLatency': 5188,
- u'CodeLoad__CodeLoad': 6271,
- u'DeltaBlue__DeltaBlue': 14401,
- u'Typescript__Typescript': 9815,
- u'SplayLatency__SplayLatency': 7653,
- u'zlib__zlib': 16094,
- u'Richards__Richards': 10358,
- u'RegExp__RegExp': 1765,
- u'NavierStokes__NavierStokes': 9815,
- u'Splay__Splay': 4425,
- u'RayTrace__RayTrace': 16600
- })
- self.assertEqual(
- udict, {
- u'Box2D__Box2D': u'score',
- u'Mandreel__Mandreel': u'score',
- u'Gameboy__Gameboy': u'score',
- u'Crypto__Crypto': u'score',
- u'telemetry_page_measurement_results__num_errored': u'count',
- u'telemetry_page_measurement_results__num_failed': u'count',
- u'PdfJS__PdfJS': u'score',
- u'Total__Score': u'score',
- u'EarleyBoyer__EarleyBoyer': u'score',
- u'MandreelLatency__MandreelLatency': u'score',
- u'CodeLoad__CodeLoad': u'score',
- u'DeltaBlue__DeltaBlue': u'score',
- u'Typescript__Typescript': u'score',
- u'SplayLatency__SplayLatency': u'score',
- u'zlib__zlib': u'score',
- u'Richards__Richards': u'score',
- u'RegExp__RegExp': u'score',
- u'NavierStokes__NavierStokes': u'score',
- u'Splay__Splay': u'score',
- u'RayTrace__RayTrace': u'score'
- })
-
- def test_append_telemetry_units(self):
- kv_dict = {
- u'Box2D__Box2D': 4775,
- u'Mandreel__Mandreel': 6620,
- u'Gameboy__Gameboy': 9901,
- u'Crypto__Crypto': 8737,
- u'PdfJS__PdfJS': 6455,
- u'Total__Score': 7918,
- u'EarleyBoyer__EarleyBoyer': 14340,
- u'MandreelLatency__MandreelLatency': 5188,
- u'CodeLoad__CodeLoad': 6271,
- u'DeltaBlue__DeltaBlue': 14401,
- u'Typescript__Typescript': 9815,
- u'SplayLatency__SplayLatency': 7653,
- u'zlib__zlib': 16094,
- u'Richards__Richards': 10358,
- u'RegExp__RegExp': 1765,
- u'NavierStokes__NavierStokes': 9815,
- u'Splay__Splay': 4425,
- u'RayTrace__RayTrace': 16600
- }
- units_dict = {
- u'Box2D__Box2D': u'score',
- u'Mandreel__Mandreel': u'score',
- u'Gameboy__Gameboy': u'score',
- u'Crypto__Crypto': u'score',
- u'PdfJS__PdfJS': u'score',
- u'Total__Score': u'score',
- u'EarleyBoyer__EarleyBoyer': u'score',
- u'MandreelLatency__MandreelLatency': u'score',
- u'CodeLoad__CodeLoad': u'score',
- u'DeltaBlue__DeltaBlue': u'score',
- u'Typescript__Typescript': u'score',
- u'SplayLatency__SplayLatency': u'score',
- u'zlib__zlib': u'score',
- u'Richards__Richards': u'score',
- u'RegExp__RegExp': u'score',
- u'NavierStokes__NavierStokes': u'score',
- u'Splay__Splay': u'score',
- u'RayTrace__RayTrace': u'score'
- }
+ """Result test class."""
+
+ def __init__(self, *args, **kwargs):
+ super(ResultTest, self).__init__(*args, **kwargs)
+ self.callFakeProcessResults = False
+ self.fakeCacheReturnResult = None
+ self.callGetResultsDir = False
+ self.callProcessResults = False
+ self.callGetPerfReportFiles = False
+ self.kv_dict = None
+ self.tmpdir = ""
+ self.callGetNewKeyvals = False
+ self.callGetResultsFile = False
+ self.callGetPerfDataFiles = False
+ self.callGetTurbostatFile = False
+ self.callGetCpustatsFile = False
+ self.callGetTopFile = False
+ self.callGetCpuinfoFile = False
+ self.callGetWaitTimeFile = False
+ self.args = None
+ self.callGatherPerfResults = False
+ self.mock_logger = mock.Mock(spec=logger.Logger)
+ self.mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+ self.mock_label = MockLabel(
+ "mock_label",
+ "build",
+ "chromeos_image",
+ "autotest_dir",
+ "debug_dir",
+ "/tmp",
+ "lumpy",
+ "remote",
+ "image_args",
+ "cache_dir",
+ "average",
+ "gcc",
+ False,
+ None,
+ )
+
+ def testCreateFromRun(self):
+ result = MockResult.CreateFromRun(
+ logger.GetLogger(),
+ "average",
+ self.mock_label,
+ "remote1",
+ OUTPUT,
+ error,
+ 0,
+ True,
+ )
+ self.assertEqual(result.keyvals, keyvals)
+ self.assertEqual(
+ result.chroot_results_dir,
+ "/tmp/test_that.PO1234567/platform_LibCBench",
+ )
+ self.assertEqual(
+ result.results_dir,
+ "/tmp/chroot/tmp/test_that.PO1234567/platform_LibCBench",
+ )
+ self.assertEqual(result.retval, 0)
+
+ def setUp(self):
+ self.result = Result(
+ self.mock_logger, self.mock_label, "average", self.mock_cmd_exec
+ )
+ self.result.chromeos_root = "/tmp/chromeos"
+
+ @mock.patch.object(os.path, "isdir")
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommand")
+ @mock.patch.object(command_executer.CommandExecuter, "CopyFiles")
+ def test_copy_files_to(self, mock_copyfiles, mock_runcmd, mock_isdir):
+
+ files = ["src_file_1", "src_file_2", "src_file_3"]
+ dest_dir = "/tmp/test"
+ self.mock_cmd_exec.RunCommand = mock_runcmd
+ self.mock_cmd_exec.CopyFiles = mock_copyfiles
+
+ mock_copyfiles.return_value = 0
+
+ # test 1. dest_dir exists; CopyFiles returns 0.
+ mock_isdir.return_value = True
+ self.result.CopyFilesTo(dest_dir, files)
+ self.assertEqual(mock_runcmd.call_count, 0)
+ self.assertEqual(mock_copyfiles.call_count, 3)
+ first_args = mock_copyfiles.call_args_list[0][0]
+ second_args = mock_copyfiles.call_args_list[1][0]
+ third_args = mock_copyfiles.call_args_list[2][0]
+ self.assertEqual(first_args, ("src_file_1", "/tmp/test/src_file_1.0"))
+ self.assertEqual(second_args, ("src_file_2", "/tmp/test/src_file_2.1"))
+ self.assertEqual(third_args, ("src_file_3", "/tmp/test/src_file_3.2"))
+
+ mock_runcmd.reset_mock()
+ mock_copyfiles.reset_mock()
+ # test 2. dest_dir does not exist; CopyFiles returns 0.
+ mock_isdir.return_value = False
+ self.result.CopyFilesTo(dest_dir, files)
+ self.assertEqual(mock_runcmd.call_count, 3)
+ self.assertEqual(mock_copyfiles.call_count, 3)
+ self.assertEqual(
+ mock_runcmd.call_args_list[0], mock_runcmd.call_args_list[1]
+ )
+ self.assertEqual(
+ mock_runcmd.call_args_list[0], mock_runcmd.call_args_list[2]
+ )
+ self.assertEqual(
+ mock_runcmd.call_args_list[0][0], ("mkdir -p /tmp/test",)
+ )
+
+ # test 3. CopyFiles returns 1 (fails).
+ mock_copyfiles.return_value = 1
+ self.assertRaises(Exception, self.result.CopyFilesTo, dest_dir, files)
+
+ @mock.patch.object(Result, "CopyFilesTo")
+ def test_copy_results_to(self, mockCopyFilesTo):
+ results_file = [
+ "/tmp/result.json.0",
+ "/tmp/result.json.1",
+ "/tmp/result.json.2",
+ ]
+ perf_data_files = [
+ "/tmp/perf.data.0",
+ "/tmp/perf.data.1",
+ "/tmp/perf.data.2",
+ ]
+ perf_report_files = [
+ "/tmp/perf.report.0",
+ "/tmp/perf.report.1",
+ "/tmp/perf.report.2",
+ ]
+
+ self.result.results_file = results_file
+ self.result.perf_data_files = perf_data_files
+ self.result.perf_report_files = perf_report_files
+
+ self.result.CopyFilesTo = mockCopyFilesTo
+ self.result.CopyResultsTo("/tmp/results/")
+ self.assertEqual(mockCopyFilesTo.call_count, 3)
+ self.assertEqual(len(mockCopyFilesTo.call_args_list), 3)
+ self.assertEqual(
+ mockCopyFilesTo.call_args_list[0][0],
+ ("/tmp/results/", results_file),
+ )
+ self.assertEqual(
+ mockCopyFilesTo.call_args_list[1][0],
+ ("/tmp/results/", perf_data_files),
+ )
+ self.assertEqual(
+ mockCopyFilesTo.call_args_list[2][0],
+ ("/tmp/results/", perf_report_files),
+ )
+
+ def test_get_new_keyvals(self):
+ kv_dict = {}
+
+ def FakeGetDataMeasurementsFiles():
+ filename = os.path.join(os.getcwd(), "unittest_keyval_file.txt")
+ return [filename]
+
+ self.result.GetDataMeasurementsFiles = FakeGetDataMeasurementsFiles
+ kv_dict2, udict = self.result.GetNewKeyvals(kv_dict)
+ self.assertEqual(
+ kv_dict2,
+ {
+ u"Box2D__Box2D": 4775,
+ u"Mandreel__Mandreel": 6620,
+ u"Gameboy__Gameboy": 9901,
+ u"Crypto__Crypto": 8737,
+ u"telemetry_page_measurement_results__num_errored": 0,
+ u"telemetry_page_measurement_results__num_failed": 0,
+ u"PdfJS__PdfJS": 6455,
+ u"Total__Score": 7918,
+ u"EarleyBoyer__EarleyBoyer": 14340,
+ u"MandreelLatency__MandreelLatency": 5188,
+ u"CodeLoad__CodeLoad": 6271,
+ u"DeltaBlue__DeltaBlue": 14401,
+ u"Typescript__Typescript": 9815,
+ u"SplayLatency__SplayLatency": 7653,
+ u"zlib__zlib": 16094,
+ u"Richards__Richards": 10358,
+ u"RegExp__RegExp": 1765,
+ u"NavierStokes__NavierStokes": 9815,
+ u"Splay__Splay": 4425,
+ u"RayTrace__RayTrace": 16600,
+ },
+ )
+ self.assertEqual(
+ udict,
+ {
+ u"Box2D__Box2D": u"score",
+ u"Mandreel__Mandreel": u"score",
+ u"Gameboy__Gameboy": u"score",
+ u"Crypto__Crypto": u"score",
+ u"telemetry_page_measurement_results__num_errored": u"count",
+ u"telemetry_page_measurement_results__num_failed": u"count",
+ u"PdfJS__PdfJS": u"score",
+ u"Total__Score": u"score",
+ u"EarleyBoyer__EarleyBoyer": u"score",
+ u"MandreelLatency__MandreelLatency": u"score",
+ u"CodeLoad__CodeLoad": u"score",
+ u"DeltaBlue__DeltaBlue": u"score",
+ u"Typescript__Typescript": u"score",
+ u"SplayLatency__SplayLatency": u"score",
+ u"zlib__zlib": u"score",
+ u"Richards__Richards": u"score",
+ u"RegExp__RegExp": u"score",
+ u"NavierStokes__NavierStokes": u"score",
+ u"Splay__Splay": u"score",
+ u"RayTrace__RayTrace": u"score",
+ },
+ )
+
+ def test_append_telemetry_units(self):
+ kv_dict = {
+ u"Box2D__Box2D": 4775,
+ u"Mandreel__Mandreel": 6620,
+ u"Gameboy__Gameboy": 9901,
+ u"Crypto__Crypto": 8737,
+ u"PdfJS__PdfJS": 6455,
+ u"Total__Score": 7918,
+ u"EarleyBoyer__EarleyBoyer": 14340,
+ u"MandreelLatency__MandreelLatency": 5188,
+ u"CodeLoad__CodeLoad": 6271,
+ u"DeltaBlue__DeltaBlue": 14401,
+ u"Typescript__Typescript": 9815,
+ u"SplayLatency__SplayLatency": 7653,
+ u"zlib__zlib": 16094,
+ u"Richards__Richards": 10358,
+ u"RegExp__RegExp": 1765,
+ u"NavierStokes__NavierStokes": 9815,
+ u"Splay__Splay": 4425,
+ u"RayTrace__RayTrace": 16600,
+ }
+ units_dict = {
+ u"Box2D__Box2D": u"score",
+ u"Mandreel__Mandreel": u"score",
+ u"Gameboy__Gameboy": u"score",
+ u"Crypto__Crypto": u"score",
+ u"PdfJS__PdfJS": u"score",
+ u"Total__Score": u"score",
+ u"EarleyBoyer__EarleyBoyer": u"score",
+ u"MandreelLatency__MandreelLatency": u"score",
+ u"CodeLoad__CodeLoad": u"score",
+ u"DeltaBlue__DeltaBlue": u"score",
+ u"Typescript__Typescript": u"score",
+ u"SplayLatency__SplayLatency": u"score",
+ u"zlib__zlib": u"score",
+ u"Richards__Richards": u"score",
+ u"RegExp__RegExp": u"score",
+ u"NavierStokes__NavierStokes": u"score",
+ u"Splay__Splay": u"score",
+ u"RayTrace__RayTrace": u"score",
+ }
- results_dict = self.result.AppendTelemetryUnits(kv_dict, units_dict)
- self.assertEqual(
- results_dict, {
- u'Box2D__Box2D': [4775, u'score'],
- u'Splay__Splay': [4425, u'score'],
- u'Gameboy__Gameboy': [9901, u'score'],
- u'Crypto__Crypto': [8737, u'score'],
- u'PdfJS__PdfJS': [6455, u'score'],
- u'Total__Score': [7918, u'score'],
- u'EarleyBoyer__EarleyBoyer': [14340, u'score'],
- u'MandreelLatency__MandreelLatency': [5188, u'score'],
- u'DeltaBlue__DeltaBlue': [14401, u'score'],
- u'SplayLatency__SplayLatency': [7653, u'score'],
- u'Mandreel__Mandreel': [6620, u'score'],
- u'Richards__Richards': [10358, u'score'],
- u'zlib__zlib': [16094, u'score'],
- u'CodeLoad__CodeLoad': [6271, u'score'],
- u'Typescript__Typescript': [9815, u'score'],
- u'RegExp__RegExp': [1765, u'score'],
- u'RayTrace__RayTrace': [16600, u'score'],
- u'NavierStokes__NavierStokes': [9815, u'score']
- })
-
- @mock.patch.object(misc, 'GetInsideChrootPath')
- @mock.patch.object(tempfile, 'mkdtemp')
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
- @mock.patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput')
- def test_get_keyvals(self, mock_chrootruncmd, mock_runcmd, mock_mkdtemp,
- mock_getpath):
-
- self.kv_dict = {}
- self.callGetNewKeyvals = False
-
- def reset():
- self.kv_dict = {}
- self.callGetNewKeyvals = False
- mock_chrootruncmd.reset_mock()
- mock_runcmd.reset_mock()
- mock_mkdtemp.reset_mock()
- mock_getpath.reset_mock()
-
- def FakeGetNewKeyvals(kv_dict):
- self.kv_dict = kv_dict
- self.callGetNewKeyvals = True
- return_kvdict = {'first_time': 680, 'Total': 10}
- return_udict = {'first_time': 'ms', 'Total': 'score'}
- return return_kvdict, return_udict
-
- mock_mkdtemp.return_value = TMP_DIR1
- mock_chrootruncmd.return_value = [
- '', ('%s,PASS\n%s/telemetry_Crosperf,PASS\n') % (TMP_DIR1, TMP_DIR1),
- ''
- ]
- mock_getpath.return_value = TMP_DIR1
- self.result.ce.ChrootRunCommandWOutput = mock_chrootruncmd
- self.result.ce.RunCommand = mock_runcmd
- self.result.GetNewKeyvals = FakeGetNewKeyvals
- self.result.suite = 'telemetry_Crosperf'
- self.result.results_dir = '/tmp/test_that_resultsNmq'
-
- # Test 1. no self.temp_dir.
- res = self.result.GetKeyvals()
- self.assertTrue(self.callGetNewKeyvals)
- self.assertEqual(self.kv_dict, {'': 'PASS', 'telemetry_Crosperf': 'PASS'})
- self.assertEqual(mock_runcmd.call_count, 1)
- self.assertEqual(mock_runcmd.call_args_list[0][0],
- ('cp -r /tmp/test_that_resultsNmq/* %s' % TMP_DIR1, ))
- self.assertEqual(mock_chrootruncmd.call_count, 1)
- self.assertEqual(
- mock_chrootruncmd.call_args_list[0][0],
- (self.result.chromeos_root,
- ('./generate_test_report --no-color --csv %s') % TMP_DIR1))
- self.assertEqual(mock_getpath.call_count, 1)
- self.assertEqual(mock_mkdtemp.call_count, 1)
- self.assertEqual(res, {'Total': [10, 'score'], 'first_time': [680, 'ms']})
-
- # Test 2. self.temp_dir
- reset()
- mock_chrootruncmd.return_value = [
- '', ('/tmp/tmpJCajRG,PASS\n/tmp/tmpJCajRG/'
- 'telemetry_Crosperf,PASS\n'), ''
- ]
- mock_getpath.return_value = '/tmp/tmpJCajRG'
- self.result.temp_dir = '/tmp/tmpJCajRG'
- res = self.result.GetKeyvals()
- self.assertEqual(mock_runcmd.call_count, 0)
- self.assertEqual(mock_mkdtemp.call_count, 0)
- self.assertEqual(mock_chrootruncmd.call_count, 1)
- self.assertTrue(self.callGetNewKeyvals)
- self.assertEqual(self.kv_dict, {'': 'PASS', 'telemetry_Crosperf': 'PASS'})
- self.assertEqual(res, {'Total': [10, 'score'], 'first_time': [680, 'ms']})
-
- # Test 3. suite != telemetry_Crosperf. Normally this would be for
- # running non-Telemetry autotests, such as BootPerfServer. In this test
- # case, the keyvals we have set up were returned from a Telemetry test run;
- # so this pass is basically testing that we don't append the units to the
- # test results (which we do for Telemetry autotest runs).
- reset()
- self.result.suite = ''
- res = self.result.GetKeyvals()
- self.assertEqual(res, {'Total': 10, 'first_time': 680})
-
- @mock.patch.object(misc, 'GetInsideChrootPath')
- @mock.patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput')
- @mock.patch.object(os.path, 'exists')
- def test_get_samples(self, mock_exists, mock_get_total_samples,
- mock_getpath):
- self.result.perf_data_files = ['/tmp/results/perf.data']
- self.result.board = 'samus'
- mock_getpath.return_value = '/usr/chromeos/chroot/tmp/results/perf.data'
- mock_get_total_samples.return_value = [
- '', '45.42% 237210 chrome ', ''
- ]
- mock_exists.return_value = True
-
- # mock_open does not seem to support iteration.
- # pylint: disable=line-too-long
- content = """1.63% 66 dav1d-tile chrome [.] decode_coefs
+ results_dict = self.result.AppendTelemetryUnits(kv_dict, units_dict)
+ self.assertEqual(
+ results_dict,
+ {
+ u"Box2D__Box2D": [4775, u"score"],
+ u"Splay__Splay": [4425, u"score"],
+ u"Gameboy__Gameboy": [9901, u"score"],
+ u"Crypto__Crypto": [8737, u"score"],
+ u"PdfJS__PdfJS": [6455, u"score"],
+ u"Total__Score": [7918, u"score"],
+ u"EarleyBoyer__EarleyBoyer": [14340, u"score"],
+ u"MandreelLatency__MandreelLatency": [5188, u"score"],
+ u"DeltaBlue__DeltaBlue": [14401, u"score"],
+ u"SplayLatency__SplayLatency": [7653, u"score"],
+ u"Mandreel__Mandreel": [6620, u"score"],
+ u"Richards__Richards": [10358, u"score"],
+ u"zlib__zlib": [16094, u"score"],
+ u"CodeLoad__CodeLoad": [6271, u"score"],
+ u"Typescript__Typescript": [9815, u"score"],
+ u"RegExp__RegExp": [1765, u"score"],
+ u"RayTrace__RayTrace": [16600, u"score"],
+ u"NavierStokes__NavierStokes": [9815, u"score"],
+ },
+ )
+
+ @mock.patch.object(misc, "GetInsideChrootPath")
+ @mock.patch.object(tempfile, "mkdtemp")
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommand")
+ @mock.patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ )
+ def test_get_keyvals(
+ self, mock_chrootruncmd, mock_runcmd, mock_mkdtemp, mock_getpath
+ ):
+
+ self.kv_dict = {}
+ self.callGetNewKeyvals = False
+
+ def reset():
+ self.kv_dict = {}
+ self.callGetNewKeyvals = False
+ mock_chrootruncmd.reset_mock()
+ mock_runcmd.reset_mock()
+ mock_mkdtemp.reset_mock()
+ mock_getpath.reset_mock()
+
+ def FakeGetNewKeyvals(kv_dict):
+ self.kv_dict = kv_dict
+ self.callGetNewKeyvals = True
+ return_kvdict = {"first_time": 680, "Total": 10}
+ return_udict = {"first_time": "ms", "Total": "score"}
+ return return_kvdict, return_udict
+
+ mock_mkdtemp.return_value = TMP_DIR1
+ mock_chrootruncmd.return_value = [
+ "",
+ ("%s,PASS\n%s/telemetry_Crosperf,PASS\n") % (TMP_DIR1, TMP_DIR1),
+ "",
+ ]
+ mock_getpath.return_value = TMP_DIR1
+ self.result.ce.ChrootRunCommandWOutput = mock_chrootruncmd
+ self.result.ce.RunCommand = mock_runcmd
+ self.result.GetNewKeyvals = FakeGetNewKeyvals
+ self.result.suite = "telemetry_Crosperf"
+ self.result.results_dir = "/tmp/test_that_resultsNmq"
+
+ # Test 1. no self.temp_dir.
+ res = self.result.GetKeyvals()
+ self.assertTrue(self.callGetNewKeyvals)
+ self.assertEqual(
+ self.kv_dict, {"": "PASS", "telemetry_Crosperf": "PASS"}
+ )
+ self.assertEqual(mock_runcmd.call_count, 1)
+ self.assertEqual(
+ mock_runcmd.call_args_list[0][0],
+ ("cp -r /tmp/test_that_resultsNmq/* %s" % TMP_DIR1,),
+ )
+ self.assertEqual(mock_chrootruncmd.call_count, 1)
+ self.assertEqual(
+ mock_chrootruncmd.call_args_list[0][0],
+ (
+ self.result.chromeos_root,
+ ("./generate_test_report --no-color --csv %s") % TMP_DIR1,
+ ),
+ )
+ self.assertEqual(mock_getpath.call_count, 1)
+ self.assertEqual(mock_mkdtemp.call_count, 1)
+ self.assertEqual(
+ res, {"Total": [10, "score"], "first_time": [680, "ms"]}
+ )
+
+ # Test 2. self.temp_dir
+ reset()
+ mock_chrootruncmd.return_value = [
+ "",
+ (
+ "/tmp/tmpJCajRG,PASS\n/tmp/tmpJCajRG/"
+ "telemetry_Crosperf,PASS\n"
+ ),
+ "",
+ ]
+ mock_getpath.return_value = "/tmp/tmpJCajRG"
+ self.result.temp_dir = "/tmp/tmpJCajRG"
+ res = self.result.GetKeyvals()
+ self.assertEqual(mock_runcmd.call_count, 0)
+ self.assertEqual(mock_mkdtemp.call_count, 0)
+ self.assertEqual(mock_chrootruncmd.call_count, 1)
+ self.assertTrue(self.callGetNewKeyvals)
+ self.assertEqual(
+ self.kv_dict, {"": "PASS", "telemetry_Crosperf": "PASS"}
+ )
+ self.assertEqual(
+ res, {"Total": [10, "score"], "first_time": [680, "ms"]}
+ )
+
+ # Test 3. suite != telemetry_Crosperf. Normally this would be for
+ # running non-Telemetry autotests, such as BootPerfServer. In this test
+ # case, the keyvals we have set up were returned from a Telemetry test run;
+ # so this pass is basically testing that we don't append the units to the
+ # test results (which we do for Telemetry autotest runs).
+ reset()
+ self.result.suite = ""
+ res = self.result.GetKeyvals()
+ self.assertEqual(res, {"Total": 10, "first_time": 680})
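+
+ # Tests 1-3 above hinge on how units are attached: for telemetry_Crosperf
+ # runs each keyval becomes a [value, unit] pair, while other suites keep
+ # the bare value. A minimal sketch of that shaping, using a hypothetical
+ # attach_units helper (not part of Result):
+ #
+ #   def attach_units(keyvals, units, is_telemetry):
+ #       if not is_telemetry:
+ #           return dict(keyvals)
+ #       return {k: [v, units.get(k, "")] for k, v in keyvals.items()}
+ #
+ #   attach_units({"Total": 10}, {"Total": "score"}, True)
+ #   # -> {"Total": [10, "score"]}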
+
+ @mock.patch.object(misc, "GetInsideChrootPath")
+ @mock.patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ )
+ @mock.patch.object(os.path, "exists")
+ def test_get_samples(
+ self, mock_exists, mock_get_total_samples, mock_getpath
+ ):
+ self.result.perf_data_files = ["/tmp/results/perf.data"]
+ self.result.board = "samus"
+ mock_getpath.return_value = "/usr/chromeos/chroot/tmp/results/perf.data"
+ mock_get_total_samples.return_value = [
+ "",
+ "45.42% 237210 chrome ",
+ "",
+ ]
+ mock_exists.return_value = True
+
+ # mock_open does not seem to support iteration.
+ # pylint: disable=line-too-long
+ content = """1.63% 66 dav1d-tile chrome [.] decode_coefs
1.48% 60 swapper [kernel.kallsyms] [k] intel_idle
1.16% 47 dav1d-tile chrome [.] decode_sb"""
- with mock.patch('builtins.open', return_value=io.StringIO(content)):
- samples = self.result.GetSamples()
- self.assertEqual(samples, [237210 - 60, u'samples'])
-
- def test_get_results_dir(self):
-
- self.result.out = ''
- self.assertRaises(Exception, self.result.GetResultsDir)
-
- self.result.out = OUTPUT
- resdir = self.result.GetResultsDir()
- self.assertEqual(resdir, '/tmp/test_that.PO1234567/platform_LibCBench')
-
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandGeneric')
- def test_find_files_in_results_dir(self, mock_runcmd):
-
- self.result.results_dir = None
- res = self.result.FindFilesInResultsDir('-name perf.data')
- self.assertEqual(res, '')
-
- self.result.ce.RunCommand = mock_runcmd
- self.result.results_dir = '/tmp/test_results'
- mock_runcmd.return_value = [0, '/tmp/test_results/perf.data', '']
- res = self.result.FindFilesInResultsDir('-name perf.data')
- self.assertEqual(mock_runcmd.call_count, 1)
- self.assertEqual(mock_runcmd.call_args_list[0][0],
- ('find /tmp/test_results -name perf.data', ))
- self.assertEqual(res, '/tmp/test_results/perf.data')
-
- mock_runcmd.reset_mock()
- mock_runcmd.return_value = [1, '', '']
- self.assertRaises(Exception, self.result.FindFilesInResultsDir,
- '-name perf.data')
-
- @mock.patch.object(Result, 'FindFilesInResultsDir')
- def test_get_perf_data_files(self, mock_findfiles):
- self.args = None
-
- mock_findfiles.return_value = 'line1\nline1\n'
- self.result.FindFilesInResultsDir = mock_findfiles
- res = self.result.GetPerfDataFiles()
- self.assertEqual(res, ['line1', 'line1'])
- self.assertEqual(mock_findfiles.call_args_list[0][0],
- ('-name perf.data', ))
-
- def test_get_perf_report_files(self):
- self.args = None
-
- def FakeFindFiles(find_args):
- self.args = find_args
- return 'line1\nline1\n'
-
- self.result.FindFilesInResultsDir = FakeFindFiles
- res = self.result.GetPerfReportFiles()
- self.assertEqual(res, ['line1', 'line1'])
- self.assertEqual(self.args, '-name perf.data.report')
-
- def test_get_data_measurement_files(self):
- self.args = None
-
- def FakeFindFiles(find_args):
- self.args = find_args
- return 'line1\nline1\n'
-
- self.result.FindFilesInResultsDir = FakeFindFiles
- res = self.result.GetDataMeasurementsFiles()
- self.assertEqual(res, ['line1', 'line1'])
- self.assertEqual(self.args, '-name perf_measurements')
-
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
- def test_get_turbostat_file_finds_single_log(self, mock_runcmd):
- """Expected behavior when a single log file found."""
- self.result.results_dir = '/tmp/test_results'
- self.result.ce.RunCommandWOutput = mock_runcmd
- mock_runcmd.return_value = (0, 'some/long/path/turbostat.log', '')
- found_single_log = self.result.GetTurbostatFile()
- self.assertEqual(found_single_log, 'some/long/path/turbostat.log')
-
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
- def test_get_turbostat_file_finds_multiple_logs(self, mock_runcmd):
- """Error case when multiple files found."""
- self.result.results_dir = '/tmp/test_results'
- self.result.ce.RunCommandWOutput = mock_runcmd
- mock_runcmd.return_value = (0,
- 'some/long/path/turbostat.log\nturbostat.log',
- '')
- found_first_logs = self.result.GetTurbostatFile()
- self.assertEqual(found_first_logs, 'some/long/path/turbostat.log')
-
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
- def test_get_turbostat_file_finds_no_logs(self, mock_runcmd):
- """Error case when no log file found."""
- self.result.results_dir = '/tmp/test_results'
- self.result.ce.RunCommandWOutput = mock_runcmd
- mock_runcmd.return_value = (0, '', '')
- found_no_logs = self.result.GetTurbostatFile()
- self.assertEqual(found_no_logs, '')
-
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
- def test_get_turbostat_file_with_failing_find(self, mock_runcmd):
- """Error case when file search returns an error."""
- self.result.results_dir = '/tmp/test_results'
- mock_runcmd.return_value = (-1, '', 'error')
- with self.assertRaises(RuntimeError):
- self.result.GetTurbostatFile()
-
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
- def test_get_top_file_finds_single_log(self, mock_runcmd):
- """Expected behavior when a single top log file found."""
- self.result.results_dir = '/tmp/test_results'
- self.result.ce.RunCommandWOutput = mock_runcmd
- mock_runcmd.return_value = (0, 'some/long/path/top.log', '')
- found_single_log = self.result.GetTopFile()
- self.assertEqual(found_single_log, 'some/long/path/top.log')
-
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
- def test_get_top_file_finds_multiple_logs(self, mock_runcmd):
- """The case when multiple top files found."""
- self.result.results_dir = '/tmp/test_results'
- self.result.ce.RunCommandWOutput = mock_runcmd
- mock_runcmd.return_value = (0, 'some/long/path/top.log\ntop.log', '')
- found_first_logs = self.result.GetTopFile()
- self.assertEqual(found_first_logs, 'some/long/path/top.log')
-
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
- def test_get_top_file_finds_no_logs(self, mock_runcmd):
- """Error case when no log file found."""
- self.result.results_dir = '/tmp/test_results'
- self.result.ce.RunCommandWOutput = mock_runcmd
- mock_runcmd.return_value = (0, '', '')
- found_no_logs = self.result.GetTopFile()
- self.assertEqual(found_no_logs, '')
-
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
- def test_get_cpuinfo_file_finds_single_log(self, mock_runcmd):
- """Expected behavior when a single cpuinfo file found."""
- self.result.results_dir = '/tmp/test_results'
- self.result.ce.RunCommandWOutput = mock_runcmd
- mock_runcmd.return_value = (0, 'some/long/path/cpuinfo.log', '')
- found_single_log = self.result.GetCpuinfoFile()
- self.assertEqual(found_single_log, 'some/long/path/cpuinfo.log')
-
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
- def test_get_cpustats_file_finds_single_log(self, mock_runcmd):
- """Expected behavior when a single log file found."""
- self.result.results_dir = '/tmp/test_results'
- self.result.ce.RunCommandWOutput = mock_runcmd
- mock_runcmd.return_value = (0, 'some/long/path/cpustats.log', '')
- found_single_log = self.result.GetCpustatsFile()
- self.assertEqual(found_single_log, 'some/long/path/cpustats.log')
-
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
- def test_get_cpustats_file_finds_multiple_logs(self, mock_runcmd):
- """The case when multiple files found."""
- self.result.results_dir = '/tmp/test_results'
- self.result.ce.RunCommandWOutput = mock_runcmd
- mock_runcmd.return_value = (0, 'some/long/path/cpustats.log\ncpustats.log',
- '')
- found_first_logs = self.result.GetCpustatsFile()
- self.assertEqual(found_first_logs, 'some/long/path/cpustats.log')
-
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
- def test_get_cpustats_file_finds_no_logs(self, mock_runcmd):
- """Error case when no log file found."""
- self.result.results_dir = '/tmp/test_results'
- self.result.ce.RunCommandWOutput = mock_runcmd
- mock_runcmd.return_value = (0, '', '')
- found_no_logs = self.result.GetCpustatsFile()
- self.assertEqual(found_no_logs, '')
-
- def test_verify_perf_data_pid_ok(self):
- """Verify perf PID which is present in TOP_DATA."""
- self.result.top_cmds = TOP_DATA
- # pid is present in TOP_DATA.
- with mock.patch.object(Result,
- 'ReadPidFromPerfData',
- return_value=['5713']):
- self.result.VerifyPerfDataPID()
-
- def test_verify_perf_data_pid_fail(self):
- """Test perf PID missing in top raises the error."""
- self.result.top_cmds = TOP_DATA
- # pid is not in the list of top processes.
- with mock.patch.object(Result,
- 'ReadPidFromPerfData',
- return_value=['9999']):
- with self.assertRaises(PidVerificationError):
- self.result.VerifyPerfDataPID()
-
- @mock.patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput')
- def test_read_pid_from_perf_data_ok(self, mock_runcmd):
- """Test perf header parser, normal flow."""
- self.result.ce.ChrootRunCommandWOutput = mock_runcmd
- self.result.perf_data_files = [
- '/tmp/chromeos/chroot/tmp/results/perf.data'
- ]
- exp_pid = '12345'
- mock_runcmd.return_value = (0, PERF_DATA_HEADER.format(pid=exp_pid), '')
- pids = self.result.ReadPidFromPerfData()
- self.assertEqual(pids, [exp_pid])
-
- @mock.patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput')
- def test_read_pid_from_perf_data_mult_profiles(self, mock_runcmd):
- """Test multiple perf.data files with PID."""
- self.result.ce.ChrootRunCommandWOutput = mock_runcmd
- # self.result.chromeos_root = '/tmp/chromeos'
- self.result.perf_data_files = [
- '/tmp/chromeos/chroot/tmp/results/perf.data.0',
- '/tmp/chromeos/chroot/tmp/results/perf.data.1',
- ]
- # Each perf.data was recorded per-process: '-p <pid>' without system-wide '-a'.
- cmd_line = '# cmdline : /usr/bin/perf record -e instructions -p {pid}'
- exp_perf_pids = ['1111', '2222']
- mock_runcmd.side_effect = [
- (0, cmd_line.format(pid=exp_perf_pids[0]), ''),
- (0, cmd_line.format(pid=exp_perf_pids[1]), ''),
- ]
- pids = self.result.ReadPidFromPerfData()
- self.assertEqual(pids, exp_perf_pids)
-
- @mock.patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput')
- def test_read_pid_from_perf_data_no_pid(self, mock_runcmd):
- """Test perf.data without PID."""
- self.result.ce.ChrootRunCommandWOutput = mock_runcmd
- self.result.perf_data_files = [
- '/tmp/chromeos/chroot/tmp/results/perf.data'
- ]
- cmd_line = '# cmdline : /usr/bin/perf record -e instructions'
- mock_runcmd.return_value = (0, cmd_line, '')
- pids = self.result.ReadPidFromPerfData()
- # pids is empty.
- self.assertEqual(pids, [])
-
- @mock.patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput')
- def test_read_pid_from_perf_data_system_wide(self, mock_runcmd):
- """Test reading from system-wide profile with PID."""
- self.result.ce.ChrootRunCommandWOutput = mock_runcmd
- self.result.perf_data_files = [
- '/tmp/chromeos/chroot/tmp/results/perf.data'
- ]
- # There is '-p <pid>' in command line but it's still system-wide: '-a'.
- cmd_line = '# cmdline : /usr/bin/perf record -e instructions -a -p 1234'
- mock_runcmd.return_value = (0, cmd_line, '')
- pids = self.result.ReadPidFromPerfData()
- # pids should be empty since it's not a per-process profiling.
- self.assertEqual(pids, [])
-
- @mock.patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput')
- def test_read_pid_from_perf_data_read_fail(self, mock_runcmd):
- """Failure to read perf.data raises the error."""
- self.result.ce.ChrootRunCommandWOutput = mock_runcmd
- self.result.perf_data_files = [
- '/tmp/chromeos/chroot/tmp/results/perf.data'
- ]
- # Error status of the profile read.
- mock_runcmd.return_value = (1, '', '')
- with self.assertRaises(PerfDataReadError):
- self.result.ReadPidFromPerfData()
-
- @mock.patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput')
- def test_read_pid_from_perf_data_fail(self, mock_runcmd):
- """Failure to find cmdline in perf.data header raises the error."""
- self.result.ce.ChrootRunCommandWOutput = mock_runcmd
- self.result.perf_data_files = [
- '/tmp/chromeos/chroot/tmp/results/perf.data'
- ]
- # Empty output.
- mock_runcmd.return_value = (0, '', '')
- with self.assertRaises(PerfDataReadError):
- self.result.ReadPidFromPerfData()
-
- def test_process_turbostat_results_with_valid_data(self):
- """Normal case when log exists and contains valid data."""
- self.result.turbostat_log_file = '/tmp/somelogfile.log'
- with mock.patch('builtins.open',
- mock.mock_open(read_data=TURBOSTAT_LOG_OUTPUT)) as mo:
- cpustats = self.result.ProcessTurbostatResults()
- # Check that the log got opened and data were read/parsed.
- calls = [mock.call('/tmp/somelogfile.log')]
- mo.assert_has_calls(calls)
- self.assertEqual(cpustats, TURBOSTAT_DATA)
-
- def test_process_turbostat_results_from_empty_file(self):
- """Error case when log exists but file is empty."""
- self.result.turbostat_log_file = '/tmp/emptylogfile.log'
- with mock.patch('builtins.open', mock.mock_open(read_data='')) as mo:
- cpustats = self.result.ProcessTurbostatResults()
- # Check that the log got opened and parsed successfully and empty data
- # returned.
- calls = [mock.call('/tmp/emptylogfile.log')]
- mo.assert_has_calls(calls)
- self.assertEqual(cpustats, {})
-
- def test_process_turbostat_results_when_file_doesnt_exist(self):
- """Error case when file does not exist."""
- nonexistinglog = '/tmp/1'
- while os.path.exists(nonexistinglog):
- # Extend file path if it happens to exist.
- nonexistinglog = os.path.join(nonexistinglog, '1')
- self.result.turbostat_log_file = nonexistinglog
- # Allow the tested function to call a 'real' open and hopefully crash.
- with self.assertRaises(IOError):
- self.result.ProcessTurbostatResults()
-
- def test_process_cpustats_results_with_uniq_data(self):
- """Process cpustats log which has freq unique to each core.
-
- Testing normal case when frequency data vary between
- different cores.
- Expecting that data for all cores will be present in
- returned cpustats.
- """
- self.result.cpustats_log_file = '/tmp/somelogfile.log'
- with mock.patch('builtins.open',
- mock.mock_open(read_data=CPUSTATS_UNIQ_OUTPUT)) as mo:
- cpustats = self.result.ProcessCpustatsResults()
- # Check that the log got opened and data were read/parsed.
- calls = [mock.call('/tmp/somelogfile.log')]
- mo.assert_has_calls(calls)
- self.assertEqual(cpustats, CPUSTATS_UNIQ_DATA)
-
- def test_process_cpustats_results_with_dupl_data(self):
- """Process cpustats log where cores have duplicate freq.
-
- Testing normal case when frequency data on some cores
- are duplicated.
- Expecting that duplicated data is discarded in
- returned cpustats.
- """
- self.result.cpustats_log_file = '/tmp/somelogfile.log'
- with mock.patch('builtins.open',
- mock.mock_open(read_data=CPUSTATS_DUPL_OUTPUT)) as mo:
- cpustats = self.result.ProcessCpustatsResults()
- # Check that the log got opened and data were read/parsed.
- calls = [mock.call('/tmp/somelogfile.log')]
- mo.assert_has_calls(calls)
- self.assertEqual(cpustats, CPUSTATS_DUPL_DATA)
-
- def test_process_cpustats_results_from_empty_file(self):
- """Error case when log exists but file is empty."""
- self.result.cpustats_log_file = '/tmp/emptylogfile.log'
- with mock.patch('builtins.open', mock.mock_open(read_data='')) as mo:
- cpustats = self.result.ProcessCpustatsResults()
- # Check that the log got opened and parsed successfully and empty data
- # returned.
- calls = [mock.call('/tmp/emptylogfile.log')]
- mo.assert_has_calls(calls)
- self.assertEqual(cpustats, {})
-
- def test_process_top_results_with_valid_data(self):
- """Process top log with valid data."""
-
- self.result.top_log_file = '/tmp/fakelogfile.log'
- with mock.patch('builtins.open', mock.mock_open(read_data=TOP_LOG)) as mo:
- topproc = self.result.ProcessTopResults()
- # Check that the log got opened and data were read/parsed.
- calls = [mock.call('/tmp/fakelogfile.log')]
- mo.assert_has_calls(calls)
- self.assertEqual(topproc, TOP_DATA)
-
- def test_process_top_results_from_empty_file(self):
- """Error case when log exists but file is empty."""
- self.result.top_log_file = '/tmp/emptylogfile.log'
- with mock.patch('builtins.open', mock.mock_open(read_data='')) as mo:
- topcalls = self.result.ProcessTopResults()
- # Check that the log got opened and parsed successfully and empty data
- # returned.
- calls = [mock.call('/tmp/emptylogfile.log')]
- mo.assert_has_calls(calls)
- self.assertEqual(topcalls, [])
-
- def test_format_string_top_cmds(self):
- """Test formatted string with top commands."""
- self.result.top_cmds = [
- {
- 'cmd': 'chrome-111',
- 'cpu_use_avg': 119.753453465,
- 'count': 44444,
- 'top5_cpu_use': [222.8, 217.9, 217.8, 191.0, 189.9],
- },
- {
- 'cmd': 'chrome-222',
- 'cpu_use_avg': 100,
- 'count': 33333,
- 'top5_cpu_use': [200.0, 195.0, 190.0, 185.0, 180.0],
- },
- {
- 'cmd': 'irq/230-cros-ec',
- 'cpu_use_avg': 10.000000000000001,
- 'count': 1000,
- 'top5_cpu_use': [11.5, 11.4, 11.3, 11.2, 11.1],
- },
- {
- 'cmd': 'powerd',
- 'cpu_use_avg': 2.0,
- 'count': 2,
- 'top5_cpu_use': [3.0, 1.0]
- },
- {
- 'cmd': 'cmd3',
- 'cpu_use_avg': 1.0,
- 'count': 1,
- 'top5_cpu_use': [1.0],
- },
- {
- 'cmd': 'cmd4',
- 'cpu_use_avg': 1.0,
- 'count': 1,
- 'top5_cpu_use': [1.0],
- },
- {
- 'cmd': 'cmd5',
- 'cpu_use_avg': 1.0,
- 'count': 1,
- 'top5_cpu_use': [1.0],
- },
- {
- 'cmd': 'cmd6_not_for_print',
- 'cpu_avg': 1.0,
- 'count': 1,
- 'top5': [1.0],
- },
- ]
- form_str = self.result.FormatStringTopCommands()
- self.assertEqual(
- form_str, '\n'.join([
- 'Top commands with highest CPU usage:',
- ' COMMAND AVG CPU% COUNT HIGHEST 5',
- '-' * 50,
- ' chrome-111 119.75 44444 '
- '[222.8, 217.9, 217.8, 191.0, 189.9]',
- ' chrome-222 100.00 33333 '
- '[200.0, 195.0, 190.0, 185.0, 180.0]',
- ' irq/230-cros-ec 10.00 1000 '
- '[11.5, 11.4, 11.3, 11.2, 11.1]',
- ' powerd 2.00 2 [3.0, 1.0]',
- ' cmd3 1.00 1 [1.0]',
- ' cmd4 1.00 1 [1.0]',
- ' cmd5 1.00 1 [1.0]',
- '-' * 50,
- ]))
-
- def test_format_string_top_calls_no_data(self):
- """Test formatted string of top with no data."""
- self.result.top_cmds = []
- form_str = self.result.FormatStringTopCommands()
- self.assertEqual(
- form_str, '\n'.join([
- 'Top commands with highest CPU usage:',
- ' COMMAND AVG CPU% COUNT HIGHEST 5',
- '-' * 50,
- '[NO DATA FROM THE TOP LOG]',
- '-' * 50,
- ]))
-
- @mock.patch.object(misc, 'GetInsideChrootPath')
- @mock.patch.object(command_executer.CommandExecuter, 'ChrootRunCommand')
- def test_generate_perf_report_files(self, mock_chrootruncmd, mock_getpath):
- fake_file = '/usr/chromeos/chroot/tmp/results/fake_file'
- self.result.perf_data_files = ['/tmp/results/perf.data']
- self.result.board = 'lumpy'
- mock_getpath.return_value = fake_file
- self.result.ce.ChrootRunCommand = mock_chrootruncmd
- mock_chrootruncmd.return_value = 0
- # Debug path not found
- self.result.label.debug_path = ''
- tmp = self.result.GeneratePerfReportFiles()
- self.assertEqual(tmp, ['/tmp/chromeos/chroot%s' % fake_file])
- self.assertEqual(mock_chrootruncmd.call_args_list[0][0],
- (self.result.chromeos_root,
- ('/usr/sbin/perf report -n '
- '-i %s --stdio > %s') % (fake_file, fake_file)))
-
- @mock.patch.object(misc, 'GetInsideChrootPath')
- @mock.patch.object(command_executer.CommandExecuter, 'ChrootRunCommand')
- def test_generate_perf_report_files_debug(self, mock_chrootruncmd,
- mock_getpath):
- fake_file = '/usr/chromeos/chroot/tmp/results/fake_file'
- self.result.perf_data_files = ['/tmp/results/perf.data']
- self.result.board = 'lumpy'
- mock_getpath.return_value = fake_file
- self.result.ce.ChrootRunCommand = mock_chrootruncmd
- mock_chrootruncmd.return_value = 0
- # Debug path found
- self.result.label.debug_path = '/tmp/debug'
- tmp = self.result.GeneratePerfReportFiles()
- self.assertEqual(tmp, ['/tmp/chromeos/chroot%s' % fake_file])
- self.assertEqual(mock_chrootruncmd.call_args_list[0][0],
- (self.result.chromeos_root,
- ('/usr/sbin/perf report -n --symfs /tmp/debug '
- '--vmlinux /tmp/debug/usr/lib/debug/boot/vmlinux '
- '-i %s --stdio > %s') % (fake_file, fake_file)))
-
- @mock.patch.object(misc, 'GetOutsideChrootPath')
- def test_populate_from_run(self, mock_getpath):
- def FakeGetResultsDir():
- self.callGetResultsDir = True
- return '/tmp/results_dir'
-
- def FakeGetResultsFile():
- self.callGetResultsFile = True
- return []
-
- def FakeGetPerfDataFiles():
- self.callGetPerfDataFiles = True
- return []
-
- def FakeGetPerfReportFiles():
- self.callGetPerfReportFiles = True
- return []
-
- def FakeGetTurbostatFile():
- self.callGetTurbostatFile = True
- return []
-
- def FakeGetCpustatsFile():
- self.callGetCpustatsFile = True
- return []
-
- def FakeGetTopFile():
- self.callGetTopFile = True
- return []
-
- def FakeGetCpuinfoFile():
- self.callGetCpuinfoFile = True
- return []
-
- def FakeGetWaitTimeFile():
- self.callGetWaitTimeFile = True
- return []
-
- def FakeProcessResults(show_results=False):
- if show_results:
- pass
- self.callProcessResults = True
-
- if mock_getpath:
- pass
- mock.get_path = '/tmp/chromeos/tmp/results_dir'
-
- self.callGetResultsDir = False
- self.callGetResultsFile = False
- self.callGetPerfDataFiles = False
- self.callGetPerfReportFiles = False
- self.callGetTurbostatFile = False
- self.callGetCpustatsFile = False
- self.callGetTopFile = False
- self.callGetCpuinfoFile = False
- self.callGetWaitTimeFile = False
- self.callProcessResults = False
-
- self.result.GetResultsDir = FakeGetResultsDir
- self.result.GetResultsFile = FakeGetResultsFile
- self.result.GetPerfDataFiles = FakeGetPerfDataFiles
- self.result.GeneratePerfReportFiles = FakeGetPerfReportFiles
- self.result.GetTurbostatFile = FakeGetTurbostatFile
- self.result.GetCpustatsFile = FakeGetCpustatsFile
- self.result.GetTopFile = FakeGetTopFile
- self.result.GetCpuinfoFile = FakeGetCpuinfoFile
- self.result.GetWaitTimeFile = FakeGetWaitTimeFile
- self.result.ProcessResults = FakeProcessResults
-
- self.result.PopulateFromRun(OUTPUT, '', 0, 'test', 'telemetry_Crosperf',
- 'chrome')
- self.assertTrue(self.callGetResultsDir)
- self.assertTrue(self.callGetResultsFile)
- self.assertTrue(self.callGetPerfDataFiles)
- self.assertTrue(self.callGetPerfReportFiles)
- self.assertTrue(self.callGetTurbostatFile)
- self.assertTrue(self.callGetCpustatsFile)
- self.assertTrue(self.callGetTopFile)
- self.assertTrue(self.callGetCpuinfoFile)
- self.assertTrue(self.callGetWaitTimeFile)
- self.assertTrue(self.callProcessResults)
-
- def FakeGetKeyvals(self, show_all=False):
- if show_all:
- return {'first_time': 680, 'Total': 10}
- else:
- return {'Total': 10}
-
- def test_process_results(self):
- def FakeGatherPerfResults():
- self.callGatherPerfResults = True
-
- def FakeGetSamples():
- return (1, 'samples')
-
- # Test 1
- self.callGatherPerfResults = False
-
- self.result.GetKeyvals = self.FakeGetKeyvals
- self.result.GatherPerfResults = FakeGatherPerfResults
-
- self.result.retval = 0
- self.result.ProcessResults()
- self.assertTrue(self.callGatherPerfResults)
- self.assertEqual(len(self.result.keyvals), 2)
- self.assertEqual(self.result.keyvals, {'Total': 10, 'retval': 0})
-
- # Test 2
- self.result.retval = 1
- self.result.ProcessResults()
- self.assertEqual(len(self.result.keyvals), 2)
- self.assertEqual(self.result.keyvals, {'Total': 10, 'retval': 1})
-
- # Test 3
- self.result.cwp_dso = 'chrome'
- self.result.retval = 0
- self.result.GetSamples = FakeGetSamples
- self.result.ProcessResults()
- self.assertEqual(len(self.result.keyvals), 3)
- self.assertEqual(self.result.keyvals, {
- 'Total': 10,
- 'samples': (1, 'samples'),
- 'retval': 0
- })
-
- # Test 4. Parse output of benchmarks with multiple stories in histogram
- # format
- self.result.suite = 'telemetry_Crosperf'
- self.result.results_file = [tempfile.mkdtemp() + '/histograms.json']
- with open(self.result.results_file[0], 'w') as f:
- f.write(HISTOGRAMSET)
- self.result.ProcessResults()
- shutil.rmtree(os.path.dirname(self.result.results_file[0]))
- # Verify the summary for the story is correct
- self.assertEqual(
- self.result.keyvals['timeToFirstContentfulPaint__typical'],
- [880.000, u'ms_smallerIsBetter'])
- # Verify the summary for a certain story tag is correct
- self.assertEqual(
- self.result.
- keyvals['timeToFirstContentfulPaint__cache_temperature:cold'],
- [1000.000, u'ms_smallerIsBetter'])
- self.assertEqual(
- self.result.
- keyvals['timeToFirstContentfulPaint__cache_temperature:warm'],
- [800.000, u'ms_smallerIsBetter'])
-
- @mock.patch.object(Result, 'ProcessCpustatsResults')
- @mock.patch.object(Result, 'ProcessTurbostatResults')
- def test_process_results_with_turbostat_log(self, mock_proc_turbo,
- mock_proc_cpustats):
- self.result.GetKeyvals = self.FakeGetKeyvals
-
- self.result.retval = 0
- self.result.turbostat_log_file = '/tmp/turbostat.log'
- mock_proc_turbo.return_value = {
- 'cpufreq': {
- 'all': [1, 2, 3]
- },
- 'cputemp': {
- 'all': [5.0, 6.0, 7.0]
+ with mock.patch("builtins.open", return_value=io.StringIO(content)):
+ samples = self.result.GetSamples()
+ self.assertEqual(samples, [237210 - 60, u"samples"])
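+
+ # The expected value above (237210 - 60) shows that samples attributed to
+ # the idle thread (the "swapper ... intel_idle" row of the parsed report)
+ # are subtracted from the total chrome sample count. A rough sketch of
+ # that adjustment, with adjust_samples as a hypothetical helper:
+ #
+ #   def adjust_samples(total_chrome_samples, report_lines):
+ #       idle = sum(
+ #           int(line.split()[1])
+ #           for line in report_lines
+ #           if "swapper" in line
+ #       )
+ #       return [total_chrome_samples - idle, "samples"]
+ #
+ #   adjust_samples(237210, content.splitlines())  # -> [237150, "samples"]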
+
+ def test_get_results_dir(self):
+
+ self.result.out = ""
+ self.assertRaises(Exception, self.result.GetResultsDir)
+
+ self.result.out = OUTPUT
+ resdir = self.result.GetResultsDir()
+ self.assertEqual(resdir, "/tmp/test_that.PO1234567/platform_LibCBench")
+
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandGeneric")
+ def test_find_files_in_results_dir(self, mock_runcmd):
+
+ self.result.results_dir = None
+ res = self.result.FindFilesInResultsDir("-name perf.data")
+ self.assertEqual(res, "")
+
+ self.result.ce.RunCommand = mock_runcmd
+ self.result.results_dir = "/tmp/test_results"
+ mock_runcmd.return_value = [0, "/tmp/test_results/perf.data", ""]
+ res = self.result.FindFilesInResultsDir("-name perf.data")
+ self.assertEqual(mock_runcmd.call_count, 1)
+ self.assertEqual(
+ mock_runcmd.call_args_list[0][0],
+ ("find /tmp/test_results -name perf.data",),
+ )
+ self.assertEqual(res, "/tmp/test_results/perf.data")
+
+ mock_runcmd.reset_mock()
+ mock_runcmd.return_value = [1, "", ""]
+ self.assertRaises(
+ Exception, self.result.FindFilesInResultsDir, "-name perf.data"
+ )
+
+ @mock.patch.object(Result, "FindFilesInResultsDir")
+ def test_get_perf_data_files(self, mock_findfiles):
+ self.args = None
+
+ mock_findfiles.return_value = "line1\nline1\n"
+ self.result.FindFilesInResultsDir = mock_findfiles
+ res = self.result.GetPerfDataFiles()
+ self.assertEqual(res, ["line1", "line1"])
+ self.assertEqual(
+ mock_findfiles.call_args_list[0][0], ("-name perf.data",)
+ )
+
+ def test_get_perf_report_files(self):
+ self.args = None
+
+ def FakeFindFiles(find_args):
+ self.args = find_args
+ return "line1\nline1\n"
+
+ self.result.FindFilesInResultsDir = FakeFindFiles
+ res = self.result.GetPerfReportFiles()
+ self.assertEqual(res, ["line1", "line1"])
+ self.assertEqual(self.args, "-name perf.data.report")
+
+ def test_get_data_measurement_files(self):
+ self.args = None
+
+ def FakeFindFiles(find_args):
+ self.args = find_args
+ return "line1\nline1\n"
+
+ self.result.FindFilesInResultsDir = FakeFindFiles
+ res = self.result.GetDataMeasurementsFiles()
+ self.assertEqual(res, ["line1", "line1"])
+ self.assertEqual(self.args, "-name perf_measurements")
+
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput")
+ def test_get_turbostat_file_finds_single_log(self, mock_runcmd):
+ """Expected behavior when a single log file found."""
+ self.result.results_dir = "/tmp/test_results"
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (0, "some/long/path/turbostat.log", "")
+ found_single_log = self.result.GetTurbostatFile()
+ self.assertEqual(found_single_log, "some/long/path/turbostat.log")
+
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput")
+ def test_get_turbostat_file_finds_multiple_logs(self, mock_runcmd):
+ """Error case when multiple files found."""
+ self.result.results_dir = "/tmp/test_results"
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (
+ 0,
+ "some/long/path/turbostat.log\nturbostat.log",
+ "",
+ )
+ found_first_logs = self.result.GetTurbostatFile()
+ self.assertEqual(found_first_logs, "some/long/path/turbostat.log")
+
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput")
+ def test_get_turbostat_file_finds_no_logs(self, mock_runcmd):
+ """Error case when no log file found."""
+ self.result.results_dir = "/tmp/test_results"
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (0, "", "")
+ found_no_logs = self.result.GetTurbostatFile()
+ self.assertEqual(found_no_logs, "")
+
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput")
+ def test_get_turbostat_file_with_failing_find(self, mock_runcmd):
+ """Error case when file search returns an error."""
+ self.result.results_dir = "/tmp/test_results"
+ mock_runcmd.return_value = (-1, "", "error")
+ with self.assertRaises(RuntimeError):
+ self.result.GetTurbostatFile()
+
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput")
+ def test_get_top_file_finds_single_log(self, mock_runcmd):
+ """Expected behavior when a single top log file found."""
+ self.result.results_dir = "/tmp/test_results"
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (0, "some/long/path/top.log", "")
+ found_single_log = self.result.GetTopFile()
+ self.assertEqual(found_single_log, "some/long/path/top.log")
+
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput")
+ def test_get_top_file_finds_multiple_logs(self, mock_runcmd):
+ """The case when multiple top files found."""
+ self.result.results_dir = "/tmp/test_results"
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (0, "some/long/path/top.log\ntop.log", "")
+ found_first_logs = self.result.GetTopFile()
+ self.assertEqual(found_first_logs, "some/long/path/top.log")
+
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput")
+ def test_get_top_file_finds_no_logs(self, mock_runcmd):
+ """Error case when no log file found."""
+ self.result.results_dir = "/tmp/test_results"
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (0, "", "")
+ found_no_logs = self.result.GetTopFile()
+ self.assertEqual(found_no_logs, "")
+
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput")
+ def test_get_cpuinfo_file_finds_single_log(self, mock_runcmd):
+ """Expected behavior when a single cpuinfo file found."""
+ self.result.results_dir = "/tmp/test_results"
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (0, "some/long/path/cpuinfo.log", "")
+ found_single_log = self.result.GetCpuinfoFile()
+ self.assertEqual(found_single_log, "some/long/path/cpuinfo.log")
+
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput")
+ def test_get_cpustats_file_finds_single_log(self, mock_runcmd):
+ """Expected behavior when a single log file found."""
+ self.result.results_dir = "/tmp/test_results"
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (0, "some/long/path/cpustats.log", "")
+ found_single_log = self.result.GetCpustatsFile()
+ self.assertEqual(found_single_log, "some/long/path/cpustats.log")
+
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput")
+ def test_get_cpustats_file_finds_multiple_logs(self, mock_runcmd):
+ """The case when multiple files found."""
+ self.result.results_dir = "/tmp/test_results"
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (
+ 0,
+ "some/long/path/cpustats.log\ncpustats.log",
+ "",
+ )
+ found_first_logs = self.result.GetCpustatsFile()
+ self.assertEqual(found_first_logs, "some/long/path/cpustats.log")
+
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput")
+ def test_get_cpustats_file_finds_no_logs(self, mock_runcmd):
+ """Error case when no log file found."""
+ self.result.results_dir = "/tmp/test_results"
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (0, "", "")
+ found_no_logs = self.result.GetCpustatsFile()
+ self.assertEqual(found_no_logs, "")
+
+ def test_verify_perf_data_pid_ok(self):
+ """Verify perf PID which is present in TOP_DATA."""
+ self.result.top_cmds = TOP_DATA
+ # pid is present in TOP_DATA.
+ with mock.patch.object(
+ Result, "ReadPidFromPerfData", return_value=["5713"]
+ ):
+ self.result.VerifyPerfDataPID()
+
+ def test_verify_perf_data_pid_fail(self):
+ """Test perf PID missing in top raises the error."""
+ self.result.top_cmds = TOP_DATA
+ # pid is not in the list of top processes.
+ with mock.patch.object(
+ Result, "ReadPidFromPerfData", return_value=["9999"]
+ ):
+ with self.assertRaises(PidVerificationError):
+ self.result.VerifyPerfDataPID()
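+
+ # The two tests above pin down the contract: VerifyPerfDataPID passes when
+ # every PID read from perf.data appears among the top processes, and
+ # raises PidVerificationError otherwise. A minimal sketch of that check
+ # (the top_pids collection is a hypothetical stand-in for however
+ # top_cmds encodes PIDs):
+ #
+ #   def verify_perf_pids(perf_pids, top_pids):
+ #       missing = [pid for pid in perf_pids if pid not in top_pids]
+ #       if missing:
+ #           raise PidVerificationError(
+ #               "PIDs %s not found among top processes" % missing
+ #           )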
+
+ @mock.patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ )
+ def test_read_pid_from_perf_data_ok(self, mock_runcmd):
+ """Test perf header parser, normal flow."""
+ self.result.ce.ChrootRunCommandWOutput = mock_runcmd
+ self.result.perf_data_files = [
+ "/tmp/chromeos/chroot/tmp/results/perf.data"
+ ]
+ exp_pid = "12345"
+ mock_runcmd.return_value = (0, PERF_DATA_HEADER.format(pid=exp_pid), "")
+ pids = self.result.ReadPidFromPerfData()
+ self.assertEqual(pids, [exp_pid])
+
+ @mock.patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ )
+ def test_read_pid_from_perf_data_mult_profiles(self, mock_runcmd):
+ """Test multiple perf.data files with PID."""
+ self.result.ce.ChrootRunCommandWOutput = mock_runcmd
+ # self.result.chromeos_root = '/tmp/chromeos'
+ self.result.perf_data_files = [
+ "/tmp/chromeos/chroot/tmp/results/perf.data.0",
+ "/tmp/chromeos/chroot/tmp/results/perf.data.1",
+ ]
+ # Each perf.data was recorded per-process: '-p <pid>' without system-wide '-a'.
+ cmd_line = "# cmdline : /usr/bin/perf record -e instructions -p {pid}"
+ exp_perf_pids = ["1111", "2222"]
+ mock_runcmd.side_effect = [
+ (0, cmd_line.format(pid=exp_perf_pids[0]), ""),
+ (0, cmd_line.format(pid=exp_perf_pids[1]), ""),
+ ]
+ pids = self.result.ReadPidFromPerfData()
+ self.assertEqual(pids, exp_perf_pids)
+
+ @mock.patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ )
+ def test_read_pid_from_perf_data_no_pid(self, mock_runcmd):
+ """Test perf.data without PID."""
+ self.result.ce.ChrootRunCommandWOutput = mock_runcmd
+ self.result.perf_data_files = [
+ "/tmp/chromeos/chroot/tmp/results/perf.data"
+ ]
+ cmd_line = "# cmdline : /usr/bin/perf record -e instructions"
+ mock_runcmd.return_value = (0, cmd_line, "")
+ pids = self.result.ReadPidFromPerfData()
+ # pids is empty.
+ self.assertEqual(pids, [])
+
+ @mock.patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ )
+ def test_read_pid_from_perf_data_system_wide(self, mock_runcmd):
+ """Test reading from system-wide profile with PID."""
+ self.result.ce.ChrootRunCommandWOutput = mock_runcmd
+ self.result.perf_data_files = [
+ "/tmp/chromeos/chroot/tmp/results/perf.data"
+ ]
+ # There is '-p <pid>' in command line but it's still system-wide: '-a'.
+ cmd_line = "# cmdline : /usr/bin/perf record -e instructions -a -p 1234"
+ mock_runcmd.return_value = (0, cmd_line, "")
+ pids = self.result.ReadPidFromPerfData()
+ # pids should be empty since it's not a per-process profiling.
+ self.assertEqual(pids, [])
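+
+ # Sketch of the rule that the no-PID and system-wide cases above encode,
+ # with pid_from_cmdline as a hypothetical helper (not the real parser): a
+ # profile only counts as per-process when '-p <pid>' is present and the
+ # system-wide flag '-a' is absent from the recorded perf command line.
+ #
+ #   def pid_from_cmdline(cmdline):
+ #       args = cmdline.split()
+ #       if "-a" in args or "-p" not in args:
+ #           return None
+ #       return args[args.index("-p") + 1]
+ #
+ #   pid_from_cmdline("/usr/bin/perf record -e instructions -a -p 1234")
+ #   # -> None (system-wide, so the PID is ignored)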
+
+ @mock.patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ )
+ def test_read_pid_from_perf_data_read_fail(self, mock_runcmd):
+ """Failure to read perf.data raises the error."""
+ self.result.ce.ChrootRunCommandWOutput = mock_runcmd
+ self.result.perf_data_files = [
+ "/tmp/chromeos/chroot/tmp/results/perf.data"
+ ]
+ # Error status of the profile read.
+ mock_runcmd.return_value = (1, "", "")
+ with self.assertRaises(PerfDataReadError):
+ self.result.ReadPidFromPerfData()
+
+ @mock.patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ )
+ def test_read_pid_from_perf_data_fail(self, mock_runcmd):
+ """Failure to find cmdline in perf.data header raises the error."""
+ self.result.ce.ChrootRunCommandWOutput = mock_runcmd
+ self.result.perf_data_files = [
+ "/tmp/chromeos/chroot/tmp/results/perf.data"
+ ]
+ # Empty output.
+ mock_runcmd.return_value = (0, "", "")
+ with self.assertRaises(PerfDataReadError):
+ self.result.ReadPidFromPerfData()
+
+ def test_process_turbostat_results_with_valid_data(self):
+ """Normal case when log exists and contains valid data."""
+ self.result.turbostat_log_file = "/tmp/somelogfile.log"
+ with mock.patch(
+ "builtins.open", mock.mock_open(read_data=TURBOSTAT_LOG_OUTPUT)
+ ) as mo:
+ cpustats = self.result.ProcessTurbostatResults()
+ # Check that the log got opened and data were read/parsed.
+ calls = [mock.call("/tmp/somelogfile.log")]
+ mo.assert_has_calls(calls)
+ self.assertEqual(cpustats, TURBOSTAT_DATA)
+
+ def test_process_turbostat_results_from_empty_file(self):
+ """Error case when log exists but file is empty."""
+ self.result.turbostat_log_file = "/tmp/emptylogfile.log"
+ with mock.patch("builtins.open", mock.mock_open(read_data="")) as mo:
+ cpustats = self.result.ProcessTurbostatResults()
+ # Check that the log got opened and parsed successfully and empty data
+ # returned.
+ calls = [mock.call("/tmp/emptylogfile.log")]
+ mo.assert_has_calls(calls)
+ self.assertEqual(cpustats, {})
+
+ def test_process_turbostat_results_when_file_doesnt_exist(self):
+ """Error case when file does not exist."""
+ nonexistinglog = "/tmp/1"
+ while os.path.exists(nonexistinglog):
+ # Extend file path if it happens to exist.
+ nonexistinglog = os.path.join(nonexistinglog, "1")
+ self.result.turbostat_log_file = nonexistinglog
+ # Allow the tested function to call a 'real' open and hopefully crash.
+ with self.assertRaises(IOError):
+ self.result.ProcessTurbostatResults()
+
+ def test_process_cpustats_results_with_uniq_data(self):
+ """Process cpustats log which has freq unique to each core.
+
+ Testing normal case when frequency data vary between
+ different cores.
+ Expecting that data for all cores will be present in
+ returned cpustats.
+ """
+ self.result.cpustats_log_file = "/tmp/somelogfile.log"
+ with mock.patch(
+ "builtins.open", mock.mock_open(read_data=CPUSTATS_UNIQ_OUTPUT)
+ ) as mo:
+ cpustats = self.result.ProcessCpustatsResults()
+ # Check that the log got opened and data were read/parsed.
+ calls = [mock.call("/tmp/somelogfile.log")]
+ mo.assert_has_calls(calls)
+ self.assertEqual(cpustats, CPUSTATS_UNIQ_DATA)
+
+ def test_process_cpustats_results_with_dupl_data(self):
+ """Process cpustats log where cores have duplicate freq.
+
+ Testing normal case when frequency data on some cores
+ are duplicated.
+ Expecting that duplicated data is discarded in
+ returned cpustats.
+ """
+ self.result.cpustats_log_file = "/tmp/somelogfile.log"
+ with mock.patch(
+ "builtins.open", mock.mock_open(read_data=CPUSTATS_DUPL_OUTPUT)
+ ) as mo:
+ cpustats = self.result.ProcessCpustatsResults()
+ # Check that the log got opened and data were read/parsed.
+ calls = [mock.call("/tmp/somelogfile.log")]
+ mo.assert_has_calls(calls)
+ self.assertEqual(cpustats, CPUSTATS_DUPL_DATA)
+
+ def test_process_cpustats_results_from_empty_file(self):
+ """Error case when log exists but file is empty."""
+ self.result.cpustats_log_file = "/tmp/emptylogfile.log"
+ with mock.patch("builtins.open", mock.mock_open(read_data="")) as mo:
+ cpustats = self.result.ProcessCpustatsResults()
+ # Check that the log got opened and parsed successfully and empty data
+ # returned.
+ calls = [mock.call("/tmp/emptylogfile.log")]
+ mo.assert_has_calls(calls)
+ self.assertEqual(cpustats, {})
+
+ def test_process_top_results_with_valid_data(self):
+ """Process top log with valid data."""
+
+ self.result.top_log_file = "/tmp/fakelogfile.log"
+ with mock.patch(
+ "builtins.open", mock.mock_open(read_data=TOP_LOG)
+ ) as mo:
+ topproc = self.result.ProcessTopResults()
+ # Check that the log got opened and data were read/parsed.
+ calls = [mock.call("/tmp/fakelogfile.log")]
+ mo.assert_has_calls(calls)
+ self.assertEqual(topproc, TOP_DATA)
+
+ def test_process_top_results_from_empty_file(self):
+ """Error case when log exists but file is empty."""
+ self.result.top_log_file = "/tmp/emptylogfile.log"
+ with mock.patch("builtins.open", mock.mock_open(read_data="")) as mo:
+ topcalls = self.result.ProcessTopResults()
+ # Check that the log got opened and parsed successfully and empty data
+ # returned.
+ calls = [mock.call("/tmp/emptylogfile.log")]
+ mo.assert_has_calls(calls)
+ self.assertEqual(topcalls, [])
+
+ def test_format_string_top_cmds(self):
+ """Test formatted string with top commands."""
+ self.result.top_cmds = [
+ {
+ "cmd": "chrome-111",
+ "cpu_use_avg": 119.753453465,
+ "count": 44444,
+ "top5_cpu_use": [222.8, 217.9, 217.8, 191.0, 189.9],
+ },
+ {
+ "cmd": "chrome-222",
+ "cpu_use_avg": 100,
+ "count": 33333,
+ "top5_cpu_use": [200.0, 195.0, 190.0, 185.0, 180.0],
+ },
+ {
+ "cmd": "irq/230-cros-ec",
+ "cpu_use_avg": 10.000000000000001,
+ "count": 1000,
+ "top5_cpu_use": [11.5, 11.4, 11.3, 11.2, 11.1],
+ },
+ {
+ "cmd": "powerd",
+ "cpu_use_avg": 2.0,
+ "count": 2,
+ "top5_cpu_use": [3.0, 1.0],
+ },
+ {
+ "cmd": "cmd3",
+ "cpu_use_avg": 1.0,
+ "count": 1,
+ "top5_cpu_use": [1.0],
+ },
+ {
+ "cmd": "cmd4",
+ "cpu_use_avg": 1.0,
+ "count": 1,
+ "top5_cpu_use": [1.0],
+ },
+ {
+ "cmd": "cmd5",
+ "cpu_use_avg": 1.0,
+ "count": 1,
+ "top5_cpu_use": [1.0],
+ },
+ {
+ "cmd": "cmd6_not_for_print",
+ "cpu_avg": 1.0,
+ "count": 1,
+ "top5": [1.0],
+ },
+ ]
+ form_str = self.result.FormatStringTopCommands()
+ self.assertEqual(
+ form_str,
+ "\n".join(
+ [
+ "Top commands with highest CPU usage:",
+ " COMMAND AVG CPU% COUNT HIGHEST 5",
+ "-" * 50,
+ " chrome-111 119.75 44444 "
+ "[222.8, 217.9, 217.8, 191.0, 189.9]",
+ " chrome-222 100.00 33333 "
+ "[200.0, 195.0, 190.0, 185.0, 180.0]",
+ " irq/230-cros-ec 10.00 1000 "
+ "[11.5, 11.4, 11.3, 11.2, 11.1]",
+ " powerd 2.00 2 [3.0, 1.0]",
+ " cmd3 1.00 1 [1.0]",
+ " cmd4 1.00 1 [1.0]",
+ " cmd5 1.00 1 [1.0]",
+ "-" * 50,
+ ]
+ ),
+ )
+
+ def test_format_string_top_calls_no_data(self):
+ """Test formatted string of top with no data."""
+ self.result.top_cmds = []
+ form_str = self.result.FormatStringTopCommands()
+ self.assertEqual(
+ form_str,
+ "\n".join(
+ [
+ "Top commands with highest CPU usage:",
+ " COMMAND AVG CPU% COUNT HIGHEST 5",
+ "-" * 50,
+ "[NO DATA FROM THE TOP LOG]",
+ "-" * 50,
+ ]
+ ),
+ )
+
+ @mock.patch.object(misc, "GetInsideChrootPath")
+ @mock.patch.object(command_executer.CommandExecuter, "ChrootRunCommand")
+ def test_generate_perf_report_files(self, mock_chrootruncmd, mock_getpath):
+ fake_file = "/usr/chromeos/chroot/tmp/results/fake_file"
+ self.result.perf_data_files = ["/tmp/results/perf.data"]
+ self.result.board = "lumpy"
+ mock_getpath.return_value = fake_file
+ self.result.ce.ChrootRunCommand = mock_chrootruncmd
+ mock_chrootruncmd.return_value = 0
+ # Debug path not found
+ self.result.label.debug_path = ""
+ tmp = self.result.GeneratePerfReportFiles()
+ self.assertEqual(tmp, ["/tmp/chromeos/chroot%s" % fake_file])
+ self.assertEqual(
+ mock_chrootruncmd.call_args_list[0][0],
+ (
+ self.result.chromeos_root,
+ ("/usr/sbin/perf report -n " "-i %s --stdio > %s")
+ % (fake_file, fake_file),
+ ),
+ )
+
+ @mock.patch.object(misc, "GetInsideChrootPath")
+ @mock.patch.object(command_executer.CommandExecuter, "ChrootRunCommand")
+ def test_generate_perf_report_files_debug(
+ self, mock_chrootruncmd, mock_getpath
+ ):
+ fake_file = "/usr/chromeos/chroot/tmp/results/fake_file"
+ self.result.perf_data_files = ["/tmp/results/perf.data"]
+ self.result.board = "lumpy"
+ mock_getpath.return_value = fake_file
+ self.result.ce.ChrootRunCommand = mock_chrootruncmd
+ mock_chrootruncmd.return_value = 0
+ # Debug path found
+ self.result.label.debug_path = "/tmp/debug"
+ tmp = self.result.GeneratePerfReportFiles()
+ self.assertEqual(tmp, ["/tmp/chromeos/chroot%s" % fake_file])
+ self.assertEqual(
+ mock_chrootruncmd.call_args_list[0][0],
+ (
+ self.result.chromeos_root,
+ (
+ "/usr/sbin/perf report -n --symfs /tmp/debug "
+ "--vmlinux /tmp/debug/usr/lib/debug/boot/vmlinux "
+ "-i %s --stdio > %s"
+ )
+ % (fake_file, fake_file),
+ ),
+ )
+
+ @mock.patch.object(misc, "GetOutsideChrootPath")
+ def test_populate_from_run(self, mock_getpath):
+ def FakeGetResultsDir():
+ self.callGetResultsDir = True
+ return "/tmp/results_dir"
+
+ def FakeGetResultsFile():
+ self.callGetResultsFile = True
+ return []
+
+ def FakeGetPerfDataFiles():
+ self.callGetPerfDataFiles = True
+ return []
+
+ def FakeGetPerfReportFiles():
+ self.callGetPerfReportFiles = True
+ return []
+
+ def FakeGetTurbostatFile():
+ self.callGetTurbostatFile = True
+ return []
+
+ def FakeGetCpustatsFile():
+ self.callGetCpustatsFile = True
+ return []
+
+ def FakeGetTopFile():
+ self.callGetTopFile = True
+ return []
+
+ def FakeGetCpuinfoFile():
+ self.callGetCpuinfoFile = True
+ return []
+
+ def FakeGetWaitTimeFile():
+ self.callGetWaitTimeFile = True
+ return []
+
+ def FakeProcessResults(show_results=False):
+ if show_results:
+ pass
+ self.callProcessResults = True
+
+ if mock_getpath:
+ pass
+ mock.get_path = "/tmp/chromeos/tmp/results_dir"
+
+ self.callGetResultsDir = False
+ self.callGetResultsFile = False
+ self.callGetPerfDataFiles = False
+ self.callGetPerfReportFiles = False
+ self.callGetTurbostatFile = False
+ self.callGetCpustatsFile = False
+ self.callGetTopFile = False
+ self.callGetCpuinfoFile = False
+ self.callGetWaitTimeFile = False
+ self.callProcessResults = False
+
+ self.result.GetResultsDir = FakeGetResultsDir
+ self.result.GetResultsFile = FakeGetResultsFile
+ self.result.GetPerfDataFiles = FakeGetPerfDataFiles
+ self.result.GeneratePerfReportFiles = FakeGetPerfReportFiles
+ self.result.GetTurbostatFile = FakeGetTurbostatFile
+ self.result.GetCpustatsFile = FakeGetCpustatsFile
+ self.result.GetTopFile = FakeGetTopFile
+ self.result.GetCpuinfoFile = FakeGetCpuinfoFile
+ self.result.GetWaitTimeFile = FakeGetWaitTimeFile
+ self.result.ProcessResults = FakeProcessResults
+
+ self.result.PopulateFromRun(
+ OUTPUT, "", 0, "test", "telemetry_Crosperf", "chrome"
+ )
+ self.assertTrue(self.callGetResultsDir)
+ self.assertTrue(self.callGetResultsFile)
+ self.assertTrue(self.callGetPerfDataFiles)
+ self.assertTrue(self.callGetPerfReportFiles)
+ self.assertTrue(self.callGetTurbostatFile)
+ self.assertTrue(self.callGetCpustatsFile)
+ self.assertTrue(self.callGetTopFile)
+ self.assertTrue(self.callGetCpuinfoFile)
+ self.assertTrue(self.callGetWaitTimeFile)
+ self.assertTrue(self.callProcessResults)
+
+ def FakeGetKeyvals(self, show_all=False):
+ if show_all:
+ return {"first_time": 680, "Total": 10}
+ else:
+ return {"Total": 10}
+
+ def test_process_results(self):
+ def FakeGatherPerfResults():
+ self.callGatherPerfResults = True
+
+ def FakeGetSamples():
+ return (1, "samples")
+
+ # Test 1
+ self.callGatherPerfResults = False
+
+ self.result.GetKeyvals = self.FakeGetKeyvals
+ self.result.GatherPerfResults = FakeGatherPerfResults
+
+ self.result.retval = 0
+ self.result.ProcessResults()
+ self.assertTrue(self.callGatherPerfResults)
+ self.assertEqual(len(self.result.keyvals), 2)
+ self.assertEqual(self.result.keyvals, {"Total": 10, "retval": 0})
+
+ # Test 2
+ self.result.retval = 1
+ self.result.ProcessResults()
+ self.assertEqual(len(self.result.keyvals), 2)
+ self.assertEqual(self.result.keyvals, {"Total": 10, "retval": 1})
+
+ # Test 3
+ self.result.cwp_dso = "chrome"
+ self.result.retval = 0
+ self.result.GetSamples = FakeGetSamples
+ self.result.ProcessResults()
+ self.assertEqual(len(self.result.keyvals), 3)
+ self.assertEqual(
+ self.result.keyvals,
+ {"Total": 10, "samples": (1, "samples"), "retval": 0},
+ )
+
+ # Test 4. Parse output of benchmarks with multiple stories in histogram
+ # format
+ self.result.suite = "telemetry_Crosperf"
+ self.result.results_file = [tempfile.mkdtemp() + "/histograms.json"]
+ with open(self.result.results_file[0], "w") as f:
+ f.write(HISTOGRAMSET)
+ self.result.ProcessResults()
+ shutil.rmtree(os.path.dirname(self.result.results_file[0]))
+ # Verify the summary for the story is correct
+ self.assertEqual(
+ self.result.keyvals["timeToFirstContentfulPaint__typical"],
+ [880.000, u"ms_smallerIsBetter"],
+ )
+ # Verify the summary for a certain story tag is correct
+ self.assertEqual(
+ self.result.keyvals[
+ "timeToFirstContentfulPaint__cache_temperature:cold"
+ ],
+ [1000.000, u"ms_smallerIsBetter"],
+ )
+ self.assertEqual(
+ self.result.keyvals[
+ "timeToFirstContentfulPaint__cache_temperature:warm"
+ ],
+ [800.000, u"ms_smallerIsBetter"],
+ )
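+
+ # The keys asserted above follow the "<metric>__<story or story tag>"
+ # naming used when histogram-format results are summarized, e.g.
+ # "timeToFirstContentfulPaint__typical" for the story summary and
+ # "timeToFirstContentfulPaint__cache_temperature:cold" for a tag summary,
+ # each mapping to a [value, unit] pair such as [880.0, "ms_smallerIsBetter"].
+ # A one-line sketch of the key construction (hypothetical helper):
+ #
+ #   def summary_key(metric, grouping):
+ #       return "%s__%s" % (metric, grouping)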
+
+ @mock.patch.object(Result, "ProcessCpustatsResults")
+ @mock.patch.object(Result, "ProcessTurbostatResults")
+ def test_process_results_with_turbostat_log(
+ self, mock_proc_turbo, mock_proc_cpustats
+ ):
+ self.result.GetKeyvals = self.FakeGetKeyvals
+
+ self.result.retval = 0
+ self.result.turbostat_log_file = "/tmp/turbostat.log"
+ mock_proc_turbo.return_value = {
+ "cpufreq": {"all": [1, 2, 3]},
+ "cputemp": {"all": [5.0, 6.0, 7.0]},
}
- }
- self.result.ProcessResults()
- mock_proc_turbo.assert_has_calls([mock.call()])
- mock_proc_cpustats.assert_not_called()
- self.assertEqual(len(self.result.keyvals), 8)
- self.assertEqual(
- self.result.keyvals, {
- 'Total': 10,
- 'cpufreq_all_avg': 2,
- 'cpufreq_all_max': 3,
- 'cpufreq_all_min': 1,
- 'cputemp_all_avg': 6.0,
- 'cputemp_all_min': 5.0,
- 'cputemp_all_max': 7.0,
- 'retval': 0
- })
-
- @mock.patch.object(Result, 'ProcessCpustatsResults')
- @mock.patch.object(Result, 'ProcessTurbostatResults')
- def test_process_results_with_cpustats_log(self, mock_proc_turbo,
- mock_proc_cpustats):
- self.result.GetKeyvals = self.FakeGetKeyvals
-
- self.result.retval = 0
- self.result.cpustats_log_file = '/tmp/cpustats.log'
- mock_proc_cpustats.return_value = {
- 'cpufreq': {
- 'cpu0': [100, 100, 100],
- 'cpu1': [4, 5, 6]
- },
- 'cputemp': {
- 'little': [20.2, 20.2, 20.2],
- 'big': [55.2, 66.1, 77.3]
+ self.result.ProcessResults()
+ mock_proc_turbo.assert_has_calls([mock.call()])
+ mock_proc_cpustats.assert_not_called()
+ self.assertEqual(len(self.result.keyvals), 8)
+ self.assertEqual(
+ self.result.keyvals,
+ {
+ "Total": 10,
+ "cpufreq_all_avg": 2,
+ "cpufreq_all_max": 3,
+ "cpufreq_all_min": 1,
+ "cputemp_all_avg": 6.0,
+ "cputemp_all_min": 5.0,
+ "cputemp_all_max": 7.0,
+ "retval": 0,
+ },
+ )
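+
+ # The keyvals above show how each turbostat series is folded into
+ # <measurement>_<domain>_avg/min/max entries (min/max are dropped when
+ # they add nothing, as the cpustats test below shows for cpu0 and
+ # 'little'). A minimal sketch of that expansion with a hypothetical
+ # helper:
+ #
+ #   def expand_cpu_keyvals(cpu_data):
+ #       out = {}
+ #       for measurement, domains in cpu_data.items():
+ #           for domain, values in domains.items():
+ #               avg = sum(values) / len(values)
+ #               out["%s_%s_avg" % (measurement, domain)] = avg
+ #               if min(values) != max(values):
+ #                   out["%s_%s_min" % (measurement, domain)] = min(values)
+ #                   out["%s_%s_max" % (measurement, domain)] = max(values)
+ #       return out
+ #
+ #   expand_cpu_keyvals({"cpufreq": {"all": [1, 2, 3]}})
+ #   # -> {"cpufreq_all_avg": 2.0, "cpufreq_all_min": 1, "cpufreq_all_max": 3}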
+
+ @mock.patch.object(Result, "ProcessCpustatsResults")
+ @mock.patch.object(Result, "ProcessTurbostatResults")
+ def test_process_results_with_cpustats_log(
+ self, mock_proc_turbo, mock_proc_cpustats
+ ):
+ self.result.GetKeyvals = self.FakeGetKeyvals
+
+ self.result.retval = 0
+ self.result.cpustats_log_file = "/tmp/cpustats.log"
+ mock_proc_cpustats.return_value = {
+ "cpufreq": {"cpu0": [100, 100, 100], "cpu1": [4, 5, 6]},
+ "cputemp": {
+ "little": [20.2, 20.2, 20.2],
+ "big": [55.2, 66.1, 77.3],
+ },
}
- }
- self.result.ProcessResults()
- mock_proc_turbo.assert_not_called()
- mock_proc_cpustats.assert_has_calls([mock.call()])
- self.assertEqual(len(self.result.keyvals), 10)
- self.assertEqual(
- self.result.keyvals, {
- 'Total': 10,
- 'cpufreq_cpu0_avg': 100,
- 'cpufreq_cpu1_avg': 5,
- 'cpufreq_cpu1_max': 6,
- 'cpufreq_cpu1_min': 4,
- 'cputemp_big_avg': 66.2,
- 'cputemp_big_max': 77.3,
- 'cputemp_big_min': 55.2,
- 'cputemp_little_avg': 20.2,
- 'retval': 0
- })
-
- @mock.patch.object(Result, 'ProcessCpustatsResults')
- @mock.patch.object(Result, 'ProcessTurbostatResults')
- def test_process_results_with_turbostat_and_cpustats_logs(
- self, mock_proc_turbo, mock_proc_cpustats):
- self.result.GetKeyvals = self.FakeGetKeyvals
-
- self.result.retval = 0
- self.result.turbostat_log_file = '/tmp/turbostat.log'
- self.result.cpustats_log_file = '/tmp/cpustats.log'
- mock_proc_turbo.return_value = {
- 'cpufreq': {
- 'all': [1, 2, 3]
- },
- 'cputemp': {
- 'all': [5.0, 6.0, 7.0]
+ self.result.ProcessResults()
+ mock_proc_turbo.assert_not_called()
+ mock_proc_cpustats.assert_has_calls([mock.call()])
+ self.assertEqual(len(self.result.keyvals), 10)
+ self.assertEqual(
+ self.result.keyvals,
+ {
+ "Total": 10,
+ "cpufreq_cpu0_avg": 100,
+ "cpufreq_cpu1_avg": 5,
+ "cpufreq_cpu1_max": 6,
+ "cpufreq_cpu1_min": 4,
+ "cputemp_big_avg": 66.2,
+ "cputemp_big_max": 77.3,
+ "cputemp_big_min": 55.2,
+ "cputemp_little_avg": 20.2,
+ "retval": 0,
+ },
+ )
+
+ @mock.patch.object(Result, "ProcessCpustatsResults")
+ @mock.patch.object(Result, "ProcessTurbostatResults")
+ def test_process_results_with_turbostat_and_cpustats_logs(
+ self, mock_proc_turbo, mock_proc_cpustats
+ ):
+ self.result.GetKeyvals = self.FakeGetKeyvals
+
+ self.result.retval = 0
+ self.result.turbostat_log_file = "/tmp/turbostat.log"
+ self.result.cpustats_log_file = "/tmp/cpustats.log"
+ mock_proc_turbo.return_value = {
+ "cpufreq": {"all": [1, 2, 3]},
+ "cputemp": {"all": [5.0, 6.0, 7.0]},
}
- }
- self.result.ProcessResults()
- mock_proc_turbo.assert_has_calls([mock.call()])
- mock_proc_cpustats.assert_not_called()
- self.assertEqual(len(self.result.keyvals), 8)
- self.assertEqual(
- self.result.keyvals, {
- 'Total': 10,
- 'cpufreq_all_avg': 2,
- 'cpufreq_all_max': 3,
- 'cpufreq_all_min': 1,
- 'cputemp_all_avg': 6.0,
- 'cputemp_all_min': 5.0,
- 'cputemp_all_max': 7.0,
- 'retval': 0
- })
-
- @mock.patch.object(Result, 'ProcessCpustatsResults')
- @mock.patch.object(Result, 'ProcessTurbostatResults')
- def test_process_results_without_cpu_data(self, mock_proc_turbo,
- mock_proc_cpustats):
- self.result.GetKeyvals = self.FakeGetKeyvals
-
- self.result.retval = 0
- self.result.turbostat_log_file = ''
- self.result.cpustats_log_file = ''
- self.result.ProcessResults()
- mock_proc_turbo.assert_not_called()
- mock_proc_cpustats.assert_not_called()
- self.assertEqual(len(self.result.keyvals), 2)
- self.assertEqual(self.result.keyvals, {'Total': 10, 'retval': 0})
-
- @mock.patch.object(misc, 'GetInsideChrootPath')
- @mock.patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput')
- def test_populate_from_cache_dir(self, mock_runchrootcmd, mock_getpath):
-
- # pylint: disable=redefined-builtin
- def FakeMkdtemp(dir=None):
- if dir:
- pass
- return self.tmpdir
-
- def FakeGetSamples():
- return [1, u'samples']
-
- current_path = os.getcwd()
- cache_dir = os.path.join(current_path, 'test_cache/test_input')
- self.result.ce = command_executer.GetCommandExecuter(log_level='average')
- self.result.ce.ChrootRunCommandWOutput = mock_runchrootcmd
- mock_runchrootcmd.return_value = [
- '', ('%s,PASS\n%s/\telemetry_Crosperf,PASS\n') % (TMP_DIR1, TMP_DIR1),
- ''
- ]
- mock_getpath.return_value = TMP_DIR1
- self.tmpdir = tempfile.mkdtemp()
- save_real_mkdtemp = tempfile.mkdtemp
- tempfile.mkdtemp = FakeMkdtemp
-
- self.result.PopulateFromCacheDir(cache_dir, 'sunspider',
- 'telemetry_Crosperf', '')
- self.assertEqual(
- self.result.keyvals, {
- u'Total__Total': [444.0, u'ms'],
- u'regexp-dna__regexp-dna': [16.2, u'ms'],
- u'telemetry_page_measurement_results__num_failed': [0, u'count'],
- u'telemetry_page_measurement_results__num_errored': [0, u'count'],
- u'string-fasta__string-fasta': [23.2, u'ms'],
- u'crypto-sha1__crypto-sha1': [11.6, u'ms'],
- u'bitops-3bit-bits-in-byte__bitops-3bit-bits-in-byte':
- [3.2, u'ms'],
- u'access-nsieve__access-nsieve': [7.9, u'ms'],
- u'bitops-nsieve-bits__bitops-nsieve-bits': [9.4, u'ms'],
- u'string-validate-input__string-validate-input': [19.3, u'ms'],
- u'3d-raytrace__3d-raytrace': [24.7, u'ms'],
- u'3d-cube__3d-cube': [28.0, u'ms'],
- u'string-unpack-code__string-unpack-code': [46.7, u'ms'],
- u'date-format-tofte__date-format-tofte': [26.3, u'ms'],
- u'math-partial-sums__math-partial-sums': [22.0, u'ms'],
- '\telemetry_Crosperf': ['PASS', ''],
- u'crypto-aes__crypto-aes': [15.2, u'ms'],
- u'bitops-bitwise-and__bitops-bitwise-and': [8.4, u'ms'],
- u'crypto-md5__crypto-md5': [10.5, u'ms'],
- u'string-tagcloud__string-tagcloud': [52.8, u'ms'],
- u'access-nbody__access-nbody': [8.5, u'ms'],
- 'retval': 0,
- u'math-spectral-norm__math-spectral-norm': [6.6, u'ms'],
- u'math-cordic__math-cordic': [8.7, u'ms'],
- u'access-binary-trees__access-binary-trees': [4.5, u'ms'],
- u'controlflow-recursive__controlflow-recursive': [4.4, u'ms'],
- u'access-fannkuch__access-fannkuch': [17.8, u'ms'],
- u'string-base64__string-base64': [16.0, u'ms'],
- u'date-format-xparb__date-format-xparb': [20.9, u'ms'],
- u'3d-morph__3d-morph': [22.1, u'ms'],
- u'bitops-bits-in-byte__bitops-bits-in-byte': [9.1, u'ms']
- })
-
- self.result.GetSamples = FakeGetSamples
- self.result.PopulateFromCacheDir(cache_dir, 'sunspider',
- 'telemetry_Crosperf', 'chrome')
- self.assertEqual(
- self.result.keyvals, {
- u'Total__Total': [444.0, u'ms'],
- u'regexp-dna__regexp-dna': [16.2, u'ms'],
- u'telemetry_page_measurement_results__num_failed': [0, u'count'],
- u'telemetry_page_measurement_results__num_errored': [0, u'count'],
- u'string-fasta__string-fasta': [23.2, u'ms'],
- u'crypto-sha1__crypto-sha1': [11.6, u'ms'],
- u'bitops-3bit-bits-in-byte__bitops-3bit-bits-in-byte':
- [3.2, u'ms'],
- u'access-nsieve__access-nsieve': [7.9, u'ms'],
- u'bitops-nsieve-bits__bitops-nsieve-bits': [9.4, u'ms'],
- u'string-validate-input__string-validate-input': [19.3, u'ms'],
- u'3d-raytrace__3d-raytrace': [24.7, u'ms'],
- u'3d-cube__3d-cube': [28.0, u'ms'],
- u'string-unpack-code__string-unpack-code': [46.7, u'ms'],
- u'date-format-tofte__date-format-tofte': [26.3, u'ms'],
- u'math-partial-sums__math-partial-sums': [22.0, u'ms'],
- '\telemetry_Crosperf': ['PASS', ''],
- u'crypto-aes__crypto-aes': [15.2, u'ms'],
- u'bitops-bitwise-and__bitops-bitwise-and': [8.4, u'ms'],
- u'crypto-md5__crypto-md5': [10.5, u'ms'],
- u'string-tagcloud__string-tagcloud': [52.8, u'ms'],
- u'access-nbody__access-nbody': [8.5, u'ms'],
- 'retval': 0,
- u'math-spectral-norm__math-spectral-norm': [6.6, u'ms'],
- u'math-cordic__math-cordic': [8.7, u'ms'],
- u'access-binary-trees__access-binary-trees': [4.5, u'ms'],
- u'controlflow-recursive__controlflow-recursive': [4.4, u'ms'],
- u'access-fannkuch__access-fannkuch': [17.8, u'ms'],
- u'string-base64__string-base64': [16.0, u'ms'],
- u'date-format-xparb__date-format-xparb': [20.9, u'ms'],
- u'3d-morph__3d-morph': [22.1, u'ms'],
- u'bitops-bits-in-byte__bitops-bits-in-byte': [9.1, u'ms'],
- u'samples': [1, u'samples']
- })
-
- # Clean up after test.
- tempfile.mkdtemp = save_real_mkdtemp
- command = 'rm -Rf %s' % self.tmpdir
- self.result.ce.RunCommand(command)
-
- @mock.patch.object(misc, 'GetRoot')
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
- def test_cleanup(self, mock_runcmd, mock_getroot):
-
- # Test 1. 'rm_chroot_tmp' is True; self.results_dir exists;
- # self.temp_dir exists; results_dir name contains 'test_that_results_'.
- mock_getroot.return_value = ['/tmp/tmp_AbcXyz', 'test_that_results_fake']
- self.result.ce.RunCommand = mock_runcmd
- self.result.results_dir = 'test_results_dir'
- self.result.temp_dir = 'testtemp_dir'
- self.result.CleanUp(True)
- self.assertEqual(mock_getroot.call_count, 1)
- self.assertEqual(mock_runcmd.call_count, 2)
- self.assertEqual(mock_runcmd.call_args_list[0][0],
- ('rm -rf test_results_dir', ))
- self.assertEqual(mock_runcmd.call_args_list[1][0],
- ('rm -rf testtemp_dir', ))
-
- # Test 2. Same, except ath results_dir name does not contain
- # 'test_that_results_'
- mock_getroot.reset_mock()
- mock_runcmd.reset_mock()
- mock_getroot.return_value = ['/tmp/tmp_AbcXyz', 'other_results_fake']
- self.result.ce.RunCommand = mock_runcmd
- self.result.results_dir = 'test_results_dir'
- self.result.temp_dir = 'testtemp_dir'
- self.result.CleanUp(True)
- self.assertEqual(mock_getroot.call_count, 1)
- self.assertEqual(mock_runcmd.call_count, 2)
- self.assertEqual(mock_runcmd.call_args_list[0][0],
- ('rm -rf /tmp/tmp_AbcXyz', ))
- self.assertEqual(mock_runcmd.call_args_list[1][0],
- ('rm -rf testtemp_dir', ))
-
- # Test 3. mock_getroot returns nothing; 'rm_chroot_tmp' is False.
- mock_getroot.reset_mock()
- mock_runcmd.reset_mock()
- self.result.CleanUp(False)
- self.assertEqual(mock_getroot.call_count, 0)
- self.assertEqual(mock_runcmd.call_count, 1)
- self.assertEqual(mock_runcmd.call_args_list[0][0],
- ('rm -rf testtemp_dir', ))
-
- # Test 4. 'rm_chroot_tmp' is True, but result_dir & temp_dir are None.
- mock_getroot.reset_mock()
- mock_runcmd.reset_mock()
- self.result.results_dir = None
- self.result.temp_dir = None
- self.result.CleanUp(True)
- self.assertEqual(mock_getroot.call_count, 0)
- self.assertEqual(mock_runcmd.call_count, 0)
-
- @mock.patch.object(misc, 'GetInsideChrootPath')
- @mock.patch.object(command_executer.CommandExecuter, 'ChrootRunCommand')
- def test_store_to_cache_dir(self, mock_chrootruncmd, mock_getpath):
- def FakeMkdtemp(directory=''):
- if directory:
- pass
- return self.tmpdir
-
- if mock_chrootruncmd or mock_getpath:
- pass
- current_path = os.getcwd()
- cache_dir = os.path.join(current_path, 'test_cache/test_output')
-
- self.result.ce = command_executer.GetCommandExecuter(log_level='average')
- self.result.out = OUTPUT
- self.result.err = error
- self.result.retval = 0
- self.tmpdir = tempfile.mkdtemp()
- if not os.path.exists(self.tmpdir):
- os.makedirs(self.tmpdir)
- self.result.results_dir = os.path.join(os.getcwd(), 'test_cache')
- save_real_mkdtemp = tempfile.mkdtemp
- tempfile.mkdtemp = FakeMkdtemp
-
- mock_mm = machine_manager.MockMachineManager('/tmp/chromeos_root', 0,
- 'average', '')
- mock_mm.machine_checksum_string['mock_label'] = 'fake_machine_checksum123'
-
- mock_keylist = ['key1', 'key2', 'key3']
- test_flag.SetTestMode(True)
- self.result.StoreToCacheDir(cache_dir, mock_mm, mock_keylist)
-
- # Check that the correct things were written to the 'cache'.
- test_dir = os.path.join(os.getcwd(), 'test_cache/test_output')
- base_dir = os.path.join(os.getcwd(), 'test_cache/compare_output')
- self.assertTrue(os.path.exists(os.path.join(test_dir, 'autotest.tbz2')))
- self.assertTrue(os.path.exists(os.path.join(test_dir, 'machine.txt')))
- self.assertTrue(os.path.exists(os.path.join(test_dir, 'results.pickle')))
-
- f1 = os.path.join(test_dir, 'machine.txt')
- f2 = os.path.join(base_dir, 'machine.txt')
- cmd = 'diff %s %s' % (f1, f2)
- [_, out, _] = self.result.ce.RunCommandWOutput(cmd)
- self.assertEqual(len(out), 0)
-
- f1 = os.path.join(test_dir, 'results.pickle')
- f2 = os.path.join(base_dir, 'results.pickle')
- with open(f1, 'rb') as f:
- f1_obj = pickle.load(f)
- with open(f2, 'rb') as f:
- f2_obj = pickle.load(f)
- self.assertEqual(f1_obj, f2_obj)
-
- # Clean up after test.
- tempfile.mkdtemp = save_real_mkdtemp
- command = 'rm %s/*' % test_dir
- self.result.ce.RunCommand(command)
+ self.result.ProcessResults()
+ mock_proc_turbo.assert_has_calls([mock.call()])
+ mock_proc_cpustats.assert_not_called()
+ self.assertEqual(len(self.result.keyvals), 8)
+ self.assertEqual(
+ self.result.keyvals,
+ {
+ "Total": 10,
+ "cpufreq_all_avg": 2,
+ "cpufreq_all_max": 3,
+ "cpufreq_all_min": 1,
+ "cputemp_all_avg": 6.0,
+ "cputemp_all_min": 5.0,
+ "cputemp_all_max": 7.0,
+ "retval": 0,
+ },
+ )
+
+ @mock.patch.object(Result, "ProcessCpustatsResults")
+ @mock.patch.object(Result, "ProcessTurbostatResults")
+ def test_process_results_without_cpu_data(
+ self, mock_proc_turbo, mock_proc_cpustats
+ ):
+ self.result.GetKeyvals = self.FakeGetKeyvals
+
+ self.result.retval = 0
+ self.result.turbostat_log_file = ""
+ self.result.cpustats_log_file = ""
+ self.result.ProcessResults()
+ mock_proc_turbo.assert_not_called()
+ mock_proc_cpustats.assert_not_called()
+ self.assertEqual(len(self.result.keyvals), 2)
+ self.assertEqual(self.result.keyvals, {"Total": 10, "retval": 0})
+
+ @mock.patch.object(misc, "GetInsideChrootPath")
+ @mock.patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ )
+ def test_populate_from_cache_dir(self, mock_runchrootcmd, mock_getpath):
+
+ # pylint: disable=redefined-builtin
+ def FakeMkdtemp(dir=None):
+ if dir:
+ pass
+ return self.tmpdir
+
+ def FakeGetSamples():
+ return [1, u"samples"]
+
+ current_path = os.getcwd()
+ cache_dir = os.path.join(current_path, "test_cache/test_input")
+ self.result.ce = command_executer.GetCommandExecuter(
+ log_level="average"
+ )
+ self.result.ce.ChrootRunCommandWOutput = mock_runchrootcmd
+ mock_runchrootcmd.return_value = [
+ "",
+ ("%s,PASS\n%s/\telemetry_Crosperf,PASS\n") % (TMP_DIR1, TMP_DIR1),
+ "",
+ ]
+ mock_getpath.return_value = TMP_DIR1
+ self.tmpdir = tempfile.mkdtemp()
+ save_real_mkdtemp = tempfile.mkdtemp
+ tempfile.mkdtemp = FakeMkdtemp
+
+ self.result.PopulateFromCacheDir(
+ cache_dir, "sunspider", "telemetry_Crosperf", ""
+ )
+ self.assertEqual(
+ self.result.keyvals,
+ {
+ u"Total__Total": [444.0, u"ms"],
+ u"regexp-dna__regexp-dna": [16.2, u"ms"],
+ u"telemetry_page_measurement_results__num_failed": [
+ 0,
+ u"count",
+ ],
+ u"telemetry_page_measurement_results__num_errored": [
+ 0,
+ u"count",
+ ],
+ u"string-fasta__string-fasta": [23.2, u"ms"],
+ u"crypto-sha1__crypto-sha1": [11.6, u"ms"],
+ u"bitops-3bit-bits-in-byte__bitops-3bit-bits-in-byte": [
+ 3.2,
+ u"ms",
+ ],
+ u"access-nsieve__access-nsieve": [7.9, u"ms"],
+ u"bitops-nsieve-bits__bitops-nsieve-bits": [9.4, u"ms"],
+ u"string-validate-input__string-validate-input": [19.3, u"ms"],
+ u"3d-raytrace__3d-raytrace": [24.7, u"ms"],
+ u"3d-cube__3d-cube": [28.0, u"ms"],
+ u"string-unpack-code__string-unpack-code": [46.7, u"ms"],
+ u"date-format-tofte__date-format-tofte": [26.3, u"ms"],
+ u"math-partial-sums__math-partial-sums": [22.0, u"ms"],
+ "\telemetry_Crosperf": ["PASS", ""],
+ u"crypto-aes__crypto-aes": [15.2, u"ms"],
+ u"bitops-bitwise-and__bitops-bitwise-and": [8.4, u"ms"],
+ u"crypto-md5__crypto-md5": [10.5, u"ms"],
+ u"string-tagcloud__string-tagcloud": [52.8, u"ms"],
+ u"access-nbody__access-nbody": [8.5, u"ms"],
+ "retval": 0,
+ u"math-spectral-norm__math-spectral-norm": [6.6, u"ms"],
+ u"math-cordic__math-cordic": [8.7, u"ms"],
+ u"access-binary-trees__access-binary-trees": [4.5, u"ms"],
+ u"controlflow-recursive__controlflow-recursive": [4.4, u"ms"],
+ u"access-fannkuch__access-fannkuch": [17.8, u"ms"],
+ u"string-base64__string-base64": [16.0, u"ms"],
+ u"date-format-xparb__date-format-xparb": [20.9, u"ms"],
+ u"3d-morph__3d-morph": [22.1, u"ms"],
+ u"bitops-bits-in-byte__bitops-bits-in-byte": [9.1, u"ms"],
+ },
+ )
+
+ self.result.GetSamples = FakeGetSamples
+ self.result.PopulateFromCacheDir(
+ cache_dir, "sunspider", "telemetry_Crosperf", "chrome"
+ )
+ self.assertEqual(
+ self.result.keyvals,
+ {
+ u"Total__Total": [444.0, u"ms"],
+ u"regexp-dna__regexp-dna": [16.2, u"ms"],
+ u"telemetry_page_measurement_results__num_failed": [
+ 0,
+ u"count",
+ ],
+ u"telemetry_page_measurement_results__num_errored": [
+ 0,
+ u"count",
+ ],
+ u"string-fasta__string-fasta": [23.2, u"ms"],
+ u"crypto-sha1__crypto-sha1": [11.6, u"ms"],
+ u"bitops-3bit-bits-in-byte__bitops-3bit-bits-in-byte": [
+ 3.2,
+ u"ms",
+ ],
+ u"access-nsieve__access-nsieve": [7.9, u"ms"],
+ u"bitops-nsieve-bits__bitops-nsieve-bits": [9.4, u"ms"],
+ u"string-validate-input__string-validate-input": [19.3, u"ms"],
+ u"3d-raytrace__3d-raytrace": [24.7, u"ms"],
+ u"3d-cube__3d-cube": [28.0, u"ms"],
+ u"string-unpack-code__string-unpack-code": [46.7, u"ms"],
+ u"date-format-tofte__date-format-tofte": [26.3, u"ms"],
+ u"math-partial-sums__math-partial-sums": [22.0, u"ms"],
+ "\telemetry_Crosperf": ["PASS", ""],
+ u"crypto-aes__crypto-aes": [15.2, u"ms"],
+ u"bitops-bitwise-and__bitops-bitwise-and": [8.4, u"ms"],
+ u"crypto-md5__crypto-md5": [10.5, u"ms"],
+ u"string-tagcloud__string-tagcloud": [52.8, u"ms"],
+ u"access-nbody__access-nbody": [8.5, u"ms"],
+ "retval": 0,
+ u"math-spectral-norm__math-spectral-norm": [6.6, u"ms"],
+ u"math-cordic__math-cordic": [8.7, u"ms"],
+ u"access-binary-trees__access-binary-trees": [4.5, u"ms"],
+ u"controlflow-recursive__controlflow-recursive": [4.4, u"ms"],
+ u"access-fannkuch__access-fannkuch": [17.8, u"ms"],
+ u"string-base64__string-base64": [16.0, u"ms"],
+ u"date-format-xparb__date-format-xparb": [20.9, u"ms"],
+ u"3d-morph__3d-morph": [22.1, u"ms"],
+ u"bitops-bits-in-byte__bitops-bits-in-byte": [9.1, u"ms"],
+ u"samples": [1, u"samples"],
+ },
+ )
+
+ # Clean up after test.
+ tempfile.mkdtemp = save_real_mkdtemp
+ command = "rm -Rf %s" % self.tmpdir
+ self.result.ce.RunCommand(command)
+
+ @mock.patch.object(misc, "GetRoot")
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommand")
+ def test_cleanup(self, mock_runcmd, mock_getroot):
+
+ # Test 1. 'rm_chroot_tmp' is True; self.results_dir exists;
+ # self.temp_dir exists; results_dir name contains 'test_that_results_'.
+ mock_getroot.return_value = [
+ "/tmp/tmp_AbcXyz",
+ "test_that_results_fake",
+ ]
+ self.result.ce.RunCommand = mock_runcmd
+ self.result.results_dir = "test_results_dir"
+ self.result.temp_dir = "testtemp_dir"
+ self.result.CleanUp(True)
+ self.assertEqual(mock_getroot.call_count, 1)
+ self.assertEqual(mock_runcmd.call_count, 2)
+ self.assertEqual(
+ mock_runcmd.call_args_list[0][0], ("rm -rf test_results_dir",)
+ )
+ self.assertEqual(
+ mock_runcmd.call_args_list[1][0], ("rm -rf testtemp_dir",)
+ )
+
+        # Test 2. Same, except that the results_dir name does not contain
+        # 'test_that_results_'.
+ mock_getroot.reset_mock()
+ mock_runcmd.reset_mock()
+ mock_getroot.return_value = ["/tmp/tmp_AbcXyz", "other_results_fake"]
+ self.result.ce.RunCommand = mock_runcmd
+ self.result.results_dir = "test_results_dir"
+ self.result.temp_dir = "testtemp_dir"
+ self.result.CleanUp(True)
+ self.assertEqual(mock_getroot.call_count, 1)
+ self.assertEqual(mock_runcmd.call_count, 2)
+ self.assertEqual(
+ mock_runcmd.call_args_list[0][0], ("rm -rf /tmp/tmp_AbcXyz",)
+ )
+ self.assertEqual(
+ mock_runcmd.call_args_list[1][0], ("rm -rf testtemp_dir",)
+ )
+
+ # Test 3. mock_getroot returns nothing; 'rm_chroot_tmp' is False.
+ mock_getroot.reset_mock()
+ mock_runcmd.reset_mock()
+ self.result.CleanUp(False)
+ self.assertEqual(mock_getroot.call_count, 0)
+ self.assertEqual(mock_runcmd.call_count, 1)
+ self.assertEqual(
+ mock_runcmd.call_args_list[0][0], ("rm -rf testtemp_dir",)
+ )
+
+        # Test 4. 'rm_chroot_tmp' is True, but results_dir & temp_dir are None.
+ mock_getroot.reset_mock()
+ mock_runcmd.reset_mock()
+ self.result.results_dir = None
+ self.result.temp_dir = None
+ self.result.CleanUp(True)
+ self.assertEqual(mock_getroot.call_count, 0)
+ self.assertEqual(mock_runcmd.call_count, 0)
+
+ @mock.patch.object(misc, "GetInsideChrootPath")
+ @mock.patch.object(command_executer.CommandExecuter, "ChrootRunCommand")
+ def test_store_to_cache_dir(self, mock_chrootruncmd, mock_getpath):
+ def FakeMkdtemp(directory=""):
+ if directory:
+ pass
+ return self.tmpdir
+
+ if mock_chrootruncmd or mock_getpath:
+ pass
+ current_path = os.getcwd()
+ cache_dir = os.path.join(current_path, "test_cache/test_output")
+
+ self.result.ce = command_executer.GetCommandExecuter(
+ log_level="average"
+ )
+ self.result.out = OUTPUT
+ self.result.err = error
+ self.result.retval = 0
+ self.tmpdir = tempfile.mkdtemp()
+ if not os.path.exists(self.tmpdir):
+ os.makedirs(self.tmpdir)
+ self.result.results_dir = os.path.join(os.getcwd(), "test_cache")
+ save_real_mkdtemp = tempfile.mkdtemp
+ tempfile.mkdtemp = FakeMkdtemp
+
+ mock_mm = machine_manager.MockMachineManager(
+ "/tmp/chromeos_root", 0, "average", ""
+ )
+ mock_mm.machine_checksum_string[
+ "mock_label"
+ ] = "fake_machine_checksum123"
+
+ mock_keylist = ["key1", "key2", "key3"]
+ test_flag.SetTestMode(True)
+ self.result.StoreToCacheDir(cache_dir, mock_mm, mock_keylist)
+
+ # Check that the correct things were written to the 'cache'.
+ test_dir = os.path.join(os.getcwd(), "test_cache/test_output")
+ base_dir = os.path.join(os.getcwd(), "test_cache/compare_output")
+ self.assertTrue(os.path.exists(os.path.join(test_dir, "autotest.tbz2")))
+ self.assertTrue(os.path.exists(os.path.join(test_dir, "machine.txt")))
+ self.assertTrue(
+ os.path.exists(os.path.join(test_dir, "results.pickle"))
+ )
+
+ f1 = os.path.join(test_dir, "machine.txt")
+ f2 = os.path.join(base_dir, "machine.txt")
+ cmd = "diff %s %s" % (f1, f2)
+ [_, out, _] = self.result.ce.RunCommandWOutput(cmd)
+ self.assertEqual(len(out), 0)
+
+ f1 = os.path.join(test_dir, "results.pickle")
+ f2 = os.path.join(base_dir, "results.pickle")
+ with open(f1, "rb") as f:
+ f1_obj = pickle.load(f)
+ with open(f2, "rb") as f:
+ f2_obj = pickle.load(f)
+ self.assertEqual(f1_obj, f2_obj)
+
+ # Clean up after test.
+ tempfile.mkdtemp = save_real_mkdtemp
+ command = "rm %s/*" % test_dir
+ self.result.ce.RunCommand(command)
TELEMETRY_RESULT_KEYVALS = {
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- 'math-cordic (ms)':
- '11.4',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- 'access-nbody (ms)':
- '6.9',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- 'access-fannkuch (ms)':
- '26.3',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- 'math-spectral-norm (ms)':
- '6.3',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- 'bitops-nsieve-bits (ms)':
- '9.3',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- 'math-partial-sums (ms)':
- '32.8',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- 'regexp-dna (ms)':
- '16.1',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- '3d-cube (ms)':
- '42.7',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- 'crypto-md5 (ms)':
- '10.8',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- 'crypto-sha1 (ms)':
- '12.4',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- 'string-tagcloud (ms)':
- '47.2',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- 'string-fasta (ms)':
- '36.3',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- 'access-binary-trees (ms)':
- '7.3',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- 'date-format-xparb (ms)':
- '138.1',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- 'crypto-aes (ms)':
- '19.2',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- 'Total (ms)':
- '656.5',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- 'string-base64 (ms)':
- '17.5',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- 'string-validate-input (ms)':
- '24.8',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- '3d-raytrace (ms)':
- '28.7',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- 'controlflow-recursive (ms)':
- '5.3',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- 'bitops-bits-in-byte (ms)':
- '9.8',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- '3d-morph (ms)':
- '50.2',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- 'bitops-bitwise-and (ms)':
- '8.8',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- 'access-nsieve (ms)':
- '8.6',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- 'date-format-tofte (ms)':
- '31.2',
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- 'bitops-3bit-bits-in-byte (ms)':
- '3.5',
- 'retval':
- 0,
- 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html '
- 'string-unpack-code (ms)':
- '45.0'
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "math-cordic (ms)": "11.4",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "access-nbody (ms)": "6.9",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "access-fannkuch (ms)": "26.3",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "math-spectral-norm (ms)": "6.3",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "bitops-nsieve-bits (ms)": "9.3",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "math-partial-sums (ms)": "32.8",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "regexp-dna (ms)": "16.1",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "3d-cube (ms)": "42.7",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "crypto-md5 (ms)": "10.8",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "crypto-sha1 (ms)": "12.4",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "string-tagcloud (ms)": "47.2",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "string-fasta (ms)": "36.3",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "access-binary-trees (ms)": "7.3",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "date-format-xparb (ms)": "138.1",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "crypto-aes (ms)": "19.2",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "Total (ms)": "656.5",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "string-base64 (ms)": "17.5",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "string-validate-input (ms)": "24.8",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "3d-raytrace (ms)": "28.7",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "controlflow-recursive (ms)": "5.3",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "bitops-bits-in-byte (ms)": "9.8",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "3d-morph (ms)": "50.2",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "bitops-bitwise-and (ms)": "8.8",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "access-nsieve (ms)": "8.6",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "date-format-tofte (ms)": "31.2",
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "bitops-3bit-bits-in-byte (ms)": "3.5",
+ "retval": 0,
+ "http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html "
+ "string-unpack-code (ms)": "45.0",
}
PURE_TELEMETRY_OUTPUT = """
@@ -1864,290 +1985,347 @@ page_name,3d-cube (ms),3d-morph (ms),3d-raytrace (ms),Total (ms),access-binary-t
class TelemetryResultTest(unittest.TestCase):
- """Telemetry result test."""
- def __init__(self, *args, **kwargs):
- super(TelemetryResultTest, self).__init__(*args, **kwargs)
- self.callFakeProcessResults = False
- self.result = None
- self.mock_logger = mock.Mock(spec=logger.Logger)
- self.mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
- self.mock_label = MockLabel('mock_label', 'build', 'chromeos_image',
- 'autotest_dir', 'debug_dir', '/tmp', 'lumpy',
- 'remote', 'image_args', 'cache_dir', 'average',
- 'gcc', False, None)
- self.mock_machine = machine_manager.MockCrosMachine(
- 'falco.cros', '/tmp/chromeos', 'average')
-
- def test_populate_from_run(self):
- def FakeProcessResults():
- self.callFakeProcessResults = True
-
- self.callFakeProcessResults = False
- self.result = TelemetryResult(self.mock_logger, self.mock_label, 'average',
- self.mock_cmd_exec)
- self.result.ProcessResults = FakeProcessResults
- self.result.PopulateFromRun(OUTPUT, error, 3, 'fake_test',
- 'telemetry_Crosperf', '')
- self.assertTrue(self.callFakeProcessResults)
- self.assertEqual(self.result.out, OUTPUT)
- self.assertEqual(self.result.err, error)
- self.assertEqual(self.result.retval, 3)
-
- def test_populate_from_cache_dir_and_process_results(self):
-
- self.result = TelemetryResult(self.mock_logger, self.mock_label, 'average',
- self.mock_machine)
- current_path = os.getcwd()
- cache_dir = os.path.join(current_path,
- 'test_cache/test_puretelemetry_input')
- self.result.PopulateFromCacheDir(cache_dir, '', '', '')
- self.assertEqual(self.result.out.strip(), PURE_TELEMETRY_OUTPUT.strip())
- self.assertEqual(self.result.err, '')
- self.assertEqual(self.result.retval, 0)
- self.assertEqual(self.result.keyvals, TELEMETRY_RESULT_KEYVALS)
+ """Telemetry result test."""
+
+ def __init__(self, *args, **kwargs):
+ super(TelemetryResultTest, self).__init__(*args, **kwargs)
+ self.callFakeProcessResults = False
+ self.result = None
+ self.mock_logger = mock.Mock(spec=logger.Logger)
+ self.mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+ self.mock_label = MockLabel(
+ "mock_label",
+ "build",
+ "chromeos_image",
+ "autotest_dir",
+ "debug_dir",
+ "/tmp",
+ "lumpy",
+ "remote",
+ "image_args",
+ "cache_dir",
+ "average",
+ "gcc",
+ False,
+ None,
+ )
+ self.mock_machine = machine_manager.MockCrosMachine(
+ "falco.cros", "/tmp/chromeos", "average"
+ )
+
+ def test_populate_from_run(self):
+ def FakeProcessResults():
+ self.callFakeProcessResults = True
+
+ self.callFakeProcessResults = False
+ self.result = TelemetryResult(
+ self.mock_logger, self.mock_label, "average", self.mock_cmd_exec
+ )
+ self.result.ProcessResults = FakeProcessResults
+ self.result.PopulateFromRun(
+ OUTPUT, error, 3, "fake_test", "telemetry_Crosperf", ""
+ )
+ self.assertTrue(self.callFakeProcessResults)
+ self.assertEqual(self.result.out, OUTPUT)
+ self.assertEqual(self.result.err, error)
+ self.assertEqual(self.result.retval, 3)
+
+ def test_populate_from_cache_dir_and_process_results(self):
+
+ self.result = TelemetryResult(
+ self.mock_logger, self.mock_label, "average", self.mock_machine
+ )
+ current_path = os.getcwd()
+ cache_dir = os.path.join(
+ current_path, "test_cache/test_puretelemetry_input"
+ )
+ self.result.PopulateFromCacheDir(cache_dir, "", "", "")
+ self.assertEqual(self.result.out.strip(), PURE_TELEMETRY_OUTPUT.strip())
+ self.assertEqual(self.result.err, "")
+ self.assertEqual(self.result.retval, 0)
+ self.assertEqual(self.result.keyvals, TELEMETRY_RESULT_KEYVALS)
class ResultsCacheTest(unittest.TestCase):
- """Resultcache test class."""
- def __init__(self, *args, **kwargs):
- super(ResultsCacheTest, self).__init__(*args, **kwargs)
- self.fakeCacheReturnResult = None
- self.mock_logger = mock.Mock(spec=logger.Logger)
- self.mock_label = MockLabel('mock_label', 'build', 'chromeos_image',
- 'autotest_dir', 'debug_dir', '/tmp', 'lumpy',
- 'remote', 'image_args', 'cache_dir', 'average',
- 'gcc', False, None)
-
- def setUp(self):
- self.results_cache = ResultsCache()
-
- mock_machine = machine_manager.MockCrosMachine('falco.cros',
- '/tmp/chromeos', 'average')
-
- mock_mm = machine_manager.MockMachineManager('/tmp/chromeos_root', 0,
- 'average', '')
- mock_mm.machine_checksum_string['mock_label'] = 'fake_machine_checksum123'
-
- self.results_cache.Init(
- self.mock_label.chromeos_image,
- self.mock_label.chromeos_root,
- 'sunspider',
- 1, # benchmark_run.iteration,
- '', # benchmark_run.test_args,
- '', # benchmark_run.profiler_args,
- mock_mm,
- mock_machine,
- self.mock_label.board,
- [CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH],
- self.mock_logger,
- 'average',
- self.mock_label,
- '', # benchmark_run.share_cache
- 'telemetry_Crosperf',
- True, # benchmark_run.show_all_results
- False, # benchmark_run.run_local
- '') # benchmark_run.cwp_dso
-
- @mock.patch.object(image_checksummer.ImageChecksummer, 'Checksum')
- def test_get_cache_dir_for_write(self, mock_checksum):
- def FakeGetMachines(label):
- if label:
- pass
- m1 = machine_manager.MockCrosMachine('lumpy1.cros',
- self.results_cache.chromeos_root,
- 'average')
- m2 = machine_manager.MockCrosMachine('lumpy2.cros',
- self.results_cache.chromeos_root,
- 'average')
- return [m1, m2]
-
- mock_checksum.return_value = 'FakeImageChecksumabc123'
- self.results_cache.machine_manager.GetMachines = FakeGetMachines
- self.results_cache.machine_manager.machine_checksum['mock_label'] = (
- 'FakeMachineChecksumabc987')
- # Based on the label, benchmark and machines, get the directory in which
- # to store the cache information for this test run.
- result_path = self.results_cache.GetCacheDirForWrite()
- # Verify that the returned directory is correct (since the label
- # contained a cache_dir, named 'cache_dir', that's what is expected in
- # the result, rather than '~/cros_scratch').
- comp_path = os.path.join(
- os.getcwd(), 'cache_dir/54524606abaae4fdf7b02f49f7ae7127_'
- 'sunspider_1_fda29412ceccb72977516c4785d08e2c_'
- 'FakeImageChecksumabc123_FakeMachineChecksum'
- 'abc987__6')
- self.assertEqual(result_path, comp_path)
-
- def test_form_cache_dir(self):
- # This is very similar to the previous test (FormCacheDir is called
- # from GetCacheDirForWrite).
- cache_key_list = ('54524606abaae4fdf7b02f49f7ae7127', 'sunspider', '1',
- '7215ee9c7d9dc229d2921a40e899ec5f',
- 'FakeImageChecksumabc123', '*', '*', '6')
- path = self.results_cache.FormCacheDir(cache_key_list)
- self.assertEqual(len(path), 1)
- path1 = path[0]
- test_dirname = ('54524606abaae4fdf7b02f49f7ae7127_sunspider_1_7215ee9'
- 'c7d9dc229d2921a40e899ec5f_FakeImageChecksumabc123_*_*_6')
- comp_path = os.path.join(os.getcwd(), 'cache_dir', test_dirname)
- self.assertEqual(path1, comp_path)
-
- @mock.patch.object(image_checksummer.ImageChecksummer, 'Checksum')
- def test_get_cache_key_list(self, mock_checksum):
- # This tests the mechanism that generates the various pieces of the
- # cache directory name, based on various conditions.
-
- def FakeGetMachines(label):
- if label:
- pass
- m1 = machine_manager.MockCrosMachine('lumpy1.cros',
- self.results_cache.chromeos_root,
- 'average')
- m2 = machine_manager.MockCrosMachine('lumpy2.cros',
- self.results_cache.chromeos_root,
- 'average')
- return [m1, m2]
-
- mock_checksum.return_value = 'FakeImageChecksumabc123'
- self.results_cache.machine_manager.GetMachines = FakeGetMachines
- self.results_cache.machine_manager.machine_checksum['mock_label'] = (
- 'FakeMachineChecksumabc987')
-
- # Test 1. Generating cache name for reading (not writing).
- key_list = self.results_cache.GetCacheKeyList(True)
- self.assertEqual(key_list[0], '*') # Machine checksum value, for read.
- self.assertEqual(key_list[1], 'sunspider')
- self.assertEqual(key_list[2], '1')
- self.assertEqual(key_list[3], 'fda29412ceccb72977516c4785d08e2c')
- self.assertEqual(key_list[4], 'FakeImageChecksumabc123')
- self.assertEqual(key_list[5], '*')
- self.assertEqual(key_list[6], '*')
- self.assertEqual(key_list[7], '6')
-
- # Test 2. Generating cache name for writing, with local image type.
- key_list = self.results_cache.GetCacheKeyList(False)
- self.assertEqual(key_list[0], '54524606abaae4fdf7b02f49f7ae7127')
- self.assertEqual(key_list[1], 'sunspider')
- self.assertEqual(key_list[2], '1')
- self.assertEqual(key_list[3], 'fda29412ceccb72977516c4785d08e2c')
- self.assertEqual(key_list[4], 'FakeImageChecksumabc123')
- self.assertEqual(key_list[5], 'FakeMachineChecksumabc987')
- self.assertEqual(key_list[6], '')
- self.assertEqual(key_list[7], '6')
-
- # Test 3. Generating cache name for writing, with trybot image type.
- self.results_cache.label.image_type = 'trybot'
- key_list = self.results_cache.GetCacheKeyList(False)
- self.assertEqual(key_list[0], '54524606abaae4fdf7b02f49f7ae7127')
- self.assertEqual(key_list[3], 'fda29412ceccb72977516c4785d08e2c')
- self.assertEqual(key_list[4], '54524606abaae4fdf7b02f49f7ae7127')
- self.assertEqual(key_list[5], 'FakeMachineChecksumabc987')
-
- # Test 4. Generating cache name for writing, with official image type.
- self.results_cache.label.image_type = 'official'
- key_list = self.results_cache.GetCacheKeyList(False)
- self.assertEqual(key_list[0], '54524606abaae4fdf7b02f49f7ae7127')
- self.assertEqual(key_list[1], 'sunspider')
- self.assertEqual(key_list[2], '1')
- self.assertEqual(key_list[3], 'fda29412ceccb72977516c4785d08e2c')
- self.assertEqual(key_list[4], '*')
- self.assertEqual(key_list[5], 'FakeMachineChecksumabc987')
- self.assertEqual(key_list[6], '')
- self.assertEqual(key_list[7], '6')
-
- # Test 5. Generating cache name for writing, with local image type, and
- # specifying that the image path must match the cached image path.
- self.results_cache.label.image_type = 'local'
- self.results_cache.cache_conditions.append(
- CacheConditions.IMAGE_PATH_MATCH)
- key_list = self.results_cache.GetCacheKeyList(False)
- self.assertEqual(key_list[0], '54524606abaae4fdf7b02f49f7ae7127')
- self.assertEqual(key_list[3], 'fda29412ceccb72977516c4785d08e2c')
- self.assertEqual(key_list[4], 'FakeImageChecksumabc123')
- self.assertEqual(key_list[5], 'FakeMachineChecksumabc987')
-
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
- @mock.patch.object(os.path, 'isdir')
- @mock.patch.object(Result, 'CreateFromCacheHit')
- def test_read_result(self, mock_create, mock_isdir, mock_runcmd):
-
- self.fakeCacheReturnResult = None
-
- def FakeGetCacheDirForRead():
- return self.fakeCacheReturnResult
-
- def FakeGetCacheDirForWrite():
- return self.fakeCacheReturnResult
-
- mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
- fake_result = Result(self.mock_logger, self.mock_label, 'average',
- mock_cmd_exec)
- fake_result.retval = 0
-
- # Set up results_cache _GetCacheDirFor{Read,Write} to return
- # self.fakeCacheReturnResult, which is initially None (see above).
- # So initially, no cache dir is returned.
- self.results_cache.GetCacheDirForRead = FakeGetCacheDirForRead
- self.results_cache.GetCacheDirForWrite = FakeGetCacheDirForWrite
-
- mock_isdir.return_value = True
- save_cc = [
- CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH
- ]
- self.results_cache.cache_conditions.append(CacheConditions.FALSE)
-
- # Test 1. CacheCondition.FALSE, which means do not read from the cache.
- # (force re-running of test). Result should be None.
- res = self.results_cache.ReadResult()
- self.assertIsNone(res)
- self.assertEqual(mock_runcmd.call_count, 1)
-
- # Test 2. Remove CacheCondition.FALSE. Result should still be None,
- # because GetCacheDirForRead is returning None at the moment.
- mock_runcmd.reset_mock()
- self.results_cache.cache_conditions = save_cc
- res = self.results_cache.ReadResult()
- self.assertIsNone(res)
- self.assertEqual(mock_runcmd.call_count, 0)
-
- # Test 3. Now set up cache dir to be returned by GetCacheDirForRead.
- # Since cache_dir is found, will call Result.CreateFromCacheHit, which
- # which will actually all our mock_create and should return fake_result.
- self.fakeCacheReturnResult = 'fake/cache/dir'
- mock_create.return_value = fake_result
- res = self.results_cache.ReadResult()
- self.assertEqual(mock_runcmd.call_count, 0)
- self.assertEqual(res, fake_result)
-
- # Test 4. os.path.isdir(cache_dir) will now return false, so result
- # should be None again (no cache found).
- mock_isdir.return_value = False
- res = self.results_cache.ReadResult()
- self.assertEqual(mock_runcmd.call_count, 0)
- self.assertIsNone(res)
-
- # Test 5. os.path.isdir returns true, but mock_create now returns None
- # (the call to CreateFromCacheHit returns None), so overal result is None.
- mock_isdir.return_value = True
- mock_create.return_value = None
- res = self.results_cache.ReadResult()
- self.assertEqual(mock_runcmd.call_count, 0)
- self.assertIsNone(res)
-
- # Test 6. Everything works 'as expected', result should be fake_result.
- mock_create.return_value = fake_result
- res = self.results_cache.ReadResult()
- self.assertEqual(mock_runcmd.call_count, 0)
- self.assertEqual(res, fake_result)
-
- # Test 7. The run failed; result should be None.
- mock_create.return_value = fake_result
- fake_result.retval = 1
- self.results_cache.cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
- res = self.results_cache.ReadResult()
- self.assertEqual(mock_runcmd.call_count, 0)
- self.assertIsNone(res)
-
-
-if __name__ == '__main__':
- unittest.main()
+ """Resultcache test class."""
+
+ def __init__(self, *args, **kwargs):
+ super(ResultsCacheTest, self).__init__(*args, **kwargs)
+ self.fakeCacheReturnResult = None
+ self.mock_logger = mock.Mock(spec=logger.Logger)
+ self.mock_label = MockLabel(
+ "mock_label",
+ "build",
+ "chromeos_image",
+ "autotest_dir",
+ "debug_dir",
+ "/tmp",
+ "lumpy",
+ "remote",
+ "image_args",
+ "cache_dir",
+ "average",
+ "gcc",
+ False,
+ None,
+ )
+
+ def setUp(self):
+ self.results_cache = ResultsCache()
+
+ mock_machine = machine_manager.MockCrosMachine(
+ "falco.cros", "/tmp/chromeos", "average"
+ )
+
+ mock_mm = machine_manager.MockMachineManager(
+ "/tmp/chromeos_root", 0, "average", ""
+ )
+ mock_mm.machine_checksum_string[
+ "mock_label"
+ ] = "fake_machine_checksum123"
+
+ self.results_cache.Init(
+ self.mock_label.chromeos_image,
+ self.mock_label.chromeos_root,
+ "sunspider",
+ 1, # benchmark_run.iteration,
+ "", # benchmark_run.test_args,
+ "", # benchmark_run.profiler_args,
+ mock_mm,
+ mock_machine,
+ self.mock_label.board,
+ [
+ CacheConditions.CACHE_FILE_EXISTS,
+ CacheConditions.CHECKSUMS_MATCH,
+ ],
+ self.mock_logger,
+ "average",
+ self.mock_label,
+ "", # benchmark_run.share_cache
+ "telemetry_Crosperf",
+ True, # benchmark_run.show_all_results
+ False, # benchmark_run.run_local
+ "",
+ ) # benchmark_run.cwp_dso
+
+ @mock.patch.object(image_checksummer.ImageChecksummer, "Checksum")
+ def test_get_cache_dir_for_write(self, mock_checksum):
+ def FakeGetMachines(label):
+ if label:
+ pass
+ m1 = machine_manager.MockCrosMachine(
+ "lumpy1.cros", self.results_cache.chromeos_root, "average"
+ )
+ m2 = machine_manager.MockCrosMachine(
+ "lumpy2.cros", self.results_cache.chromeos_root, "average"
+ )
+ return [m1, m2]
+
+ mock_checksum.return_value = "FakeImageChecksumabc123"
+ self.results_cache.machine_manager.GetMachines = FakeGetMachines
+ self.results_cache.machine_manager.machine_checksum[
+ "mock_label"
+ ] = "FakeMachineChecksumabc987"
+ # Based on the label, benchmark and machines, get the directory in which
+ # to store the cache information for this test run.
+ result_path = self.results_cache.GetCacheDirForWrite()
+ # Verify that the returned directory is correct (since the label
+ # contained a cache_dir, named 'cache_dir', that's what is expected in
+ # the result, rather than '~/cros_scratch').
+ comp_path = os.path.join(
+ os.getcwd(),
+ "cache_dir/54524606abaae4fdf7b02f49f7ae7127_"
+ "sunspider_1_fda29412ceccb72977516c4785d08e2c_"
+ "FakeImageChecksumabc123_FakeMachineChecksum"
+ "abc987__6",
+ )
+ self.assertEqual(result_path, comp_path)
+
+ def test_form_cache_dir(self):
+ # This is very similar to the previous test (FormCacheDir is called
+ # from GetCacheDirForWrite).
+ cache_key_list = (
+ "54524606abaae4fdf7b02f49f7ae7127",
+ "sunspider",
+ "1",
+ "7215ee9c7d9dc229d2921a40e899ec5f",
+ "FakeImageChecksumabc123",
+ "*",
+ "*",
+ "6",
+ )
+ path = self.results_cache.FormCacheDir(cache_key_list)
+ self.assertEqual(len(path), 1)
+ path1 = path[0]
+ test_dirname = (
+ "54524606abaae4fdf7b02f49f7ae7127_sunspider_1_7215ee9"
+ "c7d9dc229d2921a40e899ec5f_FakeImageChecksumabc123_*_*_6"
+ )
+ comp_path = os.path.join(os.getcwd(), "cache_dir", test_dirname)
+ self.assertEqual(path1, comp_path)
+
+ @mock.patch.object(image_checksummer.ImageChecksummer, "Checksum")
+ def test_get_cache_key_list(self, mock_checksum):
+ # This tests the mechanism that generates the various pieces of the
+ # cache directory name, based on various conditions.
+
+ def FakeGetMachines(label):
+ if label:
+ pass
+ m1 = machine_manager.MockCrosMachine(
+ "lumpy1.cros", self.results_cache.chromeos_root, "average"
+ )
+ m2 = machine_manager.MockCrosMachine(
+ "lumpy2.cros", self.results_cache.chromeos_root, "average"
+ )
+ return [m1, m2]
+
+ mock_checksum.return_value = "FakeImageChecksumabc123"
+ self.results_cache.machine_manager.GetMachines = FakeGetMachines
+ self.results_cache.machine_manager.machine_checksum[
+ "mock_label"
+ ] = "FakeMachineChecksumabc987"
+
+ # Test 1. Generating cache name for reading (not writing).
+ key_list = self.results_cache.GetCacheKeyList(True)
+ self.assertEqual(key_list[0], "*") # Machine checksum value, for read.
+ self.assertEqual(key_list[1], "sunspider")
+ self.assertEqual(key_list[2], "1")
+ self.assertEqual(key_list[3], "fda29412ceccb72977516c4785d08e2c")
+ self.assertEqual(key_list[4], "FakeImageChecksumabc123")
+ self.assertEqual(key_list[5], "*")
+ self.assertEqual(key_list[6], "*")
+ self.assertEqual(key_list[7], "6")
+
+ # Test 2. Generating cache name for writing, with local image type.
+ key_list = self.results_cache.GetCacheKeyList(False)
+ self.assertEqual(key_list[0], "54524606abaae4fdf7b02f49f7ae7127")
+ self.assertEqual(key_list[1], "sunspider")
+ self.assertEqual(key_list[2], "1")
+ self.assertEqual(key_list[3], "fda29412ceccb72977516c4785d08e2c")
+ self.assertEqual(key_list[4], "FakeImageChecksumabc123")
+ self.assertEqual(key_list[5], "FakeMachineChecksumabc987")
+ self.assertEqual(key_list[6], "")
+ self.assertEqual(key_list[7], "6")
+
+ # Test 3. Generating cache name for writing, with trybot image type.
+ self.results_cache.label.image_type = "trybot"
+ key_list = self.results_cache.GetCacheKeyList(False)
+ self.assertEqual(key_list[0], "54524606abaae4fdf7b02f49f7ae7127")
+ self.assertEqual(key_list[3], "fda29412ceccb72977516c4785d08e2c")
+ self.assertEqual(key_list[4], "54524606abaae4fdf7b02f49f7ae7127")
+ self.assertEqual(key_list[5], "FakeMachineChecksumabc987")
+
+ # Test 4. Generating cache name for writing, with official image type.
+ self.results_cache.label.image_type = "official"
+ key_list = self.results_cache.GetCacheKeyList(False)
+ self.assertEqual(key_list[0], "54524606abaae4fdf7b02f49f7ae7127")
+ self.assertEqual(key_list[1], "sunspider")
+ self.assertEqual(key_list[2], "1")
+ self.assertEqual(key_list[3], "fda29412ceccb72977516c4785d08e2c")
+ self.assertEqual(key_list[4], "*")
+ self.assertEqual(key_list[5], "FakeMachineChecksumabc987")
+ self.assertEqual(key_list[6], "")
+ self.assertEqual(key_list[7], "6")
+
+ # Test 5. Generating cache name for writing, with local image type, and
+ # specifying that the image path must match the cached image path.
+ self.results_cache.label.image_type = "local"
+ self.results_cache.cache_conditions.append(
+ CacheConditions.IMAGE_PATH_MATCH
+ )
+ key_list = self.results_cache.GetCacheKeyList(False)
+ self.assertEqual(key_list[0], "54524606abaae4fdf7b02f49f7ae7127")
+ self.assertEqual(key_list[3], "fda29412ceccb72977516c4785d08e2c")
+ self.assertEqual(key_list[4], "FakeImageChecksumabc123")
+ self.assertEqual(key_list[5], "FakeMachineChecksumabc987")
+
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommand")
+ @mock.patch.object(os.path, "isdir")
+ @mock.patch.object(Result, "CreateFromCacheHit")
+ def test_read_result(self, mock_create, mock_isdir, mock_runcmd):
+
+ self.fakeCacheReturnResult = None
+
+ def FakeGetCacheDirForRead():
+ return self.fakeCacheReturnResult
+
+ def FakeGetCacheDirForWrite():
+ return self.fakeCacheReturnResult
+
+ mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+ fake_result = Result(
+ self.mock_logger, self.mock_label, "average", mock_cmd_exec
+ )
+ fake_result.retval = 0
+
+ # Set up results_cache _GetCacheDirFor{Read,Write} to return
+ # self.fakeCacheReturnResult, which is initially None (see above).
+ # So initially, no cache dir is returned.
+ self.results_cache.GetCacheDirForRead = FakeGetCacheDirForRead
+ self.results_cache.GetCacheDirForWrite = FakeGetCacheDirForWrite
+
+ mock_isdir.return_value = True
+ save_cc = [
+ CacheConditions.CACHE_FILE_EXISTS,
+ CacheConditions.CHECKSUMS_MATCH,
+ ]
+ self.results_cache.cache_conditions.append(CacheConditions.FALSE)
+
+ # Test 1. CacheCondition.FALSE, which means do not read from the cache.
+ # (force re-running of test). Result should be None.
+ res = self.results_cache.ReadResult()
+ self.assertIsNone(res)
+ self.assertEqual(mock_runcmd.call_count, 1)
+
+ # Test 2. Remove CacheCondition.FALSE. Result should still be None,
+ # because GetCacheDirForRead is returning None at the moment.
+ mock_runcmd.reset_mock()
+ self.results_cache.cache_conditions = save_cc
+ res = self.results_cache.ReadResult()
+ self.assertIsNone(res)
+ self.assertEqual(mock_runcmd.call_count, 0)
+
+ # Test 3. Now set up cache dir to be returned by GetCacheDirForRead.
+        # Since cache_dir is found, this will call Result.CreateFromCacheHit,
+        # which will actually call our mock_create and should return fake_result.
+ self.fakeCacheReturnResult = "fake/cache/dir"
+ mock_create.return_value = fake_result
+ res = self.results_cache.ReadResult()
+ self.assertEqual(mock_runcmd.call_count, 0)
+ self.assertEqual(res, fake_result)
+
+ # Test 4. os.path.isdir(cache_dir) will now return false, so result
+ # should be None again (no cache found).
+ mock_isdir.return_value = False
+ res = self.results_cache.ReadResult()
+ self.assertEqual(mock_runcmd.call_count, 0)
+ self.assertIsNone(res)
+
+ # Test 5. os.path.isdir returns true, but mock_create now returns None
+        # (the call to CreateFromCacheHit returns None), so the overall result is None.
+ mock_isdir.return_value = True
+ mock_create.return_value = None
+ res = self.results_cache.ReadResult()
+ self.assertEqual(mock_runcmd.call_count, 0)
+ self.assertIsNone(res)
+
+ # Test 6. Everything works 'as expected', result should be fake_result.
+ mock_create.return_value = fake_result
+ res = self.results_cache.ReadResult()
+ self.assertEqual(mock_runcmd.call_count, 0)
+ self.assertEqual(res, fake_result)
+
+ # Test 7. The run failed; result should be None.
+ mock_create.return_value = fake_result
+ fake_result.retval = 1
+ self.results_cache.cache_conditions.append(
+ CacheConditions.RUN_SUCCEEDED
+ )
+ res = self.results_cache.ReadResult()
+ self.assertEqual(mock_runcmd.call_count, 0)
+ self.assertIsNone(res)
+
+
+if __name__ == "__main__":
+ unittest.main()
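
For reference, here is a minimal standalone sketch of the directory-name layout
that test_form_cache_dir asserts above: the cache key pieces are simply joined
with underscores under the label's cache_dir. The helper name below is
hypothetical; it is not the FormCacheDir implementation (which returns a list
of matching paths), only an illustration of the expected naming.

import os

def sketch_cache_dir_name(cache_root, key_pieces):
    # Hypothetical helper: mirrors the name test_form_cache_dir expects,
    # i.e. all key pieces joined by "_" inside cache_root.
    return os.path.join(cache_root, "_".join(key_pieces))

key_pieces = (
    "54524606abaae4fdf7b02f49f7ae7127",
    "sunspider",
    "1",
    "7215ee9c7d9dc229d2921a40e899ec5f",
    "FakeImageChecksumabc123",
    "*",
    "*",
    "6",
)
print(sketch_cache_dir_name("cache_dir", key_pieces))
# -> cache_dir/54524606abaae4fdf7b02f49f7ae7127_sunspider_1_7215ee9c7d9dc229d2921a40e899ec5f_FakeImageChecksumabc123_*_*_6
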
diff --git a/crosperf/results_organizer.py b/crosperf/results_organizer.py
index 674745fb..354e002d 100644
--- a/crosperf/results_organizer.py
+++ b/crosperf/results_organizer.py
@@ -1,11 +1,10 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parse data from benchmark_runs for tabulator."""
-from __future__ import print_function
import errno
import json
@@ -15,214 +14,219 @@ import sys
from cros_utils import misc
-_TELEMETRY_RESULT_DEFAULTS_FILE = 'default-telemetry-results.json'
-_DUP_KEY_REGEX = re.compile(r'(\w+)\{(\d+)\}')
+
+_TELEMETRY_RESULT_DEFAULTS_FILE = "default-telemetry-results.json"
+_DUP_KEY_REGEX = re.compile(r"(\w+)\{(\d+)\}")
def _AdjustIteration(benchmarks, max_dup, bench):
- """Adjust the interation numbers if they have keys like ABCD{i}."""
- for benchmark in benchmarks:
- if benchmark.name != bench or benchmark.iteration_adjusted:
- continue
- benchmark.iteration_adjusted = True
- benchmark.iterations *= (max_dup + 1)
+ """Adjust the interation numbers if they have keys like ABCD{i}."""
+ for benchmark in benchmarks:
+ if benchmark.name != bench or benchmark.iteration_adjusted:
+ continue
+ benchmark.iteration_adjusted = True
+ benchmark.iterations *= max_dup + 1
def _GetMaxDup(data):
- """Find the maximum i inside ABCD{i}.
+ """Find the maximum i inside ABCD{i}.
- data should be a [[[Key]]], where Key is a string that may look like
- ABCD{i}.
- """
- max_dup = 0
- for label in data:
- for run in label:
- for key in run:
- match = _DUP_KEY_REGEX.match(key)
- if match:
- max_dup = max(max_dup, int(match.group(2)))
- return max_dup
+ data should be a [[[Key]]], where Key is a string that may look like
+ ABCD{i}.
+ """
+ max_dup = 0
+ for label in data:
+ for run in label:
+ for key in run:
+ match = _DUP_KEY_REGEX.match(key)
+ if match:
+ max_dup = max(max_dup, int(match.group(2)))
+ return max_dup
def _Repeat(func, times):
- """Returns the result of running func() n times."""
- return [func() for _ in range(times)]
+ """Returns the result of running func() n times."""
+ return [func() for _ in range(times)]
def _DictWithReturnValues(retval, pass_fail):
- """Create a new dictionary pre-populated with success/fail values."""
- new_dict = {}
- # Note: 0 is a valid retval; test to make sure it's not None.
- if retval is not None:
- new_dict['retval'] = retval
- if pass_fail:
- new_dict[''] = pass_fail
- return new_dict
+ """Create a new dictionary pre-populated with success/fail values."""
+ new_dict = {}
+ # Note: 0 is a valid retval; test to make sure it's not None.
+ if retval is not None:
+ new_dict["retval"] = retval
+ if pass_fail:
+ new_dict[""] = pass_fail
+ return new_dict
def _GetNonDupLabel(max_dup, runs):
- """Create new list for the runs of the same label.
-
- Specifically, this will split out keys like foo{0}, foo{1} from one run into
- their own runs. For example, given a run like:
- {"foo": 1, "bar{0}": 2, "baz": 3, "qux{1}": 4, "pirate{0}": 5}
-
- You'll get:
- [{"foo": 1, "baz": 3}, {"bar": 2, "pirate": 5}, {"qux": 4}]
-
- Hands back the lists of transformed runs, all concatenated together.
- """
- new_runs = []
- for run in runs:
- run_retval = run.get('retval', None)
- run_pass_fail = run.get('', None)
- new_run = {}
- # pylint: disable=cell-var-from-loop
- added_runs = _Repeat(
- lambda: _DictWithReturnValues(run_retval, run_pass_fail), max_dup)
- for key, value in run.items():
- match = _DUP_KEY_REGEX.match(key)
- if not match:
- new_run[key] = value
- else:
- new_key, index_str = match.groups()
- added_runs[int(index_str) - 1][new_key] = str(value)
- new_runs.append(new_run)
- new_runs += added_runs
- return new_runs
+ """Create new list for the runs of the same label.
+
+ Specifically, this will split out keys like foo{0}, foo{1} from one run into
+ their own runs. For example, given a run like:
+ {"foo": 1, "bar{0}": 2, "baz": 3, "qux{1}": 4, "pirate{0}": 5}
+
+ You'll get:
+ [{"foo": 1, "baz": 3}, {"bar": 2, "pirate": 5}, {"qux": 4}]
+
+ Hands back the lists of transformed runs, all concatenated together.
+ """
+ new_runs = []
+ for run in runs:
+ run_retval = run.get("retval", None)
+ run_pass_fail = run.get("", None)
+ new_run = {}
+ # pylint: disable=cell-var-from-loop
+ added_runs = _Repeat(
+ lambda: _DictWithReturnValues(run_retval, run_pass_fail), max_dup
+ )
+ for key, value in run.items():
+ match = _DUP_KEY_REGEX.match(key)
+ if not match:
+ new_run[key] = value
+ else:
+ new_key, index_str = match.groups()
+ added_runs[int(index_str) - 1][new_key] = str(value)
+ new_runs.append(new_run)
+ new_runs += added_runs
+ return new_runs
def _DuplicatePass(result, benchmarks):
- """Properly expands keys like `foo{1}` in `result`."""
- for bench, data in result.items():
- max_dup = _GetMaxDup(data)
- # If there's nothing to expand, there's nothing to do.
- if not max_dup:
- continue
- for i, runs in enumerate(data):
- data[i] = _GetNonDupLabel(max_dup, runs)
- _AdjustIteration(benchmarks, max_dup, bench)
+ """Properly expands keys like `foo{1}` in `result`."""
+ for bench, data in result.items():
+ max_dup = _GetMaxDup(data)
+ # If there's nothing to expand, there's nothing to do.
+ if not max_dup:
+ continue
+ for i, runs in enumerate(data):
+ data[i] = _GetNonDupLabel(max_dup, runs)
+ _AdjustIteration(benchmarks, max_dup, bench)
def _ReadSummaryFile(filename):
- """Reads the summary file at filename."""
- dirname, _ = misc.GetRoot(filename)
- fullname = os.path.join(dirname, _TELEMETRY_RESULT_DEFAULTS_FILE)
- try:
- # Slurp the summary file into a dictionary. The keys in the dictionary are
- # the benchmark names. The value for a key is a list containing the names
- # of all the result fields that should be returned in a 'default' report.
- with open(fullname) as in_file:
- return json.load(in_file)
- except IOError as e:
- # ENOENT means "no such file or directory"
- if e.errno == errno.ENOENT:
- return {}
- raise
+ """Reads the summary file at filename."""
+ dirname, _ = misc.GetRoot(filename)
+ fullname = os.path.join(dirname, _TELEMETRY_RESULT_DEFAULTS_FILE)
+ try:
+ # Slurp the summary file into a dictionary. The keys in the dictionary are
+ # the benchmark names. The value for a key is a list containing the names
+ # of all the result fields that should be returned in a 'default' report.
+ with open(fullname) as in_file:
+ return json.load(in_file)
+ except IOError as e:
+ # ENOENT means "no such file or directory"
+ if e.errno == errno.ENOENT:
+ return {}
+ raise
def _MakeOrganizeResultOutline(benchmark_runs, labels):
- """Creates the "outline" of the OrganizeResults result for a set of runs.
-
- Report generation returns lists of different sizes, depending on the input
- data. Depending on the order in which we iterate through said input data, we
- may populate the Nth index of a list, then the N-1st, then the N+Mth, ...
-
- It's cleaner to figure out the "skeleton"/"outline" ahead of time, so we don't
- have to worry about resizing while computing results.
- """
- # Count how many iterations exist for each benchmark run.
- # We can't simply count up, since we may be given an incomplete set of
- # iterations (e.g. [r.iteration for r in benchmark_runs] == [1, 3])
- iteration_count = {}
- for run in benchmark_runs:
- name = run.benchmark.name
- old_iterations = iteration_count.get(name, -1)
- # N.B. run.iteration starts at 1, not 0.
- iteration_count[name] = max(old_iterations, run.iteration)
-
- # Result structure: {benchmark_name: [[{key: val}]]}
- result = {}
- for run in benchmark_runs:
- name = run.benchmark.name
- num_iterations = iteration_count[name]
- # default param makes cros lint be quiet about defining num_iterations in a
- # loop.
- make_dicts = lambda n=num_iterations: _Repeat(dict, n)
- result[name] = _Repeat(make_dicts, len(labels))
- return result
+ """Creates the "outline" of the OrganizeResults result for a set of runs.
+
+ Report generation returns lists of different sizes, depending on the input
+ data. Depending on the order in which we iterate through said input data, we
+ may populate the Nth index of a list, then the N-1st, then the N+Mth, ...
+
+ It's cleaner to figure out the "skeleton"/"outline" ahead of time, so we don't
+ have to worry about resizing while computing results.
+ """
+ # Count how many iterations exist for each benchmark run.
+ # We can't simply count up, since we may be given an incomplete set of
+ # iterations (e.g. [r.iteration for r in benchmark_runs] == [1, 3])
+ iteration_count = {}
+ for run in benchmark_runs:
+ name = run.benchmark.name
+ old_iterations = iteration_count.get(name, -1)
+ # N.B. run.iteration starts at 1, not 0.
+ iteration_count[name] = max(old_iterations, run.iteration)
+
+ # Result structure: {benchmark_name: [[{key: val}]]}
+ result = {}
+ for run in benchmark_runs:
+ name = run.benchmark.name
+ num_iterations = iteration_count[name]
+        # The default param keeps cros lint quiet about defining
+        # num_iterations in a loop.
+ make_dicts = lambda n=num_iterations: _Repeat(dict, n)
+ result[name] = _Repeat(make_dicts, len(labels))
+ return result
def OrganizeResults(benchmark_runs, labels, benchmarks=None, json_report=False):
- """Create a dict from benchmark_runs.
-
- The structure of the output dict is as follows:
- {"benchmark_1":[
- [{"key1":"v1", "key2":"v2"},{"key1":"v1", "key2","v2"}]
- #one label
- []
- #the other label
- ]
- "benchmark_2":
- [
- ]}.
- """
- result = _MakeOrganizeResultOutline(benchmark_runs, labels)
- label_names = [label.name for label in labels]
- label_indices = {name: i for i, name in enumerate(label_names)}
- summary_file = _ReadSummaryFile(sys.argv[0])
-
- if benchmarks is None:
- benchmarks = []
-
- for benchmark_run in benchmark_runs:
- if not benchmark_run.result:
- continue
- benchmark = benchmark_run.benchmark
- label_index = label_indices[benchmark_run.label.name]
- cur_label_list = result[benchmark.name][label_index]
- cur_dict = cur_label_list[benchmark_run.iteration - 1]
-
- show_all_results = json_report or benchmark.show_all_results
- if not show_all_results:
- summary_list = summary_file.get(benchmark.name)
- if summary_list:
- for key in benchmark_run.result.keyvals.keys():
- if any(
- key.startswith(added_key)
- for added_key in ['retval', 'cpufreq', 'cputemp']):
- summary_list.append(key)
- else:
- # Did not find test_name in json file; show everything.
- show_all_results = True
- if benchmark_run.result.cwp_dso:
- # If we are in cwp approximation mode, we only care about samples
- if 'samples' in benchmark_run.result.keyvals:
- cur_dict['samples'] = benchmark_run.result.keyvals['samples']
- cur_dict['retval'] = benchmark_run.result.keyvals['retval']
- for key, value in benchmark_run.result.keyvals.items():
- if any(
- key.startswith(cpustat_keyword)
- for cpustat_keyword in ['cpufreq', 'cputemp']):
- cur_dict[key] = value
- else:
- for test_key in benchmark_run.result.keyvals:
- if show_all_results or test_key in summary_list:
- cur_dict[test_key] = benchmark_run.result.keyvals[test_key]
- # Occasionally Telemetry tests will not fail but they will not return a
- # result, either. Look for those cases, and force them to be a fail.
- # (This can happen if, for example, the test has been disabled.)
- if len(cur_dict) == 1 and cur_dict['retval'] == 0:
- cur_dict['retval'] = 1
- benchmark_run.result.keyvals['retval'] = 1
- # TODO: This output should be sent via logger.
- print(
- "WARNING: Test '%s' appears to have succeeded but returned"
- ' no results.' % benchmark.name,
- file=sys.stderr)
- if json_report and benchmark_run.machine:
- cur_dict['machine'] = benchmark_run.machine.name
- cur_dict['machine_checksum'] = benchmark_run.machine.checksum
- cur_dict['machine_string'] = benchmark_run.machine.checksum_string
- _DuplicatePass(result, benchmarks)
- return result
+ """Create a dict from benchmark_runs.
+
+ The structure of the output dict is as follows:
+ {"benchmark_1":[
+        [{"key1":"v1", "key2":"v2"},{"key1":"v1", "key2":"v2"}]
+        #one label
+        []
+        #the other label
+        ],
+ "benchmark_2":
+ [
+ ]}.
+ """
+ result = _MakeOrganizeResultOutline(benchmark_runs, labels)
+ label_names = [label.name for label in labels]
+ label_indices = {name: i for i, name in enumerate(label_names)}
+ summary_file = _ReadSummaryFile(sys.argv[0])
+
+ if benchmarks is None:
+ benchmarks = []
+
+ for benchmark_run in benchmark_runs:
+ if not benchmark_run.result:
+ continue
+ benchmark = benchmark_run.benchmark
+ label_index = label_indices[benchmark_run.label.name]
+ cur_label_list = result[benchmark.name][label_index]
+ cur_dict = cur_label_list[benchmark_run.iteration - 1]
+
+ show_all_results = json_report or benchmark.show_all_results
+ if not show_all_results:
+ summary_list = summary_file.get(benchmark.name)
+ if summary_list:
+ for key in benchmark_run.result.keyvals.keys():
+ if any(
+ key.startswith(added_key)
+ for added_key in ["retval", "cpufreq", "cputemp"]
+ ):
+ summary_list.append(key)
+ else:
+ # Did not find test_name in json file; show everything.
+ show_all_results = True
+ if benchmark_run.result.cwp_dso:
+ # If we are in cwp approximation mode, we only care about samples
+ if "samples" in benchmark_run.result.keyvals:
+ cur_dict["samples"] = benchmark_run.result.keyvals["samples"]
+ cur_dict["retval"] = benchmark_run.result.keyvals["retval"]
+ for key, value in benchmark_run.result.keyvals.items():
+ if any(
+ key.startswith(cpustat_keyword)
+ for cpustat_keyword in ["cpufreq", "cputemp"]
+ ):
+ cur_dict[key] = value
+ else:
+ for test_key in benchmark_run.result.keyvals:
+ if show_all_results or test_key in summary_list:
+ cur_dict[test_key] = benchmark_run.result.keyvals[test_key]
+ # Occasionally Telemetry tests will not fail but they will not return a
+ # result, either. Look for those cases, and force them to be a fail.
+ # (This can happen if, for example, the test has been disabled.)
+ if len(cur_dict) == 1 and cur_dict["retval"] == 0:
+ cur_dict["retval"] = 1
+ benchmark_run.result.keyvals["retval"] = 1
+ # TODO: This output should be sent via logger.
+ print(
+ "WARNING: Test '%s' appears to have succeeded but returned"
+ " no results." % benchmark.name,
+ file=sys.stderr,
+ )
+ if json_report and benchmark_run.machine:
+ cur_dict["machine"] = benchmark_run.machine.name
+ cur_dict["machine_checksum"] = benchmark_run.machine.checksum
+ cur_dict["machine_string"] = benchmark_run.machine.checksum_string
+ _DuplicatePass(result, benchmarks)
+ return result
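
For illustration, a minimal sketch (not taken from the diff; benchmark, label, and key names are invented) of the nesting OrganizeResults produces, matching the result[benchmark.name][label_index][iteration - 1] lookup above:

    # Hypothetical output for one benchmark, two labels, two iterations each.
    organized = {
        "octane": [
            # Label 0: one keyval dict per iteration.
            [{"retval": 0, "Total_Score": "12345"},
             {"retval": 0, "Total_Score": "12400"}],
            # Label 1.
            [{"retval": 0, "Total_Score": "11900"},
             {"retval": 1}],
        ],
    }
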
diff --git a/crosperf/results_organizer_unittest.py b/crosperf/results_organizer_unittest.py
index f259879d..f3db266d 100755
--- a/crosperf/results_organizer_unittest.py
+++ b/crosperf/results_organizer_unittest.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -11,160 +11,153 @@
after that, we compare the result of ResultOrganizer.
"""
-from __future__ import print_function
import unittest
from benchmark_run import BenchmarkRun
+import mock_instance
from results_cache import Result
from results_organizer import OrganizeResults
-import mock_instance
result = {
- 'benchmark1': [[{
- '': 'PASS',
- 'bool': 'True',
- 'milliseconds_1': '1',
- 'milliseconds_2': '8',
- 'milliseconds_3': '9.2',
- 'ms_1': '2.1',
- 'total': '5'
- }, {
- '': 'PASS',
- 'test': '2'
- }, {
- '': 'PASS',
- 'test': '4'
- }, {
- '': 'PASS',
- 'bool': 'FALSE',
- 'milliseconds_1': '3',
- 'milliseconds_2': '5',
- 'ms_1': '2.2',
- 'total': '6'
- }, {
- '': 'PASS',
- 'test': '3'
- }, {
- '': 'PASS',
- 'test': '4'
- }], [{
- '': 'PASS',
- 'bool': 'FALSE',
- 'milliseconds_4': '30',
- 'milliseconds_5': '50',
- 'ms_1': '2.23',
- 'total': '6'
- }, {
- '': 'PASS',
- 'test': '5'
- }, {
- '': 'PASS',
- 'test': '4'
- }, {
- '': 'PASS',
- 'bool': 'FALSE',
- 'milliseconds_1': '3',
- 'milliseconds_6': '7',
- 'ms_1': '2.3',
- 'total': '7'
- }, {
- '': 'PASS',
- 'test': '2'
- }, {
- '': 'PASS',
- 'test': '6'
- }]],
- 'benchmark2': [[{
- '': 'PASS',
- 'bool': 'TRUE',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2.3',
- 'total': '7'
- }, {
- '': 'PASS',
- 'test': '2'
- }, {
- '': 'PASS',
- 'test': '6'
- }, {
- '': 'PASS',
- 'bool': 'TRUE',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2.2',
- 'total': '7'
- }, {
- '': 'PASS',
- 'test': '2'
- }, {
- '': 'PASS',
- 'test': '2'
- }], [{
- '': 'PASS',
- 'bool': 'TRUE',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2',
- 'total': '7'
- }, {
- '': 'PASS',
- 'test': '2'
- }, {
- '': 'PASS',
- 'test': '4'
- }, {
- '': 'PASS',
- 'bool': 'TRUE',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '1',
- 'total': '7'
- }, {
- '': 'PASS',
- 'test': '1'
- }, {
- '': 'PASS',
- 'test': '6'
- }]]
-} # yapf: disable
+ "benchmark1": [
+ [
+ {
+ "": "PASS",
+ "bool": "True",
+ "milliseconds_1": "1",
+ "milliseconds_2": "8",
+ "milliseconds_3": "9.2",
+ "ms_1": "2.1",
+ "total": "5",
+ },
+ {"": "PASS", "test": "2"},
+ {"": "PASS", "test": "4"},
+ {
+ "": "PASS",
+ "bool": "FALSE",
+ "milliseconds_1": "3",
+ "milliseconds_2": "5",
+ "ms_1": "2.2",
+ "total": "6",
+ },
+ {"": "PASS", "test": "3"},
+ {"": "PASS", "test": "4"},
+ ],
+ [
+ {
+ "": "PASS",
+ "bool": "FALSE",
+ "milliseconds_4": "30",
+ "milliseconds_5": "50",
+ "ms_1": "2.23",
+ "total": "6",
+ },
+ {"": "PASS", "test": "5"},
+ {"": "PASS", "test": "4"},
+ {
+ "": "PASS",
+ "bool": "FALSE",
+ "milliseconds_1": "3",
+ "milliseconds_6": "7",
+ "ms_1": "2.3",
+ "total": "7",
+ },
+ {"": "PASS", "test": "2"},
+ {"": "PASS", "test": "6"},
+ ],
+ ],
+ "benchmark2": [
+ [
+ {
+ "": "PASS",
+ "bool": "TRUE",
+ "milliseconds_1": "3",
+ "milliseconds_8": "6",
+ "ms_1": "2.3",
+ "total": "7",
+ },
+ {"": "PASS", "test": "2"},
+ {"": "PASS", "test": "6"},
+ {
+ "": "PASS",
+ "bool": "TRUE",
+ "milliseconds_1": "3",
+ "milliseconds_8": "6",
+ "ms_1": "2.2",
+ "total": "7",
+ },
+ {"": "PASS", "test": "2"},
+ {"": "PASS", "test": "2"},
+ ],
+ [
+ {
+ "": "PASS",
+ "bool": "TRUE",
+ "milliseconds_1": "3",
+ "milliseconds_8": "6",
+ "ms_1": "2",
+ "total": "7",
+ },
+ {"": "PASS", "test": "2"},
+ {"": "PASS", "test": "4"},
+ {
+ "": "PASS",
+ "bool": "TRUE",
+ "milliseconds_1": "3",
+ "milliseconds_8": "6",
+ "ms_1": "1",
+ "total": "7",
+ },
+ {"": "PASS", "test": "1"},
+ {"": "PASS", "test": "6"},
+ ],
+ ],
+} # yapf: disable
class ResultOrganizerTest(unittest.TestCase):
- """Test result organizer."""
+ """Test result organizer."""
- def testResultOrganizer(self):
- labels = [mock_instance.label1, mock_instance.label2]
- benchmarks = [mock_instance.benchmark1, mock_instance.benchmark2]
- benchmark_runs = [None] * 8
- benchmark_runs[0] = BenchmarkRun('b1', benchmarks[0], labels[0], 1, '', '',
- '', 'average', '', {})
- benchmark_runs[1] = BenchmarkRun('b2', benchmarks[0], labels[0], 2, '', '',
- '', 'average', '', {})
- benchmark_runs[2] = BenchmarkRun('b3', benchmarks[0], labels[1], 1, '', '',
- '', 'average', '', {})
- benchmark_runs[3] = BenchmarkRun('b4', benchmarks[0], labels[1], 2, '', '',
- '', 'average', '', {})
- benchmark_runs[4] = BenchmarkRun('b5', benchmarks[1], labels[0], 1, '', '',
- '', 'average', '', {})
- benchmark_runs[5] = BenchmarkRun('b6', benchmarks[1], labels[0], 2, '', '',
- '', 'average', '', {})
- benchmark_runs[6] = BenchmarkRun('b7', benchmarks[1], labels[1], 1, '', '',
- '', 'average', '', {})
- benchmark_runs[7] = BenchmarkRun('b8', benchmarks[1], labels[1], 2, '', '',
- '', 'average', '', {})
+ def testResultOrganizer(self):
+ labels = [mock_instance.label1, mock_instance.label2]
+ benchmarks = [mock_instance.benchmark1, mock_instance.benchmark2]
+ benchmark_runs = [None] * 8
+ benchmark_runs[0] = BenchmarkRun(
+ "b1", benchmarks[0], labels[0], 1, "", "", "", "average", "", {}
+ )
+ benchmark_runs[1] = BenchmarkRun(
+ "b2", benchmarks[0], labels[0], 2, "", "", "", "average", "", {}
+ )
+ benchmark_runs[2] = BenchmarkRun(
+ "b3", benchmarks[0], labels[1], 1, "", "", "", "average", "", {}
+ )
+ benchmark_runs[3] = BenchmarkRun(
+ "b4", benchmarks[0], labels[1], 2, "", "", "", "average", "", {}
+ )
+ benchmark_runs[4] = BenchmarkRun(
+ "b5", benchmarks[1], labels[0], 1, "", "", "", "average", "", {}
+ )
+ benchmark_runs[5] = BenchmarkRun(
+ "b6", benchmarks[1], labels[0], 2, "", "", "", "average", "", {}
+ )
+ benchmark_runs[6] = BenchmarkRun(
+ "b7", benchmarks[1], labels[1], 1, "", "", "", "average", "", {}
+ )
+ benchmark_runs[7] = BenchmarkRun(
+ "b8", benchmarks[1], labels[1], 2, "", "", "", "average", "", {}
+ )
- i = 0
- for b in benchmark_runs:
- b.result = Result('', b.label, 'average', 'machine')
- b.result.keyvals = mock_instance.keyval[i]
- i += 1
+ i = 0
+ for b in benchmark_runs:
+ b.result = Result("", b.label, "average", "machine")
+ b.result.keyvals = mock_instance.keyval[i]
+ i += 1
- organized = OrganizeResults(benchmark_runs, labels, benchmarks)
- self.assertEqual(organized, result)
+ organized = OrganizeResults(benchmark_runs, labels, benchmarks)
+ self.assertEqual(organized, result)
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/crosperf/results_report.py b/crosperf/results_report.py
index dc80b53b..045e623b 100644
--- a/crosperf/results_report.py
+++ b/crosperf/results_report.py
@@ -1,10 +1,9 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module to handle the report format."""
-from __future__ import print_function
import datetime
import functools
@@ -14,15 +13,15 @@ import os
import re
import time
+from column_chart import ColumnChart
from cros_utils.tabulator import AmeanResult
from cros_utils.tabulator import Cell
from cros_utils.tabulator import CoeffVarFormat
from cros_utils.tabulator import CoeffVarResult
from cros_utils.tabulator import Column
-from cros_utils.tabulator import SamplesTableGenerator
from cros_utils.tabulator import Format
-from cros_utils.tabulator import IterationResult
from cros_utils.tabulator import GmeanRatioResult
+from cros_utils.tabulator import IterationResult
from cros_utils.tabulator import LiteralResult
from cros_utils.tabulator import MaxResult
from cros_utils.tabulator import MinResult
@@ -30,20 +29,18 @@ from cros_utils.tabulator import PValueFormat
from cros_utils.tabulator import PValueResult
from cros_utils.tabulator import RatioFormat
from cros_utils.tabulator import RawResult
+from cros_utils.tabulator import SamplesTableGenerator
from cros_utils.tabulator import StdResult
from cros_utils.tabulator import TableFormatter
from cros_utils.tabulator import TableGenerator
from cros_utils.tabulator import TablePrinter
-from update_telemetry_defaults import TelemetryDefaults
-
-from column_chart import ColumnChart
from results_organizer import OrganizeResults
-
import results_report_templates as templates
+from update_telemetry_defaults import TelemetryDefaults
def ParseChromeosImage(chromeos_image):
- """Parse the chromeos_image string for the image and version.
+ """Parse the chromeos_image string for the image and version.
The chromeos_image string will probably be in one of two formats:
1: <path-to-chroot>/src/build/images/<board>/<ChromeOS-version>.<datetime>/ \
@@ -64,760 +61,857 @@ def ParseChromeosImage(chromeos_image):
version, image: The results of parsing the input string, as explained
above.
"""
- # Find the Chromeos Version, e.g. R45-2345.0.0.....
- # chromeos_image should have been something like:
- # <path>/<board-trybot-release>/<chromeos-version>/chromiumos_test_image.bin"
- if chromeos_image.endswith('/chromiumos_test_image.bin'):
- full_version = chromeos_image.split('/')[-2]
- # Strip the date and time off of local builds (which have the format
- # "R43-2345.0.0.date-and-time").
- version, _ = os.path.splitext(full_version)
- else:
- version = ''
-
- # Find the chromeos image. If it's somewhere in .../chroot/tmp/..., then
- # it's an official image that got downloaded, so chop off the download path
- # to make the official image name more clear.
- official_image_path = '/chroot/tmp'
- if official_image_path in chromeos_image:
- image = chromeos_image.split(official_image_path, 1)[1]
- else:
- image = chromeos_image
- return version, image
+ # Find the Chromeos Version, e.g. R45-2345.0.0.....
+ # chromeos_image should have been something like:
+ # <path>/<board-trybot-release>/<chromeos-version>/chromiumos_test_image.bin"
+ if chromeos_image.endswith("/chromiumos_test_image.bin"):
+ full_version = chromeos_image.split("/")[-2]
+ # Strip the date and time off of local builds (which have the format
+ # "R43-2345.0.0.date-and-time").
+ version, _ = os.path.splitext(full_version)
+ else:
+ version = ""
+
+ # Find the chromeos image. If it's somewhere in .../chroot/tmp/..., then
+ # it's an official image that got downloaded, so chop off the download path
+ # to make the official image name more clear.
+ official_image_path = "/chroot/tmp"
+ if official_image_path in chromeos_image:
+ image = chromeos_image.split(official_image_path, 1)[1]
+ else:
+ image = chromeos_image
+ return version, image
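
As a usage sketch of ParseChromeosImage (the path below is invented), a local-build image path yields the R-version with the date/time suffix stripped by os.path.splitext, and the path itself comes back unchanged because it is not under /chroot/tmp:

    version, image = ParseChromeosImage(
        "/home/user/src/build/images/lulu/R45-2345.0.0.2015_06_08_2143/"
        "chromiumos_test_image.bin"
    )
    # version == "R45-2345.0.0"; image == the full input path.
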
def _AppendUntilLengthIs(gen, the_list, target_len):
- """Appends to `list` until `list` is `target_len` elements long.
+    """Appends to `the_list` until it is `target_len` elements long.
- Uses `gen` to generate elements.
- """
- the_list.extend(gen() for _ in range(target_len - len(the_list)))
- return the_list
+ Uses `gen` to generate elements.
+ """
+ the_list.extend(gen() for _ in range(target_len - len(the_list)))
+ return the_list
def _FilterPerfReport(event_threshold, report):
- """Filters out entries with `< event_threshold` percent in a perf report."""
+ """Filters out entries with `< event_threshold` percent in a perf report."""
- def filter_dict(m):
- return {
- fn_name: pct for fn_name, pct in m.items() if pct >= event_threshold
- }
+ def filter_dict(m):
+ return {
+ fn_name: pct for fn_name, pct in m.items() if pct >= event_threshold
+ }
- return {event: filter_dict(m) for event, m in report.items()}
+ return {event: filter_dict(m) for event, m in report.items()}
class _PerfTable(object):
- """Generates dicts from a perf table.
-
- Dicts look like:
- {'benchmark_name': {'perf_event_name': [LabelData]}}
- where LabelData is a list of perf dicts, each perf dict coming from the same
- label.
- Each perf dict looks like {'function_name': 0.10, ...} (where 0.10 is the
- percentage of time spent in function_name).
- """
-
- def __init__(self,
- benchmark_names_and_iterations,
- label_names,
- read_perf_report,
- event_threshold=None):
- """Constructor.
-
- read_perf_report is a function that takes a label name, benchmark name, and
- benchmark iteration, and returns a dictionary describing the perf output for
- that given run.
+ """Generates dicts from a perf table.
+
+ Dicts look like:
+ {'benchmark_name': {'perf_event_name': [LabelData]}}
+ where LabelData is a list of perf dicts, each perf dict coming from the same
+ label.
+ Each perf dict looks like {'function_name': 0.10, ...} (where 0.10 is the
+ percentage of time spent in function_name).
"""
- self.event_threshold = event_threshold
- self._label_indices = {name: i for i, name in enumerate(label_names)}
- self.perf_data = {}
- for label in label_names:
- for bench_name, bench_iterations in benchmark_names_and_iterations:
- for i in range(bench_iterations):
- report = read_perf_report(label, bench_name, i)
- self._ProcessPerfReport(report, label, bench_name, i)
-
- def _ProcessPerfReport(self, perf_report, label, benchmark_name, iteration):
- """Add the data from one run to the dict."""
- perf_of_run = perf_report
- if self.event_threshold is not None:
- perf_of_run = _FilterPerfReport(self.event_threshold, perf_report)
- if benchmark_name not in self.perf_data:
- self.perf_data[benchmark_name] = {event: [] for event in perf_of_run}
- ben_data = self.perf_data[benchmark_name]
- label_index = self._label_indices[label]
- for event in ben_data:
- _AppendUntilLengthIs(list, ben_data[event], label_index + 1)
- data_for_label = ben_data[event][label_index]
- _AppendUntilLengthIs(dict, data_for_label, iteration + 1)
- data_for_label[iteration] = perf_of_run[event] if perf_of_run else {}
+
+ def __init__(
+ self,
+ benchmark_names_and_iterations,
+ label_names,
+ read_perf_report,
+ event_threshold=None,
+ ):
+ """Constructor.
+
+ read_perf_report is a function that takes a label name, benchmark name, and
+ benchmark iteration, and returns a dictionary describing the perf output for
+ that given run.
+ """
+ self.event_threshold = event_threshold
+ self._label_indices = {name: i for i, name in enumerate(label_names)}
+ self.perf_data = {}
+ for label in label_names:
+ for bench_name, bench_iterations in benchmark_names_and_iterations:
+ for i in range(bench_iterations):
+ report = read_perf_report(label, bench_name, i)
+ self._ProcessPerfReport(report, label, bench_name, i)
+
+ def _ProcessPerfReport(self, perf_report, label, benchmark_name, iteration):
+ """Add the data from one run to the dict."""
+ perf_of_run = perf_report
+ if self.event_threshold is not None:
+ perf_of_run = _FilterPerfReport(self.event_threshold, perf_report)
+ if benchmark_name not in self.perf_data:
+ self.perf_data[benchmark_name] = {
+ event: [] for event in perf_of_run
+ }
+ ben_data = self.perf_data[benchmark_name]
+ label_index = self._label_indices[label]
+ for event in ben_data:
+ _AppendUntilLengthIs(list, ben_data[event], label_index + 1)
+ data_for_label = ben_data[event][label_index]
+ _AppendUntilLengthIs(dict, data_for_label, iteration + 1)
+ data_for_label[iteration] = (
+ perf_of_run[event] if perf_of_run else {}
+ )
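
To make the _PerfTable docstring concrete, a hypothetical perf_data entry for one benchmark measured under two labels with one iteration each (event and function names are invented):

    # Indexed as perf_data[benchmark][event][label_index][iteration].
    perf_data = {
        "octane": {
            "cycles": [
                [{"memcpy": 12.3, "malloc": 4.5}],  # label 0, iteration 0
                [{"memcpy": 11.8, "malloc": 4.9}],  # label 1, iteration 0
            ],
        },
    }
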
def _GetResultsTableHeader(ben_name, iterations):
- benchmark_info = ('Benchmark: {0}; Iterations: {1}'.format(
- ben_name, iterations))
- cell = Cell()
- cell.string_value = benchmark_info
- cell.header = True
- return [[cell]]
+ benchmark_info = "Benchmark: {0}; Iterations: {1}".format(
+ ben_name, iterations
+ )
+ cell = Cell()
+ cell.string_value = benchmark_info
+ cell.header = True
+ return [[cell]]
def _GetDSOHeader(cwp_dso):
- info = 'CWP_DSO: %s' % cwp_dso
- cell = Cell()
- cell.string_value = info
- cell.header = False
- return [[cell]]
+ info = "CWP_DSO: %s" % cwp_dso
+ cell = Cell()
+ cell.string_value = info
+ cell.header = False
+ return [[cell]]
def _ParseColumn(columns, iteration):
- new_column = []
- for column in columns:
- if column.result.__class__.__name__ != 'RawResult':
- new_column.append(column)
- else:
- new_column.extend(
- Column(LiteralResult(i), Format(), str(i + 1))
- for i in range(iteration))
- return new_column
+ new_column = []
+ for column in columns:
+ if column.result.__class__.__name__ != "RawResult":
+ new_column.append(column)
+ else:
+ new_column.extend(
+ Column(LiteralResult(i), Format(), str(i + 1))
+ for i in range(iteration)
+ )
+ return new_column
def _GetTables(benchmark_results, columns, table_type):
- iter_counts = benchmark_results.iter_counts
- result = benchmark_results.run_keyvals
- tables = []
- for bench_name, runs in result.items():
- iterations = iter_counts[bench_name]
- ben_table = _GetResultsTableHeader(bench_name, iterations)
-
- all_runs_empty = all(not dict for label in runs for dict in label)
- if all_runs_empty:
- cell = Cell()
- cell.string_value = ('This benchmark contains no result.'
- ' Is the benchmark name valid?')
- cell_table = [[cell]]
- else:
- table = TableGenerator(runs, benchmark_results.label_names).GetTable()
- parsed_columns = _ParseColumn(columns, iterations)
- tf = TableFormatter(table, parsed_columns)
- cell_table = tf.GetCellTable(table_type)
- tables.append(ben_table)
- tables.append(cell_table)
- return tables
+ iter_counts = benchmark_results.iter_counts
+ result = benchmark_results.run_keyvals
+ tables = []
+ for bench_name, runs in result.items():
+ iterations = iter_counts[bench_name]
+ ben_table = _GetResultsTableHeader(bench_name, iterations)
+
+ all_runs_empty = all(not dict for label in runs for dict in label)
+ if all_runs_empty:
+ cell = Cell()
+ cell.string_value = (
+ "This benchmark contains no result."
+ " Is the benchmark name valid?"
+ )
+ cell_table = [[cell]]
+ else:
+ table = TableGenerator(
+ runs, benchmark_results.label_names
+ ).GetTable()
+ parsed_columns = _ParseColumn(columns, iterations)
+ tf = TableFormatter(table, parsed_columns)
+ cell_table = tf.GetCellTable(table_type)
+ tables.append(ben_table)
+ tables.append(cell_table)
+ return tables
def _GetPerfTables(benchmark_results, columns, table_type):
- p_table = _PerfTable(benchmark_results.benchmark_names_and_iterations,
- benchmark_results.label_names,
- benchmark_results.read_perf_report)
-
- tables = []
- for benchmark in p_table.perf_data:
- iterations = benchmark_results.iter_counts[benchmark]
- ben_table = _GetResultsTableHeader(benchmark, iterations)
- tables.append(ben_table)
- benchmark_data = p_table.perf_data[benchmark]
- table = []
- for event in benchmark_data:
- tg = TableGenerator(
- benchmark_data[event],
- benchmark_results.label_names,
- sort=TableGenerator.SORT_BY_VALUES_DESC)
- table = tg.GetTable(ResultsReport.PERF_ROWS)
- parsed_columns = _ParseColumn(columns, iterations)
- tf = TableFormatter(table, parsed_columns)
- tf.GenerateCellTable(table_type)
- tf.AddColumnName()
- tf.AddLabelName()
- tf.AddHeader(str(event))
- table = tf.GetCellTable(table_type, headers=False)
- tables.append(table)
- return tables
+ p_table = _PerfTable(
+ benchmark_results.benchmark_names_and_iterations,
+ benchmark_results.label_names,
+ benchmark_results.read_perf_report,
+ )
+
+ tables = []
+ for benchmark in p_table.perf_data:
+ iterations = benchmark_results.iter_counts[benchmark]
+ ben_table = _GetResultsTableHeader(benchmark, iterations)
+ tables.append(ben_table)
+ benchmark_data = p_table.perf_data[benchmark]
+ table = []
+ for event in benchmark_data:
+ tg = TableGenerator(
+ benchmark_data[event],
+ benchmark_results.label_names,
+ sort=TableGenerator.SORT_BY_VALUES_DESC,
+ )
+ table = tg.GetTable(ResultsReport.PERF_ROWS)
+ parsed_columns = _ParseColumn(columns, iterations)
+ tf = TableFormatter(table, parsed_columns)
+ tf.GenerateCellTable(table_type)
+ tf.AddColumnName()
+ tf.AddLabelName()
+ tf.AddHeader(str(event))
+ table = tf.GetCellTable(table_type, headers=False)
+ tables.append(table)
+ return tables
def _GetSamplesTables(benchmark_results, columns, table_type):
- tables = []
- dso_header_table = _GetDSOHeader(benchmark_results.cwp_dso)
- tables.append(dso_header_table)
- (table, new_keyvals, iter_counts) = SamplesTableGenerator(
- benchmark_results.run_keyvals, benchmark_results.label_names,
- benchmark_results.iter_counts, benchmark_results.weights).GetTable()
- parsed_columns = _ParseColumn(columns, 1)
- tf = TableFormatter(table, parsed_columns, samples_table=True)
- cell_table = tf.GetCellTable(table_type)
- tables.append(cell_table)
- return (tables, new_keyvals, iter_counts)
+ tables = []
+ dso_header_table = _GetDSOHeader(benchmark_results.cwp_dso)
+ tables.append(dso_header_table)
+ (table, new_keyvals, iter_counts) = SamplesTableGenerator(
+ benchmark_results.run_keyvals,
+ benchmark_results.label_names,
+ benchmark_results.iter_counts,
+ benchmark_results.weights,
+ ).GetTable()
+ parsed_columns = _ParseColumn(columns, 1)
+ tf = TableFormatter(table, parsed_columns, samples_table=True)
+ cell_table = tf.GetCellTable(table_type)
+ tables.append(cell_table)
+ return (tables, new_keyvals, iter_counts)
class ResultsReport(object):
- """Class to handle the report format."""
- MAX_COLOR_CODE = 255
- PERF_ROWS = 5
-
- def __init__(self, results):
- self.benchmark_results = results
-
- def _GetTablesWithColumns(self, columns, table_type, summary_type):
- if summary_type == 'perf':
- get_tables = _GetPerfTables
- elif summary_type == 'samples':
- get_tables = _GetSamplesTables
- else:
- get_tables = _GetTables
- ret = get_tables(self.benchmark_results, columns, table_type)
- # If we are generating a samples summary table, the return value of
- # get_tables will be a tuple, and we will update the benchmark_results for
- # composite benchmark so that full table can use it.
- if isinstance(ret, tuple):
- self.benchmark_results.run_keyvals = ret[1]
- self.benchmark_results.iter_counts = ret[2]
- ret = ret[0]
- return ret
-
- def GetFullTables(self, perf=False):
- ignore_min_max = self.benchmark_results.ignore_min_max
- columns = [
- Column(RawResult(), Format()),
- Column(MinResult(), Format()),
- Column(MaxResult(), Format()),
- Column(AmeanResult(ignore_min_max), Format()),
- Column(StdResult(ignore_min_max), Format(), 'StdDev'),
- Column(CoeffVarResult(ignore_min_max), CoeffVarFormat(), 'StdDev/Mean'),
- Column(GmeanRatioResult(ignore_min_max), RatioFormat(), 'GmeanSpeedup'),
- Column(PValueResult(ignore_min_max), PValueFormat(), 'p-value')
- ]
- return self._GetTablesWithColumns(columns, 'full', perf)
-
- def GetSummaryTables(self, summary_type=''):
- ignore_min_max = self.benchmark_results.ignore_min_max
- columns = []
- if summary_type == 'samples':
- columns += [Column(IterationResult(), Format(), 'Iterations [Pass:Fail]')]
- columns += [
- Column(
- AmeanResult(ignore_min_max), Format(),
- 'Weighted Samples Amean' if summary_type == 'samples' else ''),
- Column(StdResult(ignore_min_max), Format(), 'StdDev'),
- Column(CoeffVarResult(ignore_min_max), CoeffVarFormat(), 'StdDev/Mean'),
- Column(GmeanRatioResult(ignore_min_max), RatioFormat(), 'GmeanSpeedup'),
- Column(PValueResult(ignore_min_max), PValueFormat(), 'p-value')
- ]
- return self._GetTablesWithColumns(columns, 'summary', summary_type)
+ """Class to handle the report format."""
+
+ MAX_COLOR_CODE = 255
+ PERF_ROWS = 5
+
+ def __init__(self, results):
+ self.benchmark_results = results
+
+ def _GetTablesWithColumns(self, columns, table_type, summary_type):
+ if summary_type == "perf":
+ get_tables = _GetPerfTables
+ elif summary_type == "samples":
+ get_tables = _GetSamplesTables
+ else:
+ get_tables = _GetTables
+ ret = get_tables(self.benchmark_results, columns, table_type)
+        # If we are generating a samples summary table, the return value of
+        # get_tables will be a tuple, and we will update the benchmark_results
+        # for the composite benchmark so that the full table can use it.
+ if isinstance(ret, tuple):
+ self.benchmark_results.run_keyvals = ret[1]
+ self.benchmark_results.iter_counts = ret[2]
+ ret = ret[0]
+ return ret
+
+ def GetFullTables(self, perf=False):
+ ignore_min_max = self.benchmark_results.ignore_min_max
+ columns = [
+ Column(RawResult(), Format()),
+ Column(MinResult(), Format()),
+ Column(MaxResult(), Format()),
+ Column(AmeanResult(ignore_min_max), Format()),
+ Column(StdResult(ignore_min_max), Format(), "StdDev"),
+ Column(
+ CoeffVarResult(ignore_min_max), CoeffVarFormat(), "StdDev/Mean"
+ ),
+ Column(
+ GmeanRatioResult(ignore_min_max), RatioFormat(), "GmeanSpeedup"
+ ),
+ Column(PValueResult(ignore_min_max), PValueFormat(), "p-value"),
+ ]
+ return self._GetTablesWithColumns(columns, "full", perf)
+
+ def GetSummaryTables(self, summary_type=""):
+ ignore_min_max = self.benchmark_results.ignore_min_max
+ columns = []
+ if summary_type == "samples":
+ columns += [
+ Column(IterationResult(), Format(), "Iterations [Pass:Fail]")
+ ]
+ columns += [
+ Column(
+ AmeanResult(ignore_min_max),
+ Format(),
+ "Weighted Samples Amean" if summary_type == "samples" else "",
+ ),
+ Column(StdResult(ignore_min_max), Format(), "StdDev"),
+ Column(
+ CoeffVarResult(ignore_min_max), CoeffVarFormat(), "StdDev/Mean"
+ ),
+ Column(
+ GmeanRatioResult(ignore_min_max), RatioFormat(), "GmeanSpeedup"
+ ),
+ Column(PValueResult(ignore_min_max), PValueFormat(), "p-value"),
+ ]
+ return self._GetTablesWithColumns(columns, "summary", summary_type)
def _PrintTable(tables, out_to):
- # tables may be None.
- if not tables:
- return ''
-
- if out_to == 'HTML':
- out_type = TablePrinter.HTML
- elif out_to == 'PLAIN':
- out_type = TablePrinter.PLAIN
- elif out_to == 'CONSOLE':
- out_type = TablePrinter.CONSOLE
- elif out_to == 'TSV':
- out_type = TablePrinter.TSV
- elif out_to == 'EMAIL':
- out_type = TablePrinter.EMAIL
- else:
- raise ValueError('Invalid out_to value: %s' % (out_to,))
-
- printers = (TablePrinter(table, out_type) for table in tables)
- return ''.join(printer.Print() for printer in printers)
+ # tables may be None.
+ if not tables:
+ return ""
+
+ if out_to == "HTML":
+ out_type = TablePrinter.HTML
+ elif out_to == "PLAIN":
+ out_type = TablePrinter.PLAIN
+ elif out_to == "CONSOLE":
+ out_type = TablePrinter.CONSOLE
+ elif out_to == "TSV":
+ out_type = TablePrinter.TSV
+ elif out_to == "EMAIL":
+ out_type = TablePrinter.EMAIL
+ else:
+ raise ValueError("Invalid out_to value: %s" % (out_to,))
+ printers = (TablePrinter(table, out_type) for table in tables)
+ return "".join(printer.Print() for printer in printers)
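
A usage sketch showing how these pieces combine into console text, mirroring what TextResultsReport.GetReport does internally (assumes a populated BenchmarkResults named results):

    report = ResultsReport(results)
    text = _PrintTable(report.GetSummaryTables(), "CONSOLE")
    print(text)
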
-class TextResultsReport(ResultsReport):
- """Class to generate text result report."""
-
- H1_STR = '==========================================='
- H2_STR = '-------------------------------------------'
-
- def __init__(self, results, email=False, experiment=None):
- super(TextResultsReport, self).__init__(results)
- self.email = email
- self.experiment = experiment
-
- @staticmethod
- def _MakeTitle(title):
- header_line = TextResultsReport.H1_STR
- # '' at the end gives one newline.
- return '\n'.join([header_line, title, header_line, ''])
-
- @staticmethod
- def _MakeSection(title, body):
- header_line = TextResultsReport.H2_STR
- # '\n' at the end gives us two newlines.
- return '\n'.join([header_line, title, header_line, body, '\n'])
-
- @staticmethod
- def FromExperiment(experiment, email=False):
- results = BenchmarkResults.FromExperiment(experiment)
- return TextResultsReport(results, email, experiment)
-
- def GetStatusTable(self):
- """Generate the status table by the tabulator."""
- table = [['', '']]
- columns = [
- Column(LiteralResult(iteration=0), Format(), 'Status'),
- Column(LiteralResult(iteration=1), Format(), 'Failing Reason')
- ]
-
- for benchmark_run in self.experiment.benchmark_runs:
- status = [
- benchmark_run.name,
- [benchmark_run.timeline.GetLastEvent(), benchmark_run.failure_reason]
- ]
- table.append(status)
- cell_table = TableFormatter(table, columns).GetCellTable('status')
- return [cell_table]
-
- def GetTotalWaitCooldownTime(self):
- """Get cooldown wait time in seconds from experiment benchmark runs.
-
- Returns:
- Dictionary {'dut': int(wait_time_in_seconds)}
- """
- waittime_dict = {}
- for dut in self.experiment.machine_manager.GetMachines():
- waittime_dict[dut.name] = dut.GetCooldownWaitTime()
- return waittime_dict
-
- def GetReport(self):
- """Generate the report for email and console."""
- output_type = 'EMAIL' if self.email else 'CONSOLE'
- experiment = self.experiment
-
- sections = []
- if experiment is not None:
- title_contents = "Results report for '%s'" % (experiment.name,)
- else:
- title_contents = 'Results report'
- sections.append(self._MakeTitle(title_contents))
- if not self.benchmark_results.cwp_dso:
- summary_table = _PrintTable(self.GetSummaryTables(), output_type)
- else:
- summary_table = _PrintTable(
- self.GetSummaryTables(summary_type='samples'), output_type)
- sections.append(self._MakeSection('Summary', summary_table))
-
- if experiment is not None:
- table = _PrintTable(self.GetStatusTable(), output_type)
- sections.append(self._MakeSection('Benchmark Run Status', table))
-
- if not self.benchmark_results.cwp_dso:
- perf_table = _PrintTable(
- self.GetSummaryTables(summary_type='perf'), output_type)
- sections.append(self._MakeSection('Perf Data', perf_table))
-
- if experiment is not None:
- experiment_file = experiment.experiment_file
- sections.append(self._MakeSection('Experiment File', experiment_file))
-
- cpu_info = experiment.machine_manager.GetAllCPUInfo(experiment.labels)
- sections.append(self._MakeSection('CPUInfo', cpu_info))
-
- totaltime = (time.time() -
- experiment.start_time) if experiment.start_time else 0
- totaltime_str = 'Total experiment time:\n%d min' % (totaltime // 60)
- cooldown_waittime_list = ['Cooldown wait time:']
- # When running experiment on multiple DUTs cooldown wait time may vary
- # on different devices. In addition its combined time may exceed total
- # experiment time which will look weird but it is reasonable.
- # For this matter print cooldown time per DUT.
- for dut, waittime in sorted(self.GetTotalWaitCooldownTime().items()):
- cooldown_waittime_list.append('DUT %s: %d min' % (dut, waittime // 60))
- cooldown_waittime_str = '\n'.join(cooldown_waittime_list)
- sections.append(
- self._MakeSection('Duration',
- '\n\n'.join([totaltime_str,
- cooldown_waittime_str])))
-
- return '\n'.join(sections)
+class TextResultsReport(ResultsReport):
+ """Class to generate text result report."""
+
+ H1_STR = "==========================================="
+ H2_STR = "-------------------------------------------"
+
+ def __init__(self, results, email=False, experiment=None):
+ super(TextResultsReport, self).__init__(results)
+ self.email = email
+ self.experiment = experiment
+
+ @staticmethod
+ def _MakeTitle(title):
+ header_line = TextResultsReport.H1_STR
+ # '' at the end gives one newline.
+ return "\n".join([header_line, title, header_line, ""])
+
+ @staticmethod
+ def _MakeSection(title, body):
+ header_line = TextResultsReport.H2_STR
+ # '\n' at the end gives us two newlines.
+ return "\n".join([header_line, title, header_line, body, "\n"])
+
+ @staticmethod
+ def FromExperiment(experiment, email=False):
+ results = BenchmarkResults.FromExperiment(experiment)
+ return TextResultsReport(results, email, experiment)
+
+ def GetStatusTable(self):
+ """Generate the status table by the tabulator."""
+ table = [["", ""]]
+ columns = [
+ Column(LiteralResult(iteration=0), Format(), "Status"),
+ Column(LiteralResult(iteration=1), Format(), "Failing Reason"),
+ ]
+
+ for benchmark_run in self.experiment.benchmark_runs:
+ status = [
+ benchmark_run.name,
+ [
+ benchmark_run.timeline.GetLastEvent(),
+ benchmark_run.failure_reason,
+ ],
+ ]
+ table.append(status)
+ cell_table = TableFormatter(table, columns).GetCellTable("status")
+ return [cell_table]
+
+ def GetTotalWaitCooldownTime(self):
+ """Get cooldown wait time in seconds from experiment benchmark runs.
+
+ Returns:
+ Dictionary {'dut': int(wait_time_in_seconds)}
+ """
+ waittime_dict = {}
+ for dut in self.experiment.machine_manager.GetMachines():
+ waittime_dict[dut.name] = dut.GetCooldownWaitTime()
+ return waittime_dict
+
+ def GetReport(self):
+ """Generate the report for email and console."""
+ output_type = "EMAIL" if self.email else "CONSOLE"
+ experiment = self.experiment
+
+ sections = []
+ if experiment is not None:
+ title_contents = "Results report for '%s'" % (experiment.name,)
+ else:
+ title_contents = "Results report"
+ sections.append(self._MakeTitle(title_contents))
+
+ if not self.benchmark_results.cwp_dso:
+ summary_table = _PrintTable(self.GetSummaryTables(), output_type)
+ else:
+ summary_table = _PrintTable(
+ self.GetSummaryTables(summary_type="samples"), output_type
+ )
+ sections.append(self._MakeSection("Summary", summary_table))
+
+ if experiment is not None:
+ table = _PrintTable(self.GetStatusTable(), output_type)
+ sections.append(self._MakeSection("Benchmark Run Status", table))
+
+ if not self.benchmark_results.cwp_dso:
+ perf_table = _PrintTable(
+ self.GetSummaryTables(summary_type="perf"), output_type
+ )
+ sections.append(self._MakeSection("Perf Data", perf_table))
+
+ if experiment is not None:
+ experiment_file = experiment.experiment_file
+ sections.append(
+ self._MakeSection("Experiment File", experiment_file)
+ )
+
+ cpu_info = experiment.machine_manager.GetAllCPUInfo(
+ experiment.labels
+ )
+ sections.append(self._MakeSection("CPUInfo", cpu_info))
+
+ totaltime = (
+ (time.time() - experiment.start_time)
+ if experiment.start_time
+ else 0
+ )
+ totaltime_str = "Total experiment time:\n%d min" % (totaltime // 60)
+ cooldown_waittime_list = ["Cooldown wait time:"]
+            # When running an experiment on multiple DUTs, the cooldown wait
+            # time may vary between devices. In addition, the combined cooldown
+            # time may exceed the total experiment time, which looks odd but is
+            # reasonable. For that reason, print the cooldown time per DUT.
+ for dut, waittime in sorted(
+ self.GetTotalWaitCooldownTime().items()
+ ):
+ cooldown_waittime_list.append(
+ "DUT %s: %d min" % (dut, waittime // 60)
+ )
+ cooldown_waittime_str = "\n".join(cooldown_waittime_list)
+ sections.append(
+ self._MakeSection(
+ "Duration",
+ "\n\n".join([totaltime_str, cooldown_waittime_str]),
+ )
+ )
+
+ return "\n".join(sections)
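
The usual entry point is the FromExperiment constructor; a sketch, assuming an already-built Experiment object named experiment:

    report = TextResultsReport.FromExperiment(experiment, email=False)
    print(report.GetReport())
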
def _GetHTMLCharts(label_names, test_results):
- charts = []
- for item, runs in test_results.items():
- # Fun fact: label_names is actually *entirely* useless as a param, since we
- # never add headers. We still need to pass it anyway.
- table = TableGenerator(runs, label_names).GetTable()
- columns = [
- Column(AmeanResult(), Format()),
- Column(MinResult(), Format()),
- Column(MaxResult(), Format())
- ]
- tf = TableFormatter(table, columns)
- data_table = tf.GetCellTable('full', headers=False)
-
- for cur_row_data in data_table:
- test_key = cur_row_data[0].string_value
- title = '{0}: {1}'.format(item, test_key.replace('/', ''))
- chart = ColumnChart(title, 300, 200)
- chart.AddColumn('Label', 'string')
- chart.AddColumn('Average', 'number')
- chart.AddColumn('Min', 'number')
- chart.AddColumn('Max', 'number')
- chart.AddSeries('Min', 'line', 'black')
- chart.AddSeries('Max', 'line', 'black')
- cur_index = 1
- for label in label_names:
- chart.AddRow([
- label, cur_row_data[cur_index].value,
- cur_row_data[cur_index + 1].value, cur_row_data[cur_index + 2].value
- ])
- if isinstance(cur_row_data[cur_index].value, str):
- chart = None
- break
- cur_index += 3
- if chart:
- charts.append(chart)
- return charts
+ charts = []
+ for item, runs in test_results.items():
+ # Fun fact: label_names is actually *entirely* useless as a param, since we
+ # never add headers. We still need to pass it anyway.
+ table = TableGenerator(runs, label_names).GetTable()
+ columns = [
+ Column(AmeanResult(), Format()),
+ Column(MinResult(), Format()),
+ Column(MaxResult(), Format()),
+ ]
+ tf = TableFormatter(table, columns)
+ data_table = tf.GetCellTable("full", headers=False)
+
+ for cur_row_data in data_table:
+ test_key = cur_row_data[0].string_value
+ title = "{0}: {1}".format(item, test_key.replace("/", ""))
+ chart = ColumnChart(title, 300, 200)
+ chart.AddColumn("Label", "string")
+ chart.AddColumn("Average", "number")
+ chart.AddColumn("Min", "number")
+ chart.AddColumn("Max", "number")
+ chart.AddSeries("Min", "line", "black")
+ chart.AddSeries("Max", "line", "black")
+ cur_index = 1
+ for label in label_names:
+ chart.AddRow(
+ [
+ label,
+ cur_row_data[cur_index].value,
+ cur_row_data[cur_index + 1].value,
+ cur_row_data[cur_index + 2].value,
+ ]
+ )
+ if isinstance(cur_row_data[cur_index].value, str):
+ chart = None
+ break
+ cur_index += 3
+ if chart:
+ charts.append(chart)
+ return charts
class HTMLResultsReport(ResultsReport):
- """Class to generate html result report."""
-
- def __init__(self, benchmark_results, experiment=None):
- super(HTMLResultsReport, self).__init__(benchmark_results)
- self.experiment = experiment
-
- @staticmethod
- def FromExperiment(experiment):
- return HTMLResultsReport(
- BenchmarkResults.FromExperiment(experiment), experiment=experiment)
-
- def GetReport(self):
- label_names = self.benchmark_results.label_names
- test_results = self.benchmark_results.run_keyvals
- charts = _GetHTMLCharts(label_names, test_results)
- chart_javascript = ''.join(chart.GetJavascript() for chart in charts)
- chart_divs = ''.join(chart.GetDiv() for chart in charts)
-
- if not self.benchmark_results.cwp_dso:
- summary_table = self.GetSummaryTables()
- perf_table = self.GetSummaryTables(summary_type='perf')
- else:
- summary_table = self.GetSummaryTables(summary_type='samples')
- perf_table = None
- full_table = self.GetFullTables()
-
- experiment_file = ''
- if self.experiment is not None:
- experiment_file = self.experiment.experiment_file
- # Use kwargs for code readability, and so that testing is a bit easier.
- return templates.GenerateHTMLPage(
- perf_table=perf_table,
- chart_js=chart_javascript,
- summary_table=summary_table,
- print_table=_PrintTable,
- chart_divs=chart_divs,
- full_table=full_table,
- experiment_file=experiment_file)
+ """Class to generate html result report."""
+
+ def __init__(self, benchmark_results, experiment=None):
+ super(HTMLResultsReport, self).__init__(benchmark_results)
+ self.experiment = experiment
+
+ @staticmethod
+ def FromExperiment(experiment):
+ return HTMLResultsReport(
+ BenchmarkResults.FromExperiment(experiment), experiment=experiment
+ )
+
+ def GetReport(self):
+ label_names = self.benchmark_results.label_names
+ test_results = self.benchmark_results.run_keyvals
+ charts = _GetHTMLCharts(label_names, test_results)
+ chart_javascript = "".join(chart.GetJavascript() for chart in charts)
+ chart_divs = "".join(chart.GetDiv() for chart in charts)
+
+ if not self.benchmark_results.cwp_dso:
+ summary_table = self.GetSummaryTables()
+ perf_table = self.GetSummaryTables(summary_type="perf")
+ else:
+ summary_table = self.GetSummaryTables(summary_type="samples")
+ perf_table = None
+ full_table = self.GetFullTables()
+
+ experiment_file = ""
+ if self.experiment is not None:
+ experiment_file = self.experiment.experiment_file
+ # Use kwargs for code readability, and so that testing is a bit easier.
+ return templates.GenerateHTMLPage(
+ perf_table=perf_table,
+ chart_js=chart_javascript,
+ summary_table=summary_table,
+ print_table=_PrintTable,
+ chart_divs=chart_divs,
+ full_table=full_table,
+ experiment_file=experiment_file,
+ )
def ParseStandardPerfReport(report_data):
- """Parses the output of `perf report`.
+ """Parses the output of `perf report`.
- It'll parse the following:
- {{garbage}}
- # Samples: 1234M of event 'foo'
+ It'll parse the following:
+ {{garbage}}
+ # Samples: 1234M of event 'foo'
- 1.23% command shared_object location function::name
+ 1.23% command shared_object location function::name
- 1.22% command shared_object location function2::name
+ 1.22% command shared_object location function2::name
- # Samples: 999K of event 'bar'
+ # Samples: 999K of event 'bar'
- 0.23% command shared_object location function3::name
- {{etc.}}
+ 0.23% command shared_object location function3::name
+ {{etc.}}
- Into:
- {'foo': {'function::name': 1.23, 'function2::name': 1.22},
- 'bar': {'function3::name': 0.23, etc.}}
- """
- # This function fails silently on its if it's handed a string (as opposed to a
- # list of lines). So, auto-split if we do happen to get a string.
- if isinstance(report_data, str):
- report_data = report_data.splitlines()
- # When switching to python3 catch the case when bytes are passed.
- elif isinstance(report_data, bytes):
- raise TypeError()
-
- # Samples: N{K,M,G} of event 'event-name'
- samples_regex = re.compile(r"#\s+Samples: \d+\S? of event '([^']+)'")
-
- # We expect lines like:
- # N.NN% command samples shared_object [location] symbol
- #
- # Note that we're looking at stripped lines, so there is no space at the
- # start.
- perf_regex = re.compile(r'^(\d+(?:.\d*)?)%' # N.NN%
- r'\s*\d+' # samples count (ignored)
- r'\s*\S+' # command (ignored)
- r'\s*\S+' # shared_object (ignored)
- r'\s*\[.\]' # location (ignored)
- r'\s*(\S.+)' # function
- )
-
- stripped_lines = (l.strip() for l in report_data)
- nonempty_lines = (l for l in stripped_lines if l)
- # Ignore all lines before we see samples_regex
- interesting_lines = itertools.dropwhile(lambda x: not samples_regex.match(x),
- nonempty_lines)
-
- first_sample_line = next(interesting_lines, None)
- # Went through the entire file without finding a 'samples' header. Quit.
- if first_sample_line is None:
- return {}
-
- sample_name = samples_regex.match(first_sample_line).group(1)
- current_result = {}
- results = {sample_name: current_result}
- for line in interesting_lines:
- samples_match = samples_regex.match(line)
- if samples_match:
- sample_name = samples_match.group(1)
- current_result = {}
- results[sample_name] = current_result
- continue
-
- match = perf_regex.match(line)
- if not match:
- continue
- percentage_str, func_name = match.groups()
- try:
- percentage = float(percentage_str)
- except ValueError:
- # Couldn't parse it; try to be "resilient".
- continue
- current_result[func_name] = percentage
- return results
+ Into:
+ {'foo': {'function::name': 1.23, 'function2::name': 1.22},
+ 'bar': {'function3::name': 0.23, etc.}}
+ """
+    # This function fails silently if it's handed a string (as opposed to a
+    # list of lines). So, auto-split if we do happen to get a string.
+ if isinstance(report_data, str):
+ report_data = report_data.splitlines()
+    # When switching to Python 3, catch the case where bytes are passed.
+ elif isinstance(report_data, bytes):
+ raise TypeError()
+
+ # Samples: N{K,M,G} of event 'event-name'
+ samples_regex = re.compile(r"#\s+Samples: \d+\S? of event '([^']+)'")
+
+ # We expect lines like:
+ # N.NN% command samples shared_object [location] symbol
+ #
+ # Note that we're looking at stripped lines, so there is no space at the
+ # start.
+ perf_regex = re.compile(
+ r"^(\d+(?:.\d*)?)%" # N.NN%
+ r"\s*\d+" # samples count (ignored)
+ r"\s*\S+" # command (ignored)
+ r"\s*\S+" # shared_object (ignored)
+ r"\s*\[.\]" # location (ignored)
+ r"\s*(\S.+)" # function
+ )
+
+ stripped_lines = (l.strip() for l in report_data)
+ nonempty_lines = (l for l in stripped_lines if l)
+ # Ignore all lines before we see samples_regex
+ interesting_lines = itertools.dropwhile(
+ lambda x: not samples_regex.match(x), nonempty_lines
+ )
+
+ first_sample_line = next(interesting_lines, None)
+ # Went through the entire file without finding a 'samples' header. Quit.
+ if first_sample_line is None:
+ return {}
+ sample_name = samples_regex.match(first_sample_line).group(1)
+ current_result = {}
+ results = {sample_name: current_result}
+ for line in interesting_lines:
+ samples_match = samples_regex.match(line)
+ if samples_match:
+ sample_name = samples_match.group(1)
+ current_result = {}
+ results[sample_name] = current_result
+ continue
+
+ match = perf_regex.match(line)
+ if not match:
+ continue
+ percentage_str, func_name = match.groups()
+ try:
+ percentage = float(percentage_str)
+ except ValueError:
+ # Couldn't parse it; try to be "resilient".
+ continue
+ current_result[func_name] = percentage
+ return results
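
A small self-contained example of the layout ParseStandardPerfReport accepts; the report text below is invented but matches samples_regex and perf_regex (percent, sample count, command, shared object, [location], symbol):

    sample_report = """
    # Samples: 1234M of event 'cycles'
        12.34%  4000  chrome  libc.so  [.] memcpy
         1.20%   800  chrome  libc.so  [.] malloc
    # Samples: 999K of event 'instructions'
         0.50%    60  chrome  chrome   [.] memset
    """
    parsed = ParseStandardPerfReport(sample_report)
    # parsed == {"cycles": {"memcpy": 12.34, "malloc": 1.2},
    #            "instructions": {"memset": 0.5}}
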
-def _ReadExperimentPerfReport(results_directory, label_name, benchmark_name,
- benchmark_iteration):
- """Reads a perf report for the given benchmark. Returns {} on failure.
- The result should be a map of maps; it should look like:
- {perf_event_name: {function_name: pct_time_spent}}, e.g.
- {'cpu_cycles': {'_malloc': 10.0, '_free': 0.3, ...}}
- """
- raw_dir_name = label_name + benchmark_name + str(benchmark_iteration + 1)
- dir_name = ''.join(c for c in raw_dir_name if c.isalnum())
- file_name = os.path.join(results_directory, dir_name, 'perf.data.report.0')
- try:
- with open(file_name) as in_file:
- return ParseStandardPerfReport(in_file)
- except IOError:
- # Yes, we swallow any IO-related errors.
- return {}
+def _ReadExperimentPerfReport(
+ results_directory, label_name, benchmark_name, benchmark_iteration
+):
+ """Reads a perf report for the given benchmark. Returns {} on failure.
+
+ The result should be a map of maps; it should look like:
+ {perf_event_name: {function_name: pct_time_spent}}, e.g.
+ {'cpu_cycles': {'_malloc': 10.0, '_free': 0.3, ...}}
+ """
+ raw_dir_name = label_name + benchmark_name + str(benchmark_iteration + 1)
+ dir_name = "".join(c for c in raw_dir_name if c.isalnum())
+ file_name = os.path.join(results_directory, dir_name, "perf.data.report.0")
+ try:
+ with open(file_name) as in_file:
+ return ParseStandardPerfReport(in_file)
+ except IOError:
+ # Yes, we swallow any IO-related errors.
+ return {}
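
The per-run directory name is simply label + benchmark + 1-based iteration with non-alphanumeric characters dropped; a worked example with invented names:

    raw_dir_name = "image-1" + "octane" + str(0 + 1)
    dir_name = "".join(c for c in raw_dir_name if c.isalnum())
    # dir_name == "image1octane1"; the report is read from
    # <results_directory>/image1octane1/perf.data.report.0
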
# Split out so that testing (specifically: mocking) is easier
def _ExperimentToKeyvals(experiment, for_json_report):
- """Converts an experiment to keyvals."""
- return OrganizeResults(
- experiment.benchmark_runs, experiment.labels, json_report=for_json_report)
+ """Converts an experiment to keyvals."""
+ return OrganizeResults(
+ experiment.benchmark_runs,
+ experiment.labels,
+ json_report=for_json_report,
+ )
class BenchmarkResults(object):
- """The minimum set of fields that any ResultsReport will take."""
-
- def __init__(self,
- label_names,
- benchmark_names_and_iterations,
- run_keyvals,
- ignore_min_max=False,
- read_perf_report=None,
- cwp_dso=None,
- weights=None):
- if read_perf_report is None:
-
- def _NoPerfReport(*_args, **_kwargs):
- return {}
-
- read_perf_report = _NoPerfReport
-
- self.label_names = label_names
- self.benchmark_names_and_iterations = benchmark_names_and_iterations
- self.iter_counts = dict(benchmark_names_and_iterations)
- self.run_keyvals = run_keyvals
- self.ignore_min_max = ignore_min_max
- self.read_perf_report = read_perf_report
- self.cwp_dso = cwp_dso
- self.weights = dict(weights) if weights else None
-
- @staticmethod
- def FromExperiment(experiment, for_json_report=False):
- label_names = [label.name for label in experiment.labels]
- benchmark_names_and_iterations = [(benchmark.name, benchmark.iterations)
- for benchmark in experiment.benchmarks]
- run_keyvals = _ExperimentToKeyvals(experiment, for_json_report)
- ignore_min_max = experiment.ignore_min_max
- read_perf_report = functools.partial(_ReadExperimentPerfReport,
- experiment.results_directory)
- cwp_dso = experiment.cwp_dso
- weights = [(benchmark.name, benchmark.weight)
- for benchmark in experiment.benchmarks]
- return BenchmarkResults(label_names, benchmark_names_and_iterations,
- run_keyvals, ignore_min_max, read_perf_report,
- cwp_dso, weights)
+ """The minimum set of fields that any ResultsReport will take."""
+
+ def __init__(
+ self,
+ label_names,
+ benchmark_names_and_iterations,
+ run_keyvals,
+ ignore_min_max=False,
+ read_perf_report=None,
+ cwp_dso=None,
+ weights=None,
+ ):
+ if read_perf_report is None:
+
+ def _NoPerfReport(*_args, **_kwargs):
+ return {}
+
+ read_perf_report = _NoPerfReport
+
+ self.label_names = label_names
+ self.benchmark_names_and_iterations = benchmark_names_and_iterations
+ self.iter_counts = dict(benchmark_names_and_iterations)
+ self.run_keyvals = run_keyvals
+ self.ignore_min_max = ignore_min_max
+ self.read_perf_report = read_perf_report
+ self.cwp_dso = cwp_dso
+ self.weights = dict(weights) if weights else None
+
+ @staticmethod
+ def FromExperiment(experiment, for_json_report=False):
+ label_names = [label.name for label in experiment.labels]
+ benchmark_names_and_iterations = [
+ (benchmark.name, benchmark.iterations)
+ for benchmark in experiment.benchmarks
+ ]
+ run_keyvals = _ExperimentToKeyvals(experiment, for_json_report)
+ ignore_min_max = experiment.ignore_min_max
+ read_perf_report = functools.partial(
+ _ReadExperimentPerfReport, experiment.results_directory
+ )
+ cwp_dso = experiment.cwp_dso
+ weights = [
+ (benchmark.name, benchmark.weight)
+ for benchmark in experiment.benchmarks
+ ]
+ return BenchmarkResults(
+ label_names,
+ benchmark_names_and_iterations,
+ run_keyvals,
+ ignore_min_max,
+ read_perf_report,
+ cwp_dso,
+ weights,
+ )
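
BenchmarkResults can also be constructed directly from already-organized keyvals; a sketch with invented values, reusing the OrganizeResults output shape sketched earlier:

    results = BenchmarkResults(
        label_names=["image-1", "image-2"],
        benchmark_names_and_iterations=[("octane", 2)],
        run_keyvals=organized,  # e.g. the OrganizeResults output sketched above
    )
    # Renders a console summary; no Experiment object is required on this path.
    print(TextResultsReport(results).GetReport())
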
def _GetElemByName(name, from_list):
- """Gets an element from the given list by its name field.
+ """Gets an element from the given list by its name field.
- Raises an error if it doesn't find exactly one match.
- """
- elems = [e for e in from_list if e.name == name]
- if len(elems) != 1:
- raise ValueError('Expected 1 item named %s, found %d' % (name, len(elems)))
- return elems[0]
+ Raises an error if it doesn't find exactly one match.
+ """
+ elems = [e for e in from_list if e.name == name]
+ if len(elems) != 1:
+ raise ValueError(
+ "Expected 1 item named %s, found %d" % (name, len(elems))
+ )
+ return elems[0]
def _Unlist(l):
- """If l is a list, extracts the first element of l. Otherwise, returns l."""
- return l[0] if isinstance(l, list) else l
+ """If l is a list, extracts the first element of l. Otherwise, returns l."""
+ return l[0] if isinstance(l, list) else l
class JSONResultsReport(ResultsReport):
- """Class that generates JSON reports for experiments."""
-
- def __init__(self,
- benchmark_results,
- benchmark_date=None,
- benchmark_time=None,
- experiment=None,
- json_args=None):
- """Construct a JSONResultsReport.
-
- json_args is the dict of arguments we pass to json.dumps in GetReport().
- """
- super(JSONResultsReport, self).__init__(benchmark_results)
-
- defaults = TelemetryDefaults()
- defaults.ReadDefaultsFile()
- summary_field_defaults = defaults.GetDefault()
- if summary_field_defaults is None:
- summary_field_defaults = {}
- self.summary_field_defaults = summary_field_defaults
-
- if json_args is None:
- json_args = {}
- self.json_args = json_args
-
- self.experiment = experiment
- if not benchmark_date:
- timestamp = datetime.datetime.strftime(datetime.datetime.now(),
- '%Y-%m-%d %H:%M:%S')
- benchmark_date, benchmark_time = timestamp.split(' ')
- self.date = benchmark_date
- self.time = benchmark_time
-
- @staticmethod
- def FromExperiment(experiment,
- benchmark_date=None,
- benchmark_time=None,
- json_args=None):
- benchmark_results = BenchmarkResults.FromExperiment(
- experiment, for_json_report=True)
- return JSONResultsReport(benchmark_results, benchmark_date, benchmark_time,
- experiment, json_args)
-
- def GetReportObjectIgnoringExperiment(self):
- """Gets the JSON report object specifically for the output data.
-
- Ignores any experiment-specific fields (e.g. board, machine checksum, ...).
- """
- benchmark_results = self.benchmark_results
- label_names = benchmark_results.label_names
- summary_field_defaults = self.summary_field_defaults
- final_results = []
- for test, test_results in benchmark_results.run_keyvals.items():
- for label_name, label_results in zip(label_names, test_results):
- for iter_results in label_results:
- passed = iter_results.get('retval') == 0
- json_results = {
- 'date': self.date,
- 'time': self.time,
- 'label': label_name,
- 'test_name': test,
- 'pass': passed,
- }
- final_results.append(json_results)
-
- if not passed:
- continue
-
- # Get overall results.
- summary_fields = summary_field_defaults.get(test)
- if summary_fields is not None:
- value = []
- json_results['overall_result'] = value
- for f in summary_fields:
- v = iter_results.get(f)
- if v is None:
+ """Class that generates JSON reports for experiments."""
+
+ def __init__(
+ self,
+ benchmark_results,
+ benchmark_date=None,
+ benchmark_time=None,
+ experiment=None,
+ json_args=None,
+ ):
+ """Construct a JSONResultsReport.
+
+ json_args is the dict of arguments we pass to json.dumps in GetReport().
+ """
+ super(JSONResultsReport, self).__init__(benchmark_results)
+
+ defaults = TelemetryDefaults()
+ defaults.ReadDefaultsFile()
+ summary_field_defaults = defaults.GetDefault()
+ if summary_field_defaults is None:
+ summary_field_defaults = {}
+ self.summary_field_defaults = summary_field_defaults
+
+ if json_args is None:
+ json_args = {}
+ self.json_args = json_args
+
+ self.experiment = experiment
+ if not benchmark_date:
+ timestamp = datetime.datetime.strftime(
+ datetime.datetime.now(), "%Y-%m-%d %H:%M:%S"
+ )
+ benchmark_date, benchmark_time = timestamp.split(" ")
+ self.date = benchmark_date
+ self.time = benchmark_time
+
+ @staticmethod
+ def FromExperiment(
+ experiment, benchmark_date=None, benchmark_time=None, json_args=None
+ ):
+ benchmark_results = BenchmarkResults.FromExperiment(
+ experiment, for_json_report=True
+ )
+ return JSONResultsReport(
+ benchmark_results,
+ benchmark_date,
+ benchmark_time,
+ experiment,
+ json_args,
+ )
+
+ def GetReportObjectIgnoringExperiment(self):
+ """Gets the JSON report object specifically for the output data.
+
+ Ignores any experiment-specific fields (e.g. board, machine checksum, ...).
+ """
+ benchmark_results = self.benchmark_results
+ label_names = benchmark_results.label_names
+ summary_field_defaults = self.summary_field_defaults
+ final_results = []
+ for test, test_results in benchmark_results.run_keyvals.items():
+ for label_name, label_results in zip(label_names, test_results):
+ for iter_results in label_results:
+ passed = iter_results.get("retval") == 0
+ json_results = {
+ "date": self.date,
+ "time": self.time,
+ "label": label_name,
+ "test_name": test,
+ "pass": passed,
+ }
+ final_results.append(json_results)
+
+ if not passed:
+ continue
+
+ # Get overall results.
+ summary_fields = summary_field_defaults.get(test)
+ if summary_fields is not None:
+ value = []
+ json_results["overall_result"] = value
+ for f in summary_fields:
+ v = iter_results.get(f)
+ if v is None:
+ continue
+ # New telemetry results format: sometimes we get a list of lists
+ # now.
+ v = _Unlist(_Unlist(v))
+ value.append((f, float(v)))
+
+ # Get detailed results.
+ detail_results = {}
+ json_results["detailed_results"] = detail_results
+ for k, v in iter_results.items():
+ if (
+ k == "retval"
+ or k == "PASS"
+ or k == ["PASS"]
+ or v == "PASS"
+ ):
+ continue
+
+ v = _Unlist(v)
+ if "machine" in k:
+ json_results[k] = v
+ elif v is not None:
+ if isinstance(v, list):
+ detail_results[k] = [float(d) for d in v]
+ else:
+ detail_results[k] = float(v)
+ return final_results
+
+ def GetReportObject(self):
+ """Generate the JSON report, returning it as a python object."""
+ report_list = self.GetReportObjectIgnoringExperiment()
+ if self.experiment is not None:
+ self._AddExperimentSpecificFields(report_list)
+ return report_list
+
+ def _AddExperimentSpecificFields(self, report_list):
+ """Add experiment-specific data to the JSON report."""
+ board = self.experiment.labels[0].board
+ manager = self.experiment.machine_manager
+ for report in report_list:
+ label_name = report["label"]
+ label = _GetElemByName(label_name, self.experiment.labels)
+
+ img_path = os.path.realpath(
+ os.path.expanduser(label.chromeos_image)
+ )
+ ver, img = ParseChromeosImage(img_path)
+
+ report.update(
+ {
+ "board": board,
+ "chromeos_image": img,
+ "chromeos_version": ver,
+ "chrome_version": label.chrome_version,
+ "compiler": label.compiler,
+ }
+ )
+
+ if not report["pass"]:
continue
- # New telemetry results format: sometimes we get a list of lists
- # now.
- v = _Unlist(_Unlist(v))
- value.append((f, float(v)))
-
- # Get detailed results.
- detail_results = {}
- json_results['detailed_results'] = detail_results
- for k, v in iter_results.items():
- if k == 'retval' or k == 'PASS' or k == ['PASS'] or v == 'PASS':
- continue
-
- v = _Unlist(v)
- if 'machine' in k:
- json_results[k] = v
- elif v is not None:
- if isinstance(v, list):
- detail_results[k] = [float(d) for d in v]
- else:
- detail_results[k] = float(v)
- return final_results
-
- def GetReportObject(self):
- """Generate the JSON report, returning it as a python object."""
- report_list = self.GetReportObjectIgnoringExperiment()
- if self.experiment is not None:
- self._AddExperimentSpecificFields(report_list)
- return report_list
-
- def _AddExperimentSpecificFields(self, report_list):
- """Add experiment-specific data to the JSON report."""
- board = self.experiment.labels[0].board
- manager = self.experiment.machine_manager
- for report in report_list:
- label_name = report['label']
- label = _GetElemByName(label_name, self.experiment.labels)
-
- img_path = os.path.realpath(os.path.expanduser(label.chromeos_image))
- ver, img = ParseChromeosImage(img_path)
-
- report.update({
- 'board': board,
- 'chromeos_image': img,
- 'chromeos_version': ver,
- 'chrome_version': label.chrome_version,
- 'compiler': label.compiler
- })
-
- if not report['pass']:
- continue
- if 'machine_checksum' not in report:
- report['machine_checksum'] = manager.machine_checksum[label_name]
- if 'machine_string' not in report:
- report['machine_string'] = manager.machine_checksum_string[label_name]
-
- def GetReport(self):
- """Dump the results of self.GetReportObject() to a string as JSON."""
- # This exists for consistency with the other GetReport methods.
- # Specifically, they all return strings, so it's a bit awkward if the JSON
- # results reporter returns an object.
- return json.dumps(self.GetReportObject(), **self.json_args)
+ if "machine_checksum" not in report:
+ report["machine_checksum"] = manager.machine_checksum[
+ label_name
+ ]
+ if "machine_string" not in report:
+ report["machine_string"] = manager.machine_checksum_string[
+ label_name
+ ]
+
+ def GetReport(self):
+ """Dump the results of self.GetReportObject() to a string as JSON."""
+ # This exists for consistency with the other GetReport methods.
+ # Specifically, they all return strings, so it's a bit awkward if the JSON
+ # results reporter returns an object.
+ return json.dumps(self.GetReportObject(), **self.json_args)
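
Note on the _Unlist(_Unlist(v)) call above: the helper is defined earlier in
results_report.py, outside this hunk. A minimal sketch consistent with the
behaviour the unit tests below expect (the real definition may differ) is:

    def _Unlist(v):
        # Take the first element if v is a list; otherwise pass v through.
        return v[0] if isinstance(v, list) else v

    # Newer telemetry keyvals can arrive as a list of lists; a single
    # application keeps the inner list for detailed results:
    many_floats = [["1.0", "2.0"], ["3.0"]]
    [float(d) for d in _Unlist(many_floats)]  # [1.0, 2.0]; the trailing 3.0 is dropped
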
diff --git a/crosperf/results_report_templates.py b/crosperf/results_report_templates.py
index ea411e21..3ef9e74a 100644
--- a/crosperf/results_report_templates.py
+++ b/crosperf/results_report_templates.py
@@ -1,26 +1,28 @@
# -*- coding: utf-8 -*-
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Copyright 2016 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Text templates used by various parts of results_report."""
-from __future__ import print_function
import html
from string import Template
-_TabMenuTemplate = Template("""
+
+_TabMenuTemplate = Template(
+ """
<div class='tab-menu'>
<a href="javascript:switchTab('$table_name', 'html')">HTML</a>
<a href="javascript:switchTab('$table_name', 'text')">Text</a>
<a href="javascript:switchTab('$table_name', 'tsv')">TSV</a>
-</div>""")
+</div>"""
+)
def _GetTabMenuHTML(table_name):
- # N.B. cgi.escape does some very basic HTML escaping. Nothing more.
- escaped = html.escape(table_name)
- return _TabMenuTemplate.substitute(table_name=escaped)
+ # N.B. cgi.escape does some very basic HTML escaping. Nothing more.
+ escaped = html.escape(table_name)
+ return _TabMenuTemplate.substitute(table_name=escaped)
_ExperimentFileHTML = """
@@ -33,12 +35,15 @@ _ExperimentFileHTML = """
def _GetExperimentFileHTML(experiment_file_text):
- if not experiment_file_text:
- return ''
- return _ExperimentFileHTML % (html.escape(experiment_file_text, quote=False),)
+ if not experiment_file_text:
+ return ""
+ return _ExperimentFileHTML % (
+ html.escape(experiment_file_text, quote=False),
+ )
-_ResultsSectionHTML = Template("""
+_ResultsSectionHTML = Template(
+ """
<div class='results-section'>
<div class='results-section-title'>$sect_name</div>
<div class='results-section-content'>
@@ -48,22 +53,25 @@ _ResultsSectionHTML = Template("""
</div>
$tab_menu
</div>
-""")
+"""
+)
def _GetResultsSectionHTML(print_table, table_name, data):
- first_word = table_name.strip().split()[0]
- short_name = first_word.lower()
- return _ResultsSectionHTML.substitute(
- sect_name=table_name,
- html_table=print_table(data, 'HTML'),
- text_table=print_table(data, 'PLAIN'),
- tsv_table=print_table(data, 'TSV'),
- tab_menu=_GetTabMenuHTML(short_name),
- short_name=short_name)
-
-
-_MainHTML = Template("""
+ first_word = table_name.strip().split()[0]
+ short_name = first_word.lower()
+ return _ResultsSectionHTML.substitute(
+ sect_name=table_name,
+ html_table=print_table(data, "HTML"),
+ text_table=print_table(data, "PLAIN"),
+ tsv_table=print_table(data, "TSV"),
+ tab_menu=_GetTabMenuHTML(short_name),
+ short_name=short_name,
+ )
+
+
+_MainHTML = Template(
+ """
<html>
<head>
<style type="text/css">
@@ -169,37 +177,50 @@ _MainHTML = Template("""
$experiment_file
</body>
</html>
-""")
+"""
+)
# It's a bit ugly that we take some HTML things, and some non-HTML things, but I
# need to balance prettiness with time spent making things pretty.
-def GenerateHTMLPage(perf_table, chart_js, summary_table, print_table,
- chart_divs, full_table, experiment_file):
- """Generates a crosperf HTML page from the given arguments.
-
- print_table is a two-arg function called like: print_table(t, f)
- t is one of [summary_table, print_table, full_table]; it's the table we want
- to format.
- f is one of ['TSV', 'HTML', 'PLAIN']; it's the type of format we want.
- """
- summary_table_html = _GetResultsSectionHTML(print_table, 'Summary Table',
- summary_table)
- if perf_table:
- perf_html = _GetResultsSectionHTML(print_table, 'Perf Table', perf_table)
- perf_init = "switchTab('perf', 'html')"
- else:
- perf_html = ''
- perf_init = ''
-
- full_table_html = _GetResultsSectionHTML(print_table, 'Full Table',
- full_table)
- experiment_file_html = _GetExperimentFileHTML(experiment_file)
- return _MainHTML.substitute(
- perf_init=perf_init,
- chart_js=chart_js,
- summary_table=summary_table_html,
- perf_html=perf_html,
- chart_divs=chart_divs,
- full_table=full_table_html,
- experiment_file=experiment_file_html)
+def GenerateHTMLPage(
+ perf_table,
+ chart_js,
+ summary_table,
+ print_table,
+ chart_divs,
+ full_table,
+ experiment_file,
+):
+ """Generates a crosperf HTML page from the given arguments.
+
+ print_table is a two-arg function called like: print_table(t, f)
+    t is one of [summary_table, perf_table, full_table]; it's the table we want
+ to format.
+ f is one of ['TSV', 'HTML', 'PLAIN']; it's the type of format we want.
+ """
+ summary_table_html = _GetResultsSectionHTML(
+ print_table, "Summary Table", summary_table
+ )
+ if perf_table:
+ perf_html = _GetResultsSectionHTML(
+ print_table, "Perf Table", perf_table
+ )
+ perf_init = "switchTab('perf', 'html')"
+ else:
+ perf_html = ""
+ perf_init = ""
+
+ full_table_html = _GetResultsSectionHTML(
+ print_table, "Full Table", full_table
+ )
+ experiment_file_html = _GetExperimentFileHTML(experiment_file)
+ return _MainHTML.substitute(
+ perf_init=perf_init,
+ chart_js=chart_js,
+ summary_table=summary_table_html,
+ perf_html=perf_html,
+ chart_divs=chart_divs,
+ full_table=full_table_html,
+ experiment_file=experiment_file_html,
+ )
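
GenerateHTMLPage only ever calls print_table through the two-argument contract
described in its docstring: print_table(t, f) with f in 'HTML', 'PLAIN', 'TSV'.
A self-contained sketch of a caller -- the formatter and table data below are
made-up stand-ins, not crosperf's real tabulator-based printer -- could look like:

    import results_report_templates as templates

    def fake_print_table(table, fmt):
        # Hypothetical formatter obeying the print_table(t, f) contract.
        sep = "\t" if fmt == "TSV" else " | "
        body = "\n".join(sep.join(str(cell) for cell in row) for row in table)
        return "<pre>%s</pre>" % body if fmt == "HTML" else body

    table = [["benchmark", "speedup"], ["octane", "1.02"]]
    page = templates.GenerateHTMLPage(
        perf_table=None,  # a falsy perf_table suppresses the perf section
        chart_js="",
        summary_table=table,
        print_table=fake_print_table,
        chart_divs="",
        full_table=table,
        experiment_file="",
    )
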
diff --git a/crosperf/results_report_unittest.py b/crosperf/results_report_unittest.py
index 1e96ef97..4ce654d0 100755
--- a/crosperf/results_report_unittest.py
+++ b/crosperf/results_report_unittest.py
@@ -1,14 +1,12 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Copyright 2016 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for the results reporter."""
-from __future__ import division
-from __future__ import print_function
import collections
import io
@@ -16,8 +14,6 @@ import os
import unittest
import unittest.mock as mock
-import test_flag
-
from benchmark_run import MockBenchmarkRun
from cros_utils import logger
from experiment_factory import ExperimentFactory
@@ -31,39 +27,46 @@ from results_report import JSONResultsReport
from results_report import ParseChromeosImage
from results_report import ParseStandardPerfReport
from results_report import TextResultsReport
+import test_flag
class FreeFunctionsTest(unittest.TestCase):
- """Tests for any free functions in results_report."""
-
- def testParseChromeosImage(self):
- # N.B. the cases with blank versions aren't explicitly supported by
- # ParseChromeosImage. I'm not sure if they need to be supported, but the
- # goal of this was to capture existing functionality as much as possible.
- base_case = '/my/chroot/src/build/images/x86-generic/R01-1.0.date-time' \
- '/chromiumos_test_image.bin'
- self.assertEqual(ParseChromeosImage(base_case), ('R01-1.0', base_case))
-
- dir_base_case = os.path.dirname(base_case)
- self.assertEqual(ParseChromeosImage(dir_base_case), ('', dir_base_case))
-
- buildbot_case = '/my/chroot/chroot/tmp/buildbot-build/R02-1.0.date-time' \
- '/chromiumos_test_image.bin'
- buildbot_img = buildbot_case.split('/chroot/tmp')[1]
-
- self.assertEqual(
- ParseChromeosImage(buildbot_case), ('R02-1.0', buildbot_img))
- self.assertEqual(
- ParseChromeosImage(os.path.dirname(buildbot_case)),
- ('', os.path.dirname(buildbot_img)))
-
- # Ensure we do something reasonable when giving paths that don't quite
- # match the expected pattern.
- fun_case = '/chromiumos_test_image.bin'
- self.assertEqual(ParseChromeosImage(fun_case), ('', fun_case))
-
- fun_case2 = 'chromiumos_test_image.bin'
- self.assertEqual(ParseChromeosImage(fun_case2), ('', fun_case2))
+ """Tests for any free functions in results_report."""
+
+ def testParseChromeosImage(self):
+ # N.B. the cases with blank versions aren't explicitly supported by
+ # ParseChromeosImage. I'm not sure if they need to be supported, but the
+ # goal of this was to capture existing functionality as much as possible.
+ base_case = (
+ "/my/chroot/src/build/images/x86-generic/R01-1.0.date-time"
+ "/chromiumos_test_image.bin"
+ )
+ self.assertEqual(ParseChromeosImage(base_case), ("R01-1.0", base_case))
+
+ dir_base_case = os.path.dirname(base_case)
+ self.assertEqual(ParseChromeosImage(dir_base_case), ("", dir_base_case))
+
+ buildbot_case = (
+ "/my/chroot/chroot/tmp/buildbot-build/R02-1.0.date-time"
+ "/chromiumos_test_image.bin"
+ )
+ buildbot_img = buildbot_case.split("/chroot/tmp")[1]
+
+ self.assertEqual(
+ ParseChromeosImage(buildbot_case), ("R02-1.0", buildbot_img)
+ )
+ self.assertEqual(
+ ParseChromeosImage(os.path.dirname(buildbot_case)),
+ ("", os.path.dirname(buildbot_img)),
+ )
+
+ # Ensure we do something reasonable when giving paths that don't quite
+ # match the expected pattern.
+ fun_case = "/chromiumos_test_image.bin"
+ self.assertEqual(ParseChromeosImage(fun_case), ("", fun_case))
+
+ fun_case2 = "chromiumos_test_image.bin"
+ self.assertEqual(ParseChromeosImage(fun_case2), ("", fun_case2))
# There are many ways for this to be done better, but the linter complains
@@ -72,19 +75,20 @@ _fake_path_number = [0]
def FakePath(ext):
- """Makes a unique path that shouldn't exist on the host system.
+ """Makes a unique path that shouldn't exist on the host system.
- Each call returns a different path, so if said path finds its way into an
- error message, it may be easier to track it to its source.
- """
- _fake_path_number[0] += 1
- prefix = '/tmp/should/not/exist/%d/' % (_fake_path_number[0],)
- return os.path.join(prefix, ext)
+ Each call returns a different path, so if said path finds its way into an
+ error message, it may be easier to track it to its source.
+ """
+ _fake_path_number[0] += 1
+ prefix = "/tmp/should/not/exist/%d/" % (_fake_path_number[0],)
+ return os.path.join(prefix, ext)
-def MakeMockExperiment(compiler='gcc'):
- """Mocks an experiment using the given compiler."""
- mock_experiment_file = io.StringIO("""
+def MakeMockExperiment(compiler="gcc"):
+ """Mocks an experiment using the given compiler."""
+ mock_experiment_file = io.StringIO(
+ """
board: x86-alex
remote: 127.0.0.1
locks_dir: /tmp
@@ -101,363 +105,398 @@ def MakeMockExperiment(compiler='gcc'):
remote: 127.0.0.2
chromeos_image: %s
}
- """ % (FakePath('cros_image1.bin'), FakePath('cros_image2.bin')))
- efile = ExperimentFile(mock_experiment_file)
- experiment = ExperimentFactory().GetExperiment(efile,
- FakePath('working_directory'),
- FakePath('log_dir'))
- for label in experiment.labels:
- label.compiler = compiler
- return experiment
+ """
+ % (FakePath("cros_image1.bin"), FakePath("cros_image2.bin"))
+ )
+ efile = ExperimentFile(mock_experiment_file)
+ experiment = ExperimentFactory().GetExperiment(
+ efile, FakePath("working_directory"), FakePath("log_dir")
+ )
+ for label in experiment.labels:
+ label.compiler = compiler
+ return experiment
def _InjectSuccesses(experiment, how_many, keyvals, for_benchmark=0):
- """Injects successful experiment runs (for each label) into the experiment."""
- # Defensive copy of keyvals, so if it's modified, we'll know.
- keyvals = dict(keyvals)
- num_configs = len(experiment.benchmarks) * len(experiment.labels)
- num_runs = len(experiment.benchmark_runs) // num_configs
-
- # TODO(gbiv): Centralize the mocking of these, maybe? (It's also done in
- # benchmark_run_unittest)
- bench = experiment.benchmarks[for_benchmark]
- cache_conditions = []
- log_level = 'average'
- share_cache = ''
- locks_dir = ''
- log = logger.GetLogger()
- machine_manager = MockMachineManager(
- FakePath('chromeos_root'), 0, log_level, locks_dir)
- machine_manager.AddMachine('testing_machine')
- machine = next(
- m for m in machine_manager.GetMachines() if m.name == 'testing_machine')
-
- def MakeSuccessfulRun(n, label):
- run = MockBenchmarkRun('mock_success%d' % (n,), bench, label,
- 1 + n + num_runs, cache_conditions, machine_manager,
- log, log_level, share_cache, {})
- mock_result = MockResult(log, label, log_level, machine)
- mock_result.keyvals = keyvals
- run.result = mock_result
- return run
-
- for label in experiment.labels:
- experiment.benchmark_runs.extend(
- MakeSuccessfulRun(n, label) for n in range(how_many))
- return experiment
+ """Injects successful experiment runs (for each label) into the experiment."""
+ # Defensive copy of keyvals, so if it's modified, we'll know.
+ keyvals = dict(keyvals)
+ num_configs = len(experiment.benchmarks) * len(experiment.labels)
+ num_runs = len(experiment.benchmark_runs) // num_configs
+
+ # TODO(gbiv): Centralize the mocking of these, maybe? (It's also done in
+ # benchmark_run_unittest)
+ bench = experiment.benchmarks[for_benchmark]
+ cache_conditions = []
+ log_level = "average"
+ share_cache = ""
+ locks_dir = ""
+ log = logger.GetLogger()
+ machine_manager = MockMachineManager(
+ FakePath("chromeos_root"), 0, log_level, locks_dir
+ )
+ machine_manager.AddMachine("testing_machine")
+ machine = next(
+ m for m in machine_manager.GetMachines() if m.name == "testing_machine"
+ )
+
+ def MakeSuccessfulRun(n, label):
+ run = MockBenchmarkRun(
+ "mock_success%d" % (n,),
+ bench,
+ label,
+ 1 + n + num_runs,
+ cache_conditions,
+ machine_manager,
+ log,
+ log_level,
+ share_cache,
+ {},
+ )
+ mock_result = MockResult(log, label, log_level, machine)
+ mock_result.keyvals = keyvals
+ run.result = mock_result
+ return run
+
+ for label in experiment.labels:
+ experiment.benchmark_runs.extend(
+ MakeSuccessfulRun(n, label) for n in range(how_many)
+ )
+ return experiment
class TextResultsReportTest(unittest.TestCase):
- """Tests that the output of a text report contains the things we pass in.
-
- At the moment, this doesn't care deeply about the format in which said
- things are displayed. It just cares that they're present.
- """
-
- def _checkReport(self, mock_getcooldown, email):
- num_success = 2
- success_keyvals = {'retval': 0, 'machine': 'some bot', 'a_float': 3.96}
- experiment = _InjectSuccesses(MakeMockExperiment(), num_success,
- success_keyvals)
- SECONDS_IN_MIN = 60
- mock_getcooldown.return_value = {
- experiment.remote[0]: 12 * SECONDS_IN_MIN,
- experiment.remote[1]: 8 * SECONDS_IN_MIN
- }
-
- text_report = TextResultsReport.FromExperiment(
- experiment, email=email).GetReport()
- self.assertIn(str(success_keyvals['a_float']), text_report)
- self.assertIn(success_keyvals['machine'], text_report)
- self.assertIn(MockCrosMachine.CPUINFO_STRING, text_report)
- self.assertIn('\nDuration\n', text_report)
- self.assertIn('Total experiment time:\n', text_report)
- self.assertIn('Cooldown wait time:\n', text_report)
- self.assertIn('DUT %s: %d min' % (experiment.remote[0], 12), text_report)
- self.assertIn('DUT %s: %d min' % (experiment.remote[1], 8), text_report)
- return text_report
-
- @mock.patch.object(TextResultsReport, 'GetTotalWaitCooldownTime')
- def testOutput(self, mock_getcooldown):
- email_report = self._checkReport(mock_getcooldown, email=True)
- text_report = self._checkReport(mock_getcooldown, email=False)
-
- # Ensure that the reports somehow different. Otherwise, having the
- # distinction is useless.
- self.assertNotEqual(email_report, text_report)
-
- def test_get_totalwait_cooldowntime(self):
- experiment = MakeMockExperiment()
- cros_machines = experiment.machine_manager.GetMachines()
- cros_machines[0].AddCooldownWaitTime(120)
- cros_machines[1].AddCooldownWaitTime(240)
- text_results = TextResultsReport.FromExperiment(experiment, email=False)
- total = text_results.GetTotalWaitCooldownTime()
- self.assertEqual(total[experiment.remote[0]], 120)
- self.assertEqual(total[experiment.remote[1]], 240)
+ """Tests that the output of a text report contains the things we pass in.
+
+ At the moment, this doesn't care deeply about the format in which said
+ things are displayed. It just cares that they're present.
+ """
+
+ def _checkReport(self, mock_getcooldown, email):
+ num_success = 2
+ success_keyvals = {"retval": 0, "machine": "some bot", "a_float": 3.96}
+ experiment = _InjectSuccesses(
+ MakeMockExperiment(), num_success, success_keyvals
+ )
+ SECONDS_IN_MIN = 60
+ mock_getcooldown.return_value = {
+ experiment.remote[0]: 12 * SECONDS_IN_MIN,
+ experiment.remote[1]: 8 * SECONDS_IN_MIN,
+ }
+
+ text_report = TextResultsReport.FromExperiment(
+ experiment, email=email
+ ).GetReport()
+ self.assertIn(str(success_keyvals["a_float"]), text_report)
+ self.assertIn(success_keyvals["machine"], text_report)
+ self.assertIn(MockCrosMachine.CPUINFO_STRING, text_report)
+ self.assertIn("\nDuration\n", text_report)
+ self.assertIn("Total experiment time:\n", text_report)
+ self.assertIn("Cooldown wait time:\n", text_report)
+ self.assertIn(
+ "DUT %s: %d min" % (experiment.remote[0], 12), text_report
+ )
+ self.assertIn("DUT %s: %d min" % (experiment.remote[1], 8), text_report)
+ return text_report
+
+ @mock.patch.object(TextResultsReport, "GetTotalWaitCooldownTime")
+ def testOutput(self, mock_getcooldown):
+ email_report = self._checkReport(mock_getcooldown, email=True)
+ text_report = self._checkReport(mock_getcooldown, email=False)
+
+        # Ensure that the reports are somehow different. Otherwise, having the
+ # distinction is useless.
+ self.assertNotEqual(email_report, text_report)
+
+ def test_get_totalwait_cooldowntime(self):
+ experiment = MakeMockExperiment()
+ cros_machines = experiment.machine_manager.GetMachines()
+ cros_machines[0].AddCooldownWaitTime(120)
+ cros_machines[1].AddCooldownWaitTime(240)
+ text_results = TextResultsReport.FromExperiment(experiment, email=False)
+ total = text_results.GetTotalWaitCooldownTime()
+ self.assertEqual(total[experiment.remote[0]], 120)
+ self.assertEqual(total[experiment.remote[1]], 240)
class HTMLResultsReportTest(unittest.TestCase):
- """Tests that the output of a HTML report contains the things we pass in.
-
- At the moment, this doesn't care deeply about the format in which said
- things are displayed. It just cares that they're present.
- """
-
- _TestOutput = collections.namedtuple('TestOutput', [
- 'summary_table', 'perf_html', 'chart_js', 'charts', 'full_table',
- 'experiment_file'
- ])
-
- @staticmethod
- def _GetTestOutput(perf_table, chart_js, summary_table, print_table,
- chart_divs, full_table, experiment_file):
- # N.B. Currently we don't check chart_js; it's just passed through because
- # cros lint complains otherwise.
- summary_table = print_table(summary_table, 'HTML')
- perf_html = print_table(perf_table, 'HTML')
- full_table = print_table(full_table, 'HTML')
- return HTMLResultsReportTest._TestOutput(
- summary_table=summary_table,
- perf_html=perf_html,
- chart_js=chart_js,
- charts=chart_divs,
- full_table=full_table,
- experiment_file=experiment_file)
-
- def _GetOutput(self, experiment=None, benchmark_results=None):
- with mock.patch('results_report_templates.GenerateHTMLPage') as standin:
- if experiment is not None:
- HTMLResultsReport.FromExperiment(experiment).GetReport()
- else:
- HTMLResultsReport(benchmark_results).GetReport()
- mod_mock = standin
- self.assertEqual(mod_mock.call_count, 1)
- # call_args[0] is positional args, call_args[1] is kwargs.
- self.assertEqual(mod_mock.call_args[0], tuple())
- fmt_args = mod_mock.call_args[1]
- return self._GetTestOutput(**fmt_args)
-
- def testNoSuccessOutput(self):
- output = self._GetOutput(MakeMockExperiment())
- self.assertIn('no result', output.summary_table)
- self.assertIn('no result', output.full_table)
- self.assertEqual(output.charts, '')
- self.assertNotEqual(output.experiment_file, '')
-
- def testSuccessfulOutput(self):
- num_success = 2
- success_keyvals = {'retval': 0, 'a_float': 3.96}
- output = self._GetOutput(
- _InjectSuccesses(MakeMockExperiment(), num_success, success_keyvals))
-
- self.assertNotIn('no result', output.summary_table)
- # self.assertIn(success_keyvals['machine'], output.summary_table)
- self.assertIn('a_float', output.summary_table)
- self.assertIn(str(success_keyvals['a_float']), output.summary_table)
- self.assertIn('a_float', output.full_table)
- # The _ in a_float is filtered out when we're generating HTML.
- self.assertIn('afloat', output.charts)
- # And make sure we have our experiment file...
- self.assertNotEqual(output.experiment_file, '')
-
- def testBenchmarkResultFailure(self):
- labels = ['label1']
- benchmark_names_and_iterations = [('bench1', 1)]
- benchmark_keyvals = {'bench1': [[]]}
- results = BenchmarkResults(labels, benchmark_names_and_iterations,
- benchmark_keyvals)
- output = self._GetOutput(benchmark_results=results)
- self.assertIn('no result', output.summary_table)
- self.assertEqual(output.charts, '')
- self.assertEqual(output.experiment_file, '')
-
- def testBenchmarkResultSuccess(self):
- labels = ['label1']
- benchmark_names_and_iterations = [('bench1', 1)]
- benchmark_keyvals = {'bench1': [[{'retval': 1, 'foo': 2.0}]]}
- results = BenchmarkResults(labels, benchmark_names_and_iterations,
- benchmark_keyvals)
- output = self._GetOutput(benchmark_results=results)
- self.assertNotIn('no result', output.summary_table)
- self.assertIn('bench1', output.summary_table)
- self.assertIn('bench1', output.full_table)
- self.assertNotEqual(output.charts, '')
- self.assertEqual(output.experiment_file, '')
+ """Tests that the output of a HTML report contains the things we pass in.
+
+ At the moment, this doesn't care deeply about the format in which said
+ things are displayed. It just cares that they're present.
+ """
+
+ _TestOutput = collections.namedtuple(
+ "TestOutput",
+ [
+ "summary_table",
+ "perf_html",
+ "chart_js",
+ "charts",
+ "full_table",
+ "experiment_file",
+ ],
+ )
+
+ @staticmethod
+ def _GetTestOutput(
+ perf_table,
+ chart_js,
+ summary_table,
+ print_table,
+ chart_divs,
+ full_table,
+ experiment_file,
+ ):
+ # N.B. Currently we don't check chart_js; it's just passed through because
+ # cros lint complains otherwise.
+ summary_table = print_table(summary_table, "HTML")
+ perf_html = print_table(perf_table, "HTML")
+ full_table = print_table(full_table, "HTML")
+ return HTMLResultsReportTest._TestOutput(
+ summary_table=summary_table,
+ perf_html=perf_html,
+ chart_js=chart_js,
+ charts=chart_divs,
+ full_table=full_table,
+ experiment_file=experiment_file,
+ )
+
+ def _GetOutput(self, experiment=None, benchmark_results=None):
+ with mock.patch("results_report_templates.GenerateHTMLPage") as standin:
+ if experiment is not None:
+ HTMLResultsReport.FromExperiment(experiment).GetReport()
+ else:
+ HTMLResultsReport(benchmark_results).GetReport()
+ mod_mock = standin
+ self.assertEqual(mod_mock.call_count, 1)
+ # call_args[0] is positional args, call_args[1] is kwargs.
+ self.assertEqual(mod_mock.call_args[0], tuple())
+ fmt_args = mod_mock.call_args[1]
+ return self._GetTestOutput(**fmt_args)
+
+ def testNoSuccessOutput(self):
+ output = self._GetOutput(MakeMockExperiment())
+ self.assertIn("no result", output.summary_table)
+ self.assertIn("no result", output.full_table)
+ self.assertEqual(output.charts, "")
+ self.assertNotEqual(output.experiment_file, "")
+
+ def testSuccessfulOutput(self):
+ num_success = 2
+ success_keyvals = {"retval": 0, "a_float": 3.96}
+ output = self._GetOutput(
+ _InjectSuccesses(MakeMockExperiment(), num_success, success_keyvals)
+ )
+
+ self.assertNotIn("no result", output.summary_table)
+ # self.assertIn(success_keyvals['machine'], output.summary_table)
+ self.assertIn("a_float", output.summary_table)
+ self.assertIn(str(success_keyvals["a_float"]), output.summary_table)
+ self.assertIn("a_float", output.full_table)
+ # The _ in a_float is filtered out when we're generating HTML.
+ self.assertIn("afloat", output.charts)
+ # And make sure we have our experiment file...
+ self.assertNotEqual(output.experiment_file, "")
+
+ def testBenchmarkResultFailure(self):
+ labels = ["label1"]
+ benchmark_names_and_iterations = [("bench1", 1)]
+ benchmark_keyvals = {"bench1": [[]]}
+ results = BenchmarkResults(
+ labels, benchmark_names_and_iterations, benchmark_keyvals
+ )
+ output = self._GetOutput(benchmark_results=results)
+ self.assertIn("no result", output.summary_table)
+ self.assertEqual(output.charts, "")
+ self.assertEqual(output.experiment_file, "")
+
+ def testBenchmarkResultSuccess(self):
+ labels = ["label1"]
+ benchmark_names_and_iterations = [("bench1", 1)]
+ benchmark_keyvals = {"bench1": [[{"retval": 1, "foo": 2.0}]]}
+ results = BenchmarkResults(
+ labels, benchmark_names_and_iterations, benchmark_keyvals
+ )
+ output = self._GetOutput(benchmark_results=results)
+ self.assertNotIn("no result", output.summary_table)
+ self.assertIn("bench1", output.summary_table)
+ self.assertIn("bench1", output.full_table)
+ self.assertNotEqual(output.charts, "")
+ self.assertEqual(output.experiment_file, "")
class JSONResultsReportTest(unittest.TestCase):
- """Tests JSONResultsReport."""
-
- REQUIRED_REPORT_KEYS = ('date', 'time', 'label', 'test_name', 'pass')
- EXPERIMENT_REPORT_KEYS = ('board', 'chromeos_image', 'chromeos_version',
- 'chrome_version', 'compiler')
-
- @staticmethod
- def _GetRequiredKeys(is_experiment):
- required_keys = JSONResultsReportTest.REQUIRED_REPORT_KEYS
- if is_experiment:
- required_keys += JSONResultsReportTest.EXPERIMENT_REPORT_KEYS
- return required_keys
-
- def _CheckRequiredKeys(self, test_output, is_experiment):
- required_keys = self._GetRequiredKeys(is_experiment)
- for output in test_output:
- for key in required_keys:
- self.assertIn(key, output)
-
- def testAllFailedJSONReportOutput(self):
- experiment = MakeMockExperiment()
- results = JSONResultsReport.FromExperiment(experiment).GetReportObject()
- self._CheckRequiredKeys(results, is_experiment=True)
- # Nothing succeeded; we don't send anything more than what's required.
- required_keys = self._GetRequiredKeys(is_experiment=True)
- for result in results:
- self.assertCountEqual(result.keys(), required_keys)
-
- def testJSONReportOutputWithSuccesses(self):
- success_keyvals = {
- 'retval': 0,
- 'a_float': '2.3',
- 'many_floats': [['1.0', '2.0'], ['3.0']],
- 'machine': "i'm a pirate"
- }
-
- # 2 is arbitrary.
- num_success = 2
- experiment = _InjectSuccesses(MakeMockExperiment(), num_success,
- success_keyvals)
- results = JSONResultsReport.FromExperiment(experiment).GetReportObject()
- self._CheckRequiredKeys(results, is_experiment=True)
-
- num_passes = num_success * len(experiment.labels)
- non_failures = [r for r in results if r['pass']]
- self.assertEqual(num_passes, len(non_failures))
-
- # TODO(gbiv): ...Is the 3.0 *actually* meant to be dropped?
- expected_detailed = {'a_float': 2.3, 'many_floats': [1.0, 2.0]}
- for pass_ in non_failures:
- self.assertIn('detailed_results', pass_)
- self.assertDictEqual(expected_detailed, pass_['detailed_results'])
- self.assertIn('machine', pass_)
- self.assertEqual(success_keyvals['machine'], pass_['machine'])
-
- def testFailedJSONReportOutputWithoutExperiment(self):
- labels = ['label1']
- # yapf:disable
- benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2),
- ('bench3', 1), ('bench4', 0)]
- # yapf:enable
-
- benchmark_keyvals = {
- 'bench1': [[{
- 'retval': 1,
- 'foo': 2.0
- }]],
- 'bench2': [[{
- 'retval': 1,
- 'foo': 4.0
- }, {
- 'retval': -1,
- 'bar': 999
- }]],
- # lack of retval is considered a failure.
- 'bench3': [[{}]],
- 'bench4': [[]]
- }
- bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
- benchmark_keyvals)
- results = JSONResultsReport(bench_results).GetReportObject()
- self._CheckRequiredKeys(results, is_experiment=False)
- self.assertFalse(any(r['pass'] for r in results))
-
- def testJSONGetReportObeysJSONSettings(self):
- labels = ['label1']
- benchmark_names_and_iterations = [('bench1', 1)]
- # These can be anything, really. So long as they're distinctive.
- separators = (',\t\n\t', ':\t\n\t')
- benchmark_keyvals = {'bench1': [[{'retval': 0, 'foo': 2.0}]]}
- bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
- benchmark_keyvals)
- reporter = JSONResultsReport(
- bench_results, json_args={'separators': separators})
- result_str = reporter.GetReport()
- self.assertIn(separators[0], result_str)
- self.assertIn(separators[1], result_str)
-
- def testSuccessfulJSONReportOutputWithoutExperiment(self):
- labels = ['label1']
- benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2)]
- benchmark_keyvals = {
- 'bench1': [[{
- 'retval': 0,
- 'foo': 2.0
- }]],
- 'bench2': [[{
- 'retval': 0,
- 'foo': 4.0
- }, {
- 'retval': 0,
- 'bar': 999
- }]]
- }
- bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
- benchmark_keyvals)
- results = JSONResultsReport(bench_results).GetReportObject()
- self._CheckRequiredKeys(results, is_experiment=False)
- self.assertTrue(all(r['pass'] for r in results))
- # Enforce that the results have *some* deterministic order.
- keyfn = lambda r: (r['test_name'], r['detailed_results'].get('foo', 5.0))
- sorted_results = sorted(results, key=keyfn)
- detailed_results = [r['detailed_results'] for r in sorted_results]
- bench1, bench2_foo, bench2_bar = detailed_results
- self.assertEqual(bench1['foo'], 2.0)
- self.assertEqual(bench2_foo['foo'], 4.0)
- self.assertEqual(bench2_bar['bar'], 999)
- self.assertNotIn('bar', bench1)
- self.assertNotIn('bar', bench2_foo)
- self.assertNotIn('foo', bench2_bar)
+ """Tests JSONResultsReport."""
+
+ REQUIRED_REPORT_KEYS = ("date", "time", "label", "test_name", "pass")
+ EXPERIMENT_REPORT_KEYS = (
+ "board",
+ "chromeos_image",
+ "chromeos_version",
+ "chrome_version",
+ "compiler",
+ )
+
+ @staticmethod
+ def _GetRequiredKeys(is_experiment):
+ required_keys = JSONResultsReportTest.REQUIRED_REPORT_KEYS
+ if is_experiment:
+ required_keys += JSONResultsReportTest.EXPERIMENT_REPORT_KEYS
+ return required_keys
+
+ def _CheckRequiredKeys(self, test_output, is_experiment):
+ required_keys = self._GetRequiredKeys(is_experiment)
+ for output in test_output:
+ for key in required_keys:
+ self.assertIn(key, output)
+
+ def testAllFailedJSONReportOutput(self):
+ experiment = MakeMockExperiment()
+ results = JSONResultsReport.FromExperiment(experiment).GetReportObject()
+ self._CheckRequiredKeys(results, is_experiment=True)
+ # Nothing succeeded; we don't send anything more than what's required.
+ required_keys = self._GetRequiredKeys(is_experiment=True)
+ for result in results:
+ self.assertCountEqual(result.keys(), required_keys)
+
+ def testJSONReportOutputWithSuccesses(self):
+ success_keyvals = {
+ "retval": 0,
+ "a_float": "2.3",
+ "many_floats": [["1.0", "2.0"], ["3.0"]],
+ "machine": "i'm a pirate",
+ }
+
+ # 2 is arbitrary.
+ num_success = 2
+ experiment = _InjectSuccesses(
+ MakeMockExperiment(), num_success, success_keyvals
+ )
+ results = JSONResultsReport.FromExperiment(experiment).GetReportObject()
+ self._CheckRequiredKeys(results, is_experiment=True)
+
+ num_passes = num_success * len(experiment.labels)
+ non_failures = [r for r in results if r["pass"]]
+ self.assertEqual(num_passes, len(non_failures))
+
+ # TODO(gbiv): ...Is the 3.0 *actually* meant to be dropped?
+ expected_detailed = {"a_float": 2.3, "many_floats": [1.0, 2.0]}
+ for pass_ in non_failures:
+ self.assertIn("detailed_results", pass_)
+ self.assertDictEqual(expected_detailed, pass_["detailed_results"])
+ self.assertIn("machine", pass_)
+ self.assertEqual(success_keyvals["machine"], pass_["machine"])
+
+ def testFailedJSONReportOutputWithoutExperiment(self):
+ labels = ["label1"]
+ # yapf:disable
+ benchmark_names_and_iterations = [
+ ("bench1", 1),
+ ("bench2", 2),
+ ("bench3", 1),
+ ("bench4", 0),
+ ]
+ # yapf:enable
+
+ benchmark_keyvals = {
+ "bench1": [[{"retval": 1, "foo": 2.0}]],
+ "bench2": [[{"retval": 1, "foo": 4.0}, {"retval": -1, "bar": 999}]],
+ # lack of retval is considered a failure.
+ "bench3": [[{}]],
+ "bench4": [[]],
+ }
+ bench_results = BenchmarkResults(
+ labels, benchmark_names_and_iterations, benchmark_keyvals
+ )
+ results = JSONResultsReport(bench_results).GetReportObject()
+ self._CheckRequiredKeys(results, is_experiment=False)
+ self.assertFalse(any(r["pass"] for r in results))
+
+ def testJSONGetReportObeysJSONSettings(self):
+ labels = ["label1"]
+ benchmark_names_and_iterations = [("bench1", 1)]
+ # These can be anything, really. So long as they're distinctive.
+ separators = (",\t\n\t", ":\t\n\t")
+ benchmark_keyvals = {"bench1": [[{"retval": 0, "foo": 2.0}]]}
+ bench_results = BenchmarkResults(
+ labels, benchmark_names_and_iterations, benchmark_keyvals
+ )
+ reporter = JSONResultsReport(
+ bench_results, json_args={"separators": separators}
+ )
+ result_str = reporter.GetReport()
+ self.assertIn(separators[0], result_str)
+ self.assertIn(separators[1], result_str)
+
+ def testSuccessfulJSONReportOutputWithoutExperiment(self):
+ labels = ["label1"]
+ benchmark_names_and_iterations = [("bench1", 1), ("bench2", 2)]
+ benchmark_keyvals = {
+ "bench1": [[{"retval": 0, "foo": 2.0}]],
+ "bench2": [[{"retval": 0, "foo": 4.0}, {"retval": 0, "bar": 999}]],
+ }
+ bench_results = BenchmarkResults(
+ labels, benchmark_names_and_iterations, benchmark_keyvals
+ )
+ results = JSONResultsReport(bench_results).GetReportObject()
+ self._CheckRequiredKeys(results, is_experiment=False)
+ self.assertTrue(all(r["pass"] for r in results))
+ # Enforce that the results have *some* deterministic order.
+ keyfn = lambda r: (
+ r["test_name"],
+ r["detailed_results"].get("foo", 5.0),
+ )
+ sorted_results = sorted(results, key=keyfn)
+ detailed_results = [r["detailed_results"] for r in sorted_results]
+ bench1, bench2_foo, bench2_bar = detailed_results
+ self.assertEqual(bench1["foo"], 2.0)
+ self.assertEqual(bench2_foo["foo"], 4.0)
+ self.assertEqual(bench2_bar["bar"], 999)
+ self.assertNotIn("bar", bench1)
+ self.assertNotIn("bar", bench2_foo)
+ self.assertNotIn("foo", bench2_bar)
class PerfReportParserTest(unittest.TestCase):
- """Tests for the perf report parser in results_report."""
-
- @staticmethod
- def _ReadRealPerfReport():
- my_dir = os.path.dirname(os.path.realpath(__file__))
- with open(os.path.join(my_dir, 'perf_files/perf.data.report.0')) as f:
- return f.read()
-
- def testParserParsesRealWorldPerfReport(self):
- report = ParseStandardPerfReport(self._ReadRealPerfReport())
- self.assertCountEqual(['cycles', 'instructions'], list(report.keys()))
-
- # Arbitrarily selected known percentages from the perf report.
- known_cycles_percentages = {
- '0xffffffffa4a1f1c9': 0.66,
- '0x0000115bb7ba9b54': 0.47,
- '0x0000000000082e08': 0.00,
- '0xffffffffa4a13e63': 0.00,
- }
- report_cycles = report['cycles']
- self.assertEqual(len(report_cycles), 214)
- for k, v in known_cycles_percentages.items():
- self.assertIn(k, report_cycles)
- self.assertEqual(v, report_cycles[k])
-
- known_instrunctions_percentages = {
- '0x0000115bb6c35d7a': 1.65,
- '0x0000115bb7ba9b54': 0.67,
- '0x0000000000024f56': 0.00,
- '0xffffffffa4a0ee03': 0.00,
- }
- report_instructions = report['instructions']
- self.assertEqual(len(report_instructions), 492)
- for k, v in known_instrunctions_percentages.items():
- self.assertIn(k, report_instructions)
- self.assertEqual(v, report_instructions[k])
-
-
-if __name__ == '__main__':
- test_flag.SetTestMode(True)
- unittest.main()
+ """Tests for the perf report parser in results_report."""
+
+ @staticmethod
+ def _ReadRealPerfReport():
+ my_dir = os.path.dirname(os.path.realpath(__file__))
+ with open(os.path.join(my_dir, "perf_files/perf.data.report.0")) as f:
+ return f.read()
+
+ def testParserParsesRealWorldPerfReport(self):
+ report = ParseStandardPerfReport(self._ReadRealPerfReport())
+ self.assertCountEqual(["cycles", "instructions"], list(report.keys()))
+
+ # Arbitrarily selected known percentages from the perf report.
+ known_cycles_percentages = {
+ "0xffffffffa4a1f1c9": 0.66,
+ "0x0000115bb7ba9b54": 0.47,
+ "0x0000000000082e08": 0.00,
+ "0xffffffffa4a13e63": 0.00,
+ }
+ report_cycles = report["cycles"]
+ self.assertEqual(len(report_cycles), 214)
+ for k, v in known_cycles_percentages.items():
+ self.assertIn(k, report_cycles)
+ self.assertEqual(v, report_cycles[k])
+
+ known_instrunctions_percentages = {
+ "0x0000115bb6c35d7a": 1.65,
+ "0x0000115bb7ba9b54": 0.67,
+ "0x0000000000024f56": 0.00,
+ "0xffffffffa4a0ee03": 0.00,
+ }
+ report_instructions = report["instructions"]
+ self.assertEqual(len(report_instructions), 492)
+ for k, v in known_instrunctions_percentages.items():
+ self.assertIn(k, report_instructions)
+ self.assertEqual(v, report_instructions[k])
+
+
+if __name__ == "__main__":
+ test_flag.SetTestMode(True)
+ unittest.main()
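
As testJSONGetReportObeysJSONSettings above exercises, json_args is forwarded
verbatim to json.dumps, so callers can control serialization of the report. A
small usage sketch, assuming the same results_report imports the tests use and
with invented keyvals:

    from results_report import BenchmarkResults, JSONResultsReport

    labels = ["label1"]
    benchmark_names_and_iterations = [("bench1", 1)]
    benchmark_keyvals = {"bench1": [[{"retval": 0, "a_float": 2.0}]]}

    bench_results = BenchmarkResults(
        labels, benchmark_names_and_iterations, benchmark_keyvals
    )
    # json_args is passed straight through to json.dumps by GetReport().
    reporter = JSONResultsReport(
        bench_results, json_args={"indent": 2, "sort_keys": True}
    )
    print(reporter.GetReport())
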
diff --git a/crosperf/run_tests.sh b/crosperf/run_tests.sh
index d70fc99d..b3d4d1e2 100755
--- a/crosperf/run_tests.sh
+++ b/crosperf/run_tests.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Copyright 2011 Google Inc. All Rights Reserved.
+# Copyright 2011 Google LLC
# Author: raymes@google.com (Raymes Khoury)
../run_tests_for.py .
diff --git a/crosperf/schedv2.py b/crosperf/schedv2.py
index 49c6344d..828b8b81 100644
--- a/crosperf/schedv2.py
+++ b/crosperf/schedv2.py
@@ -1,449 +1,479 @@
# -*- coding: utf-8 -*-
-# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Copyright 2015 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module to optimize the scheduling of benchmark_run tasks."""
-from __future__ import division
-from __future__ import print_function
-
-import sys
-import traceback
from collections import defaultdict
+import sys
from threading import Lock
from threading import Thread
+import traceback
-import test_flag
-
-from machine_image_manager import MachineImageManager
from cros_utils import command_executer
from cros_utils import logger
+from machine_image_manager import MachineImageManager
+import test_flag
class DutWorker(Thread):
- """Working thread for a dut."""
-
- def __init__(self, dut, sched):
- super(DutWorker, self).__init__(name='DutWorker-{}'.format(dut.name))
- self._dut = dut
- self._sched = sched
- self._stat_num_br_run = 0
- self._stat_num_reimage = 0
- self._stat_annotation = ''
- self._logger = logger.GetLogger(self._sched.get_experiment().log_dir)
- self.daemon = True
- self._terminated = False
- self._active_br = None
- # Race condition accessing _active_br between _execute_benchmark_run and
- # _terminate, so lock it up.
- self._active_br_lock = Lock()
-
- def terminate(self):
- self._terminated = True
- with self._active_br_lock:
- if self._active_br is not None:
- # BenchmarkRun.Terminate() terminates any running testcase via
- # suite_runner.Terminate and updates timeline.
- self._active_br.Terminate()
-
- def run(self):
- """Do the "run-test->(optionally reimage)->run-test" chore.
-
- Note - 'br' below means 'benchmark_run'.
- """
-
- # Firstly, handle benchmarkruns that have cache hit.
- br = self._sched.get_cached_benchmark_run()
- while br:
- try:
- self._stat_annotation = 'finishing cached {}'.format(br)
- br.run()
- except RuntimeError:
- traceback.print_exc(file=sys.stdout)
- br = self._sched.get_cached_benchmark_run()
-
- # Secondly, handle benchmarkruns that needs to be run on dut.
- self._setup_dut_label()
- try:
- self._logger.LogOutput('{} started.'.format(self))
- while not self._terminated:
- br = self._sched.get_benchmark_run(self._dut)
- if br is None:
- # No br left for this label. Considering reimaging.
- label = self._sched.allocate_label(self._dut)
- if label is None:
- # No br even for other labels. We are done.
- self._logger.LogOutput('ImageManager found no label '
- 'for dut, stopping working '
- 'thread {}.'.format(self))
- break
- if self._reimage(label):
- # Reimage to run other br fails, dut is doomed, stop
- # this thread.
- self._logger.LogWarning('Re-image failed, dut '
- 'in an unstable state, stopping '
- 'working thread {}.'.format(self))
- break
- else:
- # Execute the br.
- self._execute_benchmark_run(br)
- finally:
- self._stat_annotation = 'finished'
- # Thread finishes. Notify scheduler that I'm done.
- self._sched.dut_worker_finished(self)
-
- def _reimage(self, label):
- """Reimage image to label.
-
- Args:
- label: the label to remimage onto dut.
-
- Returns:
- 0 if successful, otherwise 1.
- """
-
- # Termination could happen anywhere, check it.
- if self._terminated:
- return 1
-
- if self._sched.get_experiment().crosfleet:
- self._logger.LogOutput('Crosfleet mode, do not image before testing.')
- self._dut.label = label
- return 0
-
- self._logger.LogOutput('Reimaging {} using {}'.format(self, label))
- self._stat_num_reimage += 1
- self._stat_annotation = 'reimaging using "{}"'.format(label.name)
- try:
- # Note, only 1 reimage at any given time, this is guaranteed in
- # ImageMachine, so no sync needed below.
- retval = self._sched.get_experiment().machine_manager.ImageMachine(
- self._dut, label)
+ """Working thread for a dut."""
+
+ def __init__(self, dut, sched):
+ super(DutWorker, self).__init__(name="DutWorker-{}".format(dut.name))
+ self._dut = dut
+ self._sched = sched
+ self._stat_num_br_run = 0
+ self._stat_num_reimage = 0
+ self._stat_annotation = ""
+ self._logger = logger.GetLogger(self._sched.get_experiment().log_dir)
+ self.daemon = True
+ self._terminated = False
+ self._active_br = None
+ # Race condition accessing _active_br between _execute_benchmark_run and
+ # _terminate, so lock it up.
+ self._active_br_lock = Lock()
- if retval:
- return 1
- except RuntimeError:
- return 1
+ def terminate(self):
+ self._terminated = True
+ with self._active_br_lock:
+ if self._active_br is not None:
+ # BenchmarkRun.Terminate() terminates any running testcase via
+ # suite_runner.Terminate and updates timeline.
+ self._active_br.Terminate()
- self._dut.label = label
- return 0
+ def run(self):
+ """Do the "run-test->(optionally reimage)->run-test" chore.
- def _execute_benchmark_run(self, br):
- """Execute a single benchmark_run.
+ Note - 'br' below means 'benchmark_run'.
+ """
+
+        # Firstly, handle benchmarkruns that have a cache hit.
+ br = self._sched.get_cached_benchmark_run()
+ while br:
+ try:
+ self._stat_annotation = "finishing cached {}".format(br)
+ br.run()
+ except RuntimeError:
+ traceback.print_exc(file=sys.stdout)
+ br = self._sched.get_cached_benchmark_run()
+
+        # Secondly, handle benchmarkruns that need to be run on the dut.
+ self._setup_dut_label()
+ try:
+ self._logger.LogOutput("{} started.".format(self))
+ while not self._terminated:
+ br = self._sched.get_benchmark_run(self._dut)
+ if br is None:
+ # No br left for this label. Considering reimaging.
+ label = self._sched.allocate_label(self._dut)
+ if label is None:
+ # No br even for other labels. We are done.
+ self._logger.LogOutput(
+ "ImageManager found no label "
+ "for dut, stopping working "
+ "thread {}.".format(self)
+ )
+ break
+ if self._reimage(label):
+ # Reimage to run other br fails, dut is doomed, stop
+ # this thread.
+ self._logger.LogWarning(
+ "Re-image failed, dut "
+ "in an unstable state, stopping "
+ "working thread {}.".format(self)
+ )
+ break
+ else:
+ # Execute the br.
+ self._execute_benchmark_run(br)
+ finally:
+ self._stat_annotation = "finished"
+ # Thread finishes. Notify scheduler that I'm done.
+ self._sched.dut_worker_finished(self)
+
+ def _reimage(self, label):
+ """Reimage image to label.
+
+ Args:
+          label: the label to reimage onto the dut.
+
+ Returns:
+ 0 if successful, otherwise 1.
+ """
+
+ # Termination could happen anywhere, check it.
+ if self._terminated:
+ return 1
+
+ if self._sched.get_experiment().crosfleet:
+ self._logger.LogOutput(
+ "Crosfleet mode, do not image before testing."
+ )
+ self._dut.label = label
+ return 0
+
+ self._logger.LogOutput("Reimaging {} using {}".format(self, label))
+ self._stat_num_reimage += 1
+ self._stat_annotation = 'reimaging using "{}"'.format(label.name)
+ try:
+ # Note, only 1 reimage at any given time, this is guaranteed in
+ # ImageMachine, so no sync needed below.
+ retval = self._sched.get_experiment().machine_manager.ImageMachine(
+ self._dut, label
+ )
+
+ if retval:
+ return 1
+ except RuntimeError:
+ return 1
+
+ self._dut.label = label
+ return 0
+
+ def _execute_benchmark_run(self, br):
+ """Execute a single benchmark_run.
Note - this function never throws exceptions.
- """
+ """
- # Termination could happen anywhere, check it.
- if self._terminated:
- return
-
- self._logger.LogOutput('{} started working on {}'.format(self, br))
- self._stat_num_br_run += 1
- self._stat_annotation = 'executing {}'.format(br)
- # benchmark_run.run does not throws, but just play it safe here.
- try:
- assert br.owner_thread is None
- br.owner_thread = self
- with self._active_br_lock:
- self._active_br = br
- br.run()
- finally:
- self._sched.get_experiment().BenchmarkRunFinished(br)
- with self._active_br_lock:
- self._active_br = None
+ # Termination could happen anywhere, check it.
+ if self._terminated:
+ return
- def _setup_dut_label(self):
- """Try to match dut image with a certain experiment label.
+ self._logger.LogOutput("{} started working on {}".format(self, br))
+ self._stat_num_br_run += 1
+ self._stat_annotation = "executing {}".format(br)
+        # benchmark_run.run does not throw, but just play it safe here.
+ try:
+ assert br.owner_thread is None
+ br.owner_thread = self
+ with self._active_br_lock:
+ self._active_br = br
+ br.run()
+ finally:
+ self._sched.get_experiment().BenchmarkRunFinished(br)
+ with self._active_br_lock:
+ self._active_br = None
+
+ def _setup_dut_label(self):
+ """Try to match dut image with a certain experiment label.
If such a match is found, we skip reimaging and jump to executing
some benchmark_runs.
- """
-
- checksum_file = '/usr/local/osimage_checksum_file'
- try:
- rv, checksum, _ = command_executer.GetCommandExecuter().\
- CrosRunCommandWOutput(
- 'cat ' + checksum_file,
- chromeos_root=self._sched.get_labels(0).chromeos_root,
- machine=self._dut.name,
- print_to_console=False)
- if rv == 0:
- checksum = checksum.strip()
- for l in self._sched.get_labels():
- if l.checksum == checksum:
- self._logger.LogOutput("Dut '{}' is pre-installed with '{}'".format(
- self._dut.name, l))
- self._dut.label = l
- return
- except RuntimeError:
- traceback.print_exc(file=sys.stdout)
- self._dut.label = None
-
- def __str__(self):
- return 'DutWorker[dut="{}", label="{}"]'.format(
- self._dut.name, self._dut.label.name if self._dut.label else 'None')
-
- def dut(self):
- return self._dut
-
- def status_str(self):
- """Report thread status."""
-
- return ('Worker thread "{}", label="{}", benchmark_run={}, '
- 'reimage={}, now {}'.format(
+ """
+
+ checksum_file = "/usr/local/osimage_checksum_file"
+ try:
+ (
+ rv,
+ checksum,
+ _,
+ ) = command_executer.GetCommandExecuter().CrosRunCommandWOutput(
+ "cat " + checksum_file,
+ chromeos_root=self._sched.get_labels(0).chromeos_root,
+ machine=self._dut.name,
+ print_to_console=False,
+ )
+ if rv == 0:
+ checksum = checksum.strip()
+ for l in self._sched.get_labels():
+ if l.checksum == checksum:
+ self._logger.LogOutput(
+ "Dut '{}' is pre-installed with '{}'".format(
+ self._dut.name, l
+ )
+ )
+ self._dut.label = l
+ return
+ except RuntimeError:
+ traceback.print_exc(file=sys.stdout)
+ self._dut.label = None
+
+ def __str__(self):
+ return 'DutWorker[dut="{}", label="{}"]'.format(
+ self._dut.name, self._dut.label.name if self._dut.label else "None"
+ )
+
+ def dut(self):
+ return self._dut
+
+ def status_str(self):
+ """Report thread status."""
+
+ return (
+ 'Worker thread "{}", label="{}", benchmark_run={}, '
+ "reimage={}, now {}".format(
self._dut.name,
- 'None' if self._dut.label is None else self._dut.label.name,
- self._stat_num_br_run, self._stat_num_reimage,
- self._stat_annotation))
+ "None" if self._dut.label is None else self._dut.label.name,
+ self._stat_num_br_run,
+ self._stat_num_reimage,
+ self._stat_annotation,
+ )
+ )
class BenchmarkRunCacheReader(Thread):
- """The thread to read cache for a list of benchmark_runs.
+ """The thread to read cache for a list of benchmark_runs.
On creation, each instance of this class is given a br_list, which is a
subset of experiment._benchmark_runs.
- """
-
- def __init__(self, schedv2, br_list):
- super(BenchmarkRunCacheReader, self).__init__()
- self._schedv2 = schedv2
- self._br_list = br_list
- self._logger = self._schedv2.get_logger()
-
- def run(self):
- for br in self._br_list:
- try:
- br.ReadCache()
- if br.cache_hit:
- self._logger.LogOutput('Cache hit - {}'.format(br))
- with self._schedv2.lock_on('_cached_br_list'):
- self._schedv2.get_cached_run_list().append(br)
- else:
- self._logger.LogOutput('Cache not hit - {}'.format(br))
- except RuntimeError:
- traceback.print_exc(file=sys.stderr)
+ """
+
+ def __init__(self, schedv2, br_list):
+ super(BenchmarkRunCacheReader, self).__init__()
+ self._schedv2 = schedv2
+ self._br_list = br_list
+ self._logger = self._schedv2.get_logger()
+
+ def run(self):
+ for br in self._br_list:
+ try:
+ br.ReadCache()
+ if br.cache_hit:
+ self._logger.LogOutput("Cache hit - {}".format(br))
+ with self._schedv2.lock_on("_cached_br_list"):
+ self._schedv2.get_cached_run_list().append(br)
+ else:
+ self._logger.LogOutput("Cache not hit - {}".format(br))
+ except RuntimeError:
+ traceback.print_exc(file=sys.stderr)
class Schedv2(object):
- """New scheduler for crosperf."""
+ """New scheduler for crosperf."""
- def __init__(self, experiment):
- self._experiment = experiment
- self._logger = logger.GetLogger(experiment.log_dir)
+ def __init__(self, experiment):
+ self._experiment = experiment
+ self._logger = logger.GetLogger(experiment.log_dir)
- # Create shortcuts to nested data structure. "_duts" points to a list of
- # locked machines. _labels points to a list of all labels.
- self._duts = self._experiment.machine_manager.GetMachines()
- self._labels = self._experiment.labels
+ # Create shortcuts to nested data structure. "_duts" points to a list of
+ # locked machines. _labels points to a list of all labels.
+ self._duts = self._experiment.machine_manager.GetMachines()
+ self._labels = self._experiment.labels
- # Bookkeeping for synchronization.
- self._workers_lock = Lock()
- # pylint: disable=unnecessary-lambda
- self._lock_map = defaultdict(lambda: Lock())
+ # Bookkeeping for synchronization.
+ self._workers_lock = Lock()
+ # pylint: disable=unnecessary-lambda
+ self._lock_map = defaultdict(lambda: Lock())
- # Test mode flag
- self._in_test_mode = test_flag.GetTestMode()
+ # Test mode flag
+ self._in_test_mode = test_flag.GetTestMode()
- # Read benchmarkrun cache.
- self._read_br_cache()
+ # Read benchmarkrun cache.
+ self._read_br_cache()
- # Mapping from label to a list of benchmark_runs.
- self._label_brl_map = dict((l, []) for l in self._labels)
- for br in self._experiment.benchmark_runs:
- assert br.label in self._label_brl_map
- # Only put no-cache-hit br into the map.
- if br not in self._cached_br_list:
- self._label_brl_map[br.label].append(br)
+ # Mapping from label to a list of benchmark_runs.
+ self._label_brl_map = dict((l, []) for l in self._labels)
+ for br in self._experiment.benchmark_runs:
+ assert br.label in self._label_brl_map
+ # Only put no-cache-hit br into the map.
+ if br not in self._cached_br_list:
+ self._label_brl_map[br.label].append(br)
- # Use machine image manager to calculate initial label allocation.
- self._mim = MachineImageManager(self._labels, self._duts)
- self._mim.compute_initial_allocation()
+ # Use machine image manager to calculate initial label allocation.
+ self._mim = MachineImageManager(self._labels, self._duts)
+ self._mim.compute_initial_allocation()
- # Create worker thread, 1 per dut.
- self._active_workers = [DutWorker(dut, self) for dut in self._duts]
- self._finished_workers = []
+ # Create worker thread, 1 per dut.
+ self._active_workers = [DutWorker(dut, self) for dut in self._duts]
+ self._finished_workers = []
- # Termination flag.
- self._terminated = False
+ # Termination flag.
+ self._terminated = False
- def run_sched(self):
- """Start all dut worker threads and return immediately."""
+ def run_sched(self):
+ """Start all dut worker threads and return immediately."""
- for w in self._active_workers:
- w.start()
+ for w in self._active_workers:
+ w.start()
- def _read_br_cache(self):
- """Use multi-threading to read cache for all benchmarkruns.
+ def _read_br_cache(self):
+ """Use multi-threading to read cache for all benchmarkruns.
We do this by first creating a few threads, and then assigning each
thread a segment of all brs. Each thread will check cache status for
each br and put those with a cache hit into '_cached_br_list'.
- """
-
- self._cached_br_list = []
- n_benchmarkruns = len(self._experiment.benchmark_runs)
- if n_benchmarkruns <= 4:
- # Use single thread to read cache.
- self._logger.LogOutput(('Starting to read cache status for '
- '{} benchmark runs ...').format(n_benchmarkruns))
- BenchmarkRunCacheReader(self, self._experiment.benchmark_runs).run()
- return
-
- # Split benchmarkruns set into segments. Each segment will be handled by
- # a thread. Note, we use (x+3)/4 to mimic math.ceil(x/4).
- n_threads = max(2, min(20, (n_benchmarkruns + 3) // 4))
- self._logger.LogOutput(
- ('Starting {} threads to read cache status for '
- '{} benchmark runs ...').format(n_threads, n_benchmarkruns))
- benchmarkruns_per_thread = (n_benchmarkruns + n_threads - 1) // n_threads
- benchmarkrun_segments = []
- for i in range(n_threads - 1):
- start = i * benchmarkruns_per_thread
- end = (i + 1) * benchmarkruns_per_thread
- benchmarkrun_segments.append(self._experiment.benchmark_runs[start:end])
- benchmarkrun_segments.append(
- self._experiment.benchmark_runs[(n_threads - 1) *
- benchmarkruns_per_thread:])
-
- # Assert: aggregation of benchmarkrun_segments equals to benchmark_runs.
- assert sum(len(x) for x in benchmarkrun_segments) == n_benchmarkruns
-
- # Create and start all readers.
- cache_readers = [
- BenchmarkRunCacheReader(self, x) for x in benchmarkrun_segments
- ]
-
- for x in cache_readers:
- x.start()
-
- # Wait till all readers finish.
- for x in cache_readers:
- x.join()
-
- # Summarize.
- self._logger.LogOutput(
- 'Total {} cache hit out of {} benchmark_runs.'.format(
- len(self._cached_br_list), n_benchmarkruns))
-
- def get_cached_run_list(self):
- return self._cached_br_list
-
- def get_label_map(self):
- return self._label_brl_map
-
- def get_experiment(self):
- return self._experiment
-
- def get_labels(self, i=None):
- if i is None:
- return self._labels
- return self._labels[i]
-
- def get_logger(self):
- return self._logger
-
- def get_cached_benchmark_run(self):
- """Get a benchmark_run with 'cache hit'.
-
- Returns:
- The benchmark that has cache hit, if any. Otherwise none.
- """
-
- with self.lock_on('_cached_br_list'):
- if self._cached_br_list:
- return self._cached_br_list.pop()
- return None
-
- def get_benchmark_run(self, dut):
- """Get a benchmark_run (br) object for a certain dut.
-
- Args:
- dut: the dut for which a br is returned.
-
- Returns:
- A br with its label matching that of the dut. If no such br could be
- found, return None (this usually means a reimage is required for the
- dut).
- """
-
- # If terminated, stop providing any br.
- if self._terminated:
- return None
-
- # If dut bears an unrecognized label, return None.
- if dut.label is None:
- return None
-
- # If br list for the dut's label is empty (that means all brs for this
- # label have been done), return None.
- with self.lock_on(dut.label):
- brl = self._label_brl_map[dut.label]
- if not brl:
- return None
- # Return the first br.
- return brl.pop(0)
-
- def allocate_label(self, dut):
- """Allocate a label to a dut.
-
- The work is delegated to MachineImageManager.
+ """
+
+ self._cached_br_list = []
+ n_benchmarkruns = len(self._experiment.benchmark_runs)
+ if n_benchmarkruns <= 4:
+ # Use single thread to read cache.
+ self._logger.LogOutput(
+ (
+ "Starting to read cache status for " "{} benchmark runs ..."
+ ).format(n_benchmarkruns)
+ )
+ BenchmarkRunCacheReader(self, self._experiment.benchmark_runs).run()
+ return
- The dut_worker calling this method is responsible for reimage the dut to
- this label.
+ # Split benchmarkruns set into segments. Each segment will be handled by
+ # a thread. Note, we use (x+3)/4 to mimic math.ceil(x/4).
+ n_threads = max(2, min(20, (n_benchmarkruns + 3) // 4))
+ self._logger.LogOutput(
+ (
+ "Starting {} threads to read cache status for "
+ "{} benchmark runs ..."
+ ).format(n_threads, n_benchmarkruns)
+ )
+ benchmarkruns_per_thread = (
+ n_benchmarkruns + n_threads - 1
+ ) // n_threads
+ benchmarkrun_segments = []
+ for i in range(n_threads - 1):
+ start = i * benchmarkruns_per_thread
+ end = (i + 1) * benchmarkruns_per_thread
+ benchmarkrun_segments.append(
+ self._experiment.benchmark_runs[start:end]
+ )
+ benchmarkrun_segments.append(
+ self._experiment.benchmark_runs[
+ (n_threads - 1) * benchmarkruns_per_thread :
+ ]
+ )
+
+ # Assert: aggregation of benchmarkrun_segments equals to benchmark_runs.
+ assert sum(len(x) for x in benchmarkrun_segments) == n_benchmarkruns
+
+ # Create and start all readers.
+ cache_readers = [
+ BenchmarkRunCacheReader(self, x) for x in benchmarkrun_segments
+ ]
+
+ for x in cache_readers:
+ x.start()
+
+ # Wait till all readers finish.
+ for x in cache_readers:
+ x.join()
+
+ # Summarize.
+ self._logger.LogOutput(
+ "Total {} cache hit out of {} benchmark_runs.".format(
+ len(self._cached_br_list), n_benchmarkruns
+ )
+ )
+
+ def get_cached_run_list(self):
+ return self._cached_br_list
+
+ def get_label_map(self):
+ return self._label_brl_map
+
+ def get_experiment(self):
+ return self._experiment
+
+ def get_labels(self, i=None):
+ if i is None:
+ return self._labels
+ return self._labels[i]
+
+ def get_logger(self):
+ return self._logger
+
+ def get_cached_benchmark_run(self):
+ """Get a benchmark_run with 'cache hit'.
+
+ Returns:
+ The benchmark_run that has a cache hit, if any; otherwise None.
+ """
+
+ with self.lock_on("_cached_br_list"):
+ if self._cached_br_list:
+ return self._cached_br_list.pop()
+ return None
+
+ def get_benchmark_run(self, dut):
+ """Get a benchmark_run (br) object for a certain dut.
+
+ Args:
+ dut: the dut for which a br is returned.
+
+ Returns:
+ A br with its label matching that of the dut. If no such br could be
+ found, return None (this usually means a reimage is required for the
+ dut).
+ """
+
+ # If terminated, stop providing any br.
+ if self._terminated:
+ return None
+
+ # If dut bears an unrecognized label, return None.
+ if dut.label is None:
+ return None
+
+ # If br list for the dut's label is empty (that means all brs for this
+ # label have been done), return None.
+ with self.lock_on(dut.label):
+ brl = self._label_brl_map[dut.label]
+ if not brl:
+ return None
+ # Return the first br.
+ return brl.pop(0)
+
+ def allocate_label(self, dut):
+ """Allocate a label to a dut.
+
+ The work is delegated to MachineImageManager.
+
+ The dut_worker calling this method is responsible for reimaging the dut to
+ this label.
- Args:
- dut: the new label that is to be reimaged onto the dut.
+ Args:
+ dut: the dut that the allocated label will be reimaged onto.
- Returns:
- The label or None.
- """
+ Returns:
+ The label or None.
+ """
- if self._terminated:
- return None
+ if self._terminated:
+ return None
- return self._mim.allocate(dut, self)
+ return self._mim.allocate(dut, self)
- def dut_worker_finished(self, dut_worker):
- """Notify schedv2 that the dut_worker thread finished.
+ def dut_worker_finished(self, dut_worker):
+ """Notify schedv2 that the dut_worker thread finished.
- Args:
- dut_worker: the thread that is about to end.
- """
+ Args:
+ dut_worker: the thread that is about to end.
+ """
- self._logger.LogOutput('{} finished.'.format(dut_worker))
- with self._workers_lock:
- self._active_workers.remove(dut_worker)
- self._finished_workers.append(dut_worker)
+ self._logger.LogOutput("{} finished.".format(dut_worker))
+ with self._workers_lock:
+ self._active_workers.remove(dut_worker)
+ self._finished_workers.append(dut_worker)
- def is_complete(self):
- return len(self._active_workers) == 0
+ def is_complete(self):
+ return len(self._active_workers) == 0
- def lock_on(self, my_object):
- return self._lock_map[my_object]
+ def lock_on(self, my_object):
+ return self._lock_map[my_object]
- def terminate(self):
- """Mark flag so we stop providing br/reimages.
+ def terminate(self):
+ """Mark flag so we stop providing br/reimages.
Also terminate each DutWorker, so they refuse to execute br or reimage.
- """
-
- self._terminated = True
- for dut_worker in self._active_workers:
- dut_worker.terminate()
-
- def threads_status_as_string(self):
- """Report the dut worker threads status."""
-
- status = '{} active threads, {} finished threads.\n'.format(
- len(self._active_workers), len(self._finished_workers))
- status += ' Active threads:'
- for dw in self._active_workers:
- status += '\n ' + dw.status_str()
- if self._finished_workers:
- status += '\n Finished threads:'
- for dw in self._finished_workers:
- status += '\n ' + dw.status_str()
- return status
+ """
+
+ self._terminated = True
+ for dut_worker in self._active_workers:
+ dut_worker.terminate()
+
+ def threads_status_as_string(self):
+ """Report the dut worker threads status."""
+
+ status = "{} active threads, {} finished threads.\n".format(
+ len(self._active_workers), len(self._finished_workers)
+ )
+ status += " Active threads:"
+ for dw in self._active_workers:
+ status += "\n " + dw.status_str()
+ if self._finished_workers:
+ status += "\n Finished threads:"
+ for dw in self._finished_workers:
+ status += "\n " + dw.status_str()
+ return status
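
The cache-reading path above splits the benchmark_run list across reader threads with integer arithmetic rather than math.ceil. The following standalone sketch (not part of crosperf; a plain list of placeholder items stands in for experiment.benchmark_runs) reproduces that segmentation so the thread counts can be checked in isolation:

    # Standalone approximation of the segment splitting in Schedv2._read_br_cache().
    def split_into_segments(items):
        n = len(items)
        if n <= 4:
            # The real code hands everything to a single reader in this case.
            return [items]
        # (n + 3) // 4 mimics math.ceil(n / 4); the thread count is clamped to [2, 20].
        n_threads = max(2, min(20, (n + 3) // 4))
        per_thread = (n + n_threads - 1) // n_threads
        segments = [
            items[i * per_thread : (i + 1) * per_thread]
            for i in range(n_threads - 1)
        ]
        segments.append(items[(n_threads - 1) * per_thread :])
        assert sum(len(s) for s in segments) == n
        return segments

    # 18 benchmark runs -> 5 segments of sizes [4, 4, 4, 4, 2].
    print([len(s) for s in split_into_segments(list(range(18)))])
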
diff --git a/crosperf/schedv2_unittest.py b/crosperf/schedv2_unittest.py
index 7b56d723..db5f5feb 100755
--- a/crosperf/schedv2_unittest.py
+++ b/crosperf/schedv2_unittest.py
@@ -1,13 +1,12 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Copyright 2015 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This contains the unit tests for the new Crosperf task scheduler."""
-from __future__ import print_function
import functools
import io
@@ -15,12 +14,13 @@ import unittest
import unittest.mock as mock
import benchmark_run
-import test_flag
+from cros_utils.command_executer import CommandExecuter
from experiment_factory import ExperimentFactory
from experiment_file import ExperimentFile
-from cros_utils.command_executer import CommandExecuter
from experiment_runner_unittest import FakeLogger
from schedv2 import Schedv2
+import test_flag
+
EXPERIMENT_FILE_1 = """\
board: daisy
@@ -66,160 +66,184 @@ image2 {{
class Schedv2Test(unittest.TestCase):
- """Class for setting up and running the unit tests."""
+ """Class for setting up and running the unit tests."""
- def setUp(self):
- self.exp = None
+ def setUp(self):
+ self.exp = None
- mock_logger = FakeLogger()
- mock_cmd_exec = mock.Mock(spec=CommandExecuter)
+ mock_logger = FakeLogger()
+ mock_cmd_exec = mock.Mock(spec=CommandExecuter)
- @mock.patch('benchmark_run.BenchmarkRun', new=benchmark_run.MockBenchmarkRun)
- def _make_fake_experiment(self, expstr):
- """Create fake experiment from string.
+ @mock.patch(
+ "benchmark_run.BenchmarkRun", new=benchmark_run.MockBenchmarkRun
+ )
+ def _make_fake_experiment(self, expstr):
+ """Create fake experiment from string.
Note - we mock out BenchmarkRun in this step.
- """
- experiment_file = ExperimentFile(io.StringIO(expstr))
- experiment = ExperimentFactory().GetExperiment(
- experiment_file, working_directory='', log_dir='')
- return experiment
-
- def test_remote(self):
- """Test that remotes in labels are aggregated into experiment.remote."""
-
- self.exp = self._make_fake_experiment(EXPERIMENT_FILE_1)
- self.exp.log_level = 'verbose'
- my_schedv2 = Schedv2(self.exp)
- self.assertFalse(my_schedv2.is_complete())
- self.assertIn('chromeos-daisy1.cros', self.exp.remote)
- self.assertIn('chromeos-daisy2.cros', self.exp.remote)
- self.assertIn('chromeos-daisy3.cros', self.exp.remote)
- self.assertIn('chromeos-daisy4.cros', self.exp.remote)
- self.assertIn('chromeos-daisy5.cros', self.exp.remote)
-
- def test_unreachable_remote(self):
- """Test unreachable remotes are removed from experiment and label."""
-
- def MockIsReachable(cm):
- return (cm.name != 'chromeos-daisy3.cros' and
- cm.name != 'chromeos-daisy5.cros')
-
- with mock.patch(
- 'machine_manager.MockCrosMachine.IsReachable', new=MockIsReachable):
- self.exp = self._make_fake_experiment(EXPERIMENT_FILE_1)
- self.assertIn('chromeos-daisy1.cros', self.exp.remote)
- self.assertIn('chromeos-daisy2.cros', self.exp.remote)
- self.assertNotIn('chromeos-daisy3.cros', self.exp.remote)
- self.assertIn('chromeos-daisy4.cros', self.exp.remote)
- self.assertNotIn('chromeos-daisy5.cros', self.exp.remote)
-
- for l in self.exp.labels:
- if l.name == 'image2':
- self.assertNotIn('chromeos-daisy5.cros', l.remote)
- self.assertIn('chromeos-daisy4.cros', l.remote)
- elif l.name == 'image1':
- self.assertNotIn('chromeos-daisy3.cros', l.remote)
-
- @mock.patch('schedv2.BenchmarkRunCacheReader')
- def test_BenchmarkRunCacheReader_1(self, reader):
- """Test benchmarkrun set is split into 5 segments."""
-
- self.exp = self._make_fake_experiment(
- EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=9))
- my_schedv2 = Schedv2(self.exp)
- self.assertFalse(my_schedv2.is_complete())
- # We have 9 * 2 == 18 brs, we use 5 threads, each reading 4, 4, 4,
- # 4, 2 brs respectively.
- # Assert that BenchmarkRunCacheReader() is called 5 times.
- self.assertEqual(reader.call_count, 5)
- # reader.call_args_list[n] - nth call.
- # reader.call_args_list[n][0] - positioned args in nth call.
- # reader.call_args_list[n][0][1] - the 2nd arg in nth call,
- # that is 'br_list' in 'schedv2.BenchmarkRunCacheReader'.
- self.assertEqual(len(reader.call_args_list[0][0][1]), 4)
- self.assertEqual(len(reader.call_args_list[1][0][1]), 4)
- self.assertEqual(len(reader.call_args_list[2][0][1]), 4)
- self.assertEqual(len(reader.call_args_list[3][0][1]), 4)
- self.assertEqual(len(reader.call_args_list[4][0][1]), 2)
-
- @mock.patch('schedv2.BenchmarkRunCacheReader')
- def test_BenchmarkRunCacheReader_2(self, reader):
- """Test benchmarkrun set is split into 4 segments."""
-
- self.exp = self._make_fake_experiment(
- EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=8))
- my_schedv2 = Schedv2(self.exp)
- self.assertFalse(my_schedv2.is_complete())
- # We have 8 * 2 == 16 brs, we use 4 threads, each reading 4 brs.
- self.assertEqual(reader.call_count, 4)
- self.assertEqual(len(reader.call_args_list[0][0][1]), 4)
- self.assertEqual(len(reader.call_args_list[1][0][1]), 4)
- self.assertEqual(len(reader.call_args_list[2][0][1]), 4)
- self.assertEqual(len(reader.call_args_list[3][0][1]), 4)
-
- @mock.patch('schedv2.BenchmarkRunCacheReader')
- def test_BenchmarkRunCacheReader_3(self, reader):
- """Test benchmarkrun set is split into 2 segments."""
-
- self.exp = self._make_fake_experiment(
- EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=3))
- my_schedv2 = Schedv2(self.exp)
- self.assertFalse(my_schedv2.is_complete())
- # We have 3 * 2 == 6 brs, we use 2 threads.
- self.assertEqual(reader.call_count, 2)
- self.assertEqual(len(reader.call_args_list[0][0][1]), 3)
- self.assertEqual(len(reader.call_args_list[1][0][1]), 3)
-
- @mock.patch('schedv2.BenchmarkRunCacheReader')
- def test_BenchmarkRunCacheReader_4(self, reader):
- """Test benchmarkrun set is not splitted."""
-
- self.exp = self._make_fake_experiment(
- EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=1))
- my_schedv2 = Schedv2(self.exp)
- self.assertFalse(my_schedv2.is_complete())
- # We have 1 * 2 == 2 br, so only 1 instance.
- self.assertEqual(reader.call_count, 1)
- self.assertEqual(len(reader.call_args_list[0][0][1]), 2)
-
- def test_cachehit(self):
- """Test cache-hit and none-cache-hit brs are properly organized."""
-
- def MockReadCache(br):
- br.cache_hit = (br.label.name == 'image2')
-
- with mock.patch(
- 'benchmark_run.MockBenchmarkRun.ReadCache', new=MockReadCache):
- # We have 2 * 30 brs, half of which are put into _cached_br_list.
- self.exp = self._make_fake_experiment(
- EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=30))
- my_schedv2 = Schedv2(self.exp)
- self.assertEqual(len(my_schedv2.get_cached_run_list()), 30)
- # The non-cache-hit brs are put into Schedv2._label_brl_map.
- self.assertEqual(
- functools.reduce(lambda a, x: a + len(x[1]),
- my_schedv2.get_label_map().items(), 0), 30)
-
- def test_nocachehit(self):
- """Test no cache-hit."""
-
- def MockReadCache(br):
- br.cache_hit = False
-
- with mock.patch(
- 'benchmark_run.MockBenchmarkRun.ReadCache', new=MockReadCache):
- # We have 2 * 30 brs, none of which are put into _cached_br_list.
- self.exp = self._make_fake_experiment(
- EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=30))
- my_schedv2 = Schedv2(self.exp)
- self.assertEqual(len(my_schedv2.get_cached_run_list()), 0)
- # The non-cache-hit brs are put into Schedv2._label_brl_map.
- self.assertEqual(
- functools.reduce(lambda a, x: a + len(x[1]),
- my_schedv2.get_label_map().items(), 0), 60)
-
-
-if __name__ == '__main__':
- test_flag.SetTestMode(True)
- unittest.main()
+ """
+ experiment_file = ExperimentFile(io.StringIO(expstr))
+ experiment = ExperimentFactory().GetExperiment(
+ experiment_file, working_directory="", log_dir=""
+ )
+ return experiment
+
+ def test_remote(self):
+ """Test that remotes in labels are aggregated into experiment.remote."""
+
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_1)
+ self.exp.log_level = "verbose"
+ my_schedv2 = Schedv2(self.exp)
+ self.assertFalse(my_schedv2.is_complete())
+ self.assertIn("chromeos-daisy1.cros", self.exp.remote)
+ self.assertIn("chromeos-daisy2.cros", self.exp.remote)
+ self.assertIn("chromeos-daisy3.cros", self.exp.remote)
+ self.assertIn("chromeos-daisy4.cros", self.exp.remote)
+ self.assertIn("chromeos-daisy5.cros", self.exp.remote)
+
+ def test_unreachable_remote(self):
+ """Test unreachable remotes are removed from experiment and label."""
+
+ def MockIsReachable(cm):
+ return (
+ cm.name != "chromeos-daisy3.cros"
+ and cm.name != "chromeos-daisy5.cros"
+ )
+
+ with mock.patch(
+ "machine_manager.MockCrosMachine.IsReachable", new=MockIsReachable
+ ):
+ self.exp = self._make_fake_experiment(EXPERIMENT_FILE_1)
+ self.assertIn("chromeos-daisy1.cros", self.exp.remote)
+ self.assertIn("chromeos-daisy2.cros", self.exp.remote)
+ self.assertNotIn("chromeos-daisy3.cros", self.exp.remote)
+ self.assertIn("chromeos-daisy4.cros", self.exp.remote)
+ self.assertNotIn("chromeos-daisy5.cros", self.exp.remote)
+
+ for l in self.exp.labels:
+ if l.name == "image2":
+ self.assertNotIn("chromeos-daisy5.cros", l.remote)
+ self.assertIn("chromeos-daisy4.cros", l.remote)
+ elif l.name == "image1":
+ self.assertNotIn("chromeos-daisy3.cros", l.remote)
+
+ @mock.patch("schedv2.BenchmarkRunCacheReader")
+ def test_BenchmarkRunCacheReader_1(self, reader):
+ """Test benchmarkrun set is split into 5 segments."""
+
+ self.exp = self._make_fake_experiment(
+ EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=9)
+ )
+ my_schedv2 = Schedv2(self.exp)
+ self.assertFalse(my_schedv2.is_complete())
+ # We have 9 * 2 == 18 brs, we use 5 threads, each reading 4, 4, 4,
+ # 4, 2 brs respectively.
+ # Assert that BenchmarkRunCacheReader() is called 5 times.
+ self.assertEqual(reader.call_count, 5)
+ # reader.call_args_list[n] - nth call.
+ # reader.call_args_list[n][0] - positioned args in nth call.
+ # reader.call_args_list[n][0][1] - the 2nd arg in nth call,
+ # that is 'br_list' in 'schedv2.BenchmarkRunCacheReader'.
+ self.assertEqual(len(reader.call_args_list[0][0][1]), 4)
+ self.assertEqual(len(reader.call_args_list[1][0][1]), 4)
+ self.assertEqual(len(reader.call_args_list[2][0][1]), 4)
+ self.assertEqual(len(reader.call_args_list[3][0][1]), 4)
+ self.assertEqual(len(reader.call_args_list[4][0][1]), 2)
+
+ @mock.patch("schedv2.BenchmarkRunCacheReader")
+ def test_BenchmarkRunCacheReader_2(self, reader):
+ """Test benchmarkrun set is split into 4 segments."""
+
+ self.exp = self._make_fake_experiment(
+ EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=8)
+ )
+ my_schedv2 = Schedv2(self.exp)
+ self.assertFalse(my_schedv2.is_complete())
+ # We have 8 * 2 == 16 brs, we use 4 threads, each reading 4 brs.
+ self.assertEqual(reader.call_count, 4)
+ self.assertEqual(len(reader.call_args_list[0][0][1]), 4)
+ self.assertEqual(len(reader.call_args_list[1][0][1]), 4)
+ self.assertEqual(len(reader.call_args_list[2][0][1]), 4)
+ self.assertEqual(len(reader.call_args_list[3][0][1]), 4)
+
+ @mock.patch("schedv2.BenchmarkRunCacheReader")
+ def test_BenchmarkRunCacheReader_3(self, reader):
+ """Test benchmarkrun set is split into 2 segments."""
+
+ self.exp = self._make_fake_experiment(
+ EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=3)
+ )
+ my_schedv2 = Schedv2(self.exp)
+ self.assertFalse(my_schedv2.is_complete())
+ # We have 3 * 2 == 6 brs, we use 2 threads.
+ self.assertEqual(reader.call_count, 2)
+ self.assertEqual(len(reader.call_args_list[0][0][1]), 3)
+ self.assertEqual(len(reader.call_args_list[1][0][1]), 3)
+
+ @mock.patch("schedv2.BenchmarkRunCacheReader")
+ def test_BenchmarkRunCacheReader_4(self, reader):
+ """Test benchmarkrun set is not splitted."""
+
+ self.exp = self._make_fake_experiment(
+ EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=1)
+ )
+ my_schedv2 = Schedv2(self.exp)
+ self.assertFalse(my_schedv2.is_complete())
+ # We have 1 * 2 == 2 br, so only 1 instance.
+ self.assertEqual(reader.call_count, 1)
+ self.assertEqual(len(reader.call_args_list[0][0][1]), 2)
+
+ def test_cachehit(self):
+ """Test cache-hit and none-cache-hit brs are properly organized."""
+
+ def MockReadCache(br):
+ br.cache_hit = br.label.name == "image2"
+
+ with mock.patch(
+ "benchmark_run.MockBenchmarkRun.ReadCache", new=MockReadCache
+ ):
+ # We have 2 * 30 brs, half of which are put into _cached_br_list.
+ self.exp = self._make_fake_experiment(
+ EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=30)
+ )
+ my_schedv2 = Schedv2(self.exp)
+ self.assertEqual(len(my_schedv2.get_cached_run_list()), 30)
+ # The non-cache-hit brs are put into Schedv2._label_brl_map.
+ self.assertEqual(
+ functools.reduce(
+ lambda a, x: a + len(x[1]),
+ my_schedv2.get_label_map().items(),
+ 0,
+ ),
+ 30,
+ )
+
+ def test_nocachehit(self):
+ """Test no cache-hit."""
+
+ def MockReadCache(br):
+ br.cache_hit = False
+
+ with mock.patch(
+ "benchmark_run.MockBenchmarkRun.ReadCache", new=MockReadCache
+ ):
+ # We have 2 * 30 brs, none of which are put into _cached_br_list.
+ self.exp = self._make_fake_experiment(
+ EXPERIMENT_FILE_WITH_FORMAT.format(kraken_iterations=30)
+ )
+ my_schedv2 = Schedv2(self.exp)
+ self.assertEqual(len(my_schedv2.get_cached_run_list()), 0)
+ # The non-cache-hit brs are put into Schedv2._label_brl_map.
+ self.assertEqual(
+ functools.reduce(
+ lambda a, x: a + len(x[1]),
+ my_schedv2.get_label_map().items(),
+ 0,
+ ),
+ 60,
+ )
+
+
+if __name__ == "__main__":
+ test_flag.SetTestMode(True)
+ unittest.main()
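
The assertions above count the runs left in Schedv2._label_brl_map with functools.reduce over get_label_map().items(). The toy example below (a hypothetical label map, with strings standing in for benchmark_run objects) shows that this is simply the total of the per-label list lengths:

    import functools

    # Hypothetical map shaped like Schedv2.get_label_map(): {label: [br, ...]}.
    label_map = {"image1": ["br1", "br2"], "image2": ["br3"]}

    # The reduce form used in the tests...
    total = functools.reduce(lambda a, x: a + len(x[1]), label_map.items(), 0)

    # ...is equivalent to summing the lengths of the per-label lists.
    assert total == sum(len(brs) for brs in label_map.values()) == 3
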
diff --git a/crosperf/settings.py b/crosperf/settings.py
index 75c8d9ec..5a983b32 100644
--- a/crosperf/settings.py
+++ b/crosperf/settings.py
@@ -1,11 +1,10 @@
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module to get the settings from experiment file."""
-from __future__ import print_function
from cros_utils import logger
from cros_utils import misc
@@ -13,74 +12,100 @@ from download_images import ImageDownloader
class Settings(object):
- """Class representing settings (a set of fields) from an experiment file."""
+ """Class representing settings (a set of fields) from an experiment file."""
- def __init__(self, name, settings_type):
- self.name = name
- self.settings_type = settings_type
- self.fields = {}
- self.parent = None
+ def __init__(self, name, settings_type):
+ self.name = name
+ self.settings_type = settings_type
+ self.fields = {}
+ self.parent = None
- def SetParentSettings(self, settings):
- """Set the parent settings which these settings can inherit from."""
- self.parent = settings
+ def SetParentSettings(self, settings):
+ """Set the parent settings which these settings can inherit from."""
+ self.parent = settings
- def AddField(self, field):
- name = field.name
- if name in self.fields:
- raise SyntaxError('Field %s defined previously.' % name)
- self.fields[name] = field
+ def AddField(self, field):
+ name = field.name
+ if name in self.fields:
+ raise SyntaxError("Field %s defined previously." % name)
+ self.fields[name] = field
- def SetField(self, name, value, append=False):
- if name not in self.fields:
- raise SyntaxError("'%s' is not a valid field in '%s' settings" %
- (name, self.settings_type))
- if append:
- self.fields[name].Append(value)
- else:
- self.fields[name].Set(value)
+ def SetField(self, name, value, append=False):
+ if name not in self.fields:
+ raise SyntaxError(
+ "'%s' is not a valid field in '%s' settings"
+ % (name, self.settings_type)
+ )
+ if append:
+ self.fields[name].Append(value)
+ else:
+ self.fields[name].Set(value)
- def GetField(self, name):
- """Get the value of a field with a given name."""
- if name not in self.fields:
- raise SyntaxError(
- "Field '%s' not a valid field in '%s' settings." % (name, self.name))
- field = self.fields[name]
- if not field.assigned and field.required:
- raise SyntaxError("Required field '%s' not defined in '%s' settings." %
- (name, self.name))
- return self.fields[name].Get()
+ def GetField(self, name):
+ """Get the value of a field with a given name."""
+ if name not in self.fields:
+ raise SyntaxError(
+ "Field '%s' not a valid field in '%s' settings."
+ % (name, self.name)
+ )
+ field = self.fields[name]
+ if not field.assigned and field.required:
+ raise SyntaxError(
+ "Required field '%s' not defined in '%s' settings."
+ % (name, self.name)
+ )
+ return self.fields[name].Get()
- def Inherit(self):
- """Inherit any unset values from the parent settings."""
- for name in self.fields:
- if (not self.fields[name].assigned and self.parent and
- name in self.parent.fields and self.parent.fields[name].assigned):
- self.fields[name].Set(self.parent.GetField(name), parse=False)
+ def Inherit(self):
+ """Inherit any unset values from the parent settings."""
+ for name in self.fields:
+ if (
+ not self.fields[name].assigned
+ and self.parent
+ and name in self.parent.fields
+ and self.parent.fields[name].assigned
+ ):
+ self.fields[name].Set(self.parent.GetField(name), parse=False)
- def Override(self, settings):
- """Override settings with settings from a different object."""
- for name in settings.fields:
- if name in self.fields and settings.fields[name].assigned:
- self.fields[name].Set(settings.GetField(name), parse=False)
+ def Override(self, settings):
+ """Override settings with settings from a different object."""
+ for name in settings.fields:
+ if name in self.fields and settings.fields[name].assigned:
+ self.fields[name].Set(settings.GetField(name), parse=False)
- def Validate(self):
- """Check that all required fields have been set."""
- for name in self.fields:
- if not self.fields[name].assigned and self.fields[name].required:
- raise SyntaxError('Field %s is invalid.' % name)
+ def Validate(self):
+ """Check that all required fields have been set."""
+ for name in self.fields:
+ if not self.fields[name].assigned and self.fields[name].required:
+ raise SyntaxError("Field %s is invalid." % name)
- def GetXbuddyPath(self, path_str, autotest_path, debug_path, board,
- chromeos_root, log_level, download_debug):
- prefix = 'remote'
- l = logger.GetLogger()
- if (path_str.find('trybot') < 0 and path_str.find('toolchain') < 0 and
- path_str.find(board) < 0 and path_str.find(board.replace('_', '-'))):
- xbuddy_path = '%s/%s/%s' % (prefix, board, path_str)
- else:
- xbuddy_path = '%s/%s' % (prefix, path_str)
- image_downloader = ImageDownloader(l, log_level)
- # Returns three variables: image, autotest_path, debug_path
- return image_downloader.Run(
- misc.CanonicalizePath(chromeos_root), xbuddy_path, autotest_path,
- debug_path, download_debug)
+ def GetXbuddyPath(
+ self,
+ path_str,
+ autotest_path,
+ debug_path,
+ board,
+ chromeos_root,
+ log_level,
+ download_debug,
+ ):
+ prefix = "remote"
+ l = logger.GetLogger()
+ if (
+ path_str.find("trybot") < 0
+ and path_str.find("toolchain") < 0
+ and path_str.find(board) < 0
+ and path_str.find(board.replace("_", "-")) < 0
+ ):
+ xbuddy_path = "%s/%s/%s" % (prefix, board, path_str)
+ else:
+ xbuddy_path = "%s/%s" % (prefix, path_str)
+ image_downloader = ImageDownloader(l, log_level)
+ # Returns three variables: image, autotest_path, debug_path
+ return image_downloader.Run(
+ misc.CanonicalizePath(chromeos_root),
+ xbuddy_path,
+ autotest_path,
+ debug_path,
+ download_debug,
+ )
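
Settings.Inherit() copies only fields that are assigned on the parent and still unassigned on the child. Below is a minimal usage sketch, assuming it is run from the crosperf/ directory so that the field and settings modules import directly:

    from field import TextField
    from settings import Settings

    parent = Settings("global_settings", "global")
    parent.AddField(TextField("cache_dir", default="", description="Cache dir."))
    parent.SetField("cache_dir", "/tmp/cros_scratch")

    child = Settings("image1", "label")
    child.AddField(TextField("cache_dir", default="", description="Cache dir."))
    child.SetParentSettings(parent)

    # "cache_dir" was never set on the child, so Inherit() pulls it from the parent.
    child.Inherit()
    assert child.GetField("cache_dir") == "/tmp/cros_scratch"
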
diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py
index 78834c63..6382bba7 100644
--- a/crosperf/settings_factory.py
+++ b/crosperf/settings_factory.py
@@ -1,11 +1,10 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Setting files for global, benchmark and labels."""
-from __future__ import print_function
from field import BooleanField
from field import EnumField
@@ -17,407 +16,558 @@ from settings import Settings
class BenchmarkSettings(Settings):
- """Settings used to configure individual benchmarks."""
+ """Settings used to configure individual benchmarks."""
- def __init__(self, name):
- super(BenchmarkSettings, self).__init__(name, 'benchmark')
- self.AddField(
- TextField('test_name',
- description='The name of the test to run. '
- 'Defaults to the name of the benchmark.'))
- self.AddField(
- TextField('test_args',
- description='Arguments to be passed to the '
- 'test.'))
- self.AddField(
- IntegerField(
- 'iterations',
- required=False,
- default=0,
- description='Number of iterations to run the test. '
- 'If not set, will run each benchmark test the optimum number of '
- 'times to get a stable result.'))
- self.AddField(
- TextField('suite',
- default='test_that',
- description='The type of the benchmark.'))
- self.AddField(
- IntegerField('retries',
- default=0,
- description='Number of times to retry a '
- 'benchmark run.'))
- self.AddField(
- BooleanField('run_local',
- description='Run benchmark harness on the DUT. '
- 'Currently only compatible with the suite: '
- 'telemetry_Crosperf.',
- required=False,
- default=True))
- self.AddField(
- FloatField(
- 'weight',
- default=0.0,
- description='Weight of the benchmark for CWP approximation'))
+ def __init__(self, name):
+ super(BenchmarkSettings, self).__init__(name, "benchmark")
+ self.AddField(
+ TextField(
+ "test_name",
+ description="The name of the test to run. "
+ "Defaults to the name of the benchmark.",
+ )
+ )
+ self.AddField(
+ TextField(
+ "test_args",
+ description="Arguments to be passed to the " "test.",
+ )
+ )
+ self.AddField(
+ IntegerField(
+ "iterations",
+ required=False,
+ default=0,
+ description="Number of iterations to run the test. "
+ "If not set, will run each benchmark test the optimum number of "
+ "times to get a stable result.",
+ )
+ )
+ self.AddField(
+ TextField(
+ "suite",
+ default="test_that",
+ description="The type of the benchmark.",
+ )
+ )
+ self.AddField(
+ IntegerField(
+ "retries",
+ default=0,
+ description="Number of times to retry a " "benchmark run.",
+ )
+ )
+ self.AddField(
+ BooleanField(
+ "run_local",
+ description="Run benchmark harness on the DUT. "
+ "Currently only compatible with the suite: "
+ "telemetry_Crosperf.",
+ required=False,
+ default=True,
+ )
+ )
+ self.AddField(
+ FloatField(
+ "weight",
+ default=0.0,
+ description="Weight of the benchmark for CWP approximation",
+ )
+ )
class LabelSettings(Settings):
- """Settings for each label."""
+ """Settings for each label."""
- def __init__(self, name):
- super(LabelSettings, self).__init__(name, 'label')
- self.AddField(
- TextField('chromeos_image',
- required=False,
- description='The path to the image to run tests '
- 'on, for local/custom-built images. See the '
- "'build' option for official or trybot images."))
- self.AddField(
- TextField(
- 'autotest_path',
- required=False,
- description='Autotest directory path relative to chroot which '
- 'has autotest files for the image to run tests requiring autotest '
- 'files.'))
- self.AddField(
- TextField(
- 'debug_path',
- required=False,
- description='Debug info directory relative to chroot which has '
- 'symbols and vmlinux that can be used by perf tool.'))
- self.AddField(
- TextField('chromeos_root',
- description='The path to a chromeos checkout which '
- 'contains a src/scripts directory. Defaults to '
- 'the chromeos checkout which contains the '
- 'chromeos_image.'))
- self.AddField(
- ListField('remote',
- description='A comma-separated list of IPs of chromeos'
- 'devices to run experiments on.'))
- self.AddField(
- TextField('image_args',
- required=False,
- default='',
- description='Extra arguments to pass to '
- 'image_chromeos.py.'))
- self.AddField(
- TextField('cache_dir',
- default='',
- description='The cache dir for this image.'))
- self.AddField(
- TextField('compiler',
- default='gcc',
- description='The compiler used to build the '
- 'ChromeOS image (gcc or llvm).'))
- self.AddField(
- TextField('chrome_src',
- description='The path to the source of chrome. '
- 'This is used to run telemetry benchmarks. '
- 'The default one is the src inside chroot.',
- required=False,
- default=''))
- self.AddField(
- TextField('build',
- description='The xbuddy specification for an '
- 'official or trybot image to use for tests. '
- "'/remote' is assumed, and the board is given "
- "elsewhere, so omit the '/remote/<board>/' xbuddy "
- 'prefix.',
- required=False,
- default=''))
+ def __init__(self, name):
+ super(LabelSettings, self).__init__(name, "label")
+ self.AddField(
+ TextField(
+ "chromeos_image",
+ required=False,
+ description="The path to the image to run tests "
+ "on, for local/custom-built images. See the "
+ "'build' option for official or trybot images.",
+ )
+ )
+ self.AddField(
+ TextField(
+ "autotest_path",
+ required=False,
+ description="Autotest directory path relative to chroot which "
+ "has autotest files for the image to run tests requiring autotest "
+ "files.",
+ )
+ )
+ self.AddField(
+ TextField(
+ "debug_path",
+ required=False,
+ description="Debug info directory relative to chroot which has "
+ "symbols and vmlinux that can be used by perf tool.",
+ )
+ )
+ self.AddField(
+ TextField(
+ "chromeos_root",
+ description="The path to a chromeos checkout which "
+ "contains a src/scripts directory. Defaults to "
+ "the chromeos checkout which contains the "
+ "chromeos_image.",
+ )
+ )
+ self.AddField(
+ ListField(
+ "remote",
+ description="A comma-separated list of IPs of chromeos"
+ "devices to run experiments on.",
+ )
+ )
+ self.AddField(
+ TextField(
+ "image_args",
+ required=False,
+ default="",
+ description="Extra arguments to pass to " "image_chromeos.py.",
+ )
+ )
+ self.AddField(
+ TextField(
+ "cache_dir",
+ default="",
+ description="The cache dir for this image.",
+ )
+ )
+ self.AddField(
+ TextField(
+ "compiler",
+ default="gcc",
+ description="The compiler used to build the "
+ "ChromeOS image (gcc or llvm).",
+ )
+ )
+ self.AddField(
+ TextField(
+ "chrome_src",
+ description="The path to the source of chrome. "
+ "This is used to run telemetry benchmarks. "
+ "The default one is the src inside chroot.",
+ required=False,
+ default="",
+ )
+ )
+ self.AddField(
+ TextField(
+ "build",
+ description="The xbuddy specification for an "
+ "official or trybot image to use for tests. "
+ "'/remote' is assumed, and the board is given "
+ "elsewhere, so omit the '/remote/<board>/' xbuddy "
+ "prefix.",
+ required=False,
+ default="",
+ )
+ )
class GlobalSettings(Settings):
- """Settings that apply per-experiment."""
+ """Settings that apply per-experiment."""
- def __init__(self, name):
- super(GlobalSettings, self).__init__(name, 'global')
- self.AddField(
- TextField('name',
- description='The name of the experiment. Just an '
- 'identifier.'))
- self.AddField(
- TextField('board',
- description='The target board for running '
- 'experiments on, e.g. x86-alex.'))
- self.AddField(
- BooleanField('crosfleet',
- description='Whether to run experiments via crosfleet.',
- default=False))
- self.AddField(
- ListField('remote',
- description='A comma-separated list of IPs of '
- 'chromeos devices to run experiments on.'))
- self.AddField(
- BooleanField('rerun_if_failed',
- description='Whether to re-run failed test runs '
- 'or not.',
- default=False))
- self.AddField(
- BooleanField('rm_chroot_tmp',
- default=False,
- description='Whether to remove the test_that '
- 'result in the chroot.'))
- self.AddField(
- ListField('email',
- description='Space-separated list of email '
- 'addresses to send email to.'))
- self.AddField(
- BooleanField('rerun',
- description='Whether to ignore the cache and '
- 'for tests to be re-run.',
- default=False))
- self.AddField(
- BooleanField('same_specs',
- default=True,
- description='Ensure cached runs are run on the '
- 'same kind of devices which are specified as a '
- 'remote.'))
- self.AddField(
- BooleanField('same_machine',
- default=False,
- description='Ensure cached runs are run on the '
- 'same remote.'))
- self.AddField(
- BooleanField('use_file_locks',
- default=False,
- description='DEPRECATED: Whether to use the file locks '
- 'or AFE server lock mechanism.'))
- self.AddField(
- IntegerField(
- 'iterations',
- required=False,
- default=0,
- description='Number of iterations to run all tests. '
- 'If not set, will run each benchmark test the optimum number of '
- 'times to get a stable result.'))
- self.AddField(
- TextField('chromeos_root',
- description='The path to a chromeos checkout which '
- 'contains a src/scripts directory. Defaults to '
- 'the chromeos checkout which contains the '
- 'chromeos_image.'))
- self.AddField(
- TextField('logging_level',
- default='average',
- description='The level of logging desired. '
- "Options are 'quiet', 'average', and 'verbose'."))
- self.AddField(
- IntegerField('acquire_timeout',
- default=0,
- description='Number of seconds to wait for '
- 'machine before exit if all the machines in '
- 'the experiment file are busy. Default is 0.'))
- self.AddField(
- TextField('perf_args',
- default='',
- description='The optional profile command. It '
- 'enables perf commands to record perforamance '
- 'related counters. It must start with perf '
- 'command record or stat followed by arguments.'))
- self.AddField(
- BooleanField('download_debug',
- default=True,
- description='Download compressed debug symbols alongwith '
- 'image. This can provide more info matching symbols for'
- 'profiles, but takes larger space. By default, download'
- 'it only when perf_args is specified.'))
- self.AddField(
- TextField('cache_dir',
- default='',
- description='The abs path of cache dir. '
- 'Default is /home/$(whoami)/cros_scratch.'))
- self.AddField(
- BooleanField('cache_only',
- default=False,
- description='Whether to use only cached '
- 'results (do not rerun failed tests).'))
- self.AddField(
- BooleanField('no_email',
- default=False,
- description='Whether to disable the email to '
- 'user after crosperf finishes.'))
- self.AddField(
- BooleanField('json_report',
- default=False,
- description='Whether to generate a json version '
- 'of the report, for archiving.'))
- self.AddField(
- BooleanField('show_all_results',
- default=False,
- description='When running Telemetry tests, '
- 'whether to all the results, instead of just '
- 'the default (summary) results.'))
- self.AddField(
- TextField('share_cache',
- default='',
- description='Path to alternate cache whose data '
- 'you want to use. It accepts multiple directories '
- 'separated by a ",".'))
- self.AddField(
- TextField('results_dir', default='', description='The results dir.'))
- self.AddField(
- BooleanField(
- 'compress_results',
- default=True,
- description='Whether to compress all test results other than '
- 'reports into a tarball to save disk space.'))
- self.AddField(
- TextField('locks_dir',
- default='',
- description='An alternate directory to use for '
- 'storing/checking machine file locks for local machines. '
- 'By default the file locks directory is '
- '/google/data/rw/users/mo/mobiletc-prebuild/locks.\n'
- 'WARNING: If you use your own locks directory, '
- 'there is no guarantee that someone else might not '
- 'hold a lock on the same machine in a different '
- 'locks directory.'))
- self.AddField(
- TextField('chrome_src',
- description='The path to the source of chrome. '
- 'This is used to run telemetry benchmarks. '
- 'The default one is the src inside chroot.',
- required=False,
- default=''))
- self.AddField(
- IntegerField('retries',
- default=0,
- description='Number of times to retry a '
- 'benchmark run.'))
- self.AddField(
- TextField('cwp_dso',
- description='The DSO type that we want to use for '
- 'CWP approximation. This is used to run telemetry '
- 'benchmarks. Valid DSO types can be found from dso_list '
- 'in experiment_factory.py. The default value is set to '
- 'be empty.',
- required=False,
- default=''))
- self.AddField(
- BooleanField('enable_aslr',
- description='Enable ASLR on the machine to run the '
- 'benchmarks. ASLR is disabled by default',
- required=False,
- default=False))
- self.AddField(
- BooleanField('ignore_min_max',
- description='When doing math for the raw results, '
- 'ignore min and max values to reduce noise.',
- required=False,
- default=False))
- self.AddField(
- TextField(
- 'intel_pstate',
- description='Intel Pstate mode.\n'
- 'Supported modes: "active", "passive", "no_hwp".\n'
- 'Default is "no_hwp" which disables hardware pstates to avoid '
- 'noise in benchmarks.',
- required=False,
- default='no_hwp'))
- self.AddField(
- BooleanField('turbostat',
- description='Run turbostat process in the background'
- ' of a benchmark. Enabled by default.',
- required=False,
- default=True))
- self.AddField(
- FloatField(
- 'top_interval',
- description='Run top command in the background of a benchmark with'
- ' interval of sampling specified in seconds.\n'
- 'Recommended values 1-5. Lower number provides more accurate'
- ' data.\n'
- 'With 0 - do not run top.\n'
- 'NOTE: Running top with interval 1-5 sec has insignificant'
- ' performance impact (performance degradation does not exceed'
- ' 0.3%%, measured on x86_64, ARM32, and ARM64). '
- 'The default value is 1.',
- required=False,
- default=1))
- self.AddField(
- IntegerField('cooldown_temp',
- required=False,
- default=40,
- description='Wait until CPU temperature goes down below'
- ' specified temperature in Celsius'
- ' prior starting a benchmark. '
- 'By default the value is set to 40 degrees.'))
- self.AddField(
- IntegerField('cooldown_time',
- required=False,
- default=10,
- description='Wait specified time in minutes allowing'
- ' CPU to cool down. Zero value disables cooldown. '
- 'The default value is 10 minutes.'))
- self.AddField(
- EnumField(
- 'governor',
- options=[
- 'performance',
- 'powersave',
- 'userspace',
- 'ondemand',
- 'conservative',
- 'schedutils',
- 'sched',
- 'interactive',
- ],
- default='performance',
- required=False,
- description='Setup CPU governor for all cores.\n'
- 'For more details refer to:\n'
- 'https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt. '
- 'Default is "performance" governor.'))
- self.AddField(
- EnumField(
- 'cpu_usage',
- options=[
- 'all',
- 'big_only',
- 'little_only',
- 'exclusive_cores',
- ],
- default='all',
- required=False,
- description='Restrict usage of CPUs to decrease CPU interference.\n'
- '"all" - no restrictions;\n'
- '"big-only", "little-only" - enable only big/little cores,'
- ' applicable only on ARM;\n'
- '"exclusive-cores" - (for future use)'
- ' isolate cores for exclusive use of benchmark processes. '
- 'By default use all CPUs.'))
- self.AddField(
- IntegerField(
- 'cpu_freq_pct',
- required=False,
- default=95,
- description='Setup CPU frequency to a supported value less than'
- ' or equal to a percent of max_freq. '
- 'CPU frequency is reduced to 95%% by default to reduce thermal '
- 'throttling.'))
- self.AddField(
- BooleanField(
- 'no_lock',
- default=False,
- description='Do not attempt to lock the DUT.'
- ' Useful when lock is held externally, say with crosfleet.'))
+ def __init__(self, name):
+ super(GlobalSettings, self).__init__(name, "global")
+ self.AddField(
+ TextField(
+ "name",
+ description="The name of the experiment. Just an "
+ "identifier.",
+ )
+ )
+ self.AddField(
+ TextField(
+ "board",
+ description="The target board for running "
+ "experiments on, e.g. x86-alex.",
+ )
+ )
+ self.AddField(
+ BooleanField(
+ "crosfleet",
+ description="Whether to run experiments via crosfleet.",
+ default=False,
+ )
+ )
+ self.AddField(
+ ListField(
+ "remote",
+ description="A comma-separated list of IPs of "
+ "chromeos devices to run experiments on.",
+ )
+ )
+ self.AddField(
+ BooleanField(
+ "rerun_if_failed",
+ description="Whether to re-run failed test runs " "or not.",
+ default=False,
+ )
+ )
+ self.AddField(
+ BooleanField(
+ "rm_chroot_tmp",
+ default=False,
+ description="Whether to remove the test_that "
+ "result in the chroot.",
+ )
+ )
+ self.AddField(
+ ListField(
+ "email",
+ description="Space-separated list of email "
+ "addresses to send email to.",
+ )
+ )
+ self.AddField(
+ BooleanField(
+ "rerun",
+ description="Whether to ignore the cache and "
+ "for tests to be re-run.",
+ default=False,
+ )
+ )
+ self.AddField(
+ BooleanField(
+ "same_specs",
+ default=True,
+ description="Ensure cached runs are run on the "
+ "same kind of devices which are specified as a "
+ "remote.",
+ )
+ )
+ self.AddField(
+ BooleanField(
+ "same_machine",
+ default=False,
+ description="Ensure cached runs are run on the " "same remote.",
+ )
+ )
+ self.AddField(
+ BooleanField(
+ "use_file_locks",
+ default=False,
+ description="DEPRECATED: Whether to use the file locks "
+ "or AFE server lock mechanism.",
+ )
+ )
+ self.AddField(
+ IntegerField(
+ "iterations",
+ required=False,
+ default=0,
+ description="Number of iterations to run all tests. "
+ "If not set, will run each benchmark test the optimum number of "
+ "times to get a stable result.",
+ )
+ )
+ self.AddField(
+ TextField(
+ "chromeos_root",
+ description="The path to a chromeos checkout which "
+ "contains a src/scripts directory. Defaults to "
+ "the chromeos checkout which contains the "
+ "chromeos_image.",
+ )
+ )
+ self.AddField(
+ TextField(
+ "logging_level",
+ default="average",
+ description="The level of logging desired. "
+ "Options are 'quiet', 'average', and 'verbose'.",
+ )
+ )
+ self.AddField(
+ IntegerField(
+ "acquire_timeout",
+ default=0,
+ description="Number of seconds to wait for "
+ "machine before exit if all the machines in "
+ "the experiment file are busy. Default is 0.",
+ )
+ )
+ self.AddField(
+ TextField(
+ "perf_args",
+ default="",
+ description="The optional profile command. It "
+ "enables perf commands to record perforamance "
+ "related counters. It must start with perf "
+ "command record or stat followed by arguments.",
+ )
+ )
+ self.AddField(
+ BooleanField(
+ "download_debug",
+ default=True,
+ description="Download compressed debug symbols alongwith "
+ "image. This can provide more info matching symbols for"
+ "profiles, but takes larger space. By default, download"
+ "it only when perf_args is specified.",
+ )
+ )
+ self.AddField(
+ TextField(
+ "cache_dir",
+ default="",
+ description="The abs path of cache dir. "
+ "Default is /home/$(whoami)/cros_scratch.",
+ )
+ )
+ self.AddField(
+ BooleanField(
+ "cache_only",
+ default=False,
+ description="Whether to use only cached "
+ "results (do not rerun failed tests).",
+ )
+ )
+ self.AddField(
+ BooleanField(
+ "no_email",
+ default=False,
+ description="Whether to disable the email to "
+ "user after crosperf finishes.",
+ )
+ )
+ self.AddField(
+ BooleanField(
+ "json_report",
+ default=False,
+ description="Whether to generate a json version "
+ "of the report, for archiving.",
+ )
+ )
+ self.AddField(
+ BooleanField(
+ "show_all_results",
+ default=False,
+ description="When running Telemetry tests, "
+ "whether to all the results, instead of just "
+ "the default (summary) results.",
+ )
+ )
+ self.AddField(
+ TextField(
+ "share_cache",
+ default="",
+ description="Path to alternate cache whose data "
+ "you want to use. It accepts multiple directories "
+ 'separated by a ",".',
+ )
+ )
+ self.AddField(
+ TextField("results_dir", default="", description="The results dir.")
+ )
+ self.AddField(
+ BooleanField(
+ "compress_results",
+ default=True,
+ description="Whether to compress all test results other than "
+ "reports into a tarball to save disk space.",
+ )
+ )
+ self.AddField(
+ TextField(
+ "locks_dir",
+ default="",
+ description="An alternate directory to use for "
+ "storing/checking machine file locks for local machines. "
+ "By default the file locks directory is "
+ "/google/data/rw/users/mo/mobiletc-prebuild/locks.\n"
+ "WARNING: If you use your own locks directory, "
+ "there is no guarantee that someone else might not "
+ "hold a lock on the same machine in a different "
+ "locks directory.",
+ )
+ )
+ self.AddField(
+ TextField(
+ "chrome_src",
+ description="The path to the source of chrome. "
+ "This is used to run telemetry benchmarks. "
+ "The default one is the src inside chroot.",
+ required=False,
+ default="",
+ )
+ )
+ self.AddField(
+ IntegerField(
+ "retries",
+ default=0,
+ description="Number of times to retry a " "benchmark run.",
+ )
+ )
+ self.AddField(
+ TextField(
+ "cwp_dso",
+ description="The DSO type that we want to use for "
+ "CWP approximation. This is used to run telemetry "
+ "benchmarks. Valid DSO types can be found from dso_list "
+ "in experiment_factory.py. The default value is set to "
+ "be empty.",
+ required=False,
+ default="",
+ )
+ )
+ self.AddField(
+ BooleanField(
+ "enable_aslr",
+ description="Enable ASLR on the machine to run the "
+ "benchmarks. ASLR is disabled by default",
+ required=False,
+ default=False,
+ )
+ )
+ self.AddField(
+ BooleanField(
+ "ignore_min_max",
+ description="When doing math for the raw results, "
+ "ignore min and max values to reduce noise.",
+ required=False,
+ default=False,
+ )
+ )
+ self.AddField(
+ TextField(
+ "intel_pstate",
+ description="Intel Pstate mode.\n"
+ 'Supported modes: "active", "passive", "no_hwp".\n'
+ 'Default is "no_hwp" which disables hardware pstates to avoid '
+ "noise in benchmarks.",
+ required=False,
+ default="no_hwp",
+ )
+ )
+ self.AddField(
+ BooleanField(
+ "turbostat",
+ description="Run turbostat process in the background"
+ " of a benchmark. Enabled by default.",
+ required=False,
+ default=True,
+ )
+ )
+ self.AddField(
+ FloatField(
+ "top_interval",
+ description="Run top command in the background of a benchmark with"
+ " interval of sampling specified in seconds.\n"
+ "Recommended values 1-5. Lower number provides more accurate"
+ " data.\n"
+ "With 0 - do not run top.\n"
+ "NOTE: Running top with interval 1-5 sec has insignificant"
+ " performance impact (performance degradation does not exceed"
+ " 0.3%%, measured on x86_64, ARM32, and ARM64). "
+ "The default value is 1.",
+ required=False,
+ default=1,
+ )
+ )
+ self.AddField(
+ IntegerField(
+ "cooldown_temp",
+ required=False,
+ default=40,
+ description="Wait until CPU temperature goes down below"
+ " specified temperature in Celsius"
+ " prior starting a benchmark. "
+ "By default the value is set to 40 degrees.",
+ )
+ )
+ self.AddField(
+ IntegerField(
+ "cooldown_time",
+ required=False,
+ default=10,
+ description="Wait specified time in minutes allowing"
+ " CPU to cool down. Zero value disables cooldown. "
+ "The default value is 10 minutes.",
+ )
+ )
+ self.AddField(
+ EnumField(
+ "governor",
+ options=[
+ "performance",
+ "powersave",
+ "userspace",
+ "ondemand",
+ "conservative",
+ "schedutils",
+ "sched",
+ "interactive",
+ ],
+ default="performance",
+ required=False,
+ description="Setup CPU governor for all cores.\n"
+ "For more details refer to:\n"
+ "https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt. "
+ 'Default is "performance" governor.',
+ )
+ )
+ self.AddField(
+ EnumField(
+ "cpu_usage",
+ options=[
+ "all",
+ "big_only",
+ "little_only",
+ "exclusive_cores",
+ ],
+ default="all",
+ required=False,
+ description="Restrict usage of CPUs to decrease CPU interference.\n"
+ '"all" - no restrictions;\n'
+ '"big-only", "little-only" - enable only big/little cores,'
+ " applicable only on ARM;\n"
+ '"exclusive-cores" - (for future use)'
+ " isolate cores for exclusive use of benchmark processes. "
+ "By default use all CPUs.",
+ )
+ )
+ self.AddField(
+ IntegerField(
+ "cpu_freq_pct",
+ required=False,
+ default=95,
+ description="Setup CPU frequency to a supported value less than"
+ " or equal to a percent of max_freq. "
+ "CPU frequency is reduced to 95%% by default to reduce thermal "
+ "throttling.",
+ )
+ )
+ self.AddField(
+ BooleanField(
+ "no_lock",
+ default=False,
+ description="Do not attempt to lock the DUT."
+ " Useful when lock is held externally, say with crosfleet.",
+ )
+ )
class SettingsFactory(object):
- """Factory class for building different types of Settings objects.
+ """Factory class for building different types of Settings objects.
- This factory is currently hardcoded to produce settings for ChromeOS
- experiment files. The idea is that in the future, other types
- of settings could be produced.
- """
+ This factory is currently hardcoded to produce settings for ChromeOS
+ experiment files. The idea is that in the future, other types
+ of settings could be produced.
+ """
- def GetSettings(self, name, settings_type):
- if settings_type == 'label' or not settings_type:
- return LabelSettings(name)
- if settings_type == 'global':
- return GlobalSettings(name)
- if settings_type == 'benchmark':
- return BenchmarkSettings(name)
+ def GetSettings(self, name, settings_type):
+ if settings_type == "label" or not settings_type:
+ return LabelSettings(name)
+ if settings_type == "global":
+ return GlobalSettings(name)
+ if settings_type == "benchmark":
+ return BenchmarkSettings(name)
- raise TypeError("Invalid settings type: '%s'." % settings_type)
+ raise TypeError("Invalid settings type: '%s'." % settings_type)
diff --git a/crosperf/settings_factory_unittest.py b/crosperf/settings_factory_unittest.py
index 8277e870..93d3bd6d 100755
--- a/crosperf/settings_factory_unittest.py
+++ b/crosperf/settings_factory_unittest.py
@@ -1,13 +1,12 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
-# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Copyright 2017 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for crosperf."""
-from __future__ import print_function
import unittest
@@ -15,101 +14,108 @@ import settings_factory
class BenchmarkSettingsTest(unittest.TestCase):
- """Class to test benchmark settings."""
+ """Class to test benchmark settings."""
- def test_init(self):
- res = settings_factory.BenchmarkSettings('b_settings')
- self.assertIsNotNone(res)
- self.assertEqual(len(res.fields), 7)
- self.assertEqual(res.GetField('test_name'), '')
- self.assertEqual(res.GetField('test_args'), '')
- self.assertEqual(res.GetField('iterations'), 0)
- self.assertEqual(res.GetField('suite'), 'test_that')
+ def test_init(self):
+ res = settings_factory.BenchmarkSettings("b_settings")
+ self.assertIsNotNone(res)
+ self.assertEqual(len(res.fields), 7)
+ self.assertEqual(res.GetField("test_name"), "")
+ self.assertEqual(res.GetField("test_args"), "")
+ self.assertEqual(res.GetField("iterations"), 0)
+ self.assertEqual(res.GetField("suite"), "test_that")
class LabelSettingsTest(unittest.TestCase):
- """Class to test label settings."""
-
- def test_init(self):
- res = settings_factory.LabelSettings('l_settings')
- self.assertIsNotNone(res)
- self.assertEqual(len(res.fields), 10)
- self.assertEqual(res.GetField('chromeos_image'), '')
- self.assertEqual(res.GetField('autotest_path'), '')
- self.assertEqual(res.GetField('chromeos_root'), '')
- self.assertEqual(res.GetField('remote'), None)
- self.assertEqual(res.GetField('image_args'), '')
- self.assertEqual(res.GetField('cache_dir'), '')
- self.assertEqual(res.GetField('chrome_src'), '')
- self.assertEqual(res.GetField('build'), '')
+ """Class to test label settings."""
+
+ def test_init(self):
+ res = settings_factory.LabelSettings("l_settings")
+ self.assertIsNotNone(res)
+ self.assertEqual(len(res.fields), 10)
+ self.assertEqual(res.GetField("chromeos_image"), "")
+ self.assertEqual(res.GetField("autotest_path"), "")
+ self.assertEqual(res.GetField("chromeos_root"), "")
+ self.assertEqual(res.GetField("remote"), None)
+ self.assertEqual(res.GetField("image_args"), "")
+ self.assertEqual(res.GetField("cache_dir"), "")
+ self.assertEqual(res.GetField("chrome_src"), "")
+ self.assertEqual(res.GetField("build"), "")
class GlobalSettingsTest(unittest.TestCase):
- """Class to test global settings."""
-
- def test_init(self):
- res = settings_factory.GlobalSettings('g_settings')
- self.assertIsNotNone(res)
- self.assertEqual(len(res.fields), 40)
- self.assertEqual(res.GetField('name'), '')
- self.assertEqual(res.GetField('board'), '')
- self.assertEqual(res.GetField('crosfleet'), False)
- self.assertEqual(res.GetField('remote'), None)
- self.assertEqual(res.GetField('rerun_if_failed'), False)
- self.assertEqual(res.GetField('rm_chroot_tmp'), False)
- self.assertEqual(res.GetField('email'), None)
- self.assertEqual(res.GetField('rerun'), False)
- self.assertEqual(res.GetField('same_specs'), True)
- self.assertEqual(res.GetField('same_machine'), False)
- self.assertEqual(res.GetField('iterations'), 0)
- self.assertEqual(res.GetField('chromeos_root'), '')
- self.assertEqual(res.GetField('logging_level'), 'average')
- self.assertEqual(res.GetField('acquire_timeout'), 0)
- self.assertEqual(res.GetField('perf_args'), '')
- self.assertEqual(res.GetField('download_debug'), True)
- self.assertEqual(res.GetField('cache_dir'), '')
- self.assertEqual(res.GetField('cache_only'), False)
- self.assertEqual(res.GetField('no_email'), False)
- self.assertEqual(res.GetField('show_all_results'), False)
- self.assertEqual(res.GetField('share_cache'), '')
- self.assertEqual(res.GetField('results_dir'), '')
- self.assertEqual(res.GetField('compress_results'), True)
- self.assertEqual(res.GetField('chrome_src'), '')
- self.assertEqual(res.GetField('cwp_dso'), '')
- self.assertEqual(res.GetField('enable_aslr'), False)
- self.assertEqual(res.GetField('ignore_min_max'), False)
- self.assertEqual(res.GetField('intel_pstate'), 'no_hwp')
- self.assertEqual(res.GetField('turbostat'), True)
- self.assertEqual(res.GetField('top_interval'), 1)
- self.assertEqual(res.GetField('cooldown_time'), 10)
- self.assertEqual(res.GetField('cooldown_temp'), 40)
- self.assertEqual(res.GetField('governor'), 'performance')
- self.assertEqual(res.GetField('cpu_usage'), 'all')
- self.assertEqual(res.GetField('cpu_freq_pct'), 95)
+ """Class to test global settings."""
+
+ def test_init(self):
+ res = settings_factory.GlobalSettings("g_settings")
+ self.assertIsNotNone(res)
+ self.assertEqual(len(res.fields), 40)
+ self.assertEqual(res.GetField("name"), "")
+ self.assertEqual(res.GetField("board"), "")
+ self.assertEqual(res.GetField("crosfleet"), False)
+ self.assertEqual(res.GetField("remote"), None)
+ self.assertEqual(res.GetField("rerun_if_failed"), False)
+ self.assertEqual(res.GetField("rm_chroot_tmp"), False)
+ self.assertEqual(res.GetField("email"), None)
+ self.assertEqual(res.GetField("rerun"), False)
+ self.assertEqual(res.GetField("same_specs"), True)
+ self.assertEqual(res.GetField("same_machine"), False)
+ self.assertEqual(res.GetField("iterations"), 0)
+ self.assertEqual(res.GetField("chromeos_root"), "")
+ self.assertEqual(res.GetField("logging_level"), "average")
+ self.assertEqual(res.GetField("acquire_timeout"), 0)
+ self.assertEqual(res.GetField("perf_args"), "")
+ self.assertEqual(res.GetField("download_debug"), True)
+ self.assertEqual(res.GetField("cache_dir"), "")
+ self.assertEqual(res.GetField("cache_only"), False)
+ self.assertEqual(res.GetField("no_email"), False)
+ self.assertEqual(res.GetField("show_all_results"), False)
+ self.assertEqual(res.GetField("share_cache"), "")
+ self.assertEqual(res.GetField("results_dir"), "")
+ self.assertEqual(res.GetField("compress_results"), True)
+ self.assertEqual(res.GetField("chrome_src"), "")
+ self.assertEqual(res.GetField("cwp_dso"), "")
+ self.assertEqual(res.GetField("enable_aslr"), False)
+ self.assertEqual(res.GetField("ignore_min_max"), False)
+ self.assertEqual(res.GetField("intel_pstate"), "no_hwp")
+ self.assertEqual(res.GetField("turbostat"), True)
+ self.assertEqual(res.GetField("top_interval"), 1)
+ self.assertEqual(res.GetField("cooldown_time"), 10)
+ self.assertEqual(res.GetField("cooldown_temp"), 40)
+ self.assertEqual(res.GetField("governor"), "performance")
+ self.assertEqual(res.GetField("cpu_usage"), "all")
+ self.assertEqual(res.GetField("cpu_freq_pct"), 95)
class SettingsFactoryTest(unittest.TestCase):
- """Class to test SettingsFactory."""
-
- def test_get_settings(self):
- self.assertRaises(Exception, settings_factory.SettingsFactory.GetSettings,
- 'global', 'bad_type')
-
- l_settings = settings_factory.SettingsFactory().GetSettings(
- 'label', 'label')
- self.assertIsInstance(l_settings, settings_factory.LabelSettings)
- self.assertEqual(len(l_settings.fields), 10)
-
- b_settings = settings_factory.SettingsFactory().GetSettings(
- 'benchmark', 'benchmark')
- self.assertIsInstance(b_settings, settings_factory.BenchmarkSettings)
- self.assertEqual(len(b_settings.fields), 7)
-
- g_settings = settings_factory.SettingsFactory().GetSettings(
- 'global', 'global')
- self.assertIsInstance(g_settings, settings_factory.GlobalSettings)
- self.assertEqual(len(g_settings.fields), 40)
-
-
-if __name__ == '__main__':
- unittest.main()
+ """Class to test SettingsFactory."""
+
+ def test_get_settings(self):
+ self.assertRaises(
+ Exception,
+ settings_factory.SettingsFactory.GetSettings,
+ "global",
+ "bad_type",
+ )
+
+ l_settings = settings_factory.SettingsFactory().GetSettings(
+ "label", "label"
+ )
+ self.assertIsInstance(l_settings, settings_factory.LabelSettings)
+ self.assertEqual(len(l_settings.fields), 10)
+
+ b_settings = settings_factory.SettingsFactory().GetSettings(
+ "benchmark", "benchmark"
+ )
+ self.assertIsInstance(b_settings, settings_factory.BenchmarkSettings)
+ self.assertEqual(len(b_settings.fields), 7)
+
+ g_settings = settings_factory.SettingsFactory().GetSettings(
+ "global", "global"
+ )
+ self.assertIsInstance(g_settings, settings_factory.GlobalSettings)
+ self.assertEqual(len(g_settings.fields), 40)
+
+
+if __name__ == "__main__":
+ unittest.main()
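The assertions above double as documentation of the default values; a small sketch of reading a few of them directly, under the same assumption that the crosperf modules are importable:

    import settings_factory

    g_settings = settings_factory.GlobalSettings("g_settings")
    print(g_settings.GetField("governor"))       # "performance"
    print(g_settings.GetField("cooldown_temp"))  # 40
    print(g_settings.GetField("turbostat"))      # True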
diff --git a/crosperf/settings_unittest.py b/crosperf/settings_unittest.py
index e127552f..ab31e18f 100755
--- a/crosperf/settings_unittest.py
+++ b/crosperf/settings_unittest.py
@@ -1,238 +1,293 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""unittest for settings."""
-from __future__ import print_function
import unittest
import unittest.mock as mock
-import settings
-import settings_factory
-
+from cros_utils import logger
+import download_images
from field import IntegerField
from field import ListField
-import download_images
-
-from cros_utils import logger
+import settings
+import settings_factory
class TestSettings(unittest.TestCase):
- """setting test class."""
-
- def setUp(self):
- self.settings = settings.Settings('global_name', 'global')
-
- def test_init(self):
- self.assertEqual(self.settings.name, 'global_name')
- self.assertEqual(self.settings.settings_type, 'global')
- self.assertIsNone(self.settings.parent)
-
- def test_set_parent_settings(self):
- self.assertIsNone(self.settings.parent)
- settings_parent = {'fake_parent_entry': 0}
- self.settings.SetParentSettings(settings_parent)
- self.assertIsNotNone(self.settings.parent)
- self.assertTrue(isinstance(self.settings.parent, dict))
- self.assertEqual(self.settings.parent, settings_parent)
-
- def test_add_field(self):
- self.assertEqual(self.settings.fields, {})
- self.settings.AddField(
- IntegerField(
- 'iterations',
- default=1,
- required=False,
- description='Number of iterations to '
- 'run the test.'))
- self.assertEqual(len(self.settings.fields), 1)
- # Adding the same field twice raises an exception.
- self.assertRaises(Exception, self.settings.AddField, (IntegerField(
- 'iterations',
- default=1,
- required=False,
- description='Number of iterations to run '
- 'the test.')))
- res = self.settings.fields['iterations']
- self.assertIsInstance(res, IntegerField)
- self.assertEqual(res.Get(), 1)
-
- def test_set_field(self):
- self.assertEqual(self.settings.fields, {})
- self.settings.AddField(
- IntegerField(
- 'iterations',
- default=1,
- required=False,
- description='Number of iterations to run the '
- 'test.'))
- res = self.settings.fields['iterations']
- self.assertEqual(res.Get(), 1)
-
- self.settings.SetField('iterations', 10)
- res = self.settings.fields['iterations']
- self.assertEqual(res.Get(), 10)
-
- # Setting a field that's not there raises an exception.
- self.assertRaises(Exception, self.settings.SetField, 'remote',
- 'lumpy1.cros')
-
- self.settings.AddField(
- ListField(
- 'remote',
- default=[],
- description="A comma-separated list of ip's of "
- 'chromeos devices to run '
- 'experiments on.'))
- self.assertTrue(isinstance(self.settings.fields, dict))
- self.assertEqual(len(self.settings.fields), 2)
- res = self.settings.fields['remote']
- self.assertEqual(res.Get(), [])
- self.settings.SetField('remote', 'lumpy1.cros', append=True)
- self.settings.SetField('remote', 'lumpy2.cros', append=True)
- res = self.settings.fields['remote']
- self.assertEqual(res.Get(), ['lumpy1.cros', 'lumpy2.cros'])
-
- def test_get_field(self):
- # Getting a field that's not there raises an exception.
- self.assertRaises(Exception, self.settings.GetField, 'iterations')
-
- # Getting a required field that hasn't been assigned raises an exception.
- self.settings.AddField(
- IntegerField(
- 'iterations',
- required=True,
- description='Number of iterations to '
- 'run the test.'))
- self.assertIsNotNone(self.settings.fields['iterations'])
- self.assertRaises(Exception, self.settings.GetField, 'iterations')
-
- # Set the value, then get it.
- self.settings.SetField('iterations', 5)
- res = self.settings.GetField('iterations')
- self.assertEqual(res, 5)
-
- def test_inherit(self):
- parent_settings = settings_factory.SettingsFactory().GetSettings(
- 'global', 'global')
- label_settings = settings_factory.SettingsFactory().GetSettings(
- 'label', 'label')
- self.assertEqual(parent_settings.GetField('chromeos_root'), '')
- self.assertEqual(label_settings.GetField('chromeos_root'), '')
- self.assertIsNone(label_settings.parent)
-
- parent_settings.SetField('chromeos_root', '/tmp/chromeos')
- label_settings.SetParentSettings(parent_settings)
- self.assertEqual(parent_settings.GetField('chromeos_root'), '/tmp/chromeos')
- self.assertEqual(label_settings.GetField('chromeos_root'), '')
- label_settings.Inherit()
- self.assertEqual(label_settings.GetField('chromeos_root'), '/tmp/chromeos')
-
- def test_override(self):
- self.settings.AddField(
- ListField(
- 'email',
- default=[],
- description='Space-seperated'
- 'list of email addresses to send '
- 'email to.'))
-
- global_settings = settings_factory.SettingsFactory().GetSettings(
- 'global', 'global')
-
- global_settings.SetField('email', 'john.doe@google.com', append=True)
- global_settings.SetField('email', 'jane.smith@google.com', append=True)
-
- res = self.settings.GetField('email')
- self.assertEqual(res, [])
-
- self.settings.Override(global_settings)
- res = self.settings.GetField('email')
- self.assertEqual(res, ['john.doe@google.com', 'jane.smith@google.com'])
-
- def test_validate(self):
-
- self.settings.AddField(
- IntegerField(
- 'iterations',
- required=True,
- description='Number of iterations '
- 'to run the test.'))
- self.settings.AddField(
- ListField(
- 'remote',
- default=[],
- required=True,
- description='A comma-separated list '
- "of ip's of chromeos "
- 'devices to run experiments on.'))
- self.settings.AddField(
- ListField(
- 'email',
- default=[],
- description='Space-seperated'
- 'list of email addresses to '
- 'send email to.'))
-
- # 'required' fields have not been assigned; should raise an exception.
- self.assertRaises(Exception, self.settings.Validate)
- self.settings.SetField('iterations', 2)
- self.settings.SetField('remote', 'x86-alex.cros', append=True)
- # Should run without exception now.
- self.settings.Validate()
-
- @mock.patch.object(logger, 'GetLogger')
- @mock.patch.object(download_images.ImageDownloader, 'Run')
- @mock.patch.object(download_images, 'ImageDownloader')
- def test_get_xbuddy_path(self, mock_downloader, mock_run, mock_logger):
-
- mock_run.return_value = 'fake_xbuddy_translation'
- mock_downloader.Run = mock_run
- board = 'lumpy'
- chromeos_root = '/tmp/chromeos'
- log_level = 'average'
-
- trybot_str = 'trybot-lumpy-paladin/R34-5417.0.0-b1506'
- official_str = 'lumpy-release/R34-5417.0.0'
- xbuddy_str = 'latest-dev'
- autotest_path = ''
- debug_path = ''
- download_debug = False
-
- self.settings.GetXbuddyPath(trybot_str, autotest_path, debug_path, board,
- chromeos_root, log_level, download_debug)
- self.assertEqual(mock_run.call_count, 1)
- self.assertEqual(mock_run.call_args_list[0][0], (
- '/tmp/chromeos',
- 'remote/trybot-lumpy-paladin/R34-5417.0.0-b1506',
- '',
- '',
- False,
- ))
-
- mock_run.reset_mock()
- self.settings.GetXbuddyPath(official_str, autotest_path, debug_path, board,
- chromeos_root, log_level, download_debug)
- self.assertEqual(mock_run.call_count, 1)
- self.assertEqual(
- mock_run.call_args_list[0][0],
- ('/tmp/chromeos', 'remote/lumpy-release/R34-5417.0.0', '', '', False))
-
- mock_run.reset_mock()
- self.settings.GetXbuddyPath(xbuddy_str, autotest_path, debug_path, board,
- chromeos_root, log_level, download_debug)
- self.assertEqual(mock_run.call_count, 1)
- self.assertEqual(
- mock_run.call_args_list[0][0],
- ('/tmp/chromeos', 'remote/lumpy/latest-dev', '', '', False))
-
- if mock_logger:
- return
-
-
-if __name__ == '__main__':
- unittest.main()
+ """setting test class."""
+
+ def setUp(self):
+ self.settings = settings.Settings("global_name", "global")
+
+ def test_init(self):
+ self.assertEqual(self.settings.name, "global_name")
+ self.assertEqual(self.settings.settings_type, "global")
+ self.assertIsNone(self.settings.parent)
+
+ def test_set_parent_settings(self):
+ self.assertIsNone(self.settings.parent)
+ settings_parent = {"fake_parent_entry": 0}
+ self.settings.SetParentSettings(settings_parent)
+ self.assertIsNotNone(self.settings.parent)
+ self.assertTrue(isinstance(self.settings.parent, dict))
+ self.assertEqual(self.settings.parent, settings_parent)
+
+ def test_add_field(self):
+ self.assertEqual(self.settings.fields, {})
+ self.settings.AddField(
+ IntegerField(
+ "iterations",
+ default=1,
+ required=False,
+ description="Number of iterations to " "run the test.",
+ )
+ )
+ self.assertEqual(len(self.settings.fields), 1)
+ # Adding the same field twice raises an exception.
+ self.assertRaises(
+ Exception,
+ self.settings.AddField,
+ (
+ IntegerField(
+ "iterations",
+ default=1,
+ required=False,
+ description="Number of iterations to run " "the test.",
+ )
+ ),
+ )
+ res = self.settings.fields["iterations"]
+ self.assertIsInstance(res, IntegerField)
+ self.assertEqual(res.Get(), 1)
+
+ def test_set_field(self):
+ self.assertEqual(self.settings.fields, {})
+ self.settings.AddField(
+ IntegerField(
+ "iterations",
+ default=1,
+ required=False,
+ description="Number of iterations to run the " "test.",
+ )
+ )
+ res = self.settings.fields["iterations"]
+ self.assertEqual(res.Get(), 1)
+
+ self.settings.SetField("iterations", 10)
+ res = self.settings.fields["iterations"]
+ self.assertEqual(res.Get(), 10)
+
+ # Setting a field that's not there raises an exception.
+ self.assertRaises(
+ Exception, self.settings.SetField, "remote", "lumpy1.cros"
+ )
+
+ self.settings.AddField(
+ ListField(
+ "remote",
+ default=[],
+ description="A comma-separated list of ip's of "
+ "chromeos devices to run "
+ "experiments on.",
+ )
+ )
+ self.assertTrue(isinstance(self.settings.fields, dict))
+ self.assertEqual(len(self.settings.fields), 2)
+ res = self.settings.fields["remote"]
+ self.assertEqual(res.Get(), [])
+ self.settings.SetField("remote", "lumpy1.cros", append=True)
+ self.settings.SetField("remote", "lumpy2.cros", append=True)
+ res = self.settings.fields["remote"]
+ self.assertEqual(res.Get(), ["lumpy1.cros", "lumpy2.cros"])
+
+ def test_get_field(self):
+ # Getting a field that's not there raises an exception.
+ self.assertRaises(Exception, self.settings.GetField, "iterations")
+
+ # Getting a required field that hasn't been assigned raises an exception.
+ self.settings.AddField(
+ IntegerField(
+ "iterations",
+ required=True,
+ description="Number of iterations to " "run the test.",
+ )
+ )
+ self.assertIsNotNone(self.settings.fields["iterations"])
+ self.assertRaises(Exception, self.settings.GetField, "iterations")
+
+ # Set the value, then get it.
+ self.settings.SetField("iterations", 5)
+ res = self.settings.GetField("iterations")
+ self.assertEqual(res, 5)
+
+ def test_inherit(self):
+ parent_settings = settings_factory.SettingsFactory().GetSettings(
+ "global", "global"
+ )
+ label_settings = settings_factory.SettingsFactory().GetSettings(
+ "label", "label"
+ )
+ self.assertEqual(parent_settings.GetField("chromeos_root"), "")
+ self.assertEqual(label_settings.GetField("chromeos_root"), "")
+ self.assertIsNone(label_settings.parent)
+
+ parent_settings.SetField("chromeos_root", "/tmp/chromeos")
+ label_settings.SetParentSettings(parent_settings)
+ self.assertEqual(
+ parent_settings.GetField("chromeos_root"), "/tmp/chromeos"
+ )
+ self.assertEqual(label_settings.GetField("chromeos_root"), "")
+ label_settings.Inherit()
+ self.assertEqual(
+ label_settings.GetField("chromeos_root"), "/tmp/chromeos"
+ )
+
+ def test_override(self):
+ self.settings.AddField(
+ ListField(
+ "email",
+ default=[],
+ description="Space-seperated"
+ "list of email addresses to send "
+ "email to.",
+ )
+ )
+
+ global_settings = settings_factory.SettingsFactory().GetSettings(
+ "global", "global"
+ )
+
+ global_settings.SetField("email", "john.doe@google.com", append=True)
+ global_settings.SetField("email", "jane.smith@google.com", append=True)
+
+ res = self.settings.GetField("email")
+ self.assertEqual(res, [])
+
+ self.settings.Override(global_settings)
+ res = self.settings.GetField("email")
+ self.assertEqual(res, ["john.doe@google.com", "jane.smith@google.com"])
+
+ def test_validate(self):
+
+ self.settings.AddField(
+ IntegerField(
+ "iterations",
+ required=True,
+ description="Number of iterations " "to run the test.",
+ )
+ )
+ self.settings.AddField(
+ ListField(
+ "remote",
+ default=[],
+ required=True,
+ description="A comma-separated list "
+ "of ip's of chromeos "
+ "devices to run experiments on.",
+ )
+ )
+ self.settings.AddField(
+ ListField(
+ "email",
+ default=[],
+ description="Space-seperated"
+ "list of email addresses to "
+ "send email to.",
+ )
+ )
+
+ # 'required' fields have not been assigned; should raise an exception.
+ self.assertRaises(Exception, self.settings.Validate)
+ self.settings.SetField("iterations", 2)
+ self.settings.SetField("remote", "x86-alex.cros", append=True)
+ # Should run without exception now.
+ self.settings.Validate()
+
+ @mock.patch.object(logger, "GetLogger")
+ @mock.patch.object(download_images.ImageDownloader, "Run")
+ @mock.patch.object(download_images, "ImageDownloader")
+ def test_get_xbuddy_path(self, mock_downloader, mock_run, mock_logger):
+
+ mock_run.return_value = "fake_xbuddy_translation"
+ mock_downloader.Run = mock_run
+ board = "lumpy"
+ chromeos_root = "/tmp/chromeos"
+ log_level = "average"
+
+ trybot_str = "trybot-lumpy-paladin/R34-5417.0.0-b1506"
+ official_str = "lumpy-release/R34-5417.0.0"
+ xbuddy_str = "latest-dev"
+ autotest_path = ""
+ debug_path = ""
+ download_debug = False
+
+ self.settings.GetXbuddyPath(
+ trybot_str,
+ autotest_path,
+ debug_path,
+ board,
+ chromeos_root,
+ log_level,
+ download_debug,
+ )
+ self.assertEqual(mock_run.call_count, 1)
+ self.assertEqual(
+ mock_run.call_args_list[0][0],
+ (
+ "/tmp/chromeos",
+ "remote/trybot-lumpy-paladin/R34-5417.0.0-b1506",
+ "",
+ "",
+ False,
+ ),
+ )
+
+ mock_run.reset_mock()
+ self.settings.GetXbuddyPath(
+ official_str,
+ autotest_path,
+ debug_path,
+ board,
+ chromeos_root,
+ log_level,
+ download_debug,
+ )
+ self.assertEqual(mock_run.call_count, 1)
+ self.assertEqual(
+ mock_run.call_args_list[0][0],
+ (
+ "/tmp/chromeos",
+ "remote/lumpy-release/R34-5417.0.0",
+ "",
+ "",
+ False,
+ ),
+ )
+
+ mock_run.reset_mock()
+ self.settings.GetXbuddyPath(
+ xbuddy_str,
+ autotest_path,
+ debug_path,
+ board,
+ chromeos_root,
+ log_level,
+ download_debug,
+ )
+ self.assertEqual(mock_run.call_count, 1)
+ self.assertEqual(
+ mock_run.call_args_list[0][0],
+ ("/tmp/chromeos", "remote/lumpy/latest-dev", "", "", False),
+ )
+
+ if mock_logger:
+ return
+
+
+if __name__ == "__main__":
+ unittest.main()
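test_inherit and test_override above capture the intended flow between global and label settings; a condensed sketch of that flow (same importability assumption, values taken from the test):

    import settings_factory

    parent = settings_factory.SettingsFactory().GetSettings("global", "global")
    child = settings_factory.SettingsFactory().GetSettings("label", "label")

    parent.SetField("chromeos_root", "/tmp/chromeos")
    child.SetParentSettings(parent)
    child.Inherit()                         # pull unset fields from the parent
    print(child.GetField("chromeos_root"))  # "/tmp/chromeos"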
diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
index 6bd4ff39..e777a57f 100644
--- a/crosperf/suite_runner.py
+++ b/crosperf/suite_runner.py
@@ -1,332 +1,434 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""SuiteRunner defines the interface from crosperf to test script."""
-from __future__ import division
-from __future__ import print_function
+import contextlib
import json
import os
+from pathlib import Path
import pipes
+import random
import shlex
+import subprocess
import time
from cros_utils import command_executer
-TEST_THAT_PATH = '/usr/bin/test_that'
-TAST_PATH = '/usr/bin/tast'
-CROSFLEET_PATH = 'crosfleet'
-GS_UTIL = 'src/chromium/depot_tools/gsutil.py'
-AUTOTEST_DIR = '/mnt/host/source/src/third_party/autotest/files'
-CHROME_MOUNT_DIR = '/tmp/chrome_root'
+
+# sshwatcher path, relative to ChromiumOS source root.
+SSHWATCHER = "src/platform/dev/contrib/sshwatcher/sshwatcher.go"
+TEST_THAT_PATH = "/usr/bin/test_that"
+TAST_PATH = "/usr/bin/tast"
+CROSFLEET_PATH = "crosfleet"
+GS_UTIL = "src/chromium/depot_tools/gsutil.py"
+AUTOTEST_DIR = "/mnt/host/source/src/third_party/autotest/files"
+CHROME_MOUNT_DIR = "/tmp/chrome_root"
def GetProfilerArgs(profiler_args):
- # Remove "--" from in front of profiler args.
- args_list = shlex.split(profiler_args)
- new_list = []
- for arg in args_list:
- if arg[0:2] == '--':
- arg = arg[2:]
- new_list.append(arg)
- args_list = new_list
-
- # Remove "perf_options=" from middle of profiler args.
- new_list = []
- for arg in args_list:
- idx = arg.find('perf_options=')
- if idx != -1:
- prefix = arg[0:idx]
- suffix = arg[idx + len('perf_options=') + 1:-1]
- new_arg = prefix + "'" + suffix + "'"
- new_list.append(new_arg)
- else:
- new_list.append(arg)
- args_list = new_list
-
- return ' '.join(args_list)
+ # Remove "--" from in front of profiler args.
+ args_list = shlex.split(profiler_args)
+ new_list = []
+ for arg in args_list:
+ if arg[0:2] == "--":
+ arg = arg[2:]
+ new_list.append(arg)
+ args_list = new_list
+
+ # Remove "perf_options=" from middle of profiler args.
+ new_list = []
+ for arg in args_list:
+ idx = arg.find("perf_options=")
+ if idx != -1:
+ prefix = arg[0:idx]
+ suffix = arg[idx + len("perf_options=") + 1 : -1]
+ new_arg = prefix + "'" + suffix + "'"
+ new_list.append(new_arg)
+ else:
+ new_list.append(arg)
+ args_list = new_list
+
+ return " ".join(args_list)
def GetDutConfigArgs(dut_config):
- return 'dut_config={}'.format(pipes.quote(json.dumps(dut_config)))
+ return f"dut_config={pipes.quote(json.dumps(dut_config))}"
+
+
+@contextlib.contextmanager
+def ssh_tunnel(sshwatcher: "os.PathLike", machinename: str) -> str:
+ """Context manager that forwards a TCP port over SSH while active.
+
+    This context manager is used to set up port forwarding before entering the
+ chroot, so that the forwarded port can be used from inside
+ the chroot.
+
+ Args:
+ sshwatcher: Path to sshwatcher.go
+ machinename: Hostname of the machine to connect to.
+
+    Yields:
+        host:port string that can be passed to tast.
+ """
+ # We have to tell sshwatcher which port we want to use.
+ # We pick a port that is likely to be available.
+ port = random.randrange(4096, 32768)
+ cmd = ["go", "run", str(sshwatcher), machinename, str(port)]
+ # Pylint wants us to use subprocess.Popen as a context manager,
+ # but we don't, so that we can ask sshwatcher to terminate and
+ # limit the time we wait for it to do so.
+ # pylint: disable=consider-using-with
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ try:
+ # sshwatcher takes a few seconds before it binds to the port,
+ # presumably due to SSH handshaking taking a while.
+ # Give it 12 seconds before we ask the client to connect.
+ time.sleep(12)
+ yield f"localhost:{port}"
+ finally:
+ proc.terminate()
+ proc.wait(timeout=5)
class SuiteRunner(object):
- """This defines the interface from crosperf to test script."""
-
- def __init__(self,
- dut_config,
- logger_to_use=None,
- log_level='verbose',
- cmd_exec=None,
- cmd_term=None):
- self.logger = logger_to_use
- self.log_level = log_level
- self._ce = cmd_exec or command_executer.GetCommandExecuter(
- self.logger, log_level=self.log_level)
- # DUT command executer.
- # Will be initialized and used within Run.
- self._ct = cmd_term or command_executer.CommandTerminator()
- self.dut_config = dut_config
-
- def Run(self, cros_machine, label, benchmark, test_args, profiler_args):
- machine_name = cros_machine.name
- for i in range(0, benchmark.retries + 1):
- if label.crosfleet:
- ret_tup = self.Crosfleet_Run(label, benchmark, test_args, profiler_args)
- else:
- if benchmark.suite == 'tast':
- ret_tup = self.Tast_Run(machine_name, label, benchmark)
+ """This defines the interface from crosperf to test script."""
+
+ def __init__(
+ self,
+ dut_config,
+ logger_to_use=None,
+ log_level="verbose",
+ cmd_exec=None,
+ cmd_term=None,
+ ):
+ self.logger = logger_to_use
+ self.log_level = log_level
+ self._ce = cmd_exec or command_executer.GetCommandExecuter(
+ self.logger, log_level=self.log_level
+ )
+ # DUT command executer.
+ # Will be initialized and used within Run.
+ self._ct = cmd_term or command_executer.CommandTerminator()
+ self.dut_config = dut_config
+
+ def Run(self, cros_machine, label, benchmark, test_args, profiler_args):
+ machine_name = cros_machine.name
+ for i in range(0, benchmark.retries + 1):
+ if label.crosfleet:
+ ret_tup = self.Crosfleet_Run(
+ label, benchmark, test_args, profiler_args
+ )
+ else:
+ if benchmark.suite == "tast":
+ with ssh_tunnel(
+ Path(label.chromeos_root, SSHWATCHER), machine_name
+ ) as hostport:
+ ret_tup = self.Tast_Run(hostport, label, benchmark)
+ else:
+ ret_tup = self.Test_That_Run(
+ machine_name, label, benchmark, test_args, profiler_args
+ )
+ if ret_tup[0] != 0:
+ self.logger.LogOutput(
+ "benchmark %s failed. Retries left: %s"
+ % (benchmark.name, benchmark.retries - i)
+ )
+ elif i > 0:
+ self.logger.LogOutput(
+ "benchmark %s succeded after %s retries"
+ % (benchmark.name, i)
+ )
+ break
+ else:
+ self.logger.LogOutput(
+ "benchmark %s succeded on first try" % benchmark.name
+ )
+ break
+ return ret_tup
+
+ def RemoveTelemetryTempFile(self, machine, chromeos_root):
+ filename = "telemetry@%s" % machine
+ fullname = os.path.join(chromeos_root, "chroot", "tmp", filename)
+ if os.path.exists(fullname):
+ os.remove(fullname)
+
+ def GenTestArgs(self, benchmark, test_args, profiler_args):
+ args_list = []
+
+ if benchmark.suite != "telemetry_Crosperf" and profiler_args:
+ self.logger.LogFatal(
+ "Tests other than telemetry_Crosperf do not "
+ "support profiler."
+ )
+
+ if test_args:
+ # Strip double quotes off args (so we can wrap them in single
+ # quotes, to pass through to Telemetry).
+ if test_args[0] == '"' and test_args[-1] == '"':
+ test_args = test_args[1:-1]
+ args_list.append("test_args='%s'" % test_args)
+
+ args_list.append(GetDutConfigArgs(self.dut_config))
+
+ if not (
+ benchmark.suite == "telemetry_Crosperf"
+ or benchmark.suite == "crosperf_Wrapper"
+ ):
+ self.logger.LogWarning(
+ "Please make sure the server test has stage for "
+ "device setup.\n"
+ )
else:
- ret_tup = self.Test_That_Run(machine_name, label, benchmark,
- test_args, profiler_args)
- if ret_tup[0] != 0:
- self.logger.LogOutput('benchmark %s failed. Retries left: %s' %
- (benchmark.name, benchmark.retries - i))
- elif i > 0:
- self.logger.LogOutput('benchmark %s succeded after %s retries' %
- (benchmark.name, i))
- break
- else:
- self.logger.LogOutput('benchmark %s succeded on first try' %
- benchmark.name)
- break
- return ret_tup
-
- def RemoveTelemetryTempFile(self, machine, chromeos_root):
- filename = 'telemetry@%s' % machine
- fullname = os.path.join(chromeos_root, 'chroot', 'tmp', filename)
- if os.path.exists(fullname):
- os.remove(fullname)
-
- def GenTestArgs(self, benchmark, test_args, profiler_args):
- args_list = []
-
- if benchmark.suite != 'telemetry_Crosperf' and profiler_args:
- self.logger.LogFatal('Tests other than telemetry_Crosperf do not '
- 'support profiler.')
-
- if test_args:
- # Strip double quotes off args (so we can wrap them in single
- # quotes, to pass through to Telemetry).
- if test_args[0] == '"' and test_args[-1] == '"':
- test_args = test_args[1:-1]
- args_list.append("test_args='%s'" % test_args)
-
- args_list.append(GetDutConfigArgs(self.dut_config))
-
- if not (benchmark.suite == 'telemetry_Crosperf' or
- benchmark.suite == 'crosperf_Wrapper'):
- self.logger.LogWarning('Please make sure the server test has stage for '
- 'device setup.\n')
- else:
- args_list.append('test=%s' % benchmark.test_name)
- if benchmark.suite == 'telemetry_Crosperf':
- args_list.append('run_local=%s' % benchmark.run_local)
- args_list.append(GetProfilerArgs(profiler_args))
-
- return args_list
-
- # TODO(zhizhouy): Currently do not support passing arguments or running
- # customized tast tests, as we do not have such requirements.
- def Tast_Run(self, machine, label, benchmark):
- # Remove existing tast results
- command = 'rm -rf /usr/local/autotest/results/*'
- self._ce.CrosRunCommand(
- command, machine=machine, chromeos_root=label.chromeos_root)
-
- command = ' '.join(
- [TAST_PATH, 'run', '-build=False', machine, benchmark.test_name])
-
- if self.log_level != 'verbose':
- self.logger.LogOutput('Running test.')
- self.logger.LogOutput('CMD: %s' % command)
-
- return self._ce.ChrootRunCommandWOutput(
- label.chromeos_root, command, command_terminator=self._ct)
-
- def Test_That_Run(self, machine, label, benchmark, test_args, profiler_args):
- """Run the test_that test.."""
-
- # Remove existing test_that results
- command = 'rm -rf /usr/local/autotest/results/*'
- self._ce.CrosRunCommand(
- command, machine=machine, chromeos_root=label.chromeos_root)
-
- if benchmark.suite == 'telemetry_Crosperf':
- if not os.path.isdir(label.chrome_src):
- self.logger.LogFatal('Cannot find chrome src dir to '
- 'run telemetry: %s' % label.chrome_src)
- # Check for and remove temporary file that may have been left by
- # previous telemetry runs (and which might prevent this run from
- # working).
- self.RemoveTelemetryTempFile(machine, label.chromeos_root)
-
- # --autotest_dir specifies which autotest directory to use.
- autotest_dir_arg = '--autotest_dir=%s' % (
- label.autotest_path if label.autotest_path else AUTOTEST_DIR)
-
- # --fast avoids unnecessary copies of syslogs.
- fast_arg = '--fast'
- board_arg = '--board=%s' % label.board
-
- args_list = self.GenTestArgs(benchmark, test_args, profiler_args)
- args_arg = '--args=%s' % pipes.quote(' '.join(args_list))
-
- command = ' '.join([
- TEST_THAT_PATH, autotest_dir_arg, fast_arg, board_arg, args_arg,
- machine, benchmark.suite if
- (benchmark.suite == 'telemetry_Crosperf' or
- benchmark.suite == 'crosperf_Wrapper') else benchmark.test_name
- ])
-
- # Use --no-ns-pid so that cros_sdk does not create a different
- # process namespace and we can kill process created easily by their
- # process group.
- chrome_root_options = ('--no-ns-pid '
- '--chrome_root={0} --chrome_root_mount={1} '
- 'FEATURES="-usersandbox" '
- 'CHROME_ROOT={1}'.format(label.chrome_src,
- CHROME_MOUNT_DIR))
-
- if self.log_level != 'verbose':
- self.logger.LogOutput('Running test.')
- self.logger.LogOutput('CMD: %s' % command)
-
- return self._ce.ChrootRunCommandWOutput(
- label.chromeos_root,
- command,
- command_terminator=self._ct,
- cros_sdk_options=chrome_root_options)
-
- def DownloadResult(self, label, task_id):
- gsutil_cmd = os.path.join(label.chromeos_root, GS_UTIL)
- result_dir = 'gs://chromeos-autotest-results/swarming-%s' % task_id
- download_path = os.path.join(label.chromeos_root, 'chroot/tmp')
- ls_command = '%s ls %s' % (gsutil_cmd,
- os.path.join(result_dir, 'autoserv_test'))
- cp_command = '%s -mq cp -r %s %s' % (gsutil_cmd, result_dir, download_path)
-
- # Server sometimes will not be able to generate the result directory right
- # after the test. Will try to access this gs location every 60s for
- # RETRY_LIMIT mins.
- t = 0
- RETRY_LIMIT = 10
- while t < RETRY_LIMIT:
- t += 1
- status = self._ce.RunCommand(ls_command, print_to_console=False)
- if status == 0:
- break
- if t < RETRY_LIMIT:
- self.logger.LogOutput('Result directory not generated yet, '
- 'retry (%d) in 60s.' % t)
+ args_list.append("test=%s" % benchmark.test_name)
+ if benchmark.suite == "telemetry_Crosperf":
+ args_list.append("run_local=%s" % benchmark.run_local)
+ args_list.append(GetProfilerArgs(profiler_args))
+
+ return args_list
+
+    # TODO(zhizhouy): We currently do not support passing arguments or running
+    # customized tast tests, as we do not have such requirements.
+ def Tast_Run(self, machine, label, benchmark):
+ # Remove existing tast results
+ command = "rm -rf /usr/local/autotest/results/*"
+ self._ce.CrosRunCommand(
+ command, machine=machine, chromeos_root=label.chromeos_root
+ )
+
+ command = " ".join(
+ [TAST_PATH, "run", "-build=False", machine, benchmark.test_name]
+ )
+
+ if self.log_level != "verbose":
+ self.logger.LogOutput("Running test.")
+ self.logger.LogOutput("CMD: %s" % command)
+
+ return self._ce.ChrootRunCommandWOutput(
+ label.chromeos_root, command, command_terminator=self._ct
+ )
+
+ def Test_That_Run(
+ self, machine, label, benchmark, test_args, profiler_args
+ ):
+ """Run the test_that test.."""
+
+ # Remove existing test_that results
+ command = "rm -rf /usr/local/autotest/results/*"
+ self._ce.CrosRunCommand(
+ command, machine=machine, chromeos_root=label.chromeos_root
+ )
+
+ if benchmark.suite == "telemetry_Crosperf":
+ if not os.path.isdir(label.chrome_src):
+ self.logger.LogFatal(
+ "Cannot find chrome src dir to "
+ "run telemetry: %s" % label.chrome_src
+ )
+ # Check for and remove temporary file that may have been left by
+ # previous telemetry runs (and which might prevent this run from
+ # working).
+ self.RemoveTelemetryTempFile(machine, label.chromeos_root)
+
+ # --autotest_dir specifies which autotest directory to use.
+ autotest_dir_arg = "--autotest_dir=%s" % (
+ label.autotest_path if label.autotest_path else AUTOTEST_DIR
+ )
+
+ # --fast avoids unnecessary copies of syslogs.
+ fast_arg = "--fast"
+ board_arg = "--board=%s" % label.board
+
+ args_list = self.GenTestArgs(benchmark, test_args, profiler_args)
+ args_arg = "--args=%s" % pipes.quote(" ".join(args_list))
+
+ command = " ".join(
+ [
+ TEST_THAT_PATH,
+ autotest_dir_arg,
+ fast_arg,
+ board_arg,
+ args_arg,
+ machine,
+ benchmark.suite
+ if (
+ benchmark.suite == "telemetry_Crosperf"
+ or benchmark.suite == "crosperf_Wrapper"
+ )
+ else benchmark.test_name,
+ ]
+ )
+
+        # Use --no-ns-pid so that cros_sdk does not create a different
+        # process namespace, and we can easily kill the processes it creates
+        # via their process group.
+ chrome_root_options = (
+ f"--no-ns-pid "
+ f"--chrome_root={label.chrome_src} --chrome_root_mount={CHROME_MOUNT_DIR} "
+ f'FEATURES="-usersandbox" '
+ f"CHROME_ROOT={CHROME_MOUNT_DIR}"
+ )
+
+ if self.log_level != "verbose":
+ self.logger.LogOutput("Running test.")
+ self.logger.LogOutput("CMD: %s" % command)
+
+ return self._ce.ChrootRunCommandWOutput(
+ label.chromeos_root,
+ command,
+ command_terminator=self._ct,
+ cros_sdk_options=chrome_root_options,
+ )
+
+ def DownloadResult(self, label, task_id):
+ gsutil_cmd = os.path.join(label.chromeos_root, GS_UTIL)
+ result_dir = "gs://chromeos-autotest-results/swarming-%s" % task_id
+ download_path = os.path.join(label.chromeos_root, "chroot/tmp")
+ ls_command = "%s ls %s" % (
+ gsutil_cmd,
+ os.path.join(result_dir, "autoserv_test"),
+ )
+ cp_command = "%s -mq cp -r %s %s" % (
+ gsutil_cmd,
+ result_dir,
+ download_path,
+ )
+
+        # The server sometimes is not able to generate the result directory right
+        # after the test. Try to access this gs location every 60s for up to
+        # RETRY_LIMIT minutes.
+ t = 0
+ RETRY_LIMIT = 10
+ while t < RETRY_LIMIT:
+ t += 1
+ status = self._ce.RunCommand(ls_command, print_to_console=False)
+ if status == 0:
+ break
+ if t < RETRY_LIMIT:
+ self.logger.LogOutput(
+ "Result directory not generated yet, "
+ "retry (%d) in 60s." % t
+ )
+ time.sleep(60)
+ else:
+ self.logger.LogOutput(
+ "No result directory for task %s" % task_id
+ )
+ return status
+
+ # Wait for 60s to make sure server finished writing to gs location.
time.sleep(60)
- else:
- self.logger.LogOutput('No result directory for task %s' % task_id)
+
+ status = self._ce.RunCommand(cp_command)
+ if status != 0:
+ self.logger.LogOutput(
+ "Cannot download results from task %s" % task_id
+ )
+ else:
+ self.logger.LogOutput("Result downloaded for task %s" % task_id)
return status
- # Wait for 60s to make sure server finished writing to gs location.
- time.sleep(60)
-
- status = self._ce.RunCommand(cp_command)
- if status != 0:
- self.logger.LogOutput('Cannot download results from task %s' % task_id)
- else:
- self.logger.LogOutput('Result downloaded for task %s' % task_id)
- return status
-
- def Crosfleet_Run(self, label, benchmark, test_args, profiler_args):
- """Run the test via crosfleet.."""
- options = []
- if label.board:
- options.append('-board=%s' % label.board)
- if label.build:
- options.append('-image=%s' % label.build)
- # TODO: now only put toolchain pool here, user need to be able to specify
- # which pool to use. Need to request feature to not use this option at all.
- options.append('-pool=toolchain')
-
- args_list = self.GenTestArgs(benchmark, test_args, profiler_args)
- options.append('-test-args=%s' % pipes.quote(' '.join(args_list)))
-
- dimensions = []
- for dut in label.remote:
- dimensions.append('-dim dut_name:%s' % dut.rstrip('.cros'))
-
- command = (('%s create-test %s %s %s') % \
- (CROSFLEET_PATH, ' '.join(dimensions), ' '.join(options),
- benchmark.suite if
- (benchmark.suite == 'telemetry_Crosperf' or
- benchmark.suite == 'crosperf_Wrapper')
- else benchmark.test_name))
-
- if self.log_level != 'verbose':
- self.logger.LogOutput('Starting crosfleet test.')
- self.logger.LogOutput('CMD: %s' % command)
- ret_tup = self._ce.RunCommandWOutput(command, command_terminator=self._ct)
-
- if ret_tup[0] != 0:
- self.logger.LogOutput('Crosfleet test not created successfully.')
- return ret_tup
-
- # Std output of the command will look like:
- # Created request at https://ci.chromium.org/../cros_test_platform/b12345
- # We want to parse it and get the id number of the task, which is the
- # number in the very end of the link address.
- task_id = ret_tup[1].strip().split('b')[-1]
-
- command = ('crosfleet wait-task %s' % task_id)
- if self.log_level != 'verbose':
- self.logger.LogOutput('Waiting for crosfleet test to finish.')
- self.logger.LogOutput('CMD: %s' % command)
-
- ret_tup = self._ce.RunCommandWOutput(command, command_terminator=self._ct)
-
- # The output of `wait-task` command will be a combination of verbose and a
- # json format result in the end. The json result looks like this:
- # {"task-result":
- # {"name":"Test Platform Invocation",
- # "state":"", "failure":false, "success":true,
- # "task-run-id":"12345",
- # "task-run-url":"https://ci.chromium.org/.../cros_test_platform/b12345",
- # "task-logs-url":""
- # },
- # "stdout":"",
- # "child-results":
- # [{"name":"graphics_WebGLAquarium",
- # "state":"", "failure":false, "success":true, "task-run-id":"",
- # "task-run-url":"https://chromeos-swarming.appspot.com/task?id=1234",
- # "task-logs-url":"https://stainless.corp.google.com/1234/"}
- # ]
- # }
- # We need the task id of the child-results to download result.
- output = json.loads(ret_tup[1].split('\n')[-1])
- output = output['child-results'][0]
- if output['success']:
- task_id = output['task-run-url'].split('=')[-1]
- if self.DownloadResult(label, task_id) == 0:
- result_dir = '\nResults placed in tmp/swarming-%s\n' % task_id
- return (ret_tup[0], result_dir, ret_tup[2])
- return ret_tup
-
- def CommandTerminator(self):
- return self._ct
-
- def Terminate(self):
- self._ct.Terminate()
+ def Crosfleet_Run(self, label, benchmark, test_args, profiler_args):
+ """Run the test via crosfleet.."""
+ options = []
+ if label.board:
+ options.append("-board=%s" % label.board)
+ if label.build:
+ options.append("-image=%s" % label.build)
+        # TODO: For now only the toolchain pool is used; users need to be able to
+        # specify which pool to use. Need to request a feature to not use this
+        # option at all.
+ options.append("-pool=toolchain")
+
+ args_list = self.GenTestArgs(benchmark, test_args, profiler_args)
+ options.append("-test-args=%s" % pipes.quote(" ".join(args_list)))
+
+ dimensions = []
+ for dut in label.remote:
+ dimensions.append("-dim dut_name:%s" % dut.rstrip(".cros"))
+
+ command = ("%s create-test %s %s %s") % (
+ CROSFLEET_PATH,
+ " ".join(dimensions),
+ " ".join(options),
+ benchmark.suite
+ if (
+ benchmark.suite == "telemetry_Crosperf"
+ or benchmark.suite == "crosperf_Wrapper"
+ )
+ else benchmark.test_name,
+ )
+
+ if self.log_level != "verbose":
+ self.logger.LogOutput("Starting crosfleet test.")
+ self.logger.LogOutput("CMD: %s" % command)
+ ret_tup = self._ce.RunCommandWOutput(
+ command, command_terminator=self._ct
+ )
+
+ if ret_tup[0] != 0:
+ self.logger.LogOutput("Crosfleet test not created successfully.")
+ return ret_tup
+
+        # The standard output of the command will look like:
+        # Created request at https://ci.chromium.org/../cros_test_platform/b12345
+        # We want to parse it and get the task id, which is the number at the
+        # very end of the link address.
+ task_id = ret_tup[1].strip().split("b")[-1]
+
+ command = "crosfleet wait-task %s" % task_id
+ if self.log_level != "verbose":
+ self.logger.LogOutput("Waiting for crosfleet test to finish.")
+ self.logger.LogOutput("CMD: %s" % command)
+
+ ret_tup = self._ce.RunCommandWOutput(
+ command, command_terminator=self._ct
+ )
+
+        # The output of the `wait-task` command is a combination of verbose output
+        # and a json-format result at the end. The json result looks like this:
+ # {"task-result":
+ # {"name":"Test Platform Invocation",
+ # "state":"", "failure":false, "success":true,
+ # "task-run-id":"12345",
+ # "task-run-url":"https://ci.chromium.org/.../cros_test_platform/b12345",
+ # "task-logs-url":""
+ # },
+ # "stdout":"",
+ # "child-results":
+ # [{"name":"graphics_WebGLAquarium",
+ # "state":"", "failure":false, "success":true, "task-run-id":"",
+ # "task-run-url":"https://chromeos-swarming.appspot.com/task?id=1234",
+ # "task-logs-url":"https://stainless.corp.google.com/1234/"}
+ # ]
+ # }
+ # We need the task id of the child-results to download result.
+ output = json.loads(ret_tup[1].split("\n")[-1])
+ output = output["child-results"][0]
+ if output["success"]:
+ task_id = output["task-run-url"].split("=")[-1]
+ if self.DownloadResult(label, task_id) == 0:
+ result_dir = "\nResults placed in tmp/swarming-%s\n" % task_id
+ return (ret_tup[0], result_dir, ret_tup[2])
+ return ret_tup
+
+ def CommandTerminator(self):
+ return self._ct
+
+ def Terminate(self):
+ self._ct.Terminate()
class MockSuiteRunner(object):
- """Mock suite runner for test."""
+ """Mock suite runner for test."""
- def __init__(self):
- self._true = True
+ def __init__(self):
+ self._true = True
- def Run(self, *_args):
- if self._true:
- return [0, '', '']
- else:
- return [0, '', '']
+ def Run(self, *_args):
+ if self._true:
+ return [0, "", ""]
+ else:
+ return [0, "", ""]
diff --git a/crosperf/suite_runner_unittest.py b/crosperf/suite_runner_unittest.py
index c1eacb32..c936a074 100755
--- a/crosperf/suite_runner_unittest.py
+++ b/crosperf/suite_runner_unittest.py
@@ -1,304 +1,411 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
-# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
+# Copyright 2014 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for suite_runner."""
-from __future__ import print_function
+import contextlib
import json
-
import unittest
import unittest.mock as mock
-import suite_runner
-import label
-
from benchmark import Benchmark
-
from cros_utils import command_executer
from cros_utils import logger
+import label
from machine_manager import MockCrosMachine
+import suite_runner
class SuiteRunnerTest(unittest.TestCase):
- """Class of SuiteRunner test."""
- mock_json = mock.Mock(spec=json)
- mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
- mock_cmd_term = mock.Mock(spec=command_executer.CommandTerminator)
- mock_logger = mock.Mock(spec=logger.Logger)
- mock_label = label.MockLabel('lumpy', 'build', 'lumpy_chromeos_image', '', '',
- '/tmp/chromeos', 'lumpy',
- ['lumpy1.cros', 'lumpy.cros2'], '', '', False,
- 'average', 'gcc', False, '')
- telemetry_crosperf_bench = Benchmark(
- 'b1_test', # name
- 'octane', # test_name
- '', # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- 'record -e cycles', # perf_args
- 'telemetry_Crosperf', # suite
- True) # show_all_results
-
- crosperf_wrapper_bench = Benchmark(
- 'b2_test', # name
- 'webgl', # test_name
- '', # test_args
- 3, # iterations
- False, # rm_chroot_tmp
- '', # perf_args
- 'crosperf_Wrapper') # suite
-
- tast_bench = Benchmark(
- 'b3_test', # name
- 'platform.ReportDiskUsage', # test_name
- '', # test_args
- 1, # iterations
- False, # rm_chroot_tmp
- '', # perf_args
- 'tast') # suite
-
- def __init__(self, *args, **kwargs):
- super(SuiteRunnerTest, self).__init__(*args, **kwargs)
- self.crosfleet_run_args = []
- self.test_that_args = []
- self.tast_args = []
- self.call_crosfleet_run = False
- self.call_test_that_run = False
- self.call_tast_run = False
-
- def setUp(self):
- self.runner = suite_runner.SuiteRunner({}, self.mock_logger, 'verbose',
- self.mock_cmd_exec,
- self.mock_cmd_term)
-
- def test_get_profiler_args(self):
- input_str = ("--profiler=custom_perf --profiler_args='perf_options"
- '="record -a -e cycles,instructions"\'')
- output_str = ("profiler=custom_perf profiler_args='record -a -e "
- "cycles,instructions'")
- res = suite_runner.GetProfilerArgs(input_str)
- self.assertEqual(res, output_str)
-
- def test_get_dut_config_args(self):
- dut_config = {'enable_aslr': False, 'top_interval': 1.0}
- output_str = ('dut_config='
- "'"
- '{"enable_aslr": '
- 'false, "top_interval": 1.0}'
- "'"
- '')
- res = suite_runner.GetDutConfigArgs(dut_config)
- self.assertEqual(res, output_str)
-
- def test_run(self):
-
- def reset():
- self.test_that_args = []
- self.crosfleet_run_args = []
- self.tast_args = []
- self.call_test_that_run = False
- self.call_crosfleet_run = False
- self.call_tast_run = False
-
- def FakeCrosfleetRun(test_label, benchmark, test_args, profiler_args):
- self.crosfleet_run_args = [
- test_label, benchmark, test_args, profiler_args
- ]
- self.call_crosfleet_run = True
- return 'Ran FakeCrosfleetRun'
-
- def FakeTestThatRun(machine, test_label, benchmark, test_args,
- profiler_args):
- self.test_that_args = [
- machine, test_label, benchmark, test_args, profiler_args
- ]
- self.call_test_that_run = True
- return 'Ran FakeTestThatRun'
-
- def FakeTastRun(machine, test_label, benchmark):
- self.tast_args = [machine, test_label, benchmark]
- self.call_tast_run = True
- return 'Ran FakeTastRun'
-
- self.runner.Crosfleet_Run = FakeCrosfleetRun
- self.runner.Test_That_Run = FakeTestThatRun
- self.runner.Tast_Run = FakeTastRun
-
- self.runner.dut_config['enable_aslr'] = False
- self.runner.dut_config['cooldown_time'] = 0
- self.runner.dut_config['governor'] = 'fake_governor'
- self.runner.dut_config['cpu_freq_pct'] = 65
- self.runner.dut_config['intel_pstate'] = 'no_hwp'
- machine = 'fake_machine'
- cros_machine = MockCrosMachine(machine, self.mock_label.chromeos_root,
- self.mock_logger)
- test_args = ''
- profiler_args = ''
-
- # Test crosfleet run for telemetry_Crosperf and crosperf_Wrapper benchmarks.
- self.mock_label.crosfleet = True
- reset()
- self.runner.Run(cros_machine, self.mock_label, self.crosperf_wrapper_bench,
- test_args, profiler_args)
- self.assertTrue(self.call_crosfleet_run)
- self.assertFalse(self.call_test_that_run)
- self.assertEqual(self.crosfleet_run_args,
- [self.mock_label, self.crosperf_wrapper_bench, '', ''])
-
- reset()
- self.runner.Run(cros_machine, self.mock_label,
- self.telemetry_crosperf_bench, test_args, profiler_args)
- self.assertTrue(self.call_crosfleet_run)
- self.assertFalse(self.call_test_that_run)
- self.assertEqual(self.crosfleet_run_args,
- [self.mock_label, self.telemetry_crosperf_bench, '', ''])
-
- # Test test_that run for telemetry_Crosperf and crosperf_Wrapper benchmarks.
- self.mock_label.crosfleet = False
- reset()
- self.runner.Run(cros_machine, self.mock_label, self.crosperf_wrapper_bench,
- test_args, profiler_args)
- self.assertTrue(self.call_test_that_run)
- self.assertFalse(self.call_crosfleet_run)
- self.assertEqual(
- self.test_that_args,
- ['fake_machine', self.mock_label, self.crosperf_wrapper_bench, '', ''])
-
- reset()
- self.runner.Run(cros_machine, self.mock_label,
- self.telemetry_crosperf_bench, test_args, profiler_args)
- self.assertTrue(self.call_test_that_run)
- self.assertFalse(self.call_crosfleet_run)
- self.assertEqual(self.test_that_args, [
- 'fake_machine', self.mock_label, self.telemetry_crosperf_bench, '', ''
- ])
-
- # Test tast run for tast benchmarks.
- reset()
- self.runner.Run(cros_machine, self.mock_label, self.tast_bench, '', '')
- self.assertTrue(self.call_tast_run)
- self.assertFalse(self.call_test_that_run)
- self.assertFalse(self.call_crosfleet_run)
- self.assertEqual(self.tast_args,
- ['fake_machine', self.mock_label, self.tast_bench])
-
- def test_gen_test_args(self):
- test_args = '--iterations=2'
- perf_args = 'record -a -e cycles'
-
- # Test crosperf_Wrapper benchmarks arg list generation
- args_list = ["test_args='--iterations=2'", "dut_config='{}'", 'test=webgl']
- res = self.runner.GenTestArgs(self.crosperf_wrapper_bench, test_args, '')
- self.assertCountEqual(res, args_list)
-
- # Test telemetry_Crosperf benchmarks arg list generation
- args_list = [
- "test_args='--iterations=2'", "dut_config='{}'", 'test=octane',
- 'run_local=False'
- ]
- args_list.append(suite_runner.GetProfilerArgs(perf_args))
- res = self.runner.GenTestArgs(self.telemetry_crosperf_bench, test_args,
- perf_args)
- self.assertCountEqual(res, args_list)
-
- @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
- @mock.patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput')
- def test_tast_run(self, mock_chroot_runcmd, mock_cros_runcmd):
- mock_chroot_runcmd.return_value = 0
- self.mock_cmd_exec.ChrootRunCommandWOutput = mock_chroot_runcmd
- self.mock_cmd_exec.CrosRunCommand = mock_cros_runcmd
- res = self.runner.Tast_Run('lumpy1.cros', self.mock_label, self.tast_bench)
- self.assertEqual(mock_cros_runcmd.call_count, 1)
- self.assertEqual(mock_chroot_runcmd.call_count, 1)
- self.assertEqual(res, 0)
- self.assertEqual(mock_cros_runcmd.call_args_list[0][0],
- ('rm -rf /usr/local/autotest/results/*',))
- args_list = mock_chroot_runcmd.call_args_list[0][0]
- args_dict = mock_chroot_runcmd.call_args_list[0][1]
- self.assertEqual(len(args_list), 2)
- self.assertEqual(args_dict['command_terminator'], self.mock_cmd_term)
-
- @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommand')
- @mock.patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput')
- @mock.patch.object(logger.Logger, 'LogFatal')
- def test_test_that_run(self, mock_log_fatal, mock_chroot_runcmd,
- mock_cros_runcmd):
- mock_log_fatal.side_effect = SystemExit()
- self.runner.logger.LogFatal = mock_log_fatal
- # Test crosperf_Wrapper benchmarks cannot take perf_args
- raised_exception = False
- try:
- self.runner.Test_That_Run('lumpy1.cros', self.mock_label,
- self.crosperf_wrapper_bench, '',
- 'record -a -e cycles')
- except SystemExit:
- raised_exception = True
- self.assertTrue(raised_exception)
-
- mock_chroot_runcmd.return_value = 0
- self.mock_cmd_exec.ChrootRunCommandWOutput = mock_chroot_runcmd
- self.mock_cmd_exec.CrosRunCommand = mock_cros_runcmd
- res = self.runner.Test_That_Run('lumpy1.cros', self.mock_label,
- self.crosperf_wrapper_bench,
- '--iterations=2', '')
- self.assertEqual(mock_cros_runcmd.call_count, 1)
- self.assertEqual(mock_chroot_runcmd.call_count, 1)
- self.assertEqual(res, 0)
- self.assertEqual(mock_cros_runcmd.call_args_list[0][0],
- ('rm -rf /usr/local/autotest/results/*',))
- args_list = mock_chroot_runcmd.call_args_list[0][0]
- args_dict = mock_chroot_runcmd.call_args_list[0][1]
- self.assertEqual(len(args_list), 2)
- self.assertEqual(args_dict['command_terminator'], self.mock_cmd_term)
-
- @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
- @mock.patch.object(json, 'loads')
- def test_crosfleet_run_client(self, mock_json_loads, mock_runcmd):
-
- def FakeDownloadResult(l, task_id):
- if l and task_id:
- self.assertEqual(task_id, '12345')
- return 0
-
- mock_runcmd.return_value = (
- 0,
- 'Created Swarming task https://swarming/task/b12345',
- '',
+ """Class of SuiteRunner test."""
+
+ mock_json = mock.Mock(spec=json)
+ mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+ mock_cmd_term = mock.Mock(spec=command_executer.CommandTerminator)
+ mock_logger = mock.Mock(spec=logger.Logger)
+ mock_label = label.MockLabel(
+ "lumpy",
+ "build",
+ "lumpy_chromeos_image",
+ "",
+ "",
+ "/tmp/chromeos",
+ "lumpy",
+ ["lumpy1.cros", "lumpy.cros2"],
+ "",
+ "",
+ False,
+ "average",
+ "gcc",
+ False,
+ "",
+ )
+ telemetry_crosperf_bench = Benchmark(
+ "b1_test", # name
+ "octane", # test_name
+ "", # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ "record -e cycles", # perf_args
+ "telemetry_Crosperf", # suite
+ True,
+ ) # show_all_results
+
+ crosperf_wrapper_bench = Benchmark(
+ "b2_test", # name
+ "webgl", # test_name
+ "", # test_args
+ 3, # iterations
+ False, # rm_chroot_tmp
+ "", # perf_args
+ "crosperf_Wrapper",
+ ) # suite
+
+ tast_bench = Benchmark(
+ "b3_test", # name
+ "platform.ReportDiskUsage", # test_name
+ "", # test_args
+ 1, # iterations
+ False, # rm_chroot_tmp
+ "", # perf_args
+ "tast",
+ ) # suite
+
+ def __init__(self, *args, **kwargs):
+ super(SuiteRunnerTest, self).__init__(*args, **kwargs)
+ self.crosfleet_run_args = []
+ self.test_that_args = []
+ self.tast_args = []
+ self.call_crosfleet_run = False
+ self.call_test_that_run = False
+ self.call_tast_run = False
+
+ def setUp(self):
+ self.runner = suite_runner.SuiteRunner(
+ {},
+ self.mock_logger,
+ "verbose",
+ self.mock_cmd_exec,
+ self.mock_cmd_term,
+ )
+
+ def test_get_profiler_args(self):
+ input_str = (
+ "--profiler=custom_perf --profiler_args='perf_options"
+ '="record -a -e cycles,instructions"\''
+ )
+ output_str = (
+ "profiler=custom_perf profiler_args='record -a -e "
+ "cycles,instructions'"
+ )
+ res = suite_runner.GetProfilerArgs(input_str)
+ self.assertEqual(res, output_str)
+
+ def test_get_dut_config_args(self):
+ dut_config = {"enable_aslr": False, "top_interval": 1.0}
+ output_str = (
+ "dut_config="
+ "'"
+ '{"enable_aslr": '
+ 'false, "top_interval": 1.0}'
+ "'"
+ ""
+ )
+ res = suite_runner.GetDutConfigArgs(dut_config)
+ self.assertEqual(res, output_str)
+
+ @mock.patch("suite_runner.ssh_tunnel")
+ def test_run(self, ssh_tunnel):
+ @contextlib.contextmanager
+ def mock_ssh_tunnel(_watcher, _host):
+ yield "fakelocalhost:1234"
+
+ ssh_tunnel.side_effect = mock_ssh_tunnel
+
+ def reset():
+ self.test_that_args = []
+ self.crosfleet_run_args = []
+ self.tast_args = []
+ self.call_test_that_run = False
+ self.call_crosfleet_run = False
+ self.call_tast_run = False
+
+ def FakeCrosfleetRun(test_label, benchmark, test_args, profiler_args):
+ self.crosfleet_run_args = [
+ test_label,
+ benchmark,
+ test_args,
+ profiler_args,
+ ]
+ self.call_crosfleet_run = True
+ return "Ran FakeCrosfleetRun"
+
+ def FakeTestThatRun(
+ machine, test_label, benchmark, test_args, profiler_args
+ ):
+ self.test_that_args = [
+ machine,
+ test_label,
+ benchmark,
+ test_args,
+ profiler_args,
+ ]
+ self.call_test_that_run = True
+ return "Ran FakeTestThatRun"
+
+ def FakeTastRun(machine, test_label, benchmark):
+ self.tast_args = [machine, test_label, benchmark]
+ self.call_tast_run = True
+ return "Ran FakeTastRun"
+
+ self.runner.Crosfleet_Run = FakeCrosfleetRun
+ self.runner.Test_That_Run = FakeTestThatRun
+ self.runner.Tast_Run = FakeTastRun
+
+ self.runner.dut_config["enable_aslr"] = False
+ self.runner.dut_config["cooldown_time"] = 0
+ self.runner.dut_config["governor"] = "fake_governor"
+ self.runner.dut_config["cpu_freq_pct"] = 65
+ self.runner.dut_config["intel_pstate"] = "no_hwp"
+ machine = "fake_machine"
+ cros_machine = MockCrosMachine(
+ machine, self.mock_label.chromeos_root, self.mock_logger
+ )
+ test_args = ""
+ profiler_args = ""
+
+ # Test crosfleet run for telemetry_Crosperf and crosperf_Wrapper benchmarks.
+ self.mock_label.crosfleet = True
+ reset()
+ self.runner.Run(
+ cros_machine,
+ self.mock_label,
+ self.crosperf_wrapper_bench,
+ test_args,
+ profiler_args,
+ )
+ self.assertTrue(self.call_crosfleet_run)
+ self.assertFalse(self.call_test_that_run)
+ self.assertEqual(
+ self.crosfleet_run_args,
+ [self.mock_label, self.crosperf_wrapper_bench, "", ""],
+ )
+
+ reset()
+ self.runner.Run(
+ cros_machine,
+ self.mock_label,
+ self.telemetry_crosperf_bench,
+ test_args,
+ profiler_args,
+ )
+ self.assertTrue(self.call_crosfleet_run)
+ self.assertFalse(self.call_test_that_run)
+ self.assertEqual(
+ self.crosfleet_run_args,
+ [self.mock_label, self.telemetry_crosperf_bench, "", ""],
+ )
+
+ # Test test_that run for telemetry_Crosperf and crosperf_Wrapper benchmarks.
+ self.mock_label.crosfleet = False
+ reset()
+ self.runner.Run(
+ cros_machine,
+ self.mock_label,
+ self.crosperf_wrapper_bench,
+ test_args,
+ profiler_args,
+ )
+ self.assertTrue(self.call_test_that_run)
+ self.assertFalse(self.call_crosfleet_run)
+ self.assertEqual(
+ self.test_that_args,
+ [
+ "fake_machine",
+ self.mock_label,
+ self.crosperf_wrapper_bench,
+ "",
+ "",
+ ],
+ )
+
+ reset()
+ self.runner.Run(
+ cros_machine,
+ self.mock_label,
+ self.telemetry_crosperf_bench,
+ test_args,
+ profiler_args,
+ )
+ self.assertTrue(self.call_test_that_run)
+ self.assertFalse(self.call_crosfleet_run)
+ self.assertEqual(
+ self.test_that_args,
+ [
+ "fake_machine",
+ self.mock_label,
+ self.telemetry_crosperf_bench,
+ "",
+ "",
+ ],
+ )
+
+ # Test tast run for tast benchmarks.
+ reset()
+ self.runner.Run(cros_machine, self.mock_label, self.tast_bench, "", "")
+ self.assertTrue(self.call_tast_run)
+ self.assertFalse(self.call_test_that_run)
+ self.assertFalse(self.call_crosfleet_run)
+ self.assertEqual(
+ self.tast_args,
+ ["fakelocalhost:1234", self.mock_label, self.tast_bench],
+ )
+
+ def test_gen_test_args(self):
+ test_args = "--iterations=2"
+ perf_args = "record -a -e cycles"
+
+ # Test crosperf_Wrapper benchmarks arg list generation
+ args_list = [
+ "test_args='--iterations=2'",
+ "dut_config='{}'",
+ "test=webgl",
+ ]
+ res = self.runner.GenTestArgs(
+ self.crosperf_wrapper_bench, test_args, ""
+ )
+ self.assertCountEqual(res, args_list)
+
+ # Test telemetry_Crosperf benchmarks arg list generation
+ args_list = [
+ "test_args='--iterations=2'",
+ "dut_config='{}'",
+ "test=octane",
+ "run_local=False",
+ ]
+ args_list.append(suite_runner.GetProfilerArgs(perf_args))
+ res = self.runner.GenTestArgs(
+ self.telemetry_crosperf_bench, test_args, perf_args
+ )
+ self.assertCountEqual(res, args_list)
+
+ @mock.patch.object(command_executer.CommandExecuter, "CrosRunCommand")
+ @mock.patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ )
+ def test_tast_run(self, mock_chroot_runcmd, mock_cros_runcmd):
+ mock_chroot_runcmd.return_value = 0
+ self.mock_cmd_exec.ChrootRunCommandWOutput = mock_chroot_runcmd
+ self.mock_cmd_exec.CrosRunCommand = mock_cros_runcmd
+ res = self.runner.Tast_Run(
+ "lumpy1.cros", self.mock_label, self.tast_bench
+ )
+ self.assertEqual(mock_cros_runcmd.call_count, 1)
+ self.assertEqual(mock_chroot_runcmd.call_count, 1)
+ self.assertEqual(res, 0)
+ self.assertEqual(
+ mock_cros_runcmd.call_args_list[0][0],
+ ("rm -rf /usr/local/autotest/results/*",),
+ )
+ args_list = mock_chroot_runcmd.call_args_list[0][0]
+ args_dict = mock_chroot_runcmd.call_args_list[0][1]
+ self.assertEqual(len(args_list), 2)
+ self.assertEqual(args_dict["command_terminator"], self.mock_cmd_term)
+
+ @mock.patch.object(command_executer.CommandExecuter, "CrosRunCommand")
+ @mock.patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
)
- self.mock_cmd_exec.RunCommandWOutput = mock_runcmd
-
- mock_json_loads.return_value = {
- 'child-results': [{
- 'success': True,
- 'task-run-url': 'https://swarming/task?id=12345'
- }]
- }
- self.mock_json.loads = mock_json_loads
-
- self.mock_label.crosfleet = True
- self.runner.DownloadResult = FakeDownloadResult
- res = self.runner.Crosfleet_Run(self.mock_label,
- self.crosperf_wrapper_bench, '', '')
- ret_tup = (0, '\nResults placed in tmp/swarming-12345\n', '')
- self.assertEqual(res, ret_tup)
- self.assertEqual(mock_runcmd.call_count, 2)
-
- args_list = mock_runcmd.call_args_list[0][0]
- args_dict = mock_runcmd.call_args_list[0][1]
- self.assertEqual(len(args_list), 1)
- self.assertEqual(args_dict['command_terminator'], self.mock_cmd_term)
-
- args_list = mock_runcmd.call_args_list[1][0]
- self.assertEqual(args_list[0], ('crosfleet wait-task 12345'))
- self.assertEqual(args_dict['command_terminator'], self.mock_cmd_term)
-
-
-if __name__ == '__main__':
- unittest.main()
+ @mock.patch.object(logger.Logger, "LogFatal")
+ def test_test_that_run(
+ self, mock_log_fatal, mock_chroot_runcmd, mock_cros_runcmd
+ ):
+ mock_log_fatal.side_effect = SystemExit()
+ self.runner.logger.LogFatal = mock_log_fatal
+        # Test that crosperf_Wrapper benchmarks cannot take perf_args.
+ raised_exception = False
+ try:
+ self.runner.Test_That_Run(
+ "lumpy1.cros",
+ self.mock_label,
+ self.crosperf_wrapper_bench,
+ "",
+ "record -a -e cycles",
+ )
+ except SystemExit:
+ raised_exception = True
+ self.assertTrue(raised_exception)
+
+ mock_chroot_runcmd.return_value = 0
+ self.mock_cmd_exec.ChrootRunCommandWOutput = mock_chroot_runcmd
+ self.mock_cmd_exec.CrosRunCommand = mock_cros_runcmd
+ res = self.runner.Test_That_Run(
+ "lumpy1.cros",
+ self.mock_label,
+ self.crosperf_wrapper_bench,
+ "--iterations=2",
+ "",
+ )
+ self.assertEqual(mock_cros_runcmd.call_count, 1)
+ self.assertEqual(mock_chroot_runcmd.call_count, 1)
+ self.assertEqual(res, 0)
+ self.assertEqual(
+ mock_cros_runcmd.call_args_list[0][0],
+ ("rm -rf /usr/local/autotest/results/*",),
+ )
+ args_list = mock_chroot_runcmd.call_args_list[0][0]
+ args_dict = mock_chroot_runcmd.call_args_list[0][1]
+ self.assertEqual(len(args_list), 2)
+ self.assertEqual(args_dict["command_terminator"], self.mock_cmd_term)
+
+ @mock.patch.object(command_executer.CommandExecuter, "RunCommandWOutput")
+ @mock.patch.object(json, "loads")
+ def test_crosfleet_run_client(self, mock_json_loads, mock_runcmd):
+ def FakeDownloadResult(l, task_id):
+ if l and task_id:
+ self.assertEqual(task_id, "12345")
+ return 0
+
+ mock_runcmd.return_value = (
+ 0,
+ "Created Swarming task https://swarming/task/b12345",
+ "",
+ )
+ self.mock_cmd_exec.RunCommandWOutput = mock_runcmd
+
+ mock_json_loads.return_value = {
+ "child-results": [
+ {
+ "success": True,
+ "task-run-url": "https://swarming/task?id=12345",
+ }
+ ]
+ }
+ self.mock_json.loads = mock_json_loads
+
+ self.mock_label.crosfleet = True
+ self.runner.DownloadResult = FakeDownloadResult
+ res = self.runner.Crosfleet_Run(
+ self.mock_label, self.crosperf_wrapper_bench, "", ""
+ )
+ ret_tup = (0, "\nResults placed in tmp/swarming-12345\n", "")
+ self.assertEqual(res, ret_tup)
+ self.assertEqual(mock_runcmd.call_count, 2)
+
+ args_list = mock_runcmd.call_args_list[0][0]
+ args_dict = mock_runcmd.call_args_list[0][1]
+ self.assertEqual(len(args_list), 1)
+ self.assertEqual(args_dict["command_terminator"], self.mock_cmd_term)
+
+ args_list = mock_runcmd.call_args_list[1][0]
+        self.assertEqual(args_list[0], "crosfleet wait-task 12345")
+ self.assertEqual(args_dict["command_terminator"], self.mock_cmd_term)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/crosperf/test_flag.py b/crosperf/test_flag.py
index 6fa3b589..17c17a3d 100644
--- a/crosperf/test_flag.py
+++ b/crosperf/test_flag.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -9,8 +9,8 @@ is_test = [False]
def SetTestMode(flag):
- is_test[0] = flag
+ is_test[0] = flag
def GetTestMode():
- return is_test[0]
+ return is_test[0]
diff --git a/crosperf/translate_xbuddy.py b/crosperf/translate_xbuddy.py
index 80187f9b..e6a53a94 100755
--- a/crosperf/translate_xbuddy.py
+++ b/crosperf/translate_xbuddy.py
@@ -1,39 +1,42 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module to translate the xbuddy config."""
-from __future__ import print_function
import os
import sys
-if '/mnt/host/source/src/third_party/toolchain-utils/crosperf' in sys.path:
- dev_path = os.path.expanduser('~/trunk/chromite/lib/xbuddy')
- sys.path.append(dev_path)
+
+if "/mnt/host/source/src/third_party/toolchain-utils/crosperf" in sys.path:
+ dev_path = os.path.expanduser("~/trunk/chromite/lib/xbuddy")
+ sys.path.append(dev_path)
else:
- print('This script can only be run from inside a ChromeOS chroot. Please '
- 'enter your chroot, go to ~/src/third_party/toolchain-utils/crosperf'
- ' and try again.')
- sys.exit(0)
+ print(
+ "This script can only be run from inside a ChromeOS chroot. Please "
+ "enter your chroot, go to ~/src/third_party/toolchain-utils/crosperf"
+ " and try again."
+ )
+ sys.exit(0)
# pylint: disable=import-error,wrong-import-position
import xbuddy
def Main(xbuddy_string):
- if not os.path.exists('./xbuddy_config.ini'):
- config_path = os.path.expanduser(
- '~/trunk/chromite/lib/xbuddy/xbuddy_config.ini')
- os.symlink(config_path, './xbuddy_config.ini')
- x = xbuddy.XBuddy(manage_builds=False, static_dir='/tmp/devserver/static')
- build_id = x.Translate(os.path.split(xbuddy_string))
- return build_id
-
-
-if __name__ == '__main__':
- print(Main(sys.argv[1]))
- sys.exit(0)
+ if not os.path.exists("./xbuddy_config.ini"):
+ config_path = os.path.expanduser(
+ "~/trunk/chromite/lib/xbuddy/xbuddy_config.ini"
+ )
+ os.symlink(config_path, "./xbuddy_config.ini")
+ x = xbuddy.XBuddy(manage_builds=False, static_dir="/tmp/devserver/static")
+ build_id = x.Translate(os.path.split(xbuddy_string))
+ return build_id
+
+
+if __name__ == "__main__":
+ print(Main(sys.argv[1]))
+ sys.exit(0)