diff options
44 files changed, 3595 insertions, 1540 deletions
diff --git a/crosperf/autotest_runner.py b/crosperf/autotest_runner.py new file mode 100644 index 00000000..5df71ac5 --- /dev/null +++ b/crosperf/autotest_runner.py @@ -0,0 +1,40 @@ +#!/usr/bin/python + +# Copyright 2011 Google Inc. All Rights Reserved. + +from utils import command_executer +from utils import utils + + +class AutotestRunner(object): + def __init__(self, logger_to_use=None): + self._logger = logger_to_use + self._ce = command_executer.GetCommandExecuter(self._logger) + self._ct = command_executer.CommandTerminator() + + def Run(self, machine_name, chromeos_root, board, autotest_name, + autotest_args, profile_counters, profile_type): + if profile_counters and profile_type: + profiler_args = "-e " + " -e ".join(profile_counters) + # TODO(asharif): Add an option to do -g. + autotest_args += (" --profile --profiler_args='%s' --profile_type='%s'" + % (profiler_args, profile_type)) + options = "" + if board: + options += " --board=%s" % board + if autotest_args: + options += " %s" % autotest_args + command = ("./run_remote_tests.sh --remote=%s %s %s" % + (machine_name, options, autotest_name)) + return self._ce.ChrootRunCommand(chromeos_root, command, True, self._ct) + + def Terminate(self): + self._ct.Terminate() + + +class MockAutotestRunner(object): + def __init__(self): + pass + + def Run(self, *args): + return ["", "", 0] diff --git a/crosperf/benchmark.py b/crosperf/benchmark.py new file mode 100644 index 00000000..fa12d934 --- /dev/null +++ b/crosperf/benchmark.py @@ -0,0 +1,24 @@ +#!/usr/bin/python + +# Copyright 2011 Google Inc. All Rights Reserved. + + +class Benchmark(object): + """Class representing a benchmark to be run. + + Contains details of the autotest, arguments to pass to the autotest, + iterations to run the autotest and so on. Note that the benchmark name + can be different to the autotest name. For example, you may want to have + two different benchmarks which run the same autotest with different + arguments. 
+ """ + + def __init__(self, name, autotest_name, autotest_args, iterations, + outlier_range, profile_counters, profile_type): + self.name = name + self.autotest_name = autotest_name + self.autotest_args = autotest_args + self.iterations = iterations + self.outlier_range = outlier_range + self.profile_counters = profile_counters + self.profile_type = profile_type diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py new file mode 100644 index 00000000..cbc79561 --- /dev/null +++ b/crosperf/benchmark_run.py @@ -0,0 +1,220 @@ +#!/usr/bin/python + +# Copyright 2011 Google Inc. All Rights Reserved. + +import datetime +import os +import re +import threading +import time +import traceback +from results_cache import Result +from utils import logger + +STATUS_FAILED = "FAILED" +STATUS_SUCCEEDED = "SUCCEEDED" +STATUS_IMAGING = "IMAGING" +STATUS_RUNNING = "RUNNING" +STATUS_WAITING = "WAITING" +STATUS_PENDING = "PENDING" + + +class BenchmarkRun(threading.Thread): + def __init__(self, name, benchmark_name, autotest_name, autotest_args, + label_name, chromeos_root, chromeos_image, board, iteration, + cache_conditions, outlier_range, profile_counters, profile_type, + machine_manager, cache, autotest_runner, perf_processor, + logger_to_use): + threading.Thread.__init__(self) + self.name = name + self._logger = logger_to_use + self.benchmark_name = benchmark_name + self.autotest_name = autotest_name + self.autotest_args = autotest_args + self.label_name = label_name + self.chromeos_root = chromeos_root + self.chromeos_image = os.path.expanduser(chromeos_image) + self.board = board + self.iteration = iteration + self.results = {} + self.terminated = False + self.retval = None + self.status = STATUS_PENDING + self.run_completed = False + self.outlier_range = outlier_range + self.profile_counters = profile_counters + self.profile_type = profile_type + self.machine_manager = machine_manager + self.cache = cache + self.autotest_runner = autotest_runner + 
self.perf_processor = perf_processor + self.machine = None + self.full_name = self.autotest_name + self.cache_conditions = cache_conditions + self.runs_complete = 0 + self.cache_hit = False + self.perf_results = None + self.failure_reason = "" + + def MeanExcludingOutliers(self, array, outlier_range): + """Return the arithmetic mean excluding outliers.""" + mean = sum(array) / len(array) + array2 = [] + + for v in array: + if mean != 0 and abs(v - mean) / mean < outlier_range: + array2.append(v) + + if array2: + return sum(array2) / len(array2) + else: + return mean + + def ParseResults(self, output): + p = re.compile("^-+.*?^-+", re.DOTALL | re.MULTILINE) + matches = p.findall(output) + for i in range(len(matches)): + results = matches[i] + results_dict = {} + for line in results.splitlines()[1:-1]: + mo = re.match("(.*\S)\s+\[\s+(PASSED|FAILED)\s+\]", line) + if mo: + results_dict[mo.group(1)] = mo.group(2) + continue + mo = re.match("(.*\S)\s+(.*)", line) + if mo: + results_dict[mo.group(1)] = mo.group(2) + + return results_dict + return {} + + def ProcessResults(self, result, cache_hit): + # Generate results from the output file. + results_dir = self._GetResultsDir(result.out) + self.full_name = os.path.basename(results_dir) + self.results = self.ParseResults(result.out) + + # Store the autotest output in the cache also. + if not cache_hit: + self.cache.StoreResult(result) + self.cache.StoreAutotestOutput(results_dir) + + # Generate a perf report and cache it. + if self.profile_type: + if cache_hit: + self.perf_results = self.cache.ReadPerfResults() + else: + self.perf_results = (self.perf_processor. + GeneratePerfResults(results_dir, + self.chromeos_root, + self.board)) + self.cache.StorePerfResults(self.perf_results) + + # If there are valid results from perf stat, combine them with the + # autotest results. 
+ if self.perf_results: + stat_results = self.perf_processor.ParseStatResults(self.perf_results) + self.results = dict(self.results.items() + stat_results.items()) + + def _GetResultsDir(self, output): + mo = re.search("Results placed in (\S+)", output) + if mo: + result = mo.group(1) + return result + raise Exception("Could not find results directory.") + + def run(self): + try: + # Just use the first machine for running the cached version, + # without locking it. + self.cache.Init(self.chromeos_image, + self.chromeos_root, + self.autotest_name, + self.iteration, + self.autotest_args, + self.machine_manager.GetMachines()[0].name, + self.board, + self.cache_conditions, + self._logger) + + result = self.cache.ReadResult() + self.cache_hit = (result is not None) + + if result: + self._logger.LogOutput("%s: Cache hit." % self.name) + self._logger.LogOutput(result.out + "\n" + result.err) + else: + self._logger.LogOutput("%s: No cache hit." % self.name) + self.status = STATUS_WAITING + # Try to acquire a machine now. + self.machine = self.AcquireMachine() + self.cache.remote = self.machine.name + result = self.RunTest(self.machine) + + if self.terminated: + return + + if not result.retval: + self.status = STATUS_SUCCEEDED + else: + if self.status != STATUS_FAILED: + self.status = STATUS_FAILED + self.failure_reason = "Return value of autotest was non-zero." 
+ + self.ProcessResults(result, self.cache_hit) + + except Exception, e: + self._logger.LogError("Benchmark run: '%s' failed: %s" % (self.name, e)) + traceback.print_exc() + if self.status != STATUS_FAILED: + self.status = STATUS_FAILED + self.failure_reason = str(e) + finally: + if self.machine: + self._logger.LogOutput("Releasing machine: %s" % self.machine.name) + self.machine_manager.ReleaseMachine(self.machine) + self._logger.LogOutput("Released machine: %s" % self.machine.name) + + def Terminate(self): + self.terminated = True + self.autotest_runner.Terminate() + if self.status != STATUS_FAILED: + self.status = STATUS_FAILED + self.failure_reason = "Thread terminated." + + def AcquireMachine(self): + while True: + if self.terminated: + raise Exception("Thread terminated while trying to acquire machine.") + machine = self.machine_manager.AcquireMachine(self.chromeos_image) + if machine: + self._logger.LogOutput("%s: Machine %s acquired at %s" % + (self.name, + machine.name, + datetime.datetime.now())) + break + else: + sleep_duration = 10 + time.sleep(sleep_duration) + return machine + + def RunTest(self, machine): + self.status = STATUS_IMAGING + self.machine_manager.ImageMachine(machine, + self.chromeos_image, + self.board) + self.status = "%s %s" % (STATUS_RUNNING, self.autotest_name) + [retval, out, err] = self.autotest_runner.Run(machine.name, + self.chromeos_root, + self.board, + self.autotest_name, + self.autotest_args, + self.profile_counters, + self.profile_type) + self.run_completed = True + result = Result(out, err, retval) + + return result + + def SetCacheConditions(self, cache_conditions): + self.cache_conditions = cache_conditions diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py new file mode 100755 index 00000000..a449a33b --- /dev/null +++ b/crosperf/benchmark_run_unittest.py @@ -0,0 +1,42 @@ +#!/usr/bin/python + +# Copyright (c) 2011 The Chromium OS Authors. All rights reserved. 
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import unittest
from autotest_runner import MockAutotestRunner
from benchmark_run import BenchmarkRun
from machine_manager import MockMachineManager
from perf_processor import MockPerfProcessor
from results_cache import MockResultsCache
from utils import logger


class BenchmarkRunTest(unittest.TestCase):
  """Smoke test for BenchmarkRun wired up entirely with mock collaborators."""

  def testDryRun(self):
    """A fully mocked BenchmarkRun thread should start without real I/O."""
    machine_manager = MockMachineManager()
    machine_manager.AddMachine("chromeos-alex1")
    benchmark_run = BenchmarkRun("test run",
                                 "PageCycler",
                                 "PageCycler",
                                 "",
                                 "image1",
                                 "/tmp/test",
                                 "/tmp/test/image",
                                 "x86-alex",
                                 1,
                                 [],
                                 0.2,
                                 "",
                                 "none",
                                 machine_manager,
                                 MockResultsCache(),
                                 MockAutotestRunner(),
                                 MockPerfProcessor(),
                                 logger.GetLogger())
    benchmark_run.start()


if __name__ == "__main__":
  unittest.main()
class ColumnChart(object):
  """Accumulates chart data and emits the Google Visualization JS/HTML."""

  def __init__(self, title, width, height):
    self.title = title
    # The div id is the title stripped of non-alphanumeric characters.
    self.chart_div = filter(str.isalnum, title)
    self.width = width
    self.height = height
    self.columns = []   # (column_type, name) pairs, in declaration order.
    self.rows = []      # One list of cell values per data row.
    self.series = []    # (column_index - 1, series_type, color) triples.

  def AddSeries(self, column_name, series_type, color):
    """Attach a per-series style to the first column named column_name."""
    for index, column in enumerate(self.columns):
      if column[1] == column_name:
        # Series indices are offset by one: column 0 is the domain axis.
        self.series.append((index - 1, series_type, color))
        break

  def AddColumn(self, name, column_type):
    """Declare a data column; stored as a (type, name) pair."""
    self.columns.append((column_type, name))

  def AddRow(self, row):
    """Append a single row of cell values."""
    self.rows.append(row)

  def GetJavascript(self):
    """Return the JS that populates a DataTable and draws the combo chart."""
    pieces = ["var data = new google.visualization.DataTable();\n"]
    for column in self.columns:
      pieces.append("data.addColumn('%s', '%s');\n" % column)
    pieces.append("data.addRows(%s);\n" % len(self.rows))
    for row_index, row in enumerate(self.rows):
      for column_index in range(len(self.columns)):
        value = row[column_index]
        if isinstance(value, str):
          # Quote string cells so they are string literals in the JS.
          value = "'%s'" % value
        pieces.append("data.setValue(%s, %s, %s);\n" %
                      (row_index, column_index, value))

    series_javascript = ""
    for series in self.series:
      series_javascript += "%s: {type: '%s', color: '%s'}, " % series

    chart_add_javascript = """
var chart_%s = new google.visualization.ComboChart(
    document.getElementById('%s'));
chart_%s.draw(data, {width: %s, height: %s, title: '%s', legend: 'none',
    seriesType: "bars", lineWidth: 0, pointSize: 5, series: {%s},
    vAxis: {minValue: 0}})
"""
    return "".join(pieces) + chart_add_javascript % (
        self.chart_div, self.chart_div, self.chart_div, self.width,
        self.height, self.title, series_javascript)

  def GetDiv(self):
    """Return the placeholder div the chart is rendered into."""
    return "<div id='%s' class='chart'></div>" % self.chart_div
python $(dirname $0)/crosperf.py "$@" diff --git a/crosperf/crosperf.py b/crosperf/crosperf.py new file mode 100755 index 00000000..e00ae75d --- /dev/null +++ b/crosperf/crosperf.py @@ -0,0 +1,94 @@ +#!/usr/bin/python + +# Copyright 2011 Google Inc. All Rights Reserved. + +"""The driver script for running performance benchmarks on ChromeOS.""" + +import atexit +import optparse +import os +import sys +from experiment_runner import ExperimentRunner +from experiment_runner import MockExperimentRunner +from experiment_factory import ExperimentFactory +from experiment_file import ExperimentFile +from help import Help +from settings_factory import GlobalSettings +from utils import logger + + +l = logger.GetLogger() + + +class MyIndentedHelpFormatter(optparse.IndentedHelpFormatter): + def format_description(self, description): + return description + + +def SetupParserOptions(parser): + """Add all options to the parser.""" + parser.add_option("--dry_run", + dest="dry_run", + help=("Parse the experiment file and " + "show what will be done"), + action="store_true", + default=False) + # Allow each of the global fields to be overridden by passing in + # options. Add each global field as an option. 
+ option_settings = GlobalSettings("") + for field_name in option_settings.fields: + field = option_settings.fields[field_name] + parser.add_option("--%s" % field.name, + dest=field.name, + help=field.description, + action="store") + + +def ConvertOptionsToSettings(options): + """Convert options passed in into global settings.""" + option_settings = GlobalSettings("option_settings") + for option_name in options.__dict__: + if (options.__dict__[option_name] is not None and + option_name in option_settings.fields): + option_settings.SetField(option_name, options.__dict__[option_name]) + return option_settings + + +def Cleanup(experiment): + """Handler function which is registered to the atexit handler.""" + experiment.Cleanup() + + +def Main(argv): + parser = optparse.OptionParser(usage=Help().GetUsage(), + description=Help().GetHelp(), + formatter=MyIndentedHelpFormatter(), + version="%prog 0.1") + SetupParserOptions(parser) + options, args = parser.parse_args(argv) + + # Convert the relevant options that are passed in into a settings + # object which will override settings in the experiment file. + option_settings = ConvertOptionsToSettings(options) + + if len(args) == 2: + experiment_filename = args[1] + else: + parser.error("Invalid number arguments.") + + working_directory = os.getcwd() + experiment_file = ExperimentFile(open(experiment_filename, "rb"), + option_settings) + experiment = ExperimentFactory().GetExperiment(experiment_file, + working_directory) + + atexit.register(Cleanup, experiment) + + if options.dry_run: + runner = MockExperimentRunner(experiment) + else: + runner = ExperimentRunner(experiment) + runner.Run() + +if __name__ == "__main__": + Main(sys.argv) diff --git a/crosperf/crosperf_test.py b/crosperf/crosperf_test.py new file mode 100755 index 00000000..0c50e7b5 --- /dev/null +++ b/crosperf/crosperf_test.py @@ -0,0 +1,40 @@ +#!/usr/bin/python + +# Copyright 2011 Google Inc. All Rights Reserved. 
+ +import os +import tempfile +import unittest +import crosperf +from utils.file_utils import FileUtils + + +EXPERIMENT_FILE_1 = """ + board: x86-alex + remote: chromeos-alex3 + + benchmark: PageCycler { + iterations: 3 + } + + image1 { + chromeos_image: /usr/local/google/cros_image1.bin + } + + image2 { + chromeos_image: /usr/local/google/cros_image2.bin + } + """ + + +class CrosPerfTest(unittest.TestCase): + def testDryRun(self): + filehandle, filename = tempfile.mkstemp() + os.write(filehandle, EXPERIMENT_FILE_1) + crosperf.Main(["", filename, "--dry_run"]) + os.remove(filename) + + +if __name__ == "__main__": + FileUtils.Configure(True) + unittest.main() diff --git a/crosperf/experiment.py b/crosperf/experiment.py new file mode 100644 index 00000000..83889b06 --- /dev/null +++ b/crosperf/experiment.py @@ -0,0 +1,128 @@ +#!/usr/bin/python + +# Copyright 2011 Google Inc. All Rights Reserved. + +import os +import time +from autotest_runner import AutotestRunner +from benchmark_run import BenchmarkRun +from machine_manager import MachineManager +from perf_processor import PerfProcessor +from results_cache import ResultsCache +from results_report import HTMLResultsReport +from utils import logger +from utils.file_utils import FileUtils + + +class Experiment(object): + """Class representing an Experiment to be run.""" + + def __init__(self, name, remote, rerun_if_failed, working_directory, + chromeos_root, cache_conditions, labels, benchmarks, + experiment_file): + self.name = name + self.rerun_if_failed = rerun_if_failed + self.working_directory = working_directory + self.remote = remote + self.chromeos_root = chromeos_root + self.cache_conditions = cache_conditions + self.experiment_file = experiment_file + self.results_directory = os.path.join(self.working_directory, + self.name + "_results") + + self.labels = labels + self.benchmarks = benchmarks + self.num_complete = 0 + + # We need one chromeos_root to run the benchmarks in, but it doesn't + # matter where it 
is, unless the ABIs are different. + if not chromeos_root: + for label in self.labels: + if label.chromeos_root: + chromeos_root = label.chromeos_root + if not chromeos_root: + raise Exception("No chromeos_root given and could not determine one from " + "the image path.") + + self.machine_manager = MachineManager(chromeos_root) + self.l = logger.GetLogger() + + for machine in remote: + self.machine_manager.AddMachine(machine) + + self.start_time = None + self.benchmark_runs = self._GenerateBenchmarkRuns() + + def _GenerateBenchmarkRuns(self): + """Generate benchmark runs from labels and benchmark defintions.""" + benchmark_runs = [] + for label in self.labels: + for benchmark in self.benchmarks: + for iteration in range(1, benchmark.iterations + 1): + + benchmark_run_name = "%s: %s (%s)" % (label.name, benchmark.name, + iteration) + full_name = "%s_%s_%s" % (label.name, benchmark.name, iteration) + logger_to_use = logger.Logger(os.path.dirname(__file__), + "run.%s" % (full_name), + True) + ar = AutotestRunner(logger_to_use=logger_to_use) + rc = ResultsCache() + pp = PerfProcessor(logger_to_use=logger_to_use) + benchmark_run = BenchmarkRun(benchmark_run_name, + benchmark.name, + benchmark.autotest_name, + benchmark.autotest_args, + label.name, + label.chromeos_root, + label.chromeos_image, + label.board, + iteration, + self.cache_conditions, + benchmark.outlier_range, + benchmark.profile_counters, + benchmark.profile_type, + self.machine_manager, + rc, + ar, + pp, + logger_to_use) + + benchmark_runs.append(benchmark_run) + return benchmark_runs + + def Build(self): + pass + + def Terminate(self): + for t in self.benchmark_runs: + if t.isAlive(): + self.l.LogError("Terminating run: '%s'." 
% t.name) + t.Terminate() + + def IsComplete(self): + if self.active_threads: + for t in self.active_threads: + if t.isAlive(): + t.join(0) + if not t.isAlive(): + self.num_complete += 1 + self.active_threads.remove(t) + return False + return True + + def Run(self): + self.start_time = time.time() + self.active_threads = [] + for benchmark_run in self.benchmark_runs: + # Set threads to daemon so program exits when ctrl-c is pressed. + benchmark_run.daemon = True + benchmark_run.start() + self.active_threads.append(benchmark_run) + + def SetCacheConditions(self, cache_conditions): + for benchmark_run in self.benchmark_runs: + benchmark_run.SetCacheConditions(cache_conditions) + + def Cleanup(self): + self.machine_manager.Cleanup() diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py new file mode 100644 index 00000000..d3c717ae --- /dev/null +++ b/crosperf/experiment_factory.py @@ -0,0 +1,72 @@ +#!/usr/bin/python + +# Copyright 2011 Google Inc. All Rights Reserved. + +from benchmark import Benchmark +from experiment import Experiment +from label import Label +from results_cache import CacheConditions + + +class ExperimentFactory(object): + """Factory class for building an Experiment, given an ExperimentFile as input. + + This factory is currently hardcoded to produce an experiment for running + ChromeOS benchmarks, but the idea is that in the future, other types + of experiments could be produced. + """ + + def GetExperiment(self, experiment_file, working_directory): + """Construct an experiment from an experiment file.""" + global_settings = experiment_file.GetGlobalSettings() + experiment_name = global_settings.GetField("name") + remote = global_settings.GetField("remote") + rerun_if_failed = global_settings.GetField("rerun_if_failed") + chromeos_root = global_settings.GetField("chromeos_root") + + # Default cache hit conditions. The image checksum in the cache and the + # computed checksum of the image must match. 
Also a cache file must exist. + cache_conditions = [CacheConditions.CACHE_FILE_EXISTS, + CacheConditions.CHECKSUMS_MATCH] + if global_settings.GetField("rerun_if_failed"): + cache_conditions.append(CacheConditions.RUN_SUCCEEDED) + if global_settings.GetField("rerun"): + cache_conditions.append(CacheConditions.FALSE) + if global_settings.GetField("exact_remote"): + cache_conditions.append(CacheConditions.REMOTES_MATCH) + + # Construct benchmarks. + benchmarks = [] + all_benchmark_settings = experiment_file.GetSettings("benchmark") + for benchmark_settings in all_benchmark_settings: + benchmark_name = benchmark_settings.name + autotest_name = benchmark_settings.GetField("autotest_name") + if not autotest_name: + autotest_name = benchmark_name + autotest_args = benchmark_settings.GetField("autotest_args") + iterations = benchmark_settings.GetField("iterations") + outlier_range = benchmark_settings.GetField("outlier_range") + profile_counters = benchmark_settings.GetField("profile_counters") + profile_type = benchmark_settings.GetField("profile_type") + benchmark = Benchmark(benchmark_name, autotest_name, autotest_args, + iterations, outlier_range, profile_counters, + profile_type) + benchmarks.append(benchmark) + + # Construct labels. 
+ labels = [] + all_label_settings = experiment_file.GetSettings("label") + for label_settings in all_label_settings: + label_name = label_settings.name + image = label_settings.GetField("chromeos_image") + chromeos_root = label_settings.GetField("chromeos_root") + board = label_settings.GetField("board") + label = Label(label_name, image, chromeos_root, board) + labels.append(label) + + experiment = Experiment(experiment_name, remote, rerun_if_failed, + working_directory, chromeos_root, + cache_conditions, labels, benchmarks, + experiment_file.Canonicalize()) + + return experiment diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py new file mode 100755 index 00000000..e91295da --- /dev/null +++ b/crosperf/experiment_factory_unittest.py @@ -0,0 +1,50 @@ +#!/usr/bin/python + +# Copyright 2011 Google Inc. All Rights Reserved. + +import StringIO +import unittest +from experiment_factory import ExperimentFactory +from experiment_file import ExperimentFile +from utils.file_utils import FileUtils + + +EXPERIMENT_FILE_1 = """ + board: x86-alex + remote: chromeos-alex3 + + benchmark: PageCycler { + iterations: 3 + } + + image1 { + chromeos_image: /usr/local/google/cros_image1.bin + } + + image2 { + chromeos_image: /usr/local/google/cros_image2.bin + } + """ + + +class ExperimentFactoryTest(unittest.TestCase): + def testLoadExperimentFile1(self): + experiment_file = ExperimentFile(StringIO.StringIO(EXPERIMENT_FILE_1)) + experiment = ExperimentFactory().GetExperiment(experiment_file, "") + self.assertEqual(experiment.remote, ["chromeos-alex3"]) + + self.assertEqual(len(experiment.benchmarks), 1) + self.assertEqual(experiment.benchmarks[0].name, "PageCycler") + self.assertEqual(experiment.benchmarks[0].autotest_name, "PageCycler") + self.assertEqual(experiment.benchmarks[0].iterations, 3) + + self.assertEqual(len(experiment.labels), 2) + self.assertEqual(experiment.labels[0].chromeos_image, + "/usr/local/google/cros_image1.bin") + 
self.assertEqual(experiment.labels[0].board, + "x86-alex") + + +if __name__ == "__main__": + FileUtils.Configure(True) + unittest.main() diff --git a/crosperf/experiment_file.py b/crosperf/experiment_file.py new file mode 100644 index 00000000..bde2a4d7 --- /dev/null +++ b/crosperf/experiment_file.py @@ -0,0 +1,179 @@ +#!/usr/bin/python + +# Copyright (c) 2011 The Chromium OS Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import re +from settings import Settings +from settings_factory import SettingsFactory + + +class ExperimentFile(object): + """Class for parsing the experiment file format. + + The grammar for this format is: + + experiment = { _FIELD_VALUE_RE | settings } + settings = _OPEN_SETTINGS_RE + { _FIELD_VALUE_RE } + _CLOSE_SETTINGS_RE + + Where the regexes are terminals defined below. This results in an format + which looks something like: + + field_name: value + settings_type: settings_name { + field_name: value + field_name: value + } + """ + + # Field regex, e.g. "iterations: 3" + _FIELD_VALUE_RE = re.compile("(\+)?\s*(\w+?)(?:\.(\S+))?\s*:\s*(.*)") + # Open settings regex, e.g. "label {" + _OPEN_SETTINGS_RE = re.compile("(?:(\w+):)?\s*(\w+)\s*{") + # Close settings regex. + _CLOSE_SETTINGS_RE = re.compile("}") + + def __init__(self, experiment_file, overrides=None): + """Construct object from file-like experiment_file. + + Args: + experiment_file: file-like object with text description of experiment. + overrides: A settings object that will override fields in other settings. + + Raises: + Exception: if invalid build type or description is invalid. 
+ """ + self.all_settings = [] + self.global_settings = SettingsFactory().GetSettings("global", "global") + self.all_settings.append(self.global_settings) + + self._Parse(experiment_file) + + for settings in self.all_settings: + settings.Inherit() + settings.Validate() + if overrides: + settings.Override(overrides) + + def GetSettings(self, settings_type): + """Return nested fields from the experiment file.""" + res = [] + for settings in self.all_settings: + if settings.settings_type == settings_type: + res.append(settings) + return res + + def GetGlobalSettings(self): + """Return the global fields from the experiment file.""" + return self.global_settings + + def _ParseField(self, reader): + """Parse a key/value field.""" + line = reader.CurrentLine().strip() + match = ExperimentFile._FIELD_VALUE_RE.match(line) + append, name, _, text_value = match.groups() + return (name, text_value, append) + + def _ParseSettings(self, reader): + """Parse a settings block.""" + line = reader.CurrentLine().strip() + match = ExperimentFile._OPEN_SETTINGS_RE.match(line) + settings_type = match.group(1) + if settings_type is None: + settings_type = "" + settings_name = match.group(2) + settings = SettingsFactory().GetSettings(settings_name, settings_type) + settings.SetParentSettings(self.global_settings) + + while reader.NextLine(): + line = reader.CurrentLine().strip() + + if not line: + continue + elif ExperimentFile._FIELD_VALUE_RE.match(line): + field = self._ParseField(reader) + settings.SetField(field[0], field[1], field[2]) + elif ExperimentFile._CLOSE_SETTINGS_RE.match(line): + return settings + + raise Exception("Unexpected EOF while parsing settings block.") + + def _Parse(self, experiment_file): + """Parse experiment file and create settings.""" + reader = ExperimentFileReader(experiment_file) + settings_names = {} + try: + while reader.NextLine(): + line = reader.CurrentLine().strip() + + if not line: + continue + elif ExperimentFile._OPEN_SETTINGS_RE.match(line): + 
new_settings = self._ParseSettings(reader) + if new_settings.name in settings_names: + raise Exception("Duplicate settings name: '%s'." % + new_settings.name) + settings_names[new_settings.name] = True + self.all_settings.append(new_settings) + elif ExperimentFile._FIELD_VALUE_RE.match(line): + field = self._ParseField(reader) + self.global_settings.SetField(field[0], field[1], field[2]) + else: + raise Exception("Unexpected line.") + except Exception, err: + raise Exception("Line %d: %s\n==> %s" % (reader.LineNo(), str(err), + reader.CurrentLine(False))) + + def Canonicalize(self): + """Convert parsed experiment file back into an experiment file.""" + res = "" + for field_name in self.global_settings.fields: + field = self.global_settings.fields[field_name] + if field.assigned: + res += "%s: %s\n" % (field.name, field.GetString()) + res += "\n" + + for settings in self.all_settings: + if settings.settings_type != "global": + res += "%s: %s {\n" % (settings.settings_type, settings.name) + for field_name in settings.fields: + field = settings.fields[field_name] + if field.assigned: + res += "\t%s: %s\n" % (field.name, field.GetString()) + res += "}\n\n" + + return res + + +class ExperimentFileReader(object): + """Handle reading lines from an experiment file.""" + + def __init__(self, file_object): + self.file_object = file_object + self.current_line = None + self.current_line_no = 0 + + def CurrentLine(self, strip_comment=True): + """Return the next line from the file, without advancing the iterator.""" + if strip_comment: + return self._StripComment(self.current_line) + return self.current_line + + def NextLine(self, strip_comment=True): + """Advance the iterator and return the next line of the file.""" + self.current_line_no += 1 + self.current_line = self.file_object.readline() + return self.CurrentLine(strip_comment) + + def _StripComment(self, line): + """Strip comments starting with # from a line.""" + if "#" in line: + line = line[:line.find("#")] + line[-1] 
+ return line + + def LineNo(self): + """Return the current line number.""" + return self.current_line_no diff --git a/crosperf/experiment_file_unittest.py b/crosperf/experiment_file_unittest.py new file mode 100755 index 00000000..67da11e5 --- /dev/null +++ b/crosperf/experiment_file_unittest.py @@ -0,0 +1,105 @@ +#!/usr/bin/python + +# Copyright (c) 2011 The Chromium OS Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import StringIO +import unittest +from experiment_file import ExperimentFile + +EXPERIMENT_FILE_1 = """ + board: x86-alex + remote: chromeos-alex3 + + benchmark: PageCycler { + iterations: 3 + } + + image1 { + chromeos_image: /usr/local/google/cros_image1.bin + } + + image2 { + chromeos_image: /usr/local/google/cros_image2.bin + } + """ + +EXPERIMENT_FILE_2 = """ + board: x86-alex + remote: chromeos-alex3 + iterations: 3 + + benchmark: PageCycler { + } + + benchmark: AndroidBench { + iterations: 2 + } + + image1 { + chromeos_image:/usr/local/google/cros_image1.bin + } + + image2 { + chromeos_image: /usr/local/google/cros_image2.bin + } + """ + +EXPERIMENT_FILE_3 = """ + board: x86-alex + remote: chromeos-alex3 + iterations: 3 + + benchmark: PageCycler { + } + + image1 { + chromeos_image:/usr/local/google/cros_image1.bin + } + + image1 { + chromeos_image: /usr/local/google/cros_image2.bin + } + """ + + +class ExperimentFileTest(unittest.TestCase): + def testLoadExperimentFile1(self): + input_file = StringIO.StringIO(EXPERIMENT_FILE_1) + experiment_file = ExperimentFile(input_file) + global_settings = experiment_file.GetGlobalSettings() + self.assertEqual(global_settings.GetField("remote"), ["chromeos-alex3"]) + + benchmark_settings = experiment_file.GetSettings("benchmark") + self.assertEqual(len(benchmark_settings), 1) + self.assertEqual(benchmark_settings[0].name, "PageCycler") + self.assertEqual(benchmark_settings[0].GetField("iterations"), 3) + + 
label_settings = experiment_file.GetSettings("label") + self.assertEqual(len(label_settings), 2) + self.assertEqual(label_settings[0].name, "image1") + self.assertEqual(label_settings[0].GetField("board"), "x86-alex") + self.assertEqual(label_settings[0].GetField("chromeos_image"), + "/usr/local/google/cros_image1.bin") + + def testOverrideSetting(self): + input_file = StringIO.StringIO(EXPERIMENT_FILE_2) + experiment_file = ExperimentFile(input_file) + global_settings = experiment_file.GetGlobalSettings() + self.assertEqual(global_settings.GetField("remote"), ["chromeos-alex3"]) + + benchmark_settings = experiment_file.GetSettings("benchmark") + self.assertEqual(len(benchmark_settings), 2) + self.assertEqual(benchmark_settings[0].name, "PageCycler") + self.assertEqual(benchmark_settings[0].GetField("iterations"), 3) + self.assertEqual(benchmark_settings[1].name, "AndroidBench") + self.assertEqual(benchmark_settings[1].GetField("iterations"), 2) + + def testDuplicateLabel(self): + input_file = StringIO.StringIO(EXPERIMENT_FILE_3) + self.assertRaises(Exception, ExperimentFile, input_file) + + +if __name__ == "__main__": + unittest.main() diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py new file mode 100644 index 00000000..3da1d63d --- /dev/null +++ b/crosperf/experiment_runner.py @@ -0,0 +1,126 @@ +#!/usr/bin/python + +# Copyright 2011 Google Inc. All Rights Reserved. 
class ExperimentRunner(object):
  """Drives an Experiment end-to-end: runs it with periodic status logging,
  then prints, stores and emails the results."""

  # Seconds between full status blocks printed to the log.
  STATUS_TIME_DELAY = 30
  # Seconds between polls of the experiment's completion state.
  THREAD_MONITOR_DELAY = 2

  def __init__(self, experiment):
    self._experiment = experiment
    self.l = logger.GetLogger()
    self._terminated = False

  def _Run(self, experiment):
    """Run the experiment, logging progress every STATUS_TIME_DELAY seconds.

    Ctrl-c terminates the experiment and marks this runner terminated so
    results are not stored afterwards.
    """
    status = ExperimentStatus(experiment)
    experiment.Run()
    last_status_time = 0
    try:
      while not experiment.IsComplete():
        if last_status_time + self.STATUS_TIME_DELAY < time.time():
          last_status_time = time.time()
          border = "=============================="
          self.l.LogOutput(border)
          self.l.LogOutput(status.GetProgressString())
          self.l.LogOutput(status.GetStatusString())
          # Consistency fix: use this runner's logger for the closing
          # border instead of fetching a fresh logger instance.
          self.l.LogOutput(border)
        time.sleep(self.THREAD_MONITOR_DELAY)
    except KeyboardInterrupt:
      self._terminated = True
      self.l.LogError("Ctrl-c pressed. Cleaning up...")
      experiment.Terminate()

  def _PrintTable(self, experiment):
    """Log the text results table for the experiment."""
    self.l.LogOutput(TextResultsReport(experiment).GetReport())

  def _Email(self, experiment):
    """Email text and HTML reports to the invoking user.

    Only emails if at least one benchmark run was actually executed
    (i.e. was not a cache hit).
    """
    send_mail = False
    for benchmark_run in experiment.benchmark_runs:
      if not benchmark_run.cache_hit:
        send_mail = True
        break
    if not send_mail:
      return

    label_names = []
    for label in experiment.labels:
      label_names.append(label.name)
    subject = "%s: %s" % (experiment.name, " vs. ".join(label_names))

    text_report = TextResultsReport(experiment).GetReport()
    text_report = "<pre style='font-size: 13px'>%s</pre>" % text_report
    html_report = HTMLResultsReport(experiment).GetReport()
    attachment = EmailSender.Attachment("report.html", html_report)
    EmailSender().SendEmail([getpass.getuser()],
                            subject,
                            text_report,
                            attachments=[attachment],
                            msg_type="html")

  def _StoreResults(self, experiment):
    """Write the experiment file, HTML report and per-run perf data under
    experiment.results_directory. Skipped entirely if the run was terminated."""
    if self._terminated:
      return
    results_directory = experiment.results_directory
    FileUtils().RmDir(results_directory)
    FileUtils().MkDirP(results_directory)
    self.l.LogOutput("Storing experiment file.")
    experiment_file_path = os.path.join(results_directory,
                                        "experiment.exp")
    FileUtils().WriteFile(experiment_file_path, experiment.experiment_file)

    self.l.LogOutput("Storing results report.")
    results_table_path = os.path.join(results_directory, "results.html")
    report = HTMLResultsReport(experiment).GetReport()
    FileUtils().WriteFile(results_table_path, report)

    self.l.LogOutput("Storing results of each benchmark run.")
    for benchmark_run in experiment.benchmark_runs:
      # Sanitize the run name so it is safe to use as a directory name.
      benchmark_run_name = "".join(c for c in benchmark_run.name
                                   if c.isalnum())
      try:
        if benchmark_run.perf_results:
          benchmark_run_path = os.path.join(results_directory,
                                            benchmark_run_name)
          FileUtils().MkDirP(benchmark_run_path)
          FileUtils().WriteFile(os.path.join(benchmark_run_path, "perf.report"),
                                benchmark_run.perf_results.report)
          FileUtils().WriteFile(os.path.join(benchmark_run_path, "perf.out"),
                                benchmark_run.perf_results.output)
      except Exception as e:
        # Best-effort: failing to store one run's perf data should not
        # abort storing the rest.
        self.l.LogError(e)

  def Run(self):
    """Entry point: run the experiment, print the table, then (unless
    terminated) store results and email a summary."""
    self._Run(self._experiment)
    self._PrintTable(self._experiment)
    if not self._terminated:
      self._StoreResults(self._experiment)
      self._Email(self._experiment)


class MockExperimentRunner(ExperimentRunner):
  """Dry-run variant that only logs what the real runner would do."""

  def __init__(self, experiment):
    super(MockExperimentRunner, self).__init__(experiment)

  def _Run(self, experiment):
    self.l.LogOutput("Would run the following experiment: '%s'." %
                     experiment.name)

  def _PrintTable(self, experiment):
    self.l.LogOutput("Would print the experiment table.")

  def _Email(self, experiment):
    self.l.LogOutput("Would send result email.")

  def _StoreResults(self, experiment):
    self.l.LogOutput("Would store the results.")
class ExperimentStatus(object):
  """Renders human-readable progress and status strings for an Experiment."""

  def __init__(self, experiment):
    self.experiment = experiment
    self.num_total = len(self.experiment.benchmark_runs)

  def _GetProgressBar(self, num_complete, num_total):
    """Return a 50-char textual bar, e.g. "Done: 50% [>>>>>     ]"."""
    ret = "Done: %s%%" % int(100.0 * num_complete / num_total)
    bar_length = 50
    done_char = ">"
    undone_char = " "
    # Explicit floor division keeps the bar width an integer under both
    # Python 2 and Python 3 (plain '/' would yield a float on Python 3
    # and break the string-repetition below).
    num_complete_chars = bar_length * num_complete // num_total
    num_undone_chars = bar_length - num_complete_chars
    ret += " [%s%s]" % (num_complete_chars * done_char, num_undone_chars *
                        undone_char)
    return ret

  def GetProgressString(self):
    """Return current/elapsed time, an ETA estimate and a progress bar."""
    current_time = time.time()
    if self.experiment.start_time:
      elapsed_time = current_time - self.experiment.start_time
    else:
      elapsed_time = 0
    try:
      # ETA scales elapsed time by the ratio of remaining to completed runs.
      eta_seconds = (float(self.num_total - self.experiment.num_complete) *
                     elapsed_time / self.experiment.num_complete)
      eta_seconds = int(eta_seconds)
      eta = datetime.timedelta(seconds=eta_seconds)
    except ZeroDivisionError:
      # Nothing completed yet; no basis for an estimate.
      eta = "Unknown"
    strings = []
    strings.append("Current time: %s Elapsed: %s ETA: %s" %
                   (datetime.datetime.now(),
                    datetime.timedelta(seconds=int(elapsed_time)),
                    eta))
    strings.append(self._GetProgressBar(self.experiment.num_complete,
                                        self.num_total))
    return "\n".join(strings)

  def GetStatusString(self):
    """Return benchmark runs grouped by status plus machine manager state."""
    status_bins = {}
    for benchmark_run in self.experiment.benchmark_runs:
      if benchmark_run.status not in status_bins:
        status_bins[benchmark_run.status] = []
      status_bins[benchmark_run.status].append(benchmark_run)

    status_strings = []
    for key, val in status_bins.items():
      status_strings.append("%s: %s" %
                            (key, self._GetNamesAndIterations(val)))
    result = "Thread Status:\n%s" % "\n".join(status_strings)

    # Add the machine manager status.
    result += "\n" + self.experiment.machine_manager.AsString() + "\n"

    return result

  def _GetNamesAndIterations(self, benchmark_runs):
    """Return " <count> ('name1', 'name2', ...)" for the given runs."""
    strings = []
    for benchmark_run in benchmark_runs:
      strings.append("'%s'" % benchmark_run.name)
    return " %s (%s)" % (len(strings), ", ".join(strings))
class Field(object):
  """A single typed setting in an experiment file.

  Tracks whether a value was explicitly assigned so callers can
  distinguish defaults from user-provided values.
  """

  def __init__(self, name, required, default, inheritable, description):
    self.name = name
    self.required = required
    self.assigned = False
    self.default = default
    self._value = default
    self.inheritable = inheritable
    self.description = description

  def Set(self, value, parse=True):
    """Assign value, parsing it from its string form unless parse=False."""
    if parse:
      self._value = self._Parse(value)
    else:
      self._value = value
    self.assigned = True

  def Append(self, value):
    """Parse value and append it to the current value (via +=)."""
    self._value += self._Parse(value)
    self.assigned = True

  def _Parse(self, value):
    # Subclasses override this to convert from the experiment-file string.
    return value

  def Get(self):
    return self._value

  def GetString(self):
    return str(self._value)


class TextField(Field):
  """A free-form string field."""

  def __init__(self, name, required=False, default="", inheritable=False,
               description=""):
    super(TextField, self).__init__(name, required, default, inheritable,
                                    description)

  def _Parse(self, value):
    return str(value)


class BooleanField(Field):
  """A boolean field; accepts the strings "true"/"false" (any case)."""

  def __init__(self, name, required=False, default=False, inheritable=False,
               description=""):
    super(BooleanField, self).__init__(name, required, default, inheritable,
                                       description)

  def _Parse(self, value):
    if value.lower() == "true":
      return True
    elif value.lower() == "false":
      return False
    raise Exception("Invalid value for '%s'. Must be true or false." %
                    self.name)


class IntegerField(Field):
  """An integer field."""

  def __init__(self, name, required=False, default=0, inheritable=False,
               description=""):
    super(IntegerField, self).__init__(name, required, default, inheritable,
                                       description)

  def _Parse(self, value):
    return int(value)


class FloatField(Field):
  """A floating-point field."""

  def __init__(self, name, required=False, default=0, inheritable=False,
               description=""):
    super(FloatField, self).__init__(name, required, default, inheritable,
                                     description)

  def _Parse(self, value):
    return float(value)


class ListField(Field):
  """A whitespace-separated list field."""

  def __init__(self, name, required=False, default=None, inheritable=False,
               description=""):
    # Bug fix: the default used to be the mutable literal [], shared by
    # every ListField instance; Append() mutates the list in place (+=),
    # so one field's appends leaked into all other fields still holding
    # the shared default. Use None as a sentinel and allocate a fresh
    # list per instance instead.
    if default is None:
      default = []
    super(ListField, self).__init__(name, required, default, inheritable,
                                    description)

  def _Parse(self, value):
    return value.split()

  def GetString(self):
    return " ".join(self._value)


class EnumField(Field):
  """A string field restricted to a fixed set of options.

  Note: the default is not validated against options; only Set()/Append()
  values are checked.
  """

  def __init__(self, name, options, required=False, default="",
               inheritable=False, description=""):
    super(EnumField, self).__init__(name, required, default, inheritable,
                                    description)
    self.options = options

  def _Parse(self, value):
    if value not in self.options:
      raise Exception("Invalid enum value for field '%s'. Must be one of (%s)"
                      % (self.name, ", ".join(self.options)))
    return str(value)
+ +import sys +import textwrap +from settings_factory import BenchmarkSettings +from settings_factory import GlobalSettings +from settings_factory import LabelSettings + + +class Help(object): + def GetUsage(self): + return """%s [OPTIONS] [ACTION] EXPERIMENT_FILE""" % (sys.argv[0]) + + def _WrapLine(self, line): + return "\n".join(textwrap.wrap(line, 80)) + + def _GetFieldDescriptions(self, fields): + res = "" + for field_name in fields: + field = fields[field_name] + res += "Field:\t\t%s\n" % field.name + res += self._WrapLine("Description:\t%s" % field.description) + "\n" + res += "Type:\t\t%s\n" % type(field).__name__.replace("Field", "") + res += "Required:\t%s\n" % field.required + if field.default: + res += "Default:\t%s\n" % field.default + res += "\n" + return res + + def GetHelp(self): + global_fields = self._GetFieldDescriptions(GlobalSettings("").fields) + benchmark_fields = self._GetFieldDescriptions(BenchmarkSettings("").fields) + label_fields = self._GetFieldDescriptions(LabelSettings("").fields) + + return """%s is a script for running performance experiments on ChromeOS. It +allows one to run ChromeOS Autotest benchmarks over several images and compare +the results to determine whether there is a performance difference. + +Comparing several images using %s is referred to as running an +"experiment". An "experiment file" is a configuration file which holds all the +information that describes the experiment and how it should be run. 
An example +of a simple experiment file is below: + +--------------------------------- test.exp --------------------------------- +name: my_experiment +board: x86-alex +remote: chromeos-alex5 172.18.122.132 + +benchmark: PageCycler { + iterations: 3 +} + +my_first_image { + chromeos_image: /usr/local/chromeos-1/chromiumos_image.bin +} + +my_second_image { + chromeos_image: /usr/local/chromeos-2/chromiumos_image.bin +} +---------------------------------------------------------------------------- + +This experiment file names the experiment "my_experiment". It will be run +on the board x86-alex. Benchmarks will be run using two remote devices, +one is a device specified by a hostname and the other is a device specified +by it's IP address. Benchmarks will be run in parallel across these devices. +There is currently no way to specify which benchmark will run on each device. + +We define one "benchmark" that will be run, PageCycler. This benchmark has one +"field" which specifies how many iterations it will run for. + +We specify 2 "labels" or images which will be compared. The PageCycler benchmark +will be run on each of these images 3 times and a result table will be output +which compares the two. + +The full list of fields that can be specified are as follows: +================= +Global Fields +================= +%s +================= +Benchmark Fields +================= +%s +================= +Label Fields +================= +%s + +Note that global fields are overidden by label or benchmark fields, if they can +be specified in both places. Fields that are specified as arguments override +fields specified in experiment files. + +%s is invoked by passing it a path to an experiment file, as well as an action +to execute on that experiment file. The possible actions to use are: + +run\t\tRun the experiment and cache the results. + +table\t\tDisplay cached results of an experiment, without running anything. + +email\t\tEmail a summary of the results to the user. 
class ImageChecksummer(object):
  """Process-wide singleton that computes and caches md5 checksums of
  ChromeOS images, with one lock-protected checksummer per file."""

  class PerImageChecksummer(object):
    """Lazily computes (once) and caches the checksum of a single file."""

    def __init__(self, filename):
      self._lock = threading.Lock()
      self.filename = filename
      self._checksum = None

    def Checksum(self):
      """Return the md5 of the file, computing it on first call."""
      with self._lock:
        if not self._checksum:
          logger.GetLogger().LogOutput("Computing checksum for '%s'." %
                                       self.filename)
          self._checksum = FileUtils().Md5File(self.filename)
          logger.GetLogger().LogOutput("Checksum is: %s" % self._checksum)
        return self._checksum

  _instance = None
  _lock = threading.Lock()
  _per_image_checksummers = {}

  def __new__(cls, *args, **kwargs):
    # Singleton: creation is serialized by the class-level lock.
    with cls._lock:
      if not cls._instance:
        # Bug fix: do not forward *args/**kwargs to object.__new__ --
        # object.__new__ rejects extra arguments (deprecated on Python
        # 2.6+, TypeError on Python 3).
        cls._instance = super(ImageChecksummer, cls).__new__(cls)
      return cls._instance

  def Checksum(self, filename):
    """Return (possibly cached) checksum of filename; log and re-raise
    any failure."""
    with self._lock:
      if filename not in self._per_image_checksummers:
        self._per_image_checksummers[filename] = (ImageChecksummer.
                                                  PerImageChecksummer(filename))
      checksummer = self._per_image_checksummers[filename]

    try:
      return checksummer.Checksum()
    except Exception:
      logger.GetLogger().LogError("Could not compute checksum of file '%s'."
                                  % filename)
      # Bug fix: bare raise preserves the original traceback ('raise e'
      # would reset it to this frame).
      raise
class Label(object):
  """One image ("label") under test: its name, image path, board and the
  ChromeOS root the image belongs to."""

  def __init__(self, name, chromeos_image, chromeos_root, board):
    self.name = name
    self.chromeos_image = chromeos_image
    self.board = board
    self.chromeos_root = self._ResolveChromeOSRoot(name, chromeos_image,
                                                   chromeos_root)

  def _ResolveChromeOSRoot(self, name, chromeos_image, chromeos_root):
    """Canonicalize the given root, or derive one from the image path when
    none was supplied. Raises Exception when neither yields a valid root."""
    if chromeos_root:
      canonical_root = FileUtils().CanonicalizeChromeOSRoot(chromeos_root)
      if not canonical_root:
        # Mirrors the original message, which reports the (falsy)
        # canonicalized value rather than the argument.
        raise Exception("Invalid ChromeOS root given for label '%s': '%s'."
                        % (name, canonical_root))
      return canonical_root
    derived_root = FileUtils().ChromeOSRootFromImage(chromeos_image)
    if not derived_root:
      raise Exception("No ChromeOS root given for label '%s' and could not "
                      "determine one from image path: '%s'." %
                      (name, chromeos_image))
    return derived_root
CHECKSUM_FILE = "/usr/local/osimage_checksum_file"


class CrosMachine(object):
  """State for one remote ChromeOS device: image/checksum currently on it,
  lock state, release time, and the autotest thread using it."""

  def __init__(self, name):
    self.name = name
    self.image = None
    self.checksum = None
    self.locked = False
    self.released_time = time.time()
    self.autotest_run = None
    # Robustness fix: 'status' was previously only created inside
    # ReleaseMachine(), so reading it before the first release raised
    # AttributeError.
    self.status = "Available"

  def __str__(self):
    l = []
    l.append(self.name)
    l.append(str(self.image))
    l.append(str(self.checksum))
    l.append(str(self.locked))
    l.append(str(self.released_time))
    return ", ".join(l)


class MachineManager(object):
  """Hands out locked, correctly-imaged machines to benchmark threads."""

  def __init__(self, chromeos_root):
    self._lock = threading.RLock()
    self._all_machines = []
    self._machines = []
    self.image_lock = threading.Lock()
    self.num_reimages = 0
    self.no_lock = False
    self.initialized = False
    # Fix: chromeos_root used to be assigned twice (first None, then the
    # argument); keep only the meaningful assignment.
    self.chromeos_root = chromeos_root

  def ImageMachine(self, machine, chromeos_image, board=None):
    """Flash chromeos_image onto machine unless its checksum already
    matches. Raises Exception if imaging fails."""
    checksum = ImageChecksummer().Checksum(chromeos_image)
    if machine.checksum == checksum:
      return
    image_args = [image_chromeos.__file__,
                  "--chromeos_root=%s" % self.chromeos_root,
                  "--image=%s" % chromeos_image,
                  "--remote=%s" % machine.name]
    if board:
      image_args.append("--board=%s" % board)

    # Currently can't image two machines at once.
    # So have to serialize on this lock.
    ce = command_executer.GetCommandExecuter()
    with self.image_lock:
      retval = ce.RunCommand(" ".join(["python"] + image_args))
      self.num_reimages += 1
      if retval:
        raise Exception("Could not image machine: '%s'." % machine.name)
      machine.checksum = checksum
      machine.image = chromeos_image

    return retval

  def _TryToLockMachine(self, cros_machine):
    """Externally lock cros_machine and, on success, record it as usable
    and read the checksum of the image currently on it."""
    with self._lock:
      assert cros_machine, "Machine can't be None"
      for m in self._machines:
        assert m.name != cros_machine.name, (
            "Tried to double-lock %s" % cros_machine.name)
      if self.no_lock:
        locked = True
      else:
        locked = lock_machine.Machine(cros_machine.name).Lock(True, sys.argv[0])
      if locked:
        self._machines.append(cros_machine)
        ce = command_executer.GetCommandExecuter()
        command = "cat %s" % CHECKSUM_FILE
        ret, out, _ = ce.CrosRunCommand(
            command, return_output=True, chromeos_root=self.chromeos_root,
            machine=cros_machine.name)
        if ret == 0:
          cros_machine.checksum = out.strip()
      else:
        logger.GetLogger().LogOutput("Couldn't lock: %s" % cros_machine.name)

  # This is called from single threaded mode.
  def AddMachine(self, machine_name):
    with self._lock:
      for m in self._all_machines:
        assert m.name != machine_name, "Tried to double-add %s" % machine_name
      self._all_machines.append(CrosMachine(machine_name))

  def AcquireMachine(self, chromeos_image):
    """Return an unlocked machine, preferring one whose image checksum
    already matches chromeos_image; None if none is available yet."""
    image_checksum = ImageChecksummer().Checksum(chromeos_image)
    with self._lock:
      # Lazily external lock machines
      if not self.initialized:
        for m in self._all_machines:
          self._TryToLockMachine(m)
        self.initialized = True
        for m in self._all_machines:
          m.released_time = time.time()

      if not self._machines:
        machine_names = []
        for machine in self._all_machines:
          machine_names.append(machine.name)
        raise Exception("Could not acquire any of the following machines: '%s'"
                        % ", ".join(machine_names))

      # First choice: a free machine that already has the right image.
      for m in [machine for machine in self._machines if not machine.locked]:
        if m.checksum == image_checksum:
          m.locked = True
          m.autotest_run = threading.current_thread()
          return m
      # Second choice: a free machine with no known image.
      for m in [machine for machine in self._machines if not machine.locked]:
        if not m.checksum:
          m.locked = True
          m.autotest_run = threading.current_thread()
          return m
      # This logic ensures that threads waiting on a machine will get a machine
      # with a checksum equal to their image over other threads. This saves time
      # when crosperf initially assigns the machines to threads by minimizing
      # the number of re-images.
      # TODO(asharif): If we centralize the thread-scheduler, we wont need this
      # code and can implement minimal reimaging code more cleanly.
      for m in [machine for machine in self._machines if not machine.locked]:
        if time.time() - m.released_time > 20:
          m.locked = True
          m.autotest_run = threading.current_thread()
          return m
      return None

  def GetMachines(self):
    return self._all_machines

  def ReleaseMachine(self, machine):
    """Mark the manager's record of machine as unlocked and available."""
    with self._lock:
      for m in self._machines:
        if machine.name == m.name:
          # Idiom fix: assert on truthiness rather than '== True'.
          assert m.locked, "Tried to double-release %s" % m.name
          m.released_time = time.time()
          m.locked = False
          m.status = "Available"
          break

  def Cleanup(self):
    """Release all external machine locks held by this manager."""
    with self._lock:
      # Unlock all machines.
      for m in self._machines:
        if not self.no_lock:
          res = lock_machine.Machine(m.name).Unlock(True)
          if not res:
            logger.GetLogger().LogError("Could not unlock machine: '%s'."
                                        % m.name)

  def __str__(self):
    with self._lock:
      l = ["MachineManager Status:"]
      for m in self._machines:
        l.append(str(m))
      return "\n".join(l)

  def AsString(self):
    """Return a fixed-width table of per-machine thread/lock/status info."""
    with self._lock:
      stringify_fmt = "%-30s %-10s %-4s %-25s %-32s"
      header = stringify_fmt % ("Machine", "Thread", "Lock", "Status",
                                "Checksum")
      table = [header]
      for m in self._machines:
        if m.autotest_run:
          autotest_name = m.autotest_run.name
          autotest_status = m.autotest_run.status
        else:
          autotest_name = ""
          autotest_status = ""

        try:
          machine_string = stringify_fmt % (m.name,
                                            autotest_name,
                                            m.locked,
                                            autotest_status,
                                            m.checksum)
        except Exception:
          machine_string = ""
        table.append(machine_string)
      return "Machine Status:\n%s" % "\n".join(table)


class MockMachineManager(object):
  """In-memory stand-in for MachineManager: no locking, no imaging."""

  def __init__(self):
    self.machines = []

  def ImageMachine(self, machine_name, chromeos_image, board=None):
    return 0

  def AddMachine(self, machine_name):
    self.machines.append(CrosMachine(machine_name))

  def AcquireMachine(self, chromeos_image):
    for machine in self.machines:
      if not machine.locked:
        machine.locked = True
        return machine
    return None

  def ReleaseMachine(self, machine):
    machine.locked = False

  def GetMachines(self):
    return self.machines
class PerfProcessor(object):
  """Extracts perf reports and raw perf output from an autotest results
  directory inside a ChromeOS chroot."""

  class PerfResults(object):
    """Value pair: the generated perf report and the raw perf output."""

    def __init__(self, report, output):
      self.report = report
      self.output = output

  def __init__(self, logger_to_use=None):
    self._logger = logger_to_use
    self._ce = command_executer.GetCommandExecuter(self._logger)

  def GeneratePerfResults(self, results_dir, chromeos_root, board):
    """Build a PerfResults from the first profiling iteration under
    results_dir (paths are chroot-relative; reading happens host-side)."""
    perf_location = os.path.join(results_dir,
                                 os.path.basename(results_dir),
                                 "profiling/iteration.1")
    host_perf_location = os.path.join(chromeos_root, "chroot",
                                      perf_location.lstrip("/"))
    report = self._GeneratePerfReport(perf_location, chromeos_root, board)
    output = self._ReadPerfOutput(host_perf_location)
    return PerfProcessor.PerfResults(report, output)

  def ParseStatResults(self, results):
    """Parse 'perf stat'-style lines ("  <value> <event>") from
    results.output into an {event_name: value_string} dict.

    Non-matching lines are ignored.
    """
    output = results.output
    result = {}
    # Idiom fix: raw string for the regex so the escapes are explicit.
    p = re.compile(r"\s*([0-9.]+) +(\S+)")
    for line in output.split("\n"):
      match = p.match(line)
      if match:
        result[match.group(2)] = match.group(1)
    return result

  def _ReadPerfOutput(self, perf_location):
    """Return the raw contents of perf.out under perf_location."""
    perf_output_file = os.path.join(perf_location, "perf.out")
    with open(perf_output_file, "rb") as f:
      return f.read()

  def _GeneratePerfReport(self, perf_location, chromeos_root, board):
    """Run 'perf report' inside the chroot and return its (truncated)
    stdout."""
    perf_data_file = os.path.join(perf_location, "perf.data")
    # Attempt to build a perf report and keep it with the results.
    command = ("/usr/sbin/perf report --symfs=/build/%s"
               " -i %s --stdio | head -n1000" % (board, perf_data_file))
    _, out, _ = self._ce.ChrootRunCommand(chromeos_root,
                                          command, return_output=True)
    return out


class MockPerfProcessor(object):
  """No-op stand-in for PerfProcessor."""

  def __init__(self):
    pass

  def GeneratePerfReport(self, *args):
    pass

  def ParseStatResults(self, *args):
    return {}
SCRATCH_DIR = "/home/%s/cros_scratch" % getpass.getuser()
RESULTS_FILE = "results.txt"
AUTOTEST_TARBALL = "autotest.tbz2"
PERF_RESULTS_FILE = "perf-results.txt"


class Result(object):
  """The (stdout, stderr, return value) triple of one benchmark run."""

  def __init__(self, out, err, retval):
    self.out = out
    self.err = err
    self.retval = retval


class CacheConditions(object):
  """Flags controlling when a cached result may be (re)used."""

  # Cache hit only if the result file exists.
  CACHE_FILE_EXISTS = 0

  # Cache hit if the ip address of the cached result and the new run match.
  REMOTES_MATCH = 1

  # Cache hit if the image checksum of the cached result and the new run match.
  CHECKSUMS_MATCH = 2

  # Cache hit only if the cached result was successful
  RUN_SUCCEEDED = 3

  # Never a cache hit.
  FALSE = 4


class ResultsCache(object):
  """Stores and retrieves benchmark-run results (and autotest/perf output)
  keyed by image, autotest, iteration, args, checksum and remote."""

  def Init(self, chromeos_image, chromeos_root, autotest_name, iteration,
           autotest_args, remote, board, cache_conditions,
           logger_to_use):
    self.chromeos_image = chromeos_image
    self.chromeos_root = chromeos_root
    self.autotest_name = autotest_name
    self.iteration = iteration
    # Bug fix: this assignment used to end with a stray trailing comma,
    # silently wrapping autotest_args in a 1-tuple.
    self.autotest_args = autotest_args
    self.remote = remote
    self.board = board
    self.cache_conditions = cache_conditions
    self._logger = logger_to_use
    self._ce = command_executer.GetCommandExecuter(self._logger)

  def _GetCacheDirForRead(self):
    """Return an existing cache dir matching this run's key, or None."""
    glob_path = self._FormCacheDir(self._GetCacheKeyList(True))
    matching_dirs = glob.glob(glob_path)

    if matching_dirs:
      # Cache file found.
      if len(matching_dirs) > 1:
        self._logger.LogError("Multiple compatible cache files: %s." %
                              " ".join(matching_dirs))
      return matching_dirs[0]
    else:
      return None

  def _GetCacheDirForWrite(self):
    return self._FormCacheDir(self._GetCacheKeyList(False))

  def _FormCacheDir(self, list_of_strings):
    """Join the key parts and convert them into a scratch-dir path."""
    cache_key = " ".join(list_of_strings)
    cache_dir = self._ConvertToFilename(cache_key)
    cache_path = os.path.join(SCRATCH_DIR, cache_dir)
    return cache_path

  def _GetCacheKeyList(self, read):
    """Return the cache key tuple; for reads, fields not required to match
    by cache_conditions become '*' globs."""
    if read and CacheConditions.REMOTES_MATCH not in self.cache_conditions:
      remote = "*"
    else:
      remote = self.remote
    if read and CacheConditions.CHECKSUMS_MATCH not in self.cache_conditions:
      checksum = "*"
    else:
      checksum = ImageChecksummer().Checksum(self.chromeos_image)
    # str() of the args string produces the same key bytes the old
    # ','.join over the accidental 1-tuple did, so existing caches stay
    # valid.
    return (hashlib.md5(self.chromeos_image).hexdigest(),
            self.autotest_name, str(self.iteration),
            str(self.autotest_args),
            checksum, remote)

  def ReadResult(self):
    """Return the cached Result, or None on a cache miss.

    Also returns None when the cached run failed and RUN_SUCCEEDED is
    required. If the cache file is unreadable and CACHE_FILE_EXISTS is
    not required, a synthetic failure Result is returned instead.
    """
    if CacheConditions.FALSE in self.cache_conditions:
      return None
    cache_dir = self._GetCacheDirForRead()

    if not cache_dir:
      return None

    try:
      cache_file = os.path.join(cache_dir, RESULTS_FILE)

      self._logger.LogOutput("Trying to read from cache file: %s" % cache_file)

      with open(cache_file, "rb") as f:
        retval = pickle.load(f)
        out = pickle.load(f)
        err = pickle.load(f)

      if (retval == 0 or
          CacheConditions.RUN_SUCCEEDED not in self.cache_conditions):
        return Result(out, err, retval)

    except Exception:
      if CacheConditions.CACHE_FILE_EXISTS not in self.cache_conditions:
        # Cache file not found but just return a failure.
        return Result("", "", 1)
      # Bug fix: bare raise preserves the original traceback.
      raise

  def StoreResult(self, result):
    """Pickle result (retval, out, err) into the write-side cache dir."""
    cache_dir = self._GetCacheDirForWrite()
    cache_file = os.path.join(cache_dir, RESULTS_FILE)
    command = "mkdir -p %s" % cache_dir
    ret = self._ce.RunCommand(command)
    assert ret == 0, "Couldn't create cache dir"
    with open(cache_file, "wb") as f:
      pickle.dump(result.retval, f)
      pickle.dump(result.out, f)
      pickle.dump(result.err, f)

  def StoreAutotestOutput(self, results_dir):
    """Tar the (chroot-relative) autotest results dir into the cache."""
    host_results_dir = os.path.join(self.chromeos_root, "chroot",
                                    results_dir[1:])
    tarball = os.path.join(self._GetCacheDirForWrite(), AUTOTEST_TARBALL)
    command = ("cd %s && tar cjf %s ." % (host_results_dir, tarball))
    ret = self._ce.RunCommand(command)
    if ret:
      raise Exception("Couldn't store autotest output directory.")

  def ReadAutotestOutput(self, destination):
    """Extract the cached autotest tarball into destination."""
    cache_dir = self._GetCacheDirForRead()
    tarball = os.path.join(cache_dir, AUTOTEST_TARBALL)
    if not os.path.exists(tarball):
      raise Exception("Cached autotest tarball does not exist at '%s'." %
                      tarball)
    command = ("cd %s && tar xjf %s ." % (destination, tarball))
    ret = self._ce.RunCommand(command)
    if ret:
      raise Exception("Couldn't read autotest output directory.")

  def StorePerfResults(self, perf):
    """Pickle perf (report, output) into the write-side cache dir."""
    perf_path = os.path.join(self._GetCacheDirForWrite(), PERF_RESULTS_FILE)
    with open(perf_path, "wb") as f:
      pickle.dump(perf.report, f)
      pickle.dump(perf.output, f)

  def ReadPerfResults(self):
    """Unpickle and return the cached PerfResults."""
    cache_dir = self._GetCacheDirForRead()
    perf_path = os.path.join(cache_dir, PERF_RESULTS_FILE)
    with open(perf_path, "rb") as f:
      report = pickle.load(f)
      output = pickle.load(f)

    return PerfProcessor.PerfResults(report, output)

  def _ConvertToFilename(self, text):
    """Sanitize a cache key into a single path component."""
    ret = text
    ret = re.sub("/", "__", ret)
    ret = re.sub(" ", "_", ret)
    ret = re.sub("=", "", ret)
    ret = re.sub("\"", "", ret)
    return ret


class MockResultsCache(object):
  """No-op stand-in for ResultsCache; always reports a canned success."""

  def Init(self, *args):
    pass

  def ReadResult(self):
    return Result("Results placed in /tmp/test", "", 0)

  def StoreResult(self, result):
    pass

  def StoreAutotestOutput(self, results_dir):
    pass

  def ReadAutotestOutput(self, destination):
    pass

  def StorePerfResults(self, perf):
    pass

  def ReadPerfResults(self):
    return PerfProcessor.PerfResults("", "")
+ +import math + + +class Column(object): + def __init__(self, name): + self.name = name + + def _ContainsString(self, results): + for result in results: + if isinstance(result, str): + return True + return False + + def _StripNone(self, results): + res = [] + for result in results: + if result is not None: + res.append(result) + return res + + +class MinColumn(Column): + def Compute(self, results, baseline_results): + if self._ContainsString(results): + return "-" + results = self._StripNone(results) + if not results: + return "-" + return min(results) + + +class MaxColumn(Column): + def Compute(self, results, baseline_results): + if self._ContainsString(results): + return "-" + results = self._StripNone(results) + if not results: + return "-" + return max(results) + + +class MeanColumn(Column): + def Compute(self, results, baseline_results): + all_pass = True + all_fail = True + if self._ContainsString(results): + for result in results: + if result != "PASSED": + all_pass = False + if result != "FAILED": + all_fail = False + + if all_pass: + return "ALL PASS" + elif all_fail: + return "ALL FAIL" + else: + return "-" + + results = self._StripNone(results) + if not results: + return "-" + return float(sum(results)) / len(results) + + +class StandardDeviationColumn(Column): + def __init__(self, name): + super(StandardDeviationColumn, self).__init__(name) + + def Compute(self, results, baseline_results): + if self._ContainsString(results): + return "-" + + results = self._StripNone(results) + if not results: + return "-" + n = len(results) + average = sum(results) / n + total = 0 + for result in results: + total += (result - average) ** 2 + + return math.sqrt(total / n) + + +class RatioColumn(Column): + def __init__(self, name): + super(RatioColumn, self).__init__(name) + + def Compute(self, results, baseline_results): + if self._ContainsString(results) or self._ContainsString(baseline_results): + return "-" + + results = self._StripNone(results) + baseline_results = 
class DeltaColumn(Column):
  """Column showing the percent change of the mean vs. the baseline mean."""

  def __init__(self, name):
    super(DeltaColumn, self).__init__(name)

  def Compute(self, results, baseline_results):
    """Return 100 * (mean - baseline_mean) / baseline_mean, or "-".

    "-" is returned when either side contains strings, is empty after
    stripping Nones, or the baseline mean is zero (division guard).
    """
    if self._ContainsString(results) or self._ContainsString(baseline_results):
      return "-"

    results = self._StripNone(results)
    baseline_results = self._StripNone(baseline_results)
    if not results or not baseline_results:
      return "-"
    # float() avoids Python 2 integer truncation of the means; without it
    # 100 * (int_diff) / int_mean silently rounds small deltas to 0.
    result_mean = float(sum(results)) / len(results)
    baseline_mean = float(sum(baseline_results)) / len(baseline_results)

    if not baseline_mean:
      return "-"

    res = 100 * (result_mean - baseline_mean) / baseline_mean
    return res
+ +from column_chart import ColumnChart +from results_columns import IterationColumn +from results_columns import IterationsCompleteColumn +from results_columns import MaxColumn +from results_columns import MeanColumn +from results_columns import MinColumn +from results_columns import RatioColumn +from results_columns import StandardDeviationColumn +from results_sorter import ResultSorter +from table import Table + + +class ResultsReport(object): + DELTA_COLUMN_NAME = "Change" + + def __init__(self, experiment): + self.experiment = experiment + self.benchmark_runs = experiment.benchmark_runs + self.labels = experiment.labels + self.benchmarks = experiment.benchmarks + self.baseline = self.labels[0] + + def _SortByLabel(self, runs): + labels = {} + for benchmark_run in runs: + if benchmark_run.label_name not in labels: + labels[benchmark_run.label_name] = [] + labels[benchmark_run.label_name].append(benchmark_run) + return labels + + def GetFullTable(self): + full_columns = [] + max_iterations = 0 + for benchmark in self.benchmarks: + if benchmark.iterations > max_iterations: + max_iterations = benchmark.iterations + + for i in range(1, max_iterations + 1): + full_columns.append(IterationColumn(str(i), i)) + + full_columns.append(IterationsCompleteColumn("Completed")) + full_columns.append(MinColumn("Min")) + full_columns.append(MaxColumn("Max")) + full_columns.append(MeanColumn("Avg")) + full_columns.append(StandardDeviationColumn("Std Dev")) + full_columns.append(RatioColumn(self.DELTA_COLUMN_NAME)) + return self._GetTable(self.labels, self.benchmarks, self.benchmark_runs, + full_columns) + + def GetSummaryTable(self): + summary_columns = [MeanColumn("Average"), + RatioColumn(self.DELTA_COLUMN_NAME)] + return self._GetTable(self.labels, self.benchmarks, self.benchmark_runs, + summary_columns) + + def _GetTable(self, labels, benchmarks, benchmark_runs, columns): + table = Table("box-table-a") + label_headings = [Table.Cell("", hidden=True, colspan=2, header=True)] 
+ for label in labels: + colspan = len(columns) + if label.name == self.baseline.name: + colspan -= 1 + label_headings.append(Table.Cell(label.name, colspan=colspan, + header=True)) + + table.AddRow(label_headings) + + column_headings = [Table.Cell("Autotest Key", header=True), + Table.Cell("Iterations", header=True)] + for label in labels: + for column in columns: + if (label.name == self.baseline.name and + column.name == self.DELTA_COLUMN_NAME): + continue + column_headings.append(Table.Cell(column.name, header=True)) + + table.AddRow(column_headings) + + sorter = ResultSorter(benchmark_runs) + + for benchmark in benchmarks: + table.AddRow([Table.Cell(benchmark.name)]) + autotest_keys = sorter.GetAutotestKeys(benchmark.name) + for autotest_key in autotest_keys: + row = [Table.Cell(autotest_key), + Table.Cell(benchmark.iterations)] + for label in labels: + for column in columns: + if (label.name == self.baseline.name and + column.name == self.DELTA_COLUMN_NAME): + continue + results = sorter.GetResults(benchmark.name, + autotest_key, label.name) + baseline_results = sorter.GetResults(benchmark.name, + autotest_key, + self.baseline.name) + value = column.Compute(results, baseline_results) + if isinstance(value, float): + value_string = "%.2f" % value + else: + value_string = value + + row.append(Table.Cell(value_string)) + + table.AddRow(row) + + return table + + +class TextResultsReport(ResultsReport): + TEXT = """ +=========================================== +Results report for: '%s' +=========================================== + +------------------------------------------- +Benchmark Run Status +------------------------------------------- +%s + +Number re-images: %s + +------------------------------------------- +Summary +------------------------------------------- +%s + +------------------------------------------- +Full Table +------------------------------------------- +%s + +------------------------------------------- +Experiment File 
+------------------------------------------- +%s +=========================================== +""" + + def __init__(self, experiment): + super(TextResultsReport, self).__init__(experiment) + + def GetStatusTable(self): + status_table = Table("status") + for benchmark_run in self.benchmark_runs: + status_table.AddRow([Table.Cell(benchmark_run.name), + Table.Cell(benchmark_run.status), + Table.Cell(benchmark_run.failure_reason)]) + return status_table + + def GetReport(self): + return self.TEXT % (self.experiment.name, + self.GetStatusTable().ToText(), + self.experiment.machine_manager.num_reimages, + self.GetSummaryTable().ToText(30), + self.GetFullTable().ToText(30), + self.experiment.experiment_file) + + +class HTMLResultsReport(ResultsReport): + HTML = """ +<html> + <head> + <style type="text/css"> + +body { + font-family: "Lucida Sans Unicode", "Lucida Grande", Sans-Serif; + font-size: 12px; +} + +pre { + margin: 10px; + color: #039; + font-size: 14px; +} + +.chart { + display: inline; +} + +.hidden { + visibility: hidden; +} + +.results-section { + border: 1px solid #b9c9fe; + margin: 10px; +} + +.results-section-title { + background-color: #b9c9fe; + color: #039; + padding: 7px; + font-size: 14px; + width: 200px; +} + +.results-section-content { + margin: 10px; + padding: 10px; + overflow:auto; +} + +#box-table-a { + font-size: 12px; + width: 480px; + text-align: left; + border-collapse: collapse; +} + +#box-table-a th { + padding: 6px; + background: #b9c9fe; + border-right: 1px solid #fff; + border-bottom: 1px solid #fff; + color: #039; + text-align: center; +} + +#box-table-a td { + padding: 4px; + background: #e8edff; + border-bottom: 1px solid #fff; + border-right: 1px solid #fff; + color: #669; + border-top: 1px solid transparent; +} + +#box-table-a tr:hover td { + background: #d0dafd; + color: #339; +} + + </style> + <script type='text/javascript' src='https://www.google.com/jsapi'></script> + <script type='text/javascript'> + 
google.load('visualization', '1', {packages:['corechart']}); + google.setOnLoadCallback(init); + function init() { + switchTab('summary', 'html'); + switchTab('full', 'html'); + drawTable(); + } + function drawTable() { + %s + } + function switchTab(table, tab) { + document.getElementById(table + '-html').style.display = 'none'; + document.getElementById(table + '-text').style.display = 'none'; + document.getElementById(table + '-tsv').style.display = 'none'; + document.getElementById(table + '-' + tab).style.display = 'block'; + } + </script> + </head> + + <body> + <div class='results-section'> + <div class='results-section-title'>Summary Table</div> + <div class='results-section-content'> + <div id='summary-html'>%s</div> + <div id='summary-text'><pre>%s</pre></div> + <div id='summary-tsv'><pre>%s</pre></div> + </div> + %s + </div> + <div class='results-section'> + <div class='results-section-title'>Charts</div> + <div class='results-section-content'>%s</div> + </div> + <div class='results-section'> + <div class='results-section-title'>Full Table</div> + <div class='results-section-content'> + <div id='full-html'>%s</div> + <div id='full-text'><pre>%s</pre></div> + <div id='full-tsv'><pre>%s</pre></div> + </div> + %s + </div> + <div class='results-section'> + <div class='results-section-title'>Experiment File</div> + <div class='results-section-content'> + <pre>%s</pre> + </div> + </div> + </body> +</html> +""" + + def __init__(self, experiment): + super(HTMLResultsReport, self).__init__(experiment) + + def _GetTabMenuHTML(self, table): + return """ +<div class='tab-menu'> + <a href="javascript:switchTab('%s', 'html')">HTML</a> + <a href="javascript:switchTab('%s', 'text')">Text</a> + <a href="javascript:switchTab('%s', 'tsv')">TSV</a> +</div>""" % (table, table, table) + + def GetReport(self): + chart_javascript = "" + charts = self._GetCharts(self.labels, self.benchmarks, self.benchmark_runs) + for chart in charts: + chart_javascript += chart.GetJavascript() + 
  def _GetCharts(self, labels, benchmarks, benchmark_runs):
    """Build one column chart per (benchmark, autotest key) pair.

    Each chart plots the mean result per label as a column, with min and
    max overlaid as black line series. Charts whose average is non-numeric
    (MeanColumn returns strings such as "-", "ALL PASS" or "ALL FAIL")
    are dropped entirely rather than plotted partially.
    """
    charts = []
    sorter = ResultSorter(benchmark_runs)

    for benchmark in benchmarks:
      autotest_keys = sorter.GetAutotestKeys(benchmark.name)

      for autotest_key in autotest_keys:
        # Autotest keys commonly contain "/"; replace for a readable title.
        title = "%s: %s" % (benchmark.name, autotest_key.replace("/", " "))
        chart = ColumnChart(title, 300, 200)
        chart.AddColumn("Label", "string")
        chart.AddColumn("Average", "number")
        chart.AddColumn("Min", "number")
        chart.AddColumn("Max", "number")
        chart.AddSeries("Min", "line", "black")
        chart.AddSeries("Max", "line", "black")

        for label in labels:
          res = sorter.GetResults(benchmark.name, autotest_key, label.name)
          # The column objects need no name here; only Compute() is used.
          avg_val = MeanColumn("").Compute(res, None)
          min_val = MinColumn("").Compute(res, None)
          max_val = MaxColumn("").Compute(res, None)
          chart.AddRow([label.name, avg_val, min_val, max_val])
          # A string average means non-numeric results for this label;
          # discard the whole chart.
          if isinstance(avg_val, str):
            chart = None
            break

        if chart:
          charts.append(chart)
    return charts
class ResultSorter(object):
  """Indexes benchmark-run results for fast lookup by (benchmark, key, label).

  self.table maps (benchmark_name, autotest_key, label_name) to a list of
  per-iteration values, where index i-1 holds iteration i (missing
  iterations stay None). self.autotest_keys maps benchmark_name to a dict
  used as an ordered set of the autotest keys seen for that benchmark.
  """

  def __init__(self, benchmark_runs):
    self.table = {}
    self.autotest_keys = {}
    for run in benchmark_runs:
      keys_for_benchmark = self.autotest_keys.setdefault(run.benchmark_name,
                                                         {})
      for autotest_key, raw_value in run.results.items():
        keys_for_benchmark[autotest_key] = True

        cell_key = (run.benchmark_name, autotest_key, run.label_name)
        cell = self.table.setdefault(cell_key, [])
        slot = run.iteration - 1
        # Pad with None placeholders so iteration i lands at index i - 1
        # even when runs arrive out of order.
        while len(cell) <= slot:
          cell.append(None)

        # Store numeric results as floats; keep anything unparsable
        # (e.g. "PASSED"/"FAILED") as the original string.
        try:
          value = float(raw_value)
        except ValueError:
          value = raw_value
        cell[slot] = value

  def GetAutotestKeys(self, benchmark_name):
    """Return the autotest keys recorded for benchmark_name."""
    return self.autotest_keys[benchmark_name].keys()

  def GetResults(self, benchmark_name, autotest_key, label_name):
    """Return the per-iteration values for a cell, or [] if none exist."""
    return self.table.get((benchmark_name, autotest_key, label_name), [])
class Settings(object):
  """Class representing settings (a set of fields) from an experiment file."""

  def __init__(self, name, settings_type):
    # name: the identifier of this settings block in the experiment file.
    # settings_type: the kind of block, e.g. "global"/"label"/"benchmark".
    self.name = name
    self.settings_type = settings_type
    # Maps field name -> field object. Field objects are expected to expose
    # name/assigned/required attributes and Set/Append/Get methods
    # (see how they are used below).
    self.fields = {}
    self.parent = None

  def SetParentSettings(self, settings):
    """Set the parent settings which these settings can inherit from."""
    self.parent = settings

  def AddField(self, field):
    """Register a field; raises if a field with the same name already exists."""
    name = field.name
    if name in self.fields:
      raise Exception("Field %s defined previously." % name)
    self.fields[name] = field

  def SetField(self, name, value, append=False):
    """Assign (or append to) the value of an already-registered field."""
    if name not in self.fields:
      raise Exception("'%s' is not a valid field in '%s' settings"
                      % (name, self.settings_type))
    if append:
      self.fields[name].Append(value)
    else:
      self.fields[name].Set(value)

  def GetField(self, name):
    """Get the value of a field with a given name."""
    if name not in self.fields:
      raise Exception("Field '%s' not a valid field in '%s' settings." %
                      (name, self.name))
    field = self.fields[name]
    # Reading a required-but-unset field is an error, not a default.
    if not field.assigned and field.required:
      raise Exception("Required field '%s' not defined in '%s' settings." %
                      (name, self.name))
    return self.fields[name].Get()

  def Inherit(self):
    """Inherit any unset values from the parent settings."""
    for name in self.fields:
      # Only copy fields that are unset here but explicitly set in the
      # parent; parse=False because the parent's value is already parsed.
      if (not self.fields[name].assigned and self.parent and
          name in self.parent.fields and self.parent.fields[name].assigned):
        self.fields[name].Set(self.parent.GetField(name), parse=False)

  def Override(self, settings):
    """Override settings with settings from a different object."""
    for name in settings.fields:
      if name in self.fields and settings.fields[name].assigned:
        self.fields[name].Set(settings.GetField(name), parse=False)

  def Validate(self):
    """Check that all required fields have been set."""
    for name in self.fields:
      if not self.fields[name].assigned and self.fields[name].required:
        raise Exception("Field %s is invalid." % name)
class GlobalSettings(Settings):
  """Settings that apply to the whole experiment (the "global" block)."""

  def __init__(self, name):
    super(GlobalSettings, self).__init__(name, "global")
    self.AddField(TextField("name", default="Experiment",
                            description="The name of the experiment. Just an "
                            "identifier."))
    self.AddField(TextField("board", description="The target "
                            "board for running experiments on, e.g. x86-alex."))
    self.AddField(ListField("remote", required=True,
                            description="A comma-separated list of ip's of "
                            "chromeos devices to run experiments on."))
    self.AddField(BooleanField("rerun_if_failed", description="Whether to "
                               "re-run failed autotest runs or not.",
                               default=False))
    # Help-text fix: "for" was a typo for "force".
    self.AddField(BooleanField("rerun", description="Whether to ignore the "
                               "cache and force autotests to be re-run.",
                               default=False))
    self.AddField(BooleanField("exact_remote", default=False,
                               description="Ensure cached runs are run on the "
                               "same device that is specified as a remote."))
    self.AddField(IntegerField("iterations", default=3,
                               description="Number of iterations to run all "
                               "autotests."))
    self.AddField(TextField("chromeos_root",
                            description="The path to a chromeos checkout which "
                            "contains a src/scripts directory. Defaults to "
                            "the chromeos checkout which contains the "
                            "chromeos_image."))
    self.AddField(ListField("profile_counters",
                            description="A list of profile counters to "
                            "collect."))
    # NOTE(review): unlike BenchmarkSettings, no default="" is given here —
    # confirm whether that is intentional.
    self.AddField(EnumField("profile_type",
                            description="The type of profile to collect. "
                            "Either 'stat', 'record' or ''.",
                            options=["stat", "record", ""]))
+ """ + + def GetSettings(self, name, settings_type): + if settings_type == "label" or not settings_type: + return LabelSettings(name) + if settings_type == "global": + return GlobalSettings(name) + if settings_type == "benchmark": + return BenchmarkSettings(name) + + raise Exception("Invalid settings type: '%s'." % settings_type) diff --git a/crosperf/table.py b/crosperf/table.py new file mode 100644 index 00000000..84eb21ae --- /dev/null +++ b/crosperf/table.py @@ -0,0 +1,84 @@ +#!/usr/bin/python + +# Copyright 2011 Google Inc. All Rights Reserved. + +import math + + +class Table(object): + class Cell(object): + def __init__(self, value, colspan=1, hidden=False, header=False): + self.value = value + self.colspan = colspan + self.hidden = hidden + self.header = header + + def __init__(self, table_id): + self.table_id = table_id + self.rows = [] + + def AddRow(self, row): + self.rows.append(row) + + def ToHTML(self): + res = "<table id='%s'>\n" % self.table_id + for row in self.rows: + res += "<tr>" + for cell in row: + if cell.header: + tag = "th" + else: + tag = "td" + cell_class = "" + if cell.hidden: + cell_class = "class='hidden'" + res += "<%s colspan='%s' %s>%s</%s>" % (tag, cell.colspan, cell_class, + cell.value, tag) + res += "</tr>\n" + res += "</table>" + return res + + def ToText(self, max_column_width=None): + col_spacing = 2 + max_widths = [] + for row in self.rows: + column = 0 + for cell in row: + text_width = len(str(cell.value)) + per_column_width = int(math.ceil(float(text_width) / cell.colspan)) + if max_column_width: + per_column_width = min(max_column_width, per_column_width) + for i in range(column, column + cell.colspan): + while i >= len(max_widths): + max_widths.append(0) + max_widths[i] = max(per_column_width, max_widths[i]) + column += cell.colspan + + res = "" + for row in self.rows: + column = 0 + for cell in row: + val = str(cell.value) + if max_column_width: + if len(val) > max_column_width: + val = val[:2] + ".." 
+ val[len(val) - (max_column_width - 4):] + res += val + space_to_use = (sum(max_widths[column:column + cell.colspan]) + + (cell.colspan * col_spacing)) + whitespace_length = space_to_use - len(val) + res += " " * whitespace_length + # Add space b/w columns + column += cell.colspan + res += "\n" + return res + + def ToTSV(self): + res = "" + column = 0 + for row in self.rows: + for cell in row: + res += str(cell.value).replace("\t", " ") + for _ in range(column, column + cell.colspan): + res += "\t" + res += "\n" + return res diff --git a/v14/image_chromeos.py b/image_chromeos.py index a346a770..17812d4d 100755 --- a/v14/image_chromeos.py +++ b/image_chromeos.py @@ -16,12 +16,11 @@ import os import shutil import sys import tempfile -import tc_enter_chroot from utils import command_executer from utils import logger -from utils import utils +from utils.file_utils import FileUtils -checksum_file = "/home/chronos/osimage_checksum_file" +checksum_file = "/usr/local/osimage_checksum_file" def Usage(parser, message): @@ -48,6 +47,10 @@ def Main(argv): action="store_true", default=False, help="Force an image even if it is non-test.") + parser.add_option("-a", + "--image_to_live_args", + dest="image_to_live_args") + options = parser.parse_args(argv[1:])[0] @@ -78,7 +81,7 @@ def Main(argv): if not os.path.exists(image): Usage(parser, "Image file: " + image + " does not exist!") - image_checksum = utils.Md5File(image) + image_checksum = FileUtils().Md5File(image) command = "cat " + checksum_file retval, device_checksum, err = cmd_executer.CrosRunCommand(command, @@ -116,6 +119,8 @@ def Main(argv): "/src/scripts/image_to_live.sh --remote=" + options.remote + " --image=" + located_image) + if options.image_to_live_args: + command += " %s" % options.image_to_live_args retval = cmd_executer.RunCommand(command) @@ -125,8 +130,8 @@ def Main(argv): shutil.rmtree(temp_dir) logger.GetLogger().LogFatalIf(retval, "Image command failed") - command = "'echo " + image_checksum + " > " + 
checksum_file - command += "&& chmod -w " + checksum_file + "'" + command = "echo %s > %s && chmod -w %s" % (image_checksum, checksum_file, + checksum_file) retval = cmd_executer.CrosRunCommand(command, chromeos_root=options.chromeos_root, machine=options.remote) @@ -152,7 +157,7 @@ def LocateOrCopyImage(chromeos_root, image, board=None): chromeos_root_realpath = os.path.realpath(chromeos_root) image = os.path.realpath(image) - + if image.startswith("%s/" % chromeos_root_realpath): return [True, image] @@ -181,20 +186,22 @@ def LocateOrCopyImage(chromeos_root, image, board=None): return [False, new_image] -def GetImageMountCommand(chromeos_root, image, mount_point): +def GetImageMountCommand(chromeos_root, image, rootfs_mp, stateful_mp): image_dir = os.path.dirname(image) image_file = os.path.basename(image) mount_command = ("cd %s/src/scripts &&" "./mount_gpt_image.sh --from=%s --image=%s" " --safe --read_only" - " --rootfs_mountpt=%s" % - (chromeos_root, image_dir, image_file, mount_point)) + " --rootfs_mountpt=%s" + " --stateful_mountpt=%s" % + (chromeos_root, image_dir, image_file, rootfs_mp, + stateful_mp)) return mount_command -def MountImage(chromeos_root, image, mount_point, unmount=False): +def MountImage(chromeos_root, image, rootfs_mp, stateful_mp, unmount=False): cmd_executer = command_executer.GetCommandExecuter() - command = GetImageMountCommand(chromeos_root, image, mount_point) + command = GetImageMountCommand(chromeos_root, image, rootfs_mp, stateful_mp) if unmount: command = "%s --unmount" % command retval = cmd_executer.RunCommand(command) @@ -203,21 +210,23 @@ def MountImage(chromeos_root, image, mount_point, unmount=False): def IsImageModdedForTest(chromeos_root, image): - mount_point = tempfile.mkdtemp() - MountImage(chromeos_root, image, mount_point) - signature_file = "/usr/local/lib/python2.6/test/autotest.py" - is_test_image = os.path.isfile("%s/%s" % (mount_point, signature_file)) - MountImage(chromeos_root, image, mount_point, 
unmount=True) + rootfs_mp = tempfile.mkdtemp() + stateful_mp = tempfile.mkdtemp() + MountImage(chromeos_root, image, rootfs_mp, stateful_mp) + lsb_release_file = os.path.join(rootfs_mp, "etc/lsb-release") + is_test_image = "Test Build" in open(lsb_release_file).read() + MountImage(chromeos_root, image, rootfs_mp, stateful_mp, unmount=True) return is_test_image def VerifyChromeChecksum(chromeos_root, image, remote): cmd_executer = command_executer.GetCommandExecuter() - mount_point = tempfile.mkdtemp() - MountImage(chromeos_root, image, mount_point) - image_chrome_checksum = utils.Md5File("%s/opt/google/chrome/chrome" % - mount_point) - MountImage(chromeos_root, image, mount_point, unmount=True) + rootfs_mp = tempfile.mkdtemp() + stateful_mp = tempfile.mkdtemp() + MountImage(chromeos_root, image, rootfs_mp, stateful_mp) + image_chrome_checksum = FileUtils().Md5File("%s/opt/google/chrome/chrome" % + rootfs_mp) + MountImage(chromeos_root, image, rootfs_mp, stateful_mp, unmount=True) command = "md5sum /opt/google/chrome/chrome" [r, o, e] = cmd_executer.CrosRunCommand(command, diff --git a/lock_machine.py b/lock_machine.py new file mode 100755 index 00000000..c5f98092 --- /dev/null +++ b/lock_machine.py @@ -0,0 +1,250 @@ +#!/usr/bin/python2.6 +# +# Copyright 2010 Google Inc. All Rights Reserved. + +"""Script to lock/unlock machines. 
class LockDescription(object):
  """The state pickled into a lock file: who holds it, how, and why."""

  def __init__(self):
    self.owner = ""        # user holding the exclusive lock, if any
    self.exclusive = False
    self.counter = 0       # number of shared holders
    self.time = 0          # epoch seconds when the exclusive lock was taken
    self.reason = ""

  def IsLocked(self):
    """Truthy when there is any shared or exclusive holder."""
    return self.counter or self.exclusive

  def __str__(self):
    parts = []
    for label, value in (("Owner", self.owner),
                         ("Exclusive", self.exclusive),
                         ("Counter", self.counter),
                         ("Time", self.time),
                         ("Reason", self.reason)):
      parts.append("%s: %s" % (label, value))
    return " ".join(parts)
  @classmethod
  def AsString(cls, file_locks):
    """Format a list of FileLock objects as a fixed-width text table.

    Args:
      file_locks: FileLock instances whose _description has been loaded
        (i.e. each has been entered at least once).

    Returns:
      A string: one header line followed by one line per lock, columns
      padded to fixed widths.
    """
    stringify_fmt = "%-30s %-15s %-4s %-4s %-15s %-40s"
    header = stringify_fmt % ("machine", "owner", "excl", "ctr",
                              "elapsed", "reason")
    lock_strings = []
    for file_lock in file_locks:

      # Render the lock's age as a human-readable "H:MM:SS ago" delta.
      elapsed_time = datetime.timedelta(
          seconds=int(time.time() - file_lock._description.time))
      elapsed_time = "%s ago" % elapsed_time
      lock_strings.append(stringify_fmt %
                          (os.path.basename(file_lock._filepath),
                           file_lock._description.owner,
                           file_lock._description.exclusive,
                           file_lock._description.counter,
                           elapsed_time,
                           file_lock._description.reason))
    table = "\n".join(lock_strings)
    return "\n".join([header, table])
+ except IOError as ex: + logger.GetLogger().LogError(ex) + return None + + def __exit__(self, type, value, traceback): + self._file.truncate(0) + self._file.write(pickle.dumps(self._description)) + self._file.close() + + def __str__(self): + return self.AsString([self]) + + +class Lock(object): + def __init__(self, to_lock): + self._to_lock = to_lock + self._logger = logger.GetLogger() + + def NonBlockingLock(self, exclusive, reason=""): + with FileLock(self._to_lock) as lock: + if lock.exclusive: + self._logger.LogError( + "Exclusive lock already acquired by %s. Reason: %s" % + (lock.owner, lock.reason)) + return False + + if exclusive: + if lock.counter: + self._logger.LogError("Shared lock already acquired") + return False + lock.exclusive = True + lock.reason = reason + lock.owner = getpass.getuser() + lock.time = time.time() + else: + lock.counter += 1 + self._logger.LogOutput("Successfully locked: %s" % self._to_lock) + return True + + def Unlock(self, exclusive, force=False): + with FileLock(self._to_lock) as lock: + if not lock.IsLocked(): + self._logger.LogError("Can't unlock unlocked machine!") + return False + + if lock.exclusive != exclusive: + self._logger.LogError("shared locks must be unlocked with --shared") + return False + + if lock.exclusive: + if lock.owner != getpass.getuser() and not force: + self._logger.LogError("%s can't unlock lock owned by: %s" % + (getpass.getuser(), lock.owner)) + return False + lock.exclusive = False + lock.reason = "" + lock.owner = "" + else: + lock.counter -= 1 + return True + + +class Machine(object): + def __init__(self, name): + self._name = name + try: + self._full_name = socket.gethostbyaddr(name)[0] + except socket.error: + self._full_name = self._name + + def Lock(self, exclusive=False, reason=""): + lock = Lock(self._full_name) + return lock.NonBlockingLock(exclusive, reason) + + def Unlock(self, exclusive=False, ignore_ownership=False): + lock = Lock(self._full_name) + return lock.Unlock(exclusive, 
def Main(argv):
  """Command-line entry point: lock, unlock, or list machine locks.

  Returns 0 on success, 1 on failure (bad arguments or a lock/unlock
  operation that did not succeed).
  """
  parser = optparse.OptionParser()
  parser.add_option("-r",
                    "--reason",
                    dest="reason",
                    default="",
                    help="The lock reason.")
  parser.add_option("-u",
                    "--unlock",
                    dest="unlock",
                    action="store_true",
                    default=False,
                    help="Use this to unlock.")
  parser.add_option("-l",
                    "--list_locks",
                    dest="list_locks",
                    action="store_true",
                    default=False,
                    help="Use this to list locks.")
  parser.add_option("-f",
                    "--ignore_ownership",
                    dest="ignore_ownership",
                    action="store_true",
                    default=False,
                    help="Use this to force unlock on a lock you don't own.")
  parser.add_option("-s",
                    "--shared",
                    dest="shared",
                    action="store_true",
                    default=False,
                    help="Use this for a shared (non-exclusive) lock.")

  options, args = parser.parse_args(argv)

  # Locks are exclusive unless --shared was given.
  exclusive = not options.shared

  # argv[0] is the program name, so a machine argument means len(args) == 2.
  if not options.list_locks and len(args) != 2:
    logger.GetLogger().LogError(
        "Either --list_locks or a machine arg is needed.")
    return 1

  if len(args) > 1:
    machine = Machine(args[1])
  else:
    machine = None

  if options.list_locks:
    FileLock.ListLock("*")
    retval = True
  elif options.unlock:
    retval = machine.Unlock(exclusive, options.ignore_ownership)
  else:
    retval = machine.Lock(exclusive, options.reason)

  # Map the boolean operation result to a process exit code.
  if retval:
    return 0
  else:
    return 1
command_terminator and command_terminator.IsTerminated(): self.logger.LogError("Command was terminated!") - return 1 + if return_output: + return [1, "", ""] + else: + return 1 if machine is not None: user = "" @@ -50,7 +56,6 @@ class CommandExecuter: user = username + "@" cmd = "ssh -t -t %s%s -- '%s'" % (user, machine, cmd) - pty_fds = pty.openpty() p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, @@ -72,7 +77,10 @@ class CommandExecuter: self.RunCommand("sudo kill -9 " + str(p.pid)) wait = p.wait() self.logger.LogError("Command was terminated!") - return wait + if return_output: + return (p.wait, full_stdout, full_stderr) + else: + return wait for fd in fds[0]: if fd == p.stdout: out = os.read(p.stdout.fileno(), 16384) @@ -128,6 +136,14 @@ class CommandExecuter: command += "\nremote_access_init" return command + def WriteToTempShFile(self, contents): + handle, command_file = tempfile.mkstemp(prefix=os.uname()[1], + suffix=".sh") + os.write(handle, "#!/bin/bash\n") + os.write(handle, contents) + os.close(handle) + return command_file + def CrosLearnBoard(self, chromeos_root, machine): command = self.RemoteAccessInitCommand(chromeos_root, machine) @@ -138,15 +154,29 @@ class CommandExecuter: return output.split()[-1] def CrosRunCommand(self, cmd, return_output=False, machine=None, - username=None, command_terminator=None, chromeos_root=None): + username=None, command_terminator=None, chromeos_root=None, + command_timeout=None): """Run a command on a chromeos box""" + self.logger.LogCmd(cmd) self.logger.LogFatalIf(not machine, "No machine provided!") self.logger.LogFatalIf(not chromeos_root, "chromeos_root not given!") chromeos_root = os.path.expanduser(chromeos_root) + + # Write all commands to a file. 
+ command_file = self.WriteToTempShFile(cmd) + self.CopyFiles(command_file, command_file, + dest_machine=machine, + command_terminator=command_terminator, + chromeos_root=chromeos_root, + dest_cros=True, + recursive=False) + command = self.RemoteAccessInitCommand(chromeos_root, machine) - command += "\nremote_sh " + cmd + command += "\nremote_sh bash %s" % command_file command += "\necho \"$REMOTE_OUT\"" - retval = self.RunCommand(command, return_output) + retval = self.RunCommand(command, return_output, + command_terminator=command_terminator, + command_timeout=command_timeout) if return_output: connect_signature = ("Initiating first contact with remote host\n" + "Connection OK\n") @@ -158,6 +188,28 @@ class CommandExecuter: return modded_return return retval + def ChrootRunCommand(self, chromeos_root, command, return_output=False, + command_terminator=None): + self.logger.LogCmd(command) + + handle, command_file = tempfile.mkstemp(dir=os.path.join(chromeos_root, + "src/scripts"), + suffix=".sh", + prefix="in_chroot_cmd") + os.write(handle, "#!/bin/bash\n") + os.write(handle, command) + os.close(handle) + + os.chmod(command_file, 0777) + + command = "cd %s; cros_sdk -- ./%s" % (chromeos_root, + os.path.basename(command_file)) + ret = self.RunCommand(command, return_output, + command_terminator=command_terminator) + os.remove(command_file) + return ret + + def RunCommands(self, cmdlist, return_output=False, machine=None, username=None, command_terminator=None): cmd = " ;\n" .join(cmdlist) @@ -232,6 +284,7 @@ class MockCommandExecuter(CommandExecuter): def RunCommand(self, cmd, return_output=False, machine=None, username=None, command_terminator=None): + cmd = str(cmd) if machine is None: machine = "localhost" if username is None: diff --git a/utils/email_sender.py b/utils/email_sender.py new file mode 100644 index 00000000..f8c0d62c --- /dev/null +++ b/utils/email_sender.py @@ -0,0 +1,62 @@ +#!/usr/bin/python + +# Copyright 2011 Google Inc. All Rights Reserved. 
+ +from email import Encoders +from email.MIMEBase import MIMEBase +from email.MIMEMultipart import MIMEMultipart +from email.MIMEText import MIMEText +import getpass +import os +import smtplib +import sys + + +class EmailSender(object): + class Attachment(object): + def __init__(self, name, content): + self.name = name + self.content = content + + def SendEmail(self, + email_to, + subject, + text_to_send, + email_cc=None, + email_bcc=None, + email_from=None, + msg_type="plain", + attachments=None): + # Email summary to the current user. + msg = MIMEMultipart() + + if not email_from: + email_from = os.path.basename(__file__) + + msg["To"] = ",".join(email_to) + msg["Subject"] = subject + + if email_from: + msg["From"] = email_from + if email_cc: + msg["CC"] = ",".join(email_cc) + email_to += email_cc + if email_bcc: + msg["BCC"] = ",".join(email_bcc) + email_to += email_bcc + + msg.attach(MIMEText(text_to_send, msg_type)) + if attachments: + for attachment in attachments: + part = MIMEBase("application", "octet-stream") + part.set_payload(attachment.content) + Encoders.encode_base64(part) + part.add_header("Content-Disposition", "attachment; filename=\"%s\"" % + attachment.name) + msg.attach(part) + + # Send the message via our own SMTP server, but don't include the + # envelope header. + s = smtplib.SMTP("localhost") + s.sendmail(email_from, email_to, msg.as_string()) + s.quit() diff --git a/utils/file_utils.py b/utils/file_utils.py new file mode 100644 index 00000000..987e88ad --- /dev/null +++ b/utils/file_utils.py @@ -0,0 +1,91 @@ +#!/usr/bin/python + +# Copyright 2011 Google Inc. All Rights Reserved. 
+ +import errno +import hashlib +import os +import shutil + + +class FileUtils(object): + """Utilities for operations on files.""" + _instance = None + DRY_RUN = False + + @classmethod + def Configure(cls, dry_run): + cls.DRY_RUN = dry_run + + def __new__(cls, *args, **kwargs): + if not cls._instance: + if cls.DRY_RUN: + cls._instance = super(FileUtils, cls).__new__(MockFileUtils, *args, + **kwargs) + else: + cls._instance = super(FileUtils, cls).__new__(cls, *args, + **kwargs) + return cls._instance + + def Md5File(self, filename, block_size=2 ** 10): + md5 = hashlib.md5() + + with open(filename) as f: + while True: + data = f.read(block_size) + if not data: + break + md5.update(data) + + return md5.hexdigest() + + def CanonicalizeChromeOSRoot(self, chromeos_root): + chromeos_root = os.path.expanduser(chromeos_root) + if os.path.isfile(os.path.join(chromeos_root, + "src/scripts/enter_chroot.sh")): + return chromeos_root + else: + return None + + def ChromeOSRootFromImage(self, chromeos_image): + chromeos_root = os.path.join(os.path.dirname(chromeos_image), + "../../../../..") + return self.CanonicalizeChromeOSRoot(chromeos_root) + + def MkDirP(self, path): + try: + os.makedirs(path) + except OSError as exc: + if exc.errno == errno.EEXIST: + pass + else: + raise + + def RmDir(self, path): + shutil.rmtree(path, ignore_errors=True) + + def WriteFile(self, path, contents): + with open(path, "wb") as f: + f.write(contents) + + +class MockFileUtils(FileUtils): + """Mock class for file utilities.""" + + def Md5File(self, filename, block_size=2 ** 10): + return "d41d8cd98f00b204e9800998ecf8427e" + + def CanonicalizeChromeOSRoot(self, chromeos_root): + return "/tmp/chromeos_root" + + def ChromeOSRootFromImage(self, chromeos_image): + return "/tmp/chromeos_root" + + def RmDir(self, path): + pass + + def MkDirP(self, path): + pass + + def WriteFile(self, path, contents): + pass diff --git a/utils/html_tools.py b/utils/html_tools.py new file mode 100644 index 
00000000..c90a57b5 --- /dev/null +++ b/utils/html_tools.py @@ -0,0 +1,93 @@ +#!/usr/bin/python2.6 +# +# Copyright 2010 Google Inc. All Rights Reserved. +# + + +def GetPageHeader(page_title): + return """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" +"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> +<html> +<head> +<style type="text/css"> +table +{ +border-collapse:collapse; +} +table, td, th +{ +border:1px solid black; +} +</style> +<script type="text/javascript"> +function displayRow(id){ + var row = document.getElementById("group_"+id); + if (row.style.display == '') row.style.display = 'none'; + else row.style.display = ''; + } +</script> +<title>%s</title> +</head> +<body> + +""" % page_title + + +def GetListHeader(): + return "<ul>" + + +def GetListItem(text): + return "<li>%s</li>" % text + + +def GetListFooter(): + return "</ul>" + + +def GetList(items): + return "<ul>%s</ul>" % "".join(["<li>%s</li>" % item for item in items]) + + +def GetParagraph(text): + return "<p>%s</p>" % text + + +def GetFooter(): + return "</body>\n</html>" + + +def GetHeader(text, h=1): + return "<h%s>%s</h%s>" % (h, text, h) + + +def GetTableHeader(headers): + row = "".join(["<th>%s</th>" % header for header in headers]) + return "<table><tr>%s</tr>" % row + + +def GetTableFooter(): + return "</table>" + + +def FormatLineBreaks(text): + return text.replace("\n", "<br/>") + + +def GetTableCell(text): + return "<td>%s</td>" % FormatLineBreaks(str(text)) + + +def GetTableRow(columns): + return "<tr>%s</tr>" % "\n".join([GetTableCell(column) for column in columns]) + + +def GetTable(headers, rows): + table = [GetTableHeader(headers)] + table.extend([GetTableRow(row) for row in rows]) + table.append(GetTableFooter()) + return "\n".join(table) + + +def GetLink(link, text): + return "<a href='%s'>%s</a>" % (link, text) diff --git a/v14/utils/logger.py b/utils/logger.py index 62a1ecc0..6f626407 100644 --- a/v14/utils/logger.py +++ b/utils/logger.py @@ -11,6 
+11,8 @@ import utils class Logger(object): """Logging helper class.""" + MAX_LOG_FILES = 10 + def __init__ (self, rootdir, basefilename, print_console, subdir="logs"): logdir = os.path.join(rootdir, subdir) basename = os.path.join(logdir, basefilename) @@ -18,13 +20,61 @@ class Logger(object): try: os.makedirs(logdir) except OSError: - print "Warning: Logs directory '%s' already exists." % logdir + pass + # print "Warning: Logs directory '%s' already exists." % logdir - self.cmdfd = open("%s.cmd" % basename, "w", 0755) - self.stdout = open("%s.out" % basename, "w") - self.stderr = open("%s.err" % basename, "w") self.print_console = print_console + self._CreateLogFileHandles(basename) + + self._WriteTo(self.cmdfd, " ".join(sys.argv), True) + + def _AddSuffix(self, basename, suffix): + return "%s%s" % (basename, suffix) + + def _FindSuffix(self, basename): + timestamps = [] + found_suffix = None + for i in range(self.MAX_LOG_FILES): + suffix = str(i) + suffixed_basename = self._AddSuffix(basename, suffix) + cmd_file = "%s.cmd" % suffixed_basename + if not os.path.exists(cmd_file): + found_suffix = suffix + break + timestamps.append(os.stat(cmd_file).st_mtime) + + if found_suffix: + return found_suffix + + # Try to pick the oldest file with the suffix and return that one. + suffix = str(timestamps.index(min(timestamps))) + # print ("Warning: Overwriting log file: %s" % + # self._AddSuffix(basename, suffix)) + return suffix + + def _CreateLogFileHandles(self, basename): + suffix = self._FindSuffix(basename) + suffixed_basename = self._AddSuffix(basename, suffix) + + self.cmdfd = open("%s.cmd" % suffixed_basename, "w", 0755) + self.stdout = open("%s.out" % suffixed_basename, "w") + self.stderr = open("%s.err" % suffixed_basename, "w") + + self._CreateLogFileSymlinks(basename, suffixed_basename) + + # Symlink unsuffixed basename to currently suffixed one. 
+ def _CreateLogFileSymlinks(self, basename, suffixed_basename): + try: + for extension in ["cmd", "out", "err"]: + src_file = "%s.%s" % (os.path.basename(suffixed_basename), extension) + dest_file = "%s.%s" % (basename, extension) + if os.path.exists(dest_file): + os.remove(dest_file) + os.symlink(src_file, dest_file) + except IOError as ex: + self.LogFatal(str(ex)) + def _WriteTo(self, fd, msg, flush): fd.write(msg) if flush: @@ -95,3 +145,15 @@ def GetLogger(): if not main_logger: InitLogger(sys.argv[0]) return main_logger + + +def HandleUncaughtExceptions(fun): + """Catches all exceptions that would go outside decorated fun scope.""" + + def _Interceptor(*args, **kwargs): + try: + return fun(*args, **kwargs) + except StandardError: + GetLogger().LogFatal("Uncaught exception:\n%s" % traceback.format_exc()) + + return _Interceptor diff --git a/utils/utils.py b/utils/utils.py new file mode 100755 index 00000000..3f9795ca --- /dev/null +++ b/utils/utils.py @@ -0,0 +1,102 @@ +#!/usr/bin/python2.6 +# +# Copyright 2010 Google Inc. All Rights Reserved. 
+ +"""Utilities for toolchain build.""" + +__author__ = "asharif@google.com (Ahmad Sharif)" + +import hashlib +import os +import re +import stat +import command_executer +import logger +import tempfile +from contextlib import contextmanager + + +def GetRoot(scr_name): + """Break up pathname into (dir+name).""" + abs_path = os.path.abspath(scr_name) + return (os.path.dirname(abs_path), os.path.basename(abs_path)) + + +def FormatQuotedCommand(command): + return command.replace("\"", "\\\"") + + +def FormatCommands(commands): + output = str(commands) + output = re.sub("&&", "&&\n", output) + output = re.sub(";", ";\n", output) + output = re.sub("\n+\s*", "\n", output) + return output + + +def GetBuildPackagesCommand(board): + return "./build_packages --nousepkg --withdev --withtest --withautotest " \ + "--skip_toolchain_update --nowithdebug --board=%s" % board + + +def GetBuildImageCommand(board): + return "./build_image --withdev --board=%s" % board + + +def GetModImageForTestCommand(board): + return "./mod_image_for_test.sh --yes --board=%s" % board + + +def GetSetupBoardCommand(board, gcc_version=None, binutils_version=None, + usepkg=None, force=None): + options = [] + + if gcc_version: + options.append("--gcc_version=%s" % gcc_version) + + if binutils_version: + options.append("--binutils_version=%s" % binutils_version) + + if usepkg: + options.append("--usepkg") + else: + options.append("--nousepkg") + + if force: + options.append("--force") + + return "./setup_board --board=%s %s" % (board, " ".join(options)) + + +def CanonicalizePath(path): + path = os.path.expanduser(path) + path = os.path.realpath(path) + return path + + +def GetCtargetFromBoard(board, chromeos_root): + base_board = board.split("_")[0] + command = ("cat" + " $(cros_overlay_list --board=%s --primary_only)/toolchain.conf" % + (base_board)) + ce = command_executer.GetCommandExecuter() + ret, out, err = ce.ChrootRunCommand(chromeos_root, + command, + return_output=True) + if ret != 0: + raise 
ValueError("Board %s is invalid!" % board) + return out.strip() + + +@contextmanager +def WorkingDirectory(new_dir): + old_dir = os.getcwd() + if old_dir != new_dir: + msg = "cd %s" % new_dir + logger.GetLogger().LogCmd(msg) + os.chdir(new_dir) + yield new_dir + if old_dir != new_dir: + msg = "cd %s" % old_dir + logger.GetLogger().LogCmd(msg) + os.chdir(old_dir) diff --git a/v14/build_chrome_browser.py b/v14/build_chrome_browser.py deleted file mode 100755 index 15503e38..00000000 --- a/v14/build_chrome_browser.py +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/python2.6 -# -# Copyright 2010 Google Inc. All Rights Reserved. - -"""Script to checkout the ChromeOS source. - -This script sets up the ChromeOS source in the given directory, matching a -particular release of ChromeOS. -""" - -__author__ = "raymes@google.com (Raymes Khoury)" - -import optparse -import os -import sys -from utils import command_executer -from utils import logger -from utils import utils -import build_chromeos - -cmd_executer = None - - -def Usage(parser, message): - print "ERROR: " + message - parser.print_help() - sys.exit(0) - - -def Main(argv): - """Build Chrome browser.""" - # Common initializations - global cmd_executer - cmd_executer = command_executer.GetCommandExecuter() - - parser = optparse.OptionParser() - parser.add_option("--chromeos_root", dest="chromeos_root", - help="Target directory for ChromeOS installation.") - parser.add_option("--version", dest="version") - parser.add_option("--cflags", dest="cflags", - default="", - help="CFLAGS for the ChromeOS packages") - parser.add_option("--cxxflags", dest="cxxflags", - default="", - help="CXXFLAGS for the ChromeOS packages") - parser.add_option("--ldflags", dest="ldflags", - default="", - help="LDFLAGS for the ChromeOS packages") - parser.add_option("--board", dest="board", - help="ChromeOS target board, e.g. 
x86-generic") - parser.add_option("--label", dest="label", - help="Optional label to apply to the ChromeOS image.") - - options = parser.parse_args(argv)[0] - - if options.chromeos_root is None: - Usage(parser, "--chromeos_root must be set") - - if options.board is None: - Usage(parser, "--board must be set") - - if options.version is None: - logger.GetLogger().LogOutput("No Chrome version given so " - "using the default checked in version.") - chrome_version = "" - else: - chrome_version = "CHROME_VERSION=%s" % options.version - - # Emerge the browser - ret = (build_chromeos. - ExecuteCommandInChroot(options.chromeos_root, - "CHROME_ORIGIN=SERVER_SOURCE %s " - "CFLAGS=\"$(portageq-%s envvar CFLAGS) %s\" " - "LDFLAGS=\"$(portageq-%s envvar LDFLAGS) %s\" " - "CXXFLAGS=\"$(portageq-%s envvar CXXFLAGS) %s\" " - "emerge-%s --buildpkg chromeos-chrome" % - (chrome_version, options.board, options.cflags, - options.board, options.ldflags, options.board, - options.cxxflags, options.board))) - - logger.GetLogger().LogFatalIf(ret, "build_packages failed") - - # Build image - ret = (build_chromeos. - ExecuteCommandInChroot(options.chromeos_root, - utils.GetBuildImageCommand(options.board))) - - logger.GetLogger().LogFatalIf(ret, "build_image failed") - - # Mod image for test - ret = (build_chromeos. 
- ExecuteCommandInChroot(options.chromeos_root, - utils.GetModImageForTestCommand(options.board))) - - logger.GetLogger().LogFatalIf(ret, "mod_image_for_test failed") - - flags_file_name = "chrome_flags.txt" - flags_file_path = ("%s/src/build/images/%s/latest/%s" % - (options.chromeos_root, - options.board, - flags_file_name)) - flags_file = open(flags_file_path, "wb") - flags_file.write("CFLAGS=%s\n" % options.cflags) - flags_file.write("CXXFLAGS=%s\n" % options.cxxflags) - flags_file.write("LDFLAGS=%s\n" % options.ldflags) - flags_file.close() - - - if options.label: - image_dir_path = ("%s/src/build/images/%s/latest" % - (options.chromeos_root, - options.board)) - real_image_dir_path = os.path.realpath(image_dir_path) - command = ("ln -sf -T %s %s/%s" % - (os.path.basename(real_image_dir_path), - os.path.dirname(real_image_dir_path), - options.label)) - - ret = cmd_executer.RunCommand(command) - logger.GetLogger().LogFatalIf(ret, "Failed to apply symlink label %s" % - options.label) - - return ret - -if __name__ == "__main__": - retval = Main(sys.argv) - sys.exit(retval) diff --git a/v14/build_chromeos.py b/v14/build_chromeos.py deleted file mode 100755 index 2b68ab33..00000000 --- a/v14/build_chromeos.py +++ /dev/null @@ -1,199 +0,0 @@ -#!/usr/bin/python2.6 -# -# Copyright 2010 Google Inc. All Rights Reserved. - -"""Script to checkout the ChromeOS source. - -This script sets up the ChromeOS source in the given directory, matching a -particular release of ChromeOS. -""" - -__author__ = "raymes@google.com (Raymes Khoury)" - -import optparse -import os -import sys -import tc_enter_chroot -from utils import command_executer -from utils import logger -from utils import utils - - -def Usage(parser, message): - print "ERROR: " + message - parser.print_help() - sys.exit(0) - -#TODO(raymes): move this to a common utils file. 
-def ExecuteCommandInChroot(chromeos_root, command, toolchain_root=None, - return_output=False, full_mount=False, - tec_options=[]): - """Executes a command in the chroot.""" - global cmd_executer - cmd_executer = command_executer.GetCommandExecuter() - chromeos_root = os.path.expanduser(chromeos_root) - - argv = [os.path.dirname(os.path.abspath(__file__)) + "/tc_enter_chroot.py", - "--chromeos_root=" + chromeos_root, - command] - if toolchain_root: - toolchain_root = os.path.expanduser(toolchain_root) - argv.append("--toolchain_root=" + toolchain_root) - if not full_mount: - argv.append("-s") - argv += tec_options - return tc_enter_chroot.Main(argv, return_output) - - -def MakeChroot(chromeos_root, clobber_chroot=False): - """Make a chroot given a chromeos checkout.""" - if (not os.path.isdir(chromeos_root + "/chroot") - or clobber_chroot): - commands = [] - commands.append("cd " + chromeos_root + "/src/scripts") - clobber_chroot = "" - if clobber_chroot: - clobber_chroot = "--replace" - commands.append("./make_chroot --fast " + clobber_chroot) - ret = command_executer.GetCommandExecuter().RunCommands(commands) - logger.GetLogger().LogFatalIf(ret, "make_chroot failed") - else: - logger.GetLogger().LogOutput("Did not make_chroot because it already exists") - - -def Main(argv): - """Build ChromeOS.""" - # Common initializations - cmd_executer = command_executer.GetCommandExecuter() - - parser = optparse.OptionParser() - parser.add_option("--chromeos_root", dest="chromeos_root", - help="Target directory for ChromeOS installation.") - parser.add_option("--clobber_chroot", dest="clobber_chroot", - action="store_true", help= - "Delete the chroot and start fresh", default=False) - parser.add_option("--clobber_board", dest="clobber_board", - action="store_true", - help="Delete the board and start fresh", default=False) - parser.add_option("--rebuild", dest="rebuild", - action="store_true", - help="Rebuild all board packages except the toolchain.", - default=False) - 
parser.add_option("--cflags", dest="cflags", default="", - help="CFLAGS for the ChromeOS packages") - parser.add_option("--cxxflags", dest="cxxflags", default="", - help="CXXFLAGS for the ChromeOS packages") - parser.add_option("--ldflags", dest="ldflags", default="", - help="LDFLAGS for the ChromeOS packages") - parser.add_option("--board", dest="board", - help="ChromeOS target board, e.g. x86-generic") - parser.add_option("--label", dest="label", - help="Optional label symlink to point to build dir.") - parser.add_option("--vanilla", dest="vanilla", - default=False, - action="store_true", - help="Use default ChromeOS toolchain.") - - options = parser.parse_args(argv[1:])[0] - - if options.chromeos_root is None: - Usage(parser, "--chromeos_root must be set") - - if options.board is None: - Usage(parser, "--board must be set") - - build_packages_env = "" - if options.rebuild == True: - build_packages_env = "EXTRA_BOARD_FLAGS=-e" - - options.chromeos_root = os.path.expanduser(options.chromeos_root) - - MakeChroot(options.chromeos_root, options.clobber_chroot) - - build_packages_command = utils.GetBuildPackagesCommand(options.board) - build_image_command = utils.GetBuildImageCommand(options.board) - mod_image_command = utils.GetModImageForTestCommand(options.board) - - if options.vanilla == True: - command = utils.GetSetupBoardCommand(options.board, - usepkg=False, - force=options.clobber_board) - command += "; " + build_packages_env + " " + build_packages_command - command += "&& " + build_image_command - command += "&& " + mod_image_command - ret = ExecuteCommandInChroot(options.chromeos_root, command) - return ret - - # Setup board - if not os.path.isdir(options.chromeos_root + "/chroot/build/" - + options.board) or options.clobber_board: - # Run build_tc.py from binary package - rootdir = utils.GetRoot(argv[0])[0] - version_number = utils.GetRoot(rootdir)[1] - ret = ExecuteCommandInChroot(options.chromeos_root, - utils.GetSetupBoardCommand(options.board, - 
gcc_version="9999", - binutils_version="9999", - force=options.clobber_board)) - logger.GetLogger().LogFatalIf(ret, "setup_board failed") - else: - logger.GetLogger().LogOutput("Did not setup_board " - "because it already exists") - - # Build packages - ret = ExecuteCommandInChroot(options.chromeos_root, - "CFLAGS=\"$(portageq-%s envvar CFLAGS) %s\" " - "LDFLAGS=\"$(portageq-%s envvar LDFLAGS) %s\" " - "CXXFLAGS=\"$(portageq-%s envvar CXXFLAGS) %s\" " - "CHROME_ORIGIN=SERVER_SOURCE " - "%s " - "%s" - % (options.board, options.cflags, - options.board, options.cxxflags, - options.board, options.ldflags, - build_packages_env, - build_packages_command)) - - logger.GetLogger().LogFatalIf(ret, "build_packages failed") - - # Build image - ret = ExecuteCommandInChroot(options.chromeos_root, - build_image_command) - - logger.GetLogger().LogFatalIf(ret, "build_image failed") - - # Mod image for test - ret = ExecuteCommandInChroot(options.chromeos_root, mod_image_command) - - logger.GetLogger().LogFatalIf(ret, "mod_image_for_test failed") - - flags_file_name = "flags.txt" - flags_file_path = ("%s/src/build/images/%s/latest/%s" % - (options.chromeos_root, - options.board, - flags_file_name)) - flags_file = open(flags_file_path, "wb") - flags_file.write("CFLAGS=%s\n" % options.cflags) - flags_file.write("CXXFLAGS=%s\n" % options.cxxflags) - flags_file.write("LDFLAGS=%s\n" % options.ldflags) - flags_file.close() - - if options.label: - image_dir_path = ("%s/src/build/images/%s/latest" % - (options.chromeos_root, - options.board)) - real_image_dir_path = os.path.realpath(image_dir_path) - command = ("ln -sf -T %s %s/%s" % - (os.path.basename(real_image_dir_path), - os.path.dirname(real_image_dir_path), - options.label)) - - ret = cmd_executer.RunCommand(command) - logger.GetLogger().LogFatalIf(ret, "Failed to apply symlink label %s" % - options.label) - - return ret - -if __name__ == "__main__": - retval = Main(sys.argv) - sys.exit(retval) diff --git a/v14/cros_run_benchmarks.py 
b/v14/cros_run_benchmarks.py deleted file mode 100755 index 5b1627ed..00000000 --- a/v14/cros_run_benchmarks.py +++ /dev/null @@ -1,786 +0,0 @@ -#!/usr/bin/python - -""" -*** How to run benchmarks using two ChromeOS images and compare them *** - -The relevant script here is cros_run_benchmarks.py. - -=== Pre-requisites === - -1. A chromeos_root where you have a ChromeOS checkout as well a chroot that has -autotests emerged for the particular board you are testing. -A chromeos_root will have a src/ and a chroot/ dir. -2. Multiple ChromeOS images that you want to compare. Make sure these images -have been modded for test (mod_image_for_test.sh). These images should be of the -same board as the one that has the autotests (though you can give a board -override on the command line). -3. A remote ChromeOS machine on which the tests will be done. - -=== How to run === - -Specify the chromeos_root, the images and the remote machine. Optionally you can -also specify tests to run, iterations to run, etc. Here is an example: - -python cros_scripts/cros_run_benchmarks.py --iterations=1 --tests=AesThroughput --remote=chromeos-test2 --images=/home/asharif/a/chromeos.latest.fdo/src/build/images/x86-generic/0.10.142.2011_01_18_1450-a1/chromiumos_image.bin,/home/asharif/a/chromeos.latest.fdo/src/build/images/x86-generic/0.10.142.2011_01_19_1120-a1/chromiumos_image.bin --chromeos_root=/home/asharif/a/chromeos.latest.fdo/ --board=x86-generic - -=== Example explanation === - -I checked out chromeos sources in my --chromeos_root option. In there I did -make_chroot, setup_board, build_packages, build_image and mod_image_for_test -twice to obtain two images. The images were given to cros_run_benchmarks -separated by commas. The test I chose to run on both images is AesThroughput. -You would typically run BootPerfServer and PageCycler. 
- - -=== Example Output & Explanation === - -For my command line, it produced an output like: - -OUTPUT: Benchmark: AesThroughput -Run labels: -0: /usr/local/google/home/asharif/chromeos.latest.fdo/src/build/images/x86-generic/0.10.142.2011_01_18_1450-a1/chromiumos_image.bin 0.10.142.2011_01_18_1455 (Test Build 166caf1e - Tue Jan 18 14:55:39 PST 2011 - asharif) developer x86-generic -1: /usr/local/google/home/asharif/chromeos.latest.fdo/src/build/images/x86-generic/0.10.142.2011_01_19_1120-a1/chromiumos_image.bin 0.10.142.2011_01_19_1125 (Test Build 166caf1e - Wed Jan 19 11:25:33 PST 2011 - asharif) developer x86-generic -Group labels: -0: /usr/local/google/home/asharif/chromeos.latest.fdo/src/build/images/x86-generic/0.10.142.2011_01_18_1450-a1/chromiumos_image.bin 0.10.142.2011_01_18_1455 (Test Build 166caf1e - Tue Jan 18 14:55:39 PST 2011 - asharif) developer x86-generic -1: /usr/local/google/home/asharif/chromeos.latest.fdo/src/build/images/x86-generic/0.10.142.2011_01_19_1120-a1/chromiumos_image.bin 0.10.142.2011_01_19_1125 (Test Build 166caf1e - Wed Jan 19 11:25:33 PST 2011 - asharif) developer x86-generic latest - - Benchmark 0 (0) 1 (1) -6_blocksz_1024_bytes 49592320 (+0%) 49684138 (+0%) -256_blocksz_16_bytes 28907317 (+0%) 29669280 (+3%) -56_blocksz_256_bytes 48225450 (+0%) 48199168 (-0%) -256_blocksz_64_bytes 42501482 (+0%) 43009450 (+1%) -6_blocksz_8192_bytes 50610176 (+0%) 50484565 (-0%) -es_per_sec_ideal_min 20971520 (+0%) 20971520 (+0%) -atform_AesThroughput PASS (x) PASS (x) -atform_AesThroughput PASS (x) PASS (x) -Benchmark Summary Table: AesThroughput - Benchmark Summary 0 (0) 1 (1) -6_blocksz_1024_bytes 49592320 (+0%) 49684138 (+0%) -256_blocksz_16_bytes 28907317 (+0%) 29669280 (+3%) -56_blocksz_256_bytes 48225450 (+0%) 48199168 (-0%) -256_blocksz_64_bytes 42501482 (+0%) 43009450 (+1%) -6_blocksz_8192_bytes 50610176 (+0%) 50484565 (-0%) -es_per_sec_ideal_min 20971520 (+0%) 20971520 (+0%) -atform_AesThroughput ALL_PASS (x) ALL_PASS (x) 
-atform_AesThroughput ALL_PASS (x) ALL_PASS (x) - - -You get two tables in the output. The first table shows all the runs and the -second table averages the runs per image across iterations. In this case, since -the iteration count was 1, you get identical tables for "Benchmark" and -"Benchmark Summary". Above the tables is information about the images that were -used for the runs. The image information contains the image path as well as the -build time and the board. - -For benchmarks with multiple fields within them that do not get averaged (example: -BootPerfServer's seconds_kernel_to_login{0,1,...}, cros_run_benchmarks.py -automatically averages them and displays them as <field_name>[c]. The average -used is arithmetic mean. - - -=== Scratch Cache === - -By default cros_run_benchmarks will cache the output of runs so it doesn't run -it again when you compare the same image with another one. - -For example, you can set --images=A,B and it will run benchmarks with image A -and B. If you now set --images=A,C it will run benchmarks only on C and use the -cached results for image A. - -To prevent it from using cached results rm the cros_scratch directory which -is created inside cros_scripts when cros_run_benchmarks runs. - -The cache is also useful when you interrupt runs for some reason -- it will -continue from the same spot again. - - -=== How to get the script help message === - -If you've forgotten the switches this script has a help message that can be -obtained by invoking the script like this: - -python cros_scripts/cros_run_benchmarks.py --help - -Warning: Logs directory '/home/asharif/www/cros_scripts/logs/' already exists. -OUTPUT: cros_scripts/cros_run_benchmarks.py --help -Usage: cros_run_benchmarks.py [options] - -Options: - -h, --help show this help message and exit - -t TESTS, --tests=TESTS - Tests to compare. - -c CHROMEOS_ROOT, --chromeos_root=CHROMEOS_ROOT - A *single* chromeos_root where scripts can be found. 
- -i IMAGES, --images=IMAGES - Possibly multiple (comma-separated) chromeos images. - -n ITERATIONS, --iterations=ITERATIONS - Iterations to run per benchmark. - -r REMOTE, --remote=REMOTE - The remote chromeos machine. - -b BOARD, --board=BOARD - The remote board. - -""" - -# Script to test the compiler. -import copy -import getpass -import optparse -import os -import sys -from utils import command_executer -from utils import utils -from utils import logger -import tempfile -import re -import subprocess -import multiprocessing -import math -import numpy -import hashlib -import image_chromeos -import pickle - - -def IsFloat(text): - if text is None: - return False - try: - float(text) - return True - except ValueError: - return False - - -def RemoveTrailingZeros(x): - ret = x - ret = re.sub("\.0*$", "", ret) - ret = re.sub("(\.[1-9]*)0+$", "\\1", ret) - return ret - - -def HumanizeFloat(x, n=2): - if not IsFloat(x): - return x - digits = re.findall("[0-9.]", str(x)) - decimal_found = False - ret = "" - sig_figs = 0 - for digit in digits: - if digit == ".": - decimal_found = True - elif sig_figs !=0 or digit != "0": - sig_figs += 1 - if decimal_found and sig_figs >= n: - break - ret += digit - return ret - - -def GetNSigFigs(x, n=2): - if not IsFloat(x): - return x - my_fmt = "%." 
+ str(n-1) + "e" - x_string = my_fmt % x - f = float(x_string) - return f - - -def GetFormattedPercent(baseline, other, bad_result="--"): - result = "%8s" % GetPercent(baseline, other, bad_result) - return result - -def GetPercent(baseline, other, bad_result="--"): - result = bad_result - if IsFloat(baseline) and IsFloat(other): - try: - pct = (float(other)/float(baseline) - 1) * 100 - result = "%+1.1f" % pct - except (ZeroDivisionError): - pass - return result - -def FitString(text, N): - if len(text) == N: - return text - elif len(text) > N: - return text[-N:] - else: - fmt = "%%%ds" % N - return fmt % text - - -class AutotestRun: - def __init__(self, test, chromeos_root="", chromeos_image="", - remote="", iteration=0, image_checksum=""): - self.test = test - self.chromeos_root = chromeos_root - self.chromeos_image = chromeos_image - self.remote = remote - self.iteration = iteration - self.output = "" - self.results = {} - l = logger.GetLogger() - l.LogFatalIf(not image_checksum, "Checksum shouldn't be None") - self.image_checksum = image_checksum - - - def GetCacheHash(self): - ret = "%s %s %s %d" % (self.image_checksum, self.test, self.remote, self.iteration) - ret = re.sub("/", "__", ret) - ret = re.sub(" ", "_", ret) - return ret - - - def GetLabel(self): - ret = "%s %s %s" % (self.chromeos_image, self.test, self.remote) - return ret - - -class TableFormatter: - def __init__(self): - self.d = "\t" - self.bad_result = "x" - pass - - - def GetTablePercents(self, table): - # Assumes table is not transposed. 
- pct_table = [] - - pct_table.append(table[0]) - for i in range(1, len(table)): - row = [] - row.append(table[i][0]) - for j in range (1, len(table[0])): - c = table[i][j] - b = table[i][1] - p = GetPercent(b, c, self.bad_result) - row.append(p) - pct_table.append(row) - return pct_table - - - def FormatFloat(self, c, max_length=8): - if not IsFloat(c): - return c - f = float(c) - ret = HumanizeFloat(f, 4) - ret = RemoveTrailingZeros(ret) - if len(ret) > max_length: - ret = "%1.1ef" % f - return ret - - - def TransposeTable(self, table): - transposed_table = [] - for i in range(len(table[0])): - row = [] - for j in range(len(table)): - row.append(table[j][i]) - transposed_table.append(row) - return transposed_table - - - def GetTableLabels(self, table): - ret = "" - header = table[0] - for i in range(1, len(header)): - ret += "%d: %s\n" % (i, header[i]) - return ret - - - def GetFormattedTable(self, table, transposed=False, - first_column_width=30, column_width=14, - percents_only=True, - fit_string=True): - o = "" - pct_table = self.GetTablePercents(table) - if transposed == True: - table = self.TransposeTable(table) - pct_table = self.TransposeTable(table) - - for i in range(0, len(table)): - for j in range(len(table[0])): - if j == 0: - width = first_column_width - else: - width = column_width - - c = table[i][j] - p = pct_table[i][j] - - # Replace labels with numbers: 0... n - if IsFloat(c): - c = self.FormatFloat(c) - - if IsFloat(p) and not percents_only: - p = "%s%%" % p - - # Print percent values side by side. 
- if j != 0: - if percents_only: - c = "%s" % p - else: - c = "%s (%s)" % (c, p) - - if i == 0 and j != 0: - c = str(j) - - if fit_string: - o += FitString(c, width) + self.d - else: - o += c + self.d - o += "\n" - return o - - - def GetGroups(self, table): - labels = table[0] - groups = [] - group_dict = {} - for i in range(1, len(labels)): - label = labels[i] - stripped_label = self.GetStrippedLabel(label) - if stripped_label not in group_dict: - group_dict[stripped_label] = len(groups) - groups.append([]) - groups[group_dict[stripped_label]].append(i) - return groups - - - def GetSummaryTableValues(self, table): - # First get the groups - groups = self.GetGroups(table) - - summary_table = [] - - labels = table[0] - - summary_labels = ["Summary Table"] - for group in groups: - label = labels[group[0]] - stripped_label = self.GetStrippedLabel(label) - group_label = "%s (%d runs)" % (stripped_label, len(group)) - summary_labels.append(group_label) - summary_table.append(summary_labels) - - for i in range(1, len(table)): - row = table[i] - summary_row = [row[0]] - for group in groups: - group_runs = [] - for index in group: - group_runs.append(row[index]) - group_run = self.AggregateResults(group_runs) - summary_row.append(group_run) - summary_table.append(summary_row) - - return summary_table - - - @staticmethod - def AggregateResults(group_results): - ret = "" - if len(group_results) == 0: - return ret - all_floats = True - all_passes = True - all_fails = True - for group_result in group_results: - if not IsFloat(group_result): - all_floats = False - if group_result != "PASS": - all_passes = False - if group_result != "FAIL": - all_fails = False - if all_floats == True: - float_results = [float(v) for v in group_results] - ret = "%f" % numpy.average(float_results) - # Add this line for standard deviation. 
-### ret += " %f" % numpy.std(float_results) - elif all_passes == True: - ret = "ALL_PASS" - elif all_fails == True: - ret = "ALL_FAILS" - return ret - - - @staticmethod - def GetStrippedLabel(label): - return re.sub("\s*i:\d+$", "", label) - - - @staticmethod - def GetLabelWithIteration(label, iteration): - return "%s i:%d" % (label, iteration) - - -class AutotestGatherer(TableFormatter): - def __init__(self): - self.runs = [] - TableFormatter.__init__(self) - pass - - - @staticmethod - def MeanExcludingSlowest(array): - mean = sum(array) / len(array) - array2 = [] - - for v in array: - if mean != 0 and abs(v - mean)/mean < 0.2: - array2.append(v) - - if len(array2) != 0: - return sum(array2) / len(array2) - else: - return mean - - - @staticmethod - def AddComposite(results_dict): - composite_keys = [] - composite_dict = {} - for key in results_dict: - mo = re.match("(.*){\d+}", key) - if mo: - composite_keys.append(mo.group(1)) - for key in results_dict: - for composite_key in composite_keys: - if key.count(composite_key) != 0 and IsFloat(results_dict[key]): - if composite_key not in composite_dict: - composite_dict[composite_key] = [] - composite_dict[composite_key].append(float(results_dict[key])) - break - - for composite_key in composite_dict: - v = composite_dict[composite_key] - results_dict["%s[c]" % composite_key] = sum(v) / len(v) - mean_excluding_slowest = AutotestGatherer.MeanExcludingSlowest(v) - results_dict["%s[ce]" % composite_key] = mean_excluding_slowest - - return results_dict - - - def ParseOutput(self, test): - p=re.compile("^-+.*?^-+", re.DOTALL|re.MULTILINE) - matches = p.findall(test.output) - for i in range(len(matches)): - results = matches[i] - keyvals = results.split()[1:-1] - results_dict = {} - for j in range(len(keyvals)/2): - # Eanble this to compare only numerical results. -### if IsFloat(keyvals[j*2+1]): - results_dict[keyvals[j*2]] = keyvals[j*2+1] - - # Add a composite keyval for tests like startup. 
- results_dict = AutotestGatherer.AddComposite(results_dict) - - test.results = results_dict - - self.runs.append(test) - - # This causes it to not parse the table again - # Autotest recently added a secondary table - # That reports errors and screws up the final pretty output. - break - - - def GetFormattedMainTable(self, percents_only, fit_string): - ret = "" - table = self.GetTableValues() - ret += self.GetTableLabels(table) - ret += self.GetFormattedTable(table, percents_only=percents_only, - fit_string=fit_string) - return ret - - - def GetFormattedSummaryTable(self, percents_only, fit_string): - ret = "" - table = self.GetTableValues() - summary_table = self.GetSummaryTableValues(table) - ret += self.GetTableLabels(summary_table) - ret += self.GetFormattedTable(summary_table, percents_only=percents_only, - fit_string=fit_string) - return ret - - - def GetBenchmarksString(self): - ret = "Benchmarks (in order):" - ret = "\n".join(self.GetAllBenchmarks()) - return ret - - - def GetAllBenchmarks(self): - all_benchmarks = [] - for run in self.runs: - for key in run.results.keys(): - if key not in all_benchmarks: - all_benchmarks.append(key) - all_benchmarks.sort() - return all_benchmarks - - - def GetTableValues(self): - table = [] - row = [] - - row.append("Benchmark") - for i in range(len(self.runs)): - run = self.runs[i] - label = run.GetLabel() - label = self.GetLabelWithIteration(label, run.iteration) - row.append(label) - table.append(row) - - all_benchmarks = self.GetAllBenchmarks() - for benchmark in all_benchmarks: - row = [] - row.append(benchmark) - for run in self.runs: - results = run.results - if benchmark in results: - row.append(results[benchmark]) - else: - row.append("") - table.append(row) - - return table - - -class AutotestRunner: - def __init__(self, chromeos_root, test, board="x86-agz", image=None, ag=None): - self.chromeos_root = os.path.expanduser(chromeos_root) - self.board = board - if image: - self.image = image - else: - self.image = 
("%s/src/build/images/%s/latest/chromiumos_image.bin" - % (chromeos_root, - board)) - self.image = os.path.realpath(self.image) - - if os.path.isdir(self.image): - old_image = self.image - self.image = "%s/chromiumos_image.bin" % self.image - m = "%s is a dir. Trying to use %s instead..." % (old_image, self.image) - logger.GetLogger().LogOutput(m) - - if not os.path.isfile(self.image): - m = "Image: %s (%s) not found!" % (image, self.image) - logger.GetLogger().LogError(m) - sys.exit(1) - - self.test = test - self.ag = ag - self.ce = command_executer.GetCommandExecuter() - self.scratch_dir = "%s/cros_scratch" % os.path.dirname(os.path.realpath(__file__)) - if not os.path.isdir(self.scratch_dir): - os.mkdir(self.scratch_dir) - - def RunTest(self, remote, iterations): - image_args = [os.path.dirname(os.path.abspath(__file__)) + - "/image_chromeos.py", - "--chromeos_root=" + self.chromeos_root, - "--image=" + self.image, - "--remote=" + remote, - ] - if self.board: - image_args.append("--board=" + self.board) - - image_checksum = utils.Md5File(self.image) - - reimaged = False - - for i in range(iterations): - options = "" - if self.board: - options += "--board=%s" % self.board - - run = AutotestRun(self.test, self.chromeos_root, - self.image, remote, i, image_checksum) - cache_file = run.GetCacheHash() - f = "%s/%s" % (self.scratch_dir, cache_file) - if os.path.isfile(f): - m = "Cache hit: %s. 
Not running test for image: %s.\n" % (f, self.image) - logger.GetLogger().LogOutput(m) - pickle_file = open(f, "rb") - retval = pickle.load(pickle_file) - out = pickle.load(pickle_file) - err = pickle.load(pickle_file) - pickle_file.close() - logger.GetLogger().LogOutput(out) - else: - if reimaged == False: - retval = image_chromeos.Main(image_args) - logger.GetLogger().LogFatalIf(retval, "Could not re-image!") - reimaged = True - command = "cd %s/src/scripts" % self.chromeos_root - command += ("&& ./enter_chroot.sh -- ./run_remote_tests.sh --remote=%s %s %s" % - (remote, - options, - self.test)) - [retval, out, err] = self.ce.RunCommand(command, True) - pickle_file = open(f, "wb") - pickle.dump(retval, pickle_file) - pickle.dump(out, pickle_file) - pickle.dump(err, pickle_file) - pickle_file.close() - - run.output = out - self.ag.ParseOutput(run) - - -class Benchmark: - def __init__(self, name, iterations, args=None): - self.name = name - self.iterations = iterations - self.args = args - - -def Main(argv): - """The main function.""" - # Common initializations -### command_executer.InitCommandExecuter(True) - ce = command_executer.GetCommandExecuter() - - parser = optparse.OptionParser() - parser.add_option("-t", "--tests", dest="tests", - help=("Tests to compare." 
- "Optionally specify per-test iterations by <test>:<iter>")) - parser.add_option("-c", "--chromeos_root", dest="chromeos_root", - help="A *single* chromeos_root where scripts can be found.") - parser.add_option("-i", "--images", dest="images", - help="Possibly multiple (comma-separated) chromeos images.") - parser.add_option("-n", "--iterations", dest="iterations", - help="Iterations to run per benchmark.", - default=1) - parser.add_option("-r", "--remote", dest="remote", - help="The remote chromeos machine.") - parser.add_option("-b", "--board", dest="board", - help="The remote board.", - default="x86-mario") - parser.add_option("--full_table", dest="full_table", - help="Print full tables.", - action="store_true", - default=False) - parser.add_option("--fit_string", dest="fit_string", - help="Fit strings to fixed sizes.", - action="store_true", - default=False) - logger.GetLogger().LogOutput(" ".join(argv)) - [options, args] = parser.parse_args(argv) - - if options.remote is None: - logger.GetLogger().LogError("No remote machine specified.") - parser.print_help() - sys.exit(1) - - remote = options.remote - - benchmarks = [] - - if options.tests: - benchmark_strings = options.tests.split(",") - for benchmark_string in benchmark_strings: - iterations = int(options.iterations) - fields = benchmark_string.split(":") - l = logger.GetLogger() - l.LogFatalIf(len(fields)>2, - "Benchmark string: %s flawed" % benchmark_string) - name = fields[0] - if len(fields) == 2: - iterations = int(fields[1]) - benchmarks.append(Benchmark(name, iterations)) - else: -### benchmarks.append(Benchmark("BootPerfServer/control", iterations)) -### benchmarks.append(Benchmark("Page --args=\"--page-cycler-gtest-filters=PageCyclerTest.BloatFile\"", iterations)) - benchmarks.append(Benchmark("Page", iterations)) -### benchmarks.append(Benchmark("bvt", iterations)) -### benchmarks.append(Benchmark("suite_Smoke", iterations)) -### benchmarks.append(Benchmark("SunSpider", iterations)) -### 
benchmarks.append(Benchmark("V8Bench", iterations)) -### benchmarks.append(Benchmark("graphics_GLBench", iterations)) -### benchmarks.append(Benchmark("unixbench", iterations)) -### benchmarks.append(Benchmark("compilebench", iterations)) -### benchmarks.append(Benchmark("audiovideo_FFMPEG", iterations)) -### benchmarks.append(Benchmark("audiovideo_V4L2", iterations)) -### benchmarks.append(Benchmark("hackbench", iterations)) -### benchmarks.append(Benchmark("dbench", iterations)) - - - if not options.chromeos_root: - logger.GetLogger().LogError("No chromeos root specified.") - parser.print_help() - sys.exit(1) - else: - chromeos_root = options.chromeos_root - chromeos_root = os.path.expanduser(chromeos_root) - sig_file = "%s/src/scripts/enter_chroot.sh" % chromeos_root - if (not os.path.isdir(chromeos_root)) or (not os.path.isfile(sig_file)): - message = "chromeos_root: %s not valid." % chromeos_root - logger.GetLogger().LogError(message) - sys.exit(1) - - if not options.images: - logger.GetLogger().LogError("No images specified.") - parser.print_help() - sys.exit(1) - - ags = {} - try: - # Lock the machine if it is of this style: chromeos-test\d+ - match = re.search("chromeos-test(\d+)$", remote) - if match: - index = match.group(1) - perflab_machine = "chromeos_%s_%s" % (options.board, index) - lock_reason = ("Automatically locked by %s@%s for testing new toolchain using %s" % - (getpass.getuser(), - os.uname()[1], - sys.argv[0])) - lock_reason = "Automatically locked by %s" % os.path.basename(sys.argv[0]) - command = ("perflab --machines=%s --lock_reason=%r --lock_duration=1d lock" % - (perflab_machine, lock_reason)) - retval = ce.RunCommand(command) - logger.GetLogger().LogFatalIf(retval, "Could not lock machine %s through perflab" % perflab_machine) - - for image in options.images.split(","): - if image == "": - logger.GetLogger().LogWarning("Empty image specified!") - continue - image = os.path.expanduser(image) - for b in benchmarks: - if b in ags: - ag = 
ags[b] - else: - ag = AutotestGatherer() - ags[b] = ag - ar = AutotestRunner(chromeos_root, b.name, options.board, image=image, ag=ag) - ar.RunTest(remote, b.iterations) - - output = "" - for b, ag in ags.items(): - output += "Benchmark: %s\n" % b.name - output += ag.GetFormattedMainTable(percents_only=not options.full_table, - fit_string=options.fit_string) - output += "\n" - output += "Benchmark Summary Table: %s\n" % b.name - output += ag.GetFormattedSummaryTable(percents_only=not options.full_table, - fit_string=options.fit_string) - output += "\n" - logger.GetLogger().LogOutput(output) - - - except (KeyboardInterrupt, SystemExit): - print "C-c pressed" - if match: - command = ("perflab --machines=%s --lock_reason=%r unlock" % - (perflab_machine, lock_reason)) - retval = ce.RunCommand(command) - - -if __name__ == "__main__": - Main(sys.argv) - diff --git a/v14/tc_enter_chroot.py b/v14/tc_enter_chroot.py deleted file mode 100755 index 40befbea..00000000 --- a/v14/tc_enter_chroot.py +++ /dev/null @@ -1,268 +0,0 @@ -#!/usr/bin/python2.6 -# -# Copyright 2010 Google Inc. All Rights Reserved. - -"""Script to enter the ChromeOS chroot with mounted sources. - -This script enters the chroot with mounted sources. 
-""" - -__author__ = "asharif@google.com (Ahmad Sharif)" - -import getpass -import optparse -import os -import pwd -import stat -import sys -from utils import command_executer -from utils import logger -from utils import utils - -class MountPoint: - def __init__(self, external_dir, mount_dir, owner, options=None): - self.external_dir = external_dir - self.mount_dir = mount_dir - self.owner = owner - self.options = options - - - def CreateAndOwnDir(self, dir_name): - retval = 0 - if not os.path.exists(dir_name): - command = "mkdir -p " + dir_name - command += " || sudo mkdir -p " + dir_name - retval = command_executer.GetCommandExecuter().RunCommand(command) - if retval != 0: - return retval - pw = pwd.getpwnam(self.owner) - if os.stat(dir_name).st_uid != pw.pw_uid: - command = "sudo chown -f " + self.owner + " " + dir_name - retval = command_executer.GetCommandExecuter().RunCommand(command) - return retval - - - def DoMount(self): - retval = self.CreateAndOwnDir(self.mount_dir) - logger.GetLogger().LogFatalIf(retval, "Cannot create mount_dir!") - retval = self.CreateAndOwnDir(self.external_dir) - logger.GetLogger().LogFatalIf(retval, "Cannot create external_dir!") - retval = self.MountDir() - logger.GetLogger().LogFatalIf(retval, "Cannot mount!") - return retval - - - def MountDir(self): - command = "sudo mount --bind " + self.external_dir + " " + self.mount_dir - if self.options == "ro": - command += " && sudo mount --bind -oremount,ro " + self.mount_dir - retval = command_executer.GetCommandExecuter().RunCommand(command) - return retval - - - def __str__(self): - ret = "" - ret += self.external_dir + "\n" - ret += self.mount_dir + "\n" - if self.owner: - ret += self.owner + "\n" - if self.options: - ret += self.options + "\n" - return ret - - -def Main(argv, return_output=False): - """The main function.""" - parser = optparse.OptionParser() - parser.add_option("-c", "--chromeos_root", dest="chromeos_root", - default="../..", - help="ChromeOS root checkout 
directory.") - parser.add_option("-t", "--toolchain_root", dest="toolchain_root", - help="Toolchain root directory.") - parser.add_option("-o", "--output", dest="output", - help="Toolchain output directory") - parser.add_option("--sudo", dest="sudo", - action="store_true", - default=False, - help="Run the command with sudo.") - parser.add_option("-r", "--third_party", dest="third_party", - help="The third_party directory to mount.") - parser.add_option("-m", "--other_mounts", dest="other_mounts", - help="Other mount points in the form: " + - "dir:mounted_dir:options") - parser.add_option("-s", "--mount-scripts-only", - dest="mount_scripts_only", - action="store_true", - default=False, - help="Mount only the scripts dir, and not the sources.") - - passthrough_argv = [] - (options, passthrough_argv) = parser.parse_args(argv) - - chromeos_root = options.chromeos_root - - chromeos_root = os.path.expanduser(chromeos_root) - if options.toolchain_root: - options.toolchain_root = os.path.expanduser(options.toolchain_root) - - chromeos_root = os.path.abspath(chromeos_root) - - tc_dirs = [] - if options.toolchain_root is None or options.mount_scripts_only: - m = "toolchain_root not specified. Will not mount toolchain dirs." 
- logger.GetLogger().LogWarning(m) - else: - tc_dirs = [options.toolchain_root + "/google_vendor_src_branch/gcc", - options.toolchain_root + "/google_vendor_src_branch/binutils"] - - for tc_dir in tc_dirs: - if not os.path.exists(tc_dir): - logger.GetLogger().LogError("toolchain path " + - tc_dir + " does not exist!") - parser.print_help() - sys.exit(1) - - if not os.path.exists(chromeos_root): - logger.GetLogger().LogError("chromeos_root " + options.chromeos_root + - " does not exist!") - parser.print_help() - sys.exit(1) - - if not os.path.exists(chromeos_root + "/src/scripts/enter_chroot.sh"): - logger.GetLogger().LogError(options.chromeos_root + - "/src/scripts/enter_chroot.sh" - " not found!") - parser.print_help() - sys.exit(1) - - rootdir = utils.GetRoot(__file__)[0] - version_dir = rootdir - - mounted_tc_root = "/usr/local/toolchain_root" - full_mounted_tc_root = chromeos_root + "/chroot/" + mounted_tc_root - full_mounted_tc_root = os.path.abspath(full_mounted_tc_root) - - mount_points = [] - for tc_dir in tc_dirs: - last_dir = utils.GetRoot(tc_dir)[1] - mount_point = MountPoint(tc_dir, full_mounted_tc_root + "/" + last_dir, - getpass.getuser(), "ro") - mount_points.append(mount_point) - - # Add the third_party mount point if it exists - if options.third_party: - third_party_dir = options.third_party - logger.GetLogger().LogFatalIf(not os.path.isdir(third_party_dir), - "--third_party option is not a valid dir.") - else: - third_party_dir = os.path.abspath("%s/../../../third_party" % - os.path.dirname(__file__)) - - if os.path.isdir(third_party_dir): - mount_point = MountPoint(third_party_dir, - ("%s/%s" % - (full_mounted_tc_root, - os.path.basename(third_party_dir))), - getpass.getuser()) - mount_points.append(mount_point) - - output = options.output - if output is None and options.toolchain_root: - # Mount the output directory at /usr/local/toolchain_root/output - output = options.toolchain_root + "/output" - - if output: - 
mount_points.append(MountPoint(output, full_mounted_tc_root + "/output", - getpass.getuser())) - - # Mount the other mount points - mount_points += CreateMountPointsFromString(options.other_mounts, - chromeos_root + "/chroot/") - - last_dir = utils.GetRoot(version_dir)[1] - - # Mount the version dir (v14) at /usr/local/toolchain_root/v14 - mount_point = MountPoint(version_dir, full_mounted_tc_root + "/" + last_dir, - getpass.getuser()) - mount_points.append(mount_point) - - for mount_point in mount_points: - retval = mount_point.DoMount() - if retval != 0: - return retval - - # Finally, create the symlink to build-gcc. - command = "sudo chown " + getpass.getuser() + " " + full_mounted_tc_root - retval = command_executer.GetCommandExecuter().RunCommand(command) - - try: - CreateSymlink(last_dir + "/build-gcc", full_mounted_tc_root + "/build-gcc") - CreateSymlink(last_dir + "/build-binutils", full_mounted_tc_root + "/build-binutils") - except Exception as e: - logger.GetLogger().LogError(str(e)) - - # Now call enter_chroot with the rest of the arguments. 
- command = chromeos_root + "/src/scripts/enter_chroot.sh" - - if len(passthrough_argv) > 1: - inner_command = " ".join(passthrough_argv[1:]) - inner_command = inner_command.strip() - if inner_command.startswith("-- "): - inner_command = inner_command[3:] - command_file = "tc_enter_chroot.cmd" - command_file_path = chromeos_root + "/src/scripts/" + command_file - retval = command_executer.GetCommandExecuter().RunCommand("sudo rm -f " + command_file_path) - if retval != 0: - return retval - f = open(command_file_path, "w") - f.write(inner_command) - f.close() - logger.GetLogger().LogCmd(inner_command) - retval = command_executer.GetCommandExecuter().RunCommand("chmod +x " + command_file_path) - if retval != 0: - return retval - - if options.sudo: - command += " sudo ./" + command_file - else: - command += " ./" + command_file - retval = command_executer.GetCommandExecuter().RunCommand(command, return_output) - return retval - else: - return os.execv(command, [""]) - - -def CreateMountPointsFromString(mount_strings, chroot_dir): - # String has options in the form dir:mount:options - mount_points = [] - if not mount_strings: - return mount_points - mount_list = mount_strings.split() - for mount_string in mount_list: - mount_values = mount_string.split(":") - external_dir = mount_values[0] - mount_dir = mount_values[1] - if len(mount_values) > 2: - options = mount_values[2] - else: - options = None - mount_point = MountPoint(external_dir, chroot_dir + "/" + mount_dir, - getpass.getuser(), options) - mount_points.append(mount_point) - return mount_points - - -def CreateSymlink(target, link_name): - logger.GetLogger().LogFatalIf(target.startswith("/"), - "Can't create symlink to absolute path!") - real_from_file = utils.GetRoot(link_name)[0] + "/" + target - if os.path.realpath(real_from_file) != os.path.realpath(link_name): - if os.path.exists(link_name): - command = "rm -rf " + link_name - command_executer.GetCommandExecuter().RunCommand(command) - os.symlink(target, 
link_name) - - -if __name__ == "__main__": - retval = Main(sys.argv) - sys.exit(retval) diff --git a/v14/utils/utils.py b/v14/utils/utils.py deleted file mode 100755 index 7769c1bb..00000000 --- a/v14/utils/utils.py +++ /dev/null @@ -1,128 +0,0 @@ -#!/usr/bin/python2.6 -# -# Copyright 2010 Google Inc. All Rights Reserved. - -"""Utilities for toolchain build.""" - -__author__ = "asharif@google.com (Ahmad Sharif)" - -import hashlib -import os -import re -import logger - - -def GetRoot(scr_name): - """Break up pathname into (dir+name).""" - abs_path = os.path.abspath(scr_name) - return (os.path.dirname(abs_path), os.path.basename(abs_path)) - - -def FormatQuotedCommand(command): - return command.replace("\"", "\\\"") - - -def FormatCommands(commands): - output = str(commands) - output = re.sub("&&", "&&\n", output) - output = re.sub(";", ";\n", output) - output = re.sub("\n+\s*", "\n", output) - return output - - -def GetBuildPackagesCommand(board): - return "./build_packages --nousepkg --withdev --withtest --withautotest " \ - "--board=%s" % board - - -def GetBuildImageCommand(board): - return "./build_image --withdev --board=%s" % board - - -def GetModImageForTestCommand(board): - return "./mod_image_for_test.sh --yes --board=%s" % board - - -def GetSetupBoardCommand(board, gcc_version=None, binutils_version=None, - usepkg=None, force=None): - options = [] - - if gcc_version: - options.append("--gcc_version=%s" % gcc_version) - - if binutils_version: - options.append("--binutils_version=%s" % binutils_version) - - if usepkg: - options.append("--usepkg") - else: - options.append("--nousepkg") - - if force: - options.append("--force") - - return "./setup_board --board=%s %s" % (board, " ".join(options)) - - -def Md5File(filename, block_size=2**10): - md5 = hashlib.md5() - - try: - with open(filename) as f: - while True: - data = f.read(block_size) - if not data: - break - md5.update(data) - except IOError as ex: - logger.GetLogger().LogFatal(ex) - - return 
md5.hexdigest() - - -def GetP4ClientSpec(client_name, p4_paths): - p4_string = "" - for p4_path in p4_paths: - if " " not in p4_path: - p4_string += p4_path - else: - [remote_path, local_path] = p4_path.split() - if local_path.endswith("/") and not remote_path.endswith("/"): - local_path = "%s%s" % (local_path, os.path.basename(remote_path)) - p4_string += " -a \"%s //%s/%s\"" % (remote_path, client_name, local_path) - - return p4_string - - -def GetP4SyncCommand(revision=None): - command = "g4 sync" - if revision: - command += " @%s" % revision - return command - - -def GetP4SetupCommand(client_name, port, mappings, - checkout_dir=None): - command = "export P4CONFIG=.p4config" - if checkout_dir: - command += "&& mkdir -p %s && cd %s" % (checkout_dir, checkout_dir) - command += "&& cp ${HOME}/.p4config ." - command += "&& chmod u+w .p4config" - command += "&& echo \"P4PORT=%s\" >> .p4config" % port - command += "&& echo \"P4CLIENT=%s\" >> .p4config" % client_name - command += "&& g4 client " + GetP4ClientSpec(client_name, mappings) - return command - - -def GetP4VersionCommand(client_name, checkout_dir): - command = "cd %s" % checkout_dir - command += "&& g4 changes -m1 ...#have | grep -o 'Change [0-9]\+' | cut -d' ' -f2" - return command - - -def GetP4DeleteCommand(client_name, checkout_dir=None): - command = "" - if checkout_dir: - command += "cd %s &&" % checkout_dir - command = "g4 client -d %s" % client_name - return command |