Diffstat (limited to 'crosperf')
-rw-r--r--   crosperf/benchmark.py                 |   7
-rwxr-xr-x   crosperf/benchmark_run_unittest.py    |   3
-rwxr-xr-x   crosperf/crosperf.py                  |   2
-rw-r--r--   crosperf/experiment_factory.py        |  67
-rwxr-xr-x   crosperf/experiment_file_unittest.py  |   2
-rw-r--r--   crosperf/help.py                      |  79
-rw-r--r--   crosperf/image_checksummer.py         |   7
-rw-r--r--   crosperf/label.py                     |   6
-rw-r--r--   crosperf/mock_instance.py             |   8
-rw-r--r--   crosperf/results_cache.py             |  16
-rwxr-xr-x   crosperf/results_cache_unittest.py    |   2
-rw-r--r--   crosperf/results_organizer.py         |  12
-rw-r--r--   crosperf/settings_factory.py          |  43
-rw-r--r--   crosperf/suite_runner.py              |  46
-rw-r--r--   crosperf/translate_xbuddy.py          |   2
15 files changed, 98 insertions, 204 deletions
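The bulk of this patch removes the old pyauto path (outlier_range, key_results_only, use_test_that, md5sum/build fields) and slims down the Benchmark and Label constructors. For orientation, below is a minimal sketch (not part of the patch) of how a caller constructs a Benchmark with the new signature; the argument values are illustrative only.

    # Sketch only: the new Benchmark signature after this change is
    # Benchmark(name, test_name, test_args, iterations, rm_chroot_tmp,
    #           perf_args, suite="", show_all_results=False).
    from benchmark import Benchmark

    bench = Benchmark("page_cycler.morejs",   # name (Telemetry benchmark name)
                      "page_cycler.morejs",   # test_name
                      "",                      # test_args
                      3,                       # iterations
                      False,                   # rm_chroot_tmp
                      "",                      # perf_args
                      suite="telemetry_Crosperf",
                      show_all_results=False)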
diff --git a/crosperf/benchmark.py b/crosperf/benchmark.py
index 2b12e60d..c8a26bbb 100644
--- a/crosperf/benchmark.py
+++ b/crosperf/benchmark.py
@@ -16,19 +16,16 @@ class Benchmark(object):
   """
 
   def __init__(self, name, test_name, test_args, iterations,
-               outlier_range, key_results_only, rm_chroot_tmp, perf_args,
-               suite="pyauto", use_test_that=True, show_all_results=False):
+               rm_chroot_tmp, perf_args, suite="",
+               show_all_results=False):
     self.name = name  #For telemetry, this is the benchmark name.
     self.test_name = test_name  #For telemetry, this is the data.
     self.test_args = test_args
     self.iterations = iterations
-    self.outlier_range = outlier_range
     self.perf_args = perf_args
-    self.key_results_only = key_results_only
     self.rm_chroot_tmp = rm_chroot_tmp
     self.iteration_adjusted = False
     self.suite = suite
-    self.use_test_that = use_test_that
     self.show_all_results = show_all_results
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
index d493678d..0e7c92ff 100755
--- a/crosperf/benchmark_run_unittest.py
+++ b/crosperf/benchmark_run_unittest.py
@@ -23,7 +23,6 @@ class BenchmarkRunTest(unittest.TestCase):
     my_label = MockLabel("test1", "image1", "/tmp/test_benchmark_run",
                          "x86-alex", "chromeos2-row1-rack4-host9.cros",
                          image_args="",
-                         image_md5sum="",
                          cache_dir="")
     logging_level = "average"
     m = MockMachineManager("/tmp/chromeos_root", 0, logging_level)
@@ -32,8 +31,6 @@ class BenchmarkRunTest(unittest.TestCase):
                   "page_cycler.netsim.top_10",  # test_name
                   "",  # test_args
                   1,  # iteratins
-                  0.2,  # outlier_range
-                  False,  # key_results_only
                   False,  # rm_chroot_tmp
                   "",  # perf_args
                   suite="telemetry_Crosperf")  # suite
diff --git a/crosperf/crosperf.py b/crosperf/crosperf.py
index cb7911fd..88e510fd 100755
--- a/crosperf/crosperf.py
+++ b/crosperf/crosperf.py
@@ -62,7 +62,7 @@ def Main(argv):
   parser = optparse.OptionParser(usage=Help().GetUsage(),
                                  description=Help().GetHelp(),
                                  formatter=MyIndentedHelpFormatter(),
-                                 version="%prog 0.1")
+                                 version="%prog 3.0")
   parser.add_option("-l", "--log_dir",
                     dest="log_dir",
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index e10bc261..6a3e995b 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -76,16 +76,13 @@ class ExperimentFactory(object):
   """
 
   def _AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
-                          iterations, outlier_range, key_results_only,
-                          rm_chroot_tmp, perf_args, suite, use_test_that,
+                          iterations, rm_chroot_tmp, perf_args, suite,
                           show_all_results):
     """Add all the tests in a set to the benchmarks list."""
     for test_name in benchmark_list:
       telemetry_benchmark = Benchmark (test_name, test_name, test_args,
-                                       iterations, outlier_range,
-                                       key_results_only, rm_chroot_tmp,
-                                       perf_args, suite, use_test_that,
-                                       show_all_results)
+                                       iterations, rm_chroot_tmp, perf_args,
+                                       suite, show_all_results)
       benchmarks.append(telemetry_benchmark)
 
@@ -93,6 +90,7 @@ class ExperimentFactory(object):
     """Construct an experiment from an experiment file."""
     global_settings = experiment_file.GetGlobalSettings()
     experiment_name = global_settings.GetField("name")
+    board = global_settings.GetField("board")
     remote = global_settings.GetField("remote")
     # This is used to remove the ",' from the remote if user
     # add them to the remote string.
@@ -103,16 +101,17 @@ class ExperimentFactory(object):
       remote = new_remote
     chromeos_root = global_settings.GetField("chromeos_root")
     rm_chroot_tmp = global_settings.GetField("rm_chroot_tmp")
-    key_results_only = global_settings.GetField("key_results_only")
+    perf_args = global_settings.GetField("perf_args")
     acquire_timeout= global_settings.GetField("acquire_timeout")
     cache_dir = global_settings.GetField("cache_dir")
     config.AddConfig("no_email", global_settings.GetField("no_email"))
     share_users = global_settings.GetField("share_users")
     results_dir = global_settings.GetField("results_dir")
     chrome_src = global_settings.GetField("chrome_src")
-    build = global_settings.GetField("build")
-    use_test_that = global_settings.GetField("use_test_that")
     show_all_results = global_settings.GetField("show_all_results")
+    log_level = global_settings.GetField("logging_level")
+    if log_level not in ("quiet", "average", "verbose"):
+      log_level = "verbose"
     # Default cache hit conditions. The image checksum in the cache and the
     # computed checksum of the image must match. Also a cache file must exist.
     cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
@@ -136,49 +135,30 @@ class ExperimentFactory(object):
       test_name = benchmark_name
       test_args = benchmark_settings.GetField("test_args")
       iterations = benchmark_settings.GetField("iterations")
-      outlier_range = benchmark_settings.GetField("outlier_range")
-      perf_args = benchmark_settings.GetField("perf_args")
-      rm_chroot_tmp = benchmark_settings.GetField("rm_chroot_tmp")
-      key_results_only = benchmark_settings.GetField("key_results_only")
       suite = benchmark_settings.GetField("suite")
-      use_test_that = benchmark_settings.GetField("use_test_that")
-      show_all_results = benchmark_settings.GetField("show_all_results")
-      log_level = benchmark_settings.GetField("logging_level")
-      if log_level not in ("quiet", "average", "verbose"):
-        log_level = "verbose"
 
       if suite == 'telemetry_Crosperf':
         if test_name == 'all_perfv2':
           self._AppendBenchmarkSet (benchmarks, telemetry_perfv2_tests,
-                                    test_args, iterations, outlier_range,
-                                    key_results_only, rm_chroot_tmp,
-                                    perf_args, suite, use_test_that,
-                                    show_all_results)
+                                    test_args, iterations, rm_chroot_tmp,
+                                    perf_args, suite, show_all_results)
         elif test_name == 'all_pagecyclers':
           self._AppendBenchmarkSet (benchmarks, telemetry_pagecycler_tests,
-                                    test_args, iterations, outlier_range,
-                                    key_results_only, rm_chroot_tmp,
-                                    perf_args, suite, use_test_that,
-                                    show_all_results)
+                                    test_args, iterations, rm_chroot_tmp,
+                                    perf_args, suite, show_all_results)
         elif test_name == 'all_toolchain_perf':
           self._AppendBenchmarkSet (benchmarks, telemetry_toolchain_perf_tests,
-                                    test_args, iterations, outlier_range,
-                                    key_results_only, rm_chroot_tmp,
-                                    perf_args, suite, use_test_that,
-                                    show_all_results)
+                                    test_args, iterations, rm_chroot_tmp,
+                                    perf_args, suite, show_all_results)
         else:
           benchmark = Benchmark(test_name, test_name, test_args,
-                                iterations, outlier_range,
-                                key_results_only, rm_chroot_tmp,
-                                perf_args, suite, use_test_that,
+                                iterations, rm_chroot_tmp, perf_args, suite,
                                 show_all_results)
           benchmarks.append(benchmark)
       else:
         # Add the single benchmark.
         benchmark = Benchmark(benchmark_name, test_name, test_args,
-                              iterations, outlier_range,
-                              key_results_only, rm_chroot_tmp,
-                              perf_args, suite, use_test_that,
+                              iterations, rm_chroot_tmp, perf_args, suite,
                               show_all_results)
         benchmarks.append(benchmark)
@@ -188,21 +168,19 @@ class ExperimentFactory(object):
     all_remote = list(remote)
     for label_settings in all_label_settings:
       label_name = label_settings.name
-      board = label_settings.GetField("board")
       image = label_settings.GetField("chromeos_image")
       chromeos_root = label_settings.GetField("chromeos_root")
-      if image == "":
-        build = label_settings.GetField("build")
-        image = label_settings.GetXbuddyPath (build, board, chromeos_root,
-                                              log_level)
       my_remote = label_settings.GetField("remote")
       new_remote = []
       for i in my_remote:
         c = re.sub('["\']', '', i)
         new_remote.append(c)
       my_remote = new_remote
+      if image == "":
+        build = label_settings.GetField("build")
+        image = label_settings.GetXbuddyPath (build, board, chromeos_root,
+                                              log_level)
 
-      image_md5sum = label_settings.GetField("md5sum")
       cache_dir = label_settings.GetField("cache_dir")
       chrome_src = label_settings.GetField("chrome_src")
 
@@ -220,14 +198,15 @@ class ExperimentFactory(object):
       image_args = label_settings.GetField("image_args")
       if test_flag.GetTestMode():
         label = MockLabel(label_name, image, chromeos_root, board, my_remote,
-                          image_args, image_md5sum, cache_dir, chrome_src)
+                          image_args, cache_dir, chrome_src)
       else:
         label = Label(label_name, image, chromeos_root, board, my_remote,
-                      image_args, image_md5sum, cache_dir, chrome_src)
+                      image_args, cache_dir, chrome_src)
       labels.append(label)
 
     email = global_settings.GetField("email")
     all_remote = list(set(all_remote))
+    all_remote = list(set(my_remote))
     experiment = Experiment(experiment_name, all_remote, working_directory,
                             chromeos_root, cache_conditions,
                             labels, benchmarks,
diff --git a/crosperf/experiment_file_unittest.py b/crosperf/experiment_file_unittest.py
index d08c7eb5..335a516b 100755
--- a/crosperf/experiment_file_unittest.py
+++ b/crosperf/experiment_file_unittest.py
@@ -21,7 +21,6 @@ EXPERIMENT_FILE_1 = """
   }
 
   image2 {
-    board: lumpy
     remote: chromeos-lumpy1
     chromeos_image: /usr/local/google/cros_image2.bin
   }
@@ -82,7 +81,6 @@ class ExperimentFileTest(unittest.TestCase):
     label_settings = experiment_file.GetSettings("label")
     self.assertEqual(len(label_settings), 2)
     self.assertEqual(label_settings[0].name, "image1")
-    self.assertEqual(label_settings[0].GetField("board"), "x86-alex")
     self.assertEqual(label_settings[0].GetField("chromeos_image"),
                      "/usr/local/google/cros_image1.bin")
     self.assertEqual(label_settings[1].GetField("remote"), ["chromeos-lumpy1"])
diff --git a/crosperf/help.py b/crosperf/help.py
index cf74d93e..09a6c66c 100644
--- a/crosperf/help.py
+++ b/crosperf/help.py
@@ -11,7 +11,7 @@ from settings_factory import LabelSettings
 class Help(object):
 
   def GetUsage(self):
-    return """%s [OPTIONS] [ACTION] EXPERIMENT_FILE""" % (sys.argv[0])
+    return """%s [OPTIONS] EXPERIMENT_FILE""" % (sys.argv[0])
 
   def _WrapLine(self, line):
     return "\n".join(textwrap.wrap(line, 80))
@@ -34,21 +34,23 @@ class Help(object):
     benchmark_fields = self._GetFieldDescriptions(BenchmarkSettings("").fields)
     label_fields = self._GetFieldDescriptions(LabelSettings("").fields)
 
-    return """%s is a script for running performance experiments on ChromeOS. It
-allows one to run ChromeOS Autotest benchmarks over several images and compare
-the results to determine whether there is a performance difference.
+ return """%s is a script for running performance experiments on +ChromeOS. It allows one to run ChromeOS Autotest benchmarks over +several images and compare the results to determine whether there +is a performance difference. Comparing several images using %s is referred to as running an -"experiment". An "experiment file" is a configuration file which holds all the -information that describes the experiment and how it should be run. An example -of a simple experiment file is below: +"experiment". An "experiment file" is a configuration file which holds +all the information that describes the experiment and how it should be +run. An example of a simple experiment file is below: --------------------------------- test.exp --------------------------------- name: my_experiment board: x86-alex -remote: chromeos-alex5 172.18.122.132 +remote: chromeos2-row1-rack4-host7.cros 172.18.122.132 -benchmark: PageCycler { +benchmark: page_cycler.morejs { + suite: telemetry_Crosperf iterations: 3 } @@ -61,20 +63,26 @@ my_second_image { } ---------------------------------------------------------------------------- -This experiment file names the experiment "my_experiment". It will be run -on the board x86-alex. Benchmarks will be run using two remote devices, -one is a device specified by a hostname and the other is a device specified -by it's IP address. Benchmarks will be run in parallel across these devices. -There is currently no way to specify which benchmark will run on each device. - -We define one "benchmark" that will be run, PageCycler. This benchmark has one -"field" which specifies how many iterations it will run for. - -We specify 2 "labels" or images which will be compared. The PageCycler benchmark -will be run on each of these images 3 times and a result table will be output -which compares the two. - -The full list of fields that can be specified are as follows: +This experiment file names the experiment "my_experiment". It will be +run on the board x86-alex. Benchmarks will be run using two remote +devices, one is a device specified by a hostname and the other is a +device specified by it's IP address. Benchmarks will be run in +parallel across these devices. There is currently no way to specify +which benchmark will run on each device. + +We define one "benchmark" that will be run, page_cycler.morejs. This +benchmark has two "fields", one which specifies that this benchmark is +part of the telemetry_Crosperf suite (this is the common way to run +most Telemetry benchmarks), and the other which specifies how many +iterations it will run for. + +We specify one or more "labels" or images which will be compared. The +page_cycler.morejs benchmark will be run on each of these images 3 +times and a result table will be output which compares them for all +the images specified. + +The full list of fields that can be specified in the experiment file +are as follows: ================= Global Fields ================= @@ -88,19 +96,16 @@ Label Fields ================= %s -Note that global fields are overidden by label or benchmark fields, if they can -be specified in both places. Fields that are specified as arguments override -fields specified in experiment files. - -%s is invoked by passing it a path to an experiment file, as well as an action -to execute on that experiment file. The possible actions to use are: - -run\t\tRun the experiment and cache the results. - -table\t\tDisplay cached results of an experiment, without running anything. - -email\t\tEmail a summary of the results to the user. 
-
-do\t\tThe default action. Executes the following actions: run, table, email.
+Note that global fields are overidden by label or benchmark fields, if
+they can be specified in both places. Fields that are specified as
+arguments override fields specified in experiment files.
+
+%s is invoked by passing it a path to an experiment file,
+as well as any options (in addition to those specified in the
+experiment file). Crosperf runs the experiment and caches the results
+(or reads the previously cached experiment results out of the cache),
+generates and displays a report based on the run, and emails the
+report to the user. If the results were all read out of the cache,
+then by default no email is generated.
 """ % (sys.argv[0], sys.argv[0], global_fields,
        benchmark_fields, label_fields, sys.argv[0])
diff --git a/crosperf/image_checksummer.py b/crosperf/image_checksummer.py
index 2f6694d9..6a727083 100644
--- a/crosperf/image_checksummer.py
+++ b/crosperf/image_checksummer.py
@@ -32,12 +32,7 @@ class ImageChecksummer(object):
             logger.GetLogger().LogOutput("Computed checksum is "
                                          ": %s" % self._checksum)
         if not self._checksum:
-          if self.label.image_md5sum:
-            self._checksum = self.label.image_md5sum
-            logger.GetLogger().LogOutput("Checksum in experiment file is "
-                                         ": %s" % self._checksum)
-          else:
-            raise Exception("Checksum computing error.")
+          raise Exception("Checksum computing error.")
         logger.GetLogger().LogOutput("Checksum is: %s" % self._checksum)
       return self._checksum
diff --git a/crosperf/label.py b/crosperf/label.py
index c212125b..52f98cef 100644
--- a/crosperf/label.py
+++ b/crosperf/label.py
@@ -13,7 +13,7 @@ from utils import misc
 class Label(object):
 
   def __init__(self, name, chromeos_image, chromeos_root, board, remote,
-               image_args, image_md5sum, cache_dir, chrome_src=None):
+               image_args, cache_dir, chrome_src=None):
 
     self.image_type = self._GetImageType(chromeos_image)
 
@@ -27,7 +27,6 @@ class Label(object):
     self.board = board
     self.remote = remote
     self.image_args = image_args
-    self.image_md5sum = image_md5sum
     self.cache_dir = cache_dir
 
     if not chromeos_root:
@@ -69,7 +68,7 @@ class Label(object):
 class MockLabel(object):
 
   def __init__(self, name, chromeos_image, chromeos_root, board, remote,
-               image_args, image_md5sum, cache_dir, chrome_src=None):
+               image_args, cache_dir, chrome_src=None):
     self.name = name
     self.chromeos_image = chromeos_image
     self.board = board
@@ -80,5 +79,4 @@ class MockLabel(object):
     else:
       self.chromeos_root = chromeos_root
     self.image_args = image_args
-    self.image_md5sum = image_md5sum
     self.chrome_src = chrome_src
diff --git a/crosperf/mock_instance.py b/crosperf/mock_instance.py
index b86d8dee..74fc21b3 100644
--- a/crosperf/mock_instance.py
+++ b/crosperf/mock_instance.py
@@ -12,23 +12,22 @@ from label import MockLabel
 from machine_manager import MockMachineManager
 from results_cache import MockResultsCache
 
+perf_args = "record -a -e cycles"
 label1 = MockLabel("test1", "image1", "/tmp/test_benchmark_run",
                    "x86-alex",
                    "chromeos-alex1",
                    image_args="",
-                   image_md5sum="",
                    cache_dir="")
 
 label2 = MockLabel("test2", "image2", "/tmp/test_benchmark_run_2",
                    "x86-alex",
                    "chromeos-alex2",
                    image_args="",
-                   image_md5sum="",
                    cache_dir="")
 
 benchmark1 = Benchmark("benchmark1", "autotest_name_1",
-                       "autotest_args", 2, "", "perf_args", "", "")
+                       "autotest_args", 2, "", perf_args, "", "")
 
 benchmark2 = Benchmark("benchmark2", "autotest_name_2",
-                       "autotest_args", 2, "", "perf_args", "", "")
+                       "autotest_args", 2, "", perf_args, "", "")
 
 keyval = {}
 
@@ -113,4 +112,3 @@ keyval[8] =
 {'': 'PASS', 'test{1}': '2', 'test{2}': '8', 'bool': 'TRUE'}
-
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index 43f0d1e8..91d2b437 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -304,7 +304,11 @@ class Result(object):
 
   def CleanUp(self, rm_chroot_tmp):
     if rm_chroot_tmp and self.results_dir:
-      command = "rm -rf %s" % self.results_dir
+      dirname, basename = misc.GetRoot(self.results_dir)
+      if basename.find("test_that_results_") != -1:
+        command = "rm -rf %s" % self.results_dir
+      else:
+        command = "rm -rf %s" % dirname
       self._ce.RunCommand(command)
     if self._temp_dir:
       command = "rm -rf %s" % self._temp_dir
@@ -352,9 +356,9 @@ class Result(object):
 
   @classmethod
   def CreateFromRun(cls, logger, log_level, label, out, err, retval, show_all,
-                    test, suite="pyauto"):
+                    test, suite="telemetry_Crosperf"):
     if suite == "telemetry":
-      result = TelemetryResult(logger, label)
+      result = TelemetryResult(logger, label, log_level)
     else:
       result = cls(logger, label, log_level)
     result._PopulateFromRun(out, err, retval, show_all, test, suite)
@@ -362,7 +366,7 @@ class Result(object):
 
   @classmethod
   def CreateFromCacheHit(cls, logger, log_level, label, cache_dir,
-                         show_all, test, suite="pyauto"):
+                         show_all, test, suite="telemetry_Crosperf"):
     if suite == "telemetry":
       result = TelemetryResult(logger, label)
     else:
@@ -378,8 +382,8 @@ class Result(object):
 
 class TelemetryResult(Result):
 
-  def __init__(self, logger, label):
-    super(TelemetryResult, self).__init__(logger, label)
+  def __init__(self, logger, label, log_level):
+    super(TelemetryResult, self).__init__(logger, label, log_level)
 
   def _PopulateFromRun(self, out, err, retval, show_all, test, suite):
     self.out = out
diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py
index 5f7fbc97..099aed81 100755
--- a/crosperf/results_cache_unittest.py
+++ b/crosperf/results_cache_unittest.py
@@ -126,7 +126,7 @@ class MockResult(Result):
 
 class ResultTest(unittest.TestCase):
   mock_label = MockLabel("mock_label", "chromeos_image", "/tmp", "lumpy",
-                         "remote", "image_args", "image_md5sum", "cache_dir")
+                         "remote", "image_args", "cache_dir")
 
   def testCreateFromRun(self):
     result = MockResult.CreateFromRun(logger.GetLogger(), "average",
                                       self.mock_label,
diff --git a/crosperf/results_organizer.py b/crosperf/results_organizer.py
index a771922f..0460b6e9 100644
--- a/crosperf/results_organizer.py
+++ b/crosperf/results_organizer.py
@@ -28,11 +28,6 @@ class ResultOrganizer(object):
       [
       ]}.
   """
-  key_filter = ["milliseconds_",
-                "retval",
-                "iterations",
-                "ms_",
-                "score_"]
 
   def __init__(self, benchmark_runs, labels, benchmarks=None):
     self.result = {}
@@ -59,9 +54,6 @@ class ResultOrganizer(object):
         if not benchmark_run.result:
           continue
         benchmark = benchmark_run.benchmark
-        key_filter_on = (benchmark.key_results_only and
-                         "PyAutoPerfTest" in benchmark.name + benchmark.test_name
-                         and "perf." not in benchmark.test_args)
         if not show_all_results:
           summary_list = self._GetSummaryResults(benchmark.test_name)
           if len(summary_list) > 0:
@@ -70,10 +62,6 @@ class ResultOrganizer(object):
             # Did not find test_name in json file; therefore show everything.
             show_all_results = True
         for test_key in benchmark_run.result.keyvals:
-          if (key_filter_on and
-              not any([key for key in self.key_filter if key in test_key])
-             ):
-            continue
           if not show_all_results and not test_key in summary_list:
             continue
           result_value = benchmark_run.result.keyvals[test_key]
diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py
index 0584fbe2..59dd14a9 100644
--- a/crosperf/settings_factory.py
+++ b/crosperf/settings_factory.py
@@ -26,32 +26,8 @@ class BenchmarkSettings(Settings):
     self.AddField(IntegerField("iterations", default=1,
                                description="Number of iterations to run the "
                                "test."))
-    self.AddField(FloatField("outlier_range", default=0.2,
-                             description="The percentage of highest/lowest "
-                             "values to omit when computing the average."))
-    self.AddField(BooleanField("rm_chroot_tmp", default=False,
-                               description="Whether remove the run_remote_test"
-                               "result in the chroot"))
-    self.AddField(BooleanField("key_results_only", default=True,
-                               description="Whether only show the key results"
-                               "of pyautoperf"))
-    self.AddField(TextField("perf_args", default="",
-                            description="The optional profile command. It "
-                            "enables perf commands to record perforamance "
-                            "related counters. It must start with perf "
-                            "command record or stat followed by arguments."))
-    self.AddField(TextField("suite", default="pyauto",
+    self.AddField(TextField("suite", default="",
                             description="The type of the benchmark"))
-    self.AddField(TextField("logging_level", default="average",
-                            description="The level of logging desired. "
-                            "Options are 'quiet', 'average', and 'verbose'."))
-    self.AddField(BooleanField("use_test_that", default=True,
-                               description="Whether to use the"
-                               " new test_that script for running the test."))
-    self.AddField(BooleanField("show_all_results", default=False,
-                               description="When running Telemetry tests, "
-                               "whether to all the results, instead of just "
-                               "the default (summary) results."))
 
 
 class LabelSettings(Settings):
@@ -66,10 +42,6 @@ class LabelSettings(Settings):
                             "contains a src/scripts directory. Defaults to "
                             "the chromeos checkout which contains the "
                             "chromeos_image."))
-    self.AddField(TextField("md5sum", default="",
-                            description="The md5sum of this image"))
-    self.AddField(TextField("board", required=True, description="The target "
-                            "board for running experiments on, e.g. x86-alex."))
     self.AddField(ListField("remote", description=
                             "A comma-separated list of ip's of chromeos"
                             "devices to run experiments on."))
@@ -133,9 +105,6 @@ class GlobalSettings(Settings):
     self.AddField(TextField("logging_level", default="average",
                             description="The level of logging desired. "
" "Options are 'quiet', 'average', and 'verbose'.")) - self.AddField(BooleanField("key_results_only", default=True, - description="Whether only show the key results" - "of pyautoperf")) self.AddField(IntegerField("acquire_timeout", default=0, description="Number of seconds to wait for " "machine before exit if all the machines in " @@ -151,9 +120,6 @@ class GlobalSettings(Settings): self.AddField(BooleanField("no_email", default=False, description="Whether to disable the email to " "user after crosperf finishes.")) - self.AddField(BooleanField("use_test_that", default=True, - description="Whether to use the " - "new test_that script for running the test.")) self.AddField(BooleanField("show_all_results", default=False, description="When running Telemetry tests, " "whether to all the results, instead of just " @@ -168,13 +134,6 @@ class GlobalSettings(Settings): "This is used to run telemetry benchmarks. " "The default one is the src inside chroot.", required=False, default="")) - self.AddField(TextField("build", - description="The xbuddy specification for an " - "official or trybot image to use for tests. " - "'/remote' is assumed, and the board is given " - "elsewhere, so omit the '/remote/<board>/' xbuddy" - "prefix.", - required=False, default="")) class SettingsFactory(object): diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py index 00e6e51e..8e2847ef 100644 --- a/crosperf/suite_runner.py +++ b/crosperf/suite_runner.py @@ -57,12 +57,9 @@ class SuiteRunner(object): elif benchmark.suite == "telemetry_Crosperf": return self.Telemetry_Crosperf_Run(machine, label, benchmark, test_args, profiler_args) - elif benchmark.use_test_that: + else: return self.Test_That_Run(machine, label, benchmark, test_args, profiler_args) - else: - return self.Pyauto_Run(machine, label, benchmark, test_args, - profiler_args) def GetHighestStaticFrequency(self, machine_name, chromeos_root): """ Gets the highest static frequency for the specified machine @@ -121,33 +118,6 @@ class SuiteRunner(object): # Whenever we reboot the machine, we need to restore the governor settings. self.PinGovernorExecutionFrequencies(machine_name, chromeos_root) - def Pyauto_Run(self, machine, label, benchmark, test_args, profiler_args): - """Run the run_remote_test.""" - options = "" - if label.board: - options += " --board=%s" % label.board - if test_args: - options += " %s" % test_args - if profiler_args: - options += " %s" % profiler_args - command = "rm -rf /usr/local/autotest/results/*" - self._ce.CrosRunCommand(command, machine=machine, username="root", - chromeos_root=label.chromeos_root) - - # We do this because PyAuto tests leave the machine in weird states. - # Rebooting between iterations has proven to help with this. - self.RebootMachine(machine, label.chromeos_root) - - command = ("./run_remote_tests.sh --use_emerged --remote=%s %s %s" % - (machine, options, benchmark.test_name)) - if self.log_level != "verbose": - self._logger.LogOutput("Running test.") - self._logger.LogOutput("CMD: %s" % command) - return self._ce.ChrootRunCommand(label.chromeos_root, - command, - True, - self._ct) - def Test_That_Run(self, machine, label, benchmark, test_args, profiler_args): """Run the test_that test..""" options = "" @@ -161,7 +131,7 @@ class SuiteRunner(object): self._ce.CrosRunCommand(command, machine=machine, username="root", chromeos_root=label.chromeos_root) - # We do this because PyAuto tests leave the machine in weird states. + # We do this because some tests leave the machine in weird states. 
     # Rebooting between iterations has proven to help with this.
     self.RebootMachine(machine, label.chromeos_root)
 
@@ -219,19 +189,25 @@ class SuiteRunner(object):
 
   def Telemetry_Run(self, machine, label, benchmark):
+    telemetry_run_path = ""
     if not os.path.isdir(label.chrome_src):
       self._logger.LogFatal("Cannot find chrome src dir to"
-                            " run telemetry.")
+                            " run telemetry.")
+    else:
+      telemetry_run_path = os.path.join(label.chrome_src, "src/tools/perf")
+      if not os.path.exists(telemetry_run_path):
+        self._logger.LogFatal("Cannot find %s directory." % telemetry_run_path)
+
     rsa_key = os.path.join(label.chromeos_root,
         "src/scripts/mod_for_test_scripts/ssh_keys/testing_rsa")
 
     cmd = ("cd {0} && "
-           "./tools/perf/run_measurement "
+           "./run_measurement "
            "--browser=cros-chrome "
            "--output-format=csv "
            "--remote={1} "
            "--identity {2} "
-           "{3} {4}".format(label.chrome_src, machine,
+           "{3} {4}".format(telemetry_run_path, machine,
                             rsa_key,
                             benchmark.test_name,
                             benchmark.test_args))
diff --git a/crosperf/translate_xbuddy.py b/crosperf/translate_xbuddy.py
index 8a559270..92cf0230 100644
--- a/crosperf/translate_xbuddy.py
+++ b/crosperf/translate_xbuddy.py
@@ -22,7 +22,7 @@ def Main(xbuddy_string):
     os.symlink (config_path, './xbuddy_config.ini')
   x = xbuddy.XBuddy(manage_builds=False, static_dir='/tmp/devserver/static')
   build_id = x.Translate(os.path.split(xbuddy_string))
-  return build_id
+  return build_id
 
 if __name__ == "__main__":
   build_id = Main(sys.argv[1])
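For reference, a hedged sketch of the command line that the reworked Telemetry_Run assembles after this patch; the chrome checkout path, ChromeOS root, remote address, and benchmark name below are illustrative assumptions, not values from the patch.

    # Sketch only: mirrors the cmd construction in suite_runner.py above.
    import os

    chrome_src = "/home/user/chrome"        # assumed label.chrome_src
    chromeos_root = "/home/user/chromeos"   # assumed label.chromeos_root
    telemetry_run_path = os.path.join(chrome_src, "src/tools/perf")
    rsa_key = os.path.join(chromeos_root,
                           "src/scripts/mod_for_test_scripts/ssh_keys/testing_rsa")
    cmd = ("cd {0} && "
           "./run_measurement "
           "--browser=cros-chrome "
           "--output-format=csv "
           "--remote={1} "
           "--identity {2} "
           "{3} {4}".format(telemetry_run_path, "172.18.122.132", rsa_key,
                            "page_cycler.morejs", ""))
    print(cmd)

The key difference from the old behavior is that run_measurement is now invoked from the src/tools/perf directory (which is checked up front) instead of from the top of the chrome checkout.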