diff options
Diffstat (limited to 'crosperf/benchmark_run.py')
-rw-r--r-- | crosperf/benchmark_run.py | 31 |
1 file changed, 15 insertions, 16 deletions
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
index c20b24e0..7579b6c2 100644
--- a/crosperf/benchmark_run.py
+++ b/crosperf/benchmark_run.py
@@ -8,12 +8,12 @@ import re
 import threading
 import time
 import traceback
-from results_cache import Result
-from utils import logger
-from utils import command_executer
+
 from autotest_runner import AutotestRunner
+from results_cache import Result
 from results_cache import ResultsCache
-
+from utils import command_executer
+from utils import logger
 STATUS_FAILED = "FAILED"
 STATUS_SUCCEEDED = "SUCCEEDED"
@@ -26,7 +26,7 @@ STATUS_PENDING = "PENDING"
 class BenchmarkRun(threading.Thread):

   def __init__(self, name, benchmark_name, autotest_name, autotest_args,
                label_name, chromeos_root, chromeos_image, board, iteration,
-               cache_conditions, outlier_range, profile_counters, profile_type,
+               cache_conditions, outlier_range, perf_args,
                machine_manager, logger_to_use):
     threading.Thread.__init__(self)
@@ -45,8 +45,7 @@ class BenchmarkRun(threading.Thread):
     self.status = STATUS_PENDING
     self.run_completed = False
     self.outlier_range = outlier_range
-    self.profile_counters = profile_counters
-    self.profile_type = profile_type
+    self.perf_args = perf_args
     self.machine_manager = machine_manager
     self.cache = ResultsCache()
     self.autotest_runner = AutotestRunner(self._logger)
@@ -68,10 +67,11 @@ class BenchmarkRun(threading.Thread):
                     self.autotest_name,
                     self.iteration,
                     self.autotest_args,
-                    self.machine_manager.GetMachines()[0].name,
+                    self.machine_manager,
                     self.board,
                     self.cache_conditions,
-                    self._logger)
+                    self._logger,
+                    )

     self.result = self.cache.ReadResult()
     self.cache_hit = (self.result is not None)
@@ -134,13 +134,12 @@ class BenchmarkRun(threading.Thread):
     return machine

   def _GetExtraAutotestArgs(self):
-    if self.profile_type:
-      if self.profile_type == "record":
-        perf_args = "record -a -e %s" % ",".join(self.profile_counters)
-      elif self.profile_type == "stat":
-        perf_args = "stat -a"
-      else:
-        raise Exception("profile_type must be either record or stat")
+    if self.perf_args:
+      perf_args_list = self.perf_args.split(" ")
+      perf_args_list = [perf_args_list[0]] + ["-a"] + perf_args_list[1:]
+      perf_args = " ".join(perf_args_list)
+      if not perf_args_list[0] in ["record", "stat"]:
+        raise Exception("perf_args must start with either record or stat")
     extra_autotest_args = ["--profiler=custom_perf",
                           ("--profiler_args='perf_options=\"%s\"'" % perf_args)]