Diffstat (limited to 'crosperf')
-rw-r--r--   crosperf/benchmark.py                 5
-rw-r--r--   crosperf/benchmark_run.py             6
-rw-r--r--   crosperf/experiment_factory.py       24
-rw-r--r--   crosperf/results_cache.py             5
-rwxr-xr-x   crosperf/results_cache_unittest.py    3
-rw-r--r--   crosperf/settings_factory.py          5
-rw-r--r--   crosperf/suite_runner.py              3
7 files changed, 35 insertions, 16 deletions
diff --git a/crosperf/benchmark.py b/crosperf/benchmark.py
index 506a825d..7fabf0b4 100644
--- a/crosperf/benchmark.py
+++ b/crosperf/benchmark.py
@@ -17,7 +17,7 @@ class Benchmark(object):
def __init__(self, name, test_name, test_args, iterations,
rm_chroot_tmp, perf_args, suite="",
- show_all_results=False, retries=0):
+ show_all_results=False, retries=0, run_local=False):
self.name = name
#For telemetry, this is the benchmark name.
self.test_name = test_name
@@ -32,3 +32,6 @@ class Benchmark(object):
self.retries = retries
if self.suite == "telemetry":
self.show_all_results = True
+ if run_local and self.suite != 'telemetry_Crosperf':
+ raise Exception("run_local is only supported by telemetry_Crosperf.")
+ self.run_local = run_local
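The guard above means run_local is only accepted alongside the telemetry_Crosperf
suite; any other suite raises at construction time. A minimal sketch of the
behavior (the constructor argument values here are illustrative, not from a
real experiment):

    # Accepted: run_local together with telemetry_Crosperf.
    b = Benchmark('octane', 'octane', '', 1, False, '',
                  suite='telemetry_Crosperf', run_local=True)
    assert b.run_local

    # Rejected: run_local with any other suite.
    try:
        Benchmark('octane', 'octane', '', 1, False, '',
                  suite='telemetry', run_local=True)
    except Exception as e:
        print(e)  # run_local is only supported by telemetry_Crosperf.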
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
index 0632dde6..cee41dac 100644
--- a/crosperf/benchmark_run.py
+++ b/crosperf/benchmark_run.py
@@ -82,7 +82,8 @@ class BenchmarkRun(threading.Thread):
self.label,
self.share_cache,
self.benchmark.suite,
- self.benchmark.show_all_results
+ self.benchmark.show_all_results,
+ self.benchmark.run_local
)
self.result = self.cache.ReadResult()
@@ -247,7 +248,8 @@ class MockBenchmarkRun(BenchmarkRun):
self.label,
self.share_cache,
self.benchmark.suite,
- self.benchmark.show_all_results
+ self.benchmark.show_all_results,
+ self.benchmark.run_local
)
self.result = self.cache.ReadResult()
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index 5b84aeaf..65d7ce8d 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -92,12 +92,12 @@ class ExperimentFactory(object):
def _AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results, retries):
+ show_all_results, retries, run_local):
"""Add all the tests in a set to the benchmarks list."""
for test_name in benchmark_list:
telemetry_benchmark = Benchmark (test_name, test_name, test_args,
iterations, rm_chroot_tmp, perf_args,
- suite, show_all_results, retries)
+ suite, show_all_results, retries, run_local)
benchmarks.append(telemetry_benchmark)
@@ -163,40 +163,46 @@ class ExperimentFactory(object):
iterations = benchmark_settings.GetField("iterations")
suite = benchmark_settings.GetField("suite")
retries = benchmark_settings.GetField("retries")
+ run_local = benchmark_settings.GetField("run_local")
if suite == 'telemetry_Crosperf':
if test_name == 'all_perfv2':
self._AppendBenchmarkSet (benchmarks, telemetry_perfv2_tests,
test_args, iterations, rm_chroot_tmp,
- perf_args, suite, show_all_results, retries)
+ perf_args, suite, show_all_results, retries,
+ run_local)
elif test_name == 'all_pagecyclers':
self._AppendBenchmarkSet (benchmarks, telemetry_pagecycler_tests,
test_args, iterations, rm_chroot_tmp,
- perf_args, suite, show_all_results, retries)
+ perf_args, suite, show_all_results, retries,
+ run_local)
elif test_name == 'all_toolchain_perf':
self._AppendBenchmarkSet (benchmarks, telemetry_toolchain_perf_tests,
test_args, iterations, rm_chroot_tmp,
- perf_args, suite, show_all_results, retries)
+ perf_args, suite, show_all_results, retries,
+ run_local)
# Add non-telemetry toolchain-perf benchmarks:
benchmarks.append(Benchmark('graphics_WebGLAquarium',
'graphics_WebGLAquarium', '', iterations,
rm_chroot_tmp, perf_args, '',
- show_all_results, retries))
+ show_all_results, retries,
+ run_local=False))
elif test_name == 'all_toolchain_perf_old':
self._AppendBenchmarkSet (benchmarks,
telemetry_toolchain_old_perf_tests,
test_args, iterations, rm_chroot_tmp,
- perf_args, suite, show_all_results, retries)
+ perf_args, suite, show_all_results, retries,
+ run_local)
else:
benchmark = Benchmark(test_name, test_name, test_args,
iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results, retries)
+ show_all_results, retries, run_local)
benchmarks.append(benchmark)
else:
# Add the single benchmark.
benchmark = Benchmark(benchmark_name, test_name, test_args,
iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results)
+                            show_all_results, run_local=run_local)
benchmarks.append(benchmark)
# Construct labels.
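One subtlety in the single-benchmark branch above: Benchmark's parameters are
ordered suite, show_all_results, retries, run_local, and this call site does
not pass retries. A ninth positional argument would therefore bind to the
retries slot, which is why run_local is passed by keyword. A minimal
illustration (argument values are hypothetical):

    # Signature: Benchmark(name, test_name, test_args, iterations,
    #                      rm_chroot_tmp, perf_args, suite='',
    #                      show_all_results=False, retries=0, run_local=False)

    # Mis-binding: the trailing True lands in retries, not run_local.
    b = Benchmark('b', 'b', '', 1, False, '', 'telemetry_Crosperf', True, True)
    # -> b.retries == True, b.run_local == False  (silent mis-binding)

    # Intended: the keyword leaves retries at its default.
    b = Benchmark('b', 'b', '', 1, False, '', 'telemetry_Crosperf', True,
                  run_local=True)
    # -> b.retries == 0, b.run_local == True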
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index de6e572a..232f13bc 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -471,7 +471,7 @@ class ResultsCache(object):
def Init(self, chromeos_image, chromeos_root, test_name, iteration,
test_args, profiler_args, machine_manager, board, cache_conditions,
logger_to_use, log_level, label, share_cache, suite,
- show_all_results):
+ show_all_results, run_local):
self.chromeos_image = chromeos_image
self.chromeos_root = chromeos_root
self.test_name = test_name
@@ -489,6 +489,7 @@ class ResultsCache(object):
self.suite = suite
self.log_level = log_level
self.show_all = show_all_results
+ self.run_local = run_local
def _GetCacheDirForRead(self):
matching_dirs = []
@@ -550,7 +551,7 @@ class ResultsCache(object):
machine_id_checksum = machine.machine_id_checksum
break
- temp_test_args = "%s %s" % (self.test_args, self.profiler_args)
+ temp_test_args = "%s %s %s" % (self.test_args, self.profiler_args, self.run_local)
test_args_checksum = hashlib.md5(
"".join(temp_test_args)).hexdigest()
return (image_path_checksum,
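Folding run_local into the test-args checksum keeps cached results from local
and non-local runs from colliding. A minimal sketch of the effect, mirroring
the expression in the hunk above (the test_args and profiler_args values are
illustrative; like the file itself, the snippet assumes Python 2, where
hashlib.md5 accepts a str):

    import hashlib

    # Illustrative values; in ResultsCache these come from Init().
    test_args, profiler_args = "--report all", ""
    for run_local in (False, True):
        temp_test_args = "%s %s %s" % (test_args, profiler_args, run_local)
        print("run_local=%s -> %s" % (
            run_local, hashlib.md5("".join(temp_test_args)).hexdigest()))
    # The two digests differ, so each mode maps to its own cache entry.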
diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py
index 04e2975f..2ca62ad7 100755
--- a/crosperf/results_cache_unittest.py
+++ b/crosperf/results_cache_unittest.py
@@ -794,7 +794,8 @@ class ResultsCacheTest(unittest.TestCase):
self.mock_label,
'', # benchmark_run.share_cache
'telemetry_Crosperf',
- True) # benchmark_run.show_all_results
+ True, # benchmark_run.show_all_results
+ False) # benchmark_run.run_local
@mock.patch.object (image_checksummer.ImageChecksummer, 'Checksum')
diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py
index ba21a647..bf15719a 100644
--- a/crosperf/settings_factory.py
+++ b/crosperf/settings_factory.py
@@ -31,6 +31,11 @@ class BenchmarkSettings(Settings):
self.AddField(IntegerField("retries", default=0,
description="Number of times to retry a "
"benchmark run."))
+ self.AddField(BooleanField("run_local",
+ description="Run benchmark harness locally. "
+ "Currently only compatible with the suite: "
+ "telemetry_Crosperf.",
+ required=False, default=False))
class LabelSettings(Settings):
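With the field registered, an experiment file can opt in per benchmark. A
sketch of how the field might appear in crosperf's experiment-file syntax
(the benchmark name and iteration count are illustrative):

    benchmark: octane {
      suite: telemetry_Crosperf
      iterations: 3
      run_local: True
    }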
diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
index a1332273..6a2a1297 100644
--- a/crosperf/suite_runner.py
+++ b/crosperf/suite_runner.py
@@ -184,12 +184,13 @@ class SuiteRunner(object):
test_args = test_args[1:-1]
args_string = "test_args='%s'" % test_args
- cmd = ('{} {} {} --board={} --args="{} test={} '
+ cmd = ('{} {} {} --board={} --args="{} run_local={} test={} '
'{}" {} telemetry_Crosperf'.format(TEST_THAT_PATH,
autotest_dir_arg,
fast_arg,
label.board,
args_string,
+ benchmark.run_local,
benchmark.test_name,
profiler_args,
machine))
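For reference, this is how the updated format string expands. A minimal sketch
with stand-in values for TEST_THAT_PATH, the board, the test, and the machine
(only the format string itself is taken from the diff):

    # Illustrative stand-ins; real values come from SuiteRunner's context.
    TEST_THAT_PATH = '/usr/bin/test_that'
    autotest_dir_arg, fast_arg, profiler_args = '', '--fast', ''
    args_string = "test_args=''"

    cmd = ('{} {} {} --board={} --args="{} run_local={} test={} '
           '{}" {} telemetry_Crosperf'.format(TEST_THAT_PATH,
                                              autotest_dir_arg,
                                              fast_arg,
                                              'lumpy',
                                              args_string,
                                              True,
                                              'octane',
                                              profiler_args,
                                              'remote-host'))
    print(cmd)
    # Output (wrapped for readability):
    # /usr/bin/test_that  --fast --board=lumpy
    #   --args="test_args='' run_local=True test=octane " remote-host telemetry_Crosperf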