author    Caroline Tice <cmtice@google.com>    2019-08-06 16:21:18 -0700
committer Caroline Tice <cmtice@chromium.org>  2019-08-07 21:50:07 +0000
commit    e39d7700abdb5aa21814e54104af857eebf64b88 (patch)
tree      a3d793581e4d8ba26d5c2181bf4573b9fede1fae /crosperf
parent    e5a86824924123af66cd5520f0cfba69156bd38a (diff)
toolchain-utils: Fix argument-passing error in experiment_factory.py
Fix all calls to Benchmark inside experiment_factory.py to pass all
args, so that an arg with a default value cannot accidentally receive a
value intended for a different arg.

BUG=chromium:991396
TEST=Tested locally with Crosperf and it worked properly.

Change-Id: Ia8096e2989e179723e122090ea40a11747070b22
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/1740646
Reviewed-by: Zhizhou Yang <zhizhouy@google.com>
Tested-by: Caroline Tice <cmtice@chromium.org>
Legacy-Commit-Queue: Commit Bot <commit-bot@chromium.org>
Commit-Queue: ChromeOS CL Exonerator Bot <chromiumos-cl-exonerator@appspot.gserviceaccount.com>
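The failure mode is easy to reproduce in isolation. Below is a minimal
Python sketch; benchmark_v1/benchmark_v2 and their parameter lists are
hypothetical stand-ins for the real Benchmark constructor. Once cwp_dso
and weight are inserted ahead of the trailing turbostat default, an old
positional call site keeps running but binds its last value to the
wrong parameter:

    # Original shape: one trailing option with a default value.
    def benchmark_v1(name, retries=0, turbostat=False):
        return {'name': name, 'retries': retries, 'turbostat': turbostat}

    # The signature later grows: cwp_dso and weight land ahead of turbostat.
    def benchmark_v2(name, retries=0, cwp_dso='', weight=0, turbostat=False):
        return {'name': name, 'retries': retries, 'cwp_dso': cwp_dso,
                'weight': weight, 'turbostat': turbostat}

    # A stale positional call still "works", but the True that was meant
    # for turbostat silently lands in cwp_dso:
    print(benchmark_v2('octane', 3, True))
    # {'name': 'octane', 'retries': 3, 'cwp_dso': True, 'weight': 0,
    #  'turbostat': False}

    # Passing every argument, with keywords where the order could drift,
    # removes the ambiguity; that is the discipline this CL applies:
    print(benchmark_v2('octane', 3, cwp_dso='', weight=0, turbostat=True))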
Diffstat (limited to 'crosperf')
-rw-r--r--  crosperf/experiment_factory.py           | 110
-rwxr-xr-x  crosperf/experiment_factory_unittest.py  |  14
2 files changed, 91 insertions(+), 33 deletions(-)
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index 6e75644b..7bc6e9fe 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -104,12 +104,14 @@ class ExperimentFactory(object):
def AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results, retries, run_local, turbostat):
+ show_all_results, retries, run_local, cwp_dso, weight,
+ turbostat):
"""Add all the tests in a set to the benchmarks list."""
for test_name in benchmark_list:
- telemetry_benchmark = Benchmark(
- test_name, test_name, test_args, iterations, rm_chroot_tmp, perf_args,
- suite, show_all_results, retries, run_local, turbostat)
+ telemetry_benchmark = Benchmark(test_name, test_name, test_args,
+ iterations, rm_chroot_tmp, perf_args,
+ suite, show_all_results, retries,
+ run_local, cwp_dso, weight, turbostat)
benchmarks.append(telemetry_benchmark)
def GetExperiment(self, experiment_file, working_directory, log_dir):
@@ -240,21 +242,50 @@ class ExperimentFactory(object):
if suite == 'telemetry_Crosperf':
if test_name == 'all_perfv2':
- self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests, test_args,
- iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results, retries, run_local,
- turbostat=turbostat_opt)
+ self.AppendBenchmarkSet(
+ benchmarks,
+ telemetry_perfv2_tests,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local,
+ cwp_dso,
+ weight,
+ turbostat=turbostat_opt)
elif test_name == 'all_pagecyclers':
- self.AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
- test_args, iterations, rm_chroot_tmp,
- perf_args, suite, show_all_results, retries,
- run_local, turbostat=turbostat_opt)
+ self.AppendBenchmarkSet(
+ benchmarks,
+ telemetry_pagecycler_tests,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local,
+ cwp_dso,
+ weight,
+ turbostat=turbostat_opt)
elif test_name == 'all_crosbolt_perf':
- self.AppendBenchmarkSet(benchmarks, telemetry_crosbolt_perf_tests,
- test_args, iterations, rm_chroot_tmp,
- perf_args, 'telemetry_Crosperf',
- show_all_results, retries, run_local,
- turbostat=turbostat_opt)
+ self.AppendBenchmarkSet(
+ benchmarks,
+ telemetry_crosbolt_perf_tests,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ 'telemetry_Crosperf',
+ show_all_results,
+ retries,
+ run_local,
+ cwp_dso,
+ weight,
+ turbostat=turbostat_opt)
self.AppendBenchmarkSet(
benchmarks,
crosbolt_perf_tests,
@@ -266,12 +297,24 @@ class ExperimentFactory(object):
show_all_results,
retries,
run_local=False,
+ cwp_dso=cwp_dso,
+ weight=weight,
turbostat=turbostat_opt)
elif test_name == 'all_toolchain_perf':
- self.AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
- test_args, iterations, rm_chroot_tmp,
- perf_args, suite, show_all_results, retries,
- run_local, turbostat=turbostat_opt)
+ self.AppendBenchmarkSet(
+ benchmarks,
+ telemetry_toolchain_perf_tests,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local,
+ cwp_dso,
+ weight,
+ turbostat=turbostat_opt)
# Add non-telemetry toolchain-perf benchmarks:
benchmarks.append(
Benchmark(
@@ -285,13 +328,24 @@ class ExperimentFactory(object):
show_all_results,
retries,
run_local=False,
+ cwp_dso=cwp_dso,
+ weight=weight,
turbostat=turbostat_opt))
elif test_name == 'all_toolchain_perf_old':
- self.AppendBenchmarkSet(benchmarks,
- telemetry_toolchain_old_perf_tests, test_args,
- iterations, rm_chroot_tmp, perf_args, suite,
- show_all_results, retries, run_local,
- turbostat=turbostat_opt)
+ self.AppendBenchmarkSet(
+ benchmarks,
+ telemetry_toolchain_old_perf_tests,
+ test_args,
+ iterations,
+ rm_chroot_tmp,
+ perf_args,
+ suite,
+ show_all_results,
+ retries,
+ run_local,
+ cwp_dso,
+ weight,
+ turbostat=turbostat_opt)
else:
benchmark = Benchmark(benchmark_name, test_name, test_args,
iterations, rm_chroot_tmp, perf_args, suite,
@@ -311,6 +365,8 @@ class ExperimentFactory(object):
show_all_results,
retries,
run_local=False,
+ cwp_dso=cwp_dso,
+ weight=weight,
turbostat=turbostat_opt)
else:
# Add the single benchmark.
@@ -325,6 +381,8 @@ class ExperimentFactory(object):
show_all_results,
retries,
run_local=False,
+ cwp_dso=cwp_dso,
+ weight=weight,
turbostat=turbostat_opt)
benchmarks.append(benchmark)
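A note on the call style above: the CL passes cwp_dso and weight
positionally and keeps turbostat as an explicit keyword at every call
site. A stricter variant, sketched below as a hypothetical alternative
rather than anything this CL does, would declare the trailing options
keyword-only with a bare *, so a stale positional call fails
immediately with a TypeError instead of mis-binding:

    # Hypothetical signature: everything after the bare * must be passed
    # by keyword, so cwp_dso, weight, and turbostat can never absorb a
    # stray positional value.
    def append_benchmark_set(benchmarks, benchmark_list, test_args,
                             iterations, rm_chroot_tmp, perf_args, suite,
                             show_all_results, retries, run_local, *,
                             cwp_dso='', weight=0, turbostat=False):
        ...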
diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py
index d701d47b..1d6b1ea7 100755
--- a/crosperf/experiment_factory_unittest.py
+++ b/crosperf/experiment_factory_unittest.py
@@ -239,23 +239,23 @@ class ExperimentFactoryTest(unittest.TestCase):
bench_list = []
ef.AppendBenchmarkSet(bench_list, experiment_factory.telemetry_perfv2_tests,
'', 1, False, '', 'telemetry_Crosperf', False, 0,
- False, False)
+ False, '', 0, False)
self.assertEqual(
len(bench_list), len(experiment_factory.telemetry_perfv2_tests))
self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
bench_list = []
- ef.AppendBenchmarkSet(bench_list,
- experiment_factory.telemetry_pagecycler_tests, '', 1,
- False, '', 'telemetry_Crosperf', False, 0, False, False)
+ ef.AppendBenchmarkSet(
+ bench_list, experiment_factory.telemetry_pagecycler_tests, '', 1, False,
+ '', 'telemetry_Crosperf', False, 0, False, '', 0, False)
self.assertEqual(
len(bench_list), len(experiment_factory.telemetry_pagecycler_tests))
self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
bench_list = []
- ef.AppendBenchmarkSet(bench_list,
- experiment_factory.telemetry_toolchain_perf_tests, '',
- 1, False, '', 'telemetry_Crosperf', False, 0, False, False)
+ ef.AppendBenchmarkSet(
+ bench_list, experiment_factory.telemetry_toolchain_perf_tests, '', 1,
+ False, '', 'telemetry_Crosperf', False, 0, False, '', 0, False)
self.assertEqual(
len(bench_list), len(experiment_factory.telemetry_toolchain_perf_tests))
self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark))
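For reference, the three values appended to each test call above ('',
0, False) bind positionally to the new cwp_dso, weight, and turbostat
parameters in the widened AppendBenchmarkSet signature, mirroring the
updated production call sites.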