diff options
author | Caroline Tice <cmtice@google.com> | 2019-08-06 16:21:18 -0700 |
---|---|---|
committer | Caroline Tice <cmtice@chromium.org> | 2019-08-07 21:50:07 +0000 |
commit | e39d7700abdb5aa21814e54104af857eebf64b88 (patch) | |
tree | a3d793581e4d8ba26d5c2181bf4573b9fede1fae /crosperf/experiment_factory.py | |
parent | e5a86824924123af66cd5520f0cfba69156bd38a (diff) | |
download | toolchain-utils-e39d7700abdb5aa21814e54104af857eebf64b88.tar.gz |
toolchain-utils: Fix argument-passing error in experiment_factory.py
Fix all calls to Benchmark inside experiment_factory.py to pass all
args, to prevent an arg with a default value accidentally being passed
a value for a different arg.
BUG=chromium:991396
TEST=Tested locally with Crosperf and it worked properly.
Change-Id: Ia8096e2989e179723e122090ea40a11747070b22
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/1740646
Reviewed-by: Zhizhou Yang <zhizhouy@google.com>
Tested-by: Caroline Tice <cmtice@chromium.org>
Legacy-Commit-Queue: Commit Bot <commit-bot@chromium.org>
Commit-Queue: ChromeOS CL Exonerator Bot <chromiumos-cl-exonerator@appspot.gserviceaccount.com>
Diffstat (limited to 'crosperf/experiment_factory.py')
-rw-r--r-- | crosperf/experiment_factory.py | 110 |
1 file changed, 84 insertions, 26 deletions
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py index 6e75644b..7bc6e9fe 100644 --- a/crosperf/experiment_factory.py +++ b/crosperf/experiment_factory.py @@ -104,12 +104,14 @@ class ExperimentFactory(object): def AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args, iterations, rm_chroot_tmp, perf_args, suite, - show_all_results, retries, run_local, turbostat): + show_all_results, retries, run_local, cwp_dso, weight, + turbostat): """Add all the tests in a set to the benchmarks list.""" for test_name in benchmark_list: - telemetry_benchmark = Benchmark( - test_name, test_name, test_args, iterations, rm_chroot_tmp, perf_args, - suite, show_all_results, retries, run_local, turbostat) + telemetry_benchmark = Benchmark(test_name, test_name, test_args, + iterations, rm_chroot_tmp, perf_args, + suite, show_all_results, retries, + run_local, cwp_dso, weight, turbostat) benchmarks.append(telemetry_benchmark) def GetExperiment(self, experiment_file, working_directory, log_dir): @@ -240,21 +242,50 @@ class ExperimentFactory(object): if suite == 'telemetry_Crosperf': if test_name == 'all_perfv2': - self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests, test_args, - iterations, rm_chroot_tmp, perf_args, suite, - show_all_results, retries, run_local, - turbostat=turbostat_opt) + self.AppendBenchmarkSet( + benchmarks, + telemetry_perfv2_tests, + test_args, + iterations, + rm_chroot_tmp, + perf_args, + suite, + show_all_results, + retries, + run_local, + cwp_dso, + weight, + turbostat=turbostat_opt) elif test_name == 'all_pagecyclers': - self.AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests, - test_args, iterations, rm_chroot_tmp, - perf_args, suite, show_all_results, retries, - run_local, turbostat=turbostat_opt) + self.AppendBenchmarkSet( + benchmarks, + telemetry_pagecycler_tests, + test_args, + iterations, + rm_chroot_tmp, + perf_args, + suite, + show_all_results, + retries, + run_local, + cwp_dso, + weight, + 
turbostat=turbostat_opt) elif test_name == 'all_crosbolt_perf': - self.AppendBenchmarkSet(benchmarks, telemetry_crosbolt_perf_tests, - test_args, iterations, rm_chroot_tmp, - perf_args, 'telemetry_Crosperf', - show_all_results, retries, run_local, - turbostat=turbostat_opt) + self.AppendBenchmarkSet( + benchmarks, + telemetry_crosbolt_perf_tests, + test_args, + iterations, + rm_chroot_tmp, + perf_args, + 'telemetry_Crosperf', + show_all_results, + retries, + run_local, + cwp_dso, + weight, + turbostat=turbostat_opt) self.AppendBenchmarkSet( benchmarks, crosbolt_perf_tests, @@ -266,12 +297,24 @@ class ExperimentFactory(object): show_all_results, retries, run_local=False, + cwp_dso=cwp_dso, + weight=weight, turbostat=turbostat_opt) elif test_name == 'all_toolchain_perf': - self.AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests, - test_args, iterations, rm_chroot_tmp, - perf_args, suite, show_all_results, retries, - run_local, turbostat=turbostat_opt) + self.AppendBenchmarkSet( + benchmarks, + telemetry_toolchain_perf_tests, + test_args, + iterations, + rm_chroot_tmp, + perf_args, + suite, + show_all_results, + retries, + run_local, + cwp_dso, + weight, + turbostat=turbostat_opt) # Add non-telemetry toolchain-perf benchmarks: benchmarks.append( Benchmark( @@ -285,13 +328,24 @@ class ExperimentFactory(object): show_all_results, retries, run_local=False, + cwp_dso=cwp_dso, + weight=weight, turbostat=turbostat_opt)) elif test_name == 'all_toolchain_perf_old': - self.AppendBenchmarkSet(benchmarks, - telemetry_toolchain_old_perf_tests, test_args, - iterations, rm_chroot_tmp, perf_args, suite, - show_all_results, retries, run_local, - turbostat=turbostat_opt) + self.AppendBenchmarkSet( + benchmarks, + telemetry_toolchain_old_perf_tests, + test_args, + iterations, + rm_chroot_tmp, + perf_args, + suite, + show_all_results, + retries, + run_local, + cwp_dso, + weight, + turbostat=turbostat_opt) else: benchmark = Benchmark(benchmark_name, test_name, test_args, 
iterations, rm_chroot_tmp, perf_args, suite, @@ -311,6 +365,8 @@ class ExperimentFactory(object): show_all_results, retries, run_local=False, + cwp_dso=cwp_dso, + weight=weight, turbostat=turbostat_opt) else: # Add the single benchmark. @@ -325,6 +381,8 @@ class ExperimentFactory(object): show_all_results, retries, run_local=False, + cwp_dso=cwp_dso, + weight=weight, turbostat=turbostat_opt) benchmarks.append(benchmark) |