author     Zhizhou Yang <zhizhouy@google.com>  2018-11-06 15:24:57 -0800
committer  chrome-bot <chrome-bot@chromium.org>  2018-11-09 11:05:09 -0800
commit     aa8d023f42fa991587e291e067820b2bbd6031a3 (patch)
tree       0850ae5fc4e2ad8437035a82390d844fbbe6c6b9 /crosperf/experiment_factory.py
parent     d87bc2f4eb3564c4865c82775dcb82c7e7cc4e3d (diff)
download   toolchain-utils-aa8d023f42fa991587e291e067820b2bbd6031a3.tar.gz
Crosperf: Treat benchmarks with story- test_args as different benchmarks
The bug is https://crbug.com/902466. This patch modifies how crosperf
detects duplicate benchmarks. When two blocks in the experiment file have
the same benchmark name, we first append both to the benchmark list. After
that, we rename a benchmark if its test_args specify 'story-filter' or
'story-tag-filter'. The new benchmark name is passed to the experiment, so
the two blocks are treated as two different runs and get separate tables
in the report.

BUG=chromium:902466
TEST=Passed all up-to-date unittests; tested and verified with one
experiment file containing the following benchmark settings:

  benchmark: webrtc {
    iterations: 3
    test_args: --story-filter=datachannel
    suite: telemetry_Crosperf
  }

  benchmark: webrtc {
    iterations: 3
    test_args: --story-tag-filter=smoothness
    suite: telemetry_Crosperf
  }

Change-Id: Id733273a5f9f43d149407055c9c0da3b761ddeef
Reviewed-on: https://chromium-review.googlesource.com/1321415
Commit-Ready: Zhizhou Yang <zhizhouy@google.com>
Tested-by: Zhizhou Yang <zhizhouy@google.com>
Reviewed-by: Caroline Tice <cmtice@chromium.org>
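For reference, the renaming scheme can be exercised in isolation. The
following is a minimal standalone sketch of the logic in the first hunk
below; the rename_benchmark helper and the sample inputs are illustrative,
not part of the patch:

  def rename_benchmark(benchmark_name, test_args):
    """Append the story filter value, if any, to the benchmark name."""
    for arg in test_args.split():
      if '--story-filter' in arg or '--story-tag-filter' in arg:
        # Same '@@' naming scheme as the patch: <benchmark>@@<filter value>.
        return '%s@@%s' % (benchmark_name, arg.split('=')[-1])
    return benchmark_name

  seen = {}
  for name, args in [('webrtc', '--story-filter=datachannel'),
                     ('webrtc', '--story-tag-filter=smoothness')]:
    name = rename_benchmark(name, args)
    if name in seen:
      raise SyntaxError("Duplicate benchmark name: '%s'." % name)
    seen[name] = True
    print(name)  # webrtc@@datachannel, then webrtc@@smoothness

With the two webrtc blocks from the TEST= line above, this yields
webrtc@@datachannel and webrtc@@smoothness, which is why the two runs get
separate tables in the report.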
Diffstat (limited to 'crosperf/experiment_factory.py')
-rw-r--r--  crosperf/experiment_factory.py | 21
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index 70b5068b..a6c163dc 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -156,12 +156,29 @@ class ExperimentFactory(object):
     # inherited and/or merged with the global settings values.
     benchmarks = []
     all_benchmark_settings = experiment_file.GetSettings('benchmark')
+    # Check if there is duplicated benchmark name
+    benchmark_names = {}
     for benchmark_settings in all_benchmark_settings:
       benchmark_name = benchmark_settings.name
       test_name = benchmark_settings.GetField('test_name')
       if not test_name:
         test_name = benchmark_name
       test_args = benchmark_settings.GetField('test_args')
+
+      # Rename benchmark name if 'story-filter' or 'story-tag-filter' specified
+      # in test_args.
+      for arg in test_args.split():
+        if '--story-filter' in arg or '--story-tag-filter' in arg:
+          # Rename benchmark name with an extension of 'story'-option
+          benchmark_name = '%s@@%s' % (benchmark_name, arg.split('=')[-1])
+          break
+
+      # Check for duplicated benchmark name after renaming
+      if not benchmark_name in benchmark_names:
+        benchmark_names[benchmark_name] = True
+      else:
+        raise SyntaxError("Duplicate benchmark name: '%s'." % benchmark_name)
+
       iterations = benchmark_settings.GetField('iterations')
       suite = benchmark_settings.GetField('suite')
       retries = benchmark_settings.GetField('retries')
@@ -217,8 +234,8 @@ class ExperimentFactory(object):
                                   iterations, rm_chroot_tmp, perf_args, suite,
                                   show_all_results, retries, run_local)
         else:
-          benchmark = Benchmark(test_name, test_name, test_args, iterations,
-                                rm_chroot_tmp, perf_args, suite,
+          benchmark = Benchmark(benchmark_name, test_name, test_args,
+                                iterations, rm_chroot_tmp, perf_args, suite,
                                 show_all_results, retries, run_local)
           benchmarks.append(benchmark)
       else:
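Note that the renaming only disambiguates blocks whose story filters
differ; two blocks with the same name and identical (or no) story filters
still collide. A minimal sketch of that failure mode, mirroring the
duplicate check in the first hunk (the sample inputs are hypothetical):

  benchmark_names = {}
  for benchmark_name in ['webrtc', 'webrtc']:  # no story filters to tell them apart
    if benchmark_name not in benchmark_names:
      benchmark_names[benchmark_name] = True
    else:
      # Same error the patch raises for true duplicates.
      raise SyntaxError("Duplicate benchmark name: '%s'." % benchmark_name)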