diff options
author | Zhizhou Yang <zhizhouy@google.com> | 2018-11-06 15:24:57 -0800 |
---|---|---|
committer | chrome-bot <chrome-bot@chromium.org> | 2018-11-09 11:05:09 -0800 |
commit | aa8d023f42fa991587e291e067820b2bbd6031a3 (patch) | |
tree | 0850ae5fc4e2ad8437035a82390d844fbbe6c6b9 /crosperf | |
parent | d87bc2f4eb3564c4865c82775dcb82c7e7cc4e3d (diff) | |
download | toolchain-utils-aa8d023f42fa991587e291e067820b2bbd6031a3.tar.gz |
Crosperf: Treat benchmarks with story-filter test_args as different benchmarks
The bug refers to https://crbug.com/902466.
This patch modifies the behavior of crosperf when determining duplicate
benchmarks. When there are two blocks with the same benchmark name in
the experiment file, we first append both to the benchmark list. After
that, we rename a benchmark if its test_args contain 'story-filter'
or 'story-tag-filter'. The new benchmark name is passed to the
experiment so that the two are treated as different runs and get
separate tables in the report.
BUG=chromium:902466
TEST=Passed all up-to-date unittests; tested and verified with one
experiment file containing the following benchmark settings:
benchmark: webrtc {
iterations: 3
test_args: --story-filter=datachannel
suite: telemetry_Crosperf
}
benchmark: webrtc {
iterations: 3
test_args: --story-tag-filter=smoothness
suite: telemetry_Crosperf
}
Change-Id: Id733273a5f9f43d149407055c9c0da3b761ddeef
Reviewed-on: https://chromium-review.googlesource.com/1321415
Commit-Ready: Zhizhou Yang <zhizhouy@google.com>
Tested-by: Zhizhou Yang <zhizhouy@google.com>
Reviewed-by: Caroline Tice <cmtice@chromium.org>
Diffstat (limited to 'crosperf')
-rw-r--r-- | crosperf/experiment_factory.py | 21 | ||||
-rwxr-xr-x | crosperf/experiment_factory_unittest.py | 24 | ||||
-rw-r--r-- | crosperf/experiment_file.py | 16 | ||||
-rwxr-xr-x | crosperf/experiment_file_unittest.py | 29 |
4 files changed, 80 insertions, 10 deletions
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py index 70b5068b..a6c163dc 100644 --- a/crosperf/experiment_factory.py +++ b/crosperf/experiment_factory.py @@ -156,12 +156,29 @@ class ExperimentFactory(object): # inherited and/or merged with the global settings values. benchmarks = [] all_benchmark_settings = experiment_file.GetSettings('benchmark') + # Check if there is duplicated benchmark name + benchmark_names = {} for benchmark_settings in all_benchmark_settings: benchmark_name = benchmark_settings.name test_name = benchmark_settings.GetField('test_name') if not test_name: test_name = benchmark_name test_args = benchmark_settings.GetField('test_args') + + # Rename benchmark name if 'story-filter' or 'story-tag-filter' specified + # in test_args. + for arg in test_args.split(): + if '--story-filter' in arg or '--story-tag-filter' in arg: + # Rename benchmark name with an extension of 'story'-option + benchmark_name = '%s@@%s' % (benchmark_name, arg.split('=')[-1]) + break + + # Check for duplicated benchmark name after renaming + if not benchmark_name in benchmark_names: + benchmark_names[benchmark_name] = True + else: + raise SyntaxError("Duplicate benchmark name: '%s'." 
% benchmark_name) + iterations = benchmark_settings.GetField('iterations') suite = benchmark_settings.GetField('suite') retries = benchmark_settings.GetField('retries') @@ -217,8 +234,8 @@ class ExperimentFactory(object): iterations, rm_chroot_tmp, perf_args, suite, show_all_results, retries, run_local) else: - benchmark = Benchmark(test_name, test_name, test_args, iterations, - rm_chroot_tmp, perf_args, suite, + benchmark = Benchmark(benchmark_name, test_name, test_args, + iterations, rm_chroot_tmp, perf_args, suite, show_all_results, retries, run_local) benchmarks.append(benchmark) else: diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py index f29d4566..74521c7c 100755 --- a/crosperf/experiment_factory_unittest.py +++ b/crosperf/experiment_factory_unittest.py @@ -29,6 +29,11 @@ EXPERIMENT_FILE_1 = """ iterations: 3 } + benchmark: webrtc { + iterations: 1 + test_args: --story-filter=datachannel + } + image1 { chromeos_image: /usr/local/google/cros_image1.bin } @@ -53,16 +58,31 @@ class ExperimentFactoryTest(unittest.TestCase): experiment_file, working_directory='', log_dir='') self.assertEqual(exp.remote, ['chromeos-alex3']) - self.assertEqual(len(exp.benchmarks), 1) + self.assertEqual(len(exp.benchmarks), 2) self.assertEqual(exp.benchmarks[0].name, 'PageCycler') self.assertEqual(exp.benchmarks[0].test_name, 'PageCycler') self.assertEqual(exp.benchmarks[0].iterations, 3) + self.assertEqual(exp.benchmarks[1].name, 'webrtc@@datachannel') + self.assertEqual(exp.benchmarks[1].test_name, 'webrtc') + self.assertEqual(exp.benchmarks[1].iterations, 1) self.assertEqual(len(exp.labels), 2) self.assertEqual(exp.labels[0].chromeos_image, '/usr/local/google/cros_image1.bin') self.assertEqual(exp.labels[0].board, 'x86-alex') + def testDuplecateBenchmark(self): + mock_experiment_file = ExperimentFile(StringIO.StringIO('')) + mock_experiment_file.all_settings = [] + benchmark_settings1 = settings_factory.BenchmarkSettings('name') + 
mock_experiment_file.all_settings.append(benchmark_settings1) + benchmark_settings2 = settings_factory.BenchmarkSettings('name') + mock_experiment_file.all_settings.append(benchmark_settings2) + + with self.assertRaises(SyntaxError): + ef = ExperimentFactory() + ef.GetExperiment(mock_experiment_file, '', '') + def test_append_benchmark_set(self): ef = ExperimentFactory() @@ -158,7 +178,7 @@ class ExperimentFactoryTest(unittest.TestCase): self.assertEqual(exp.log_level, 'average') self.assertEqual(len(exp.benchmarks), 1) - self.assertEqual(exp.benchmarks[0].name, 'kraken') + self.assertEqual(exp.benchmarks[0].name, 'bench_test') self.assertEqual(exp.benchmarks[0].test_name, 'kraken') self.assertEqual(exp.benchmarks[0].iterations, 1) self.assertEqual(exp.benchmarks[0].suite, 'telemetry_Crosperf') diff --git a/crosperf/experiment_file.py b/crosperf/experiment_file.py index 57eb52dc..12f9d5df 100644 --- a/crosperf/experiment_file.py +++ b/crosperf/experiment_file.py @@ -97,7 +97,7 @@ class ExperimentFile(object): field = self._ParseField(reader) settings.SetField(field[0], field[1], field[2]) elif ExperimentFile._CLOSE_SETTINGS_RE.match(line): - return settings + return settings, settings_type raise EOFError('Unexpected EOF while parsing settings block.') @@ -112,11 +112,15 @@ class ExperimentFile(object): if not line: continue elif ExperimentFile._OPEN_SETTINGS_RE.match(line): - new_settings = self._ParseSettings(reader) - if new_settings.name in settings_names: - raise SyntaxError( - "Duplicate settings name: '%s'." % new_settings.name) - settings_names[new_settings.name] = True + new_settings, settings_type = self._ParseSettings(reader) + # We will allow benchmarks with duplicated settings name for now. + # Further decision will be made when parsing benchmark details in + # ExperimentFactory.GetExperiment(). + if settings_type != 'benchmark': + if new_settings.name in settings_names: + raise SyntaxError( + "Duplicate settings name: '%s'." 
% new_settings.name) + settings_names[new_settings.name] = True self.all_settings.append(new_settings) elif ExperimentFile._FIELD_VALUE_RE.match(line): field = self._ParseField(reader) diff --git a/crosperf/experiment_file_unittest.py b/crosperf/experiment_file_unittest.py index d4a02107..a5658bfb 100755 --- a/crosperf/experiment_file_unittest.py +++ b/crosperf/experiment_file_unittest.py @@ -65,6 +65,24 @@ EXPERIMENT_FILE_3 = """ } """ +EXPERIMENT_FILE_4 = """ + board: x86-alex + remote: chromeos-alex3 + iterations: 3 + + benchmark: webrtc { + test_args: --story-filter=datachannel + } + + benchmark: webrtc { + test_args: --story-tag-filter=smoothness + } + + image1 { + chromeos_image:/usr/local/google/cros_image1.bin + } + """ + OUTPUT_FILE = """board: x86-alex remote: chromeos-alex3 perf_args: record -a -e cycles @@ -124,6 +142,17 @@ class ExperimentFileTest(unittest.TestCase): input_file = StringIO.StringIO(EXPERIMENT_FILE_3) self.assertRaises(Exception, ExperimentFile, input_file) + def testDuplicateBenchmark(self): + input_file = StringIO.StringIO(EXPERIMENT_FILE_4) + experiment_file = ExperimentFile(input_file) + benchmark_settings = experiment_file.GetSettings('benchmark') + self.assertEqual(benchmark_settings[0].name, 'webrtc') + self.assertEqual(benchmark_settings[0].GetField('test_args'), + '--story-filter=datachannel') + self.assertEqual(benchmark_settings[1].name, 'webrtc') + self.assertEqual(benchmark_settings[1].GetField('test_args'), + '--story-tag-filter=smoothness') + def testCanonicalize(self): input_file = StringIO.StringIO(EXPERIMENT_FILE_1) experiment_file = ExperimentFile(input_file) |