diff options
author | Zhizhou Yang <zhizhouy@google.com> | 2019-02-07 14:53:39 -0800 |
---|---|---|
committer | chrome-bot <chrome-bot@chromium.org> | 2019-02-08 06:03:53 -0800 |
commit | 06275b2a18fcfe072180aae3085966cbb592c35f (patch) | |
tree | 36083259f5dabcdaf0d487d79e07ad8bea19aae1 /crosperf | |
parent | b1cba791a63a24cac5b4d7cd68f5e0941a9afe62 (diff) | |
download | toolchain-utils-06275b2a18fcfe072180aae3085966cbb592c35f.tar.gz |
crosperf: in cwp mode, first check whether iterations are the same
In cwp mode, we ask the user to set the same number of iterations for
different benchmark runs; if the user does not do so, they will spend a
long time running all the benchmarks only to get an error message at the end.
This patch checks whether the iterations in the experiment file are the
same at the time we construct the benchmarks.
The patch also fixes a small test-case mismatch in the weight check so
that all unit tests pass.
Fixed formatting issues reported by pylint.
BUG=chromium:929390
TEST=All unit tests passed.
Change-Id: I0eae51205e8c0fa80131c5b79c4773e88c5b0915
Reviewed-on: https://chromium-review.googlesource.com/1459854
Commit-Ready: Zhizhou Yang <zhizhouy@google.com>
Tested-by: Zhizhou Yang <zhizhouy@google.com>
Reviewed-by: Caroline Tice <cmtice@chromium.org>
Diffstat (limited to 'crosperf')
-rw-r--r-- | crosperf/experiment_factory.py | 18 | ||||
-rwxr-xr-x | crosperf/experiment_factory_unittest.py | 69 |
2 files changed, 59 insertions, 28 deletions
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py index 3a1efb83..aa877c6e 100644 --- a/crosperf/experiment_factory.py +++ b/crosperf/experiment_factory.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright (c) 2013 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. @@ -88,6 +89,7 @@ dso_list = [ 'kallsyms', ] + class ExperimentFactory(object): """Factory class for building an Experiment, given an ExperimentFile as input. @@ -164,8 +166,12 @@ class ExperimentFactory(object): # inherited and/or merged with the global settings values. benchmarks = [] all_benchmark_settings = experiment_file.GetSettings('benchmark') + # Check if there is duplicated benchmark name benchmark_names = {} + # Check if in cwp_dso mode, all benchmarks should have same iterations + cwp_dso_iterations = 0 + for benchmark_settings in all_benchmark_settings: benchmark_name = benchmark_settings.name test_name = benchmark_settings.GetField('test_name') @@ -192,6 +198,12 @@ class ExperimentFactory(object): raise SyntaxError("Duplicate benchmark name: '%s'." 
% benchmark_name) iterations = benchmark_settings.GetField('iterations') + if cwp_dso: + if cwp_dso_iterations != 0 and iterations != cwp_dso_iterations: + raise RuntimeError('Iterations of each benchmark run are not the ' \ + 'same') + cwp_dso_iterations = iterations + suite = benchmark_settings.GetField('suite') retries = benchmark_settings.GetField('retries') run_local = benchmark_settings.GetField('run_local') @@ -206,7 +218,7 @@ class ExperimentFactory(object): raise RuntimeError('run_local must be set to False to use CWP ' 'approximation') if weight < 0: - raise RuntimeError('Weight should be a float no less than 0') + raise RuntimeError('Weight should be a float >=0') elif cwp_dso: raise RuntimeError('With DSO specified, each benchmark should have a ' 'weight') @@ -377,7 +389,7 @@ class ExperimentFactory(object): raise RuntimeError('There is no remote for {0}'.format(board)) except IOError: # TODO: rethrow instead of throwing different exception. - raise RuntimeError('IOError while reading file {0}' - .format(default_remotes_file)) + raise RuntimeError( + 'IOError while reading file {0}'.format(default_remotes_file)) else: raise RuntimeError('There is no remote for {0}'.format(board)) diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py index 92fb3116..b0c795eb 100755 --- a/crosperf/experiment_factory_unittest.py +++ b/crosperf/experiment_factory_unittest.py @@ -1,4 +1,5 @@ #!/usr/bin/env python2 +# -*- coding: utf-8 -*- # Copyright (c) 2013 The Chromium OS Authors. All rights reserved. 
# Use of this source code is governed by a BSD-style license that can be @@ -14,11 +15,11 @@ import unittest from cros_utils.file_utils import FileUtils -from experiment_factory import ExperimentFactory from experiment_file import ExperimentFile import test_flag import benchmark import experiment_factory +from experiment_factory import ExperimentFactory import settings_factory EXPERIMENT_FILE_1 = """ @@ -68,7 +69,6 @@ EXPERIMENT_FILE_2 = """ } """ - # pylint: disable=too-many-function-args @@ -130,8 +130,7 @@ class ExperimentFactoryTest(unittest.TestCase): with self.assertRaises(RuntimeError) as msg: ef = ExperimentFactory() ef.GetExperiment(mock_experiment_file, '', '') - self.assertEqual('The DSO specified is not supported', - str(msg.exception)) + self.assertEqual('The DSO specified is not supported', str(msg.exception)) # Test 2: No weight after DSO specified global_settings.SetField('cwp_dso', 'kallsyms') @@ -167,9 +166,9 @@ class ExperimentFactoryTest(unittest.TestCase): with self.assertRaises(RuntimeError) as msg: ef = ExperimentFactory() ef.GetExperiment(mock_experiment_file, '', '') - self.assertEqual('CWP approximation weight only works with ' - 'telemetry_Crosperf suite', - str(msg.exception)) + self.assertEqual( + 'CWP approximation weight only works with ' + 'telemetry_Crosperf suite', str(msg.exception)) # Test 5: cwp_dso does not work for local run benchmark_settings = settings_factory.BenchmarkSettings('name') @@ -184,9 +183,9 @@ class ExperimentFactoryTest(unittest.TestCase): self.assertEqual('run_local must be set to False to use CWP approximation', str(msg.exception)) - # Test 6: weight should be float between 0 and 1 + # Test 6: weight should be float >=0 benchmark_settings = settings_factory.BenchmarkSettings('name') - benchmark_settings.SetField('weight', '1.2') + benchmark_settings.SetField('weight', '-1.2') benchmark_settings.SetField('suite', 'telemetry_Crosperf') benchmark_settings.SetField('run_local', 'False') 
mock_experiment_file.all_settings = [] @@ -194,8 +193,7 @@ class ExperimentFactoryTest(unittest.TestCase): with self.assertRaises(RuntimeError) as msg: ef = ExperimentFactory() ef.GetExperiment(mock_experiment_file, '', '') - self.assertEqual('Weight should be a float between 0 and 1', - str(msg.exception)) + self.assertEqual('Weight should be a float >=0', str(msg.exception)) # Test 7: more than one story tag in test_args benchmark_settings = settings_factory.BenchmarkSettings('name') @@ -208,8 +206,29 @@ class ExperimentFactoryTest(unittest.TestCase): with self.assertRaises(RuntimeError) as msg: ef = ExperimentFactory() ef.GetExperiment(mock_experiment_file, '', '') - self.assertEqual('Only one story or story-tag filter allowed in a single ' - 'benchmark run', str(msg.exception)) + self.assertEqual( + 'Only one story or story-tag filter allowed in a single ' + 'benchmark run', str(msg.exception)) + + # Test 8: Iterations of each benchmark run are not same in cwp mode + mock_experiment_file.all_settings = [] + benchmark_settings = settings_factory.BenchmarkSettings('name1') + benchmark_settings.SetField('iterations', '4') + benchmark_settings.SetField('weight', '1.2') + benchmark_settings.SetField('suite', 'telemetry_Crosperf') + benchmark_settings.SetField('run_local', 'False') + mock_experiment_file.all_settings.append(benchmark_settings) + benchmark_settings = settings_factory.BenchmarkSettings('name2') + benchmark_settings.SetField('iterations', '3') + benchmark_settings.SetField('weight', '1.2') + benchmark_settings.SetField('suite', 'telemetry_Crosperf') + benchmark_settings.SetField('run_local', 'False') + mock_experiment_file.all_settings.append(benchmark_settings) + with self.assertRaises(RuntimeError) as msg: + ef = ExperimentFactory() + ef.GetExperiment(mock_experiment_file, '', '') + self.assertEqual('Iterations of each benchmark run are not the same', + str(msg.exception)) def test_append_benchmark_set(self): ef = ExperimentFactory() @@ -220,7 +239,7 
@@ class ExperimentFactoryTest(unittest.TestCase): False) self.assertEqual( len(bench_list), len(experiment_factory.telemetry_perfv2_tests)) - self.assertTrue(type(bench_list[0]) is benchmark.Benchmark) + self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark)) bench_list = [] ef.AppendBenchmarkSet(bench_list, @@ -228,7 +247,7 @@ class ExperimentFactoryTest(unittest.TestCase): False, '', 'telemetry_Crosperf', False, 0, False) self.assertEqual( len(bench_list), len(experiment_factory.telemetry_pagecycler_tests)) - self.assertTrue(type(bench_list[0]) is benchmark.Benchmark) + self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark)) bench_list = [] ef.AppendBenchmarkSet(bench_list, @@ -236,7 +255,7 @@ class ExperimentFactoryTest(unittest.TestCase): 1, False, '', 'telemetry_Crosperf', False, 0, False) self.assertEqual( len(bench_list), len(experiment_factory.telemetry_toolchain_perf_tests)) - self.assertTrue(type(bench_list[0]) is benchmark.Benchmark) + self.assertTrue(isinstance(bench_list[0], benchmark.Benchmark)) @mock.patch.object(socket, 'gethostname') def test_get_experiment(self, mock_socket): @@ -313,9 +332,9 @@ class ExperimentFactoryTest(unittest.TestCase): self.assertFalse(exp.benchmarks[0].show_all_results) self.assertEqual(len(exp.labels), 1) - self.assertEqual(exp.labels[0].chromeos_image, - 'chromeos/src/build/images/lumpy/latest/' - 'chromiumos_test_image.bin') + self.assertEqual( + exp.labels[0].chromeos_image, 'chromeos/src/build/images/lumpy/latest/' + 'chromiumos_test_image.bin') self.assertEqual(exp.labels[0].autotest_path, '/tmp/autotest') self.assertEqual(exp.labels[0].board, 'lumpy') @@ -323,9 +342,9 @@ class ExperimentFactoryTest(unittest.TestCase): test_flag.SetTestMode(True) label_settings.SetField('remote', 'chromeos1.cros chromeos2.cros') exp = ef.GetExperiment(mock_experiment_file, '', '') - self.assertEqual(exp.remote, [ - 'chromeos1.cros', 'chromeos2.cros', '123.45.67.89', '123.45.76.80' - ]) + self.assertEqual( + 
exp.remote, + ['chromeos1.cros', 'chromeos2.cros', '123.45.67.89', '123.45.76.80']) # Third test: Automatic fixing of bad logging_level param: global_settings.SetField('logging_level', 'really loud!') @@ -361,9 +380,9 @@ class ExperimentFactoryTest(unittest.TestCase): self.assertEqual(len(exp.labels), 2) self.assertEqual(exp.labels[1].chromeos_image, 'fake_image_path') self.assertEqual(exp.labels[1].autotest_path, 'fake_autotest_path') - self.assertEqual(exp.remote, [ - 'fake_chromeos_machine1.cros', 'fake_chromeos_machine2.cros' - ]) + self.assertEqual( + exp.remote, + ['fake_chromeos_machine1.cros', 'fake_chromeos_machine2.cros']) def test_get_default_remotes(self): board_list = [ |