path: root/crosperf/mock_instance.py
author     Stephen Hines <srhines@google.com>  2017-02-07 13:02:21 -0800
committer  Stephen Hines <srhines@google.com>  2017-02-07 13:03:24 -0800
commit     870a8df6fcb12de32fa2dd83b6ed0f7b24dbab1e (patch)
tree       87b3a32b13c392939d66fa93105896f5df0736a6 /crosperf/mock_instance.py
parent     baba90fd78c18585d22430dc95c748f96ad0c772 (diff)
parent     058aae85dcfb12049ef90137915ec7e981288569 (diff)
download   toolchain-utils-870a8df6fcb12de32fa2dd83b6ed0f7b24dbab1e.tar.gz
Merge remote-tracking branch 'aosp/mirror-chromium-master' into initial_import
Initial import of Chromium's toolchain-utils project.

Bug: http://b/31321592
Test: None necessary, as this is just a helper repository.
Change-Id: I61d2caaf1195da18cfaa7795706e8fc7fecff1d4
Diffstat (limited to 'crosperf/mock_instance.py')
-rw-r--r--  crosperf/mock_instance.py  143
1 file changed, 143 insertions(+), 0 deletions(-)
diff --git a/crosperf/mock_instance.py b/crosperf/mock_instance.py
new file mode 100644
index 00000000..758108fa
--- /dev/null
+++ b/crosperf/mock_instance.py
@@ -0,0 +1,143 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""This contains some mock instances for testing."""
+
+from __future__ import print_function
+
+from benchmark import Benchmark
+from label import MockLabel
+
+perf_args = 'record -a -e cycles'
+label1 = MockLabel(
+ 'test1',
+ 'image1',
+ 'autotest_dir',
+ '/tmp/test_benchmark_run',
+ 'x86-alex',
+ 'chromeos-alex1',
+ image_args='',
+ cache_dir='',
+ cache_only=False,
+ log_level='average',
+ compiler='gcc')
+
+label2 = MockLabel(
+ 'test2',
+ 'image2',
+ 'autotest_dir',
+ '/tmp/test_benchmark_run_2',
+ 'x86-alex',
+ 'chromeos-alex2',
+ image_args='',
+ cache_dir='',
+ cache_only=False,
+ log_level='average',
+ compiler='gcc')
+
+benchmark1 = Benchmark('benchmark1', 'autotest_name_1', 'autotest_args', 2, '',
+ perf_args, '', '')
+
+benchmark2 = Benchmark('benchmark2', 'autotest_name_2', 'autotest_args', 2, '',
+ perf_args, '', '')
+
+keyval = {}
+keyval[0] = {
+ '': 'PASS',
+ 'milliseconds_1': '1',
+ 'milliseconds_2': '8',
+ 'milliseconds_3': '9.2',
+ 'test{1}': '2',
+ 'test{2}': '4',
+ 'ms_1': '2.1',
+ 'total': '5',
+ 'bool': 'True'
+}
+
+keyval[1] = {
+ '': 'PASS',
+ 'milliseconds_1': '3',
+ 'milliseconds_2': '5',
+ 'ms_1': '2.2',
+ 'total': '6',
+ 'test{1}': '3',
+ 'test{2}': '4',
+ 'bool': 'FALSE'
+}
+
+keyval[2] = {
+ '': 'PASS',
+ 'milliseconds_4': '30',
+ 'milliseconds_5': '50',
+ 'ms_1': '2.23',
+ 'total': '6',
+ 'test{1}': '5',
+ 'test{2}': '4',
+ 'bool': 'FALSE'
+}
+
+keyval[3] = {
+ '': 'PASS',
+ 'milliseconds_1': '3',
+ 'milliseconds_6': '7',
+ 'ms_1': '2.3',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '6',
+ 'bool': 'FALSE'
+}
+
+keyval[4] = {
+ '': 'PASS',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2.3',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '6',
+ 'bool': 'TRUE'
+}
+
+keyval[5] = {
+ '': 'PASS',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2.2',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '2',
+ 'bool': 'TRUE'
+}
+
+keyval[6] = {
+ '': 'PASS',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '4',
+ 'bool': 'TRUE'
+}
+
+keyval[7] = {
+ '': 'PASS',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '1',
+ 'total': '7',
+ 'test{1}': '1',
+ 'test{2}': '6',
+ 'bool': 'TRUE'
+}
+
+keyval[8] = {
+ '': 'PASS',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '3.3',
+ 'total': '7',
+ 'test{1}': '2',
+ 'test{2}': '8',
+ 'bool': 'TRUE'
+}
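
Below is a minimal sketch of how the fixtures added in this diff might be exercised in a unit test. The test module, class name, and assertions are illustrative assumptions for this page only; they are not part of the toolchain-utils test suite or of this commit.

"""Hypothetical sanity checks over the keyval fixtures in mock_instance."""

from __future__ import print_function

import unittest

import mock_instance


class MockKeyvalTest(unittest.TestCase):
  """Illustrative (assumed) checks against the mock keyval data."""

  def test_every_run_reports_pass(self):
    # Each keyval dict stores its overall status under the empty-string key.
    for run in mock_instance.keyval.values():
      self.assertEqual(run[''], 'PASS')

  def test_first_run_millisecond_keys(self):
    # keyval[0] carries three milliseconds_* samples (see the diff above).
    keys = [k for k in mock_instance.keyval[0] if k.startswith('milliseconds_')]
    self.assertEqual(
        sorted(keys), ['milliseconds_1', 'milliseconds_2', 'milliseconds_3'])


if __name__ == '__main__':
  unittest.main()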