aboutsummaryrefslogtreecommitdiff
path: root/crosperf
diff options
context:
space:
mode:
author: Yunlian Jiang <yunlian@google.com> 2013-05-21 11:29:50 -0700
committer: ChromeBot <chrome-bot@google.com> 2013-05-22 16:10:18 -0700
commit1131359ab8fc43c015f4a72c5aec128bcb85b0da (patch)
tree6d919ba034f988d6e014cf205f1f6b7496f3c4a0 /crosperf
parent8cad5d6b898579aae593b1fcf53675882a014b11 (diff)
downloadtoolchain-utils-1131359ab8fc43c015f4a72c5aec128bcb85b0da.tar.gz
crosperf: add results_organizer_unittest
BUG=None TEST=Unittest passes. Change-Id: I1fda414c1379af40cd1efc13461ed34aafe11847 Reviewed-on: https://gerrit-int.chromium.org/38326 Reviewed-by: Luis Lozano <llozano@chromium.org> Tested-by: Yunlian Jiang <yunlian@google.com> Commit-Queue: Yunlian Jiang <yunlian@google.com>
Diffstat (limited to 'crosperf')
-rw-r--r--crosperf/mock_instance.py117
-rwxr-xr-xcrosperf/results_organizer_unittest.py117
2 files changed, 234 insertions, 0 deletions
diff --git a/crosperf/mock_instance.py b/crosperf/mock_instance.py
new file mode 100644
index 00000000..1cb0f660
--- /dev/null
+++ b/crosperf/mock_instance.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This contains some mock instances for testing."""
+
+from autotest_runner import MockAutotestRunner
+from benchmark_run import MockBenchmarkRun
+from label import MockLabel
+from benchmark import Benchmark
+from machine_manager import MockMachineManager
+from results_cache import MockResultsCache
+
# Shared fixtures for the crosperf unit tests: two mock labels (distinct
# images and chromeos roots, same board) and two benchmarks that differ
# only in name and autotest name.
label1 = MockLabel(
    "test1", "image1", "/tmp/test_benchmark_run", "x86-alex",
    "chromeos-alex1", image_args="", image_md5sum="", cache_dir="")

label2 = MockLabel(
    "test2", "image2", "/tmp/test_benchmark_run_2", "x86-alex",
    "chromeos-alex2", image_args="", image_md5sum="", cache_dir="")

benchmark1 = Benchmark(
    "benchmark1", "autotest_name_1", "autotest_args", 2, "",
    "perf_args", "", "")

benchmark2 = Benchmark(
    "benchmark2", "autotest_name_2", "autotest_args", 2, "",
    "perf_args", "", "")
+
# Nine canned autotest keyval dictionaries, keyed 0-8, one per mocked
# benchmark run. Each maps the empty-string status key to 'PASS' plus a
# handful of string-valued metrics ('milliseconds_*', 'ms_1', 'total',
# 'test{N}', 'bool').
keyval = {
    0: {'': 'PASS', 'milliseconds_1': '1', 'milliseconds_2': '8',
        'milliseconds_3': '9.2', 'test{1}': '2', 'test{2}': '4',
        'ms_1': '2.1', 'total': '5', 'bool': 'True'},
    1: {'': 'PASS', 'milliseconds_1': '3', 'milliseconds_2': '5',
        'ms_1': '2.2', 'total': '6', 'test{1}': '3', 'test{2}': '4',
        'bool': 'FALSE'},
    2: {'': 'PASS', 'milliseconds_4': '30', 'milliseconds_5': '50',
        'ms_1': '2.23', 'total': '6', 'test{1}': '5', 'test{2}': '4',
        'bool': 'FALSE'},
    3: {'': 'PASS', 'milliseconds_1': '3', 'milliseconds_6': '7',
        'ms_1': '2.3', 'total': '7', 'test{1}': '2', 'test{2}': '6',
        'bool': 'FALSE'},
    4: {'': 'PASS', 'milliseconds_1': '3', 'milliseconds_8': '6',
        'ms_1': '2.3', 'total': '7', 'test{1}': '2', 'test{2}': '6',
        'bool': 'TRUE'},
    5: {'': 'PASS', 'milliseconds_1': '3', 'milliseconds_8': '6',
        'ms_1': '2.2', 'total': '7', 'test{1}': '2', 'test{2}': '2',
        'bool': 'TRUE'},
    6: {'': 'PASS', 'milliseconds_1': '3', 'milliseconds_8': '6',
        'ms_1': '2', 'total': '7', 'test{1}': '2', 'test{2}': '4',
        'bool': 'TRUE'},
    7: {'': 'PASS', 'milliseconds_1': '3', 'milliseconds_8': '6',
        'ms_1': '1', 'total': '7', 'test{1}': '1', 'test{2}': '6',
        'bool': 'TRUE'},
    8: {'': 'PASS', 'milliseconds_1': '3', 'milliseconds_8': '6',
        'ms_1': '3.3', 'total': '7', 'test{1}': '2', 'test{2}': '8',
        'bool': 'TRUE'},
}
+
diff --git a/crosperf/results_organizer_unittest.py b/crosperf/results_organizer_unittest.py
new file mode 100755
index 00000000..8ca650c8
--- /dev/null
+++ b/crosperf/results_organizer_unittest.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Testing of ResultsOrganizer. We create some labels, benchmark_runs
+ and then create a ResultsOrganizer, after that, we compare the result of
+ ResultOrganizer"""
+
+import unittest
+
+from benchmark_run import BenchmarkRun
+from results_cache import Result
+from results_organizer import ResultOrganizer
+
+import mock_instance
+
# Expected ResultOrganizer output for the mocked runs: one entry per
# benchmark, holding one list per label. Each label list flattens two
# iterations into [non-test keyvals, {'test': ...}, {'test': ...}, ...].
_bench1_label1 = [
    {'': 'PASS', 'bool': 'True', 'milliseconds_1': '1',
     'milliseconds_2': '8', 'milliseconds_3': '9.2', 'ms_1': '2.1',
     'total': '5'},
    {'test': '2'},
    {'test': '4'},
    {'': 'PASS', 'bool': 'FALSE', 'milliseconds_1': '3',
     'milliseconds_2': '5', 'ms_1': '2.2', 'total': '6'},
    {'test': '3'},
    {'test': '4'},
]

_bench1_label2 = [
    {'': 'PASS', 'bool': 'FALSE', 'milliseconds_4': '30',
     'milliseconds_5': '50', 'ms_1': '2.23', 'total': '6'},
    {'test': '5'},
    {'test': '4'},
    {'': 'PASS', 'bool': 'FALSE', 'milliseconds_1': '3',
     'milliseconds_6': '7', 'ms_1': '2.3', 'total': '7'},
    {'test': '2'},
    {'test': '6'},
]

_bench2_label1 = [
    {'': 'PASS', 'bool': 'TRUE', 'milliseconds_1': '3',
     'milliseconds_8': '6', 'ms_1': '2.3', 'total': '7'},
    {'test': '2'},
    {'test': '6'},
    {'': 'PASS', 'bool': 'TRUE', 'milliseconds_1': '3',
     'milliseconds_8': '6', 'ms_1': '2.2', 'total': '7'},
    {'test': '2'},
    {'test': '2'},
]

_bench2_label2 = [
    {'': 'PASS', 'bool': 'TRUE', 'milliseconds_1': '3',
     'milliseconds_8': '6', 'ms_1': '2', 'total': '7'},
    {'test': '2'},
    {'test': '4'},
    {'': 'PASS', 'bool': 'TRUE', 'milliseconds_1': '3',
     'milliseconds_8': '6', 'ms_1': '1', 'total': '7'},
    {'test': '1'},
    {'test': '6'},
]

result = {
    'benchmark1': [_bench1_label1, _bench1_label2],
    'benchmark2': [_bench2_label1, _bench2_label2],
}
+
class ResultOrganizerTest(unittest.TestCase):
  """Tests that ResultOrganizer regroups benchmark-run keyvals correctly.

  Builds 8 mock benchmark runs (2 benchmarks x 2 labels x 2 iterations),
  attaches the canned keyvals from mock_instance, and checks that
  ResultOrganizer reproduces the expected `result` table defined above.
  """

  def testResultOrganizer(self):
    labels = [mock_instance.label1, mock_instance.label2]
    benchmarks = [mock_instance.benchmark1, mock_instance.benchmark2]
    benchmark_runs = [None]*8
    benchmark_runs[0] = BenchmarkRun("b1", benchmarks[0],
                                     labels[0], 1, "", "", "", "")
    benchmark_runs[1] = BenchmarkRun("b2", benchmarks[0],
                                     labels[0], 2, "", "", "", "")
    benchmark_runs[2] = BenchmarkRun("b3", benchmarks[0],
                                     labels[1], 1, "", "", "", "")
    benchmark_runs[3] = BenchmarkRun("b4", benchmarks[0],
                                     labels[1], 2, "", "", "", "")
    benchmark_runs[4] = BenchmarkRun("b5", benchmarks[1],
                                     labels[0], 1, "", "", "", "")
    benchmark_runs[5] = BenchmarkRun("b6", benchmarks[1],
                                     labels[0], 2, "", "", "", "")
    benchmark_runs[6] = BenchmarkRun("b7", benchmarks[1],
                                     labels[1], 1, "", "", "", "")
    benchmark_runs[7] = BenchmarkRun("b8", benchmarks[1],
                                     labels[1], 2, "", "", "", "")

    # Attach one canned keyval set per run, in run order. Use enumerate
    # instead of a manually-maintained counter.
    for i, b in enumerate(benchmark_runs):
      b.result = Result(b.label.chromeos_root, "", b.label.name)
      b.result.keyvals = mock_instance.keyval[i]

    ro = ResultOrganizer(benchmark_runs, labels, benchmarks)
    self.assertEqual(ro.result, result)
+
# Allow running this unit test directly from the command line.
if __name__ == "__main__":
  unittest.main()