author     Stephen Hines <srhines@google.com>  2017-02-08 10:44:04 +0000
committer  android-build-merger <android-build-merger@google.com>  2017-02-08 10:44:04 +0000
commit     271389dd3199539c4474c351942f4d4fa975b81b (patch)
tree       87b3a32b13c392939d66fa93105896f5df0736a6  /crosperf/results_organizer_unittest.py
parent     baba90fd78c18585d22430dc95c748f96ad0c772 (diff)
parent     c5804ce784c39d6cf4f69139ab3197d989181cf9 (diff)
download   toolchain-utils-271389dd3199539c4474c351942f4d4fa975b81b.tar.gz
Merge remote-tracking branch 'aosp/mirror-chromium-master' into initial_import am: 870a8df6fc am: 9c6fa5f9e5
am: c5804ce784 Change-Id: I5ff109272784db60dfef5145242a68779f7f0ccb
Diffstat (limited to 'crosperf/results_organizer_unittest.py')
-rwxr-xr-x  crosperf/results_organizer_unittest.py  109
1 file changed, 109 insertions, 0 deletions
diff --git a/crosperf/results_organizer_unittest.py b/crosperf/results_organizer_unittest.py
new file mode 100755
index 00000000..ccf02973
--- /dev/null
+++ b/crosperf/results_organizer_unittest.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python2
+
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Testing of ResultsOrganizer
+
+ We create some labels, benchmark_runs and then create a ResultsOrganizer,
+ after that, we compare the result of ResultOrganizer.
+ """
+
+from __future__ import print_function
+
+import unittest
+
+from benchmark_run import BenchmarkRun
+from results_cache import Result
+from results_organizer import OrganizeResults
+
+import mock_instance
+
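+# Expected output of OrganizeResults for the mocked benchmark runs built
+# in the test below. Judging from this data alone (an assumption, not
+# verified against results_organizer.py), result[benchmark_name] holds
+# one list per label, and each of those lists concatenates the keyval
+# dicts reported by every iteration of that benchmark on that label.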
+result = {'benchmark1': [[{'': 'PASS',
+ 'bool': 'True',
+ 'milliseconds_1': '1',
+ 'milliseconds_2': '8',
+ 'milliseconds_3': '9.2',
+ 'ms_1': '2.1',
+ 'total': '5'}, {'test': '2'}, {'test': '4'},
+ {'': 'PASS',
+ 'bool': 'FALSE',
+ 'milliseconds_1': '3',
+ 'milliseconds_2': '5',
+ 'ms_1': '2.2',
+ 'total': '6'}, {'test': '3'}, {'test': '4'}],
+ [{'': 'PASS',
+ 'bool': 'FALSE',
+ 'milliseconds_4': '30',
+ 'milliseconds_5': '50',
+ 'ms_1': '2.23',
+ 'total': '6'}, {'test': '5'}, {'test': '4'},
+ {'': 'PASS',
+ 'bool': 'FALSE',
+ 'milliseconds_1': '3',
+ 'milliseconds_6': '7',
+ 'ms_1': '2.3',
+ 'total': '7'}, {'test': '2'}, {'test': '6'}]],
+ 'benchmark2': [[{'': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2.3',
+ 'total': '7'}, {'test': '2'}, {'test': '6'},
+ {'': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2.2',
+ 'total': '7'}, {'test': '2'}, {'test': '2'}],
+ [{'': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2',
+ 'total': '7'}, {'test': '2'}, {'test': '4'},
+ {'': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '1',
+ 'total': '7'}, {'test': '1'}, {'test': '6'}]]}
+
+
+class ResultOrganizerTest(unittest.TestCase):
+ """Test result organizer."""
+
+ def testResultOrganizer(self):
+ labels = [mock_instance.label1, mock_instance.label2]
+ benchmarks = [mock_instance.benchmark1, mock_instance.benchmark2]
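+    # Eight runs: 2 benchmarks x 2 labels x 2 iterations each. Only the
+    # name, benchmark, label, and iteration arguments matter to this
+    # test; the remaining BenchmarkRun arguments are passed as
+    # placeholder values.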
+ benchmark_runs = [None] * 8
+ benchmark_runs[0] = BenchmarkRun('b1', benchmarks[0], labels[0], 1, '', '',
+ '', 'average', '')
+ benchmark_runs[1] = BenchmarkRun('b2', benchmarks[0], labels[0], 2, '', '',
+ '', 'average', '')
+ benchmark_runs[2] = BenchmarkRun('b3', benchmarks[0], labels[1], 1, '', '',
+ '', 'average', '')
+ benchmark_runs[3] = BenchmarkRun('b4', benchmarks[0], labels[1], 2, '', '',
+ '', 'average', '')
+ benchmark_runs[4] = BenchmarkRun('b5', benchmarks[1], labels[0], 1, '', '',
+ '', 'average', '')
+ benchmark_runs[5] = BenchmarkRun('b6', benchmarks[1], labels[0], 2, '', '',
+ '', 'average', '')
+ benchmark_runs[6] = BenchmarkRun('b7', benchmarks[1], labels[1], 1, '', '',
+ '', 'average', '')
+ benchmark_runs[7] = BenchmarkRun('b8', benchmarks[1], labels[1], 2, '', '',
+ '', 'average', '')
+
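+    # Attach a Result carrying the mocked keyvals to each run;
+    # OrganizeResults presumably groups these keyvals into the expected
+    # structure defined above.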
+    for i, b in enumerate(benchmark_runs):
+      b.result = Result('', b.label, 'average', 'machine')
+      b.result.keyvals = mock_instance.keyval[i]
+
+ organized = OrganizeResults(benchmark_runs, labels, benchmarks)
+ self.assertEqual(organized, result)
+
+
+if __name__ == '__main__':
+ unittest.main()
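
The new file is added with mode 100755 and a python2 shebang, so it can
presumably be run directly as a standalone test, e.g.:

  $ python2 crosperf/results_organizer_unittest.py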