author     Luis Lozano <llozano@chromium.org>  2015-12-15 13:49:30 -0800
committer  Luis Lozano <llozano@chromium.org>  2015-12-16 17:36:06 +0000
commit     f2a3ef46f75d2196a93d3ed27f4d1fcf22b54fbe (patch)
tree       185d243c7eed7c7a0db6f0e640746cadc1479ea9 /crosperf/results_organizer_unittest.py
parent     2a66f70fef907c1cb15229cb58e5129cb620ac98 (diff)
Run pyformat on all the toolchain-utils files.
This gets rid of a lot of lint issues. Ran by doing this:

    for f in *.py; do
      echo -n "$f "
      if [ -x $f ]; then
        pyformat -i --remove_trailing_comma --yapf --force_quote_type=double $f
      else
        pyformat -i --remove_shebang --remove_trailing_comma --yapf --force_quote_type=double $f
      fi
    done

BUG=chromium:567921
TEST=Ran simple crosperf run.

Change-Id: I59778835fdaa5f706d2e1765924389f9e97433d1
Reviewed-on: https://chrome-internal-review.googlesource.com/242031
Reviewed-by: Luis Lozano <llozano@chromium.org>
Commit-Queue: Luis Lozano <llozano@chromium.org>
Tested-by: Luis Lozano <llozano@chromium.org>
Reviewed-by: Yunlian Jiang <yunlian@google.com>
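For readers without access to pyformat (a Google-internal wrapper around yapf, which the --yapf flag above selects), a rough public equivalent of that loop can be sketched with yapf's Python API. This is an illustrative assumption, not the tool actually used in this change; pyformat's extra flags (shebang removal, trailing-comma handling, forced quote type) have no direct counterpart here:

    # Sketch: reformat every .py file in place, roughly like `pyformat -i`.
    # Assumes the public yapf package (pip install yapf).
    import glob
    from yapf.yapflib.yapf_api import FormatFile

    for path in glob.glob('*.py'):
        # With in_place=True the file is rewritten on disk; `changed`
        # reports whether yapf modified it.
        _, _, changed = FormatFile(path, in_place=True)
        print(path, 'reformatted' if changed else 'already clean')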
Diffstat (limited to 'crosperf/results_organizer_unittest.py')
-rwxr-xr-x  crosperf/results_organizer_unittest.py  154
1 file changed, 70 insertions(+), 84 deletions(-)
diff --git a/crosperf/results_organizer_unittest.py b/crosperf/results_organizer_unittest.py
index c170f0a3..914ecc5e 100755
--- a/crosperf/results_organizer_unittest.py
+++ b/crosperf/results_organizer_unittest.py
@@ -3,7 +3,6 @@
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Testing of ResultsOrganizer. We create some labels, benchmark_runs
and then create a ResultsOrganizer, after that, we compare the result of
ResultOrganizer"""
@@ -17,101 +16,88 @@ from results_organizer import ResultOrganizer
import mock_instance
result = {'benchmark1': [[{'': 'PASS',
- 'bool': 'True',
- 'milliseconds_1': '1',
- 'milliseconds_2': '8',
- 'milliseconds_3': '9.2',
- 'ms_1': '2.1',
- 'total': '5'},
- {'test': '2'},
- {'test': '4'},
- {'': 'PASS',
- 'bool': 'FALSE',
- 'milliseconds_1': '3',
- 'milliseconds_2': '5',
- 'ms_1': '2.2',
- 'total': '6'},
- {'test': '3'},
- {'test': '4'}],
- [{'': 'PASS',
- 'bool': 'FALSE',
- 'milliseconds_4': '30',
- 'milliseconds_5': '50',
- 'ms_1': '2.23',
- 'total': '6'},
- {'test': '5'},
- {'test': '4'},
- {'': 'PASS',
- 'bool': 'FALSE',
- 'milliseconds_1': '3',
- 'milliseconds_6': '7',
- 'ms_1': '2.3',
- 'total': '7'},
- {'test': '2'},
- {'test': '6'}]],
- 'benchmark2': [[{'': 'PASS',
- 'bool': 'TRUE',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2.3',
- 'total': '7'},
- {'test': '2'},
- {'test': '6'},
- {'': 'PASS',
- 'bool': 'TRUE',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2.2',
- 'total': '7'},
- {'test': '2'},
- {'test': '2'}],
- [{'': 'PASS',
- 'bool': 'TRUE',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '2',
- 'total': '7'},
- {'test': '2'},
- {'test': '4'},
- {'': 'PASS',
- 'bool': 'TRUE',
- 'milliseconds_1': '3',
- 'milliseconds_8': '6',
- 'ms_1': '1',
- 'total': '7'},
- {'test': '1'},
- {'test': '6'}]]}
+ 'bool': 'True',
+ 'milliseconds_1': '1',
+ 'milliseconds_2': '8',
+ 'milliseconds_3': '9.2',
+ 'ms_1': '2.1',
+ 'total': '5'}, {'test': '2'}, {'test': '4'},
+ {'': 'PASS',
+ 'bool': 'FALSE',
+ 'milliseconds_1': '3',
+ 'milliseconds_2': '5',
+ 'ms_1': '2.2',
+ 'total': '6'}, {'test': '3'}, {'test': '4'}],
+ [{'': 'PASS',
+ 'bool': 'FALSE',
+ 'milliseconds_4': '30',
+ 'milliseconds_5': '50',
+ 'ms_1': '2.23',
+ 'total': '6'}, {'test': '5'}, {'test': '4'},
+ {'': 'PASS',
+ 'bool': 'FALSE',
+ 'milliseconds_1': '3',
+ 'milliseconds_6': '7',
+ 'ms_1': '2.3',
+ 'total': '7'}, {'test': '2'}, {'test': '6'}]],
+ 'benchmark2': [[{'': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2.3',
+ 'total': '7'}, {'test': '2'}, {'test': '6'},
+ {'': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2.2',
+ 'total': '7'}, {'test': '2'}, {'test': '2'}],
+ [{'': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '2',
+ 'total': '7'}, {'test': '2'}, {'test': '4'},
+ {'': 'PASS',
+ 'bool': 'TRUE',
+ 'milliseconds_1': '3',
+ 'milliseconds_8': '6',
+ 'ms_1': '1',
+ 'total': '7'}, {'test': '1'}, {'test': '6'}]]}
+
class ResultOrganizerTest(unittest.TestCase):
+
def testResultOrganizer(self):
labels = [mock_instance.label1, mock_instance.label2]
benchmarks = [mock_instance.benchmark1, mock_instance.benchmark2]
- benchmark_runs = [None]*8
- benchmark_runs[0] = BenchmarkRun("b1", benchmarks[0],
- labels[0], 1, "", "", "", "average", "")
- benchmark_runs[1] = BenchmarkRun("b2", benchmarks[0],
- labels[0], 2, "", "", "", "average", "")
- benchmark_runs[2] = BenchmarkRun("b3", benchmarks[0],
- labels[1], 1, "", "", "", "average", "")
- benchmark_runs[3] = BenchmarkRun("b4", benchmarks[0],
- labels[1], 2, "", "", "", "average", "")
- benchmark_runs[4] = BenchmarkRun("b5", benchmarks[1],
- labels[0], 1, "", "", "", "average", "")
- benchmark_runs[5] = BenchmarkRun("b6", benchmarks[1],
- labels[0], 2, "", "", "", "average", "")
- benchmark_runs[6] = BenchmarkRun("b7", benchmarks[1],
- labels[1], 1, "", "", "", "average", "")
- benchmark_runs[7] = BenchmarkRun("b8", benchmarks[1],
- labels[1], 2, "", "", "", "average", "")
+ benchmark_runs = [None] * 8
+ benchmark_runs[0] = BenchmarkRun('b1', benchmarks[0], labels[0], 1, '', '',
+ '', 'average', '')
+ benchmark_runs[1] = BenchmarkRun('b2', benchmarks[0], labels[0], 2, '', '',
+ '', 'average', '')
+ benchmark_runs[2] = BenchmarkRun('b3', benchmarks[0], labels[1], 1, '', '',
+ '', 'average', '')
+ benchmark_runs[3] = BenchmarkRun('b4', benchmarks[0], labels[1], 2, '', '',
+ '', 'average', '')
+ benchmark_runs[4] = BenchmarkRun('b5', benchmarks[1], labels[0], 1, '', '',
+ '', 'average', '')
+ benchmark_runs[5] = BenchmarkRun('b6', benchmarks[1], labels[0], 2, '', '',
+ '', 'average', '')
+ benchmark_runs[6] = BenchmarkRun('b7', benchmarks[1], labels[1], 1, '', '',
+ '', 'average', '')
+ benchmark_runs[7] = BenchmarkRun('b8', benchmarks[1], labels[1], 2, '', '',
+ '', 'average', '')
i = 0
for b in benchmark_runs:
- b.result = Result("", b.label, "average", "machine")
+ b.result = Result('', b.label, 'average', 'machine')
b.result.keyvals = mock_instance.keyval[i]
i += 1
ro = ResultOrganizer(benchmark_runs, labels, benchmarks)
self.assertEqual(ro.result, result)
-if __name__ == "__main__":
+
+if __name__ == '__main__':
unittest.main()
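A usage note on the structure this test asserts: the module-level `result` literal maps each benchmark name to one list per label, and each of those lists holds the keyval dicts collected from that label's iterations in run order. The lookups below (hypothetical locals, shown only for illustration) index into the `result` defined at the top of the new file:

    # Illustrative indexing into the expected `result` structure above.
    label1_runs = result['benchmark1'][0]   # benchmark1 under the first label
    assert label1_runs[0]['ms_1'] == '2.1'  # keyvals from the first iteration
    assert label1_runs[1] == {'test': '2'}  # per-test dict that follows it
    assert result['benchmark2'][1][3]['ms_1'] == '1'  # second label, later run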