aboutsummaryrefslogtreecommitdiff
path: root/crosperf/results_organizer_unittest.py
blob: 7cb09316a97479472b453b1721d3d41618ba864a (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
#!/usr/bin/python

# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Testing of ResultsOrganizer. We create some labels, benchmark_runs
   and then create a ResultsOrganizer, after that, we compare the result of
   ResultOrganizer"""

import unittest

from benchmark_run import BenchmarkRun
from results_cache import Result
from results_organizer import ResultOrganizer

import mock_instance

# Expected output of ResultOrganizer for the mock benchmark runs below.
# Layout: {benchmark_name: [per-label list of per-iteration keyval dicts]}.
# Each label entry holds the flattened keyvals of its two iterations, with
# the per-test sub-results ({'test': ...}) interleaved after each run's
# summary dict. This must exactly match what mock_instance.keyval produces
# once organized — do not reformat or reorder.
result = {'benchmark1': [[{'': 'PASS',
                  'bool': 'True',
                  'milliseconds_1': '1',
                  'milliseconds_2': '8',
                  'milliseconds_3': '9.2',
                  'ms_1': '2.1',
                  'total': '5'},
                 {'test': '2'},
                 {'test': '4'},
                 {'': 'PASS',
                  'bool': 'FALSE',
                  'milliseconds_1': '3',
                  'milliseconds_2': '5',
                  'ms_1': '2.2',
                  'total': '6'},
                 {'test': '3'},
                 {'test': '4'}],
                [{'': 'PASS',
                  'bool': 'FALSE',
                  'milliseconds_4': '30',
                  'milliseconds_5': '50',
                  'ms_1': '2.23',
                  'total': '6'},
                 {'test': '5'},
                 {'test': '4'},
                 {'': 'PASS',
                  'bool': 'FALSE',
                  'milliseconds_1': '3',
                  'milliseconds_6': '7',
                  'ms_1': '2.3',
                  'total': '7'},
                 {'test': '2'},
                 {'test': '6'}]],
 'benchmark2': [[{'': 'PASS',
                  'bool': 'TRUE',
                  'milliseconds_1': '3',
                  'milliseconds_8': '6',
                  'ms_1': '2.3',
                  'total': '7'},
                 {'test': '2'},
                 {'test': '6'},
                 {'': 'PASS',
                  'bool': 'TRUE',
                  'milliseconds_1': '3',
                  'milliseconds_8': '6',
                  'ms_1': '2.2',
                  'total': '7'},
                 {'test': '2'},
                 {'test': '2'}],
                [{'': 'PASS',
                  'bool': 'TRUE',
                  'milliseconds_1': '3',
                  'milliseconds_8': '6',
                  'ms_1': '2',
                  'total': '7'},
                 {'test': '2'},
                 {'test': '4'},
                 {'': 'PASS',
                  'bool': 'TRUE',
                  'milliseconds_1': '3',
                  'milliseconds_8': '6',
                  'ms_1': '1',
                  'total': '7'},
                 {'test': '1'},
                 {'test': '6'}]]}

class ResultOrganizerTest(unittest.TestCase):
  """Checks that ResultOrganizer reshapes benchmark-run keyvals into the
     nested layout captured in the module-level `result` constant."""

  def testResultOrganizer(self):
    labels = [mock_instance.label1, mock_instance.label2]
    benchmarks = [mock_instance.benchmark1, mock_instance.benchmark2]

    # Build the 8 runs as benchmark x label x iteration (1, 2), named
    # "b1".."b8" in that nesting order — the same order the original
    # hand-written list used, which must match mock_instance.keyval.
    benchmark_runs = []
    for benchmark in benchmarks:
      for label in labels:
        for iteration in (1, 2):
          name = "b%d" % (len(benchmark_runs) + 1)
          benchmark_runs.append(
              BenchmarkRun(name, benchmark, label, iteration,
                           "", "", "", "average", ""))

    # Attach the canned keyvals from mock_instance to each run, in order.
    for i, b in enumerate(benchmark_runs):
      b.result = Result("", b.label, "average")
      b.result.keyvals = mock_instance.keyval[i]

    ro = ResultOrganizer(benchmark_runs, labels, benchmarks)
    self.assertEqual(ro.result, result)

if __name__ == "__main__":
  unittest.main()