author     cmtice <cmtice@google.com>  2014-04-09 10:58:51 -0700
committer  chrome-internal-fetch <chrome-internal-fetch@google.com>  2014-04-11 20:01:27 +0000
commit     c454cee542ca459ef9bd87c9f72e81c822caf1e5 (patch)
tree       39a5c1bdc6c53073e3589da913cef3790f816e7f /crosperf/results_organizer_unittest.py
parent     0537956ef5981f4069fcaaefd3d25298f9d6ebd8 (diff)
Update unittests to all pass.
Fix the parameters in the various unittests so that they match recent
changes and all the unittests pass again.

BUG=None
TEST=Ran all the unittests with the changes.

Change-Id: I083b5127a2ade8f1dbaf2bb173d82183871cb7c7
Reviewed-on: https://chrome-internal-review.googlesource.com/159915
Reviewed-by: Yunlian Jiang <yunlian@google.com>
Commit-Queue: Caroline Tice <cmtice@google.com>
Tested-by: Caroline Tice <cmtice@google.com>
Diffstat (limited to 'crosperf/results_organizer_unittest.py')
-rwxr-xr-x  crosperf/results_organizer_unittest.py | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/crosperf/results_organizer_unittest.py b/crosperf/results_organizer_unittest.py
index b17df82a..7cb09316 100755
--- a/crosperf/results_organizer_unittest.py
+++ b/crosperf/results_organizer_unittest.py
@@ -88,25 +88,25 @@ class ResultOrganizerTest(unittest.TestCase):
benchmarks = [mock_instance.benchmark1, mock_instance.benchmark2]
benchmark_runs = [None]*8
benchmark_runs[0] = BenchmarkRun("b1", benchmarks[0],
- labels[0], 1, "", "", "", "")
+ labels[0], 1, "", "", "", "average", "")
benchmark_runs[1] = BenchmarkRun("b2", benchmarks[0],
- labels[0], 2, "", "", "", "")
+ labels[0], 2, "", "", "", "average", "")
benchmark_runs[2] = BenchmarkRun("b3", benchmarks[0],
- labels[1], 1, "", "", "", "")
+ labels[1], 1, "", "", "", "average", "")
benchmark_runs[3] = BenchmarkRun("b4", benchmarks[0],
- labels[1], 2, "", "", "", "")
+ labels[1], 2, "", "", "", "average", "")
benchmark_runs[4] = BenchmarkRun("b5", benchmarks[1],
- labels[0], 1, "", "", "", "")
+ labels[0], 1, "", "", "", "average", "")
benchmark_runs[5] = BenchmarkRun("b6", benchmarks[1],
- labels[0], 2, "", "", "", "")
+ labels[0], 2, "", "", "", "average", "")
benchmark_runs[6] = BenchmarkRun("b7", benchmarks[1],
- labels[1], 1, "", "", "", "")
+ labels[1], 1, "", "", "", "average", "")
benchmark_runs[7] = BenchmarkRun("b8", benchmarks[1],
- labels[1], 2, "", "", "", "")
+ labels[1], 2, "", "", "", "average", "")
i = 0
for b in benchmark_runs:
- b.result = Result("", b.label)
+ b.result = Result("", b.label, "average")
b.result.keyvals = mock_instance.keyval[i]
i += 1
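
The diff inserts one new positional argument ("average") before the last
argument of every BenchmarkRun call, and appends a third argument to the
Result constructor. A minimal sketch of how the updated call sites could
line up with the new signatures is below. The parameter names, notably
log_level, are assumptions for illustration (crosperf's logging verbosity
levels are "quiet", "average", and "verbose"); only the "average" argument
and its position come from the diff itself.

```python
# Hypothetical sketch of the changed signatures. Parameter names are
# assumed; only the extra "average" argument is taken from the diff.

class BenchmarkRun(object):
  def __init__(self, name, benchmark, label, iteration,
               cache_conditions, machine_manager, logger_to_use,
               log_level, share_users):
    # log_level is the new 8th positional argument; the unittests
    # pass "average", one of crosperf's verbosity levels.
    self.name = name
    self.benchmark = benchmark
    self.label = label
    self.iteration = iteration
    self.log_level = log_level
    self.result = None

class Result(object):
  def __init__(self, logger, label, log_level):
    # Result now also receives the log level as its third argument.
    self.label = label
    self.log_level = log_level
    self.keyvals = {}

# The updated test fixtures then read, for example:
run = BenchmarkRun("b1", "benchmark1", "label0", 1,
                   "", "", "", "average", "")
run.result = Result("", run.label, "average")
```

Threading the log level through each constructor, rather than reading a
global setting, keeps every run's verbosity explicit, which is presumably
why each call site in the unittest had to change in lockstep.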