path: root/crosperf/results_organizer.py
author     Ahmad Sharif <asharif@chromium.org>  2012-12-20 12:09:49 -0800
committer  Ahmad Sharif <asharif@chromium.org>  2012-12-20 12:09:49 -0800
commit     4467f004e7f0854963bec90daff1879fbd9d2fec (patch)
tree       aac36caa6279aa532e2d6234e50ee812f2db0c8d /crosperf/results_organizer.py
parent     f395c26437cbdabc2960447fba89b226f4409e82 (diff)
download   toolchain-utils-4467f004e7f0854963bec90daff1879fbd9d2fec.tar.gz
Synced repos to: 64740
Diffstat (limited to 'crosperf/results_organizer.py')
-rw-r--r--  crosperf/results_organizer.py | 59
1 file changed, 56 insertions(+), 3 deletions(-)
diff --git a/crosperf/results_organizer.py b/crosperf/results_organizer.py
index 0071387b..810186b2 100644
--- a/crosperf/results_organizer.py
+++ b/crosperf/results_organizer.py
@@ -1,6 +1,8 @@
 #!/usr/bin/python
 
 # Copyright 2012 Google Inc. All Rights Reserved.
 
+"""Parse data from benchmark_runs for tabulator."""
+import re
 
 class ResultOrganizer(object):
@@ -18,18 +20,22 @@ class ResultOrganizer(object):
    ]}.
  """
 
-  def __init__(self, benchmark_runs, labels):
+  def __init__(self, benchmark_runs, labels, benchmarks=None):
     self.result = {}
     self.labels = []
+    self.prog = re.compile(r"(\w+)\{(\d+)\}")
+    self.benchmarks = benchmarks
+    if not self.benchmarks:
+      self.benchmarks = []
     for label in labels:
       self.labels.append(label.name)
     for benchmark_run in benchmark_runs:
-      benchmark_name = benchmark_run.benchmark_name
+      benchmark_name = benchmark_run.benchmark.name
       if benchmark_name not in self.result:
         self.result[benchmark_name] = []
         while len(self.result[benchmark_name]) < len(labels):
           self.result[benchmark_name].append([])
-      label_index = self.labels.index(benchmark_run.label_name)
+      label_index = self.labels.index(benchmark_run.label.name)
       cur_table = self.result[benchmark_name][label_index]
       index = benchmark_run.iteration - 1
       while index >= len(cur_table):
@@ -40,3 +46,50 @@ class ResultOrganizer(object):
       for autotest_key in benchmark_run.result.keyvals:
         result_value = benchmark_run.result.keyvals[autotest_key]
         cur_dict[autotest_key] = result_value
+    self._DuplicatePass()
+
+  def _DuplicatePass(self):
+    for bench, data in self.result.items():
+      max_dup = self._GetMaxDup(data)
+      if not max_dup:
+        continue
+      for label in data:
+        index = data.index(label)
+        data[index] = self._GetNonDupLabel(max_dup, label)
+      self._AdjustIteration(max_dup, bench)
+
+  def _GetMaxDup(self, data):
+    """Find the maximum i inside ABCD{i}."""
+    max_dup = 0
+    for label in data:
+      for run in label:
+        for key in run:
+          if re.match(self.prog, key):
+            max_dup = max(max_dup,
+                          int(re.search(self.prog, key).group(2)))
+    return max_dup
+
+  def _GetNonDupLabel(self, max_dup, label):
+    """Create a new list for the runs of the same label."""
+    new_label = []
+    for run in label:
+      start_index = len(new_label)
+      new_label.append(dict(run))
+      for i in range(max_dup):
+        new_label.append({})
+      new_run = new_label[start_index]
+      for key, value in new_run.items():
+        if re.match(self.prog, key):
+          new_key = re.search(self.prog, key).group(1)
+          index = int(re.search(self.prog, key).group(2))
+          new_label[start_index + index][new_key] = str(value)
+          del new_run[key]
+    return new_label
+
+  def _AdjustIteration(self, max_dup, bench):
+    """Adjust the iteration numbers if they have keys like ABCD{i}."""
+    for benchmark in self.benchmarks:
+      if benchmark.name == bench:
+        if not benchmark.iteration_adjusted:
+          benchmark.iteration_adjusted = True
+          benchmark.iterations *= (max_dup + 1)
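
The methods added above split autotest keyvals whose names carry a {i} suffix (matched by the compiled pattern (\w+)\{(\d+)\}, e.g. "score{1}") into separate per-iteration dicts, and _AdjustIteration then scales benchmark.iterations by (max_dup + 1) so the tabulator sees the extra copies as additional iterations. Below is a minimal standalone sketch of that splitting step applied to a single run dict; the split_duplicates helper and the sample keyvals are illustrative only and are not part of crosperf.

import re

# Same pattern the patch compiles as self.prog:
# group(1) is the base key, group(2) is the duplicate index.
DUP_KEY_RE = re.compile(r"(\w+)\{(\d+)\}")

def split_duplicates(run):
  """Split keys such as 'score{1}' out of one run dict.

  Returns a list of dicts: index 0 keeps the unsuffixed keys, and
  index i receives the values whose keys carried the {i} suffix.
  """
  max_dup = 0
  for key in run:
    match = DUP_KEY_RE.match(key)
    if match:
      max_dup = max(max_dup, int(match.group(2)))
  split = [{} for _ in range(max_dup + 1)]
  for key, value in run.items():
    match = DUP_KEY_RE.match(key)
    if match:
      # Mirror _GetNonDupLabel, which stores the duplicate as str(value).
      split[int(match.group(2))][match.group(1)] = str(value)
    else:
      split[0][key] = value
  return split

print(split_duplicates({"score": 10, "score{1}": 11, "score{2}": 12}))
# -> [{'score': 10}, {'score': '11'}, {'score': '12'}]

As in _GetNonDupLabel, index 0 keeps the original unsuffixed values and only the duplicated copies pass through str(), so a run reporting "score", "score{1}" and "score{2}" ends up presented as three iterations of that label.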