path: root/crosperf/results_organizer.py
blob: a771922f145ebe56a0eb6049af1a3a48f9450c32
#!/usr/bin/python

# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Parse data from benchmark_runs for tabulator."""
import json
import os
import re
import sys

from utils import misc

TELEMETRY_RESULT_DEFAULTS_FILE = "default-telemetry-results.json"

class ResultOrganizer(object):
  """Create a dict from benchmark_runs.

  The structure of the output dict is as follows:
  {"benchmark_1":[
    [{"key1":"v1", "key2":"v2"},{"key1":"v1", "key2","v2"}]
    #one label
    []
    #the other label
    ]
   "benchmark_2":
    [
    ]}.
  """
  key_filter = ["milliseconds_",
                "retval",
                "iterations",
                "ms_",
                "score_"]

  def __init__(self, benchmark_runs, labels, benchmarks=None):
    self.result = {}
    self.labels = []
    self.prog = re.compile(r"(\w+)\{(\d+)\}")
    self.benchmarks = benchmarks
    if not self.benchmarks:
      self.benchmarks = []
    for label in labels:
      self.labels.append(label.name)
    for benchmark_run in benchmark_runs:
      benchmark_name = benchmark_run.benchmark.name
      show_all_results = benchmark_run.benchmark.show_all_results
      if benchmark_name not in self.result:
        self.result[benchmark_name] = []
        while len(self.result[benchmark_name]) < len(labels):
          self.result[benchmark_name].append([])
      label_index = self.labels.index(benchmark_run.label.name)
      cur_table = self.result[benchmark_name][label_index]
      index = benchmark_run.iteration - 1
      while index >= len(cur_table):
        cur_table.append({})
      cur_dict = cur_table[index]
      if not benchmark_run.result:
        continue
      benchmark = benchmark_run.benchmark
      key_filter_on = (benchmark.key_results_only and
                       "PyAutoPerfTest" in benchmark.name + benchmark.test_name
                       and "perf." not in benchmark.test_args)
      if not show_all_results:
        summary_list = self._GetSummaryResults(benchmark.test_name)
        if summary_list:
          summary_list.append("retval")
        else:
          # Did not find test_name in json file; therefore show everything.
          show_all_results = True
      for test_key in benchmark_run.result.keyvals:
        if (key_filter_on and
            not any(key in test_key for key in self.key_filter)):
          continue
        if not show_all_results and test_key not in summary_list:
          continue
        result_value = benchmark_run.result.keyvals[test_key]
        cur_dict[test_key] = result_value
    self._DuplicatePass()

  def _GetSummaryResults(self, test_name):
    """Return the default result keys to report for test_name, or []."""
    dirname, _ = misc.GetRoot(sys.argv[0])
    fullname = os.path.join(dirname, TELEMETRY_RESULT_DEFAULTS_FILE)
    if os.path.exists(fullname):
      # Slurp the file into a dictionary.  The keys in the dictionary are
      # the benchmark names.  The value for a key is a list containing the
      # names of all the result fields that should be returned in a 'default'
      # report.
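      # Illustrative shape of the defaults file (the benchmark and field
      # names here are hypothetical, not taken from the real file):
      #   {"octane": ["Total_Score"], "sunspider": ["Total_Time"]}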
      with open(fullname) as defaults_file:
        result_defaults = json.load(defaults_file)
      # Check to see if the current benchmark test actually has an entry in
      # the dictionary.
      if test_name in result_defaults:
        return result_defaults[test_name]
    # No defaults file or no entry for test_name: return an empty list so
    # the caller falls back to showing all results.
    return []

  def _DuplicatePass(self):
    """Expand keys of the form name{i} into separate iterations."""
    for bench, data in self.result.items():
      max_dup = self._GetMaxDup(data)
      if not max_dup:
        continue
      for index, label in enumerate(data):
        data[index] = self._GetNonDupLabel(max_dup, label)
      self._AdjustIteration(max_dup, bench)

  def _GetMaxDup(self, data):
    """Find the maximum i inside ABCD{i}."""
    max_dup = 0
    for label in data:
      for run in label:
        for key in run:
          match = self.prog.match(key)
          if match:
            max_dup = max(max_dup, int(match.group(2)))
    return max_dup

  def _GetNonDupLabel(self, max_dup, label):
    """Create new list for the runs of the same label."""
    new_label = []
    for run in label:
      start_index = len(new_label)
      new_label.append(dict(run))
      for _ in range(max_dup):
        new_label.append({})
      new_run = new_label[start_index]
      # Move each "name{i}"-style key into the i-th extra run dict created
      # above, dropping the "{i}" suffix from the key name.
      for key, value in new_run.items():
        match = self.prog.match(key)
        if match:
          new_key = match.group(1)
          index = int(match.group(2))
          new_label[start_index + index][new_key] = str(value)
          del new_run[key]
    return new_label

  def _AdjustIteration(self, max_dup, bench):
    """Adjust the interation numbers if the have keys like ABCD{i}."""
    for benchmark in self.benchmarks:
      if benchmark.name == bench:
        if not benchmark.iteration_adjusted:
          benchmark.iteration_adjusted = True
          benchmark.iterations *= (max_dup + 1)
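

if __name__ == "__main__":
  # Illustrative sketch only, not part of the crosperf pipeline: it drives
  # ResultOrganizer with minimal stand-in objects carrying just the
  # attributes read above (benchmark, label, iteration, result.keyvals).
  # All names and values below are hypothetical.
  class _Stub(object):
    """Attribute bag standing in for the real benchmark/label/run objects."""

    def __init__(self, **kwargs):
      self.__dict__.update(kwargs)

  stub_label = _Stub(name="image_1")
  stub_benchmark = _Stub(name="page_cycler", test_name="page_cycler",
                         test_args="", key_results_only=False,
                         show_all_results=True, iteration_adjusted=False,
                         iterations=1)
  stub_run = _Stub(benchmark=stub_benchmark, label=stub_label, iteration=1,
                   result=_Stub(keyvals={"milliseconds_load": "123",
                                         "retval": 0}))
  organizer = ResultOrganizer([stub_run], [stub_label], [stub_benchmark])
  # Expected: {"page_cycler": [[{"milliseconds_load": "123", "retval": 0}]]}
  print(organizer.result)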