path: root/crosperf/results_report.py
blob: adb85874e6608e83abb1559a6ce8e0a273325839
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module to handle the report format."""
from __future__ import print_function

import datetime
import itertools
import json
import os

from cros_utils.tabulator import AmeanResult
from cros_utils.tabulator import Cell
from cros_utils.tabulator import CoeffVarFormat
from cros_utils.tabulator import CoeffVarResult
from cros_utils.tabulator import Column
from cros_utils.tabulator import Format
from cros_utils.tabulator import GmeanRatioResult
from cros_utils.tabulator import LiteralResult
from cros_utils.tabulator import MaxResult
from cros_utils.tabulator import MinResult
from cros_utils.tabulator import PValueFormat
from cros_utils.tabulator import PValueResult
from cros_utils.tabulator import RatioFormat
from cros_utils.tabulator import RawResult
from cros_utils.tabulator import StdResult
from cros_utils.tabulator import TableFormatter
from cros_utils.tabulator import TableGenerator
from cros_utils.tabulator import TablePrinter
from update_telemetry_defaults import TelemetryDefaults

from column_chart import ColumnChart
from results_organizer import ResultOrganizer
from perf_table import PerfTable


def ParseChromeosImage(chromeos_image):
  """Parse the chromeos_image string for the image and version.

  The chromeos_image string will probably be in one of two formats:
  1: <path-to-chroot>/src/build/images/<board>/<ChromeOS-version>.<datetime>/ \
     chromiumos_test_image.bin
  2: <path-to-chroot>/chroot/tmp/<buildbot-build>/<ChromeOS-version>/ \
      chromiumos_test_image.bin

  We parse these strings to find the 'chromeos_version' to store in the
  json archive (without the .datetime bit in the first case); and also
  the 'chromeos_image', which is the whole path in the first case, but only
  the part after '/chroot/tmp' in the second case.

  Args:
      chromeos_image: string containing the path to the chromeos_image that
      crosperf used for the test.

  Returns:
      version, image: The results of parsing the input string, as explained
      above.
  """
  # Find the Chromeos Version, e.g. R45-2345.0.0.....
  # chromeos_image should have been something like:
  # <path>/<board-trybot-release>/<chromeos-version>/chromiumos_test_image.bin"
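  # Illustrative example (the path below is hypothetical): a local build like
  #   <path>/src/build/images/lumpy/R43-6775.0.0.2015_05_01_1234/ \
  #     chromiumos_test_image.bin
  # yields version 'R43-6775.0.0', and since the path is not under
  # '/chroot/tmp', the image is the unmodified input path.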
  if chromeos_image.endswith('/chromiumos_test_image.bin'):
    full_version = chromeos_image.split('/')[-2]
    # Strip the date and time off of local builds (which have the format
    # "R43-2345.0.0.date-and-time").
    version, _ = os.path.splitext(full_version)
  else:
    version = ''

  # Find the chromeos image.  If it's somewhere in .../chroot/tmp/..., then
  # it's an official image that got downloaded, so chop off the download path
  # to make the official image name more clear.
  official_image_path = '/chroot/tmp'
  if official_image_path in chromeos_image:
    image = chromeos_image.split(official_image_path, 1)[1]
  else:
    image = chromeos_image
  return version, image


class ResultsReport(object):
  """Class to handle the report format."""
  MAX_COLOR_CODE = 255
  PERF_ROWS = 5

  def __init__(self, experiment):
    self.experiment = experiment
    self.benchmark_runs = experiment.benchmark_runs
    self.labels = experiment.labels
    self.benchmarks = experiment.benchmarks
    self.baseline = self.labels[0]

  def _SortByLabel(self, runs):
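    """Group benchmark runs into a dict keyed by label name."""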
    labels = {}
    for benchmark_run in runs:
      if benchmark_run.label_name not in labels:
        labels[benchmark_run.label_name] = []
      labels[benchmark_run.label_name].append(benchmark_run)
    return labels

  def GetFullTables(self, perf=False):
    columns = [Column(RawResult(), Format()),
               Column(MinResult(), Format()),
               Column(MaxResult(), Format()),
               Column(AmeanResult(), Format()),
               Column(StdResult(), Format(), 'StdDev'),
               Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
               Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
               Column(PValueResult(), PValueFormat(), 'p-value')]
    if not perf:
      return self._GetTables(self.labels, self.benchmark_runs, columns, 'full')
    return self._GetPerfTables(self.labels, columns, 'full')

  def GetSummaryTables(self, perf=False):
    columns = [Column(AmeanResult(), Format()),
               Column(StdResult(), Format(), 'StdDev'),
               Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
               Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
               Column(PValueResult(), PValueFormat(), 'p-value')]
    if not perf:
      return self._GetTables(self.labels, self.benchmark_runs, columns,
                             'summary')
    return self._GetPerfTables(self.labels, columns, 'summary')

  def _ParseColumn(self, columns, iteration):
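    """Expand the RawResult column into one LiteralResult column per iteration."""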
    new_column = []
    for column in columns:
      if column.result.__class__.__name__ != 'RawResult':
        # TODO(asharif): tabulator should support full table natively.
        new_column.append(column)
      else:
        for i in range(iteration):
          cc = Column(LiteralResult(i), Format(), str(i + 1))
          new_column.append(cc)
    return new_column

  def _AreAllRunsEmpty(self, runs):
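    """Return True if none of the runs for any label produced results."""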
    for label in runs:
      for dictionary in label:
        if dictionary:
          return False
    return True

  def _GetTableHeader(self, benchmark):
    benchmark_info = ('Benchmark:  {0};  Iterations: {1}'
                      .format(benchmark.name, benchmark.iterations))
    cell = Cell()
    cell.string_value = benchmark_info
    cell.header = True
    return [[cell]]

  def _GetTables(self, labels, benchmark_runs, columns, table_type):
    tables = []
    ro = ResultOrganizer(benchmark_runs, labels, self.benchmarks)
    result = ro.result
    label_name = ro.labels
    for item in result:
      benchmark = None
      runs = result[item]
      for benchmark in self.benchmarks:
        if benchmark.name == item:
          break
      ben_table = self._GetTableHeader(benchmark)

      if self._AreAllRunsEmpty(runs):
        cell = Cell()
        cell.string_value = ('This benchmark contains no results.'
                             ' Is the benchmark name valid?')
        cell_table = [[cell]]
      else:
        tg = TableGenerator(runs, label_name)
        table = tg.GetTable()
        parsed_columns = self._ParseColumn(columns, benchmark.iterations)
        tf = TableFormatter(table, parsed_columns)
        cell_table = tf.GetCellTable(table_type)
      tables.append(ben_table)
      tables.append(cell_table)
    return tables

  def _GetPerfTables(self, labels, columns, table_type):
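    """Generate perf data tables: for each benchmark, one table per perf event."""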
    tables = []
    label_names = [label.name for label in labels]
    p_table = PerfTable(self.experiment, label_names)

    if not p_table.perf_data:
      return tables

    for benchmark in p_table.perf_data:
      ben = None
      for ben in self.benchmarks:
        if ben.name == benchmark:
          break

      ben_table = self._GetTableHeader(ben)
      tables.append(ben_table)
      benchmark_data = p_table.perf_data[benchmark]
      row_info = p_table.row_info[benchmark]
      table = []
      for event in benchmark_data:
        tg = TableGenerator(benchmark_data[event],
                            label_names,
                            sort=TableGenerator.SORT_BY_VALUES_DESC)
        table = tg.GetTable(max(self.PERF_ROWS, row_info[event]))
        parsed_columns = self._ParseColumn(columns, ben.iterations)
        tf = TableFormatter(table, parsed_columns)
        tf.GenerateCellTable(table_type)
        tf.AddColumnName()
        tf.AddLabelName()
        tf.AddHeader(str(event))
        table = tf.GetCellTable(table_type, headers=False)
        tables.append(table)
    return tables

  def PrintTables(self, tables, out_to):
    output = ''
    if not tables:
      return output
    for table in tables:
      if out_to == 'HTML':
        tp = TablePrinter(table, TablePrinter.HTML)
      elif out_to == 'PLAIN':
        tp = TablePrinter(table, TablePrinter.PLAIN)
      elif out_to == 'CONSOLE':
        tp = TablePrinter(table, TablePrinter.CONSOLE)
      elif out_to == 'TSV':
        tp = TablePrinter(table, TablePrinter.TSV)
      elif out_to == 'EMAIL':
        tp = TablePrinter(table, TablePrinter.EMAIL)
      else:
        # An unrecognized out_to value would leave 'tp' unset and crash below
        # with a NameError, so fail with a clearer error instead.
        raise ValueError('Unknown output format: %s' % out_to)
      output += tp.Print()
    return output


class TextResultsReport(ResultsReport):
  """Class to generate text result report."""
  TEXT = """
===========================================
Results report for: '%s'
===========================================

-------------------------------------------
Summary
-------------------------------------------
%s


Number of re-images: %s

-------------------------------------------
Benchmark Run Status
-------------------------------------------
%s


-------------------------------------------
Perf Data
-------------------------------------------
%s



-------------------------------------------
Experiment File
-------------------------------------------
%s


-------------------------------------------
CPUInfo
-------------------------------------------
%s
===========================================
"""

  def __init__(self, experiment, email=False):
    super(TextResultsReport, self).__init__(experiment)
    self.email = email

  def GetStatusTable(self):
    """Generate the status table by the tabulator."""
    table = [['', '']]
    columns = [Column(LiteralResult(iteration=0), Format(), 'Status'),
               Column(LiteralResult(iteration=1), Format(), 'Failing Reason')]

    for benchmark_run in self.benchmark_runs:
      status = [benchmark_run.name, [benchmark_run.timeline.GetLastEvent(),
                                     benchmark_run.failure_reason]]
      table.append(status)
    tf = TableFormatter(table, columns)
    cell_table = tf.GetCellTable('status')
    return [cell_table]

  def GetReport(self):
    """Generate the report for email and console."""
    status_table = self.GetStatusTable()
    summary_table = self.GetSummaryTables()
    perf_table = self.GetSummaryTables(perf=True)
    if not perf_table:
      perf_table = None
    output_type = 'EMAIL' if self.email else 'CONSOLE'
    return self.TEXT % (
        self.experiment.name, self.PrintTables(summary_table, output_type),
        self.experiment.machine_manager.num_reimages,
        self.PrintTables(status_table, output_type),
        self.PrintTables(perf_table, output_type),
        self.experiment.experiment_file,
        self.experiment.machine_manager.GetAllCPUInfo(self.experiment.labels))


class HTMLResultsReport(ResultsReport):
  """Class to generate html result report."""

  HTML = """
<html>
  <head>
    <style type="text/css">

body {
  font-family: "Lucida Sans Unicode", "Lucida Grande", Sans-Serif;
  font-size: 12px;
}

pre {
  margin: 10px;
  color: #039;
  font-size: 14px;
}

.chart {
  display: inline;
}

.hidden {
  visibility: hidden;
}

.results-section {
  border: 1px solid #b9c9fe;
  margin: 10px;
}

.results-section-title {
  background-color: #b9c9fe;
  color: #039;
  padding: 7px;
  font-size: 14px;
  width: 200px;
}

.results-section-content {
  margin: 10px;
  padding: 10px;
  overflow:auto;
}

#box-table-a {
  font-size: 12px;
  width: 480px;
  text-align: left;
  border-collapse: collapse;
}

#box-table-a th {
  padding: 6px;
  background: #b9c9fe;
  border-right: 1px solid #fff;
  border-bottom: 1px solid #fff;
  color: #039;
  text-align: center;
}

#box-table-a td {
  padding: 4px;
  background: #e8edff;
  border-bottom: 1px solid #fff;
  border-right: 1px solid #fff;
  color: #669;
  border-top: 1px solid transparent;
}

#box-table-a tr:hover td {
  background: #d0dafd;
  color: #339;
}

    </style>
    <script type='text/javascript' src='https://www.google.com/jsapi'></script>
    <script type='text/javascript'>
      google.load('visualization', '1', {packages:['corechart']});
      google.setOnLoadCallback(init);
      function init() {
        switchTab('summary', 'html');
        %s
        switchTab('full', 'html');
        drawTable();
      }
      function drawTable() {
        %s
      }
      function switchTab(table, tab) {
        document.getElementById(table + '-html').style.display = 'none';
        document.getElementById(table + '-text').style.display = 'none';
        document.getElementById(table + '-tsv').style.display = 'none';
        document.getElementById(table + '-' + tab).style.display = 'block';
      }
    </script>
  </head>

  <body>
    <div class='results-section'>
      <div class='results-section-title'>Summary Table</div>
      <div class='results-section-content'>
        <div id='summary-html'>%s</div>
        <div id='summary-text'><pre>%s</pre></div>
        <div id='summary-tsv'><pre>%s</pre></div>
      </div>
      %s
    </div>
    %s
    <div class='results-section'>
      <div class='results-section-title'>Charts</div>
      <div class='results-section-content'>%s</div>
    </div>
    <div class='results-section'>
      <div class='results-section-title'>Full Table</div>
      <div class='results-section-content'>
        <div id='full-html'>%s</div>
        <div id='full-text'><pre>%s</pre></div>
        <div id='full-tsv'><pre>%s</pre></div>
      </div>
      %s
    </div>
    <div class='results-section'>
      <div class='results-section-title'>Experiment File</div>
      <div class='results-section-content'>
        <pre>%s</pre>
    </div>
    </div>
  </body>
</html>
"""

  PERF_HTML = """
    <div class='results-section'>
      <div class='results-section-title'>Perf Table</div>
      <div class='results-section-content'>
        <div id='perf-html'>%s</div>
        <div id='perf-text'><pre>%s</pre></div>
        <div id='perf-tsv'><pre>%s</pre></div>
      </div>
      %s
    </div>
"""

  def __init__(self, experiment):
    super(HTMLResultsReport, self).__init__(experiment)

  def _GetTabMenuHTML(self, table):
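    """Return the tab-menu HTML that switches the given table between its
    HTML, text and TSV views."""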
    return """
<div class='tab-menu'>
  <a href="javascript:switchTab('%s', 'html')">HTML</a>
  <a href="javascript:switchTab('%s', 'text')">Text</a>
  <a href="javascript:switchTab('%s', 'tsv')">TSV</a>
</div>""" % (table, table, table)

  def GetReport(self):
    charts = self._GetCharts(self.labels, self.benchmark_runs)
    chart_javascript = ''.join(chart.GetJavascript() for chart in charts)
    chart_divs = ''.join(chart.GetDiv() for chart in charts)

    summary_table = self.GetSummaryTables()
    full_table = self.GetFullTables()
    perf_table = self.GetSummaryTables(perf=True)
    if perf_table:
      perf_html = self.PERF_HTML % (self.PrintTables(perf_table, 'HTML'),
                                    self.PrintTables(perf_table, 'PLAIN'),
                                    self.PrintTables(perf_table, 'TSV'),
                                    self._GetTabMenuHTML('perf'))
      perf_init = "switchTab('perf', 'html');"
    else:
      perf_html = ''
      perf_init = ''

    return self.HTML % (
        perf_init, chart_javascript, self.PrintTables(summary_table, 'HTML'),
        self.PrintTables(summary_table, 'PLAIN'),
        self.PrintTables(summary_table, 'TSV'), self._GetTabMenuHTML('summary'),
        perf_html, chart_divs, self.PrintTables(full_table, 'HTML'),
        self.PrintTables(full_table, 'PLAIN'),
        self.PrintTables(full_table, 'TSV'), self._GetTabMenuHTML('full'),
        self.experiment.experiment_file)

  def _GetCharts(self, labels, benchmark_runs):
    charts = []
    ro = ResultOrganizer(benchmark_runs, labels)
    result = ro.result
    for item, runs in result.iteritems():
      tg = TableGenerator(runs, ro.labels)
      table = tg.GetTable()
      columns = [Column(AmeanResult(), Format()), Column(MinResult(), Format()),
                 Column(MaxResult(), Format())]
      tf = TableFormatter(table, columns)
      data_table = tf.GetCellTable('full')
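      # The cell table built above has two header rows; each data row holds
      # the test name in column 0 followed by an (average, min, max) triple
      # of cells per label, which is what the indexing below assumes.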

      for i in range(2, len(data_table)):
        cur_row_data = data_table[i]
        test_key = cur_row_data[0].string_value
        title = '{0}: {1}'.format(item, test_key.replace('/', ''))
        chart = ColumnChart(title, 300, 200)
        chart.AddColumn('Label', 'string')
        chart.AddColumn('Average', 'number')
        chart.AddColumn('Min', 'number')
        chart.AddColumn('Max', 'number')
        chart.AddSeries('Min', 'line', 'black')
        chart.AddSeries('Max', 'line', 'black')
        cur_index = 1
        for label in ro.labels:
          chart.AddRow([label,
                        cur_row_data[cur_index].value,
                        cur_row_data[cur_index + 1].value,
                        cur_row_data[cur_index + 2].value])
          if isinstance(cur_row_data[cur_index].value, str):
            chart = None
            break
          cur_index += 3
        if chart:
          charts.append(chart)
    return charts


class JSONResultsReport(ResultsReport):
  """Class that generates JSON reports."""

  @staticmethod
  def _WriteResultsToFile(filename, results):
    """Write the results as JSON to the given filename."""
    with open(filename, 'w') as fp:
      json.dump(results, fp, indent=2)

  def __init__(self, experiment, date=None, time=None):
    super(JSONResultsReport, self).__init__(experiment)
    self.ro = ResultOrganizer(experiment.benchmark_runs,
                              experiment.labels,
                              experiment.benchmarks,
                              json_report=True)
    self.date = date
    self.time = time
    self.defaults = TelemetryDefaults()
    if not self.date:
      timestamp = datetime.datetime.strftime(datetime.datetime.now(),
                                             '%Y-%m-%d %H:%M:%S')
      date, time = timestamp.split(' ')
      self.date = date
      self.time = time

  def GetReport(self, results_dir, write_results=None):
    if write_results is None:
      write_results = JSONResultsReport._WriteResultsToFile

    self.defaults.ReadDefaultsFile()
    final_results = []
    board = self.experiment.labels[0].board
    compiler_string = 'gcc'
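    # self.ro.result maps each test name to a list with one entry per label,
    # and each of those entries is a list of per-iteration result dicts.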
    for test, test_results in self.ro.result.iteritems():
      for label, label_results in itertools.izip(self.ro.labels, test_results):
        for iter_results in label_results:
          json_results = {
              'date': self.date,
              'time': self.time,
              'board': board,
              'label': label
          }
          common_checksum = ''
          common_string = ''
          for l in self.experiment.labels:
            if l.name == label:
              img_path = os.path.realpath(os.path.expanduser(l.chromeos_image))
              ver, img = ParseChromeosImage(img_path)
              json_results['chromeos_image'] = img
              json_results['chromeos_version'] = ver
              json_results['chrome_version'] = l.chrome_version
              json_results['compiler'] = l.compiler
              # If any of the labels used the LLVM compiler, we will add
              # ".llvm" to the json report filename. (Otherwise we use .gcc).
              if 'llvm' in l.compiler:
                compiler_string = 'llvm'
              common_checksum = \
                self.experiment.machine_manager.machine_checksum[l.name]
              common_string = \
                self.experiment.machine_manager.machine_checksum_string[l.name]
              break
          else:
            raise RuntimeError("Label doesn't exist in label_results?")
          json_results['test_name'] = test

          if not iter_results or iter_results['retval'] != 0:
            json_results['pass'] = False
          else:
            json_results['pass'] = True
            # Get overall results.
            if test in self.defaults.GetDefault():
              default_result_fields = self.defaults.GetDefault()[test]
              value = []
              for f in default_result_fields:
                if f in iter_results:
                  v = iter_results[f]
                  if type(v) == list:
                    v = v[0]
                  # New telemetry results format: sometimes we get a list
                  # of lists now.
                  if type(v) == list:
                    v = v[0]
                  item = (f, float(v))
                  value.append(item)
              json_results['overall_result'] = value
            # Get detailed results.
            detail_results = {}
            for k in iter_results:
              if k != 'retval':
                v = iter_results[k]
                if type(v) == list:
                  v = v[0]
                if v != 'PASS':
                  if k.find('machine') == -1:
                    if v is None:
                      continue
                    if type(v) != list:
                      detail_results[k] = float(v)
                    else:
                      detail_results[k] = [float(d) for d in v]
                  else:
                    json_results[k] = v
            if 'machine_checksum' not in json_results:
              json_results['machine_checksum'] = common_checksum
            if 'machine_string' not in json_results:
              json_results['machine_string'] = common_string
            json_results['detailed_results'] = detail_results
          final_results.append(json_results)

    filename = 'report_%s_%s_%s.%s.json' % (
        board, self.date, self.time.replace(':', '.'), compiler_string)
    fullname = os.path.join(results_dir, filename)
    write_results(fullname, final_results)