aboutsummaryrefslogtreecommitdiff
path: root/crosperf
diff options
context:
space:
mode:
authorGeorge Burgess IV <gbiv@google.com>2016-08-25 11:29:09 -0700
committerchrome-bot <chrome-bot@chromium.org>2016-09-08 11:48:40 -0700
commit2368b41ea869a6904ae8320ad69f1b8b9a9a3714 (patch)
treea9bd3cbad3a4a52b937708a668e9210dfddfd6c7 /crosperf
parenta3e8787abdde02848ff987b67fa9ac0b22e24b7b (diff)
downloadtoolchain-utils-2368b41ea869a6904ae8320ad69f1b8b9a9a3714.tar.gz
crosperf: Make results_report more general.
The goal of this patch is to allow us to use results_report to generate non-ChromeOS-specific results reports. It's meant to be a nop for regular crosperf runs (except that, in HTML report generation, whitespace in the HTML itself will look a bit different.) Moreover, results_report used to shuffle Experiments around, each of which, unsurprisingly, contained *tons* of data about an experiment. So, part of this patch was reducing results_report's reliance on Experiments, in favor of a new type, BenchmarkResult. Some parts of results_report still rely on Experiments, but only to provide *extra* data. The minimum amount of data needed to make a results report is contained in a BenchmarkResult, and there's a convenient API we can use to make a BenchmarkResult out of an Experiment. This patch also does a massive refactor of results_report, because lots of the code was mildly more icky than it is with this patch applied. The refactor-for-prettiness and refactor-for-BenchmarkResults kind of go hand-in-hand, so it's really hard to split them out. The only part that's not so difficult to split out is the refactor to results_report_templates, but the value of splitting that out is questionable (to me). Speaking of which, all HTML magicks that were in results_report are now in results_report_templates, and now use *actual* Templates. It -- and HTMLReportGenerator -- are hopefully more readable as a result. Next, this makes JSONResultsReport's GetReport() return a string containing JSON data, rather than calling a callback to write that string to a file. Finally, this includes a change to perf_table. Namely, its removal. It was otherwise unused, and I couldn't get it to even work, so I cleaned it up, made a perf report parser (because I couldn't get the cros_utils one to work, either...), and made sure it functions. 
If we're able to produce perf reports that cros_utils can parse, I'm happy to back my new parser out; I was primarily using it so I could be sure I didn't break HTML report generation. BUG=chromium:641098 TEST=./run_tests.sh passes Change-Id: I437de9eb39e00c9dd5c223ecd27feaaab544a6fd Reviewed-on: https://chrome-internal-review.googlesource.com/282217 Commit-Ready: George Burgess <gbiv@google.com> Tested-by: George Burgess <gbiv@google.com> Reviewed-by: Caroline Tice <cmtice@google.com>
Diffstat (limited to 'crosperf')
-rw-r--r--crosperf/experiment_runner.py34
-rwxr-xr-xcrosperf/experiment_runner_unittest.py15
-rw-r--r--crosperf/perf_files/perf.data.report.0734
-rw-r--r--crosperf/perf_table.py87
-rw-r--r--crosperf/results_report.py1061
-rw-r--r--crosperf/results_report_templates.py196
-rwxr-xr-xcrosperf/results_report_unittest.py260
7 files changed, 1687 insertions, 700 deletions
diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py
index 542a0a60..b30c8bd5 100644
--- a/crosperf/experiment_runner.py
+++ b/crosperf/experiment_runner.py
@@ -1,5 +1,9 @@
-# Copyright 2011-2015 Google Inc. All Rights Reserved.
+# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
"""The experiment runner module."""
+from __future__ import print_function
+
import getpass
import os
import shutil
@@ -22,6 +26,19 @@ from results_report import TextResultsReport
from results_report import JSONResultsReport
from schedv2 import Schedv2
+def _WriteJSONReportToFile(experiment, results_dir, json_report):
+ """Writes a JSON report to a file in results_dir."""
+ has_llvm = any('llvm' in l.compiler for l in experiment.labels)
+ compiler_string = 'llvm' if has_llvm else 'gcc'
+ board = experiment.labels[0].board
+ filename = 'report_%s_%s_%s.%s.json' % (
+ board, json_report.date, json_report.time.replace(':', '.'),
+ compiler_string)
+ fullname = os.path.join(results_dir, filename)
+ report_text = json_report.GetReport()
+ with open(fullname, 'w') as out_file:
+ out_file.write(report_text)
+
class ExperimentRunner(object):
"""ExperimentRunner Class."""
@@ -193,7 +210,7 @@ class ExperimentRunner(object):
self._UnlockAllMachines(experiment)
def _PrintTable(self, experiment):
- self.l.LogOutput(TextResultsReport(experiment).GetReport())
+ self.l.LogOutput(TextResultsReport.FromExperiment(experiment).GetReport())
def _Email(self, experiment):
# Only email by default if a new run was completed.
@@ -211,11 +228,11 @@ class ExperimentRunner(object):
label_names.append(label.name)
subject = '%s: %s' % (experiment.name, ' vs. '.join(label_names))
- text_report = TextResultsReport(experiment, True).GetReport()
+ text_report = TextResultsReport.FromExperiment(experiment, True).GetReport()
text_report += ('\nResults are stored in %s.\n' %
experiment.results_directory)
text_report = "<pre style='font-size: 13px'>%s</pre>" % text_report
- html_report = HTMLResultsReport(experiment).GetReport()
+ html_report = HTMLResultsReport.FromExperiment(experiment).GetReport()
attachment = EmailSender.Attachment('report.html', html_report)
email_to = experiment.email_to or []
email_to.append(getpass.getuser())
@@ -237,14 +254,17 @@ class ExperimentRunner(object):
self.l.LogOutput('Storing results report in %s.' % results_directory)
results_table_path = os.path.join(results_directory, 'results.html')
- report = HTMLResultsReport(experiment).GetReport()
+ report = HTMLResultsReport.FromExperiment(experiment).GetReport()
if self.json_report:
- JSONResultsReport(experiment).GetReport(results_directory)
+ json_report = JSONResultsReport.FromExperiment(experiment,
+ json_args={'indent': 2})
+ _WriteJSONReportToFile(experiment, results_directory, json_report)
+
FileUtils().WriteFile(results_table_path, report)
self.l.LogOutput('Storing email message body in %s.' % results_directory)
msg_file_path = os.path.join(results_directory, 'msg_body.html')
- text_report = TextResultsReport(experiment, True).GetReport()
+ text_report = TextResultsReport.FromExperiment(experiment, True).GetReport()
text_report += ('\nResults are stored in %s.\n' %
experiment.results_directory)
msg_body = "<pre style='font-size: 13px'>%s</pre>" % text_report
diff --git a/crosperf/experiment_runner_unittest.py b/crosperf/experiment_runner_unittest.py
index fde9f03d..6ad0c7fa 100755
--- a/crosperf/experiment_runner_unittest.py
+++ b/crosperf/experiment_runner_unittest.py
@@ -1,6 +1,8 @@
#!/usr/bin/python2
#
-# Copyright 2014 Google Inc. All Rights Reserved
+# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
"""Tests for the experiment runner module."""
from __future__ import print_function
@@ -17,11 +19,12 @@ import experiment_status
import machine_manager
import config
import test_flag
-from experiment_file import ExperimentFile
+
from experiment_factory import ExperimentFactory
-from results_report import TextResultsReport
-from results_report import HTMLResultsReport
+from experiment_file import ExperimentFile
from results_cache import Result
+from results_report import HTMLResultsReport
+from results_report import TextResultsReport
from cros_utils import command_executer
from cros_utils.email_sender import EmailSender
@@ -380,8 +383,8 @@ class ExperimentRunnerTest(unittest.TestCase):
@mock.patch.object(FileUtils, 'RmDir')
@mock.patch.object(FileUtils, 'MkDirP')
@mock.patch.object(FileUtils, 'WriteFile')
- @mock.patch.object(HTMLResultsReport, 'GetReport')
- @mock.patch.object(TextResultsReport, 'GetReport')
+ @mock.patch.object(HTMLResultsReport, 'FromExperiment')
+ @mock.patch.object(TextResultsReport, 'FromExperiment')
@mock.patch.object(Result, 'CopyResultsTo')
@mock.patch.object(Result, 'CleanUp')
def test_store_results(self, mock_cleanup, mock_copy, _mock_text_report,
diff --git a/crosperf/perf_files/perf.data.report.0 b/crosperf/perf_files/perf.data.report.0
new file mode 100644
index 00000000..910fdc44
--- /dev/null
+++ b/crosperf/perf_files/perf.data.report.0
@@ -0,0 +1,734 @@
+# To display the perf.data header info, please use --header/--header-only options.
+#
+# NOTE: this file has been manually cut into arbitrary tiny pieces. The original
+# was > 100,000 lines, and took Python a few seconds to run through. This one
+# takes almost no time, and should work just as well.
+#
+# Samples: 292K of event 'cycles'
+# Event count (approx.): 106521626675
+#
+# Overhead Samples Command Shared Object Symbol
+# ........ ............ ............... .............................. ......................
+#
+ 0.66% 3539 swapper [kernel.kallsyms] [k] 0xffffffffa4a1f1c9
+ 0.61% 1703 chrome [kernel.kallsyms] [k] 0xffffffffa4eca110
+ 0.50% 1402 chrome [kernel.kallsyms] [k] 0xffffffffa4beea47
+ 0.48% 1297 chrome perf-24199.map [.] 0x0000115bb6c35d7a
+ 0.47% 1286 chrome perf-24199.map [.] 0x0000115bb7ba9b54
+ 0.42% 1149 lsusb lsusb [.] 0x0000000000010e60
+ 0.37% 1029 chrome chrome [.] 0x0000000000e45a2b
+ 0.37% 991 chrome perf-24199.map [.] 0x0000115bb6c35d72
+ 0.28% 762 chrome perf-24199.map [.] 0x0000115bb6c35d76
+ 0.27% 735 chrome perf-24199.map [.] 0x0000115bb6aa463a
+ 0.22% 608 chrome perf-24199.map [.] 0x0000115bb7ba9ebf
+ 0.17% 468 chrome perf-24199.map [.] 0x0000115bb6a7afc3
+ 0.17% 503 chrome [kernel.kallsyms] [k] 0xffffffffa4bf4c3d
+ 0.17% 450 chrome perf-24199.map [.] 0x0000115bb6af7457
+ 0.16% 444 chrome perf-24199.map [.] 0x0000115bb7c6edd1
+ 0.16% 438 chrome perf-24199.map [.] 0x0000115bb7c6f93d
+ 0.15% 420 chrome perf-24199.map [.] 0x0000115bb6af744b
+ 0.15% 414 chrome perf-24199.map [.] 0x0000115bb7c6fa42
+ 0.15% 405 chrome perf-24199.map [.] 0x0000115bb6af7430
+ 0.15% 398 chrome perf-24199.map [.] 0x0000115bb6af7421
+ 0.15% 396 chrome perf-24199.map [.] 0x0000115bb6af7438
+ 0.15% 396 chrome perf-24199.map [.] 0x0000115bb6af742b
+ 0.14% 437 chrome chrome [.] 0x0000000005d10b64
+ 0.14% 385 chrome perf-24199.map [.] 0x0000115bb7c6f9e5
+ 0.14% 371 chrome perf-24199.map [.] 0x0000115bb6af7418
+ 0.14% 369 chrome perf-24199.map [.] 0x0000115bb6af73f9
+ 0.14% 369 chrome perf-24199.map [.] 0x0000115bb5d21648
+ 0.13% 363 chrome perf-24199.map [.] 0x0000115bb6af7428
+ 0.13% 358 chrome perf-24199.map [.] 0x0000115bb6b80e03
+ 0.13% 343 chrome perf-24199.map [.] 0x0000115bb6af73fc
+ 0.13% 344 chrome chrome [.] 0x0000000000e55b20
+ 0.12% 338 chrome chrome [.] 0x00000000011d1cb0
+ 0.12% 317 chrome perf-24199.map [.] 0x0000115bb6aa469c
+ 0.11% 311 chrome perf-24199.map [.] 0x0000115bb6af73f6
+ 0.11% 315 chrome chrome [.] 0x0000000000e48e65
+ 0.11% 310 chrome perf-24199.map [.] 0x0000115bb6af73dc
+ 0.11% 309 chrome perf-24199.map [.] 0x0000115bb6af73cc
+ 0.11% 303 chrome perf-24199.map [.] 0x0000115bb5d21662
+ 0.11% 302 chrome perf-24199.map [.] 0x0000115bb5d29f6a
+ 0.11% 295 chrome perf-24199.map [.] 0x0000115bb6af7382
+ 0.11% 295 chrome perf-24199.map [.] 0x0000115bb6c35d1d
+ 0.11% 294 chrome perf-24199.map [.] 0x0000115bb6c35d99
+ 0.11% 293 chrome perf-24199.map [.] 0x0000115bb6c35cec
+ 0.11% 292 chrome perf-24199.map [.] 0x0000115bb6af73bc
+ 0.10% 285 chrome chrome [.] 0x0000000000e46990
+ 0.10% 283 chrome perf-24199.map [.] 0x0000115bb6af7465
+ 0.10% 282 chrome perf-24199.map [.] 0x0000115bb6aa4699
+ 0.10% 276 chrome perf-24199.map [.] 0x0000115bb6c35d2e
+ 0.10% 274 chrome perf-24199.map [.] 0x0000115bb6c35d6e
+ 0.10% 273 chrome perf-24199.map [.] 0x0000115bb6af73f0
+ 0.10% 268 chrome perf-24199.map [.] 0x0000115bb7ba9ecb
+ 0.10% 266 chrome perf-24199.map [.] 0x0000115bb6af73a1
+ 0.10% 262 chrome perf-24199.map [.] 0x0000115bb6c35d57
+ 0.09% 286 chrome [kernel.kallsyms] [k] 0xffffffffa4bef022
+ 0.09% 256 chrome chrome [.] 0x0000000000e6fa2b
+ 0.09% 249 chrome perf-24199.map [.] 0x0000115bb6c35d47
+ 0.09% 248 chrome perf-24199.map [.] 0x0000115bb6af73e6
+ 0.09% 247 chrome perf-24199.map [.] 0x0000115bb6c35d8d
+ 0.09% 240 chrome perf-24199.map [.] 0x0000115bb6a7b6e7
+ 0.09% 240 chrome perf-24199.map [.] 0x0000115bb6c35d81
+ 0.09% 233 chrome perf-24199.map [.] 0x0000115bb7ba9e8c
+ 0.09% 233 chrome perf-24199.map [.] 0x0000115bb6c35d02
+ 0.08% 230 chrome perf-24199.map [.] 0x0000115bb5d09f68
+ 0.08% 228 chrome chrome [.] 0x0000000000e45adc
+ 0.08% 232 swapper [kernel.kallsyms] [k] 0xffffffffa4dccf94
+ 0.08% 222 chrome perf-24199.map [.] 0x0000115bb7bed938
+ 0.08% 222 chrome perf-24199.map [.] 0x0000115bb5d0a372
+ 0.08% 338 python [kernel.kallsyms] [k] 0xffffffffa4eca110
+ 0.08% 218 chrome perf-24199.map [.] 0x0000115bb7ba9b5d
+ 0.08% 215 chrome perf-24199.map [.] 0x0000115bb7ba9ea8
+ 0.08% 246 python [kernel.kallsyms] [k] 0xffffffffa4ad6f19
+ 0.08% 216 swapper [kernel.kallsyms] [k] 0xffffffffa4dccfa1
+ 0.08% 206 lsusb lsusb [.] 0x0000000000010e63
+ 0.08% 207 chrome chrome [.] 0x0000000000e4596c
+ 0.07% 204 chrome perf-24199.map [.] 0x0000115bb5d29dd4
+ 0.07% 202 chrome perf-24199.map [.] 0x0000115bb6b25330
+ 0.07% 199 chrome perf-24199.map [.] 0x0000115bb6b25338
+ 0.07% 198 chrome perf-24199.map [.] 0x0000115bb5d1726d
+ 0.07% 194 chrome perf-24199.map [.] 0x0000115bb6a7b07c
+ 0.07% 214 chrome chrome [.] 0x0000000005d10e5e
+ 0.07% 187 chrome perf-24199.map [.] 0x0000115bb7ba9b69
+ 0.07% 188 chrome perf-24199.map [.] 0x0000115bb5d1728e
+ 0.07% 187 chrome perf-24199.map [.] 0x0000115bb6b80dfe
+ 0.07% 179 chrome perf-24199.map [.] 0x0000115bb7bed940
+ 0.07% 179 chrome perf-24199.map [.] 0x0000115bb5d0a36e
+ 0.06% 176 chrome chrome [.] 0x0000000000e75fe4
+ 0.06% 181 chrome chrome [.] 0x00000000023fd480
+ 0.06% 172 chrome perf-24199.map [.] 0x0000115bb6af73e9
+ 0.06% 170 chrome perf-24199.map [.] 0x0000115bb6a7b6fe
+ 0.06% 177 swapper [kernel.kallsyms] [k] 0xffffffffa4dccf9b
+ 0.06% 168 chrome chrome [.] 0x0000000000e45aff
+ 0.06% 166 chrome perf-24199.map [.] 0x0000115bb6b25340
+ 0.06% 175 chrome [kernel.kallsyms] [k] 0xffffffffa4ac31c3
+ 0.06% 163 chrome chrome [.] 0x0000000000e4fcb8
+ 0.06% 160 chrome perf-24199.map [.] 0x0000115bb6a7afbb
+ 0.06% 160 chrome chrome [.] 0x0000000000e54d5c
+ 0.06% 156 chrome perf-24199.map [.] 0x0000115bb6a7af9f
+ 0.06% 157 chrome perf-24199.map [.] 0x0000115bb5d29daf
+ 0.06% 156 chrome perf-24199.map [.] 0x0000115bb5d21656
+ 0.06% 172 chrome chrome [.] 0x0000000005d10b5b
+ 0.06% 156 chrome perf-24199.map [.] 0x0000115bb6aa4662
+ 0.06% 155 chrome perf-24199.map [.] 0x0000115bb7bed932
+ 0.06% 155 chrome perf-24199.map [.] 0x0000115bb6b82327
+ 0.05% 149 chrome perf-24199.map [.] 0x0000115bb7ba9ede
+ 0.05% 146 chrome perf-24199.map [.] 0x0000115bb6aa45f8
+ 0.05% 145 chrome perf-24199.map [.] 0x0000115bb6aa460e
+ 0.05% 153 chrome chrome [.] 0x0000000000cb7030
+ 0.05% 142 chrome perf-24199.map [.] 0x0000115bb7ba9b18
+ 0.05% 143 chrome chrome [.] 0x0000000000f13e9c
+ 0.05% 143 chrome perf-24199.map [.] 0x0000115bb6b2530a
+ 0.05% 141 chrome chrome [.] 0x0000000000e18c45
+ 0.05% 138 chrome perf-24199.map [.] 0x0000115bb6ca5090
+ 0.05% 211 python [kernel.kallsyms] [k] 0xffffffffa4ae14fd
+ 0.05% 137 chrome perf-24199.map [.] 0x0000115bb6aa4692
+ 0.05% 137 chrome perf-24199.map [.] 0x0000115bb6aa4626
+ 0.05% 136 chrome perf-24199.map [.] 0x0000115bb7ba9ed2
+ 0.05% 196 python [kernel.kallsyms] [k] 0xffffffffa4beeac5
+ 0.05% 133 chrome perf-24199.map [.] 0x0000115bb6ca5109
+ 0.05% 132 chrome perf-24199.map [.] 0x0000115bb7ba9b42
+ 0.05% 132 chrome perf-24199.map [.] 0x0000115bb6b8230f
+ 0.05% 132 chrome perf-24199.map [.] 0x0000115bb5d215e5
+ 0.05% 131 chrome perf-24199.map [.] 0x0000115bb7c6fa0a
+ 0.05% 149 chrome libpthread-2.19.so [.] 0x000000000000b471
+ 0.05% 130 chrome perf-24199.map [.] 0x0000115bb6aa4678
+ 0.05% 133 chrome libc-2.19.so [.] 0x0000000000088b72
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6c4c692
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6a7b4bc
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb7bba146
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb7ba9e83
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb7ba9dde
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6c4c713
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6a7b197
+ 0.01% 16 keygen libfreebl3.so [.] 0x000000000005bc62
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6ae766b
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6c4c6ef
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6c4e9ef
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6c4c0ba
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6a78053
+ 0.01% 16 chrome chrome [.] 0x0000000000e73bb0
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6c36bee
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6c3979b
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6c4e93b
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6af73bf
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b814a7
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6a7b6cd
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6af73c5
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b8147d
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b8216b
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b80dc6
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6ba1724
+ 0.01% 16 chrome chrome [.] 0x000000000254788e
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b81ed9
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb5d27a01
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a09503
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a63d39
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a65090
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4dcd1c3
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a544b0
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a54f5b
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4ec8bec
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4c532e4
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4a00e4c
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4a63e67
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4ec855a
+ 0.00% 1 mtpd [kernel.kallsyms] [k] 0xffffffffa4a0cb13
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4a00dee
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a5d3a2
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a66eba
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4bea29e
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a545c4
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4a62fcf
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4cc8948
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4ec9b33
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4ec8911
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4a64bf8
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a00e4c
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a63d0c
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4bea29a
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a75623
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4a5d435
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4a546cf
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4bec12d
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a66db1
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4ec855b
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a6394d
+ 0.00% 1 dbus-daemon [kernel.kallsyms] [k] 0xffffffffa4ded832
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a638c4
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a1fc16
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a75810
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a92368
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a23893
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4a00e17
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a679aa
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a6e743
+ 0.00% 1 disks  [.] 0x00000000000e9fc7
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a55032
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a58dc9
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a6646c
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a65163
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4ec84f8
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4a54e31
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a63e17
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4ec8435
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4bf4d14
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a3909e
+ 0.00% 1 dbus-daemon [kernel.kallsyms] [k] 0xffffffffa4bef0f0
+ 0.00% 1 disks  [.] 0x0000000000082e08
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4dce5a5
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a66a5c
+ 0.00% 1 rsync [kernel.kallsyms] [k] 0xffffffffa4bbaa3c
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a1feea
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a5de3a
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a38e6b
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a2cb16
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a7a32f
+ 0.00% 5 perf [kernel.kallsyms] [k] 0xffffffffa4a13e63
+
+
+# Samples: 266K of event 'instructions'
+# Event count (approx.): 154627854320
+#
+# Overhead Samples Command Shared Object Symbol
+# ........ ............ ............... .......................... ......................
+#
+ 1.65% 2882 chrome perf-24199.map [.] 0x0000115bb6c35d7a
+ 0.67% 987 chrome perf-24199.map [.] 0x0000115bb7ba9b54
+ 0.51% 663 chrome perf-24199.map [.] 0x0000115bb6af7457
+ 0.45% 592 chrome perf-24199.map [.] 0x0000115bb6af744b
+ 0.45% 660 chrome perf-24199.map [.] 0x0000115bb7ba9ebf
+ 0.44% 581 chrome perf-24199.map [.] 0x0000115bb6af7438
+ 0.39% 576 chrome perf-24199.map [.] 0x0000115bb7c6f9e5
+ 0.37% 488 chrome perf-24199.map [.] 0x0000115bb6af7430
+ 0.34% 499 chrome perf-24199.map [.] 0x0000115bb7c6f93d
+ 0.33% 575 chrome perf-24199.map [.] 0x0000115bb6c35d81
+ 0.33% 573 chrome perf-24199.map [.] 0x0000115bb6c35d99
+ 0.32% 420 chrome perf-24199.map [.] 0x0000115bb6af742b
+ 0.30% 391 chrome perf-24199.map [.] 0x0000115bb6af7465
+ 0.29% 503 chrome perf-24199.map [.] 0x0000115bb6c35d76
+ 0.29% 377 chrome perf-24199.map [.] 0x0000115bb6af73f0
+ 0.28% 492 chrome perf-24199.map [.] 0x0000115bb6a7afc3
+ 0.28% 373 chrome perf-24199.map [.] 0x0000115bb6af7428
+ 0.27% 361 chrome perf-24199.map [.] 0x0000115bb6af7382
+ 0.27% 361 chrome perf-24199.map [.] 0x0000115bb6af73f9
+ 0.27% 360 chrome perf-24199.map [.] 0x0000115bb6af73a1
+ 0.27% 464 chrome perf-24199.map [.] 0x0000115bb6c35d8d
+ 0.24% 318 chrome perf-24199.map [.] 0x0000115bb6af7421
+ 0.24% 425 chrome perf-24199.map [.] 0x0000115bb6b80e03
+ 0.24% 314 chrome perf-24199.map [.] 0x0000115bb6af73f6
+ 0.24% 345 chrome perf-24199.map [.] 0x0000115bb7c6fa42
+ 0.23% 338 chrome perf-24199.map [.] 0x0000115bb7c6edd1
+ 0.21% 315 chrome perf-24199.map [.] 0x0000115bb7ba9ecb
+ 0.21% 279 chrome perf-24199.map [.] 0x0000115bb6af7418
+ 0.21% 277 chrome perf-24199.map [.] 0x0000115bb6af73bc
+ 0.21% 304 chrome perf-24199.map [.] 0x0000115bb7ba9ea8
+ 0.20% 534 chrome perf-24199.map [.] 0x0000115bb5d21648
+ 0.18% 238 chrome perf-24199.map [.] 0x0000115bb6af73e6
+ 0.17% 227 chrome perf-24199.map [.] 0x0000115bb6af73fc
+ 0.17% 241 chrome perf-24199.map [.] 0x0000115bb7ba9b5d
+ 0.16% 240 chrome perf-24199.map [.] 0x0000115bb7ba9b10
+ 0.16% 285 chrome perf-24199.map [.] 0x0000115bb6a7b07c
+ 0.15% 205 chrome perf-24199.map [.] 0x0000115bb6af73dc
+ 0.15% 290 chrome perf-24199.map [.] 0x0000115bb6aa460e
+ 0.15% 223 chrome perf-24199.map [.] 0x0000115bb7ba9b69
+ 0.15% 194 chrome perf-24199.map [.] 0x0000115bb6af73cc
+ 0.15% 191 chrome perf-24199.map [.] 0x0000115bb6af974c
+ 0.14% 185 chrome perf-24199.map [.] 0x0000115bb6af7461
+ 0.14% 204 chrome perf-24199.map [.] 0x0000115bb7bed940
+ 0.14% 368 chrome perf-24199.map [.] 0x0000115bb5d21662
+ 0.14% 230 chrome perf-24199.map [.] 0x0000115bb6a7b412
+ 0.14% 199 chrome perf-24199.map [.] 0x0000115bb7ba9ed2
+ 0.13% 197 chrome perf-24199.map [.] 0x0000115bb7ba9ede
+ 0.13% 255 chrome perf-24199.map [.] 0x0000115bb6aa463a
+ 0.13% 191 chrome perf-24199.map [.] 0x0000115bb7ba9e8c
+ 0.13% 187 chrome perf-24199.map [.] 0x0000115bb7c6f9cb
+ 0.13% 242 chrome perf-24199.map [.] 0x0000115bb6aa4678
+ 0.13% 165 chrome perf-24199.map [.] 0x0000115bb6af975e
+ 0.13% 222 chrome perf-24199.map [.] 0x0000115bb6b80dfe
+ 0.12% 163 chrome perf-24199.map [.] 0x0000115bb6af9746
+ 0.12% 234 chrome perf-24199.map [.] 0x0000115bb6aa4692
+ 0.12% 178 chrome perf-24199.map [.] 0x0000115bb7c6ed55
+ 0.12% 157 chrome perf-24199.map [.] 0x0000115bb6af96f4
+ 0.12% 154 chrome perf-24199.map [.] 0x0000115bb6af9737
+ 0.12% 173 chrome perf-24199.map [.] 0x0000115bb7c6fa73
+ 0.12% 171 chrome perf-24199.map [.] 0x0000115bb7c6fa5e
+ 0.12% 200 chrome perf-24199.map [.] 0x0000115bb6a7afbb
+ 0.12% 199 chrome perf-24199.map [.] 0x0000115bb6a7b6fe
+ 0.12% 169 chrome perf-24199.map [.] 0x0000115bb7c6f8f2
+ 0.11% 148 chrome perf-24199.map [.] 0x0000115bb6af737e
+ 0.11% 205 chrome libc-2.19.so [.] 0x0000000000088b72
+ 0.11% 212 chrome perf-24199.map [.] 0x0000115bb6aa469c
+ 0.11% 160 chrome perf-24199.map [.] 0x0000115bb7c6f8d0
+ 0.11% 204 chrome perf-24199.map [.] 0x0000115bb6aa4626
+ 0.11% 160 chrome perf-24199.map [.] 0x0000115bb7bed932
+ 0.11% 154 chrome perf-24199.map [.] 0x0000115bb7ba9b18
+ 0.11% 137 chrome perf-24199.map [.] 0x0000115bb6af972f
+ 0.10% 153 chrome perf-24199.map [.] 0x0000115bb7c6f9f9
+ 0.10% 136 chrome perf-24199.map [.] 0x0000115bb6af7394
+ 0.10% 238 chrome chrome [.] 0x0000000000e45adc
+ 0.10% 131 chrome perf-24199.map [.] 0x0000115bb6af977d
+ 0.10% 146 chrome perf-24199.map [.] 0x0000115bb7c6f907
+ 0.10% 171 chrome perf-24199.map [.] 0x0000115bb6a7b6e7
+ 0.10% 144 chrome perf-24199.map [.] 0x0000115bb7c6f9e1
+ 0.10% 128 chrome perf-24199.map [.] 0x0000115bb6af9732
+ 0.10% 256 chrome perf-24199.map [.] 0x0000115bb5d21656
+ 0.10% 142 chrome perf-24199.map [.] 0x0000115bb7c6f9b1
+ 0.10% 181 chrome perf-24199.map [.] 0x0000115bb6aa464e
+ 0.10% 147 chrome perf-24199.map [.] 0x0000115bb7bf5700
+ 0.10% 181 chrome perf-24199.map [.] 0x0000115bb6aa4662
+ 0.09% 161 chrome perf-24199.map [.] 0x0000115bb6a7af9f
+ 0.09% 136 chrome perf-24199.map [.] 0x0000115bb7c6f216
+ 0.09% 159 chrome perf-24199.map [.] 0x0000115bb6a7b377
+ 0.09% 228 chrome perf-24199.map [.] 0x0000115bb5d0a372
+ 0.09% 118 chrome perf-24199.map [.] 0x0000115bb6af9769
+ 0.09% 117 chrome perf-24199.map [.] 0x0000115bb6af96f2
+ 0.09% 336 chrome perf-24199.map [.] 0x0000115bb5d1726d
+ 0.09% 193 chrome chrome [.] 0x0000000000e76562
+ 0.09% 117 chrome perf-24199.map [.] 0x0000115bb6add6ed
+ 0.09% 219 chrome chrome [.] 0x0000000000e45a2b
+ 0.09% 148 chrome perf-24199.map [.] 0x0000115bb6a7afc9
+ 0.08% 111 chrome perf-24199.map [.] 0x0000115bb6af972a
+ 0.08% 158 chrome perf-24199.map [.] 0x0000115bb6aa45f8
+ 0.08% 145 chrome perf-24199.map [.] 0x0000115bb6a7afed
+ 0.08% 112 chrome perf-24199.map [.] 0x0000115bb6af73b8
+ 0.08% 107 chrome perf-24199.map [.] 0x0000115bb6af9707
+ 0.08% 118 chrome perf-24199.map [.] 0x0000115bb7ba9b3a
+ 0.08% 138 chrome perf-24199.map [.] 0x0000115bb6a785c9
+ 0.08% 117 chrome perf-24199.map [.] 0x0000115bb7c6ed3e
+ 0.08% 142 chrome perf-24199.map [.] 0x0000115bb6b81e10
+ 0.08% 106 chrome perf-24199.map [.] 0x0000115bb6af73e3
+ 0.08% 154 chrome chrome [.] 0x0000000000eb5472
+ 0.08% 116 chrome perf-24199.map [.] 0x0000115bb7bed9b6
+ 0.08% 287 chrome perf-24199.map [.] 0x0000115bb5d29f6a
+ 0.08% 199 chrome chrome [.] 0x0000000000e6fa2b
+ 0.08% 218 chrome chrome [.] 0x0000000000e55b20
+ 0.08% 110 chrome perf-24199.map [.] 0x0000115bb7c6f925
+ 0.07% 112 chrome perf-24199.map [.] 0x0000115bb7bb53c2
+ 0.07% 107 chrome perf-24199.map [.] 0x0000115bb7c6f92d
+ 0.07% 155 chrome perf-24199.map [.] 0x0000115bb5d2640e
+ 0.07% 127 chrome perf-24199.map [.] 0x0000115bb6c35d72
+ 0.07% 124 chrome perf-24199.map [.] 0x0000115bb6a78284
+ 0.07% 107 chrome perf-24199.map [.] 0x0000115bb7bed990
+ 0.07% 421 python [kernel.kallsyms] [k] 0xffffffffa4ae14fd
+ 0.07% 93 chrome perf-24199.map [.] 0x0000115bb6af73e9
+ 0.07% 104 chrome perf-24199.map [.] 0x0000115bb7bed938
+ 0.07% 158 chrome perf-24199.map [.] 0x0000115bb6b57933
+ 0.07% 100 chrome perf-24199.map [.] 0x0000115bb7c6f96b
+ 0.07% 123 chrome perf-24199.map [.] 0x0000115bb6b8230f
+ 0.07% 101 chrome perf-24199.map [.] 0x0000115bb7bed9bf
+ 0.07% 102 chrome perf-24199.map [.] 0x0000115bb7bb53df
+ 0.07% 144 chrome perf-24199.map [.] 0x0000115bb6b2530a
+ 0.07% 91 chrome perf-24199.map [.] 0x0000115bb6add73f
+ 0.07% 89 chrome perf-24199.map [.] 0x0000115bb6add762
+ 0.07% 98 chrome perf-24199.map [.] 0x0000115bb7bed926
+ 0.06% 85 chrome perf-24199.map [.] 0x0000115bb6af96a7
+ 0.06% 129 chrome perf-24199.map [.] 0x0000115bb6aa4699
+ 0.06% 112 chrome perf-24199.map [.] 0x0000115bb6c35d47
+ 0.06% 295 chrome chrome [.] 0x00000000011d1cb0
+ 0.06% 114 chrome perf-24199.map [.] 0x0000115bb6b822b1
+ 0.06% 94 chrome perf-24199.map [.] 0x0000115bb7bed99d
+ 0.06% 94 chrome perf-24199.map [.] 0x0000115bb7bb53f2
+ 0.06% 92 chrome perf-24199.map [.] 0x0000115bb7ba9b86
+ 0.06% 92 chrome perf-24199.map [.] 0x0000115bb7ba9b29
+ 0.06% 88 chrome perf-24199.map [.] 0x0000115bb6e14f87
+ 0.06% 80 chrome perf-24199.map [.] 0x0000115bb6af9722
+ 0.06% 109 chrome perf-24199.map [.] 0x0000115bb6b8238a
+ 0.06% 93 chrome perf-24199.map [.] 0x0000115bb7ba71a3
+ 0.06% 80 chrome perf-24199.map [.] 0x0000115bb6af747f
+ 0.06% 107 chrome perf-24199.map [.] 0x0000115bb6b80e22
+ 0.06% 104 chrome perf-24199.map [.] 0x0000115bb6a72466
+ 0.06% 78 chrome perf-24199.map [.] 0x0000115bb6add757
+ 0.06% 80 chrome perf-24199.map [.] 0x0000115bb6add745
+ 0.06% 102 chrome perf-24199.map [.] 0x0000115bb6c35cec
+ 0.06% 202 chrome chrome [.] 0x0000000000f13e9c
+ 0.06% 166 chrome chrome [.] 0x0000000000e46989
+ 0.06% 318 python [kernel.kallsyms] [k] 0xffffffffa4ad6f19
+ 0.06% 83 chrome perf-24199.map [.] 0x0000115bb7c6f97d
+ 0.06% 77 chrome perf-24199.map [.] 0x0000115bb6add730
+ 0.06% 82 chrome perf-24199.map [.] 0x0000115bb7c6f992
+ 0.06% 132 chrome perf-24199.map [.] 0x0000115bb5d417f2
+ 0.06% 76 chrome perf-24199.map [.] 0x0000115bb6add728
+ 0.06% 72 chrome perf-24199.map [.] 0x0000115bb6af9702
+ 0.06% 94 chrome perf-24199.map [.] 0x0000115bb6c4ea18
+ 0.06% 321 chrome [kernel.kallsyms] [k] 0xffffffffa4bf4c3d
+ 0.06% 139 chrome perf-24199.map [.] 0x0000115bb5d21666
+ 0.06% 72 chrome perf-24199.map [.] 0x0000115bb6af9684
+ 0.06% 78 chrome perf-24199.map [.] 0x0000115bb6add6a0
+ 0.05% 72 chrome perf-24199.map [.] 0x0000115bb6af73c8
+ 0.05% 137 chrome perf-24199.map [.] 0x0000115bb5d0a36e
+ 0.05% 97 chrome perf-24199.map [.] 0x0000115bb6b82327
+ 0.05% 70 chrome perf-24199.map [.] 0x0000115bb6b06516
+ 0.05% 137 chrome chrome [.] 0x0000000000e6fa1c
+ 0.05% 132 chrome perf-24199.map [.] 0x0000115bb5d21629
+ 0.05% 130 chrome chrome [.] 0x0000000000e54d5c
+ 0.05% 122 chrome chrome [.] 0x0000000000e48e5f
+ 0.05% 839 lsusb lsusb [.] 0x0000000000010e60
+ 0.05% 133 chrome perf-24199.map [.] 0x0000115bb5d215dd
+ 0.05% 130 chrome perf-24199.map [.] 0x0000115bb5d215c9
+ 0.05% 130 chrome perf-24199.map [.] 0x0000115bb78d3895
+ 0.05% 76 chrome perf-24199.map [.] 0x0000115bb7c6f174
+ 0.01% 46 chrome chrome [.] 0x0000000005d109a9
+ 0.01% 15 chrome perf-24199.map [.] 0x0000115bb6aa5665
+ 0.01% 17 chrome chrome [.] 0x0000000000ec6b13
+ 0.01% 18 chrome perf-24199.map [.] 0x0000115bb5d417ea
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6c3581e
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6af94fb
+ 0.01% 29 chrome libc-2.19.so [.] 0x000000000009a8d5
+ 0.01% 25 chrome chrome [.] 0x0000000000e57849
+ 0.01% 40 chrome chrome [.] 0x0000000005d1101d
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb6e1502e
+ 0.01% 20 chrome perf-24199.map [.] 0x0000115bb7c9f11d
+ 0.01% 18 chrome perf-24199.map [.] 0x0000115bb6b577c8
+ 0.01% 30 chrome [kernel.kallsyms] [k] 0xffffffffa4acff4a
+ 0.01% 38 python libpython2.7.so.1.0 [.] 0x000000000011ad14
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6b8221e
+ 0.01% 59 chrome i965_dri.so [.] 0x0000000000483802
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6a7d197
+ 0.01% 19 chrome perf-24199.map [.] 0x0000115bb6b51f6a
+ 0.01% 31 chrome libpthread-2.19.so [.] 0x0000000000009e71
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6c4eac4
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb7bea5c2
+ 0.01% 38 chrome chrome [.] 0x0000000000d3e821
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb7c78e2c
+ 0.01% 22 chrome perf-24199.map [.] 0x0000115bb5d67f0f
+ 0.01% 30 chrome chrome [.] 0x0000000000ec4b08
+ 0.01% 15 chrome perf-24199.map [.] 0x0000115bb5d2793b
+ 0.01% 28 chrome chrome [.] 0x0000000000f38669
+ 0.01% 43 chrome chrome [.] 0x0000000001bfb240
+ 0.01% 20 chrome perf-24199.map [.] 0x0000115bb5d09f8e
+ 0.01% 30 chrome perf-24199.map [.] 0x0000115bb5e3f9b2
+ 0.01% 18 chrome perf-24199.map [.] 0x0000115bb5d072d7
+ 0.01% 32 chrome ld-2.19.so [.] 0x000000000000bcc7
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6a786b6
+ 0.01% 36 chrome chrome [.] 0x00000000024366f0
+ 0.01% 17 chrome perf-24199.map [.] 0x0000115bb6b5a6bf
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6a78024
+ 0.01% 16 sshd sshd [.] 0x0000000000075c37
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6adec72
+ 0.01% 15 chrome perf-24199.map [.] 0x0000115bb6ba0a78
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6a7b373
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6c3256e
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb6e14f74
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6af95ac
+ 0.01% 26 python libpython2.7.so.1.0 [.] 0x0000000000095f32
+ 0.01% 19 chrome perf-24199.map [.] 0x0000115bb5d215e1
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6adeb55
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b2c7db
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb7ba9e3d
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6af5a62
+ 0.01% 25 chrome chrome [.] 0x0000000000e1654e
+ 0.01% 24 chrome perf-24199.map [.] 0x0000115bb6ca75f2
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb7c6ef16
+ 0.01% 17 chrome chrome [.] 0x0000000000ed5a9a
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6b810f4
+ 0.01% 30 chrome chrome [.] 0x0000000000e543e9
+ 0.01% 17 chrome perf-24199.map [.] 0x0000115bb6b5a5e2
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6ab0afa
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb7c6f24e
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b2ca81
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6aa471b
+ 0.01% 37 chrome chrome [.] 0x0000000000eb4d31
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb6e1f80f
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb7c6f5d9
+ 0.01% 31 chrome chrome [.] 0x0000000000e18aa9
+ 0.01% 18 chrome chrome [.] 0x0000000000e4907d
+ 0.01% 58 chrome i965_dri.so [.] 0x0000000000483806
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6a7b69b
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6b81edd
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb7c6f5b3
+ 0.01% 41 chrome chrome [.] 0x0000000000d1e411
+ 0.01% 17 chrome perf-24199.map [.] 0x0000115bb7b0a72f
+ 0.01% 23 lsof [kernel.kallsyms] [k] 0xffffffffa4bf4c3d
+ 0.01% 16 chrome chrome [.] 0x0000000000e7656e
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b24dbd
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6aa5672
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb7ba9d29
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6cb5bbd
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b2ca2a
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb6a72710
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6c35d84
+ 0.01% 19 chrome chrome [.] 0x0000000000e45ac6
+ 0.01% 38 python libpython2.7.so.1.0 [.] 0x00000000000a8c9e
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b2c989
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b2ca71
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6aa5666
+ 0.01% 20 chrome perf-24199.map [.] 0x0000115bb5d2165e
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6aa4729
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6b80feb
+ 0.01% 27 python libpython2.7.so.1.0 [.] 0x00000000000a8bf8
+ 0.01% 23 chrome chrome [.] 0x0000000000eea37a
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb7c6f25f
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb6e1fc56
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b315b4
+ 0.01% 24 chrome chrome [.] 0x0000000000f16081
+ 0.01% 26 chrome [kernel.kallsyms] [k] 0xffffffffa4ad9a67
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb5d2177a
+ 0.01% 30 chrome libc-2.19.so [.] 0x000000000009a991
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6a7b687
+ 0.01% 32 chrome chrome [.] 0x0000000000f9f3c0
+ 0.01% 25 chrome chrome [.] 0x0000000000f13e73
+ 0.01% 17 chrome chrome [.] 0x0000000000e48e41
+ 0.01% 25 chrome chrome [.] 0x0000000000f19e4e
+ 0.01% 17 chrome perf-24199.map [.] 0x0000115bb6b557dd
+ 0.01% 28 chrome chrome [.] 0x0000000000e18b99
+ 0.01% 17 chrome perf-24199.map [.] 0x0000115bb7c9f0c6
+ 0.01% 30 python libpython2.7.so.1.0 [.] 0x00000000000e8dc7
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6ad9163
+ 0.01% 17 chrome perf-24199.map [.] 0x0000115bb6b5c652
+ 0.01% 18 chrome chrome [.] 0x0000000000e45abc
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb6a7d0d7
+ 0.01% 37 chrome chrome [.] 0x0000000000ce74f6
+ 0.01% 15 chrome chrome [.] 0x0000000000f13df8
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6bc7d01
+ 0.01% 20 chrome perf-24199.map [.] 0x0000115bb5d265fa
+ 0.01% 38 chrome chrome [.] 0x00000000011dc830
+ 0.01% 27 chrome perf-24199.map [.] 0x0000115bb5d17263
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb6c36bc0
+ 0.01% 24 chrome chrome [.] 0x0000000000e18b9d
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6ad4877
+ 0.01% 27 chrome chrome [.] 0x0000000000f15f92
+ 0.01% 31 chrome chrome [.] 0x0000000000cf4525
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6aded45
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb6c36bee
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6af5ac8
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6aac55f
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb5d07a9c
+ 0.01% 15 chrome chrome [.] 0x0000000000e520df
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6b80f05
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6ac669f
+ 0.01% 29 chrome libc-2.19.so [.] 0x000000000008e2bb
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6b81f43
+ 0.01% 32 chrome ld-2.19.so [.] 0x000000000000bca3
+ 0.01% 23 chrome perf-24199.map [.] 0x0000115bb6ca738d
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6e1fb74
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6abcae7
+ 0.01% 33 chrome chrome [.] 0x0000000000e10fd9
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb7beaa06
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6e150b1
+ 0.01% 27 chrome perf-24199.map [.] 0x0000115bb7e1e828
+ 0.01% 23 chrome chrome [.] 0x0000000000f1608a
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb6a7b4f3
+ 0.01% 18 chrome perf-24199.map [.] 0x0000115bb6b57760
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb7bf5036
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6b814d1
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6ba4ea1
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6aaca3e
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb7bf5678
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6a7d202
+ 0.01% 24 chrome ld-2.19.so [.] 0x000000000000967a
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb706289d
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b252a0
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6e1fbb7
+ 0.01% 23 chrome chrome [.] 0x0000000000f15f6a
+ 0.01% 27 chrome chrome [.] 0x0000000000f19e57
+ 0.01% 20 chrome chrome [.] 0x0000000000e5752d
+ 0.01% 14 chrome perf-24199.map [.] 0x0000115bb6a9f27e
+ 0.01% 24 ps [kernel.kallsyms] [k] 0xffffffffa4bee3ef
+ 0.01% 18 chrome chrome [.] 0x0000000000ed5ad3
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb6c4e9e3
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b5a2e0
+ 0.01% 25 chrome chrome [.] 0x0000000000eb696b
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6b8213f
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb6c35d37
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb6a7b399
+ 0.01% 21 chrome chrome [.] 0x0000000000e4722f
+ 0.01% 20 chrome chrome [.] 0x0000000000dbec48
+ 0.01% 15 chrome perf-24199.map [.] 0x0000115bb6b358f7
+ 0.01% 13 chrome perf-24199.map [.] 0x0000115bb6b8215a
+ 0.01% 21 chrome libc-2.19.so [.] 0x000000000008d855
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6af5abf
+ 0.01% 36 chrome chrome [.] 0x0000000005d10df6
+ 0.01% 16 chrome perf-24199.map [.] 0x0000115bb6b5c716
+ 0.01% 18 chrome chrome [.] 0x0000000000e45c20
+ 0.01% 26 chrome chrome [.] 0x0000000000f1606e
+ 0.01% 22 chrome chrome [.] 0x0000000000f1419e
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb7c8ad93
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb6a784f8
+ 0.01% 17 chrome perf-24199.map [.] 0x0000115bb7cdc597
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb6b810d3
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb7ba9bae
+ 0.01% 19 chrome chrome [.] 0x0000000000e48dd8
+ 0.01% 19 chrome chrome [.] 0x0000000000eb4de8
+ 0.01% 10 chrome perf-24199.map [.] 0x0000115bb6e41577
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb6b7d7f6
+ 0.01% 12 chrome perf-24199.map [.] 0x0000115bb6b794d1
+ 0.01% 9 chrome perf-24199.map [.] 0x0000115bb6af95d5
+ 0.01% 17 chrome chrome [.] 0x0000000000e6fa00
+ 0.01% 11 chrome perf-24199.map [.] 0x0000115bb7c8b961
+ 0.01% 15 chrome perf-24199.map [.] 0x0000115bb6b315c5
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a549b6
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a923d2
+ 0.00% 1 python [kernel.kallsyms] [k] 0xffffffffa4b03afa
+ 0.00% 1 tcsd [kernel.kallsyms] [k] 0xffffffffa4a5c294
+ 0.00% 1 kworker/0:1H [kernel.kallsyms] [k] 0xffffffffa4cef732
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a00c88
+ 0.00% 1 kworker/u:5 [kernel.kallsyms] [k] 0xffffffffa4a8ac8a
+ 0.00% 1 lsof [kernel.kallsyms] [k] 0xffffffffa4a51a55
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a3a27c
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a58dff
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a66016
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a92359
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a93347
+ 0.00% 1 sh libc-2.19.so [.] 0x0000000000082a06
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a38e4d
+ 0.00% 1 powerd  [.] 0x00000000000a0930
+ 0.00% 1 python [kernel.kallsyms] [k] 0xffffffffa4ec8d72
+ 0.00% 1 powerd [kernel.kallsyms] [k] 0xffffffffa4a408c8
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4ab35a2
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a5fda6
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a5fed2
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a543e8
+ 0.00% 1 dhcpcd  [.] 0x00000000000280a3
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a54db9
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a00e21
+ 0.00% 1 python [kernel.kallsyms] [k] 0xffffffffa4a65ea8
+ 0.00% 1 python [kernel.kallsyms] [k] 0xffffffffa4adde99
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a5655b
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a40550
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a54a89
+ 0.00% 1 python libpython2.7.so.1.0 [.] 0x000000000008f1be
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a64ade
+ 0.00% 1 python [kernel.kallsyms] [k] 0xffffffffa4bbaa30
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a00c06
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a66bfe
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a63bf9
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a5fe01
+ 0.00% 1 tcsd [kernel.kallsyms] [k] 0xffffffffa4a5f308
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4be56c1
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a40596
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a9381f
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a757c5
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a65ea3
+ 0.00% 1 rsyslogd [kernel.kallsyms] [k] 0xffffffffa4bee6b7
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a2d72a
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a63d00
+ 0.00% 1 chrome chrome [.] 0x0000000000cd3879
+ 0.00% 1 python libc-2.19.so [.] 0x00000000000e9fc7
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a58e03
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a65f0b
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4ec89d8
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4ec90d6
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a548c8
+ 0.00% 1 python [kernel.kallsyms] [k] 0xffffffffa4a38453
+ 0.00% 1 python [kernel.kallsyms] [k] 0xffffffffa4a66335
+ 0.00% 1 chrome chrome [.] 0x0000000000cc75d8
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a2058f
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4e0f293
+ 0.00% 1 kworker/u:0 [ath9k] [k] 0x0000000000008126
+ 0.00% 1 jbd2/sda1-8 [kernel.kallsyms] [k] 0xffffffffa4a08ab7
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a2388e
+ 0.00% 1 powerd  [.] 0x000000000007dca0
+ 0.00% 1 python [kernel.kallsyms] [k] 0xffffffffa4a2c97c
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a0659b
+ 0.00% 1 chrome chrome [.] 0x0000000000d0e3bd
+ 0.00% 1 cryptohomed  [.] 0x000000000001dcd0
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4bec0c0
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a62145
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4bf3318
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a5fd33
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a2d71a
+ 0.00% 1 kworker/u:0 [kernel.kallsyms] [k] 0xffffffffa4ad29f3
+ 0.00% 1 tcsd [kernel.kallsyms] [k] 0xffffffffa4a67997
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a548a8
+ 0.00% 1 python libc-2.19.so [.] 0x0000000000082bb3
+ 0.00% 1 dhcpcd  [.] 0x0000000000024f56
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4bf334e
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a65e88
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a41578
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a65a87
+ 0.00% 1 python [kernel.kallsyms] [k] 0xffffffffa4a66d95
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4ec89cf
+ 0.00% 1 powerd [kernel.kallsyms] [k] 0xffffffffa4c6a22e
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a91b84
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a4073d
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a408c2
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a66a84
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a61b9f
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4ec8c7f
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a40a1d
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4ec8b7c
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a6646c
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a3a6d4
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a41623
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a75d98
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4bf32fd
+ 0.00% 1 chrome chrome [.] 0x00000000022ad6a0
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a62152
+ 0.00% 1 dbus-daemon [kernel.kallsyms] [k] 0xffffffffa4b0d38b
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a58d66
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4ecb3ed
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a54eca
+ 0.00% 1 periodic_schedu [kernel.kallsyms] [k] 0xffffffffa4a92b1a
+ 0.00% 1 periodic_schedu [kernel.kallsyms] [k] 0xffffffffa4bec2e9
+ 0.00% 1 sh [kernel.kallsyms] [k] 0xffffffffa4a92a06
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a65b67
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a66ce5
+ 0.00% 1 python [kernel.kallsyms] [k] 0xffffffffa4b06121
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a54418
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a65cc5
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a547ee
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a64c99
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a5adda
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a923ac
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a91ec6
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a63269
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a62f53
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a621ff
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4a65d8b
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a5de41
+ 0.00% 1 chrome [kernel.kallsyms] [k] 0xffffffffa4a56f3d
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4be56c6
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a5d828
+ 0.00% 1 rcu_sched [kernel.kallsyms] [k] 0xffffffffa4a65ccf
+ 0.00% 1 sh [kernel.kallsyms] [k] 0xffffffffa4b06cb5
+ 0.00% 1 perf [kernel.kallsyms] [k] 0xffffffffa4ab0a38
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a1ff32
+ 0.00% 1 swapper [kernel.kallsyms] [k] 0xffffffffa4a7aac4
+ 0.00% 8 swapper [kernel.kallsyms] [k] 0xffffffffa4a0ee03
+ 0.00% 4 perf [kernel.kallsyms] [k] 0xffffffffa4a0ee03
+
+#
+# (For a higher level overview, try: perf report --sort comm,dso)
+#
diff --git a/crosperf/perf_table.py b/crosperf/perf_table.py
deleted file mode 100644
index 5a565486..00000000
--- a/crosperf/perf_table.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright 2012 Google Inc. All Rights Reserved.
-"""Parse perf report data for tabulator."""
-
-from __future__ import print_function
-
-import os
-
-from cros_utils import perf_diff
-
-
-def ParsePerfReport():
- """It should return a dict."""
-
- return {'cycles': {'foo': 10,
- 'bar': 20},
- 'cache_miss': {'foo': 20,
- 'bar': 10}}
-
-
-class PerfTable(object):
- """The class to generate dicts for tabulator."""
-
- def __init__(self, experiment, label_names):
- self._experiment = experiment
- self._label_names = label_names
- self.perf_data = {}
- self.GenerateData()
-
- # {benchmark:{perf_event1:[[{func1:number, func2:number,
- # rows_to_show: number}
- # {func1: number, func2: number
- # rows_to_show: number}]], ...},
- # benchmark2:...}
- # The rows_to_show is temp data recording how many
- # rows have over 1% running time.
- self.row_info = {}
- self.GetRowsToShow()
-
- def GenerateData(self):
- for label in self._label_names:
- for benchmark in self._experiment.benchmarks:
- for i in range(1, benchmark.iterations + 1):
- dir_name = label + benchmark.name + str(i)
- dir_name = filter(str.isalnum, dir_name)
- perf_file = os.path.join(self._experiment.results_directory, dir_name,
- 'perf.data.report.0')
- if os.path.exists(perf_file):
- self.ReadPerfReport(perf_file, label, benchmark.name, i - 1)
-
- def ReadPerfReport(self, perf_file, label, benchmark_name, iteration):
- """Add the data from one run to the dict."""
- if os.path.isfile(perf_file):
- perf_of_run = perf_diff.GetPerfDictFromReport(perf_file)
- else:
- perf_of_run = {}
- if benchmark_name not in self.perf_data:
- self.perf_data[benchmark_name] = {}
- for event in perf_of_run:
- self.perf_data[benchmark_name][event] = []
- ben_data = self.perf_data[benchmark_name]
-
- label_index = self._label_names.index(label)
- for event in ben_data:
- while len(ben_data[event]) <= label_index:
- ben_data[event].append([])
- data_for_label = ben_data[event][label_index]
- while len(data_for_label) <= iteration:
- data_for_label.append({})
- if perf_of_run:
- data_for_label[iteration] = perf_of_run[event]
- else:
- data_for_label[iteration] = {}
-
- def GetRowsToShow(self):
- for benchmark in self.perf_data:
- if benchmark not in self.row_info:
- self.row_info[benchmark] = {}
- for event in self.perf_data[benchmark]:
- rows = 0
- for run in self.perf_data[benchmark][event]:
- for iteration in run:
- if perf_diff.ROWS_TO_SHOW in iteration:
- rows = max(iteration[perf_diff.ROWS_TO_SHOW], rows)
- # delete the temp data which stores how many rows of
- # the perf data have over 1% running time.
- del iteration[perf_diff.ROWS_TO_SHOW]
- self.row_info[benchmark][event] = rows
diff --git a/crosperf/results_report.py b/crosperf/results_report.py
index 825d1d7c..8d584f57 100644
--- a/crosperf/results_report.py
+++ b/crosperf/results_report.py
@@ -5,9 +5,11 @@
from __future__ import print_function
import datetime
+import functools
import itertools
import json
import os
+import re
from cros_utils.tabulator import AmeanResult
from cros_utils.tabulator import Cell
@@ -31,7 +33,8 @@ from update_telemetry_defaults import TelemetryDefaults
from column_chart import ColumnChart
from results_organizer import OrganizeResults
-from perf_table import PerfTable
+
+import results_report_templates as templates
def ParseChromeosImage(chromeos_image):
@@ -78,573 +81,611 @@ def ParseChromeosImage(chromeos_image):
return version, image
+def _AppendUntilLengthIs(gen, the_list, target_len):
+  """Appends to `the_list` until it is `target_len` elements long.
+
+ Uses `gen` to generate elements.
+ """
+ the_list.extend(gen() for _ in xrange(target_len - len(the_list)))
+ return the_list
+
+
+def _FilterPerfReport(event_threshold, report):
+ """Filters out entries with `< event_threshold` percent in a perf report."""
+ def filter_dict(m):
+ return {fn_name: pct for fn_name, pct in m.iteritems()
+ if pct >= event_threshold}
+ return {event: filter_dict(m) for event, m in report.iteritems()}
+
+
+class _PerfTable(object):
+ """Generates dicts from a perf table.
+
+ Dicts look like:
+ {'benchmark_name': {'perf_event_name': [LabelData]}}
+ where LabelData is a list of perf dicts, each perf dict coming from the same
+ label.
+ Each perf dict looks like {'function_name': 0.10, ...} (where 0.10 is the
+ percentage of time spent in function_name).
+ """
+
+ def __init__(self, benchmark_names_and_iterations, label_names,
+ read_perf_report, event_threshold=None):
+ """Constructor.
+
+ read_perf_report is a function that takes a label name, benchmark name, and
+ benchmark iteration, and returns a dictionary describing the perf output for
+ that given run.
+ """
+ self.event_threshold = event_threshold
+ self._label_indices = {name: i for i, name in enumerate(label_names)}
+ self.perf_data = {}
+ for label in label_names:
+ for bench_name, bench_iterations in benchmark_names_and_iterations:
+ for i in xrange(bench_iterations):
+ report = read_perf_report(label, bench_name, i)
+ self._ProcessPerfReport(report, label, bench_name, i)
+
+ def _ProcessPerfReport(self, perf_report, label, benchmark_name, iteration):
+ """Add the data from one run to the dict."""
+ perf_of_run = perf_report
+ if self.event_threshold is not None:
+ perf_of_run = _FilterPerfReport(self.event_threshold, perf_report)
+ if benchmark_name not in self.perf_data:
+ self.perf_data[benchmark_name] = {event: [] for event in perf_of_run}
+ ben_data = self.perf_data[benchmark_name]
+ label_index = self._label_indices[label]
+ for event in ben_data:
+ _AppendUntilLengthIs(list, ben_data[event], label_index + 1)
+ data_for_label = ben_data[event][label_index]
+ _AppendUntilLengthIs(dict, data_for_label, iteration + 1)
+ data_for_label[iteration] = perf_of_run[event] if perf_of_run else {}
+
+
+def _GetResultsTableHeader(ben_name, iterations):
+ benchmark_info = ('Benchmark: {0}; Iterations: {1}'
+ .format(ben_name, iterations))
+ cell = Cell()
+ cell.string_value = benchmark_info
+ cell.header = True
+ return [[cell]]
+
+
+def _ParseColumn(columns, iteration):
+ new_column = []
+ for column in columns:
+ if column.result.__class__.__name__ != 'RawResult':
+ new_column.append(column)
+ else:
+ new_column.extend(Column(LiteralResult(i), Format(), str(i + 1))
+ for i in xrange(iteration))
+ return new_column
+
+
+def _GetTables(benchmark_results, columns, table_type):
+ iter_counts = benchmark_results.iter_counts
+ result = benchmark_results.run_keyvals
+ tables = []
+ for bench_name, runs in result.iteritems():
+ iterations = iter_counts[bench_name]
+ ben_table = _GetResultsTableHeader(bench_name, iterations)
+
+ all_runs_empty = all(not dict for label in runs for dict in label)
+ if all_runs_empty:
+ cell = Cell()
+ cell.string_value = ('This benchmark contains no result.'
+ ' Is the benchmark name valid?')
+ cell_table = [[cell]]
+ else:
+ table = TableGenerator(runs, benchmark_results.label_names).GetTable()
+ parsed_columns = _ParseColumn(columns, iterations)
+ tf = TableFormatter(table, parsed_columns)
+ cell_table = tf.GetCellTable(table_type)
+ tables.append(ben_table)
+ tables.append(cell_table)
+ return tables
+
+
+def _GetPerfTables(benchmark_results, columns, table_type):
+ p_table = _PerfTable(benchmark_results.benchmark_names_and_iterations,
+ benchmark_results.label_names,
+ benchmark_results.read_perf_report)
+
+ tables = []
+ for benchmark in p_table.perf_data:
+ iterations = benchmark_results.iter_counts[benchmark]
+ ben_table = _GetResultsTableHeader(benchmark, iterations)
+ tables.append(ben_table)
+ benchmark_data = p_table.perf_data[benchmark]
+ table = []
+ for event in benchmark_data:
+ tg = TableGenerator(benchmark_data[event],
+ benchmark_results.label_names,
+ sort=TableGenerator.SORT_BY_VALUES_DESC)
+ table = tg.GetTable(ResultsReport.PERF_ROWS)
+ parsed_columns = _ParseColumn(columns, iterations)
+ tf = TableFormatter(table, parsed_columns)
+ tf.GenerateCellTable(table_type)
+ tf.AddColumnName()
+ tf.AddLabelName()
+ tf.AddHeader(str(event))
+ table = tf.GetCellTable(table_type, headers=False)
+ tables.append(table)
+ return tables
+
+
class ResultsReport(object):
"""Class to handle the report format."""
MAX_COLOR_CODE = 255
PERF_ROWS = 5
- def __init__(self, experiment):
- self.experiment = experiment
- self.benchmark_runs = experiment.benchmark_runs
- self.labels = experiment.labels
- self.benchmarks = experiment.benchmarks
- self.baseline = self.labels[0]
-
- def _SortByLabel(self, runs):
- labels = {}
- for benchmark_run in runs:
- if benchmark_run.label_name not in labels:
- labels[benchmark_run.label_name] = []
- labels[benchmark_run.label_name].append(benchmark_run)
- return labels
+ def __init__(self, results):
+ self.benchmark_results = results
+
+ def _GetTablesWithColumns(self, columns, table_type, perf):
+ get_tables = _GetPerfTables if perf else _GetTables
+ return get_tables(self.benchmark_results, columns, table_type)
def GetFullTables(self, perf=False):
- columns = [Column(RawResult(), Format()), Column(
- MinResult(), Format()), Column(MaxResult(),
- Format()), Column(AmeanResult(),
- Format()),
- Column(StdResult(), Format(),
- 'StdDev'), Column(CoeffVarResult(), CoeffVarFormat(),
- 'StdDev/Mean'),
- Column(GmeanRatioResult(), RatioFormat(),
- 'GmeanSpeedup'), Column(PValueResult(), PValueFormat(),
- 'p-value')]
- if not perf:
- return self._GetTables(self.labels, self.benchmark_runs, columns, 'full')
- return self._GetPerfTables(self.labels, columns, 'full')
+ columns = [Column(RawResult(), Format()),
+ Column(MinResult(), Format()),
+ Column(MaxResult(), Format()),
+ Column(AmeanResult(), Format()),
+ Column(StdResult(), Format(), 'StdDev'),
+ Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
+ Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
+ Column(PValueResult(), PValueFormat(), 'p-value')]
+ return self._GetTablesWithColumns(columns, 'full', perf)
def GetSummaryTables(self, perf=False):
- columns = [Column(AmeanResult(), Format()), Column(StdResult(), Format(),
- 'StdDev'),
+ columns = [Column(AmeanResult(), Format()),
+ Column(StdResult(), Format(), 'StdDev'),
Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
- Column(GmeanRatioResult(), RatioFormat(),
- 'GmeanSpeedup'), Column(PValueResult(), PValueFormat(),
- 'p-value')]
- if not perf:
- return self._GetTables(self.labels, self.benchmark_runs, columns,
- 'summary')
- return self._GetPerfTables(self.labels, columns, 'summary')
-
- def _ParseColumn(self, columns, iteration):
- new_column = []
- for column in columns:
- if column.result.__class__.__name__ != 'RawResult':
- #TODO(asharif): tabulator should support full table natively.
- new_column.append(column)
- else:
- for i in range(iteration):
- cc = Column(LiteralResult(i), Format(), str(i + 1))
- new_column.append(cc)
- return new_column
-
- def _AreAllRunsEmpty(self, runs):
- for label in runs:
- for dictionary in label:
- if dictionary:
- return False
- return True
-
- def _GetTableHeader(self, benchmark):
- benchmark_info = ('Benchmark: {0}; Iterations: {1}'
- .format(benchmark.name, benchmark.iterations))
- cell = Cell()
- cell.string_value = benchmark_info
- cell.header = True
- return [[cell]]
-
- def _GetTables(self, labels, benchmark_runs, columns, table_type):
- tables = []
- result = OrganizeResults(benchmark_runs, labels, self.benchmarks)
- label_name = [label.name for label in labels]
- for item in result:
- benchmark = None
- runs = result[item]
- for benchmark in self.benchmarks:
- if benchmark.name == item:
- break
- ben_table = self._GetTableHeader(benchmark)
-
- if self._AreAllRunsEmpty(runs):
- cell = Cell()
- cell.string_value = ('This benchmark contains no result.'
- ' Is the benchmark name valid?')
- cell_table = [[cell]]
- else:
- tg = TableGenerator(runs, label_name)
- table = tg.GetTable()
- parsed_columns = self._ParseColumn(columns, benchmark.iterations)
- tf = TableFormatter(table, parsed_columns)
- cell_table = tf.GetCellTable(table_type)
- tables.append(ben_table)
- tables.append(cell_table)
- return tables
-
- def _GetPerfTables(self, labels, columns, table_type):
- tables = []
- label_names = [label.name for label in labels]
- p_table = PerfTable(self.experiment, label_names)
-
- if not p_table.perf_data:
- return tables
-
- for benchmark in p_table.perf_data:
- ben = None
- for ben in self.benchmarks:
- if ben.name == benchmark:
- break
+ Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
+ Column(PValueResult(), PValueFormat(), 'p-value')]
+ return self._GetTablesWithColumns(columns, 'summary', perf)
+
+
+def _PrintTable(tables, out_to):
+ # tables may be None.
+ if not tables:
+ return ''
+
+ if out_to == 'HTML':
+ out_type = TablePrinter.HTML
+ elif out_to == 'PLAIN':
+ out_type = TablePrinter.PLAIN
+ elif out_to == 'CONSOLE':
+ out_type = TablePrinter.CONSOLE
+ elif out_to == 'TSV':
+ out_type = TablePrinter.TSV
+ elif out_to == 'EMAIL':
+ out_type = TablePrinter.EMAIL
+ else:
+ raise ValueError('Invalid out_to value: %s' % (out_to,))
- ben_table = self._GetTableHeader(ben)
- tables.append(ben_table)
- benchmark_data = p_table.perf_data[benchmark]
- row_info = p_table.row_info[benchmark]
- table = []
- for event in benchmark_data:
- tg = TableGenerator(benchmark_data[event],
- label_names,
- sort=TableGenerator.SORT_BY_VALUES_DESC)
- table = tg.GetTable(max(self.PERF_ROWS, row_info[event]))
- parsed_columns = self._ParseColumn(columns, ben.iterations)
- tf = TableFormatter(table, parsed_columns)
- tf.GenerateCellTable(table_type)
- tf.AddColumnName()
- tf.AddLabelName()
- tf.AddHeader(str(event))
- table = tf.GetCellTable(table_type, headers=False)
- tables.append(table)
- return tables
-
- def PrintTables(self, tables, out_to):
- output = ''
- if not tables:
- return output
- for table in tables:
- if out_to == 'HTML':
- tp = TablePrinter(table, TablePrinter.HTML)
- elif out_to == 'PLAIN':
- tp = TablePrinter(table, TablePrinter.PLAIN)
- elif out_to == 'CONSOLE':
- tp = TablePrinter(table, TablePrinter.CONSOLE)
- elif out_to == 'TSV':
- tp = TablePrinter(table, TablePrinter.TSV)
- elif out_to == 'EMAIL':
- tp = TablePrinter(table, TablePrinter.EMAIL)
- else:
- pass
- output += tp.Print()
- return output
+ printers = (TablePrinter(table, out_type) for table in tables)
+ return ''.join(printer.Print() for printer in printers)
class TextResultsReport(ResultsReport):
"""Class to generate text result report."""
- TEXT = """
-===========================================
-Results report for: '%s'
-===========================================
-
--------------------------------------------
-Summary
--------------------------------------------
-%s
-
-Number re-images: %s
+ H1_STR = '==========================================='
+ H2_STR = '-------------------------------------------'
--------------------------------------------
-Benchmark Run Status
--------------------------------------------
-%s
-
-
--------------------------------------------
-Perf Data
--------------------------------------------
-%s
-
-
-
-Experiment File
--------------------------------------------
-%s
+ def __init__(self, results, email=False, experiment=None):
+ super(TextResultsReport, self).__init__(results)
+ self.email = email
+ self.experiment = experiment
+ @staticmethod
+ def _MakeTitle(title):
+ header_line = TextResultsReport.H1_STR
+ # '' at the end gives one newline.
+ return '\n'.join([header_line, title, header_line, ''])
-CPUInfo
--------------------------------------------
-%s
-===========================================
-"""
+ @staticmethod
+ def _MakeSection(title, body):
+ header_line = TextResultsReport.H2_STR
+ # '\n' at the end gives us two newlines.
+ return '\n'.join([header_line, title, header_line, body, '\n'])
- def __init__(self, experiment, email=False):
- super(TextResultsReport, self).__init__(experiment)
- self.email = email
+ @staticmethod
+ def FromExperiment(experiment, email=False):
+ results = BenchmarkResults.FromExperiment(experiment)
+ return TextResultsReport(results, email, experiment)
def GetStatusTable(self):
"""Generate the status table by the tabulator."""
table = [['', '']]
- columns = [Column(
- LiteralResult(iteration=0),
- Format(),
- 'Status'), Column(
- LiteralResult(iteration=1),
- Format(),
- 'Failing Reason')]
-
- for benchmark_run in self.benchmark_runs:
+ columns = [Column(LiteralResult(iteration=0), Format(), 'Status'),
+ Column(LiteralResult(iteration=1), Format(), 'Failing Reason')]
+
+ for benchmark_run in self.experiment.benchmark_runs:
status = [benchmark_run.name, [benchmark_run.timeline.GetLastEvent(),
benchmark_run.failure_reason]]
table.append(status)
- tf = TableFormatter(table, columns)
- cell_table = tf.GetCellTable('status')
+ cell_table = TableFormatter(table, columns).GetCellTable('status')
return [cell_table]
def GetReport(self):
"""Generate the report for email and console."""
- status_table = self.GetStatusTable()
- summary_table = self.GetSummaryTables()
- perf_table = self.GetSummaryTables(perf=True)
- if not perf_table:
- perf_table = None
output_type = 'EMAIL' if self.email else 'CONSOLE'
- return self.TEXT % (
- self.experiment.name, self.PrintTables(summary_table, output_type),
- self.experiment.machine_manager.num_reimages,
- self.PrintTables(status_table, output_type),
- self.PrintTables(perf_table, output_type),
- self.experiment.experiment_file,
- self.experiment.machine_manager.GetAllCPUInfo(self.experiment.labels))
+ experiment = self.experiment
+
+ sections = []
+ if experiment is not None:
+ title_contents = "Results report for '%s'" % (experiment.name, )
+ else:
+ title_contents = 'Results report'
+ sections.append(self._MakeTitle(title_contents))
+
+ summary_table = _PrintTable(self.GetSummaryTables(perf=False), output_type)
+ sections.append(self._MakeSection('Summary', summary_table))
+
+ if experiment is not None:
+ table = _PrintTable(self.GetStatusTable(), output_type)
+ sections.append(self._MakeSection('Benchmark Run Status', table))
+
+ perf_table = _PrintTable(self.GetSummaryTables(perf=True), output_type)
+ if perf_table:
+ sections.append(self._MakeSection('Perf Data', perf_table))
+
+ if experiment is not None:
+ experiment_file = experiment.experiment_file
+ sections.append(self._MakeSection('Experiment File', experiment_file))
+
+ cpu_info = experiment.machine_manager.GetAllCPUInfo(experiment.labels)
+ sections.append(self._MakeSection('CPUInfo', cpu_info))
+
+ return '\n'.join(sections)
+
+
+def _GetHTMLCharts(label_names, test_results):
+ charts = []
+ for item, runs in test_results.iteritems():
+ # Fun fact: label_names is actually *entirely* useless as a param, since we
+ # never add headers. We still need to pass it anyway.
+ table = TableGenerator(runs, label_names).GetTable()
+ columns = [Column(AmeanResult(), Format()), Column(MinResult(), Format()),
+ Column(MaxResult(), Format())]
+ tf = TableFormatter(table, columns)
+ data_table = tf.GetCellTable('full', headers=False)
+
+ for cur_row_data in data_table:
+ test_key = cur_row_data[0].string_value
+ title = '{0}: {1}'.format(item, test_key.replace('/', ''))
+ chart = ColumnChart(title, 300, 200)
+ chart.AddColumn('Label', 'string')
+ chart.AddColumn('Average', 'number')
+ chart.AddColumn('Min', 'number')
+ chart.AddColumn('Max', 'number')
+ chart.AddSeries('Min', 'line', 'black')
+ chart.AddSeries('Max', 'line', 'black')
+ cur_index = 1
+ for label in label_names:
+ chart.AddRow([label,
+ cur_row_data[cur_index].value,
+ cur_row_data[cur_index + 1].value,
+ cur_row_data[cur_index + 2].value])
+ if isinstance(cur_row_data[cur_index].value, str):
+ chart = None
+ break
+ cur_index += 3
+ if chart:
+ charts.append(chart)
+ return charts
class HTMLResultsReport(ResultsReport):
"""Class to generate html result report."""
- HTML = """
-<html>
- <head>
- <style type="text/css">
-
-body {
- font-family: "Lucida Sans Unicode", "Lucida Grande", Sans-Serif;
- font-size: 12px;
-}
-
-pre {
- margin: 10px;
- color: #039;
- font-size: 14px;
-}
-
-.chart {
- display: inline;
-}
-
-.hidden {
- visibility: hidden;
-}
-
-.results-section {
- border: 1px solid #b9c9fe;
- margin: 10px;
-}
-
-.results-section-title {
- background-color: #b9c9fe;
- color: #039;
- padding: 7px;
- font-size: 14px;
- width: 200px;
-}
-
-.results-section-content {
- margin: 10px;
- padding: 10px;
- overflow:auto;
-}
-
-#box-table-a {
- font-size: 12px;
- width: 480px;
- text-align: left;
- border-collapse: collapse;
-}
-
-#box-table-a th {
- padding: 6px;
- background: #b9c9fe;
- border-right: 1px solid #fff;
- border-bottom: 1px solid #fff;
- color: #039;
- text-align: center;
-}
-
-#box-table-a td {
- padding: 4px;
- background: #e8edff;
- border-bottom: 1px solid #fff;
- border-right: 1px solid #fff;
- color: #669;
- border-top: 1px solid transparent;
-}
-
-#box-table-a tr:hover td {
- background: #d0dafd;
- color: #339;
-}
-
- </style>
- <script type='text/javascript' src='https://www.google.com/jsapi'></script>
- <script type='text/javascript'>
- google.load('visualization', '1', {packages:['corechart']});
- google.setOnLoadCallback(init);
- function init() {
- switchTab('summary', 'html');
- %s
- switchTab('full', 'html');
- drawTable();
- }
- function drawTable() {
- %s
- }
- function switchTab(table, tab) {
- document.getElementById(table + '-html').style.display = 'none';
- document.getElementById(table + '-text').style.display = 'none';
- document.getElementById(table + '-tsv').style.display = 'none';
- document.getElementById(table + '-' + tab).style.display = 'block';
- }
- </script>
- </head>
-
- <body>
- <div class='results-section'>
- <div class='results-section-title'>Summary Table</div>
- <div class='results-section-content'>
- <div id='summary-html'>%s</div>
- <div id='summary-text'><pre>%s</pre></div>
- <div id='summary-tsv'><pre>%s</pre></div>
- </div>
- %s
- </div>
- %s
- <div class='results-section'>
- <div class='results-section-title'>Charts</div>
- <div class='results-section-content'>%s</div>
- </div>
- <div class='results-section'>
- <div class='results-section-title'>Full Table</div>
- <div class='results-section-content'>
- <div id='full-html'>%s</div>
- <div id='full-text'><pre>%s</pre></div>
- <div id='full-tsv'><pre>%s</pre></div>
- </div>
- %s
- </div>
- <div class='results-section'>
- <div class='results-section-title'>Experiment File</div>
- <div class='results-section-content'>
- <pre>%s</pre>
- </div>
- </div>
- </body>
-</html>
-"""
-
- PERF_HTML = """
- <div class='results-section'>
- <div class='results-section-title'>Perf Table</div>
- <div class='results-section-content'>
- <div id='perf-html'>%s</div>
- <div id='perf-text'><pre>%s</pre></div>
- <div id='perf-tsv'><pre>%s</pre></div>
- </div>
- %s
- </div>
-"""
-
- def __init__(self, experiment):
- super(HTMLResultsReport, self).__init__(experiment)
-
- def _GetTabMenuHTML(self, table):
- return """
-<div class='tab-menu'>
- <a href="javascript:switchTab('%s', 'html')">HTML</a>
- <a href="javascript:switchTab('%s', 'text')">Text</a>
- <a href="javascript:switchTab('%s', 'tsv')">TSV</a>
-</div>""" % (table, table, table)
+ def __init__(self, benchmark_results, experiment=None):
+ super(HTMLResultsReport, self).__init__(benchmark_results)
+ self.experiment = experiment
+
+ @staticmethod
+ def FromExperiment(experiment):
+ return HTMLResultsReport(BenchmarkResults.FromExperiment(experiment),
+ experiment=experiment)
def GetReport(self):
- chart_javascript = ''
- charts = self._GetCharts(self.labels, self.benchmark_runs)
+ label_names = self.benchmark_results.label_names
+ test_results = self.benchmark_results.run_keyvals
+ charts = _GetHTMLCharts(label_names, test_results)
chart_javascript = ''.join(chart.GetJavascript() for chart in charts)
chart_divs = ''.join(chart.GetDiv() for chart in charts)
summary_table = self.GetSummaryTables()
full_table = self.GetFullTables()
perf_table = self.GetSummaryTables(perf=True)
- if perf_table:
- perf_html = self.PERF_HTML % (self.PrintTables(perf_table, 'HTML'),
- self.PrintTables(perf_table, 'PLAIN'),
- self.PrintTables(perf_table, 'TSV'),
- self._GetTabMenuHTML('perf'))
- perf_init = "switchTab('perf', 'html');"
- else:
- perf_html = ''
- perf_init = ''
-
- return self.HTML % (
- perf_init, chart_javascript, self.PrintTables(summary_table, 'HTML'),
- self.PrintTables(summary_table, 'PLAIN'),
- self.PrintTables(summary_table, 'TSV'), self._GetTabMenuHTML('summary'),
- perf_html, chart_divs, self.PrintTables(full_table, 'HTML'),
- self.PrintTables(full_table, 'PLAIN'),
- self.PrintTables(full_table, 'TSV'), self._GetTabMenuHTML('full'),
- self.experiment.experiment_file)
-
- def _GetCharts(self, labels, benchmark_runs):
- charts = []
- result = OrganizeResults(benchmark_runs, labels)
- label_names = [label.name for label in labels]
- for item, runs in result.iteritems():
- tg = TableGenerator(runs, label_names)
- table = tg.GetTable()
- columns = [Column(AmeanResult(), Format()), Column(MinResult(), Format()),
- Column(MaxResult(), Format())]
- tf = TableFormatter(table, columns)
- data_table = tf.GetCellTable('full')
-
- for i in range(2, len(data_table)):
- cur_row_data = data_table[i]
- test_key = cur_row_data[0].string_value
- title = '{0}: {1}'.format(item, test_key.replace('/', ''))
- chart = ColumnChart(title, 300, 200)
- chart.AddColumn('Label', 'string')
- chart.AddColumn('Average', 'number')
- chart.AddColumn('Min', 'number')
- chart.AddColumn('Max', 'number')
- chart.AddSeries('Min', 'line', 'black')
- chart.AddSeries('Max', 'line', 'black')
- cur_index = 1
- for label in label_names:
- chart.AddRow([label, cur_row_data[cur_index].value, cur_row_data[
- cur_index + 1].value, cur_row_data[cur_index + 2].value])
- if isinstance(cur_row_data[cur_index].value, str):
- chart = None
- break
- cur_index += 3
- if chart:
- charts.append(chart)
- return charts
+ experiment_file = ''
+ if self.experiment is not None:
+ experiment_file = self.experiment.experiment_file
+ # Use kwargs for sanity, and so that testing is a bit easier.
+ return templates.GenerateHTMLPage(perf_table=perf_table,
+ chart_js=chart_javascript,
+ summary_table=summary_table,
+ print_table=_PrintTable,
+ chart_divs=chart_divs,
+ full_table=full_table,
+ experiment_file=experiment_file)
-class JSONResultsReport(ResultsReport):
- """Class that generates JSON reports."""
+def ParseStandardPerfReport(report_data):
+ """Parses the output of `perf report`.
+
+ It'll parse the following:
+ {{garbage}}
+ # Samples: 1234M of event 'foo'
+
+ 1.23% command shared_object location function::name
+
+ 1.22% command shared_object location function2::name
+
+ # Samples: 999K of event 'bar'
+
+ 0.23% command shared_object location function3::name
+ {{etc.}}
+
+ Into:
+ {'foo': {'function::name': 1.23, 'function2::name': 1.22},
+ 'bar': {'function3::name': 0.23, etc.}}
+ """
+  # This function fails silently if it's handed a string (as opposed to a
+ # list of lines). So, auto-split if we do happen to get a string.
+ if isinstance(report_data, basestring):
+ report_data = report_data.splitlines()
+
+ # Samples: N{K,M,G} of event 'event-name'
+ samples_regex = re.compile(r"#\s+Samples: \d+\S? of event '([^']+)'")
+
+ # We expect lines like:
+ # N.NN% command samples shared_object [location] symbol
+ #
+ # Note that we're looking at stripped lines, so there is no space at the
+ # start.
+ perf_regex = re.compile(r'^(\d+(?:.\d*)?)%' # N.NN%
+ r'\s*\d+' # samples count (ignored)
+ r'\s*\S+' # command (ignored)
+ r'\s*\S+' # shared_object (ignored)
+ r'\s*\[.\]' # location (ignored)
+ r'\s*(\S.+)' # function
+ )
+
+ stripped_lines = (l.strip() for l in report_data)
+ nonempty_lines = (l for l in stripped_lines if l)
+ # Ignore all lines before we see samples_regex
+ interesting_lines = itertools.dropwhile(lambda x: not samples_regex.match(x),
+ nonempty_lines)
+
+ first_sample_line = next(interesting_lines, None)
+ # Went through the entire file without finding a 'samples' header. Quit.
+ if first_sample_line is None:
+ return {}
+
+ sample_name = samples_regex.match(first_sample_line).group(1)
+ current_result = {}
+ results = {sample_name: current_result}
+ for line in interesting_lines:
+ samples_match = samples_regex.match(line)
+ if samples_match:
+ sample_name = samples_match.group(1)
+ current_result = {}
+ results[sample_name] = current_result
+ continue
+
+ match = perf_regex.match(line)
+ if not match:
+ continue
+ percentage_str, func_name = match.groups()
+ try:
+ percentage = float(percentage_str)
+ except ValueError:
+ # Couldn't parse it; try to be "resilient".
+ continue
+ current_result[func_name] = percentage
+ return results
+
+
+def _ReadExperimentPerfReport(results_directory, label_name, benchmark_name,
+ benchmark_iteration):
+ """Reads a perf report for the given benchmark. Returns {} on failure.
+
+ The result should be a map of maps; it should look like:
+ {perf_event_name: {function_name: pct_time_spent}}, e.g.
+ {'cpu_cycles': {'_malloc': 10.0, '_free': 0.3, ...}}
+ """
+ raw_dir_name = label_name + benchmark_name + str(benchmark_iteration + 1)
+ dir_name = ''.join(c for c in raw_dir_name if c.isalnum())
+ file_name = os.path.join(results_directory, dir_name, 'perf.data.report.0')
+ try:
+ with open(file_name) as in_file:
+ return ParseStandardPerfReport(in_file)
+ except IOError:
+ # Yes, we swallow any IO-related errors.
+ return {}
+
+
+# Split out so that testing (specifically: mocking) is easier
+def _ExperimentToKeyvals(experiment, for_json_report):
+ """Converts an experiment to keyvals."""
+ return OrganizeResults(experiment.benchmark_runs, experiment.labels,
+ json_report=for_json_report)
+
+
+class BenchmarkResults(object):
+ """The minimum set of fields that any ResultsReport will take."""
+ def __init__(self, label_names, benchmark_names_and_iterations, run_keyvals,
+ read_perf_report=None):
+ if read_perf_report is None:
+ def _NoPerfReport(*_args, **_kwargs):
+ return {}
+ read_perf_report = _NoPerfReport
+
+ self.label_names = label_names
+ self.benchmark_names_and_iterations = benchmark_names_and_iterations
+ self.iter_counts = dict(benchmark_names_and_iterations)
+ self.run_keyvals = run_keyvals
+ self.read_perf_report = read_perf_report
@staticmethod
- def _WriteResultsToFile(filename, results):
- """Write the results as JSON to the given filename."""
- with open(filename, 'w') as fp:
- json.dump(results, fp, indent=2)
-
- def __init__(self, experiment, date=None, time=None):
- super(JSONResultsReport, self).__init__(experiment)
- self.label_names = [label.name for label in experiment.labels]
- self.organized_result = OrganizeResults(experiment.benchmark_runs,
- experiment.labels,
- experiment.benchmarks,
- json_report=True)
- self.date = date
- self.time = time
- self.defaults = TelemetryDefaults()
- if not self.date:
+ def FromExperiment(experiment, for_json_report=False):
+ label_names = [label.name for label in experiment.labels]
+ benchmark_names_and_iterations = [(benchmark.name, benchmark.iterations)
+ for benchmark in experiment.benchmarks]
+ run_keyvals = _ExperimentToKeyvals(experiment, for_json_report)
+ read_perf_report = functools.partial(_ReadExperimentPerfReport,
+ experiment.results_directory)
+ return BenchmarkResults(label_names, benchmark_names_and_iterations,
+ run_keyvals, read_perf_report)
+
+
+def _GetElemByName(name, from_list):
+ """Gets an element from the given list by its name field.
+
+ Raises an error if it doesn't find exactly one match.
+ """
+ elems = [e for e in from_list if e.name == name]
+ if len(elems) != 1:
+ raise ValueError('Expected 1 item named %s, found %d' % (name, len(elems)))
+ return elems[0]
+
+
+def _Unlist(l):
+ """If l is a list, extracts the first element of l. Otherwise, returns l."""
+ return l[0] if isinstance(l, list) else l
+
+class JSONResultsReport(ResultsReport):
+ """Class that generates JSON reports for experiments."""
+
+ def __init__(self, benchmark_results, date=None, time=None, experiment=None,
+ json_args=None):
+ """Construct a JSONResultsReport.
+
+ json_args is the dict of arguments we pass to json.dumps in GetReport().
+ """
+ super(JSONResultsReport, self).__init__(benchmark_results)
+
+ defaults = TelemetryDefaults()
+ defaults.ReadDefaultsFile()
+ summary_field_defaults = defaults.GetDefault()
+ if summary_field_defaults is None:
+ summary_field_defaults = {}
+ self.summary_field_defaults = summary_field_defaults
+
+ if json_args is None:
+ json_args = {}
+ self.json_args = json_args
+
+ self.experiment = experiment
+ if not date:
timestamp = datetime.datetime.strftime(datetime.datetime.now(),
'%Y-%m-%d %H:%M:%S')
date, time = timestamp.split(' ')
- self.date = date
- self.time = time
-
- def GetReport(self, results_dir, write_results=None):
- if write_results is None:
- write_results = JSONResultsReport._WriteResultsToFile
+ self.date = date
+ self.time = time
- self.defaults.ReadDefaultsFile()
+ @staticmethod
+ def FromExperiment(experiment, date=None, time=None, json_args=None):
+ benchmark_results = BenchmarkResults.FromExperiment(experiment,
+ for_json_report=True)
+ return JSONResultsReport(benchmark_results, date, time, experiment,
+ json_args)
+
+ def GetReportObjectIgnoringExperiment(self):
+ """Gets the JSON report object specifically for the output data.
+
+ Ignores any experiment-specific fields (e.g. board, machine checksum, ...).
+ """
+ benchmark_results = self.benchmark_results
+ label_names = benchmark_results.label_names
+ summary_field_defaults = self.summary_field_defaults
final_results = []
- board = self.experiment.labels[0].board
- compiler_string = 'gcc'
- for test, test_results in self.organized_result.iteritems():
- for label, label_results in itertools.izip(self.label_names,
- test_results):
+ for test, test_results in benchmark_results.run_keyvals.iteritems():
+ for label_name, label_results in zip(label_names, test_results):
for iter_results in label_results:
+ passed = iter_results.get('retval') == 0
json_results = {
'date': self.date,
'time': self.time,
- 'board': board,
- 'label': label
+ 'label': label_name,
+ 'test_name': test,
+ 'pass': passed,
}
- common_checksum = ''
- common_string = ''
- for l in self.experiment.labels:
- if l.name == label:
- img_path = os.path.realpath(os.path.expanduser(l.chromeos_image))
- ver, img = ParseChromeosImage(img_path)
- json_results['chromeos_image'] = img
- json_results['chromeos_version'] = ver
- json_results['chrome_version'] = l.chrome_version
- json_results['compiler'] = l.compiler
- # If any of the labels used the LLVM compiler, we will add
- # ".llvm" to the json report filename. (Otherwise we use .gcc).
- if 'llvm' in l.compiler:
- compiler_string = 'llvm'
- common_checksum = \
- self.experiment.machine_manager.machine_checksum[l.name]
- common_string = \
- self.experiment.machine_manager.machine_checksum_string[l.name]
- break
- else:
- raise RuntimeError("Label doesn't exist in label_results?")
- json_results['test_name'] = test
-
- if not iter_results or iter_results['retval'] != 0:
- json_results['pass'] = False
- else:
- json_results['pass'] = True
- # Get overall results.
- if test in self.defaults.GetDefault():
- default_result_fields = self.defaults.GetDefault()[test]
- value = []
- for f in default_result_fields:
- if f in iter_results:
- v = iter_results[f]
- if type(v) == list:
- v = v[0]
- # New telemetry results format: sometimes we get a list
- # of lists now.
- if type(v) == list:
- v = v[0]
- item = (f, float(v))
- value.append(item)
- json_results['overall_result'] = value
- # Get detailed results.
- detail_results = {}
- for k in iter_results:
- if k != 'retval':
- v = iter_results[k]
- if type(v) == list:
- v = v[0]
- if v != 'PASS':
- if k.find('machine') == -1:
- if v is None:
- continue
- if type(v) != list:
- detail_results[k] = float(v)
- else:
- detail_results[k] = [float(d) for d in v]
- else:
- json_results[k] = v
- if 'machine_checksum' not in json_results:
- json_results['machine_checksum'] = common_checksum
- if 'machine_string' not in json_results:
- json_results['machine_string'] = common_string
- json_results['detailed_results'] = detail_results
final_results.append(json_results)
- filename = 'report_%s_%s_%s.%s.json' % (
- board, self.date, self.time.replace(':', '.'), compiler_string)
- fullname = os.path.join(results_dir, filename)
- write_results(fullname, final_results)
+ if not passed:
+ continue
+
+ # Get overall results.
+ summary_fields = summary_field_defaults.get(test)
+ if summary_fields is not None:
+ value = []
+ json_results['overall_result'] = value
+ for f in summary_fields:
+ v = iter_results.get(f)
+ if v is None:
+ continue
+ # New telemetry results format: sometimes we get a list of lists
+ # now.
+ v = _Unlist(_Unlist(v))
+ value.append((f, float(v)))
+
+ # Get detailed results.
+ detail_results = {}
+ json_results['detailed_results'] = detail_results
+ for k, v in iter_results.iteritems():
+ if k == 'retval' or k == 'PASS' or k == ['PASS']:
+ continue
+
+ v = _Unlist(v)
+ if 'machine' in k:
+ json_results[k] = v
+ elif v is not None:
+ if isinstance(v, list):
+ detail_results[k] = [float(d) for d in v]
+ else:
+ detail_results[k] = float(v)
+ return final_results
+
+ def GetReportObject(self):
+ """Generate the JSON report, returning it as a python object."""
+ report_list = self.GetReportObjectIgnoringExperiment()
+ if self.experiment is not None:
+ self._AddExperimentSpecificFields(report_list)
+ return report_list
+
+ def _AddExperimentSpecificFields(self, report_list):
+ """Add experiment-specific data to the JSON report."""
+ board = self.experiment.labels[0].board
+ manager = self.experiment.machine_manager
+ for report in report_list:
+ label_name = report['label']
+ label = _GetElemByName(label_name, self.experiment.labels)
+
+ img_path = os.path.realpath(os.path.expanduser(label.chromeos_image))
+ ver, img = ParseChromeosImage(img_path)
+
+ report.update({
+ 'board': board,
+ 'chromeos_image': img,
+ 'chromeos_version': ver,
+ 'chrome_version': label.chrome_version,
+ 'compiler': label.compiler
+ })
+
+ if not report['pass']:
+ continue
+ if 'machine_checksum' not in report:
+ report['machine_checksum'] = manager.machine_checksum[label_name]
+ if 'machine_string' not in report:
+ report['machine_string'] = manager.machine_checksum_string[label_name]
+
+ def GetReport(self):
+ """Dump the results of self.GetReportObject() to a string as JSON."""
+ # This exists for consistency with the other GetReport methods.
+ # Specifically, they all return strings, so it's a bit awkward if the JSON
+ # results reporter returns an object.
+ return json.dumps(self.GetReportObject(), **self.json_args)
diff --git a/crosperf/results_report_templates.py b/crosperf/results_report_templates.py
new file mode 100644
index 00000000..827649fd
--- /dev/null
+++ b/crosperf/results_report_templates.py
@@ -0,0 +1,196 @@
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Text templates used by various parts of results_report."""
+from __future__ import print_function
+
+import cgi
+from string import Template
+
+_TabMenuTemplate = Template("""
+<div class='tab-menu'>
+ <a href="javascript:switchTab('$table_name', 'html')">HTML</a>
+ <a href="javascript:switchTab('$table_name', 'text')">Text</a>
+ <a href="javascript:switchTab('$table_name', 'tsv')">TSV</a>
+</div>""")
+
+def _GetTabMenuHTML(table_name):
+ # N.B. cgi.escape does some very basic HTML escaping. Nothing more.
+ escaped = cgi.escape(table_name, quote=True)
+ return _TabMenuTemplate.substitute(table_name=escaped)
+
+
+_ExperimentFileHTML = """
+<div class='results-section'>
+ <div class='results-section-title'>Experiment File</div>
+ <div class='results-section-content'>
+ <pre>%s</pre>
+</div>
+"""
+
+def _GetExperimentFileHTML(experiment_file_text):
+ if not experiment_file_text:
+ return ''
+ return _ExperimentFileHTML % (cgi.escape(experiment_file_text), )
+
+
+_ResultsSectionHTML = Template("""
+<div class='results-section'>
+ <div class='results-section-title'>$sect_name</div>
+ <div class='results-section-content'>
+ <div id='${short_name}-html'>$html_table</div>
+ <div id='${short_name}-text'><pre>$text_table</pre></div>
+ <div id='${short_name}-tsv'><pre>$tsv_table</pre></div>
+ </div>
+ $tab_menu
+</div>
+""")
+
+def _GetResultsSectionHTML(print_table, table_name, data):
+ first_word = table_name.strip().split()[0]
+ short_name = first_word.lower()
+ return _ResultsSectionHTML.substitute(sect_name=table_name,
+ html_table=print_table(data, 'HTML'),
+ text_table=print_table(data, 'PLAIN'),
+ tsv_table=print_table(data, 'TSV'),
+ tab_menu=_GetTabMenuHTML(short_name),
+ short_name=short_name)
+
+
+
+_MainHTML = Template("""
+<html>
+<head>
+ <style type="text/css">
+ body {
+ font-family: "Lucida Sans Unicode", "Lucida Grande", Sans-Serif;
+ font-size: 12px;
+ }
+
+ pre {
+ margin: 10px;
+ color: #039;
+ font-size: 14px;
+ }
+
+ .chart {
+ display: inline;
+ }
+
+ .hidden {
+ visibility: hidden;
+ }
+
+ .results-section {
+ border: 1px solid #b9c9fe;
+ margin: 10px;
+ }
+
+ .results-section-title {
+ background-color: #b9c9fe;
+ color: #039;
+ padding: 7px;
+ font-size: 14px;
+ width: 200px;
+ }
+
+ .results-section-content {
+ margin: 10px;
+ padding: 10px;
+ overflow:auto;
+ }
+
+ #box-table-a {
+ font-size: 12px;
+ width: 480px;
+ text-align: left;
+ border-collapse: collapse;
+ }
+
+ #box-table-a th {
+ padding: 6px;
+ background: #b9c9fe;
+ border-right: 1px solid #fff;
+ border-bottom: 1px solid #fff;
+ color: #039;
+ text-align: center;
+ }
+
+ #box-table-a td {
+ padding: 4px;
+ background: #e8edff;
+ border-bottom: 1px solid #fff;
+ border-right: 1px solid #fff;
+ color: #669;
+ border-top: 1px solid transparent;
+ }
+
+ #box-table-a tr:hover td {
+ background: #d0dafd;
+ color: #339;
+ }
+
+ </style>
+ <script type='text/javascript' src='https://www.google.com/jsapi'></script>
+ <script type='text/javascript'>
+ google.load('visualization', '1', {packages:['corechart']});
+ google.setOnLoadCallback(init);
+ function init() {
+ switchTab('summary', 'html');
+ ${perf_init};
+ switchTab('full', 'html');
+ drawTable();
+ }
+ function drawTable() {
+ ${chart_js};
+ }
+ function switchTab(table, tab) {
+ document.getElementById(table + '-html').style.display = 'none';
+ document.getElementById(table + '-text').style.display = 'none';
+ document.getElementById(table + '-tsv').style.display = 'none';
+ document.getElementById(table + '-' + tab).style.display = 'block';
+ }
+ </script>
+</head>
+
+<body>
+ $summary_table
+ $perf_html
+ <div class='results-section'>
+ <div class='results-section-title'>Charts</div>
+ <div class='results-section-content'>$chart_divs</div>
+ </div>
+ $full_table
+ $experiment_file
+</body>
+</html>
+""")
+
+# It's a bit ugly that we take some HTML things, and some non-HTML things, but I
+# need to balance prettiness with time spent making things pretty.
+def GenerateHTMLPage(perf_table, chart_js, summary_table, print_table,
+ chart_divs, full_table, experiment_file):
+ """Generates a crosperf HTML page from the given arguments.
+
+ print_table is a two-arg function called like: print_table(t, f)
+  t is one of [summary_table, perf_table, full_table]; it's the table we want
+ to format.
+ f is one of ['TSV', 'HTML', 'PLAIN']; it's the type of format we want.
+ """
+ summary_table_html = _GetResultsSectionHTML(print_table, 'Summary Table',
+ summary_table)
+ if perf_table:
+ perf_html = _GetResultsSectionHTML(print_table, 'Perf Table', perf_table)
+ perf_init = "switchTab('perf', 'html')"
+ else:
+ perf_html = ''
+ perf_init = ''
+
+ full_table_html = _GetResultsSectionHTML(print_table, 'Full Table',
+ full_table)
+ experiment_file_html = _GetExperimentFileHTML(experiment_file)
+ return _MainHTML.substitute(perf_init=perf_init, chart_js=chart_js,
+ summary_table=summary_table_html,
+ perf_html=perf_html, chart_divs=chart_divs,
+ full_table=full_table_html,
+ experiment_file=experiment_file_html)
diff --git a/crosperf/results_report_unittest.py b/crosperf/results_report_unittest.py
index acbe94fe..9a45496f 100755
--- a/crosperf/results_report_unittest.py
+++ b/crosperf/results_report_unittest.py
@@ -24,9 +24,11 @@ from experiment_file import ExperimentFile
from machine_manager import MockCrosMachine
from machine_manager import MockMachineManager
from results_cache import MockResult
+from results_report import BenchmarkResults
from results_report import HTMLResultsReport
from results_report import JSONResultsReport
from results_report import ParseChromeosImage
+from results_report import ParseStandardPerfReport
from results_report import TextResultsReport
@@ -151,7 +153,8 @@ class TextResultsReportTest(unittest.TestCase):
success_keyvals = {'retval': 0, 'machine': 'some bot', 'a_float': 3.96}
experiment = _InjectSuccesses(MakeMockExperiment(), num_success,
success_keyvals)
- text_report = TextResultsReport(experiment, email=email).GetReport()
+ text_report = TextResultsReport.FromExperiment(experiment, email=email) \
+ .GetReport()
self.assertIn(str(success_keyvals['a_float']), text_report)
self.assertIn(success_keyvals['machine'], text_report)
self.assertIn(MockCrosMachine.CPUINFO_STRING, text_report)
@@ -177,47 +180,40 @@ class HTMLResultsReportTest(unittest.TestCase):
_TestOutput = collections.namedtuple('TestOutput', ['summary_table',
'perf_html',
'charts',
- 'table_html',
+ 'full_table',
'experiment_file'])
@staticmethod
- def _TupleToTestOutput(to_what):
- fields = {}
- # to_what has 13 fields. So, dealing with it can be unfun.
- it = iter(to_what)
- next(it) # perf_init
- next(it) # chart_javascript
- fields['summary_table'] = next(it) # HTML summary
- next(it) # plaintext summary
- next(it) # TSV summary
- next(it) # tab menu summary
- fields['perf_html'] = next(it)
- fields['charts'] = next(it)
- fields['table_html'] = next(it)
- next(it) # full table plain text
- next(it) # full table TSV
- next(it) # full tab menu
- fields['experiment_file'] = next(it)
-
- remaining_fields = list(it)
- if not remaining_fields:
- return HTMLResultsReportTest._TestOutput(**fields)
-
- raise RuntimeError('Initialization missed field(s): %s' %
- (remaining_fields, ))
-
- def _GetOutput(self, experiment):
- with mock.patch('results_report.HTMLResultsReport.HTML') as standin:
- HTMLResultsReport(experiment).GetReport()
- mod_mock = standin.__mod__
+ def _GetTestOutput(perf_table, _chart_js, summary_table, print_table,
+ chart_divs, full_table, experiment_file):
+ summary_table = print_table(summary_table, 'HTML')
+ perf_html = print_table(perf_table, 'HTML')
+ full_table = print_table(full_table, 'HTML')
+ return HTMLResultsReportTest._TestOutput(summary_table=summary_table,
+ perf_html=perf_html,
+ charts=chart_divs,
+ full_table=full_table,
+ experiment_file=experiment_file)
+
+ def _GetOutput(self, experiment=None, benchmark_results=None):
+ with mock.patch('results_report_templates.GenerateHTMLPage') as standin:
+ if experiment is not None:
+ HTMLResultsReport.FromExperiment(experiment).GetReport()
+ else:
+ HTMLResultsReport(benchmark_results).GetReport()
+ mod_mock = standin
self.assertEquals(mod_mock.call_count, 1)
- fmt_args = mod_mock.call_args[0][0]
- return self._TupleToTestOutput(fmt_args)
+ # call_args[0] is positional args, call_args[1] is kwargs.
+ self.assertEquals(mod_mock.call_args[0], tuple())
+ fmt_args = mod_mock.call_args[1]
+ return self._GetTestOutput(**fmt_args)
def testNoSuccessOutput(self):
output = self._GetOutput(MakeMockExperiment())
self.assertIn('no result', output.summary_table)
+ self.assertIn('no result', output.full_table)
self.assertEqual(output.charts, '')
+ self.assertNotEqual(output.experiment_file, '')
def testSuccessfulOutput(self):
num_success = 2
@@ -229,73 +225,65 @@ class HTMLResultsReportTest(unittest.TestCase):
#self.assertIn(success_keyvals['machine'], output.summary_table)
self.assertIn('a_float', output.summary_table)
self.assertIn(str(success_keyvals['a_float']), output.summary_table)
+ self.assertIn('a_float', output.full_table)
# The _ in a_float is filtered out when we're generating HTML.
self.assertIn('afloat', output.charts)
+ # And make sure we have our experiment file...
+ self.assertNotEqual(output.experiment_file, '')
+
+ def testBenchmarkResultFailure(self):
+ labels = ['label1']
+ benchmark_names_and_iterations = [('bench1', 1)]
+ benchmark_keyvals = {'bench1': [[]]}
+ results = BenchmarkResults(labels, benchmark_names_and_iterations,
+ benchmark_keyvals)
+ output = self._GetOutput(benchmark_results=results)
+ self.assertIn('no result', output.summary_table)
+ self.assertEqual(output.charts, '')
+ self.assertEqual(output.experiment_file, '')
+
+ def testBenchmarkResultSuccess(self):
+ labels = ['label1']
+ benchmark_names_and_iterations = [('bench1', 1)]
+ benchmark_keyvals = {'bench1': [[{'retval': 1, 'foo': 2.0}]]}
+ results = BenchmarkResults(labels, benchmark_names_and_iterations,
+ benchmark_keyvals)
+ output = self._GetOutput(benchmark_results=results)
+ self.assertNotIn('no result', output.summary_table)
+ self.assertIn('bench1', output.summary_table)
+ self.assertIn('bench1', output.full_table)
+ self.assertNotEqual(output.charts, '')
+ self.assertEqual(output.experiment_file, '')
class JSONResultsReportTest(unittest.TestCase):
"""Tests JSONResultsReport."""
- REQUIRED_REPORT_KEYS = ('date', 'time', 'board', 'label', 'chromeos_image',
- 'chromeos_version', 'chrome_version', 'compiler',
- 'test_name', 'pass')
-
- # JSONResultsReport.GetReport was initially made to write to disk; unless we
- # refactor it, testing is... a bit awkward.
- def _GetResultsFor(self, experiment, results_dir, date=None, time=None):
- """Gets a JSON report, given an experiment and results_dir.
-
- Returns [filename, result_as_python_datastructures].
- """
- # Linters complain if this isn't populated with precisely two things.
- test_results = [None, None]
- def grab_results(filename, results):
- test_results[0] = filename
- test_results[1] = results
- report = JSONResultsReport(experiment, date=date, time=time)
- report.GetReport(results_dir, write_results=grab_results)
- self.assertNotIn(None, test_results)
- return test_results
-
- def testJSONReportOutputFileNameInfo(self):
- date, time = '1/1/2001', '01:02:03'
- results_dir = FakePath('results')
- experiment = MakeMockExperiment(compiler='gcc')
- board = experiment.labels[0].board
- out_path, _ = self._GetResultsFor(experiment, results_dir, date, time)
-
- self.assertTrue(out_path.startswith(results_dir))
- self.assertTrue(out_path.endswith('.json'))
- out_file = out_path[len(results_dir):]
-
- # This should replace : in time with something else, since : is a path sep.
- # At the moment, it's '.'.
- self.assertIn(time.replace(':', '.'), out_file)
- self.assertIn(date, out_file)
- self.assertIn(board, out_file)
- self.assertIn('gcc', out_file)
-
- out_path, _ = self._GetResultsFor(MakeMockExperiment(compiler='llvm'),
- results_dir, date, time)
- self.assertIn('llvm', out_path)
-
- # Comments say that if *any* compiler used was LLVM, then LLVM must be in
- # the file name, instead of gcc.
- experiment = MakeMockExperiment(compiler='gcc')
- experiment.labels[len(experiment.labels)//2].compiler = 'llvm'
- out_path, _ = self._GetResultsFor(experiment, results_dir, date, time)
- self.assertIn('llvm', out_path)
-
- def _CheckRequiredKeys(self, test_output):
+
+ REQUIRED_REPORT_KEYS = ('date', 'time', 'label', 'test_name', 'pass')
+ EXPERIMENT_REPORT_KEYS = ('board', 'chromeos_image', 'chromeos_version',
+ 'chrome_version', 'compiler')
+
+ @staticmethod
+ def _GetRequiredKeys(is_experiment):
+ required_keys = JSONResultsReportTest.REQUIRED_REPORT_KEYS
+ if is_experiment:
+ required_keys += JSONResultsReportTest.EXPERIMENT_REPORT_KEYS
+ return required_keys
+
+ def _CheckRequiredKeys(self, test_output, is_experiment):
+ required_keys = self._GetRequiredKeys(is_experiment)
for output in test_output:
- for key in JSONResultsReportTest.REQUIRED_REPORT_KEYS:
+ for key in required_keys:
self.assertIn(key, output)
def testAllFailedJSONReportOutput(self):
- _, results = self._GetResultsFor(MakeMockExperiment(), FakePath('results'))
- self._CheckRequiredKeys(results)
+ experiment = MakeMockExperiment()
+ results = JSONResultsReport.FromExperiment(experiment).GetReportObject()
+ self._CheckRequiredKeys(results, is_experiment=True)
# Nothing succeeded; we don't send anything more than what's required.
+ required_keys = self._GetRequiredKeys(is_experiment=True)
for result in results:
- self.assertItemsEqual(result.iterkeys(), self.REQUIRED_REPORT_KEYS)
+ self.assertItemsEqual(result.iterkeys(), required_keys)
def testJSONReportOutputWithSuccesses(self):
success_keyvals = {
@@ -309,8 +297,8 @@ class JSONResultsReportTest(unittest.TestCase):
num_success = 2
experiment = _InjectSuccesses(MakeMockExperiment(), num_success,
success_keyvals)
- _, results = self._GetResultsFor(experiment, FakePath('results'))
- self._CheckRequiredKeys(results)
+ results = JSONResultsReport.FromExperiment(experiment).GetReportObject()
+ self._CheckRequiredKeys(results, is_experiment=True)
num_passes = num_success * len(experiment.labels)
non_failures = [r for r in results if r['pass']]
@@ -324,6 +312,98 @@ class JSONResultsReportTest(unittest.TestCase):
self.assertIn('machine', pass_)
self.assertEqual(success_keyvals['machine'], pass_['machine'])
+ def testFailedJSONReportOutputWithoutExperiment(self):
+ labels = ['label1']
+ benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2),
+ ('bench3', 1), ('bench4', 0)]
+ benchmark_keyvals = {
+ 'bench1': [[{'retval': 1, 'foo': 2.0}]],
+ 'bench2': [[{'retval': 1, 'foo': 4.0}, {'retval': -1, 'bar': 999}]],
+ # lack of retval is considered a failure.
+ 'bench3': [[{}]],
+ 'bench4': [[]]
+ }
+ bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
+ benchmark_keyvals)
+ results = JSONResultsReport(bench_results).GetReportObject()
+ self._CheckRequiredKeys(results, is_experiment=False)
+ self.assertFalse(any(r['pass'] for r in results))
+
+ def testJSONGetReportObeysJSONSettings(self):
+ labels = ['label1']
+ benchmark_names_and_iterations = [('bench1', 1)]
+ # These can be anything, really. So long as they're distinctive.
+ separators = (',\t\n\t', ':\t\n\t')
+ benchmark_keyvals = {'bench1': [[{'retval': 0, 'foo': 2.0}]]}
+ bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
+ benchmark_keyvals)
+ reporter = JSONResultsReport(bench_results,
+ json_args={'separators': separators})
+ result_str = reporter.GetReport()
+ self.assertIn(separators[0], result_str)
+ self.assertIn(separators[1], result_str)
+
+ def testSuccessfulJSONReportOutputWithoutExperiment(self):
+ labels = ['label1']
+ benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2)]
+ benchmark_keyvals = {
+ 'bench1': [[{'retval': 0, 'foo': 2.0}]],
+ 'bench2': [[{'retval': 0, 'foo': 4.0}, {'retval': 0, 'bar': 999}]]
+ }
+ bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
+ benchmark_keyvals)
+ results = JSONResultsReport(bench_results).GetReportObject()
+ self._CheckRequiredKeys(results, is_experiment=False)
+ self.assertTrue(all(r['pass'] for r in results))
+ # Enforce that the results have *some* deterministic order.
+ keyfn = lambda r: (r['test_name'], r['detailed_results'].get('foo', 5.0))
+ sorted_results = sorted(results, key=keyfn)
+ detailed_results = [r['detailed_results'] for r in sorted_results]
+ bench1, bench2_foo, bench2_bar = detailed_results
+ self.assertEqual(bench1['foo'], 2.0)
+ self.assertEqual(bench2_foo['foo'], 4.0)
+ self.assertEqual(bench2_bar['bar'], 999)
+ self.assertNotIn('bar', bench1)
+ self.assertNotIn('bar', bench2_foo)
+ self.assertNotIn('foo', bench2_bar)
+
+
+class PerfReportParserTest(unittest.TestCase):
+ @staticmethod
+ def _ReadRealPerfReport():
+ my_dir = os.path.dirname(os.path.realpath(__file__))
+ with open(os.path.join(my_dir, 'perf_files/perf.data.report.0')) as f:
+ return f.read()
+
+ def testParserParsesRealWorldPerfReport(self):
+ report = ParseStandardPerfReport(self._ReadRealPerfReport())
+ self.assertItemsEqual(['cycles', 'instructions'], report.keys())
+
+ # Arbitrarily selected known percentages from the perf report.
+ known_cycles_percentages = {
+ '0xffffffffa4a1f1c9': 0.66,
+ '0x0000115bb7ba9b54': 0.47,
+ '0x0000000000082e08': 0.00,
+ '0xffffffffa4a13e63': 0.00,
+ }
+ report_cycles = report['cycles']
+ self.assertEqual(len(report_cycles), 214)
+ for k, v in known_cycles_percentages.iteritems():
+ self.assertIn(k, report_cycles)
+ self.assertEqual(v, report_cycles[k])
+
+ known_instrunctions_percentages = {
+ '0x0000115bb6c35d7a': 1.65,
+ '0x0000115bb7ba9b54': 0.67,
+ '0x0000000000024f56': 0.00,
+ '0xffffffffa4a0ee03': 0.00,
+ }
+ report_instructions = report['instructions']
+ self.assertEqual(len(report_instructions), 492)
+ for k, v in known_instrunctions_percentages.iteritems():
+ self.assertIn(k, report_instructions)
+ self.assertEqual(v, report_instructions[k])
+
if __name__ == '__main__':
test_flag.SetTestMode(True)