path: root/crosperf/results_report_unittest.py
author     George Burgess IV <gbiv@chromium.org>  2016-09-08 13:52:12 -0700
committer  chrome-bot <chrome-bot@chromium.org>   2016-09-08 15:26:44 -0700
commit     43fe499320c0d77c7cd2b3189896a0d9791821c5 (patch)
tree       24febf0579b90a8d2e39fe75295f2046bf7f8f1d /crosperf/results_report_unittest.py
parent     a12e97482b0c8b21805a468109e1b5fa7d424304 (diff)
download   toolchain-utils-43fe499320c0d77c7cd2b3189896a0d9791821c5.tar.gz
Fix test failures in results_report refactors.
...I have absolutely no clue how I missed these failures (since I had to run
results_report_unittest to run the new perf parser test), but apparently I
did. The failure happened because we had a _chart_js arg name, which didn't
match up with chart_js (in the caller).

This also fixes a few minor linter complaints that I introduced in patch-set 2
of said review. All of the other files I touched are clean (except for a
complaint about use of _logger in one of them, but that was there before I
made my changes).

BUG=None
TEST=./run_tests.sh *actually* passes this time. I promise. :)

Change-Id: I98e43b29ff9cef5fc9cf33143adc26b98a861f3e
Reviewed-on: https://chrome-internal-review.googlesource.com/285436
Commit-Ready: George Burgess <gbiv@google.com>
Tested-by: George Burgess <gbiv@google.com>
Reviewed-by: Caroline Tice <cmtice@google.com>
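For context, the breakage described above is the usual keyword-argument name
mismatch: the test substitutes a stub for the real page generator, and the
caller invokes it with keyword arguments, so every parameter name must match
exactly. A minimal, hypothetical sketch of that failure mode (the names below
are invented for illustration, not taken from the crosperf code):

    # Hypothetical stub; the leading-underscore parameter name does not match
    # the keyword the caller uses.
    def stub_generate_page(perf_table, _chart_js, summary_table):
      return 'page'

    # The (hypothetical) caller passes keyword arguments:
    stub_generate_page(perf_table='t', chart_js='js', summary_table='s')
    # TypeError: stub_generate_page() got an unexpected keyword argument 'chart_js'

Renaming the stub's parameter to chart_js, as the patch below does, makes the
keyword call resolve.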
Diffstat (limited to 'crosperf/results_report_unittest.py')
-rwxr-xr-x  crosperf/results_report_unittest.py  15
1 files changed, 10 insertions, 5 deletions
diff --git a/crosperf/results_report_unittest.py b/crosperf/results_report_unittest.py
index 9a45496f..cfd4eb9e 100755
--- a/crosperf/results_report_unittest.py
+++ b/crosperf/results_report_unittest.py
@@ -179,18 +179,22 @@ class HTMLResultsReportTest(unittest.TestCase):
   _TestOutput = collections.namedtuple('TestOutput', ['summary_table',
                                                        'perf_html',
+                                                       'chart_js',
                                                        'charts',
                                                        'full_table',
                                                        'experiment_file'])
   @staticmethod
-  def _GetTestOutput(perf_table, _chart_js, summary_table, print_table,
+  def _GetTestOutput(perf_table, chart_js, summary_table, print_table,
                      chart_divs, full_table, experiment_file):
+    # N.B. Currently we don't check chart_js; it's just passed through because
+    # cros lint complains otherwise.
     summary_table = print_table(summary_table, 'HTML')
     perf_html = print_table(perf_table, 'HTML')
     full_table = print_table(full_table, 'HTML')
     return HTMLResultsReportTest._TestOutput(summary_table=summary_table,
                                              perf_html=perf_html,
+                                             chart_js=chart_js,
                                              charts=chart_divs,
                                              full_table=full_table,
                                              experiment_file=experiment_file)
@@ -369,6 +373,7 @@ class JSONResultsReportTest(unittest.TestCase):
 class PerfReportParserTest(unittest.TestCase):
+  """Tests for the perf report parser in results_report."""
   @staticmethod
   def _ReadRealPerfReport():
     my_dir = os.path.dirname(os.path.realpath(__file__))
@@ -393,10 +398,10 @@ class PerfReportParserTest(unittest.TestCase):
       self.assertEqual(v, report_cycles[k])
     known_instrunctions_percentages = {
-        '0x0000115bb6c35d7a': 1.65,
-        '0x0000115bb7ba9b54': 0.67,
-        '0x0000000000024f56': 0.00,
-        '0xffffffffa4a0ee03': 0.00,
+        '0x0000115bb6c35d7a': 1.65,
+        '0x0000115bb7ba9b54': 0.67,
+        '0x0000000000024f56': 0.00,
+        '0xffffffffa4a0ee03': 0.00,
     }
     report_instructions = report['instructions']
     self.assertEqual(len(report_instructions), 492)
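As an aside for readers without the parser source in front of them, the
assertions in this hunk suggest the parsed report maps each perf event name to
a dict from sample address to its percentage of samples. A rough, hypothetical
illustration of that shape (structure inferred from the test; only the four
'instructions' entries come from the diff above, everything else is invented):

    # Hypothetical shape of the parsed perf report that the test indexes into.
    parsed_report = {
        'cycles': {
            # address (hex string) -> percentage of samples for that event
        },
        'instructions': {
            '0x0000115bb6c35d7a': 1.65,
            '0x0000115bb7ba9b54': 0.67,
            '0x0000000000024f56': 0.00,
            '0xffffffffa4a0ee03': 0.00,
        },
    }

    # The test then spot-checks known entries and the total entry count, e.g.:
    # assert parsed_report['instructions']['0x0000115bb6c35d7a'] == 1.65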