path: root/crosperf/results_report.py
Diffstat (limited to 'crosperf/results_report.py')
 -rw-r--r--  crosperf/results_report.py | 165
 1 file changed, 145 insertions(+), 20 deletions(-)
diff --git a/crosperf/results_report.py b/crosperf/results_report.py
index b591370a..f7434132 100644
--- a/crosperf/results_report.py
+++ b/crosperf/results_report.py
@@ -2,11 +2,12 @@
# Copyright 2011 Google Inc. All Rights Reserved.
-import math
+from utils.tabulator import *
+
from column_chart import ColumnChart
-from results_sorter import ResultSorter
from results_organizer import ResultOrganizer
-from utils.tabulator import *
+from perf_table import PerfTable
+
class ResultsReport(object):
MAX_COLOR_CODE = 255
@@ -26,7 +27,7 @@ class ResultsReport(object):
labels[benchmark_run.label_name].append(benchmark_run)
return labels
- def GetFullTables(self):
+ def GetFullTables(self, perf=False):
columns = [Column(NonEmptyCountResult(),
Format(),
"Completed"),
@@ -41,23 +42,30 @@ class ResultsReport(object):
Column(StdResult(),
Format())
]
- return self._GetTables(self.labels, self.benchmark_runs, columns)
+ if not perf:
+ return self._GetTables(self.labels, self.benchmark_runs, columns)
+ return self._GetPerfTables(self.labels, columns)
- def GetSummaryTables(self):
- columns = [Column(AmeanResult(),
+ def GetSummaryTables(self, perf=False):
+ columns = [Column(NonEmptyCountResult(),
+ Format(),
+ "Completed"),
+ Column(AmeanResult(),
Format()),
Column(StdResult(),
Format(), "StdDev"),
Column(CoeffVarResult(),
- CoeffVarFormat(), "Mean/StdDev"),
+ CoeffVarFormat(), "StdDev/Mean"),
Column(GmeanRatioResult(),
RatioFormat(), "GmeanSpeedup"),
Column(GmeanRatioResult(),
ColorBoxFormat(), " "),
- Column(StatsSignificant(),
- Format(), "p-value")
+ Column(PValueResult(),
+ PValueFormat(), "p-value")
]
- return self._GetTables(self.labels, self.benchmark_runs, columns)
+ if not perf:
+ return self._GetTables(self.labels, self.benchmark_runs, columns)
+ return self._GetPerfTables(self.labels, columns)
def _ParseColumn(self, columns, iteration):
new_column = []
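
The "Mean/StdDev" to "StdDev/Mean" rename above is a correction, not a cosmetic tweak: the coefficient of variation that CoeffVarResult reports is defined as the standard deviation divided by the mean. A minimal, self-contained illustration of the statistic (plain Python, independent of the utils.tabulator classes):

  import math

  def coefficient_of_variation(values):
    # StdDev/Mean -- the quantity the renamed column header now describes.
    mean = sum(values) / float(len(values))
    variance = sum((v - mean) ** 2 for v in values) / (len(values) - 1)
    return math.sqrt(variance) / mean

  print(coefficient_of_variation([10.1, 9.9, 10.0]))  # ~0.01, i.e. a 1% spread
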
@@ -78,9 +86,17 @@ class ResultsReport(object):
return False
return True
+ def _GetTableHeader(self, benchmark):
+ benchmark_info = ("Benchmark: {0}; Iterations: {1}"
+ .format(benchmark.name, benchmark.iterations))
+ cell = Cell()
+ cell.string_value = benchmark_info
+ cell.header = True
+ return [[cell]]
+
def _GetTables(self, labels, benchmark_runs, columns):
tables = []
- ro = ResultOrganizer(benchmark_runs, labels)
+ ro = ResultOrganizer(benchmark_runs, labels, self.benchmarks)
result = ro.result
label_name = ro.labels
for item in result:
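
The header-building code that used to live inline in _GetTables is hoisted into _GetTableHeader so the new perf path can reuse it, and the cell now carries header = True so the printer can style it as a title row. For readers without the tabulator source at hand, a stand-in sketch of the pattern (this Cell is hypothetical, with fields inferred from how the diff uses it, not the real utils.tabulator.Cell):

  class Cell(object):
    # Stand-in attribute bag; fields inferred from usage in the diff.
    def __init__(self):
      self.string_value = ""
      self.header = False

  def GetTableHeader(benchmark_name, iterations):
    cell = Cell()
    cell.string_value = ("Benchmark: {0}; Iterations: {1}"
                         .format(benchmark_name, iterations))
    cell.header = True
    return [[cell]]  # a 1x1 table: one row holding one header cell
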
@@ -88,11 +104,7 @@ class ResultsReport(object):
for benchmark in self.benchmarks:
if benchmark.name == item:
break
- benchmark_info = ("Benchmark: {0}; Iterations: {1}"
- .format(benchmark.name, benchmark.iterations))
- cell = Cell()
- cell.string_value = benchmark_info
- ben_table = [[cell]]
+ ben_table = self._GetTableHeader(benchmark)
if self._AreAllRunsEmpty(runs):
cell = Cell()
@@ -109,8 +121,41 @@ class ResultsReport(object):
tables.append(cell_table)
return tables
+ def _GetPerfTables(self, labels, columns):
+ tables = []
+ label_names = [label.name for label in labels]
+ p_table = PerfTable(self.experiment, label_names)
+
+ if not p_table.perf_data:
+ return tables
+
+ for benchmark in p_table.perf_data:
+ ben = None
+ for ben in self.benchmarks:
+ if ben.name == benchmark:
+ break
+
+ ben_table = self._GetTableHeader(ben)
+ tables.append(ben_table)
+ benchmark_data = p_table.perf_data[benchmark]
+ table = []
+ for event in benchmark_data:
+ tg = TableGenerator(benchmark_data[event], label_names)
+ table = tg.GetTable()
+ parsed_columns = self._ParseColumn(columns, ben.iterations)
+ tf = TableFormatter(table, parsed_columns)
+ tf.GenerateCellTable()
+ tf.AddColumnName()
+ tf.AddLabelName()
+ tf.AddHeader(str(event))
+ table = tf.GetCellTable(headers=False)
+ tables.append(table)
+ return tables
+
def PrintTables(self, tables, out_to):
output = ""
+ if not tables:
+ return output
for table in tables:
if out_to == "HTML":
tp = TablePrinter(table, TablePrinter.HTML)
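
_GetPerfTables assumes PerfTable exposes perf_data as a nested mapping, benchmark -> perf event -> per-label rows, and emits one banner table per benchmark followed by one data table per event; the new empty-list guard in PrintTables covers the case where no perf data was collected. A toy traversal of that assumed shape (the literal data here is invented for illustration):

  # Assumed shape, not the real PerfTable class: benchmark -> event -> rows.
  perf_data = {
      "page_cycler": {
          "cycles": {"image1": [1.0e9], "image2": [1.1e9]},
          "instructions": {"image1": [2.0e9], "image2": [2.1e9]},
      },
  }

  for benchmark, events in perf_data.items():
    print("Benchmark: %s" % benchmark)      # one banner table per benchmark
    for event, rows in events.items():      # one data table per perf event
      print("  %s: %d labels" % (event, len(rows)))
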
@@ -126,6 +171,8 @@ class ResultsReport(object):
pass
output += tp.Print()
return output
+
+
class TextResultsReport(ResultsReport):
TEXT = """
===========================================
@@ -137,10 +184,30 @@ Summary
-------------------------------------------
%s
+
+Number of re-images: %s
+
+-------------------------------------------
+Benchmark Run Status
-------------------------------------------
+%s
+
+
+-------------------------------------------
+Perf Data
+-------------------------------------------
+%s
+
+
+
Experiment File
-------------------------------------------
%s
+
+
+CPUInfo
+-------------------------------------------
+%s
===========================================
"""
@@ -148,17 +215,46 @@ Experiment File
super(TextResultsReport, self).__init__(experiment)
self.email = email
+ def GetStatusTable(self):
+ """Generate the status table by the tabulator."""
+ table = [["", ""]]
+ columns = [Column(LiteralResult(iteration=0), Format(), "Status"),
+ Column(LiteralResult(iteration=1), Format(), "Failing Reason")]
+
+ for benchmark_run in self.benchmark_runs:
+ status = [benchmark_run.name, [benchmark_run.timeline.GetLastEvent(),
+ benchmark_run.failure_reason]]
+ table.append(status)
+ tf = TableFormatter(table, columns)
+ cell_table = tf.GetCellTable()
+ return [cell_table]
+
def GetReport(self):
+ """Generate the report for email and console."""
+ status_table = self.GetStatusTable()
summary_table = self.GetSummaryTables()
full_table = self.GetFullTables()
+ perf_table = self.GetSummaryTables(perf=True)
+ if not perf_table:
+ perf_table = None
if not self.email:
return self.TEXT % (self.experiment.name,
self.PrintTables(summary_table, "CONSOLE"),
- self.experiment.experiment_file)
+ self.experiment.machine_manager.num_reimages,
+ self.PrintTables(status_table, "CONSOLE"),
+ self.PrintTables(perf_table, "CONSOLE"),
+ self.experiment.experiment_file,
+ self.experiment.machine_manager.GetAllCPUInfo(
+ self.experiment.labels))
return self.TEXT % (self.experiment.name,
self.PrintTables(summary_table, "EMAIL"),
- self.experiment.experiment_file)
+ self.experiment.machine_manager.num_reimages,
+ self.PrintTables(status_table, "EMAIL"),
+ self.PrintTables(perf_table, "EMAIL"),
+ self.experiment.experiment_file,
+ self.experiment.machine_manager.GetAllCPUInfo(
+ self.experiment.labels))
class HTMLResultsReport(ResultsReport):
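
GetStatusTable relies on LiteralResult(iteration=N) picking the Nth entry out of each row's value list, so a row shaped [name, [last_event, failure_reason]] fans out into the "Status" and "Failing Reason" columns. Assuming that indexing behaviour (inferred from the column definitions above, not verified against utils.tabulator), the data flow reduces to:

  rows = [
      ["octane_run1", ["SUCCEEDED", ""]],
      ["octane_run2", ["FAILED", "ssh timeout"]],
  ]

  for name, (status, reason) in rows:
    # Column(LiteralResult(iteration=0)) surfaces status;
    # Column(LiteralResult(iteration=1)) surfaces the failing reason.
    print("%-12s %-10s %s" % (name, status, reason))
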
@@ -243,6 +339,7 @@ pre {
google.setOnLoadCallback(init);
function init() {
switchTab('summary', 'html');
+ %s
switchTab('full', 'html');
drawTable();
}
@@ -268,6 +365,7 @@ pre {
</div>
%s
</div>
+ %s
<div class='results-section'>
<div class='results-section-title'>Charts</div>
<div class='results-section-content'>%s</div>
@@ -291,6 +389,18 @@ pre {
</html>
"""
+ PERF_HTML = """
+ <div class='results-section'>
+ <div class='results-section-title'>Perf Table</div>
+ <div class='results-section-content'>
+ <div id='perf-html'>%s</div>
+ <div id='perf-text'><pre>%s</pre></div>
+ <div id='perf-tsv'><pre>%s</pre></div>
+ </div>
+ %s
+ </div>
+"""
+
def __init__(self, experiment):
super(HTMLResultsReport, self).__init__(experiment)
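
On the HTML side the perf section is strictly optional: when GetSummaryTables(perf=True) comes back empty, both the PERF_HTML block and its switchTab initializer collapse to empty strings, and the page renders exactly as it did before this change. The conditional composition, reduced to a runnable sketch (template abridged from the diff):

  PERF_HTML = "<div id='perf-html'>%s</div>"  # abridged stand-in template

  perf_table = []  # pretend GetSummaryTables(perf=True) found no perf data
  if perf_table:
    perf_html = PERF_HTML % "<table>...</table>"
    perf_init = "switchTab('perf', 'html');"
  else:
    perf_html = ""  # section omitted from the page body
    perf_init = ""  # no extra call emitted into init()

  print(repr(perf_init) + " " + repr(perf_html))
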
@@ -313,11 +423,26 @@ pre {
summary_table = self.GetSummaryTables()
full_table = self.GetFullTables()
- return self.HTML % (chart_javascript,
+ perf_table = self.GetSummaryTables(perf=True)
+ if perf_table:
+ perf_html = self.PERF_HTML % (
+ self.PrintTables(perf_table, "HTML"),
+ self.PrintTables(perf_table, "PLAIN"),
+ self.PrintTables(perf_table, "TSV"),
+ self._GetTabMenuHTML("perf")
+ )
+ perf_init = "switchTab('perf', 'html');"
+ else:
+ perf_html = ""
+ perf_init = ""
+
+ return self.HTML % (perf_init,
+ chart_javascript,
self.PrintTables(summary_table, "HTML"),
self.PrintTables(summary_table, "PLAIN"),
self.PrintTables(summary_table, "TSV"),
self._GetTabMenuHTML("summary"),
+ perf_html,
chart_divs,
self.PrintTables(full_table, "HTML"),
self.PrintTables(full_table, "PLAIN"),