author     Zhizhou Yang <zhizhouy@google.com>        2019-02-28 17:32:13 -0800
committer  chrome-bot <chrome-bot@chromium.org>      2019-03-06 06:51:23 -0800
commit     a1a431178f52d1b7b8b24ea2851b509627ddb89d (patch)
tree       f966565ad38b13ebca08259f5b3c284c8536a265 /crosperf
parent     cd2cf15e7642d2fc7def729ba72f98258f5ce229 (diff)
download   toolchain-utils-a1a431178f52d1b7b8b24ea2851b509627ddb89d.tar.gz
crosperf: replace cpu cycles in report with samples
What we actually collect from a benchmark run are samples from the perf tool, so "cpu cycles" is not an accurate name.

BUG=chromium:936573
TEST=Tested with cwp and general mode on eve; passed all unittests.

Change-Id: I35533cea0987c4e1b112498cc1b0271eaab665ae
Reviewed-on: https://chromium-review.googlesource.com/1495963
Commit-Ready: Zhizhou Yang <zhizhouy@google.com>
Tested-by: Zhizhou Yang <zhizhouy@google.com>
Reviewed-by: Caroline Tice <cmtice@chromium.org>
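For reference, the renamed GetSamples() (diff below) derives its count by parsing the output of `perf report -n -s dso`, where each matching row carries the raw sample count in its second column. The following is a minimal, standalone sketch of that parsing step, assuming the row format quoted in the code comments; the helper name and the example input are illustrative and not part of the patch:

# Sketch of the sample-count extraction performed by GetSamples().
# Assumes `perf report -n -s dso` rows of the form:
#   45.42%        237210  chrome
# The helper name and the example input are illustrative only.
def count_samples(report_output):
  """Sum the sample counts (second column) of matching perf report rows."""
  samples = 0
  for line in report_output.split('\n'):
    attr = line.split()
    # A data row has three fields and the first one is a percentage.
    if len(attr) == 3 and '%' in attr[0]:
      samples += int(attr[1])
  return samples

print(count_samples('  45.42%   237210  chrome\n  10.01%    52301  chrome'))
# -> 289511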
Diffstat (limited to 'crosperf')
-rw-r--r--  crosperf/results_cache.py       31
-rw-r--r--  crosperf/results_organizer.py    7
-rw-r--r--  crosperf/results_report.py      101
3 files changed, 75 insertions(+), 64 deletions(-)
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index 3ff58dd3..bc932760 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -188,8 +189,8 @@ class Result(object):
keyvals_dict = self.AppendTelemetryUnits(keyvals_dict, units_dict)
return keyvals_dict
- def GetCPUCycles(self):
- cpu_cycles = 0
+ def GetSamples(self):
+ samples = 0
for perf_data_file in self.perf_data_files:
chroot_perf_data_file = misc.GetInsideChrootPath(self.chromeos_root,
perf_data_file)
@@ -212,25 +213,25 @@ class Result(object):
# if user want an exact match for the field they want.
exact_match = '"%s"' % self.cwp_dso
- command = ('%s report -n -s dso -i %s 2> /dev/null | grep %s'
- % (perf_file, chroot_perf_data_file, exact_match))
+ command = ('%s report -n -s dso -i %s 2> /dev/null | grep %s' %
+ (perf_file, chroot_perf_data_file, exact_match))
_, result, _ = self.ce.ChrootRunCommandWOutput(self.chromeos_root,
command)
# Accumulate the sample count for all matched fields.
# Each line looks like this:
# 45.42% 237210 chrome
# And we want the second number which is the sample count.
- cpu_cycle = 0
+ sample = 0
try:
for line in result.split('\n'):
attr = line.split()
if len(attr) == 3 and '%' in attr[0]:
- cpu_cycle += int(attr[1])
+ sample += int(attr[1])
except:
raise RuntimeError('Cannot parse perf dso result')
- cpu_cycles += cpu_cycle
- return cpu_cycles
+ samples += sample
+ return samples
def GetResultsDir(self):
mo = re.search(r'Results placed in (\S+)', self.out)
@@ -382,10 +383,10 @@ class Result(object):
print('\n ** WARNING **: Had to use deprecated output-method to '
'collect results.\n')
self.keyvals = self.GetKeyvals()
- # If we are in CWP approximation mode, we want to collect DSO CPU cycles
+ # If we are in CWP approximation mode, we want to collect DSO samples
# for each perf.data file
if self.cwp_dso:
- self.keyvals['cpu_cycles'] = [self.GetCPUCycles(), u'cycles']
+ self.keyvals['samples'] = [self.GetSamples(), u'samples']
self.keyvals['retval'] = self.retval
# Generate report from all perf.data files.
# Now parse all perf report files and include them in keyvals.
@@ -416,8 +417,8 @@ class Result(object):
self.retval = pickle.load(f)
# Untar the tarball to a temporary directory
- self.temp_dir = tempfile.mkdtemp(dir=os.path.join(self.chromeos_root,
- 'chroot', 'tmp'))
+ self.temp_dir = tempfile.mkdtemp(
+ dir=os.path.join(self.chromeos_root, 'chroot', 'tmp'))
command = ('cd %s && tar xf %s' %
(self.temp_dir, os.path.join(cache_dir, AUTOTEST_TARBALL)))
@@ -489,8 +490,8 @@ class Result(object):
if ret:
command = 'rm -rf {0}'.format(temp_dir)
self.ce.RunCommand(command)
- raise RuntimeError('Could not move dir %s to dir %s' % (temp_dir,
- cache_dir))
+ raise RuntimeError(
+ 'Could not move dir %s to dir %s' % (temp_dir, cache_dir))
@classmethod
def CreateFromRun(cls,
@@ -804,7 +805,7 @@ class MockResultsCache(ResultsCache):
class MockResult(Result):
"""Class for mock testing, corresponding to Result class."""
- def PopulateFromRun(self, out, err, retval, test, suite):
+ def PopulateFromRun(self, out, err, retval, test, suite, cwp_dso, weight):
self.out = out
self.err = err
self.retval = retval
diff --git a/crosperf/results_organizer.py b/crosperf/results_organizer.py
index 5410d6d8..2739f739 100644
--- a/crosperf/results_organizer.py
+++ b/crosperf/results_organizer.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -189,9 +190,9 @@ def OrganizeResults(benchmark_runs, labels, benchmarks=None, json_report=False):
# Did not find test_name in json file; show everything.
show_all_results = True
if benchmark_run.result.cwp_dso:
- # If we are in cwp approximation mode, we only care about cpu_cycles
- if 'cpu_cycles' in benchmark_run.result.keyvals:
- cur_dict['cpu_cycles'] = benchmark_run.result.keyvals['cpu_cycles']
+ # If we are in cwp approximation mode, we only care about samples
+ if 'samples' in benchmark_run.result.keyvals:
+ cur_dict['samples'] = benchmark_run.result.keyvals['samples']
cur_dict['retval'] = benchmark_run.result.keyvals['retval']
else:
for test_key in benchmark_run.result.keyvals:
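The hunk above means that in CWP approximation mode the organizer keeps only the 'samples' and 'retval' keyvals for each run. A small sketch of that filtering, under the same assumption; the function name and example input are illustrative, not part of the patch:

# Sketch of the keyval filtering done for a CWP-approximation run
# (results_organizer.py hunk above). The function name is illustrative.
def filter_cwp_keyvals(keyvals):
  """Keep only the 'samples' entry (if present) and 'retval'."""
  cur_dict = {}
  if 'samples' in keyvals:
    cur_dict['samples'] = keyvals['samples']
  cur_dict['retval'] = keyvals['retval']
  return cur_dict

print(filter_cwp_keyvals({'samples': [289511, u'samples'],
                          'retval': 0,
                          'telemetry_metric': 1.23}))
# -> {'samples': [289511, u'samples'], 'retval': 0}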
diff --git a/crosperf/results_report.py b/crosperf/results_report.py
index 1466c581..ba4ccd88 100644
--- a/crosperf/results_report.py
+++ b/crosperf/results_report.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -16,7 +17,7 @@ from cros_utils.tabulator import Cell
from cros_utils.tabulator import CoeffVarFormat
from cros_utils.tabulator import CoeffVarResult
from cros_utils.tabulator import Column
-from cros_utils.tabulator import CPUCyclesTableGenerator
+from cros_utils.tabulator import SamplesTableGenerator
from cros_utils.tabulator import Format
from cros_utils.tabulator import IterationResult
from cros_utils.tabulator import GmeanRatioResult
@@ -54,12 +55,12 @@ def ParseChromeosImage(chromeos_image):
part after '/chroot/tmp' in the second case.
Args:
- chromeos_image: string containing the path to the chromeos_image that
- crosperf used for the test.
+ chromeos_image: string containing the path to the chromeos_image that
+ crosperf used for the test.
Returns:
- version, image: The results of parsing the input string, as explained
- above.
+ version, image: The results of parsing the input string, as explained
+ above.
"""
# Find the Chromeos Version, e.g. R45-2345.0.0.....
# chromeos_image should have been something like:
@@ -97,8 +98,7 @@ def _FilterPerfReport(event_threshold, report):
def filter_dict(m):
return {
- fn_name: pct
- for fn_name, pct in m.iteritems() if pct >= event_threshold
+ fn_name: pct for fn_name, pct in m.iteritems() if pct >= event_threshold
}
return {event: filter_dict(m) for event, m in report.iteritems()}
@@ -159,6 +159,7 @@ def _GetResultsTableHeader(ben_name, iterations):
cell.header = True
return [[cell]]
+
def _GetDSOHeader(cwp_dso):
info = 'CWP_DSO: %s' % cwp_dso
cell = Cell()
@@ -166,6 +167,7 @@ def _GetDSOHeader(cwp_dso):
cell.header = False
return [[cell]]
+
def _ParseColumn(columns, iteration):
new_column = []
for column in columns:
@@ -230,21 +232,21 @@ def _GetPerfTables(benchmark_results, columns, table_type):
tables.append(table)
return tables
-def _GetCPUTables(benchmark_results, columns, table_type):
+
+def _GetSamplesTables(benchmark_results, columns, table_type):
tables = []
dso_header_table = _GetDSOHeader(benchmark_results.cwp_dso)
tables.append(dso_header_table)
- (table, new_keyvals, iter_counts) = CPUCyclesTableGenerator(
- benchmark_results.run_keyvals,
- benchmark_results.label_names,
- benchmark_results.iter_counts,
- benchmark_results.weights).GetTable()
+ (table, new_keyvals, iter_counts) = SamplesTableGenerator(
+ benchmark_results.run_keyvals, benchmark_results.label_names,
+ benchmark_results.iter_counts, benchmark_results.weights).GetTable()
parsed_columns = _ParseColumn(columns, 1)
- tf = TableFormatter(table, parsed_columns, cpu_table=True)
+ tf = TableFormatter(table, parsed_columns, samples_table=True)
cell_table = tf.GetCellTable(table_type)
tables.append(cell_table)
return (tables, new_keyvals, iter_counts)
+
class ResultsReport(object):
"""Class to handle the report format."""
MAX_COLOR_CODE = 255
@@ -256,14 +258,14 @@ class ResultsReport(object):
def _GetTablesWithColumns(self, columns, table_type, summary_type):
if summary_type == 'perf':
get_tables = _GetPerfTables
- elif summary_type == 'cpu':
- get_tables = _GetCPUTables
+ elif summary_type == 'samples':
+ get_tables = _GetSamplesTables
else:
get_tables = _GetTables
ret = get_tables(self.benchmark_results, columns, table_type)
- # If we are generating a CPU summary table, the return value of get_tables
- # will be a tuple, and we will update the benchmark_results for composite
- # benchmark so that full table can use it.
+ # If we are generating a samples summary table, the return value of
+ # get_tables will be a tuple, and we will update the benchmark_results for
+ # composite benchmark so that full table can use it.
if isinstance(ret, tuple):
self.benchmark_results.run_keyvals = ret[1]
self.benchmark_results.iter_counts = ret[2]
@@ -272,29 +274,35 @@ class ResultsReport(object):
def GetFullTables(self, perf=False):
columns = [
- Column(RawResult(), Format()), Column(MinResult(), Format()), Column(
- MaxResult(), Format()), Column(AmeanResult(), Format()), Column(
- StdResult(), Format(), 'StdDev'),
- Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'), Column(
- GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'), Column(
- PValueResult(), PValueFormat(), 'p-value')
+ Column(RawResult(), Format()),
+ Column(MinResult(), Format()),
+ Column(MaxResult(), Format()),
+ Column(AmeanResult(), Format()),
+ Column(StdResult(), Format(), 'StdDev'),
+ Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
+ Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
+ Column(PValueResult(), PValueFormat(), 'p-value')
]
return self._GetTablesWithColumns(columns, 'full', perf)
def GetSummaryTables(self, summary_type=''):
- if summary_type == 'cpu':
- columns = [Column(IterationResult(), Format(), 'Iterations [Pass:Fail]'),
- Column(AmeanResult(), Format(), 'Weighted CPU-cycles Amean'),
- Column(StdResult(), Format(), 'StdDev'),
- Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
- Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
- Column(PValueResult(), PValueFormat(), 'p-value')]
+ if summary_type == 'samples':
+ columns = [
+ Column(IterationResult(), Format(), 'Iterations [Pass:Fail]'),
+ Column(AmeanResult(), Format(), 'Weighted Samples Amean'),
+ Column(StdResult(), Format(), 'StdDev'),
+ Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
+ Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
+ Column(PValueResult(), PValueFormat(), 'p-value')
+ ]
else:
- columns = [Column(AmeanResult(), Format()),
- Column(StdResult(), Format(), 'StdDev'),
- Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
- Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
- Column(PValueResult(), PValueFormat(), 'p-value')]
+ columns = [
+ Column(AmeanResult(), Format()),
+ Column(StdResult(), Format(), 'StdDev'),
+ Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
+ Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
+ Column(PValueResult(), PValueFormat(), 'p-value')
+ ]
return self._GetTablesWithColumns(columns, 'summary', summary_type)
@@ -352,8 +360,8 @@ class TextResultsReport(ResultsReport):
"""Generate the status table by the tabulator."""
table = [['', '']]
columns = [
- Column(LiteralResult(iteration=0), Format(), 'Status'), Column(
- LiteralResult(iteration=1), Format(), 'Failing Reason')
+ Column(LiteralResult(iteration=0), Format(), 'Status'),
+ Column(LiteralResult(iteration=1), Format(), 'Failing Reason')
]
for benchmark_run in self.experiment.benchmark_runs:
@@ -380,16 +388,16 @@ class TextResultsReport(ResultsReport):
if not self.benchmark_results.cwp_dso:
summary_table = _PrintTable(self.GetSummaryTables(), output_type)
else:
- summary_table = _PrintTable(self.GetSummaryTables(summary_type='cpu'),
- output_type)
+ summary_table = _PrintTable(
+ self.GetSummaryTables(summary_type='samples'), output_type)
sections.append(self._MakeSection('Summary', summary_table))
if experiment is not None:
table = _PrintTable(self.GetStatusTable(), output_type)
sections.append(self._MakeSection('Benchmark Run Status', table))
- perf_table = _PrintTable(self.GetSummaryTables(summary_type='perf'),
- output_type)
+ perf_table = _PrintTable(
+ self.GetSummaryTables(summary_type='perf'), output_type)
if perf_table and not self.benchmark_results.cwp_dso:
sections.append(self._MakeSection('Perf Data', perf_table))
@@ -410,8 +418,9 @@ def _GetHTMLCharts(label_names, test_results):
# never add headers. We still need to pass it anyway.
table = TableGenerator(runs, label_names).GetTable()
columns = [
- Column(AmeanResult(), Format()), Column(MinResult(), Format()), Column(
- MaxResult(), Format())
+ Column(AmeanResult(), Format()),
+ Column(MinResult(), Format()),
+ Column(MaxResult(), Format())
]
tf = TableFormatter(table, columns)
data_table = tf.GetCellTable('full', headers=False)
@@ -464,7 +473,7 @@ class HTMLResultsReport(ResultsReport):
summary_table = self.GetSummaryTables()
perf_table = self.GetSummaryTables(summary_type='perf')
else:
- summary_table = self.GetSummaryTables(summary_type='cpu')
+ summary_table = self.GetSummaryTables(summary_type='samples')
perf_table = None
full_table = self.GetFullTables()
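Taken together, the report-side changes rename the 'cpu' summary type to 'samples' without altering the selection logic: CWP mode requests the weighted-samples summary and skips the perf table, general mode requests both. A runnable sketch of that dispatch using a stand-in class; FakeReport and build_summary are illustrative and not part of crosperf:

# Sketch of summary-table selection after the 'cpu' -> 'samples' rename.
# FakeReport is a stand-in for ResultsReport; only the dispatch is shown.
class FakeReport(object):
  """Stand-in that records which summary type was requested."""

  def GetSummaryTables(self, summary_type=''):
    return 'tables(summary_type=%r)' % summary_type

def build_summary(report, cwp_dso):
  if not cwp_dso:
    # General mode: plain summary plus a perf-event summary table.
    return (report.GetSummaryTables(),
            report.GetSummaryTables(summary_type='perf'))
  # CWP approximation mode: weighted-samples summary only, no perf table.
  return report.GetSummaryTables(summary_type='samples'), None

print(build_summary(FakeReport(), cwp_dso='chrome'))
# -> ("tables(summary_type='samples')", None)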