author     Zhizhou Yang <zhizhouy@google.com>      2020-01-15 16:25:04 -0800
committer  Commit Bot <commit-bot@chromium.org>    2020-01-30 00:33:44 +0000
commit     5534af8f4f31df22ca307e3e3faa16487fa3d2d2 (patch)
tree       771f9865e956551c7c70c3de99fad0b72aef52ce /crosperf/results_report.py
parent     658d77957b84def71c77d25229df9845fdb7ee9c (diff)
download   toolchain-utils-5534af8f4f31df22ca307e3e3faa16487fa3d2d2.tar.gz
crosperf: migration to python 3
This patch migrates crosperf and its utils to python 3.

TEST=Passed presubmit check; tested with simple experiment locally.
BUG=chromium:1011676

Change-Id: Ib2a9f9c7cf6a1bb1d0b42a1dd3d9e3cbb4d70a36
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/2003796
Tested-by: Zhizhou Yang <zhizhouy@google.com>
Reviewed-by: Mike Frysinger <vapier@chromium.org>
Reviewed-by: Caroline Tice <cmtice@chromium.org>
Commit-Queue: Zhizhou Yang <zhizhouy@google.com>
Auto-Submit: Zhizhou Yang <zhizhouy@google.com>
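The recurring change in this patch is the dict-iteration API: Python 3 dropped dict.iteritems(), while dict.items() exists on both Python 2 and Python 3 (and returns a lazy view on Python 3). A minimal standalone sketch of the pattern, using made-up sample data rather than crosperf's real report structures:

# Sketch only: sample_report and threshold are illustrative, not crosperf data.
sample_report = {'cycles': {'foo': 12.5, 'bar': 0.3}, 'instructions': {'foo': 9.1}}
threshold = 1.0

def filter_functions(percentages):
  # dict.items() works on Python 2 and 3; dict.iteritems() is Python 2 only.
  return {fn: pct for fn, pct in percentages.items() if pct >= threshold}

filtered = {event: filter_functions(m) for event, m in sample_report.items()}
print(filtered)  # {'cycles': {'foo': 12.5}, 'instructions': {'foo': 9.1}}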
Diffstat (limited to 'crosperf/results_report.py')
-rw-r--r--  crosperf/results_report.py  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/crosperf/results_report.py b/crosperf/results_report.py
index 81591db7..ff6c4f96 100644
--- a/crosperf/results_report.py
+++ b/crosperf/results_report.py
@@ -100,10 +100,10 @@ def _FilterPerfReport(event_threshold, report):
 
   def filter_dict(m):
     return {
-        fn_name: pct for fn_name, pct in m.iteritems() if pct >= event_threshold
+        fn_name: pct for fn_name, pct in m.items() if pct >= event_threshold
     }
 
-  return {event: filter_dict(m) for event, m in report.iteritems()}
+  return {event: filter_dict(m) for event, m in report.items()}
 
 
 class _PerfTable(object):
@@ -186,7 +186,7 @@ def _GetTables(benchmark_results, columns, table_type):
   iter_counts = benchmark_results.iter_counts
   result = benchmark_results.run_keyvals
   tables = []
-  for bench_name, runs in result.iteritems():
+  for bench_name, runs in result.items():
     iterations = iter_counts[bench_name]
     ben_table = _GetResultsTableHeader(bench_name, iterations)
@@ -438,7 +438,7 @@ class TextResultsReport(ResultsReport):
 
 def _GetHTMLCharts(label_names, test_results):
   charts = []
-  for item, runs in test_results.iteritems():
+  for item, runs in test_results.items():
     # Fun fact: label_names is actually *entirely* useless as a param, since we
     # never add headers. We still need to pass it anyway.
     table = TableGenerator(runs, label_names).GetTable()
@@ -734,7 +734,7 @@ class JSONResultsReport(ResultsReport):
     label_names = benchmark_results.label_names
     summary_field_defaults = self.summary_field_defaults
     final_results = []
-    for test, test_results in benchmark_results.run_keyvals.iteritems():
+    for test, test_results in benchmark_results.run_keyvals.items():
       for label_name, label_results in zip(label_names, test_results):
         for iter_results in label_results:
           passed = iter_results.get('retval') == 0
@@ -767,7 +767,7 @@ class JSONResultsReport(ResultsReport):
           # Get detailed results.
           detail_results = {}
           json_results['detailed_results'] = detail_results
-          for k, v in iter_results.iteritems():
+          for k, v in iter_results.items():
             if k == 'retval' or k == 'PASS' or k == ['PASS'] or v == 'PASS':
              continue
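The final hunk applies the same substitution inside JSONResultsReport, where per-iteration keyvals are copied into detail_results while bookkeeping keys are skipped. A rough standalone sketch of that skip pattern, with made-up keyvals and an assumed copy step (the diff does not show the loop body past the continue):

# Illustrative only: iter_results below is invented; the copy into
# detail_results is an assumption about what follows the continue.
iter_results = {'retval': 0, 'PASS': True, 'speedometer__score': 123.4}
detail_results = {}
for k, v in iter_results.items():  # items() replaces Python 2's iteritems()
  if k == 'retval' or k == 'PASS' or k == ['PASS'] or v == 'PASS':
    continue
  detail_results[k] = v
print(detail_results)  # {'speedometer__score': 123.4}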