path: root/android_bench_suite
author    Bob Haarman <inglorion@chromium.org>  2020-09-19 00:00:06 +0000
committer Bob Haarman <inglorion@chromium.org>  2020-09-25 21:42:42 +0000
commit    8223d16e040748ad6a91a87f3ce1cfc13db37f4b (patch)
tree      b58dc769d183b3af7c76e6a6cec2800343d4e4f9 /android_bench_suite
parent    4f7eb71f9fa78f8710e1deb57d441f51fa74af3b (diff)
download  toolchain-utils-8223d16e040748ad6a91a87f3ce1cfc13db37f4b.tar.gz
fix formatting/lint issues pointed out by repohooks
Previous changes resulted in some complaints about formatting and Python 3 compatibility from the repo hooks. This change fixes those.

BUG=None
TEST=repo upload --cbr .  # check that it no longer complains

Change-Id: I99cc51dcb8d499d59b7b47817f4cef8fa6ba5059
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/2419831
Tested-by: Bob Haarman <inglorion@chromium.org>
Reviewed-by: Manoj Gupta (OoO) <manojgupta@chromium.org>
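For reference, the Python 3 compatibility portion of this change boils down to three substitutions: dict.iteritems() becomes dict.items(), itertools.izip() becomes the zip() builtin, and e.message becomes the exception itself. A minimal, self-contained sketch of those idioms follows; the dict, labels, and exception here are illustrative placeholders, not data from fix_skia_results.py.

    # Illustrative only: made-up data showing the idioms this change migrates.
    timings = {'bench_a': [1.0, 2.0], 'bench_b': [3.0, 4.0]}

    # Python 2: timings.iteritems()  ->  Python 3: timings.items()
    for name, samples in timings.items():
      # Python 2: itertools.izip(a, b)  ->  Python 3: the zip() builtin
      for sample, label in zip(samples, ('first', 'second')):
        print(name, label, sample)

    try:
      raise ValueError('bad sample')
    except ValueError as e:
      # Python 2: e.message  ->  Python 3: pass the exception (or str(e)) directly
      print('error: %s' % e)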
Diffstat (limited to 'android_bench_suite')
-rwxr-xr-x  android_bench_suite/fix_skia_results.py  20
1 file changed, 11 insertions, 9 deletions
diff --git a/android_bench_suite/fix_skia_results.py b/android_bench_suite/fix_skia_results.py
index bdab80a9..84dee5a5 100755
--- a/android_bench_suite/fix_skia_results.py
+++ b/android_bench_suite/fix_skia_results.py
@@ -5,6 +5,7 @@
# found in the LICENSE file.
#
# pylint: disable=cros-logging-import
+
"""Transforms skia benchmark results to ones that crosperf can understand."""
from __future__ import print_function
@@ -57,7 +58,9 @@ def _GetTimeMultiplier(label_name):
def _GetTimeDenom(ms):
- """Given a list of times (in milliseconds), find a time unit in which
+ """Express times in a common time unit.
+
+ Given a list of times (in milliseconds), find a time unit in which
they can all be expressed.
Returns the unit name, and `ms` normalized to that time unit.
@@ -95,9 +98,9 @@ def _TransformBenchmarks(raw_benchmarks):
# statistic...
benchmarks = raw_benchmarks['results']
results = []
- for bench_name, bench_result in benchmarks.iteritems():
+ for bench_name, bench_result in benchmarks.items():
try:
- for cfg_name, keyvals in bench_result.iteritems():
+ for cfg_name, keyvals in bench_result.items():
# Some benchmarks won't have timing data (either it won't exist at all,
# or it'll be empty); skip them.
samples = keyvals.get('samples')
@@ -110,17 +113,16 @@ def _TransformBenchmarks(raw_benchmarks):
friendly_name = _GetFamiliarName(bench_name)
if len(results) < len(samples):
- results.extend({
- 'retval': 0
- } for _ in range(len(samples) - len(results)))
+ results.extend(
+ {'retval': 0} for _ in range(len(samples) - len(results)))
time_mul = _GetTimeMultiplier(friendly_name)
- for sample, app in itertools.izip(samples, results):
+ for sample, app in zip(samples, results):
assert friendly_name not in app
app[friendly_name] = sample * time_mul
except (KeyError, ValueError) as e:
- logging.error('While converting "%s" (key: %s): %s',
- bench_result, bench_name, e.message)
+ logging.error('While converting "%s" (key: %s): %s', bench_result,
+ bench_name, e)
raise
# Realistically, [results] should be multiple results, where each entry in the
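
For context, a minimal sketch of the pad-then-zip pattern the final hunk reformats: results is padded with {'retval': 0} entries until there is one dict per sample, then each sample is attached to its per-run dict under the benchmark's friendly name. The sample values and the 'example_bench_ms' key below are hypothetical, not real skia output.

    # Hypothetical data mirroring the shape used by _TransformBenchmarks.
    samples = [12.5, 13.1, 12.9]   # one timing per benchmark run
    results = []                   # one dict per run, shared across benchmarks

    # Pad results so there is one dict per sample (same pattern as the hunk above).
    if len(results) < len(samples):
      results.extend({'retval': 0} for _ in range(len(samples) - len(results)))

    # Attach this benchmark's samples to the per-run dicts.
    friendly_name = 'example_bench_ms'   # hypothetical key
    for sample, app in zip(samples, results):
      assert friendly_name not in app
      app[friendly_name] = sample

    print(results)
    # [{'retval': 0, 'example_bench_ms': 12.5}, {'retval': 0, 'example_bench_ms': 13.1}, ...]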