author     George Burgess IV <gbiv@google.com>       2019-07-24 23:20:57 -0700
committer  George Burgess <gbiv@chromium.org>        2019-07-25 16:08:53 +0000
commit     9a6dae865659ce5e32694ae92bd3f1f7310d0049 (patch)
tree       fb35dd1b9746822d1d98e6777744e288ba20bd8d
parent     adcb8bff41c7f3756756cbe581547897ad49d098 (diff)
download   toolchain-utils-9a6dae865659ce5e32694ae92bd3f1f7310d0049.tar.gz
toolchain-utils: remove all xranges
This removes all mention of xrange from toolchain-utils (modulo ones being
changed in other CLs that are in flight). It's now an apparent lint error to
use xrange, and it hinders our move to python3. As commented on
If90d26664c70ccb73750f17573b89933fdb048f4, xrange -> range in python2 is
really only a space concern (or speed in pathological cases), so migrations
of this nature are generally super straightforward. I glanced at each of
these callsites, and none of them appear to be pathological, so my hope is
that this should all be Just Fine :)

(Also fun to note that this includes a .diff file that has python code
embedded in it.)

BUG=None
TEST=Presubmit tests

Change-Id: Ic9f3ac3a5044d7a07da8a249bc505278d98203de
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/1717130
Commit-Queue: George Burgess <gbiv@chromium.org>
Commit-Queue: Luis Lozano <llozano@chromium.org>
Legacy-Commit-Queue: Commit Bot <commit-bot@chromium.org>
Reviewed-by: George Burgess <gbiv@chromium.org>
Tested-by: George Burgess <gbiv@chromium.org>
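For context, a minimal editor's sketch (not part of the commit, using a
hypothetical sum_first_n helper) of why the mechanical xrange -> range swap
is behavior-preserving for plain iteration, and where the Python 2
space/speed concern mentioned above comes from. On Python 3 the swap is the
only valid spelling, since xrange was removed entirely.

    # Editor's sketch, not from the commit: xrange(n) on Python 2 and
    # range(n) on Python 2 or 3 both yield 0, 1, ..., n - 1 in order, so
    # loops like the ones touched by this CL behave identically after
    # the swap.
    def sum_first_n(n):  # hypothetical helper, for illustration only
        total = 0
        for i in range(n):  # was the Python-2-only `xrange(n)` spelling
            total += i
        return total

    if __name__ == '__main__':
        assert sum_first_n(4) == 0 + 1 + 2 + 3
        # The "pathological" case from the commit message: on Python 2,
        # range(10**8) allocates a ~100M-element list up front, while
        # xrange(10**8) -- like Python 3's range -- uses constant memory.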
-rwxr-xr-x  afdo_redaction/redact_profile_test.py      2
-rw-r--r--  android_bench_suite/autotest.diff           8
-rwxr-xr-x  android_bench_suite/fix_skia_results.py     2
-rwxr-xr-x  android_bench_suite/gen_json.py             2
-rwxr-xr-x  android_bench_suite/run.py                  4
-rwxr-xr-x  binary_search_tool/test/cmd_script.py       2
-rwxr-xr-x  buildbot_test_toolchains.py                 2
-rwxr-xr-x  cros_utils/buildbot_json.py                 6
-rw-r--r--  cros_utils/tabulator.py                    10
-rw-r--r--  crosperf/results_report.py                  6
-rwxr-xr-x  crosperf/results_report_unittest.py         2
11 files changed, 23 insertions, 23 deletions
diff --git a/afdo_redaction/redact_profile_test.py b/afdo_redaction/redact_profile_test.py
index 5ccde3e4..27fb534e 100755
--- a/afdo_redaction/redact_profile_test.py
+++ b/afdo_redaction/redact_profile_test.py
@@ -55,7 +55,7 @@ def _generate_repeated_function_body(repeats, fn_name='_some_name'):
num_width = len(str(repeats))
lines = []
- for i in xrange(repeats):
+ for i in range(repeats):
num = str(i).zfill(num_width)
lines.append(num + function_header)
lines.extend(function_body)
diff --git a/android_bench_suite/autotest.diff b/android_bench_suite/autotest.diff
index ef0029ae..c2fed83b 100644
--- a/android_bench_suite/autotest.diff
+++ b/android_bench_suite/autotest.diff
@@ -49,7 +49,7 @@ index 000000000..b233b586a
+ 'binderThroughputTest > /data/local/tmp/bench_result'
+ % os.getenv('TEST_MODE'))
+ # Next 4 runs add to bench_result
-+ for i in xrange(4):
++ for i in range(4):
+ self.client.run('taskset %s /data/local/tmp/'
+ 'binderThroughputTest >> '
+ '/data/local/tmp/bench_result'
@@ -165,11 +165,11 @@ index 000000000..dd6af0b53
+ 'bench_result'), 'w') as f:
+
+ # There are two benchmarks, chrome and camera.
-+ for i in xrange(2):
++ for i in range(2):
+ f.write('Test %d:\n' % i)
+ total_time = 0
+ # Run benchmark for several times for accurancy
-+ for j in xrange(3):
++ for j in range(3):
+ f.write('Iteration %d: ' % j)
+ result = self.client.run('time taskset %s dex2oat'
+ ' --dex-file=data/local/tmp/dex2oat_input/test%d.apk'
@@ -739,7 +739,7 @@ index 000000000..b317bd0f3
+ ' > /data/local/tmp/bench_result'
+ % os.getenv('TEST_MODE'))
+ # Next 4 runs add to bench_result
-+ for i in xrange(4):
++ for i in range(4):
+ self.client.run('taskset %s /data/local/tmp/synthmark'
+ ' >> /data/local/tmp/bench_result'
+ % os.getenv('TEST_MODE'))
diff --git a/android_bench_suite/fix_skia_results.py b/android_bench_suite/fix_skia_results.py
index 6eec6ccf..8c919d35 100755
--- a/android_bench_suite/fix_skia_results.py
+++ b/android_bench_suite/fix_skia_results.py
@@ -111,7 +111,7 @@ def _TransformBenchmarks(raw_benchmarks):
if len(results) < len(samples):
results.extend({
'retval': 0
- } for _ in xrange(len(samples) - len(results)))
+ } for _ in range(len(samples) - len(results)))
time_mul = _GetTimeMultiplier(friendly_name)
for sample, app in itertools.izip(samples, results):
diff --git a/android_bench_suite/gen_json.py b/android_bench_suite/gen_json.py
index ad617ff4..e1252933 100755
--- a/android_bench_suite/gen_json.py
+++ b/android_bench_suite/gen_json.py
@@ -81,7 +81,7 @@ def main(argv):
iteration = arguments.iterations
result = []
- for i in xrange(iteration):
+ for i in range(iteration):
result += collect_data(infile, bench, i)
with get_outfile(outfile, bench) as fout:
diff --git a/android_bench_suite/run.py b/android_bench_suite/run.py
index 55acb663..19d9b36f 100755
--- a/android_bench_suite/run.py
+++ b/android_bench_suite/run.py
@@ -301,7 +301,7 @@ def test_bench(bench, setting_no, iterations, serials, remote, mode):
logging.info('Start running benchmark on device...')
# Run benchmark and tests on DUT
- for i in xrange(iterations):
+ for i in range(iterations):
logging.info('Iteration No.%d:', i)
test_cmd = [
os.path.join(
@@ -463,7 +463,7 @@ def main(argv):
for bench in bench_list:
logging.info('Start building and running benchmark: [%s]', bench)
# Run script for each toolchain settings
- for setting_no in xrange(setting_count):
+ for setting_no in range(setting_count):
build_bench(setting_no, bench, compiler, llvm_version, build_os, cflags,
ldflags)
diff --git a/binary_search_tool/test/cmd_script.py b/binary_search_tool/test/cmd_script.py
index 6940eaae..eb91fe9b 100755
--- a/binary_search_tool/test/cmd_script.py
+++ b/binary_search_tool/test/cmd_script.py
@@ -45,7 +45,7 @@ def Main(argv):
if opt_bisect_limit == -1:
opt_bisect_limit = total_pass
- for i in xrange(1, total_pass + 1):
+ for i in range(1, total_pass + 1):
bisect_str = 'BISECT: %srunning pass (%d) Combine redundant ' \
'instructions on function (f1)' \
% ('NOT ' if i > opt_bisect_limit else '', i)
diff --git a/buildbot_test_toolchains.py b/buildbot_test_toolchains.py
index c2d88733..169b5150 100755
--- a/buildbot_test_toolchains.py
+++ b/buildbot_test_toolchains.py
@@ -129,7 +129,7 @@ class ToolchainComparator(object):
assert mo
image_dict = mo.groupdict()
image_dict['image_type'] = 'chrome-pfq'
- for _ in xrange(2):
+ for _ in range(2):
image_dict['tip'] = str(int(image_dict['tip']) - 1)
nonafdo_image = PFQ_IMAGE_FS.replace('\\', '').format(**image_dict)
if buildbot_utils.DoesImageExist(self._chromeos_root, nonafdo_image):
diff --git a/cros_utils/buildbot_json.py b/cros_utils/buildbot_json.py
index 8a9d9cb8..42a27744 100755
--- a/cros_utils/buildbot_json.py
+++ b/cros_utils/buildbot_json.py
@@ -316,7 +316,7 @@ class NonAddressableNodeList(VirtualNodeList): # pylint: disable=W0223
@property
def cached_children(self):
if self.parent.cached_data is not None:
- for i in xrange(len(self.parent.cached_data[self.subkey])):
+ for i in range(len(self.parent.cached_data[self.subkey])):
yield self[i]
@property
@@ -352,7 +352,7 @@ class NonAddressableNodeList(VirtualNodeList): # pylint: disable=W0223
def __iter__(self):
"""Enables 'for i in obj:'. It returns children."""
if self.data:
- for i in xrange(len(self.data)):
+ for i in range(len(self.data)):
yield self[i]
def __getitem__(self, key):
@@ -868,7 +868,7 @@ class Builds(AddressableNodeList):
# Only cache keys here.
self.cache_keys()
if self._keys:
- for i in xrange(max(self._keys), -1, -1):
+ for i in range(max(self._keys), -1, -1):
yield self[i]
def cache_keys(self):
diff --git a/cros_utils/tabulator.py b/cros_utils/tabulator.py
index e2f27bc4..59e4d426 100644
--- a/cros_utils/tabulator.py
+++ b/cros_utils/tabulator.py
@@ -340,17 +340,17 @@ class SamplesTableGenerator(TableGenerator):
row = [None] * len(header)
row[0] = '%s (samples)' % k
row[1] = 'N/A'
- for label_index in xrange(2, len(row)):
+ for label_index in range(2, len(row)):
row[label_index] = [0] * iterations
for cur_row in table[1:]:
# Iterate through each benchmark
if len(cur_row) > 1:
- for label_index in xrange(2, len(cur_row)):
+ for label_index in range(2, len(cur_row)):
# Iterate through each run in a single benchmark
# each result should look like ((pass, fail), [values_list])
bench_runs = cur_row[label_index][1]
- for index in xrange(iterations):
+ for index in range(iterations):
# Accumulate each run result to composite benchmark run
# If any run fails, then we set this run for composite benchmark
# to None so that we know it fails.
@@ -360,11 +360,11 @@ class SamplesTableGenerator(TableGenerator):
row[label_index][index] = None
else:
# One benchmark totally fails, no valid data will be in final result
- for label_index in xrange(2, len(row)):
+ for label_index in range(2, len(row)):
row[label_index] = [None] * iterations
break
# Calculate pass and fail count for composite benchmark
- for label_index in xrange(2, len(row)):
+ for label_index in range(2, len(row)):
run_pass = 0
run_fail = 0
for run in row[label_index]:
diff --git a/crosperf/results_report.py b/crosperf/results_report.py
index afd767aa..5f49872b 100644
--- a/crosperf/results_report.py
+++ b/crosperf/results_report.py
@@ -90,7 +90,7 @@ def _AppendUntilLengthIs(gen, the_list, target_len):
Uses `gen` to generate elements.
"""
- the_list.extend(gen() for _ in xrange(target_len - len(the_list)))
+ the_list.extend(gen() for _ in range(target_len - len(the_list)))
return the_list
@@ -132,7 +132,7 @@ class _PerfTable(object):
self.perf_data = {}
for label in label_names:
for bench_name, bench_iterations in benchmark_names_and_iterations:
- for i in xrange(bench_iterations):
+ for i in range(bench_iterations):
report = read_perf_report(label, bench_name, i)
self._ProcessPerfReport(report, label, bench_name, i)
@@ -177,7 +177,7 @@ def _ParseColumn(columns, iteration):
else:
new_column.extend(
Column(LiteralResult(i), Format(), str(i + 1))
- for i in xrange(iteration))
+ for i in range(iteration))
return new_column
diff --git a/crosperf/results_report_unittest.py b/crosperf/results_report_unittest.py
index 750dcdf4..f2fc9f60 100755
--- a/crosperf/results_report_unittest.py
+++ b/crosperf/results_report_unittest.py
@@ -141,7 +141,7 @@ def _InjectSuccesses(experiment, how_many, keyvals, for_benchmark=0,
return run
experiment.benchmark_runs.extend(
- MakeSuccessfulRun(n) for n in xrange(how_many))
+ MakeSuccessfulRun(n) for n in range(how_many))
return experiment