about summary refs log tree commit diff
diff options
context:
space:
mode:
author    Zhizhou Yang <zhizhouy@google.com>  2020-02-11 16:56:57 -0800
committer Zhizhou Yang <zhizhouy@google.com>  2020-02-13 06:50:19 +0000
commit   43c9066b1889baaa0a6077399deb6a4d503551e6 (patch)
tree     db4575b4aea577e3da2d7bfb3ac17f386b257733
parent   c4615d189f6b0dc4c116fc0a78ac295f7427170e (diff)
download toolchain-utils-43c9066b1889baaa0a6077399deb6a4d503551e6.tar.gz
toolchain-utils: migrate all in-use projects to python 3
This patch migrates all in-use projects left to python 3.

BUG=chromium:1011676
TEST=Passed unittests and launched scripts manually.

Change-Id: I7f2de4e1131c05bacfac80667f3064da8adaebfd
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/2051397
Reviewed-by: George Burgess <gbiv@chromium.org>
Tested-by: Zhizhou Yang <zhizhouy@google.com>
Auto-Submit: Zhizhou Yang <zhizhouy@google.com>
-rwxr-xr-x  afdo_redaction/redact_profile.py                     38
-rwxr-xr-x  afdo_redaction/redact_profile_test.py                30
-rwxr-xr-x  afdo_redaction/remove_indirect_calls.py               3
-rwxr-xr-x  afdo_redaction/remove_indirect_calls_test.py          8
-rwxr-xr-x  afdo_tools/bisection/afdo_prof_analysis.py           12
-rwxr-xr-x  afdo_tools/bisection/afdo_prof_analysis_e2e_test.py   6
-rwxr-xr-x  afdo_tools/bisection/afdo_prof_analysis_test.py      32
-rwxr-xr-x  afdo_tools/generate_afdo_from_tryjob.py               6
-rwxr-xr-x  afdo_tools/run_afdo_tryjob.py                         4
-rwxr-xr-x  compiler_wrapper/build.py                             4
-rwxr-xr-x  compiler_wrapper/bundle.py                           15
-rw-r--r--  debug_info_test/check_cus.py                        102
-rw-r--r--  debug_info_test/check_exist.py                      142
-rw-r--r--  debug_info_test/check_icf.py                         82
-rw-r--r--  debug_info_test/check_ngcc.py                        34
-rwxr-xr-x  debug_info_test/debug_info_test.py                   86
-rw-r--r--  debug_info_test/whitelist.py                         77
-rwxr-xr-x  go/chromeos/setup_chromeos_testing.py                56
-rwxr-xr-x  heatmaps/heat_map.py                                  4
-rwxr-xr-x  heatmaps/heat_map_test.py                            13
-rw-r--r--  heatmaps/heatmap_generator.py                        39
-rwxr-xr-x  heatmaps/heatmap_generator_test.py                    9
-rwxr-xr-x  llvm_extra/create_ebuild_file.py                     52
-rwxr-xr-x  llvm_tools/custom_script_example.py                   2
-rwxr-xr-x  orderfile/post_process_orderfile.py                   4
-rwxr-xr-x  orderfile/post_process_orderfile_test.py              4
-rwxr-xr-x  pgo_tools/merge_profdata_and_upload.py               14
-rwxr-xr-x  run_tests_for.py                                     32
-rwxr-xr-x  toolchain_utils_githooks/check-presubmit.py           2
29 files changed, 498 insertions, 414 deletions
diff --git a/afdo_redaction/redact_profile.py b/afdo_redaction/redact_profile.py
index 96375fee..02bae928 100755
--- a/afdo_redaction/redact_profile.py
+++ b/afdo_redaction/redact_profile.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -30,6 +30,7 @@ import collections
import re
import sys
+
def _count_samples(samples):
"""Count the total number of samples in a function."""
line_re = re.compile(r'^(\s*)\d+(?:\.\d+)?: (\d+)\s*$')
@@ -132,7 +133,7 @@ def _read_textual_afdo_profile(stream):
continue
if line[0].isspace():
- assert function_line is not None, "sample exists outside of a function?"
+ assert function_line is not None, 'sample exists outside of a function?'
samples.append(line)
continue
@@ -170,12 +171,13 @@ def dedup_records(profile_records, summary_file, max_repeats=100):
counts[_normalize_samples(record.samples)].append(record)
# Be sure that we didn't see any duplicate functions, since that's bad...
- total_functions_recorded = sum(len(records)
- for records in counts.itervalues())
+ total_functions_recorded = sum(len(records) for records in counts.values())
- unique_function_names = set(record.function_line.split(':')[0]
- for records in counts.itervalues()
- for record in records)
+ unique_function_names = {
+ record.function_line.split(':')[0]
+ for records in counts.values()
+ for record in records
+ }
assert len(unique_function_names) == total_functions_recorded, \
'duplicate function names?'
@@ -187,7 +189,7 @@ def dedup_records(profile_records, summary_file, max_repeats=100):
num_samples_total = 0
num_top_samples_total = 0
- for normalized_samples, records in counts.iteritems():
+ for normalized_samples, records in counts.items():
top_sample_count, all_sample_count = _count_samples(normalized_samples)
top_sample_count *= len(records)
all_sample_count *= len(records)
@@ -205,11 +207,13 @@ def dedup_records(profile_records, summary_file, max_repeats=100):
for record in records:
yield record
- print('Retained {:,}/{:,} functions'.format(num_kept, num_total),
- file=summary_file)
- print('Retained {:,}/{:,} samples, total'.format(num_samples_kept,
- num_samples_total),
- file=summary_file)
+ print(
+ 'Retained {:,}/{:,} functions'.format(num_kept, num_total),
+ file=summary_file)
+ print(
+ 'Retained {:,}/{:,} samples, total'.format(num_samples_kept,
+ num_samples_total),
+ file=summary_file)
print('Retained {:,}/{:,} top-level samples' \
.format(num_top_samples_kept, num_top_samples_total),
file=summary_file)
@@ -220,15 +224,17 @@ def run(profile_input_file, summary_output_file, profile_output_file):
# Sort this so we get deterministic output. AFDO doesn't care what order it's
# in.
- deduped = sorted(dedup_records(profile_records, summary_output_file),
- key=lambda r: r.function_line)
+ deduped = sorted(
+ dedup_records(profile_records, summary_output_file),
+ key=lambda r: r.function_line)
for function_line, samples in deduped:
print(function_line, file=profile_output_file)
print('\n'.join(samples), file=profile_output_file)
def _main():
- run(profile_input_file=sys.stdin, summary_output_file=sys.stderr,
+ run(profile_input_file=sys.stdin,
+ summary_output_file=sys.stderr,
profile_output_file=sys.stdout)
diff --git a/afdo_redaction/redact_profile_test.py b/afdo_redaction/redact_profile_test.py
index 27fb534e..e2438972 100755
--- a/afdo_redaction/redact_profile_test.py
+++ b/afdo_redaction/redact_profile_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -8,12 +8,12 @@
from __future__ import division, print_function
-import StringIO
+import io
import unittest
-import redact_profile
+from afdo_redaction import redact_profile
-_redact_limit = redact_profile.dedup_records.func_defaults[0]
+_redact_limit = redact_profile.dedup_records.__defaults__[0]
def _redact(input_lines, summary_to=None):
@@ -21,17 +21,18 @@ def _redact(input_lines, summary_to=None):
input_lines = input_lines.splitlines()
if summary_to is None:
- summary_to = StringIO.StringIO()
+ summary_to = io.StringIO()
- output_to = StringIO.StringIO()
- redact_profile.run(profile_input_file=input_lines,
- summary_output_file=summary_to,
- profile_output_file=output_to)
+ output_to = io.StringIO()
+ redact_profile.run(
+ profile_input_file=input_lines,
+ summary_output_file=summary_to,
+ profile_output_file=output_to)
return output_to.getvalue()
def _redact_with_summary(input_lines):
- summary = StringIO.StringIO()
+ summary = io.StringIO()
result = _redact(input_lines, summary_to=summary)
return result, summary.getvalue()
@@ -64,6 +65,7 @@ def _generate_repeated_function_body(repeats, fn_name='_some_name'):
class Tests(unittest.TestCase):
"""All of our tests for redact_profile."""
+
def test_no_input_works(self):
self.assertEqual(_redact(''), '')
@@ -93,13 +95,13 @@ class Tests(unittest.TestCase):
result_file = '\n'.join(kept_lines) + '\n'
- lines = _generate_repeated_function_body(_redact_limit,
- fn_name='_discard_me')
+ lines = _generate_repeated_function_body(
+ _redact_limit, fn_name='_discard_me')
self.assertEqual(_redact(kept_lines + lines), result_file)
self.assertEqual(_redact(lines + kept_lines), result_file)
- more_lines = _generate_repeated_function_body(_redact_limit,
- fn_name='_and_discard_me')
+ more_lines = _generate_repeated_function_body(
+ _redact_limit, fn_name='_and_discard_me')
self.assertEqual(_redact(lines + kept_lines + more_lines), result_file)
self.assertEqual(_redact(lines + more_lines), '')
diff --git a/afdo_redaction/remove_indirect_calls.py b/afdo_redaction/remove_indirect_calls.py
index b879b2f0..0dc15077 100755
--- a/afdo_redaction/remove_indirect_calls.py
+++ b/afdo_redaction/remove_indirect_calls.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -21,7 +21,6 @@ from __future__ import division, print_function
import argparse
import re
-import sys
def _remove_indirect_call_targets(lines):
diff --git a/afdo_redaction/remove_indirect_calls_test.py b/afdo_redaction/remove_indirect_calls_test.py
index 1499af25..164b284f 100755
--- a/afdo_redaction/remove_indirect_calls_test.py
+++ b/afdo_redaction/remove_indirect_calls_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -11,12 +11,12 @@ from __future__ import print_function
import io
import unittest
-import remove_indirect_calls
+from afdo_redaction import remove_indirect_calls
def _run_test(input_lines):
- input_buf = io.BytesIO('\n'.join(input_lines))
- output_buf = io.BytesIO()
+ input_buf = io.StringIO('\n'.join(input_lines))
+ output_buf = io.StringIO()
remove_indirect_calls.run(input_buf, output_buf)
return output_buf.getvalue().splitlines()
diff --git a/afdo_tools/bisection/afdo_prof_analysis.py b/afdo_tools/bisection/afdo_prof_analysis.py
index 36531106..94e5366b 100755
--- a/afdo_tools/bisection/afdo_prof_analysis.py
+++ b/afdo_tools/bisection/afdo_prof_analysis.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -109,7 +109,7 @@ class DeciderState(object):
self.state_file)
return
- with open(self.state_file) as f:
+ with open(self.state_file, encoding='utf-8') as f:
try:
data = json.load(f)
except:
@@ -127,7 +127,7 @@ class DeciderState(object):
def save_state(self):
state = {'seed': self.seed, 'accumulated_results': self.accumulated_results}
tmp_file = self.state_file + '.new'
- with open(tmp_file, 'w') as f:
+ with open(tmp_file, 'w', encoding='utf-8') as f:
json.dump(state, f, indent=2)
os.rename(tmp_file, self.state_file)
logging.info('Logged state to %s...', self.state_file)
@@ -270,7 +270,7 @@ def range_search(decider, good, bad, common_funcs, lo, hi):
def find_upper_border(good_copy, funcs, lo, hi, last_bad_val=None):
"""Finds the upper border of problematic range."""
mid = average(lo, hi)
- if mid == lo or mid == hi:
+ if mid in (lo, hi):
return last_bad_val or hi
for func in funcs[lo:mid]:
@@ -288,7 +288,7 @@ def range_search(decider, good, bad, common_funcs, lo, hi):
def find_lower_border(good_copy, funcs, lo, hi, last_bad_val=None):
"""Finds the lower border of problematic range."""
mid = average(lo, hi)
- if mid == lo or mid == hi:
+ if mid in (lo, hi):
return last_bad_val or lo
for func in funcs[lo:mid]:
@@ -428,7 +428,7 @@ def main(flags):
'good_only_functions': gnb_result,
'bad_only_functions': bng_result
}
- with open(flags.analysis_output_file, 'wb') as f:
+ with open(flags.analysis_output_file, 'w', encoding='utf-8') as f:
json.dump(results, f, indent=2)
if flags.remove_state_on_completion:
os.remove(flags.state_file)
diff --git a/afdo_tools/bisection/afdo_prof_analysis_e2e_test.py b/afdo_tools/bisection/afdo_prof_analysis_e2e_test.py
index 85c1c175..b293b8aa 100755
--- a/afdo_tools/bisection/afdo_prof_analysis_e2e_test.py
+++ b/afdo_tools/bisection/afdo_prof_analysis_e2e_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -15,13 +15,13 @@ import tempfile
import unittest
from datetime import date
-import afdo_prof_analysis as analysis
+from afdo_tools.bisection import afdo_prof_analysis as analysis
class ObjectWithFields(object):
"""Turns kwargs given to the constructor into fields on an object.
- Example usage:
+ Examples:
x = ObjectWithFields(a=1, b=2)
assert x.a == 1
assert x.b == 2
diff --git a/afdo_tools/bisection/afdo_prof_analysis_test.py b/afdo_tools/bisection/afdo_prof_analysis_test.py
index 7bd3050c..245edc33 100755
--- a/afdo_tools/bisection/afdo_prof_analysis_test.py
+++ b/afdo_tools/bisection/afdo_prof_analysis_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -8,12 +8,12 @@
from __future__ import print_function
-import afdo_prof_analysis as analysis
-
import random
-import StringIO
+import io
import unittest
+from afdo_tools.bisection import afdo_prof_analysis as analysis
+
class AfdoProfAnalysisTest(unittest.TestCase):
"""Class for testing AFDO Profile Analysis"""
@@ -33,17 +33,17 @@ class AfdoProfAnalysisTest(unittest.TestCase):
analysis.random.seed(5) # 5 is an arbitrary choice. For consistent testing
def test_text_to_json(self):
- test_data = StringIO.StringIO('deflate_slow:87460059:3\n'
- ' 3: 24\n'
- ' 14: 54767\n'
- ' 15: 664 fill_window:22\n'
- ' 16: 661\n'
- ' 19: 637\n'
- ' 41: 36692 longest_match:36863\n'
- ' 44: 36692\n'
- ' 44.2: 5861\n'
- ' 46: 13942\n'
- ' 46.1: 14003\n')
+ test_data = io.StringIO('deflate_slow:87460059:3\n'
+ ' 3: 24\n'
+ ' 14: 54767\n'
+ ' 15: 664 fill_window:22\n'
+ ' 16: 661\n'
+ ' 19: 637\n'
+ ' 41: 36692 longest_match:36863\n'
+ ' 44: 36692\n'
+ ' 44.2: 5861\n'
+ ' 46: 13942\n'
+ ' 46.1: 14003\n')
expected = {
'deflate_slow': ':87460059:3\n'
' 3: 24\n'
@@ -115,7 +115,7 @@ class AfdoProfAnalysisTest(unittest.TestCase):
self.bad_items, common_funcs, 0,
len(common_funcs))
- self.assertEquals(['func_a', 'func_b'], problem_range)
+ self.assertEqual(['func_a', 'func_b'], problem_range)
def test_check_good_not_bad(self):
func_in_good = 'func_c'
diff --git a/afdo_tools/generate_afdo_from_tryjob.py b/afdo_tools/generate_afdo_from_tryjob.py
index b8a2d669..3ed578ea 100755
--- a/afdo_tools/generate_afdo_from_tryjob.py
+++ b/afdo_tools/generate_afdo_from_tryjob.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -115,7 +115,7 @@ def main():
'--tryjob',
required=True,
type=_tryjob_arg,
- help='Path to our tryjob\'s artifacts. Accepts a gs:// path, pantheon '
+ help="Path to our tryjob's artifacts. Accepts a gs:// path, pantheon "
'link, or tryjob ID, e.g. R75-11965.0.0-b3648595. In the last case, '
'the assumption is that you ran a chell-chrome-pfq-tryjob.')
parser.add_argument(
@@ -127,7 +127,7 @@ def main():
'-k',
'--keep_artifacts_on_failure',
action='store_true',
- help='Don\'t remove the tempdir on failure')
+ help="Don't remove the tempdir on failure")
args = parser.parse_args()
if not distutils.spawn.find_executable(_CREATE_LLVM_PROF):
diff --git a/afdo_tools/run_afdo_tryjob.py b/afdo_tools/run_afdo_tryjob.py
index de45af0b..e14cd918 100755
--- a/afdo_tools/run_afdo_tryjob.py
+++ b/afdo_tools/run_afdo_tryjob.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -94,7 +94,7 @@ def main():
user_patches = args.patch
if tag_profiles_with_current_time and use_afdo_generation_stage:
- raise ValueError('You can\'t tag profiles with the time + have '
+ raise ValueError("You can't tag profiles with the time + have "
'afdo-generate')
if not tag_profiles_with_current_time and not use_afdo_generation_stage:
diff --git a/compiler_wrapper/build.py b/compiler_wrapper/build.py
index 6b647714..4257abfc 100755
--- a/compiler_wrapper/build.py
+++ b/compiler_wrapper/build.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -52,7 +52,7 @@ def read_version(build_dir):
return r.read()
last_commit_msg = subprocess.check_output(
- ['git', '-C', build_dir, 'log', '-1', '--pretty=%B'])
+ ['git', '-C', build_dir, 'log', '-1', '--pretty=%B'], encoding='utf-8')
# Use last found change id to support reverts as well.
change_ids = re.findall(r'Change-Id: (\w+)', last_commit_msg)
if not change_ids:
diff --git a/compiler_wrapper/bundle.py b/compiler_wrapper/bundle.py
index c1fa53e0..173625f4 100755
--- a/compiler_wrapper/bundle.py
+++ b/compiler_wrapper/bundle.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -32,7 +32,7 @@ def copy_files(input_dir, output_dir):
def read_change_id(input_dir):
last_commit_msg = subprocess.check_output(
- ['git', '-C', input_dir, 'log', '-1', '--pretty=%B'])
+ ['git', '-C', input_dir, 'log', '-1', '--pretty=%B'], encoding='utf-8')
# Use last found change id to support reverts as well.
change_ids = re.findall(r'Change-Id: (\w+)', last_commit_msg)
if not change_ids:
@@ -41,14 +41,15 @@ def read_change_id(input_dir):
def write_readme(input_dir, output_dir, change_id):
- with open(os.path.join(input_dir, 'bundle.README'), 'r') as r, \
- open(os.path.join(output_dir, 'README'), 'w') as w:
- content = r.read()
- w.write(content.format(change_id=change_id))
+ with open(
+ os.path.join(input_dir, 'bundle.README'), 'r', encoding='utf-8') as r:
+ with open(os.path.join(output_dir, 'README'), 'w', encoding='utf-8') as w:
+ content = r.read()
+ w.write(content.format(change_id=change_id))
def write_version(output_dir, change_id):
- with open(os.path.join(output_dir, 'VERSION'), 'w') as w:
+ with open(os.path.join(output_dir, 'VERSION'), 'w', encoding='utf-8') as w:
w.write(change_id)
diff --git a/debug_info_test/check_cus.py b/debug_info_test/check_cus.py
index f68fe9cb..d3cd6365 100644
--- a/debug_info_test/check_cus.py
+++ b/debug_info_test/check_cus.py
@@ -1,7 +1,12 @@
+# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+"""check compile units."""
+
+from __future__ import print_function
+
import os
import subprocess
@@ -9,59 +14,62 @@ import check_ngcc
cu_checks = [check_ngcc.not_by_gcc]
+
def check_compile_unit(dso_path, producer, comp_path):
- """check all compiler flags used to build the compile unit.
+ """check all compiler flags used to build the compile unit.
+
+ Args:
+ dso_path: path to the elf/dso.
+ producer: DW_AT_producer contains the compiler command line.
+ comp_path: DW_AT_comp_dir + DW_AT_name.
- Args:
- dso_path: path to the elf/dso
- producer: DW_AT_producer contains the compiler command line.
- comp_path: DW_AT_comp_dir + DW_AT_name
+ Returns:
+ A set of failed tests.
+ """
+ failed = set()
+ for c in cu_checks:
+ if not c(dso_path, producer, comp_path):
+ failed.add(c.__module__)
- Returns:
- A set of failed tests.
- """
- failed = set()
- for c in cu_checks:
- if not c(dso_path, producer, comp_path):
- failed.add(c.__module__)
+ return failed
- return failed
def check_compile_units(dso_path):
- """check all compile units in the given dso.
-
- Args:
- dso_path: path to the dso
- Return:
- True if everything looks fine otherwise False.
- """
-
- failed = set()
- producer = ''
- comp_path = ''
-
- readelf = subprocess.Popen(['readelf', '--debug-dump=info',
- '--dwarf-depth=1', dso_path],
- stdout=subprocess.PIPE,
- stderr=open(os.devnull, 'w'))
- for l in readelf.stdout:
- if 'DW_TAG_compile_unit' in l:
- if producer:
- failed = failed.union(check_compile_unit(dso_path, producer,
- comp_path))
- producer = ''
- comp_path = ''
- elif 'DW_AT_producer' in l:
- producer = l
- elif 'DW_AT_name' in l:
- comp_path = os.path.join(comp_path, l.split(':')[-1].strip())
- elif 'DW_AT_comp_dir' in l:
- comp_path = os.path.join(l.split(':')[-1].strip(), comp_path)
- if producer:
+ """check all compile units in the given dso.
+
+ Args:
+ dso_path: path to the dso.
+
+ Returns:
+ True if everything looks fine otherwise False.
+ """
+
+ failed = set()
+ producer = ''
+ comp_path = ''
+
+ readelf = subprocess.Popen(
+ ['readelf', '--debug-dump=info', '--dwarf-depth=1', dso_path],
+ stdout=subprocess.PIPE,
+ stderr=open(os.devnull, 'w'),
+ encoding='utf-8')
+ for l in readelf.stdout:
+ if 'DW_TAG_compile_unit' in l:
+ if producer:
failed = failed.union(check_compile_unit(dso_path, producer, comp_path))
+ producer = ''
+ comp_path = ''
+ elif 'DW_AT_producer' in l:
+ producer = l
+ elif 'DW_AT_name' in l:
+ comp_path = os.path.join(comp_path, l.split(':')[-1].strip())
+ elif 'DW_AT_comp_dir' in l:
+ comp_path = os.path.join(l.split(':')[-1].strip(), comp_path)
+ if producer:
+ failed = failed.union(check_compile_unit(dso_path, producer, comp_path))
- if failed:
- print('%s failed check: %s' % (dso_path, ' '.join(failed)))
- return False
+ if failed:
+ print('%s failed check: %s' % (dso_path, ' '.join(failed)))
+ return False
- return True
+ return True
diff --git a/debug_info_test/check_exist.py b/debug_info_test/check_exist.py
index 5e7cce19..dbb89127 100644
--- a/debug_info_test/check_exist.py
+++ b/debug_info_test/check_exist.py
@@ -1,90 +1,102 @@
+# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+"""check whether intended components exists in the given dso."""
+
+from __future__ import print_function
+
import os
import subprocess
from whitelist import is_whitelisted
+
def check_debug_info(dso_path, readelf_content):
- """check whether debug info section exists in the elf file.
+ """Check whether debug info section exists in the elf file.
+
+ Args:
+ dso_path: path to the dso.
+ readelf_content: debug info dumped by command readelf.
- Args:
- readelf: debug info dumped by command readelf
+ Returns:
+ True if debug info section exists, otherwise False.
+ """
- Returns:
- True if debug info section exists, otherwise False.
- """
+ # Return True if it is whitelisted
+ if is_whitelisted('exist_debug_info', dso_path):
+ return True
- # Return True if it is whitelisted
- if is_whitelisted('exist_debug_info', dso_path):
- return True
+ for l in readelf_content:
+ if 'debug_info' in l:
+ return True
+ return False
- for l in readelf_content:
- if 'debug_info' in l:
- return True
- return False
def check_producer(dso_path, readelf_content):
- """check whether DW_AT_producer exists in each compile unit.
-
- Args:
- readelf: debug info dumped by command readelf
-
- Returns:
- True if DW_AT_producer exists in each compile unit, otherwise False.
- Notice: If no compile unit in DSO, also return True.
- """
-
- # Return True if it is whitelisted
- if is_whitelisted('exist_producer', dso_path):
- return True
-
- # Indicate if there is a producer under each cu
- cur_producer = False
-
- first_cu = True
- producer_exist = True
-
- for l in readelf_content:
- if 'DW_TAG_compile_unit' in l:
- if not first_cu and not cur_producer:
- producer_exist = False
- break
- first_cu = False
- cur_producer = False
- elif 'DW_AT_producer' in l:
- cur_producer = True
-
- # Check whether last producer of compile unit exists in the elf,
- # also return True if no cu in the DSO.
- if not first_cu and not cur_producer:
+ """Check whether DW_AT_producer exists in each compile unit.
+
+ Args:
+ dso_path: path to the dso.
+ readelf_content: debug info dumped by command readelf.
+
+ Returns:
+ True if DW_AT_producer exists in each compile unit, otherwise False.
+ Notice: If no compile unit in DSO, also return True.
+ """
+
+ # Return True if it is whitelisted
+ if is_whitelisted('exist_producer', dso_path):
+ return True
+
+ # Indicate if there is a producer under each cu
+ cur_producer = False
+
+ first_cu = True
+ producer_exist = True
+
+ for l in readelf_content:
+ if 'DW_TAG_compile_unit' in l:
+ if not first_cu and not cur_producer:
producer_exist = False
+ break
+ first_cu = False
+ cur_producer = False
+ elif 'DW_AT_producer' in l:
+ cur_producer = True
+
+ # Check whether last producer of compile unit exists in the elf,
+ # also return True if no cu in the DSO.
+ if not first_cu and not cur_producer:
+ producer_exist = False
+
+ return producer_exist
- return producer_exist
def check_exist_all(dso_path):
- """check whether intended components exists in the given dso.
+ """check whether intended components exists in the given dso.
- Args:
- dso_path: path to the dso
- Return:
- True if everything looks fine otherwise False.
- """
+ Args:
+ dso_path: path to the dso.
- readelf = subprocess.Popen(['readelf', '--debug-dump=info',
- '--dwarf-depth=1', dso_path],
- stdout=subprocess.PIPE,
- stderr=open(os.devnull, 'w'))
- readelf_content = list(readelf.stdout)
+ Returns:
+ True if everything looks fine otherwise False.
+ """
- exist_checks = [check_debug_info, check_producer]
+ readelf = subprocess.Popen(
+ ['readelf', '--debug-dump=info', '--dwarf-depth=1', dso_path],
+ stdout=subprocess.PIPE,
+ stderr=open(os.devnull, 'w'),
+ encoding='utf-8')
+ readelf_content = list(readelf.stdout)
- for e in exist_checks:
- if not e(dso_path, readelf_content):
- check_failed = e.__module__ + ': ' + e.__name__
- print('%s failed check: %s' % (dso_path, check_failed))
- return False
+ exist_checks = [check_debug_info, check_producer]
- return True
+ for e in exist_checks:
+ if not e(dso_path, readelf_content):
+ check_failed = e.__module__ + ': ' + e.__name__
+ print('%s failed check: %s' % (dso_path, check_failed))
+ return False
+
+ return True
diff --git a/debug_info_test/check_icf.py b/debug_info_test/check_icf.py
index 4ac67dbd..a46968e7 100644
--- a/debug_info_test/check_icf.py
+++ b/debug_info_test/check_icf.py
@@ -1,47 +1,53 @@
+# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+"""check whether chrome was built with identical code folding."""
+
+from __future__ import print_function
+
import os
import re
import subprocess
+
def check_identical_code_folding(dso_path):
- """check whether chrome was built with identical code folding.
-
- Args:
- dso_path: path to the dso
- Return:
- False if the dso is chrome and it was not built with icf,
- True otherwise.
- """
-
- if not dso_path.endswith('/chrome.debug'):
- return True
-
- # Run 'nm' on the chrome binary and read the output.
- nm = subprocess.Popen(['nm', dso_path],
- stdout=subprocess.PIPE,
- stderr=open(os.devnull, 'w'))
- nm_output, _ = nm.communicate()
-
- # Search for addresses of text symbols.
- text_addresses = re.findall('^[0-9a-f]+[ ]+[tT] ',
- nm_output,
- re.MULTILINE)
-
- # Calculate number of text symbols in chrome binary.
- num_text_addresses = len(text_addresses)
-
- # Calculate number of unique text symbols in chrome binary.
- num_unique_text_addresses = len(set(text_addresses))
-
- # Check that the number of duplicate symbols is at least 10,000.
- # - https://crbug.com/813272#c18
- if num_text_addresses-num_unique_text_addresses >= 10000:
- return True
-
- print('%s was not built with ICF' % dso_path)
- print(' num_text_addresses = %d' % num_text_addresses)
- print(' num_unique_text_addresses = %d' % num_unique_text_addresses)
- return False
+ """check whether chrome was built with identical code folding.
+
+ Args:
+ dso_path: path to the dso.
+
+ Returns:
+ False if the dso is chrome and it was not built with icf,
+ True otherwise.
+ """
+
+ if not dso_path.endswith('/chrome.debug'):
+ return True
+
+ # Run 'nm' on the chrome binary and read the output.
+ nm = subprocess.Popen(['nm', dso_path],
+ stdout=subprocess.PIPE,
+ stderr=open(os.devnull, 'w'),
+ encoding='utf-8')
+ nm_output, _ = nm.communicate()
+
+ # Search for addresses of text symbols.
+ text_addresses = re.findall('^[0-9a-f]+[ ]+[tT] ', nm_output, re.MULTILINE)
+
+ # Calculate number of text symbols in chrome binary.
+ num_text_addresses = len(text_addresses)
+
+ # Calculate number of unique text symbols in chrome binary.
+ num_unique_text_addresses = len(set(text_addresses))
+
+ # Check that the number of duplicate symbols is at least 10,000.
+ # - https://crbug.com/813272#c18
+ if num_text_addresses - num_unique_text_addresses >= 10000:
+ return True
+
+ print('%s was not built with ICF' % dso_path)
+ print(' num_text_addresses = %d' % num_text_addresses)
+ print(' num_unique_text_addresses = %d' % num_unique_text_addresses)
+ return False
diff --git a/debug_info_test/check_ngcc.py b/debug_info_test/check_ngcc.py
index eecbb85e..501bb988 100644
--- a/debug_info_test/check_ngcc.py
+++ b/debug_info_test/check_ngcc.py
@@ -1,26 +1,30 @@
+# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+"""Check whether the compile unit is not built by gcc."""
+
+from __future__ import print_function
+
from whitelist import is_whitelisted
-def not_by_gcc(dso_path, producer, comp_path):
- """Check whether the compile unit is not built by gcc.
- Args:
- dso_path: path to the elf/dso
- producer: DW_AT_producer contains the compiler command line.
- comp_path: DW_AT_comp_dir + DW_AT_name
+def not_by_gcc(dso_path, producer, comp_path):
+ """Check whether the compile unit is not built by gcc.
- Returns:
- False if compiled by gcc otherwise True
- """
- if is_whitelisted('ngcc_comp_path', comp_path):
- return True
+ Args:
+ dso_path: path to the elf/dso.
+ producer: DW_AT_producer contains the compiler command line.
+ comp_path: DW_AT_comp_dir + DW_AT_name.
- if is_whitelisted('ngcc_dso_path', dso_path):
- return True
+ Returns:
+ False if compiled by gcc otherwise True.
+ """
+ if is_whitelisted('ngcc_comp_path', comp_path):
+ return True
- if 'GNU C' in producer:
- return False
+ if is_whitelisted('ngcc_dso_path', dso_path):
return True
+
+ return 'GNU C' not in producer
diff --git a/debug_info_test/debug_info_test.py b/debug_info_test/debug_info_test.py
index 4839e69c..ae7e9f48 100755
--- a/debug_info_test/debug_info_test.py
+++ b/debug_info_test/debug_info_test.py
@@ -1,9 +1,13 @@
-#!/usr/bin/python2
-
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+"""Test for debug info."""
+
+from __future__ import print_function
+
import os
import subprocess
import sys
@@ -12,46 +16,52 @@ import check_icf
import check_cus
import check_exist
-elf_checks = [check_exist.check_exist_all,
- check_cus.check_compile_units,
- check_icf.check_identical_code_folding]
+elf_checks = [
+ check_exist.check_exist_all, check_cus.check_compile_units,
+ check_icf.check_identical_code_folding
+]
+
def scanelf(root):
- """find ELFs in root
+ """Find ELFs in root.
+
+ Args:
+ root: root dir to start with the search.
+
+ Returns:
+ Filenames of ELFs in root.
+ """
+ p = subprocess.Popen(['scanelf', '-y', '-B', '-F', '%F', '-R', root],
+ stdout=subprocess.PIPE,
+ encoding='utf-8')
+ return [l.strip() for l in p.stdout]
- Args:
- root: root dir to start with the search.
- Returns:
- Filenames of ELFs in root.
- """
- p = subprocess.Popen(['scanelf', '-y', '-B', '-F', '%F', '-R', root],
- stdout=subprocess.PIPE)
- return [l.strip() for l in p.stdout]
def Main(argv):
- if len(argv) < 2:
- print('usage: %s [file|dir]')
- return 1
-
- files = []
- cand = argv[1]
- if os.path.isfile(cand):
- files = [cand]
- elif os.path.isdir(cand):
- files = scanelf(cand)
- else:
- print('usage: %s [file|dir]')
- return 1
-
- failed = False
- for f in files:
- for c in elf_checks:
- if not c(f):
- failed = True
-
- if failed:
- return 1
- return 0
+ if len(argv) < 2:
+ print('usage: %s [file|dir]')
+ return 1
+
+ files = []
+ cand = argv[1]
+ if os.path.isfile(cand):
+ files = [cand]
+ elif os.path.isdir(cand):
+ files = scanelf(cand)
+ else:
+ print('usage: %s [file|dir]')
+ return 1
+
+ failed = False
+ for f in files:
+ for c in elf_checks:
+ if not c(f):
+ failed = True
+
+ if failed:
+ return 1
+ return 0
+
if __name__ == '__main__':
- sys.exit(Main(sys.argv))
+ sys.exit(Main(sys.argv))
diff --git a/debug_info_test/whitelist.py b/debug_info_test/whitelist.py
index 383fcc3d..b53387a8 100644
--- a/debug_info_test/whitelist.py
+++ b/debug_info_test/whitelist.py
@@ -1,11 +1,17 @@
+# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+"""Whitelist functions."""
+
+from __future__ import print_function
+
import os
import glob
import re
+
# Matching a string of length m in an NFA of size n is O(mn^2), but the
# performance also depends largely on the implementation. It appears to be fast
# enough according to the tests.
@@ -13,45 +19,50 @@ import re
# The performance bottleneck of this script is readelf. Unless this becomes
# slower than readelf, don't waste time here.
def is_whitelisted(list_name, pattern):
- """chech whether the given pattern is specified in the whitelist.
+ """Check whether the given pattern is specified in the whitelist.
+
+ Args:
+ list_name: name of the whitelist.
+ pattern: the target string.
+
+ Returns:
+ True if matched otherwise False.
+ """
+ return pattern and whitelists[list_name].match(pattern)
- Args:
- list_name: name of the whitelist
- pattern: the target string
- Returns:
- True if matched otherwise False
- """
- return pattern and whitelists[list_name].match(pattern)
def prepare_whitelist(patterns):
- """Join and compile the re patterns.
+ """Join and compile the re patterns.
+
+ Args:
+ patterns: regex patterns.
+
+ Returns:
+ A compiled re object.
+ """
+ return re.compile('|'.join(patterns))
- Args:
- patterns: regex patterns.
- Return:
- A compiled re object
- """
- return re.compile('|'.join(patterns))
def load_whitelists(dirname):
- """Load whitelists under dirname.
-
- A whitelist ends with .whitelist.
-
- Args:
- dirname: path to the dir.
- Returns:
- A dictionary of 'filename' -> whitelist matcher.
- """
- wlist = {}
- for fn in glob.glob(os.path.join(dirname, '*.whitelist')):
- key = os.path.splitext(os.path.basename(fn))[0]
- with open(fn, 'r') as f:
- patterns = f.read().splitlines()
- patterns = [l for l in patterns if l != '']
- patterns = [l for l in patterns if l[0] != '#']
- wlist[key] = prepare_whitelist(patterns)
- return wlist
+ """Load whitelists under dirname.
+
+ A whitelist ends with .whitelist.
+
+ Args:
+ dirname: path to the dir.
+
+ Returns:
+ A dictionary of 'filename' -> whitelist matcher.
+ """
+ wlist = {}
+ for fn in glob.glob(os.path.join(dirname, '*.whitelist')):
+ key = os.path.splitext(os.path.basename(fn))[0]
+ with open(fn, 'r', encoding='utf-8') as f:
+ patterns = f.read().splitlines()
+ patterns = [l for l in patterns if l != '']
+ patterns = [l for l in patterns if l[0] != '#']
+ wlist[key] = prepare_whitelist(patterns)
+ return wlist
whitelists = load_whitelists(os.path.dirname(__file__))
diff --git a/go/chromeos/setup_chromeos_testing.py b/go/chromeos/setup_chromeos_testing.py
index b679ddfc..8b535538 100755
--- a/go/chromeos/setup_chromeos_testing.py
+++ b/go/chromeos/setup_chromeos_testing.py
@@ -1,7 +1,9 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-#
-# Copyright 2018 Google Inc. All Rightes Reserved
+# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
"""Generate board-specific scripts for Go compiler testing."""
from __future__ import print_function
@@ -73,7 +75,7 @@ def log(msg):
def WriteFile(file_content, file_name):
- with open(file_name, 'w') as out_file:
+ with open(file_name, 'w', encoding='utf-8') as out_file:
out_file.write(file_content)
@@ -82,7 +84,7 @@ def GenerateGoHelperScripts(ce, arm_board, x86_board, chromeos_root):
names = {
'x86_64': x86_board,
'arm64': arm_board,
- 'arm32': ("%s32" % arm_board)
+ 'arm32': ('%s32' % arm_board)
}
toolchain_dir = os.path.join(chromeos_root, 'src', 'third_party',
@@ -93,17 +95,17 @@ def GenerateGoHelperScripts(ce, arm_board, x86_board, chromeos_root):
toolchain = CROS_TOOLCHAIN_DATA[k]
glibc = GLIBC_DATA[k]
- base_file = os.path.join(toolchain_dir, ("go_%s" % name))
+ base_file = os.path.join(toolchain_dir, ('go_%s' % name))
base_file_content = BASE_TEMPLATE % (name, arch, arch, toolchain, toolchain,
toolchain)
WriteFile(base_file_content, base_file)
- cmd = "chmod 755 %s" % base_file
+ cmd = 'chmod 755 %s' % base_file
ce.RunCommand(cmd)
- exec_file = os.path.join(toolchain_dir, ("go_%s_exec" % name))
+ exec_file = os.path.join(toolchain_dir, ('go_%s_exec' % name))
exec_file_content = EXEC_TEMPLATE % (name, arch, glibc, name)
WriteFile(exec_file_content, exec_file)
- cmd = "chmod 755 %s" % exec_file
+ cmd = 'chmod 755 %s' % exec_file
ce.RunCommand(cmd)
return 0
@@ -111,7 +113,7 @@ def GenerateGoHelperScripts(ce, arm_board, x86_board, chromeos_root):
def UpdateChrootSshConfig(ce, arm_board, arm_dut, x86_board, x86_dut,
chromeos_root):
- log("Entering UpdateChrootSshConfig")
+ log('Entering UpdateChrootSshConfig')
# Copy testing_rsa to .ssh and set file protections properly.
user = getpass.getuser()
ssh_dir = os.path.join(chromeos_root, 'chroot', 'home', user, '.ssh')
@@ -133,22 +135,22 @@ def UpdateChrootSshConfig(ce, arm_board, arm_dut, x86_board, x86_dut,
print('Cannot find %s; you will need to update testing_rsa by hand.' %
src_file)
else:
- log("testing_rsa exists already.")
+ log('testing_rsa exists already.')
# Save ~/.ssh/config file, if not already done.
- config_file = os.path.expanduser("~/.ssh/config")
+ config_file = os.path.expanduser('~/.ssh/config')
saved_config_file = os.path.join(
- os.path.expanduser("~/.ssh"), "config.save.go-scripts")
+ os.path.expanduser('~/.ssh'), 'config.save.go-scripts')
if not os.path.exists(saved_config_file):
- cmd = "cp %s %s" % (config_file, saved_config_file)
+ cmd = 'cp %s %s' % (config_file, saved_config_file)
ret = ce.RunCommand(cmd)
if ret != SUCCESS:
- print("Error making save copy of ~/.ssh/config. Exiting...")
+ print('Error making save copy of ~/.ssh/config. Exiting...')
sys.exit(1)
# Update ~/.ssh/config file
- log("Reading ssh config file")
- with open(config_file, "r") as input_file:
+ log('Reading ssh config file')
+ with open(config_file, 'r') as input_file:
config_lines = input_file.read()
x86_host_config = CONFIG_TEMPLATE % (x86_board, x86_dut)
@@ -158,7 +160,7 @@ def UpdateChrootSshConfig(ce, arm_board, arm_dut, x86_board, x86_dut,
config_lines += x86_host_config
config_lines += arm_host_config
- log("Writing ~/.ssh/config")
+ log('Writing ~/.ssh/config')
WriteFile(config_lines, config_file)
return 0
@@ -170,27 +172,27 @@ def CleanUp(ce, x86_board, arm_board, chromeos_root):
names = {
'x86_64': x86_board,
'arm64': arm_board,
- 'arm32': ("%s32" % arm_board)
+ 'arm32': ('%s32' % arm_board)
}
toolchain_dir = os.path.join(chromeos_root, 'src', 'third_party',
'toolchain-utils', 'go', 'chromeos')
for k in keys:
name = names[k]
- base_file = os.path.join(toolchain_dir, ("go_%s" % name))
- exec_file = os.path.join(toolchain_dir, ("go_%s_exec" % name))
+ base_file = os.path.join(toolchain_dir, ('go_%s' % name))
+ exec_file = os.path.join(toolchain_dir, ('go_%s_exec' % name))
cmd = ('rm -f %s; rm -f %s' % (base_file, exec_file))
ce.RunCommand(cmd)
# Restore saved config_file
- config_file = os.path.expanduser("~/.ssh/config")
+ config_file = os.path.expanduser('~/.ssh/config')
saved_config_file = os.path.join(
- os.path.expanduser("~/.ssh"), "config.save.go-scripts")
+ os.path.expanduser('~/.ssh'), 'config.save.go-scripts')
if not os.path.exists(saved_config_file):
- print("Could not find file: %s; unable to restore ~/.ssh/config ." %
+ print('Could not find file: %s; unable to restore ~/.ssh/config .' %
saved_config_file)
else:
- cmd = "mv %s %s" % (saved_config_file, config_file)
+ cmd = 'mv %s %s' % (saved_config_file, config_file)
ce.RunCommand(cmd)
return 0
@@ -219,7 +221,7 @@ def Main(argv):
DEBUG = True
if not os.path.exists(options.chromeos_root):
- print("Invalid ChromeOS Root: %s" % options.chromeos_root)
+ print('Invalid ChromeOS Root: %s' % options.chromeos_root)
ce = command_executer.GetCommandExecuter()
all_good = True
@@ -245,6 +247,6 @@ def Main(argv):
return 0
-if __name__ == "__main__":
+if __name__ == '__main__':
val = Main(sys.argv)
sys.exit(val)
diff --git a/heatmaps/heat_map.py b/heatmaps/heat_map.py
index 2fd742d2..a989ab70 100755
--- a/heatmaps/heat_map.py
+++ b/heatmaps/heat_map.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -15,7 +15,7 @@ import sys
import tempfile
from cros_utils import command_executer
-import heatmap_generator
+from heatmaps import heatmap_generator
def IsARepoRoot(directory):
diff --git a/heatmaps/heat_map_test.py b/heatmaps/heat_map_test.py
index 21f90d41..ad62cd91 100755
--- a/heatmaps/heat_map_test.py
+++ b/heatmaps/heat_map_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2019 The Chromium OS Authors. All rights reserved.
@@ -9,14 +9,15 @@
from __future__ import print_function
-import mock
+import unittest.mock as mock
import unittest
import os
from cros_utils import command_executer
-import heat_map
+from heatmaps import heat_map
+from heatmaps import heatmap_generator
def make_heatmap(chromeos_root='/path/to/fake/chromeos_root/',
@@ -41,7 +42,7 @@ def fake_generate_perf_report_exception(_):
class HeatmapTest(unittest.TestCase):
"""All of our tests for heat_map."""
- #pylint: disable=protected-access
+ # pylint: disable=protected-access
@mock.patch('shutil.copy2')
@mock.patch('tempfile.mkdtemp')
def test_EnsureFileInChrootAlreadyInside(self, mock_mkdtemp, mock_copy):
@@ -81,7 +82,7 @@ class HeatmapTest(unittest.TestCase):
self.assertEqual(heatmap.perf_report,
'/fake/chroot/inchroot_path/perf_report.txt')
- @mock.patch('heatmap_generator.HeatmapGenerator')
+ @mock.patch.object(heatmap_generator, 'HeatmapGenerator')
def test_GetHeatMap(self, mock_heatmap_generator):
heatmap = make_heatmap()
heatmap._GetHeatMap(10)
@@ -107,7 +108,7 @@ class HeatmapTest(unittest.TestCase):
new=fake_generate_perf_report_exception)
@mock.patch.object(heat_map.HeatMapProducer, '_GetHeatMap')
@mock.patch.object(heat_map.HeatMapProducer, '_RemoveFiles')
- @mock.patch('__builtin__.print')
+ @mock.patch('builtins.print')
def test_Run_with_exception(self, mock_print, mock_remove_files,
mock_get_heatmap, mock_ensure_file_in_chroot):
heatmap = make_heatmap()
diff --git a/heatmaps/heatmap_generator.py b/heatmaps/heatmap_generator.py
index 42fd6352..0dd6ad28 100644
--- a/heatmaps/heatmap_generator.py
+++ b/heatmaps/heatmap_generator.py
@@ -13,7 +13,7 @@ performed by another script perf-to-inst-page.sh). It can also analyze
the symbol names in hot pages.
"""
-from __future__ import print_function
+from __future__ import division, print_function
import bisect
import collections
@@ -93,10 +93,10 @@ class HeatmapGenerator(object):
self.max_addr = 1024 * 1024 * 1024
self.ce = command_executer.GetCommandExecuter(log_level=log_level)
self.dir = os.path.dirname(os.path.realpath(__file__))
- with open(perf_report) as f:
+ with open(perf_report, 'r', encoding='utf-8') as f:
self.perf_report_contents = f.readlines()
# Write histogram results to a text file, in order to use gnu plot to draw
- self.hist_temp_output = open('out.txt', 'w')
+ self.hist_temp_output = open('out.txt', 'w', encoding='utf-8')
self.processes = {}
self.deleted_processes = {}
self.count = 0
@@ -277,7 +277,7 @@ class HeatmapGenerator(object):
if address < self.max_addr:
self.count += 1
line = '%d/%d: %d %d' % (pid[0], pid[1], self.count,
- address / self.page_size * self.page_size)
+ address // self.page_size * self.page_size)
if self.hugepage:
if self.hugepage.start <= address < self.hugepage.end:
line += ' hugepage'
@@ -321,7 +321,7 @@ class HeatmapGenerator(object):
names = [x for x in os.listdir('.') if 'inst-histo' in x and '.txt' in x]
hist = {}
for n in names:
- with open(n) as f:
+ with open(n, encoding='utf-8') as f:
for l in f.readlines():
num, addr = l.strip().split(' ')
assert int(addr) not in hist
@@ -354,7 +354,7 @@ class HeatmapGenerator(object):
if 't' not in symbol_type and 'T' not in symbol_type:
# Filter out symbols not in text sections
continue
- if len(self.symbol_addresses) == 0:
+ if not self.symbol_addresses:
# The first symbol in text sections
text_section_start = addr
self.symbol_addresses.append(0)
@@ -374,9 +374,9 @@ class HeatmapGenerator(object):
def _map_addr_to_symbol(self, addr):
# Find out the symbol name
- assert len(self.symbol_addresses) > 0
+ assert self.symbol_addresses
index = bisect.bisect(self.symbol_addresses, addr)
- assert index > 0 and index <= len(self.symbol_names), \
+ assert 0 < index <= len(self.symbol_names), \
'Failed to find an index (%d) in the list (len=%d)' % (
index, len(self.symbol_names))
return self.symbol_names[index - 1]
@@ -387,7 +387,7 @@ class HeatmapGenerator(object):
print(
'----------------------------------------------------------', file=fp)
print(
- 'Page Offset: %d MB, Count: %d' % (page_num / 1024 / 1024,
+ 'Page Offset: %d MB, Count: %d' % (page_num // 1024 // 1024,
sample_num),
file=fp)
@@ -400,8 +400,8 @@ class HeatmapGenerator(object):
if pid is None:
# The sampling is not on Chrome
continue
- if addr / self.page_size != (
- self.processes[pid].start_address + page_num) / self.page_size:
+ if addr // self.page_size != (
+ self.processes[pid].start_address + page_num) // self.page_size:
# Sampling not in the current page
continue
@@ -410,14 +410,14 @@ class HeatmapGenerator(object):
assert name, 'Failed to find symbol name of addr %x' % addr
symbol_counts[name] += 1
- assert sum(symbol_counts.itervalues()) == sample_num, \
+ assert sum(symbol_counts.values()) == sample_num, \
'Symbol name matching missing for some addresses: %d vs %d' % (
- sum(symbol_counts.itervalues()), sample_num)
+ sum(symbol_counts.values()), sample_num)
# Print out the symbol names sorted by the number of samples in
# the page
for name, count in sorted(
- symbol_counts.iteritems(), key=lambda kv: kv[1], reverse=True):
+ symbol_counts.items(), key=lambda kv: kv[1], reverse=True):
if count == 0:
break
print('> %s : %d' % (name, count), file=fp)
@@ -434,20 +434,19 @@ class HeatmapGenerator(object):
# Read histogram from histo.txt
hist = self._restore_histogram()
# Sort the pages in histogram
- sorted_hist = sorted(
- hist.iteritems(), key=lambda value: value[1], reverse=True)
+ sorted_hist = sorted(hist.items(), key=lambda value: value[1], reverse=True)
# Generate symbolizations
self._read_symbols_from_binary(binary)
# Write hottest pages
- with open('addr2symbol.txt', 'w') as fp:
+ with open('addr2symbol.txt', 'w', encoding='utf-8') as fp:
if self.hugepage:
# Print hugepage region first
print(
'Hugepage top %d hot pages (%d MB - %d MB):' %
- (top_n, self.hugepage.start / 1024 / 1024,
- self.hugepage.end / 1024 / 1024),
+ (top_n, self.hugepage.start // 1024 // 1024,
+ self.hugepage.end // 1024 // 1024),
file=fp)
pages_to_print = [(k, v)
for k, v in sorted_hist
@@ -464,5 +463,5 @@ class HeatmapGenerator(object):
self._print_symbols_in_hot_pages(fp, pages_to_print)
else:
# Print top_n hottest pages.
- pages_to_print = [(k, v) for k, v in sorted_hist][:top_n]
+ pages_to_print = sorted_hist[:top_n]
self._print_symbols_in_hot_pages(fp, pages_to_print)
diff --git a/heatmaps/heatmap_generator_test.py b/heatmaps/heatmap_generator_test.py
index 0c0bbfc8..5008c653 100755
--- a/heatmaps/heatmap_generator_test.py
+++ b/heatmaps/heatmap_generator_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -8,11 +8,12 @@
from __future__ import division, print_function
-import mock
-import os
+import unittest.mock as mock
import unittest
-import heatmap_generator
+import os
+
+from heatmaps import heatmap_generator
def _write_perf_mmap(pid, tid, addr, size, fp):
diff --git a/llvm_extra/create_ebuild_file.py b/llvm_extra/create_ebuild_file.py
index 459e702a..058a270b 100755
--- a/llvm_extra/create_ebuild_file.py
+++ b/llvm_extra/create_ebuild_file.py
@@ -1,25 +1,21 @@
-#!/usr/bin/env python2
-
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-from __future__ import print_function
+"""Create llvm ebuild file.
-import os
-import sys
+This script takes an existing host llvm compiler ebuild and
+creates another build that should be installable in a prefixed location.
+The script patches a few lines in the llvm ebuild to make that happen.
-# This script takes an existing host llvm compiler ebuild and
-# creates another build that should be installable in a prefixed location.
-# The script patches a few lines in the llvm ebuild to make that happen.
-#
-# Since the script is based on the current llvm ebuild patterns,
-# it may need to be updated if those patterns change.
-#
-# This script should normally be invoked by the shell script
-# create_llvm_extra.sh .
+Since the script is based on the current llvm ebuild patterns,
+it may need to be updated if those patterns change.
+
+This script should normally be invoked by the shell script
+create_llvm_extra.sh .
-"""
Below is an example of the expected diff of the newly generated ebuild with
some explanation of the diffs.
@@ -40,8 +36,8 @@ diff -Nuar llvm-pre7.0_pre335547_p20180529.ebuild newly-created-file.ebuild
# Change USE flags to match llvm ebuild installtion. To see the set of flags
enabled in llvm compiler ebuild, run $ sudo emerge -pv llvm
--IUSE="debug +default-compiler-rt +default-libcxx doc libedit +libffi multitarget
-+IUSE="debug +default-compiler-rt +default-libcxx doc libedit +libffi +multitarget
+-IUSE="debug +default-compiler-rt +default-libcxx doc libedit +libffi"
++IUSE="debug +default-compiler-rt +default-libcxx doc libedit +libffi
ncurses ocaml python llvm-next llvm-tot test xml video_cards_radeon"
COMMON_DEPEND="
@@ -59,8 +55,8 @@ diff -Nuar llvm-pre7.0_pre335547_p20180529.ebuild newly-created-file.ebuild
# Allow custom cmake build types (like 'Gentoo')
# Convert use of PN to llvm in epatch commands.
-- epatch "${FILESDIR}"/cmake/${PN}-3.8-allow_custom_cmake_build_types.patch
-+ epatch "${FILESDIR}"/cmake/llvm-3.8-allow_custom_cmake_build_types.patch
+- epatch "${FILESDIR}"/cmake/${PN}-3.8-allow_custom_cmake_build.patch
++ epatch "${FILESDIR}"/cmake/llvm-3.8-allow_custom_cmake_build.patch
# crbug/591436
epatch "${FILESDIR}"/clang-executable-detection.patch
@@ -94,6 +90,12 @@ diff -Nuar llvm-pre7.0_pre335547_p20180529.ebuild newly-created-file.ebuild
# some users may find it useful
"""
+from __future__ import print_function
+
+import os
+import sys
+
+
def process_line(line, text):
# Process the line and append to the text we want to generate.
# Check if line has any patterns that we want to handle.
@@ -103,7 +105,7 @@ def process_line(line, text):
text.append(line)
elif line.startswith('SLOT='):
# Change SLOT to "${PV%%_p[[:digit:]]*}"
- SLOT_STRING='SLOT="${PV%%_p[[:digit:]]*}"\n'
+ SLOT_STRING = 'SLOT="${PV%%_p[[:digit:]]*}"\n'
text.append(SLOT_STRING)
elif line.startswith('IUSE') and 'multitarget' in line:
# Enable multitarget USE flag.
@@ -137,9 +139,9 @@ def process_line(line, text):
def main():
if len(sys.argv) != 3:
- filename = os.path.basename(__file__)
- print ('Usage: ', filename,' <input.ebuild> <output.ebuild>')
- return 1
+ filename = os.path.basename(__file__)
+ print('Usage: ', filename, ' <input.ebuild> <output.ebuild>')
+ return 1
text = []
with open(sys.argv[1], 'r') as infile:
@@ -147,10 +149,10 @@ def main():
process_line(line, text)
with open(sys.argv[2], 'w') as outfile:
- outfile.write("".join(text))
+ outfile.write(''.join(text))
return 0
-if __name__== "__main__":
+if __name__ == '__main__':
sys.exit(main())
diff --git a/llvm_tools/custom_script_example.py b/llvm_tools/custom_script_example.py
index 7e107ad8..38dff007 100755
--- a/llvm_tools/custom_script_example.py
+++ b/llvm_tools/custom_script_example.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
diff --git a/orderfile/post_process_orderfile.py b/orderfile/post_process_orderfile.py
index e24ab1cd..3db0b3b8 100755
--- a/orderfile/post_process_orderfile.py
+++ b/orderfile/post_process_orderfile.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -79,7 +79,7 @@ def main(argv):
options = parser.parse_args(argv)
if not os.path.exists(options.input_file):
- sys.exit('Input orderfile doesn\'t exist.')
+ sys.exit("Input orderfile doesn\'t exist.")
with open(options.input_file) as in_stream, \
open(options.chrome_nm) as chrome_nm_stream, \
diff --git a/orderfile/post_process_orderfile_test.py b/orderfile/post_process_orderfile_test.py
index 2532b8b3..a5fb2c73 100755
--- a/orderfile/post_process_orderfile_test.py
+++ b/orderfile/post_process_orderfile_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -41,7 +41,7 @@ def _cleanup(files):
class Tests(unittest.TestCase):
"""All of our tests for post_process_orderfile."""
- #pylint: disable=protected-access
+ # pylint: disable=protected-access
def test__parse_nm_output(self):
temp_dir = tempfile.mkdtemp()
self.addCleanup(_cleanup, [temp_dir])
diff --git a/pgo_tools/merge_profdata_and_upload.py b/pgo_tools/merge_profdata_and_upload.py
index dddc7f1e..ea95289b 100755
--- a/pgo_tools/merge_profdata_and_upload.py
+++ b/pgo_tools/merge_profdata_and_upload.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
@@ -56,7 +56,8 @@ _LLVMMetadata = collections.namedtuple('_LLVMMetadata', ['head_sha'])
def _get_gs_latest(remote_lastest):
assert remote_lastest.startswith(_GS_PREFIX)
try:
- return subprocess.check_output(['gsutil', 'cat', remote_lastest])
+ return subprocess.check_output(['gsutil', 'cat', remote_lastest],
+ encoding='utf-8')
except subprocess.CalledProcessError:
raise RuntimeError('Lastest artifacts not found: %s' % remote_lastest)
@@ -142,7 +143,8 @@ def _tar_and_upload_profdata(profdata, name_suffix):
print('Uploading tarball to gs.\nCMD: %s\n' % upload_cmd)
# gsutil prints all status to stderr, oddly enough.
- gs_output = subprocess.check_output(upload_cmd, stderr=subprocess.STDOUT)
+ gs_output = subprocess.check_output(
+ upload_cmd, stderr=subprocess.STDOUT, encoding='utf-8')
print(gs_output)
# gsutil exits successfully even if it uploaded nothing. It prints a summary
@@ -227,7 +229,7 @@ def main():
for tryjob in args.tryjob:
fetch_and_append_artifacts(tryjob)
- assert heads, 'Didn\'t fetch anything?'
+ assert heads, "Didn't fetch anything?"
def die_with_head_complaint(complaint):
extra = ' (HEADs found: %s)' % sorted(heads)
@@ -239,7 +241,7 @@ def main():
die_with_head_complaint(
'%d LLVM HEADs were found, which is more than one. You probably '
'want a consistent set of HEADs for a profile. If you know you '
- 'don\'t, please specify --llvm_hash, and note that *all* profiles '
+ "don't, please specify --llvm_hash, and note that *all* profiles "
'will be merged into this final profile, regardless of their '
'reported HEAD.' % len(heads))
llvm_hash, = heads
@@ -247,7 +249,7 @@ def main():
if llvm_hash not in heads:
assert llvm_hash == args.llvm_hash
die_with_head_complaint(
- 'HEAD %s wasn\'t found in any fetched artifacts.' % llvm_hash)
+ "HEAD %s wasn't found in any fetched artifacts." % llvm_hash)
print('Using LLVM hash: %s' % llvm_hash)
diff --git a/run_tests_for.py b/run_tests_for.py
index cb8e6430..19f81722 100755
--- a/run_tests_for.py
+++ b/run_tests_for.py
@@ -37,6 +37,12 @@ import sys
TestSpec = collections.namedtuple('TestSpec', ['directory', 'command'])
+# List of python scripts that are not test with relative path to
+# toolchain-utils.
+non_test_py_files = {
+ 'debug_info_test/debug_info_test.py',
+}
+
def _make_relative_to_toolchain_utils(toolchain_utils, path):
"""Cleans & makes a path relative to toolchain_utils.
@@ -52,13 +58,26 @@ def _make_relative_to_toolchain_utils(toolchain_utils, path):
return result
-def _gather_python_tests_in(subdir):
+def _filter_python_tests(test_files, toolchain_utils):
+ """Returns all files that are real python tests."""
+ python_tests = []
+ for test_file in test_files:
+ rel_path = _make_relative_to_toolchain_utils(toolchain_utils, test_file)
+ if rel_path not in non_test_py_files:
+ python_tests.append(_python_test_to_spec(test_file))
+ else:
+ print('## %s ... NON_TEST_PY_FILE' % rel_path)
+ return python_tests
+
+
+def _gather_python_tests_in(rel_subdir, toolchain_utils):
"""Returns all files that appear to be Python tests in a given directory."""
+ subdir = os.path.join(toolchain_utils, rel_subdir)
test_files = (
os.path.join(subdir, file_name)
for file_name in os.listdir(subdir)
if file_name.endswith('_test.py') or file_name.endswith('_unittest.py'))
- return [_python_test_to_spec(test_file) for test_file in test_files]
+ return _filter_python_tests(test_files, toolchain_utils)
def _run_test(test_spec):
@@ -91,7 +110,7 @@ def _python_test_to_spec(test_file):
return TestSpec(directory=test_directory, command=command)
-def _autodetect_python_tests_for(test_file):
+def _autodetect_python_tests_for(test_file, toolchain_utils):
"""Given a test file, detect if there may be related tests."""
if not test_file.endswith('.py'):
return []
@@ -103,8 +122,7 @@ def _autodetect_python_tests_for(test_file):
base = test_file[:-3]
candidates = (base + x for x in test_suffixes)
test_files = (x for x in candidates if os.path.exists(x))
-
- return [_python_test_to_spec(test_file) for test_file in test_files]
+ return _filter_python_tests(test_files, toolchain_utils)
def _run_test_scripts(all_tests, show_successful_output=False):
@@ -195,7 +213,7 @@ def _find_forced_subdir_python_tests(test_paths, toolchain_utils):
results = []
for d in sorted(gather_test_dirs):
- results += _gather_python_tests_in(os.path.join(toolchain_utils, d))
+ results += _gather_python_tests_in(d, toolchain_utils)
return results
@@ -241,7 +259,7 @@ def main(argv):
tests_to_run = _find_forced_subdir_python_tests(modified_files,
toolchain_utils)
for f in modified_files:
- tests_to_run += _autodetect_python_tests_for(f)
+ tests_to_run += _autodetect_python_tests_for(f, toolchain_utils)
tests_to_run += _find_go_tests(modified_files)
# TestSpecs have lists, so we can't use a set. We'd likely want to keep them
diff --git a/toolchain_utils_githooks/check-presubmit.py b/toolchain_utils_githooks/check-presubmit.py
index 2fea102a..fc6ec9fc 100755
--- a/toolchain_utils_githooks/check-presubmit.py
+++ b/toolchain_utils_githooks/check-presubmit.py
@@ -226,7 +226,7 @@ def check_cros_lint(toolchain_utils_root, thread_pool, files):
# pylint+golint.
def try_run_cros_lint(cros_binary):
exit_code, output = run_command_unchecked(
- [cros_binary, 'lint', '--'] + files,
+ [cros_binary, 'lint', '--py3', '--'] + files,
toolchain_utils_root,
env=fixed_env)