path: root/tools
author    Marshall Greenblatt <magreenblatt@gmail.com>  2020-01-14 15:29:42 +0100
committer Marshall Greenblatt <magreenblatt@gmail.com>  2020-01-14 15:29:42 +0100
commit    6d7ad9df13beefa2c7598198060631e9b9b6223b (patch)
tree      8184f62536255c7de09907d7d86f5493e1be1a0d /tools
parent    a70a108f0173e867ca2fb230e21c937975254158 (diff)
download  jcef-6d7ad9df13beefa2c7598198060631e9b9b6223b.tar.gz
Add Python 3 support (CEF issue #2856)
Diffstat (limited to 'tools')
-rw-r--r--  tools/buildtools/README.jcef                      |   7
-rw-r--r--  tools/buildtools/download_from_google_storage.py  | 237
-rw-r--r--  tools/buildtools/gsutil.py                        |  42
-rw-r--r--  tools/buildtools/subprocess2.py                   | 341
-rw-r--r--  tools/clang_util.py                               |  10
-rw-r--r--  tools/date_util.py                                |   1
-rw-r--r--  tools/exec_util.py                                |   9
-rw-r--r--  tools/file_util.py                                |  55
-rw-r--r--  tools/fix_style.py                                |  24
-rw-r--r--  tools/git_util.py                                 |  13
-rw-r--r--  tools/make_readme.py                              |   8
-rw-r--r--  tools/make_version_header.py                      |   6
-rw-r--r--  tools/readme_util.py                              |   5
-rw-r--r--  tools/yapf_util.py                                |   6
14 files changed, 305 insertions(+), 459 deletions(-)
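
The port follows a handful of recurring Python 2/3 compatibility idioms rather than a 2to3 rewrite: try/except import shims, `from __future__` imports, and explicit bytes/str decoding at subprocess and file boundaries. As a standalone illustration (not part of the patch itself), the import shim used below for Queue/queue looks like this:

    # Python 2 names the module "Queue"; Python 3 renamed it to "queue".
    # Importing under one local name keeps the rest of the file
    # version-agnostic.
    try:
        import Queue as queue  # Python 2
    except ImportError:
        import queue  # Python 3

    q = queue.Queue()
    q.put('item')
    print(q.get())  # -> item

The same shim pattern reappears for urllib2/urllib.request in gsutil.py and for Queue in subprocess2.py.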
diff --git a/tools/buildtools/README.jcef b/tools/buildtools/README.jcef
index 52092b3..c90189e 100644
--- a/tools/buildtools/README.jcef
+++ b/tools/buildtools/README.jcef
@@ -1,8 +1,8 @@
Name: depot_tools
Short Name: depot_tools
URL: https://chromium.googlesource.com/chromium/tools/depot_tools.git
-Date: 24 Apr 2017
-Revision: df6e7348b
+Date: 11 Jan 2020
+Revision: 7a8bf9489
License: BSD
License File: LICENSE
@@ -10,4 +10,5 @@ Description:
Select tools extracted from depot_tools.
Local Modifications:
-None
+- Remove dependency on vpython.
+- Update gsutil version to 4.46.
diff --git a/tools/buildtools/download_from_google_storage.py b/tools/buildtools/download_from_google_storage.py
index c297ced..3f27954 100644
--- a/tools/buildtools/download_from_google_storage.py
+++ b/tools/buildtools/download_from_google_storage.py
@@ -5,11 +5,17 @@
"""Download files from Google Storage based on SHA1 sums."""
+from __future__ import print_function
import hashlib
import optparse
import os
-import Queue
+
+try:
+ import Queue as queue
+except ImportError: # For Py3 compatibility
+ import queue
+
import re
import shutil
import stat
@@ -21,14 +27,22 @@ import time
import subprocess2
+# Env vars from which the tempdir can be determined; at a
+# minimum, this needs to match Python's tempfile module and
+# normal unix standards.
+_TEMPDIR_ENV_VARS = ('TMPDIR', 'TEMP', 'TMP')
+
GSUTIL_DEFAULT_PATH = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'gsutil.py')
# Maps sys.platform to what we actually want to call them.
PLATFORM_MAPPING = {
'cygwin': 'win',
'darwin': 'mac',
- 'linux2': 'linux',
+ 'linux': 'linux', # Python 3.3+.
+ 'linux2': 'linux', # Python < 3.3 uses "linux2" / "linux3".
'win32': 'win',
+ 'aix6': 'aix',
+ 'aix7': 'aix',
}
@@ -54,17 +68,20 @@ def GetNormalizedPlatform():
# Common utilities
class Gsutil(object):
"""Call gsutil with some predefined settings. This is a convenience object,
- and is also immutable."""
+ and is also immutable.
+
+ HACK: This object is used directly by the external script
+ `<depot_tools>/win_toolchain/get_toolchain_if_necessary.py`
+ """
MAX_TRIES = 5
RETRY_BASE_DELAY = 5.0
RETRY_DELAY_MULTIPLE = 1.3
- def __init__(self, path, boto_path=None, timeout=None, version='4.46'):
+ def __init__(self, path, boto_path=None, version='4.46'):
if not os.path.exists(path):
raise FileNotFoundError('GSUtil not found in %s' % path)
self.path = path
- self.timeout = timeout
self.boto_path = boto_path
self.version = version
@@ -77,12 +94,15 @@ class Gsutil(object):
env['AWS_CREDENTIAL_FILE'] = self.boto_path
env['BOTO_CONFIG'] = self.boto_path
+ if PLATFORM_MAPPING[sys.platform] != 'win':
+ env.update((x, "/tmp") for x in _TEMPDIR_ENV_VARS)
+
return env
def call(self, *args):
cmd = [sys.executable, self.path, '--force-version', self.version]
cmd.extend(args)
- return subprocess2.call(cmd, env=self.get_sub_env(), timeout=self.timeout)
+ return subprocess2.call(cmd, env=self.get_sub_env())
def check_call(self, *args):
cmd = [sys.executable, self.path, '--force-version', self.version]
@@ -91,15 +111,17 @@ class Gsutil(object):
cmd,
stdout=subprocess2.PIPE,
stderr=subprocess2.PIPE,
- env=self.get_sub_env(),
- timeout=self.timeout)
+ env=self.get_sub_env())
+
+ out = out.decode('utf-8', 'replace')
+ err = err.decode('utf-8', 'replace')
# Parse output.
status_code_match = re.search('status=([0-9]+)', err)
if status_code_match:
return (int(status_code_match.group(1)), out, err)
if ('You are attempting to access protected data with '
- 'no configured credentials.' in err):
+ 'no configured credentials.' in err):
return (403, out, err)
if 'matched no objects' in err:
return (404, out, err)
@@ -143,29 +165,29 @@ def get_sha1(filename):
# Download-specific code starts here
-def enumerate_work_queue(input_filename, work_queue, directory,
- recursive, ignore_errors, output, sha1_file,
- auto_platform):
+def enumerate_input(input_filename, directory, recursive, ignore_errors, output,
+ sha1_file, auto_platform):
if sha1_file:
if not os.path.exists(input_filename):
if not ignore_errors:
- raise FileNotFoundError('%s not found.' % input_filename)
- print >> sys.stderr, '%s not found.' % input_filename
+ raise FileNotFoundError(
+          '{} not found when attempting to enumerate files to download.'.format(
+ input_filename))
+ print('%s not found.' % input_filename, file=sys.stderr)
with open(input_filename, 'rb') as f:
- sha1_match = re.match('^([A-Za-z0-9]{40})$', f.read(1024).rstrip())
+ sha1_match = re.match(b'^([A-Za-z0-9]{40})$', f.read(1024).rstrip())
if sha1_match:
- work_queue.put((sha1_match.groups(1)[0], output))
- return 1
+ yield (sha1_match.groups(1)[0].decode('utf-8'), output)
+ return
if not ignore_errors:
raise InvalidFileError('No sha1 sum found in %s.' % input_filename)
- print >> sys.stderr, 'No sha1 sum found in %s.' % input_filename
- return 0
+ print('No sha1 sum found in %s.' % input_filename, file=sys.stderr)
+ return
if not directory:
- work_queue.put((input_filename, output))
- return 1
+ yield (input_filename, output)
+ return
- work_queue_size = 0
for root, dirs, files in os.walk(input_filename):
if not recursive:
for item in dirs[:]:
@@ -185,23 +207,23 @@ def enumerate_work_queue(input_filename, work_queue, directory,
'the path of %s' % full_path)
if not ignore_errors:
raise InvalidFileError(err)
- print >> sys.stderr, err
+ print(err, file=sys.stderr)
continue
current_platform = PLATFORM_MAPPING[sys.platform]
if current_platform != target_platform:
continue
with open(full_path, 'rb') as f:
- sha1_match = re.match('^([A-Za-z0-9]{40})$', f.read(1024).rstrip())
+ sha1_match = re.match(b'^([A-Za-z0-9]{40})$', f.read(1024).rstrip())
if sha1_match:
- work_queue.put(
- (sha1_match.groups(1)[0], full_path.replace('.sha1', '')))
- work_queue_size += 1
+ yield (
+ sha1_match.groups(1)[0].decode('utf-8'),
+ full_path.replace('.sha1', '')
+ )
else:
if not ignore_errors:
raise InvalidFileError('No sha1 sum found in %s.' % filename)
- print >> sys.stderr, 'No sha1 sum found in %s.' % filename
- return work_queue_size
+ print('No sha1 sum found in %s.' % filename, file=sys.stderr)
def _validate_tar_file(tar, prefix):
@@ -209,7 +231,9 @@ def _validate_tar_file(tar, prefix):
"""Returns false if the tarinfo is something we explicitly forbid."""
if tarinfo.issym() or tarinfo.islnk():
return False
- if '..' in tarinfo.name or not tarinfo.name.startswith(prefix):
+ if ('../' in tarinfo.name or
+ '..\\' in tarinfo.name or
+ not tarinfo.name.startswith(prefix)):
return False
return True
return all(map(_validate, tar.getmembers()))
@@ -228,14 +252,10 @@ def _downloader_worker_thread(thread_num, q, force, base_url,
thread_num, output_filename))
ret_codes.put((1, '%s is not a tar.gz archive.' % (output_filename)))
continue
- extract_dir = output_filename[0:len(output_filename)-7]
+ extract_dir = output_filename[:-len('.tar.gz')]
if os.path.exists(output_filename) and not force:
if not extract or os.path.exists(extract_dir):
if get_sha1(output_filename) == input_sha1_sum:
- if verbose:
- out_q.put(
- '%d> File %s exists and SHA1 matches. Skipping.' % (
- thread_num, output_filename))
continue
# Check if file exists.
file_url = '%s/%s' % (base_url, input_sha1_sum)
@@ -248,13 +268,14 @@ def _downloader_worker_thread(thread_num, q, force, base_url,
file_url, output_filename)))
else:
# Other error, probably auth related (bad ~/.boto, etc).
- out_q.put('%d> Failed to fetch file %s for %s, skipping. [Err: %s]' % (
- thread_num, file_url, output_filename, err))
- ret_codes.put((1, 'Failed to fetch file %s for %s. [Err: %s]' % (
- file_url, output_filename, err)))
+ out_q.put('%d> Failed to fetch file %s for %s, skipping. [Err: %s]' %
+ (thread_num, file_url, output_filename, err))
+ ret_codes.put((1, 'Failed to fetch file %s for %s. [Err: %s]' %
+ (file_url, output_filename, err)))
continue
# Fetch the file.
- out_q.put('%d> Downloading %s...' % (thread_num, output_filename))
+ if verbose:
+ out_q.put('%d> Downloading %s...' % (thread_num, output_filename))
try:
if delete:
os.remove(output_filename) # Delete the file if it exists already.
@@ -284,6 +305,11 @@ def _downloader_worker_thread(thread_num, q, force, base_url,
continue
with tarfile.open(output_filename, 'r:gz') as tar:
dirname = os.path.dirname(os.path.abspath(output_filename))
+ # If there are long paths inside the tarball we can get extraction
+ # errors on windows due to the 260 path length limit (this includes
+ # pwd). Use the extended path syntax.
+ if sys.platform == 'win32':
+ dirname = '\\\\?\\%s' % dirname
if not _validate_tar_file(tar, os.path.basename(extract_dir)):
out_q.put('%d> Error: %s contains files outside %s.' % (
thread_num, output_filename, extract_dir))
@@ -313,7 +339,7 @@ def _downloader_worker_thread(thread_num, q, force, base_url,
elif sys.platform != 'win32':
# On non-Windows platforms, key off of the custom header
# "x-goog-meta-executable".
- code, out, _ = gsutil.check_call('stat', file_url)
+ code, out, err = gsutil.check_call('stat', file_url)
if code != 0:
out_q.put('%d> %s' % (thread_num, err))
ret_codes.put((code, err))
@@ -321,24 +347,80 @@ def _downloader_worker_thread(thread_num, q, force, base_url,
st = os.stat(output_filename)
os.chmod(output_filename, st.st_mode | stat.S_IEXEC)
-def printer_worker(output_queue):
- while True:
- line = output_queue.get()
- # Its plausible we want to print empty lines.
- if line is None:
- break
- print line
+
+class PrinterThread(threading.Thread):
+ def __init__(self, output_queue):
+ super(PrinterThread, self).__init__()
+ self.output_queue = output_queue
+ self.did_print_anything = False
+
+ def run(self):
+ while True:
+ line = self.output_queue.get()
+ # It's plausible we want to print empty lines: Explicit `is None`.
+ if line is None:
+ break
+ self.did_print_anything = True
+ print(line)
+
+
+def _data_exists(input_sha1_sum, output_filename, extract):
+ """Returns True if the data exists locally and matches the sha1.
+
+ This conservatively returns False for error cases.
+
+ Args:
+ input_sha1_sum: Expected sha1 stored on disk.
+ output_filename: The file to potentially download later. Its sha1 will be
+ compared to input_sha1_sum.
+    extract: Whether or not a downloaded file should be extracted. If the file
+ is not extracted, this just compares the sha1 of the file. If the file
+ is to be extracted, this only compares the sha1 of the target archive if
+ the target directory already exists. The content of the target directory
+ is not checked.
+ """
+ extract_dir = None
+ if extract:
+ if not output_filename.endswith('.tar.gz'):
+      # This will cause an error later. Conservatively return False to not bail
+ # out too early.
+ return False
+ extract_dir = output_filename[:-len('.tar.gz')]
+ if os.path.exists(output_filename):
+ if not extract or os.path.exists(extract_dir):
+ if get_sha1(output_filename) == input_sha1_sum:
+ return True
+ return False
def download_from_google_storage(
input_filename, base_url, gsutil, num_threads, directory, recursive,
force, output, ignore_errors, sha1_file, verbose, auto_platform, extract):
+
+ # Tuples of sha1s and paths.
+ input_data = list(enumerate_input(
+ input_filename, directory, recursive, ignore_errors, output, sha1_file,
+ auto_platform))
+
+ # Sequentially check for the most common case and see if we can bail out
+ # early before making any slow calls to gsutil.
+ if not force and all(
+ _data_exists(sha1, path, extract) for sha1, path in input_data):
+ return 0
+
+ # Call this once to ensure gsutil's update routine is called only once. Only
+ # needs to be done if we'll process input data in parallel, which can lead to
+ # a race in gsutil's self-update on the first call. Note, this causes a
+ # network call, therefore any fast bailout should be done before this point.
+ if len(input_data) > 1:
+ gsutil.check_call('version')
+
# Start up all the worker threads.
all_threads = []
download_start = time.time()
- stdout_queue = Queue.Queue()
- work_queue = Queue.Queue()
- ret_codes = Queue.Queue()
+ stdout_queue = queue.Queue()
+ work_queue = queue.Queue()
+ ret_codes = queue.Queue()
ret_codes.put((0, None))
for thread_num in range(num_threads):
t = threading.Thread(
@@ -348,14 +430,13 @@ def download_from_google_storage(
t.daemon = True
t.start()
all_threads.append(t)
- printer_thread = threading.Thread(target=printer_worker, args=[stdout_queue])
+ printer_thread = PrinterThread(stdout_queue)
printer_thread.daemon = True
printer_thread.start()
- # Enumerate our work queue.
- work_queue_size = enumerate_work_queue(
- input_filename, work_queue, directory, recursive,
- ignore_errors, output, sha1_file, auto_platform)
+ # Populate our work queue.
+ for sha1, path in input_data:
+ work_queue.put((sha1, path))
for _ in all_threads:
work_queue.put((None, None)) # Used to tell worker threads to stop.
@@ -370,13 +451,12 @@ def download_from_google_storage(
for ret_code, message in ret_codes.queue:
max_ret_code = max(ret_code, max_ret_code)
if message:
- print >> sys.stderr, message
- if verbose and not max_ret_code:
- print 'Success!'
+ print(message, file=sys.stderr)
- if verbose:
- print 'Downloading %d files took %1f second(s)' % (
- work_queue_size, time.time() - download_start)
+ # Only print summary if any work was done.
+ if printer_thread.did_print_anything:
+    print('Downloading %d files took %.1f second(s)' %
+ (len(input_data), time.time() - download_start))
return max_ret_code
@@ -463,14 +543,16 @@ def main(args):
if (set(('http_proxy', 'https_proxy')).intersection(
env.lower() for env in os.environ) and
'NO_AUTH_BOTO_CONFIG' not in os.environ):
- print >> sys.stderr, ('NOTICE: You have PROXY values set in your '
- 'environment, but gsutil in depot_tools does not '
- '(yet) obey them.')
- print >> sys.stderr, ('Also, --no_auth prevents the normal BOTO_CONFIG '
- 'environment variable from being used.')
- print >> sys.stderr, ('To use a proxy in this situation, please supply '
- 'those settings in a .boto file pointed to by '
- 'the NO_AUTH_BOTO_CONFIG environment var.')
+    print('NOTICE: You have PROXY values set in your environment, but gsutil'
+          ' in depot_tools does not (yet) obey them.',
+          file=sys.stderr)
+    print('Also, --no_auth prevents the normal BOTO_CONFIG environment'
+          ' variable from being used.',
+          file=sys.stderr)
+    print('To use a proxy in this situation, please supply those settings'
+          ' in a .boto file pointed to by the NO_AUTH_BOTO_CONFIG environment'
+          ' variable.',
+ file=sys.stderr)
options.boto = os.environ.get('NO_AUTH_BOTO_CONFIG', os.devnull)
# Make sure gsutil exists where we expect it to.
@@ -483,10 +565,11 @@ def main(args):
# Passing in -g/--config will run our copy of GSUtil, then quit.
if options.config:
- print '===Note from depot_tools==='
- print 'If you do not have a project ID, enter "0" when asked for one.'
- print '===End note from depot_tools==='
- print
+ print('===Note from depot_tools===')
+ print('If you do not have a project ID, enter "0" when asked for one.')
+ print('===End note from depot_tools===')
+ print()
+ gsutil.check_call('version')
return gsutil.call('config')
if not args:
@@ -527,11 +610,15 @@ def main(args):
base_url = 'gs://%s' % options.bucket
- return download_from_google_storage(
+ try:
+ return download_from_google_storage(
input_filename, base_url, gsutil, options.num_threads, options.directory,
options.recursive, options.force, options.output, options.ignore_errors,
options.sha1_file, options.verbose, options.auto_platform,
options.extract)
+ except FileNotFoundError as e:
+ print("Fatal error: {}".format(e))
+ return 1
if __name__ == '__main__':
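
The other recurring change in download_from_google_storage.py is bytes hygiene: files opened with 'rb' yield bytes on Python 3, so the SHA1 regular expression becomes a bytes literal and the captured digest is decoded before use, while gsutil's stdout/stderr are decoded once in check_call() so the status-code regex can stay a str pattern. A minimal sketch of the idiom, using a hypothetical digest value:

    import re

    # 'rb' reads return bytes on Python 3, so the pattern must be a bytes
    # literal; decode the captured digest for downstream str comparisons.
    data = b'8843d7f92416211de9ebb963ff4ce28125932878\n'  # hypothetical digest
    match = re.match(b'^([A-Za-z0-9]{40})$', data.rstrip())
    if match:
        print(match.group(1).decode('utf-8'))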
diff --git a/tools/buildtools/gsutil.py b/tools/buildtools/gsutil.py
index d57cafb..63847d2 100644
--- a/tools/buildtools/gsutil.py
+++ b/tools/buildtools/gsutil.py
@@ -17,7 +17,12 @@ import subprocess
import sys
import tempfile
import time
-import urllib2
+
+try:
+ import urllib2 as urllib
+except ImportError: # For Py3 compatibility
+ import urllib.request as urllib
+
import zipfile
@@ -29,6 +34,9 @@ DEFAULT_BIN_DIR = os.path.join(THIS_DIR, 'external_bin', 'gsutil')
DEFAULT_FALLBACK_GSUTIL = os.path.join(
THIS_DIR, 'third_party', 'gsutil', 'gsutil')
+IS_WINDOWS = os.name == 'nt'
+
+
class InvalidGsutilError(Exception):
pass
@@ -50,8 +58,8 @@ def download_gsutil(version, target_dir):
local_md5 = md5_calc.hexdigest()
metadata_url = '%s%s' % (API_URL, filename)
- metadata = json.load(urllib2.urlopen(metadata_url))
- remote_md5 = base64.b64decode(metadata['md5Hash'])
+ metadata = json.load(urllib.urlopen(metadata_url))
+ remote_md5 = base64.b64decode(metadata['md5Hash']).decode('utf-8')
if local_md5 == remote_md5:
return target_filename
@@ -59,7 +67,7 @@ def download_gsutil(version, target_dir):
# Do the download.
url = '%s%s' % (GSUTIL_URL, filename)
- u = urllib2.urlopen(url)
+ u = urllib.urlopen(url)
with open(target_filename, 'wb') as f:
while True:
buf = u.read(4096)
@@ -69,12 +77,6 @@ def download_gsutil(version, target_dir):
return target_filename
-def check_gsutil(gsutil_bin):
- """Run gsutil version and make sure it runs."""
- return subprocess.call(
- [sys.executable, gsutil_bin, 'version'],
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT) == 0
-
@contextlib.contextmanager
def temporary_directory(base):
tmpdir = tempfile.mkdtemp(prefix='gsutil_py', dir=base)
@@ -84,10 +86,14 @@ def temporary_directory(base):
if os.path.isdir(tmpdir):
shutil.rmtree(tmpdir)
+
def ensure_gsutil(version, target, clean):
bin_dir = os.path.join(target, 'gsutil_%s' % version)
gsutil_bin = os.path.join(bin_dir, 'gsutil', 'gsutil')
- if not clean and os.path.isfile(gsutil_bin) and check_gsutil(gsutil_bin):
+ gsutil_flag = os.path.join(bin_dir, 'gsutil', 'install.flag')
+ # We assume that if gsutil_flag exists, then we have a good version
+ # of the gsutil package.
+ if not clean and os.path.isfile(gsutil_flag):
# Everything is awesome! we're all done here.
return gsutil_bin
@@ -113,10 +119,13 @@ def ensure_gsutil(version, target, clean):
except (OSError, IOError):
# Something else did this in parallel.
pass
+ # Final check that the gsutil bin exists. This should never fail.
+ if not os.path.isfile(gsutil_bin):
+ raise InvalidGsutilError()
+ # Drop a flag file.
+ with open(gsutil_flag, 'w') as f:
+ f.write('This flag file is dropped by gsutil.py')
- # Final check that the gsutil bin is okay. This should never fail.
- if not check_gsutil(gsutil_bin):
- raise InvalidGsutilError()
return gsutil_bin
@@ -130,11 +139,13 @@ def run_gsutil(force_version, fallback, target, args, clean=False):
return subprocess.call(cmd)
+
+
def parse_args():
bin_dir = os.environ.get('DEPOT_TOOLS_GSUTIL_BIN_DIR', DEFAULT_BIN_DIR)
parser = argparse.ArgumentParser()
- parser.add_argument('--force-version', default='4.13')
+ parser.add_argument('--force-version', default='4.30')
parser.add_argument('--clean', action='store_true',
help='Clear any existing gsutil package, forcing a new download.')
parser.add_argument('--fallback', default=DEFAULT_FALLBACK_GSUTIL)
@@ -156,5 +167,6 @@ def main():
return run_gsutil(args.force_version, args.fallback, args.target, args.args,
clean=args.clean)
+
if __name__ == '__main__':
sys.exit(main())
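
Beyond the urllib2/urllib.request shim, the subtle fix in gsutil.py is the added .decode('utf-8') on the remote MD5: base64.b64decode() returns bytes on Python 3 while hexdigest() returns str, so without the decode the equality check would always be False and every run would re-download. A self-contained sketch of the pitfall:

    import base64

    # b64decode returns bytes on Python 3; decoding makes the comparison
    # against a str value meaningful instead of silently always-False.
    encoded = base64.b64encode(b'deadbeef')
    assert base64.b64decode(encoded).decode('utf-8') == 'deadbeef'

The install.flag change is orthogonal to the port: it replaces a `gsutil version` subprocess probe with a marker file, saving one child-process launch per invocation.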
diff --git a/tools/buildtools/subprocess2.py b/tools/buildtools/subprocess2.py
index 26f6269..dea1a2d 100644
--- a/tools/buildtools/subprocess2.py
+++ b/tools/buildtools/subprocess2.py
@@ -1,4 +1,4 @@
-# coding=utf8
+# coding=utf-8
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -7,28 +7,32 @@
In theory you shouldn't need anything else in subprocess, or this module failed.
"""
-import cStringIO
+import codecs
import errno
+import io
import logging
import os
-import Queue
import subprocess
import sys
-import time
import threading
+# Cache the string-escape codec to ensure subprocess can find it later.
+# See crbug.com/912292#c2 for context.
+if sys.version_info.major == 2:
+ import Queue
+ codecs.lookup('string-escape')
+else:
+ import queue as Queue
+ # pylint: disable=redefined-builtin
+ basestring = (str, bytes)
+
# Constants forwarded from subprocess.
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
# Sends stdout or stderr to os.devnull.
-VOID = object()
-# Error code when a process was killed because it timed out.
-TIMED_OUT = -2001
-
-# Globals.
-# Set to True if you somehow need to disable this hack.
-SUBPROCESS_CLEANUP_HACKED = False
+VOID = open(os.devnull, 'w')
+VOID_INPUT = open(os.devnull, 'r')
class CalledProcessError(subprocess.CalledProcessError):
@@ -60,51 +64,11 @@ def kill_pid(pid):
# Unable to import 'module'
# pylint: disable=no-member,F0401
import signal
- return os.kill(pid, signal.SIGKILL)
- except ImportError:
- pass
-
-
-def kill_win(process):
- """Kills a process with its windows handle.
-
- Has no effect on other platforms.
- """
- try:
- # Unable to import 'module'
- # pylint: disable=import-error
- import win32process
- # Access to a protected member _handle of a client class
- # pylint: disable=protected-access
- return win32process.TerminateProcess(process._handle, -1)
+ return os.kill(pid, signal.SIGTERM)
except ImportError:
pass
-def add_kill():
- """Adds kill() method to subprocess.Popen for python <2.6"""
- if hasattr(subprocess.Popen, 'kill'):
- return
-
- if sys.platform == 'win32':
- subprocess.Popen.kill = kill_win
- else:
- subprocess.Popen.kill = lambda process: kill_pid(process.pid)
-
-
-def hack_subprocess():
- """subprocess functions may throw exceptions when used in multiple threads.
-
- See http://bugs.python.org/issue1731717 for more information.
- """
- global SUBPROCESS_CLEANUP_HACKED
- if not SUBPROCESS_CLEANUP_HACKED and threading.activeCount() != 1:
- # Only hack if there is ever multiple threads.
- # There is no point to leak with only one thread.
- subprocess._cleanup = lambda: None
- SUBPROCESS_CLEANUP_HACKED = True
-
-
def get_english_env(env):
"""Forces LANG and/or LANGUAGE to be English.
@@ -132,42 +96,6 @@ def get_english_env(env):
return env
-class NagTimer(object):
- """
- Triggers a callback when a time interval passes without an event being fired.
-
- For example, the event could be receiving terminal output from a subprocess;
- and the callback could print a warning to stderr that the subprocess appeared
- to be hung.
- """
- def __init__(self, interval, cb):
- self.interval = interval
- self.cb = cb
- self.timer = threading.Timer(self.interval, self.fn)
- self.last_output = self.previous_last_output = 0
-
- def start(self):
- self.last_output = self.previous_last_output = time.time()
- self.timer.start()
-
- def event(self):
- self.last_output = time.time()
-
- def fn(self):
- now = time.time()
- if self.last_output == self.previous_last_output:
- self.cb(now - self.previous_last_output)
- # Use 0.1 fudge factor, just in case
- # (self.last_output - now) is very close to zero.
- sleep_time = (self.last_output - now - 0.1) % self.interval
- self.previous_last_output = self.last_output
- self.timer = threading.Timer(sleep_time + 0.1, self.fn)
- self.timer.start()
-
- def cancel(self):
- self.timer.cancel()
-
-
class Popen(subprocess.Popen):
"""Wraps subprocess.Popen() with various workarounds.
@@ -190,10 +118,6 @@ class Popen(subprocess.Popen):
popen_lock = threading.Lock()
def __init__(self, args, **kwargs):
- # Make sure we hack subprocess if necessary.
- hack_subprocess()
- add_kill()
-
env = get_english_env(kwargs.get('env'))
if env:
kwargs['env'] = env
@@ -214,37 +138,10 @@ class Popen(subprocess.Popen):
tmp_str += '; cwd=%s' % kwargs['cwd']
logging.debug(tmp_str)
- self.stdout_cb = None
- self.stderr_cb = None
- self.stdin_is_void = False
- self.stdout_is_void = False
- self.stderr_is_void = False
- self.cmd_str = tmp_str
-
- if kwargs.get('stdin') is VOID:
- kwargs['stdin'] = open(os.devnull, 'r')
- self.stdin_is_void = True
-
- for stream in ('stdout', 'stderr'):
- if kwargs.get(stream) in (VOID, os.devnull):
- kwargs[stream] = open(os.devnull, 'w')
- setattr(self, stream + '_is_void', True)
- if callable(kwargs.get(stream)):
- setattr(self, stream + '_cb', kwargs[stream])
- kwargs[stream] = PIPE
-
- self.start = time.time()
- self.timeout = None
- self.nag_timer = None
- self.nag_max = None
- self.shell = kwargs.get('shell', None)
- # Silence pylint on MacOSX
- self.returncode = None
-
try:
with self.popen_lock:
super(Popen, self).__init__(args, **kwargs)
- except OSError, e:
+ except OSError as e:
if e.errno == errno.EAGAIN and sys.platform == 'cygwin':
# Convert fork() emulation failure into a CygwinRebaseError().
raise CygwinRebaseError(
@@ -261,205 +158,25 @@ class Popen(subprocess.Popen):
'Check that %s or %s exist and have execution permission.'
% (str(e), kwargs.get('cwd'), args[0]))
- def _tee_threads(self, input): # pylint: disable=redefined-builtin
- """Does I/O for a process's pipes using threads.
-
- It's the simplest and slowest implementation. Expect very slow behavior.
-
- If there is a callback and it doesn't keep up with the calls, the timeout
- effectiveness will be delayed accordingly.
- """
- # Queue of either of <threadname> when done or (<threadname>, data). In
- # theory we would like to limit to ~64kb items to not cause large memory
- # usage when the callback blocks. It is not done because it slows down
- # processing on OSX10.6 by a factor of 2x, making it even slower than
- # Windows! Revisit this decision if it becomes a problem, e.g. crash
- # because of memory exhaustion.
- queue = Queue.Queue()
- done = threading.Event()
- nag = None
-
- def write_stdin():
- try:
- stdin_io = cStringIO.StringIO(input)
- while True:
- data = stdin_io.read(1024)
- if data:
- self.stdin.write(data)
- else:
- self.stdin.close()
- break
- finally:
- queue.put('stdin')
-
- def _queue_pipe_read(pipe, name):
- """Queues characters read from a pipe into a queue."""
- try:
- while True:
- data = pipe.read(1)
- if not data:
- break
- if nag:
- nag.event()
- queue.put((name, data))
- finally:
- queue.put(name)
-
- def timeout_fn():
- try:
- done.wait(self.timeout)
- finally:
- queue.put('timeout')
-
- def wait_fn():
- try:
- self.wait()
- finally:
- queue.put('wait')
-
- # Starts up to 5 threads:
- # Wait for the process to quit
- # Read stdout
- # Read stderr
- # Write stdin
- # Timeout
- threads = {
- 'wait': threading.Thread(target=wait_fn),
- }
- if self.timeout is not None:
- threads['timeout'] = threading.Thread(target=timeout_fn)
- if self.stdout_cb:
- threads['stdout'] = threading.Thread(
- target=_queue_pipe_read, args=(self.stdout, 'stdout'))
- if self.stderr_cb:
- threads['stderr'] = threading.Thread(
- target=_queue_pipe_read, args=(self.stderr, 'stderr'))
- if input:
- threads['stdin'] = threading.Thread(target=write_stdin)
- elif self.stdin:
- # Pipe but no input, make sure it's closed.
- self.stdin.close()
- for t in threads.itervalues():
- t.start()
-
- if self.nag_timer:
- def _nag_cb(elapsed):
- logging.warn(' No output for %.0f seconds from command:' % elapsed)
- logging.warn(' %s' % self.cmd_str)
- if (self.nag_max and
- int('%.0f' % (elapsed / self.nag_timer)) >= self.nag_max):
- queue.put('timeout')
- done.set() # Must do this so that timeout thread stops waiting.
- nag = NagTimer(self.nag_timer, _nag_cb)
- nag.start()
-
- timed_out = False
- try:
- # This thread needs to be optimized for speed.
- while threads:
- item = queue.get()
- if item[0] == 'stdout':
- self.stdout_cb(item[1])
- elif item[0] == 'stderr':
- self.stderr_cb(item[1])
- else:
- # A thread terminated.
- if item in threads:
- threads[item].join()
- del threads[item]
- if item == 'wait':
- # Terminate the timeout thread if necessary.
- done.set()
- elif item == 'timeout' and not timed_out and self.poll() is None:
- logging.debug('Timed out after %.0fs: killing' % (
- time.time() - self.start))
- self.kill()
- timed_out = True
- finally:
- # Stop the threads.
- done.set()
- if nag:
- nag.cancel()
- if 'wait' in threads:
- # Accelerate things, otherwise it would hang until the child process is
- # done.
- logging.debug('Killing child because of an exception')
- self.kill()
- # Join threads.
- for thread in threads.itervalues():
- thread.join()
- if timed_out:
- self.returncode = TIMED_OUT
-
- # pylint: disable=arguments-differ,W0622
- def communicate(self, input=None, timeout=None, nag_timer=None,
- nag_max=None):
- """Adds timeout and callbacks support.
-
- Returns (stdout, stderr) like subprocess.Popen().communicate().
-
- - The process will be killed after |timeout| seconds and returncode set to
- TIMED_OUT.
- - If the subprocess runs for |nag_timer| seconds without producing terminal
- output, print a warning to stderr.
- """
- self.timeout = timeout
- self.nag_timer = nag_timer
- self.nag_max = nag_max
- if (not self.timeout and not self.nag_timer and
- not self.stdout_cb and not self.stderr_cb):
- return super(Popen, self).communicate(input)
-
- if self.timeout and self.shell:
- raise TypeError(
- 'Using timeout and shell simultaneously will cause a process leak '
- 'since the shell will be killed instead of the child process.')
-
- stdout = None
- stderr = None
- # Convert to a lambda to workaround python's deadlock.
- # http://docs.python.org/library/subprocess.html#subprocess.Popen.wait
- # When the pipe fills up, it would deadlock this process.
- if self.stdout and not self.stdout_cb and not self.stdout_is_void:
- stdout = []
- self.stdout_cb = stdout.append
- if self.stderr and not self.stderr_cb and not self.stderr_is_void:
- stderr = []
- self.stderr_cb = stderr.append
- self._tee_threads(input)
- if stdout is not None:
- stdout = ''.join(stdout)
- if stderr is not None:
- stderr = ''.join(stderr)
- return (stdout, stderr)
-
-
-def communicate(args, timeout=None, nag_timer=None, nag_max=None, **kwargs):
- """Wraps subprocess.Popen().communicate() and add timeout support.
+
+def communicate(args, **kwargs):
+ """Wraps subprocess.Popen().communicate().
Returns ((stdout, stderr), returncode).
- - The process will be killed after |timeout| seconds and returncode set to
- TIMED_OUT.
- If the subprocess runs for |nag_timer| seconds without producing terminal
output, print a warning to stderr.
- Automatically passes stdin content as input so do not specify stdin=PIPE.
"""
- stdin = kwargs.pop('stdin', None)
- if stdin is not None:
- if isinstance(stdin, basestring):
- # When stdin is passed as an argument, use it as the actual input data and
- # set the Popen() parameter accordingly.
- kwargs['stdin'] = PIPE
- else:
- kwargs['stdin'] = stdin
- stdin = None
+ stdin = None
+ # When stdin is passed as an argument, use it as the actual input data and
+ # set the Popen() parameter accordingly.
+ if 'stdin' in kwargs and isinstance(kwargs['stdin'], basestring):
+ stdin = kwargs['stdin']
+ kwargs['stdin'] = PIPE
proc = Popen(args, **kwargs)
- if stdin:
- return proc.communicate(stdin, timeout, nag_timer), proc.returncode
- else:
- return proc.communicate(None, timeout, nag_timer), proc.returncode
+ return proc.communicate(stdin), proc.returncode
def call(args, **kwargs):
@@ -502,7 +219,7 @@ def capture(args, **kwargs):
- Discards returncode.
- Blocks stdin by default if not specified since no output will be visible.
"""
- kwargs.setdefault('stdin', VOID)
+ kwargs.setdefault('stdin', VOID_INPUT)
# Like check_output, deny the caller from using stdout arg.
return communicate(args, stdout=PIPE, **kwargs)[0][0]
@@ -518,7 +235,7 @@ def check_output(args, **kwargs):
- Blocks stdin by default if not specified since no output will be visible.
- As per doc, "The stdout argument is not allowed as it is used internally."
"""
- kwargs.setdefault('stdin', VOID)
+ kwargs.setdefault('stdin', VOID_INPUT)
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it would be overridden.')
return check_call_out(args, stdout=PIPE, **kwargs)[0]
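
Most of the subprocess2.py diff is deletion (the timeout, nag-timer, and tee-thread machinery), but the compatibility piece worth noting is the basestring alias: Python 3 removed the builtin, so the module defines a tuple with the same name for its isinstance() checks. A standalone sketch of the alias:

    import sys

    if sys.version_info.major >= 3:
        # Python 3 has no basestring builtin; alias it so existing
        # isinstance(x, basestring) checks accept both str and bytes.
        basestring = (str, bytes)

    print(isinstance('text', basestring))   # True on both interpreters
    print(isinstance(b'data', basestring))  # True on both interpreters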
diff --git a/tools/clang_util.py b/tools/clang_util.py
index 33d30d6..90632d5 100644
--- a/tools/clang_util.py
+++ b/tools/clang_util.py
@@ -2,6 +2,8 @@
# reserved. Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file
+from __future__ import absolute_import
+from __future__ import print_function
from exec_util import exec_cmd
import os
import sys
@@ -23,11 +25,11 @@ else:
def clang_format(file_name, file_contents):
# -assume-filename is necessary to find the .clang-format file and determine
# the language when specifying contents via stdin.
- result = exec_cmd("%s -assume-filename=%s" % \
- (os.path.join(script_dir, clang_format_exe), file_name), \
- root_dir, file_contents)
+ result = exec_cmd("%s -assume-filename=%s" %
+ (os.path.join(script_dir, clang_format_exe),
+ file_name), root_dir, file_contents.encode('utf-8'))
if result['err'] != '':
- print "clang-format error: %s" % result['err']
+ print("clang-format error: %s" % result['err'])
if result['out'] != '':
output = result['out']
if sys.platform == 'win32':
diff --git a/tools/date_util.py b/tools/date_util.py
index 2cde329..4c17620 100644
--- a/tools/date_util.py
+++ b/tools/date_util.py
@@ -2,6 +2,7 @@
# reserved. Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file.
+from __future__ import absolute_import
import datetime
diff --git a/tools/exec_util.py b/tools/exec_util.py
index ceabc5b..a6c2347 100644
--- a/tools/exec_util.py
+++ b/tools/exec_util.py
@@ -2,6 +2,7 @@
# reserved. Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file
+from __future__ import absolute_import
from subprocess import Popen, PIPE
import sys
@@ -10,6 +11,7 @@ def exec_cmd(cmd, path, input_string=None):
""" Execute the specified command and return the result. """
out = ''
err = ''
+ ret = -1
parts = cmd.split()
try:
if input_string is None:
@@ -20,6 +22,7 @@ def exec_cmd(cmd, path, input_string=None):
stderr=PIPE,
shell=(sys.platform == 'win32'))
out, err = process.communicate()
+ ret = process.returncode
else:
process = Popen(
parts,
@@ -29,8 +32,10 @@ def exec_cmd(cmd, path, input_string=None):
stderr=PIPE,
shell=(sys.platform == 'win32'))
out, err = process.communicate(input=input_string)
- except IOError, (errno, strerror):
+ ret = process.returncode
+ except IOError as e:
+ (errno, strerror) = e.args
raise
except:
raise
- return {'out': out, 'err': err}
+ return {'out': out.decode('utf-8'), 'err': err.decode('utf-8'), 'ret': ret}
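
With this change, exec_cmd() decodes the child's output at the boundary and reports the exit status, so callers elsewhere in tools/ keep working with str and can inspect 'ret'. A usage sketch with a trivial child process standing in for a real command (the command line is illustrative, not from the patch):

    import subprocess
    import sys

    # communicate() returns bytes on Python 3; decoding once here lets
    # callers keep doing plain string comparisons on 'out' and 'err'.
    process = subprocess.Popen([sys.executable, '-c', 'print("hi")'],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    out, err = process.communicate()
    result = {'out': out.decode('utf-8'),
              'err': err.decode('utf-8'),
              'ret': process.returncode}
    print(result['out'].strip())  # -> hi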
diff --git a/tools/file_util.py b/tools/file_util.py
index 12e12db..0741f6f 100644
--- a/tools/file_util.py
+++ b/tools/file_util.py
@@ -2,7 +2,9 @@
# reserved. Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file.
+from __future__ import absolute_import
from glob import iglob
+from io import open
import os
import shutil
import sys
@@ -12,31 +14,32 @@ import time
def read_file(name, normalize=True):
""" Read a file. """
try:
- f = open(name, 'r')
- # read the data
- data = f.read()
- if normalize:
- # normalize line endings
- data = data.replace("\r\n", "\n")
- return data
- except IOError, (errno, strerror):
+ with open(name, 'r', encoding='utf-8') as f:
+ # read the data
+ data = f.read()
+ if normalize:
+ # normalize line endings
+ data = data.replace("\r\n", "\n")
+ return data
+ except IOError as e:
+ (errno, strerror) = e.args
sys.stderr.write('Failed to read file ' + name + ': ' + strerror)
raise
- else:
- f.close()
def write_file(name, data):
""" Write a file. """
try:
- f = open(name, 'w')
- # write the data
- f.write(data)
- except IOError, (errno, strerror):
+ with open(name, 'w', encoding='utf-8') as f:
+ # write the data
+ if sys.version_info.major == 2:
+ f.write(data.decode('utf-8'))
+ else:
+ f.write(data)
+ except IOError as e:
+ (errno, strerror) = e.args
sys.stderr.write('Failed to write file ' + name + ': ' + strerror)
raise
- else:
- f.close()
def path_exists(name):
@@ -52,10 +55,11 @@ def backup_file(name):
def copy_file(src, dst, quiet=True):
""" Copy a file. """
try:
- shutil.copy(src, dst)
+ shutil.copy2(src, dst)
if not quiet:
sys.stdout.write('Transferring ' + src + ' file.\n')
- except IOError, (errno, strerror):
+ except IOError as e:
+ (errno, strerror) = e.args
sys.stderr.write('Failed to copy file from ' + src + ' to ' + dst + ': ' +
strerror)
raise
@@ -67,7 +71,8 @@ def move_file(src, dst, quiet=True):
shutil.move(src, dst)
if not quiet:
sys.stdout.write('Moving ' + src + ' file.\n')
- except IOError, (errno, strerror):
+ except IOError as e:
+ (errno, strerror) = e.args
sys.stderr.write('Failed to move file from ' + src + ' to ' + dst + ': ' +
strerror)
raise
@@ -90,7 +95,8 @@ def remove_file(name, quiet=True):
os.remove(name)
if not quiet:
sys.stdout.write('Removing ' + name + ' file.\n')
- except IOError, (errno, strerror):
+ except IOError as e:
+ (errno, strerror) = e.args
sys.stderr.write('Failed to remove file ' + name + ': ' + strerror)
raise
@@ -102,7 +108,8 @@ def copy_dir(src, dst, quiet=True):
shutil.copytree(src, dst)
if not quiet:
sys.stdout.write('Transferring ' + src + ' directory.\n')
- except IOError, (errno, strerror):
+ except IOError as e:
+ (errno, strerror) = e.args
sys.stderr.write('Failed to copy directory from ' + src + ' to ' + dst +
': ' + strerror)
raise
@@ -115,7 +122,8 @@ def remove_dir(name, quiet=True):
shutil.rmtree(name)
if not quiet:
sys.stdout.write('Removing ' + name + ' directory.\n')
- except IOError, (errno, strerror):
+ except IOError as e:
+ (errno, strerror) = e.args
sys.stderr.write('Failed to remove directory ' + name + ': ' + strerror)
raise
@@ -127,7 +135,8 @@ def make_dir(name, quiet=True):
if not quiet:
sys.stdout.write('Creating ' + name + ' directory.\n')
os.makedirs(name)
- except IOError, (errno, strerror):
+ except IOError as e:
+ (errno, strerror) = e.args
sys.stderr.write('Failed to create directory ' + name + ': ' + strerror)
raise
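
file_util.py standardizes on io.open, which behaves like the Python 3 builtin on both interpreters: text mode takes an encoding argument and yields unicode. The with-statements also fix a latent bug: the old `else: f.close()` never ran, because the `return` inside the try block skips the else clause, so files were never explicitly closed. A minimal round-trip sketch:

    from io import open  # uniform open(): text mode yields unicode on Py2/Py3
    import os
    import tempfile

    path = os.path.join(tempfile.mkdtemp(), 'sample.txt')
    with open(path, 'w', encoding='utf-8') as f:
        f.write(u'some text\n')  # io.open requires unicode text on Python 2
    with open(path, 'r', encoding='utf-8') as f:
        print(f.read())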
diff --git a/tools/fix_style.py b/tools/fix_style.py
index b610c0d..e0debae 100644
--- a/tools/fix_style.py
+++ b/tools/fix_style.py
@@ -3,6 +3,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+from __future__ import absolute_import
+from __future__ import print_function
import os, re, sys
from clang_util import clang_format
from file_util import eval_file, get_files, read_file, write_file
@@ -34,7 +36,7 @@ def msg(filename, status):
filename = filename[pos:]
filename = "..." + filename
- print "%-60s %s" % (filename, status)
+ print("%-60s %s" % (filename, status))
updatect = 0
@@ -124,15 +126,15 @@ def fix_style(filenames, white_list=None, black_list=None):
if __name__ == "__main__":
if len(sys.argv) == 1:
- print "Usage: %s [file-path|git-hash|unstaged|staged] ..." % sys.argv[0]
- print "\n Format C, C++ and ObjC files using Chromium's clang-format style."
- print "\nOptions:"
- print " file-path\tProcess the specified file or directory."
- print " \t\tDirectories will be processed recursively."
- print " \t\tThe \"*\" wildcard character is supported."
- print " git-hash\tProcess all files changed in the specified Git commit."
- print " unstaged\tProcess all unstaged files in the Git repo."
- print " staged\t\tProcess all staged files in the Git repo."
+ print("Usage: %s [file-path|git-hash|unstaged|staged] ...\n" % sys.argv[0])
+ print(" Format C, C++ and ObjC files using Chromium's clang-format style.")
+ print("\nOptions:")
+ print(" file-path\tProcess the specified file or directory.")
+ print(" \t\tDirectories will be processed recursively.")
+ print(" \t\tThe \"*\" wildcard character is supported.")
+ print(" git-hash\tProcess all files changed in the specified Git commit.")
+ print(" unstaged\tProcess all unstaged files in the Git repo.")
+ print(" staged\t\tProcess all staged files in the Git repo.")
sys.exit(1)
# Read the configuration file.
@@ -140,4 +142,4 @@ if __name__ == "__main__":
# Process anything passed on the command-line.
fix_style(sys.argv[1:])
- print 'Done - Wrote %d files.' % updatect
+ print('Done - Wrote %d files.' % updatect)
diff --git a/tools/git_util.py b/tools/git_util.py
index f0313e3..51935e3 100644
--- a/tools/git_util.py
+++ b/tools/git_util.py
@@ -2,6 +2,7 @@
# reserved. Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file
+from __future__ import absolute_import
from exec_util import exec_cmd
import os
import sys
@@ -84,27 +85,29 @@ def git_apply_patch_file(patch_path, patch_dir):
if sys.platform == 'win32':
# Convert the patch to Unix line endings. This is necessary to avoid
# whitespace errors with git apply.
- patch_string = patch_string.replace('\r\n', '\n')
+ patch_string = patch_string.replace(b'\r\n', b'\n')
# Git apply fails silently if not run relative to a repository root.
if not is_checkout(patch_dir):
sys.stdout.write('... patch directory is not a repository root.\n')
return 'fail'
+ config = '-p0 --ignore-whitespace'
+
# Output patch contents.
- cmd = '%s apply -p0 --numstat' % git_exe
+ cmd = '%s apply %s --numstat' % (git_exe, config)
result = exec_cmd(cmd, patch_dir, patch_string)
write_indented_output(result['out'].replace('<stdin>', patch_name))
# Reverse check to see if the patch has already been applied.
- cmd = '%s apply -p0 --reverse --check' % git_exe
+ cmd = '%s apply %s --reverse --check' % (git_exe, config)
result = exec_cmd(cmd, patch_dir, patch_string)
if result['err'].find('error:') < 0:
sys.stdout.write('... already applied (skipping).\n')
return 'skip'
# Normal check to see if the patch can be applied cleanly.
- cmd = '%s apply -p0 --check' % git_exe
+ cmd = '%s apply %s --check' % (git_exe, config)
result = exec_cmd(cmd, patch_dir, patch_string)
if result['err'].find('error:') >= 0:
sys.stdout.write('... failed to apply:\n')
@@ -113,7 +116,7 @@ def git_apply_patch_file(patch_path, patch_dir):
# Apply the patch file. This should always succeed because the previous
# command succeeded.
- cmd = '%s apply -p0' % git_exe
+ cmd = '%s apply %s' % (git_exe, config)
result = exec_cmd(cmd, patch_dir, patch_string)
if result['err'] == '':
sys.stdout.write('... successfully applied.\n')
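
The b'' prefixes here imply the patch contents now flow through as bytes on Python 3 (read in binary and handed to exec_cmd, which encodes and decodes at its own boundary); bytes.replace() requires bytes arguments, so the old str literals would raise TypeError. A two-line sketch of the constraint:

    # bytes.replace() requires bytes arguments on Python 3; passing the
    # old str literals ('\r\n', '\n') would raise TypeError.
    patch_string = b'--- a/file\r\n+++ b/file\r\n'
    print(patch_string.replace(b'\r\n', b'\n'))

The `-p0 --ignore-whitespace` config variable is a separate robustness tweak, applied uniformly to all four `git apply` invocations.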
diff --git a/tools/make_readme.py b/tools/make_readme.py
index ef3bfb1..ab81fda 100644
--- a/tools/make_readme.py
+++ b/tools/make_readme.py
@@ -2,6 +2,8 @@
# reserved. Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file.
+from __future__ import absolute_import
+from __future__ import print_function
from date_util import *
from file_util import *
from optparse import OptionParser
@@ -109,7 +111,7 @@ output_dir = options.outputdir
platform = options.platform
if (platform != 'linux32' and platform != 'linux64' and
platform != 'macosx64' and platform != 'win32' and platform != 'win64'):
- print 'Unsupported target \"' + platform + '\"'
+ print('Unsupported target \"' + platform + '\"')
sys.exit(1)
# script directory
@@ -130,8 +132,8 @@ jcef_commit_number = git.get_commit_number(jcef_dir)
jcef_commit_hash = git.get_hash(jcef_dir)
jcef_url = git.get_url(jcef_dir)
jcef_ver = '%s.%s.%s.%s+g%s' % (args['CEF_MAJOR'], args['CEF_MINOR'],
- args['CEF_PATCH'], jcef_commit_number,
- jcef_commit_hash[:7])
+ args['CEF_PATCH'], jcef_commit_number,
+ jcef_commit_hash[:7])
date = get_date()
diff --git a/tools/make_version_header.py b/tools/make_version_header.py
index 7f2c7b4..a0186d9 100644
--- a/tools/make_version_header.py
+++ b/tools/make_version_header.py
@@ -2,6 +2,7 @@
# reserved. Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file.
+from __future__ import absolute_import
from date_util import *
from file_util import *
import git_util as git
@@ -72,8 +73,9 @@ def write_svn_header(header):
args = {}
read_readme_file(os.path.join(options.cefpath, 'README.txt'), args)
- version = '%s.%s.%s.%s+g%s' % (args['CEF_MAJOR'], args['CEF_MINOR'], args['CEF_PATCH'],
- commit_number, commit_hash[:7])
+ version = '%s.%s.%s.%s+g%s' % (args['CEF_MAJOR'], args['CEF_MINOR'],
+ args['CEF_PATCH'], commit_number,
+ commit_hash[:7])
newcontents = '// Copyright (c) '+year+' The Chromium Embedded Framework Authors. All rights\n'+\
'// reserved. Use of this source code is governed by a BSD-style license that\n'+\
diff --git a/tools/readme_util.py b/tools/readme_util.py
index dc01966..4c3f023 100644
--- a/tools/readme_util.py
+++ b/tools/readme_util.py
@@ -1,3 +1,4 @@
+from __future__ import absolute_import
from file_util import read_file
@@ -9,8 +10,8 @@ def read_readme_file(file, args):
if len(parts) != 2:
continue
if parts[0].startswith('CEF Version'):
- args['CEF_VER'] = parts[1].strip();
- subparts = parts[1].split('+');
+ args['CEF_VER'] = parts[1].strip()
+ subparts = parts[1].split('+')
if len(subparts) != 3:
raise Exception('Failed to parse CEF Version: %s' % parts[1])
verparts = subparts[0].strip().split('.')
diff --git a/tools/yapf_util.py b/tools/yapf_util.py
index 9618a3f..c5add7e 100644
--- a/tools/yapf_util.py
+++ b/tools/yapf_util.py
@@ -2,6 +2,8 @@
# reserved. Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file
+from __future__ import absolute_import
+from __future__ import print_function
from exec_util import exec_cmd
import os
import sys
@@ -14,9 +16,9 @@ root_dir = os.path.join(script_dir, os.pardir)
def yapf_format(file_name, file_contents):
# Reads .style.yapf in the root_dir when specifying contents via stdin.
result = exec_cmd("%s %s/yapf" % (sys.executable, script_dir), root_dir,
- file_contents)
+ file_contents.encode('utf-8'))
if result['err'] != '':
- print "yapf error: %s" % result['err']
+ print("yapf error: %s" % result['err'])
if result['out'] != '':
output = result['out']
if sys.platform == 'win32':
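
clang_util.py and yapf_util.py make the mirror-image change to exec_util.py: file contents are encoded to bytes before being piped to the formatter, because Popen.communicate() expects bytes input when stdin=PIPE on Python 3. A sketch using a pass-through child process in place of yapf (the command is illustrative):

    import subprocess
    import sys

    # stdin content must be bytes on Python 3 when stdin=PIPE: encode at
    # the call site, then decode the formatter's output for the caller.
    echo = 'import sys; sys.stdout.write(sys.stdin.read())'
    process = subprocess.Popen([sys.executable, '-c', echo],
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE)
    out, _ = process.communicate(input=u'print("formatted")\n'.encode('utf-8'))
    print(out.decode('utf-8'))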