author    Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2021-11-25 04:05:28 +0000
committer Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2021-11-25 04:05:28 +0000
commit    f9fcaf5ff59b4777390989050322121184b1b26b (patch)
tree      92df91ba2e8af1e8571f88635559b6012f9dfa4c
parent    256349dc49686c941c4c53c5512fc29f6299cbf3 (diff)
parent    0f54b3300ea7902266ae501b604e0d916a6d8f0c (diff)
Snap for 7944853 from 0f54b3300ea7902266ae501b604e0d916a6d8f0c to androidx-core-core-role-release
Change-Id: I73f7cc95568832f8c646ba012ba223b66e9a8cd3
-rw-r--r--  PREUPLOAD.cfg                                    19
-rw-r--r--  README.md                                        63
-rwxr-xr-x  pre-upload.py                                    89
-rw-r--r--  rh/__init__.py                                    3
-rw-r--r--  rh/config.py                                    252
-rwxr-xr-x  rh/config_test.py                                 3
-rwxr-xr-x  rh/config_unittest.py                           105
-rw-r--r--  rh/git.py                                         9
-rw-r--r--  rh/hooks.py                                     286
-rwxr-xr-x  rh/hooks_unittest.py                            254
-rw-r--r--  rh/results.py                                     3
-rw-r--r--  rh/shell.py                                       8
-rwxr-xr-x  rh/shell_unittest.py                              3
-rw-r--r--  rh/signals.py                                     3
-rw-r--r--  rh/sixish.py                                     69
-rw-r--r--  rh/terminal.py                                    3
-rw-r--r--  rh/utils.py                                     156
-rwxr-xr-x  rh/utils_unittest.py                            101
-rwxr-xr-x  tools/android_test_mapping_format.py            159
-rwxr-xr-x  tools/android_test_mapping_format_unittest.py   123
-rwxr-xr-x  tools/clang-format.py                             5
-rwxr-xr-x  tools/cpplint.py                               1069
-rwxr-xr-x  tools/cpplint.py-update                          12
-rwxr-xr-x  tools/google-java-format.py                       5
-rwxr-xr-x  tools/pylint.py                                  60
25 files changed, 2016 insertions, 846 deletions
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
index 03350c7..81f505d 100644
--- a/PREUPLOAD.cfg
+++ b/PREUPLOAD.cfg
@@ -1,21 +1,14 @@
[Hook Scripts]
# Only list fast unittests here.
-py2_config_unittest = python2 ./rh/config_unittest.py
-py3_config_unittest = python3 ./rh/config_unittest.py
-py2_hooks_unittest = python2 ./rh/hooks_unittest.py
-py3_hooks_unittest = python3 ./rh/hooks_unittest.py
-py2_shell_unittest = python2 ./rh/shell_unittest.py
-py3_shell_unittest = python3 ./rh/shell_unittest.py
-py2_utils_unittest = python2 ./rh/utils_unittest.py
-py3_utils_unittest = python3 ./rh/utils_unittest.py
-py2_android_test_mapping_format_unittest = python2 ./tools/android_test_mapping_format_unittest.py
-py3_android_test_mapping_format_unittest = python3 ./tools/android_test_mapping_format_unittest.py
-py2_config_test = python2 ./rh/config_test.py --check-env --commit-id ${PREUPLOAD_COMMIT} --commit-msg ${PREUPLOAD_COMMIT_MESSAGE} --repo-root ${REPO_ROOT} -- ${PREUPLOAD_FILES}
-py3_config_test = python3 ./rh/config_test.py --check-env --commit-id ${PREUPLOAD_COMMIT} --commit-msg ${PREUPLOAD_COMMIT_MESSAGE} --repo-root ${REPO_ROOT} -- ${PREUPLOAD_FILES}
+config_unittest = ./rh/config_unittest.py
+hooks_unittest = ./rh/hooks_unittest.py
+shell_unittest = ./rh/shell_unittest.py
+utils_unittest = ./rh/utils_unittest.py
+android_test_mapping_format_unittest = ./tools/android_test_mapping_format_unittest.py
+config_test = ./rh/config_test.py --check-env --commit-id ${PREUPLOAD_COMMIT} --commit-msg ${PREUPLOAD_COMMIT_MESSAGE} --repo-root ${REPO_ROOT} -- ${PREUPLOAD_FILES}
[Builtin Hooks]
commit_msg_bug_field = true
commit_msg_changeid_field = true
commit_msg_test_field = true
-pylint2 = true
pylint3 = true
diff --git a/README.md b/README.md
index 77f4434..1954d0a 100644
--- a/README.md
+++ b/README.md
@@ -103,6 +103,10 @@ such will be expanded correctly via argument positions, so do not try to
force your own quote handling.
* `${PREUPLOAD_FILES}`: List of files to operate on.
+* `${PREUPLOAD_FILES_PREFIXED}`: A list of files to operate on, each with a
+  prefix attached. Any string attached to the keyword ${PREUPLOAD_FILES_PREFIXED}
+  is repeated automatically for each file. If no string is attached to the
+  keyword, the previous argument is repeated before each file instead.
* `${PREUPLOAD_COMMIT}`: Commit hash.
* `${PREUPLOAD_COMMIT_MESSAGE}`: Commit message.
@@ -113,6 +117,22 @@ are automatically expanded for you:
* `${BUILD_OS}`: The string `darwin-x86` for macOS and the string `linux-x86`
for Linux/x86.
+### Examples
+
+Here are some examples of using the placeholders.
+Consider this sample config file.
+```
+[Hook Scripts]
+lister = ls ${PREUPLOAD_FILES}
+checker prefix = check --file=${PREUPLOAD_FILES_PREFIXED}
+checker flag = check --file ${PREUPLOAD_FILES_PREFIXED}
+```
+With a commit that changes `path1/file1` and `path2/file2`, these hooks will
+run the programs with the following arguments:
+* ['ls', 'path1/file1', 'path2/file2']
+* ['check', '--file=path1/file1', '--file=path2/file2']
+* ['check', '--file', 'path1/file1', '--file', 'path2/file2']
+
## [Options]
This section allows for setting options that affect the overall behavior of the
@@ -150,6 +170,9 @@ some dog = tool --no-cat-in-commit-message ${PREUPLOAD_COMMIT_MESSAGE}
This section allows for turning on common/builtin hooks. There are a bunch of
canned hooks already included geared towards AOSP style guidelines.
+* `aidl_format`: Run AIDL files (.aidl) through `aidl-format`.
+* `android_test_mapping_format`: Validate TEST_MAPPING files in Android source
+ code. Refer to go/test-mapping for more details.
* `bpfmt`: Run Blueprint files (.bp) through `bpfmt`.
* `checkpatch`: Run commits through the Linux kernel's `checkpatch.pl` script.
* `clang_format`: Run git-clang-format against the commit. The default style is
@@ -161,6 +184,9 @@ canned hooks already included geared towards AOSP style guidelines.
* `commit_msg_relnote_field_format`: Check for possible misspellings of the
`Relnote:` field and that multiline release notes are properly formatted with
quotes.
+* `commit_msg_relnote_for_current_txt`: Check that CLs with changes to
+ current.txt or public_plus_experimental_current.txt also contain a
+ `Relnote:` field in the commit message.
* `commit_msg_test_field`: Require a `Test:` line.
* `cpplint`: Run through the cpplint tool (for C++ code).
* `gofmt`: Run Go code through `gofmt`.
@@ -170,9 +196,8 @@ canned hooks already included geared towards AOSP style guidelines.
* `pylint`: Alias of `pylint2`. Will change to `pylint3` by end of 2019.
* `pylint2`: Run Python code through `pylint` using Python 2.
* `pylint3`: Run Python code through `pylint` using Python 3.
+* `rustfmt`: Run Rust code through `rustfmt`.
* `xmllint`: Run XML code through `xmllint`.
-* `android_test_mapping_format`: Validate TEST_MAPPING files in Android source
- code. Refer to go/test-mapping for more details.
Note: Builtin hooks tend to match specific filenames (e.g. `.json`). If no
files match in a specific commit, then the hook will be skipped for that commit.
@@ -203,6 +228,34 @@ See [Placeholders](#Placeholders) for variables you can expand automatically.
cpplint = --filter=-x ${PREUPLOAD_FILES}
```
+## [Builtin Hooks Exclude Paths]
+
+*** note
+This section can only be added to the repo project-wide settings
+[GLOBAL-PREUPLOAD.cfg].
+***
+
+Used to explicitly exclude some projects when processing a hook. With this
+section, it is possible to define a hook that should apply to the majority of
+projects except a few.
+
+An entry must completely match the project's `REPO_PATH`. Paths may use
+[shell-style wildcards](https://docs.python.org/library/fnmatch.html) and
+quotes. For advanced cases, a [regular
+expression](https://docs.python.org/howto/regex.html) can be used via the `^` prefix.
+
+```
+[Builtin Hooks Exclude Paths]
+# Run cpplint on all projects except ones under external/ and vendor/.
+# The "external" and "vendor" projects, if they exist, will still run cpplint.
+cpplint = external/* vendor/*
+
+# Run rustfmt on all projects except ones under external/. All projects under
+# hardware/ will also be excluded, except ones starting with hardware/google
+# (due to the negative lookahead in the regex).
+rustfmt = external/* ^hardware/(?!google)
+```
+
## [Tool Paths]
Some builtin hooks need to call external executables to work correctly. By
@@ -211,6 +264,9 @@ executables can be overridden through `[Tool Paths]`. This is helpful to
provide consistent behavior for developers across different OS and Linux
distros/versions. The following tools are recognized:
+* `aidl-format`: used for the `aidl_format` builtin hook.
+* `android-test-mapping-format`: used for the `android_test_mapping_format`
+ builtin hook.
* `bpfmt`: used for the `bpfmt` builtin hook.
* `clang-format`: used for the `clang_format` builtin hook.
* `cpplint`: used for the `cpplint` builtin hook.
@@ -219,8 +275,7 @@ distros/versions. The following tools are recognized:
* `google-java-format`: used for the `google_java_format` builtin hook.
* `google-java-format-diff`: used for the `google_java_format` builtin hook.
* `pylint`: used for the `pylint` builtin hook.
-* `android-test-mapping-format`: used for the `android_test_mapping_format`
- builtin hook.
+* `rustfmt`: used for the `rustfmt` builtin hook.
See [Placeholders](#Placeholders) for variables you can expand automatically.
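For reference, the `${PREUPLOAD_FILES_PREFIXED}` expansion documented above can be modeled with a few lines of plain Python. This is a standalone sketch of the rules, not code from this patch; `expand` and `KEYWORD` are illustrative names:

```python
# Standalone model of the ${PREUPLOAD_FILES_PREFIXED} expansion rules.
KEYWORD = '${PREUPLOAD_FILES_PREFIXED}'

def expand(args, files):
    out = []
    for arg in args:
        if arg == KEYWORD:
            # Detached form: repeat the previous argument before each file.
            flag = out.pop()
            for path in files:
                out += [flag, path]
        elif arg.endswith(KEYWORD):
            # Attached form: repeat the attached prefix for each file.
            prefix = arg[:-len(KEYWORD)]
            out += [prefix + path for path in files]
        else:
            out.append(arg)
    return out

files = ['path1/file1', 'path2/file2']
print(expand(['check', '--file=' + KEYWORD], files))
# -> ['check', '--file=path1/file1', '--file=path2/file2']
print(expand(['check', '--file', KEYWORD], files))
# -> ['check', '--file', 'path1/file1', '--file', 'path2/file2']
```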
diff --git a/pre-upload.py b/pre-upload.py
index 53b5ffb..7eb11b8 100755
--- a/pre-upload.py
+++ b/pre-upload.py
@@ -1,5 +1,4 @@
-#!/usr/bin/python
-# -*- coding:utf-8 -*-
+#!/usr/bin/env python3
# Copyright 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -20,8 +19,6 @@ Normally this is loaded indirectly by repo itself, but it can be run directly
when developing.
"""
-from __future__ import print_function
-
import argparse
import datetime
import os
@@ -29,19 +26,9 @@ import sys
# Assert some minimum Python versions as we don't test or support any others.
-# We only support Python 2.7, and require 2.7.5+/3.4+ to include signal fix:
-# https://bugs.python.org/issue14173
-if sys.version_info < (2, 7, 5):
- print('repohooks: error: Python-2.7.5+ is required', file=sys.stderr)
- sys.exit(1)
-elif sys.version_info.major == 3 and sys.version_info < (3, 4):
- # We don't actually test <Python-3.6. Hope for the best!
- print('repohooks: error: Python-3.4+ is required', file=sys.stderr)
+if sys.version_info < (3, 6):
+ print('repohooks: error: Python-3.6+ is required', file=sys.stderr)
sys.exit(1)
-elif sys.version_info.major == 3 and sys.version_info < (3, 6):
- # We want to get people off of old versions of Python.
- print('repohooks: warning: Python-3.6+ is going to be required; '
- 'please upgrade soon to maintain support.', file=sys.stderr)
_path = os.path.dirname(os.path.realpath(__file__))
@@ -57,7 +44,6 @@ import rh.results
import rh.config
import rh.git
import rh.hooks
-import rh.sixish
import rh.terminal
import rh.utils
@@ -76,6 +62,9 @@ class Output(object):
FAILED = COLOR.color(COLOR.RED, 'FAILED')
WARNING = COLOR.color(COLOR.YELLOW, 'WARNING')
+ # How long a hook is allowed to run before we warn that it is "too slow".
+ _SLOW_HOOK_DURATION = datetime.timedelta(seconds=30)
+
def __init__(self, project_name):
"""Create a new Output object for a specified project.
@@ -87,6 +76,8 @@ class Output(object):
self.hook_index = 0
self.success = True
self.start_time = datetime.datetime.now()
+ self.hook_start_time = None
+ self._curr_hook_name = None
def set_num_hooks(self, num_hooks):
"""Keep track of how many hooks we'll be running.
@@ -113,28 +104,38 @@ class Output(object):
Args:
hook_name: name of the hook.
"""
+ self._curr_hook_name = hook_name
+ self.hook_start_time = datetime.datetime.now()
status_line = '[%s %d/%d] %s' % (self.RUNNING, self.hook_index,
self.num_hooks, hook_name)
self.hook_index += 1
rh.terminal.print_status_line(status_line)
- def hook_error(self, hook_name, error):
+ def hook_finish(self):
+ """Finish processing any per-hook state."""
+ duration = datetime.datetime.now() - self.hook_start_time
+ if duration >= self._SLOW_HOOK_DURATION:
+ self.hook_warning(
+ 'This hook took %s to finish which is fairly slow for '
+ 'developers.\nPlease consider moving the check to the '
+ 'server/CI system instead.' %
+ (rh.utils.timedelta_str(duration),))
+
+ def hook_error(self, error):
"""Print an error for a single hook.
Args:
- hook_name: name of the hook.
error: error string.
"""
- self.error(hook_name, error)
+ self.error(self._curr_hook_name, error)
- def hook_warning(self, hook_name, warning):
+ def hook_warning(self, warning):
"""Print a warning for a single hook.
Args:
- hook_name: name of the hook.
warning: warning string.
"""
- status_line = '[%s] %s' % (self.WARNING, hook_name)
+ status_line = '[%s] %s' % (self.WARNING, self._curr_hook_name)
rh.terminal.print_status_line(status_line, print_newline=True)
print(warning, file=sys.stderr)
@@ -173,6 +174,11 @@ def _process_hook_results(results):
if not results:
return (None, None)
+ # We track these as dedicated fields in case a hook doesn't output anything.
+ # We want to treat silent non-zero exits as failures too.
+ has_error = False
+ has_warning = False
+
error_ret = ''
warning_ret = ''
for result in results:
@@ -183,11 +189,14 @@ def _process_hook_results(results):
lines = result.error.splitlines()
ret += '\n'.join(' %s' % (x,) for x in lines)
if result.is_warning():
+ has_warning = True
warning_ret += ret
else:
+ has_error = True
error_ret += ret
- return (error_ret or None, warning_ret or None)
+ return (error_ret if has_error else None,
+ warning_ret if has_warning else None)
def _get_project_config():
@@ -205,7 +214,7 @@ def _get_project_config():
# Load the config for this git repo.
'.',
)
- return rh.config.PreUploadConfig(paths=paths, global_paths=global_paths)
+ return rh.config.PreUploadSettings(paths=paths, global_paths=global_paths)
def _attempt_fixes(fixup_func_list, commit_list):
@@ -276,16 +285,17 @@ def _run_project_hooks_in_cwd(project_name, proj_dir, output, commit_list=None):
(e,))
return False
+ project = rh.Project(name=project_name, dir=proj_dir, remote=remote)
+ rel_proj_dir = os.path.relpath(proj_dir, rh.git.find_repo_root())
+
os.environ.update({
'REPO_LREV': rh.git.get_commit_for_ref(upstream_branch),
- 'REPO_PATH': os.path.relpath(proj_dir, rh.git.find_repo_root()),
+ 'REPO_PATH': rel_proj_dir,
'REPO_PROJECT': project_name,
'REPO_REMOTE': remote,
'REPO_RREV': rh.git.get_remote_revision(upstream_branch, remote),
})
- project = rh.Project(name=project_name, dir=proj_dir, remote=remote)
-
if not commit_list:
commit_list = rh.git.get_commits(
ignore_merged_commits=config.ignore_merged_commits)
@@ -298,21 +308,24 @@ def _run_project_hooks_in_cwd(project_name, proj_dir, output, commit_list=None):
os.environ['PREUPLOAD_COMMIT'] = commit
diff = rh.git.get_affected_files(commit)
desc = rh.git.get_commit_desc(commit)
- rh.sixish.setenv('PREUPLOAD_COMMIT_MESSAGE', desc)
+ os.environ['PREUPLOAD_COMMIT_MESSAGE'] = desc
commit_summary = desc.split('\n', 1)[0]
output.commit_start(commit=commit, commit_summary=commit_summary)
- for name, hook in hooks:
+ for name, hook, exclusion_scope in hooks:
output.hook_start(name)
+ if rel_proj_dir in exclusion_scope:
+                continue
hook_results = hook(project, commit, desc, diff)
+ output.hook_finish()
(error, warning) = _process_hook_results(hook_results)
- if error or warning:
- if warning:
- output.hook_warning(name, warning)
- if error:
+ if error is not None or warning is not None:
+ if warning is not None:
+ output.hook_warning(warning)
+ if error is not None:
ret = False
- output.hook_error(name, error)
+ output.hook_error(error)
for result in hook_results:
if result.fixup_func:
fixup_func_list.append((name, commit,
@@ -411,8 +424,7 @@ def _identify_project(path):
a blank string upon failure.
"""
cmd = ['repo', 'forall', '.', '-c', 'echo ${REPO_PROJECT}']
- return rh.utils.run(cmd, capture_output=True, redirect_stderr=True,
- cwd=path).stdout.strip()
+ return rh.utils.run(cmd, capture_output=True, cwd=path).stdout.strip()
def direct_main(argv):
@@ -444,8 +456,7 @@ def direct_main(argv):
# project from CWD.
if opts.dir is None:
cmd = ['git', 'rev-parse', '--git-dir']
- git_dir = rh.utils.run(cmd, capture_output=True,
- redirect_stderr=True).stdout.strip()
+ git_dir = rh.utils.run(cmd, capture_output=True).stdout.strip()
if not git_dir:
parser.error('The current directory is not part of a git project.')
opts.dir = os.path.dirname(os.path.abspath(git_dir))
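The per-hook timing added to `Output` above is essentially a stopwatch compared against the 30-second `_SLOW_HOOK_DURATION` threshold. A minimal standalone sketch of the idea, with made-up names and only the stdlib:

```python
import datetime

# Mirrors _SLOW_HOOK_DURATION in the patch above.
SLOW_HOOK_DURATION = datetime.timedelta(seconds=30)

class HookTimer:
    """Toy stand-in for the per-hook timing Output now performs."""

    def start(self, name):
        self.name = name
        self.start_time = datetime.datetime.now()

    def finish(self):
        """Warn when a hook exceeds the slow-hook threshold."""
        duration = datetime.datetime.now() - self.start_time
        if duration >= SLOW_HOOK_DURATION:
            print('%s took %s; consider moving it to the server/CI system'
                  % (self.name, duration))

timer = HookTimer()
timer.start('cpplint')
# ... the hook body would run here ...
timer.finish()
```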
diff --git a/rh/__init__.py b/rh/__init__.py
index c36cb89..9050fb6 100644
--- a/rh/__init__.py
+++ b/rh/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding:utf-8 -*-
# Copyright 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,8 +14,6 @@
"""Common repohook objects/constants."""
-from __future__ import print_function
-
import collections
diff --git a/rh/config.py b/rh/config.py
index 6e96fc7..1eb93a7 100644
--- a/rh/config.py
+++ b/rh/config.py
@@ -1,4 +1,3 @@
-# -*- coding:utf-8 -*-
# Copyright 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,9 +14,9 @@
"""Manage various config files."""
-from __future__ import print_function
-
+import configparser
import functools
+import itertools
import os
import shlex
import sys
@@ -30,7 +29,6 @@ del _path
# pylint: disable=wrong-import-position
import rh.hooks
import rh.shell
-from rh.sixish import configparser
class Error(Exception):
@@ -41,99 +39,76 @@ class ValidationError(Error):
"""Config file has unknown sections/keys or other values."""
+# Sentinel so we can handle None-vs-unspecified.
+_UNSET = object()
+
+
class RawConfigParser(configparser.RawConfigParser):
"""Like RawConfigParser but with some default helpers."""
- @staticmethod
- def _check_args(name, cnt_min, cnt_max, args):
- cnt = len(args)
- if cnt not in (0, cnt_max - cnt_min):
- raise TypeError('%s() takes %i or %i arguments (got %i)' %
- (name, cnt_min, cnt_max, cnt,))
- return cnt
-
- # pylint can't seem to grok our use of *args here.
+ # pylint doesn't like it when we extend the API.
# pylint: disable=arguments-differ
- def options(self, section, *args):
- """Return the options in |section| (with default |args|).
+ def options(self, section, default=_UNSET):
+ """Return the options in |section|.
Args:
section: The section to look up.
- args: What to return if |section| does not exist.
+ default: What to return if |section| does not exist.
"""
- cnt = self._check_args('options', 2, 3, args)
try:
return configparser.RawConfigParser.options(self, section)
except configparser.NoSectionError:
- if cnt == 1:
- return args[0]
+ if default is not _UNSET:
+ return default
raise
- def get(self, section, option, *args):
- """Return the value for |option| in |section| (with default |args|)."""
- cnt = self._check_args('get', 3, 4, args)
- try:
- return configparser.RawConfigParser.get(self, section, option)
- except (configparser.NoSectionError, configparser.NoOptionError):
- if cnt == 1:
- return args[0]
- raise
-
- def items(self, section, *args):
+ def items(self, section=_UNSET, default=_UNSET):
"""Return a list of (key, value) tuples for the options in |section|."""
- cnt = self._check_args('items', 2, 3, args)
+ if section is _UNSET:
+ return super().items()
+
try:
return configparser.RawConfigParser.items(self, section)
except configparser.NoSectionError:
- if cnt == 1:
- return args[0]
+ if default is not _UNSET:
+ return default
raise
class PreUploadConfig(object):
- """Config file used for per-project `repo upload` hooks."""
-
- FILENAME = 'PREUPLOAD.cfg'
- GLOBAL_FILENAME = 'GLOBAL-PREUPLOAD.cfg'
+ """A single (abstract) config used for `repo upload` hooks."""
CUSTOM_HOOKS_SECTION = 'Hook Scripts'
BUILTIN_HOOKS_SECTION = 'Builtin Hooks'
BUILTIN_HOOKS_OPTIONS_SECTION = 'Builtin Hooks Options'
+ BUILTIN_HOOKS_EXCLUDE_SECTION = 'Builtin Hooks Exclude Paths'
TOOL_PATHS_SECTION = 'Tool Paths'
OPTIONS_SECTION = 'Options'
+ VALID_SECTIONS = {
+ CUSTOM_HOOKS_SECTION,
+ BUILTIN_HOOKS_SECTION,
+ BUILTIN_HOOKS_OPTIONS_SECTION,
+ BUILTIN_HOOKS_EXCLUDE_SECTION,
+ TOOL_PATHS_SECTION,
+ OPTIONS_SECTION,
+ }
OPTION_IGNORE_MERGED_COMMITS = 'ignore_merged_commits'
- VALID_OPTIONS = (OPTION_IGNORE_MERGED_COMMITS,)
+ VALID_OPTIONS = {OPTION_IGNORE_MERGED_COMMITS}
- def __init__(self, paths=('',), global_paths=()):
+ def __init__(self, config=None, source=None):
"""Initialize.
- All the config files found will be merged together in order.
-
Args:
- paths: The directories to look for config files.
- global_paths: The directories to look for global config files.
+            config: A configparser.ConfigParser instance.
+ source: Where this config came from. This is used in error messages to
+ facilitate debugging. It is not necessarily a valid path.
"""
- config = RawConfigParser()
-
- def _search(paths, filename):
- for path in paths:
- path = os.path.join(path, filename)
- if os.path.exists(path):
- self.paths.append(path)
- try:
- config.read(path)
- except configparser.ParsingError as e:
- raise ValidationError('%s: %s' % (path, e))
-
- self.paths = []
- _search(global_paths, self.GLOBAL_FILENAME)
- _search(paths, self.FILENAME)
-
- self.config = config
-
- self._validate()
+ self.config = config if config else RawConfigParser()
+ self.source = source
+ if config:
+ self._validate()
@property
def custom_hooks(self):
@@ -142,7 +117,8 @@ class PreUploadConfig(object):
def custom_hook(self, hook):
"""The command to execute for |hook|."""
- return shlex.split(self.config.get(self.CUSTOM_HOOKS_SECTION, hook, ''))
+ return shlex.split(self.config.get(
+ self.CUSTOM_HOOKS_SECTION, hook, fallback=''))
@property
def builtin_hooks(self):
@@ -152,8 +128,13 @@ class PreUploadConfig(object):
def builtin_hook_option(self, hook):
"""The options to pass to |hook|."""
- return shlex.split(self.config.get(self.BUILTIN_HOOKS_OPTIONS_SECTION,
- hook, ''))
+ return shlex.split(self.config.get(
+ self.BUILTIN_HOOKS_OPTIONS_SECTION, hook, fallback=''))
+
+ def builtin_hook_exclude_paths(self, hook):
+ """List of paths for which |hook| should not be executed."""
+ return shlex.split(self.config.get(
+ self.BUILTIN_HOOKS_EXCLUDE_SECTION, hook, fallback=''))
@property
def tool_paths(self):
@@ -161,51 +142,52 @@ class PreUploadConfig(object):
return dict(self.config.items(self.TOOL_PATHS_SECTION, ()))
def callable_hooks(self):
- """Yield a name and callback for each hook to be executed."""
+ """Yield a CallableHook for each hook to be executed."""
+ scope = rh.hooks.ExclusionScope([])
for hook in self.custom_hooks:
options = rh.hooks.HookOptions(hook,
self.custom_hook(hook),
self.tool_paths)
- yield (hook, functools.partial(rh.hooks.check_custom,
- options=options))
+ func = functools.partial(rh.hooks.check_custom, options=options)
+ yield rh.hooks.CallableHook(hook, func, scope)
for hook in self.builtin_hooks:
options = rh.hooks.HookOptions(hook,
self.builtin_hook_option(hook),
self.tool_paths)
- yield (hook, functools.partial(rh.hooks.BUILTIN_HOOKS[hook],
- options=options))
+ func = functools.partial(rh.hooks.BUILTIN_HOOKS[hook],
+ options=options)
+ scope = rh.hooks.ExclusionScope(
+ self.builtin_hook_exclude_paths(hook))
+ yield rh.hooks.CallableHook(hook, func, scope)
@property
def ignore_merged_commits(self):
"""Whether to skip hooks for merged commits."""
return rh.shell.boolean_shell_value(
self.config.get(self.OPTIONS_SECTION,
- self.OPTION_IGNORE_MERGED_COMMITS, None),
+ self.OPTION_IGNORE_MERGED_COMMITS, fallback=None),
False)
+ def update(self, preupload_config):
+ """Merge settings from |preupload_config| into ourself."""
+ self.config.read_dict(preupload_config.config)
+
def _validate(self):
"""Run consistency checks on the config settings."""
config = self.config
# Reject unknown sections.
- valid_sections = set((
- self.CUSTOM_HOOKS_SECTION,
- self.BUILTIN_HOOKS_SECTION,
- self.BUILTIN_HOOKS_OPTIONS_SECTION,
- self.TOOL_PATHS_SECTION,
- self.OPTIONS_SECTION,
- ))
- bad_sections = set(config.sections()) - valid_sections
+ bad_sections = set(config.sections()) - self.VALID_SECTIONS
if bad_sections:
raise ValidationError('%s: unknown sections: %s' %
- (self.paths, bad_sections))
+ (self.source, bad_sections))
# Reject blank custom hooks.
for hook in self.custom_hooks:
if not config.get(self.CUSTOM_HOOKS_SECTION, hook):
raise ValidationError('%s: custom hook "%s" cannot be blank' %
- (self.paths, hook))
+ (self.source, hook))
# Reject unknown builtin hooks.
valid_builtin_hooks = set(rh.hooks.BUILTIN_HOOKS.keys())
@@ -214,7 +196,7 @@ class PreUploadConfig(object):
bad_hooks = hooks - valid_builtin_hooks
if bad_hooks:
raise ValidationError('%s: unknown builtin hooks: %s' %
- (self.paths, bad_hooks))
+ (self.source, bad_hooks))
elif config.has_section(self.BUILTIN_HOOKS_OPTIONS_SECTION):
raise ValidationError('Builtin hook options specified, but missing '
'builtin hook settings')
@@ -224,7 +206,7 @@ class PreUploadConfig(object):
bad_hooks = hooks - valid_builtin_hooks
if bad_hooks:
raise ValidationError('%s: unknown builtin hook options: %s' %
- (self.paths, bad_hooks))
+ (self.source, bad_hooks))
# Verify hooks are valid shell strings.
for hook in self.custom_hooks:
@@ -232,7 +214,7 @@ class PreUploadConfig(object):
self.custom_hook(hook)
except ValueError as e:
raise ValidationError('%s: hook "%s" command line is invalid: '
- '%s' % (self.paths, hook, e))
+ '%s' % (self.source, hook, e)) from e
# Verify hook options are valid shell strings.
for hook in self.builtin_hooks:
@@ -240,7 +222,7 @@ class PreUploadConfig(object):
self.builtin_hook_option(hook)
except ValueError as e:
raise ValidationError('%s: hook options "%s" are invalid: %s' %
- (self.paths, hook, e))
+ (self.source, hook, e)) from e
# Reject unknown tools.
valid_tools = set(rh.hooks.TOOL_PATHS.keys())
@@ -249,13 +231,105 @@ class PreUploadConfig(object):
bad_tools = tools - valid_tools
if bad_tools:
raise ValidationError('%s: unknown tools: %s' %
- (self.paths, bad_tools))
+ (self.source, bad_tools))
# Reject unknown options.
- valid_options = set(self.VALID_OPTIONS)
if config.has_section(self.OPTIONS_SECTION):
options = set(config.options(self.OPTIONS_SECTION))
- bad_options = options - valid_options
+ bad_options = options - self.VALID_OPTIONS
if bad_options:
raise ValidationError('%s: unknown options: %s' %
- (self.paths, bad_options))
+ (self.source, bad_options))
+
+
+class PreUploadFile(PreUploadConfig):
+ """A single config (file) used for `repo upload` hooks.
+
+ This is an abstract class that requires subclasses to define the FILENAME
+ constant.
+
+ Attributes:
+ path: The path of the file.
+ """
+ FILENAME = None
+
+ def __init__(self, path):
+ """Initialize.
+
+ Args:
+ path: The config file to load.
+ """
+ super().__init__(source=path)
+
+ self.path = path
+ try:
+ self.config.read(path)
+ except configparser.ParsingError as e:
+ raise ValidationError('%s: %s' % (path, e)) from e
+
+ self._validate()
+
+ @classmethod
+ def from_paths(cls, paths):
+        """Search for files within paths that match the class FILENAME.
+
+ Args:
+ paths: List of directories to look for config files.
+
+ Yields:
+          For each valid file found, an instance is created and yielded.
+ """
+ for path in paths:
+ path = os.path.join(path, cls.FILENAME)
+ if os.path.exists(path):
+ yield cls(path)
+
+
+class LocalPreUploadFile(PreUploadFile):
+ """A single config file for a project (PREUPLOAD.cfg)."""
+ FILENAME = 'PREUPLOAD.cfg'
+
+ def _validate(self):
+ super()._validate()
+
+ # Reject Exclude Paths section for local config.
+ if self.config.has_section(self.BUILTIN_HOOKS_EXCLUDE_SECTION):
+ raise ValidationError('%s: [%s] is not valid in local files' %
+ (self.path,
+ self.BUILTIN_HOOKS_EXCLUDE_SECTION))
+
+
+class GlobalPreUploadFile(PreUploadFile):
+ """A single config file for a repo (GLOBAL-PREUPLOAD.cfg)."""
+ FILENAME = 'GLOBAL-PREUPLOAD.cfg'
+
+
+class PreUploadSettings(PreUploadConfig):
+ """Settings for `repo upload` hooks.
+
+ This encompasses multiple config files and provides the final (merged)
+ settings for a particular project.
+ """
+
+ def __init__(self, paths=('',), global_paths=()):
+ """Initialize.
+
+ All the config files found will be merged together in order.
+
+ Args:
+ paths: The directories to look for config files.
+ global_paths: The directories to look for global config files.
+ """
+ super().__init__()
+
+ self.paths = []
+ for config in itertools.chain(
+ GlobalPreUploadFile.from_paths(global_paths),
+ LocalPreUploadFile.from_paths(paths)):
+ self.paths.append(config.path)
+ self.update(config)
+
+        # We validated configs in isolation; now do one final pass altogether.
+ self.source = '{%s}' % '|'.join(self.paths)
+ self._validate()
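The shrinking of the `RawConfigParser` overrides works because Python 3's `configparser` natively supports defaults via the `fallback=` keyword, and `read_dict()` accepts another parser instance, which is what `PreUploadSettings.update()` relies on to layer global and local files. A standalone sketch with illustrative section and option names:

```python
import configparser

base = configparser.RawConfigParser()
base.read_dict({'Builtin Hooks': {'cpplint': 'true'}})

# fallback= turns NoSectionError/NoOptionError into a default value.
print(base.get('Builtin Hooks', 'cpplint', fallback=''))    # 'true'
print(base.get('Builtin Hooks', 'gofmt', fallback=''))      # ''
print(base.get('No Such Section', 'opt', fallback=None))    # None

# read_dict() also accepts another parser, so later configs override
# earlier ones per option -- the same merge PreUploadSettings performs.
local = configparser.RawConfigParser()
local.read_dict({'Builtin Hooks': {'cpplint': 'false'}})
base.read_dict(local)
print(base.get('Builtin Hooks', 'cpplint', fallback=''))    # 'false'
```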
diff --git a/rh/config_test.py b/rh/config_test.py
index 794e50f..80fc832 100755
--- a/rh/config_test.py
+++ b/rh/config_test.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
# Copyright 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,8 +15,6 @@
"""Integration tests for the config module (via PREUPLOAD.cfg)."""
-from __future__ import print_function
-
import argparse
import os
import re
diff --git a/rh/config_unittest.py b/rh/config_unittest.py
index d51afdc..3e3e470 100755
--- a/rh/config_unittest.py
+++ b/rh/config_unittest.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
# Copyright 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,8 +15,6 @@
"""Unittests for the config module."""
-from __future__ import print_function
-
import os
import shutil
import sys
@@ -39,37 +36,53 @@ import rh.config
class PreUploadConfigTests(unittest.TestCase):
"""Tests for the PreUploadConfig class."""
+ def testMissing(self):
+ """Instantiating a non-existent config file should be fine."""
+ rh.config.PreUploadConfig()
+
+
+class FileTestCase(unittest.TestCase):
+    """Helper class for test cases to set up configuration files."""
+
def setUp(self):
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tempdir)
- def _write_config(self, data, filename=None):
- """Helper to write out a config file for testing."""
- if filename is None:
- filename = rh.config.PreUploadConfig.FILENAME
+ def _write_config(self, data, filename='temp.cfg'):
+ """Helper to write out a config file for testing.
+
+ Returns:
+ Path to the file where the configuration was written.
+ """
path = os.path.join(self.tempdir, filename)
with open(path, 'w') as fp:
fp.write(data)
+ return path
+
+ def _write_local_config(self, data):
+ """Helper to write out a local config file for testing."""
+ return self._write_config(
+ data, filename=rh.config.LocalPreUploadFile.FILENAME)
def _write_global_config(self, data):
"""Helper to write out a global config file for testing."""
- self._write_config(
- data, filename=rh.config.PreUploadConfig.GLOBAL_FILENAME)
+ return self._write_config(
+ data, filename=rh.config.GlobalPreUploadFile.FILENAME)
- def testMissing(self):
- """Instantiating a non-existent config file should be fine."""
- rh.config.PreUploadConfig()
+
+class PreUploadFileTests(FileTestCase):
+ """Tests for the PreUploadFile class."""
def testEmpty(self):
"""Instantiating an empty config file should be fine."""
- self._write_config('')
- rh.config.PreUploadConfig(paths=(self.tempdir,))
+ path = self._write_config('')
+ rh.config.PreUploadFile(path)
def testValid(self):
"""Verify a fully valid file works."""
- self._write_config("""# This be a comment me matey.
+ path = self._write_config("""# This be a comment me matey.
[Hook Scripts]
name = script --with "some args"
@@ -82,39 +95,56 @@ cpplint = --some 'more args'
[Options]
ignore_merged_commits = true
""")
- rh.config.PreUploadConfig(paths=(self.tempdir,))
+ rh.config.PreUploadFile(path)
def testUnknownSection(self):
"""Reject unknown sections."""
- self._write_config('[BOOGA]')
- self.assertRaises(rh.config.ValidationError, rh.config.PreUploadConfig,
- paths=(self.tempdir,))
+ path = self._write_config('[BOOGA]')
+ self.assertRaises(rh.config.ValidationError, rh.config.PreUploadFile,
+ path)
def testUnknownBuiltin(self):
"""Reject unknown builtin hooks."""
- self._write_config('[Builtin Hooks]\nbooga = borg!')
- self.assertRaises(rh.config.ValidationError, rh.config.PreUploadConfig,
- paths=(self.tempdir,))
+ path = self._write_config('[Builtin Hooks]\nbooga = borg!')
+ self.assertRaises(rh.config.ValidationError, rh.config.PreUploadFile,
+ path)
def testEmptyCustomHook(self):
"""Reject empty custom hooks."""
- self._write_config('[Hook Scripts]\nbooga = \t \n')
- self.assertRaises(rh.config.ValidationError, rh.config.PreUploadConfig,
- paths=(self.tempdir,))
+ path = self._write_config('[Hook Scripts]\nbooga = \t \n')
+ self.assertRaises(rh.config.ValidationError, rh.config.PreUploadFile,
+ path)
def testInvalidIni(self):
"""Reject invalid ini files."""
- self._write_config('[Hook Scripts]\n =')
- self.assertRaises(rh.config.ValidationError, rh.config.PreUploadConfig,
- paths=(self.tempdir,))
+ path = self._write_config('[Hook Scripts]\n =')
+ self.assertRaises(rh.config.ValidationError, rh.config.PreUploadFile,
+ path)
def testInvalidString(self):
"""Catch invalid string quoting."""
- self._write_config("""[Hook Scripts]
+ path = self._write_config("""[Hook Scripts]
name = script --'bad-quotes
""")
- self.assertRaises(rh.config.ValidationError, rh.config.PreUploadConfig,
- paths=(self.tempdir,))
+ self.assertRaises(rh.config.ValidationError, rh.config.PreUploadFile,
+ path)
+
+
+class LocalPreUploadFileTests(FileTestCase):
+ """Test for the LocalPreUploadFile class."""
+
+ def testInvalidSectionConfig(self):
+ """Reject local config that uses invalid sections."""
+ path = self._write_config("""[Builtin Hooks Exclude Paths]
+cpplint = external/ 'test directory' ^vendor/(?!google/)
+""")
+ self.assertRaises(rh.config.ValidationError,
+ rh.config.LocalPreUploadFile,
+ path)
+
+
+class PreUploadSettingsTests(FileTestCase):
+ """Tests for the PreUploadSettings class."""
def testGlobalConfigs(self):
"""Verify global configs stack properly."""
@@ -122,14 +152,21 @@ name = script --'bad-quotes
commit_msg_bug_field = true
commit_msg_changeid_field = true
commit_msg_test_field = false""")
- self._write_config("""[Builtin Hooks]
+ self._write_local_config("""[Builtin Hooks]
commit_msg_bug_field = false
commit_msg_test_field = true""")
- config = rh.config.PreUploadConfig(paths=(self.tempdir,),
- global_paths=(self.tempdir,))
+ config = rh.config.PreUploadSettings(paths=(self.tempdir,),
+ global_paths=(self.tempdir,))
self.assertEqual(config.builtin_hooks,
['commit_msg_changeid_field', 'commit_msg_test_field'])
+ def testGlobalExcludeScope(self):
+ """Verify exclude scope is valid for global config."""
+ self._write_global_config("""[Builtin Hooks Exclude Paths]
+cpplint = external/ 'test directory' ^vendor/(?!google/)
+""")
+ rh.config.PreUploadSettings(global_paths=(self.tempdir,))
+
if __name__ == '__main__':
unittest.main()
diff --git a/rh/git.py b/rh/git.py
index baf669c..ab1e35f 100644
--- a/rh/git.py
+++ b/rh/git.py
@@ -1,4 +1,3 @@
-# -*- coding:utf-8 -*-
# Copyright 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,8 +14,6 @@
"""Git helper functions."""
-from __future__ import print_function
-
import os
import re
import sys
@@ -167,12 +164,12 @@ def get_affected_files(commit):
Returns:
A list of modified/added (and perhaps deleted) files
"""
- return raw_diff(os.getcwd(), '%s^!' % commit)
+ return raw_diff(os.getcwd(), '%s^-' % commit)
def get_commits(ignore_merged_commits=False):
"""Returns a list of commits for this review."""
- cmd = ['git', 'log', '%s..' % get_upstream_branch(), '--format=%H']
+ cmd = ['git', 'rev-list', '%s..' % get_upstream_branch()]
if ignore_merged_commits:
cmd.append('--first-parent')
return rh.utils.run(cmd, capture_output=True).stdout.split()
@@ -180,7 +177,7 @@ def get_commits(ignore_merged_commits=False):
def get_commit_desc(commit):
"""Returns the full commit message of a commit."""
- cmd = ['git', 'log', '--format=%B', commit + '^!']
+ cmd = ['git', 'diff-tree', '-s', '--always', '--format=%B', commit]
return rh.utils.run(cmd, capture_output=True).stdout
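In gitrevisions terms, `commit^-` is shorthand for `commit^..commit`, so the diff is taken against the first parent (which also behaves sensibly for merge commits), and `git rev-list` prints one hash per line without needing `--format=%H`. A quick standalone way to poke at both spellings, using a hypothetical helper and only the stdlib:

```python
import subprocess

def git_stdout(cmd):
    """Run a git command and return its stdout (illustrative helper)."""
    return subprocess.run(
        cmd, capture_output=True, text=True, check=True).stdout

# `HEAD^-` == `HEAD^..HEAD`: files changed relative to the first parent.
print(git_stdout(['git', 'diff', '--name-only', 'HEAD^-']))

# rev-list emits commit hashes directly, one per line.
print(git_stdout(['git', 'rev-list', 'HEAD~3..HEAD']))
```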
diff --git a/rh/hooks.py b/rh/hooks.py
index bdfcaf0..0b3bb29 100644
--- a/rh/hooks.py
+++ b/rh/hooks.py
@@ -1,4 +1,3 @@
-# -*- coding:utf-8 -*-
# Copyright 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,8 +14,8 @@
"""Functions that implement the actual checks."""
-from __future__ import print_function
-
+import collections
+import fnmatch
import json
import os
import platform
@@ -31,7 +30,6 @@ del _path
# pylint: disable=wrong-import-position
import rh.git
import rh.results
-from rh.sixish import string_types
import rh.utils
@@ -69,26 +67,39 @@ class Placeholders(object):
ret = []
for arg in args:
- # First scan for exact matches
- for key, val in replacements.items():
- var = '${%s}' % (key,)
- if arg == var:
- if isinstance(val, string_types):
- ret.append(val)
- else:
- ret.extend(val)
- # We break on first hit to avoid double expansion.
- break
+ if arg.endswith('${PREUPLOAD_FILES_PREFIXED}'):
+ if arg == '${PREUPLOAD_FILES_PREFIXED}':
+ assert len(ret) > 1, ('PREUPLOAD_FILES_PREFIXED cannot be '
+ 'the 1st or 2nd argument')
+ prev_arg = ret[-1]
+ ret = ret[0:-1]
+ for file in self.get('PREUPLOAD_FILES'):
+ ret.append(prev_arg)
+ ret.append(file)
+ else:
+ prefix = arg[0:-len('${PREUPLOAD_FILES_PREFIXED}')]
+ ret.extend(
+ prefix + file for file in self.get('PREUPLOAD_FILES'))
else:
- # If no exact matches, do an inline replacement.
- def replace(m):
- val = self.get(m.group(1))
- if isinstance(val, string_types):
- return val
- return ' '.join(val)
- ret.append(re.sub(r'\$\{(%s)\}' % ('|'.join(all_vars),),
- replace, arg))
-
+ # First scan for exact matches
+ for key, val in replacements.items():
+ var = '${%s}' % (key,)
+ if arg == var:
+ if isinstance(val, str):
+ ret.append(val)
+ else:
+ ret.extend(val)
+ # We break on first hit to avoid double expansion.
+ break
+ else:
+ # If no exact matches, do an inline replacement.
+ def replace(m):
+ val = self.get(m.group(1))
+ if isinstance(val, str):
+ return val
+ return ' '.join(val)
+ ret.append(re.sub(r'\$\{(%s)\}' % ('|'.join(all_vars),),
+ replace, arg))
return ret
@classmethod
@@ -128,6 +139,42 @@ class Placeholders(object):
return _get_build_os_name()
+class ExclusionScope(object):
+ """Exclusion scope for a hook.
+
+ An exclusion scope can be used to determine if a hook has been disabled for
+ a specific project.
+ """
+
+ def __init__(self, scope):
+ """Initialize.
+
+ Args:
+            scope: A list of shell-style wildcards (fnmatch) or regular
+                expressions. A regular expression must start with the ^ character.
+ """
+ self._scope = []
+ for path in scope:
+ if path.startswith('^'):
+ self._scope.append(re.compile(path))
+ else:
+ self._scope.append(path)
+
+ def __contains__(self, proj_dir):
+ """Checks if |proj_dir| matches the excluded paths.
+
+ Args:
+ proj_dir: The relative path of the project.
+ """
+ for exclusion_path in self._scope:
+ if hasattr(exclusion_path, 'match'):
+ if exclusion_path.match(proj_dir):
+ return True
+ elif fnmatch.fnmatch(proj_dir, exclusion_path):
+ return True
+ return False
+
+
class HookOptions(object):
"""Holder class for hook options."""
@@ -186,12 +233,18 @@ class HookOptions(object):
return self.expand_vars([tool_path])[0]
+# A callable hook.
+CallableHook = collections.namedtuple('CallableHook', ('name', 'hook', 'scope'))
+
+
def _run(cmd, **kwargs):
"""Helper command for checks that tend to gather output."""
- kwargs.setdefault('redirect_stderr', True)
kwargs.setdefault('combine_stdout_stderr', True)
kwargs.setdefault('capture_output', True)
kwargs.setdefault('check', False)
+    # Make sure hooks run with stdin disconnected so unexpectedly
+    # interactive tools don't pause waiting for input.
+ kwargs.setdefault('input', '')
return rh.utils.run(cmd, **kwargs)
@@ -536,6 +589,18 @@ Single-line Relnote example:
Relnote: Added a new API `Class#containsData`
"""
+RELNOTE_INVALID_QUOTES_MSG = """Commit message contains something that looks
+similar to the "Relnote:" tag but may be malformed. If you are using
+quotes that do not mark the start or end of a Relnote, you need to escape them
+with a backslash.
+
+Non-starting/non-ending quote Relnote examples:
+
+Relnote: "Fixed an error with `Class#getBar()` where \"foo\" would be returned
+in edge cases."
+Relnote: Added a new API to handle strings like \"foo\"
+"""
+
def check_commit_msg_relnote_field_format(project, commit, desc, _diff,
options=None):
"""Check the commit for one correctly formatted 'Relnote:' line.
@@ -544,6 +609,8 @@ def check_commit_msg_relnote_field_format(project, commit, desc, _diff,
(1) Checks for possible misspellings of the 'Relnote:' tag.
(2) Ensures that multiline release notes are properly formatted with a
    starting quote and an ending quote.
+ (3) Checks that release notes that contain non-starting or non-ending
+ quotes are escaped with a backslash.
"""
field = 'Relnote'
regex_relnote = r'^%s:.*$' % (field,)
@@ -601,7 +668,6 @@ def check_commit_msg_relnote_field_format(project, commit, desc, _diff,
break
# Check 3: Check that multiline Relnotes contain matching quotes.
-
first_quote_found = False
second_quote_found = False
for cur_line in desc_lines:
@@ -618,21 +684,124 @@ def check_commit_msg_relnote_field_format(project, commit, desc, _diff,
# Check that the `Relnote:` tag exists and it contains a starting quote.
if check_re_relnote.match(cur_line) and contains_quote:
first_quote_found = True
+ # A single-line Relnote containing a start and ending triple quote
+ # is valid.
+ if cur_line.count('"""') == 2:
+ second_quote_found = True
+ break
# A single-line Relnote containing a start and ending quote
- # is valid as well.
- if cur_line.count('"') == 2:
+ # is valid.
+ if cur_line.count('"') - cur_line.count('\\"') == 2:
second_quote_found = True
break
-
if first_quote_found != second_quote_found:
ret.append(
rh.results.HookResult(('commit msg: "%s:" '
'tag missing closing quote') % (field,),
project, commit,
error=RELNOTE_MISSING_QUOTES_MSG))
+
+ # Check 4: Check that non-starting or non-ending quotes are escaped with a
+ # backslash.
+ line_needs_checking = False
+ uses_invalid_quotes = False
+ for cur_line in desc_lines:
+ if check_re_other_fields.findall(cur_line):
+ line_needs_checking = False
+ on_relnote_line = check_re_relnote.match(cur_line)
+ # Determine if we are parsing the base `Relnote:` line.
+ if on_relnote_line and '"' in cur_line:
+ line_needs_checking = True
+ # We don't think anyone will type '"""' and then forget to
+ # escape it, so we're not checking for this.
+ if '"""' in cur_line:
+ break
+ if line_needs_checking:
+ stripped_line = re.sub('^%s:' % field, '', cur_line,
+ flags=re.IGNORECASE).strip()
+ for i, character in enumerate(stripped_line):
+ if i == 0:
+ # Case 1: Valid quote at the beginning of the
+ # base `Relnote:` line.
+ if on_relnote_line:
+ continue
+ # Case 2: Invalid quote at the beginning of following
+ # lines, where we are not terminating the release note.
+ if character == '"' and stripped_line != '"':
+ uses_invalid_quotes = True
+ break
+ # Case 3: Check all other cases.
+ if (character == '"'
+ and 0 < i < len(stripped_line) - 1
+ and stripped_line[i-1] != '"'
+ and stripped_line[i-1] != "\\"):
+ uses_invalid_quotes = True
+ break
+
+ if uses_invalid_quotes:
+ ret.append(rh.results.HookResult(('commit msg: "%s:" '
+ 'tag using unescaped '
+ 'quotes') % (field,),
+ project, commit,
+ error=RELNOTE_INVALID_QUOTES_MSG))
return ret
+RELNOTE_REQUIRED_CURRENT_TXT_MSG = """\
+Commit contains a change to current.txt or public_plus_experimental_current.txt,
+but the commit message does not contain the required `Relnote:` tag. It must
+match the regex:
+
+ %s
+
+The Relnote: stanza is free-form and should describe what developers need to
+know about your change. If you are making infrastructure changes, you
+can set the Relnote: stanza to "N/A" so that the commit is not included
+in release notes.
+
+Some examples:
+
+Relnote: "Added a new API `Class#isBetter` to determine whether or not the
+class is better"
+Relnote: Fixed an issue where the UI would hang on a double tap.
+Relnote: N/A
+
+Check the git history for more examples.
+"""
+
+def check_commit_msg_relnote_for_current_txt(project, commit, desc, diff,
+ options=None):
+ """Check changes to current.txt contain the 'Relnote:' stanza."""
+ field = 'Relnote'
+ regex = r'^%s: .+$' % (field,)
+ check_re = re.compile(regex, re.IGNORECASE)
+
+ if options.args():
+ raise ValueError('commit msg %s check takes no options' % (field,))
+
+ filtered = _filter_diff(
+ diff,
+ [r'(^|/)(public_plus_experimental_current|current)\.txt$']
+ )
+ # If the commit does not contain a change to *current.txt, then this repo
+ # hook check no longer applies.
+ if not filtered:
+ return None
+
+ found = []
+ for line in desc.splitlines():
+ if check_re.match(line):
+ found.append(line)
+
+ if not found:
+ error = RELNOTE_REQUIRED_CURRENT_TXT_MSG % (regex)
+ else:
+ return None
+
+ return [rh.results.HookResult('commit msg: "%s:" check' % (field,),
+ project, commit, error=error)]
+
+
def check_cpplint(project, commit, _desc, diff, options=None):
"""Run cpplint."""
# This list matches what cpplint expects. We could run on more (like .cxx),
@@ -659,9 +828,10 @@ def check_gofmt(project, commit, _desc, diff, options=None):
data = rh.git.get_file_content(commit, d.file)
result = _run(cmd, input=data)
if result.stdout:
+ fixup_func = _fixup_func_caller([gofmt, '-w', d.file])
ret.append(rh.results.HookResult(
'gofmt', project, commit, error=result.stdout,
- files=(d.file,)))
+ files=(d.file,), fixup_func=fixup_func))
return ret
@@ -711,10 +881,40 @@ def check_pylint2(project, commit, desc, diff, options=None):
def check_pylint3(project, commit, desc, diff, options=None):
"""Run pylint through Python 3."""
return _check_pylint(project, commit, desc, diff,
- extra_args=['--executable-path=pylint3'],
+ extra_args=['--py3'],
options=options)
+def check_rustfmt(project, commit, _desc, diff, options=None):
+    """Run "rustfmt --check" on diffed Rust files."""
+ filtered = _filter_diff(diff, [r'\.rs$'])
+ if not filtered:
+ return None
+
+ rustfmt = options.tool_path('rustfmt')
+ cmd = [rustfmt] + options.args((), filtered)
+ ret = []
+ for d in filtered:
+ data = rh.git.get_file_content(commit, d.file)
+ result = _run(cmd, input=data)
+ # If the parsing failed, stdout will contain enough details on the
+ # location of the error.
+ if result.returncode:
+ ret.append(rh.results.HookResult(
+ 'rustfmt', project, commit, error=result.stdout,
+ files=(d.file,)))
+ continue
+ # TODO(b/164111102): rustfmt stable does not support --check on stdin.
+ # If no error is reported, compare stdin with stdout.
+ if data != result.stdout:
+ msg = ('To fix, please run: %s' %
+ rh.shell.cmd_to_str(cmd + [d.file]))
+ ret.append(rh.results.HookResult(
+ 'rustfmt', project, commit, error=msg,
+ files=(d.file,)))
+ return ret
+
+
def check_xmllint(project, commit, _desc, diff, options=None):
"""Run xmllint."""
# XXX: Should we drop most of these and probe for <?xml> tags?
@@ -774,9 +974,30 @@ def check_android_test_mapping(project, commit, _desc, diff, options=None):
return _check_cmd('android-test-mapping-format', project, commit, cmd)
+def check_aidl_format(project, commit, _desc, diff, options=None):
+ """Checks that AIDL files are formatted with aidl-format."""
+    # All *.aidl files except for those under an aidl_api directory.
+ filtered = _filter_diff(diff, [r'\.aidl$'], [r'/aidl_api/'])
+ if not filtered:
+ return None
+ aidl_format = options.tool_path('aidl-format')
+ cmd = [aidl_format, '-d'] + options.args((), filtered)
+ ret = []
+ for d in filtered:
+ data = rh.git.get_file_content(commit, d.file)
+ result = _run(cmd, input=data)
+ if result.stdout:
+ fixup_func = _fixup_func_caller([aidl_format, '-w', d.file])
+ ret.append(rh.results.HookResult(
+ 'aidl-format', project, commit, error=result.stdout,
+ files=(d.file,), fixup_func=fixup_func))
+ return ret
+
+
# Hooks that projects can opt into.
# Note: Make sure to keep the top level README.md up to date when adding more!
BUILTIN_HOOKS = {
+ 'aidl_format': check_aidl_format,
'android_test_mapping_format': check_android_test_mapping,
'bpfmt': check_bpfmt,
'checkpatch': check_checkpatch,
@@ -786,6 +1007,8 @@ BUILTIN_HOOKS = {
'commit_msg_prebuilt_apk_fields': check_commit_msg_prebuilt_apk_fields,
'commit_msg_test_field': check_commit_msg_test_field,
'commit_msg_relnote_field_format': check_commit_msg_relnote_field_format,
+ 'commit_msg_relnote_for_current_txt':
+ check_commit_msg_relnote_for_current_txt,
'cpplint': check_cpplint,
'gofmt': check_gofmt,
'google_java_format': check_google_java_format,
@@ -793,12 +1016,14 @@ BUILTIN_HOOKS = {
'pylint': check_pylint2,
'pylint2': check_pylint2,
'pylint3': check_pylint3,
+ 'rustfmt': check_rustfmt,
'xmllint': check_xmllint,
}
# Additional tools that the hooks can call with their default values.
# Note: Make sure to keep the top level README.md up to date when adding more!
TOOL_PATHS = {
+ 'aidl-format': 'aidl-format',
'android-test-mapping-format':
os.path.join(TOOLS_DIR, 'android_test_mapping_format.py'),
'bpfmt': 'bpfmt',
@@ -809,4 +1034,5 @@ TOOL_PATHS = {
'google-java-format': 'google-java-format',
'google-java-format-diff': 'google-java-format-diff.py',
'pylint': 'pylint',
+ 'rustfmt': 'rustfmt',
}
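The matching in `ExclusionScope.__contains__` reduces to two stdlib calls: `re.match` for `^`-prefixed entries and `fnmatch.fnmatch` for everything else. A standalone sketch of the same logic with illustrative patterns:

```python
import fnmatch
import re

def excluded(proj_dir, scope):
    """Mirror of ExclusionScope: ^-prefixed entries are regexes,
    everything else is a shell-style wildcard."""
    for pattern in scope:
        if pattern.startswith('^'):
            if re.compile(pattern).match(proj_dir):
                return True
        elif fnmatch.fnmatch(proj_dir, pattern):
            return True
    return False

scope = ['external/*', '^hardware/(?!google)']
print(excluded('external/tools', scope))       # True: wildcard match
print(excluded('hardware/other', scope))       # True: regex match
print(excluded('hardware/google/nfc', scope))  # False: negative lookahead
```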
diff --git a/rh/hooks_unittest.py b/rh/hooks_unittest.py
index 9b48119..8466319 100755
--- a/rh/hooks_unittest.py
+++ b/rh/hooks_unittest.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
# Copyright 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,11 +15,10 @@
"""Unittests for the hooks module."""
-from __future__ import print_function
-
import os
import sys
import unittest
+from unittest import mock
_path = os.path.realpath(__file__ + '/../..')
if sys.path[0] != _path:
@@ -33,8 +31,6 @@ del _path
import rh
import rh.config
import rh.hooks
-from rh.sixish import mock
-from rh.sixish import string_types
class HooksDocsTests(unittest.TestCase):
@@ -97,7 +93,9 @@ class PlaceholderTests(unittest.TestCase):
'PREUPLOAD_COMMIT_MESSAGE': 'commit message',
'PREUPLOAD_COMMIT': '5c4c293174bb61f0f39035a71acd9084abfa743d',
})
- self.replacer = rh.hooks.Placeholders()
+ self.replacer = rh.hooks.Placeholders(
+ [rh.git.RawDiffEntry(file=x)
+ for x in ['path1/file1', 'path2/file2']])
def tearDown(self):
os.environ.clear()
@@ -117,9 +115,13 @@ class PlaceholderTests(unittest.TestCase):
# We also make sure that things in ${REPO_ROOT} are not double
# expanded (which is why the return includes ${BUILD_OS}).
'${REPO_ROOT}/some/prog/REPO_ROOT/ok',
- # Verify lists are merged rather than inserted. In this case, the
- # list is empty, but we'd hit an error still if we saw [] in args.
+ # Verify lists are merged rather than inserted.
'${PREUPLOAD_FILES}',
+ # Verify each file is preceded with '--file=' prefix.
+ '--file=${PREUPLOAD_FILES_PREFIXED}',
+ # Verify each file is preceded with '--file' argument.
+ '--file',
+ '${PREUPLOAD_FILES_PREFIXED}',
# Verify values with whitespace don't expand into multiple args.
'${PREUPLOAD_COMMIT_MESSAGE}',
# Verify multiple values get replaced.
@@ -130,6 +132,14 @@ class PlaceholderTests(unittest.TestCase):
output_args = self.replacer.expand_vars(input_args)
exp_args = [
'/ ${BUILD_OS}/some/prog/REPO_ROOT/ok',
+ 'path1/file1',
+ 'path2/file2',
+ '--file=path1/file1',
+ '--file=path2/file2',
+ '--file',
+ 'path1/file1',
+ '--file',
+ 'path2/file2',
'commit message',
'5c4c293174bb61f0f39035a71acd9084abfa743d^commit message',
'${THIS_VAR_IS_GOOD}',
@@ -154,7 +164,8 @@ class PlaceholderTests(unittest.TestCase):
def testPREUPLOAD_FILES(self):
"""Verify handling of PREUPLOAD_FILES."""
- self.assertEqual(self.replacer.get('PREUPLOAD_FILES'), [])
+ self.assertEqual(self.replacer.get('PREUPLOAD_FILES'),
+ ['path1/file1', 'path2/file2'])
@mock.patch.object(rh.git, 'find_repo_root', return_value='/repo!')
def testREPO_ROOT(self, m):
@@ -167,6 +178,28 @@ class PlaceholderTests(unittest.TestCase):
self.assertEqual(self.replacer.get('BUILD_OS'), m.return_value)
+class ExclusionScopeTests(unittest.TestCase):
+ """Verify behavior of ExclusionScope class."""
+
+ def testEmpty(self):
+ """Verify the in operator for an empty scope."""
+ scope = rh.hooks.ExclusionScope([])
+ self.assertNotIn('external/*', scope)
+
+ def testGlob(self):
+ """Verify the in operator for a scope using wildcards."""
+ scope = rh.hooks.ExclusionScope(['vendor/*', 'external/*'])
+ self.assertIn('external/tools', scope)
+
+ def testRegex(self):
+ """Verify the in operator for a scope using regular expressions."""
+ scope = rh.hooks.ExclusionScope(['^vendor/(?!google)',
+ 'external/*'])
+ self.assertIn('vendor/', scope)
+ self.assertNotIn('vendor/google/', scope)
+ self.assertIn('vendor/other/', scope)
+
+
class HookOptionsTests(unittest.TestCase):
"""Verify behavior of HookOptions object."""
@@ -224,14 +257,14 @@ class UtilsTests(unittest.TestCase):
# Just verify it returns something and doesn't crash.
# pylint: disable=protected-access
ret = rh.hooks._get_build_os_name()
- self.assertTrue(isinstance(ret, string_types))
+ self.assertTrue(isinstance(ret, str))
self.assertNotEqual(ret, '')
def testGetHelperPath(self):
"""Check get_helper_path behavior."""
# Just verify it doesn't crash. It's a dirt simple func.
ret = rh.hooks.get_helper_path('booga')
- self.assertTrue(isinstance(ret, string_types))
+ self.assertTrue(isinstance(ret, str))
self.assertNotEqual(ret, '')
@@ -278,7 +311,7 @@ class BuiltinHooksTests(unittest.TestCase):
"""
# First call should do nothing as there are no files to check.
ret = func(self.project, 'commit', 'desc', (), options=self.options)
- self.assertEqual(ret, None)
+ self.assertIsNone(ret)
self.assertFalse(mock_check.called)
# Second call should include some checks.
@@ -489,12 +522,13 @@ class BuiltinHooksTests(unittest.TestCase):
True,
(
'subj',
- 'subj\n\nTest: i did done dood it\n',
- 'subj\n\nMore content\n\nTest: i did done dood it\n',
- 'subj\n\nRelnote: This is a release note\n',
- 'subj\n\nRelnote:This is a release note\n',
+ 'subj\n\nTest: i did done dood it\nBug: 1234',
+ 'subj\n\nMore content\n\nTest: i did done dood it\nBug: 1234',
+ 'subj\n\nRelnote: This is a release note\nBug: 1234',
+ 'subj\n\nRelnote:This is a release note\nBug: 1234',
'subj\n\nRelnote: This is a release note.\nBug: 1234',
'subj\n\nRelnote: "This is a release note."\nBug: 1234',
+ 'subj\n\nRelnote: "This is a \\"release note\\"."\n\nBug: 1234',
'subj\n\nRelnote: This is a release note.\nChange-Id: 1234',
'subj\n\nRelnote: This is a release note.\n\nChange-Id: 1234',
('subj\n\nRelnote: "This is a release note."\n\n'
@@ -510,6 +544,10 @@ class BuiltinHooksTests(unittest.TestCase):
'It contains a correct second line.\n'
'And even a third line."\n'
'Bug: 1234'),
+ ('subj\n\nRelnote: "This is a release note.\n'
+ 'It contains a correct second line.\n'
+ '\\"Quotes\\" are even used on the third line."\n'
+ 'Bug: 1234'),
('subj\n\nRelnote: This is release note 1.\n'
'Relnote: This is release note 2.\n'
'Bug: 1234'),
@@ -522,6 +560,37 @@ class BuiltinHooksTests(unittest.TestCase):
'Relnote: "This is release note 2, and it\n'
'contains a correctly formatted second line."\n'
'Bug: 1234'),
+ ('subj\n\nRelnote: "This is a release note with\n'
+ 'a correctly formatted second line."\n\n'
+ 'Bug: 1234'
+ 'Here is some extra "quoted" content.'),
+ ('subj\n\nRelnote: """This is a release note.\n\n'
+ 'This relnote contains an empty line.\n'
+ 'Then a non-empty line.\n\n'
+ 'And another empty line."""\n\n'
+ 'Bug: 1234'),
+ ('subj\n\nRelnote: """This is a release note.\n\n'
+ 'This relnote contains an empty line.\n'
+ 'Then an acceptable "quoted" line.\n\n'
+ 'And another empty line."""\n\n'
+ 'Bug: 1234'),
+ ('subj\n\nRelnote: """This is a release note."""\n\n'
+ 'Bug: 1234'),
+ ('subj\n\nRelnote: """This is a release note.\n'
+ 'It has a second line."""\n\n'
+ 'Bug: 1234'),
+ ('subj\n\nRelnote: """This is a release note.\n'
+ 'It has a second line, but does not end here.\n'
+ '"""\n\n'
+ 'Bug: 1234'),
+ ('subj\n\nRelnote: """This is a release note.\n'
+ '"It" has a second line, but does not end here.\n'
+ '"""\n\n'
+ 'Bug: 1234'),
+ ('subj\n\nRelnote: "This is a release note.\n'
+ 'It has a second line, but does not end here.\n'
+ '"\n\n'
+ 'Bug: 1234'),
))
# Check some bad messages.
@@ -534,6 +603,8 @@ class BuiltinHooksTests(unittest.TestCase):
'subj\n\nRel-note: This is a release note.\n',
'subj\n\nrelnoTes: This is a release note.\n',
'subj\n\nrel-Note: This is a release note.\n',
+ 'subj\n\nRelnote: "This is a "release note"."\nBug: 1234',
+ 'subj\n\nRelnote: This is a "release note".\nBug: 1234',
('subj\n\nRelnote: This is a release note.\n'
'It contains an incorrect second line.'),
('subj\n\nRelnote: "This is a release note.\n'
@@ -556,8 +627,126 @@ class BuiltinHooksTests(unittest.TestCase):
'Relnote: This is release note 2, and it\n'
'contains an incorrectly formatted second line.\n'
'Bug: 1234'),
+ ('subj\n\nRelnote: "This is a release note.\n'
+ 'It contains a correct second line.\n'
+ 'But incorrect "quotes" on the third line."\n'
+ 'Bug: 1234'),
+ ('subj\n\nRelnote: """This is a release note.\n'
+ 'It has a second line, but no closing triple quote.\n\n'
+ 'Bug: 1234'),
+ ('subj\n\nRelnote: "This is a release note.\n'
+ '"It" has a second line, but does not end here.\n'
+ '"\n\n'
+ 'Bug: 1234'),
))
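
The pass/fail pairs above reduce to a simple quoting rule: inside a quoted Relnote, literal double quotes must be backslash-escaped (triple-quoted blocks relax this and may span blank lines). A hedged sketch of just the single-line case, for orientation only; the real checker lives in rh/hooks.py and is not shown in this hunk:

    import re

    def quoted_relnote_ok(line):
        # Illustration only: a one-line quoted Relnote is well formed when
        # every inner double quote is escaped as \".
        match = re.match(r'Relnote:\s*"(.*)"$', line, re.IGNORECASE)
        if not match:
            return False
        return '"' not in match.group(1).replace('\\"', '')

    assert quoted_relnote_ok('Relnote: "This is a \\"release note\\"."')
    assert not quoted_relnote_ok('Relnote: "This is a "release note"."')
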
+ def test_commit_msg_relnote_for_current_txt(self, _mock_check, _mock_run):
+ """Verify the commit_msg_relnote_for_current_txt builtin hook."""
+ diff_without_current_txt = ['bar/foo.txt',
+ 'foo.cpp',
+ 'foo.java',
+ 'foo_current.java',
+ 'foo_current.txt',
+ 'baz/current.java',
+ 'baz/foo_current.txt']
+ diff_with_current_txt = diff_without_current_txt + ['current.txt']
+ diff_with_subdir_current_txt = \
+ diff_without_current_txt + ['foo/current.txt']
+ diff_with_experimental_current_txt = \
+ diff_without_current_txt + ['public_plus_experimental_current.txt']
+ # Check some good messages.
+ self._test_commit_messages(
+ rh.hooks.check_commit_msg_relnote_for_current_txt,
+ True,
+ (
+ 'subj\n\nRelnote: This is a release note\n',
+ 'subj\n\nRelnote: This is a release note.\n\nChange-Id: 1234',
+ ('subj\n\nRelnote: This is release note 1 with\n'
+ 'an incorrectly formatted second line.\n\n'
+ 'Relnote: "This is release note 2, and it\n'
+ 'contains a correctly formatted second line."\n'
+ 'Bug: 1234'),
+ ),
+ files=diff_with_current_txt,
+ )
+ # Check some good messages.
+ self._test_commit_messages(
+ rh.hooks.check_commit_msg_relnote_for_current_txt,
+ True,
+ (
+ 'subj\n\nRelnote: This is a release note\n',
+ 'subj\n\nRelnote: This is a release note.\n\nChange-Id: 1234',
+ ('subj\n\nRelnote: This is release note 1 with\n'
+ 'an incorrectly formatted second line.\n\n'
+ 'Relnote: "This is release note 2, and it\n'
+ 'contains a correctly formatted second line."\n'
+ 'Bug: 1234'),
+ ),
+ files=diff_with_experimental_current_txt,
+ )
+ # Check some good messages.
+ self._test_commit_messages(
+ rh.hooks.check_commit_msg_relnote_for_current_txt,
+ True,
+ (
+ 'subj\n\nRelnote: This is a release note\n',
+ 'subj\n\nRelnote: This is a release note.\n\nChange-Id: 1234',
+ ('subj\n\nRelnote: This is release note 1 with\n'
+ 'an incorrectly formatted second line.\n\n'
+ 'Relnote: "This is release note 2, and it\n'
+ 'contains a correctly formatted second line."\n'
+ 'Bug: 1234'),
+ ),
+ files=diff_with_subdir_current_txt,
+ )
+ # Check some good messages.
+ self._test_commit_messages(
+ rh.hooks.check_commit_msg_relnote_for_current_txt,
+ True,
+ (
+ 'subj',
+ 'subj\nBug: 12345\nChange-Id: 1234',
+ 'subj\n\nRelnote: This is a release note\n',
+ 'subj\n\nRelnote: This is a release note.\n\nChange-Id: 1234',
+ ('subj\n\nRelnote: This is release note 1 with\n'
+ 'an incorrectly formatted second line.\n\n'
+ 'Relnote: "This is release note 2, and it\n'
+ 'contains a correctly formatted second line."\n'
+ 'Bug: 1234'),
+ ),
+ files=diff_without_current_txt,
+ )
+ # Check some bad messages.
+ self._test_commit_messages(
+ rh.hooks.check_commit_msg_relnote_for_current_txt,
+ False,
+ (
+ 'subj',
+ 'subj\nBug: 12345\nChange-Id: 1234',
+ ),
+ files=diff_with_current_txt,
+ )
+ # Check some bad messages.
+ self._test_commit_messages(
+ rh.hooks.check_commit_msg_relnote_for_current_txt,
+ False,
+ (
+ 'subj',
+ 'subj\nBug: 12345\nChange-Id: 1234',
+ ),
+ files=diff_with_experimental_current_txt,
+ )
+ # Check some bad messages.
+ self._test_commit_messages(
+ rh.hooks.check_commit_msg_relnote_for_current_txt,
+ False,
+ (
+ 'subj',
+ 'subj\nBug: 12345\nChange-Id: 1234',
+ ),
+ files=diff_with_subdir_current_txt,
+ )
+
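
The four file lists in this test pin down what triggers the new requirement: only files actually named current.txt or public_plus_experimental_current.txt, at any depth, while lookalikes such as foo_current.txt do not. A minimal sketch of that trigger; the regex is inferred from the test data, not copied from rh/hooks.py:

    import re

    # Assumption: the hook filters the diff with a pattern equivalent to this.
    _API_FILE_RE = re.compile(r'(^|/)(public_plus_experimental_current|current)\.txt$')

    def needs_relnote(files):
        return any(_API_FILE_RE.search(f) for f in files)

    assert needs_relnote(['foo/current.txt'])
    assert needs_relnote(['public_plus_experimental_current.txt'])
    assert not needs_relnote(['baz/foo_current.txt', 'foo_current.txt'])
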
def test_cpplint(self, mock_check, _mock_run):
"""Verify the cpplint builtin hook."""
self._test_file_filter(mock_check, rh.hooks.check_cpplint,
@@ -568,21 +757,21 @@ class BuiltinHooksTests(unittest.TestCase):
# First call should do nothing as there are no files to check.
ret = rh.hooks.check_gofmt(
self.project, 'commit', 'desc', (), options=self.options)
- self.assertEqual(ret, None)
+ self.assertIsNone(ret)
self.assertFalse(mock_check.called)
# Second call will have some results.
diff = [rh.git.RawDiffEntry(file='foo.go')]
ret = rh.hooks.check_gofmt(
self.project, 'commit', 'desc', diff, options=self.options)
- self.assertNotEqual(ret, None)
+ self.assertIsNotNone(ret)
def test_jsonlint(self, mock_check, _mock_run):
"""Verify the jsonlint builtin hook."""
# First call should do nothing as there are no files to check.
ret = rh.hooks.check_json(
self.project, 'commit', 'desc', (), options=self.options)
- self.assertEqual(ret, None)
+ self.assertIsNone(ret)
self.assertFalse(mock_check.called)
# TODO: Actually pass some valid/invalid json data down.
@@ -602,6 +791,19 @@ class BuiltinHooksTests(unittest.TestCase):
self._test_file_filter(mock_check, rh.hooks.check_pylint3,
('foo.py',))
+ def test_rustfmt(self, mock_check, _mock_run):
+ """Verify the rustfmt builtin hook."""
+ # First call should do nothing as there are no files to check.
+ ret = rh.hooks.check_rustfmt(
+ self.project, 'commit', 'desc', (), options=self.options)
+ self.assertIsNone(ret)
+ self.assertFalse(mock_check.called)
+
+ # Second call will have some results.
+ diff = [rh.git.RawDiffEntry(file='lib.rs')]
+ ret = rh.hooks.check_rustfmt(
+ self.project, 'commit', 'desc', diff, options=self.options)
+ self.assertIsNotNone(ret)
+
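
test_rustfmt exercises the contract every file-filter hook in this suite follows: an empty diff returns None without invoking the external tool, and a matching file produces results. A hedged sketch of that shape, with the filtering and result values invented for illustration:

    def check_some_tool(project, commit, desc, diff, options=None):
        # Hedged sketch of the common hook shape; diff entries expose .file.
        filtered = [entry for entry in diff if entry.file.endswith('.rs')]
        if not filtered:
            return None  # nothing to lint; the external tool never runs
        # The real hooks shell out here; we just report what would be checked.
        return [f'would format {entry.file}' for entry in filtered]
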
def test_xmllint(self, mock_check, _mock_run):
"""Verify the xmllint builtin hook."""
self._test_file_filter(mock_check, rh.hooks.check_xmllint,
@@ -621,6 +823,20 @@ class BuiltinHooksTests(unittest.TestCase):
self.project, 'commit', 'desc', diff, options=self.options)
self.assertIsNotNone(ret)
+ def test_aidl_format(self, mock_check, _mock_run):
+ """Verify the aidl_format builtin hook."""
+ # First call should do nothing as there are no files to check.
+ ret = rh.hooks.check_aidl_format(
+ self.project, 'commit', 'desc', (), options=self.options)
+ self.assertIsNone(ret)
+ self.assertFalse(mock_check.called)
+
+ # Second call will have some results.
+ diff = [rh.git.RawDiffEntry(file='IFoo.aidl')]
+ ret = rh.hooks.check_aidl_format(
+ self.project, 'commit', 'desc', diff, options=self.options)
+ self.assertIsNotNone(ret)
+
if __name__ == '__main__':
unittest.main()
diff --git a/rh/results.py b/rh/results.py
index bdac83c..bdf7626 100644
--- a/rh/results.py
+++ b/rh/results.py
@@ -1,4 +1,3 @@
-# -*- coding:utf-8 -*-
# Copyright 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,8 +14,6 @@
"""Common errors thrown when repo preupload checks fail."""
-from __future__ import print_function
-
import os
import sys
diff --git a/rh/shell.py b/rh/shell.py
index f466f63..dda3be3 100644
--- a/rh/shell.py
+++ b/rh/shell.py
@@ -1,4 +1,3 @@
-# -*- coding:utf-8 -*-
# Copyright 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,8 +14,6 @@
"""Functions for working with shell code."""
-from __future__ import print_function
-
import os
import sys
@@ -25,9 +22,6 @@ if sys.path[0] != _path:
sys.path.insert(0, _path)
del _path
-# pylint: disable=wrong-import-position
-from rh.sixish import string_types
-
# For use by ShellQuote. Match all characters that the shell might treat
# specially. This means a number of things:
@@ -156,7 +150,7 @@ def boolean_shell_value(sval, default):
if sval is None:
return default
- if isinstance(sval, string_types):
+ if isinstance(sval, str):
s = sval.lower()
if s in ('yes', 'y', '1', 'true'):
return True
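
boolean_shell_value now only needs to accept plain str, which is the point of dropping string_types. Usage is unchanged; a hedged example, assuming the falsy branch mirrors the truthy one shown above:

    import rh.shell

    assert rh.shell.boolean_shell_value('YES', default=False) is True
    assert rh.shell.boolean_shell_value(None, default=True) is True
    assert rh.shell.boolean_shell_value('0', default=True) is False  # assumed
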
diff --git a/rh/shell_unittest.py b/rh/shell_unittest.py
index 47182a5..21478cf 100755
--- a/rh/shell_unittest.py
+++ b/rh/shell_unittest.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
# Copyright 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,8 +15,6 @@
"""Unittests for the shell module."""
-from __future__ import print_function
-
import difflib
import os
import sys
diff --git a/rh/signals.py b/rh/signals.py
index 45d4e8a..c8a8d81 100644
--- a/rh/signals.py
+++ b/rh/signals.py
@@ -1,4 +1,3 @@
-# -*- coding:utf-8 -*-
# Copyright 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,8 +14,6 @@
"""Signal related functionality."""
-from __future__ import print_function
-
import os
import signal
import sys
diff --git a/rh/sixish.py b/rh/sixish.py
deleted file mode 100644
index 693598c..0000000
--- a/rh/sixish.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# -*- coding:utf-8 -*-
-# Copyright 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Local version of the standard six module.
-
-Since this module is often unavailable by default on distros (or only available
-for specific versions), manage our own little copy.
-"""
-
-from __future__ import print_function
-
-import os
-import sys
-
-_path = os.path.realpath(__file__ + '/../..')
-if sys.path[0] != _path:
- sys.path.insert(0, _path)
-del _path
-
-
-# Our attempts to wrap things below for diff versions of python confuse pylint.
-# pylint: disable=import-error,no-name-in-module,unused-import
-
-
-try:
- import configparser
-except ImportError:
- import ConfigParser as configparser
-
-
-# We allow mock to be disabled as it's not needed by non-unittest code.
-try:
- import unittest.mock as mock
-except ImportError:
- try:
- import mock
- except ImportError:
- pass
-
-
-if sys.version_info.major < 3:
- # pylint: disable=basestring-builtin,undefined-variable
- string_types = basestring
-else:
- string_types = str
-
-
-def setenv(var, val):
- """Set |var| in the environment to |val|.
-
- Python 2 wants ASCII strings, not unicode.
- Python 3 only wants unicode strings.
- """
- try:
- os.environ[var] = val
- except UnicodeEncodeError:
- os.environ[var] = val.encode('utf-8')
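
With rh/sixish.py deleted, each former consumer switches to the stdlib directly, as the other hunks in this change do. The whole shim collapses to these Python 3 idioms:

    import configparser               # was: from rh.sixish import configparser
    import os
    from unittest import mock         # was: from rh.sixish import mock

    assert isinstance('value', str)   # string_types is just str now
    os.environ['VAR'] = 'value'       # setenv() wrapper is unnecessary on py3
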
diff --git a/rh/terminal.py b/rh/terminal.py
index c549f12..39c96ac 100644
--- a/rh/terminal.py
+++ b/rh/terminal.py
@@ -1,4 +1,3 @@
-# -*- coding:utf-8 -*-
# Copyright 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -18,8 +17,6 @@
This module handles terminal interaction including ANSI color codes.
"""
-from __future__ import print_function
-
import os
import sys
diff --git a/rh/utils.py b/rh/utils.py
index 53c40b7..aeab52f 100644
--- a/rh/utils.py
+++ b/rh/utils.py
@@ -1,4 +1,3 @@
-# -*- coding:utf-8 -*-
# Copyright 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,8 +14,6 @@
"""Various utility functions."""
-from __future__ import print_function
-
import errno
import functools
import os
@@ -34,7 +31,6 @@ del _path
# pylint: disable=wrong-import-position
import rh.shell
import rh.signals
-from rh.sixish import string_types
def timedelta_str(delta):
@@ -70,7 +66,7 @@ class CompletedProcess(getattr(subprocess, 'CompletedProcess', object)):
self.stderr = stderr
self.returncode = returncode
else:
- super(CompletedProcess, self).__init__(
+ super().__init__(
args=args, returncode=returncode, stdout=stdout, stderr=stderr)
@property
@@ -103,7 +99,7 @@ class CalledProcessError(subprocess.CalledProcessError):
raise TypeError('exception must be an exception instance; got %r'
% (exception,))
- super(CalledProcessError, self).__init__(returncode, cmd, stdout)
+ super().__init__(returncode, cmd, stdout)
# The parent class will set |output|, so delete it.
del self.output
# TODO(vapier): When we're Python 3-only, delete this assignment as the
@@ -152,57 +148,6 @@ class TerminateCalledProcessError(CalledProcessError):
"""
-def sudo_run(cmd, user='root', **kwargs):
- """Run a command via sudo.
-
- Client code must use this rather than coming up with their own RunCommand
- invocation that jams sudo in- this function is used to enforce certain
- rules in our code about sudo usage, and as a potential auditing point.
-
- Args:
- cmd: The command to run. See RunCommand for rules of this argument-
- SudoRunCommand purely prefixes it with sudo.
- user: The user to run the command as.
- kwargs: See RunCommand options, it's a direct pass thru to it.
- Note that this supports a 'strict' keyword that defaults to True.
- If set to False, it'll suppress strict sudo behavior.
-
- Returns:
- See RunCommand documentation.
-
- Raises:
- This function may immediately raise CalledProcessError if we're operating
- in a strict sudo context and the API is being misused.
- Barring that, see RunCommand's documentation- it can raise the same things
- RunCommand does.
- """
- # We don't use this anywhere, so it's easier to not bother supporting it.
- assert not isinstance(cmd, string_types), 'shell commands not supported'
- assert 'shell' not in kwargs, 'shell=True is not supported'
-
- sudo_cmd = ['sudo']
-
- if user == 'root' and os.geteuid() == 0:
- return run(cmd, **kwargs)
-
- if user != 'root':
- sudo_cmd += ['-u', user]
-
- # Pass these values down into the sudo environment, since sudo will
- # just strip them normally.
- extra_env = kwargs.pop('extra_env', None)
- extra_env = {} if extra_env is None else extra_env.copy()
-
- sudo_cmd.extend('%s=%s' % (k, v) for k, v in extra_env.items())
-
- # Finally, block people from passing options to sudo.
- sudo_cmd.append('--')
-
- sudo_cmd.extend(cmd)
-
- return run(sudo_cmd, **kwargs)
-
-
def _kill_child_process(proc, int_timeout, kill_timeout, cmd, original_handler,
signum, frame):
"""Used as a signal handler by RunCommand.
@@ -238,12 +183,8 @@ def _kill_child_process(proc, int_timeout, kill_timeout, cmd, original_handler,
print('Ignoring unhandled exception in _kill_child_process: %s' % e,
file=sys.stderr)
- # Ensure our child process has been reaped.
- kwargs = {}
- if sys.version_info.major >= 3:
- # ... but don't wait forever.
- kwargs['timeout'] = 60
- proc.wait_lock_breaker(**kwargs)
+ # Ensure our child process has been reaped, but don't wait forever.
+ proc.wait_lock_breaker(timeout=60)
if not rh.signals.relay_signal(original_handler, signum, frame):
# Mock up our own, matching exit code for signaling.
@@ -277,21 +218,7 @@ class _Popen(subprocess.Popen):
try:
os.kill(self.pid, signum)
except EnvironmentError as e:
- if e.errno == errno.EPERM:
- # Kill returns either 0 (signal delivered), or 1 (signal wasn't
- # delivered). This isn't particularly informative, but we still
- # need that info to decide what to do, thus check=False.
- ret = sudo_run(['kill', '-%i' % signum, str(self.pid)],
- redirect_stdout=True,
- redirect_stderr=True, check=False)
- if ret.returncode == 1:
- # The kill binary doesn't distinguish between permission
- # denied and the pid is missing. Denied can only occur
- # under weird grsec/selinux policies. We ignore that
- # potential and just assume the pid was already dead and
- # try to reap it.
- self.poll()
- elif e.errno == errno.ESRCH:
+ if e.errno == errno.ESRCH:
# Since we know the process is dead, reap it now.
# Normally Popen would throw this error- we suppress it since
# frankly that's a misfeature and we're already overriding
@@ -369,8 +296,8 @@ def run(cmd, redirect_stdout=False, redirect_stderr=False, cwd=None, input=None,
redirect_stdout, redirect_stderr = True, True
# Set default for variables.
- stdout = None
- stderr = None
+ popen_stdout = None
+ popen_stderr = None
stdin = None
result = CompletedProcess()
@@ -379,13 +306,8 @@ def run(cmd, redirect_stdout=False, redirect_stderr=False, cwd=None, input=None,
kill_timeout = float(kill_timeout)
def _get_tempfile():
- kwargs = {}
- if sys.version_info.major < 3:
- kwargs['bufsize'] = 0
- else:
- kwargs['buffering'] = 0
try:
- return tempfile.TemporaryFile(**kwargs)
+ return tempfile.TemporaryFile(buffering=0)
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
@@ -394,7 +316,7 @@ def run(cmd, redirect_stdout=False, redirect_stderr=False, cwd=None, input=None,
# issue in this particular case since our usage guarantees deletion,
# and since this is primarily triggered during hard cgroups
# shutdown.
- return tempfile.TemporaryFile(dir='/tmp', **kwargs)
+ return tempfile.TemporaryFile(dir='/tmp', buffering=0)
# Modify defaults based on parameters.
# Note that tempfiles must be unbuffered else attempts to read
@@ -403,30 +325,30 @@ def run(cmd, redirect_stdout=False, redirect_stderr=False, cwd=None, input=None,
# The Popen API accepts either an int or a file handle for stdout/stderr.
# pylint: disable=redefined-variable-type
if redirect_stdout:
- stdout = _get_tempfile()
+ popen_stdout = _get_tempfile()
if combine_stdout_stderr:
- stderr = subprocess.STDOUT
+ popen_stderr = subprocess.STDOUT
elif redirect_stderr:
- stderr = _get_tempfile()
+ popen_stderr = _get_tempfile()
# pylint: enable=redefined-variable-type
# If subprocesses have direct access to stdout or stderr, they can bypass
# our buffers, so we need to flush to ensure that output is not interleaved.
- if stdout is None or stderr is None:
+ if popen_stdout is None or popen_stderr is None:
sys.stdout.flush()
sys.stderr.flush()
# If input is a string, we'll create a pipe and send it through that.
# Otherwise we assume it's a file object that can be read from directly.
- if isinstance(input, string_types):
+ if isinstance(input, str):
stdin = subprocess.PIPE
input = input.encode('utf-8')
elif input is not None:
stdin = input
input = None
- if isinstance(cmd, string_types):
+ if isinstance(cmd, str):
if not shell:
raise Exception('Cannot run a string command without a shell')
cmd = ['/bin/bash', '-c', cmd]
@@ -439,12 +361,18 @@ def run(cmd, redirect_stdout=False, redirect_stderr=False, cwd=None, input=None,
env = env.copy() if env is not None else os.environ.copy()
env.update(extra_env if extra_env else {})
+ def ensure_text(s):
+ """Make sure |s| is a string if it's bytes."""
+ if isinstance(s, bytes):
+ s = s.decode('utf-8', 'replace')
+ return s
+
result.args = cmd
proc = None
try:
- proc = _Popen(cmd, cwd=cwd, stdin=stdin, stdout=stdout,
- stderr=stderr, shell=False, env=env,
+ proc = _Popen(cmd, cwd=cwd, stdin=stdin, stdout=popen_stdout,
+ stderr=popen_stderr, shell=False, env=env,
close_fds=close_fds)
old_sigint = signal.getsignal(signal.SIGINT)
@@ -463,19 +391,19 @@ def run(cmd, redirect_stdout=False, redirect_stderr=False, cwd=None, input=None,
signal.signal(signal.SIGINT, old_sigint)
signal.signal(signal.SIGTERM, old_sigterm)
- if stdout:
+ if popen_stdout:
# The linter is confused by how stdout is a file & an int.
# pylint: disable=maybe-no-member,no-member
- stdout.seek(0)
- result.stdout = stdout.read()
- stdout.close()
+ popen_stdout.seek(0)
+ result.stdout = popen_stdout.read()
+ popen_stdout.close()
- if stderr and stderr != subprocess.STDOUT:
+ if popen_stderr and popen_stderr != subprocess.STDOUT:
# The linter is confused by how stderr is a file & an int.
# pylint: disable=maybe-no-member,no-member
- stderr.seek(0)
- result.stderr = stderr.read()
- stderr.close()
+ popen_stderr.seek(0)
+ result.stderr = popen_stderr.read()
+ popen_stderr.close()
result.returncode = proc.returncode
@@ -484,9 +412,16 @@ def run(cmd, redirect_stdout=False, redirect_stderr=False, cwd=None, input=None,
if extra_env:
msg += ', extra env=%s' % extra_env
raise CalledProcessError(
- result.returncode, result.cmd, stdout=result.stdout,
- stderr=result.stderr, msg=msg)
+ result.returncode, result.cmd, msg=msg,
+ stdout=ensure_text(result.stdout),
+ stderr=ensure_text(result.stderr))
except OSError as e:
+ # Avoid leaking tempfiles.
+ if popen_stdout is not None and not isinstance(popen_stdout, int):
+ popen_stdout.close()
+ if popen_stderr is not None and not isinstance(popen_stderr, int):
+ popen_stderr.close()
+
estr = str(e)
if e.errno == errno.EACCES:
estr += '; does the program need `chmod a+x`?'
@@ -494,8 +429,9 @@ def run(cmd, redirect_stdout=False, redirect_stderr=False, cwd=None, input=None,
result = CompletedProcess(args=cmd, stderr=estr, returncode=255)
else:
raise CalledProcessError(
- result.returncode, result.cmd, stdout=result.stdout,
- stderr=result.stderr, msg=estr, exception=e)
+ result.returncode, result.cmd, msg=estr, exception=e,
+ stdout=ensure_text(result.stdout),
+ stderr=ensure_text(result.stderr)) from e
finally:
if proc is not None:
# Ensure the process is dead.
@@ -505,10 +441,8 @@ def run(cmd, redirect_stdout=False, redirect_stderr=False, cwd=None, input=None,
None, None)
# Make sure output is returned as a string rather than bytes.
- if result.stdout is not None:
- result.stdout = result.stdout.decode('utf-8', 'replace')
- if result.stderr is not None:
- result.stderr = result.stderr.decode('utf-8', 'replace')
+ result.stdout = ensure_text(result.stdout)
+ result.stderr = ensure_text(result.stderr)
return result
# pylint: enable=redefined-builtin,input-builtin
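
The net effect of the utils.py changes above is that every stdout/stderr value leaving run() is text, whether it came from a tempfile, an exception path, or the OSError fallback, with ensure_text as the single conversion point. Its behavior, restated as a quick self-check (the function body is copied from the hunk above; the asserts are illustrative):

    def ensure_text(s):
        if isinstance(s, bytes):
            s = s.decode('utf-8', 'replace')
        return s

    assert ensure_text(b'caf\xc3\xa9') == 'café'
    assert ensure_text('ok') == 'ok'
    assert ensure_text(None) is None
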
diff --git a/rh/utils_unittest.py b/rh/utils_unittest.py
index 8f50998..ea2ddaa 100755
--- a/rh/utils_unittest.py
+++ b/rh/utils_unittest.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
# Copyright 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,8 +15,6 @@
"""Unittests for the utils module."""
-from __future__ import print_function
-
import datetime
import os
import sys
@@ -33,7 +30,6 @@ del _path
# pylint: disable=wrong-import-position
import rh
import rh.utils
-from rh.sixish import mock
class TimeDeltaStrTests(unittest.TestCase):
@@ -131,49 +127,6 @@ class CalledProcessErrorTests(unittest.TestCase):
self.assertNotEqual('', repr(err))
-# We shouldn't require sudo to run unittests :).
-@mock.patch.object(rh.utils, 'run')
-@mock.patch.object(os, 'geteuid', return_value=1000)
-class SudoRunCommandTests(unittest.TestCase):
- """Verify behavior of sudo_run helper."""
-
- def test_run_as_root_as_root(self, mock_geteuid, mock_run):
- """Check behavior when we're already root."""
- mock_geteuid.return_value = 0
- ret = rh.utils.sudo_run(['ls'], user='root')
- self.assertIsNotNone(ret)
- args, _kwargs = mock_run.call_args
- self.assertEqual((['ls'],), args)
-
- def test_run_as_root_as_nonroot(self, _mock_geteuid, mock_run):
- """Check behavior when we're not already root."""
- ret = rh.utils.sudo_run(['ls'], user='root')
- self.assertIsNotNone(ret)
- args, _kwargs = mock_run.call_args
- self.assertEqual((['sudo', '--', 'ls'],), args)
-
- def test_run_as_nonroot_as_nonroot(self, _mock_geteuid, mock_run):
- """Check behavior when we're not already root."""
- ret = rh.utils.sudo_run(['ls'], user='nobody')
- self.assertIsNotNone(ret)
- args, _kwargs = mock_run.call_args
- self.assertEqual((['sudo', '-u', 'nobody', '--', 'ls'],), args)
-
- def test_env(self, _mock_geteuid, mock_run):
- """Check passing through env vars."""
- ret = rh.utils.sudo_run(['ls'], extra_env={'FOO': 'bar'})
- self.assertIsNotNone(ret)
- args, _kwargs = mock_run.call_args
- self.assertEqual((['sudo', 'FOO=bar', '--', 'ls'],), args)
-
- def test_shell(self, _mock_geteuid, _mock_run):
- """Check attempts to use shell code are rejected."""
- with self.assertRaises(AssertionError):
- rh.utils.sudo_run('foo')
- with self.assertRaises(AssertionError):
- rh.utils.sudo_run(['ls'], shell=True)
-
-
class RunCommandTests(unittest.TestCase):
"""Verify behavior of run helper."""
@@ -208,6 +161,60 @@ class RunCommandTests(unittest.TestCase):
self.assertEqual(u'ß', ret.stdout)
self.assertIsNone(ret.stderr)
+ def test_check_false(self):
+ """Verify handling of check=False."""
+ ret = rh.utils.run(['false'], check=False)
+ self.assertNotEqual(0, ret.returncode)
+ self.assertIn('false', str(ret))
+
+ ret = rh.utils.run(['true'], check=False)
+ self.assertEqual(0, ret.returncode)
+ self.assertIn('true', str(ret))
+
+ def test_check_true(self):
+ """Verify handling of check=True."""
+ with self.assertRaises(rh.utils.CalledProcessError) as e:
+ rh.utils.run(['false'], check=True)
+ err = e.exception
+ self.assertNotEqual(0, err.returncode)
+ self.assertIn('false', str(err))
+
+ ret = rh.utils.run(['true'], check=True)
+ self.assertEqual(0, ret.returncode)
+ self.assertIn('true', str(ret))
+
+ def test_check_true_output(self):
+ """Verify handling of output capturing w/check=True."""
+ with self.assertRaises(rh.utils.CalledProcessError) as e:
+ rh.utils.run(['sh', '-c', 'echo out; echo err >&2; false'],
+ check=True, capture_output=True)
+ err = e.exception
+ self.assertNotEqual(0, err.returncode)
+ self.assertIn('false', str(err))
+
+ def test_check_true_missing_prog_output(self):
+ """Verify handling of output capturing w/missing progs."""
+ with self.assertRaises(rh.utils.CalledProcessError) as e:
+ rh.utils.run(['./!~a/b/c/d/'], check=True, capture_output=True)
+ err = e.exception
+ self.assertNotEqual(0, err.returncode)
+ self.assertIn('a/b/c/d', str(err))
+
+ def test_check_false_missing_prog_output(self):
+ """Verify handling of output capturing w/missing progs."""
+ ret = rh.utils.run(['./!~a/b/c/d/'], check=False, capture_output=True)
+ self.assertNotEqual(0, ret.returncode)
+ self.assertIn('a/b/c/d', str(ret))
+
+ def test_check_true_missing_prog_combined_output(self):
+ """Verify handling of combined output capturing w/missing progs."""
+ with self.assertRaises(rh.utils.CalledProcessError) as e:
+ rh.utils.run(['./!~a/b/c/d/'], check=True,
+ combine_stdout_stderr=True)
+ err = e.exception
+ self.assertNotEqual(0, err.returncode)
+ self.assertIn('a/b/c/d', str(err))
+
if __name__ == '__main__':
unittest.main()
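
Taken together, the new tests fix the run() contract: check=False returns a CompletedProcess with a nonzero returncode, check=True raises CalledProcessError, and capture_output fills stdout/stderr either way. Typical caller-side usage would then look like this (hedged example, not taken from the tree):

    import sys
    import rh.utils

    ret = rh.utils.run(['ls', '/nonexistent'], check=False, capture_output=True)
    if ret.returncode:
        print(f'ls failed: {ret.stderr}', file=sys.stderr)
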
diff --git a/tools/android_test_mapping_format.py b/tools/android_test_mapping_format.py
index 14b02f5..ae784cf 100755
--- a/tools/android_test_mapping_format.py
+++ b/tools/android_test_mapping_format.py
@@ -1,5 +1,4 @@
-#!/usr/bin/python
-# -*- coding:utf-8 -*-
+#!/usr/bin/env python3
# Copyright 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Validate TEST_MAPPING files in Android source code.
+"""Validates TEST_MAPPING files in Android source code.
The goal of this script is to validate the format of TEST_MAPPING files:
1. It must be a valid json file.
@@ -23,13 +22,12 @@ The goal of this script is to validate the format of TEST_MAPPING files:
import TEST_MAPPING files.
"""
-from __future__ import print_function
-
import argparse
import json
import os
import re
import sys
+from typing import Any, Dict
_path = os.path.realpath(__file__ + '/../..')
if sys.path[0] != _path:
@@ -41,14 +39,16 @@ del _path
# pylint: disable=wrong-import-position
import rh.git
-IMPORTS = 'imports'
-NAME = 'name'
-OPTIONS = 'options'
-PATH = 'path'
-HOST = 'host'
-PREFERRED_TARGETS = 'preferred_targets'
-FILE_PATTERNS = 'file_patterns'
-TEST_MAPPING_URL = (
+_IMPORTS = 'imports'
+_NAME = 'name'
+_OPTIONS = 'options'
+_PATH = 'path'
+_HOST = 'host'
+_PREFERRED_TARGETS = 'preferred_targets'
+_FILE_PATTERNS = 'file_patterns'
+_INVALID_IMPORT_CONFIG = 'Invalid import config in TEST_MAPPING file'
+_INVALID_TEST_CONFIG = 'Invalid test config in TEST_MAPPING file'
+_TEST_MAPPING_URL = (
'https://source.android.com/compatibility/tests/development/'
'test-mapping')
@@ -56,13 +56,6 @@ TEST_MAPPING_URL = (
_COMMENTS_RE = re.compile(r'^\s*//')
-if sys.version_info.major < 3:
- # pylint: disable=basestring-builtin,undefined-variable
- string_types = basestring
-else:
- string_types = str
-
-
class Error(Exception):
"""Base exception for all custom exceptions in this module."""
@@ -71,8 +64,8 @@ class InvalidTestMappingError(Error):
"""Exception to raise when detecting an invalid TEST_MAPPING file."""
-def filter_comments(json_data):
- """Remove '//'-format comments in TEST_MAPPING file to valid format.
+def _filter_comments(json_data: str) -> str:
+ """Removes '//'-format comments in TEST_MAPPING file to valid format.
Args:
json_data: TEST_MAPPING file content (as a string).
@@ -80,12 +73,12 @@ def filter_comments(json_data):
Returns:
Valid json string without comments.
"""
- return ''.join('\n' if _COMMENTS_RE.match(x) else x for x in
- json_data.splitlines())
+ return ''.join(
+ '\n' if _COMMENTS_RE.match(x) else x for x in json_data.splitlines())
-def _validate_import(entry, test_mapping_file):
- """Validate an import setting.
+def _validate_import(entry: Dict[str, Any], test_mapping_file: str):
+ """Validates an import setting.
Args:
entry: A dictionary of an import setting.
@@ -96,85 +89,84 @@ def _validate_import(entry, test_mapping_file):
"""
if len(entry) != 1:
raise InvalidTestMappingError(
- 'Invalid import config in test mapping file %s. each import can '
- 'only have one `path` setting. Failed entry: %s' %
- (test_mapping_file, entry))
- if list(entry.keys())[0] != PATH:
+ f'{_INVALID_IMPORT_CONFIG} {test_mapping_file}. Each import can '
+ f'only have one `path` setting. Failed entry: {entry}')
+ if _PATH not in entry:
raise InvalidTestMappingError(
- 'Invalid import config in test mapping file %s. import can only '
- 'have one `path` setting. Failed entry: %s' %
- (test_mapping_file, entry))
+ f'{_INVALID_IMPORT_CONFIG} {test_mapping_file}. Import must '
+ f'have a `path` setting. Failed entry: {entry}')
-def _validate_test(test, test_mapping_file):
- """Validate a test declaration.
+def _validate_test(test: Dict[str, Any], test_mapping_file: str) -> None:
+ """Validates a test declaration.
Args:
- entry: A dictionary of a test declaration.
+ test: A dictionary of a test declaration.
test_mapping_file: Path to the TEST_MAPPING file to be validated.
Raises:
InvalidTestMappingError: if the test declaration is invalid.
"""
- if NAME not in test:
+ if _NAME not in test:
raise InvalidTestMappingError(
- 'Invalid test config in test mapping file %s. test config must '
- 'a `name` setting. Failed test config: %s' %
- (test_mapping_file, test))
- if not isinstance(test.get(HOST, False), bool):
- raise InvalidTestMappingError(
- 'Invalid test config in test mapping file %s. `host` setting in '
- 'test config can only have boolean value of `true` or `false`. '
- 'Failed test config: %s' % (test_mapping_file, test))
- preferred_targets = test.get(PREFERRED_TARGETS, [])
- if (not isinstance(preferred_targets, list) or
- any(not isinstance(t, string_types) for t in preferred_targets)):
- raise InvalidTestMappingError(
- 'Invalid test config in test mapping file %s. `preferred_targets` '
- 'setting in test config can only be a list of strings. Failed test '
- 'config: %s' % (test_mapping_file, test))
- file_patterns = test.get(FILE_PATTERNS, [])
- if (not isinstance(file_patterns, list) or
- any(not isinstance(p, string_types) for p in file_patterns)):
+ f'{_INVALID_TEST_CONFIG} {test_mapping_file}. Test config must '
+ f'have a `name` setting. Failed test config: {test}')
+
+ if not isinstance(test.get(_HOST, False), bool):
raise InvalidTestMappingError(
- 'Invalid test config in test mapping file %s. `file_patterns` '
- 'setting in test config can only be a list of strings. Failed test '
- 'config: %s' % (test_mapping_file, test))
- for option in test.get(OPTIONS, []):
+ f'{_INVALID_TEST_CONFIG} {test_mapping_file}. `host` setting in '
+ f'test config can only have boolean value of `true` or `false`. '
+ f'Failed test config: {test}')
+
+ for key in (_PREFERRED_TARGETS, _FILE_PATTERNS):
+ value = test.get(key, [])
+ if (not isinstance(value, list) or
+ any(not isinstance(t, str) for t in value)):
+ raise InvalidTestMappingError(
+ f'{_INVALID_TEST_CONFIG} {test_mapping_file}. `{key}` setting '
+ f'in test config can only be a list of strings. '
+ f'Failed test config: {test}')
+
+ for option in test.get(_OPTIONS, []):
+ if not isinstance(option, dict):
+ raise InvalidTestMappingError(
+ f'{_INVALID_TEST_CONFIG} {test_mapping_file}. Option setting '
+ f'in test config can only be a dictionary of key-val setting. '
+ f'Failed entry: {option}')
if len(option) != 1:
raise InvalidTestMappingError(
- 'Invalid option setting in test mapping file %s. each option '
- 'setting can only have one key-val setting. Failed entry: %s' %
- (test_mapping_file, option))
+ f'{_INVALID_TEST_CONFIG} {test_mapping_file}. Each option '
+ f'setting can only have one key-val setting. '
+ f'Failed entry: {option}')
-def _load_file(test_mapping_file):
- """Load a TEST_MAPPING file as a json file."""
+def process_file(test_mapping_file: str):
+ """Validates a TEST_MAPPING file content."""
try:
- return json.loads(filter_comments(test_mapping_file))
- except ValueError as e:
+ test_mapping_data = json.loads(_filter_comments(test_mapping_file))
+ except ValueError as exception:
# The file is not a valid JSON file.
print(
- 'Failed to parse JSON file %s, error: %s' % (test_mapping_file, e),
+ 'Invalid JSON data in TEST_MAPPING file. '
+ f'Failed to parse JSON data: {test_mapping_file}, '
+ f'error: {exception}',
file=sys.stderr)
raise
-
-def process_file(test_mapping_file):
- """Validate a TEST_MAPPING file."""
- test_mapping = _load_file(test_mapping_file)
- # Validate imports.
- for import_entry in test_mapping.get(IMPORTS, []):
- _validate_import(import_entry, test_mapping_file)
- # Validate tests.
- all_tests = [test for group, tests in test_mapping.items()
- if group != IMPORTS for test in tests]
- for test in all_tests:
- _validate_test(test, test_mapping_file)
+ for group, value in test_mapping_data.items():
+ if group == _IMPORTS:
+ # Validate imports.
+ for test in value:
+ _validate_import(test, test_mapping_file)
+ else:
+ # Validate tests.
+ for test in value:
+ _validate_test(test, test_mapping_file)
def get_parser():
- """Return a command line parser."""
+ """Returns a command line parser."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--commit', type=str,
help='Specify the commit to validate.')
@@ -184,6 +176,7 @@ def get_parser():
def main(argv):
+ """Main function."""
parser = get_parser()
opts = parser.parse_args(argv)
try:
@@ -191,12 +184,12 @@ def main(argv):
if opts.commit:
json_data = rh.git.get_file_content(opts.commit, filename)
else:
- with open(os.path.join(opts.project_dir, filename)) as f:
- json_data = f.read()
+ with open(os.path.join(opts.project_dir, filename)) as file:
+ json_data = file.read()
process_file(json_data)
except:
print('Visit %s for details about the format of TEST_MAPPING '
- 'file.' % TEST_MAPPING_URL, file=sys.stderr)
+ 'files.' % _TEST_MAPPING_URL, file=sys.stderr)
raise
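
For reference, here is a minimal TEST_MAPPING body that satisfies every validator above; it is a constructed example, not a file from this change:

    import android_test_mapping_format

    SAMPLE = '''
    // full-line comments in //-form are stripped before JSON parsing
    {
      "presubmit": [
        {
          "name": "CtsExampleTestCases",
          "host": false,
          "preferred_targets": ["aosp_arm64"],
          "file_patterns": [".*\\\\.java"]
        }
      ],
      "imports": [
        {"path": "frameworks/base"}
      ]
    }
    '''
    android_test_mapping_format.process_file(SAMPLE)  # raises on invalid input
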
diff --git a/tools/android_test_mapping_format_unittest.py b/tools/android_test_mapping_format_unittest.py
index 9191a25..14bae32 100755
--- a/tools/android_test_mapping_format_unittest.py
+++ b/tools/android_test_mapping_format_unittest.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
# Copyright 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,6 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+"""Unittests for android_test_mapping_format."""
+
import os
import shutil
import tempfile
@@ -22,7 +23,7 @@ import unittest
import android_test_mapping_format
-VALID_TEST_MAPPING = r"""
+_VALID_TEST_MAPPING = r"""
{
"presubmit": [
{
@@ -53,11 +54,11 @@ VALID_TEST_MAPPING = r"""
}
"""
-BAD_JSON = """
+_BAD_JSON = """
{wrong format}
"""
-BAD_TEST_WRONG_KEY = """
+_BAD_TEST_WRONG_KEY = """
{
"presubmit": [
{
@@ -67,7 +68,7 @@ BAD_TEST_WRONG_KEY = """
}
"""
-BAD_TEST_WRONG_HOST_VALUE = """
+_BAD_TEST_WRONG_HOST_VALUE = """
{
"presubmit": [
{
@@ -79,7 +80,7 @@ BAD_TEST_WRONG_HOST_VALUE = """
"""
-BAD_TEST_WRONG_PREFERRED_TARGETS_VALUE_NONE_LIST = """
+_BAD_TEST_WRONG_PREFERRED_TARGETS_VALUE_NONE_LIST = """
{
"presubmit": [
{
@@ -90,7 +91,7 @@ BAD_TEST_WRONG_PREFERRED_TARGETS_VALUE_NONE_LIST = """
}
"""
-BAD_TEST_WRONG_PREFERRED_TARGETS_VALUE_WRONG_TYPE = """
+_BAD_TEST_WRONG_PREFERRED_TARGETS_VALUE_WRONG_TYPE = """
{
"presubmit": [
{
@@ -101,7 +102,7 @@ BAD_TEST_WRONG_PREFERRED_TARGETS_VALUE_WRONG_TYPE = """
}
"""
-BAD_TEST_WRONG_OPTION = """
+_BAD_TEST_WRONG_OPTION = """
{
"presubmit": [
{
@@ -117,7 +118,7 @@ BAD_TEST_WRONG_OPTION = """
}
"""
-BAD_IMPORT_WRONG_KEY = """
+_BAD_IMPORT_WRONG_KEY = """
{
"imports": [
{
@@ -127,7 +128,7 @@ BAD_IMPORT_WRONG_KEY = """
}
"""
-BAD_IMPORT_WRONG_IMPORT_VALUE = """
+_BAD_IMPORT_WRONG_IMPORT_VALUE = """
{
"imports": [
{
@@ -138,7 +139,7 @@ BAD_IMPORT_WRONG_IMPORT_VALUE = """
}
"""
-BAD_FILE_PATTERNS = """
+_BAD_FILE_PATTERNS = """
{
"presubmit": [
{
@@ -149,7 +150,7 @@ BAD_FILE_PATTERNS = """
}
"""
-TEST_MAPPING_WITH_SUPPORTED_COMMENTS = r"""
+_TEST_MAPPING_WITH_SUPPORTED_COMMENTS = r"""
// supported comment
{
// supported comment!@#$%^&*()_
@@ -172,7 +173,7 @@ TEST_MAPPING_WITH_SUPPORTED_COMMENTS = r"""
}
"""
-TEST_MAPPING_WITH_NON_SUPPORTED_COMMENTS = """
+_TEST_MAPPING_WITH_NON_SUPPORTED_COMMENTS = """
{ #non-supported comments
// supported comments
"presubmit": [#non-supported comments
@@ -197,112 +198,112 @@ class AndroidTestMappingFormatTests(unittest.TestCase):
def test_valid_test_mapping(self):
"""Verify that the check doesn't raise any error for valid test mapping.
"""
- with open(self.test_mapping_file, 'w') as f:
- f.write(VALID_TEST_MAPPING)
- with open(self.test_mapping_file, 'r') as f:
- android_test_mapping_format.process_file(f.read())
+ with open(self.test_mapping_file, 'w') as file:
+ file.write(_VALID_TEST_MAPPING)
+ with open(self.test_mapping_file, 'r') as file:
+ android_test_mapping_format.process_file(file.read())
def test_invalid_test_mapping_bad_json(self):
"""Verify that TEST_MAPPING file with bad json can be detected."""
- with open(self.test_mapping_file, 'w') as f:
- f.write(BAD_JSON)
- with open(self.test_mapping_file, 'r') as f:
+ with open(self.test_mapping_file, 'w') as file:
+ file.write(_BAD_JSON)
+ with open(self.test_mapping_file, 'r') as file:
self.assertRaises(
ValueError, android_test_mapping_format.process_file,
- f.read())
+ file.read())
def test_invalid_test_mapping_wrong_test_key(self):
"""Verify that test config using wrong key can be detected."""
- with open(self.test_mapping_file, 'w') as f:
- f.write(BAD_TEST_WRONG_KEY)
- with open(self.test_mapping_file, 'r') as f:
+ with open(self.test_mapping_file, 'w') as file:
+ file.write(_BAD_TEST_WRONG_KEY)
+ with open(self.test_mapping_file, 'r') as file:
self.assertRaises(
android_test_mapping_format.InvalidTestMappingError,
android_test_mapping_format.process_file,
- f.read())
+ file.read())
def test_invalid_test_mapping_wrong_test_value(self):
"""Verify that test config using wrong host value can be detected."""
- with open(self.test_mapping_file, 'w') as f:
- f.write(BAD_TEST_WRONG_HOST_VALUE)
- with open(self.test_mapping_file, 'r') as f:
+ with open(self.test_mapping_file, 'w') as file:
+ file.write(_BAD_TEST_WRONG_HOST_VALUE)
+ with open(self.test_mapping_file, 'r') as file:
self.assertRaises(
android_test_mapping_format.InvalidTestMappingError,
android_test_mapping_format.process_file,
- f.read())
+ file.read())
def test_invalid_test_mapping_wrong_preferred_targets_value(self):
"""Verify invalid preferred_targets are rejected."""
- with open(self.test_mapping_file, 'w') as f:
- f.write(BAD_TEST_WRONG_PREFERRED_TARGETS_VALUE_NONE_LIST)
- with open(self.test_mapping_file, 'r') as f:
+ with open(self.test_mapping_file, 'w') as file:
+ file.write(_BAD_TEST_WRONG_PREFERRED_TARGETS_VALUE_NONE_LIST)
+ with open(self.test_mapping_file, 'r') as file:
self.assertRaises(
android_test_mapping_format.InvalidTestMappingError,
android_test_mapping_format.process_file,
- f.read())
- with open(self.test_mapping_file, 'w') as f:
- f.write(BAD_TEST_WRONG_PREFERRED_TARGETS_VALUE_WRONG_TYPE)
- with open(self.test_mapping_file, 'r') as f:
+ file.read())
+ with open(self.test_mapping_file, 'w') as file:
+ file.write(_BAD_TEST_WRONG_PREFERRED_TARGETS_VALUE_WRONG_TYPE)
+ with open(self.test_mapping_file, 'r') as file:
self.assertRaises(
android_test_mapping_format.InvalidTestMappingError,
android_test_mapping_format.process_file,
- f.read())
+ file.read())
def test_invalid_test_mapping_wrong_test_option(self):
"""Verify that test config using wrong option can be detected."""
- with open(self.test_mapping_file, 'w') as f:
- f.write(BAD_TEST_WRONG_OPTION)
- with open(self.test_mapping_file, 'r') as f:
+ with open(self.test_mapping_file, 'w') as file:
+ file.write(_BAD_TEST_WRONG_OPTION)
+ with open(self.test_mapping_file, 'r') as file:
self.assertRaises(
android_test_mapping_format.InvalidTestMappingError,
android_test_mapping_format.process_file,
- f.read())
+ file.read())
def test_invalid_test_mapping_wrong_import_key(self):
"""Verify that import setting using wrong key can be detected."""
- with open(self.test_mapping_file, 'w') as f:
- f.write(BAD_IMPORT_WRONG_KEY)
- with open(self.test_mapping_file, 'r') as f:
+ with open(self.test_mapping_file, 'w') as file:
+ file.write(_BAD_IMPORT_WRONG_KEY)
+ with open(self.test_mapping_file, 'r') as file:
self.assertRaises(
android_test_mapping_format.InvalidTestMappingError,
android_test_mapping_format.process_file,
- f.read())
+ file.read())
def test_invalid_test_mapping_wrong_import_value(self):
"""Verify that import setting using wrong value can be detected."""
- with open(self.test_mapping_file, 'w') as f:
- f.write(BAD_IMPORT_WRONG_IMPORT_VALUE)
- with open(self.test_mapping_file, 'r') as f:
+ with open(self.test_mapping_file, 'w') as file:
+ file.write(_BAD_IMPORT_WRONG_IMPORT_VALUE)
+ with open(self.test_mapping_file, 'r') as file:
self.assertRaises(
android_test_mapping_format.InvalidTestMappingError,
android_test_mapping_format.process_file,
- f.read())
+ file.read())
def test_invalid_test_mapping_file_patterns_value(self):
"""Verify that file_patterns using wrong value can be detected."""
- with open(self.test_mapping_file, 'w') as f:
- f.write(BAD_FILE_PATTERNS)
- with open(self.test_mapping_file, 'r') as f:
+ with open(self.test_mapping_file, 'w') as file:
+ file.write(_BAD_FILE_PATTERNS)
+ with open(self.test_mapping_file, 'r') as file:
self.assertRaises(
android_test_mapping_format.InvalidTestMappingError,
android_test_mapping_format.process_file,
- f.read())
+ file.read())
def test_valid_test_mapping_file_with_supported_comments(self):
"""Verify that '//'-format comment can be filtered."""
- with open(self.test_mapping_file, 'w') as f:
- f.write(TEST_MAPPING_WITH_SUPPORTED_COMMENTS)
- with open(self.test_mapping_file, 'r') as f:
- android_test_mapping_format.process_file(f.read())
+ with open(self.test_mapping_file, 'w') as file:
+ file.write(_TEST_MAPPING_WITH_SUPPORTED_COMMENTS)
+ with open(self.test_mapping_file, 'r') as file:
+ android_test_mapping_format.process_file(file.read())
def test_valid_test_mapping_file_with_non_supported_comments(self):
"""Verify that non-supported comment can be detected."""
- with open(self.test_mapping_file, 'w') as f:
- f.write(TEST_MAPPING_WITH_NON_SUPPORTED_COMMENTS)
- with open(self.test_mapping_file, 'r') as f:
+ with open(self.test_mapping_file, 'w') as file:
+ file.write(_TEST_MAPPING_WITH_NON_SUPPORTED_COMMENTS)
+ with open(self.test_mapping_file, 'r') as file:
self.assertRaises(
ValueError, android_test_mapping_format.process_file,
- f.read())
+ file.read())
if __name__ == '__main__':
diff --git a/tools/clang-format.py b/tools/clang-format.py
index 7fa5b10..2533b15 100755
--- a/tools/clang-format.py
+++ b/tools/clang-format.py
@@ -1,5 +1,4 @@
-#!/usr/bin/python
-# -*- coding:utf-8 -*-
+#!/usr/bin/env python3
# Copyright 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,8 +15,6 @@
"""Wrapper to run git-clang-format and parse its output."""
-from __future__ import print_function
-
import argparse
import os
import sys
diff --git a/tools/cpplint.py b/tools/cpplint.py
index c08cf6f..c5db879 100755
--- a/tools/cpplint.py
+++ b/tools/cpplint.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# pylint: skip-file
#
# Copyright (c) 2009 Google Inc. All rights reserved.
@@ -45,24 +45,49 @@ same line, but it is far from perfect (in either direction).
import codecs
import copy
import getopt
+import glob
+import itertools
import math # for log
import os
import re
import sre_compile
import string
import sys
+import sysconfig
import unicodedata
+import xml.etree.ElementTree
+
+# if empty, use defaults
+_valid_extensions = set([])
+
+__VERSION__ = '1.5.5'
+
+try:
+ xrange # Python 2
+except NameError:
+ # -- pylint: disable=redefined-builtin
+ xrange = range # Python 3
_USAGE = """
-Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
+Syntax: cpplint.py [--verbose=#] [--output=emacs|eclipse|vs7|junit|sed|gsed]
+ [--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
+ [--repository=path]
[--linelength=digits] [--headers=x,y,...]
+ [--recursive]
+ [--exclude=path]
+ [--extensions=hpp,cpp,...]
+ [--includeorder=default|standardcfirst]
[--quiet]
+ [--version]
<file> [file] ...
+ Style checker for C/C++ source files.
+ This is a fork of the Google style checker with minor extensions.
+
The style guidelines this tries to follow are those in
- https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
+ https://google.github.io/styleguide/cppguide.html
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
@@ -73,17 +98,27 @@ Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
- Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the
- extensions with the --extensions flag.
+ Default linted extensions are %s.
+ Other file types will be ignored.
+ Change the extensions with the --extensions flag.
Flags:
- output=vs7
+ output=emacs|eclipse|vs7|junit|sed|gsed
By default, the output is formatted to ease emacs parsing. Visual Studio
- compatible output (vs7) may also be used. Other formats are unsupported.
+ compatible output (vs7) may also be used. Further support exists for
+ eclipse (eclipse), and JUnit (junit). XML parsers such as those used
+ in Jenkins and Bamboo may also be used.
+ The sed format outputs sed commands that should fix some of the errors.
+ Note that this requires gnu sed. If that is installed as gsed on your
+ system (common e.g. on macOS with homebrew) you can use the gsed output
+ format. Sed commands are written to stdout, not stderr, so you should be
+ able to pipe output straight to a shell to run the fixes.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
+ Errors with lower verbosity levels have lower confidence and are more
+ likely to be false positives.
quiet
Don't print anything if no errors are found.
@@ -93,11 +128,11 @@ Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
- "-FOO" and "FOO" means "do not print categories that start with FOO".
+ "-FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
- --filter=whitespace,runtime/printf,+runtime/printf_format
+ --filter=-whitespace,-runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
@@ -110,17 +145,41 @@ Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
+ repository=path
+ The top level directory of the repository, used to derive the header
+ guard CPP variable. By default, this is determined by searching for a
+ path that contains .git, .hg, or .svn. When this flag is specified, the
+ given path is used instead. This option allows the header guard CPP
+ variable to remain consistent even if members of a team have different
+ repository root directories (such as when checking out a subdirectory
+ with SVN). In addition, users of non-mainstream version control systems
+ can use this flag to ensure readable header guard CPP variables.
+
+ Examples:
+ Assuming that Alice checks out ProjectName and Bob checks out
+ ProjectName/trunk and trunk contains src/chrome/ui/browser.h, then
+ with no --repository flag, the header guard CPP variable will be:
+
+ Alice => TRUNK_SRC_CHROME_BROWSER_UI_BROWSER_H_
+ Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_
+
+ If Alice uses the --repository=trunk flag and Bob omits the flag or
+ uses --repository=. then the header guard CPP variable will be:
+
+ Alice => SRC_CHROME_BROWSER_UI_BROWSER_H_
+ Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_
+
root=subdir
The root directory used for deriving header guard CPP variable.
- By default, the header guard CPP variable is calculated as the relative
- path to the directory that contains .git, .hg, or .svn. When this flag
- is specified, the relative path is calculated from the specified
- directory. If the specified directory does not exist, this flag is
- ignored.
+ This directory is relative to the top level directory of the repository
+ which by default is determined by searching for a directory that contains
+ .git, .hg, or .svn but can also be controlled with the --repository flag.
+ If the specified directory does not exist, this flag is ignored.
Examples:
- Assuming that top/src/.git exists (and cwd=top/src), the header guard
- CPP variables for top/src/chrome/browser/ui/browser.h are:
+ Assuming that src is the top level directory of the repository (and
+ cwd=top/src), the header guard CPP variables for
+ src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
@@ -134,17 +193,45 @@ Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
Examples:
--linelength=120
+ recursive
+ Search for files to lint recursively. Each directory given in the list
+ of files to be linted is replaced by all files that descend from that
+ directory. Files with extensions not in the valid extensions list are
+ excluded.
+
+ exclude=path
+ Exclude the given path from the list of files to be linted. Relative
+ paths are evaluated relative to the current directory and shell globbing
+ is performed. This flag can be provided multiple times to exclude
+ multiple files.
+
+ Examples:
+ --exclude=one.cc
+ --exclude=src/*.cc
+ --exclude=src/*.cc --exclude=test/*.cc
+
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
- --extensions=hpp,cpp
+ --extensions=%s
+
+ includeorder=default|standardcfirst
+ For the build/include_order rule, the default is to blindly assume angle
+ bracket includes with file extension are c-system-headers (default),
+ even knowing this will have false classifications.
+ The default is established at google.
+ standardcfirst means to instead use an allow-list of known c headers and
+ treat all others as a separate group of "other system headers". The C headers
+ included are those of the C-standard lib and closely related ones.
headers=x,y,...
The header extensions that cpplint will treat as .h in checks. Values are
automatically added to --extensions list.
+ (by default, only files with extensions %s will be assumed to be headers)
Examples:
+ --headers=%s
--headers=hpp,hxx
--headers=hpp
@@ -169,7 +256,7 @@ Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
"exclude_files" allows to specify a regular expression to be matched against
a file name. If the expression matches, the file is skipped and not run
- through liner.
+ through the linter.
"linelength" allows to specify the allowed line length for the project.
@@ -184,7 +271,7 @@ Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
Example file:
filter=-build/include_order,+build/include_alpha
- exclude_files=.*\.cc
+ exclude_files=.*\\.cc
The above example disables build/include_order warning and enables
build/include_alpha as well as excludes all .cc from being
@@ -207,9 +294,12 @@ _ERROR_CATEGORIES = [
'build/forward_decl',
'build/header_guard',
'build/include',
+ 'build/include_subdir',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
+ 'build/namespaces_headers',
+ 'build/namespaces_literals',
'build/namespaces',
'build/printf_format',
'build/storage_class',
@@ -265,6 +355,13 @@ _ERROR_CATEGORIES = [
'whitespace/todo',
]
+# keywords to use with --outputs which generate stdout for machine processing
+_MACHINE_OUTPUTS = [
+ 'junit',
+ 'sed',
+ 'gsed'
+]
+
# These error categories are no longer enforced by cpplint, but for backwards-
# compatibility they may still appear in NOLINT comments.
_LEGACY_ERROR_CATEGORIES = [
@@ -400,6 +497,18 @@ _CPP_HEADERS = frozenset([
'utility',
'valarray',
'vector',
+ # 17.6.1.2 C++14 headers
+ 'shared_mutex',
+ # 17.6.1.2 C++17 headers
+ 'any',
+ 'charconv',
+ 'codecvt',
+ 'execution',
+ 'filesystem',
+ 'memory_resource',
+ 'optional',
+ 'string_view',
+ 'variant',
# 17.6.1.2 C++ headers for C library facilities
'cassert',
'ccomplex',
@@ -429,6 +538,186 @@ _CPP_HEADERS = frozenset([
'cwctype',
])
+# C headers
+_C_HEADERS = frozenset([
+ # System C headers
+ 'assert.h',
+ 'complex.h',
+ 'ctype.h',
+ 'errno.h',
+ 'fenv.h',
+ 'float.h',
+ 'inttypes.h',
+ 'iso646.h',
+ 'limits.h',
+ 'locale.h',
+ 'math.h',
+ 'setjmp.h',
+ 'signal.h',
+ 'stdalign.h',
+ 'stdarg.h',
+ 'stdatomic.h',
+ 'stdbool.h',
+ 'stddef.h',
+ 'stdint.h',
+ 'stdio.h',
+ 'stdlib.h',
+ 'stdnoreturn.h',
+ 'string.h',
+ 'tgmath.h',
+ 'threads.h',
+ 'time.h',
+ 'uchar.h',
+ 'wchar.h',
+ 'wctype.h',
+ # additional POSIX C headers
+ 'aio.h',
+ 'arpa/inet.h',
+ 'cpio.h',
+ 'dirent.h',
+ 'dlfcn.h',
+ 'fcntl.h',
+ 'fmtmsg.h',
+ 'fnmatch.h',
+ 'ftw.h',
+ 'glob.h',
+ 'grp.h',
+ 'iconv.h',
+ 'langinfo.h',
+ 'libgen.h',
+ 'monetary.h',
+ 'mqueue.h',
+ 'ndbm.h',
+ 'net/if.h',
+ 'netdb.h',
+ 'netinet/in.h',
+ 'netinet/tcp.h',
+ 'nl_types.h',
+ 'poll.h',
+ 'pthread.h',
+ 'pwd.h',
+ 'regex.h',
+ 'sched.h',
+ 'search.h',
+ 'semaphore.h',
+ 'setjmp.h',
+ 'signal.h',
+ 'spawn.h',
+ 'strings.h',
+ 'stropts.h',
+ 'syslog.h',
+ 'tar.h',
+ 'termios.h',
+ 'trace.h',
+ 'ulimit.h',
+ 'unistd.h',
+ 'utime.h',
+ 'utmpx.h',
+ 'wordexp.h',
+ # additional GNUlib headers
+ 'a.out.h',
+ 'aliases.h',
+ 'alloca.h',
+ 'ar.h',
+ 'argp.h',
+ 'argz.h',
+ 'byteswap.h',
+ 'crypt.h',
+ 'endian.h',
+ 'envz.h',
+ 'err.h',
+ 'error.h',
+ 'execinfo.h',
+ 'fpu_control.h',
+ 'fstab.h',
+ 'fts.h',
+ 'getopt.h',
+ 'gshadow.h',
+ 'ieee754.h',
+ 'ifaddrs.h',
+ 'libintl.h',
+ 'mcheck.h',
+ 'mntent.h',
+ 'obstack.h',
+ 'paths.h',
+ 'printf.h',
+ 'pty.h',
+ 'resolv.h',
+ 'shadow.h',
+ 'sysexits.h',
+ 'ttyent.h',
+ # Additional linux glibc headers
+ 'dlfcn.h',
+ 'elf.h',
+ 'features.h',
+ 'gconv.h',
+ 'gnu-versions.h',
+ 'lastlog.h',
+ 'libio.h',
+ 'link.h',
+ 'malloc.h',
+ 'memory.h',
+ 'netash/ash.h',
+ 'netatalk/at.h',
+ 'netax25/ax25.h',
+ 'neteconet/ec.h',
+ 'netipx/ipx.h',
+ 'netiucv/iucv.h',
+ 'netpacket/packet.h',
+ 'netrom/netrom.h',
+ 'netrose/rose.h',
+ 'nfs/nfs.h',
+ 'nl_types.h',
+ 'nss.h',
+ 're_comp.h',
+ 'regexp.h',
+ 'sched.h',
+ 'sgtty.h',
+ 'stab.h',
+ 'stdc-predef.h',
+ 'stdio_ext.h',
+ 'syscall.h',
+ 'termio.h',
+ 'thread_db.h',
+ 'ucontext.h',
+ 'ustat.h',
+ 'utmp.h',
+ 'values.h',
+ 'wait.h',
+ 'xlocale.h',
+ # Hardware specific headers
+ 'arm_neon.h',
+ 'emmintrin.h',
+ 'xmmintin.h',
+ ])
+
+# Folders of C libraries so commonly used in C++,
+# that they have parity with standard C libraries.
+C_STANDARD_HEADER_FOLDERS = frozenset([
+ # standard C library
+ "sys",
+ # glibc for linux
+ "arpa",
+ "asm-generic",
+ "bits",
+ "gnu",
+ "net",
+ "netinet",
+ "protocols",
+ "rpc",
+ "rpcsvc",
+ "scsi",
+ # linux kernel header
+ "drm",
+ "linux",
+ "misc",
+ "mtd",
+ "rdma",
+ "sound",
+ "video",
+ "xen",
+ ])
+
# Type names
_TYPES = re.compile(
r'^(?:'
@@ -452,7 +741,8 @@ _THIRD_PARTY_HEADERS_PATTERN = re.compile(
r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
# Pattern for matching FileInfo.BaseName() against test file name
-_TEST_FILE_SUFFIX = r'(_test|_unittest|_regtest)$'
+_test_suffixes = ['_test', '_regtest', '_unittest']
+_TEST_FILE_SUFFIX = '(' + '|'.join(_test_suffixes) + r')$'
# Pattern that matches only complete whitespace, possibly across multiple lines.
_EMPTY_CONDITIONAL_BODY_PATTERN = re.compile(r'^\s*$', re.DOTALL)
@@ -466,7 +756,7 @@ _CHECK_MACROS = [
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
-_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
+_CHECK_REPLACEMENT = dict([(macro_var, {}) for macro_var in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
@@ -514,9 +804,10 @@ _ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
-_LIKELY_MY_HEADER = 3
-_POSSIBLE_MY_HEADER = 4
-_OTHER_HEADER = 5
+_OTHER_SYS_HEADER = 3
+_LIKELY_MY_HEADER = 4
+_POSSIBLE_MY_HEADER = 5
+_OTHER_HEADER = 6
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
@@ -536,6 +827,22 @@ _SEARCH_C_FILE = re.compile(r'\b(?:LINT_C_FILE|'
# Match string that indicates we're working on a Linux Kernel file.
_SEARCH_KERNEL_FILE = re.compile(r'\b(?:LINT_KERNEL_FILE)')
+# sed substitutions that can fix some of the problems cpplint reports
+_SED_FIXUPS = {
+ 'Remove spaces around =': r's/ = /=/',
+ 'Remove spaces around !=': r's/ != /!=/',
+ 'Remove space before ( in if (': r's/if (/if(/',
+ 'Remove space before ( in for (': r's/for (/for(/',
+ 'Remove space before ( in while (': r's/while (/while(/',
+ 'Remove space before ( in switch (': r's/switch (/switch(/',
+ 'Should have a space between // and comment': r's/\/\//\/\/ /',
+ 'Missing space before {': r's/\([^ ]\){/\1 {/',
+ 'Tab found, replace by spaces': r's/\t/ /g',
+ 'Line ends in whitespace. Consider deleting these extra spaces.': r's/\s*$//',
+ 'You don\'t need a ; after a }': r's/};/}/',
+ 'Missing space after ,': r's/,\([^ ]\)/, \1/g',
+}
+
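
With --output=sed (or gsed), each fixable warning is emitted as a
ready-to-run sed command; Error() further down builds it from this table. An
illustration with hypothetical values:

    # Hypothetical inputs; Error() does this for real below.
    message, filename, linenum = 'Missing space after ,', 'foo.cc', 42
    if message in _SED_FIXUPS:
        print("sed -i '%s%s' %s" % (linenum, _SED_FIXUPS[message], filename))
    # -> sed -i '42s/,\([^ ]\)/, \1/g' foo.cc
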
_regexp_compile_cache = {}
# {str, set(int)}: a map from error categories to sets of linenumbers
@@ -547,17 +854,55 @@ _error_suppressions = {}
_root = None
_root_debug = False
+# The top level repository directory. If set, _root is calculated relative to
+# this directory instead of the directory containing version control artifacts.
+# This is set by the --repository flag.
+_repository = None
+
+# Files to exclude from linting. This is set by the --exclude flag.
+_excludes = None
+
+# Whether to suppress all PrintInfo messages, UNRELATED to --quiet flag
+_quiet = False
+
# The allowed line length of files.
# This is set by --linelength flag.
_line_length = 80
-# The allowed extensions for file names
-# This is set by --extensions flag.
-_valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh'])
+# This allows using a different include order rule than the default
+_include_order = "default"
+
+try:
+ unicode
+except NameError:
+ # -- pylint: disable=redefined-builtin
+ basestring = unicode = str
+
+try:
+ long
+except NameError:
+ # -- pylint: disable=redefined-builtin
+ long = int
+
+if sys.version_info < (3,):
+ # -- pylint: disable=no-member
+ # BINARY_TYPE = str
+ itervalues = dict.itervalues
+ iteritems = dict.iteritems
+else:
+ # BINARY_TYPE = bytes
+ itervalues = dict.values
+ iteritems = dict.items
+
+def unicode_escape_decode(x):
+ if sys.version_info < (3,):
+ return codecs.unicode_escape_decode(x)[0]
+ else:
+ return x
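
A quick sanity check of these compatibility shims as they behave on Python 3
(illustrative only):

    assert unicode is str and long is int
    assert list(itervalues({'a': 1})) == [1]
    assert unicode_escape_decode('\u00e9') == '\u00e9'  # no-op on Python 3
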
# Treat all headers starting with 'h' equally: .h, .hpp, .hxx etc.
# This is set by --headers flag.
-_hpp_headers = set(['h'])
+_hpp_headers = set([])
# {str, bool}: a map from error categories to booleans which indicate if the
# category should be suppressed for every line.
@@ -566,14 +911,47 @@ _global_error_suppressions = {}
def ProcessHppHeadersOption(val):
global _hpp_headers
try:
- _hpp_headers = set(val.split(','))
- # Automatically append to extensions list so it does not have to be set 2 times
- _valid_extensions.update(_hpp_headers)
+ _hpp_headers = {ext.strip() for ext in val.split(',')}
except ValueError:
- PrintUsage('Header extensions must be comma seperated list.')
+    PrintUsage('Header extensions must be a comma-separated list.')
+
+def ProcessIncludeOrderOption(val):
+ if val is None or val == "default":
+ pass
+ elif val == "standardcfirst":
+ global _include_order
+ _include_order = val
+ else:
+    PrintUsage('Invalid includeorder value %s. Expected default|standardcfirst'
+               % val)
def IsHeaderExtension(file_extension):
- return file_extension in _hpp_headers
+ return file_extension in GetHeaderExtensions()
+
+def GetHeaderExtensions():
+ if _hpp_headers:
+ return _hpp_headers
+ if _valid_extensions:
+ return {h for h in _valid_extensions if 'h' in h}
+ return set(['h', 'hh', 'hpp', 'hxx', 'h++', 'cuh'])
+
+# The allowed extensions for file names
+# This is set by --extensions flag
+def GetAllExtensions():
+ return GetHeaderExtensions().union(_valid_extensions or set(
+ ['c', 'cc', 'cpp', 'cxx', 'c++', 'cu']))
+
+def ProcessExtensionsOption(val):
+ global _valid_extensions
+ try:
+ extensions = [ext.strip() for ext in val.split(',')]
+ _valid_extensions = set(extensions)
+ except ValueError:
+    PrintUsage('Extensions should be a comma-separated list of values; '
+ 'for example: extensions=hpp,cpp\n'
+ 'This could not be parsed: "%s"' % (val,))
+
+def GetNonHeaderExtensions():
+ return GetAllExtensions().difference(GetHeaderExtensions())
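
With neither --headers nor --extensions set, the three helpers fall back to
built-in defaults; a sketch of the resulting relationships:

    # Defaults: headers = {'h', 'hh', 'hpp', 'hxx', 'h++', 'cuh'},
    # sources  = {'c', 'cc', 'cpp', 'cxx', 'c++', 'cu'}.
    assert 'hpp' in GetHeaderExtensions()
    assert 'cc' in GetNonHeaderExtensions()
    assert GetAllExtensions() == GetHeaderExtensions() | GetNonHeaderExtensions()
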
def ParseNolintSuppressions(filename, raw_line, linenum, error):
"""Updates the global list of line error-suppressions.
@@ -686,7 +1064,7 @@ def Search(pattern, s):
def _IsSourceExtension(s):
"""File extension (excluding dot) matches a source file extension."""
- return s in ('c', 'cc', 'cpp', 'cxx')
+ return s in GetNonHeaderExtensions()
class _IncludeState(object):
@@ -707,11 +1085,13 @@ class _IncludeState(object):
_MY_H_SECTION = 1
_C_SECTION = 2
_CPP_SECTION = 3
- _OTHER_H_SECTION = 4
+ _OTHER_SYS_SECTION = 4
+ _OTHER_H_SECTION = 5
_TYPE_NAMES = {
_C_SYS_HEADER: 'C system header',
_CPP_SYS_HEADER: 'C++ system header',
+ _OTHER_SYS_HEADER: 'other system header',
_LIKELY_MY_HEADER: 'header this file implements',
_POSSIBLE_MY_HEADER: 'header this file may implement',
_OTHER_HEADER: 'other header',
@@ -721,11 +1101,14 @@ class _IncludeState(object):
_MY_H_SECTION: 'a header this file implements',
_C_SECTION: 'C system header',
_CPP_SECTION: 'C++ system header',
+ _OTHER_SYS_SECTION: 'other system header',
_OTHER_H_SECTION: 'other header',
}
def __init__(self):
self.include_list = [[]]
+ self._section = None
+ self._last_header = None
self.ResetSection('')
def FindHeader(self, header):
@@ -832,6 +1215,12 @@ class _IncludeState(object):
else:
self._last_header = ''
return error_message
+ elif header_type == _OTHER_SYS_HEADER:
+ if self._section <= self._OTHER_SYS_SECTION:
+ self._section = self._OTHER_SYS_SECTION
+ else:
+ self._last_header = ''
+ return error_message
elif header_type == _LIKELY_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
@@ -870,9 +1259,18 @@ class _CppLintState(object):
# output format:
# "emacs" - format that emacs can parse (default)
+ # "eclipse" - format that eclipse can parse
# "vs7" - format that Microsoft Visual Studio 7 can parse
+ # "junit" - format that Jenkins, Bamboo, etc can parse
+ # "sed" - returns a gnu sed command to fix the problem
+ # "gsed" - like sed, but names the command gsed, e.g. for macOS homebrew users
self.output_format = 'emacs'
+ # For JUnit output, save errors and failures until the end so that they
+ # can be written into the XML
+ self._junit_errors = []
+ self._junit_failures = []
+
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
@@ -947,10 +1345,71 @@ class _CppLintState(object):
def PrintErrorCounts(self):
"""Print a summary of errors by category, and the total."""
- for category, count in self.errors_by_category.iteritems():
- sys.stderr.write('Category \'%s\' errors found: %d\n' %
+ for category, count in sorted(iteritems(self.errors_by_category)):
+ self.PrintInfo('Category \'%s\' errors found: %d\n' %
(category, count))
- sys.stdout.write('Total errors found: %d\n' % self.error_count)
+ if self.error_count > 0:
+ self.PrintInfo('Total errors found: %d\n' % self.error_count)
+
+ def PrintInfo(self, message):
+    # _quiet is independent of the --quiet flag.
+    # Hide info messages from stdout to keep it pure for machine consumption
+ if not _quiet and self.output_format not in _MACHINE_OUTPUTS:
+ sys.stdout.write(message)
+
+ def PrintError(self, message):
+ if self.output_format == 'junit':
+ self._junit_errors.append(message)
+ else:
+ sys.stderr.write(message)
+
+ def AddJUnitFailure(self, filename, linenum, message, category, confidence):
+ self._junit_failures.append((filename, linenum, message, category,
+ confidence))
+
+ def FormatJUnitXML(self):
+ num_errors = len(self._junit_errors)
+ num_failures = len(self._junit_failures)
+
+ testsuite = xml.etree.ElementTree.Element('testsuite')
+ testsuite.attrib['errors'] = str(num_errors)
+ testsuite.attrib['failures'] = str(num_failures)
+ testsuite.attrib['name'] = 'cpplint'
+
+ if num_errors == 0 and num_failures == 0:
+ testsuite.attrib['tests'] = str(1)
+ xml.etree.ElementTree.SubElement(testsuite, 'testcase', name='passed')
+
+ else:
+ testsuite.attrib['tests'] = str(num_errors + num_failures)
+ if num_errors > 0:
+ testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
+ testcase.attrib['name'] = 'errors'
+ error = xml.etree.ElementTree.SubElement(testcase, 'error')
+ error.text = '\n'.join(self._junit_errors)
+ if num_failures > 0:
+ # Group failures by file
+ failed_file_order = []
+ failures_by_file = {}
+ for failure in self._junit_failures:
+ failed_file = failure[0]
+ if failed_file not in failed_file_order:
+ failed_file_order.append(failed_file)
+ failures_by_file[failed_file] = []
+ failures_by_file[failed_file].append(failure)
+ # Create a testcase for each file
+ for failed_file in failed_file_order:
+ failures = failures_by_file[failed_file]
+ testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
+ testcase.attrib['name'] = failed_file
+ failure = xml.etree.ElementTree.SubElement(testcase, 'failure')
+ template = '{0}: {1} [{2}] [{3}]'
+ texts = [template.format(f[1], f[2], f[3], f[4]) for f in failures]
+ failure.text = '\n'.join(texts)
+
+ xml_decl = '<?xml version="1.0" encoding="UTF-8" ?>\n'
+ return xml_decl + xml.etree.ElementTree.tostring(testsuite, 'utf-8').decode('utf-8')
+
_cpplint_state = _CppLintState()
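
For reference, feeding a single failure through the state yields XML shaped
roughly like this (hypothetical values, derived from FormatJUnitXML above):

    _cpplint_state.AddJUnitFailure('foo.cc', 42, 'Missing space after ,',
                                   'whitespace/comma', 3)
    print(_cpplint_state.FormatJUnitXML())
    # <?xml version="1.0" encoding="UTF-8" ?>
    # <testsuite errors="0" failures="1" name="cpplint" tests="1">
    #   <testcase name="foo.cc">
    #     <failure>42: Missing space after , [whitespace/comma] [3]</failure>
    #   </testcase></testsuite>
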
@@ -1104,12 +1563,12 @@ class FileInfo(object):
return os.path.abspath(self._filename).replace('\\', '/')
def RepositoryName(self):
- """FullName after removing the local path to the repository.
+ r"""FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
- "C:\Documents and Settings\..." or "/home/username/..." in them and thus
+ "C:\\Documents and Settings\\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
@@ -1118,6 +1577,20 @@ class FileInfo(object):
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
+ # If the user specified a repository path, it exists, and the file is
+ # contained in it, use the specified repository path
+ if _repository:
+ repo = FileInfo(_repository).FullName()
+ root_dir = project_dir
+ while os.path.exists(root_dir):
+ # allow case insensitive compare on Windows
+ if os.path.normcase(root_dir) == os.path.normcase(repo):
+ return os.path.relpath(fullname, root_dir).replace('\\', '/')
+ one_up_dir = os.path.dirname(root_dir)
+ if one_up_dir == root_dir:
+ break
+ root_dir = one_up_dir
+
if os.path.exists(os.path.join(project_dir, ".svn")):
# If there's a .svn file in the current directory, we recursively look
# up the directory tree for the top of the SVN checkout
@@ -1168,7 +1641,7 @@ class FileInfo(object):
return self.Split()[1]
def Extension(self):
- """File extension - text following the final period."""
+ """File extension - text following the final period, includes that period."""
return self.Split()[2]
def NoExtension(self):
@@ -1233,15 +1706,25 @@ def Error(filename, linenum, category, confidence, message):
if _ShouldPrintError(category, confidence, linenum):
_cpplint_state.IncrementErrorCount(category)
if _cpplint_state.output_format == 'vs7':
- sys.stderr.write('%s(%s): error cpplint: [%s] %s [%d]\n' % (
+ _cpplint_state.PrintError('%s(%s): error cpplint: [%s] %s [%d]\n' % (
filename, linenum, category, message, confidence))
elif _cpplint_state.output_format == 'eclipse':
sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
+ elif _cpplint_state.output_format == 'junit':
+ _cpplint_state.AddJUnitFailure(filename, linenum, message, category,
+ confidence)
+ elif _cpplint_state.output_format in ['sed', 'gsed']:
+ if message in _SED_FIXUPS:
+ sys.stdout.write(_cpplint_state.output_format + " -i '%s%s' %s # %s [%s] [%d]\n" % (
+ linenum, _SED_FIXUPS[message], filename, message, category, confidence))
+ else:
+ sys.stderr.write('# %s:%s: "%s" [%s] [%d]\n' % (
+ filename, linenum, message, category, confidence))
else:
- sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
- filename, linenum, message, category, confidence))
-
+ final_message = '%s:%s: %s [%s] [%d]\n' % (
+ filename, linenum, message, category, confidence)
+ sys.stderr.write(final_message)
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
@@ -1378,7 +1861,7 @@ def FindNextMultiLineCommentEnd(lines, lineix):
def RemoveMultiLineCommentsFromRange(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
- # Having // dummy comments makes the lines non-empty, so we will not get
+ # Having // <empty> comments makes the lines non-empty, so we will not get
# unnecessary blank line warnings later in the code.
for i in range(begin, end):
lines[i] = '/**/'
@@ -1752,7 +2235,7 @@ def CheckForCopyright(filename, lines, error):
"""Logs an error if no Copyright message appears at the top of the file."""
# We'll say it should occur by line 10. Don't forget there's a
- # dummy line at the front.
+ # placeholder line at the front.
for line in xrange(1, min(len(lines), 11)):
if re.search(r'Copyright', lines[line], re.I): break
else: # means no copyright line was found
@@ -1788,10 +2271,10 @@ def PathSplitToList(path):
lst = []
while True:
(head, tail) = os.path.split(path)
- if head == path: # absolute paths end
+ if head == path: # absolute paths end
lst.append(head)
break
- if tail == path: # relative paths end
+ if tail == path: # relative paths end
lst.append(tail)
break
@@ -1826,7 +2309,7 @@ def GetHeaderGuardCPPVariable(filename):
def FixupPathFromRoot():
if _root_debug:
sys.stderr.write("\n_root fixup, _root = '%s', repository name = '%s'\n"
- %(_root, fileinfo.RepositoryName()))
+ % (_root, fileinfo.RepositoryName()))
# Process the file path with the --root flag if it was set.
if not _root:
@@ -1847,28 +2330,29 @@ def GetHeaderGuardCPPVariable(filename):
PathSplitToList(_root))
if _root_debug:
- sys.stderr.write("_root lstrip (maybe_path=%s, file_path_from_root=%s," +
- " _root=%s)\n" %(maybe_path, file_path_from_root, _root))
+ sys.stderr.write(("_root lstrip (maybe_path=%s, file_path_from_root=%s," +
+ " _root=%s)\n") % (maybe_path, file_path_from_root, _root))
if maybe_path:
return os.path.join(*maybe_path)
# --root=.. , will prepend the outer directory to the header guard
full_path = fileinfo.FullName()
- root_abspath = os.path.abspath(_root)
+ # adapt slashes for windows
+ root_abspath = os.path.abspath(_root).replace('\\', '/')
maybe_path = StripListPrefix(PathSplitToList(full_path),
PathSplitToList(root_abspath))
if _root_debug:
- sys.stderr.write("_root prepend (maybe_path=%s, full_path=%s, " +
- "root_abspath=%s)\n" %(maybe_path, full_path, root_abspath))
+ sys.stderr.write(("_root prepend (maybe_path=%s, full_path=%s, " +
+ "root_abspath=%s)\n") % (maybe_path, full_path, root_abspath))
if maybe_path:
return os.path.join(*maybe_path)
if _root_debug:
- sys.stderr.write("_root ignore, returning %s\n" %(file_path_from_root))
+ sys.stderr.write("_root ignore, returning %s\n" % (file_path_from_root))
# --root=FAKE_DIR is ignored
return file_path_from_root
@@ -1900,6 +2384,11 @@ def CheckForHeaderGuard(filename, clean_lines, error):
if Search(r'//\s*NOLINT\(build/header_guard\)', i):
return
+ # Allow pragma once instead of header guards
+ for i in raw_lines:
+ if Search(r'^\s*#pragma\s+once', i):
+ return
+
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = ''
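
The pragma-once escape hatch above accepts any spelling the regex allows; a
few illustrative checks using only the standard re module:

    import re
    assert re.search(r'^\s*#pragma\s+once', '#pragma once')
    assert re.search(r'^\s*#pragma\s+once', '  #pragma   once')
    assert not re.search(r'^\s*#pragma\s+once', '// #pragma once')
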
@@ -1976,28 +2465,36 @@ def CheckForHeaderGuard(filename, clean_lines, error):
def CheckHeaderFileIncluded(filename, include_state, error):
- """Logs an error if a .cc file does not include its header."""
+ """Logs an error if a source file does not include its header."""
# Do not check test files
fileinfo = FileInfo(filename)
if Search(_TEST_FILE_SUFFIX, fileinfo.BaseName()):
return
- headerfile = filename[0:len(filename) - len(fileinfo.Extension())] + '.h'
- if not os.path.exists(headerfile):
- return
- headername = FileInfo(headerfile).RepositoryName()
- first_include = 0
- for section_list in include_state.include_list:
- for f in section_list:
- if headername in f[0] or f[0] in headername:
- return
- if not first_include:
- first_include = f[1]
+ for ext in GetHeaderExtensions():
+ basefilename = filename[0:len(filename) - len(fileinfo.Extension())]
+ headerfile = basefilename + '.' + ext
+ if not os.path.exists(headerfile):
+ continue
+ headername = FileInfo(headerfile).RepositoryName()
+ first_include = None
+ include_uses_unix_dir_aliases = False
+ for section_list in include_state.include_list:
+ for f in section_list:
+ include_text = f[0]
+ if "./" in include_text:
+ include_uses_unix_dir_aliases = True
+ if headername in include_text or include_text in headername:
+ return
+ if not first_include:
+ first_include = f[1]
- error(filename, first_include, 'build/include', 5,
- '%s should include its header file %s' % (fileinfo.RepositoryName(),
- headername))
+ message = '%s should include its header file %s' % (fileinfo.RepositoryName(), headername)
+ if include_uses_unix_dir_aliases:
+ message += ". Relative paths like . and .. are not allowed."
+
+ error(filename, first_include, 'build/include', 5, message)
def CheckForBadCharacters(filename, lines, error):
@@ -2018,7 +2515,7 @@ def CheckForBadCharacters(filename, lines, error):
error: The function to call with any errors found.
"""
for linenum, line in enumerate(lines):
- if u'\ufffd' in line:
+ if unicode_escape_decode('\ufffd') in line:
error(filename, linenum, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
if '\0' in line:
@@ -2647,8 +3144,8 @@ class NestingState(object):
# class LOCKABLE API Object {
# };
class_decl_match = Match(
- r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
- r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
+ r'^(\s*(?:template\s*<[\w\s<>,:=]*>\s*)?'
+ r'(class|struct)\s+(?:[a-zA-Z0-9_]+\s+)*(\w+(?:::\w+)*))'
r'(.*)$', line)
if (class_decl_match and
(not self.stack or self.stack[-1].open_parentheses == 0)):
@@ -2896,6 +3393,7 @@ def CheckForNonStandardConstructs(filename, clean_lines, linenum,
constructor_args[i] = constructor_arg
i += 1
+ variadic_args = [arg for arg in constructor_args if '&&...' in arg]
defaulted_args = [arg for arg in constructor_args if '=' in arg]
noarg_constructor = (not constructor_args or # empty arg list
# 'void' arg specifier
@@ -2906,20 +3404,24 @@ def CheckForNonStandardConstructs(filename, clean_lines, linenum,
# all but at most one arg defaulted
(len(constructor_args) >= 1 and
not noarg_constructor and
- len(defaulted_args) >= len(constructor_args) - 1))
+ len(defaulted_args) >= len(constructor_args) - 1) or
+ # variadic arguments with zero or one argument
+ (len(constructor_args) <= 2 and
+ len(variadic_args) >= 1))
initializer_list_constructor = bool(
onearg_constructor and
Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
copy_constructor = bool(
onearg_constructor and
- Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
+ Match(r'((const\s+(volatile\s+)?)?|(volatile\s+(const\s+)?))?'
+ r'%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
% re.escape(base_classname), constructor_args[0].strip()))
if (not is_marked_explicit and
onearg_constructor and
not initializer_list_constructor and
not copy_constructor):
- if defaulted_args:
+ if defaulted_args or variadic_args:
error(filename, linenum, 'runtime/explicit', 5,
'Constructors callable with one argument '
'should be marked explicit.')
@@ -2971,7 +3473,7 @@ def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
if ( # Ignore control structures.
- not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
+ not Search(r'\b(if|elif|for|while|switch|return|new|delete|catch|sizeof)\b',
fncall) and
# Ignore pointers/references to functions.
not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
@@ -3084,7 +3586,7 @@ def CheckForFunctionLengths(filename, clean_lines, linenum,
if Search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
- elif Search(r'{', start_line):
+ if Search(r'{', start_line):
body_found = True
function = Search(r'((\w|:)*)\(', line).group(1)
if Match(r'TEST', function): # Handle TEST... macros
@@ -3277,9 +3779,10 @@ def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
# get rid of comments and strings
line = clean_lines.elided[linenum]
- # You shouldn't have spaces before your brackets, except maybe after
- # 'delete []' or 'return []() {};'
- if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line):
+ # You shouldn't have spaces before your brackets, except for C++11 attributes
+ # or maybe after 'delete []', 'return []() {};', or 'auto [abc, ...] = ...;'.
+ if (Search(r'\w\s+\[(?!\[)', line) and
+ not Search(r'(?:auto&?|delete|return)\s+\[', line)):
error(filename, linenum, 'whitespace/braces', 5,
'Extra space before [')
@@ -3649,7 +4152,6 @@ def IsDecltype(clean_lines, linenum, column):
return True
return False
-
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
"""Checks for additional blank line issues related to sections.
@@ -3798,11 +4300,11 @@ def CheckBraces(filename, clean_lines, linenum, error):
# its line, and the line after that should have an indent level equal to or
# lower than the if. We also check for ambiguous if/else nesting without
# braces.
- if_else_match = Search(r'\b(if\s*\(|else\b)', line)
+ if_else_match = Search(r'\b(if\s*(|constexpr)\s*\(|else\b)', line)
if if_else_match and not Match(r'\s*#', line):
if_indent = GetIndentLevel(line)
endline, endlinenum, endpos = line, linenum, if_else_match.end()
- if_match = Search(r'\bif\s*\(', line)
+ if_match = Search(r'\bif\s*(|constexpr)\s*\(', line)
if if_match:
# This could be a multiline if condition, so find the end first.
pos = if_match.end() - 1
@@ -3861,9 +4363,9 @@ def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
# Block bodies should not be followed by a semicolon. Due to C++11
# brace initialization, there are more places where semicolons are
- # required than not, so we use a whitelist approach to check these
- # rather than a blacklist. These are the places where "};" should
- # be replaced by just "}":
+ # required than not, so we explicitly list the allowed rules rather
+ # than listing the disallowed ones. These are the places where "};"
+ # should be replaced by just "}":
# 1. Some flavor of block following closing parenthesis:
# for (;;) {};
# while (...) {};
@@ -3919,11 +4421,11 @@ def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
# - INTERFACE_DEF
# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
#
- # We implement a whitelist of safe macros instead of a blacklist of
+ # We implement a list of safe macros instead of a list of
# unsafe macros, even though the latter appears less frequently in
# google code and would have been easier to implement. This is because
- # the downside for getting the whitelist wrong means some extra
- # semicolons, while the downside for getting the blacklist wrong
+  # the downside of getting the allowed list wrong is some extra
+  # semicolons, while getting the disallowed list wrong
# would result in compile errors.
#
# In addition to macros, we also don't want to warn on
@@ -4067,12 +4569,12 @@ def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
return
if closing_linenum > opening_linenum:
# Opening line after the {. Ignore comments here since we checked above.
- body = list(opening_line[opening_pos+1:])
+ bodylist = list(opening_line[opening_pos+1:])
# All lines until closing line, excluding closing line, with comments.
- body.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])
+ bodylist.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])
# Closing line before the }. Won't (and can't) have comments.
- body.append(clean_lines.elided[closing_linenum][:closing_pos-1])
- body = '\n'.join(body)
+ bodylist.append(clean_lines.elided[closing_linenum][:closing_pos-1])
+ body = '\n'.join(bodylist)
else:
# If statement has brackets and fits on a single line.
body = opening_line[opening_pos+1:closing_pos-1]
@@ -4287,6 +4789,16 @@ def GetLineWidth(line):
if unicodedata.east_asian_width(uc) in ('W', 'F'):
width += 2
elif not unicodedata.combining(uc):
+ # Issue 337
+ # https://mail.python.org/pipermail/python-list/2012-August/628809.html
+ if (sys.version_info.major, sys.version_info.minor) <= (3, 2):
+ # https://github.com/python/cpython/blob/2.7/Include/unicodeobject.h#L81
+ is_wide_build = sysconfig.get_config_var("Py_UNICODE_SIZE") >= 4
+ # https://github.com/python/cpython/blob/2.7/Objects/unicodeobject.c#L564
+ is_low_surrogate = 0xDC00 <= ord(uc) <= 0xDFFF
+ if not is_wide_build and is_low_surrogate:
+ width -= 1
+
width += 1
return width
else:
@@ -4334,7 +4846,7 @@ def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
- scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
+ scope_or_label_pattern = r'\s*(?:public|private|protected|signals)(?:\s+(?:slots\s*)?)?:\s*\\?$'
classinfo = nesting_state.InnermostClass()
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
@@ -4374,16 +4886,23 @@ def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
#
# The "$Id:...$" comment may also get very long without it being the
# developers fault.
+ #
+ # Doxygen documentation copying can get pretty long when using an overloaded
+ # function declaration
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line) and
not Match(r'^\s*//\s*[^\s]*$', line) and
- not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
+ not Match(r'^// \$Id:.*#[0-9]+ \$$', line) and
+ not Match(r'^\s*/// [@\\](copydoc|copydetails|copybrief) .*$', line)):
line_width = GetLineWidth(line)
if line_width > _line_length:
error(filename, linenum, 'whitespace/line_length', 2,
'Lines should be <= %i characters long' % _line_length)
if (cleansed_line.count(';') > 1 and
+ # allow simple single line lambdas
+ not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}\n\r]*\}',
+ line) and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
@@ -4440,21 +4959,25 @@ def _DropCommonSuffixes(filename):
Returns:
The filename with the common suffix removed.
"""
- for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
- 'inl.h', 'impl.h', 'internal.h'):
+ for suffix in itertools.chain(
+ ('%s.%s' % (test_suffix.lstrip('_'), ext)
+ for test_suffix, ext in itertools.product(_test_suffixes, GetNonHeaderExtensions())),
+ ('%s.%s' % (suffix, ext)
+ for suffix, ext in itertools.product(['inl', 'imp', 'internal'], GetHeaderExtensions()))):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
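
Illustrative inputs and outputs for the widened suffix handling, assuming the
default extension settings:

    assert _DropCommonSuffixes('foo/foo_unittest.cc') == 'foo/foo'
    assert _DropCommonSuffixes('foo/foo-inl.h') == 'foo/foo'
    assert _DropCommonSuffixes('foo/bar.cc') == 'foo/bar'  # no suffix to drop
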
-def _ClassifyInclude(fileinfo, include, is_system):
+def _ClassifyInclude(fileinfo, include, used_angle_brackets, include_order="default"):
"""Figures out what kind of header 'include' is.
Args:
fileinfo: The current file cpplint is running over. A FileInfo instance.
include: The path to a #included file.
- is_system: True if the #include used <> rather than "".
+ used_angle_brackets: True if the #include used <> rather than "".
+ include_order: "default" or other value allowed in program arguments
Returns:
One of the _XXX_HEADER constants.
@@ -4464,6 +4987,8 @@ def _ClassifyInclude(fileinfo, include, is_system):
_C_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
_CPP_SYS_HEADER
+ >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', True, "standardcfirst")
+ _OTHER_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
_LIKELY_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
@@ -4474,13 +4999,23 @@ def _ClassifyInclude(fileinfo, include, is_system):
"""
# This is a list of all standard c++ header files, except
# those already checked for above.
- is_cpp_h = include in _CPP_HEADERS
+ is_cpp_header = include in _CPP_HEADERS
+
+ # Mark include as C header if in list or in a known folder for standard-ish C headers.
+ is_std_c_header = (include_order == "default") or (include in _C_HEADERS
+ # additional linux glibc header folders
+ or Search(r'(?:%s)\/.*\.h' % "|".join(C_STANDARD_HEADER_FOLDERS), include))
+
+ # Headers with C++ extensions shouldn't be considered C system headers
+ is_system = used_angle_brackets and not os.path.splitext(include)[1] in ['.hpp', '.hxx', '.h++']
if is_system:
- if is_cpp_h:
+ if is_cpp_header:
return _CPP_SYS_HEADER
- else:
+ if is_std_c_header:
return _C_SYS_HEADER
+ else:
+ return _OTHER_SYS_HEADER
# If the target file and the include we're checking share a
# basename when we drop common extensions, and the include
@@ -4488,9 +5023,11 @@ def _ClassifyInclude(fileinfo, include, is_system):
target_dir, target_base = (
os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
+ target_dir_pub = os.path.normpath(target_dir + '/../public')
+ target_dir_pub = target_dir_pub.replace('\\', '/')
if target_base == include_base and (
include_dir == target_dir or
- include_dir == os.path.normpath(target_dir + '/../public')):
+ include_dir == target_dir_pub):
return _LIKELY_MY_HEADER
# If the target and include share some initial basename
@@ -4534,7 +5071,7 @@ def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
# naming convention but not the include convention.
match = Match(r'#include\s*"([^/]+\.h)"', line)
if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
- error(filename, linenum, 'build/include', 4,
+ error(filename, linenum, 'build/include_subdir', 4,
'Include the directory when naming .h files')
# we shouldn't include a file more than once. actually, there are a
@@ -4543,17 +5080,34 @@ def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
match = _RE_PATTERN_INCLUDE.search(line)
if match:
include = match.group(2)
- is_system = (match.group(1) == '<')
+ used_angle_brackets = (match.group(1) == '<')
duplicate_line = include_state.FindHeader(include)
if duplicate_line >= 0:
error(filename, linenum, 'build/include', 4,
'"%s" already included at %s:%s' %
(include, filename, duplicate_line))
- elif (include.endswith('.cc') and
+ return
+
+ for extension in GetNonHeaderExtensions():
+ if (include.endswith('.' + extension) and
os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)):
- error(filename, linenum, 'build/include', 4,
- 'Do not include .cc files from other packages')
- elif not _THIRD_PARTY_HEADERS_PATTERN.match(include):
+ error(filename, linenum, 'build/include', 4,
+ 'Do not include .' + extension + ' files from other packages')
+ return
+
+ # We DO want to include a 3rd party looking header if it matches the
+  # filename. Otherwise we get an erroneous "...should include its
+  # header" error later.
+ third_src_header = False
+ for ext in GetHeaderExtensions():
+ basefilename = filename[0:len(filename) - len(fileinfo.Extension())]
+ headerfile = basefilename + '.' + ext
+ headername = FileInfo(headerfile).RepositoryName()
+ if headername in include or include in headername:
+ third_src_header = True
+ break
+
+ if third_src_header or not _THIRD_PARTY_HEADERS_PATTERN.match(include):
include_state.include_list[-1].append((include, linenum))
# We want to ensure that headers appear in the right order:
@@ -4568,7 +5122,7 @@ def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
# track of the highest type seen, and complains if we see a
# lower type after that.
error_message = include_state.CheckNextIncludeOrder(
- _ClassifyInclude(fileinfo, include, is_system))
+ _ClassifyInclude(fileinfo, include, used_angle_brackets, _include_order))
if error_message:
error(filename, linenum, 'build/include_order', 4,
'%s. Should be: %s.h, c system, c++ system, other.' %
@@ -4607,7 +5161,7 @@ def _GetTextInside(text, start_pattern):
# Give opening punctuations to get the matching close-punctuations.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
- closing_punctuation = set(matching_punctuation.itervalues())
+ closing_punctuation = set(itervalues(matching_punctuation))
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
@@ -4701,8 +5255,6 @@ def CheckLanguage(filename, clean_lines, linenum, file_extension,
if match:
include_state.ResetSection(match.group(1))
- # Make Windows paths like Unix.
- fullname = os.path.abspath(filename).replace('\\', '/')
# Perform other checks now that we are sure that this is not an include line
CheckCasts(filename, clean_lines, linenum, error)
@@ -4770,9 +5322,14 @@ def CheckLanguage(filename, clean_lines, linenum, file_extension,
% (match.group(1), match.group(2)))
if Search(r'\busing namespace\b', line):
- error(filename, linenum, 'build/namespaces', 5,
- 'Do not use namespace using-directives. '
- 'Use using-declarations instead.')
+ if Search(r'\bliterals\b', line):
+ error(filename, linenum, 'build/namespaces_literals', 5,
+ 'Do not use namespace using-directives. '
+ 'Use using-declarations instead.')
+ else:
+ error(filename, linenum, 'build/namespaces', 5,
+ 'Do not use namespace using-directives. '
+ 'Use using-declarations instead.')
# Detect variable-length arrays.
match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
@@ -4819,7 +5376,7 @@ def CheckLanguage(filename, clean_lines, linenum, file_extension,
if (IsHeaderExtension(file_extension)
and Search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
- error(filename, linenum, 'build/namespaces', 4,
+ error(filename, linenum, 'build/namespaces_headers', 4,
'Do not use unnamed namespaces in header files. See '
'https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.')
@@ -5109,19 +5666,19 @@ def CheckForNonConstReference(filename, clean_lines, linenum,
#
# We also accept & in static_assert, which looks like a function but
# it's actually a declaration expression.
- whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
+ allowed_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
r'operator\s*[<>][<>]|'
r'static_assert|COMPILE_ASSERT'
r')\s*\(')
- if Search(whitelisted_functions, line):
+ if Search(allowed_functions, line):
return
elif not Search(r'\S+\([^)]*$', line):
- # Don't see a whitelisted function on this line. Actually we
+ # Don't see an allowed function on this line. Actually we
# didn't see any function name on this line, so this is likely a
# multi-line parameter list. Try a bit harder to catch this case.
for i in xrange(2):
if (linenum > i and
- Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
+ Search(allowed_functions, clean_lines.elided[linenum - i - 1])):
return
decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body
@@ -5196,7 +5753,7 @@ def CheckCasts(filename, clean_lines, linenum, error):
if not expecting_function:
CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
- r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
+ r'\((int|float|double|bool|char|u?int(16|32|64)|size_t)\)', error)
# This doesn't catch all cases. Consider (const char * const)"hello".
#
@@ -5349,11 +5906,11 @@ _HEADERS_CONTAINING_TEMPLATES = (
)),
('<limits>', ('numeric_limits',)),
('<list>', ('list',)),
- ('<map>', ('map', 'multimap',)),
+ ('<map>', ('multimap',)),
('<memory>', ('allocator', 'make_shared', 'make_unique', 'shared_ptr',
'unique_ptr', 'weak_ptr')),
('<queue>', ('queue', 'priority_queue',)),
- ('<set>', ('set', 'multiset',)),
+ ('<set>', ('multiset',)),
('<stack>', ('stack',)),
('<string>', ('char_traits', 'basic_string',)),
('<tuple>', ('tuple',)),
@@ -5382,11 +5939,21 @@ _re_pattern_headers_maybe_templates = []
for _header, _templates in _HEADERS_MAYBE_TEMPLATES:
for _template in _templates:
# Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
- # type::max().
+ # 'type::max()'.
_re_pattern_headers_maybe_templates.append(
(re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
_template,
_header))
+# Match set<type>, but not foo->set<type>, foo.set<type>
+_re_pattern_headers_maybe_templates.append(
+ (re.compile(r'[^>.]\bset\s*\<'),
+ 'set<>',
+ '<set>'))
+# Match 'map<type> var' and 'std::map<type>(...)', but not 'map<type>(...)'
+_re_pattern_headers_maybe_templates.append(
+ (re.compile(r'(std\b::\bmap\s*\<)|(^(std\b::\b)map\b\(\s*\<)'),
+ 'map<>',
+ '<map>'))
# Other scripts may reach in and modify this pattern.
_re_pattern_templates = []
@@ -5419,7 +5986,7 @@ def FilesBelongToSameModule(filename_cc, filename_h):
some false positives. This should be sufficiently rare in practice.
Args:
- filename_cc: is the path for the .cc file
+ filename_cc: is the path for the source (e.g. .cc) file
filename_h: is the path for the header path
Returns:
@@ -5427,20 +5994,23 @@ def FilesBelongToSameModule(filename_cc, filename_h):
bool: True if filename_cc and filename_h belong to the same module.
string: the additional prefix needed to open the header file.
"""
+ fileinfo_cc = FileInfo(filename_cc)
+ if not fileinfo_cc.Extension().lstrip('.') in GetNonHeaderExtensions():
+ return (False, '')
- fileinfo = FileInfo(filename_cc)
- if not fileinfo.IsSource():
+ fileinfo_h = FileInfo(filename_h)
+ if not IsHeaderExtension(fileinfo_h.Extension().lstrip('.')):
return (False, '')
- filename_cc = filename_cc[:-len(fileinfo.Extension())]
- matched_test_suffix = Search(_TEST_FILE_SUFFIX, fileinfo.BaseName())
+
+ filename_cc = filename_cc[:-(len(fileinfo_cc.Extension()))]
+ matched_test_suffix = Search(_TEST_FILE_SUFFIX, fileinfo_cc.BaseName())
if matched_test_suffix:
filename_cc = filename_cc[:-len(matched_test_suffix.group(1))]
+
filename_cc = filename_cc.replace('/public/', '/')
filename_cc = filename_cc.replace('/internal/', '/')
- if not filename_h.endswith('.h'):
- return (False, '')
- filename_h = filename_h[:-len('.h')]
+ filename_h = filename_h[:-(len(fileinfo_h.Extension()))]
if filename_h.endswith('-inl'):
filename_h = filename_h[:-len('-inl')]
filename_h = filename_h.replace('/public/', '/')
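
A few example pairs for the rewritten extension handling, assuming default
extension settings (the matching itself happens in the unchanged remainder of
the function):

    # Same module, same directory: header opens with no extra prefix.
    assert FilesBelongToSameModule('a/b/foo.cc', 'a/b/foo.h') == (True, '')
    # Test files match their module's header too.
    assert FilesBelongToSameModule('a/b/foo_unittest.cc', 'a/b/foo.h') == (True, '')
    # Unrelated basenames do not match.
    assert FilesBelongToSameModule('a/b/foo.cc', 'a/b/bar.h') == (False, '')
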
@@ -5466,18 +6036,19 @@ def UpdateIncludeState(filename, include_dict, io=codecs):
"""
headerfile = None
try:
- headerfile = io.open(filename, 'r', 'utf8', 'replace')
+ with io.open(filename, 'r', 'utf8', 'replace') as headerfile:
+ linenum = 0
+ for line in headerfile:
+ linenum += 1
+ clean_line = CleanseComments(line)
+ match = _RE_PATTERN_INCLUDE.search(clean_line)
+ if match:
+ include = match.group(2)
+ include_dict.setdefault(include, linenum)
+ return True
except IOError:
return False
- linenum = 0
- for line in headerfile:
- linenum += 1
- clean_line = CleanseComments(line)
- match = _RE_PATTERN_INCLUDE.search(clean_line)
- if match:
- include = match.group(2)
- include_dict.setdefault(include, linenum)
- return True
+
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
@@ -5555,7 +6126,7 @@ def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
# include_dict is modified during iteration, so we iterate over a copy of
# the keys.
- header_keys = include_dict.keys()
+ header_keys = list(include_dict.keys())
for header in header_keys:
(same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
fullpath = common_path + header
@@ -5567,11 +6138,13 @@ def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
# didn't include it in the .h file.
# TODO(unknown): Do a better job of finding .h files so we are confident that
# not having the .h file means there isn't one.
- if filename.endswith('.cc') and not header_found:
- return
+ if not header_found:
+ for extension in GetNonHeaderExtensions():
+ if filename.endswith('.' + extension):
+ return
# All the lines have been processed, report the errors found.
- for required_header_unstripped in required:
+ for required_header_unstripped in sorted(required, key=required.__getitem__):
template = required[required_header_unstripped][1]
if required_header_unstripped.strip('<>"') not in include_dict:
error(filename, required[required_header_unstripped][0],
@@ -5710,11 +6283,9 @@ def IsBlockInNameSpace(nesting_state, is_forward_declaration):
Whether or not the new block is directly in a namespace.
"""
if is_forward_declaration:
- if len(nesting_state.stack) >= 1 and (
- isinstance(nesting_state.stack[-1], _NamespaceInfo)):
- return True
- else:
- return False
+ return len(nesting_state.stack) >= 1 and (
+ isinstance(nesting_state.stack[-1], _NamespaceInfo))
+
return (len(nesting_state.stack) > 1 and
nesting_state.stack[-1].check_namespace_indentation and
@@ -5764,7 +6335,7 @@ def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
def ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
- extra_check_functions=[]):
+ extra_check_functions=None):
"""Processes a single line in the file.
Args:
@@ -5803,8 +6374,9 @@ def ProcessLine(filename, file_extension, clean_lines, line,
CheckMakePairUsesDeduction(filename, clean_lines, line, error)
CheckRedundantVirtual(filename, clean_lines, line, error)
CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
- for check_fn in extra_check_functions:
- check_fn(filename, clean_lines, line, error)
+ if extra_check_functions:
+ for check_fn in extra_check_functions:
+ check_fn(filename, clean_lines, line, error)
def FlagCxx11Features(filename, clean_lines, linenum, error):
"""Flag those c++11 features that we only allow in certain places.
@@ -5878,7 +6450,7 @@ def FlagCxx14Features(filename, clean_lines, linenum, error):
def ProcessFileData(filename, file_extension, lines, error,
- extra_check_functions=[]):
+ extra_check_functions=None):
"""Performs lint checks and reports any errors to the given error function.
Args:
@@ -5978,7 +6550,7 @@ def ProcessConfigOverrides(filename):
if _cpplint_state.quiet:
# Suppress "Ignoring file" warning when using --quiet.
return False
- sys.stderr.write('Ignoring "%s": file excluded by "%s". '
+ _cpplint_state.PrintInfo('Ignoring "%s": file excluded by "%s". '
'File path component "%s" matches '
'pattern "%s"\n' %
(filename, cfg_file, base_name, val))
@@ -5986,34 +6558,38 @@ def ProcessConfigOverrides(filename):
elif name == 'linelength':
global _line_length
try:
- _line_length = int(val)
+ _line_length = int(val)
except ValueError:
- sys.stderr.write('Line length must be numeric.')
+ _cpplint_state.PrintError('Line length must be numeric.')
+ elif name == 'extensions':
+ ProcessExtensionsOption(val)
elif name == 'root':
global _root
# root directories are specified relative to CPPLINT.cfg dir.
_root = os.path.join(os.path.dirname(cfg_file), val)
elif name == 'headers':
ProcessHppHeadersOption(val)
+ elif name == 'includeorder':
+ ProcessIncludeOrderOption(val)
else:
- sys.stderr.write(
+ _cpplint_state.PrintError(
'Invalid configuration option (%s) in file %s\n' %
(name, cfg_file))
except IOError:
- sys.stderr.write(
+ _cpplint_state.PrintError(
"Skipping config file '%s': Can't open for reading\n" % cfg_file)
keep_looking = False
# Apply all the accumulated filters in reverse order (top-level directory
# config options having the least priority).
- for filter in reversed(cfg_filters):
- _AddFilters(filter)
+ for cfg_filter in reversed(cfg_filters):
+ _AddFilters(cfg_filter)
return True
-def ProcessFile(filename, vlevel, extra_check_functions=[]):
+def ProcessFile(filename, vlevel, extra_check_functions=None):
"""Does google-lint on a single file.
Args:
@@ -6051,7 +6627,8 @@ def ProcessFile(filename, vlevel, extra_check_functions=[]):
codecs.getwriter('utf8'),
'replace').read().split('\n')
else:
- lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
+ with codecs.open(filename, 'r', 'utf8', 'replace') as target_file:
+ lines = target_file.read().split('\n')
# Remove trailing '\r'.
# The -1 accounts for the extra trailing blank line we get from split()
@@ -6063,7 +6640,7 @@ def ProcessFile(filename, vlevel, extra_check_functions=[]):
lf_lines.append(linenum + 1)
except IOError:
- sys.stderr.write(
+ _cpplint_state.PrintError(
"Skipping input '%s': Can't open for reading\n" % filename)
_RestoreFilters()
return
@@ -6073,9 +6650,9 @@ def ProcessFile(filename, vlevel, extra_check_functions=[]):
# When reading from stdin, the extension is unknown, so no cpplint tests
# should rely on the extension.
- if filename != '-' and file_extension not in _valid_extensions:
- sys.stderr.write('Ignoring %s; not a valid file name '
- '(%s)\n' % (filename, ', '.join(_valid_extensions)))
+ if filename != '-' and file_extension not in GetAllExtensions():
+ _cpplint_state.PrintError('Ignoring %s; not a valid file name '
+ '(%s)\n' % (filename, ', '.join(GetAllExtensions())))
else:
ProcessFileData(filename, file_extension, lines, Error,
extra_check_functions)
@@ -6101,7 +6678,7 @@ def ProcessFile(filename, vlevel, extra_check_functions=[]):
# Suppress printing anything if --quiet was passed unless the error
# count has increased after processing this file.
if not _cpplint_state.quiet or old_errors != _cpplint_state.error_count:
- sys.stdout.write('Done processing %s\n' % filename)
+ _cpplint_state.PrintInfo('Done processing %s\n' % filename)
_RestoreFilters()
@@ -6111,12 +6688,21 @@ def PrintUsage(message):
Args:
message: The optional error message.
"""
- sys.stderr.write(_USAGE)
+ sys.stderr.write(_USAGE % (sorted(list(GetAllExtensions())),
+ ','.join(sorted(list(GetAllExtensions()))),
+ sorted(GetHeaderExtensions()),
+ ','.join(sorted(GetHeaderExtensions()))))
+
if message:
sys.exit('\nFATAL ERROR: ' + message)
else:
- sys.exit(1)
+ sys.exit(0)
+def PrintVersion():
+ sys.stdout.write('Cpplint fork (https://github.com/cpplint/cpplint)\n')
+ sys.stdout.write('cpplint ' + __VERSION__ + '\n')
+ sys.stdout.write('Python ' + sys.version + '\n')
+ sys.exit(0)
def PrintCategories():
"""Prints a list of all the error-categories used by error messages.
@@ -6140,12 +6726,18 @@ def ParseArguments(args):
"""
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
+ 'v=',
+ 'version',
'counting=',
'filter=',
'root=',
+ 'repository=',
'linelength=',
'extensions=',
+ 'exclude=',
+ 'recursive',
'headers=',
+ 'includeorder=',
'quiet'])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
@@ -6155,17 +6747,21 @@ def ParseArguments(args):
filters = ''
quiet = _Quiet()
counting_style = ''
+ recursive = False
for (opt, val) in opts:
if opt == '--help':
PrintUsage(None)
+ if opt == '--version':
+ PrintVersion()
elif opt == '--output':
- if val not in ('emacs', 'vs7', 'eclipse'):
- PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
+ if val not in ('emacs', 'vs7', 'eclipse', 'junit', 'sed', 'gsed'):
+        PrintUsage('The only allowed output formats are emacs, vs7, eclipse, '
+                   'sed, gsed and junit.')
output_format = val
elif opt == '--quiet':
quiet = True
- elif opt == '--verbose':
+ elif opt == '--verbose' or opt == '--v':
verbosity = int(val)
elif opt == '--filter':
filters = val
@@ -6178,49 +6774,126 @@ def ParseArguments(args):
elif opt == '--root':
global _root
_root = val
+ elif opt == '--repository':
+ global _repository
+ _repository = val
elif opt == '--linelength':
global _line_length
try:
- _line_length = int(val)
+ _line_length = int(val)
except ValueError:
- PrintUsage('Line length must be digits.')
+ PrintUsage('Line length must be digits.')
+ elif opt == '--exclude':
+ global _excludes
+ if not _excludes:
+ _excludes = set()
+ _excludes.update(glob.glob(val))
elif opt == '--extensions':
- global _valid_extensions
- try:
- _valid_extensions = set(val.split(','))
- except ValueError:
- PrintUsage('Extensions must be comma seperated list.')
+ ProcessExtensionsOption(val)
elif opt == '--headers':
ProcessHppHeadersOption(val)
+ elif opt == '--recursive':
+ recursive = True
+ elif opt == '--includeorder':
+ ProcessIncludeOrderOption(val)
if not filenames:
PrintUsage('No files were specified.')
+ if recursive:
+ filenames = _ExpandDirectories(filenames)
+
+ if _excludes:
+ filenames = _FilterExcludedFiles(filenames)
+
_SetOutputFormat(output_format)
_SetQuiet(quiet)
_SetVerboseLevel(verbosity)
_SetFilters(filters)
_SetCountingStyle(counting_style)
+ filenames.sort()
return filenames
+def _ExpandDirectories(filenames):
+ """Searches a list of filenames and replaces directories in the list with
+ all files descending from those directories. Files with extensions not in
+ the valid extensions list are excluded.
-def main():
- filenames = ParseArguments(sys.argv[1:])
-
- # Change stderr to write with replacement characters so we don't die
- # if we try to print something containing non-ASCII characters.
- sys.stderr = codecs.StreamReaderWriter(sys.stderr,
- codecs.getreader('utf8'),
- codecs.getwriter('utf8'),
- 'replace')
+ Args:
+ filenames: A list of files or directories
- _cpplint_state.ResetErrorCounts()
+ Returns:
+ A list of all files that are members of filenames or descended from a
+ directory in filenames
+ """
+ expanded = set()
for filename in filenames:
- ProcessFile(filename, _cpplint_state.verbose_level)
- # If --quiet is passed, suppress printing error count unless there are errors.
- if not _cpplint_state.quiet or _cpplint_state.error_count > 0:
- _cpplint_state.PrintErrorCounts()
+ if not os.path.isdir(filename):
+ expanded.add(filename)
+ continue
+
+ for root, _, files in os.walk(filename):
+ for loopfile in files:
+ fullname = os.path.join(root, loopfile)
+ if fullname.startswith('.' + os.path.sep):
+ fullname = fullname[len('.' + os.path.sep):]
+ expanded.add(fullname)
+
+ filtered = []
+ for filename in expanded:
+ if os.path.splitext(filename)[1][1:] in GetAllExtensions():
+ filtered.append(filename)
+ return filtered
+
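
Given a hypothetical tree, the expansion keeps only files whose extension is
lintable:

    # Suppose src/ contains foo.cc, foo.h and README.md.
    files = _ExpandDirectories(['src'])
    assert sorted(files) == ['src/foo.cc', 'src/foo.h']  # README.md is filtered out
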
+def _FilterExcludedFiles(fnames):
+ """Filters out files listed in the --exclude command line switch. File paths
+  in the switch are evaluated relative to the current working directory.
+ """
+ exclude_paths = [os.path.abspath(f) for f in _excludes]
+  # Because globbing does not work recursively, exclude all subpaths of
+  # all excluded entries.
+ return [f for f in fnames
+ if not any(e for e in exclude_paths
+ if _IsParentOrSame(e, os.path.abspath(f)))]
+
+def _IsParentOrSame(parent, child):
+ """Return true if child is subdirectory of parent.
+ Assumes both paths are absolute and don't contain symlinks.
+ """
+ parent = os.path.normpath(parent)
+ child = os.path.normpath(child)
+ if parent == child:
+ return True
+
+ prefix = os.path.commonprefix([parent, child])
+ if prefix != parent:
+ return False
+  # Note: os.path.commonprefix operates on a character basis, so
+ # take extra care of situations like '/foo/ba' and '/foo/bar/baz'
+ child_suffix = child[len(prefix):]
+ child_suffix = child_suffix.lstrip(os.sep)
+ return child == os.path.join(prefix, child_suffix)
+
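
A POSIX-flavored illustration of _IsParentOrSame, including the
character-prefix pitfall the function guards against:

    assert _IsParentOrSame('/foo/bar', '/foo/bar/baz.cc')
    assert _IsParentOrSame('/foo/bar', '/foo/bar')
    # '/foo/ba' is a string prefix of '/foo/bar' but not a parent directory.
    assert not _IsParentOrSame('/foo/ba', '/foo/bar/baz.cc')
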
+def main():
+ filenames = ParseArguments(sys.argv[1:])
+ backup_err = sys.stderr
+ try:
+ # Change stderr to write with replacement characters so we don't die
+ # if we try to print something containing non-ASCII characters.
+ sys.stderr = codecs.StreamReader(sys.stderr, 'replace')
+
+ _cpplint_state.ResetErrorCounts()
+ for filename in filenames:
+ ProcessFile(filename, _cpplint_state.verbose_level)
+ # If --quiet is passed, suppress printing error count unless there are errors.
+ if not _cpplint_state.quiet or _cpplint_state.error_count > 0:
+ _cpplint_state.PrintErrorCounts()
+
+ if _cpplint_state.output_format == 'junit':
+ sys.stderr.write(_cpplint_state.FormatJUnitXML())
+
+ finally:
+ sys.stderr = backup_err
sys.exit(_cpplint_state.error_count > 0)
diff --git a/tools/cpplint.py-update b/tools/cpplint.py-update
index 4af4389..3d32330 100755
--- a/tools/cpplint.py-update
+++ b/tools/cpplint.py-update
@@ -15,7 +15,10 @@
set -eu
-GITHUB_URL="https://github.com/google/styleguide/raw/gh-pages"
+# The outdated Google version that only supports Python 2.
+GITHUB_URL="https://github.com/google/styleguide/raw/gh-pages/cpplint"
+# The forked version with Python 3 support.
+GITHUB_URL="https://github.com/cpplint/cpplint/raw/develop"
SCRIPT_DIR="$(dirname "$(readlink -f -- "$0")")"
usage() {
@@ -46,8 +49,11 @@ main() {
# Download cpplint.py from upstream.
local cpplint_py="${SCRIPT_DIR}/cpplint.py"
- wget "${GITHUB_URL}/cpplint/cpplint.py" -O "${cpplint_py}"
- sed -i '2i# pylint: skip-file' "${cpplint_py}"
+ wget "${GITHUB_URL}/cpplint.py" -O "${cpplint_py}"
+ sed -i \
+ -e '1s|python$|python3|' \
+ -e '2i# pylint: skip-file' \
+ "${cpplint_py}"
chmod +x "${cpplint_py}"
}
diff --git a/tools/google-java-format.py b/tools/google-java-format.py
index ed5ce28..6659511 100755
--- a/tools/google-java-format.py
+++ b/tools/google-java-format.py
@@ -1,5 +1,4 @@
-#!/usr/bin/python
-# -*- coding:utf-8 -*-
+#!/usr/bin/env python3
# Copyright 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,8 +15,6 @@
"""Wrapper to run google-java-format to check for any malformatted changes."""
-from __future__ import print_function
-
import argparse
import os
import sys
diff --git a/tools/pylint.py b/tools/pylint.py
index 7885dcf..570f055 100755
--- a/tools/pylint.py
+++ b/tools/pylint.py
@@ -1,5 +1,4 @@
-#!/usr/bin/python
-# -*- coding:utf-8 -*-
+#!/usr/bin/env python3
# Copyright 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,25 +15,61 @@
"""Wrapper to run pylint with the right settings."""
-from __future__ import print_function
-
import argparse
import errno
import os
+import shutil
import sys
+import subprocess
+
+
+assert (sys.version_info.major, sys.version_info.minor) >= (3, 6), (
+ 'Python 3.6 or newer is required; found %s' % (sys.version,))
DEFAULT_PYLINTRC_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'pylintrc')
+def is_pylint3(pylint):
+ """See whether |pylint| supports Python 3."""
+ # Make sure pylint is using Python 3.
+ result = subprocess.run([pylint, '--version'], stdout=subprocess.PIPE,
+ check=True)
+ if b'Python 3' not in result.stdout:
+ print('%s: unable to locate a Python 3 version of pylint; Python 3 '
+ 'support cannot be guaranteed' % (__file__,), file=sys.stderr)
+ return False
+
+ return True
+
+
+def find_pylint3():
+ """Figure out the name of the pylint tool for Python 3.
+
+ It keeps changing with Python 2->3 migrations. Fun.
+ """
+ # Prefer pylint3 as that's what we want.
+ if shutil.which('pylint3'):
+ return 'pylint3'
+
+ # If there's no pylint, give up.
+ if not shutil.which('pylint'):
+ print('%s: unable to locate pylint; please install:\n'
+ 'sudo apt-get install pylint' % (__file__,), file=sys.stderr)
+ sys.exit(1)
+
+ return 'pylint'
+
+
def get_parser():
"""Return a command line parser."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--init-hook', help='Init hook commands to run.')
+ parser.add_argument('--py3', action='store_true',
+ help='Force Python 3 mode')
parser.add_argument('--executable-path',
- help='The path of the pylint executable.',
- default='pylint')
+ help='The path of the pylint executable.')
parser.add_argument('--no-rcfile',
help='Specify to use the executable\'s default '
'configuration.',
@@ -48,7 +83,18 @@ def main(argv):
parser = get_parser()
opts, unknown = parser.parse_known_args(argv)
- cmd = [opts.executable_path]
+ pylint = opts.executable_path
+ if pylint is None:
+ if opts.py3:
+ pylint = find_pylint3()
+ else:
+ pylint = 'pylint'
+
+ # Make sure pylint is using Python 3.
+ if opts.py3:
+ is_pylint3(pylint)
+
+ cmd = [pylint]
if not opts.no_rcfile:
# We assume pylint is running in the top directory of the project,
# so load the pylintrc file from there if it's available.