Diffstat (limited to 'pw_presubmit/py/pw_presubmit')
-rw-r--r--  pw_presubmit/py/pw_presubmit/bazel_parser.py        |  20
-rw-r--r--  pw_presubmit/py/pw_presubmit/build.py               | 506
-rw-r--r--  pw_presubmit/py/pw_presubmit/cli.py                 |  63
-rw-r--r--  pw_presubmit/py/pw_presubmit/cpp_checks.py          | 126
-rwxr-xr-x  pw_presubmit/py/pw_presubmit/format_code.py         | 317
-rw-r--r--  pw_presubmit/py/pw_presubmit/git_repo.py            |  15
-rw-r--r--  pw_presubmit/py/pw_presubmit/gitmodules.py          |  18
-rw-r--r--  pw_presubmit/py/pw_presubmit/inclusive_language.py  |  15
-rw-r--r--  pw_presubmit/py/pw_presubmit/javascript_checks.py   |  44
-rw-r--r--  pw_presubmit/py/pw_presubmit/json_check.py          |  38
-rw-r--r--  pw_presubmit/py/pw_presubmit/keep_sorted.py         |  10
-rw-r--r--  pw_presubmit/py/pw_presubmit/module_owners.py       |   4
-rw-r--r--  pw_presubmit/py/pw_presubmit/ninja_parser.py        | 115
-rw-r--r--  pw_presubmit/py/pw_presubmit/npm_presubmit.py       |   3
-rw-r--r--  pw_presubmit/py/pw_presubmit/owners_checks.py       |  31
-rwxr-xr-x  pw_presubmit/py/pw_presubmit/pigweed_presubmit.py   | 441
-rw-r--r--  pw_presubmit/py/pw_presubmit/presubmit.py           | 474
-rw-r--r--  pw_presubmit/py/pw_presubmit/presubmit_context.py   | 624
-rw-r--r--  pw_presubmit/py/pw_presubmit/python_checks.py       | 230
-rw-r--r--  pw_presubmit/py/pw_presubmit/rst_format.py          | 246
-rw-r--r--  pw_presubmit/py/pw_presubmit/shell_checks.py        |  23
-rw-r--r--  pw_presubmit/py/pw_presubmit/source_in_build.py     |   7
-rw-r--r--  pw_presubmit/py/pw_presubmit/todo_check.py          |  37
-rw-r--r--  pw_presubmit/py/pw_presubmit/tools.py               |  66
24 files changed, 2729 insertions, 744 deletions
diff --git a/pw_presubmit/py/pw_presubmit/bazel_parser.py b/pw_presubmit/py/pw_presubmit/bazel_parser.py
index 39f42cdcd..aaf3319c8 100644
--- a/pw_presubmit/py/pw_presubmit/bazel_parser.py
+++ b/pw_presubmit/py/pw_presubmit/bazel_parser.py
@@ -16,12 +16,13 @@
from pathlib import Path
import re
import sys
+from typing import List
def parse_bazel_stdout(bazel_stdout: Path) -> str:
"""Extracts a concise error from a bazel log."""
- seen_error = False
- error_lines = []
+ seen_error: bool = False
+ error_lines: List[str] = []
with bazel_stdout.open() as ins:
for line in ins:
@@ -30,7 +31,7 @@ def parse_bazel_stdout(bazel_stdout: Path) -> str:
# be significant, especially for compiler error messages.
line = line.rstrip()
- if re.match(r'^ERROR:', line):
+ if re.match(r'^(ERROR|FAIL):', line):
seen_error = True
if seen_error:
@@ -48,9 +49,16 @@ def parse_bazel_stdout(bazel_stdout: Path) -> str:
if line.strip().startswith('# Configuration'):
continue
- # An "<ALLCAPS>:" line usually means compiler output is done
- # and useful compiler errors are complete.
- if re.match(r'^(?!ERROR)[A-Z]+:', line):
+ # Ignore passing and skipped tests.
+ if 'PASSED' in line or 'SKIPPED' in line:
+ continue
+
+ # Ignore intermixed lines from other build steps.
+ if re.match(r'\[\s*[\d,]+\s*/\s*[\d,]+\s*\]', line):
+ continue
+
+ if re.match(r'Executed \d+ out of \d+ tests', line):
+ error_lines.append(line)
break
error_lines.append(line)
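
A quick way to exercise the updated parser by hand (the log contents below are
invented for illustration; exactly which lines survive also depends on the
other skip rules in this function):

    from pathlib import Path
    from pw_presubmit.bazel_parser import parse_bazel_stdout

    log = Path('bazel.stdout')
    log.write_text(
        '[ 12/345] Compiling foo.cc\n'   # intermixed build step: skipped
        'ERROR: //pw_foo:test failed\n'  # starts the error block; kept
        'INFO: PASSED in 0.2s\n'         # passing test: skipped
        'Executed 3 out of 4 tests\n'    # kept, then parsing stops
    )
    print(parse_bazel_stdout(log))  # the ERROR line plus the summary line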
diff --git a/pw_presubmit/py/pw_presubmit/build.py b/pw_presubmit/py/pw_presubmit/build.py
index 6ef59e880..17ca25f40 100644
--- a/pw_presubmit/py/pw_presubmit/build.py
+++ b/pw_presubmit/py/pw_presubmit/build.py
@@ -13,16 +13,20 @@
# the License.
"""Functions for building code during presubmit checks."""
+import base64
import contextlib
+from dataclasses import dataclass
import itertools
import json
import logging
import os
+import posixpath
from pathlib import Path
import re
import subprocess
from shutil import which
import sys
+import tarfile
from typing import (
Any,
Callable,
@@ -31,6 +35,7 @@ from typing import (
ContextManager,
Dict,
Iterable,
+ Iterator,
List,
Mapping,
Optional,
@@ -40,29 +45,34 @@ from typing import (
Union,
)
-from pw_presubmit import (
- bazel_parser,
+from pw_presubmit.presubmit import (
call,
Check,
FileFilter,
filter_paths,
- format_code,
install_package,
- Iterator,
- log_run,
- ninja_parser,
- plural,
- PresubmitContext,
- PresubmitFailure,
PresubmitResult,
SubStep,
- tools,
+)
+from pw_presubmit.presubmit_context import (
+ PresubmitContext,
+ PresubmitFailure,
+)
+from pw_presubmit import (
+ bazel_parser,
+ format_code,
+ ninja_parser,
+)
+from pw_presubmit.tools import (
+ plural,
+ log_run,
+ format_command,
)
_LOG = logging.getLogger(__name__)
-def bazel(ctx: PresubmitContext, cmd: str, *args: str) -> None:
+def bazel(ctx: PresubmitContext, cmd: str, *args: str, **kwargs) -> None:
"""Invokes Bazel with some common flags set.
Intended for use with bazel build and test. May not work with others.
@@ -77,6 +87,7 @@ def bazel(ctx: PresubmitContext, cmd: str, *args: str) -> None:
keep_going.append('--keep_going')
bazel_stdout = ctx.output_dir / 'bazel.stdout'
+ ctx.output_dir.mkdir(exist_ok=True, parents=True)
try:
with bazel_stdout.open('w') as outs:
call(
@@ -90,8 +101,9 @@ def bazel(ctx: PresubmitContext, cmd: str, *args: str) -> None:
*keep_going,
*args,
cwd=ctx.root,
- env=env_with_clang_vars(),
tee=outs,
+ call_annotation={'build_system': 'bazel'},
+ **kwargs,
)
except PresubmitFailure as exc:
@@ -123,8 +135,8 @@ def _gn_value(value) -> str:
return str(value)
-def gn_args(**kwargs) -> str:
- """Builds a string to use for the --args argument to gn gen.
+def gn_args_list(**kwargs) -> List[str]:
+ """Return a list of formatted strings to use as gn args.
Currently supports bool, int, and str values. In the case of str values,
quotation marks will be added automatically, unless the string already
@@ -136,28 +148,70 @@ def gn_args(**kwargs) -> str:
transformed_args.append(f'{arg}={_gn_value(val)}')
# Use ccache if available for faster repeat presubmit runs.
- if which('ccache'):
+ if which('ccache') and 'pw_command_launcher' not in kwargs:
transformed_args.append('pw_command_launcher="ccache"')
- return '--args=' + ' '.join(transformed_args)
+ return transformed_args
+
+
+def gn_args(**kwargs) -> str:
+ """Builds a string to use for the --args argument to gn gen.
+
+ Currently supports bool, int, and str values. In the case of str values,
+ quotation marks will be added automatically, unless the string already
+ contains one or more double quotation marks, or starts with a { or [
+ character, in which case it will be passed through as-is.
+ """
+ return '--args=' + ' '.join(gn_args_list(**kwargs))
+
+
+def write_gn_args_file(destination_file: Path, **kwargs) -> str:
+ """Write gn args to a file.
+
+ Currently supports bool, int, and str values. In the case of str values,
+ quotation marks will be added automatically, unless the string already
+ contains one or more double quotation marks, or starts with a { or [
+ character, in which case it will be passed through as-is.
+
+ Returns:
+ The contents of the written file.
+ """
+ contents = '\n'.join(gn_args_list(**kwargs))
+ # Add a trailing linebreak
+ contents += '\n'
+ destination_file.parent.mkdir(exist_ok=True, parents=True)
+
+ if (
+ destination_file.is_file()
+ and destination_file.read_text(encoding='utf-8') == contents
+ ):
+ # File is identical, don't re-write.
+ return contents
+
+ destination_file.write_text(contents, encoding='utf-8')
+ return contents
def gn_gen(
ctx: PresubmitContext,
*args: str,
- gn_check: bool = True,
+ gn_check: bool = True, # pylint: disable=redefined-outer-name
gn_fail_on_unused: bool = True,
export_compile_commands: Union[bool, str] = True,
preserve_args_gn: bool = False,
**gn_arguments,
) -> None:
- """Runs gn gen in the specified directory with optional GN args."""
+ """Runs gn gen in the specified directory with optional GN args.
+
+ Runs with --check=system if gn_check=True. Note that this does not cover
+ generated files. Run gn_check() after building to check generated files.
+ """
all_gn_args = dict(gn_arguments)
all_gn_args.update(ctx.override_gn_args)
_LOG.debug('%r', all_gn_args)
args_option = gn_args(**all_gn_args)
- if not preserve_args_gn:
+ if not ctx.dry_run and not preserve_args_gn:
# Delete args.gn to ensure this is a clean build.
args_gn = ctx.output_dir / 'args.gn'
if args_gn.is_file():
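
Reviewer note: roughly how the new GN-args helpers behave. Strings are quoted
per the docstring; the exact rendering of bool/int values comes from
_gn_value(), which is unchanged here, so treat the output as illustrative
(ccache injection still applies when ccache is on PATH):

    from pathlib import Path
    from pw_presubmit.build import gn_args, write_gn_args_file

    # One '--args=...' string for gn gen; True presumably renders as GN's
    # lowercase true via _gn_value().
    print(gn_args(pw_ENABLE_FOO=True, pw_BOARD='"lm3s6965evb"'))

    # Same args, one per line; the write is skipped when the file already
    # matches, so args.gn stays untouched and gn/ninja aren't retriggered.
    write_gn_args_file(Path('out/args.gn'), pw_ENABLE_FOO=True)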
@@ -174,26 +228,38 @@ def gn_gen(
'gen',
ctx.output_dir,
'--color=always',
+ *(['--check=system'] if gn_check else []),
*(['--fail-on-unused-args'] if gn_fail_on_unused else []),
*([export_commands_arg] if export_commands_arg else []),
*args,
*([args_option] if all_gn_args else []),
cwd=ctx.root,
+ call_annotation={
+ 'gn_gen_args': all_gn_args,
+ 'gn_gen_args_option': args_option,
+ },
)
- if gn_check:
- call(
- 'gn',
- 'check',
- ctx.output_dir,
- '--check-generated',
- '--check-system',
- cwd=ctx.root,
- )
+
+def gn_check(ctx: PresubmitContext) -> PresubmitResult:
+ """Runs gn check, including on generated and system files."""
+ call(
+ 'gn',
+ 'check',
+ ctx.output_dir,
+ '--check-generated',
+ '--check-system',
+ cwd=ctx.root,
+ )
+ return PresubmitResult.PASS
def ninja(
- ctx: PresubmitContext, *args, save_compdb=True, save_graph=True, **kwargs
+ ctx: PresubmitContext,
+ *args,
+ save_compdb: bool = True,
+ save_graph: bool = True,
+ **kwargs,
) -> None:
"""Runs ninja in the specified directory."""
@@ -206,22 +272,25 @@ def ninja(
keep_going.extend(('-k', '0'))
if save_compdb:
- proc = subprocess.run(
+ proc = log_run(
['ninja', '-C', ctx.output_dir, '-t', 'compdb', *args],
capture_output=True,
**kwargs,
)
- (ctx.output_dir / 'ninja.compdb').write_bytes(proc.stdout)
+ if not ctx.dry_run:
+ (ctx.output_dir / 'ninja.compdb').write_bytes(proc.stdout)
if save_graph:
- proc = subprocess.run(
+ proc = log_run(
['ninja', '-C', ctx.output_dir, '-t', 'graph', *args],
capture_output=True,
**kwargs,
)
- (ctx.output_dir / 'ninja.graph').write_bytes(proc.stdout)
+ if not ctx.dry_run:
+ (ctx.output_dir / 'ninja.graph').write_bytes(proc.stdout)
ninja_stdout = ctx.output_dir / 'ninja.stdout'
+ ctx.output_dir.mkdir(exist_ok=True, parents=True)
try:
with ninja_stdout.open('w') as outs:
if sys.platform == 'win32':
@@ -239,6 +308,7 @@ def ninja(
*args,
tee=outs,
propagate_sigterm=True,
+ call_annotation={'build_system': 'ninja'},
**kwargs,
)
@@ -253,7 +323,7 @@ def ninja(
def get_gn_args(directory: Path) -> List[Dict[str, Dict[str, str]]]:
"""Dumps GN variables to JSON."""
- proc = subprocess.run(
+ proc = log_run(
['gn', 'args', directory, '--list', '--json'], stdout=subprocess.PIPE
)
return json.loads(proc.stdout)
@@ -296,7 +366,7 @@ def _get_paths_from_command(source_dir: Path, *args, **kwargs) -> Set[Path]:
)
_LOG.error(
'[COMMAND] %s\n%s\n%s',
- *tools.format_command(args, kwargs),
+ *format_command(args, kwargs),
process.stderr.decode(),
)
raise PresubmitFailure
@@ -398,7 +468,7 @@ def check_bazel_build_for_files(
if bazel_dirs:
for path in (p for p in files if p.suffix in bazel_extensions_to_check):
if path not in bazel_builds:
- # TODO(b/234883555) Replace this workaround for fuzzers.
+ # TODO: b/234883555 - Replace this workaround for fuzzers.
if 'fuzz' not in str(path):
missing.append(path)
@@ -523,6 +593,37 @@ def test_server(executable: str, output_dir: Path):
proc.terminate() # pylint: disable=used-before-assignment
+@contextlib.contextmanager
+def modified_env(**envvars):
+ """Context manager that sets environment variables.
+
+ Use by assigning values to variable names in the argument list, e.g.:
+ `modified_env(MY_FLAG="some value")`
+
+ Args:
+        envvars: Environment variables to set, given as keyword arguments.
+ """
+ saved_env = os.environ.copy()
+ os.environ.update(envvars)
+ try:
+ yield
+ finally:
+ os.environ = saved_env
+
+
+def fuzztest_prng_seed(ctx: PresubmitContext) -> str:
+ """Convert the RNG seed to the format expected by FuzzTest.
+
+ FuzzTest can be configured to use the seed by setting the
+ `FUZZTEST_PRNG_SEED` environment variable to this value.
+
+ Args:
+ ctx: The context that includes a pseudorandom number generator seed.
+ """
+ rng_bytes = ctx.rng_seed.to_bytes(32, sys.byteorder)
+ return base64.urlsafe_b64encode(rng_bytes).decode('ascii').rstrip('=')
+
+
@filter_paths(
file_filter=FileFilter(endswith=('.bzl', '.bazel'), name=('WORKSPACE',))
)
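
The seed transformation is plain stdlib and easy to check standalone; the
pairing with modified_env at the end is a hypothetical usage, not part of
this change:

    import base64
    import sys

    rng_bytes = (1).to_bytes(32, sys.byteorder)  # 1 is the --rng-seed default
    print(base64.urlsafe_b64encode(rng_bytes).decode('ascii').rstrip('='))
    # 43 URL-safe characters; 'AQAA...A' on a little-endian machine.

    # Hypothetical pairing:
    # with modified_env(FUZZTEST_PRNG_SEED=fuzztest_prng_seed(ctx)):
    #     ninja(ctx, 'fuzz_tests')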
@@ -550,41 +651,102 @@ def gn_gen_check(ctx: PresubmitContext):
gn_gen(ctx, gn_check=True)
+Item = Union[int, str]
+Value = Union[Item, Sequence[Item]]
+ValueCallable = Callable[[PresubmitContext], Value]
+InputItem = Union[Item, ValueCallable]
+InputValue = Union[InputItem, Sequence[InputItem]]
+
+
+def _value(ctx: PresubmitContext, val: InputValue) -> Value:
+ """Process any lambdas inside val
+
+ val is a single value or a list of values, any of which might be a lambda
+ that needs to be resolved. Call each of these lambdas with ctx and replace
+ the lambda with the result. Return the updated top-level structure.
+ """
+
+ # TODO(mohrr) Use typing.TypeGuard instead of "type: ignore"
+
+ if isinstance(val, (str, int)):
+ return val
+ if callable(val):
+ return val(ctx)
+
+ result: List[Item] = []
+ for item in val:
+ if callable(item):
+ call_result = item(ctx)
+            if isinstance(call_result, (int, str)):
+ result.append(call_result)
+ else: # Sequence.
+ result.extend(call_result) # type: ignore
+ elif isinstance(item, (int, str)):
+ result.append(item)
+ else: # Sequence.
+ result.extend(item)
+ return result
+
+
_CtxMgrLambda = Callable[[PresubmitContext], ContextManager]
_CtxMgrOrLambda = Union[ContextManager, _CtxMgrLambda]
+_INCREMENTAL_COVERAGE_TOOL = 'cloud_client'
+_ABSOLUTE_COVERAGE_TOOL = 'raw_coverage_cloud_uploader'
+
+
+@dataclass(frozen=True)
+class CoverageOptions:
+ """Coverage collection configuration.
+
+ For Google use only. See go/kalypsi-abs and go/kalypsi-inc for documentation
+ of the metadata fields.
+ """
+
+ target_bucket_root: str
+ target_bucket_project: str
+ project: str
+ trace_type: str
+ ref: str
+ source: str
+ owner: str
+ bug_component: str
+ trim_prefix: str = ''
+ add_prefix: str = ''
-class GnGenNinja(Check):
- """Thin wrapper of Check for steps that just call gn/ninja."""
+
+class _NinjaBase(Check):
+ """Thin wrapper of Check for steps that call ninja."""
def __init__(
self,
*args,
packages: Sequence[str] = (),
- gn_args: Optional[ # pylint: disable=redefined-outer-name
- Dict[str, Any]
- ] = None,
ninja_contexts: Sequence[_CtxMgrOrLambda] = (),
ninja_targets: Union[str, Sequence[str], Sequence[Sequence[str]]] = (),
+ coverage_options: Optional[CoverageOptions] = None,
**kwargs,
):
- """Initializes a GnGenNinja object.
+ """Initializes a _NinjaBase object.
Args:
*args: Passed on to superclass.
packages: List of 'pw package' packages to install.
- gn_args: Dict of GN args.
ninja_contexts: List of context managers to apply around ninja
calls.
ninja_targets: Single ninja target, list of Ninja targets, or list
of list of ninja targets. If a list of a list, ninja will be
called multiple times with the same build directory.
+ coverage_options: Coverage collection options (or None, if not
+ collecting coverage data).
**kwargs: Passed on to superclass.
"""
- super().__init__(self._substeps(), *args, **kwargs)
- self.packages: Sequence[str] = packages
- self.gn_args: Dict[str, Any] = gn_args or {}
- self.ninja_contexts: Tuple[_CtxMgrOrLambda, ...] = tuple(ninja_contexts)
+ super().__init__(*args, **kwargs)
+ self._packages: Sequence[str] = packages
+ self._ninja_contexts: Tuple[_CtxMgrOrLambda, ...] = tuple(
+ ninja_contexts
+ )
+ self._coverage_options = coverage_options
if isinstance(ninja_targets, str):
ninja_targets = (ninja_targets,)
@@ -594,14 +756,18 @@ class GnGenNinja(Check):
if ninja_targets and all_strings != any_strings:
raise ValueError(repr(ninja_targets))
- self.ninja_target_lists: Tuple[Tuple[str, ...], ...]
+ self._ninja_target_lists: Tuple[Tuple[str, ...], ...]
if all_strings:
targets: List[str] = []
for target in ninja_targets:
targets.append(target) # type: ignore
- self.ninja_target_lists = (tuple(targets),)
+ self._ninja_target_lists = (tuple(targets),)
else:
- self.ninja_target_lists = tuple(tuple(x) for x in ninja_targets)
+ self._ninja_target_lists = tuple(tuple(x) for x in ninja_targets)
+
+ @property
+ def ninja_targets(self) -> List[str]:
+ return list(itertools.chain(*self._ninja_target_lists))
def _install_package( # pylint: disable=no-self-use
self,
@@ -611,63 +777,95 @@ class GnGenNinja(Check):
install_package(ctx, package)
return PresubmitResult.PASS
- def _gn_gen(self, ctx: PresubmitContext) -> PresubmitResult:
- Item = Union[int, str]
- Value = Union[Item, Sequence[Item]]
- ValueCallable = Callable[[PresubmitContext], Value]
- InputItem = Union[Item, ValueCallable]
- InputValue = Union[InputItem, Sequence[InputItem]]
-
- # TODO(mohrr) Use typing.TypeGuard instead of "type: ignore"
-
- def value(val: InputValue) -> Value:
- if isinstance(val, (str, int)):
- return val
- if callable(val):
- return val(ctx)
-
- result: List[Item] = []
- for item in val:
- if callable(item):
- call_result = item(ctx)
- if isinstance(item, (int, str)):
- result.append(call_result)
- else: # Sequence.
- result.extend(call_result) # type: ignore
- elif isinstance(item, (int, str)):
- result.append(item)
- else: # Sequence.
- result.extend(item)
- return result
-
- args = {k: value(v) for k, v in self.gn_args.items()}
- gn_gen(ctx, **args) # type: ignore
- return PresubmitResult.PASS
-
- def _ninja(
- self, ctx: PresubmitContext, targets: Sequence[str]
- ) -> PresubmitResult:
+ @contextlib.contextmanager
+ def _context(self, ctx: PresubmitContext):
+ """Apply any context managers necessary for building."""
with contextlib.ExitStack() as stack:
- for mgr in self.ninja_contexts:
+ for mgr in self._ninja_contexts:
if isinstance(mgr, contextlib.AbstractContextManager):
stack.enter_context(mgr)
else:
stack.enter_context(mgr(ctx)) # type: ignore
+ yield
+
+ def _ninja(
+ self, ctx: PresubmitContext, targets: Sequence[str]
+ ) -> PresubmitResult:
+ with self._context(ctx):
ninja(ctx, *targets)
return PresubmitResult.PASS
- def _substeps(self) -> Iterator[SubStep]:
- for package in self.packages:
+ def _coverage(
+ self, ctx: PresubmitContext, options: CoverageOptions
+ ) -> PresubmitResult:
+ """Archive and (on LUCI) upload coverage reports."""
+ reports = ctx.output_dir / 'coverage_reports'
+ os.makedirs(reports, exist_ok=True)
+ coverage_jsons: List[Path] = []
+ for path in ctx.output_dir.rglob('coverage_report'):
+ _LOG.debug('exploring %s', path)
+ name = str(path.relative_to(ctx.output_dir))
+ name = name.replace('_', '').replace('/', '_')
+ with tarfile.open(reports / f'{name}.tar.gz', 'w:gz') as tar:
+ tar.add(path, arcname=name, recursive=True)
+ json_path = path / 'json' / 'report.json'
+ if json_path.is_file():
+ _LOG.debug('found json %s', json_path)
+ coverage_jsons.append(json_path)
+
+ if not coverage_jsons:
+ ctx.fail('No coverage json file found')
+ return PresubmitResult.FAIL
+
+ if len(coverage_jsons) > 1:
+ _LOG.warning(
+ 'More than one coverage json file, selecting first: %r',
+ coverage_jsons,
+ )
+
+ coverage_json = coverage_jsons[0]
+
+ if ctx.luci:
+ if ctx.luci.is_dev:
+ _LOG.warning('Not uploading coverage since running in dev')
+ return PresubmitResult.PASS
+
+ with self._context(ctx):
+ # GCS bucket paths are POSIX-like.
+ coverage_gcs_path = posixpath.join(
+ options.target_bucket_root,
+ 'incremental' if ctx.luci.is_try else 'absolute',
+ options.target_bucket_project,
+ str(ctx.luci.buildbucket_id),
+ )
+ _copy_to_gcs(
+ ctx,
+ coverage_json,
+ posixpath.join(coverage_gcs_path, 'report.json'),
+ )
+ metadata_json = _write_coverage_metadata(ctx, options)
+ _copy_to_gcs(
+ ctx,
+ metadata_json,
+ posixpath.join(coverage_gcs_path, 'metadata.json'),
+ )
+
+ return PresubmitResult.PASS
+
+ _LOG.warning('Not uploading coverage since running locally')
+ return PresubmitResult.PASS
+
+ def _package_substeps(self) -> Iterator[SubStep]:
+ for package in self._packages:
yield SubStep(
f'install {package} package',
self._install_package,
(package,),
)
- yield SubStep('gn gen', self._gn_gen)
-
+ def _ninja_substeps(self) -> Iterator[SubStep]:
targets_parts = set()
- for targets in self.ninja_target_lists:
+ for targets in self._ninja_target_lists:
targets_part = " ".join(targets)
maxlen = 70
if len(targets_part) > maxlen:
@@ -675,3 +873,127 @@ class GnGenNinja(Check):
assert targets_part not in targets_parts
targets_parts.add(targets_part)
yield SubStep(f'ninja {targets_part}', self._ninja, (targets,))
+
+ def _coverage_substeps(self) -> Iterator[SubStep]:
+ if self._coverage_options is not None:
+ yield SubStep('coverage', self._coverage, (self._coverage_options,))
+
+
+def _copy_to_gcs(ctx: PresubmitContext, filepath: Path, gcs_dst: str):
+ cmd = [
+ "gsutil",
+ "cp",
+ filepath,
+ gcs_dst,
+ ]
+
+ upload_stdout = ctx.output_dir / (filepath.name + '.stdout')
+ with upload_stdout.open('w') as outs:
+ call(*cmd, tee=outs)
+
+
+def _write_coverage_metadata(
+ ctx: PresubmitContext, options: CoverageOptions
+) -> Path:
+ """Write out Kalypsi coverage metadata and return the path to it."""
+ assert ctx.luci is not None
+ assert len(ctx.luci.triggers) == 1
+ change = ctx.luci.triggers[0]
+
+ metadata = {
+ 'host': change.gerrit_host,
+ 'project': options.project,
+ 'trace_type': options.trace_type,
+ 'trim_prefix': options.trim_prefix,
+ 'patchset_num': change.patchset,
+ 'change_id': change.number,
+ 'owner': options.owner,
+ 'bug_component': options.bug_component,
+ }
+
+ if ctx.luci.is_try:
+ # Running in CQ: uploading incremental coverage
+ metadata.update(
+ {
+ # Note: no `add_prefix`. According to the documentation, that's
+ # only supported for absolute coverage.
+ #
+ # TODO(tpudlik): Follow up with Kalypsi team, since this is
+ # surprising (given that trim_prefix is supported for both types
+ # of coverage). This might be an error in the docs.
+ 'patchset_num': change.patchset,
+ 'change_id': change.number,
+ }
+ )
+ else:
+ # Running in CI: uploading absolute coverage
+ metadata.update(
+ {
+ 'add_prefix': options.add_prefix,
+ 'commit_id': change.ref,
+ 'ref': options.ref,
+ 'source': options.source,
+ }
+ )
+
+ metadata_json = ctx.output_dir / "metadata.json"
+ with metadata_json.open('w') as metadata_file:
+ json.dump(metadata, metadata_file)
+ return metadata_json
+
+
+class GnGenNinja(_NinjaBase):
+ """Thin wrapper of Check for steps that just call gn/ninja.
+
+ Runs gn gen, ninja, then gn check.
+ """
+
+ def __init__(
+ self,
+ *args,
+ gn_args: Optional[ # pylint: disable=redefined-outer-name
+ Dict[str, Any]
+ ] = None,
+ **kwargs,
+ ):
+ """Initializes a GnGenNinja object.
+
+ Args:
+ *args: Passed on to superclass.
+ gn_args: Dict of GN args.
+ **kwargs: Passed on to superclass.
+ """
+ super().__init__(self._substeps(), *args, **kwargs)
+ self._gn_args: Dict[str, Any] = gn_args or {}
+
+ @property
+ def gn_args(self) -> Dict[str, Any]:
+ return self._gn_args
+
+ def _gn_gen(self, ctx: PresubmitContext) -> PresubmitResult:
+ args: Dict[str, Any] = {}
+ if self._coverage_options is not None:
+ args['pw_toolchain_COVERAGE_ENABLED'] = True
+ args['pw_build_PYTHON_TEST_COVERAGE'] = True
+ args['pw_C_OPTIMIZATION_LEVELS'] = ('debug',)
+
+ if ctx.incremental:
+ args['pw_toolchain_PROFILE_SOURCE_FILES'] = [
+ f'//{x.relative_to(ctx.root)}' for x in ctx.paths
+ ]
+
+ args.update({k: _value(ctx, v) for k, v in self._gn_args.items()})
+ gn_gen(ctx, gn_check=False, **args) # type: ignore
+ return PresubmitResult.PASS
+
+ def _substeps(self) -> Iterator[SubStep]:
+ yield from self._package_substeps()
+
+ yield SubStep('gn gen', self._gn_gen)
+
+ yield from self._ninja_substeps()
+
+ # Run gn check after building so it can check generated files.
+ yield SubStep('gn check', gn_check)
+
+ yield from self._coverage_substeps()
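
Putting the refactor together, a step definition might now read as below (the
step name, GN args, and target are illustrative, not from this change).
Substeps run in order: package installs, gn gen, ninja, gn check, then the
coverage upload if CoverageOptions were given:

    from pw_presubmit.build import GnGenNinja

    example_build = GnGenNinja(
        name='gn_example_build',
        gn_args={
            'pw_EXAMPLE_FLAG': True,
            # Callables are resolved against the context by _value().
            'pw_EXAMPLE_ROOT': lambda ctx: f'"{ctx.root}"',
        },
        ninja_targets=('default',),
    )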
diff --git a/pw_presubmit/py/pw_presubmit/cli.py b/pw_presubmit/py/pw_presubmit/cli.py
index d29dd1606..477b640ef 100644
--- a/pw_presubmit/py/pw_presubmit/cli.py
+++ b/pw_presubmit/py/pw_presubmit/cli.py
@@ -14,6 +14,7 @@
"""Argument parsing code for presubmit checks."""
import argparse
+import fnmatch
import logging
import os
from pathlib import Path
@@ -56,7 +57,8 @@ def add_path_arguments(parser) -> None:
default=git_repo.TRACKING_BRANCH_ALIAS,
help=(
'Git revision against which to diff for changed files. '
- 'Default is the tracking branch of the current branch.'
+ 'Default is the tracking branch of the current branch: '
+ f'{git_repo.TRACKING_BRANCH_ALIAS}'
),
)
@@ -142,21 +144,28 @@ def _add_programs_arguments(
help='List all the available steps.',
)
- def presubmit_step(arg: str) -> presubmit.Check:
- if arg not in all_steps:
+ def presubmit_step(arg: str) -> List[presubmit.Check]:
+ """Return a list of matching presubmit steps."""
+ filtered_step_names = fnmatch.filter(all_steps.keys(), arg)
+
+ if not filtered_step_names:
all_step_names = ', '.join(sorted(all_steps.keys()))
raise argparse.ArgumentTypeError(
- f'{arg} is not the name of a presubmit step\n\n'
+ f'"{arg}" does not match the name of a presubmit step.\n\n'
f'Valid Steps:\n{all_step_names}'
)
- return all_steps[arg]
+
+ return list(all_steps[name] for name in filtered_step_names)
parser.add_argument(
'--step',
- action='append',
- choices=all_steps.values(),
+ action='extend',
default=[],
- help='Run specific steps instead of running a full program.',
+ help=(
+ 'Run specific steps instead of running a full program. Include an '
+            'asterisk to match more than one step name. For example: --step '
+ "'*_format'"
+ ),
type=presubmit_step,
)
@@ -194,12 +203,22 @@ def add_arguments(
"""Adds common presubmit check options to an argument parser."""
add_path_arguments(parser)
+
+ parser.add_argument(
+ '--dry-run',
+ action='store_true',
+ help=(
+            'Execute the presubmits in dry-run mode. System commands that '
+ 'pw_presubmit would run are instead printed to the terminal.'
+ ),
+ )
parser.add_argument(
'-k',
'--keep-going',
action='store_true',
help='Continue running presubmit steps after a failure.',
)
+
parser.add_argument(
'--continue-after-build-error',
action='store_true',
@@ -208,11 +227,20 @@ def add_arguments(
'failure.'
),
)
+
+ parser.add_argument(
+ '--rng-seed',
+ type=int,
+ default=1,
+ help='Seed for random number generators.',
+ )
+
parser.add_argument(
'--output-directory',
type=Path,
help=f'Output directory (default: {"<repo root>" / DEFAULT_PATH})',
)
+
parser.add_argument(
'--package-root',
type=Path,
@@ -242,6 +270,13 @@ def add_arguments(
)
+def _get_default_parser() -> argparse.ArgumentParser:
+ """Return all common pw presubmit args for sphinx documentation."""
+ parser = argparse.ArgumentParser(description="Runs local presubmit checks.")
+ add_arguments(parser)
+ return parser
+
+
def run( # pylint: disable=too-many-arguments
default_program: Optional[presubmit.Program],
program: Sequence[presubmit.Program],
@@ -254,6 +289,7 @@ def run( # pylint: disable=too-many-arguments
repositories: Collection[Path] = (),
only_list_steps=False,
list_steps: Optional[Callable[[], None]] = None,
+ dry_run: bool = False,
**other_args,
) -> int:
"""Processes arguments from add_arguments and runs the presubmit.
@@ -335,8 +371,19 @@ def run( # pylint: disable=too-many-arguments
output_directory=output_directory,
package_root=package_root,
substep=substep,
+ dry_run=dry_run,
**other_args,
):
return 0
+    # Check if this failed presubmit was run as a Git hook by looking for GIT_*
+ # environment variables. Mention using --no-verify to skip if so.
+ for env_var in os.environ:
+ if env_var.startswith('GIT'):
+ _LOG.info(
+ 'To skip these checks and continue with this push, '
+ 'add --no-verify to the git command'
+ )
+ break
+
return 1
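
The new --step matching is plain fnmatch globbing over the registered step
names, so exact names keep working unchanged (step names below are
illustrative):

    import fnmatch

    steps = ['gn_format', 'py_format', 'static_analysis']
    print(fnmatch.filter(steps, '*_format'))   # ['gn_format', 'py_format']
    print(fnmatch.filter(steps, 'py_format'))  # ['py_format']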
diff --git a/pw_presubmit/py/pw_presubmit/cpp_checks.py b/pw_presubmit/py/pw_presubmit/cpp_checks.py
index 849242455..a86cb092c 100644
--- a/pw_presubmit/py/pw_presubmit/cpp_checks.py
+++ b/pw_presubmit/py/pw_presubmit/cpp_checks.py
@@ -14,30 +14,144 @@
"""C++-related checks."""
import logging
+from pathlib import Path
+import re
+from typing import Callable, Optional, Iterable, Iterator
+from pw_presubmit.presubmit import (
+ Check,
+ filter_paths,
+)
+from pw_presubmit.presubmit_context import PresubmitContext
from pw_presubmit import (
build,
- Check,
format_code,
- PresubmitContext,
- filter_paths,
+ presubmit_context,
)
_LOG: logging.Logger = logging.getLogger(__name__)
+def _fail(ctx, error, path):
+ ctx.fail(error, path=path)
+ with open(ctx.failure_summary_log, 'a') as outs:
+ print(f'{path}\n{error}\n', file=outs)
+
+
@filter_paths(endswith=format_code.CPP_HEADER_EXTS, exclude=(r'\.pb\.h$',))
def pragma_once(ctx: PresubmitContext) -> None:
"""Presubmit check that ensures all header files contain '#pragma once'."""
+ ctx.paths = presubmit_context.apply_exclusions(ctx)
+
for path in ctx.paths:
_LOG.debug('Checking %s', path)
- with open(path) as file:
+ with path.open() as file:
for line in file:
if line.startswith('#pragma once'):
break
else:
- ctx.fail('#pragma once is missing!', path=path)
+ _fail(ctx, '#pragma once is missing!', path=path)
+
+
+def include_guard_check(
+ guard: Optional[Callable[[Path], str]] = None,
+ allow_pragma_once: bool = True,
+) -> Check:
+ """Create an include guard check.
+
+ Args:
+ guard: Callable that generates an expected include guard name for the
+ given Path. If None, any include guard is acceptable, as long as
+ it's consistent between the '#ifndef' and '#define' lines.
+ allow_pragma_once: Whether to allow headers to use '#pragma once'
+ instead of '#ifndef'/'#define'.
+ """
+
+ def stripped_non_comment_lines(iterable: Iterable[str]) -> Iterator[str]:
+ """Yield non-comment non-empty lines from a C++ file."""
+ multi_line_comment = False
+ for line in iterable:
+ line = line.strip()
+ if not line:
+ continue
+ if line.startswith('//'):
+ continue
+ if line.startswith('/*'):
+ multi_line_comment = True
+ if multi_line_comment:
+ if line.endswith('*/'):
+ multi_line_comment = False
+ continue
+ yield line
+
+ def check_path(ctx: PresubmitContext, path: Path) -> None:
+ """Check if path has a valid include guard."""
+
+ _LOG.debug('checking %s', path)
+ expected: Optional[str] = None
+ if guard:
+ expected = guard(path)
+ _LOG.debug('expecting guard %r', expected)
+
+ with path.open() as ins:
+ iterable = stripped_non_comment_lines(ins)
+ first_line = next(iterable, '')
+ _LOG.debug('first line %r', first_line)
+
+ if allow_pragma_once and first_line.startswith('#pragma once'):
+ _LOG.debug('found %r', first_line)
+ return
+
+ if expected:
+ ifndef_expected = f'#ifndef {expected}'
+ if not re.match(rf'^#ifndef {expected}$', first_line):
+ _fail(
+ ctx,
+ 'Include guard is missing! Expected: '
+ f'{ifndef_expected!r}, Found: {first_line!r}',
+ path=path,
+ )
+ return
+
+ else:
+ match = re.match(
+ r'^#\s*ifndef\s+([a-zA-Z_][a-zA-Z_0-9]*)$',
+ first_line,
+ )
+ if not match:
+ _fail(
+ ctx,
+ 'Include guard is missing! Expected "#ifndef" line, '
+ f'Found: {first_line!r}',
+ path=path,
+ )
+ return
+ expected = match.group(1)
+
+ second_line = next(iterable, '')
+ _LOG.debug('second line %r', second_line)
+
+ if not re.match(rf'^#\s*define\s+{expected}$', second_line):
+ define_expected = f'#define {expected}'
+ _fail(
+ ctx,
+ 'Include guard is missing! Expected: '
+ f'{define_expected!r}, Found: {second_line!r}',
+ path=path,
+ )
+ return
+
+ _LOG.debug('passed')
+
+ @filter_paths(endswith=format_code.CPP_HEADER_EXTS, exclude=(r'\.pb\.h$',))
+ def include_guard(ctx: PresubmitContext):
+ """Check that all header files contain an include guard."""
+ ctx.paths = presubmit_context.apply_exclusions(ctx)
+ for path in ctx.paths:
+ check_path(ctx, path)
+
+ return include_guard
@Check
@@ -76,6 +190,6 @@ def runtime_sanitizers(ctx: PresubmitContext) -> None:
def all_sanitizers():
- # TODO(b/234876100): msan will not work until the C++ standard library
+ # TODO: b/234876100 - msan will not work until the C++ standard library
# included in the sysroot has a variant built with msan.
return [asan, tsan, ubsan, runtime_sanitizers]
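
Sketch of wiring up the new include_guard_check factory; the guard-naming
scheme below is an assumption for illustration, not something this change
prescribes:

    import re
    from pathlib import Path

    from pw_presubmit import cpp_checks

    def _guard(path: Path) -> str:
        # e.g. public/pw_foo/bar.h -> BAR_H_
        return re.sub(r'[^A-Za-z0-9]', '_', path.name).upper() + '_'

    include_guard = cpp_checks.include_guard_check(
        guard=_guard,
        allow_pragma_once=False,
    )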
diff --git a/pw_presubmit/py/pw_presubmit/format_code.py b/pw_presubmit/py/pw_presubmit/format_code.py
index 9b073ea05..df81ad3f9 100755
--- a/pw_presubmit/py/pw_presubmit/format_code.py
+++ b/pw_presubmit/py/pw_presubmit/format_code.py
@@ -22,10 +22,12 @@ code. These tools must be available on the path when this script is invoked!
import argparse
import collections
import difflib
+import json
import logging
import os
from pathlib import Path
import re
+import shutil
import subprocess
import sys
import tempfile
@@ -44,26 +46,33 @@ from typing import (
Union,
)
-try:
- import pw_presubmit
-except ImportError:
- # Append the pw_presubmit package path to the module search path to allow
- # running this module without installing the pw_presubmit package.
- sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
- import pw_presubmit
-
import pw_cli.color
import pw_cli.env
-from pw_presubmit.presubmit import FileFilter
-from pw_presubmit import (
- cli,
+import pw_env_setup.config_file
+from pw_presubmit.presubmit import (
+ FileFilter,
+ filter_paths,
+)
+from pw_presubmit.presubmit_context import (
FormatContext,
FormatOptions,
+ PresubmitContext,
+ PresubmitFailure,
+)
+from pw_presubmit import (
+ cli,
git_repo,
owners_checks,
- PresubmitContext,
+ presubmit_context,
)
-from pw_presubmit.tools import exclude_paths, file_summary, log_run, plural
+from pw_presubmit.tools import (
+ exclude_paths,
+ file_summary,
+ log_run,
+ plural,
+ colorize_diff,
+)
+from pw_presubmit.rst_format import reformat_rst
_LOG: logging.Logger = logging.getLogger(__name__)
_COLOR = pw_cli.color.colors()
@@ -72,27 +81,15 @@ _DEFAULT_PATH = Path('out', 'format')
_Context = Union[PresubmitContext, FormatContext]
-def _colorize_diff_line(line: str) -> str:
- if line.startswith('--- ') or line.startswith('+++ '):
- return _COLOR.bold_white(line)
- if line.startswith('-'):
- return _COLOR.red(line)
- if line.startswith('+'):
- return _COLOR.green(line)
- if line.startswith('@@ '):
- return _COLOR.cyan(line)
- return line
-
-
-def colorize_diff(lines: Iterable[str]) -> str:
- """Takes a diff str or list of str lines and returns a colorized version."""
- if isinstance(lines, str):
- lines = lines.splitlines(True)
-
- return ''.join(_colorize_diff_line(line) for line in lines)
+def _ensure_newline(orig: bytes) -> bytes:
+ if orig.endswith(b'\n'):
+ return orig
+ return orig + b'\nNo newline at end of file\n'
def _diff(path, original: bytes, formatted: bytes) -> str:
+ original = _ensure_newline(original)
+ formatted = _ensure_newline(formatted)
return ''.join(
difflib.unified_diff(
original.decode(errors='replace').splitlines(True),
@@ -103,24 +100,31 @@ def _diff(path, original: bytes, formatted: bytes) -> str:
)
-Formatter = Callable[[str, bytes], bytes]
+FormatterT = Callable[[str, bytes], bytes]
-def _diff_formatted(path, formatter: Formatter) -> Optional[str]:
+def _diff_formatted(
+ path, formatter: FormatterT, dry_run: bool = False
+) -> Optional[str]:
"""Returns a diff comparing a file to its formatted version."""
with open(path, 'rb') as fd:
original = fd.read()
formatted = formatter(path, original)
+ if dry_run:
+ return None
+
return None if formatted == original else _diff(path, original, formatted)
-def _check_files(files, formatter: Formatter) -> Dict[Path, str]:
+def _check_files(
+ files, formatter: FormatterT, dry_run: bool = False
+) -> Dict[Path, str]:
errors = {}
for path in files:
- difference = _diff_formatted(path, formatter)
+ difference = _diff_formatted(path, formatter, dry_run)
if difference:
errors[path] = difference
@@ -138,7 +142,11 @@ def _clang_format(*args: Union[Path, str], **kwargs) -> bytes:
def clang_format_check(ctx: _Context) -> Dict[Path, str]:
"""Checks formatting; returns {path: diff} for files with bad formatting."""
- return _check_files(ctx.paths, lambda path, _: _clang_format(path))
+ return _check_files(
+ ctx.paths,
+ lambda path, _: _clang_format(path),
+ ctx.dry_run,
+ )
def clang_format_fix(ctx: _Context) -> Dict[Path, str]:
@@ -147,6 +155,30 @@ def clang_format_fix(ctx: _Context) -> Dict[Path, str]:
return {}
+def _typescript_format(*args: Union[Path, str], **kwargs) -> bytes:
+ return log_run(
+ ['npx', 'prettier@3.0.1', *args],
+ stdout=subprocess.PIPE,
+ check=True,
+ **kwargs,
+ ).stdout
+
+
+def typescript_format_check(ctx: _Context) -> Dict[Path, str]:
+ """Checks formatting; returns {path: diff} for files with bad formatting."""
+ return _check_files(
+ ctx.paths,
+ lambda path, _: _typescript_format(path),
+ ctx.dry_run,
+ )
+
+
+def typescript_format_fix(ctx: _Context) -> Dict[Path, str]:
+ """Fixes formatting for the provided files in place."""
+ _typescript_format('--write', *ctx.paths)
+ return {}
+
+
def check_gn_format(ctx: _Context) -> Dict[Path, str]:
"""Checks formatting; returns {path: diff} for files with bad formatting."""
return _check_files(
@@ -157,6 +189,7 @@ def check_gn_format(ctx: _Context) -> Dict[Path, str]:
stdout=subprocess.PIPE,
check=True,
).stdout,
+ ctx.dry_run,
)
@@ -185,7 +218,7 @@ def check_bazel_format(ctx: _Context) -> Dict[Path, str]:
errors[Path(path)] = stderr
return build.read_bytes()
- result = _check_files(ctx.paths, _format_temp)
+ result = _check_files(ctx.paths, _format_temp, ctx.dry_run)
result.update(errors)
return result
@@ -215,6 +248,7 @@ def check_go_format(ctx: _Context) -> Dict[Path, str]:
lambda path, _: log_run(
['gofmt', path], stdout=subprocess.PIPE, check=True
).stdout,
+ ctx.dry_run,
)
@@ -224,7 +258,7 @@ def fix_go_format(ctx: _Context) -> Dict[Path, str]:
return {}
-# TODO(b/259595799) Remove yapf support.
+# TODO: b/259595799 - Remove yapf support.
def _yapf(*args, **kwargs) -> subprocess.CompletedProcess:
return log_run(
['python', '-m', 'yapf', '--parallel', *args],
@@ -268,6 +302,20 @@ def fix_py_format_yapf(ctx: _Context) -> Dict[Path, str]:
def _enumerate_black_configs() -> Iterable[Path]:
+ config = pw_env_setup.config_file.load()
+ black_config_file = (
+ config.get('pw', {})
+ .get('pw_presubmit', {})
+ .get('format', {})
+ .get('black_config_file', {})
+ )
+ if black_config_file:
+ explicit_path = Path(black_config_file)
+ if not explicit_path.is_file():
+ raise ValueError(f'Black config file not found: {explicit_path}')
+ yield explicit_path
+ return # If an explicit path is provided, don't try implicit paths.
+
if directory := os.environ.get('PW_PROJECT_ROOT'):
yield Path(directory, '.black.toml')
yield Path(directory, 'pyproject.toml')
@@ -335,6 +383,7 @@ def check_py_format_black(ctx: _Context) -> Dict[Path, str]:
result = _check_files(
[x for x in ctx.paths if str(x).endswith(paths)],
_format_temp,
+ ctx.dry_run,
)
result.update(errors)
return result
@@ -399,6 +448,47 @@ def _check_trailing_space(paths: Iterable[Path], fix: bool) -> Dict[Path, str]:
return errors
+def _format_json(contents: bytes) -> bytes:
+ return json.dumps(json.loads(contents), indent=2).encode() + b'\n'
+
+
+def _json_error(exc: json.JSONDecodeError, path: Path) -> str:
+ return f'{path}: {exc.msg} {exc.lineno}:{exc.colno}\n'
+
+
+def check_json_format(ctx: _Context) -> Dict[Path, str]:
+ errors = {}
+
+ for path in ctx.paths:
+ orig = path.read_bytes()
+ try:
+ formatted = _format_json(orig)
+ except json.JSONDecodeError as exc:
+ errors[path] = _json_error(exc, path)
+ continue
+
+ if orig != formatted:
+ errors[path] = _diff(path, orig, formatted)
+
+ return errors
+
+
+def fix_json_format(ctx: _Context) -> Dict[Path, str]:
+ errors = {}
+ for path in ctx.paths:
+ orig = path.read_bytes()
+ try:
+ formatted = _format_json(orig)
+ except json.JSONDecodeError as exc:
+ errors[path] = _json_error(exc, path)
+ continue
+
+ if orig != formatted:
+ path.write_bytes(formatted)
+
+ return errors
+
+
def check_trailing_space(ctx: _Context) -> Dict[Path, str]:
return _check_trailing_space(ctx.paths, fix=False)
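
The JSON normalization that check_json_format and fix_json_format share,
shown standalone; keys keep their original order and only whitespace changes:

    import json

    orig = b'{"b":1, "a":[1,2]}'
    formatted = json.dumps(json.loads(orig), indent=2).encode() + b'\n'
    # b'{\n  "b": 1,\n  "a": [\n    1,\n    2\n  ]\n}\n'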
@@ -408,6 +498,24 @@ def fix_trailing_space(ctx: _Context) -> Dict[Path, str]:
return {}
+def rst_format_check(ctx: _Context) -> Dict[Path, str]:
+ errors = {}
+ for path in ctx.paths:
+ result = reformat_rst(path, diff=True, in_place=False)
+ if result:
+ errors[path] = ''.join(result)
+ return errors
+
+
+def rst_format_fix(ctx: _Context) -> Dict[Path, str]:
+ errors = {}
+ for path in ctx.paths:
+ result = reformat_rst(path, diff=True, in_place=True)
+ if result:
+ errors[path] = ''.join(result)
+ return errors
+
+
def print_format_check(
errors: Dict[Path, str],
show_fix_commands: bool,
@@ -457,7 +565,7 @@ class CodeFormat(NamedTuple):
@property
def extensions(self):
- # TODO(b/23842636): Switch calls of this to using 'filter' and remove.
+ # TODO: b/23842636 - Switch calls of this to using 'filter' and remove.
return self.filter.endswith
@@ -467,7 +575,7 @@ CPP_SOURCE_EXTS = frozenset(
)
CPP_EXTS = CPP_HEADER_EXTS.union(CPP_SOURCE_EXTS)
CPP_FILE_FILTER = FileFilter(
- endswith=CPP_EXTS, exclude=(r'\.pb\.h$', r'\.pb\.c$')
+ endswith=CPP_EXTS, exclude=[r'\.pb\.h$', r'\.pb\.c$']
)
C_FORMAT = CodeFormat(
@@ -476,102 +584,133 @@ C_FORMAT = CodeFormat(
PROTO_FORMAT: CodeFormat = CodeFormat(
'Protocol buffer',
- FileFilter(endswith=('.proto',)),
+ FileFilter(endswith=['.proto']),
clang_format_check,
clang_format_fix,
)
JAVA_FORMAT: CodeFormat = CodeFormat(
'Java',
- FileFilter(endswith=('.java',)),
+ FileFilter(endswith=['.java']),
clang_format_check,
clang_format_fix,
)
JAVASCRIPT_FORMAT: CodeFormat = CodeFormat(
'JavaScript',
- FileFilter(endswith=('.js',)),
- clang_format_check,
- clang_format_fix,
+ FileFilter(endswith=['.js']),
+ typescript_format_check,
+ typescript_format_fix,
+)
+
+TYPESCRIPT_FORMAT: CodeFormat = CodeFormat(
+ 'TypeScript',
+ FileFilter(endswith=['.ts']),
+ typescript_format_check,
+ typescript_format_fix,
+)
+
+# TODO: b/308948504 - Add real code formatting support for CSS
+CSS_FORMAT: CodeFormat = CodeFormat(
+ 'css',
+ FileFilter(endswith=['.css']),
+ check_trailing_space,
+ fix_trailing_space,
)
GO_FORMAT: CodeFormat = CodeFormat(
- 'Go', FileFilter(endswith=('.go',)), check_go_format, fix_go_format
+ 'Go', FileFilter(endswith=['.go']), check_go_format, fix_go_format
)
PYTHON_FORMAT: CodeFormat = CodeFormat(
'Python',
- FileFilter(endswith=('.py',)),
+ FileFilter(endswith=['.py']),
check_py_format,
fix_py_format,
)
GN_FORMAT: CodeFormat = CodeFormat(
- 'GN', FileFilter(endswith=('.gn', '.gni')), check_gn_format, fix_gn_format
+ 'GN', FileFilter(endswith=['.gn', '.gni']), check_gn_format, fix_gn_format
)
BAZEL_FORMAT: CodeFormat = CodeFormat(
'Bazel',
- FileFilter(endswith=('BUILD', '.bazel', '.bzl'), name=('WORKSPACE')),
+ FileFilter(endswith=['.bazel', '.bzl'], name=['^BUILD$', '^WORKSPACE$']),
check_bazel_format,
fix_bazel_format,
)
COPYBARA_FORMAT: CodeFormat = CodeFormat(
'Copybara',
- FileFilter(endswith=('.bara.sky',)),
+ FileFilter(endswith=['.bara.sky']),
check_bazel_format,
fix_bazel_format,
)
-# TODO(b/234881054): Add real code formatting support for CMake
+# TODO: b/234881054 - Add real code formatting support for CMake
CMAKE_FORMAT: CodeFormat = CodeFormat(
'CMake',
- FileFilter(endswith=('CMakeLists.txt', '.cmake')),
+ FileFilter(endswith=['.cmake'], name=['^CMakeLists.txt$']),
check_trailing_space,
fix_trailing_space,
)
RST_FORMAT: CodeFormat = CodeFormat(
'reStructuredText',
- FileFilter(endswith=('.rst',)),
- check_trailing_space,
- fix_trailing_space,
+ FileFilter(endswith=['.rst']),
+ rst_format_check,
+ rst_format_fix,
)
MARKDOWN_FORMAT: CodeFormat = CodeFormat(
'Markdown',
- FileFilter(endswith=('.md',)),
+ FileFilter(endswith=['.md']),
check_trailing_space,
fix_trailing_space,
)
OWNERS_CODE_FORMAT = CodeFormat(
'OWNERS',
- filter=FileFilter(name=('OWNERS',)),
+ filter=FileFilter(name=['^OWNERS$']),
check=check_owners_format,
fix=fix_owners_format,
)
-CODE_FORMATS: Tuple[CodeFormat, ...] = (
- # keep-sorted: start
- BAZEL_FORMAT,
- CMAKE_FORMAT,
- COPYBARA_FORMAT,
- C_FORMAT,
- GN_FORMAT,
- GO_FORMAT,
- JAVASCRIPT_FORMAT,
- JAVA_FORMAT,
- MARKDOWN_FORMAT,
- OWNERS_CODE_FORMAT,
- PROTO_FORMAT,
- PYTHON_FORMAT,
- RST_FORMAT,
- # keep-sorted: end
+JSON_FORMAT: CodeFormat = CodeFormat(
+ 'JSON',
+ FileFilter(endswith=['.json']),
+ check=check_json_format,
+ fix=fix_json_format,
)
-# TODO(b/264578594) Remove these lines when these globals aren't referenced.
+CODE_FORMATS: Tuple[CodeFormat, ...] = tuple(
+ filter(
+ None,
+ (
+ # keep-sorted: start
+ BAZEL_FORMAT,
+ CMAKE_FORMAT,
+ COPYBARA_FORMAT,
+ CSS_FORMAT,
+ C_FORMAT,
+ GN_FORMAT,
+ GO_FORMAT,
+ JAVASCRIPT_FORMAT if shutil.which('npx') else None,
+ JAVA_FORMAT,
+ JSON_FORMAT,
+ MARKDOWN_FORMAT,
+ OWNERS_CODE_FORMAT,
+ PROTO_FORMAT,
+ PYTHON_FORMAT,
+ RST_FORMAT,
+ TYPESCRIPT_FORMAT if shutil.which('npx') else None,
+ # keep-sorted: end
+ ),
+ )
+)
+
+
+# TODO: b/264578594 - Remove these lines when these globals aren't referenced.
CODE_FORMATS_WITH_BLACK: Tuple[CodeFormat, ...] = CODE_FORMATS
CODE_FORMATS_WITH_YAPF: Tuple[CodeFormat, ...] = CODE_FORMATS
@@ -591,8 +730,9 @@ def presubmit_check(
file_filter = FileFilter(**vars(code_format.filter))
file_filter.exclude += tuple(re.compile(e) for e in exclude)
- @pw_presubmit.filter_paths(file_filter=file_filter)
- def check_code_format(ctx: pw_presubmit.PresubmitContext):
+ @filter_paths(file_filter=file_filter)
+ def check_code_format(ctx: PresubmitContext):
+ ctx.paths = presubmit_context.apply_exclusions(ctx)
errors = code_format.check(ctx)
print_format_check(
errors,
@@ -610,7 +750,7 @@ def presubmit_check(
file=outs,
)
- raise pw_presubmit.PresubmitFailure
+ raise PresubmitFailure
language = code_format.language.lower().replace('+', 'p').replace(' ', '_')
check_code_format.name = f'{language}_format'
@@ -646,10 +786,16 @@ class CodeFormatter:
package_root: Optional[Path] = None,
):
self.root = root
- self.paths = list(files)
self._formats: Dict[CodeFormat, List] = collections.defaultdict(list)
self.root_output_dir = output_dir
self.package_root = package_root or output_dir / 'packages'
+ self._format_options = FormatOptions.load()
+ raw_paths = files
+ self.paths: Tuple[Path, ...] = self._format_options.filter_paths(files)
+
+ filtered_paths = set(raw_paths) - set(self.paths)
+ for path in sorted(filtered_paths):
+ _LOG.debug('filtered out %s', path)
for path in self.paths:
for code_format in code_formats:
@@ -671,7 +817,7 @@ class CodeFormatter:
output_dir=outdir,
paths=tuple(self._formats[code_format]),
package_root=self.package_root,
- format_options=FormatOptions.load(),
+ format_options=self._format_options,
)
def check(self) -> Dict[Path, str]:
@@ -882,19 +1028,6 @@ def main() -> int:
return format_paths_in_repo(**vars(arguments(git_paths=True).parse_args()))
-def _pigweed_upstream_main() -> int:
- """Check and fix formatting for source files in upstream Pigweed.
-
- Excludes third party sources.
- """
- args = arguments(git_paths=True).parse_args()
-
- # Exclude paths with third party code from formatting.
- args.exclude.append(re.compile('^third_party/fuchsia/repo/'))
-
- return format_paths_in_repo(**vars(args))
-
-
if __name__ == '__main__':
try:
# If pw_cli is available, use it to initialize logs.
diff --git a/pw_presubmit/py/pw_presubmit/git_repo.py b/pw_presubmit/py/pw_presubmit/git_repo.py
index a3847f8ca..0fb82568c 100644
--- a/pw_presubmit/py/pw_presubmit/git_repo.py
+++ b/pw_presubmit/py/pw_presubmit/git_repo.py
@@ -26,6 +26,7 @@ PatternOrStr = Union[Pattern, str]
TRACKING_BRANCH_ALIAS = '@{upstream}'
_TRACKING_BRANCH_ALIASES = TRACKING_BRANCH_ALIAS, '@{u}'
+_NON_TRACKING_FALLBACK = 'HEAD~10'
def git_stdout(
@@ -37,6 +38,7 @@ def git_stdout(
stdout=subprocess.PIPE,
stderr=None if show_stderr else subprocess.DEVNULL,
check=True,
+ ignore_dry_run=True,
)
.stdout.decode()
.strip()
@@ -73,7 +75,10 @@ def _diff_names(
yield full_path
-def tracking_branch(repo_path: Optional[Path] = None) -> Optional[str]:
+def tracking_branch(
+ repo_path: Optional[Path] = None,
+ fallback: Optional[str] = None,
+) -> Optional[str]:
"""Returns the tracking branch of the current branch.
Since most callers of this function can safely handle a return value of
@@ -105,7 +110,7 @@ def tracking_branch(repo_path: Optional[Path] = None) -> Optional[str]:
)
except subprocess.CalledProcessError:
- return None
+ return fallback
def list_files(
@@ -127,7 +132,7 @@ def list_files(
repo_path = Path.cwd()
if commit in _TRACKING_BRANCH_ALIASES:
- commit = tracking_branch(repo_path)
+ commit = tracking_branch(repo_path, fallback=_NON_TRACKING_FALLBACK)
if commit:
try:
@@ -164,6 +169,7 @@ def has_uncommitted_changes(repo: Optional[Path] = None) -> bool:
['git', '-C', repo, 'update-index', '-q', '--refresh'],
capture_output=True,
check=True,
+ ignore_dry_run=True,
)
except subprocess.CalledProcessError as err:
if err.stderr or i == retries - 1:
@@ -172,7 +178,8 @@ def has_uncommitted_changes(repo: Optional[Path] = None) -> bool:
# diff-index exits with 1 if there are uncommitted changes.
return (
log_run(
- ['git', '-C', repo, 'diff-index', '--quiet', 'HEAD', '--']
+ ['git', '-C', repo, 'diff-index', '--quiet', 'HEAD', '--'],
+ ignore_dry_run=True,
).returncode
== 1
)
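
The fallback in practice (sketch): on a branch with no upstream configured,
the rev-parse fails and the caller now gets the fallback instead of None:

    from pw_presubmit import git_repo

    branch = git_repo.tracking_branch(fallback='HEAD~10')
    # -> the upstream branch name, or 'HEAD~10' if none is configured,
    # matching what list_files() now does via _NON_TRACKING_FALLBACK.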
diff --git a/pw_presubmit/py/pw_presubmit/gitmodules.py b/pw_presubmit/py/pw_presubmit/gitmodules.py
index 67e9854e6..5796a496d 100644
--- a/pw_presubmit/py/pw_presubmit/gitmodules.py
+++ b/pw_presubmit/py/pw_presubmit/gitmodules.py
@@ -19,18 +19,22 @@ from pathlib import Path
from typing import Callable, Dict, Optional, Sequence
import urllib.parse
-from pw_presubmit import (
- git_repo,
+from pw_presubmit.presubmit import filter_paths
+from pw_presubmit.presubmit_context import (
PresubmitContext,
PresubmitFailure,
- filter_paths,
)
+from pw_presubmit import git_repo, plural, presubmit_context
+
_LOG: logging.Logger = logging.getLogger(__name__)
@dataclasses.dataclass
class Config:
+ # Allow submodules to exist in any form.
+ allow_submodules: bool = True
+
# Allow direct references to non-Google hosts.
allow_non_googlesource_hosts: bool = False
@@ -89,6 +93,12 @@ def process_gitmodules(ctx: PresubmitContext, config: Config, path: Path):
_LOG.debug('Evaluating path %s', path)
submodules: Dict[str, Dict[str, str]] = _parse_gitmodules(path)
+ if submodules and not config.allow_submodules:
+ ctx.fail(
+ f'submodules are not permitted but '
+ f'{plural(submodules, "submodule", exist=True)} {tuple(submodules)}'
+ )
+
assert isinstance(config.allowed_googlesource_hosts, (list, tuple))
for allowed in config.allowed_googlesource_hosts:
if '.' in allowed or '-review' in allowed:
@@ -186,6 +196,8 @@ def create(config: Config = Config()):
@filter_paths(endswith='.gitmodules')
def gitmodules(ctx: PresubmitContext):
"""Check various rules for .gitmodules files."""
+ ctx.paths = presubmit_context.apply_exclusions(ctx)
+
for path in ctx.paths:
process_gitmodules(ctx, config, path)
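
With the new flag, a downstream project that bans submodules outright can
register the check like this (hypothetical usage):

    from pw_presubmit import gitmodules

    no_submodules = gitmodules.create(
        gitmodules.Config(allow_submodules=False)
    )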
diff --git a/pw_presubmit/py/pw_presubmit/inclusive_language.py b/pw_presubmit/py/pw_presubmit/inclusive_language.py
index 8ef0a9c00..34fde80f2 100644
--- a/pw_presubmit/py/pw_presubmit/inclusive_language.py
+++ b/pw_presubmit/py/pw_presubmit/inclusive_language.py
@@ -18,7 +18,7 @@ from pathlib import Path
import re
from typing import Dict, List, Union
-from . import presubmit
+from . import presubmit, presubmit_context
# List borrowed from Android:
# https://source.android.com/setup/contribute/respectful-code
@@ -107,13 +107,20 @@ class LineMatch:
@presubmit.check(name='inclusive_language')
def presubmit_check(
- ctx: presubmit.PresubmitContext,
+ ctx: presubmit_context.PresubmitContext,
words_regex=NON_INCLUSIVE_WORDS_REGEX,
):
"""Presubmit check that ensures files do not contain banned words."""
+    # inclusive_language runs no subprocesses, so a dry run has nothing to
+    # show; skip this check entirely when dry_run is set.
+ if ctx.dry_run:
+ return
+
found_words: Dict[Path, List[Union[PathMatch, LineMatch]]] = {}
+ ctx.paths = presubmit_context.apply_exclusions(ctx)
+
for path in ctx.paths:
match = words_regex.search(str(path.relative_to(ctx.root)))
if match:
@@ -174,7 +181,7 @@ ignored with "inclusive-language: disable" and reenabled with
)
# Re-enable just in case: inclusive-language: enable.
- raise presubmit.PresubmitFailure
+ raise presubmit_context.PresubmitFailure
def inclusive_language_checker(*words):
@@ -183,7 +190,7 @@ def inclusive_language_checker(*words):
regex = _process_inclusive_language(*words)
def inclusive_language( # pylint: disable=redefined-outer-name
- ctx: presubmit.PresubmitContext,
+ ctx: presubmit_context.PresubmitContext,
):
globals()['inclusive_language'](ctx, regex)
diff --git a/pw_presubmit/py/pw_presubmit/javascript_checks.py b/pw_presubmit/py/pw_presubmit/javascript_checks.py
new file mode 100644
index 000000000..3dc8b8f68
--- /dev/null
+++ b/pw_presubmit/py/pw_presubmit/javascript_checks.py
@@ -0,0 +1,44 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""JavaScript and TypeScript validity check."""
+
+from pw_presubmit import presubmit_context
+from pw_presubmit.presubmit import (
+ Check,
+ filter_paths,
+)
+from pw_presubmit.presubmit_context import (
+ PresubmitContext,
+ PresubmitFailure,
+)
+from pw_presubmit.tools import log_run
+
+
+@filter_paths(endswith=('.js', '.ts'))
+@Check
+def eslint(ctx: PresubmitContext):
+ """Presubmit check that ensures JavaScript files are valid."""
+
+ ctx.paths = presubmit_context.apply_exclusions(ctx)
+
+ # Check if npm deps are installed.
+ npm_list = log_run(['npm', 'list'])
+ if npm_list.returncode != 0:
+ npm_install = log_run(['npm', 'install'])
+ if npm_install.returncode != 0:
+ raise PresubmitFailure('npm install failed.')
+
+ result = log_run(['npx', 'eslint@8.47.0', *ctx.paths])
+ if result.returncode != 0:
+        raise PresubmitFailure('eslint identified issues.')
diff --git a/pw_presubmit/py/pw_presubmit/json_check.py b/pw_presubmit/py/pw_presubmit/json_check.py
new file mode 100644
index 000000000..09ab1f715
--- /dev/null
+++ b/pw_presubmit/py/pw_presubmit/json_check.py
@@ -0,0 +1,38 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""JSON validity check."""
+
+import json
+
+from . import presubmit, presubmit_context
+
+
+@presubmit.filter_paths(endswith=('.json',))
+@presubmit.check(name='json_check')
+def presubmit_check(ctx: presubmit_context.PresubmitContext):
+ """Presubmit check that ensures JSON files are valid."""
+
+ ctx.paths = presubmit_context.apply_exclusions(ctx)
+
+ for path in ctx.paths:
+ with path.open('r') as ins:
+ try:
+ json.load(ins)
+ except json.decoder.JSONDecodeError as exc:
+ intro_line = f'failed to parse {path.relative_to(ctx.root)}'
+ with ctx.failure_summary_log.open('w') as outs:
+ print(intro_line, file=outs)
+ print(exc, file=outs)
+ ctx.fail(intro_line)
+ ctx.fail(str(exc))
diff --git a/pw_presubmit/py/pw_presubmit/keep_sorted.py b/pw_presubmit/py/pw_presubmit/keep_sorted.py
index a1068f30c..fc2fab090 100644
--- a/pw_presubmit/py/pw_presubmit/keep_sorted.py
+++ b/pw_presubmit/py/pw_presubmit/keep_sorted.py
@@ -34,11 +34,10 @@ from typing import (
)
import pw_cli
-from . import cli, format_code, git_repo, presubmit, tools
+from . import cli, git_repo, presubmit, presubmit_context, tools
DEFAULT_PATH = Path('out', 'presubmit', 'keep_sorted')
-_COLOR = pw_cli.color.colors()
_LOG: logging.Logger = logging.getLogger(__name__)
# Ignore a whole section. Please do not change the order of these lines.
@@ -182,7 +181,7 @@ class _FileSorter:
if not block.allow_dupes:
lines = list({x.full: x for x in lines}.values())
- StrLinePair = Tuple[str, _Line]
+ StrLinePair = Tuple[str, _Line] # pylint: disable=invalid-name
sort_key_funcs: List[Callable[[StrLinePair], StrLinePair]] = []
if block.ignored_prefixes:
@@ -377,7 +376,7 @@ def _process_files(
if not errors:
return errors
- ctx.fail(f'Found {len(errors)} files with keep-sorted errors:')
+ ctx.fail(f'Found {tools.plural(errors, "file")} with keep-sorted errors:')
with ctx.failure_summary_log.open('w') as outs:
for path, diffs in errors.items():
@@ -390,7 +389,7 @@ def _process_files(
)
outs.write(diff)
- print(format_code.colorize_diff(diff))
+ print(tools.colorize_diff(diff))
return errors
@@ -399,6 +398,7 @@ def _process_files(
def presubmit_check(ctx: presubmit.PresubmitContext) -> None:
"""Presubmit check that ensures specified lists remain sorted."""
+ ctx.paths = presubmit_context.apply_exclusions(ctx)
errors = _process_files(ctx)
if errors:
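The failure message above now uses tools.plural, so it reads correctly for one or many files. A simplified stand-in for that helper (the real implementation lives in pw_presubmit.tools and handles more cases):

    def plural(items, noun):
        # '1 file' or '3 files', depending on the count.
        count = len(items)
        return f'{count} {noun}' + ('' if count == 1 else 's')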
diff --git a/pw_presubmit/py/pw_presubmit/module_owners.py b/pw_presubmit/py/pw_presubmit/module_owners.py
index f16d6e3d4..0df437e75 100644
--- a/pw_presubmit/py/pw_presubmit/module_owners.py
+++ b/pw_presubmit/py/pw_presubmit/module_owners.py
@@ -17,11 +17,11 @@ import logging
from pathlib import Path
from typing import Callable, Optional, Tuple
-from pw_presubmit import (
+from pw_presubmit.presubmit_context import (
PresubmitContext,
PresubmitFailure,
- presubmit,
)
+from pw_presubmit import presubmit
_LOG: logging.Logger = logging.getLogger(__name__)
diff --git a/pw_presubmit/py/pw_presubmit/ninja_parser.py b/pw_presubmit/py/pw_presubmit/ninja_parser.py
index 131137df6..487af1754 100644
--- a/pw_presubmit/py/pw_presubmit/ninja_parser.py
+++ b/pw_presubmit/py/pw_presubmit/ninja_parser.py
@@ -16,40 +16,81 @@
# https://fuchsia.googlesource.com/infra/recipes/+/336933647862a1a9718b4ca18f0a67e89c2419f8/recipe_modules/ninja/resources/ninja_wrapper.py
"""Extracts a concise error from a ninja log."""
+import argparse
+import logging
from pathlib import Path
import re
+from typing import List, IO
+import sys
-_RULE_RE = re.compile(r'^\s*\[\d+/\d+\] (\S+)')
-_FAILED_RE = re.compile(r'^\s*FAILED: (.*)$')
-_FAILED_END_RE = re.compile(r'^\s*ninja: build stopped:.*')
+_LOG: logging.Logger = logging.getLogger(__name__)
+# Assume any of these lines could be prefixed with ANSI color codes.
+_COLOR_CODES_PREFIX = r'^(?:\x1b)?(?:\[\d+m\s*)?'
-def parse_ninja_stdout(ninja_stdout: Path) -> str:
- """Extract an error summary from ninja output."""
+_GOOGLETEST_FAILED, _GOOGLETEST_RUN, _GOOGLETEST_OK, _GOOGLETEST_DISABLED = (
+ '[ FAILED ]',
+ '[ RUN ]',
+ '[ OK ]',
+ '[ DISABLED ]',
+)
- failure_begins = False
- failure_lines = []
- last_line = ''
- with ninja_stdout.open() as ins:
- for line in ins:
- # Trailing whitespace isn't significant, as it doesn't affect the
- # way the line shows up in the logs. However, leading whitespace may
- # be significant, especially for compiler error messages.
- line = line.rstrip()
- if failure_begins:
- if not _RULE_RE.match(line) and not _FAILED_END_RE.match(line):
- failure_lines.append(line)
- else:
- # Output of failed step ends, save its info.
- failure_begins = False
+def _remove_passing_tests(failure_lines: List[str]) -> List[str]:
+ test_lines: List[str] = []
+ result = []
+ for line in failure_lines:
+ if test_lines:
+ if _GOOGLETEST_OK in line:
+ test_lines = []
+ elif _GOOGLETEST_FAILED in line:
+ result.extend(test_lines)
+ test_lines = []
+ result.append(line)
else:
- failed_nodes_match = _FAILED_RE.match(line)
- failure_begins = False
- if failed_nodes_match:
- failure_begins = True
- failure_lines.extend([last_line, line])
- last_line = line
+ test_lines.append(line)
+ elif _GOOGLETEST_RUN in line:
+ test_lines.append(line)
+ elif _GOOGLETEST_DISABLED in line:
+ pass
+ else:
+ result.append(line)
+ result.extend(test_lines)
+ return result
+
+
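A sketch of the expected behavior of _remove_passing_tests, using the marker constants above so the spacing always matches:

    lines = [
        _GOOGLETEST_RUN + ' FooTest.Passes',
        _GOOGLETEST_OK + ' FooTest.Passes',
        _GOOGLETEST_RUN + ' FooTest.Fails',
        _GOOGLETEST_FAILED + ' FooTest.Fails',
    ]
    # Only the RUN/FAILED pair for the failing test survives.
    assert _remove_passing_tests(lines) == [
        _GOOGLETEST_RUN + ' FooTest.Fails',
        _GOOGLETEST_FAILED + ' FooTest.Fails',
    ]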
+def _parse_ninja(ins: IO) -> str:
+ failure_lines: List[str] = []
+ last_line: str = ''
+
+ for line in ins:
+ _LOG.debug('processing %r', line)
+ # Trailing whitespace isn't significant, as it doesn't affect the
+ # way the line shows up in the logs. However, leading whitespace may
+ # be significant, especially for compiler error messages.
+ line = line.rstrip()
+
+ if failure_lines:
+ _LOG.debug('inside failure block')
+
+ if re.match(_COLOR_CODES_PREFIX + r'\[\d+/\d+\] (\S+)', line):
+ _LOG.debug('next rule started, ending failure block')
+ break
+
+ if re.match(_COLOR_CODES_PREFIX + r'ninja: build stopped:.*', line):
+ _LOG.debug('ninja build stopped, ending failure block')
+ break
+ failure_lines.append(line)
+
+ else:
+ if re.match(_COLOR_CODES_PREFIX + r'FAILED: (.*)$', line):
+ _LOG.debug('starting failure block')
+ failure_lines.extend([last_line, line])
+ elif 'FAILED' in line:
+ _LOG.debug('not a match')
+ last_line = line
+
+ failure_lines = _remove_passing_tests(failure_lines)
# Remove "Requirement already satisfied:" lines since many of those might
# be printed during Python installation, and they usually have no relevance
@@ -60,5 +101,25 @@ def parse_ninja_stdout(ninja_stdout: Path) -> str:
if not x.lstrip().startswith('Requirement already satisfied:')
]
- result = '\n'.join(failure_lines)
+ result: str = '\n'.join(failure_lines)
return re.sub(r'\n+', '\n', result)
+
+
+def parse_ninja_stdout(ninja_stdout: Path) -> str:
+ """Extract an error summary from ninja output."""
+ with ninja_stdout.open() as ins:
+ return _parse_ninja(ins)
+
+
+def main(argv=None):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('input', type=argparse.FileType('r'))
+ parser.add_argument('output', type=argparse.FileType('w'))
+ args = parser.parse_args(argv)
+
+ args.output.write(_parse_ninja(args.input))
+
+
+if __name__ == '__main__':
+ sys.exit(main())
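The new main() also makes the parser usable standalone, e.g. python -m pw_presubmit.ninja_parser <input> <output>. From Python, the file-based entry point is parse_ninja_stdout (log path illustrative):

    from pathlib import Path
    from pw_presubmit import ninja_parser

    summary = ninja_parser.parse_ninja_stdout(Path('out/ninja.stdout'))
    print(summary)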
diff --git a/pw_presubmit/py/pw_presubmit/npm_presubmit.py b/pw_presubmit/py/pw_presubmit/npm_presubmit.py
index 565d6c91c..17ccc59a4 100644
--- a/pw_presubmit/py/pw_presubmit/npm_presubmit.py
+++ b/pw_presubmit/py/pw_presubmit/npm_presubmit.py
@@ -13,7 +13,8 @@
# the License.
"""Presubmit to npm install and run tests"""
-from pw_presubmit import call, PresubmitContext
+from pw_presubmit.presubmit import call
+from pw_presubmit.presubmit_context import PresubmitContext
def npm_test(ctx: PresubmitContext) -> None:
diff --git a/pw_presubmit/py/pw_presubmit/owners_checks.py b/pw_presubmit/py/pw_presubmit/owners_checks.py
index d751ff9a7..2d0d177e0 100644
--- a/pw_presubmit/py/pw_presubmit/owners_checks.py
+++ b/pw_presubmit/py/pw_presubmit/owners_checks.py
@@ -35,8 +35,9 @@ from typing import (
Set,
Union,
)
+
from pw_presubmit import git_repo
-from pw_presubmit.presubmit import PresubmitFailure
+from pw_presubmit.presubmit_context import PresubmitFailure
_LOG = logging.getLogger(__name__)
@@ -74,10 +75,6 @@ _LINE_TYPERS: OrderedDict[
class OwnersError(Exception):
"""Generic level OWNERS file error."""
- def __init__(self, message: str, *args: object) -> None:
- super().__init__(*args)
- self.message = message
-
class FormatterError(OwnersError):
"""Errors where formatter doesn't know how to act."""
@@ -126,8 +123,9 @@ class OwnersFile:
def __init__(self, path: pathlib.Path) -> None:
if not path.exists():
- error_msg = f"Tried to import {path} but it does not exists"
- raise OwnersDependencyError(error_msg)
+ raise OwnersDependencyError(
+ f"Tried to import {path} but it does not exist"
+ )
self.path = path
self.original_lines = self.load_owners_file(self.path)
@@ -335,9 +333,10 @@ class OwnersFile:
# all file: rules:
for file_rule in self.sections.get(LineType.FILE_RULE, []):
file_str = file_rule.content[len("file:") :]
- if ":" in file_str:
+ path = self.__complete_path(file_str)
+ if ":" in file_str and not path.is_file():
_LOG.warning(
- "TODO(b/254322931): This check does not yet support "
+ "TODO: b/254322931 - This check does not yet support "
"<project> or <branch> in a file: rule"
)
_LOG.warning(
@@ -346,7 +345,8 @@ class OwnersFile:
self.path,
)
- dependencies.append(self.__complete_path(file_str))
+ else:
+ dependencies.append(path)
# all the per-file rule includes
for per_file in self.sections.get(LineType.PER_FILE, []):
@@ -393,7 +393,8 @@ def _format_owners_file(owners_obj: OwnersFile) -> None:
def _list_unwrapper(
- func, list_or_path: Union[Iterable[pathlib.Path], pathlib.Path]
+ func: Callable[[OwnersFile], None],
+ list_or_path: Union[Iterable[pathlib.Path], pathlib.Path],
) -> Dict[pathlib.Path, str]:
"""Decorator that accepts Paths or list of Paths and iterates as needed."""
errors: Dict[pathlib.Path, str] = {}
@@ -415,10 +416,8 @@ def _list_unwrapper(
try:
func(current_owners)
except OwnersError as err:
- errors[current_owners.path] = err.message
- _LOG.error(
- "%s: %s", str(current_owners.path.absolute()), err.message
- )
+ errors[current_owners.path] = str(err)
+ _LOG.error("%s: %s", current_owners.path.absolute(), err)
return errors
@@ -452,7 +451,7 @@ def main() -> int:
owners_obj.look_for_owners_errors()
owners_obj.check_style()
except OwnersError as err:
- _LOG.error("%s %s", err, err.message)
+ _LOG.error("%s", err)
return 1
return 0
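Dropping the custom __init__ and .message attribute works because Exception already stores and renders its arguments, so str(err) recovers the message directly. In miniature:

    class OwnersError(Exception):
        pass

    err = OwnersError('file rule references a missing path')
    assert str(err) == 'file rule references a missing path'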
diff --git a/pw_presubmit/py/pw_presubmit/pigweed_presubmit.py b/pw_presubmit/py/pw_presubmit/pigweed_presubmit.py
index 1369fbc5e..65f1d7f0e 100755
--- a/pw_presubmit/py/pw_presubmit/pigweed_presubmit.py
+++ b/pw_presubmit/py/pw_presubmit/pigweed_presubmit.py
@@ -26,14 +26,6 @@ import subprocess
import sys
from typing import Callable, Iterable, List, Sequence, TextIO
-try:
- import pw_presubmit
-except ImportError:
- # Append the pw_presubmit package path to the module search path to allow
- # running this module without installing the pw_presubmit package.
- sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
- import pw_presubmit
-
import pw_package.pigweed_packages
from pw_presubmit import (
@@ -43,23 +35,31 @@ from pw_presubmit import (
format_code,
git_repo,
gitmodules,
- call,
- filter_paths,
inclusive_language,
+ javascript_checks,
+ json_check,
keep_sorted,
module_owners,
npm_presubmit,
owners_checks,
- plural,
- presubmit,
- PresubmitContext,
- PresubmitFailure,
- Programs,
python_checks,
shell_checks,
source_in_build,
todo_check,
)
+from pw_presubmit.presubmit import (
+ FileFilter,
+ Programs,
+ call,
+ filter_paths,
+)
+from pw_presubmit.presubmit_context import (
+ FormatOptions,
+ PresubmitContext,
+ PresubmitFailure,
+)
+from pw_presubmit.tools import log_run, plural
+
from pw_presubmit.install_hook import install_git_hook
_LOG = logging.getLogger(__name__)
@@ -67,7 +67,7 @@ _LOG = logging.getLogger(__name__)
pw_package.pigweed_packages.initialize()
# Trigger builds if files with these extensions change.
-_BUILD_FILE_FILTER = presubmit.FileFilter(
+_BUILD_FILE_FILTER = FileFilter(
suffix=(
*format_code.C_FORMAT.extensions,
'.cfg',
@@ -90,17 +90,24 @@ def _at_all_optimization_levels(target):
#
# Build presubmit checks
#
+gn_all = build.GnGenNinja(
+ name='gn_all',
+ path_filter=_BUILD_FILE_FILTER,
+ gn_args=dict(pw_C_OPTIMIZATION_LEVELS=_OPTIMIZATION_LEVELS),
+ ninja_targets=('all',),
+)
+
+
def gn_clang_build(ctx: PresubmitContext):
"""Checks all compile targets that rely on LLVM tooling."""
build_targets = [
*_at_all_optimization_levels('host_clang'),
- 'cpp14_compatibility',
'cpp20_compatibility',
'asan',
'tsan',
'ubsan',
'runtime_sanitizers',
- # TODO(b/234876100): msan will not work until the C++ standard library
+ # TODO: b/234876100 - msan will not work until the C++ standard library
# included in the sysroot has a variant built with msan.
]
@@ -110,18 +117,19 @@ def gn_clang_build(ctx: PresubmitContext):
# QEMU doesn't run on Windows.
if sys.platform != 'win32':
- # TODO(b/244604080): For the pw::InlineString tests, qemu_clang_debug
+ # TODO: b/244604080 - For the pw::InlineString tests, qemu_clang_debug
# and qemu_clang_speed_optimized produce a binary too large for the
# QEMU target's 256KB flash. Restore debug and speed optimized
# builds when this is fixed.
build_targets.append('qemu_clang_size_optimized')
- # TODO(b/240982565): SocketStream currently requires Linux.
+ # TODO: b/240982565 - SocketStream currently requires Linux.
if sys.platform.startswith('linux'):
build_targets.append('integration_tests')
build.gn_gen(ctx, pw_C_OPTIMIZATION_LEVELS=_OPTIMIZATION_LEVELS)
build.ninja(ctx, *build_targets)
+ build.gn_check(ctx)
_HOST_COMPILER = 'gcc' if sys.platform == 'win32' else 'clang'
@@ -141,6 +149,7 @@ def gn_full_qemu_check(ctx: PresubmitContext):
*_at_all_optimization_levels('qemu_gcc'),
*_at_all_optimization_levels('qemu_clang'),
)
+ build.gn_check(ctx)
def _gn_combined_build_check_targets() -> Sequence[str]:
@@ -151,13 +160,16 @@ def _gn_combined_build_check_targets() -> Sequence[str]:
'python.tests',
'python.lint',
'docs',
- 'fuzzers',
'pigweed_pypi_distribution',
]
- # TODO(b/234645359): Re-enable on Windows when compatibility tests build.
+ # C headers seem to be missing when building with pw_minimal_cpp_stdlib, so
+ # skip it on Windows.
+ if sys.platform != 'win32':
+ build_targets.append('build_with_pw_minimal_cpp_stdlib')
+
+ # TODO: b/234645359 - Re-enable on Windows when compatibility tests build.
if sys.platform != 'win32':
- build_targets.append('cpp14_compatibility')
build_targets.append('cpp20_compatibility')
# clang-tidy doesn't run on Windows.
@@ -166,18 +178,21 @@ def _gn_combined_build_check_targets() -> Sequence[str]:
# QEMU doesn't run on Windows.
if sys.platform != 'win32':
- build_targets.extend(_at_all_optimization_levels('qemu_gcc'))
-
- # TODO(b/244604080): For the pw::InlineString tests, qemu_clang_debug
- # and qemu_clang_speed_optimized produce a binary too large for the
+ # TODO: b/244604080 - For the pw::InlineString tests, qemu_*_debug
+ # and qemu_*_speed_optimized produce a binary too large for the
# QEMU target's 256KB flash. Restore debug and speed optimized
# builds when this is fixed.
+ build_targets.append('qemu_gcc_size_optimized')
build_targets.append('qemu_clang_size_optimized')
- # TODO(b/240982565): SocketStream currently requires Linux.
+ # TODO: b/240982565 - SocketStream currently requires Linux.
if sys.platform.startswith('linux'):
build_targets.append('integration_tests')
+ # TODO: b/269354373 - clang is not supported on windows yet
+ if sys.platform != 'win32':
+ build_targets.append('host_clang_debug_dynamic_allocation')
+
return build_targets
@@ -185,15 +200,37 @@ gn_combined_build_check = build.GnGenNinja(
name='gn_combined_build_check',
doc='Run most host and device (QEMU) tests.',
path_filter=_BUILD_FILE_FILTER,
- gn_args=dict(pw_C_OPTIMIZATION_LEVELS=_OPTIMIZATION_LEVELS),
+ gn_args=dict(
+ pw_C_OPTIMIZATION_LEVELS=_OPTIMIZATION_LEVELS,
+ pw_BUILD_BROKEN_GROUPS=True, # Enable to fully test the GN build
+ ),
ninja_targets=_gn_combined_build_check_targets(),
)
+coverage = build.GnGenNinja(
+ name='coverage',
+ doc='Run coverage for the host build.',
+ path_filter=_BUILD_FILE_FILTER,
+ ninja_targets=('coverage',),
+ coverage_options=build.CoverageOptions(
+ target_bucket_root='gs://ng3-metrics/ng3-pigweed-coverage',
+ target_bucket_project='pigweed',
+ project='codesearch',
+ trace_type='LLVM',
+ trim_prefix='/b/s/w/ir/x/w/co',
+ ref='refs/heads/main',
+ source='infra:main',
+ owner='pigweed-infra@google.com',
+ bug_component='503634',
+ ),
+)
+
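Checks like gn_all and coverage above are defined declaratively: build.GnGenNinja packages a gn gen invocation (with gn_args, optionally computed from the context via lambdas) plus a set of ninja targets into a named check. A sketch of adding one (the my_board target is hypothetical):

    my_board_build = build.GnGenNinja(
        name='gn_my_board_build',
        path_filter=_BUILD_FILE_FILTER,
        gn_args=dict(pw_C_OPTIMIZATION_LEVELS=_OPTIMIZATION_LEVELS),
        ninja_targets=('my_board',),  # Hypothetical ninja target.
    )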
@_BUILD_FILE_FILTER.apply_to_check()
def gn_arm_build(ctx: PresubmitContext):
build.gn_gen(ctx, pw_C_OPTIMIZATION_LEVELS=_OPTIMIZATION_LEVELS)
build.ninja(ctx, *_at_all_optimization_levels('stm32f429i'))
+ build.gn_check(ctx)
stm32f429i = build.GnGenNinja(
@@ -212,7 +249,6 @@ stm32f429i = build.GnGenNinja(
ninja_targets=_at_all_optimization_levels('stm32f429i'),
)
-
gn_emboss_build = build.GnGenNinja(
name='gn_emboss_build',
packages=('emboss',),
@@ -241,48 +277,57 @@ gn_nanopb_build = build.GnGenNinja(
),
)
-gn_crypto_mbedtls_build = build.GnGenNinja(
- name='gn_crypto_mbedtls_build',
+gn_chre_build = build.GnGenNinja(
+ name='gn_chre_build',
path_filter=_BUILD_FILE_FILTER,
- packages=('mbedtls',),
- gn_args={
- 'dir_pw_third_party_mbedtls': lambda ctx: '"{}"'.format(
- ctx.package_root / 'mbedtls'
+ packages=('chre',),
+ gn_args=dict(
+ dir_pw_third_party_chre=lambda ctx: '"{}"'.format(
+ ctx.package_root / 'chre'
),
- 'pw_crypto_SHA256_BACKEND': lambda ctx: '"{}"'.format(
- ctx.root / 'pw_crypto:sha256_mbedtls'
+ pw_C_OPTIMIZATION_LEVELS=_OPTIMIZATION_LEVELS,
+ ),
+ ninja_targets=(*_at_all_optimization_levels('host_clang'),),
+)
+
+gn_emboss_nanopb_build = build.GnGenNinja(
+ name='gn_emboss_nanopb_build',
+ path_filter=_BUILD_FILE_FILTER,
+ packages=('emboss', 'nanopb'),
+ gn_args=dict(
+ dir_pw_third_party_emboss=lambda ctx: '"{}"'.format(
+ ctx.package_root / 'emboss'
),
- 'pw_crypto_ECDSA_BACKEND': lambda ctx: '"{}"'.format(
- ctx.root / 'pw_crypto:ecdsa_mbedtls'
+ dir_pw_third_party_nanopb=lambda ctx: '"{}"'.format(
+ ctx.package_root / 'nanopb'
),
- 'pw_C_OPTIMIZATION_LEVELS': _OPTIMIZATION_LEVELS,
- },
+ pw_C_OPTIMIZATION_LEVELS=_OPTIMIZATION_LEVELS,
+ ),
ninja_targets=(
- *_at_all_optimization_levels(f'host_{_HOST_COMPILER}'),
- # TODO(b/240982565): SocketStream currently requires Linux.
- *(('integration_tests',) if sys.platform.startswith('linux') else ()),
+ *_at_all_optimization_levels('stm32f429i'),
+ *_at_all_optimization_levels('host_clang'),
),
)
-gn_crypto_boringssl_build = build.GnGenNinja(
- name='gn_crypto_boringssl_build',
+gn_crypto_mbedtls_build = build.GnGenNinja(
+ name='gn_crypto_mbedtls_build',
path_filter=_BUILD_FILE_FILTER,
- packages=('boringssl',),
+ packages=('mbedtls',),
gn_args={
- 'dir_pw_third_party_boringssl': lambda ctx: '"{}"'.format(
- ctx.package_root / 'boringssl'
+ 'dir_pw_third_party_mbedtls': lambda ctx: '"{}"'.format(
+ ctx.package_root / 'mbedtls'
),
'pw_crypto_SHA256_BACKEND': lambda ctx: '"{}"'.format(
- ctx.root / 'pw_crypto:sha256_boringssl'
+ ctx.root / 'pw_crypto:sha256_mbedtls_v3'
),
'pw_crypto_ECDSA_BACKEND': lambda ctx: '"{}"'.format(
- ctx.root / 'pw_crypto:ecdsa_boringssl'
+ ctx.root / 'pw_crypto:ecdsa_mbedtls_v3'
),
'pw_C_OPTIMIZATION_LEVELS': _OPTIMIZATION_LEVELS,
},
ninja_targets=(
*_at_all_optimization_levels(f'host_{_HOST_COMPILER}'),
- # TODO(b/240982565): SocketStream currently requires Linux.
+ # TODO: b/240982565 - SocketStream currently requires Linux.
*(('integration_tests',) if sys.platform.startswith('linux') else ()),
),
)
@@ -302,7 +347,7 @@ gn_crypto_micro_ecc_build = build.GnGenNinja(
},
ninja_targets=(
*_at_all_optimization_levels(f'host_{_HOST_COMPILER}'),
- # TODO(b/240982565): SocketStream currently requires Linux.
+ # TODO: b/240982565 - SocketStream currently requires Linux.
*(('integration_tests',) if sys.platform.startswith('linux') else ()),
),
)
@@ -316,7 +361,7 @@ gn_teensy_build = build.GnGenNinja(
str(ctx.package_root)
),
'pw_arduino_build_CORE_NAME': 'teensy',
- 'pw_arduino_build_PACKAGE_NAME': 'teensy/avr',
+ 'pw_arduino_build_PACKAGE_NAME': 'avr/1.58.1',
'pw_arduino_build_BOARD': 'teensy40',
'pw_C_OPTIMIZATION_LEVELS': _OPTIMIZATION_LEVELS,
},
@@ -336,6 +381,41 @@ gn_pico_build = build.GnGenNinja(
ninja_targets=('pi_pico',),
)
+gn_mimxrt595_build = build.GnGenNinja(
+ name='gn_mimxrt595_build',
+ path_filter=_BUILD_FILE_FILTER,
+ packages=('mcuxpresso',),
+ gn_args={
+ 'dir_pw_third_party_mcuxpresso': lambda ctx: '"{}"'.format(
+ str(ctx.package_root / 'mcuxpresso')
+ ),
+ 'pw_target_mimxrt595_evk_MANIFEST': '$dir_pw_third_party_mcuxpresso'
+ + '/EVK-MIMXRT595_manifest_v3_8.xml',
+ 'pw_third_party_mcuxpresso_SDK': '//targets/mimxrt595_evk:sample_sdk',
+ 'pw_C_OPTIMIZATION_LEVELS': _OPTIMIZATION_LEVELS,
+ },
+ ninja_targets=('mimxrt595',),
+)
+
+gn_mimxrt595_freertos_build = build.GnGenNinja(
+ name='gn_mimxrt595_freertos_build',
+ path_filter=_BUILD_FILE_FILTER,
+ packages=('freertos', 'mcuxpresso'),
+ gn_args={
+ 'dir_pw_third_party_freertos': lambda ctx: '"{}"'.format(
+ str(ctx.package_root / 'freertos')
+ ),
+ 'dir_pw_third_party_mcuxpresso': lambda ctx: '"{}"'.format(
+ str(ctx.package_root / 'mcuxpresso')
+ ),
+ 'pw_target_mimxrt595_evk_freertos_MANIFEST': '{}/{}'.format(
+ "$dir_pw_third_party_mcuxpresso", "EVK-MIMXRT595_manifest_v3_8.xml"
+ ),
+ 'pw_third_party_mcuxpresso_SDK': '//targets/mimxrt595_evk_freertos:sdk',
+ 'pw_C_OPTIMIZATION_LEVELS': _OPTIMIZATION_LEVELS,
+ },
+ ninja_targets=('mimxrt595_freertos',),
+)
gn_software_update_build = build.GnGenNinja(
name='gn_software_update_build',
@@ -358,7 +438,7 @@ gn_software_update_build = build.GnGenNinja(
ctx.package_root / 'mbedtls'
),
'pw_crypto_SHA256_BACKEND': lambda ctx: '"{}"'.format(
- ctx.root / 'pw_crypto:sha256_mbedtls'
+ ctx.root / 'pw_crypto:sha256_mbedtls_v3'
),
'pw_C_OPTIMIZATION_LEVELS': _OPTIMIZATION_LEVELS,
},
@@ -386,7 +466,152 @@ gn_pw_system_demo_build = build.GnGenNinja(
ninja_targets=('pw_system_demo',),
)
-gn_docs_build = build.GnGenNinja(name='gn_docs_build', ninja_targets=('docs',))
+gn_googletest_build = build.GnGenNinja(
+ name='gn_googletest_build',
+ path_filter=_BUILD_FILE_FILTER,
+ packages=('googletest',),
+ gn_args={
+ 'dir_pw_third_party_googletest': lambda ctx: '"{}"'.format(
+ ctx.package_root / 'googletest'
+ ),
+ 'pw_unit_test_MAIN': lambda ctx: '"{}"'.format(
+ ctx.root / 'third_party/googletest:gmock_main'
+ ),
+ 'pw_unit_test_GOOGLETEST_BACKEND': lambda ctx: '"{}"'.format(
+ ctx.root / 'third_party/googletest'
+ ),
+ 'pw_C_OPTIMIZATION_LEVELS': _OPTIMIZATION_LEVELS,
+ },
+ ninja_targets=_at_all_optimization_levels(f'host_{_HOST_COMPILER}'),
+)
+
+gn_fuzz_build = build.GnGenNinja(
+ name='gn_fuzz_build',
+ path_filter=_BUILD_FILE_FILTER,
+ packages=('abseil-cpp', 'fuzztest', 'googletest', 're2'),
+ gn_args={
+ 'dir_pw_third_party_abseil_cpp': lambda ctx: '"{}"'.format(
+ ctx.package_root / 'abseil-cpp'
+ ),
+ 'dir_pw_third_party_fuzztest': lambda ctx: '"{}"'.format(
+ ctx.package_root / 'fuzztest'
+ ),
+ 'dir_pw_third_party_googletest': lambda ctx: '"{}"'.format(
+ ctx.package_root / 'googletest'
+ ),
+ 'dir_pw_third_party_re2': lambda ctx: '"{}"'.format(
+ ctx.package_root / 're2'
+ ),
+ 'pw_unit_test_MAIN': lambda ctx: '"{}"'.format(
+ ctx.root / 'third_party/googletest:gmock_main'
+ ),
+ 'pw_unit_test_GOOGLETEST_BACKEND': lambda ctx: '"{}"'.format(
+ ctx.root / 'third_party/googletest'
+ ),
+ },
+ ninja_targets=('fuzzers',),
+ ninja_contexts=(
+ lambda ctx: build.modified_env(
+ FUZZTEST_PRNG_SEED=build.fuzztest_prng_seed(ctx),
+ ),
+ ),
+)
+
+oss_fuzz_build = build.GnGenNinja(
+ name='oss_fuzz_build',
+ path_filter=_BUILD_FILE_FILTER,
+ packages=('abseil-cpp', 'fuzztest', 'googletest', 're2'),
+ gn_args={
+ 'dir_pw_third_party_abseil_cpp': lambda ctx: '"{}"'.format(
+ ctx.package_root / 'abseil-cpp'
+ ),
+ 'dir_pw_third_party_fuzztest': lambda ctx: '"{}"'.format(
+ ctx.package_root / 'fuzztest'
+ ),
+ 'dir_pw_third_party_googletest': lambda ctx: '"{}"'.format(
+ ctx.package_root / 'googletest'
+ ),
+ 'dir_pw_third_party_re2': lambda ctx: '"{}"'.format(
+ ctx.package_root / 're2'
+ ),
+ 'pw_toolchain_OSS_FUZZ_ENABLED': True,
+ },
+ ninja_targets=('oss_fuzz',),
+)
+
+
+def _env_with_zephyr_vars(ctx: PresubmitContext) -> dict:
+ """Returns the environment variables with ... set for Zephyr."""
+ env = os.environ.copy()
+ # ZEPHYR_BASE points at the installed package; this checkout is a module.
+ env['ZEPHYR_BASE'] = str(ctx.package_root / 'zephyr')
+ env['ZEPHYR_MODULES'] = str(ctx.root)
+ return env
+
+
+def zephyr_build(ctx: PresubmitContext) -> None:
+ """Run Zephyr compatible tests"""
+ # Install the Zephyr package
+ build.install_package(ctx, 'zephyr')
+ # Configure the environment
+ env = _env_with_zephyr_vars(ctx)
+ # Get the python twister runner
+ twister = ctx.package_root / 'zephyr' / 'scripts' / 'twister'
+ # Run twister
+ call(
+ sys.executable,
+ twister,
+ '--ninja',
+ '--integration',
+ '--clobber-output',
+ '--inline-logs',
+ '--verbose',
+ '--testsuite-root',
+ ctx.root / 'pw_unit_test_zephyr',
+ env=env,
+ )
+ # Produces reports at (ctx.root / 'twister_out' / 'twister*.xml')
+
+
+def docs_build(ctx: PresubmitContext) -> None:
+ """Build Pigweed docs"""
+
+ # Build main docs through GN/Ninja.
+ build.install_package(ctx, 'nanopb')
+ build.gn_gen(ctx, pw_C_OPTIMIZATION_LEVELS=_OPTIMIZATION_LEVELS)
+ build.ninja(ctx, 'docs')
+ build.gn_check(ctx)
+
+ # Build Rust docs through Bazel.
+ build.bazel(
+ ctx,
+ 'build',
+ '--',
+ '//pw_rust:docs',
+ )
+
+ # Copy rust docs from Bazel's out directory into where the GN build
+ # put the main docs.
+ rust_docs_bazel_dir = ctx.output_dir.joinpath(
+ '.bazel-bin', 'pw_rust', 'docs.rustdoc'
+ )
+ rust_docs_output_dir = ctx.output_dir.joinpath(
+ 'docs', 'gen', 'docs', 'html', 'rustdoc'
+ )
+
+ # Remove the docs tree to avoid including stale files from previous runs.
+ shutil.rmtree(rust_docs_output_dir, ignore_errors=True)
+
+ # Bazel generates files and directories without write permissions. To
+ # allow this rule to run multiple times, use shutil.copyfile for the
+ # actual copies so that source file permissions are not copied over.
+ shutil.copytree(
+ rust_docs_bazel_dir,
+ rust_docs_output_dir,
+ copy_function=shutil.copyfile,
+ dirs_exist_ok=True,
+ )
+
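The copy_function=shutil.copyfile argument above copies file contents without the source permission bits (copytree's default, shutil.copy2, preserves metadata), which is what lets this step overwrite Bazel's read-only outputs on a second run. The pattern in isolation (paths illustrative):

    import shutil

    shutil.copytree(
        'rust_docs_src',
        'rust_docs_dest',
        copy_function=shutil.copyfile,  # Contents only, no permissions.
        dirs_exist_ok=True,
    )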
gn_host_tools = build.GnGenNinja(
name='gn_host_tools',
@@ -418,6 +643,7 @@ def _run_cmake(ctx: PresubmitContext, toolchain='host_clang') -> None:
def cmake_clang(ctx: PresubmitContext):
_run_cmake(ctx, toolchain='host_clang')
build.ninja(ctx, 'pw_apps', 'pw_run_tests.modules')
+ build.gn_check(ctx)
@filter_paths(
@@ -426,6 +652,7 @@ def cmake_clang(ctx: PresubmitContext):
def cmake_gcc(ctx: PresubmitContext):
_run_cmake(ctx, toolchain='host_gcc')
build.ninja(ctx, 'pw_apps', 'pw_run_tests.modules')
+ build.gn_check(ctx)
@filter_paths(
@@ -497,20 +724,25 @@ def bazel_build(ctx: PresubmitContext) -> None:
# This is just a minimal presubmit intended to ensure we don't break what
# support we have.
#
- # TODO(b/271465588): Eventually just build the entire repo for this
+ # TODO: b/271465588 - Eventually just build the entire repo for this
# platform.
build.bazel(
ctx,
'build',
- # Designated initializers produce a warning-treated-as-error when
- # compiled with -std=c++17.
- #
- # TODO(b/271299438): Remove this.
- '--copt=-Wno-pedantic',
'--platforms=//pw_build/platforms:testonly_freertos',
'//pw_sync/...',
'//pw_thread/...',
'//pw_thread_freertos/...',
+ '//pw_interrupt/...',
+ '//pw_cpu_exception/...',
+ )
+
+ # Build the pw_system example for the Discovery board using STM32Cube.
+ build.bazel(
+ ctx,
+ 'build',
+ '--config=stm32f429i',
+ '//pw_system:system_example',
)
@@ -556,7 +788,7 @@ def _clang_system_include_paths(lang: str) -> List[str]:
f'{os.devnull}',
'-fsyntax-only',
]
- process = subprocess.run(
+ process = log_run(
command, check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
@@ -597,6 +829,13 @@ _EXCLUDE_FROM_COPYRIGHT_NOTICE: Sequence[str] = (
r'\bDoxyfile$',
r'\bPW_PLUGINS$',
r'\bconstraint.list$',
+ r'\bconstraint_hashes_darwin.list$',
+ r'\bconstraint_hashes_linux.list$',
+ r'\bconstraint_hashes_windows.list$',
+ r'\bpython_base_requirements.txt$',
+ r'\bupstream_requirements_darwin_lock.txt$',
+ r'\bupstream_requirements_linux_lock.txt$',
+ r'\bupstream_requirements_windows_lock.txt$',
r'^(?:.+/)?\..+$',
# keep-sorted: end
# Metadata
@@ -611,6 +850,7 @@ _EXCLUDE_FROM_COPYRIGHT_NOTICE: Sequence[str] = (
r'\brequirements.txt$',
r'\byarn.lock$',
r'^docker/tag$',
+ r'^patches.json$',
# keep-sorted: end
# Data files
# keep-sorted: start
@@ -623,6 +863,7 @@ _EXCLUDE_FROM_COPYRIGHT_NOTICE: Sequence[str] = (
r'\.json$',
r'\.png$',
r'\.svg$',
+ r'\.vsix$',
r'\.xml$',
# keep-sorted: end
# Documentation
@@ -636,6 +877,10 @@ _EXCLUDE_FROM_COPYRIGHT_NOTICE: Sequence[str] = (
r'\.pb\.h$',
r'\_pb2.pyi?$',
# keep-sorted: end
+ # Generated third-party files
+ # keep-sorted: start
+ r'\bthird_party/.*\.bazelrc$',
+ # keep-sorted: end
# Diff/Patch files
# keep-sorted: start
r'\.diff$',
@@ -674,6 +919,8 @@ _COPYRIGHT = re.compile(
_SKIP_LINE_PREFIXES = (
'#!',
+ '#autoload',
+ '#compdef',
'@echo off',
':<<',
'/*',
@@ -710,9 +957,12 @@ def copyright_notice(ctx: PresubmitContext):
if path.stat().st_size == 0:
continue # Skip empty files
- with path.open() as file:
- if not _COPYRIGHT.match(''.join(_read_notice_lines(file))):
- errors.append(path)
+ try:
+ with path.open() as file:
+ if not _COPYRIGHT.match(''.join(_read_notice_lines(file))):
+ errors.append(path)
+ except UnicodeDecodeError as exc:
+ raise PresubmitFailure(f'failed to read {path}') from exc
if errors:
_LOG.warning(
@@ -793,7 +1043,7 @@ def commit_message_format(_: PresubmitContext):
if (
'Reland' in lines[0]
and 'This is a reland of ' in git_repo.commit_message()
- and "Original change's description: " in git_repo.commit_message()
+ and "Original change's description:" in git_repo.commit_message()
):
_LOG.warning('Ignoring apparent Gerrit-generated reland')
return
@@ -819,7 +1069,7 @@ def commit_message_format(_: PresubmitContext):
# Check that the first line matches the expected pattern.
match = re.match(
- r'^(?:[\w*/]+(?:{[\w* ,]+})?[\w*/]*|SEED-\d+): (?P<desc>.+)$', lines[0]
+ r'^(?:[.\w*/]+(?:{[\w* ,]+})?[\w*/]*|SEED-\d+): (?P<desc>.+)$', lines[0]
)
if not match:
_LOG.warning('The first line does not match the expected format')
@@ -886,6 +1136,7 @@ def static_analysis(ctx: PresubmitContext):
"""Runs all available static analysis tools."""
build.gn_gen(ctx)
build.ninja(ctx, 'python.lint', 'static_analysis')
+ build.gn_check(ctx)
_EXCLUDE_FROM_TODO_CHECK = (
@@ -895,6 +1146,7 @@ _EXCLUDE_FROM_TODO_CHECK = (
r'.gitignore$',
r'.pylintrc$',
r'\bdocs/build_system.rst',
+ r'\bdocs/code_reviews.rst',
r'\bpw_assert_basic/basic_handler.cc',
r'\bpw_assert_basic/public/pw_assert_basic/handler.h',
r'\bpw_blob_store/public/pw_blob_store/flat_file_system_entry.h',
@@ -934,7 +1186,7 @@ def owners_lint_checks(ctx: PresubmitContext):
owners_checks.presubmit_check(ctx.paths)
-SOURCE_FILES_FILTER = presubmit.FileFilter(
+SOURCE_FILES_FILTER = FileFilter(
endswith=_BUILD_FILE_FILTER.endswith,
suffix=('.bazel', '.bzl', '.gn', '.gni', *_BUILD_FILE_FILTER.suffix),
exclude=(
@@ -945,42 +1197,55 @@ SOURCE_FILES_FILTER = presubmit.FileFilter(
),
)
-
#
# Presubmit check programs
#
OTHER_CHECKS = (
# keep-sorted: start
- # TODO(b/235277910): Enable all Bazel tests when they're fixed.
+ # TODO: b/235277910 - Enable all Bazel tests when they're fixed.
bazel_test,
build.gn_gen_check,
cmake_clang,
cmake_gcc,
- gitmodules.create(),
+ coverage,
+ # TODO: b/234876100 - Remove once msan is added to all_sanitizers().
+ cpp_checks.msan,
+ docs_build,
+ gitmodules.create(gitmodules.Config(allow_submodules=False)),
+ gn_all,
gn_clang_build,
gn_combined_build_check,
module_owners.presubmit_check(),
npm_presubmit.npm_test,
pw_transfer_integration_test,
+ python_checks.update_upstream_python_constraints,
+ python_checks.vendor_python_wheels,
# TODO(hepler): Many files are missing from the CMake build. Add this check
# to lintformat when the missing files are fixed.
source_in_build.cmake(SOURCE_FILES_FILTER, _run_cmake),
static_analysis,
stm32f429i,
todo_check.create(todo_check.BUGS_OR_USERNAMES),
+ zephyr_build,
# keep-sorted: end
)
+ARDUINO_PICO = (
+ gn_teensy_build,
+ gn_pico_build,
+ gn_pw_system_demo_build,
+)
+
+INTERNAL = (gn_mimxrt595_build, gn_mimxrt595_freertos_build)
+
# The misc program differs from other_checks in that checks in the misc
# program block CQ on Linux.
MISC = (
# keep-sorted: start
- gn_emboss_build,
- gn_nanopb_build,
- gn_pico_build,
- gn_pw_system_demo_build,
- gn_teensy_build,
+ gn_chre_build,
+ gn_emboss_nanopb_build,
+ gn_googletest_build,
# keep-sorted: end
)
@@ -988,15 +1253,16 @@ SANITIZERS = (cpp_checks.all_sanitizers(),)
SECURITY = (
# keep-sorted: start
- gn_crypto_boringssl_build,
gn_crypto_mbedtls_build,
gn_crypto_micro_ecc_build,
+ gn_fuzz_build,
gn_software_update_build,
+ oss_fuzz_build,
# keep-sorted: end
)
# Avoid running all checks on specific paths.
-PATH_EXCLUSIONS = (re.compile(r'\bthird_party/fuchsia/repo/'),)
+PATH_EXCLUSIONS = FormatOptions.load().exclude
_LINTFORMAT = (
commit_message_format,
@@ -1014,6 +1280,8 @@ _LINTFORMAT = (
source_in_build.gn(SOURCE_FILES_FILTER),
source_is_in_cmake_build_warn_only,
shell_checks.shellcheck if shutil.which('shellcheck') else (),
+ javascript_checks.eslint if shutil.which('npx') else (),
+ json_check.presubmit_check,
keep_sorted.presubmit_check,
todo_check_with_exceptions,
)
@@ -1025,14 +1293,14 @@ LINTFORMAT = (
# (https://stackoverflow.com/q/71024130/1224002). These are cached, but
# after a roll it can be quite slow.
source_in_build.bazel(SOURCE_FILES_FILTER),
- pw_presubmit.python_checks.check_python_versions,
- pw_presubmit.python_checks.gn_python_lint,
+ python_checks.check_python_versions,
+ python_checks.gn_python_lint,
)
QUICK = (
_LINTFORMAT,
gn_quick_build_check,
- # TODO(b/34884583): Re-enable CMake and Bazel for Mac after we have fixed
+ # TODO: b/34884583 - Re-enable CMake and Bazel for Mac after we have fixed
# the clang issues. The problem is that all clang++ invocations need the
# two extra flags: "-nostdc++" and "${clang_prefix}/../lib/libc++.a".
cmake_clang if sys.platform != 'darwin' else (),
@@ -1042,10 +1310,11 @@ FULL = (
_LINTFORMAT,
gn_combined_build_check,
gn_host_tools,
- bazel_test if sys.platform == 'linux' else (),
- bazel_build if sys.platform == 'linux' else (),
+ bazel_test,
+ bazel_build,
python_checks.gn_python_check,
python_checks.gn_python_test_coverage,
+ python_checks.check_upstream_python_constraints,
build_env_setup,
# Skip gn_teensy_build if running on Windows. The Teensycore installer is
# an exe that requires an admin role.
@@ -1054,7 +1323,9 @@ FULL = (
PROGRAMS = Programs(
# keep-sorted: start
+ arduino_pico=ARDUINO_PICO,
full=FULL,
+ internal=INTERNAL,
lintformat=LINTFORMAT,
misc=MISC,
other_checks=OTHER_CHECKS,
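Programs implements the Mapping interface keyed by program name, so the new arduino_pico and internal entries are addressable the same way the CLI presumably selects programs:

    assert 'arduino_pico' in PROGRAMS
    misc_program = PROGRAMS['misc']  # Lookup by program name.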
diff --git a/pw_presubmit/py/pw_presubmit/presubmit.py b/pw_presubmit/py/pw_presubmit/presubmit.py
index ef10a3abe..3d2303a9e 100644
--- a/pw_presubmit/py/pw_presubmit/presubmit.py
+++ b/pw_presubmit/py/pw_presubmit/presubmit.py
@@ -51,11 +51,10 @@ import logging
import os
from pathlib import Path
import re
-import shutil
import signal
import subprocess
import sys
-import tempfile as tf
+import tempfile
import time
import types
from typing import (
@@ -73,14 +72,22 @@ from typing import (
Tuple,
Union,
)
-import urllib
import pw_cli.color
import pw_cli.env
-import pw_env_setup.config_file
from pw_package import package_manager
+
from pw_presubmit import git_repo, tools
from pw_presubmit.tools import plural
+from pw_presubmit.presubmit_context import (
+ FormatContext,
+ FormatOptions,
+ LuciContext,
+ PRESUBMIT_CONTEXT,
+ PresubmitContext,
+ PresubmitFailure,
+ log_check_traces,
+)
_LOG: logging.Logger = logging.getLogger(__name__)
@@ -121,23 +128,6 @@ def _box(style, left, middle, right, box=tools.make_box('><>')) -> str:
)
-class PresubmitFailure(Exception):
- """Optional exception to use for presubmit failures."""
-
- def __init__(
- self,
- description: str = '',
- path: Optional[Path] = None,
- line: Optional[int] = None,
- ):
- line_part: str = ''
- if line is not None:
- line_part = f'{line}:'
- super().__init__(
- f'{path}:{line_part} {description}' if path else description
- )
-
-
class PresubmitResult(enum.Enum):
PASS = 'PASSED' # Check completed successfully.
FAIL = 'FAILED' # Check failed.
@@ -214,66 +204,6 @@ class Programs(collections.abc.Mapping):
return len(self._programs)
-@dataclasses.dataclass(frozen=True)
-class FormatOptions:
- python_formatter: Optional[str] = 'yapf'
- black_path: Optional[str] = 'black'
-
- # TODO(b/264578594) Add exclude to pigweed.json file.
- # exclude: Sequence[re.Pattern] = dataclasses.field(default_factory=list)
-
- @staticmethod
- def load() -> 'FormatOptions':
- config = pw_env_setup.config_file.load()
- fmt = config.get('pw', {}).get('pw_presubmit', {}).get('format', {})
- return FormatOptions(
- python_formatter=fmt.get('python_formatter', 'yapf'),
- black_path=fmt.get('black_path', 'black'),
- # exclude=tuple(re.compile(x) for x in fmt.get('exclude', ())),
- )
-
-
-@dataclasses.dataclass
-class LuciPipeline:
- round: int
- builds_from_previous_iteration: Sequence[str]
-
- @staticmethod
- def create(
- bbid: int,
- fake_pipeline_props: Optional[Dict[str, Any]] = None,
- ) -> Optional['LuciPipeline']:
- pipeline_props: Dict[str, Any]
- if fake_pipeline_props is not None:
- pipeline_props = fake_pipeline_props
- else:
- pipeline_props = (
- get_buildbucket_info(bbid)
- .get('input', {})
- .get('properties', {})
- .get('$pigweed/pipeline', {})
- )
- if not pipeline_props.get('inside_a_pipeline', False):
- return None
-
- return LuciPipeline(
- round=int(pipeline_props['round']),
- builds_from_previous_iteration=list(
- pipeline_props['builds_from_previous_iteration']
- ),
- )
-
-
-def get_buildbucket_info(bbid) -> Dict[str, Any]:
- if not bbid or not shutil.which('bb'):
- return {}
-
- output = subprocess.check_output(
- ['bb', 'get', '-json', '-p', f'{bbid}'], text=True
- )
- return json.loads(output)
-
-
def download_cas_artifact(
ctx: PresubmitContext, digest: str, output_dir: str
) -> None:
@@ -327,8 +257,8 @@ def archive_cas_artifact(
for path in upload_paths:
assert os.path.abspath(path)
- with tf.NamedTemporaryFile(mode='w+t') as tmp_digest_file:
- with tf.NamedTemporaryFile(mode='w+t') as tmp_paths_file:
+ with tempfile.NamedTemporaryFile(mode='w+t') as tmp_digest_file:
+ with tempfile.NamedTemporaryFile(mode='w+t') as tmp_paths_file:
json_paths = json.dumps(
[
[str(root), str(os.path.relpath(path, root))]
@@ -357,257 +287,6 @@ def archive_cas_artifact(
return uploaded_digest
-@dataclasses.dataclass
-class LuciTrigger:
- """Details the pending change or submitted commit triggering the build."""
-
- number: int
- remote: str
- branch: str
- ref: str
- gerrit_name: str
- submitted: bool
-
- @property
- def gerrit_url(self):
- if not self.number:
- return self.gitiles_url
- return 'https://{}-review.googlesource.com/c/{}'.format(
- self.gerrit_name, self.number
- )
-
- @property
- def gitiles_url(self):
- return '{}/+/{}'.format(self.remote, self.ref)
-
- @staticmethod
- def create_from_environment(
- env: Optional[Dict[str, str]] = None,
- ) -> Sequence['LuciTrigger']:
- if not env:
- env = os.environ.copy()
- raw_path = env.get('TRIGGERING_CHANGES_JSON')
- if not raw_path:
- return ()
- path = Path(raw_path)
- if not path.is_file():
- return ()
-
- result = []
- with open(path, 'r') as ins:
- for trigger in json.load(ins):
- keys = {
- 'number',
- 'remote',
- 'branch',
- 'ref',
- 'gerrit_name',
- 'submitted',
- }
- if keys <= trigger.keys():
- result.append(LuciTrigger(**{x: trigger[x] for x in keys}))
-
- return tuple(result)
-
- @staticmethod
- def create_for_testing():
- change = {
- 'number': 123456,
- 'remote': 'https://pigweed.googlesource.com/pigweed/pigweed',
- 'branch': 'main',
- 'ref': 'refs/changes/56/123456/1',
- 'gerrit_name': 'pigweed',
- 'submitted': True,
- }
- with tf.TemporaryDirectory() as tempdir:
- changes_json = Path(tempdir) / 'changes.json'
- with changes_json.open('w') as outs:
- json.dump([change], outs)
- env = {'TRIGGERING_CHANGES_JSON': changes_json}
- return LuciTrigger.create_from_environment(env)
-
-
-@dataclasses.dataclass
-class LuciContext:
- """LUCI-specific information about the environment."""
-
- buildbucket_id: int
- build_number: int
- project: str
- bucket: str
- builder: str
- swarming_server: str
- swarming_task_id: str
- cas_instance: str
- pipeline: Optional[LuciPipeline]
- triggers: Sequence[LuciTrigger] = dataclasses.field(default_factory=tuple)
-
- @staticmethod
- def create_from_environment(
- env: Optional[Dict[str, str]] = None,
- fake_pipeline_props: Optional[Dict[str, Any]] = None,
- ) -> Optional['LuciContext']:
- """Create a LuciContext from the environment."""
-
- if not env:
- env = os.environ.copy()
-
- luci_vars = [
- 'BUILDBUCKET_ID',
- 'BUILDBUCKET_NAME',
- 'BUILD_NUMBER',
- 'SWARMING_TASK_ID',
- 'SWARMING_SERVER',
- ]
- if any(x for x in luci_vars if x not in env):
- return None
-
- project, bucket, builder = env['BUILDBUCKET_NAME'].split(':')
-
- bbid: int = 0
- pipeline: Optional[LuciPipeline] = None
- try:
- bbid = int(env['BUILDBUCKET_ID'])
- pipeline = LuciPipeline.create(bbid, fake_pipeline_props)
-
- except ValueError:
- pass
-
- # Logic to identify cas instance from swarming server is derived from
- # https://chromium.googlesource.com/infra/luci/recipes-py/+/main/recipe_modules/cas/api.py
- swarm_server = env['SWARMING_SERVER']
- cas_project = urllib.parse.urlparse(swarm_server).netloc.split('.')[0]
- cas_instance = f'projects/{cas_project}/instances/default_instance'
-
- result = LuciContext(
- buildbucket_id=bbid,
- build_number=int(env['BUILD_NUMBER']),
- project=project,
- bucket=bucket,
- builder=builder,
- swarming_server=env['SWARMING_SERVER'],
- swarming_task_id=env['SWARMING_TASK_ID'],
- cas_instance=cas_instance,
- pipeline=pipeline,
- triggers=LuciTrigger.create_from_environment(env),
- )
- _LOG.debug('%r', result)
- return result
-
- @staticmethod
- def create_for_testing():
- env = {
- 'BUILDBUCKET_ID': '881234567890',
- 'BUILDBUCKET_NAME': 'pigweed:bucket.try:builder-name',
- 'BUILD_NUMBER': '123',
- 'SWARMING_SERVER': 'https://chromium-swarm.appspot.com',
- 'SWARMING_TASK_ID': 'cd2dac62d2',
- }
- return LuciContext.create_from_environment(env, {})
-
-
-@dataclasses.dataclass
-class FormatContext:
- """Context passed into formatting helpers.
-
- This class is a subset of PresubmitContext containing only what's needed by
- formatters.
-
- For full documentation on the members see the PresubmitContext section of
- pw_presubmit/docs.rst.
-
- Args:
- root: Source checkout root directory
- output_dir: Output directory for this specific language
- paths: Modified files for the presubmit step to check (often used in
- formatting steps but ignored in compile steps)
- package_root: Root directory for pw package installations
- format_options: Formatting options, derived from pigweed.json
- """
-
- root: Optional[Path]
- output_dir: Path
- paths: Tuple[Path, ...]
- package_root: Path
- format_options: FormatOptions
-
-
-@dataclasses.dataclass
-class PresubmitContext: # pylint: disable=too-many-instance-attributes
- """Context passed into presubmit checks.
-
- For full documentation on the members see pw_presubmit/docs.rst.
-
- Args:
- root: Source checkout root directory
- repos: Repositories (top-level and submodules) processed by
- pw presubmit
- output_dir: Output directory for this specific presubmit step
- failure_summary_log: Path where steps should write a brief summary of
- any failures encountered for use by other tooling.
- paths: Modified files for the presubmit step to check (often used in
- formatting steps but ignored in compile steps)
- all_paths: All files in the tree.
- package_root: Root directory for pw package installations
- override_gn_args: Additional GN args processed by build.gn_gen()
- luci: Information about the LUCI build or None if not running in LUCI
- format_options: Formatting options, derived from pigweed.json
- num_jobs: Number of jobs to run in parallel
- continue_after_build_error: For steps that compile, don't exit on the
- first compilation error
- """
-
- root: Path
- repos: Tuple[Path, ...]
- output_dir: Path
- failure_summary_log: Path
- paths: Tuple[Path, ...]
- all_paths: Tuple[Path, ...]
- package_root: Path
- luci: Optional[LuciContext]
- override_gn_args: Dict[str, str]
- format_options: FormatOptions
- num_jobs: Optional[int] = None
- continue_after_build_error: bool = False
- _failed: bool = False
-
- @property
- def failed(self) -> bool:
- return self._failed
-
- def fail(
- self,
- description: str,
- path: Optional[Path] = None,
- line: Optional[int] = None,
- ):
- """Add a failure to this presubmit step.
-
- If this is called at least once the step fails, but not immediately—the
- check is free to continue and possibly call this method again.
- """
- _LOG.warning('%s', PresubmitFailure(description, path, line))
- self._failed = True
-
- @staticmethod
- def create_for_testing():
- parsed_env = pw_cli.env.pigweed_environment()
- root = parsed_env.PW_PROJECT_ROOT
- presubmit_root = root / 'out' / 'presubmit'
- return PresubmitContext(
- root=root,
- repos=(root,),
- output_dir=presubmit_root / 'test',
- failure_summary_log=presubmit_root / 'failure-summary.log',
- paths=(root / 'foo.cc', root / 'foo.py'),
- all_paths=(root / 'BUILD.gn', root / 'foo.cc', root / 'foo.py'),
- package_root=root / 'environment' / 'packages',
- luci=None,
- override_gn_args={},
- format_options=FormatOptions(),
- )
-
-
class FileFilter:
"""Allows checking if a path matches a series of filters.
@@ -707,7 +386,7 @@ class FilteredCheck:
class Presubmit:
"""Runs a series of presubmit checks on a list of files."""
- def __init__(
+ def __init__( # pylint: disable=too-many-arguments
self,
root: Path,
repos: Sequence[Path],
@@ -717,6 +396,8 @@ class Presubmit:
package_root: Path,
override_gn_args: Dict[str, str],
continue_after_build_error: bool,
+ rng_seed: int,
+ full: bool,
):
self._root = root.resolve()
self._repos = tuple(repos)
@@ -729,15 +410,17 @@ class Presubmit:
self._package_root = package_root.resolve()
self._override_gn_args = override_gn_args
self._continue_after_build_error = continue_after_build_error
+ self._rng_seed = rng_seed
+ self._full = full
def run(
self,
program: Program,
keep_going: bool = False,
substep: Optional[str] = None,
+ dry_run: bool = False,
) -> bool:
"""Executes a series of presubmit checks on the paths."""
-
checks = self.apply_filters(program)
if substep:
assert (
@@ -767,7 +450,9 @@ class Presubmit:
_LOG.debug('Checks:\n%s', '\n'.join(c.name for c in checks))
start_time: float = time.time()
- passed, failed, skipped = self._execute_checks(checks, keep_going)
+ passed, failed, skipped = self._execute_checks(
+ checks, keep_going, dry_run
+ )
self._log_summary(time.time() - start_time, passed, failed, skipped)
return not failed and not skipped
@@ -853,7 +538,7 @@ class Presubmit:
return PresubmitContext(**kwargs)
@contextlib.contextmanager
- def _context(self, filtered_check: FilteredCheck):
+ def _context(self, filtered_check: FilteredCheck, dry_run: bool = False):
# There are many characters banned from filenames on Windows. To
# simplify things, just strip everything that's not a letter, digit,
# or underscore.
@@ -882,21 +567,27 @@ class Presubmit:
package_root=self._package_root,
override_gn_args=self._override_gn_args,
continue_after_build_error=self._continue_after_build_error,
+ rng_seed=self._rng_seed,
+ full=self._full,
luci=LuciContext.create_from_environment(),
format_options=FormatOptions.load(),
+ dry_run=dry_run,
)
finally:
_LOG.removeHandler(handler)
def _execute_checks(
- self, program: List[FilteredCheck], keep_going: bool
+ self,
+ program: List[FilteredCheck],
+ keep_going: bool,
+ dry_run: bool = False,
) -> Tuple[int, int, int]:
"""Runs presubmit checks; returns (passed, failed, skipped) lists."""
passed = failed = 0
for i, filtered_check in enumerate(program, 1):
- with self._context(filtered_check) as ctx:
+ with self._context(filtered_check, dry_run) as ctx:
result = filtered_check.run(ctx, i, len(program))
if result is PresubmitResult.PASS:
@@ -944,6 +635,48 @@ def _process_pathspecs(
return pathspecs_by_repo
+def fetch_file_lists(
+ root: Path,
+ repo: Path,
+ pathspecs: List[str],
+ exclude: Sequence[Pattern] = (),
+ base: Optional[str] = None,
+) -> Tuple[List[Path], List[Path]]:
+ """Returns lists of all files and modified files for the given repo.
+
+ Args:
+ root: root path of the project
+ repo: path to the roots of Git repository to check
+ base: optional base Git commit to list files against
+ pathspecs: optional list of Git pathspecs to run the checks against
+ exclude: regular expressions for Posix-style paths to exclude
+ """
+
+ all_files: List[Path] = []
+ modified_files: List[Path] = []
+
+ all_files_repo = tuple(
+ tools.exclude_paths(
+ exclude, git_repo.list_files(None, pathspecs, repo), root
+ )
+ )
+ all_files += all_files_repo
+
+ if base is None:
+ modified_files += all_files_repo
+ else:
+ modified_files += tools.exclude_paths(
+ exclude, git_repo.list_files(base, pathspecs, repo), root
+ )
+
+ _LOG.info(
+ 'Checking %s',
+ git_repo.describe_files(repo, repo, base, pathspecs, exclude, root),
+ )
+
+ return all_files, modified_files
+
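Factoring this logic out of run() lets callers build per-repository file lists directly. A usage sketch (paths illustrative; base=None marks every file as modified):

    from pathlib import Path

    all_files, modified_files = fetch_file_lists(
        root=Path('/checkout'),
        repo=Path('/checkout'),
        pathspecs=[],
        base='HEAD~1',
    )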
+
def run( # pylint: disable=too-many-arguments,too-many-locals
program: Sequence[Check],
root: Path,
@@ -957,9 +690,11 @@ def run( # pylint: disable=too-many-arguments,too-many-locals
override_gn_args: Sequence[Tuple[str, str]] = (),
keep_going: bool = False,
continue_after_build_error: bool = False,
+ rng_seed: int = 1,
presubmit_class: type = Presubmit,
list_steps_file: Optional[Path] = None,
substep: Optional[str] = None,
+ dry_run: bool = False,
) -> bool:
"""Lists files in the current Git repo and runs a Presubmit with them.
@@ -987,6 +722,8 @@ def run( # pylint: disable=too-many-arguments,too-many-locals
override_gn_args: additional GN args to set on steps
keep_going: continue running presubmit steps after a step fails
continue_after_build_error: continue building if a build step fails
+ rng_seed: seed for a random number generator, for the few steps that
+ need one
presubmit_class: class to use to run Presubmits, should inherit from
Presubmit class above
list_steps_file: File created by --only-list-steps, used to keep from
@@ -1030,26 +767,11 @@ def run( # pylint: disable=too-many-arguments,too-many-locals
else:
for repo, pathspecs in pathspecs_by_repo.items():
- all_files_repo = tuple(
- tools.exclude_paths(
- exclude, git_repo.list_files(None, pathspecs, repo), root
- )
- )
- all_files += all_files_repo
-
- if base is None:
- modified_files += all_files_repo
- else:
- modified_files += tools.exclude_paths(
- exclude, git_repo.list_files(base, pathspecs, repo), root
- )
-
- _LOG.info(
- 'Checking %s',
- git_repo.describe_files(
- repo, repo, base, pathspecs, exclude, root
- ),
+ new_all_files_items, new_modified_file_items = fetch_file_lists(
+ root, repo, pathspecs, exclude, base
)
+ all_files.extend(new_all_files_items)
+ modified_files.extend(new_modified_file_items)
if output_directory is None:
output_directory = root / '.presubmit'
@@ -1066,6 +788,8 @@ def run( # pylint: disable=too-many-arguments,too-many-locals
package_root=package_root,
override_gn_args=dict(override_gn_args or {}),
continue_after_build_error=continue_after_build_error,
+ rng_seed=rng_seed,
+ full=bool(base is None),
)
if only_list_steps:
@@ -1091,7 +815,7 @@ def run( # pylint: disable=too-many-arguments,too-many-locals
if not isinstance(program, Program):
program = Program('', program)
- return presubmit.run(program, keep_going, substep=substep)
+ return presubmit.run(program, keep_going, substep=substep, dry_run=dry_run)
def _make_str_tuple(value: Union[Iterable[str], str]) -> Tuple[str, ...]:
@@ -1195,6 +919,8 @@ class Check:
self.filter = path_filter
self.always_run: bool = always_run
+ self._is_presubmit_check_object = True
+
def substeps(self) -> Sequence[SubStep]:
"""Return the SubSteps of the current step.
@@ -1294,6 +1020,9 @@ class Check:
time_str = _format_time(time.time() - start_time_s)
_LOG.debug('%s %s', self.name, result.value)
+ if ctx.dry_run:
+ log_check_traces(ctx)
+
_print_ui(
_box(_CHECK_LOWER, result.colorized(_LEFT), self.name, time_str)
)
@@ -1416,7 +1145,7 @@ def filter_paths(
'endswith/exclude args, not both'
)
else:
- # TODO(b/238426363): Remove these arguments and use FileFilter only.
+ # TODO: b/238426363 - Remove these arguments and use FileFilter only.
real_file_filter = FileFilter(
endswith=_make_str_tuple(endswith), exclude=exclude
)
@@ -1427,8 +1156,19 @@ def filter_paths(
return filter_paths_for_function
-def call(*args, **kwargs) -> None:
+def call(
+ *args, call_annotation: Optional[Dict[Any, Any]] = None, **kwargs
+) -> None:
"""Optional subprocess wrapper that causes a PresubmitFailure on errors."""
+ ctx = PRESUBMIT_CONTEXT.get()
+ if ctx:
+ call_annotation = call_annotation if call_annotation else {}
+ ctx.append_check_command(
+ *args, call_annotation=call_annotation, **kwargs
+ )
+ if ctx.dry_run:
+ return
+
attributes, command = tools.format_command(args, kwargs)
_LOG.debug('[RUN] %s\n%s', attributes, command)
@@ -1495,6 +1235,16 @@ def install_package(
root = ctx.package_root
mgr = package_manager.PackageManager(root)
+ ctx.append_check_command(
+ 'pw',
+ 'package',
+ 'install',
+ name,
+ call_annotation={'pw_package_install': name},
+ )
+ if ctx.dry_run:
+ return
+
if not mgr.list():
raise PresubmitFailure(
'no packages configured, please import your pw_package '
diff --git a/pw_presubmit/py/pw_presubmit/presubmit_context.py b/pw_presubmit/py/pw_presubmit/presubmit_context.py
new file mode 100644
index 000000000..131be90c0
--- /dev/null
+++ b/pw_presubmit/py/pw_presubmit/presubmit_context.py
@@ -0,0 +1,624 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""pw_presubmit ContextVar."""
+
+from contextvars import ContextVar
+import dataclasses
+import enum
+import inspect
+import logging
+import json
+import os
+from pathlib import Path
+import re
+import shlex
+import shutil
+import subprocess
+import tempfile
+from typing import (
+ Any,
+ Dict,
+ List,
+ Iterable,
+ NamedTuple,
+ Optional,
+ Sequence,
+ Tuple,
+ TYPE_CHECKING,
+)
+import urllib
+
+import pw_cli.color
+import pw_cli.env
+import pw_env_setup.config_file
+
+if TYPE_CHECKING:
+ from pw_presubmit.presubmit import Check
+
+_COLOR = pw_cli.color.colors()
+_LOG: logging.Logger = logging.getLogger(__name__)
+
+PRESUBMIT_CHECK_TRACE: ContextVar[
+ Dict[str, List['PresubmitCheckTrace']]
+] = ContextVar('pw_presubmit_check_trace', default={})
+
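PRESUBMIT_CHECK_TRACE follows the standard contextvars pattern: a module-level ContextVar holding state that is looked up per execution context. The same pattern in miniature:

    from contextvars import ContextVar
    from typing import Dict, List

    TRACE: ContextVar[Dict[str, List[str]]] = ContextVar('trace', default={})

    def record(key: str, value: str) -> None:
        # Append to the trace dict for the current execution context.
        TRACE.get().setdefault(key, []).append(value)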
+
+@dataclasses.dataclass(frozen=True)
+class FormatOptions:
+ python_formatter: Optional[str] = 'yapf'
+ black_path: Optional[str] = 'black'
+ exclude: Sequence[re.Pattern] = dataclasses.field(default_factory=list)
+
+ @staticmethod
+ def load(env: Optional[Dict[str, str]] = None) -> 'FormatOptions':
+ config = pw_env_setup.config_file.load(env=env)
+ fmt = config.get('pw', {}).get('pw_presubmit', {}).get('format', {})
+ return FormatOptions(
+ python_formatter=fmt.get('python_formatter', 'yapf'),
+ black_path=fmt.get('black_path', 'black'),
+ exclude=tuple(re.compile(x) for x in fmt.get('exclude', ())),
+ )
+
+ def filter_paths(self, paths: Iterable[Path]) -> Tuple[Path, ...]:
+ root = Path(pw_cli.env.pigweed_environment().PW_PROJECT_ROOT)
+ relpaths = [x.relative_to(root) for x in paths]
+
+ for filt in self.exclude:
+ relpaths = [x for x in relpaths if not filt.search(str(x))]
+ return tuple(root / x for x in relpaths)
+
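FormatOptions.load now also pulls exclude patterns from the project config, which pigweed_presubmit.py uses for PATH_EXCLUSIONS. The shape of the config it consumes, written here as a Python dict (in pigweed.json the same keys nest under the top level; values illustrative):

    config = {
        'pw': {
            'pw_presubmit': {
                'format': {
                    'python_formatter': 'black',
                    'black_path': 'black',
                    'exclude': [r'\bthird_party/fuchsia/repo/'],
                },
            },
        },
    }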
+
+def get_buildbucket_info(bbid) -> Dict[str, Any]:
+ if not bbid or not shutil.which('bb'):
+ return {}
+
+ output = subprocess.check_output(
+ ['bb', 'get', '-json', '-p', f'{bbid}'], text=True
+ )
+ return json.loads(output)
+
+
+@dataclasses.dataclass
+class LuciPipeline:
+ """Details of previous builds in this pipeline, if applicable.
+
+ Attributes:
+ round: The zero-indexed round number.
+ builds_from_previous_iteration: A list of the buildbucket ids from the
+ previous round, if any.
+ """
+
+ round: int
+ builds_from_previous_iteration: Sequence[int]
+
+ @staticmethod
+ def create(
+ bbid: int,
+ fake_pipeline_props: Optional[Dict[str, Any]] = None,
+ ) -> Optional['LuciPipeline']:
+ pipeline_props: Dict[str, Any]
+ if fake_pipeline_props is not None:
+ pipeline_props = fake_pipeline_props
+ else:
+ pipeline_props = (
+ get_buildbucket_info(bbid)
+ .get('input', {})
+ .get('properties', {})
+ .get('$pigweed/pipeline', {})
+ )
+ if not pipeline_props.get('inside_a_pipeline', False):
+ return None
+
+ return LuciPipeline(
+ round=int(pipeline_props['round']),
+ builds_from_previous_iteration=list(
+ int(x) for x in pipeline_props['builds_from_previous_iteration']
+ ),
+ )
+
+
+@dataclasses.dataclass
+class LuciTrigger:
+ """Details the pending change or submitted commit triggering the build.
+
+ Attributes:
+ number: The number of the change in Gerrit.
+ patchset: The number of the patchset of the change.
+ remote: The full URL of the remote.
+ project: The name of the project in Gerrit.
+ branch: The name of the branch on which this change is being/was
+ submitted.
+ ref: The "refs/changes/.." path that can be used to reference the
+ patch for unsubmitted changes and the hash for submitted changes.
+ gerrit_name: The name of the googlesource.com Gerrit host.
+ submitted: Whether the change has been submitted or is still pending.
+ gerrit_host: The scheme and hostname of the googlesource.com Gerrit
+ host.
+ gerrit_url: The full URL to this change on the Gerrit host.
+ gitiles_url: The full URL to this commit in Gitiles.
+ """
+
+ number: int
+ patchset: int
+ remote: str
+ project: str
+ branch: str
+ ref: str
+ gerrit_name: str
+ submitted: bool
+
+ @property
+ def gerrit_host(self):
+ return f'https://{self.gerrit_name}-review.googlesource.com'
+
+ @property
+ def gerrit_url(self):
+ if not self.number:
+ return self.gitiles_url
+ return f'{self.gerrit_host}/c/{self.number}'
+
+ @property
+ def gitiles_url(self):
+ return f'{self.remote}/+/{self.ref}'
+
+ @staticmethod
+ def create_from_environment(
+ env: Optional[Dict[str, str]] = None,
+ ) -> Sequence['LuciTrigger']:
+ if not env:
+ env = os.environ.copy()
+ raw_path = env.get('TRIGGERING_CHANGES_JSON')
+ if not raw_path:
+ return ()
+ path = Path(raw_path)
+ if not path.is_file():
+ return ()
+
+ result = []
+ with open(path, 'r') as ins:
+ for trigger in json.load(ins):
+ keys = {
+ 'number',
+ 'patchset',
+ 'remote',
+ 'project',
+ 'branch',
+ 'ref',
+ 'gerrit_name',
+ 'submitted',
+ }
+ if keys <= trigger.keys():
+ result.append(LuciTrigger(**{x: trigger[x] for x in keys}))
+
+ return tuple(result)
+
+ @staticmethod
+ def create_for_testing(**kwargs):
+ change = {
+ 'number': 123456,
+ 'patchset': 1,
+ 'remote': 'https://pigweed.googlesource.com/pigweed/pigweed',
+ 'project': 'pigweed/pigweed',
+ 'branch': 'main',
+ 'ref': 'refs/changes/56/123456/1',
+ 'gerrit_name': 'pigweed',
+ 'submitted': True,
+ }
+ change.update(kwargs)
+
+ with tempfile.TemporaryDirectory() as tempdir:
+ changes_json = Path(tempdir) / 'changes.json'
+ with changes_json.open('w') as outs:
+ json.dump([change], outs)
+ env = {'TRIGGERING_CHANGES_JSON': str(changes_json)}
+ return LuciTrigger.create_from_environment(env)
+
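+# Illustrative only: create_for_testing() above writes a synthetic change
+# to a temporary TRIGGERING_CHANGES_JSON file and parses it back:
+#
+#   (trigger,) = LuciTrigger.create_for_testing(submitted=False)
+#   assert trigger.gerrit_url.endswith('/c/123456')
+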
+
+@dataclasses.dataclass
+class LuciContext:
+ """LUCI-specific information about the environment.
+
+ Attributes:
+ buildbucket_id: The globally-unique buildbucket id of the build.
+ build_number: The builder-specific incrementing build number, if
+ configured for this builder.
+ project: The LUCI project under which this build is running (often
+ "pigweed" or "pigweed-internal").
+ bucket: The LUCI bucket under which this build is running (often ends
+ with "ci" or "try").
+ builder: The builder being run.
+ swarming_server: The swarming server on which this build is running.
+ swarming_task_id: The swarming task id of this build.
+ cas_instance: The CAS instance accessible from this build.
+ pipeline: Information about the build pipeline, if applicable.
+ triggers: Information about triggering commits, if applicable.
+ is_try: True if the bucket is a try bucket.
+ is_ci: True if the bucket is a ci bucket.
+ is_dev: True if the bucket is a dev bucket.
+ """
+
+ buildbucket_id: int
+ build_number: int
+ project: str
+ bucket: str
+ builder: str
+ swarming_server: str
+ swarming_task_id: str
+ cas_instance: str
+ pipeline: Optional[LuciPipeline]
+ triggers: Sequence[LuciTrigger] = dataclasses.field(default_factory=tuple)
+
+ @property
+ def is_try(self) -> bool:
+ return bool(re.search(r'\btry$', self.bucket))
+
+ @property
+ def is_ci(self) -> bool:
+ return bool(re.search(r'\bci$', self.bucket))
+
+ @property
+ def is_dev(self) -> bool:
+ return bool(re.search(r'\bdev\b', self.bucket))
+
+ @staticmethod
+ def create_from_environment(
+ env: Optional[Dict[str, str]] = None,
+ fake_pipeline_props: Optional[Dict[str, Any]] = None,
+ ) -> Optional['LuciContext']:
+ """Create a LuciContext from the environment."""
+
+ if not env:
+ env = os.environ.copy()
+
+ luci_vars = [
+ 'BUILDBUCKET_ID',
+ 'BUILDBUCKET_NAME',
+ 'BUILD_NUMBER',
+ 'SWARMING_TASK_ID',
+ 'SWARMING_SERVER',
+ ]
+ if any(x not in env for x in luci_vars):
+ return None
+
+ project, bucket, builder = env['BUILDBUCKET_NAME'].split(':')
+
+ bbid: int = 0
+ pipeline: Optional[LuciPipeline] = None
+ try:
+ bbid = int(env['BUILDBUCKET_ID'])
+ pipeline = LuciPipeline.create(bbid, fake_pipeline_props)
+
+ except ValueError:
+ pass
+
+ # Logic to identify cas instance from swarming server is derived from
+ # https://chromium.googlesource.com/infra/luci/recipes-py/+/main/recipe_modules/cas/api.py
+ swarm_server = env['SWARMING_SERVER']
+ cas_project = urllib.parse.urlparse(swarm_server).netloc.split('.')[0]
+ cas_instance = f'projects/{cas_project}/instances/default_instance'
+
+ result = LuciContext(
+ buildbucket_id=bbid,
+ build_number=int(env['BUILD_NUMBER']),
+ project=project,
+ bucket=bucket,
+ builder=builder,
+ swarming_server=env['SWARMING_SERVER'],
+ swarming_task_id=env['SWARMING_TASK_ID'],
+ cas_instance=cas_instance,
+ pipeline=pipeline,
+ triggers=LuciTrigger.create_from_environment(env),
+ )
+ _LOG.debug('%r', result)
+ return result
+
+ @staticmethod
+ def create_for_testing(**kwargs):
+ env = {
+ 'BUILDBUCKET_ID': '881234567890',
+ 'BUILDBUCKET_NAME': 'pigweed:bucket.try:builder-name',
+ 'BUILD_NUMBER': '123',
+ 'SWARMING_SERVER': 'https://chromium-swarm.appspot.com',
+ 'SWARMING_TASK_ID': 'cd2dac62d2',
+ }
+ env.update(kwargs)
+
+ return LuciContext.create_from_environment(env, {})
+
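+# Illustrative only: create_for_testing() above parses BUILDBUCKET_NAME
+# into project, bucket, and builder, so with the default values:
+#
+#   ctx = LuciContext.create_for_testing()
+#   assert ctx is not None
+#   assert (ctx.project, ctx.bucket, ctx.builder) == (
+#       'pigweed', 'bucket.try', 'builder-name')
+#   assert ctx.is_try and not ctx.is_ci
+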
+
+@dataclasses.dataclass
+class FormatContext:
+ """Context passed into formatting helpers.
+
+ This class is a subset of PresubmitContext containing only what's needed by
+ formatters.
+
+ For full documentation on the members see the PresubmitContext section of
+ pw_presubmit/docs.rst.
+
+ Attributes:
+ root: Source checkout root directory.
+ output_dir: Output directory for this specific language.
+ paths: Modified files for the presubmit step to check (often used in
+ formatting steps but ignored in compile steps).
+ package_root: Root directory for pw package installations.
+ format_options: Formatting options, derived from pigweed.json.
+ dry_run: Whether to just report issues or also fix them.
+ """
+
+ root: Optional[Path]
+ output_dir: Path
+ paths: Tuple[Path, ...]
+ package_root: Path
+ format_options: FormatOptions
+ dry_run: bool = False
+
+ def append_check_command(self, *command_args, **command_kwargs) -> None:
+ """No-op append_check_command; formatting steps don't record commands."""
+
+
+class PresubmitFailure(Exception):
+ """Optional exception to use for presubmit failures."""
+
+ def __init__(
+ self,
+ description: str = '',
+ path: Optional[Path] = None,
+ line: Optional[int] = None,
+ ):
+ line_part: str = ''
+ if line is not None:
+ line_part = f'{line}:'
+ super().__init__(
+ f'{path}:{line_part} {description}' if path else description
+ )
+
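+# Illustrative only: a check can fail immediately by raising this
+# exception; path and line are optional and prefix the message:
+#
+#   raise PresubmitFailure('missing newline at end of file', path, lineno)
+#
+# renders as "<path>:<lineno>: missing newline at end of file" (path and
+# lineno here are hypothetical).
+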
+
+@dataclasses.dataclass
+class PresubmitContext: # pylint: disable=too-many-instance-attributes
+ """Context passed into presubmit checks.
+
+ For full documentation on the members see pw_presubmit/docs.rst.
+
+ Attributes:
+ root: Source checkout root directory.
+ repos: Repositories (top-level and submodules) processed by
+ `pw presubmit`.
+ output_dir: Output directory for this specific presubmit step.
+ failure_summary_log: Path where steps should write a brief summary of
+ any failures encountered for use by other tooling.
+ paths: Modified files for the presubmit step to check (often used in
+ formatting steps but ignored in compile steps).
+ all_paths: All files in the tree.
+ package_root: Root directory for pw package installations.
+ override_gn_args: Additional GN args processed by `build.gn_gen()`.
+ luci: Information about the LUCI build or None if not running in LUCI.
+ format_options: Formatting options, derived from pigweed.json.
+ num_jobs: Number of jobs to run in parallel.
+ continue_after_build_error: For steps that compile, don't exit on the
+ first compilation error.
+ rng_seed: Seed for a random number generator, for the few steps that
+ need one.
+ _failed: Whether the presubmit step in question has failed. Set to True
+ by calling ctx.fail().
+ full: Whether this is a full or incremental presubmit run.
+ """
+
+ root: Path
+ repos: Tuple[Path, ...]
+ output_dir: Path
+ failure_summary_log: Path
+ paths: Tuple[Path, ...]
+ all_paths: Tuple[Path, ...]
+ package_root: Path
+ luci: Optional[LuciContext]
+ override_gn_args: Dict[str, str]
+ format_options: FormatOptions
+ num_jobs: Optional[int] = None
+ continue_after_build_error: bool = False
+ rng_seed: int = 1
+ full: bool = False
+ _failed: bool = False
+ dry_run: bool = False
+
+ @property
+ def failed(self) -> bool:
+ return self._failed
+
+ @property
+ def incremental(self) -> bool:
+ return not self.full
+
+ def fail(
+ self,
+ description: str,
+ path: Optional[Path] = None,
+ line: Optional[int] = None,
+ ):
+ """Add a failure to this presubmit step.
+
+ If this is called at least once, the step fails, but not immediately;
+ the check is free to continue and possibly call this method again.
+ """
+ _LOG.warning('%s', PresubmitFailure(description, path, line))
+ self._failed = True
+
+ @staticmethod
+ def create_for_testing(**kwargs):
+ parsed_env = pw_cli.env.pigweed_environment()
+ root = parsed_env.PW_PROJECT_ROOT
+ presubmit_root = root / 'out' / 'presubmit'
+ presubmit_kwargs = {
+ 'root': root,
+ 'repos': (root,),
+ 'output_dir': presubmit_root / 'test',
+ 'failure_summary_log': presubmit_root / 'failure-summary.log',
+ 'paths': (root / 'foo.cc', root / 'foo.py'),
+ 'all_paths': (root / 'BUILD.gn', root / 'foo.cc', root / 'foo.py'),
+ 'package_root': root / 'environment' / 'packages',
+ 'luci': None,
+ 'override_gn_args': {},
+ 'format_options': FormatOptions(),
+ }
+ presubmit_kwargs.update(kwargs)
+ return PresubmitContext(**presubmit_kwargs)
+
+ def append_check_command(
+ self,
+ *command_args,
+ call_annotation: Optional[Dict[Any, Any]] = None,
+ **command_kwargs,
+ ) -> None:
+ """Save a subprocess command annotation to this presubmit context.
+
+ This is used to capture commands that will be run for display in
+ ``pw presubmit --dry-run``.
+
+ Args:
+
+ command_args: All args that would normally be passed to
+ subprocess.run
+
+ call_annotation: Optional key value pairs of data to save for this
+ command. Examples:
+
+ ::
+
+ call_annotation={'pw_package_install': 'teensy'}
+ call_annotation={'build_system': 'bazel'}
+ call_annotation={'build_system': 'ninja'}
+
+ command_kwargs: keyword args that would normally be passed to
+ subprocess.run.
+ """
+ call_annotation = call_annotation if call_annotation else {}
+ calling_func: Optional[str] = None
+ calling_check = None
+
+ # Loop through the current call stack looking for `self`, stopping
+ # when self is a Check() instance and the enclosing function is
+ # __call__ or _try_call.
+
+ # This used to be an isinstance(obj, Check) call, but importing Check
+ # here would create a dependency loop, so look for a marker attribute
+ # instead.
+ def is_check_object(obj):
+ return getattr(obj, '_is_presubmit_check_object', False)
+
+ for frame_info in inspect.getouterframes(inspect.currentframe()):
+ self_obj = frame_info.frame.f_locals.get('self', None)
+ if (
+ self_obj
+ and is_check_object(self_obj)
+ and frame_info.function in ['_try_call', '__call__']
+ ):
+ calling_func = frame_info.function
+ calling_check = self_obj
+
+ save_check_trace(
+ self.output_dir,
+ PresubmitCheckTrace(
+ self,
+ calling_check,
+ calling_func,
+ command_args,
+ command_kwargs,
+ call_annotation,
+ ),
+ )
+
+ def __post_init__(self) -> None:
+ PRESUBMIT_CONTEXT.set(self)
+
+ def __hash__(self):
+ return hash(
+ tuple(
+ tuple(attribute.items())
+ if isinstance(attribute, dict)
+ else attribute
+ for attribute in dataclasses.astuple(self)
+ )
+ )
+
+
+PRESUBMIT_CONTEXT: ContextVar[Optional[PresubmitContext]] = ContextVar(
+ 'pw_presubmit_context', default=None
+)
+
+
+def get_presubmit_context():
+ return PRESUBMIT_CONTEXT.get()
+
+
+class PresubmitCheckTraceType(enum.Enum):
+ BAZEL = 'BAZEL'
+ CMAKE = 'CMAKE'
+ GN_NINJA = 'GN_NINJA'
+ PW_PACKAGE = 'PW_PACKAGE'
+
+
+class PresubmitCheckTrace(NamedTuple):
+ ctx: 'PresubmitContext'
+ check: Optional['Check']
+ func: Optional[str]
+ args: Iterable[Any]
+ kwargs: Dict[Any, Any]
+ call_annotation: Dict[Any, Any]
+
+ def __repr__(self) -> str:
+ return f'''CheckTrace(
+ ctx={self.ctx.output_dir}
+ id(ctx)={id(self.ctx)}
+ check={self.check}
+ args={self.args}
+ kwargs={self.kwargs.keys()}
+ call_annotation={self.call_annotation}
+)'''
+
+
+def save_check_trace(output_dir: Path, trace: PresubmitCheckTrace) -> None:
+ trace_key = str(output_dir.resolve())
+ trace_list = PRESUBMIT_CHECK_TRACE.get().get(trace_key, [])
+ trace_list.append(trace)
+ PRESUBMIT_CHECK_TRACE.get()[trace_key] = trace_list
+
+
+def get_check_traces(ctx: 'PresubmitContext') -> List[PresubmitCheckTrace]:
+ trace_key = str(ctx.output_dir.resolve())
+ return PRESUBMIT_CHECK_TRACE.get().get(trace_key, [])
+
+
+def log_check_traces(ctx: 'PresubmitContext') -> None:
+ traces = PRESUBMIT_CHECK_TRACE.get()
+
+ for _output_dir, check_traces in traces.items():
+ for check_trace in check_traces:
+ if check_trace.ctx != ctx:
+ continue
+
+ quoted_command_args = ' '.join(
+ shlex.quote(str(arg)) for arg in check_trace.args
+ )
+ _LOG.info(
+ '%s %s',
+ _COLOR.blue('Run ==>'),
+ quoted_command_args,
+ )
+
+
+def apply_exclusions(
+ ctx: PresubmitContext,
+ paths: Optional[Sequence[Path]] = None,
+) -> Tuple[Path, ...]:
+ return ctx.format_options.filter_paths(paths or ctx.paths)
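+
+
+# Illustrative only: a presubmit step typically narrows its input paths
+# before running; shellcheck() in shell_checks.py below does exactly this:
+#
+#   ctx.paths = apply_exclusions(ctx)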
diff --git a/pw_presubmit/py/pw_presubmit/python_checks.py b/pw_presubmit/py/pw_presubmit/python_checks.py
index f694b354c..307a7256a 100644
--- a/pw_presubmit/py/pw_presubmit/python_checks.py
+++ b/pw_presubmit/py/pw_presubmit/python_checks.py
@@ -16,36 +16,40 @@
These checks assume that they are running in a preconfigured Python environment.
"""
+import difflib
import json
import logging
-import os
from pathlib import Path
-import subprocess
+import platform
+import shutil
import sys
+from tempfile import TemporaryDirectory
from typing import Optional
-try:
- import pw_presubmit
-except ImportError:
- # Append the pw_presubmit package path to the module search path to allow
- # running this module without installing the pw_presubmit package.
- sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
- import pw_presubmit
-
from pw_env_setup import python_packages
-from pw_presubmit import (
- build,
+
+from pw_presubmit.presubmit import (
call,
Check,
filter_paths,
+)
+from pw_presubmit.presubmit_context import (
PresubmitContext,
PresubmitFailure,
)
+from pw_presubmit import build
+from pw_presubmit.tools import log_run, colorize_diff_line
_LOG = logging.getLogger(__name__)
_PYTHON_EXTENSIONS = ('.py', '.gn', '.gni')
+_PYTHON_PACKAGE_EXTENSIONS = (
+ 'setup.cfg',
+ 'constraint.list',
+ 'requirements.txt',
+)
+
_PYTHON_IS_3_9_OR_HIGHER = sys.version_info >= (
3,
9,
@@ -78,7 +82,7 @@ def _transform_lcov_file_paths(lcov_file: Path, repo_root: Path) -> str:
file_string = line[3:].rstrip()
source_file_path = Path(file_string)
- # TODO(b/248257406) Remove once we drop support for Python 3.8.
+ # TODO: b/248257406 - Remove once we drop support for Python 3.8.
def is_relative_to(path: Path, other: Path) -> bool:
try:
path.relative_to(other)
@@ -149,7 +153,7 @@ def gn_python_test_coverage(ctx: PresubmitContext):
coverage_omit_patterns,
]
report_args += changed_python_files
- subprocess.run(report_args, check=False, cwd=ctx.output_dir)
+ log_run(report_args, check=False, cwd=ctx.output_dir)
# Generate a json report
call('coverage', 'lcov', coverage_omit_patterns, cwd=ctx.output_dir)
@@ -165,8 +169,204 @@ def gn_python_test_coverage(ctx: PresubmitContext):
_LOG.info('Coverage html report saved to: %s', html_report.resolve())
+@filter_paths(endswith=_PYTHON_PACKAGE_EXTENSIONS)
+def vendor_python_wheels(ctx: PresubmitContext) -> None:
+ """Download Python packages locally for the current platform."""
+ build.gn_gen(ctx)
+ build.ninja(ctx, 'pip_vendor_wheels')
+
+ download_log = (
+ ctx.output_dir
+ / 'python/gen/pw_env_setup/pigweed_build_venv.vendor_wheels'
+ / 'pip_download_log.txt'
+ )
+ _LOG.info('Python package download log: %s', download_log)
+
+ wheel_output = (
+ ctx.output_dir
+ / 'python/gen/pw_env_setup'
+ / 'pigweed_build_venv.vendor_wheels/wheels/'
+ )
+ wheel_destination = ctx.output_dir / 'python_wheels'
+ shutil.rmtree(wheel_destination, ignore_errors=True)
+ shutil.copytree(wheel_output, wheel_destination, dirs_exist_ok=True)
+
+ _LOG.info('Python packages downloaded to: %s', wheel_destination)
+
+
+def _generate_constraint_with_hashes(
+ ctx: PresubmitContext, input_file: Path, output_file: Path
+) -> None:
+ assert input_file.is_file()
+
+ call(
+ "pip-compile",
+ input_file,
+ "--generate-hashes",
+ "--reuse-hashes",
+ "--resolver=backtracking",
+ "--strip-extras",
+ # Force pinning pip and setuptools
+ "--allow-unsafe",
+ "-o",
+ output_file,
+ )
+
+ # Remove absolute paths from comments
+ output_text = output_file.read_text()
+ output_text = output_text.replace(str(ctx.output_dir), '')
+ output_text = output_text.replace(str(ctx.root), '')
+ output_text = output_text.replace(str(output_file.parent), '')
+
+ final_output_text = ''
+ for line in output_text.splitlines(keepends=True):
+ # Remove --find-links lines
+ if line.startswith('--find-links'):
+ continue
+ # Remove blank lines
+ if line == '\n':
+ continue
+ final_output_text += line
+
+ output_file.write_text(final_output_text)
+
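+# Illustrative only: the cleanup above removes --find-links lines, blank
+# lines, and absolute paths from pip-compile output, e.g. turning
+#
+#   --find-links=/checkout/out/python_wheels
+#   pyyaml==6.0 \
+#       --hash=sha256:...
+#
+# into just the pinned requirement lines (the path, package, version, and
+# hash shown here are hypothetical).
+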
+
+def _update_upstream_python_constraints(
+ ctx: PresubmitContext,
+ update_files: bool = False,
+) -> None:
+ """Regenerate platform specific Python constraint files with hashes."""
+ with TemporaryDirectory() as tmpdirname:
+ out_dir = Path(tmpdirname)
+ build.gn_gen(
+ ctx,
+ pw_build_PIP_REQUIREMENTS=[],
+ # Use the constraint file without hashes as the input. This is where
+ # new packages are added by developers.
+ pw_build_PIP_CONSTRAINTS=[
+ '//pw_env_setup/py/pw_env_setup/virtualenv_setup/'
+ 'constraint.list',
+ ],
+ # This should always be set to false when regenerating constraints.
+ pw_build_PYTHON_PIP_INSTALL_REQUIRE_HASHES=False,
+ )
+ build.ninja(ctx, 'pip_constraint_update')
+
+ # One of: darwin, linux, or windows.
+ platform_name = platform.system().lower()
+
+ constraint_hashes_filename = f'constraint_hashes_{platform_name}.list'
+ constraint_hashes_original = (
+ ctx.root
+ / 'pw_env_setup/py/pw_env_setup/virtualenv_setup'
+ / constraint_hashes_filename
+ )
+ constraint_hashes_tmp_out = out_dir / constraint_hashes_filename
+ _generate_constraint_with_hashes(
+ ctx,
+ input_file=(
+ ctx.output_dir
+ / 'python/gen/pw_env_setup/pigweed_build_venv'
+ / 'compiled_requirements.txt'
+ ),
+ output_file=constraint_hashes_tmp_out,
+ )
+
+ build.gn_gen(
+ ctx,
+ # This should always be set to false when regenerating constraints.
+ pw_build_PYTHON_PIP_INSTALL_REQUIRE_HASHES=False,
+ )
+ build.ninja(ctx, 'pip_constraint_update')
+
+ upstream_requirements_lock_filename = (
+ f'upstream_requirements_{platform_name}_lock.txt'
+ )
+ upstream_requirements_lock_original = (
+ ctx.root
+ / 'pw_env_setup/py/pw_env_setup/virtualenv_setup'
+ / upstream_requirements_lock_filename
+ )
+ upstream_requirements_lock_tmp_out = (
+ out_dir / upstream_requirements_lock_filename
+ )
+ _generate_constraint_with_hashes(
+ ctx,
+ input_file=(
+ ctx.output_dir
+ / 'python/gen/pw_env_setup/pigweed_build_venv'
+ / 'compiled_requirements.txt'
+ ),
+ output_file=upstream_requirements_lock_tmp_out,
+ )
+
+ if update_files:
+ constraint_hashes_original.write_text(
+ constraint_hashes_tmp_out.read_text()
+ )
+ _LOG.info('Updated: %s', constraint_hashes_original)
+ upstream_requirements_lock_original.write_text(
+ upstream_requirements_lock_tmp_out.read_text()
+ )
+ _LOG.info('Updated: %s', upstream_requirements_lock_original)
+ return
+
+ # Make a diff of required changes
+ constraint_hashes_diff = list(
+ difflib.unified_diff(
+ constraint_hashes_original.read_text(
+ 'utf-8', errors='replace'
+ ).splitlines(),
+ constraint_hashes_tmp_out.read_text(
+ 'utf-8', errors='replace'
+ ).splitlines(),
+ fromfile=str(constraint_hashes_original) + ' (original)',
+ tofile=str(constraint_hashes_original) + ' (updated)',
+ lineterm='',
+ n=1,
+ )
+ )
+ upstream_requirements_lock_diff = list(
+ difflib.unified_diff(
+ upstream_requirements_lock_original.read_text(
+ 'utf-8', errors='replace'
+ ).splitlines(),
+ upstream_requirements_lock_tmp_out.read_text(
+ 'utf-8', errors='replace'
+ ).splitlines(),
+ fromfile=str(upstream_requirements_lock_original)
+ + ' (original)',
+ tofile=str(upstream_requirements_lock_original) + ' (updated)',
+ lineterm='',
+ n=1,
+ )
+ )
+ if constraint_hashes_diff:
+ for line in constraint_hashes_diff:
+ print(colorize_diff_line(line))
+ if upstream_requirements_lock_diff:
+ for line in upstream_requirements_lock_diff:
+ print(colorize_diff_line(line))
+ if constraint_hashes_diff or upstream_requirements_lock_diff:
+ raise PresubmitFailure(
+ 'Please run:\n'
+ '\n'
+ ' pw presubmit --step update_upstream_python_constraints'
+ )
+
+
+@filter_paths(endswith=_PYTHON_PACKAGE_EXTENSIONS)
+def check_upstream_python_constraints(ctx: PresubmitContext) -> None:
+ _update_upstream_python_constraints(ctx, update_files=False)
+
+
+@filter_paths(endswith=_PYTHON_PACKAGE_EXTENSIONS)
+def update_upstream_python_constraints(ctx: PresubmitContext) -> None:
+ _update_upstream_python_constraints(ctx, update_files=True)
+
+
@filter_paths(endswith=_PYTHON_EXTENSIONS + ('.pylintrc',))
-def gn_python_lint(ctx: pw_presubmit.PresubmitContext) -> None:
+def gn_python_lint(ctx: PresubmitContext) -> None:
build.gn_gen(ctx)
build.ninja(ctx, 'python.lint')
diff --git a/pw_presubmit/py/pw_presubmit/rst_format.py b/pw_presubmit/py/pw_presubmit/rst_format.py
new file mode 100644
index 000000000..ea5d309b5
--- /dev/null
+++ b/pw_presubmit/py/pw_presubmit/rst_format.py
@@ -0,0 +1,246 @@
+#!/usr/bin/env python3
+
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Restructured Text Formatting."""
+
+import argparse
+from dataclasses import dataclass, field
+import difflib
+from functools import cached_property
+from pathlib import Path
+import textwrap
+from typing import List, Optional
+
+from pw_presubmit.tools import colorize_diff
+
+DEFAULT_TAB_WIDTH = 8
+CODE_BLOCK_INDENTATION = 3
+
+
+def _parse_args() -> argparse.Namespace:
+ parser = argparse.ArgumentParser(description=__doc__)
+
+ parser.add_argument(
+ '--tab-width',
+ default=DEFAULT_TAB_WIDTH,
+ help='Number of spaces to use when converting tab characters.',
+ )
+ parser.add_argument(
+ '--diff',
+ action='store_true',
+ help='Print a diff of formatting changes.',
+ )
+ parser.add_argument(
+ '-i',
+ '--in-place',
+ action='store_true',
+ help='Replace existing file with the reformatted copy.',
+ )
+ parser.add_argument(
+ 'rst_files',
+ nargs='+',
+ default=[],
+ type=Path,
+ help='Paths to rst files.',
+ )
+
+ return parser.parse_args()
+
+
+def _indent_amount(line: str) -> int:
+ return len(line) - len(line.lstrip())
+
+
+def _reindent(input_text: str, amount: int) -> str:
+ text = ''
+ for line in textwrap.dedent(input_text).splitlines():
+ if len(line.strip()) == 0:
+ text += '\n'
+ continue
+ text += ' ' * amount
+ text += line
+ text += '\n'
+ return text
+
+
+def _strip_trailing_whitespace(line: str) -> str:
+ return line.rstrip() + '\n'
+
+
+@dataclass
+class CodeBlock:
+ """Store a single code block."""
+
+ directive_lineno: int
+ directive_line: str
+ first_line_indent: Optional[int] = None
+ end_lineno: Optional[int] = None
+ option_lines: List[str] = field(default_factory=list)
+ code_lines: List[str] = field(default_factory=list)
+
+ def __post_init__(self) -> None:
+ self._blank_line_after_options_found = False
+
+ def finished(self) -> bool:
+ return self.end_lineno is not None
+
+ def append_line(self, index: int, line: str) -> None:
+ """Process a line for this code block."""
+ # Check if outside the code block (indentation is less):
+ if (
+ self.first_line_indent is not None
+ and len(line.strip()) > 0
+ and _indent_amount(line) < self.first_line_indent
+ ):
+ # Code block ended
+ self.end_lineno = index
+ return
+
+ # If first line indent hasn't been found
+ if self.first_line_indent is None:
+ # Check if the first word is a directive option.
+ # E.g. :caption:
+ line_words = line.split()
+ if (
+ line_words
+ and line_words[0].startswith(':')
+ and line_words[0].endswith(':')
+ ):
+ self.option_lines.append(line.rstrip())
+ return
+ # Check for a blank line
+ if len(line.strip()) == 0:
+ if (
+ self.option_lines
+ and not self._blank_line_after_options_found
+ ):
+ self._blank_line_after_options_found = True
+ return
+ # Check for a line that is a continuation of a previous option.
+ if self.option_lines and not self._blank_line_after_options_found:
+ self.option_lines.append(line.rstrip())
+ return
+
+ self.first_line_indent = _indent_amount(line)
+
+ # Save this line as code.
+ self.code_lines.append(line.rstrip())
+
+ @cached_property
+ def directive_indent_amount(self) -> int:
+ return _indent_amount(self.directive_line)
+
+ def options_block_text(self) -> str:
+ return _reindent(
+ input_text='\n'.join(self.option_lines),
+ amount=self.directive_indent_amount + CODE_BLOCK_INDENTATION,
+ )
+
+ def code_block_text(self) -> str:
+ return _reindent(
+ input_text='\n'.join(self.code_lines),
+ amount=self.directive_indent_amount + CODE_BLOCK_INDENTATION,
+ )
+
+ def to_text(self) -> str:
+ text = ''
+ text += self.directive_line
+ if self.option_lines:
+ text += self.options_block_text()
+ text += '\n'
+ text += self.code_block_text()
+ text += '\n'
+ return text
+
+ def __repr__(self) -> str:
+ return self.to_text()
+
+
+def reindent_code_blocks(in_text: str) -> str:
+ """Reindent code blocks to 3 spaces."""
+ out_text = ''
+ current_block: Optional[CodeBlock] = None
+ for index, line in enumerate(in_text.splitlines(keepends=True)):
+ # If a code block is active, process this line.
+ if current_block:
+ current_block.append_line(index, line)
+ if current_block.finished():
+ out_text += current_block.to_text()
+ # This line wasn't part of the code block, process as normal.
+ out_text += _strip_trailing_whitespace(line)
+ # Erase this code_block variable
+ current_block = None
+ # Check for new code block start
+ elif line.lstrip().startswith('.. code') and line.rstrip().endswith(
+ '::'
+ ):
+ current_block = CodeBlock(
+ directive_lineno=index, directive_line=line
+ )
+ continue
+ else:
+ out_text += _strip_trailing_whitespace(line)
+ # If the document ends with a code block it may still need to be written.
+ if current_block is not None:
+ out_text += current_block.to_text()
+ return out_text
+
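+# Illustrative only: reindent_code_blocks() rewrites, for example,
+#
+#   .. code-block:: python
+#
+#        print('hi')
+#
+# so the code is indented exactly three spaces under the directive:
+#
+#   .. code-block:: python
+#
+#      print('hi')
+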
+
+def reformat_rst(
+ file_name: Path,
+ diff: bool = False,
+ in_place: bool = False,
+ tab_width: int = DEFAULT_TAB_WIDTH,
+) -> List[str]:
+ """Reformat an rst file.
+
+ Returns a list of diff lines."""
+ in_text = file_name.read_text()
+
+ # Replace tabs with spaces
+ out_text = in_text.replace('\t', ' ' * tab_width)
+
+ # Reindent .. code-block:: directives.
+ out_text = reindent_code_blocks(out_text)
+
+ result_diff = list(
+ difflib.unified_diff(
+ in_text.splitlines(True),
+ out_text.splitlines(True),
+ f'{file_name} (original)',
+ f'{file_name} (reformatted)',
+ )
+ )
+ if diff and result_diff:
+ print(''.join(colorize_diff(result_diff)))
+
+ if in_place:
+ file_name.write_text(out_text)
+
+ return result_diff
+
+
+def rst_format_main(
+ rst_files: List[Path],
+ diff: bool = False,
+ in_place: bool = False,
+ tab_width: int = DEFAULT_TAB_WIDTH,
+) -> None:
+ for rst_file in rst_files:
+ reformat_rst(rst_file, diff, in_place, tab_width)
+
+
+if __name__ == '__main__':
+ rst_format_main(**vars(_parse_args()))
diff --git a/pw_presubmit/py/pw_presubmit/shell_checks.py b/pw_presubmit/py/pw_presubmit/shell_checks.py
index 510c09f06..96b481f0b 100644
--- a/pw_presubmit/py/pw_presubmit/shell_checks.py
+++ b/pw_presubmit/py/pw_presubmit/shell_checks.py
@@ -14,13 +14,17 @@
"""Shell related checks."""
import logging
-from pw_presubmit import (
+
+from pw_presubmit import presubmit_context
+from pw_presubmit.presubmit import (
Check,
- PresubmitContext,
filter_paths,
- tools,
+)
+from pw_presubmit.presubmit_context import (
+ PresubmitContext,
PresubmitFailure,
)
+from pw_presubmit.tools import log_run
_LOG = logging.getLogger(__name__)
@@ -32,11 +36,14 @@ _SHELL_EXTENSIONS = ('.sh', '.bash')
def shellcheck(ctx: PresubmitContext) -> None:
"""Run shell script static analiyzer on presubmit."""
- _LOG.warning(
- "The Pigweed project discourages use of shellscripts. "
- "https://pigweed.dev/docs/faq.html"
- )
+ ctx.paths = presubmit_context.apply_exclusions(ctx)
+
+ if ctx.paths:
+ _LOG.warning(
+ "The Pigweed project discourages use of shellscripts. "
+ "https://pigweed.dev/docs/faq.html"
+ )
- result = tools.log_run(['shellcheck', *ctx.paths])
+ result = log_run(['shellcheck', *ctx.paths])
if result.returncode != 0:
 raise PresubmitFailure('Shellcheck identified issues.')
diff --git a/pw_presubmit/py/pw_presubmit/source_in_build.py b/pw_presubmit/py/pw_presubmit/source_in_build.py
index f19dd1174..e48a19124 100644
--- a/pw_presubmit/py/pw_presubmit/source_in_build.py
+++ b/pw_presubmit/py/pw_presubmit/source_in_build.py
@@ -16,10 +16,12 @@
import logging
from typing import Callable, Sequence
-from pw_presubmit import build, format_code, git_repo
+from pw_presubmit import build, format_code, git_repo, presubmit_context
from pw_presubmit.presubmit import (
Check,
FileFilter,
+)
+from pw_presubmit.presubmit_context import (
PresubmitContext,
PresubmitFailure,
)
@@ -54,6 +56,7 @@ def bazel(
"""Checks that source files are in the Bazel build."""
paths = source_filter.filter(ctx.all_paths)
+ paths = presubmit_context.apply_exclusions(ctx, paths)
missing = build.check_bazel_build_for_files(
files_and_extensions_to_check,
@@ -100,6 +103,7 @@ def gn( # pylint: disable=invalid-name
"""Checks that source files are in the GN build."""
paths = source_filter.filter(ctx.all_paths)
+ paths = presubmit_context.apply_exclusions(ctx, paths)
missing = build.check_gn_build_for_files(
files_and_extensions_to_check,
@@ -146,6 +150,7 @@ def cmake(
"""Checks that source files are in the CMake build."""
paths = source_filter.filter(ctx.all_paths)
+ paths = presubmit_context.apply_exclusions(ctx, paths)
run_cmake(ctx)
missing = build.check_compile_commands_for_files(
diff --git a/pw_presubmit/py/pw_presubmit/todo_check.py b/pw_presubmit/py/pw_presubmit/todo_check.py
index 783667f10..53a587edd 100644
--- a/pw_presubmit/py/pw_presubmit/todo_check.py
+++ b/pw_presubmit/py/pw_presubmit/todo_check.py
@@ -18,7 +18,9 @@ from pathlib import Path
import re
from typing import Iterable, Pattern, Sequence, Union
-from pw_presubmit import PresubmitContext, filter_paths
+from pw_presubmit import presubmit_context
+from pw_presubmit.presubmit import filter_paths
+from pw_presubmit.presubmit_context import PresubmitContext
_LOG: logging.Logger = logging.getLogger(__name__)
@@ -39,9 +41,37 @@ EXCLUDE: Sequence[str] = (
)
# todo-check: disable
-BUGS_ONLY = re.compile(r'\bTODO\(b/\d+(?:, ?b/\d+)*\).*\w')
+BUGS_ONLY = re.compile(
+ r'(?:\bTODO\(b/\d+(?:, ?b/\d+)*\).*\w)|'
+ r'(?:\bTODO: b/\d+(?:, ?b/\d+)* - )'
+)
BUGS_OR_USERNAMES = re.compile(
- r'\bTODO\((?:b/\d+|[a-z]+)(?:, ?(?:b/\d+|[a-z]+))*\).*\w'
+ r"""
+(?: # Legacy style.
+ \bTODO\(
+ (?:b/\d+|[a-z]+) # Username or bug.
+ (?:,[ ]?(?:b/\d+|[a-z]+))* # Additional usernames or bugs.
+ \)
+.*\w # Explanation.
+)|
+(?: # New style.
+ \bTODO:[ ]
+ (?:
+ b/\d+| # Bug.
+ # Username@ with optional domain.
+ [a-z]+@(?:[a-z][-a-z0-9]*(?:\.[a-z][-a-z0-9]*)+)?
+ )
+ (?:,[ ]? # Additional.
+ (?:
+ b/\d+| # Bug.
+ # Username@ with optional domain.
+ [a-z]+@(?:[a-z][-a-z0-9]*(?:\.[a-z][-a-z0-9]*)+)?
+ )
+ )*
+[ ]-[ ].*\w # Explanation.
+)
+ """,
+ re.VERBOSE,
)
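+# Illustrative examples (hypothetical bugs and usernames) accepted by the
+# new-style branch above:
+#   TODO: b/123456789 - Refactor this check.
+#   TODO: frobber@ - Remove after the migration.
+#   TODO: frobber@example.com, b/123 - Track the upstream fix.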
_TODO = re.compile(r'\bTODO\b')
# todo-check: enable
@@ -102,6 +132,7 @@ def create(
@filter_paths(exclude=exclude)
def todo_check(ctx: PresubmitContext):
"""Check that TODO lines are valid.""" # todo-check: ignore
+ ctx.paths = presubmit_context.apply_exclusions(ctx)
for path in ctx.paths:
_process_file(ctx, todo_pattern, path)
diff --git a/pw_presubmit/py/pw_presubmit/tools.py b/pw_presubmit/py/pw_presubmit/tools.py
index bf184db73..9e1319e09 100644
--- a/pw_presubmit/py/pw_presubmit/tools.py
+++ b/pw_presubmit/py/pw_presubmit/tools.py
@@ -32,7 +32,31 @@ from typing import (
Tuple,
)
+import pw_cli.color
+from pw_presubmit.presubmit_context import PRESUBMIT_CONTEXT
+
_LOG: logging.Logger = logging.getLogger(__name__)
+_COLOR = pw_cli.color.colors()
+
+
+def colorize_diff_line(line: str) -> str:
+ if line.startswith('--- ') or line.startswith('+++ '):
+ return _COLOR.bold_white(line)
+ if line.startswith('-'):
+ return _COLOR.red(line)
+ if line.startswith('+'):
+ return _COLOR.green(line)
+ if line.startswith('@@ '):
+ return _COLOR.cyan(line)
+ return line
+
+
+def colorize_diff(lines: Iterable[str]) -> str:
+ """Takes a diff str or list of str lines and returns a colorized version."""
+ if isinstance(lines, str):
+ lines = lines.splitlines(True)
+
+ return ''.join(colorize_diff_line(line) for line in lines)
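+
+
+# Illustrative only: typical use pairs this with difflib (old_text and
+# new_text are hypothetical):
+#
+#   import difflib
+#   diff = difflib.unified_diff(
+#       old_text.splitlines(True), new_text.splitlines(True))
+#   print(colorize_diff(diff))
+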
def plural(
@@ -42,8 +66,20 @@ def plural(
these: bool = False,
number: bool = True,
are: bool = False,
+ exist: bool = False,
) -> str:
- """Returns the singular or plural form of a word based on a count."""
+ """Returns the singular or plural form of a word based on a count.
+
+ Args:
+ items_or_count: Number of items or a collection of items.
+ singular: Singular form of the name of the item.
+ count_format: .format()-style specification for items_or_count.
+ these: Prefix the string with "this" or "these", depending on number.
+ number: Include the number in the return string (e.g., "3 things" vs.
+ "things").
+ are: Suffix the string with "is" or "are", depending on number.
+ exist: Suffix the string with "exists" or "exist", depending on number.
+ """
try:
count = len(items_or_count)
@@ -52,7 +88,14 @@ def plural(
prefix = ('this ' if count == 1 else 'these ') if these else ''
num = f'{count:{count_format}} ' if number else ''
- suffix = (' is' if count == 1 else ' are') if are else ''
+
+ suffix = ''
+ if are and exist:
+ raise ValueError(f'cannot combine are ({are}) and exist ({exist})')
+ if are:
+ suffix = ' is' if count == 1 else ' are'
+ if exist:
+ suffix = ' exists' if count == 1 else ' exist'
if singular.endswith('y'):
result = f'{singular[:-1]}{"y" if count == 1 else "ies"}'
@@ -181,11 +224,26 @@ def format_command(args: Sequence, kwargs: dict) -> Tuple[str, str]:
return attr, ' '.join(shlex.quote(str(arg)) for arg in args)
-def log_run(args, **kwargs) -> subprocess.CompletedProcess:
+def log_run(
+ args, ignore_dry_run: bool = False, **kwargs
+) -> subprocess.CompletedProcess:
"""Logs a command then runs it with subprocess.run.
- Takes the same arguments as subprocess.run.
+ Takes the same arguments as subprocess.run. The command is only executed if
+ dry-run is not enabled.
"""
+ ctx = PRESUBMIT_CONTEXT.get()
+ if ctx:
+ if not ignore_dry_run:
+ ctx.append_check_command(*args, **kwargs)
+ if ctx.dry_run and not ignore_dry_run:
+ # Return an empty CompletedProcess
+ empty_proc: subprocess.CompletedProcess = (
+ subprocess.CompletedProcess('', 0)
+ )
+ empty_proc.stdout = b''
+ empty_proc.stderr = b''
+ return empty_proc
_LOG.debug('[COMMAND] %s\n%s', *format_command(args, kwargs))
return subprocess.run(args, **kwargs)
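+
+# Illustrative only: a command that must run even during --dry-run (for
+# example, one that only gathers information) can opt out explicitly:
+#
+#   result = log_run(
+#       ['git', 'rev-parse', 'HEAD'],
+#       capture_output=True,
+#       ignore_dry_run=True,
+#   )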