Diffstat (limited to 'pw_presubmit/py')
-rw-r--r--  pw_presubmit/py/bazel_parser_test.py               |  35
-rw-r--r--  pw_presubmit/py/context_test.py                    |   2
-rw-r--r--  pw_presubmit/py/ninja_parser_test.py               |  33
-rw-r--r--  pw_presubmit/py/pw_presubmit/bazel_parser.py       |   7
-rw-r--r--  pw_presubmit/py/pw_presubmit/build.py              | 172
-rwxr-xr-x  pw_presubmit/py/pw_presubmit/format_code.py        |  49
-rw-r--r--  pw_presubmit/py/pw_presubmit/javascript_checks.py  |   2
-rw-r--r--  pw_presubmit/py/pw_presubmit/ninja_parser.py       |  16
-rwxr-xr-x  pw_presubmit/py/pw_presubmit/pigweed_presubmit.py  | 274
-rw-r--r--  pw_presubmit/py/pw_presubmit/presubmit_context.py  |  16
-rw-r--r--  pw_presubmit/py/pw_presubmit/todo_check.py         |  60
-rw-r--r--  pw_presubmit/py/todo_check_test.py                 |  57
12 files changed, 513 insertions, 210 deletions
diff --git a/pw_presubmit/py/bazel_parser_test.py b/pw_presubmit/py/bazel_parser_test.py
index 6248b866b..32192523d 100644
--- a/pw_presubmit/py/bazel_parser_test.py
+++ b/pw_presubmit/py/bazel_parser_test.py
@@ -14,6 +14,7 @@
# the License.
"""Tests for bazel_parser."""
+import difflib
from pathlib import Path
import tempfile
import unittest
@@ -116,9 +117,9 @@ _REAL_TEST_INPUT_2 = """
[4,570 / 4,573] 332 / 343 tests; Testing //pw_transfer/integration_test:cross_language_medium_read_test; 54s linux-sandbox ... (3 actions running)
[4,570 / 4,573] 332 / 343 tests; Testing //pw_transfer/integration_test:cross_language_medium_read_test; 65s linux-sandbox ... (3 actions running)
[4,570 / 4,573] 332 / 343 tests; Testing //pw_transfer/integration_test:cross_language_medium_read_test; 83s linux-sandbox ... (3 actions running)
-FAIL: //pw_transfer/integration_test:cross_language_medium_read_test (see /b/s/w/ir/cache/bazel/_bazel_swarming/823a25200ba977576a072121498f22ec/execroot/pigweed/bazel-out/k8-fastbuild/testlogs/pw_transfer/integration_test/cross_language_medium_read_test/test.log)
+FAIL: //pw_transfer/integration_test:cross_language_medium_read_test (see <truncated>/pw_transfer/integration_test/cross_language_medium_read_test/test.log)
INFO: From Testing //pw_transfer/integration_test:cross_language_medium_read_test:
-stdout (/b/s/w/ir/cache/bazel/_bazel_swarming/823a25200ba977576a072121498f22ec/execroot/pigweed/bazel-out/_tmp/actions/stdout-2115) 1131999 exceeds maximum size of --experimental_ui_max_stdouterr_bytes=1048576 bytes; skipping
+stdout (<truncated>/_tmp/actions/stdout-2115) 1131999 exceeds maximum size of --experimental_ui_max_stdouterr_bytes=1048576 bytes; skipping
[4,572 / 4,573] 334 / 343 tests, 1 failed; Testing //pw_transfer/integration_test:expected_errors_test; 84s linux-sandbox
[4,572 / 4,573] 334 / 343 tests, 1 failed; Testing //pw_transfer/integration_test:expected_errors_test; 94s linux-sandbox
[4,572 / 4,573] 334 / 343 tests, 1 failed; Testing //pw_transfer/integration_test:expected_errors_test; 124s linux-sandbox
@@ -469,21 +470,21 @@ INFO: Build completed, 1 test FAILED, 1206 total actions
//pw_transfer/integration_test:proxy_test PASSED in 2.4s
//pw_transfer/py:transfer_test PASSED in 1.4s
//pw_transfer/integration_test:cross_language_medium_read_test FAILED in 82.1s
-/b/s/w/ir/cache/bazel/_bazel_swarming/823a25200ba977576a072121498f22ec/execroot/pigweed/bazel-out/k8-fastbuild/testlogs/pw_transfer/integration_test/cross_language_medium_read_test/test.log
+<truncated>/pw_transfer/integration_test/cross_language_medium_read_test/test.log
Executed 35 out of 343 tests: 334 tests pass, 1 fails locally and 8 were skipped.
There were tests whose specified size is too big. Use the --test_verbose_timeout_warnings command line option to see which ones these are.
"""
_REAL_TEST_SUMMARY_2 = """
-FAIL: //pw_transfer/integration_test:cross_language_medium_read_test (see /b/s/w/ir/cache/bazel/_bazel_swarming/823a25200ba977576a072121498f22ec/execroot/pigweed/bazel-out/k8-fastbuild/testlogs/pw_transfer/integration_test/cross_language_medium_read_test/test.log)
+FAIL: //pw_transfer/integration_test:cross_language_medium_read_test (see <truncated>/pw_transfer/integration_test/cross_language_medium_read_test/test.log)
INFO: From Testing //pw_transfer/integration_test:cross_language_medium_read_test:
-stdout (/b/s/w/ir/cache/bazel/_bazel_swarming/823a25200ba977576a072121498f22ec/execroot/pigweed/bazel-out/_tmp/actions/stdout-2115) 1131999 exceeds maximum size of --experimental_ui_max_stdouterr_bytes=1048576 bytes; skipping
+stdout (<truncated>/_tmp/actions/stdout-2115) 1131999 exceeds maximum size of --experimental_ui_max_stdouterr_bytes=1048576 bytes; skipping
INFO: Elapsed time: 507.459s, Critical Path: 181.66s
INFO: 1206 processes: 1235 linux-sandbox, 4 worker.
INFO: Build completed, 1 test FAILED, 1206 total actions
//pw_transfer/integration_test:cross_language_medium_read_test FAILED in 82.1s
-/b/s/w/ir/cache/bazel/_bazel_swarming/823a25200ba977576a072121498f22ec/execroot/pigweed/bazel-out/k8-fastbuild/testlogs/pw_transfer/integration_test/cross_language_medium_read_test/test.log
+<truncated>/pw_transfer/integration_test/cross_language_medium_read_test/test.log
Executed 35 out of 343 tests: 334 tests pass, 1 fails locally and 8 were skipped.
"""
@@ -502,24 +503,38 @@ class TestBazelParser(unittest.TestCase):
self.output = bazel_parser.parse_bazel_stdout(path)
+ def _assert_equal(self, left, right):
+ if (
+ not isinstance(left, str)
+ or not isinstance(right, str)
+ or '\n' not in left
+ or '\n' not in right
+ ):
+ return self.assertEqual(left, right)
+
+ diff = ''.join(
+ difflib.unified_diff(left.splitlines(True), right.splitlines(True))
+ )
+ return self.assertSequenceEqual(left, right, f'\n{diff}\n')
+
def test_simple(self) -> None:
error = 'ERROR: abc\nerror 1\nerror2\n'
self._run('[0/10] foo\n[1/10] bar\n' + error)
- self.assertEqual(error.strip(), self.output.strip())
+ self._assert_equal(error.strip(), self.output.strip())
def test_path(self) -> None:
error_in = 'ERROR: abc\n PATH=... \\\nerror 1\nerror2\n'
error_out = 'ERROR: abc\nerror 1\nerror2\n'
self._run('[0/10] foo\n[1/10] bar\n' + error_in)
- self.assertEqual(error_out.strip(), self.output.strip())
+ self._assert_equal(error_out.strip(), self.output.strip())
def test_failure(self) -> None:
self._run(_REAL_TEST_INPUT)
- self.assertEqual(_REAL_TEST_SUMMARY.strip(), self.output.strip())
+ self._assert_equal(_REAL_TEST_SUMMARY.strip(), self.output.strip())
def test_failure_2(self) -> None:
self._run(_REAL_TEST_INPUT_2)
- self.assertEqual(_REAL_TEST_SUMMARY_2.strip(), self.output.strip())
+ self._assert_equal(_REAL_TEST_SUMMARY_2.strip(), self.output.strip())
if __name__ == '__main__':
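
The _assert_equal helper added to TestBazelParser above (and to TestNinjaParser below) boils down to the following standalone sketch, using only the standard library: for multi-line strings it attaches a unified diff to the failure message, otherwise it defers to plain assertEqual.

# Standalone sketch of the diff-friendly assertion helper introduced in this
# change (simplified: it assumes both arguments are already strings).
import difflib
import unittest


class MultilineAssertions(unittest.TestCase):
    def assert_multiline_equal(self, left: str, right: str) -> None:
        if '\n' not in left or '\n' not in right:
            # Single-line values: the default assertEqual message is fine.
            self.assertEqual(left, right)
            return
        # Multi-line values: attach a unified diff so mismatches are readable.
        diff = ''.join(
            difflib.unified_diff(
                left.splitlines(True), right.splitlines(True)
            )
        )
        self.assertSequenceEqual(left, right, f'\n{diff}\n')
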
diff --git a/pw_presubmit/py/context_test.py b/pw_presubmit/py/context_test.py
index c1b7f41b1..dad969b91 100644
--- a/pw_presubmit/py/context_test.py
+++ b/pw_presubmit/py/context_test.py
@@ -52,6 +52,8 @@ class ContextTest(unittest.TestCase):
self.assertFalse(ctx.is_try)
self.assertTrue(ctx.is_ci)
self.assertTrue(ctx.is_dev)
+ self.assertFalse(ctx.is_shadow)
+ self.assertFalse(ctx.is_prod)
def test_lucitrigger(self):
trigger = presubmit_context.LuciTrigger.create_for_testing(
diff --git a/pw_presubmit/py/ninja_parser_test.py b/pw_presubmit/py/ninja_parser_test.py
index a810ab59f..bfc30d353 100644
--- a/pw_presubmit/py/ninja_parser_test.py
+++ b/pw_presubmit/py/ninja_parser_test.py
@@ -14,6 +14,7 @@
# the License.
"""Tests for ninja_parser."""
+import difflib
from pathlib import Path
import unittest
from unittest.mock import MagicMock, mock_open, patch
@@ -26,7 +27,7 @@ _REAL_BUILD_INPUT = """
[1168/1797] cp ../../pw_software_update/py/dev_sign_test.py python/gen/pw_software_update/py/py.generated_python_package/dev_sign_test.py
[1169/1797] ACTION //pw_presubmit/py:py.lint.mypy(//pw_build/python_toolchain:python)
\x1b[31mFAILED: python/gen/pw_presubmit/py/py.lint.mypy.pw_pystamp
-python ../../pw_build/py/pw_build/python_runner.py --gn-root ../../ --current-path ../../pw_presubmit/py --default-toolchain=//pw_toolchain/default:default --current-toolchain=//pw_build/python_toolchain:python --env=MYPY_FORCE_COLOR=1 --touch python/gen/pw_presubmit/py/py.lint.mypy.pw_pystamp --capture-output --module mypy --python-virtualenv-config python/gen/pw_env_setup/pigweed_build_venv/venv_metadata.json --python-dep-list-files python/gen/pw_presubmit/py/py.lint.mypy_metadata_path_list.txt -- --pretty --show-error-codes ../../pw_presubmit/py/pw_presubmit/__init__.py ../../pw_presubmit/py/pw_presubmit/build.py ../../pw_presubmit/py/pw_presubmit/cli.py ../../pw_presubmit/py/pw_presubmit/cpp_checks.py ../../pw_presubmit/py/pw_presubmit/format_code.py ../../pw_presubmit/py/pw_presubmit/git_repo.py ../../pw_presubmit/py/pw_presubmit/inclusive_language.py ../../pw_presubmit/py/pw_presubmit/install_hook.py ../../pw_presubmit/py/pw_presubmit/keep_sorted.py ../../pw_presubmit/py/pw_presubmit/ninja_parser.py ../../pw_presubmit/py/pw_presubmit/npm_presubmit.py ../../pw_presubmit/py/pw_presubmit/pigweed_presubmit.py ../../pw_presubmit/py/pw_presubmit/presubmit.py ../../pw_presubmit/py/pw_presubmit/python_checks.py ../../pw_presubmit/py/pw_presubmit/shell_checks.py ../../pw_presubmit/py/pw_presubmit/todo_check.py ../../pw_presubmit/py/pw_presubmit/tools.py ../../pw_presubmit/py/git_repo_test.py ../../pw_presubmit/py/keep_sorted_test.py ../../pw_presubmit/py/ninja_parser_test.py ../../pw_presubmit/py/presubmit_test.py ../../pw_presubmit/py/tools_test.py ../../pw_presubmit/py/setup.py
+python [...]/py/pw_build/python_runner.py --gn-root ../../ --current-path ../../pw_presubmit/py --default-toolchain=//pw_toolchain/default:default --current-toolchain=//pw_build/python_toolchain:python --env=MYPY_FORCE_COLOR=1 --touch python/gen/pw_presubmit/py/py.lint.mypy.pw_pystamp --capture-output --module mypy --python-virtualenv-config python/gen/pw_env_setup/pigweed_build_venv/venv_metadata.json --python-dep-list-files python/gen/pw_presubmit/py/py.lint.mypy_metadata_path_list.txt -- --pretty --show-error-codes ../../pw_presubmit/py/pw_presubmit/__init__.py ../../pw_presubmit/py/pw_presubmit/build.py ../../pw_presubmit/py/pw_presubmit/cli.py ../../pw_presubmit/py/pw_presubmit/cpp_checks.py ../../pw_presubmit/py/pw_presubmit/format_code.py ../../pw_presubmit/py/pw_presubmit/git_repo.py ../../pw_presubmit/py/pw_presubmit/inclusive_language.py ../../pw_presubmit/py/pw_presubmit/install_hook.py ../../pw_presubmit/py/pw_presubmit/keep_sorted.py ../../pw_presubmit/py/pw_presubmit/ninja_parser.py ../../pw_presubmit/py/pw_presubmit/npm_presubmit.py ../../pw_presubmit/py/pw_presubmit/pigweed_presubmit.py ../../pw_presubmit/py/pw_presubmit/presubmit.py ../../pw_presubmit/py/pw_presubmit/python_checks.py ../../pw_presubmit/py/pw_presubmit/shell_checks.py ../../pw_presubmit/py/pw_presubmit/todo_check.py ../../pw_presubmit/py/pw_presubmit/tools.py ../../pw_presubmit/py/git_repo_test.py ../../pw_presubmit/py/keep_sorted_test.py ../../pw_presubmit/py/ninja_parser_test.py ../../pw_presubmit/py/presubmit_test.py ../../pw_presubmit/py/tools_test.py ../../pw_presubmit/py/setup.py
Requirement already satisfied: pyserial in c:\\b\\s\\w\\ir\\x\\w\\co\\environment\\pigweed-venv\\lib\\site-packages (from pigweed==0.0.13+20230126212203) (3.5)
../../pw_presubmit/py/presubmit_test.py:63: error: Module has no attribute
"Filter" [attr-defined]
@@ -57,7 +58,7 @@ ninja: build stopped: subcommand failed.
_REAL_BUILD_SUMMARY = """
[1169/1797] ACTION //pw_presubmit/py:py.lint.mypy(//pw_build/python_toolchain:python)
\x1b[31mFAILED: python/gen/pw_presubmit/py/py.lint.mypy.pw_pystamp
-python ../../pw_build/py/pw_build/python_runner.py --gn-root ../../ --current-path ../../pw_presubmit/py --default-toolchain=//pw_toolchain/default:default --current-toolchain=//pw_build/python_toolchain:python --env=MYPY_FORCE_COLOR=1 --touch python/gen/pw_presubmit/py/py.lint.mypy.pw_pystamp --capture-output --module mypy --python-virtualenv-config python/gen/pw_env_setup/pigweed_build_venv/venv_metadata.json --python-dep-list-files python/gen/pw_presubmit/py/py.lint.mypy_metadata_path_list.txt -- --pretty --show-error-codes ../../pw_presubmit/py/pw_presubmit/__init__.py ../../pw_presubmit/py/pw_presubmit/build.py ../../pw_presubmit/py/pw_presubmit/cli.py ../../pw_presubmit/py/pw_presubmit/cpp_checks.py ../../pw_presubmit/py/pw_presubmit/format_code.py ../../pw_presubmit/py/pw_presubmit/git_repo.py ../../pw_presubmit/py/pw_presubmit/inclusive_language.py ../../pw_presubmit/py/pw_presubmit/install_hook.py ../../pw_presubmit/py/pw_presubmit/keep_sorted.py ../../pw_presubmit/py/pw_presubmit/ninja_parser.py ../../pw_presubmit/py/pw_presubmit/npm_presubmit.py ../../pw_presubmit/py/pw_presubmit/pigweed_presubmit.py ../../pw_presubmit/py/pw_presubmit/presubmit.py ../../pw_presubmit/py/pw_presubmit/python_checks.py ../../pw_presubmit/py/pw_presubmit/shell_checks.py ../../pw_presubmit/py/pw_presubmit/todo_check.py ../../pw_presubmit/py/pw_presubmit/tools.py ../../pw_presubmit/py/git_repo_test.py ../../pw_presubmit/py/keep_sorted_test.py ../../pw_presubmit/py/ninja_parser_test.py ../../pw_presubmit/py/presubmit_test.py ../../pw_presubmit/py/tools_test.py ../../pw_presubmit/py/setup.py
+python [...]/py/pw_build/python_runner.py --gn-root ../../ --current-path ../../pw_presubmit/py --default-toolchain=//pw_toolchain/default:default --current-toolchain=//pw_build/python_toolchain:python --env=MYPY_FORCE_COLOR=1 --touch python/gen/pw_presubmit/py/py.lint.mypy.pw_pystamp --capture-output --module mypy --python-virtualenv-config python/gen/pw_env_setup/pigweed_build_venv/venv_metadata.json --python-dep-list-files python/gen/pw_presubmit/py/py.lint.mypy_metadata_path_list.txt -- --pretty --show-error-codes [...]/py/pw_presubmit/__init__.py [...]/py/pw_presubmit/build.py [...]/py/pw_presubmit/cli.py [...]/py/pw_presubmit/cpp_checks.py [...]/py/pw_presubmit/format_code.py [...]/py/pw_presubmit/git_repo.py [...]/py/pw_presubmit/inclusive_language.py [...]/py/pw_presubmit/install_hook.py [...]/py/pw_presubmit/keep_sorted.py [...]/py/pw_presubmit/ninja_parser.py [...]/py/pw_presubmit/npm_presubmit.py [...]/py/pw_presubmit/pigweed_presubmit.py [...]/py/pw_presubmit/presubmit.py [...]/py/pw_presubmit/python_checks.py [...]/py/pw_presubmit/shell_checks.py [...]/py/pw_presubmit/todo_check.py [...]/py/pw_presubmit/tools.py ../../pw_presubmit/py/git_repo_test.py ../../pw_presubmit/py/keep_sorted_test.py ../../pw_presubmit/py/ninja_parser_test.py ../../pw_presubmit/py/presubmit_test.py ../../pw_presubmit/py/tools_test.py ../../pw_presubmit/py/setup.py
../../pw_presubmit/py/presubmit_test.py:63: error: Module has no attribute
"Filter" [attr-defined]
TestData(presubmit.Filter(suffix=('.a', '.b')), 'foo.c', False...
@@ -73,7 +74,7 @@ _REAL_TEST_INPUT = r"""
[20280/31188] Finished [ACTION //pw_rpc:cpp_client_server_integration_test(//targets/host/pigweed_internal:pw_strict_host_clang_debug)] (10.2s)
[ACTION //pw_rpc:cpp_client_server_integration_test(//targets/host/pigweed_internal:pw_strict_host_clang_debug)]
[31mFAILED: [0mpw_strict_host_clang_debug/gen/pw_rpc/cpp_client_server_integration_test.pw_pystamp
-python ../../pw_build/py/pw_build/python_runner.py --gn-root ../../ --current-path ../../pw_rpc --default-toolchain=//pw_toolchain/default:default --current-toolchain=//targets/host/pigweed_internal:pw_strict_host_clang_debug --touch pw_strict_host_clang_debug/gen/pw_rpc/cpp_client_server_integration_test.pw_pystamp --capture-output --python-virtualenv-config python/gen/pw_env_setup/pigweed_build_venv/venv_metadata.json --python-dep-list-files pw_strict_host_clang_debug/gen/pw_rpc/cpp_client_server_integration_test_metadata_path_list.txt -- ../../pw_rpc/py/pw_rpc/testing.py --server \<TARGET_FILE\(:test_rpc_server\)\> --client \<TARGET_FILE\(:client_integration_test\)\> -- 30577
+python [...]/py/pw_build/python_runner.py --gn-root ../../ --current-path ../../pw_rpc --default-toolchain=//pw_toolchain/default:default --current-toolchain=//targets/host/pigweed_internal:pw_strict_host_clang_debug --touch pw_strict_host_clang_debug/gen/pw_rpc/cpp_client_server_integration_test.pw_pystamp --capture-output --python-virtualenv-config python/gen/pw_env_setup/pigweed_build_venv/venv_metadata.json --python-dep-list-files pw_strict_host_clang_debug/gen/pw_rpc/cpp_client_server_integration_test_metadata_path_list.txt -- [...]/py/pw_rpc/testing.py --server \<TARGET_FILE\(:test_rpc_server\)\> --client \<TARGET_FILE\(:client_integration_test\)\> -- 30577
[35m[1mINF[0m Starting pw_rpc server on port 30577
[35m[1mINF[0m Connecting to pw_rpc client at localhost:30577
[35m[1mINF[0m [==========] Running all tests.
@@ -120,7 +121,7 @@ python ../../pw_build/py/pw_build/python_runner.py --gn-root ../../ --current-pa
_REAL_TEST_SUMMARY = r"""
[ACTION //pw_rpc:cpp_client_server_integration_test(//targets/host/pigweed_internal:pw_strict_host_clang_debug)]
[31mFAILED: [0mpw_strict_host_clang_debug/gen/pw_rpc/cpp_client_server_integration_test.pw_pystamp
-python ../../pw_build/py/pw_build/python_runner.py --gn-root ../../ --current-path ../../pw_rpc --default-toolchain=//pw_toolchain/default:default --current-toolchain=//targets/host/pigweed_internal:pw_strict_host_clang_debug --touch pw_strict_host_clang_debug/gen/pw_rpc/cpp_client_server_integration_test.pw_pystamp --capture-output --python-virtualenv-config python/gen/pw_env_setup/pigweed_build_venv/venv_metadata.json --python-dep-list-files pw_strict_host_clang_debug/gen/pw_rpc/cpp_client_server_integration_test_metadata_path_list.txt -- ../../pw_rpc/py/pw_rpc/testing.py --server \<TARGET_FILE\(:test_rpc_server\)\> --client \<TARGET_FILE\(:client_integration_test\)\> -- 30577
+python [...]/py/pw_build/python_runner.py --gn-root ../../ --current-path ../../pw_rpc --default-toolchain=//pw_toolchain/default:default --current-toolchain=//targets/host/pigweed_internal:pw_strict_host_clang_debug --touch pw_strict_host_clang_debug/gen/pw_rpc/cpp_client_server_integration_test.pw_pystamp --capture-output --python-virtualenv-config python/gen/pw_env_setup/pigweed_build_venv/venv_metadata.json --python-dep-list-files pw_strict_host_clang_debug/gen/pw_rpc/cpp_client_server_integration_test_metadata_path_list.txt -- [...]/py/pw_rpc/testing.py --server \<TARGET_FILE\(:test_rpc_server\)\> --client \<TARGET_FILE\(:client_integration_test\)\> -- 30577
[35m[1mINF[0m Starting pw_rpc server on port 30577
[35m[1mINF[0m Connecting to pw_rpc client at localhost:30577
[35m[1mINF[0m [==========] Running all tests.
@@ -160,28 +161,42 @@ class TestNinjaParser(unittest.TestCase):
with patch.object(path, 'open', mocked_open_read):
return ninja_parser.parse_ninja_stdout(path)
+ def _assert_equal(self, left, right):
+ if (
+ not isinstance(left, str)
+ or not isinstance(right, str)
+ or '\n' not in left
+ or '\n' not in right
+ ):
+ return self.assertEqual(left, right)
+
+ diff = ''.join(
+ difflib.unified_diff(left.splitlines(True), right.splitlines(True))
+ )
+ return self.assertSequenceEqual(left, right, f'\n{diff}\n')
+
def test_simple(self) -> None:
error = '[2/10] baz\nFAILED: something\nerror 1\nerror 2\n'
result = self._run('[0/10] foo\n[1/10] bar\n' + error + _STOP)
- self.assertEqual(error.strip(), result.strip())
+ self._assert_equal(error.strip(), result.strip())
def test_short(self) -> None:
error = '[2/10] baz\nFAILED: something\n'
result = self._run('[0/10] foo\n[1/10] bar\n' + error + _STOP)
- self.assertEqual(error.strip(), result.strip())
+ self._assert_equal(error.strip(), result.strip())
def test_unexpected(self) -> None:
error = '[2/10] baz\nERROR: something\nerror 1\n'
result = self._run('[0/10] foo\n[1/10] bar\n' + error)
- self.assertEqual('', result.strip())
+ self._assert_equal('', result.strip())
def test_real_build(self) -> None:
result = self._run(_REAL_BUILD_INPUT)
- self.assertEqual(_REAL_BUILD_SUMMARY.strip(), result.strip())
+ self._assert_equal(_REAL_BUILD_SUMMARY.strip(), result.strip())
def test_real_test(self) -> None:
result = self._run(_REAL_TEST_INPUT)
- self.assertEqual(_REAL_TEST_SUMMARY.strip(), result.strip())
+ self._assert_equal(_REAL_TEST_SUMMARY.strip(), result.strip())
if __name__ == '__main__':
diff --git a/pw_presubmit/py/pw_presubmit/bazel_parser.py b/pw_presubmit/py/pw_presubmit/bazel_parser.py
index aaf3319c8..05dedef52 100644
--- a/pw_presubmit/py/pw_presubmit/bazel_parser.py
+++ b/pw_presubmit/py/pw_presubmit/bazel_parser.py
@@ -61,6 +61,13 @@ def parse_bazel_stdout(bazel_stdout: Path) -> str:
error_lines.append(line)
break
+ line = re.sub(
+ r'/b/s/w/ir/\S*/bazel-out(/k8-fastbuild)?(/bin)?'
+ r'(/testlogs?)',
+ '<truncated>',
+ line,
+ )
+
error_lines.append(line)
result = '\n'.join(error_lines)
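
The effect of the substitution added above is visible in the FAIL line from _REAL_TEST_INPUT_2: the swarming work-directory prefix up through bazel-out/k8-fastbuild/testlogs collapses to '<truncated>'. A minimal sketch reusing the pattern from the hunk above:

# Minimal sketch of the new substitution in parse_bazel_stdout().
import re

_SWARMING_PREFIX = re.compile(
    r'/b/s/w/ir/\S*/bazel-out(/k8-fastbuild)?(/bin)?(/testlogs?)'
)

line = (
    'FAIL: //pw_transfer/integration_test:cross_language_medium_read_test (see '
    '/b/s/w/ir/cache/bazel/_bazel_swarming/823a25200ba977576a072121498f22ec'
    '/execroot/pigweed/bazel-out/k8-fastbuild/testlogs'
    '/pw_transfer/integration_test/cross_language_medium_read_test/test.log)'
)

print(_SWARMING_PREFIX.sub('<truncated>', line))
# -> FAIL: //pw_transfer/integration_test:cross_language_medium_read_test (see
#    <truncated>/pw_transfer/integration_test/cross_language_medium_read_test/test.log)
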
diff --git a/pw_presubmit/py/pw_presubmit/build.py b/pw_presubmit/py/pw_presubmit/build.py
index 17ca25f40..8bb839da2 100644
--- a/pw_presubmit/py/pw_presubmit/build.py
+++ b/pw_presubmit/py/pw_presubmit/build.py
@@ -45,6 +45,7 @@ from typing import (
Union,
)
+import pw_cli.color
from pw_presubmit.presubmit import (
call,
Check,
@@ -206,7 +207,8 @@ def gn_gen(
Runs with --check=system if gn_check=True. Note that this does not cover
generated files. Run gn_check() after building to check generated files.
"""
- all_gn_args = dict(gn_arguments)
+ all_gn_args = {'pw_build_COLORIZE_OUTPUT': pw_cli.color.is_enabled()}
+ all_gn_args.update(gn_arguments)
all_gn_args.update(ctx.override_gn_args)
_LOG.debug('%r', all_gn_args)
args_option = gn_args(**all_gn_args)
@@ -225,9 +227,9 @@ def gn_gen(
call(
'gn',
+ '--color' if pw_cli.color.is_enabled() else '--nocolor',
'gen',
ctx.output_dir,
- '--color=always',
*(['--check=system'] if gn_check else []),
*(['--fail-on-unused-args'] if gn_fail_on_unused else []),
*([export_commands_arg] if export_commands_arg else []),
@@ -691,30 +693,73 @@ def _value(ctx: PresubmitContext, val: InputValue) -> Value:
_CtxMgrLambda = Callable[[PresubmitContext], ContextManager]
_CtxMgrOrLambda = Union[ContextManager, _CtxMgrLambda]
-_INCREMENTAL_COVERAGE_TOOL = 'cloud_client'
-_ABSOLUTE_COVERAGE_TOOL = 'raw_coverage_cloud_uploader'
-
@dataclass(frozen=True)
-class CoverageOptions:
- """Coverage collection configuration.
+class CommonCoverageOptions:
+ """Coverage options shared by both CodeSearch and Gerrit.
- For Google use only. See go/kalypsi-abs and go/kalypsi-inc for documentation
- of the metadata fields.
+ For Google use only.
"""
+ # The "root" of the Kalypsi GCS bucket path to which the coverage data
+ # should be uploaded. Typically gs://ng3-metrics/ng3-<teamname>-coverage.
target_bucket_root: str
+
+ # The project name in the Kalypsi GCS bucket path.
target_bucket_project: str
- project: str
+
+ # See go/kalypsi-abs#trace-type-required.
trace_type: str
- ref: str
- source: str
+
+ # go/kalypsi-abs#owner-required.
owner: str
+
+ # go/kalypsi-abs#bug-component-required.
bug_component: str
- trim_prefix: str = ''
+
+
+@dataclass(frozen=True)
+class CodeSearchCoverageOptions:
+ """CodeSearch-specific coverage options. For Google use only."""
+
+ # The name of the Gerrit host containing the CodeSearch repo. Just the name
+ # ("pigweed"), not the full URL ("pigweed.googlesource.com"). This may be
+ # different from the host from which the code was originally checked out.
+ host: str
+
+ # The name of the project, as expected by CodeSearch. Typically
+ # 'codesearch'.
+ project: str
+
+ # See go/kalypsi-abs#ref-required.
+ ref: str
+
+ # See go/kalypsi-abs#source-required.
+ source: str
+
+ # See go/kalypsi-abs#add-prefix-optional.
add_prefix: str = ''
+@dataclass(frozen=True)
+class GerritCoverageOptions:
+ """Gerrit-specific coverage options. For Google use only."""
+
+ # The name of the project, as expected by Gerrit. This is typically the
+ # repository name, e.g. 'pigweed/pigweed' for upstream Pigweed.
+ # See go/kalypsi-inc#project-required.
+ project: str
+
+
+@dataclass(frozen=True)
+class CoverageOptions:
+ """Coverage collection configuration. For Google use only."""
+
+ common: CommonCoverageOptions
+ codesearch: Tuple[CodeSearchCoverageOptions, ...]
+ gerrit: GerritCoverageOptions
+
+
class _NinjaBase(Check):
"""Thin wrapper of Check for steps that call ninja."""
@@ -826,29 +871,30 @@ class _NinjaBase(Check):
coverage_json = coverage_jsons[0]
if ctx.luci:
- if ctx.luci.is_dev:
- _LOG.warning('Not uploading coverage since running in dev')
+ if not ctx.luci.is_prod:
+ _LOG.warning('Not uploading coverage since not running in prod')
return PresubmitResult.PASS
with self._context(ctx):
- # GCS bucket paths are POSIX-like.
- coverage_gcs_path = posixpath.join(
- options.target_bucket_root,
- 'incremental' if ctx.luci.is_try else 'absolute',
- options.target_bucket_project,
- str(ctx.luci.buildbucket_id),
- )
- _copy_to_gcs(
- ctx,
- coverage_json,
- posixpath.join(coverage_gcs_path, 'report.json'),
- )
- metadata_json = _write_coverage_metadata(ctx, options)
- _copy_to_gcs(
- ctx,
- metadata_json,
- posixpath.join(coverage_gcs_path, 'metadata.json'),
- )
+ metadata_json_paths = _write_coverage_metadata(ctx, options)
+ for i, metadata_json in enumerate(metadata_json_paths):
+ # GCS bucket paths are POSIX-like.
+ coverage_gcs_path = posixpath.join(
+ options.common.target_bucket_root,
+ 'incremental' if ctx.luci.is_try else 'absolute',
+ options.common.target_bucket_project,
+ f'{ctx.luci.buildbucket_id}-{i}',
+ )
+ _copy_to_gcs(
+ ctx,
+ coverage_json,
+ posixpath.join(coverage_gcs_path, 'report.json'),
+ )
+ _copy_to_gcs(
+ ctx,
+ metadata_json,
+ posixpath.join(coverage_gcs_path, 'metadata.json'),
+ )
return PresubmitResult.PASS
@@ -894,52 +940,58 @@ def _copy_to_gcs(ctx: PresubmitContext, filepath: Path, gcs_dst: str):
def _write_coverage_metadata(
ctx: PresubmitContext, options: CoverageOptions
-) -> Path:
- """Write out Kalypsi coverage metadata and return the path to it."""
+) -> Sequence[Path]:
+ """Write out Kalypsi coverage metadata file(s) and return their paths."""
assert ctx.luci is not None
assert len(ctx.luci.triggers) == 1
change = ctx.luci.triggers[0]
metadata = {
- 'host': change.gerrit_host,
- 'project': options.project,
- 'trace_type': options.trace_type,
- 'trim_prefix': options.trim_prefix,
+ 'trace_type': options.common.trace_type,
+ 'trim_prefix': str(ctx.root),
'patchset_num': change.patchset,
'change_id': change.number,
- 'owner': options.owner,
- 'bug_component': options.bug_component,
+ 'owner': options.common.owner,
+ 'bug_component': options.common.bug_component,
}
if ctx.luci.is_try:
# Running in CQ: uploading incremental coverage
metadata.update(
{
- # Note: no `add_prefix`. According to the documentation, that's
- # only supported for absolute coverage.
- #
- # TODO(tpudlik): Follow up with Kalypsi team, since this is
- # surprising (given that trim_prefix is supported for both types
- # of coverage). This might be an error in the docs.
- 'patchset_num': change.patchset,
'change_id': change.number,
+ 'host': change.gerrit_name,
+ 'patchset_num': change.patchset,
+ 'project': options.gerrit.project,
}
)
- else:
- # Running in CI: uploading absolute coverage
+
+ metadata_json = ctx.output_dir / "metadata.json"
+ with metadata_json.open('w') as metadata_file:
+ json.dump(metadata, metadata_file)
+ return (metadata_json,)
+
+ # Running in CI: uploading absolute coverage, possibly to multiple locations
+ # since a repo could be in codesearch in multiple places.
+ metadata_jsons = []
+ for i, cs in enumerate(options.codesearch):
metadata.update(
{
- 'add_prefix': options.add_prefix,
+ 'add_prefix': cs.add_prefix,
'commit_id': change.ref,
- 'ref': options.ref,
- 'source': options.source,
+ 'host': cs.host,
+ 'project': cs.project,
+ 'ref': cs.ref,
+ 'source': cs.source,
}
)
- metadata_json = ctx.output_dir / "metadata.json"
- with metadata_json.open('w') as metadata_file:
- json.dump(metadata, metadata_file)
- return metadata_json
+ metadata_json = ctx.output_dir / f'metadata-{i}.json'
+ with metadata_json.open('w') as metadata_file:
+ json.dump(metadata, metadata_file)
+ metadata_jsons.append(metadata_json)
+
+ return tuple(metadata_jsons)
class GnGenNinja(_NinjaBase):
@@ -966,6 +1018,9 @@ class GnGenNinja(_NinjaBase):
super().__init__(self._substeps(), *args, **kwargs)
self._gn_args: Dict[str, Any] = gn_args or {}
+ def add_default_gn_args(self, args):
+ """Add any project-specific default GN args to 'args'."""
+
@property
def gn_args(self) -> Dict[str, Any]:
return self._gn_args
@@ -975,13 +1030,14 @@ class GnGenNinja(_NinjaBase):
if self._coverage_options is not None:
args['pw_toolchain_COVERAGE_ENABLED'] = True
args['pw_build_PYTHON_TEST_COVERAGE'] = True
- args['pw_C_OPTIMIZATION_LEVELS'] = ('debug',)
if ctx.incremental:
args['pw_toolchain_PROFILE_SOURCE_FILES'] = [
f'//{x.relative_to(ctx.root)}' for x in ctx.paths
]
+ self.add_default_gn_args(args)
+
args.update({k: _value(ctx, v) for k, v in self._gn_args.items()})
gn_gen(ctx, gn_check=False, **args) # type: ignore
return PresubmitResult.PASS
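
The reworked _write_coverage_metadata() now produces one metadata file per upload: a single incremental-coverage file when running in CQ, and one absolute-coverage file per CodeSearch destination when running in CI. A minimal sketch of that fan-out with simplified, hypothetical inputs (the real function builds the metadata dict from PresubmitContext and CoverageOptions):

import json
from pathlib import Path
from typing import Dict, Sequence


def write_metadata(
    out_dir: Path,
    is_try: bool,
    common: Dict[str, str],
    codesearch: Sequence[Dict[str, str]],
) -> Sequence[Path]:
    """Sketch: one metadata.json in CQ, one metadata-<i>.json per CS entry in CI."""
    if is_try:
        # CQ: incremental coverage goes to a single destination.
        path = out_dir / 'metadata.json'
        path.write_text(json.dumps(common))
        return (path,)

    # CI: absolute coverage, possibly uploaded to several CodeSearch locations.
    paths = []
    for i, entry in enumerate(codesearch):
        path = out_dir / f'metadata-{i}.json'
        path.write_text(json.dumps({**common, **entry}))
        paths.append(path)
    return tuple(paths)
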
diff --git a/pw_presubmit/py/pw_presubmit/format_code.py b/pw_presubmit/py/pw_presubmit/format_code.py
index df81ad3f9..7a1b81a8c 100755
--- a/pw_presubmit/py/pw_presubmit/format_code.py
+++ b/pw_presubmit/py/pw_presubmit/format_code.py
@@ -151,15 +151,26 @@ def clang_format_check(ctx: _Context) -> Dict[Path, str]:
def clang_format_fix(ctx: _Context) -> Dict[Path, str]:
"""Fixes formatting for the provided files in place."""
- _clang_format('-i', *ctx.paths)
+ print_format_fix(_clang_format('-i', *ctx.paths))
return {}
def _typescript_format(*args: Union[Path, str], **kwargs) -> bytes:
+ # TODO: b/323378974 - Better integrate NPM actions with pw_env_setup so
+ # we don't have to manually set `npm_config_cache` every time we run npm.
+ # Force npm cache to live inside the environment directory.
+ npm_env = os.environ.copy()
+ npm_env['npm_config_cache'] = str(
+ Path(npm_env['_PW_ACTUAL_ENVIRONMENT_ROOT']) / 'npm-cache'
+ )
+
+ npm = shutil.which('npm.cmd' if os.name == 'nt' else 'npm')
return log_run(
- ['npx', 'prettier@3.0.1', *args],
+ [npm, 'exec', 'prettier', *args],
stdout=subprocess.PIPE,
+ stdin=subprocess.DEVNULL,
check=True,
+ env=npm_env,
**kwargs,
).stdout
@@ -175,7 +186,7 @@ def typescript_format_check(ctx: _Context) -> Dict[Path, str]:
def typescript_format_fix(ctx: _Context) -> Dict[Path, str]:
"""Fixes formatting for the provided files in place."""
- _typescript_format('--write', *ctx.paths)
+ print_format_fix(_typescript_format(*ctx.paths, '--', '--write'))
return {}
@@ -227,7 +238,24 @@ def fix_bazel_format(ctx: _Context) -> Dict[Path, str]:
"""Fixes formatting for the provided files in place."""
errors = {}
for path in ctx.paths:
- proc = log_run(['buildifier', path], capture_output=True)
+ proc = log_run(
+ [
+ 'buildifier',
+ '--lint=fix',
+ # These warnings are safe to enable because they can always be
+ # auto-fixed.
+ (
+ '--warnings='
+ 'load,'
+ 'load-on-top,'
+ 'native-build,'
+ 'same-origin-load,'
+ 'out-of-order-load'
+ ),
+ path,
+ ],
+ capture_output=True,
+ )
if proc.returncode:
errors[path] = proc.stderr.decode()
return errors
@@ -297,7 +325,7 @@ def check_py_format_yapf(ctx: _Context) -> Dict[Path, str]:
def fix_py_format_yapf(ctx: _Context) -> Dict[Path, str]:
"""Fixes formatting for the provided files in place."""
- _yapf('--in-place', *ctx.paths, check=True)
+ print_format_fix(_yapf('--in-place', *ctx.paths, check=True).stdout)
return {}
@@ -405,6 +433,7 @@ def fix_py_format_black(ctx: _Context) -> Dict[Path, str]:
[ctx.format_options.black_path, *_black_config_args(), path],
capture_output=True,
)
+ print_format_fix(proc.stdout)
if proc.returncode:
errors[path] = proc.stderr.decode()
return errors
@@ -557,6 +586,12 @@ def print_format_check(
_LOG.warning('To fix formatting, run:\n\n%s\n', '\n'.join(message))
+def print_format_fix(stdout: bytes):
+ """Prints the output of a format --fix call."""
+ for line in stdout.splitlines():
+ _LOG.info('Fix cmd stdout: %r', line.decode('utf-8'))
+
+
class CodeFormat(NamedTuple):
language: str
filter: FileFilter
@@ -695,7 +730,7 @@ CODE_FORMATS: Tuple[CodeFormat, ...] = tuple(
C_FORMAT,
GN_FORMAT,
GO_FORMAT,
- JAVASCRIPT_FORMAT if shutil.which('npx') else None,
+ JAVASCRIPT_FORMAT if shutil.which('npm') else None,
JAVA_FORMAT,
JSON_FORMAT,
MARKDOWN_FORMAT,
@@ -703,7 +738,7 @@ CODE_FORMATS: Tuple[CodeFormat, ...] = tuple(
PROTO_FORMAT,
PYTHON_FORMAT,
RST_FORMAT,
- TYPESCRIPT_FORMAT if shutil.which('npx') else None,
+ TYPESCRIPT_FORMAT if shutil.which('npm') else None,
# keep-sorted: end
),
)
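
The prettier invocation above switches from npx to 'npm exec' and pins the npm cache inside the Pigweed environment directory. A standalone sketch of that pattern, assuming _PW_ACTUAL_ENVIRONMENT_ROOT is set by pw_env_setup as in a bootstrapped environment:

import os
import shutil
import subprocess
from pathlib import Path


def run_prettier(*paths: str) -> bytes:
    # Force the npm cache into the environment directory instead of $HOME.
    env = os.environ.copy()
    env['npm_config_cache'] = str(
        Path(env['_PW_ACTUAL_ENVIRONMENT_ROOT']) / 'npm-cache'
    )
    npm = shutil.which('npm.cmd' if os.name == 'nt' else 'npm')
    return subprocess.run(
        [npm, 'exec', 'prettier', *paths],
        stdout=subprocess.PIPE,
        stdin=subprocess.DEVNULL,
        check=True,
        env=env,
    ).stdout
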
diff --git a/pw_presubmit/py/pw_presubmit/javascript_checks.py b/pw_presubmit/py/pw_presubmit/javascript_checks.py
index 3dc8b8f68..9d321a430 100644
--- a/pw_presubmit/py/pw_presubmit/javascript_checks.py
+++ b/pw_presubmit/py/pw_presubmit/javascript_checks.py
@@ -39,6 +39,6 @@ def eslint(ctx: PresubmitContext):
if npm_install.returncode != 0:
raise PresubmitFailure('npm install failed.')
- result = log_run(['npx', 'eslint@8.47.0', *ctx.paths])
+ result = log_run(['npm', 'exec', 'eslint', *ctx.paths])
if result.returncode != 0:
raise PresubmitFailure('eslint identifed issues.')
diff --git a/pw_presubmit/py/pw_presubmit/ninja_parser.py b/pw_presubmit/py/pw_presubmit/ninja_parser.py
index 487af1754..5d5befde6 100644
--- a/pw_presubmit/py/pw_presubmit/ninja_parser.py
+++ b/pw_presubmit/py/pw_presubmit/ninja_parser.py
@@ -101,6 +101,22 @@ def _parse_ninja(ins: IO) -> str:
if not x.lstrip().startswith('Requirement already satisfied:')
]
+ # Trim paths so the output is less likely to wrap. Specifically, if the
+ # path has six or more segments trim it down to three.
+ path_re = re.compile(
+ r'(?P<prefix>\b|\s)'
+ r'[-\w._]+/[-\w._]+(?:/[-\w._]+)+'
+ r'(?P<core>/[-\w._]+/[-\w._]+/[-\w._]+)'
+ r'(?P<suffix>\b|\s)'
+ )
+
+ def replace(m: re.Match):
+ return ''.join(
+ (m.group('prefix'), '[...]', m.group('core'), m.group('suffix'))
+ )
+
+ failure_lines = [path_re.sub(replace, x) for x in failure_lines]
+
result: str = '\n'.join(failure_lines)
return re.sub(r'\n+', '\n', result)
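
The trimming rule added to _parse_ninja() keeps only the last three segments of any path with six or more segments, which is what turns '../../pw_build/py/pw_build/python_runner.py' into '[...]/py/pw_build/python_runner.py' in the expected summaries above. A minimal sketch of the same pattern:

import re

path_re = re.compile(
    r'(?P<prefix>\b|\s)'
    r'[-\w._]+/[-\w._]+(?:/[-\w._]+)+'
    r'(?P<core>/[-\w._]+/[-\w._]+/[-\w._]+)'
    r'(?P<suffix>\b|\s)'
)


def trim(line: str) -> str:
    # Replace the leading segments with '[...]' and keep the last three.
    return path_re.sub(
        lambda m: m.group('prefix') + '[...]' + m.group('core') + m.group('suffix'),
        line,
    )


print(trim('python ../../pw_build/py/pw_build/python_runner.py --gn-root ../../'))
# -> python [...]/py/pw_build/python_runner.py --gn-root ../../
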
diff --git a/pw_presubmit/py/pw_presubmit/pigweed_presubmit.py b/pw_presubmit/py/pw_presubmit/pigweed_presubmit.py
index 65f1d7f0e..b8b836129 100755
--- a/pw_presubmit/py/pw_presubmit/pigweed_presubmit.py
+++ b/pw_presubmit/py/pw_presubmit/pigweed_presubmit.py
@@ -20,6 +20,7 @@ import json
import logging
import os
from pathlib import Path
+import platform
import re
import shutil
import subprocess
@@ -87,10 +88,18 @@ def _at_all_optimization_levels(target):
yield f'{target}_{level}'
+class PigweedGnGenNinja(build.GnGenNinja):
+ """Add Pigweed-specific defaults to GnGenNinja."""
+
+ def add_default_gn_args(self, args):
+ """Add project-specific default GN args to 'args'."""
+ args['pw_C_OPTIMIZATION_LEVELS'] = ('debug',)
+
+
#
# Build presubmit checks
#
-gn_all = build.GnGenNinja(
+gn_all = PigweedGnGenNinja(
name='gn_all',
path_filter=_BUILD_FILE_FILTER,
gn_args=dict(pw_C_OPTIMIZATION_LEVELS=_OPTIMIZATION_LEVELS),
@@ -159,10 +168,13 @@ def _gn_combined_build_check_targets() -> Sequence[str]:
*_at_all_optimization_levels(f'host_{_HOST_COMPILER}'),
'python.tests',
'python.lint',
- 'docs',
'pigweed_pypi_distribution',
]
+ # TODO: b/315998985 - Add docs back to Mac ARM build.
+ if sys.platform != 'darwin' or platform.machine() != 'arm64':
+ build_targets.append('docs')
+
# C headers seem to be missing when building with pw_minimal_cpp_stdlib, so
# skip it on Windows.
if sys.platform != 'win32':
@@ -196,7 +208,7 @@ def _gn_combined_build_check_targets() -> Sequence[str]:
return build_targets
-gn_combined_build_check = build.GnGenNinja(
+gn_combined_build_check = PigweedGnGenNinja(
name='gn_combined_build_check',
doc='Run most host and device (QEMU) tests.',
path_filter=_BUILD_FILE_FILTER,
@@ -207,21 +219,31 @@ gn_combined_build_check = build.GnGenNinja(
ninja_targets=_gn_combined_build_check_targets(),
)
-coverage = build.GnGenNinja(
+coverage = PigweedGnGenNinja(
name='coverage',
doc='Run coverage for the host build.',
path_filter=_BUILD_FILE_FILTER,
ninja_targets=('coverage',),
coverage_options=build.CoverageOptions(
- target_bucket_root='gs://ng3-metrics/ng3-pigweed-coverage',
- target_bucket_project='pigweed',
- project='codesearch',
- trace_type='LLVM',
- trim_prefix='/b/s/w/ir/x/w/co',
- ref='refs/heads/main',
- source='infra:main',
- owner='pigweed-infra@google.com',
- bug_component='503634',
+ common=build.CommonCoverageOptions(
+ target_bucket_project='pigweed',
+ target_bucket_root='gs://ng3-metrics/ng3-pigweed-coverage',
+ trace_type='LLVM',
+ owner='pigweed-infra@google.com',
+ bug_component='503634',
+ ),
+ codesearch=(
+ build.CodeSearchCoverageOptions(
+ host='pigweed-internal',
+ project='codesearch',
+ add_prefix='pigweed',
+ ref='refs/heads/main',
+ source='infra:main',
+ ),
+ ),
+ gerrit=build.GerritCoverageOptions(
+ project='pigweed/pigweed',
+ ),
),
)
@@ -233,7 +255,7 @@ def gn_arm_build(ctx: PresubmitContext):
build.gn_check(ctx)
-stm32f429i = build.GnGenNinja(
+stm32f429i = PigweedGnGenNinja(
name='stm32f429i',
path_filter=_BUILD_FILE_FILTER,
gn_args={
@@ -249,67 +271,7 @@ stm32f429i = build.GnGenNinja(
ninja_targets=_at_all_optimization_levels('stm32f429i'),
)
-gn_emboss_build = build.GnGenNinja(
- name='gn_emboss_build',
- packages=('emboss',),
- gn_args=dict(
- dir_pw_third_party_emboss=lambda ctx: '"{}"'.format(
- ctx.package_root / 'emboss'
- ),
- pw_C_OPTIMIZATION_LEVELS=_OPTIMIZATION_LEVELS,
- ),
- ninja_targets=(*_at_all_optimization_levels(f'host_{_HOST_COMPILER}'),),
-)
-
-gn_nanopb_build = build.GnGenNinja(
- name='gn_nanopb_build',
- path_filter=_BUILD_FILE_FILTER,
- packages=('nanopb',),
- gn_args=dict(
- dir_pw_third_party_nanopb=lambda ctx: '"{}"'.format(
- ctx.package_root / 'nanopb'
- ),
- pw_C_OPTIMIZATION_LEVELS=_OPTIMIZATION_LEVELS,
- ),
- ninja_targets=(
- *_at_all_optimization_levels('stm32f429i'),
- *_at_all_optimization_levels('host_clang'),
- ),
-)
-
-gn_chre_build = build.GnGenNinja(
- name='gn_chre_build',
- path_filter=_BUILD_FILE_FILTER,
- packages=('chre',),
- gn_args=dict(
- dir_pw_third_party_chre=lambda ctx: '"{}"'.format(
- ctx.package_root / 'chre'
- ),
- pw_C_OPTIMIZATION_LEVELS=_OPTIMIZATION_LEVELS,
- ),
- ninja_targets=(*_at_all_optimization_levels('host_clang'),),
-)
-
-gn_emboss_nanopb_build = build.GnGenNinja(
- name='gn_emboss_nanopb_build',
- path_filter=_BUILD_FILE_FILTER,
- packages=('emboss', 'nanopb'),
- gn_args=dict(
- dir_pw_third_party_emboss=lambda ctx: '"{}"'.format(
- ctx.package_root / 'emboss'
- ),
- dir_pw_third_party_nanopb=lambda ctx: '"{}"'.format(
- ctx.package_root / 'nanopb'
- ),
- pw_C_OPTIMIZATION_LEVELS=_OPTIMIZATION_LEVELS,
- ),
- ninja_targets=(
- *_at_all_optimization_levels('stm32f429i'),
- *_at_all_optimization_levels('host_clang'),
- ),
-)
-
-gn_crypto_mbedtls_build = build.GnGenNinja(
+gn_crypto_mbedtls_build = PigweedGnGenNinja(
name='gn_crypto_mbedtls_build',
path_filter=_BUILD_FILE_FILTER,
packages=('mbedtls',),
@@ -332,7 +294,7 @@ gn_crypto_mbedtls_build = build.GnGenNinja(
),
)
-gn_crypto_micro_ecc_build = build.GnGenNinja(
+gn_crypto_micro_ecc_build = PigweedGnGenNinja(
name='gn_crypto_micro_ecc_build',
path_filter=_BUILD_FILE_FILTER,
packages=('micro-ecc',),
@@ -352,7 +314,7 @@ gn_crypto_micro_ecc_build = build.GnGenNinja(
),
)
-gn_teensy_build = build.GnGenNinja(
+gn_teensy_build = PigweedGnGenNinja(
name='gn_teensy_build',
path_filter=_BUILD_FILE_FILTER,
packages=('teensy',),
@@ -368,11 +330,14 @@ gn_teensy_build = build.GnGenNinja(
ninja_targets=_at_all_optimization_levels('arduino'),
)
-gn_pico_build = build.GnGenNinja(
+gn_pico_build = PigweedGnGenNinja(
name='gn_pico_build',
path_filter=_BUILD_FILE_FILTER,
- packages=('pico_sdk',),
+ packages=('pico_sdk', 'freertos'),
gn_args={
+ 'dir_pw_third_party_freertos': lambda ctx: '"{}"'.format(
+ str(ctx.package_root / 'freertos')
+ ),
'PICO_SRC_DIR': lambda ctx: '"{}"'.format(
str(ctx.package_root / 'pico_sdk')
),
@@ -381,7 +346,7 @@ gn_pico_build = build.GnGenNinja(
ninja_targets=('pi_pico',),
)
-gn_mimxrt595_build = build.GnGenNinja(
+gn_mimxrt595_build = PigweedGnGenNinja(
name='gn_mimxrt595_build',
path_filter=_BUILD_FILE_FILTER,
packages=('mcuxpresso',),
@@ -397,7 +362,7 @@ gn_mimxrt595_build = build.GnGenNinja(
ninja_targets=('mimxrt595'),
)
-gn_mimxrt595_freertos_build = build.GnGenNinja(
+gn_mimxrt595_freertos_build = PigweedGnGenNinja(
name='gn_mimxrt595_freertos_build',
path_filter=_BUILD_FILE_FILTER,
packages=('freertos', 'mcuxpresso'),
@@ -417,7 +382,7 @@ gn_mimxrt595_freertos_build = build.GnGenNinja(
ninja_targets=('mimxrt595_freertos'),
)
-gn_software_update_build = build.GnGenNinja(
+gn_software_update_build = PigweedGnGenNinja(
name='gn_software_update_build',
path_filter=_BUILD_FILE_FILTER,
packages=('nanopb', 'protobuf', 'mbedtls', 'micro-ecc'),
@@ -445,7 +410,7 @@ gn_software_update_build = build.GnGenNinja(
ninja_targets=_at_all_optimization_levels('host_clang'),
)
-gn_pw_system_demo_build = build.GnGenNinja(
+gn_pw_system_demo_build = PigweedGnGenNinja(
name='gn_pw_system_demo_build',
path_filter=_BUILD_FILE_FILTER,
packages=('freertos', 'nanopb', 'stm32cube_f4', 'pico_sdk'),
@@ -466,26 +431,48 @@ gn_pw_system_demo_build = build.GnGenNinja(
ninja_targets=('pw_system_demo',),
)
-gn_googletest_build = build.GnGenNinja(
- name='gn_googletest_build',
+gn_chre_googletest_nanopb_sapphire_build = PigweedGnGenNinja(
+ name='gn_chre_googletest_nanopb_sapphire_build',
path_filter=_BUILD_FILE_FILTER,
- packages=('googletest',),
- gn_args={
- 'dir_pw_third_party_googletest': lambda ctx: '"{}"'.format(
+ packages=('boringssl', 'chre', 'emboss', 'googletest', 'icu', 'nanopb'),
+ gn_args=dict(
+ dir_pw_third_party_chre=lambda ctx: '"{}"'.format(
+ ctx.package_root / 'chre'
+ ),
+ dir_pw_third_party_nanopb=lambda ctx: '"{}"'.format(
+ ctx.package_root / 'nanopb'
+ ),
+ dir_pw_third_party_googletest=lambda ctx: '"{}"'.format(
ctx.package_root / 'googletest'
),
- 'pw_unit_test_MAIN': lambda ctx: '"{}"'.format(
+ dir_pw_third_party_emboss=lambda ctx: '"{}"'.format(
+ ctx.package_root / 'emboss'
+ ),
+ dir_pw_third_party_boringssl=lambda ctx: '"{}"'.format(
+ ctx.package_root / 'boringssl'
+ ),
+ dir_pw_third_party_icu=lambda ctx: '"{}"'.format(
+ ctx.package_root / 'icu'
+ ),
+ pw_unit_test_MAIN=lambda ctx: '"{}"'.format(
ctx.root / 'third_party/googletest:gmock_main'
),
- 'pw_unit_test_GOOGLETEST_BACKEND': lambda ctx: '"{}"'.format(
- ctx.root / 'third_party/googletest'
+ pw_unit_test_BACKEND=lambda ctx: '"{}"'.format(
+ ctx.root / 'pw_unit_test:googletest'
),
- 'pw_C_OPTIMIZATION_LEVELS': _OPTIMIZATION_LEVELS,
- },
- ninja_targets=_at_all_optimization_levels(f'host_{_HOST_COMPILER}'),
+ pw_function_CONFIG=lambda ctx: '"{}"'.format(
+ ctx.root / 'pw_function:enable_dynamic_allocation'
+ ),
+ pw_bluetooth_sapphire_ENABLED=True,
+ pw_C_OPTIMIZATION_LEVELS=_OPTIMIZATION_LEVELS,
+ ),
+ ninja_targets=(
+ *_at_all_optimization_levels(f'host_{_HOST_COMPILER}'),
+ *_at_all_optimization_levels('stm32f429i'),
+ ),
)
-gn_fuzz_build = build.GnGenNinja(
+gn_fuzz_build = PigweedGnGenNinja(
name='gn_fuzz_build',
path_filter=_BUILD_FILE_FILTER,
packages=('abseil-cpp', 'fuzztest', 'googletest', 're2'),
@@ -505,8 +492,8 @@ gn_fuzz_build = build.GnGenNinja(
'pw_unit_test_MAIN': lambda ctx: '"{}"'.format(
ctx.root / 'third_party/googletest:gmock_main'
),
- 'pw_unit_test_GOOGLETEST_BACKEND': lambda ctx: '"{}"'.format(
- ctx.root / 'third_party/googletest'
+ 'pw_unit_test_BACKEND': lambda ctx: '"{}"'.format(
+ ctx.root / 'pw_unit_test:googletest'
),
},
ninja_targets=('fuzzers',),
@@ -517,7 +504,7 @@ gn_fuzz_build = build.GnGenNinja(
),
)
-oss_fuzz_build = build.GnGenNinja(
+oss_fuzz_build = PigweedGnGenNinja(
name='oss_fuzz_build',
path_filter=_BUILD_FILE_FILTER,
packages=('abseil-cpp', 'fuzztest', 'googletest', 're2'),
@@ -546,6 +533,7 @@ def _env_with_zephyr_vars(ctx: PresubmitContext) -> dict:
# Set some variables here.
env['ZEPHYR_BASE'] = str(ctx.package_root / 'zephyr')
env['ZEPHYR_MODULES'] = str(ctx.root)
+ env['ZEPHYR_TOOLCHAIN_VARIANT'] = 'llvm'
return env
@@ -557,6 +545,28 @@ def zephyr_build(ctx: PresubmitContext) -> None:
env = _env_with_zephyr_vars(ctx)
# Get the python twister runner
twister = ctx.package_root / 'zephyr' / 'scripts' / 'twister'
+ # Get a list of the test roots
+ testsuite_roots = [
+ ctx.pw_root / dir
+ for dir in os.listdir(ctx.pw_root)
+ if dir.startswith('pw_')
+ ]
+ testsuite_roots_list = [
+ args for dir in testsuite_roots for args in ('--testsuite-root', dir)
+ ]
+ sysroot_dir = (
+ ctx.pw_root
+ / 'environment'
+ / 'cipd'
+ / 'packages'
+ / 'pigweed'
+ / 'clang_sysroot'
+ )
+ platform_filters = (
+ ['-P', 'native_posix', '-P', 'native_sim']
+ if platform.system() in ['Windows', 'Darwin']
+ else []
+ )
# Run twister
call(
sys.executable,
@@ -566,8 +576,12 @@ def zephyr_build(ctx: PresubmitContext) -> None:
'--clobber-output',
'--inline-logs',
'--verbose',
- '--testsuite-root',
- ctx.root / 'pw_unit_test_zephyr',
+ *platform_filters,
+ '-x=CONFIG_LLVM_USE_LLD=y',
+ '-x=CONFIG_COMPILER_RT_RTLIB=y',
+ f'-x=TOOLCHAIN_C_FLAGS=--sysroot={sysroot_dir}',
+ f'-x=TOOLCHAIN_LD_FLAGS=--sysroot={sysroot_dir}',
+ *testsuite_roots_list,
env=env,
)
# Produces reports at (ctx.root / 'twister_out' / 'twister*.xml')
@@ -613,7 +627,7 @@ def docs_build(ctx: PresubmitContext) -> None:
)
-gn_host_tools = build.GnGenNinja(
+gn_host_tools = PigweedGnGenNinja(
name='gn_host_tools',
ninja_targets=('host_tools',),
)
@@ -663,11 +677,31 @@ def bazel_test(ctx: PresubmitContext) -> None:
build.bazel(
ctx,
'test',
- '--test_output=errors',
'--',
'//...',
)
+ # Run tests for non-default config options
+
+ # pw_rpc
+ build.bazel(
+ ctx,
+ 'test',
+ '--//pw_rpc:config_override='
+ '//pw_rpc:completion_request_callback_config_enabled',
+ '--',
+ '//pw_rpc/...',
+ )
+
+ # pw_grpc
+ build.bazel(
+ ctx,
+ 'test',
+ '--//pw_rpc:config_override=//pw_grpc:pw_rpc_config',
+ '--',
+ '//pw_grpc/...',
+ )
+
@filter_paths(
endswith=(
@@ -710,11 +744,11 @@ def bazel_build(ctx: PresubmitContext) -> None:
'//...',
)
- for platform, targets in targets_for_platform.items():
+ for platforms, targets in targets_for_platform.items():
build.bazel(
ctx,
'build',
- f'--platforms={platform}',
+ f'--platforms={platforms}',
f"--cxxopt='-std={cxxversion}'",
*targets,
)
@@ -737,6 +771,14 @@ def bazel_build(ctx: PresubmitContext) -> None:
'//pw_cpu_exception/...',
)
+ build.bazel(
+ ctx,
+ 'build',
+ '--//pw_thread_freertos:config_override=//pw_build:test_module_config',
+ '--platforms=//pw_build/platforms:testonly_freertos',
+ '//pw_build:module_config_test',
+ )
+
# Build the pw_system example for the Discovery board using STM32Cube.
build.bazel(
ctx,
@@ -745,6 +787,17 @@ def bazel_build(ctx: PresubmitContext) -> None:
'//pw_system:system_example',
)
+ # Build the fuzztest example.
+ #
+ # TODO: b/324652164 - This doesn't work on MacOS yet.
+ if sys.platform != 'darwin':
+ build.bazel(
+ ctx,
+ 'build',
+ '--config=fuzztest',
+ '//pw_fuzzer/examples/fuzztest:metrics_fuzztest',
+ )
+
def pw_transfer_integration_test(ctx: PresubmitContext) -> None:
"""Runs the pw_transfer cross-language integration test only.
@@ -826,6 +879,7 @@ def edit_compile_commands(
_EXCLUDE_FROM_COPYRIGHT_NOTICE: Sequence[str] = (
# Configuration
# keep-sorted: start
+ r'MODULE.bazel.lock',
r'\bDoxyfile$',
r'\bPW_PLUGINS$',
r'\bconstraint.list$',
@@ -1203,7 +1257,6 @@ SOURCE_FILES_FILTER = FileFilter(
OTHER_CHECKS = (
# keep-sorted: start
- # TODO: b/235277910 - Enable all Bazel tests when they're fixed.
bazel_test,
build.gn_gen_check,
cmake_clang,
@@ -1243,9 +1296,7 @@ INTERNAL = (gn_mimxrt595_build, gn_mimxrt595_freertos_build)
# program block CQ on Linux.
MISC = (
# keep-sorted: start
- gn_chre_build,
- gn_emboss_nanopb_build,
- gn_googletest_build,
+ gn_chre_googletest_nanopb_sapphire_build,
# keep-sorted: end
)
@@ -1255,12 +1306,12 @@ SECURITY = (
# keep-sorted: start
gn_crypto_mbedtls_build,
gn_crypto_micro_ecc_build,
- gn_fuzz_build,
gn_software_update_build,
- oss_fuzz_build,
# keep-sorted: end
)
+FUZZ = (gn_fuzz_build, oss_fuzz_build)
+
# Avoid running all checks on specific paths.
PATH_EXCLUSIONS = FormatOptions.load().exclude
@@ -1280,7 +1331,7 @@ _LINTFORMAT = (
source_in_build.gn(SOURCE_FILES_FILTER),
source_is_in_cmake_build_warn_only,
shell_checks.shellcheck if shutil.which('shellcheck') else (),
- javascript_checks.eslint if shutil.which('npx') else (),
+ javascript_checks.eslint if shutil.which('npm') else (),
json_check.presubmit_check,
keep_sorted.presubmit_check,
todo_check_with_exceptions,
@@ -1325,6 +1376,7 @@ PROGRAMS = Programs(
# keep-sorted: start
arduino_pico=ARDUINO_PICO,
full=FULL,
+ fuzz=FUZZ,
internal=INTERNAL,
lintformat=LINTFORMAT,
misc=MISC,
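
PigweedGnGenNinja overrides the add_default_gn_args() hook that this change adds to build.GnGenNinja; defaults are injected before the per-check gn_args, so explicit arguments still win. A simplified sketch of that ordering (resolved_gn_args() is a hypothetical helper here; the real code resolves the args inside the GN gen substep using the PresubmitContext):

from typing import Any, Dict, Optional


class GnGenNinja:  # Simplified stand-in for build.GnGenNinja.
    def __init__(self, gn_args: Optional[Dict[str, Any]] = None):
        self._gn_args: Dict[str, Any] = gn_args or {}

    def add_default_gn_args(self, args: Dict[str, Any]) -> None:
        """Add any project-specific default GN args to 'args'."""

    def resolved_gn_args(self) -> Dict[str, Any]:
        args: Dict[str, Any] = {}
        self.add_default_gn_args(args)  # Project defaults first...
        args.update(self._gn_args)      # ...then per-check args override them.
        return args


class PigweedGnGenNinja(GnGenNinja):
    def add_default_gn_args(self, args: Dict[str, Any]) -> None:
        args['pw_C_OPTIMIZATION_LEVELS'] = ('debug',)


check = PigweedGnGenNinja(gn_args={'pw_C_OPTIMIZATION_LEVELS': ('size_optimized',)})
assert check.resolved_gn_args() == {'pw_C_OPTIMIZATION_LEVELS': ('size_optimized',)}
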
diff --git a/pw_presubmit/py/pw_presubmit/presubmit_context.py b/pw_presubmit/py/pw_presubmit/presubmit_context.py
index 131be90c0..d57267919 100644
--- a/pw_presubmit/py/pw_presubmit/presubmit_context.py
+++ b/pw_presubmit/py/pw_presubmit/presubmit_context.py
@@ -241,11 +241,14 @@ class LuciContext:
swarming_server: The swarming server on which this build is running.
swarming_task_id: The swarming task id of this build.
cas_instance: The CAS instance accessible from this build.
+ context_file: The path to the LUCI_CONTEXT file.
pipeline: Information about the build pipeline, if applicable.
triggers: Information about triggering commits, if applicable.
is_try: True if the bucket is a try bucket.
is_ci: True if the bucket is a ci bucket.
is_dev: True if the bucket is a dev bucket.
+ is_shadow: True if the bucket is a shadow bucket.
+ is_prod: True if both is_dev and is_shadow are False.
"""
buildbucket_id: int
@@ -256,6 +259,7 @@ class LuciContext:
swarming_server: str
swarming_task_id: str
cas_instance: str
+ context_file: Path
pipeline: Optional[LuciPipeline]
triggers: Sequence[LuciTrigger] = dataclasses.field(default_factory=tuple)
@@ -271,6 +275,14 @@ class LuciContext:
def is_dev(self):
return re.search(r'\bdev\b', self.bucket)
+ @property
+ def is_shadow(self):
+ return re.search(r'\bshadow\b', self.bucket)
+
+ @property
+ def is_prod(self):
+ return not self.is_dev and not self.is_shadow
+
@staticmethod
def create_from_environment(
env: Optional[Dict[str, str]] = None,
@@ -285,6 +297,7 @@ class LuciContext:
'BUILDBUCKET_ID',
'BUILDBUCKET_NAME',
'BUILD_NUMBER',
+ 'LUCI_CONTEXT',
'SWARMING_TASK_ID',
'SWARMING_SERVER',
]
@@ -319,6 +332,7 @@ class LuciContext:
cas_instance=cas_instance,
pipeline=pipeline,
triggers=LuciTrigger.create_from_environment(env),
+ context_file=Path(env['LUCI_CONTEXT']),
)
_LOG.debug('%r', result)
return result
@@ -329,6 +343,7 @@ class LuciContext:
'BUILDBUCKET_ID': '881234567890',
'BUILDBUCKET_NAME': 'pigweed:bucket.try:builder-name',
'BUILD_NUMBER': '123',
+ 'LUCI_CONTEXT': '/path/to/context/file.json',
'SWARMING_SERVER': 'https://chromium-swarm.appspot.com',
'SWARMING_TASK_ID': 'cd2dac62d2',
}
@@ -431,6 +446,7 @@ class PresubmitContext: # pylint: disable=too-many-instance-attributes
full: bool = False
_failed: bool = False
dry_run: bool = False
+ pw_root: Path = pw_cli.env.pigweed_environment().PW_ROOT
@property
def failed(self) -> bool:
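
The new is_shadow and is_prod properties classify builders purely by bucket name, with is_prod meaning "neither dev nor shadow". A small sketch of that classification, using hypothetical bucket names:

import re


def classify(bucket: str) -> str:
    # Mirrors the LuciContext properties: 'dev' and 'shadow' are matched as
    # whole words in the bucket name; everything else counts as prod.
    if re.search(r'\bdev\b', bucket):
        return 'dev'
    if re.search(r'\bshadow\b', bucket):
        return 'shadow'
    return 'prod'


assert classify('bucket.try') == 'prod'          # Hypothetical bucket names.
assert classify('bucket.try.shadow') == 'shadow'
assert classify('bucket.dev.ci') == 'dev'
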
diff --git a/pw_presubmit/py/pw_presubmit/todo_check.py b/pw_presubmit/py/pw_presubmit/todo_check.py
index 53a587edd..a973d0ca8 100644
--- a/pw_presubmit/py/pw_presubmit/todo_check.py
+++ b/pw_presubmit/py/pw_presubmit/todo_check.py
@@ -16,7 +16,7 @@
import logging
from pathlib import Path
import re
-from typing import Iterable, Pattern, Sequence, Union
+from typing import Dict, Iterable, List, Pattern, Sequence, Union
from pw_presubmit import presubmit_context
from pw_presubmit.presubmit import filter_paths
@@ -41,9 +41,15 @@ EXCLUDE: Sequence[str] = (
)
# todo-check: disable
+# pylint: disable=line-too-long
BUGS_ONLY = re.compile(
r'(?:\bTODO\(b/\d+(?:, ?b/\d+)*\).*\w)|'
- r'(?:\bTODO: b/\d+(?:, ?b/\d+)* - )'
+ r'(?:\bTODO: b/\d+(?:, ?b/\d+)* - )|'
+ r'(?:\bTODO: https://issues.pigweed.dev/issues/\d+ - )|'
+ r'(?:\bTODO: https://pwbug.dev/\d+ - )|'
+ r'(?:\bTODO: pwbug.dev/\d+ - )|'
+ r'(?:\bTODO: <pwbug.dev/\d+> - )|'
+ r'(?:\bTODO: https://github\.com/bazelbuild/[a-z][-_a-z0-9]*/issues/\d+[ ]-[ ])'
)
BUGS_OR_USERNAMES = re.compile(
r"""
@@ -58,6 +64,10 @@ BUGS_OR_USERNAMES = re.compile(
\bTODO:[ ]
(?:
b/\d+| # Bug.
+ https://pwbug.dev/\d+| # Short URL
+ pwbug.dev/\d+| # Even shorter URL
+ <pwbug.dev/\d+>| # Markdown compatible even shorter URL
+ https://issues.pigweed.dev/issues/\d+| # Fully qualified bug for rustdoc
# Username@ with optional domain.
[a-z]+@(?:[a-z][-a-z0-9]*(?:\.[a-z][-a-z0-9]*)+)?
)
@@ -69,11 +79,27 @@ BUGS_OR_USERNAMES = re.compile(
)
)*
[ ]-[ ].*\w # Explanation.
+)|
+(?: # Fuchsia style.
+ \bTODO\(
+ (?:fxbug\.dev/\d+|[a-z]+) # Username or bug.
+ (?:,[ ]?(?:fxbug\.dev/\d+|[a-z]+))* # Additional usernames or bugs.
+ \)
+.*\w # Explanation.
+)|
+(?: # Bazel GitHub issues. No usernames allowed.
+ \bTODO:[ ]
+ (?:
+ https://github\.com/bazelbuild/[a-z][-_a-z0-9]*/issues/\d+
+ )
+[ ]-[ ].*\w # Explanation.
)
""",
re.VERBOSE,
)
-_TODO = re.compile(r'\bTODO\b')
+# pylint: enable=line-too-long
+
+_TODO_OR_FIXME = re.compile(r'(\bTODO\b)|(\bFIXME\b)')
# todo-check: enable
# If seen, ignore this line and the next.
@@ -91,7 +117,7 @@ def _process_file(ctx: PresubmitContext, todo_pattern: re.Pattern, path: Path):
prev = ''
try:
- summary = []
+ summary: List[str] = []
for i, line in enumerate(ins, 1):
if _DISABLE in line:
enabled = False
@@ -102,25 +128,24 @@ def _process_file(ctx: PresubmitContext, todo_pattern: re.Pattern, path: Path):
prev = line
continue
- if _TODO.search(line):
+ if _TODO_OR_FIXME.search(line):
if not todo_pattern.search(line):
# todo-check: ignore
ctx.fail(f'Bad TODO on line {i}:', path)
ctx.fail(f' {line.strip()}')
- summary.append(
- f'{path.relative_to(ctx.root)}:{i}:{line.strip()}'
- )
+ ctx.fail('Prefer this format in new code:')
+ # todo-check: ignore
+ ctx.fail(' TODO: b/XXXXX - info here')
+ summary.append(f'{i}:{line.strip()}')
prev = line
- if summary:
- with ctx.failure_summary_log.open('w') as outs:
- for line in summary:
- print(line, file=outs)
+ return summary
except UnicodeDecodeError:
# File is not text, like a gif.
_LOG.debug('File %s is not a text file', path)
+ return []
def create(
@@ -133,7 +158,16 @@ def create(
def todo_check(ctx: PresubmitContext):
"""Check that TODO lines are valid.""" # todo-check: ignore
ctx.paths = presubmit_context.apply_exclusions(ctx)
+ summary: Dict[Path, List[str]] = {}
for path in ctx.paths:
- _process_file(ctx, todo_pattern, path)
+ if file_summary := _process_file(ctx, todo_pattern, path):
+ summary[path] = file_summary
+
+ if summary:
+ with ctx.failure_summary_log.open('w') as outs:
+ for path, lines in summary.items():
+ print('====', path.relative_to(ctx.root), file=outs)
+ for line in lines:
+ print(line, file=outs)
return todo_check
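
The expanded BUGS_ONLY pattern above now accepts the bug-URL styles exercised by the new tests in todo_check_test.py further down. A quick sketch checking a few of those formats against the pattern from the hunk above:

import re

BUGS_ONLY = re.compile(
    r'(?:\bTODO\(b/\d+(?:, ?b/\d+)*\).*\w)|'
    r'(?:\bTODO: b/\d+(?:, ?b/\d+)* - )|'
    r'(?:\bTODO: https://issues.pigweed.dev/issues/\d+ - )|'
    r'(?:\bTODO: https://pwbug.dev/\d+ - )|'
    r'(?:\bTODO: pwbug.dev/\d+ - )|'
    r'(?:\bTODO: <pwbug.dev/\d+> - )|'
    r'(?:\bTODO: https://github\.com/bazelbuild/[a-z][-_a-z0-9]*/issues/\d+[ ]-[ ])'
)

for line in (
    'TODO: b/123 - foo',
    'TODO: https://pwbug.dev/123 - foo',
    'TODO: <pwbug.dev/123> - foo',
    'TODO: https://issues.pigweed.dev/issues/123 - foo',
    'TODO: https://github.com/bazelbuild/rules_cc/issues/678910 - fix',
):
    assert BUGS_ONLY.search(line), line
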
diff --git a/pw_presubmit/py/todo_check_test.py b/pw_presubmit/py/todo_check_test.py
index 803b58229..87123e7c9 100644
--- a/pw_presubmit/py/todo_check_test.py
+++ b/pw_presubmit/py/todo_check_test.py
@@ -50,7 +50,7 @@ class TestTodoCheck(unittest.TestCase):
self._run(todo_check.BUGS_ONLY, contents)
def test_one_bug_legacy(self) -> None:
- contents = 'TODO: b/123 - foo\n'
+ contents = 'TODO(b/123): foo\n'
self._run_bugs_users(contents)
self.ctx.fail.assert_not_called()
self._run_bugs(contents)
@@ -63,6 +63,33 @@ class TestTodoCheck(unittest.TestCase):
self._run_bugs(contents)
self.ctx.fail.assert_not_called()
+ def test_one_bug_short_url(self) -> None:
+ contents = 'TODO: https://pwbug.dev/123 - foo\n'
+ self._run_bugs_users(contents)
+ self.ctx.fail.assert_not_called()
+ self._run_bugs(contents)
+ self.ctx.fail.assert_not_called()
+
+ def test_one_bug_shorter_url(self) -> None:
+ contents = 'TODO: pwbug.dev/123 - foo\n'
+ self._run_bugs_users(contents)
+ self.ctx.fail.assert_not_called()
+ self._run_bugs(contents)
+
+ def test_one_bug_shorter_markdown_url(self) -> None:
+ contents = 'TODO: <pwbug.dev/123> - foo\n'
+ self._run_bugs_users(contents)
+ self.ctx.fail.assert_not_called()
+ self._run_bugs(contents)
+ self.ctx.fail.assert_not_called()
+
+ def test_one_bug_full_url(self) -> None:
+ contents = 'TODO: https://issues.pigweed.dev/issues/123 - foo\n'
+ self._run_bugs_users(contents)
+ self.ctx.fail.assert_not_called()
+ self._run_bugs(contents)
+ self.ctx.fail.assert_not_called()
+
def test_two_bugs_legacy(self) -> None:
contents = 'TODO(b/123, b/456): foo\n'
self._run_bugs_users(contents)
@@ -271,6 +298,34 @@ class TestTodoCheck(unittest.TestCase):
self._run_bugs_users('TODO: foo\n')
self.ctx.fail.assert_called()
+ def test_fuchsia(self) -> None:
+ self._run_bugs_users('TODO(fxbug.dev/123): foo\n')
+ self.ctx.fail.assert_not_called()
+
+ def test_fuchsia_two_bugs(self) -> None:
+ self._run_bugs_users('TODO(fxbug.dev/123,fxbug.dev/321): bar\n')
+ self.ctx.fail.assert_not_called()
+
+ def test_bazel_gh_issue(self) -> None:
+ contents = (
+ 'TODO: https://github.com/bazelbuild/bazel/issues/12345 - '
+ 'Bazel sometimes works\n'
+ )
+ self._run_bugs_users(contents)
+ self.ctx.fail.assert_not_called()
+ self._run_bugs(contents)
+ self.ctx.fail.assert_not_called()
+
+ def test_bazel_gh_issue_underscore(self) -> None:
+ contents = (
+ 'TODO: https://github.com/bazelbuild/rules_cc/issues/678910 - '
+ 'Sometimes it does not work\n'
+ )
+ self._run_bugs_users(contents)
+ self.ctx.fail.assert_not_called()
+ self._run_bugs(contents)
+ self.ctx.fail.assert_not_called()
+
if __name__ == '__main__':
unittest.main()