aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAndroid Build Coastguard Worker <android-build-coastguard-worker@google.com>2022-04-28 15:58:02 +0000
committerAndroid Build Coastguard Worker <android-build-coastguard-worker@google.com>2022-04-28 15:58:02 +0000
commita0415ca6ea992b76fb76165f04d1b46b88e89c23 (patch)
tree50f293047de631ceb9bca95c30b1a8981a060c62
parenta667f0008913920f2975dfcd91bc4b9962e9bf5d (diff)
parent40fbb41c00454895867d7ae5d3ba20e3111073ba (diff)
downloadpigweed-android13-frc-media-swcodec-release.tar.gz
Snap for 8512216 from 40fbb41c00454895867d7ae5d3ba20e3111073ba to tm-frc-media-swcodec-releaset_frc_swc_330443040t_frc_swc_330443010android13-frc-media-swcodec-release
Change-Id: I8067b29d978dad1cc830c177d88d111dce1592f2
-rw-r--r--.clang-tidy28
-rw-r--r--.gitignore6
-rw-r--r--BUILD.gn187
-rw-r--r--CMakeLists.txt13
-rw-r--r--PIGWEED_MODULES120
-rw-r--r--WORKSPACE42
-rw-r--r--build_overrides/pi_pico.gni23
-rw-r--r--build_overrides/pigweed.gni1
-rw-r--r--docs/BUILD.gn127
-rw-r--r--docs/automated_analysis.rst66
-rw-r--r--docs/build_system.rst13
-rw-r--r--docs/concepts/index.rst111
-rw-r--r--docs/conf.py4
-rw-r--r--docs/contributing.rst16
-rw-r--r--docs/faq.rst1
-rw-r--r--docs/getting_started.rst2
-rw-r--r--docs/index.rst3
-rw-r--r--docs/module_structure.rst36
-rw-r--r--docs/release_notes/2022_jan.rst192
-rw-r--r--docs/release_notes/index.rst10
-rw-r--r--modules.gni134
-rw-r--r--pw_arduino_build/arduino.gni4
-rw-r--r--pw_assert/BUILD.gn10
-rw-r--r--pw_assert/CMakeLists.txt2
-rw-r--r--pw_assert/backend.gni26
-rw-r--r--pw_assert_log/BUILD.bazel15
-rw-r--r--pw_assert_log/BUILD.gn41
-rw-r--r--pw_assert_log/assert_lite_public_overrides/pw_assert_backend/assert_lite_backend.h20
-rw-r--r--pw_assert_log/docs.rst25
-rw-r--r--pw_assert_log/public/pw_assert_log/assert_lite_log.h26
-rw-r--r--pw_assert_tokenized/docs.rst2
-rw-r--r--pw_assert_tokenized/public/pw_assert_tokenized/assert_tokenized.h2
-rw-r--r--pw_assert_tokenized/public/pw_assert_tokenized/check_tokenized.h2
-rw-r--r--pw_bloat/bloat.cmake64
-rw-r--r--pw_bloat/bloat.gni2
-rw-r--r--pw_bloat/bloat_this_binary.cc2
-rw-r--r--pw_bloat/docs.rst210
-rw-r--r--pw_bloat/py/BUILD.gn2
-rw-r--r--pw_bloat/py/bloaty_config_test.py114
-rwxr-xr-xpw_bloat/py/pw_bloat/bloat.py4
-rw-r--r--pw_bloat/py/pw_bloat/bloaty_config.py339
-rw-r--r--pw_bloat/py/setup.cfg4
-rw-r--r--pw_blob_store/blob_store.cc15
-rw-r--r--pw_blob_store/blob_store_test.cc40
-rw-r--r--pw_blob_store/docs.rst8
-rw-r--r--pw_blob_store/flat_file_system_entry_test.cc2
-rw-r--r--pw_blob_store/public/pw_blob_store/blob_store.h14
-rw-r--r--pw_boot_cortex_m/basic_cortex_m.ld32
-rw-r--r--pw_build/BUILD.gn8
-rw-r--r--pw_build/CMakeLists.txt3
-rw-r--r--pw_build/bazel_internal/BUILD.bazel39
-rw-r--r--pw_build/bazel_internal/BUILD.gn21
-rw-r--r--pw_build/bazel_internal/linker_script.ld51
-rw-r--r--pw_build/bazel_internal/pigweed_internal.bzl68
-rw-r--r--pw_build/bazel_internal/test.cc17
-rw-r--r--pw_build/docs.rst52
-rw-r--r--pw_build/error.gni43
-rw-r--r--pw_build/exec.gni6
-rw-r--r--pw_build/generated_pigweed_modules_lists.gni471
-rw-r--r--pw_build/hil.gni3
-rw-r--r--pw_build/pigweed.bzl2
-rw-r--r--pw_build/pigweed.cmake93
-rw-r--r--pw_build/py/BUILD.gn1
-rw-r--r--pw_build/py/pw_build/create_python_tree.py59
-rw-r--r--pw_build/py/pw_build/generate_modules_lists.py254
-rw-r--r--pw_build/py/pw_build/python_package.py75
-rwxr-xr-xpw_build/py/pw_build/python_runner.py72
-rw-r--r--pw_build/python_action.gni5
-rw-r--r--pw_build/python_dist.gni15
-rw-r--r--pw_bytes/docs.rst15
-rw-r--r--pw_bytes/public/pw_bytes/units.h40
-rw-r--r--pw_bytes/units_test.cc23
-rw-r--r--pw_cli/docs.rst10
-rw-r--r--pw_cli/py/pw_cli/arguments.py2
-rw-r--r--pw_cli/py/pw_cli/branding.py11
-rw-r--r--pw_cli/py/pw_cli/log.py5
-rw-r--r--pw_console/BUILD.gn7
-rw-r--r--pw_console/docs.rst8
-rw-r--r--pw_console/embedding.rst10
-rw-r--r--pw_console/images/command_runner_main_menu.svg1984
-rw-r--r--pw_console/images/pw_system_boot.pngbin0 -> 228251 bytes
-rw-r--r--pw_console/images/python_completion.pngbin0 -> 302553 bytes
-rw-r--r--pw_console/py/BUILD.gn3
-rw-r--r--pw_console/py/command_runner_test.py258
-rw-r--r--pw_console/py/console_app_test.py13
-rw-r--r--pw_console/py/console_prefs_test.py55
-rw-r--r--pw_console/py/help_window_test.py10
-rw-r--r--pw_console/py/log_store_test.py4
-rw-r--r--pw_console/py/log_view_test.py167
-rw-r--r--pw_console/py/pw_console/__init__.py1
-rw-r--r--pw_console/py/pw_console/__main__.py10
-rw-r--r--pw_console/py/pw_console/command_runner.py515
-rw-r--r--pw_console/py/pw_console/console_app.py289
-rw-r--r--pw_console/py/pw_console/console_prefs.py199
-rw-r--r--pw_console/py/pw_console/docs/user_guide.rst554
-rw-r--r--pw_console/py/pw_console/embed.py85
-rw-r--r--pw_console/py/pw_console/help_window.py70
-rw-r--r--pw_console/py/pw_console/key_bindings.py118
-rw-r--r--pw_console/py/pw_console/log_pane.py205
-rw-r--r--pw_console/py/pw_console/log_pane_saveas_dialog.py5
-rw-r--r--pw_console/py/pw_console/log_pane_selection_dialog.py4
-rw-r--r--pw_console/py/pw_console/log_pane_toolbars.py2
-rw-r--r--pw_console/py/pw_console/log_screen.py55
-rw-r--r--pw_console/py/pw_console/log_store.py73
-rw-r--r--pw_console/py/pw_console/log_view.py199
-rw-r--r--pw_console/py/pw_console/plugins/calc_pane.py49
-rw-r--r--pw_console/py/pw_console/python_logging.py27
-rw-r--r--pw_console/py/pw_console/quit_dialog.py26
-rw-r--r--pw_console/py/pw_console/repl_pane.py49
-rw-r--r--pw_console/py/pw_console/search_toolbar.py412
-rw-r--r--pw_console/py/pw_console/style.py76
-rw-r--r--pw_console/py/pw_console/text_formatting.py13
-rw-r--r--pw_console/py/pw_console/widgets/border.py20
-rw-r--r--pw_console/py/pw_console/widgets/mouse_handlers.py20
-rw-r--r--pw_console/py/pw_console/widgets/table.py30
-rw-r--r--pw_console/py/pw_console/widgets/window_pane.py12
-rw-r--r--pw_console/py/pw_console/widgets/window_pane_toolbar.py18
-rw-r--r--pw_console/py/pw_console/window_list.py74
-rw-r--r--pw_console/py/pw_console/window_manager.py499
-rw-r--r--pw_console/py/pw_console/yaml_config_loader_mixin.py154
-rw-r--r--pw_console/py/repl_pane_test.py7
-rw-r--r--pw_console/py/table_test.py4
-rw-r--r--pw_console/py/text_formatting_test.py29
-rw-r--r--pw_console/py/window_manager_test.py317
-rw-r--r--pw_console/testing.rst215
-rw-r--r--pw_containers/CMakeLists.txt119
-rw-r--r--pw_cpu_exception_cortex_m/BUILD.bazel13
-rw-r--r--pw_cpu_exception_cortex_m/BUILD.gn12
-rw-r--r--pw_cpu_exception_cortex_m/CMakeLists.txt13
-rw-r--r--pw_cpu_exception_cortex_m/entry.cc16
-rw-r--r--pw_cpu_exception_cortex_m/public/pw_cpu_exception_cortex_m/util.h14
-rw-r--r--pw_cpu_exception_cortex_m/snapshot.cc30
-rw-r--r--pw_cpu_exception_cortex_m/util.cc31
-rw-r--r--pw_cpu_exception_cortex_m/util_test.cc114
-rw-r--r--pw_crypto/ecdsa_boringssl.cc2
-rw-r--r--pw_crypto/ecdsa_mbedtls.cc2
-rw-r--r--pw_crypto/ecdsa_uecc.cc2
-rw-r--r--pw_crypto/public/pw_crypto/sha256.h3
-rw-r--r--pw_crypto/sha256_boringssl.cc2
-rw-r--r--pw_crypto/sha256_mbedtls.cc2
-rw-r--r--pw_docgen/docs.gni13
-rw-r--r--pw_docgen/docs.rst36
-rw-r--r--pw_docgen/py/BUILD.gn2
-rw-r--r--pw_docgen/py/pw_docgen/docgen.py23
-rw-r--r--pw_docgen/py/pw_docgen/sphinx/__init__.py0
-rw-r--r--pw_docgen/py/pw_docgen/sphinx/google_analytics.py39
-rw-r--r--pw_docgen/py/setup.cfg5
-rw-r--r--pw_doctor/docs.rst13
-rwxr-xr-xpw_doctor/py/pw_doctor/doctor.py71
-rw-r--r--pw_env_setup/bazel/cipd_setup/internal/cipd_internal.bzl2
-rw-r--r--pw_env_setup/config.json3
-rw-r--r--pw_env_setup/docs.rst62
-rwxr-xr-xpw_env_setup/get_pw_env_setup.sh5
-rwxr-xr-xpw_env_setup/post-checkout-hook-helper.sh38
-rwxr-xr-xpw_env_setup/post-checkout-hook.sh25
-rw-r--r--pw_env_setup/py/BUILD.gn1
-rw-r--r--pw_env_setup/py/pw_env_setup/cipd_setup/arm.json3
-rw-r--r--pw_env_setup/py/pw_env_setup/cipd_setup/bazel.json3
-rw-r--r--pw_env_setup/py/pw_env_setup/cipd_setup/luci.json21
-rw-r--r--pw_env_setup/py/pw_env_setup/cipd_setup/pigweed.json40
-rw-r--r--pw_env_setup/py/pw_env_setup/cipd_setup/python.json6
-rwxr-xr-xpw_env_setup/py/pw_env_setup/cipd_setup/update.py230
-rwxr-xr-xpw_env_setup/py/pw_env_setup/cipd_setup/wrapper.py18
-rwxr-xr-xpw_env_setup/py/pw_env_setup/env_setup.py70
-rw-r--r--pw_env_setup/py/pw_env_setup/environment.py4
-rw-r--r--pw_env_setup/py/pw_env_setup/gni_visitor.py100
-rw-r--r--pw_env_setup/py/pw_env_setup/virtualenv_setup/constraint.list11
-rw-r--r--pw_env_setup/pypi_common_setup.cfg2
-rw-r--r--pw_env_setup/util.sh3
-rw-r--r--pw_file/docs.rst2
-rw-r--r--pw_file/flat_file_system.cc41
-rw-r--r--pw_file/public/pw_file/flat_file_system.h2
-rw-r--r--pw_function/function_test.cc7
-rw-r--r--pw_function/public/pw_function/function.h6
-rw-r--r--pw_fuzzer/fuzzer.bzl2
-rw-r--r--pw_hdlc/py/pw_hdlc/rpc.py94
-rw-r--r--pw_hdlc/py/pw_hdlc/rpc_console.py6
-rw-r--r--pw_hdlc/rpc_example/docs.rst2
-rw-r--r--pw_hdlc/rpc_example/hdlc_rpc_server.cc3
-rw-r--r--pw_i2c_mcuxpresso/BUILD.bazel36
-rw-r--r--pw_i2c_mcuxpresso/BUILD.gn44
-rw-r--r--pw_i2c_mcuxpresso/OWNERS2
-rw-r--r--pw_i2c_mcuxpresso/docs.rst26
-rw-r--r--pw_i2c_mcuxpresso/initiator.cc156
-rw-r--r--pw_i2c_mcuxpresso/public/pw_i2c_mcuxpresso/initiator.h68
-rw-r--r--pw_interrupt/BUILD.gn6
-rw-r--r--pw_interrupt/backend.gni18
-rw-r--r--pw_kvs/BUILD.bazel14
-rw-r--r--pw_kvs/BUILD.gn88
-rw-r--r--pw_kvs/CMakeLists.txt2
-rw-r--r--pw_kvs/docs.rst2
-rw-r--r--pw_kvs/flash_memory.cc38
-rw-r--r--pw_kvs/flash_partition_stream_test.cc396
-rw-r--r--pw_kvs/key_value_store_initialized_test.cc28
-rw-r--r--pw_kvs/public/pw_kvs/flash_memory.h52
-rw-r--r--pw_kvs/public/pw_kvs/key_value_store.h9
-rw-r--r--pw_log/Android.bp (renamed from pw_transfer/chunk_data_buffer.cc)32
-rw-r--r--pw_log/AndroidManifest.xml23
-rw-r--r--pw_log/BUILD.bazel33
-rw-r--r--pw_log/BUILD.gn35
-rw-r--r--pw_log/CMakeLists.txt34
-rw-r--r--pw_log/docs.rst49
-rw-r--r--pw_log/glog_adapter_test.cc102
-rw-r--r--pw_log/java/android_main/dev/pigweed/pw_log/Logger.java103
-rw-r--r--pw_log/java/main/dev/pigweed/pw_log/BUILD.bazel25
-rw-r--r--pw_log/java/main/dev/pigweed/pw_log/Logger.java71
-rw-r--r--pw_log/log.proto13
-rw-r--r--pw_log/proto_utils.cc17
-rw-r--r--pw_log/proto_utils_test.cc169
-rw-r--r--pw_log/protobuf.rst10
-rw-r--r--pw_log/public/pw_log/glog_adapter.h43
-rw-r--r--pw_log/public/pw_log/glog_adapter_config.h27
-rw-r--r--pw_log/public/pw_log/internal/glog_adapter.h91
-rw-r--r--pw_log/public/pw_log/proto_utils.h73
-rw-r--r--pw_log_android/Android.bp29
-rw-r--r--pw_log_android/BUILD.gn15
-rw-r--r--pw_log_android/public/pw_log_android/log_android.h43
-rw-r--r--pw_log_android/public_overrides/pw_log_backend/log_backend.h16
-rw-r--r--pw_log_rpc/BUILD.bazel37
-rw-r--r--pw_log_rpc/BUILD.gn2
-rw-r--r--pw_log_rpc/OWNERS1
-rw-r--r--pw_log_rpc/docs.rst62
-rw-r--r--pw_log_rpc/log_filter_service.cc27
-rw-r--r--pw_log_rpc/log_filter_service_test.cc12
-rw-r--r--pw_log_rpc/log_filter_test.cc1
-rw-r--r--pw_log_rpc/log_service_test.cc277
-rw-r--r--pw_log_rpc/public/pw_log_rpc/internal/config.h31
-rw-r--r--pw_log_rpc/public/pw_log_rpc/log_filter_service.h41
-rw-r--r--pw_log_rpc/public/pw_log_rpc/rpc_log_drain.h57
-rw-r--r--pw_log_rpc/public/pw_log_rpc/rpc_log_drain_thread.h10
-rw-r--r--pw_log_rpc/pw_log_rpc_private/test_utils.h13
-rw-r--r--pw_log_rpc/rpc_log_drain.cc145
-rw-r--r--pw_log_rpc/rpc_log_drain_test.cc157
-rw-r--r--pw_log_rpc/test_utils.cc73
-rw-r--r--pw_log_string/BUILD.bazel18
-rw-r--r--pw_log_string/BUILD.gn62
-rw-r--r--pw_log_string/CMakeLists.txt18
-rw-r--r--pw_log_string/OWNERS1
-rw-r--r--pw_log_string/backend.gni7
-rw-r--r--pw_log_string/docs.rst26
-rw-r--r--pw_log_string/handler.cc35
-rw-r--r--pw_log_string/public/pw_log_string/handler.h42
-rw-r--r--pw_log_string/public/pw_log_string/log_string.h22
-rw-r--r--pw_log_zephyr/BUILD.gn15
-rw-r--r--pw_metric/metric_service_nanopb.cc3
-rw-r--r--pw_minimal_cpp_stdlib/BUILD.gn12
-rw-r--r--pw_multisink/BUILD.bazel2
-rw-r--r--pw_multisink/CMakeLists.txt107
-rw-r--r--pw_multisink/docs.rst7
-rw-r--r--pw_multisink/multisink.cc49
-rw-r--r--pw_multisink/multisink_test.cc280
-rw-r--r--pw_multisink/multisink_threaded_test.cc12
-rw-r--r--pw_multisink/public/pw_multisink/multisink.h40
-rw-r--r--pw_package/docs.rst7
-rw-r--r--pw_package/py/BUILD.gn3
-rw-r--r--pw_package/py/pw_package/packages/googletest.py40
-rw-r--r--pw_package/py/pw_package/packages/pico_sdk.py40
-rw-r--r--pw_package/py/pw_package/packages/smartfusion_mss.py40
-rw-r--r--pw_package/py/pw_package/pigweed_packages.py3
-rw-r--r--pw_preprocessor/docs.rst25
-rw-r--r--pw_preprocessor/public/pw_preprocessor/compiler.h36
-rw-r--r--pw_presubmit/py/pw_presubmit/build.py2
-rw-r--r--pw_presubmit/py/pw_presubmit/cli.py3
-rwxr-xr-xpw_presubmit/py/pw_presubmit/format_code.py5
-rw-r--r--pw_presubmit/py/pw_presubmit/git_repo.py16
-rwxr-xr-xpw_presubmit/py/pw_presubmit/pigweed_presubmit.py22
-rw-r--r--pw_presubmit/py/pw_presubmit/presubmit.py3
-rw-r--r--pw_protobuf/BUILD.bazel27
-rw-r--r--pw_protobuf/BUILD.gn13
-rw-r--r--pw_protobuf/CMakeLists.txt16
-rw-r--r--pw_protobuf/codegen_decoder_test.cc936
-rw-r--r--pw_protobuf/codegen_encoder_test.cc (renamed from pw_protobuf/codegen_test.cc)99
-rw-r--r--pw_protobuf/docs.rst292
-rw-r--r--pw_protobuf/encoder_test.cc96
-rw-r--r--pw_protobuf/public/pw_protobuf/encoder.h170
-rw-r--r--pw_protobuf/public/pw_protobuf/stream_decoder.h417
-rw-r--r--pw_protobuf/pw_protobuf_test_protos/imported.proto6
-rw-r--r--pw_protobuf/pw_protobuf_test_protos/importer.proto4
-rw-r--r--pw_protobuf/pw_protobuf_test_protos/proto2.proto2
-rw-r--r--pw_protobuf/pw_protobuf_test_protos/repeated.proto2
-rw-r--r--pw_protobuf/py/pw_protobuf/codegen_pwpb.py1023
-rw-r--r--pw_protobuf/stream_decoder.cc207
-rw-r--r--pw_protobuf/stream_decoder_test.cc542
-rw-r--r--pw_protobuf_compiler/BUILD.bazel11
-rw-r--r--pw_protobuf_compiler/docs.rst48
-rw-r--r--pw_protobuf_compiler/proto.bzl94
-rw-r--r--pw_protobuf_compiler/proto.gni8
-rw-r--r--pw_protobuf_compiler/pw_nanopb_cc_library.bzl37
-rw-r--r--pw_protobuf_compiler/pw_proto_library.bzl289
-rw-r--r--pw_protobuf_compiler/ts/BUILD.bazel2
-rw-r--r--pw_protobuf_compiler/ts/codegen/BUILD.bazel2
-rw-r--r--pw_protobuf_compiler/ts/ts_proto_collection.bzl2
-rw-r--r--pw_result/BUILD.bazel15
-rw-r--r--pw_result/BUILD.gn6
-rw-r--r--pw_result/docs.rst77
-rw-r--r--pw_result/public/pw_result/internal/result_internal.h379
-rw-r--r--pw_result/public/pw_result/result.h728
-rw-r--r--pw_result/result_test.cc72
-rw-r--r--pw_result/statusor_test.cc1735
-rw-r--r--pw_ring_buffer/CMakeLists.txt22
-rw-r--r--pw_rpc/Android.bp2
-rw-r--r--pw_rpc/BUILD.bazel111
-rw-r--r--pw_rpc/BUILD.gn11
-rw-r--r--pw_rpc/benchmark.cc9
-rw-r--r--pw_rpc/channel.cc31
-rw-r--r--pw_rpc/channel_test.cc24
-rw-r--r--pw_rpc/client.cc4
-rw-r--r--pw_rpc/client_call.cc2
-rw-r--r--pw_rpc/client_integration_test.cc2
-rw-r--r--pw_rpc/client_server_test.cc6
-rw-r--r--pw_rpc/docs.rst10
-rw-r--r--pw_rpc/fake_channel_output.cc7
-rw-r--r--pw_rpc/internal/packet.proto2
-rw-r--r--pw_rpc/java/main/dev/pigweed/pw_rpc/BUILD.bazel2
-rw-r--r--pw_rpc/java/main/dev/pigweed/pw_rpc/Client.java59
-rw-r--r--pw_rpc/java/main/dev/pigweed/pw_rpc/Packets.java4
-rw-r--r--pw_rpc/java/main/dev/pigweed/pw_rpc/PendingRpc.java3
-rw-r--r--pw_rpc/java/main/dev/pigweed/pw_rpc/RpcManager.java23
-rw-r--r--pw_rpc/java/main/dev/pigweed/pw_rpc/Status.java3
-rw-r--r--pw_rpc/java/main/dev/pigweed/pw_rpc/StreamObserverCall.java9
-rw-r--r--pw_rpc/java/test/dev/pigweed/pw_rpc/ClientTest.java4
-rw-r--r--pw_rpc/java/test/dev/pigweed/pw_rpc/PacketsTest.java4
-rw-r--r--pw_rpc/java/test/dev/pigweed/pw_rpc/RpcManagerTest.java4
-rw-r--r--pw_rpc/java/test/dev/pigweed/pw_rpc/StreamObserverCallTest.java4
-rw-r--r--pw_rpc/java/test/dev/pigweed/pw_rpc/TestClient.java4
-rw-r--r--pw_rpc/nanopb/BUILD.bazel262
-rw-r--r--pw_rpc/nanopb/client_call_test.cc42
-rw-r--r--pw_rpc/nanopb/codegen_test.cc30
-rw-r--r--pw_rpc/nanopb/method_lookup_test.cc11
-rw-r--r--pw_rpc/nanopb/method_test.cc7
-rw-r--r--pw_rpc/nanopb/method_union_test.cc4
-rw-r--r--pw_rpc/nanopb/public/pw_rpc/nanopb/fake_channel_output.h12
-rw-r--r--pw_rpc/nanopb/server_callback_test.cc2
-rw-r--r--pw_rpc/nanopb/server_reader_writer_test.cc13
-rw-r--r--pw_rpc/packet.cc20
-rw-r--r--pw_rpc/public/pw_rpc/benchmark.h2
-rw-r--r--pw_rpc/public/pw_rpc/channel.h6
-rw-r--r--pw_rpc/public/pw_rpc/client.h1
-rw-r--r--pw_rpc/public/pw_rpc/internal/fake_channel_output.h126
-rw-r--r--pw_rpc/public/pw_rpc/internal/lock.h4
-rw-r--r--pw_rpc/public/pw_rpc/internal/method_impl_tester.h25
-rw-r--r--pw_rpc/public/pw_rpc/internal/test_method_context.h6
-rw-r--r--pw_rpc/public/pw_rpc/payloads_view.h6
-rw-r--r--pw_rpc/public/pw_rpc/server.h32
-rw-r--r--pw_rpc/public/pw_rpc/thread_testing.h42
-rw-r--r--pw_rpc/py/BUILD.bazel2
-rw-r--r--pw_rpc/py/docs.rst7
-rw-r--r--pw_rpc/py/pw_rpc/__init__.py2
-rw-r--r--pw_rpc/py/pw_rpc/client.py15
-rw-r--r--pw_rpc/py/pw_rpc/codegen_raw.py7
-rw-r--r--pw_rpc/py/pw_rpc/descriptors.py52
-rwxr-xr-xpw_rpc/py/tests/client_test.py20
-rw-r--r--pw_rpc/raw/BUILD.bazel31
-rw-r--r--pw_rpc/raw/client_testing.cc2
-rw-r--r--pw_rpc/raw/codegen_test.cc22
-rw-r--r--pw_rpc/raw/method_test.cc34
-rw-r--r--pw_rpc/raw/method_union_test.cc15
-rw-r--r--pw_rpc/raw/public/pw_rpc/raw/internal/method.h12
-rw-r--r--pw_rpc/raw/server_reader_writer_test.cc14
-rw-r--r--pw_rpc/server.cc3
-rw-r--r--pw_rpc/server_test.cc62
-rw-r--r--pw_rpc/ts/BUILD.bazel15
-rw-r--r--pw_snapshot/docs.rst4
-rw-r--r--pw_snapshot/module_usage.rst28
-rw-r--r--pw_software_update/BUILD.bazel2
-rw-r--r--pw_software_update/BUILD.gn2
-rw-r--r--pw_software_update/bundled_update_service.cc386
-rw-r--r--pw_software_update/manifest_accessor.cc120
-rw-r--r--pw_software_update/public/pw_software_update/bundled_update_backend.h105
-rw-r--r--pw_software_update/public/pw_software_update/bundled_update_service.h31
-rw-r--r--pw_software_update/public/pw_software_update/config.h19
-rw-r--r--pw_software_update/public/pw_software_update/manifest_accessor.h60
-rw-r--r--pw_software_update/public/pw_software_update/update_bundle_accessor.h189
-rw-r--r--pw_software_update/py/pw_software_update/generate_test_bundle.py15
-rw-r--r--pw_software_update/update_bundle_accessor.cc748
-rw-r--r--pw_software_update/update_bundle_test.cc197
-rw-r--r--pw_span/CMakeLists.txt2
-rw-r--r--pw_spi/public/pw_spi/device.h3
-rw-r--r--pw_status/docs.rst8
-rw-r--r--pw_stm32cube_build/py/pw_stm32cube_build/find_files.py1
-rw-r--r--pw_stream/public/pw_stream/stream.h2
-rw-r--r--pw_string/string_builder.cc4
-rw-r--r--pw_sync_embos/BUILD.gn36
-rw-r--r--pw_sync_freertos/BUILD.gn90
-rw-r--r--pw_sync_freertos/CMakeLists.txt177
-rw-r--r--pw_sync_freertos/thread_notification.cc4
-rw-r--r--pw_sync_freertos/timed_thread_notification.cc4
-rw-r--r--pw_sync_stl/BUILD.gn28
-rw-r--r--pw_sync_threadx/BUILD.gn29
-rw-r--r--pw_sys_io_emcraft_sf2/BUILD.bazel39
-rw-r--r--pw_sys_io_emcraft_sf2/BUILD.gn60
-rw-r--r--pw_sys_io_emcraft_sf2/docs.rst44
-rw-r--r--pw_sys_io_emcraft_sf2/public/pw_sys_io_emcraft_sf2/init.h23
-rw-r--r--pw_sys_io_emcraft_sf2/pw_sys_io_emcraft_sf2_private/config.h22
-rw-r--r--pw_sys_io_emcraft_sf2/sys_io_emcraft_sf2.cc104
-rw-r--r--pw_sys_io_stm32cube/BUILD.gn6
-rw-r--r--pw_sys_io_zephyr/Kconfig17
-rw-r--r--pw_sys_io_zephyr/docs.rst10
-rw-r--r--pw_sys_io_zephyr/sys_io.cc53
-rw-r--r--pw_system/BUILD.bazel57
-rw-r--r--pw_system/BUILD.gn72
-rw-r--r--pw_system/CMakeLists.txt164
-rw-r--r--pw_system/backend.gni12
-rw-r--r--pw_system/docs.rst95
-rw-r--r--pw_system/hdlc_rpc_server.cc (renamed from pw_system/rpc.cc)5
-rw-r--r--pw_system/init.cc4
-rw-r--r--pw_system/log.cc82
-rw-r--r--pw_system/log_backend.cc106
-rw-r--r--pw_system/public/pw_system/rpc_server.h (renamed from pw_system/pw_system_private/rpc.h)6
-rw-r--r--pw_system/pw_system_private/log.h4
-rw-r--r--pw_system/py/pw_system/console.py18
-rw-r--r--pw_system/py/pw_system/device.py15
-rw-r--r--pw_system/system_target.gni82
-rw-r--r--pw_thread/CMakeLists.txt1
-rw-r--r--pw_thread/pw_thread_protos/thread.proto8
-rw-r--r--pw_thread/py/pw_thread/thread_analyzer.py13
-rw-r--r--pw_thread/py/thread_analyzer_test.py51
-rw-r--r--pw_thread_embos/BUILD.gn16
-rw-r--r--pw_thread_freertos/BUILD.gn20
-rw-r--r--pw_thread_stl/BUILD.gn21
-rw-r--r--pw_thread_threadx/BUILD.gn15
-rw-r--r--pw_tokenizer/docs.rst38
-rw-r--r--pw_tokenizer/generate_decoding_test_data.cc6
-rw-r--r--pw_tokenizer/py/BUILD.gn1
-rwxr-xr-xpw_tokenizer/py/decode_test.py56
-rw-r--r--pw_tokenizer/py/pw_tokenizer/decode.py30
-rwxr-xr-xpw_tokenizer/py/pw_tokenizer/detokenize.py33
-rw-r--r--pw_tokenizer/py/pw_tokenizer/parse_message.py182
-rw-r--r--pw_toolchain/docs.rst7
-rw-r--r--pw_toolchain/generate_toolchain.gni6
-rw-r--r--pw_toolchain/host_clang/BUILD.gn5
-rw-r--r--pw_toolchain/py/pw_toolchain/clang_tidy.py4
-rw-r--r--pw_toolchain/static_analysis_toolchain.gni10
-rw-r--r--pw_trace_tokenized/example/basic.cc10
-rw-r--r--pw_trace_tokenized/example/filter.cc8
-rw-r--r--pw_trace_tokenized/example/rpc.cc11
-rw-r--r--pw_trace_tokenized/example/trigger.cc8
-rwxr-xr-xpw_trace_tokenized/py/pw_trace_tokenized/get_trace.py9
-rwxr-xr-xpw_trace_tokenized/py/pw_trace_tokenized/trace_tokenized.py8
-rw-r--r--pw_trace_tokenized/trace_buffer.cc3
-rw-r--r--pw_trace_tokenized/trace_rpc_service_nanopb.cc5
-rw-r--r--pw_transfer/BUILD.bazel94
-rw-r--r--pw_transfer/BUILD.gn115
-rw-r--r--pw_transfer/CMakeLists.txt14
-rw-r--r--pw_transfer/chunk.cc22
-rw-r--r--pw_transfer/client.cc185
-rw-r--r--pw_transfer/client_connection.cc46
-rw-r--r--pw_transfer/client_context.cc40
-rw-r--r--pw_transfer/client_test.cc540
-rw-r--r--pw_transfer/context.cc850
-rw-r--r--pw_transfer/docs.rst25
-rw-r--r--pw_transfer/integration_test.cc38
-rw-r--r--pw_transfer/public/pw_transfer/client.h65
-rw-r--r--pw_transfer/public/pw_transfer/handler.h23
-rw-r--r--pw_transfer/public/pw_transfer/internal/chunk.h11
-rw-r--r--pw_transfer/public/pw_transfer/internal/chunk_data_buffer.h50
-rw-r--r--pw_transfer/public/pw_transfer/internal/client_connection.h75
-rw-r--r--pw_transfer/public/pw_transfer/internal/client_context.h61
-rw-r--r--pw_transfer/public/pw_transfer/internal/config.h48
-rw-r--r--pw_transfer/public/pw_transfer/internal/context.h358
-rw-r--r--pw_transfer/public/pw_transfer/internal/event.h105
-rw-r--r--pw_transfer/public/pw_transfer/internal/server_context.h64
-rw-r--r--pw_transfer/public/pw_transfer/rate_estimate.h42
-rw-r--r--pw_transfer/public/pw_transfer/transfer.h86
-rw-r--r--pw_transfer/public/pw_transfer/transfer_thread.h270
-rw-r--r--pw_transfer/py/pw_transfer/transfer.py59
-rwxr-xr-xpw_transfer/py/tests/python_cpp_transfer_test.py30
-rw-r--r--pw_transfer/py/tests/transfer_test.py20
-rw-r--r--pw_transfer/rate_estimate.cc35
-rw-r--r--pw_transfer/server_context.cc122
-rw-r--r--pw_transfer/test_rpc_server.cc18
-rw-r--r--pw_transfer/transfer.cc72
-rw-r--r--pw_transfer/transfer.proto19
-rw-r--r--pw_transfer/transfer_test.cc602
-rw-r--r--pw_transfer/transfer_thread.cc320
-rw-r--r--pw_transfer/transfer_thread_test.cc218
-rw-r--r--pw_transfer/ts/BUILD.bazel2
-rw-r--r--pw_transfer/ts/transfer.ts56
-rw-r--r--pw_transfer/ts/transfer_test.ts7
-rw-r--r--pw_unit_test/BUILD.bazel27
-rw-r--r--pw_unit_test/docs.rst33
-rw-r--r--pw_unit_test/framework.cc11
-rw-r--r--pw_unit_test/framework_test.cc16
-rw-r--r--pw_unit_test/logging_event_handler.cc7
-rw-r--r--pw_unit_test/public/pw_unit_test/event_handler.h2
-rw-r--r--pw_unit_test/public/pw_unit_test/framework.h31
-rw-r--r--pw_unit_test/simple_printing_event_handler.cc7
-rw-r--r--pw_watch/docs.rst4
-rw-r--r--pw_watch/py/pw_watch/debounce.py2
-rwxr-xr-xpw_watch/py/pw_watch/watch.py324
-rw-r--r--pw_watch/py/pw_watch/watch_app.py403
-rw-r--r--pw_web_ui/BUILD.bazel20
-rw-r--r--targets/default_config.BUILD10
-rw-r--r--targets/emcraft_sf2_som/BUILD.bazel58
-rw-r--r--targets/emcraft_sf2_som/BUILD.gn149
-rw-r--r--targets/emcraft_sf2_som/boot.cc195
-rw-r--r--targets/emcraft_sf2_som/config/FreeRTOSConfig.h81
-rw-r--r--targets/emcraft_sf2_som/config/sf2_mss_hal_conf.h25
-rw-r--r--targets/emcraft_sf2_som/target_docs.rst35
-rw-r--r--targets/emcraft_sf2_som/vector_table.c75
-rw-r--r--targets/host/macos.gni5
-rw-r--r--targets/host/pigweed_internal/BUILD.gn28
-rw-r--r--targets/host/target_docs.rst72
-rw-r--r--targets/host/target_toolchains.gni95
-rw-r--r--targets/host_device_simulator/target_docs.rst4
-rw-r--r--targets/rp2040/BUILD.bazel36
-rw-r--r--targets/rp2040/BUILD.gn79
-rw-r--r--targets/rp2040/pico_executable.gni25
-rw-r--r--targets/rp2040/pico_logging_test_main.cc24
-rw-r--r--targets/rp2040/target_docs.rst63
-rw-r--r--targets/stm32f429i_disc1_stm32cube/BUILD.gn5
-rw-r--r--targets/stm32f429i_disc1_stm32cube/boot.cc18
-rw-r--r--targets/stm32f429i_disc1_stm32cube/target_docs.rst20
-rw-r--r--third_party/freertos/BUILD.gn36
-rw-r--r--third_party/freertos/CMakeLists.txt15
-rw-r--r--third_party/pico_sdk/gn/BUILD.gn27
-rw-r--r--third_party/pico_sdk/gn/generate_config_header.gni66
-rw-r--r--third_party/pico_sdk/pi_pico.gni34
-rw-r--r--third_party/pico_sdk/src/BUILD.gn23
-rw-r--r--third_party/pico_sdk/src/boards/BUILD.gn58
-rw-r--r--third_party/pico_sdk/src/common/BUILD.gn34
-rw-r--r--third_party/pico_sdk/src/common/boot_picoboot/BUILD.gn35
-rw-r--r--third_party/pico_sdk/src/common/boot_uf2/BUILD.gn32
-rw-r--r--third_party/pico_sdk/src/common/pico_base/BUILD.gn66
-rw-r--r--third_party/pico_sdk/src/common/pico_base/generate_version_header.gni80
-rw-r--r--third_party/pico_sdk/src/common/pico_binary_info/BUILD.gn38
-rw-r--r--third_party/pico_sdk/src/common/pico_bit_ops/BUILD.gn33
-rw-r--r--third_party/pico_sdk/src/common/pico_divider/BUILD.gn36
-rw-r--r--third_party/pico_sdk/src/common/pico_stdlib/BUILD.gn59
-rw-r--r--third_party/pico_sdk/src/common/pico_stdlib/pico_stdio.gni24
-rw-r--r--third_party/pico_sdk/src/common/pico_sync/BUILD.gn49
-rw-r--r--third_party/pico_sdk/src/common/pico_time/BUILD.gn49
-rw-r--r--third_party/pico_sdk/src/common/pico_usb_reset_interface/BUILD.gn32
-rw-r--r--third_party/pico_sdk/src/common/pico_util/BUILD.gn47
-rw-r--r--third_party/pico_sdk/src/rp2040/BUILD.gn37
-rw-r--r--third_party/pico_sdk/src/rp2040/hardware_regs/BUILD.gn77
-rw-r--r--third_party/pico_sdk/src/rp2040/hardware_structs/BUILD.gn69
-rw-r--r--third_party/pico_sdk/src/rp2_common/BUILD.gn73
-rw-r--r--third_party/pico_sdk/src/rp2_common/boot_stage2/BUILD.gn114
-rw-r--r--third_party/pico_sdk/src/rp2_common/cmsis/BUILD.gn59
-rw-r--r--third_party/pico_sdk/src/rp2_common/hardware_adc/BUILD.gn39
-rw-r--r--third_party/pico_sdk/src/rp2_common/hardware_base/BUILD.gn36
-rw-r--r--third_party/pico_sdk/src/rp2_common/hardware_claim/BUILD.gn41
-rw-r--r--third_party/pico_sdk/src/rp2_common/hardware_clocks/BUILD.gn54
-rw-r--r--third_party/pico_sdk/src/rp2_common/hardware_divider/BUILD.gn44
-rw-r--r--third_party/pico_sdk/src/rp2_common/hardware_dma/BUILD.gn39
-rw-r--r--third_party/pico_sdk/src/rp2_common/hardware_exception/BUILD.gn43
-rw-r--r--third_party/pico_sdk/src/rp2_common/hardware_flash/BUILD.gn39
-rw-r--r--third_party/pico_sdk/src/rp2_common/hardware_gpio/BUILD.gn45
-rw-r--r--third_party/pico_sdk/src/rp2_common/hardware_i2c/BUILD.gn43
-rw-r--r--third_party/pico_sdk/src/rp2_common/hardware_interp/BUILD.gn39
-rw-r--r--third_party/pico_sdk/src/rp2_common/hardware_irq/BUILD.gn53
-rw-r--r--third_party/pico_sdk/src/rp2_common/hardware_pio/BUILD.gn44
-rw-r--r--third_party/pico_sdk/src/rp2_common/hardware_pll/BUILD.gn43
-rw-r--r--third_party/pico_sdk/src/rp2_common/hardware_pwm/BUILD.gn37
-rw-r--r--third_party/pico_sdk/src/rp2_common/hardware_resets/BUILD.gn36
-rw-r--r--third_party/pico_sdk/src/rp2_common/hardware_rtc/BUILD.gn42
-rw-r--r--third_party/pico_sdk/src/rp2_common/hardware_spi/BUILD.gn43
-rw-r--r--third_party/pico_sdk/src/rp2_common/hardware_sync/BUILD.gn42
-rw-r--r--third_party/pico_sdk/src/rp2_common/hardware_timer/BUILD.gn45
-rw-r--r--third_party/pico_sdk/src/rp2_common/hardware_uart/BUILD.gn45
-rw-r--r--third_party/pico_sdk/src/rp2_common/hardware_vreg/BUILD.gn37
-rw-r--r--third_party/pico_sdk/src/rp2_common/hardware_watchdog/BUILD.gn37
-rw-r--r--third_party/pico_sdk/src/rp2_common/hardware_xosc/BUILD.gn43
-rw-r--r--third_party/pico_sdk/src/rp2_common/pico_bit_ops/BUILD.gn31
-rw-r--r--third_party/pico_sdk/src/rp2_common/pico_bootrom/BUILD.gn37
-rw-r--r--third_party/pico_sdk/src/rp2_common/pico_bootsel_via_double_reset/BUILD.gn32
-rw-r--r--third_party/pico_sdk/src/rp2_common/pico_divider/BUILD.gn30
-rw-r--r--third_party/pico_sdk/src/rp2_common/pico_double/BUILD.gn47
-rw-r--r--third_party/pico_sdk/src/rp2_common/pico_fix/BUILD.gn17
-rw-r--r--third_party/pico_sdk/src/rp2_common/pico_fix/rp2040_usb_device_enumeration/BUILD.gn39
-rw-r--r--third_party/pico_sdk/src/rp2_common/pico_float/BUILD.gn47
-rw-r--r--third_party/pico_sdk/src/rp2_common/pico_int64_ops/BUILD.gn35
-rw-r--r--third_party/pico_sdk/src/rp2_common/pico_malloc/BUILD.gn37
-rw-r--r--third_party/pico_sdk/src/rp2_common/pico_mem_ops/BUILD.gn41
-rw-r--r--third_party/pico_sdk/src/rp2_common/pico_multicore/BUILD.gn45
-rw-r--r--third_party/pico_sdk/src/rp2_common/pico_platform/BUILD.gn44
-rw-r--r--third_party/pico_sdk/src/rp2_common/pico_printf/BUILD.gn43
-rw-r--r--third_party/pico_sdk/src/rp2_common/pico_runtime/BUILD.gn46
-rw-r--r--third_party/pico_sdk/src/rp2_common/pico_standard_link/BUILD.gn46
-rw-r--r--third_party/pico_sdk/src/rp2_common/pico_stdio/BUILD.gn73
-rw-r--r--third_party/pico_sdk/src/rp2_common/pico_stdio_semihosting/BUILD.gn35
-rw-r--r--third_party/pico_sdk/src/rp2_common/pico_stdio_uart/BUILD.gn41
-rw-r--r--third_party/pico_sdk/src/rp2_common/pico_stdio_usb/BUILD.gn52
-rw-r--r--third_party/pico_sdk/src/rp2_common/pico_stdlib/BUILD.gn51
-rw-r--r--third_party/pico_sdk/src/rp2_common/pico_unique_id/BUILD.gn35
-rw-r--r--third_party/pico_sdk/src/rp2_common/tinyusb/BUILD.gn15
-rw-r--r--third_party/rules_proto_grpc/BUILD.bazel15
-rw-r--r--third_party/rules_proto_grpc/internal_proto.bzl237
-rw-r--r--third_party/smartfusion_mss/BUILD.bazel40
-rw-r--r--third_party/smartfusion_mss/BUILD.gn112
-rw-r--r--third_party/smartfusion_mss/README.md6
-rw-r--r--third_party/smartfusion_mss/configs/config_debug.h19
-rw-r--r--third_party/smartfusion_mss/configs/config_default.h17
-rw-r--r--third_party/smartfusion_mss/configs/config_pigweed_common.h20
-rw-r--r--third_party/smartfusion_mss/mss.gni24
-rw-r--r--third_party/stm32cube/BUILD.gn267
-rw-r--r--zephyr/OWNERS1
598 files changed, 32599 insertions, 6811 deletions
diff --git a/.clang-tidy b/.clang-tidy
index 1a6497849..10f27f397 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -30,20 +30,20 @@ Checks: >
misc-static-assert,
misc-unconventional-assign-operator,
misc-unused-using-decls,
- modernize-avoid-bind
- modernize-deprecated-ios-base-aliases
- modernize-make-shared
- modernize-make-unique
- modernize-replace-auto-ptr
- modernize-replace-disallow-copy-and-assign-macro
- modernize-replace-random-shuffle
- modernize-shrink-to-fit
- modernize-use-bool-literals
- modernize-use-equals-delete
- modernize-use-noexcept
- modernize-use-nullptr
- modernize-use-transparent-functors
- modernize-use-uncaught-exceptions
+ modernize-avoid-bind,
+ modernize-deprecated-ios-base-aliases,
+ modernize-make-shared,
+ modernize-make-unique,
+ modernize-replace-auto-ptr,
+ modernize-replace-disallow-copy-and-assign-macro,
+ modernize-replace-random-shuffle,
+ modernize-shrink-to-fit,
+ modernize-use-bool-literals,
+ modernize-use-equals-delete,
+ modernize-use-noexcept,
+ modernize-use-nullptr,
+ modernize-use-transparent-functors,
+ modernize-use-uncaught-exceptions,
performance-faster-string-find,
performance-for-range-copy,
performance-implicit-conversion-in-loop,
diff --git a/.gitignore b/.gitignore
index b914aa0d2..f66dbc962 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,11 +10,16 @@ docs/_build
.project
.cproject
.vscode
+# Clangd directories
.clangd/
+/.cache/clangd/
+# Vim
*.swp
*.swo
+# Emacs
*flycheck_*
*_flymake.*
+.#*
# Python
*.pyc
@@ -50,6 +55,7 @@ pw_env_setup/py/oxidizer/build
# Env Setup
environment
.environment
+build_overrides/pigweed_environment.gni
# TODO(pwbug/216) Remove following lines in this section.
# Maybe find a way to delete these files before these lines are removed.
python*-env/
diff --git a/BUILD.gn b/BUILD.gn
index 7124ab280..4f11d5bdc 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations under
# the License.
+import("//build_overrides/pi_pico.gni")
import("//build_overrides/pigweed.gni")
import("$dir_pw_arduino_build/arduino.gni")
@@ -47,12 +48,14 @@ declare_args() {
# exclusively to facilitate easy upstream development and testing.
group("default") {
deps = [
+ ":check_modules",
":docs",
":host",
+ ":pi_pico",
+ ":python.lint",
+ ":python.tests",
":static_analysis",
":stm32f429i",
- "$dir_pw_env_setup:python.lint",
- "$dir_pw_env_setup:python.tests",
"pw_rpc:test_protos.python.install",
]
@@ -61,10 +64,54 @@ group("default") {
}
}
+# Verify that this BUILD.gn file is only used by Pigweed itself.
+assert(get_path_info("//", "abspath") == get_path_info(".", "abspath"),
+ "Pigweed's top-level BUILD.gn may only be used when building upstream " +
+ "Pigweed. To pull all Pigweed code into your build, import " +
+ "\$dir_pigweed/modules.gni and create a top-level pw_test_group " +
+ "that depends on the tests in pw_modules_tests. See " +
+ "https://pigweed.dev/build_system.html for details.")
+
+_update_or_check_modules_lists = {
+ script = "$dir_pw_build/py/pw_build/generate_modules_lists.py"
+ args = [
+ rebase_path(".", root_build_dir),
+ rebase_path("PIGWEED_MODULES", root_build_dir),
+ rebase_path("$dir_pw_build/generated_pigweed_modules_lists.gni",
+ root_build_dir),
+ ]
+ inputs = [
+ "$dir_pw_build/generated_pigweed_modules_lists.gni",
+ "PIGWEED_MODULES",
+ ]
+}
+
+# Check that PIGWEED_MODULES is up-to-date and sorted.
+action("check_modules") {
+ forward_variables_from(_update_or_check_modules_lists, "*")
+ outputs = [ "$target_gen_dir/$target_name.passed" ]
+ args += [ "--warn-only" ] + rebase_path(outputs, root_build_dir)
+}
+
+# Run this command after adding an item to PIGWEED_MODULES to update the
+# generated .gni with Pigweed modules lists.
+action("update_modules") {
+ forward_variables_from(_update_or_check_modules_lists, "*")
+ outputs = [ "$target_gen_dir/$target_name.ALWAYS_RERUN" ] # Never created
+}
+
group("pw_system_demo") {
deps = [ "$dir_pw_system:system_examples" ]
}
+group("pi_pico") {
+ if (PICO_SRC_DIR != "") {
+ deps = [ ":pw_module_tests(targets/rp2040)" ]
+ }
+}
+
+_internal_toolchains = "$dir_pigweed/targets/host/pigweed_internal"
+
# This template generates a group that builds pigweed_default with a particular
# toolchain.
template("_build_pigweed_default_at_all_optimization_levels") {
@@ -90,11 +137,11 @@ template("_build_pigweed_default_at_all_optimization_levels") {
# Select a default toolchain based on host OS.
if (host_os == "linux") {
- _default_toolchain_prefix = "$dir_pigweed/targets/host:host_clang_"
+ _default_toolchain_prefix = "$_internal_toolchains:pw_strict_host_clang_"
} else if (host_os == "mac") {
- _default_toolchain_prefix = "$dir_pigweed/targets/host:host_clang_"
+ _default_toolchain_prefix = "$_internal_toolchains:pw_strict_host_clang_"
} else if (host_os == "win") {
- _default_toolchain_prefix = "$dir_pigweed/targets/host:host_gcc_"
+ _default_toolchain_prefix = "$_internal_toolchains:pw_strict_host_gcc_"
} else {
assert(false, "Please define a host config for your system: $host_os")
}
@@ -106,11 +153,11 @@ _build_pigweed_default_at_all_optimization_levels("host") {
}
_build_pigweed_default_at_all_optimization_levels("host_clang") {
- toolchain_prefix = "$dir_pigweed/targets/host:host_clang_"
+ toolchain_prefix = "$_internal_toolchains:pw_strict_host_clang_"
}
_build_pigweed_default_at_all_optimization_levels("host_gcc") {
- toolchain_prefix = "$dir_pigweed/targets/host:host_gcc_"
+ toolchain_prefix = "$_internal_toolchains:pw_strict_host_gcc_"
}
if (pw_third_party_mcuxpresso_SDK != "") {
@@ -139,11 +186,11 @@ _build_pigweed_default_at_all_optimization_levels("qemu_clang") {
"$dir_pigweed/targets/lm3s6965evb_qemu:lm3s6965evb_qemu_clang_"
}
-# Run clang-tidy on pigweed_default with host_clang_debug toolchain options.
+# Run clang-tidy on pigweed_default with pw_strict_host_clang_debug toolchain options.
# Make sure to invoke gn clean out when any relevant .clang-tidy
# file is updated.
group("static_analysis") {
- _toolchain = "$dir_pigweed/targets/host:host_clang_debug"
+ _toolchain = "$_internal_toolchains:pw_strict_host_clang_debug"
deps = [ ":pigweed_default($_toolchain.static_analysis)" ]
}
@@ -280,129 +327,11 @@ if (current_toolchain != default_toolchain) {
# All Pigweed modules that can be built using gn. This is not built by default.
group("pw_modules") {
- deps = [
- "$dir_pigweed/docs",
- "$dir_pw_allocator",
- "$dir_pw_analog",
- "$dir_pw_base64",
- "$dir_pw_blob_store",
- "$dir_pw_bytes",
- "$dir_pw_checksum",
- "$dir_pw_chrono",
- "$dir_pw_console",
- "$dir_pw_cpu_exception",
- "$dir_pw_hdlc",
- "$dir_pw_i2c",
- "$dir_pw_metric",
- "$dir_pw_persistent_ram",
- "$dir_pw_polyfill",
- "$dir_pw_preprocessor",
- "$dir_pw_protobuf",
- "$dir_pw_result",
- "$dir_pw_spi",
- "$dir_pw_status",
- "$dir_pw_stream",
- "$dir_pw_string",
- "$dir_pw_sync",
- "$dir_pw_sys_io",
- "$dir_pw_system",
- "$dir_pw_thread",
- "$dir_pw_tool",
- "$dir_pw_trace",
- "$dir_pw_unit_test",
- "$dir_pw_varint",
- ]
-
- if (host_os != "win") {
- deps += [
- # TODO(frolv): Remove these two when new KVS is ready.
- "$dir_pw_kvs",
- "$dir_pw_minimal_cpp_stdlib",
-
- # TODO(pwbug/111): Remove this when building successfully on Windows.
- "$dir_pw_tokenizer",
- ]
- }
+ deps = pw_modules
}
# Targets for all module unit test groups.
pw_test_group("pw_module_tests") {
- group_deps = [
- "$dir_pw_allocator:tests",
- "$dir_pw_analog:tests",
- "$dir_pw_assert:tests",
- "$dir_pw_base64:tests",
- "$dir_pw_blob_store:tests",
- "$dir_pw_bluetooth_hci:tests",
- "$dir_pw_bytes:tests",
- "$dir_pw_checksum:tests",
- "$dir_pw_chrono:tests",
- "$dir_pw_containers:tests",
- "$dir_pw_cpu_exception_cortex_m:tests",
- "$dir_pw_crypto:tests",
- "$dir_pw_file:tests",
- "$dir_pw_function:tests",
- "$dir_pw_fuzzer:tests",
- "$dir_pw_hdlc:tests",
- "$dir_pw_hex_dump:tests",
- "$dir_pw_i2c:tests",
- "$dir_pw_libc:tests",
- "$dir_pw_log:tests",
- "$dir_pw_log_null:tests",
- "$dir_pw_log_rpc:tests",
- "$dir_pw_log_tokenized:tests",
- "$dir_pw_malloc_freelist:tests",
- "$dir_pw_metric:tests",
- "$dir_pw_multisink:tests",
- "$dir_pw_persistent_ram:tests",
- "$dir_pw_polyfill:tests",
- "$dir_pw_preprocessor:tests",
- "$dir_pw_protobuf:tests",
- "$dir_pw_protobuf_compiler:tests",
- "$dir_pw_random:tests",
- "$dir_pw_result:tests",
- "$dir_pw_ring_buffer:tests",
- "$dir_pw_router:tests",
- "$dir_pw_rpc:tests",
- "$dir_pw_snapshot:tests",
- "$dir_pw_software_update:tests",
- "$dir_pw_span:tests",
- "$dir_pw_spi:tests",
- "$dir_pw_status:tests",
- "$dir_pw_stream:tests",
- "$dir_pw_string:tests",
- "$dir_pw_sync:tests",
- "$dir_pw_thread:tests",
- "$dir_pw_thread_stl:tests",
- "$dir_pw_tls_client:tests",
- "$dir_pw_tls_client_boringssl:tests",
- "$dir_pw_tls_client_mbedtls:tests",
- "$dir_pw_tokenizer:tests",
- "$dir_pw_trace:tests",
- "$dir_pw_trace_tokenized:tests",
- "$dir_pw_unit_test:tests",
- "$dir_pw_varint:tests",
- "$dir_pw_work_queue:tests",
- ]
-
- if (defined(pw_toolchain_SCOPE.is_host_toolchain) &&
- pw_toolchain_SCOPE.is_host_toolchain) {
- # TODO(pwbug/196): KVS tests are not compatible with device builds as they
- # use features such as std::map and are computationally expensive. Solving
- # this requires a more complex capabilities-based build and configuration
- # system which allowing enabling specific tests for targets that support
- # them and modifying test parameters for different targets.
- group_deps += [ "$dir_pw_kvs:tests" ]
-
- if (host_os != "win") {
- # TODO(pwbug/441): Fix transfer tests on Windows.
- group_deps += [ "$dir_pw_transfer:tests" ]
- }
- }
-
- if (host_os != "win") {
- # TODO(amontanez): pw_minimal_cpp_stdlib tests do not build on windows.
- group_deps += [ "$dir_pw_minimal_cpp_stdlib:tests" ]
- }
+ group_deps = pw_module_tests
}
}
diff --git a/CMakeLists.txt b/CMakeLists.txt
index d75967c98..a6c922cb0 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -20,10 +20,10 @@ cmake_minimum_required(VERSION 3.16)
# Regardless of whether it's set or not the following include will ensure it is.
include(pw_build/pigweed.cmake)
-add_subdirectory(pw_assert_zephyr EXCLUDE_FROM_ALL)
add_subdirectory(pw_assert EXCLUDE_FROM_ALL)
add_subdirectory(pw_assert_basic EXCLUDE_FROM_ALL)
add_subdirectory(pw_assert_log EXCLUDE_FROM_ALL)
+add_subdirectory(pw_assert_zephyr EXCLUDE_FROM_ALL)
add_subdirectory(pw_base64 EXCLUDE_FROM_ALL)
add_subdirectory(pw_blob_store EXCLUDE_FROM_ALL)
add_subdirectory(pw_build EXCLUDE_FROM_ALL)
@@ -44,17 +44,19 @@ add_subdirectory(pw_interrupt EXCLUDE_FROM_ALL)
add_subdirectory(pw_interrupt_cortex_m EXCLUDE_FROM_ALL)
add_subdirectory(pw_interrupt_zephyr EXCLUDE_FROM_ALL)
add_subdirectory(pw_kvs EXCLUDE_FROM_ALL)
-add_subdirectory(pw_log_zephyr EXCLUDE_FROM_ALL)
add_subdirectory(pw_log EXCLUDE_FROM_ALL)
add_subdirectory(pw_log_basic EXCLUDE_FROM_ALL)
add_subdirectory(pw_log_null EXCLUDE_FROM_ALL)
+add_subdirectory(pw_log_string EXCLUDE_FROM_ALL)
add_subdirectory(pw_log_tokenized EXCLUDE_FROM_ALL)
+add_subdirectory(pw_log_zephyr EXCLUDE_FROM_ALL)
add_subdirectory(pw_minimal_cpp_stdlib EXCLUDE_FROM_ALL)
+add_subdirectory(pw_multisink EXCLUDE_FROM_ALL)
add_subdirectory(pw_persistent_ram EXCLUDE_FROM_ALL)
add_subdirectory(pw_polyfill EXCLUDE_FROM_ALL)
+add_subdirectory(pw_preprocessor EXCLUDE_FROM_ALL)
add_subdirectory(pw_protobuf EXCLUDE_FROM_ALL)
add_subdirectory(pw_protobuf_compiler EXCLUDE_FROM_ALL)
-add_subdirectory(pw_preprocessor EXCLUDE_FROM_ALL)
add_subdirectory(pw_random EXCLUDE_FROM_ALL)
add_subdirectory(pw_result EXCLUDE_FROM_ALL)
add_subdirectory(pw_ring_buffer EXCLUDE_FROM_ALL)
@@ -65,12 +67,13 @@ add_subdirectory(pw_span EXCLUDE_FROM_ALL)
add_subdirectory(pw_status EXCLUDE_FROM_ALL)
add_subdirectory(pw_stream EXCLUDE_FROM_ALL)
add_subdirectory(pw_string EXCLUDE_FROM_ALL)
-add_subdirectory(pw_sync_zephyr EXCLUDE_FROM_ALL)
add_subdirectory(pw_sync EXCLUDE_FROM_ALL)
+add_subdirectory(pw_sync_freertos EXCLUDE_FROM_ALL)
add_subdirectory(pw_sync_stl EXCLUDE_FROM_ALL)
-add_subdirectory(pw_sys_io_zephyr EXCLUDE_FROM_ALL)
+add_subdirectory(pw_sync_zephyr EXCLUDE_FROM_ALL)
add_subdirectory(pw_sys_io EXCLUDE_FROM_ALL)
add_subdirectory(pw_sys_io_stdio EXCLUDE_FROM_ALL)
+add_subdirectory(pw_sys_io_zephyr EXCLUDE_FROM_ALL)
add_subdirectory(pw_system EXCLUDE_FROM_ALL)
add_subdirectory(pw_thread EXCLUDE_FROM_ALL)
add_subdirectory(pw_thread_freertos EXCLUDE_FROM_ALL)
diff --git a/PIGWEED_MODULES b/PIGWEED_MODULES
new file mode 100644
index 000000000..dfb7f2725
--- /dev/null
+++ b/PIGWEED_MODULES
@@ -0,0 +1,120 @@
+docker
+pw_allocator
+pw_analog
+pw_android_toolchain
+pw_arduino_build
+pw_assert
+pw_assert_basic
+pw_assert_log
+pw_assert_tokenized
+pw_assert_zephyr
+pw_base64
+pw_bloat
+pw_blob_store
+pw_bluetooth_hci
+pw_boot
+pw_boot_cortex_m
+pw_build
+pw_build_info
+pw_build_mcuxpresso
+pw_bytes
+pw_checksum
+pw_chrono
+pw_chrono_embos
+pw_chrono_freertos
+pw_chrono_stl
+pw_chrono_threadx
+pw_chrono_zephyr
+pw_cli
+pw_console
+pw_containers
+pw_cpu_exception
+pw_cpu_exception_cortex_m
+pw_crypto
+pw_docgen
+pw_doctor
+pw_env_setup
+pw_file
+pw_function
+pw_fuzzer
+pw_hdlc
+pw_hex_dump
+pw_i2c
+pw_i2c_mcuxpresso
+pw_interrupt
+pw_interrupt_cortex_m
+pw_interrupt_zephyr
+pw_kvs
+pw_libc
+pw_log
+pw_log_android
+pw_log_basic
+pw_log_null
+pw_log_rpc
+pw_log_string
+pw_log_tokenized
+pw_log_zephyr
+pw_malloc
+pw_malloc_freelist
+pw_metric
+pw_minimal_cpp_stdlib
+pw_module
+pw_multisink
+pw_package
+pw_persistent_ram
+pw_polyfill
+pw_preprocessor
+pw_presubmit
+pw_protobuf
+pw_protobuf_compiler
+pw_random
+pw_result
+pw_ring_buffer
+pw_router
+pw_rpc
+pw_snapshot
+pw_software_update
+pw_span
+pw_spi
+pw_status
+pw_stm32cube_build
+pw_stream
+pw_string
+pw_symbolizer
+pw_sync
+pw_sync_baremetal
+pw_sync_embos
+pw_sync_freertos
+pw_sync_stl
+pw_sync_threadx
+pw_sync_zephyr
+pw_sys_io
+pw_sys_io_arduino
+pw_sys_io_baremetal_lm3s6965evb
+pw_sys_io_baremetal_stm32f429
+pw_sys_io_emcraft_sf2
+pw_sys_io_mcuxpresso
+pw_sys_io_stdio
+pw_sys_io_stm32cube
+pw_sys_io_zephyr
+pw_system
+pw_target_runner
+pw_thread
+pw_thread_embos
+pw_thread_freertos
+pw_thread_stl
+pw_thread_threadx
+pw_tls_client
+pw_tls_client_boringssl
+pw_tls_client_mbedtls
+pw_tokenizer
+pw_tool
+pw_toolchain
+pw_trace
+pw_trace_tokenized
+pw_transfer
+pw_unit_test
+pw_varint
+pw_watch
+pw_web_ui
+pw_work_queue
diff --git a/WORKSPACE b/WORKSPACE
index 7d4d2d8f3..e95aa1b97 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -31,12 +31,13 @@ load("@cipd_deps//:cipd_init.bzl", "cipd_init")
cipd_init()
# Set up Python support.
-# Required by: rules_fuzzing.
+# Required by: rules_fuzzing, com_github_nanopb_nanopb.
# Used in modules: None.
http_archive(
name = "rules_python",
- sha256 = "934c9ceb552e84577b0faf1e5a2f0450314985b4d8712b2b70717dc679fdc01b",
- url = "https://github.com/bazelbuild/rules_python/releases/download/0.3.0/rules_python-0.3.0.tar.gz",
+ sha256 = "a30abdfc7126d497a7698c29c46ea9901c6392d6ed315171a6df5ce433aa4502",
+ strip_prefix = "rules_python-0.6.0",
+ url = "https://github.com/bazelbuild/rules_python/archive/0.6.0.tar.gz",
)
# Set up Starlark library.
@@ -122,6 +123,41 @@ rules_proto_grpc_toolchains()
rules_proto_grpc_repos()
+# Set up Protobuf rules.
+# Required by: pigweed, com_github_bazelbuild_buildtools.
+# Used in modules: //pw_protobuf.
+http_archive(
+ name = "com_google_protobuf",
+ sha256 = "c6003e1d2e7fefa78a3039f19f383b4f3a61e81be8c19356f85b6461998ad3db",
+ strip_prefix = "protobuf-3.17.3",
+ url = "https://github.com/protocolbuffers/protobuf/archive/v3.17.3.tar.gz",
+)
+
+load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
+
+protobuf_deps()
+
+# Setup Nanopb protoc plugin.
+# Required by: Pigweed.
+# Used in modules: pw_protobuf.
+git_repository(
+ name = "com_github_nanopb_nanopb",
+ commit = "e601fca6d9ed7fb5c09e2732452753b2989f128b",
+ remote = "https://github.com/nanopb/nanopb.git",
+)
+
+load("@com_github_nanopb_nanopb//:nanopb_deps.bzl", "nanopb_deps")
+
+nanopb_deps()
+
+load("@com_github_nanopb_nanopb//:python_deps.bzl", "nanopb_python_deps")
+
+nanopb_python_deps()
+
+load("@com_github_nanopb_nanopb//:nanopb_workspace.bzl", "nanopb_workspace")
+
+nanopb_workspace()
+
# Set up Bazel platforms.
# Required by: pigweed.
# Used in modules: //pw_build, (Assorted modules via select statements).
diff --git a/build_overrides/pi_pico.gni b/build_overrides/pi_pico.gni
new file mode 100644
index 000000000..e6343f601
--- /dev/null
+++ b/build_overrides/pi_pico.gni
@@ -0,0 +1,23 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+declare_args() {
+ # Since the GN build lives in Pigweed for now, PICO_ROOT is
+  # always relative to Pigweed's root.
+ PICO_ROOT = "${dir_pw_third_party}/pico_sdk"
+}
+
+import("${PICO_ROOT}/pi_pico.gni")
diff --git a/build_overrides/pigweed.gni b/build_overrides/pigweed.gni
index 2c057872e..04ef3b1b4 100644
--- a/build_overrides/pigweed.gni
+++ b/build_overrides/pigweed.gni
@@ -17,4 +17,5 @@ declare_args() {
dir_pigweed = "//"
}
+# Import modules.gni, which generates the modules list.
import("$dir_pigweed/modules.gni")
diff --git a/docs/BUILD.gn b/docs/BUILD.gn
index 84366b4cc..bd299067a 100644
--- a/docs/BUILD.gn
+++ b/docs/BUILD.gn
@@ -33,6 +33,7 @@ pw_doc_group("core_docs") {
]
sources = [
"code_of_conduct.rst",
+ "concepts/index.rst",
"contributing.rst",
"embedded_cpp_guide.rst",
"faq.rst",
@@ -44,137 +45,32 @@ pw_doc_group("core_docs") {
]
}
+pw_doc_group("release_notes") {
+ sources = [
+ "release_notes/2022_jan.rst",
+ "release_notes/index.rst",
+ ]
+}
+
# Documentation for upstream Pigweed targets.
group("target_docs") {
deps = [
"$dir_pigweed/targets/android:target_docs",
"$dir_pigweed/targets/arduino:target_docs",
"$dir_pigweed/targets/docs:target_docs",
+ "$dir_pigweed/targets/emcraft_sf2_som:docs",
"$dir_pigweed/targets/host:target_docs",
"$dir_pigweed/targets/host_device_simulator:target_docs",
"$dir_pigweed/targets/lm3s6965evb_qemu:target_docs",
"$dir_pigweed/targets/mimxrt595_evk:target_docs",
+ "$dir_pigweed/targets/rp2040:target_docs",
"$dir_pigweed/targets/stm32f429i_disc1:target_docs",
"$dir_pigweed/targets/stm32f429i_disc1_stm32cube:target_docs",
]
}
group("module_docs") {
- deps = [
- "$dir_docker:docs",
- "$dir_pw_allocator:docs",
- "$dir_pw_analog:docs",
- "$dir_pw_android_toolchain:docs",
- "$dir_pw_arduino_build:docs",
- "$dir_pw_assert:docs",
- "$dir_pw_assert_basic:docs",
- "$dir_pw_assert_log:docs",
- "$dir_pw_assert_tokenized:docs",
- "$dir_pw_assert_zephyr:docs",
- "$dir_pw_base64:docs",
- "$dir_pw_bloat:docs",
- "$dir_pw_blob_store:docs",
- "$dir_pw_bluetooth_hci:docs",
- "$dir_pw_boot:docs",
- "$dir_pw_boot_cortex_m:docs",
- "$dir_pw_build:docs",
- "$dir_pw_build_info:docs",
- "$dir_pw_build_mcuxpresso:docs",
- "$dir_pw_bytes:docs",
- "$dir_pw_checksum:docs",
- "$dir_pw_chrono:docs",
- "$dir_pw_chrono_embos:docs",
- "$dir_pw_chrono_freertos:docs",
- "$dir_pw_chrono_stl:docs",
- "$dir_pw_chrono_threadx:docs",
- "$dir_pw_chrono_zephyr:docs",
- "$dir_pw_cli:docs",
- "$dir_pw_console:docs",
- "$dir_pw_containers:docs",
- "$dir_pw_cpu_exception:docs",
- "$dir_pw_cpu_exception_cortex_m:docs",
- "$dir_pw_crypto:docs",
- "$dir_pw_docgen:docs",
- "$dir_pw_doctor:docs",
- "$dir_pw_env_setup:docs",
- "$dir_pw_file:docs",
- "$dir_pw_function:docs",
- "$dir_pw_fuzzer:docs",
- "$dir_pw_hdlc:docs",
- "$dir_pw_hex_dump:docs",
- "$dir_pw_i2c:docs",
- "$dir_pw_interrupt:docs",
- "$dir_pw_interrupt_cortex_m:docs",
- "$dir_pw_interrupt_zephyr:docs",
- "$dir_pw_kvs:docs",
- "$dir_pw_libc:docs",
- "$dir_pw_log:docs",
- "$dir_pw_log_basic:docs",
- "$dir_pw_log_null:docs",
- "$dir_pw_log_rpc:docs",
- "$dir_pw_log_string:docs",
- "$dir_pw_log_tokenized:docs",
- "$dir_pw_malloc:docs",
- "$dir_pw_malloc_freelist:docs",
- "$dir_pw_metric:docs",
- "$dir_pw_minimal_cpp_stdlib:docs",
- "$dir_pw_module:docs",
- "$dir_pw_multisink:docs",
- "$dir_pw_package:docs",
- "$dir_pw_persistent_ram:docs",
- "$dir_pw_polyfill:docs",
- "$dir_pw_preprocessor:docs",
- "$dir_pw_presubmit:docs",
- "$dir_pw_protobuf:docs",
- "$dir_pw_protobuf_compiler:docs",
- "$dir_pw_random:docs",
- "$dir_pw_result:docs",
- "$dir_pw_ring_buffer:docs",
- "$dir_pw_router:docs",
- "$dir_pw_rpc:docs",
- "$dir_pw_snapshot:docs",
- "$dir_pw_software_update:docs",
- "$dir_pw_span:docs",
- "$dir_pw_spi:docs",
- "$dir_pw_status:docs",
- "$dir_pw_stm32cube_build:docs",
- "$dir_pw_stream:docs",
- "$dir_pw_string:docs",
- "$dir_pw_symbolizer:docs",
- "$dir_pw_sync:docs",
- "$dir_pw_sync_baremetal:docs",
- "$dir_pw_sync_embos:docs",
- "$dir_pw_sync_freertos:docs",
- "$dir_pw_sync_stl:docs",
- "$dir_pw_sync_threadx:docs",
- "$dir_pw_sync_zephyr:docs",
- "$dir_pw_sys_io:docs",
- "$dir_pw_sys_io_arduino:docs",
- "$dir_pw_sys_io_baremetal_stm32f429:docs",
- "$dir_pw_sys_io_mcuxpresso:docs",
- "$dir_pw_sys_io_stdio:docs",
- "$dir_pw_sys_io_zephyr:docs",
- "$dir_pw_system:docs",
- "$dir_pw_target_runner:docs",
- "$dir_pw_thread:docs",
- "$dir_pw_thread_embos:docs",
- "$dir_pw_thread_freertos:docs",
- "$dir_pw_thread_stl:docs",
- "$dir_pw_thread_threadx:docs",
- "$dir_pw_tls_client:docs",
- "$dir_pw_tls_client_boringssl:docs",
- "$dir_pw_tls_client_mbedtls:docs",
- "$dir_pw_tokenizer:docs",
- "$dir_pw_toolchain:docs",
- "$dir_pw_trace:docs",
- "$dir_pw_trace_tokenized:docs",
- "$dir_pw_transfer:docs",
- "$dir_pw_unit_test:docs",
- "$dir_pw_varint:docs",
- "$dir_pw_watch:docs",
- "$dir_pw_web_ui:docs",
- "$dir_pw_work_queue:docs",
- ]
+ deps = pw_module_docs
}
group("third_party_docs") {
@@ -202,6 +98,7 @@ pw_doc_gen("docs") {
deps = [
":core_docs",
":module_docs",
+ ":release_notes",
":sphinx_themes.install",
":target_docs",
":third_party_docs",
diff --git a/docs/automated_analysis.rst b/docs/automated_analysis.rst
index 422bbfdd4..23a31780d 100644
--- a/docs/automated_analysis.rst
+++ b/docs/automated_analysis.rst
@@ -11,7 +11,6 @@ to verify the code of projects using Pigweed.
-------
Summary
-------
-
On presubmit or in CI we verify Pigweed using:
* pylint
@@ -34,7 +33,6 @@ Static analysis
PyLint
------
-
`PyLint`_ is a customizable Python linter. Pigweed complies with almost all
the default checks; see `.pylintrc`_ for details. PyLint detects problems such
as overly broad catch statements, unused arguments/variables, and mutable
@@ -51,7 +49,6 @@ your Pigweed-based project.
Mypy
----
-
Python 3 allows for `type annotations`_ for variables, function arguments, and
return values. Most, but not all, of Pigweed's Python code has type
annotations, and these annotations have caught real bugs in code that didn't
@@ -71,7 +68,6 @@ also included in a variety of presubmit steps, like ``static_analysis`` and
clang-tidy
----------
-
`clang-tidy`_ is a C++ "linter" and static analysis tool. It identifies
bug-prone patterns (e.g., use after move), non-idiomatic usage (e.g., creating
``std::unique_ptr`` with ``new`` rather than ``std::make_unique``), and
@@ -87,7 +83,8 @@ We do not currently enable the `Clang Static Analyzers`_ because they suffer
from false positives, and their findings are time-consuming to manually verify.
clang-tidy can be run with ``ninja static_analysis`` or ``pw presubmit --step
-static_analysis``.
+static_analysis``. Note that as a static analysis tool, clang-tidy will not
+produce any runnable binaries: it simply analyzes the source files.
.. _clang-tidy: https://clang.llvm.org/extra/clang-tidy/
.. _Abseil: https://abseil.io/
@@ -97,11 +94,6 @@ static_analysis``.
Clang sanitizers
================
-
-.. note::
- Running sanitizers in presubmit for all Pigweed code is work in progress.
- See https://bugs.pigweed.dev/514 for details.
-
We run all of Pigweed's unit tests with the additional instrumentation
described in this section. For more detail about these sanitizers, see the
`Github documentation`_.
@@ -113,11 +105,18 @@ described in this section. For more detail about these sanitizers, see the
* ubsan: `UndefinedBehaviorSanitizer`_ is a fast undefined behavior detector.
We use the default ``-fsanitize=undefined`` option.
+.. note::
+ Pigweed does not currently support msan. See https://bugs.pigweed.dev/560
+ for details.
+
The exact configurations we use for these sanitizers are in
`pw_toolchain/host_clang/BUILD.gn <https://cs.opensource.google/pigweed/pigweed/+/main:pw_toolchain/host_clang/BUILD.gn>`_.
You can see the current status of the sanitizer builds in the `Pigweed CI
console`_, as ``pigweed-linux-san-*``.
+Unlike clang-tidy, the clang sanitizers are runtime instrumentation: the
+instrumented binary needs to be run for issues to be detected.
+
.. _Github documentation: https://github.com/google/sanitizers
.. _AddressSanitizer: https://clang.llvm.org/docs/AddressSanitizer.html
.. _MemorySanitizer: https://clang.llvm.org/docs/MemorySanitizer.html
@@ -131,7 +130,7 @@ Fuzzers
`Fuzz testing`_ detects errors in software by providing it with randomly
generated inputs. We use `OSS-fuzz`_ to continuously uncover potential
vulnerabilities in Pigweed. `Dashboard with Pigweed's latest results`_. See
-the `pw_fuzzer <module-pw_fuzzer>`_ module documentation for more details.
+the :ref:`module-pw_fuzzer` module documentation for more details.
.. _Dashboard with Pigweed's latest results: https://oss-fuzz-build-logs.storage.googleapis.com/index.html#pigweed
.. _Fuzz testing: https://en.wikipedia.org/wiki/Fuzzing
@@ -145,7 +144,6 @@ Enabling analysis for your project
PyLint and Mypy
===============
-
PyLint and Mypy can be configured to run every time your project is built by
adding ``python.lint`` to your default build group. (You can also add one or both
individually using ``python.lint.mypy`` and ``python.lint.pylint``.) Likewise,
@@ -157,7 +155,6 @@ directly include the `python_checks.gn_python_lint`_ presubmit step.
clang-tidy
==========
-
`pw_toolchain/static_analysis_toolchain.gni`_ provides the
``pw_static_analysis_toolchain`` template that can be used to create a build
group performing static analysis. See :ref:`module-pw_toolchain` documentation
@@ -174,11 +171,48 @@ source file.
Clang sanitizers
================
+There are two ways to enable sanitizers for your build.
-.. note::
- This section is under construction.
+GN args on debug toolchains
+---------------------------
+If you are already building your tests with one of the following toolchains (or
+a toolchain derived from one of them):
+
+* ``pw_toolchain_host_clang.debug``
+* ``pw_toolchain_host_clang.speed_optimized``
+* ``pw_toolchain_host_clang.size_optimized``
+
+you can enable the clang sanitizers simply by setting the gn arg
+``pw_toolchain_SANITIZERS`` to the desired subset of
+``["address", "thread", "undefined"]``.
+
+Example
+^^^^^^^
+If your project defines a toolchain ``host_clang_debug`` that is derived from
+one of the above toolchains, and you'd like to run the ``pw_executable`` target
+``sample_binary`` defined in the ``BUILD.gn`` file in ``examples/sample`` with
+asan, you would run,
+
+.. code-block:: bash
+
+ gn gen out --args='pw_toolchain_SANITIZERS=["address"]'
+ ninja -C out host_clang_debug/obj/example/sample/bin/sample_binary
+ out/host_clang_debug/obj/example/sample/bin/sample_binary
+
+Sanitizer toolchains
+--------------------
+Otherwise, instead of using ``gn args`` you can build your tests with the
+appropriate toolchain from the following list (or a toolchain derived from one
+of them):
+
+* ``pw_toolchain_host_clang.asan``
+* ``pw_toolchain_host_clang.ubsan``
+* ``pw_toolchain_host_clang.tsan``
+
+See the :ref:`module-pw_toolchain` module documentation for more
+about Pigweed toolchains.
Fuzzers
=======
-See the `pw_fuzzer <module-pw_fuzzer>`_ module documentation.
+See the :ref:`module-pw_fuzzer` module documentation.
diff --git a/docs/build_system.rst b/docs/build_system.rst
index 2716d31ac..6cd32a54a 100644
--- a/docs/build_system.rst
+++ b/docs/build_system.rst
@@ -328,6 +328,13 @@ In upstream, Pigweed splits its top-level GN targets into a few logical groups,
which are described below. In order to build a GN target, it *must* be listed in
one of the groups in this file.
+.. important::
+
+ Pigweed's top-level ``BUILD.gn`` file should not be used by downstream
+ projects. Projects that wish to pull all of Pigweed's code into their build
+ may use the ``pw_modules`` and ``pw_module_tests`` variables in
+ ``modules.gni``.
+
apps
~~~~
This group defines the application images built in Pigweed. It lists all of the
@@ -372,10 +379,16 @@ pw_modules
~~~~~~~~~~
This group lists the main libraries for all of Pigweed's modules.
+The modules in the ``pw_modules`` group are listed in the ``pw_modules``
+variable, which is provided by ``modules.gni``.
+
pw_module_tests
~~~~~~~~~~~~~~~
All modules' unit tests are collected here, so that they can all be run at once.
+The test groups in the ``pw_module_tests`` group are listed in the
+``pw_module_tests`` variable, which is provided by ``modules.gni``.
+
pigweed_default
~~~~~~~~~~~~~~~
This group defines everything built in a Pigweed build invocation by collecting
diff --git a/docs/concepts/index.rst b/docs/concepts/index.rst
new file mode 100644
index 000000000..1caf7b597
--- /dev/null
+++ b/docs/concepts/index.rst
@@ -0,0 +1,111 @@
+.. _docs-concepts:
+
+=============
+About Pigweed
+=============
+
+Why Build Pigweed?
+==================
+Our goal is to make embedded software development efficient, robust, and
+heck, even delightful, for projects ranging from weekend Arduino experiments
+to commercial products selling in the millions.
+
+Embedded software development is notoriously arcane. Developers often have to
+track down vendor toolchains specific to the hardware they're targeting, write
+their code against hardware-specific SDKs/HALs, and limit themselves to a small
+subset of C. Project teams are on their own to figure out how to set up a build
+system, automated testing, serial communication, and many other embedded
+project fundamentals. This is error prone and takes effort away from developing
+the actual product!
+
+There are solutions on the market that promise to solve all of these problems
+with a monolithic framework—just write your code against the framework and use
+hardware the framework supports, and you get an efficient embedded development
+environment. But this approach doesn't work well for existing projects that
+weren't built on the framework from the beginning or for projects that have
+specific needs the framework wasn't designed for. We know from experience that
+this approach alone doesn't meet our goal.
+
+So we have set out to build a platform that supports successful embedded
+developers at every scale by allowing them to adopt as much or as little of
+what Pigweed provides as they need, in the way that works best for their
+project.
+
+How Pigweed Works
+=================
+Pigweed provides four foundational pillars to support your embedded development:
+
+* :ref:`A comprehensive set of libraries for embedded development<docs-concepts-embedded-development-libraries>`
+* :ref:`A hermetic and replicable development environment<docs-concepts-development-environment>`
+* :ref:`A system for building, testing, and linting your project<docs-concepts-build-system>`
+* :ref:`A full framework for new projects that want a turn-key solution<docs-concepts-full-framework>`
+
+.. _docs-concepts-embedded-development-libraries:
+
+Embedded Development Libraries
+------------------------------
+Pigweed enables you to use modern C++ and software development best practices in
+your embedded project without compromising performance or increasing memory use
+compared to conventional embedded C.
+
+We provide libraries (modules) for :ref:`strings<module-pw_string>`,
+:ref:`time<module-pw_chrono>`, :ref:`assertions<module-pw_assert>`,
+:ref:`logging<module-pw_log>`, :ref:`serial communication<module-pw_spi>`,
+:ref:`remote procedure calls (RPC)<module-pw_rpc>`, and
+:ref:`much more<docs-module-guides>`.
+
+These modules are designed to work both on your host machine and on a wide
+variety of target devices. We achieve this by writing them in an inherently
+portable way, or through the facade/backend pattern. As a result, you can write
+most or all of your code to run transparently on your host machine and targets.
+
+.. _docs-concepts-development-environment:
+
+Development Environment
+-----------------------
+Managing toolchains, build systems, and other software needed for a project is
+complex. Pigweed provides all of this out of the box for Linux, Mac, and
+Windows systems in a sealed environment that leaves the rest of your system
+untouched. Getting new developers started is as simple as cloning your project
+repository and activating the Pigweed environment.
+
+.. _docs-concepts-build-system:
+
+Build System
+------------
+Pigweed modules are built to integrate seamlessly into projects using GN. We
+are rapidly expanding our good support for CMake and nascent support for Bazel
+so you can use your build system of choice. For new projects, Pigweed provides a
+build system you can integrate your own code into that works out of the box.
+
+.. _docs-concepts-full-framework:
+
+Full Framework (coming in 2022)
+-------------------------------
+For those who want a fully-integrated solution that provides everything Pigweed
+has to offer with an opinionated project structure, we are working diligently
+on a :ref:`Pigweed framework<module-pw_system>`. Stay tuned for more news to
+come! In the meantime, we invite you to discuss this and collaborate with us
+on `Discord <https://discord.gg/M9NSeTA>`_.
+
+.. _docs-concepts-right-for-my-project:
+
+Is Pigweed Right for My Project?
+================================
+Pigweed is still in its early stages, and while we have ambitious plans for it,
+Pigweed might not be the right fit for your project today. Here are some things
+to keep in mind:
+
+* Many individual modules are stable and are running on shipped devices today.
+ If any of those modules meet your needs, you should feel safe bringing them
+ into your project.
+
+* Some modules are in very early and active stages of development. They likely
+ have unstable APIs and may not work on all supported targets. If this is the
+ case, it will be indicated in the module's documentation. If you're interested
+ in contributing to the development of one of these modules, we encourage you
+ to experiment with them. Otherwise they aren't ready for use in most projects.
+
+* Setting up new projects to use Pigweed is currently not very easy, but we are
+ working to address that. In the meantime, join the Pigweed community on
+ `Discord <https://discord.gg/M9NSeTA>`_ to get help.
diff --git a/docs/conf.py b/docs/conf.py
index 6dac3f515..519b87bc9 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -13,6 +13,7 @@
# the License.
"""Pigweed's Sphinx configuration."""
+from datetime import date
import sphinx
# The suffix of source filenames.
@@ -23,7 +24,7 @@ master_doc = 'index'
# General information about the project.
project = 'Pigweed'
-copyright = '2020 The Pigweed Authors' # pylint: disable=redefined-builtin
+copyright = f'{date.today().year} The Pigweed Authors' # pylint: disable=redefined-builtin
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -39,6 +40,7 @@ pygments_style = 'pigweed-code-light'
pygments_dark_style = 'pigweed-code'
extensions = [
+ 'pw_docgen.sphinx.google_analytics', # Enables optional Google Analytics
'sphinx.ext.autodoc', # Automatic documentation for Python code
'sphinx.ext.napoleon', # Parses Google-style docstrings
'sphinxcontrib.mermaid',
diff --git a/docs/contributing.rst b/docs/contributing.rst
index 77a4bf1c3..beb43f1de 100644
--- a/docs/contributing.rst
+++ b/docs/contributing.rst
@@ -373,8 +373,8 @@ Running local presubmits
To speed up the review process, consider adding :ref:`module-pw_presubmit` as a
git push hook using the following command:
-**Linux/macOS**
-
+Linux/macOS
+^^^^^^^^^^^
.. code:: bash
$ pw presubmit --install
@@ -398,6 +398,18 @@ example) you may push using this command:
$ git push origin HEAD:refs/for/main --no-verify
+Presubmit and branch management
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+When creating new feature branches, make sure to specify the upstream branch to
+track, e.g.
+
+.. code:: bash
+
+ $ git checkout -b myfeature origin/main
+
+When tracking an upstream branch, ``pw presubmit`` will only run checks on the
+modified files, rather than the entire repository.
+
.. _Sphinx: https://www.sphinx-doc.org/
.. inclusive-language: disable
diff --git a/docs/faq.rst b/docs/faq.rst
index 3b51b4a0c..7a6f248e1 100644
--- a/docs/faq.rst
+++ b/docs/faq.rst
@@ -144,6 +144,7 @@ to provide equivalent binaries, which is some effort.
Host platforms that we are likely to support in the future
..........................................................
+- **Mac on ARM (M1)** - This is currently supported through Rosetta.
- **Linux on ARM** - At time of writing (mid 2020), we do not support ARM-based
host platforms. However, we would like to support this eventually.
- **Windows on WSL2 x86-64** - There are some minor issues preventing WSL2 on
diff --git a/docs/getting_started.rst b/docs/getting_started.rst
index 8c3949ff7..dc9af5567 100644
--- a/docs/getting_started.rst
+++ b/docs/getting_started.rst
@@ -102,7 +102,7 @@ Prerequisites
Most Linux installations should work out of box, and not require any manual
installation of prerequisites beyond basics like ``git`` and
-``build-essential``. Make sure gcc is set to gcc-8.
+``build-essential`` (or the equivalent for your distro).
**macOS**
diff --git a/docs/index.rst b/docs/index.rst
index a89fec9b6..0b5d9015d 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,3 +1,4 @@
+.. _docs-root:
.. highlight:: sh
.. toctree::
@@ -6,6 +7,8 @@
Home <self>
docs/getting_started
+ docs/concepts/index
+ docs/release_notes/index
Source Code <https://cs.opensource.google/pigweed/pigweed>
Code Reviews <https://pigweed-review.googlesource.com>
Mailing List <https://groups.google.com/forum/#!forum/pigweed>
diff --git a/docs/module_structure.rst b/docs/module_structure.rst
index e22bf4e92..f97654f9e 100644
--- a/docs/module_structure.rst
+++ b/docs/module_structure.rst
@@ -427,39 +427,39 @@ To create a new Pigweed module, follow the below steps.
accidentally duplicating work, or avoiding writing code that won't get
accepted.
-1. Create module folder following `Module name`_ guidelines
+1. Create module folder following `Module name`_ guidelines.
2. Add `C++ public headers`_ files in
``{pw_module_dir}/public/{pw_module_name}/``
3. Add `C++ implementation files`_ files in ``{pw_module_dir}/``
4. Add module documentation
- - Add ``{pw_module_dir}/README.md`` that has a module summary
- - Add ``{pw_module_dir}/docs.rst`` that contains the main module
- documentation
+ - Add ``{pw_module_dir}/README.md`` that has a module summary
+ - Add ``{pw_module_dir}/docs.rst`` that contains the main module
+ documentation
-5. Add build support inside of new module
+5. Add GN build support in ``{pw_module_dir}/BUILD.gn``
- - Add GN with ``{pw_module_dir}/BUILD.gn``
- - Add Bazel with ``{pw_module_dir}/BUILD``
- - Add CMake with ``{pw_module_dir}/CMakeLists.txt``
+ - Declare tests in ``pw_test_group("tests")``
+ - Declare docs in ``pw_docs_group("docs")``
-6. Add folder alias for new module variable in ``/modules.gni``
+6. Add Bazel build support in ``{pw_module_dir}/BUILD.bazel``
- - ``dir_pw_new = get_path_info("pw_new", "abspath")``
+7. Add CMake build support in ``{pw_module_dir}/CMakeLists.txt``
-7. Add new module to main GN build
+8. Add the new module to the ``/PIGWEED_MODULES`` list
- - in ``/BUILD.gn`` to ``group("pw_modules")`` using folder alias variable
+ Modules must be listed one per line with no extra spaces or comments. This
+ automatically adds the new module, its tests, and its docs, to the GN build.
-8. Add test target for new module in ``/BUILD.gn`` to
- ``pw_test_group("pw_module_tests")``
-9. Add new module to CMake build
+9. Update the generated Pigweed modules lists file
- - In ``/CMakeLists.txt`` add ``add_subdirectory(pw_new)``
+ .. code-block:: bash
-10. Add the new module to docs module
+ ninja -C out update_modules
- - Add in ``docs/BUILD.gn`` to ``group("module_docs")``
+10. Add the new module to CMake build
+
+ - In ``/CMakeLists.txt`` add ``add_subdirectory(pw_new)``
11. Run :ref:`module-pw_module-module-check`
diff --git a/docs/release_notes/2022_jan.rst b/docs/release_notes/2022_jan.rst
new file mode 100644
index 000000000..f7244e5e2
--- /dev/null
+++ b/docs/release_notes/2022_jan.rst
@@ -0,0 +1,192 @@
+.. _docs-release-notes-2022-jan:
+
+===================================
+Pigweed: What's New in January 2022
+===================================
+Happy new year from the Pigweed team! We’re excited to share what we’ve been up
+to this month and we’re really looking forward to what 2022 will bring to the
+Pigweed community.
+
+:ref:`Pigweed<docs-root>` is a collection of libraries and tools for building
+robust embedded software, efficiently. Pigweed allows you to write code that
+runs transparently on both your host machine and tiny 32-bit microcontrollers
+like those in the :ref:`STM32<target-stm32f429i-disc1>` and
+:ref:`Arduino<target-arduino>` families, while giving you the comforts of modern
+software development traditionally lacking in embedded systems, like
+:ref:`easy unit testing<module-pw_unit_test>`,
+:ref:`powerful build systems<docs-build-system>`,
+:ref:`flexible logging<module-pw_log>`, and
+:ref:`reliable communication<module-pw_rpc>`.
+
+.. admonition:: Note
+ :class: warning
+
+ Many Pigweed modules are already shipping in commercial products, but it is
+ still an early access project. Find out if
+ :ref:`Pigweed is right for you<docs-concepts-right-for-my-project>`.
+
+Pigweed is a free and open source project and we welcome contributions! Join us
+on `Discord <https://discord.gg/M9NSeTA>`_ to share feedback, ask questions, and
+get involved with the Pigweed community!
+
+------------------------------
+Experimental Pigweed framework
+------------------------------
+.. admonition:: tl;dr
+ :class: checkmark
+
+ We’re starting the “whole OS” framework version of Pigweed! It’s not ready
+ for use yet but you might want to take a peek.
+
+Pigweed is designed to be highly modular—you can use as many or as few of the
+Pigweed modules as you need for your project, and those modules will work
+flexibly in any project structure. This works great when you want to add Pigweed
+super powers like hybrid host/target unit testing or RPC communication to an
+existing project. While Pigweed gives you nearly all of the tools you need to
+efficiently build a robust, reliable embedded project, until now we haven’t had
+a great solution for building a new project on Pigweed.
+
+The Pigweed framework assembles an opinionated project structure, build system,
+and development environment that does three key things:
+
+* Takes care of difficult but unproductive project plumbing issues like setting
+ up a target toolchain and providing support for
+ :ref:`OS abstractions<docs-os_abstraction_layers>`.
+
+* Configures Pigweed module backends that give you logging, asserts, threads,
+ dynamic memory allocation, and more, that work transparently both on host and
+ on target
+
+* Sets up a productive development environment with rich code analysis and
+ powerful device interaction tools
+
+You can experiment with this right now by checking out the ``pw_system``
+:ref:`documentation<module-pw_system>`. The experimental configuration leverages
+FreeRTOS and runs on the STM32F429I Discovery board. With a
+:ref:`few simple commands<target-stm32f429i-disc1-stm32cube>`, you can have a
+complete embedded development environment set up and focus on building your
+product.
+
+.. warning::
+
+ The Pigweed framework is still in very active development and you should
+ expect breaking changes in the future. If you’re experimenting with it, we
+ would love to hear from you! Join us on
+ `Discord <https://discord.gg/M9NSeTA>`_!
+
+-------------------------------------
+Support for plugins in ``pw_console``
+-------------------------------------
+Teams that use Pigweed quickly come to rely on the
+:ref:`console<module-pw_console>` as a vital tool for interacting with their
+devices via RPC. It’s now possible to tailor the console to meet your project’s
+specific needs through a new :ref:`plugin interface<module-pw_console-plugins>`.
+You can build your own menus, window panes, keybindings, and clickable buttons
+to truly make ``pw_console`` your own.
+
+How are you using the Pigweed console in your project? Let us know on
+`Discord <https://discord.gg/M9NSeTA>`_!
+
+------------------------------------
+Expanded support for Bazel and CMake
+------------------------------------
+Pigweed’s primary build system is
+`GN (Generate Ninja) <https://gn.googlesource.com/gn>`_, but to make it easier
+to use Pigweed modules in existing projects, we have been expanding support for
+the `Bazel <https://bazel.build/>`_ and `CMake <https://cmake.org/>`_ build
+systems. Right now, the best way to determine which build systems a module
+supports is to look out for ``BUILD.gn``, ``BUILD.bazel`` and ``CMakeLists.txt``
+files (respectively) in module directories. While we work on improving build
+system support and documentation, check out the
+:ref:`build system documentation<docs-build-system>` for more detailed
+information and join us on Discord for support.
+
+----------------------------------------
+Changes to the RPC ``ChannelOutput`` API
+----------------------------------------
+RPC endpoints use :ref:`ChannelOutput<module-pw_rpc-ChannelOutput>` instances to
+send packets encoding RPC data. To send an encoded RPC packet, we need a buffer
+containing the packet’s data. In the past, we could request a buffer by doing
+something like this:
+
+.. code-block:: cpp
+
+ auto buffer = pw::rpc::ChannelOutput::AcquireBuffer(buffer_size)
+ // fill in the buffer here
+ pw::rpc::ChannelOutput::SendAndReleaseBuffer(buffer)
+
+The ``ChannelOutput::AcquireBuffer`` and ``ChannelOutput::SendAndReleaseBuffer``
+methods are no longer part of ``ChannelOutput``’s public API, making its
+internal buffer private. Now, we create our own buffer and ``ChannelOutput`` is
+simply responsible for sending it:
+
+.. code-block:: cpp
+
+ auto buffer = ... // create your own local buffer with RPC packet data
+ pw::rpc::ChannelOutput::Send(buffer)
+
+This approach avoids several tricky concurrency issues related to buffer
+lifetimes, and simplifies the ``ChannelOutput`` API. It also opens up the
+possibility of projects managing RPC buffers in more flexible ways, e.g. via
+dynamically-allocated memory or separate shared memory mechanisms.
+
+.. warning::
+
+ This is a breaking change if you update pw_rpc, but one that can be fixed
+ quickly.
+
+We’re actively reviewing the RPC API with a view towards significantly improving
+it in the future. Share your input with us on
+`Discord <https://discord.gg/M9NSeTA>`_!
+
+------------
+More Updates
+------------
+* It’s now possible to generate a token database from a list of strings in a
+ JSON file for ``pw_tokenizer``. This can be useful when you need to tokenize
+ strings that can’t be parsed from compiled binaries.
+
+* ``pw_assert``'s new ``pw_assert_tokenized`` backend provides a much more
+  space-efficient implementation compared to using ``pw_assert_log`` with
+  ``pw_log_tokenized``. However, there are trade-offs to consider, so check out
+  the :ref:`documentation<module-pw_assert_tokenized>`.
+
+* CMake builds now support compile-time module configuration similar to GN
+ through the use of the ``pw_add_module_config`` and ``pw_set_module_config``
+ functions.
+
+* In ``pw_build``, it is now possible to set a specific working directory for
+ :ref:`pw_exec<module-pw_build-pw_exec>` actions.
+
+* ``pw_cpu_exception`` now supports the ARMv8M Mainline architecture in
+ ``pw_cpu_exception_cortex_m``. This allows us to take advantage of stack limit
+ boundary features in microcontrollers using that architecture, like Cortex M33
+ and M35P.
+
+------------
+Get Involved
+------------
+.. tip::
+
+ We welcome contributions from the community! Here are just a few
+ opportunities to get involved.
+
+* Pigweed now includes GN build files for
+ `TinyUSB <https://github.com/hathach/tinyusb>`_, a popular USB library for
+ embedded systems. Projects can now include it by cloning the TinyUSB
+ repository and configuring GN to build it. But right now, we lack interfaces
+ between TinyUSB and Pigweed abstractions like pw_stream. This is a great
+ opportunity to help get very useful functionality across the finish line.
+
+* We’re very interested in supporting the
+ `Raspberry Pi Pico <https://www.raspberrypi.com/products/raspberry-pi-pico/>`_
+ and the ecosystem of devices using the RP2040 microcontroller. We will be
+ working in earnest on this in the coming months and welcome anyone who wants
+ to lend a helping hand!
+
+* Evolving the Pigweed framework from its current experimental state to a
+ relatively complete embedded project platform is one of our major focuses this
+ year, and we want your help. That help can range from providing input on what
+ you’re looking for in a framework, to building small projects with it and
+ providing feedback, up to contributing directly to its development. Join us to
+ talk about it on `Discord <https://discord.gg/M9NSeTA>`_!
diff --git a/docs/release_notes/index.rst b/docs/release_notes/index.rst
new file mode 100644
index 000000000..4340b002d
--- /dev/null
+++ b/docs/release_notes/index.rst
@@ -0,0 +1,10 @@
+.. _docs-release-notes:
+
+=============
+Release Notes
+=============
+
+.. toctree::
+ :titlesonly:
+
+ January 2022 <2022_jan>
diff --git a/modules.gni b/modules.gni
index 42a8e5aa3..fc6f47b3f 100644
--- a/modules.gni
+++ b/modules.gni
@@ -1,4 +1,4 @@
-# Copyright 2021 The Pigweed Authors
+# Copyright 2022 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
@@ -12,129 +12,15 @@
# License for the specific language governing permissions and limitations under
# the License.
+# Generate a .gni file with a GN arg for each module and lists of modules and
+# tests. Then, import the generated .gni file.
+#
+# To avoid generating it multiple times, the modules list is only generated by
+# the default toolchain. The default toolchain runs before any other toolchains,
+# so the module list will be created before it is used by other toolchains.
+
+import("pw_build/generated_pigweed_modules_lists.gni")
+
declare_args() {
- # This file defines a directory variable for each of Pigweed's modules. This
- # allows modules to be moved or swapped out without breaking existing builds.
- # All module variables are prefixed with dir_.
- dir_docker = get_path_info("docker", "abspath")
- dir_pw_analog = get_path_info("pw_analog", "abspath")
- dir_pw_allocator = get_path_info("pw_allocator", "abspath")
- dir_pw_android_toolchain = get_path_info("pw_android_toolchain", "abspath")
- dir_pw_arduino_build = get_path_info("pw_arduino_build", "abspath")
- dir_pw_assert = get_path_info("pw_assert", "abspath")
- dir_pw_assert_basic = get_path_info("pw_assert_basic", "abspath")
- dir_pw_assert_log = get_path_info("pw_assert_log", "abspath")
- dir_pw_assert_tokenized = get_path_info("pw_assert_tokenized", "abspath")
- dir_pw_assert_zephyr = get_path_info("pw_assert_zephyr", "abspath")
- dir_pw_base64 = get_path_info("pw_base64", "abspath")
- dir_pw_bloat = get_path_info("pw_bloat", "abspath")
- dir_pw_blob_store = get_path_info("pw_blob_store", "abspath")
- dir_pw_boot = get_path_info("pw_boot", "abspath")
- dir_pw_boot_cortex_m = get_path_info("pw_boot_cortex_m", "abspath")
- dir_pw_build = get_path_info("pw_build", "abspath")
- dir_pw_build_info = get_path_info("pw_build_info", "abspath")
- dir_pw_build_mcuxpresso = get_path_info("pw_build_mcuxpresso", "abspath")
- dir_pw_bytes = get_path_info("pw_bytes", "abspath")
- dir_pw_checksum = get_path_info("pw_checksum", "abspath")
- dir_pw_chrono = get_path_info("pw_chrono", "abspath")
- dir_pw_chrono_embos = get_path_info("pw_chrono_embos", "abspath")
- dir_pw_chrono_freertos = get_path_info("pw_chrono_freertos", "abspath")
- dir_pw_chrono_stl = get_path_info("pw_chrono_stl", "abspath")
- dir_pw_chrono_threadx = get_path_info("pw_chrono_threadx", "abspath")
- dir_pw_chrono_zephyr = get_path_info("pw_chrono_zephyr", "abspath")
- dir_pw_cli = get_path_info("pw_cli", "abspath")
- dir_pw_console = get_path_info("pw_console", "abspath")
- dir_pw_containers = get_path_info("pw_containers", "abspath")
- dir_pw_cpu_exception = get_path_info("pw_cpu_exception", "abspath")
- dir_pw_cpu_exception_cortex_m =
- get_path_info("pw_cpu_exception_cortex_m", "abspath")
- dir_pw_crypto = get_path_info("pw_crypto", "abspath")
- dir_pw_docgen = get_path_info("pw_docgen", "abspath")
- dir_pw_doctor = get_path_info("pw_doctor", "abspath")
- dir_pw_env_setup = get_path_info("pw_env_setup", "abspath")
- dir_pw_file = get_path_info("pw_file", "abspath")
- dir_pw_function = get_path_info("pw_function", "abspath")
- dir_pw_fuzzer = get_path_info("pw_fuzzer", "abspath")
- dir_pw_bluetooth_hci = get_path_info("pw_bluetooth_hci", "abspath")
- dir_pw_hex_dump = get_path_info("pw_hex_dump", "abspath")
- dir_pw_hdlc = get_path_info("pw_hdlc", "abspath")
- dir_pw_i2c = get_path_info("pw_i2c", "abspath")
- dir_pw_interrupt = get_path_info("pw_interrupt", "abspath")
- dir_pw_interrupt_cortex_m = get_path_info("pw_interrupt_cortex_m", "abspath")
- dir_pw_interrupt_zephyr = get_path_info("pw_interrupt_zephyr", "abspath")
- dir_pw_kvs = get_path_info("pw_kvs", "abspath")
- dir_pw_libc = get_path_info("pw_libc", "abspath")
- dir_pw_log = get_path_info("pw_log", "abspath")
- dir_pw_log_basic = get_path_info("pw_log_basic", "abspath")
- dir_pw_log_null = get_path_info("pw_log_null", "abspath")
- dir_pw_log_rpc = get_path_info("pw_log_rpc", "abspath")
- dir_pw_log_string = get_path_info("pw_log_string", "abspath")
- dir_pw_log_tokenized = get_path_info("pw_log_tokenized", "abspath")
- dir_pw_malloc = get_path_info("pw_malloc", "abspath")
- dir_pw_malloc_freelist = get_path_info("pw_malloc_freelist", "abspath")
- dir_pw_metric = get_path_info("pw_metric", "abspath")
- dir_pw_minimal_cpp_stdlib = get_path_info("pw_minimal_cpp_stdlib", "abspath")
- dir_pw_module = get_path_info("pw_module", "abspath")
- dir_pw_multisink = get_path_info("pw_multisink", "abspath")
- dir_pw_package = get_path_info("pw_package", "abspath")
- dir_pw_persistent_ram = get_path_info("pw_persistent_ram", "abspath")
- dir_pw_polyfill = get_path_info("pw_polyfill", "abspath")
- dir_pw_preprocessor = get_path_info("pw_preprocessor", "abspath")
- dir_pw_presubmit = get_path_info("pw_presubmit", "abspath")
- dir_pw_protobuf = get_path_info("pw_protobuf", "abspath")
- dir_pw_protobuf_compiler = get_path_info("pw_protobuf_compiler", "abspath")
- dir_pw_random = get_path_info("pw_random", "abspath")
- dir_pw_result = get_path_info("pw_result", "abspath")
- dir_pw_ring_buffer = get_path_info("pw_ring_buffer", "abspath")
- dir_pw_router = get_path_info("pw_router", "abspath")
- dir_pw_rpc = get_path_info("pw_rpc", "abspath")
- dir_pw_snapshot = get_path_info("pw_snapshot", "abspath")
- dir_pw_software_update = get_path_info("pw_software_update", "abspath")
- dir_pw_span = get_path_info("pw_span", "abspath")
- dir_pw_spi = get_path_info("pw_spi", "abspath")
- dir_pw_status = get_path_info("pw_status", "abspath")
- dir_pw_stm32cube_build = get_path_info("pw_stm32cube_build", "abspath")
- dir_pw_stream = get_path_info("pw_stream", "abspath")
- dir_pw_string = get_path_info("pw_string", "abspath")
- dir_pw_symbolizer = get_path_info("pw_symbolizer", "abspath")
- dir_pw_sync = get_path_info("pw_sync", "abspath")
- dir_pw_sync_embos = get_path_info("pw_sync_embos", "abspath")
- dir_pw_sync_freertos = get_path_info("pw_sync_freertos", "abspath")
- dir_pw_sync_baremetal = get_path_info("pw_sync_baremetal", "abspath")
- dir_pw_sync_stl = get_path_info("pw_sync_stl", "abspath")
- dir_pw_sync_threadx = get_path_info("pw_sync_threadx", "abspath")
- dir_pw_sync_zephyr = get_path_info("pw_sync_zephyr", "abspath")
- dir_pw_sys_io = get_path_info("pw_sys_io", "abspath")
- dir_pw_sys_io_baremetal_lm3s6965evb =
- get_path_info("pw_sys_io_baremetal_lm3s6965evb", "abspath")
- dir_pw_sys_io_baremetal_stm32f429 =
- get_path_info("pw_sys_io_baremetal_stm32f429", "abspath")
- dir_pw_sys_io_arduino = get_path_info("pw_sys_io_arduino", "abspath")
- dir_pw_sys_io_mcuxpresso = get_path_info("pw_sys_io_mcuxpresso", "abspath")
- dir_pw_sys_io_stdio = get_path_info("pw_sys_io_stdio", "abspath")
- dir_pw_sys_io_stm32cube = get_path_info("pw_sys_io_stm32cube", "abspath")
- dir_pw_sys_io_zephyr = get_path_info("pw_sys_io_zephyr", "abspath")
- dir_pw_system = get_path_info("pw_system", "abspath")
- dir_pw_target_runner = get_path_info("pw_target_runner", "abspath")
- dir_pw_thread = get_path_info("pw_thread", "abspath")
- dir_pw_thread_stl = get_path_info("pw_thread_stl", "abspath")
- dir_pw_thread_embos = get_path_info("pw_thread_embos", "abspath")
- dir_pw_thread_freertos = get_path_info("pw_thread_freertos", "abspath")
- dir_pw_thread_threadx = get_path_info("pw_thread_threadx", "abspath")
dir_pw_third_party = get_path_info("third_party", "abspath")
- dir_pw_tls_client = get_path_info("pw_tls_client", "abspath")
- dir_pw_tls_client_boringssl =
- get_path_info("pw_tls_client_boringssl", "abspath")
- dir_pw_tls_client_mbedtls = get_path_info("pw_tls_client_mbedtls", "abspath")
- dir_pw_tokenizer = get_path_info("pw_tokenizer", "abspath")
- dir_pw_tool = get_path_info("pw_tool", "abspath")
- dir_pw_toolchain = get_path_info("pw_toolchain", "abspath")
- dir_pw_trace = get_path_info("pw_trace", "abspath")
- dir_pw_trace_tokenized = get_path_info("pw_trace_tokenized", "abspath")
- dir_pw_transfer = get_path_info("pw_transfer", "abspath")
- dir_pw_unit_test = get_path_info("pw_unit_test", "abspath")
- dir_pw_varint = get_path_info("pw_varint", "abspath")
- dir_pw_watch = get_path_info("pw_watch", "abspath")
- dir_pw_web_ui = get_path_info("pw_web_ui", "abspath")
- dir_pw_work_queue = get_path_info("pw_work_queue", "abspath")
}
diff --git a/pw_arduino_build/arduino.gni b/pw_arduino_build/arduino.gni
index f35836d51..3ba0000e9 100644
--- a/pw_arduino_build/arduino.gni
+++ b/pw_arduino_build/arduino.gni
@@ -13,6 +13,7 @@
# the License.
import("//build_overrides/pigweed.gni")
+import("//build_overrides/pigweed_environment.gni")
declare_args() {
# Enable/disable Arduino builds via group("arduino").
@@ -61,8 +62,7 @@ if (pw_arduino_build_CORE_PATH != "") {
pw_arduino_build_PACKAGE_NAME + " list-boards")
_compiler_path_override =
- rebase_path(getenv("_PW_ACTUAL_ENVIRONMENT_ROOT") + "/cipd/pigweed/bin",
- root_build_dir)
+ rebase_path(dir_cipd_pigweed + "/bin", root_build_dir)
arduino_core_library_path = "$_arduino_selected_core_path/hardware/" +
"$pw_arduino_build_PACKAGE_NAME/libraries"
diff --git a/pw_assert/BUILD.gn b/pw_assert/BUILD.gn
index ee2e59a27..15882105f 100644
--- a/pw_assert/BUILD.gn
+++ b/pw_assert/BUILD.gn
@@ -18,21 +18,13 @@ import("$dir_pw_build/facade.gni")
import("$dir_pw_build/module_config.gni")
import("$dir_pw_docgen/docs.gni")
import("$dir_pw_unit_test/test.gni")
+import("backend.gni")
declare_args() {
# The build target that overrides the default configuration options for this
# module. This should point to a source set that provides defines through a
# public config (which may -include a file or add defines directly).
pw_assert_CONFIG = pw_build_DEFAULT_MODULE_CONFIG
-
- # Backend for the pw_assert module's CHECK facade.
- pw_assert_BACKEND = ""
-
- # Backend for the pw_assert module's ASSERT facade.
- #
- # Warning: This naming is transitional. Modifying this build argument WILL
- # result in future breakages. (pwbug/246)
- pw_assert_LITE_BACKEND = "${dir_pw_assert}:lite_compatibility_backend"
}
config("public_include_path") {
diff --git a/pw_assert/CMakeLists.txt b/pw_assert/CMakeLists.txt
index 93308d631..b58bb2700 100644
--- a/pw_assert/CMakeLists.txt
+++ b/pw_assert/CMakeLists.txt
@@ -21,4 +21,4 @@ pw_add_facade(pw_assert
pw_preprocessor
${pw_assert_CONFIG}
)
-target_include_directories(pw_assert PUBLIC assert_lite_public_overrides)
+target_include_directories(pw_assert INTERFACE assert_lite_public_overrides)
diff --git a/pw_assert/backend.gni b/pw_assert/backend.gni
new file mode 100644
index 000000000..c4876cfac
--- /dev/null
+++ b/pw_assert/backend.gni
@@ -0,0 +1,26 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+declare_args() {
+ # Backend for the pw_assert module's CHECK facade.
+ pw_assert_BACKEND = ""
+
+ # Backend for the pw_assert module's ASSERT facade.
+ #
+ # Warning: This naming is transitional. Modifying this build argument WILL
+ # result in future breakages. (pwbug/246)
+ pw_assert_LITE_BACKEND = "${dir_pw_assert}:lite_compatibility_backend"
+}
diff --git a/pw_assert_log/BUILD.bazel b/pw_assert_log/BUILD.bazel
index 58225b7de..3f21931ca 100644
--- a/pw_assert_log/BUILD.bazel
+++ b/pw_assert_log/BUILD.bazel
@@ -22,7 +22,7 @@ package(default_visibility = ["//visibility:public"])
licenses(["notice"])
pw_cc_library(
- name = "headers",
+ name = "pw_assert_log",
srcs = [
"assert_log.cc",
],
@@ -35,16 +35,23 @@ pw_cc_library(
"public_overrides",
],
deps = [
+ "//pw_assert:facade",
"//pw_log",
"//pw_preprocessor",
],
)
pw_cc_library(
- name = "pw_assert_log",
+ name = "lite_backend",
+ hdrs = [
+ "assert_lite_public_overrides/pw_assert_backend/assert_lite_backend.h",
+ "public/pw_assert_log/assert_lite_log.h",
+ ],
+ includes = [
+ "assert_lite_public_overrides",
+ "public",
+ ],
deps = [
- ":headers",
- "//pw_assert:facade",
"//pw_preprocessor",
],
)
diff --git a/pw_assert_log/BUILD.gn b/pw_assert_log/BUILD.gn
index f1ace4669..addc8882c 100644
--- a/pw_assert_log/BUILD.gn
+++ b/pw_assert_log/BUILD.gn
@@ -25,28 +25,47 @@ config("backend_config") {
include_dirs = [ "public_overrides" ]
}
+config("lite_backend_overrides") {
+ include_dirs = [ "assert_lite_public_overrides" ]
+}
+
+# This backend implements pw_assert's PW_CHECK()/PW_CRASH() macros via PW_LOG.
pw_source_set("pw_assert_log") {
public_configs = [
":backend_config",
":default_config",
]
- public_deps = [
- ":core",
- dir_pw_log,
- ]
- public = [ "public_overrides/pw_assert_backend/assert_backend.h" ]
-}
-
-pw_source_set("core") {
- public_configs = [ ":default_config" ]
public_deps = [ "$dir_pw_log" ]
+ public = [ "public_overrides/pw_assert_backend/assert_backend.h" ]
deps = [
"$dir_pw_assert:config",
"$dir_pw_assert:facade",
"$dir_pw_preprocessor",
]
- public = [ "public/pw_assert_log/assert_log.h" ]
- sources = [ "assert_log.cc" ]
+ sources = [
+ "assert_log.cc",
+ "public/pw_assert_log/assert_log.h",
+ ]
+}
+
+# This backend implements pw_assert's PW_ASSERT() macros via PW_LOG. It is
+# intended only
+# for use with PW_LOG backends which are constexpr compatible such as
+# pw_log_android.
+#
+# Warning: The "lite" naming is transitional. assert_lite_backend.h headers
+# will be renamed as the pw_assert API is reassessed. (pwbug/246)
+pw_source_set("lite_backend") {
+ public_configs = [
+ ":lite_backend_overrides",
+ ":default_config",
+ ]
+ public_deps = [ dir_pw_preprocessor ]
+ public =
+ [ "assert_lite_public_overrides/pw_assert_backend/assert_lite_backend.h" ]
+ sources = [ "public/pw_assert_log/assert_lite_log.h" ]
+}
+
+group("lite_compatibility_backend.impl") {
}
# pw_assert_log doesn't have deps with potential circular dependencies, so this
diff --git a/pw_assert_log/assert_lite_public_overrides/pw_assert_backend/assert_lite_backend.h b/pw_assert_log/assert_lite_public_overrides/pw_assert_backend/assert_lite_backend.h
new file mode 100644
index 000000000..91a6ff6ef
--- /dev/null
+++ b/pw_assert_log/assert_lite_public_overrides/pw_assert_backend/assert_lite_backend.h
@@ -0,0 +1,20 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// This override header merely points to the true backend, in this case the
+// basic one. The reason to redirect is to permit the use of multiple backends
+// (though pw_assert/check.h can only point to 1 backend).
+#pragma once
+
+#include "pw_assert_log/assert_lite_log.h"
diff --git a/pw_assert_log/docs.rst b/pw_assert_log/docs.rst
index 626e28b66..3fa0d3aab 100644
--- a/pw_assert_log/docs.rst
+++ b/pw_assert_log/docs.rst
@@ -4,16 +4,25 @@
pw_assert_log
=============
---------
-Overview
---------
-This assert backend implements the ``pw_assert`` facade, by routing the assert
-message into the logger with the ``PW_LOG_LEVEL_FATAL`` log level. This is an
-easy way to tokenize your assert messages, by using the ``pw_log_tokenized``
-log backend for logging, then using ``pw_assert_log`` to route the tokenized
-messages into the tokenized log handler.
+-----------------
+pw_assert_BACKEND
+-----------------
+This assert backend implements the ``pw_assert:check`` facade, by routing the
+``PW_CHECK()``/``PW_CRASH()`` macros into ``PW_LOG`` with the
+``PW_LOG_LEVEL_FATAL`` log level. This is an easy way to tokenize your assert
+messages, by using the ``pw_log_tokenized`` log backend for logging, then using
+``pw_assert_log`` to route the tokenized messages into the tokenized log
+handler.
To use this module:
1. Set your assert backend: ``pw_assert_BACKEND = dir_pw_assert_log``
2. Ensure your logging backend knows how to handle the assert failure flag
+
+----------------------
+pw_assert_LITE_BACKEND
+----------------------
+This assert backend implements the ``pw_assert:assert`` facade, by routing the
+``PW_ASSERT()`` macros into ``PW_LOG`` with the ``PW_LOG_LEVEL_FATAL`` log
+level. This is an easy way to forward your asserts to a native macro assert
+API if it is already constexpr safe such as on Android.
diff --git a/pw_assert_log/public/pw_assert_log/assert_lite_log.h b/pw_assert_log/public/pw_assert_log/assert_lite_log.h
new file mode 100644
index 000000000..08ddcbc8c
--- /dev/null
+++ b/pw_assert_log/public/pw_assert_log/assert_lite_log.h
@@ -0,0 +1,26 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_log/levels.h"
+#include "pw_log/log.h"
+#include "pw_log/options.h"
+#include "pw_preprocessor/compiler.h"
+
+#define PW_ASSERT_HANDLE_FAILURE(condition_string) \
+ do { \
+ PW_LOG( \
+ PW_LOG_LEVEL_FATAL, PW_LOG_FLAGS, "Assert failed: " condition_string); \
+ PW_UNREACHABLE; \
+ } while (0)
diff --git a/pw_assert_tokenized/docs.rst b/pw_assert_tokenized/docs.rst
index bc645a7da..0a0aeebf2 100644
--- a/pw_assert_tokenized/docs.rst
+++ b/pw_assert_tokenized/docs.rst
@@ -56,7 +56,7 @@ Setup
#. Add file name tokens to your token database. pw_assert_tokenized can't create
file name tokens that can be parsed out of the final compiled binary. The
``pw_relative_source_file_names``
- `GN template <module-pw_build-relative-source-file-names>`_ can be used to
+ :ref:`GN template<module-pw_build-relative-source-file-names>` can be used to
collect the names of all source files used in your final executable into a
JSON file, which can then be included in the creation of a tokenizer
database.
diff --git a/pw_assert_tokenized/public/pw_assert_tokenized/assert_tokenized.h b/pw_assert_tokenized/public/pw_assert_tokenized/assert_tokenized.h
index 7d3811dfd..46ae933d5 100644
--- a/pw_assert_tokenized/public/pw_assert_tokenized/assert_tokenized.h
+++ b/pw_assert_tokenized/public/pw_assert_tokenized/assert_tokenized.h
@@ -21,4 +21,4 @@
// generated offline separately.
#define PW_ASSERT_HANDLE_FAILURE(expression) \
pw_assert_tokenized_HandleAssertFailure(PW_TOKENIZER_STRING_TOKEN(__FILE__), \
- __LINE__);
+ __LINE__)
diff --git a/pw_assert_tokenized/public/pw_assert_tokenized/check_tokenized.h b/pw_assert_tokenized/public/pw_assert_tokenized/check_tokenized.h
index 591a1944a..bda3a1abf 100644
--- a/pw_assert_tokenized/public/pw_assert_tokenized/check_tokenized.h
+++ b/pw_assert_tokenized/public/pw_assert_tokenized/check_tokenized.h
@@ -18,7 +18,7 @@
#define _PW_ASSERT_TOKENIZED_TO_HANDLER(str) \
do { \
- constexpr uint32_t token = \
+ const uint32_t token = \
PW_TOKENIZE_STRING("Check failure in " __FILE__ ": " str); \
pw_assert_tokenized_HandleCheckFailure(token, __LINE__); \
} while (0)
diff --git a/pw_bloat/bloat.cmake b/pw_bloat/bloat.cmake
new file mode 100644
index 000000000..6a4e9c703
--- /dev/null
+++ b/pw_bloat/bloat.cmake
@@ -0,0 +1,64 @@
+# Copyright 2021 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+include_guard(GLOBAL)
+
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
+# This function creates a library under the specified ${NAME} which provides
+# a generated bloaty configuration for a given ELF file using
+# pw_bloat.bloaty_config.
+#
+# Produces the ${OUTPUT} Bloaty McBloatface configuration file.
+#
+# Args:
+#
+# NAME - name of the library to create
+# ELF_FILE - The input ELF file to process using pw_bloat.bloaty_config
+# OUTPUT - The output Bloaty McBloatface configuration file
+function(pw_bloaty_config NAME)
+ set(option_args)
+ set(one_value_args ELF_FILE OUTPUT)
+ set(multi_value_args)
+ _pw_parse_argv_strict(pw_bloaty_config
+ 1 "${option_args}" "${one_value_args}" "${multi_value_args}"
+ )
+
+ if("${arg_ELF_FILE}" STREQUAL "")
+ message(FATAL_ERROR
+ "pw_bloaty_config requires an input ELF file in ELF_FILE. "
+ "No ELF_FILE was listed for ${NAME}")
+ endif()
+
+ if("${arg_OUTPUT}" STREQUAL "")
+ message(FATAL_ERROR
+ "pw_bloaty_config requires an output config file in OUTPUT. "
+ "No OUTPUT was listed for ${NAME}")
+ endif()
+
+ add_library(${NAME} INTERFACE)
+ add_dependencies(${NAME} INTERFACE ${NAME}_generated_config)
+
+ add_custom_command(
+ COMMENT "Generating ${NAME}'s ${arg_OUTPUT} for ${arg_ELF_FILE}."
+ COMMAND
+ ${Python3_EXECUTABLE}
+ "$ENV{PW_ROOT}/pw_bloat/py/pw_bloat/bloaty_config.py"
+ ${arg_ELF_FILE} -o ${arg_OUTPUT} -l warning
+ DEPENDS ${arg_ELF_FILE}
+ OUTPUT ${arg_OUTPUT} POST_BUILD
+ )
+ add_custom_target(${NAME}_generated_config
+ DEPENDS ${arg_OUTPUT}
+ )
+endfunction()
diff --git a/pw_bloat/bloat.gni b/pw_bloat/bloat.gni
index a22a0925b..661df3202 100644
--- a/pw_bloat/bloat.gni
+++ b/pw_bloat/bloat.gni
@@ -183,7 +183,7 @@ template("pw_size_report") {
python_deps = [ "$dir_pw_bloat/py" ]
inputs = _bloaty_configs
outputs = [
- "$target_gen_dir/${target_name}.txt",
+ "${_doc_rst_output}.txt",
_doc_rst_output,
]
deps = _all_target_dependencies
diff --git a/pw_bloat/bloat_this_binary.cc b/pw_bloat/bloat_this_binary.cc
index 425c8f65e..2dbf47df1 100644
--- a/pw_bloat/bloat_this_binary.cc
+++ b/pw_bloat/bloat_this_binary.cc
@@ -25,7 +25,7 @@ namespace pw::bloat {
char* volatile non_optimizable_pointer;
void BloatThisBinary() {
- volatile unsigned counter = 0;
+ [[maybe_unused]] volatile unsigned counter = 0;
// In case someone accidentally ends up flashing and running a bloat
// executable on their device, loop forever instead of running this code.
diff --git a/pw_bloat/docs.rst b/pw_bloat/docs.rst
index b21e520cf..a5f9c3dbe 100644
--- a/pw_bloat/docs.rst
+++ b/pw_bloat/docs.rst
@@ -3,9 +3,10 @@
--------
pw_bloat
--------
-The bloat module provides tools to generate size report cards for output
-binaries using `Bloaty McBloatface <https://github.com/google/bloaty>`_ and
-Pigweed's GN build system.
+The bloat module provides tools and helpers around using
+`Bloaty McBloatface <https://github.com/google/bloaty>`_ including generating
+size report cards for output binaries through Pigweed's GN build
+system.
Bloat report cards allow tracking the memory usage of a system over time as code
changes are made and provide a breakdown of which parts of the code have the
@@ -40,11 +41,11 @@ base for the size diff can be specified either globally through the top-level
sources = [ "empty_main.cc" ]
}
- exectuable("hello_world_printf") {
+ executable("hello_world_printf") {
sources = [ "hello_printf.cc" ]
}
- exectuable("hello_world_iostream") {
+ executable("hello_world_iostream") {
sources = [ "hello_iostream.cc" ]
}
@@ -97,3 +98,202 @@ Simple bloat loop example
Simple bloat function example
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. include:: examples/simple_bloat_function
+
+Additional Bloaty data sources
+==============================
+`Bloaty McBloatface <https://github.com/google/bloaty>`_ by itself cannot help
+answer some questions which embedded developers frequently face such as
+understanding how much space is left. To address this, Pigweed provides Python
+tooling (``pw_bloat.bloaty_config``) to generate bloaty configuration files
+based on the final ELF files through small tweaks in the linker scripts to
+expose extra information.
+
+See the sections below on how to enable the additional data sections through
+modifications in your linker script(s).
+
+As an example to generate the helper configuration which enables additional data
+sources for ``example.elf`` if you've updated your linker script(s) accordingly,
+simply run
+``python -m pw_bloat.bloaty_config example.elf > example.bloaty``. The
+``example.bloaty`` can then be used with bloaty using the ``-c`` flag, for
+example
+``bloaty -c example.bloaty example.elf --domain vm -d memoryregions,utilization``
+which may return something like:
+
+.. code-block::
+
+ 84.2% 1023Ki FLASH
+ 94.2% 963Ki Free space
+ 5.8% 59.6Ki Used space
+ 15.8% 192Ki RAM
+ 100.0% 192Ki Used space
+ 0.0% 512 VECTOR_TABLE
+ 96.9% 496 Free space
+ 3.1% 16 Used space
+ 0.0% 0 Not resident in memory
+ NAN% 0 Used space
+
+
+``utilization`` data source
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+The most common question many embedded developers face when using ``bloaty`` is
+how much space you are using and how much space is left. To correctly answer
+this, section sizes must be used in order to correctly account for section
+alignment requirements.
+
+The generated ``utilization`` data source will work with any ELF file, where
+``Used Space`` is reported for the sum of virtual memory size of all sections.
+
+In order for ``Free Space`` to be reported, your linker scripts must include
+properly aligned sections which span the unused remaining space for the relevant
+memory region with the ``unused_space`` string anywhere in their name. This
+typically means creating a trailing section which is pinned to span to the end
+of the memory region.
+
+For example imagine this partial example GNU LD linker script:
+
+.. code-block::
+
+ MEMORY
+ {
+ FLASH(rx) : \
+ ORIGIN = PW_BOOT_FLASH_BEGIN, \
+ LENGTH = PW_BOOT_FLASH_SIZE
+ RAM(rwx) : \
+ ORIGIN = PW_BOOT_RAM_BEGIN, \
+ LENGTH = PW_BOOT_RAM_SIZE
+ }
+
+ SECTIONS
+ {
+ /* Main executable code. */
+ .code : ALIGN(8)
+ {
+ /* Application code. */
+ *(.text)
+ *(.text*)
+ KEEP(*(.init))
+ KEEP(*(.fini))
+
+ . = ALIGN(8);
+ /* Constants.*/
+ *(.rodata)
+ *(.rodata*)
+ } >FLASH
+
+ /* Explicitly initialized global and static data. (.data)*/
+ .static_init_ram : ALIGN(8)
+ {
+ *(.data)
+ *(.data*)
+ . = ALIGN(8);
+ } >RAM AT> FLASH
+
+ /* Zero initialized global/static data. (.bss) */
+ .zero_init_ram : ALIGN(8)
+ {
+ *(.bss)
+ *(.bss*)
+ *(COMMON)
+ . = ALIGN(8);
+ } >RAM
+ }
+
+Could be modified as follows to enable ``Free Space`` reporting:
+
+.. code-block::
+
+ MEMORY
+ {
+ FLASH(rx) : ORIGIN = PW_BOOT_FLASH_BEGIN, LENGTH = PW_BOOT_FLASH_SIZE
+ RAM(rwx) : ORIGIN = PW_BOOT_RAM_BEGIN, LENGTH = PW_BOOT_RAM_SIZE
+ }
+
+ SECTIONS
+ {
+ /* Main executable code. */
+ .code : ALIGN(8)
+ {
+ /* Application code. */
+ *(.text)
+ *(.text*)
+ KEEP(*(.init))
+ KEEP(*(.fini))
+
+ . = ALIGN(8);
+ /* Constants.*/
+ *(.rodata)
+ *(.rodata*)
+ } >FLASH
+
+ /* Explicitly initialized global and static data. (.data)*/
+ .static_init_ram : ALIGN(8)
+ {
+ *(.data)
+ *(.data*)
+ . = ALIGN(8);
+ } >RAM AT> FLASH
+
+ /* Zero initialized global/static data. (.bss). */
+ .zero_init_ram : ALIGN(8)
+ {
+ *(.bss)
+ *(.bss*)
+ *(COMMON)
+ . = ALIGN(8);
+ } >RAM
+
+ /*
+ * Do not declare any output sections after this comment. This area is
+ * reserved only for declaring unused sections of memory. These sections are
+ * used by pw_bloat.bloaty_config to create the utilization data source for
+ * bloaty.
+ */
+ .FLASH.unused_space (NOLOAD) : ALIGN(8)
+ {
+ . = ABSOLUTE(ORIGIN(FLASH) + LENGTH(FLASH));
+ } >FLASH
+
+ .RAM.unused_space (NOLOAD) : ALIGN(8)
+ {
+ . = ABSOLUTE(ORIGIN(RAM) + LENGTH(RAM));
+ } >RAM
+ }
+
+``memoryregions`` data source
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Understanding how symbols, sections, and other data sources can be attributed
+back to the memory regions defined in your linker script is another common
+problem area. Unfortunately the ELF format does not include the original memory
+regions, meaning ``bloaty`` can not do this today by itself. In addition, it's
+relatively common that there are multiple memory regions which alias to the same
+memory but through different buses which could make attribution difficult.
+
+Instead of taking the less portable and brittle approach to parse ``*.map``
+files, ``pw_bloat.bloaty_config`` consumes symbols which are defined in the
+linker script with a special format to extract this information from the ELF
+file: ``pw_bloat_config_memory_region_NAME_{start,end}{_N,}``.
+
+These symbols are then used to determine how to map segments to these memory
+regions. Note that segments must be used in order to account for inter-section
+padding which is not attributed against any sections.
+
+As an example, if you have a single view in the single memory region named
+``FLASH``, then you should produce the following two symbols in your linker
+script:
+
+.. code-block::
+
+ pw_bloat_config_memory_region_FLASH_start = ORIGIN(FLASH);
+ pw_bloat_config_memory_region_FLASH_end = ORIGIN(FLASH) + LENGTH(FLASH);
+
+As another example, if you have two aliased memory regions (``DTCM`` and
+``ITCM``) aliasing the same effective memory you'd like to call ``RAM``, then
+you should produce the following four symbols in your linker script:
+
+.. code-block::
+
+ pw_bloat_config_memory_region_RAM_start_0 = ORIGIN(ITCM);
+ pw_bloat_config_memory_region_RAM_end_0 = ORIGIN(ITCM) + LENGTH(ITCM);
+ pw_bloat_config_memory_region_RAM_start_1 = ORIGIN(DTCM);
+ pw_bloat_config_memory_region_RAM_end_1 = ORIGIN(DTCM) + LENGTH(DTCM);
diff --git a/pw_bloat/py/BUILD.gn b/pw_bloat/py/BUILD.gn
index 24379d39e..028e4e140 100644
--- a/pw_bloat/py/BUILD.gn
+++ b/pw_bloat/py/BUILD.gn
@@ -27,9 +27,11 @@ pw_python_package("py") {
"pw_bloat/binary_diff.py",
"pw_bloat/bloat.py",
"pw_bloat/bloat_output.py",
+ "pw_bloat/bloaty_config.py",
"pw_bloat/no_bloaty.py",
"pw_bloat/no_toolchains.py",
]
+ tests = [ "bloaty_config_test.py" ]
pylintrc = "$dir_pigweed/.pylintrc"
python_deps = [ "$dir_pw_cli/py" ]
}
diff --git a/pw_bloat/py/bloaty_config_test.py b/pw_bloat/py/bloaty_config_test.py
new file mode 100644
index 000000000..340ffe2f6
--- /dev/null
+++ b/pw_bloat/py/bloaty_config_test.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python3
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Tests for bloaty configuration tooling."""
+
+import unittest
+
+from pw_bloat import bloaty_config
+
+
+class BloatyConfigTest(unittest.TestCase):
+ """Tests that the bloaty config tool produces the expected config."""
+ def test_map_segments_to_memory_regions(self) -> None:
+ """Ensures the mapping works correctly based on a real example."""
+ segments = {
+ 3: (int(0x800f268), int(0x8100200)),
+ 5: (int(0x20004650), int(0x20020650)),
+ 6: (int(0x20020650), int(0x20030000)),
+ 1: (int(0x8000200), int(0x800f060)),
+ 4: (int(0x20000208), int(0x20004650)),
+ 2: (int(0x20000000), int(0x20000208)),
+ 0: (int(0x8000000), int(0x8000200)),
+ }
+ memory_regions = {
+ 'FLASH': {
+ 0: (int(0x8000200), int(0x8100200))
+ },
+ 'RAM': {
+ 0: (int(0x20000000), int(0x20030000))
+ },
+ 'VECTOR_TABLE': {
+ 0: (int(0x8000000), int(0x8000200))
+ },
+ }
+ expected = {
+ 3: 'FLASH',
+ 5: 'RAM',
+ 6: 'RAM',
+ 1: 'FLASH',
+ 4: 'RAM',
+ 2: 'RAM',
+ 0: 'VECTOR_TABLE',
+ }
+ actual = bloaty_config.map_segments_to_memory_regions(
+ segments=segments, memory_regions=memory_regions)
+ self.assertEqual(expected, actual)
+
+ def test_generate_memoryregions_data_source(self) -> None:
+ """Ensures the formatted generation works correctly."""
+ segments_to_memory_regions = {
+ 0: 'RAM',
+ 1: 'RAM',
+ 13: 'FLASH',
+ }
+ config = bloaty_config.generate_memoryregions_data_source(
+ segments_to_memory_regions)
+ expected = '\n'.join((
+ r'custom_data_source: {',
+ r' name: "memoryregions"',
+ r' base_data_source: "segments"',
+ r' rewrite: {',
+ r' pattern:"^LOAD #0 \\[.*\\]$"',
+ r' replacement:"RAM"',
+ r' }',
+ r' rewrite: {',
+ r' pattern:"^LOAD #1 \\[.*\\]$"',
+ r' replacement:"RAM"',
+ r' }',
+ r' rewrite: {',
+ r' pattern:"^LOAD #13 \\[.*\\]$"',
+ r' replacement:"FLASH"',
+ r' }',
+ r' rewrite: {',
+ r' pattern:".*"',
+ r' replacement:"Not resident in memory"',
+ r' }',
+ r'}',
+ r'',
+ ))
+ self.assertEqual(expected, config)
+
+ def test_generate_utilization_data_source(self) -> None:
+ config = bloaty_config.generate_utilization_data_source()
+ expected = '\n'.join((
+ 'custom_data_source: {',
+ ' name:"utilization"',
+ ' base_data_source:"sections"',
+ ' rewrite: {',
+ ' pattern:"unused_space"',
+ ' replacement:"Free space"',
+ ' }',
+ ' rewrite: {',
+ ' pattern:".*"',
+ ' replacement:"Used space"',
+ ' }',
+ '}',
+ '',
+ ))
+ self.assertEqual(expected, config)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/pw_bloat/py/pw_bloat/bloat.py b/pw_bloat/py/pw_bloat/bloat.py
index ee79d79f3..d62f233db 100755
--- a/pw_bloat/py/pw_bloat/bloat.py
+++ b/pw_bloat/py/pw_bloat/bloat.py
@@ -199,6 +199,10 @@ def main() -> int:
write_file(f'{args.target}.txt', complete_output)
print(complete_output)
+ # TODO(frolv): Remove when custom output for full mode is added.
+ if args.full:
+ write_file(f'{args.target}', complete_output)
+
return 0
diff --git a/pw_bloat/py/pw_bloat/bloaty_config.py b/pw_bloat/py/pw_bloat/bloaty_config.py
new file mode 100644
index 000000000..d5a38dc2a
--- /dev/null
+++ b/pw_bloat/py/pw_bloat/bloaty_config.py
@@ -0,0 +1,339 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Generates a useful bloaty config file containing new data sources."""
+
+import argparse
+import logging
+import re
+import sys
+from typing import BinaryIO, Dict, List, Optional, TextIO
+
+import pw_cli.argument_types
+from elftools.elf import elffile # type: ignore
+
+_LOG = logging.getLogger('bloaty_config')
+
+# 'pw_bloat_config_memory_region_NAME_{start,end}{_N,}' where _N defaults to 0.
+_MEMORY_REGION_SYMBOL_RE = re.compile(
+ r'pw_bloat_config_memory_region_' +
+ r'(?P<name>\w+)_(?P<limit>(start|end))(_(?P<index>\d+))?')
+
+
+def _parse_args() -> argparse.Namespace:
+ """Return a CLI argument parser for this module."""
+ parser = argparse.ArgumentParser(
+ description='Generates useful bloaty configurations entries',
+ epilog='Hint: try this:\n'
+ ' python -m pw_bloat.bloaty_config my_app.elf -o my_app.bloat')
+ parser.add_argument('elf_file', type=argparse.FileType('rb'))
+ parser.add_argument('--output',
+ '-o',
+ type=argparse.FileType('w'),
+ help='The generated bloaty configuration',
+ default=sys.stdout)
+ parser.add_argument(
+ '--utilization',
+ action=argparse.BooleanOptionalAction,
+ default=True,
+ help=(
+ 'Generate the utilization custom_data_source based on sections ' +
+ 'with "unused_space" in anywhere in their name'))
+ parser.add_argument(
+ '--memoryregions',
+ action=argparse.BooleanOptionalAction,
+ default=True,
+ help=('Generate the memoryregions custom_data_source based on ' +
+ 'symbols defined in the linker script matching the following ' +
+ 'pattern: ' +
+ '"pw::bloat::config::memory_region::NAME[0].{start,end}"'))
+ parser.add_argument('-l',
+ '--loglevel',
+ type=pw_cli.argument_types.log_level,
+ default=logging.INFO,
+ help='Set the log level'
+ '(debug, info, warning, error, critical)')
+ return parser.parse_args()
+
+
+def _parse_memory_regions(parsed_elf_file: elffile.ELFFile) -> Optional[Dict]:
+ """
+ Search for the special pw::bloat::config symbols in the ELF binary.
+
+ This produces a dictionary which looks like:
+ {
+ MEMORY_REGION_NAME_0:{
+ 0:(VM_START_ADDRESS, VM_END_ADDRESS)
+ ...
+ N:(VM_START_ADDRESS, VM_END_ADDRESS)
+ }
+ ...
+ MEMORY_REGION_NAME_M:{
+ 0:(VM_START_ADDRESS, VM_END_ADDRESS)
+ ...
+ K:(VM_START_ADDRESS, VM_END_ADDRESS)
+ }
+ }
+ """
+ symtab_section = parsed_elf_file.get_section_by_name('.symtab')
+ assert symtab_section
+
+ # Produces an initial dictionary which looks like:
+ # {
+ # MEMORY_REGION_NAME_0:{
+ # 0:{ 'start':vm_start_address, 'end':vm_end_address }
+ # ...
+ # N:{ 'start':vm_start_address, 'end':vm_end_address }
+ # }
+ # ...
+ # MEMORY_REGION_NAME_M:{
+ # 0:{ 'start':vm_start_address, 'end':vm_end_address }
+ # ...
+ # K:{ 'start':vm_start_address, 'end':vm_end_address }
+ # }
+ # }
+ memory_regions: Dict = {}
+ for symbol in symtab_section.iter_symbols():
+ match = _MEMORY_REGION_SYMBOL_RE.match(symbol.name)
+ if not match:
+ continue
+
+ name = match.group('name')
+ limit = match.group('limit')
+ if match.group('index'):
+ index = int(match.group('index'))
+ else:
+ index = 0
+ if name not in memory_regions:
+ memory_regions[name] = {}
+ memory_region = memory_regions[name]
+ if index not in memory_region:
+ memory_region[index] = {}
+ memory_region_segment = memory_region[index]
+ memory_region_segment[limit] = symbol.entry.st_value
+
+ # If the user did not provide a single pw::bloat::config symbol in the ELF
+ # binary then bail out and do nothing.
+ if not memory_regions:
+ _LOG.info('No valid pw::bloat::config::memory_region::* symbols found')
+ return None
+
+ # Ensure all memory regions' ranges have an end and start.
+ missing_range_limits = False
+ for region_name, ranges in memory_regions.items():
+ for index, limits in ranges.items():
+ if 'start' not in limits:
+ missing_range_limits = True
+ _LOG.error('%s[%d] is missing the start address', region_name,
+ index)
+ if 'end' not in limits:
+ missing_range_limits = True
+ _LOG.error('%s[%d] is missing the end address', region_name,
+ index)
+ if missing_range_limits:
+ _LOG.error('Invalid memory regions detected: missing ranges')
+ return None
+
+ # Translate the initial memory_regions dictionary to the tupled return
+ # format, i.e. (start, end) values in the nested dictionary.
+ tupled_memory_regions: Dict = {}
+ for region_name, ranges in memory_regions.items():
+ if region_name not in tupled_memory_regions:
+ tupled_memory_regions[region_name] = {}
+ for index, limits in ranges.items():
+ tupled_memory_regions[region_name][index] = (limits['start'],
+ limits['end'])
+
+ # Ensure the memory regions do not overlap.
+ if _memory_regions_overlap(tupled_memory_regions):
+ _LOG.error('Invalid memory regions detected: overlaps detected')
+ return None
+
+ return tupled_memory_regions
+
+
+def _parse_segments(parsed_elf_file: elffile.ELFFile) -> Dict:
+ """
+ Report all of the segment information from the ELF binary.
+
+ Iterates over all of the segments in the ELF file's program header and
+ reports where they reside in virtual memory through a dictionary which
+ looks like:
+ {
+ 0:(start_vmaddr,end_vmaddr),
+ ...
+ N:(start_vmaddr,end_vmaddr),
+ }
+ """
+ segments = {}
+ for i in range(parsed_elf_file.num_segments()):
+ segment = parsed_elf_file.get_segment(i)
+ start_vmaddr = segment['p_vaddr']
+ memory_size = segment['p_memsz']
+ if memory_size == 0:
+ continue # Not a loaded segment which resides in virtual memory.
+ end_vmaddr = start_vmaddr + memory_size
+ segments[i] = (start_vmaddr, end_vmaddr)
+ return segments
+
+
+def _memory_regions_overlap(memory_regions: Dict) -> bool:
+ """Returns where any memory regions overlap each other."""
+ overlaps_detected = False
+ for current_name, current_ranges in memory_regions.items():
+ for current_index, (current_start,
+ current_end) in current_ranges.items():
+ for other_name, other_ranges in memory_regions.items():
+ for other_index, (other_start,
+ other_end) in other_ranges.items():
+ if (current_name == other_name
+ and current_index == other_index):
+ continue # Skip yourself.
+ # Check if the other region end is within this region.
+ other_end_overlaps = (current_start < other_end <=
+ current_end)
+ other_start_overlaps = (current_start <= other_start <
+ current_end)
+ if other_end_overlaps or other_start_overlaps:
+ overlaps_detected = True
+ _LOG.error(f'error: {current_name}[{current_index}] ' +
+ f'[{hex(current_start)},' +
+ f'{hex(current_end)}] overlaps with ' +
+ f'{other_name}[{other_index}] '
+ f'[{hex(other_start)},' +
+ f'{hex(other_end)}] overlaps with ')
+ return overlaps_detected
+
+
+def _get_segments_to_memory_region_map(elf_file: BinaryIO) -> Optional[Dict]:
+ """
+ Processes an ELF file to look up what memory regions segments are in.
+
+ Returns the result from map_segments_to_memory_regions if valid memory
+ regions were parsed out of the ELF file.
+ """
+ parsed_elf_file = elffile.ELFFile(elf_file)
+
+ memory_regions = _parse_memory_regions(parsed_elf_file)
+ if not memory_regions:
+ return None
+
+ segments = _parse_segments(parsed_elf_file)
+
+ return map_segments_to_memory_regions(segments=segments,
+ memory_regions=memory_regions)
+
+
+def map_segments_to_memory_regions(segments: Dict,
+ memory_regions: Dict) -> Dict:
+ """
+ Maps segments to the virtual memory regions they reside in.
+
+ This takes in the results from _parse_memory_regions and _parse_segments and
+ produces a dictionary which looks like:
+ {
+ SEGMENT_INDEX_0:'MEMORY_REGION_NAME_0',
+ SEGMENT_INDEX_1:'MEMORY_REGION_NAME_0',
+ ...
+ SEGMENT_INDEX_N:'MEMORY_REGION_NAME_M',
+ }
+ """
+
+ # Now for each segment, determine what memory region it belongs to
+ # and generate a bloaty config output for it.
+ segment_to_memory_region = {}
+ for segment, (segment_start, segment_end) in segments.items():
+ # Note this is the final filter bloaty rewrite pattern format.
+ for memory_region_name, memory_region_info in memory_regions.items():
+ for _, (subregion_start,
+ subregion_end) in memory_region_info.items():
+ if (segment_start >= subregion_start
+ and segment_end <= subregion_end):
+ # We found the subregion the segment resides in.
+ segment_to_memory_region[segment] = memory_region_name
+ if segment not in segment_to_memory_region:
+ _LOG.error(
+ f'Error: Failed to find memory region for LOAD #{segment} ' +
+ f'[{hex(segment_start)},{hex(segment_end)}]')
+ return segment_to_memory_region
+
+
+def generate_memoryregions_data_source(segment_to_memory_region: Dict) -> str:
+ output: List[str] = []
+ output.append('custom_data_source: {')
+ output.append(' name: "memoryregions"')
+ output.append(' base_data_source: "segments"')
+ for segment_index, memory_region in segment_to_memory_region.items():
+ output.append(' rewrite: {')
+ segment_filter = r'^LOAD ' + f'#{segment_index}' + r' \\[.*\\]$'
+ output.append(f' pattern:"{segment_filter}"')
+ output.append(f' replacement:"{memory_region}"')
+ output.append(' }')
+ output.append(' rewrite: {')
+ output.append(' pattern:".*"')
+ output.append(' replacement:"Not resident in memory"')
+ output.append(' }')
+ output.append('}')
+ return '\n'.join(output) + '\n'
+
+
+def generate_utilization_data_source() -> str:
+ output: List[str] = []
+ output.append('custom_data_source: {')
+ output.append(' name:"utilization"')
+ output.append(' base_data_source:"sections"')
+ output.append(' rewrite: {')
+ output.append(' pattern:"unused_space"')
+ output.append(' replacement:"Free space"')
+ output.append(' }')
+ output.append(' rewrite: {')
+ output.append(' pattern:".*"')
+ output.append(' replacement:"Used space"')
+ output.append(' }')
+ output.append('}')
+ return '\n'.join(output) + '\n'
+
+
+def generate_bloaty_config(elf_file: BinaryIO, enable_memoryregions: bool,
+ enable_utilization: bool, out_file: TextIO) -> None:
+ if enable_memoryregions:
+ # Enable the "memoryregions" data_source if the user provided the
+ # required pw_bloat specific symbols in their linker script.
+ segment_to_memory_region = _get_segments_to_memory_region_map(elf_file)
+ if not segment_to_memory_region:
+ _LOG.info('memoryregions data_source is not provided')
+ else:
+ _LOG.info('memoryregions data_source is provided')
+ out_file.write(
+ generate_memoryregions_data_source(segment_to_memory_region))
+
+ if enable_utilization:
+ _LOG.info('utilization data_source is provided')
+ out_file.write(generate_utilization_data_source())
+
+
+def main() -> int:
+ """Generates a useful bloaty config file containing new data sources."""
+ args = _parse_args()
+
+ logging.basicConfig(format='%(message)s', level=args.loglevel)
+
+ generate_bloaty_config(elf_file=args.elf_file,
+ enable_memoryregions=args.memoryregions,
+ enable_utilization=args.utilization,
+ out_file=args.output)
+ return 0
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/pw_bloat/py/setup.cfg b/pw_bloat/py/setup.cfg
index 42d9a47a2..b1f87faa9 100644
--- a/pw_bloat/py/setup.cfg
+++ b/pw_bloat/py/setup.cfg
@@ -21,7 +21,9 @@ description = Tools for generating binary size report cards
[options]
packages = find:
zip_safe = False
-install_requires = pw_cli
+install_requires =
+ pw_cli
+ pyelftools
[options.package_data]
pw_bloat = py.typed
diff --git a/pw_blob_store/blob_store.cc b/pw_blob_store/blob_store.cc
index f55127232..d58455326 100644
--- a/pw_blob_store/blob_store.cc
+++ b/pw_blob_store/blob_store.cc
@@ -161,7 +161,7 @@ Status BlobStore::OpenRead() {
return Status::Unavailable();
}
- if (!ValidToRead()) {
+ if (!HasData()) {
PW_LOG_ERROR("Blob reader unable open without valid data");
return Status::FailedPrecondition();
}
@@ -345,7 +345,8 @@ Status BlobStore::FlushFinalPartialChunk() {
PW_DCHECK_UINT_GT(bytes_in_buffer, 0);
PW_DCHECK_UINT_LE(bytes_in_buffer, flash_write_size_bytes_);
- PW_DCHECK_UINT_LE(flash_write_size_bytes_, WriteBytesRemaining());
+ PW_DCHECK_UINT_LE(flash_write_size_bytes_,
+ MaxDataSizeBytes() - flash_address_);
// If there is no buffer there should never be any bytes enqueued.
PW_DCHECK(!write_buffer_.empty());
@@ -406,7 +407,7 @@ Status BlobStore::EraseIfNeeded() {
}
StatusWithSize BlobStore::Read(size_t offset, ByteSpan dest) const {
- if (!ValidToRead()) {
+ if (!HasData()) {
return StatusWithSize::FailedPrecondition();
}
if (offset >= ReadableDataBytes()) {
@@ -420,7 +421,7 @@ StatusWithSize BlobStore::Read(size_t offset, ByteSpan dest) const {
}
Result<ConstByteSpan> BlobStore::GetMemoryMappedBlob() const {
- if (!ValidToRead()) {
+ if (!HasData()) {
return Status::FailedPrecondition();
}
@@ -688,7 +689,7 @@ size_t BlobStore::BlobReader::ConservativeLimit(LimitType limit) const {
Status BlobStore::BlobReader::Open(size_t offset) {
PW_DCHECK(!open_);
- if (!store_.ValidToRead()) {
+ if (!store_.HasData()) {
return Status::FailedPrecondition();
}
if (offset >= store_.ReadableDataBytes()) {
@@ -712,8 +713,8 @@ Status BlobStore::BlobReader::DoSeek(ptrdiff_t offset, Whence origin) {
return Status::FailedPrecondition();
}
- // Note that Open ensures it is ValidToRead() which
- // in turn guarantees store_.ReadableDataBytes() > 0.
+ // Note that Open ensures HasData() which in turn guarantees
+ // store_.ReadableDataBytes() > 0.
size_t pos = offset_;
PW_TRY(CalculateSeek(offset, origin, store_.ReadableDataBytes() - 1, pos));
diff --git a/pw_blob_store/blob_store_test.cc b/pw_blob_store/blob_store_test.cc
index 11fd84947..5b35df22f 100644
--- a/pw_blob_store/blob_store_test.cc
+++ b/pw_blob_store/blob_store_test.cc
@@ -236,6 +236,7 @@ TEST_F(BlobStoreTest, Reader_ConservativeLimits) {
BlobStoreBuffer<kBufferSize> blob(
"TestBlobBlock", partition_, &checksum, kvs::TestKvs(), kBufferSize);
EXPECT_EQ(OkStatus(), blob.Init());
+ EXPECT_TRUE(blob.HasData());
BlobStore::BlobReader reader(blob);
ASSERT_EQ(OkStatus(), reader.Open());
@@ -261,12 +262,15 @@ TEST_F(BlobStoreTest, IsOpen) {
EXPECT_EQ(OkStatus(), writer.Open());
EXPECT_EQ(true, writer.IsOpen());
+ EXPECT_FALSE(blob.HasData());
+
// Need to write something, so the blob reader is able to open.
std::array<std::byte, 64> tmp_buffer = {};
EXPECT_EQ(OkStatus(), writer.Write(tmp_buffer));
EXPECT_EQ(OkStatus(), writer.Close());
EXPECT_EQ(false, writer.IsOpen());
+ EXPECT_TRUE(blob.HasData());
BlobStore::BlobReader reader(blob);
EXPECT_EQ(false, reader.IsOpen());
ASSERT_EQ(OkStatus(), reader.Open());
@@ -576,16 +580,24 @@ TEST_F(BlobStoreTest, Discard) {
blob_title, partition_, &checksum, kvs::TestKvs(), kBufferSize);
EXPECT_EQ(OkStatus(), blob.Init());
+ EXPECT_TRUE(blob.HasData());
+
BlobStore::BlobWriterWithBuffer writer(blob);
EXPECT_EQ(OkStatus(), writer.Open());
EXPECT_EQ(OkStatus(), writer.Write(tmp_buffer));
+ // Blob should NOT be valid to read, because the write data was only buffered,
+ // and has not been written to flash yet.
+ EXPECT_FALSE(blob.HasData());
+
// The write does an implicit erase so there should be no key for this blob.
EXPECT_EQ(Status::NotFound(),
kvs::TestKvs().acquire()->Get(blob_title, tmp_buffer).status());
EXPECT_EQ(OkStatus(), writer.Close());
+ EXPECT_TRUE(blob.HasData());
+
EXPECT_EQ(OkStatus(),
kvs::TestKvs().acquire()->Get(blob_title, tmp_buffer).status());
@@ -593,6 +605,8 @@ TEST_F(BlobStoreTest, Discard) {
EXPECT_EQ(OkStatus(), writer.Discard());
EXPECT_EQ(OkStatus(), writer.Close());
+ EXPECT_FALSE(blob.HasData());
+
EXPECT_EQ(Status::NotFound(),
kvs::TestKvs().acquire()->Get(blob_title, tmp_buffer).status());
}
@@ -722,7 +736,31 @@ TEST_F(BlobStoreTest, InvalidSeekOffset) {
ASSERT_EQ(Status::OutOfRange(), reader.Seek(kOffset));
}
-// Test reading with a read buffer larger than the available data in the
+// Write a block to blob and close with part of a write buffer with unflushed
+// data.
+TEST_F(BlobStoreTest, WriteBufferWithRemainderInBuffer) {
+ InitSourceBufferToRandom(0x11309);
+
+ kvs::ChecksumCrc16 checksum;
+ constexpr size_t kBufferSize = 256;
+ BlobStoreBuffer<kBufferSize> blob(
+ "TestBlobBlock", partition_, &checksum, kvs::TestKvs(), kBufferSize);
+ EXPECT_EQ(OkStatus(), blob.Init());
+
+ const size_t write_size_bytes = kBlobDataSize - 10;
+ ConstByteSpan write_data = std::span(source_buffer_).first(write_size_bytes);
+
+ BlobStore::BlobWriterWithBuffer writer(blob);
+ EXPECT_EQ(OkStatus(), writer.Open());
+ ASSERT_EQ(OkStatus(), writer.Write(write_data));
+ EXPECT_EQ(OkStatus(), writer.Close());
+
+ BlobStore::BlobReader reader(blob);
+ ASSERT_EQ(OkStatus(), reader.Open());
+ EXPECT_EQ(write_size_bytes, reader.ConservativeReadLimit());
+}
+
+// Test reading with a read buffer larger than the available data in the blob.
TEST_F(BlobStoreTest, ReadBufferIsLargerThanData) {
InitSourceBufferToRandom(0x57326);
diff --git a/pw_blob_store/docs.rst b/pw_blob_store/docs.rst
index 23bd3f465..a418f58af 100644
--- a/pw_blob_store/docs.rst
+++ b/pw_blob_store/docs.rst
@@ -16,6 +16,11 @@ Most operations on a ``BlobStore`` are done using ``BlobReader`` and
a ``BlobStore`` may have multiple open ``BlobReader`` objects, no other
readers/writers may be active if a ``BlobWriter`` is opened on a blob store.
+The data state of a blob can be checked using the ``HasData()`` method.
+The method returns true if the blob is currently valid and has at least one data
+byte. This allows checking if a blob has stored data without needing to
+instantiate and open a reader or writer.
+
Write buffer
============
@@ -88,6 +93,9 @@ for the ``std::string_view`` to be invalidated after the function returns.
Reading from a BlobStore
------------------------
+A ``BlobStore`` may have multiple open ``BlobReader`` objects. No other
+readers/writers may be open/active if a ``BlobWriter`` is opened on a blob
+store.
0) Create BlobReader instance
1) BlobReader::Open().
diff --git a/pw_blob_store/flat_file_system_entry_test.cc b/pw_blob_store/flat_file_system_entry_test.cc
index 3e16063c7..8410deab9 100644
--- a/pw_blob_store/flat_file_system_entry_test.cc
+++ b/pw_blob_store/flat_file_system_entry_test.cc
@@ -153,7 +153,7 @@ TEST_F(FlatFileSystemBlobStoreEntryTest, NoData) {
FlatFileSystemBlobStoreEntry::FilePermissions::READ;
// Ensure the BlobStore is erased.
- partition_.Erase();
+ ASSERT_EQ(OkStatus(), partition_.Erase());
sync::VirtualMutex blob_store_mutex;
FlatFileSystemBlobStoreEntry blob_store_file(
diff --git a/pw_blob_store/public/pw_blob_store/blob_store.h b/pw_blob_store/public/pw_blob_store/blob_store.h
index 120fcea1b..899ae8368 100644
--- a/pw_blob_store/public/pw_blob_store/blob_store.h
+++ b/pw_blob_store/public/pw_blob_store/blob_store.h
@@ -379,6 +379,17 @@ class BlobStore {
// Maximum number of data bytes this BlobStore is able to store.
size_t MaxDataSizeBytes() const;
+ // Get the current data state of the blob without needing to instantiate
+ // and/or open a reader or writer. This check is independent of any writers or
+ // readers of this blob that might exist (open or closed).
+ //
+ // NOTE: This state can be changed by any writer that is open(ed) for this
+ // blob. Readers can not be opened until any open writers are closed.
+ //
+ // true - Blob is valid/OK and has at least 1 data byte.
+ // false - Blob is either invalid or does not have any data bytes
+ bool HasData() const { return (valid_data_ && ReadableDataBytes() > 0); }
+
private:
Status LoadMetadata();
@@ -477,9 +488,6 @@ class BlobStore {
Status EraseIfNeeded();
- // Blob is valid/OK and has data to read.
- bool ValidToRead() const { return (valid_data_ && ReadableDataBytes() > 0); }
-
// Read valid data. Attempts to read the lesser of output.size_bytes() or
// available bytes worth of data. Returns:
//
diff --git a/pw_boot_cortex_m/basic_cortex_m.ld b/pw_boot_cortex_m/basic_cortex_m.ld
index 69334c5f6..a14a20423 100644
--- a/pw_boot_cortex_m/basic_cortex_m.ld
+++ b/pw_boot_cortex_m/basic_cortex_m.ld
@@ -186,6 +186,27 @@ SECTIONS
{
KEEP(*(.ARM.extab*))
}
+
+ /*
+ * Do not declare any output sections after this comment. This area is
+ * reserved only for declaring unused sections of memory. These sections are
+ * used by pw_bloat.bloaty_config to create the utilization data source for
+ * bloaty.
+ */
+ .VECTOR_TABLE.unused_space (NOLOAD) : ALIGN(8)
+ {
+ . = ABSOLUTE(ORIGIN(VECTOR_TABLE) + LENGTH(VECTOR_TABLE));
+ } >VECTOR_TABLE
+
+ .FLASH.unused_space (NOLOAD) : ALIGN(8)
+ {
+ . = ABSOLUTE(ORIGIN(FLASH) + LENGTH(FLASH));
+ } >FLASH
+
+ .RAM.unused_space (NOLOAD) : ALIGN(8)
+ {
+ . = ABSOLUTE(ORIGIN(RAM) + LENGTH(RAM));
+ } >RAM
}
/* Symbols used by core_init.c: */
@@ -202,3 +223,14 @@ _pw_zero_init_ram_end = _pw_zero_init_ram_start + SIZEOF(.zero_init_ram);
/* arm-none-eabi expects `end` symbol to point to start of heap for sbrk. */
PROVIDE(end = _pw_zero_init_ram_end);
+
+/* These symbols are used by pw_bloat.bloaty_config to create the memoryregions
+ * data source for bloaty in this format (where the optional _N defaults to 0):
+ * pw_bloat_config_memory_region_NAME_{start,end}{_N,} */
+pw_bloat_config_memory_region_VECTOR_TABLE_start = ORIGIN(VECTOR_TABLE);
+pw_bloat_config_memory_region_VECTOR_TABLE_end =
+ ORIGIN(VECTOR_TABLE) + LENGTH(VECTOR_TABLE);
+pw_bloat_config_memory_region_FLASH_start = ORIGIN(FLASH);
+pw_bloat_config_memory_region_FLASH_end = ORIGIN(FLASH) + LENGTH(FLASH);
+pw_bloat_config_memory_region_RAM_start = ORIGIN(RAM);
+pw_bloat_config_memory_region_RAM_end = ORIGIN(RAM) + LENGTH(RAM);
diff --git a/pw_build/BUILD.gn b/pw_build/BUILD.gn
index a6d0cd7de..1a74c2bad 100644
--- a/pw_build/BUILD.gn
+++ b/pw_build/BUILD.gn
@@ -70,6 +70,14 @@ config("reduced_size") {
"-fdata-sections",
]
cflags_cc = [ "-fno-rtti" ]
+
+ if (current_os == "mac" || current_os == "ios") {
+ # Delete unreferenced sections. Helpful with -ffunction-sections.
+ ldflags = [ "-Wl,-dead_strip" ]
+ } else {
+ # Delete unreferenced sections. Helpful with -ffunction-sections.
+ ldflags = [ "-Wl,--gc-sections" ]
+ }
}
config("strict_warnings") {
diff --git a/pw_build/CMakeLists.txt b/pw_build/CMakeLists.txt
index b2a9b36bf..07b44a217 100644
--- a/pw_build/CMakeLists.txt
+++ b/pw_build/CMakeLists.txt
@@ -29,6 +29,9 @@ target_compile_options(pw_build
$<$<CXX_COMPILER_ID:Clang>:-fcolor-diagnostics>
$<$<CXX_COMPILER_ID:GNU>:-fdiagnostics-color=always>
)
+if(ZEPHYR_PIGWEED_MODULE_DIR)
+ target_link_libraries(pw_build INTERFACE zephyr_interface)
+endif()
# Declare top-level targets for tests.
add_custom_target(pw_tests.default)
diff --git a/pw_build/bazel_internal/BUILD.bazel b/pw_build/bazel_internal/BUILD.bazel
new file mode 100644
index 000000000..8607c8340
--- /dev/null
+++ b/pw_build/bazel_internal/BUILD.bazel
@@ -0,0 +1,39 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load(":pigweed_internal.bzl", "pw_linker_script")
+
+pw_linker_script(
+ name = "linker_script_test",
+ defines = [
+ "PW_BOOT_FLASH_BEGIN=0x08000200",
+ "PW_BOOT_FLASH_SIZE=1024K",
+ "PW_BOOT_HEAP_SIZE=112K",
+ "PW_BOOT_MIN_STACK_SIZE=1K",
+ "PW_BOOT_RAM_BEGIN=0x20000000",
+ "PW_BOOT_RAM_SIZE=192K",
+ "PW_BOOT_VECTOR_TABLE_BEGIN=0x08000000",
+ "PW_BOOT_VECTOR_TABLE_SIZE=512",
+ ],
+ linker_script = "linker_script.ld",
+)
+
+# Use cc_binary to build the test to avoid duplicating the linker script in the
+# command line via implicit deps in pw_cc_binary.
+cc_binary(
+ name = "test_linker_script",
+ srcs = ["test.cc"],
+ additional_linker_inputs = [":linker_script_test"],
+ linkopts = ["-T $(location :linker_script_test)"],
+)
diff --git a/pw_build/bazel_internal/BUILD.gn b/pw_build/bazel_internal/BUILD.gn
new file mode 100644
index 000000000..ef76ebd40
--- /dev/null
+++ b/pw_build/bazel_internal/BUILD.gn
@@ -0,0 +1,21 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("$dir_pw_build/target_types.gni")
+
+# This target exists only to keep the presubmit happy; it is not actually used.
+pw_source_set("tests") {
+ public_deps = [ dir_pw_preprocessor ]
+ sources = [ "test.cc" ]
+}
diff --git a/pw_build/bazel_internal/linker_script.ld b/pw_build/bazel_internal/linker_script.ld
new file mode 100644
index 000000000..7c3169e7b
--- /dev/null
+++ b/pw_build/bazel_internal/linker_script.ld
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2022 The Pigweed Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+/* This linker script is for test purposes only and should not be used in a
+ * production environment.
+*/
+#ifndef PW_BOOT_VECTOR_TABLE_BEGIN
+#error "PW_BOOT_VECTOR_TABLE_BEGIN is not defined, and is required to use pw_boot_cortex_m"
+#endif // PW_BOOT_VECTOR_TABLE_BEGIN
+
+
+ /* Note: This technically doesn't set the firmware's entry point. Setting the
+ * firmware entry point is done by setting vector_table[1]
+ * (Reset_Handler). However, this DOES tell the compiler how to optimize
+ * when --gc-sections is enabled.
+ */
+ENTRY(pw_boot_Entry)
+
+MEMORY
+{
+ /* TODO(pwbug/57): Make it possible for projects to freely customize
+ * memory regions.
+ */
+
+ /* Vector Table (typically in flash) */
+ VECTOR_TABLE(rx) : \
+ ORIGIN = PW_BOOT_VECTOR_TABLE_BEGIN, \
+ LENGTH = PW_BOOT_VECTOR_TABLE_SIZE
+ /* Internal Flash */
+ FLASH(rx) : \
+ ORIGIN = PW_BOOT_FLASH_BEGIN, \
+ LENGTH = PW_BOOT_FLASH_SIZE
+ /* Internal SRAM */
+ RAM(rwx) : \
+ ORIGIN = PW_BOOT_RAM_BEGIN, \
+ LENGTH = PW_BOOT_RAM_SIZE
+}
+
diff --git a/pw_build/bazel_internal/pigweed_internal.bzl b/pw_build/bazel_internal/pigweed_internal.bzl
index c55d39e07..5a2bad12c 100644
--- a/pw_build/bazel_internal/pigweed_internal.bzl
+++ b/pw_build/bazel_internal/pigweed_internal.bzl
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
-# Copyright 2021 The Pigweed Authors
+# Copyright 2022 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
@@ -15,6 +15,9 @@
# the License.
""" An internal set of tools for creating embedded CC targets. """
+load("@rules_cc//cc:action_names.bzl", "C_COMPILE_ACTION_NAME")
+load("@rules_cc//cc:toolchain_utils.bzl", "find_cpp_toolchain")
+
DEBUGGING = [
"-g",
]
@@ -68,7 +71,7 @@ def add_defaults(kwargs):
kwargs: cc_* arguments to be modified.
"""
- copts = kwargs.get("copts", []) + PW_DEFAULT_COPTS
+ copts = PW_DEFAULT_COPTS + kwargs.get("copts", [])
kwargs["copts"] = select({
"//pw_build:kythe": copts + KYTHE_COPTS,
"//conditions:default": copts,
@@ -142,3 +145,64 @@ def has_pw_assert_dep(deps):
if dep in pw_assert_targets:
return True
return False
+
+def _preprocess_linker_script_impl(ctx):
+ cc_toolchain = find_cpp_toolchain(ctx)
+ output_script = ctx.actions.declare_file(ctx.label.name + ".ld")
+ feature_configuration = cc_common.configure_features(
+ ctx = ctx,
+ cc_toolchain = cc_toolchain,
+ requested_features = ctx.features,
+ unsupported_features = ctx.disabled_features,
+ )
+ cxx_compiler_path = cc_common.get_tool_for_action(
+ feature_configuration = feature_configuration,
+ action_name = C_COMPILE_ACTION_NAME,
+ )
+ c_compile_variables = cc_common.create_compile_variables(
+ feature_configuration = feature_configuration,
+ cc_toolchain = cc_toolchain,
+ user_compile_flags = ctx.fragments.cpp.copts + ctx.fragments.cpp.conlyopts,
+ )
+ env = cc_common.get_environment_variables(
+ feature_configuration = feature_configuration,
+ action_name = C_COMPILE_ACTION_NAME,
+ variables = c_compile_variables,
+ )
+ ctx.actions.run(
+ outputs = [output_script],
+ inputs = depset(
+ [ctx.file.linker_script],
+ transitive = [cc_toolchain.all_files],
+ ),
+ executable = cxx_compiler_path,
+ arguments = [
+ "-E",
+ "-P",
+ "-xc",
+ ctx.file.linker_script.short_path,
+ "-o",
+ output_script.path,
+ ] + [
+ "-D" + d
+ for d in ctx.attr.defines
+ ] + ctx.attr.copts,
+ env = env,
+ )
+ return [DefaultInfo(files = depset([output_script]))]
+
+pw_linker_script = rule(
+ _preprocess_linker_script_impl,
+ attrs = {
+ "copts": attr.string_list(doc = "C compile options."),
+ "defines": attr.string_list(doc = "C preprocessor defines."),
+ "linker_script": attr.label(
+ mandatory = True,
+ allow_single_file = True,
+ doc = "Linker script to preprocess.",
+ ),
+ "_cc_toolchain": attr.label(default = Label("@bazel_tools//tools/cpp:current_cc_toolchain")),
+ },
+ toolchains = ["@bazel_tools//tools/cpp:toolchain_type"],
+ fragments = ["cpp"],
+)
diff --git a/pw_build/bazel_internal/test.cc b/pw_build/bazel_internal/test.cc
new file mode 100644
index 000000000..6498ae9a9
--- /dev/null
+++ b/pw_build/bazel_internal/test.cc
@@ -0,0 +1,17 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// This file is intentionally very simple and is used only to test that the
+// linker script generator works as expected.
+int main() { return 0; } \ No newline at end of file
diff --git a/pw_build/docs.rst b/pw_build/docs.rst
index 082205b6b..90e499c9e 100644
--- a/pw_build/docs.rst
+++ b/pw_build/docs.rst
@@ -386,6 +386,8 @@ The following expressions are supported:
stamp = true
}
+.. _module-pw_build-pw_exec:
+
pw_exec
-------
``pw_exec`` allows for execution of arbitrary programs. It is a wrapper around
@@ -401,6 +403,10 @@ pw_exec
case $PATH is searched).
* ``args``: Optional list of arguments to the program.
* ``deps``: Dependencies for this target.
+* ``public_deps``: Public dependencies for this target. In addition to outputs
+ from this target, outputs generated by public dependencies can be used as
+ inputs from targets that depend on this one. This is not the case for private
+ deps.
* ``inputs``: Optional list of build inputs to the program.
* ``outputs``: Optional list of artifacts produced by the program's execution.
* ``env``: Optional list of key-value pairs defining environment variables for
@@ -576,7 +582,7 @@ pw_tokenizer is unable to embed token information as part of C/C++ compilation.
This template produces a JSON file containing an array of strings (file paths
with ``-ffile-prefix-map``-like transformations applied) that can be used to
-`generate a token database <module-pw_tokenizer-database-creation>`_.
+:ref:`generate a token database <module-pw_tokenizer-database-creation>`.
**Arguments**
@@ -659,6 +665,23 @@ than being absolute paths (e.g. ``/home/user/ralph/coding/my_proj/main.cc``).
This is a result of transformations applied to strip absolute pathing prefixes,
matching the behavior of pw_build's ``$dir_pw_build:relative_paths`` config.
+Build time errors: pw_error and pw_build_assert
+-----------------------------------------------
+In Pigweed's complex, multi-toolchain GN build it is not possible to build every
+target in every configuration. GN's ``assert`` statement is not ideal for
+enforcing the correct configuration because it may prevent the GN build files or
+targets from being referred to at all, even if they aren't used.
+
+The ``pw_error`` GN template results in an error if it is executed during the
+build. These error targets can exist in the build graph, but cannot be depended
+on without an error.
+
+``pw_build_assert`` evaluates to a ``pw_error`` if a condition fails or nothing
+(an empty group) if the condition passes. Targets can add a dependency on a
+``pw_build_assert`` to enforce a condition at build time.
+
+The templates for build time errors are defined in ``pw_build/error.gni``.
+
CMake
=====
Pigweed's `CMake`_ support is provided primarily for projects that have an
@@ -826,6 +849,33 @@ file. The built-in Bazel rules ``cc_binary``, ``cc_library``, and ``cc_test``
are wrapped with ``pw_cc_binary``, ``pw_cc_library``, and ``pw_cc_test``.
These wrappers add parameters to calls to the compiler and linker.
+In addition to wrapping the built-in rules, Pigweed also provides a custom
+rule for handling linker scripts with Bazel. e.g.
+
+.. code-block:: python
+
+ pw_linker_script(
+ name = "some_linker_script",
+ linker_script = ":some_configurable_linker_script.ld",
+ defines = [
+ "PW_BOOT_FLASH_BEGIN=0x08000200",
+ "PW_BOOT_FLASH_SIZE=1024K",
+ "PW_BOOT_HEAP_SIZE=112K",
+ "PW_BOOT_MIN_STACK_SIZE=1K",
+ "PW_BOOT_RAM_BEGIN=0x20000000",
+ "PW_BOOT_RAM_SIZE=192K",
+ "PW_BOOT_VECTOR_TABLE_BEGIN=0x08000000",
+ "PW_BOOT_VECTOR_TABLE_SIZE=512",
+ ],
+ )
+
+ pw_cc_binary(
+ name = "some_binary",
+ srcs = ["some_source.c"],
+ additional_linker_inputs = [":some_linker_script"],
+ linkopts = ["-T $(location :some_linker_script)"],
+ )
+
Currently Pigweed is making use of a set of
`open source <https://github.com/silvergasp/bazel-embedded>`_ toolchains. The
host builds are only supported on Linux/Mac based systems. Additionally the
diff --git a/pw_build/error.gni b/pw_build/error.gni
index 653b4a912..33804c982 100644
--- a/pw_build/error.gni
+++ b/pw_build/error.gni
@@ -18,8 +18,10 @@ import("python_action.gni")
# or 'message_lines' must be specified, but not both.
#
# Args:
+#
# message: The message to print. Use \n for newlines.
# message_lines: List of lines to use for the message.
+# visibility: GN visibility to apply to the underlying target.
#
template("pw_error") {
assert(
@@ -48,5 +50,46 @@ template("pw_error") {
# This output file is never created.
outputs = [ "$target_gen_dir/$target_name.build_error" ]
+
+ forward_variables_from(invoker, [ "visibility" ])
+ }
+}
+
+# An assert that is evaluated at build time. The assertion is only checked if
+# this target is depended on by another target. If the assertion passes, nothing
+# happens. If it fails, the target prints an error message with pw_error.
+#
+# To enforce a pw_build_assert, targets add a dependency on a pw_build_assert.
+# Multiple targets may depend on the same pw_build_assert if the same assertion
+# applies.
+#
+# Args:
+#
+# condition: The assertion to verify.
+# message: The message to print. Use \n for newlines.
+# message_lines: List of lines to use for the message.
+# visibility: GN visibility to apply to the underlying target.
+#
+template("pw_build_assert") {
+ assert(defined(invoker.condition),
+ "pw_build_assert requires a boolean condition")
+ assert(defined(invoker.message) != defined(invoker.message_lines),
+ "pw_build_assert requires either 'message' or 'message_lines'")
+
+ _pw_error_variables = [
+ "message",
+ "message_lines",
+ "visibility",
+ ]
+
+ if (invoker.condition) {
+ not_needed(invoker, _pw_error_variables)
+ group(target_name) {
+ forward_variables_from(invoker, [ "visibility" ])
+ }
+ } else {
+ pw_error(target_name) {
+ forward_variables_from(invoker, _pw_error_variables)
+ }
}
}
diff --git a/pw_build/exec.gni b/pw_build/exec.gni
index 26f3d6251..241c87b55 100644
--- a/pw_build/exec.gni
+++ b/pw_build/exec.gni
@@ -27,6 +27,11 @@ import("python_action.gni")
#
# deps: Dependencies for this target.
#
+# public_deps: Public dependencies for this target. In addition to outputs from
+# this target, outputs generated by public dependencies can be used as inputs
+# from targets that depend on this one. This is not the case for private
+# deps.
+#
# inputs: Optional list of build inputs to the program.
#
# outputs: Optional list of artifacts produced by the program's execution.
@@ -132,6 +137,7 @@ template("pw_exec") {
"deps",
"inputs",
"pool",
+ "public_deps",
])
if (!defined(inputs)) {
diff --git a/pw_build/generated_pigweed_modules_lists.gni b/pw_build/generated_pigweed_modules_lists.gni
new file mode 100644
index 000000000..f3d3627ef
--- /dev/null
+++ b/pw_build/generated_pigweed_modules_lists.gni
@@ -0,0 +1,471 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# Build args and lists for all modules in Pigweed.
+#
+# DO NOT EDIT! Generated by pw_build/py/pw_build/generate_modules_lists.py.
+#
+# To add modules here, list them in PIGWEED_MODULES and build the
+# update_modules target and commit the updated version of this file:
+#
+# ninja -C out update_modules
+#
+# DO NOT IMPORT THIS FILE DIRECTLY!
+#
+# Import it through //build_overrides/pigweed.gni instead.
+
+# Declare a build arg for each module.
+declare_args() {
+ dir_docker = get_path_info("../docker", "abspath")
+ dir_pw_allocator = get_path_info("../pw_allocator", "abspath")
+ dir_pw_analog = get_path_info("../pw_analog", "abspath")
+ dir_pw_android_toolchain = get_path_info("../pw_android_toolchain", "abspath")
+ dir_pw_arduino_build = get_path_info("../pw_arduino_build", "abspath")
+ dir_pw_assert = get_path_info("../pw_assert", "abspath")
+ dir_pw_assert_basic = get_path_info("../pw_assert_basic", "abspath")
+ dir_pw_assert_log = get_path_info("../pw_assert_log", "abspath")
+ dir_pw_assert_tokenized = get_path_info("../pw_assert_tokenized", "abspath")
+ dir_pw_assert_zephyr = get_path_info("../pw_assert_zephyr", "abspath")
+ dir_pw_base64 = get_path_info("../pw_base64", "abspath")
+ dir_pw_bloat = get_path_info("../pw_bloat", "abspath")
+ dir_pw_blob_store = get_path_info("../pw_blob_store", "abspath")
+ dir_pw_bluetooth_hci = get_path_info("../pw_bluetooth_hci", "abspath")
+ dir_pw_boot = get_path_info("../pw_boot", "abspath")
+ dir_pw_boot_cortex_m = get_path_info("../pw_boot_cortex_m", "abspath")
+ dir_pw_build = get_path_info("../pw_build", "abspath")
+ dir_pw_build_info = get_path_info("../pw_build_info", "abspath")
+ dir_pw_build_mcuxpresso = get_path_info("../pw_build_mcuxpresso", "abspath")
+ dir_pw_bytes = get_path_info("../pw_bytes", "abspath")
+ dir_pw_checksum = get_path_info("../pw_checksum", "abspath")
+ dir_pw_chrono = get_path_info("../pw_chrono", "abspath")
+ dir_pw_chrono_embos = get_path_info("../pw_chrono_embos", "abspath")
+ dir_pw_chrono_freertos = get_path_info("../pw_chrono_freertos", "abspath")
+ dir_pw_chrono_stl = get_path_info("../pw_chrono_stl", "abspath")
+ dir_pw_chrono_threadx = get_path_info("../pw_chrono_threadx", "abspath")
+ dir_pw_chrono_zephyr = get_path_info("../pw_chrono_zephyr", "abspath")
+ dir_pw_cli = get_path_info("../pw_cli", "abspath")
+ dir_pw_console = get_path_info("../pw_console", "abspath")
+ dir_pw_containers = get_path_info("../pw_containers", "abspath")
+ dir_pw_cpu_exception = get_path_info("../pw_cpu_exception", "abspath")
+ dir_pw_cpu_exception_cortex_m =
+ get_path_info("../pw_cpu_exception_cortex_m", "abspath")
+ dir_pw_crypto = get_path_info("../pw_crypto", "abspath")
+ dir_pw_docgen = get_path_info("../pw_docgen", "abspath")
+ dir_pw_doctor = get_path_info("../pw_doctor", "abspath")
+ dir_pw_env_setup = get_path_info("../pw_env_setup", "abspath")
+ dir_pw_file = get_path_info("../pw_file", "abspath")
+ dir_pw_function = get_path_info("../pw_function", "abspath")
+ dir_pw_fuzzer = get_path_info("../pw_fuzzer", "abspath")
+ dir_pw_hdlc = get_path_info("../pw_hdlc", "abspath")
+ dir_pw_hex_dump = get_path_info("../pw_hex_dump", "abspath")
+ dir_pw_i2c = get_path_info("../pw_i2c", "abspath")
+ dir_pw_i2c_mcuxpresso = get_path_info("../pw_i2c_mcuxpresso", "abspath")
+ dir_pw_interrupt = get_path_info("../pw_interrupt", "abspath")
+ dir_pw_interrupt_cortex_m =
+ get_path_info("../pw_interrupt_cortex_m", "abspath")
+ dir_pw_interrupt_zephyr = get_path_info("../pw_interrupt_zephyr", "abspath")
+ dir_pw_kvs = get_path_info("../pw_kvs", "abspath")
+ dir_pw_libc = get_path_info("../pw_libc", "abspath")
+ dir_pw_log = get_path_info("../pw_log", "abspath")
+ dir_pw_log_android = get_path_info("../pw_log_android", "abspath")
+ dir_pw_log_basic = get_path_info("../pw_log_basic", "abspath")
+ dir_pw_log_null = get_path_info("../pw_log_null", "abspath")
+ dir_pw_log_rpc = get_path_info("../pw_log_rpc", "abspath")
+ dir_pw_log_string = get_path_info("../pw_log_string", "abspath")
+ dir_pw_log_tokenized = get_path_info("../pw_log_tokenized", "abspath")
+ dir_pw_log_zephyr = get_path_info("../pw_log_zephyr", "abspath")
+ dir_pw_malloc = get_path_info("../pw_malloc", "abspath")
+ dir_pw_malloc_freelist = get_path_info("../pw_malloc_freelist", "abspath")
+ dir_pw_metric = get_path_info("../pw_metric", "abspath")
+ dir_pw_minimal_cpp_stdlib =
+ get_path_info("../pw_minimal_cpp_stdlib", "abspath")
+ dir_pw_module = get_path_info("../pw_module", "abspath")
+ dir_pw_multisink = get_path_info("../pw_multisink", "abspath")
+ dir_pw_package = get_path_info("../pw_package", "abspath")
+ dir_pw_persistent_ram = get_path_info("../pw_persistent_ram", "abspath")
+ dir_pw_polyfill = get_path_info("../pw_polyfill", "abspath")
+ dir_pw_preprocessor = get_path_info("../pw_preprocessor", "abspath")
+ dir_pw_presubmit = get_path_info("../pw_presubmit", "abspath")
+ dir_pw_protobuf = get_path_info("../pw_protobuf", "abspath")
+ dir_pw_protobuf_compiler = get_path_info("../pw_protobuf_compiler", "abspath")
+ dir_pw_random = get_path_info("../pw_random", "abspath")
+ dir_pw_result = get_path_info("../pw_result", "abspath")
+ dir_pw_ring_buffer = get_path_info("../pw_ring_buffer", "abspath")
+ dir_pw_router = get_path_info("../pw_router", "abspath")
+ dir_pw_rpc = get_path_info("../pw_rpc", "abspath")
+ dir_pw_snapshot = get_path_info("../pw_snapshot", "abspath")
+ dir_pw_software_update = get_path_info("../pw_software_update", "abspath")
+ dir_pw_span = get_path_info("../pw_span", "abspath")
+ dir_pw_spi = get_path_info("../pw_spi", "abspath")
+ dir_pw_status = get_path_info("../pw_status", "abspath")
+ dir_pw_stm32cube_build = get_path_info("../pw_stm32cube_build", "abspath")
+ dir_pw_stream = get_path_info("../pw_stream", "abspath")
+ dir_pw_string = get_path_info("../pw_string", "abspath")
+ dir_pw_symbolizer = get_path_info("../pw_symbolizer", "abspath")
+ dir_pw_sync = get_path_info("../pw_sync", "abspath")
+ dir_pw_sync_baremetal = get_path_info("../pw_sync_baremetal", "abspath")
+ dir_pw_sync_embos = get_path_info("../pw_sync_embos", "abspath")
+ dir_pw_sync_freertos = get_path_info("../pw_sync_freertos", "abspath")
+ dir_pw_sync_stl = get_path_info("../pw_sync_stl", "abspath")
+ dir_pw_sync_threadx = get_path_info("../pw_sync_threadx", "abspath")
+ dir_pw_sync_zephyr = get_path_info("../pw_sync_zephyr", "abspath")
+ dir_pw_sys_io = get_path_info("../pw_sys_io", "abspath")
+ dir_pw_sys_io_arduino = get_path_info("../pw_sys_io_arduino", "abspath")
+ dir_pw_sys_io_baremetal_lm3s6965evb =
+ get_path_info("../pw_sys_io_baremetal_lm3s6965evb", "abspath")
+ dir_pw_sys_io_baremetal_stm32f429 =
+ get_path_info("../pw_sys_io_baremetal_stm32f429", "abspath")
+ dir_pw_sys_io_emcraft_sf2 =
+ get_path_info("../pw_sys_io_emcraft_sf2", "abspath")
+ dir_pw_sys_io_mcuxpresso = get_path_info("../pw_sys_io_mcuxpresso", "abspath")
+ dir_pw_sys_io_stdio = get_path_info("../pw_sys_io_stdio", "abspath")
+ dir_pw_sys_io_stm32cube = get_path_info("../pw_sys_io_stm32cube", "abspath")
+ dir_pw_sys_io_zephyr = get_path_info("../pw_sys_io_zephyr", "abspath")
+ dir_pw_system = get_path_info("../pw_system", "abspath")
+ dir_pw_target_runner = get_path_info("../pw_target_runner", "abspath")
+ dir_pw_thread = get_path_info("../pw_thread", "abspath")
+ dir_pw_thread_embos = get_path_info("../pw_thread_embos", "abspath")
+ dir_pw_thread_freertos = get_path_info("../pw_thread_freertos", "abspath")
+ dir_pw_thread_stl = get_path_info("../pw_thread_stl", "abspath")
+ dir_pw_thread_threadx = get_path_info("../pw_thread_threadx", "abspath")
+ dir_pw_tls_client = get_path_info("../pw_tls_client", "abspath")
+ dir_pw_tls_client_boringssl =
+ get_path_info("../pw_tls_client_boringssl", "abspath")
+ dir_pw_tls_client_mbedtls =
+ get_path_info("../pw_tls_client_mbedtls", "abspath")
+ dir_pw_tokenizer = get_path_info("../pw_tokenizer", "abspath")
+ dir_pw_tool = get_path_info("../pw_tool", "abspath")
+ dir_pw_toolchain = get_path_info("../pw_toolchain", "abspath")
+ dir_pw_trace = get_path_info("../pw_trace", "abspath")
+ dir_pw_trace_tokenized = get_path_info("../pw_trace_tokenized", "abspath")
+ dir_pw_transfer = get_path_info("../pw_transfer", "abspath")
+ dir_pw_unit_test = get_path_info("../pw_unit_test", "abspath")
+ dir_pw_varint = get_path_info("../pw_varint", "abspath")
+ dir_pw_watch = get_path_info("../pw_watch", "abspath")
+ dir_pw_web_ui = get_path_info("../pw_web_ui", "abspath")
+ dir_pw_work_queue = get_path_info("../pw_work_queue", "abspath")
+}
+
+# Declare these as GN args in case this is imported in args.gni.
+# Use a separate block so variables in the prior block can be used.
+declare_args() {
+ # A list with paths to all Pigweed module. DO NOT SET THIS BUILD ARGUMENT!
+ pw_modules = [
+ dir_docker,
+ dir_pw_allocator,
+ dir_pw_analog,
+ dir_pw_android_toolchain,
+ dir_pw_arduino_build,
+ dir_pw_assert,
+ dir_pw_assert_basic,
+ dir_pw_assert_log,
+ dir_pw_assert_tokenized,
+ dir_pw_assert_zephyr,
+ dir_pw_base64,
+ dir_pw_bloat,
+ dir_pw_blob_store,
+ dir_pw_bluetooth_hci,
+ dir_pw_boot,
+ dir_pw_boot_cortex_m,
+ dir_pw_build,
+ dir_pw_build_info,
+ dir_pw_build_mcuxpresso,
+ dir_pw_bytes,
+ dir_pw_checksum,
+ dir_pw_chrono,
+ dir_pw_chrono_embos,
+ dir_pw_chrono_freertos,
+ dir_pw_chrono_stl,
+ dir_pw_chrono_threadx,
+ dir_pw_chrono_zephyr,
+ dir_pw_cli,
+ dir_pw_console,
+ dir_pw_containers,
+ dir_pw_cpu_exception,
+ dir_pw_cpu_exception_cortex_m,
+ dir_pw_crypto,
+ dir_pw_docgen,
+ dir_pw_doctor,
+ dir_pw_env_setup,
+ dir_pw_file,
+ dir_pw_function,
+ dir_pw_fuzzer,
+ dir_pw_hdlc,
+ dir_pw_hex_dump,
+ dir_pw_i2c,
+ dir_pw_i2c_mcuxpresso,
+ dir_pw_interrupt,
+ dir_pw_interrupt_cortex_m,
+ dir_pw_interrupt_zephyr,
+ dir_pw_kvs,
+ dir_pw_libc,
+ dir_pw_log,
+ dir_pw_log_android,
+ dir_pw_log_basic,
+ dir_pw_log_null,
+ dir_pw_log_rpc,
+ dir_pw_log_string,
+ dir_pw_log_tokenized,
+ dir_pw_log_zephyr,
+ dir_pw_malloc,
+ dir_pw_malloc_freelist,
+ dir_pw_metric,
+ dir_pw_minimal_cpp_stdlib,
+ dir_pw_module,
+ dir_pw_multisink,
+ dir_pw_package,
+ dir_pw_persistent_ram,
+ dir_pw_polyfill,
+ dir_pw_preprocessor,
+ dir_pw_presubmit,
+ dir_pw_protobuf,
+ dir_pw_protobuf_compiler,
+ dir_pw_random,
+ dir_pw_result,
+ dir_pw_ring_buffer,
+ dir_pw_router,
+ dir_pw_rpc,
+ dir_pw_snapshot,
+ dir_pw_software_update,
+ dir_pw_span,
+ dir_pw_spi,
+ dir_pw_status,
+ dir_pw_stm32cube_build,
+ dir_pw_stream,
+ dir_pw_string,
+ dir_pw_symbolizer,
+ dir_pw_sync,
+ dir_pw_sync_baremetal,
+ dir_pw_sync_embos,
+ dir_pw_sync_freertos,
+ dir_pw_sync_stl,
+ dir_pw_sync_threadx,
+ dir_pw_sync_zephyr,
+ dir_pw_sys_io,
+ dir_pw_sys_io_arduino,
+ dir_pw_sys_io_baremetal_lm3s6965evb,
+ dir_pw_sys_io_baremetal_stm32f429,
+ dir_pw_sys_io_emcraft_sf2,
+ dir_pw_sys_io_mcuxpresso,
+ dir_pw_sys_io_stdio,
+ dir_pw_sys_io_stm32cube,
+ dir_pw_sys_io_zephyr,
+ dir_pw_system,
+ dir_pw_target_runner,
+ dir_pw_thread,
+ dir_pw_thread_embos,
+ dir_pw_thread_freertos,
+ dir_pw_thread_stl,
+ dir_pw_thread_threadx,
+ dir_pw_tls_client,
+ dir_pw_tls_client_boringssl,
+ dir_pw_tls_client_mbedtls,
+ dir_pw_tokenizer,
+ dir_pw_tool,
+ dir_pw_toolchain,
+ dir_pw_trace,
+ dir_pw_trace_tokenized,
+ dir_pw_transfer,
+ dir_pw_unit_test,
+ dir_pw_varint,
+ dir_pw_watch,
+ dir_pw_web_ui,
+ dir_pw_work_queue,
+ ]
+
+ # A list with all Pigweed module test groups. DO NOT SET THIS BUILD ARGUMENT!
+ pw_module_tests = [
+ "$dir_pw_allocator:tests",
+ "$dir_pw_analog:tests",
+ "$dir_pw_assert:tests",
+ "$dir_pw_base64:tests",
+ "$dir_pw_blob_store:tests",
+ "$dir_pw_bluetooth_hci:tests",
+ "$dir_pw_bytes:tests",
+ "$dir_pw_checksum:tests",
+ "$dir_pw_chrono:tests",
+ "$dir_pw_containers:tests",
+ "$dir_pw_cpu_exception_cortex_m:tests",
+ "$dir_pw_crypto:tests",
+ "$dir_pw_file:tests",
+ "$dir_pw_function:tests",
+ "$dir_pw_fuzzer:tests",
+ "$dir_pw_hdlc:tests",
+ "$dir_pw_hex_dump:tests",
+ "$dir_pw_i2c:tests",
+ "$dir_pw_kvs:tests",
+ "$dir_pw_libc:tests",
+ "$dir_pw_log:tests",
+ "$dir_pw_log_null:tests",
+ "$dir_pw_log_rpc:tests",
+ "$dir_pw_log_tokenized:tests",
+ "$dir_pw_malloc_freelist:tests",
+ "$dir_pw_metric:tests",
+ "$dir_pw_minimal_cpp_stdlib:tests",
+ "$dir_pw_multisink:tests",
+ "$dir_pw_persistent_ram:tests",
+ "$dir_pw_polyfill:tests",
+ "$dir_pw_preprocessor:tests",
+ "$dir_pw_protobuf:tests",
+ "$dir_pw_protobuf_compiler:tests",
+ "$dir_pw_random:tests",
+ "$dir_pw_result:tests",
+ "$dir_pw_ring_buffer:tests",
+ "$dir_pw_router:tests",
+ "$dir_pw_rpc:tests",
+ "$dir_pw_snapshot:tests",
+ "$dir_pw_software_update:tests",
+ "$dir_pw_span:tests",
+ "$dir_pw_spi:tests",
+ "$dir_pw_status:tests",
+ "$dir_pw_stream:tests",
+ "$dir_pw_string:tests",
+ "$dir_pw_sync:tests",
+ "$dir_pw_thread:tests",
+ "$dir_pw_thread_embos:tests",
+ "$dir_pw_thread_freertos:tests",
+ "$dir_pw_thread_stl:tests",
+ "$dir_pw_thread_threadx:tests",
+ "$dir_pw_tls_client:tests",
+ "$dir_pw_tls_client_boringssl:tests",
+ "$dir_pw_tls_client_mbedtls:tests",
+ "$dir_pw_tokenizer:tests",
+ "$dir_pw_trace:tests",
+ "$dir_pw_trace_tokenized:tests",
+ "$dir_pw_transfer:tests",
+ "$dir_pw_unit_test:tests",
+ "$dir_pw_varint:tests",
+ "$dir_pw_work_queue:tests",
+ ]
+
+ # A list with all Pigweed modules docs groups. DO NOT SET THIS BUILD ARGUMENT!
+ pw_module_docs = [
+ "$dir_docker:docs",
+ "$dir_pw_allocator:docs",
+ "$dir_pw_analog:docs",
+ "$dir_pw_android_toolchain:docs",
+ "$dir_pw_arduino_build:docs",
+ "$dir_pw_assert:docs",
+ "$dir_pw_assert_basic:docs",
+ "$dir_pw_assert_log:docs",
+ "$dir_pw_assert_tokenized:docs",
+ "$dir_pw_assert_zephyr:docs",
+ "$dir_pw_base64:docs",
+ "$dir_pw_bloat:docs",
+ "$dir_pw_blob_store:docs",
+ "$dir_pw_bluetooth_hci:docs",
+ "$dir_pw_boot:docs",
+ "$dir_pw_boot_cortex_m:docs",
+ "$dir_pw_build:docs",
+ "$dir_pw_build_info:docs",
+ "$dir_pw_build_mcuxpresso:docs",
+ "$dir_pw_bytes:docs",
+ "$dir_pw_checksum:docs",
+ "$dir_pw_chrono:docs",
+ "$dir_pw_chrono_embos:docs",
+ "$dir_pw_chrono_freertos:docs",
+ "$dir_pw_chrono_stl:docs",
+ "$dir_pw_chrono_threadx:docs",
+ "$dir_pw_chrono_zephyr:docs",
+ "$dir_pw_cli:docs",
+ "$dir_pw_console:docs",
+ "$dir_pw_containers:docs",
+ "$dir_pw_cpu_exception:docs",
+ "$dir_pw_cpu_exception_cortex_m:docs",
+ "$dir_pw_crypto:docs",
+ "$dir_pw_docgen:docs",
+ "$dir_pw_doctor:docs",
+ "$dir_pw_env_setup:docs",
+ "$dir_pw_file:docs",
+ "$dir_pw_function:docs",
+ "$dir_pw_fuzzer:docs",
+ "$dir_pw_hdlc:docs",
+ "$dir_pw_hex_dump:docs",
+ "$dir_pw_i2c:docs",
+ "$dir_pw_i2c_mcuxpresso:docs",
+ "$dir_pw_interrupt:docs",
+ "$dir_pw_interrupt_cortex_m:docs",
+ "$dir_pw_interrupt_zephyr:docs",
+ "$dir_pw_kvs:docs",
+ "$dir_pw_libc:docs",
+ "$dir_pw_log:docs",
+ "$dir_pw_log_basic:docs",
+ "$dir_pw_log_null:docs",
+ "$dir_pw_log_rpc:docs",
+ "$dir_pw_log_string:docs",
+ "$dir_pw_log_tokenized:docs",
+ "$dir_pw_malloc:docs",
+ "$dir_pw_malloc_freelist:docs",
+ "$dir_pw_metric:docs",
+ "$dir_pw_minimal_cpp_stdlib:docs",
+ "$dir_pw_module:docs",
+ "$dir_pw_multisink:docs",
+ "$dir_pw_package:docs",
+ "$dir_pw_persistent_ram:docs",
+ "$dir_pw_polyfill:docs",
+ "$dir_pw_preprocessor:docs",
+ "$dir_pw_presubmit:docs",
+ "$dir_pw_protobuf:docs",
+ "$dir_pw_protobuf_compiler:docs",
+ "$dir_pw_random:docs",
+ "$dir_pw_result:docs",
+ "$dir_pw_ring_buffer:docs",
+ "$dir_pw_router:docs",
+ "$dir_pw_rpc:docs",
+ "$dir_pw_snapshot:docs",
+ "$dir_pw_software_update:docs",
+ "$dir_pw_span:docs",
+ "$dir_pw_spi:docs",
+ "$dir_pw_status:docs",
+ "$dir_pw_stm32cube_build:docs",
+ "$dir_pw_stream:docs",
+ "$dir_pw_string:docs",
+ "$dir_pw_symbolizer:docs",
+ "$dir_pw_sync:docs",
+ "$dir_pw_sync_baremetal:docs",
+ "$dir_pw_sync_embos:docs",
+ "$dir_pw_sync_freertos:docs",
+ "$dir_pw_sync_stl:docs",
+ "$dir_pw_sync_threadx:docs",
+ "$dir_pw_sync_zephyr:docs",
+ "$dir_pw_sys_io:docs",
+ "$dir_pw_sys_io_arduino:docs",
+ "$dir_pw_sys_io_baremetal_stm32f429:docs",
+ "$dir_pw_sys_io_emcraft_sf2:docs",
+ "$dir_pw_sys_io_mcuxpresso:docs",
+ "$dir_pw_sys_io_stdio:docs",
+ "$dir_pw_sys_io_stm32cube:docs",
+ "$dir_pw_sys_io_zephyr:docs",
+ "$dir_pw_system:docs",
+ "$dir_pw_target_runner:docs",
+ "$dir_pw_thread:docs",
+ "$dir_pw_thread_embos:docs",
+ "$dir_pw_thread_freertos:docs",
+ "$dir_pw_thread_stl:docs",
+ "$dir_pw_thread_threadx:docs",
+ "$dir_pw_tls_client:docs",
+ "$dir_pw_tls_client_boringssl:docs",
+ "$dir_pw_tls_client_mbedtls:docs",
+ "$dir_pw_tokenizer:docs",
+ "$dir_pw_toolchain:docs",
+ "$dir_pw_trace:docs",
+ "$dir_pw_trace_tokenized:docs",
+ "$dir_pw_transfer:docs",
+ "$dir_pw_unit_test:docs",
+ "$dir_pw_varint:docs",
+ "$dir_pw_watch:docs",
+ "$dir_pw_web_ui:docs",
+ "$dir_pw_work_queue:docs",
+ ]
+}
diff --git a/pw_build/hil.gni b/pw_build/hil.gni
index 99fc1df1c..bb3b4eef2 100644
--- a/pw_build/hil.gni
+++ b/pw_build/hil.gni
@@ -34,6 +34,9 @@ template("pw_hil_test") {
action = {
pool = "$dir_pw_build/pool:pw_hil_test($default_toolchain)"
stamp = true
+
+ # We want the test stdout to be saved.
+ capture_output = false
}
forward_variables_from(invoker, "*", [ "target_type" ])
}
diff --git a/pw_build/pigweed.bzl b/pw_build/pigweed.bzl
index a519b4b24..a1e24bc10 100644
--- a/pw_build/pigweed.bzl
+++ b/pw_build/pigweed.bzl
@@ -14,7 +14,7 @@
"""Pigweed build environment for bazel."""
load(
- ":bazel_internal/pigweed_internal.bzl",
+ "//pw_build/bazel_internal:pigweed_internal.bzl",
_add_cc_and_c_targets = "add_cc_and_c_targets",
_has_pw_assert_dep = "has_pw_assert_dep",
)
diff --git a/pw_build/pigweed.cmake b/pw_build/pigweed.cmake
index 0551a1af6..ceed0b609 100644
--- a/pw_build/pigweed.cmake
+++ b/pw_build/pigweed.cmake
@@ -103,10 +103,6 @@ function(pw_auto_add_simple_module MODULE)
${headers}
)
- if(arg_IMPLEMENTS_FACADE)
- target_include_directories("${MODULE}" PUBLIC public_overrides)
- endif()
-
pw_auto_add_module_tests("${MODULE}"
PRIVATE_DEPS
${arg_PUBLIC_DEPS}
@@ -199,7 +195,18 @@ function(pw_add_module_library NAME)
)
endif()
- add_library("${NAME}" EXCLUDE_FROM_ALL ${arg_HEADERS} ${arg_SOURCES})
+ # Instead of forking all of the code below or injecting an empty source file,
+ # conditionally select PUBLIC vs INTERFACE depending on whether there are
+ # sources to compile.
+ if(NOT "${arg_SOURCES}" STREQUAL "")
+ add_library("${NAME}" EXCLUDE_FROM_ALL)
+ set(public_or_interface PUBLIC)
+ else("${arg_SOURCES}" STREQUAL "")
+ add_library("${NAME}" EXCLUDE_FROM_ALL INTERFACE)
+ set(public_or_interface INTERFACE)
+ endif(NOT "${arg_SOURCES}" STREQUAL "")
+
+ target_sources("${NAME}" PRIVATE ${arg_SOURCES} ${arg_HEADERS})
# CMake 3.22 does not have a notion of target_headers yet, so in the mean
# time we ask for headers to be specified for consistency with GN & Bazel and
@@ -216,62 +223,82 @@ function(pw_add_module_library NAME)
endforeach()
if(NOT "${arg_PUBLIC_INCLUDES}" STREQUAL "")
- target_include_directories("${NAME}" PUBLIC ${arg_PUBLIC_INCLUDES})
- else()
+ target_include_directories("${NAME}"
+ ${public_or_interface}
+ ${arg_PUBLIC_INCLUDES}
+ )
+ else("${arg_PUBLIC_INCLUDES}" STREQUAL "")
# TODO(pwbug/601): Deprecate this legacy implicit PUBLIC_INCLUDES.
- target_include_directories("${NAME}" PUBLIC public)
- endif()
+ target_include_directories("${NAME}" ${public_or_interface} public)
+ endif(NOT "${arg_PUBLIC_INCLUDES}" STREQUAL "")
+
if(NOT "${arg_PRIVATE_INCLUDES}" STREQUAL "")
target_include_directories("${NAME}" PRIVATE ${arg_PRIVATE_INCLUDES})
- endif()
+ endif(NOT "${arg_PRIVATE_INCLUDES}" STREQUAL "")
+
target_link_libraries("${NAME}"
- PUBLIC
+ ${public_or_interface}
pw_build
${arg_PUBLIC_DEPS}
- PRIVATE
- pw_build.warnings
- ${arg_PRIVATE_DEPS}
)
+ if(NOT "${arg_SOURCES}" STREQUAL "")
+ target_link_libraries("${NAME}"
+ PRIVATE
+ pw_build.warnings
+ ${arg_PRIVATE_DEPS}
+ )
+ endif(NOT "${arg_SOURCES}" STREQUAL "")
+
if(NOT "${arg_IMPLEMENTS_FACADES}" STREQUAL "")
- target_include_directories("${NAME}" PUBLIC public_overrides)
+ target_include_directories("${NAME}"
+ ${public_or_interface}
+ public_overrides
+ )
if("${arg_PUBLIC_INCLUDES}" STREQUAL "")
# TODO(pwbug/601): Deprecate this legacy implicit PUBLIC_INCLUDES.
- target_include_directories("${NAME}" PUBLIC public_overrides)
- endif()
+ target_include_directories("${NAME}"
+ ${public_or_interface}
+ public_overrides
+ )
+ endif("${arg_PUBLIC_INCLUDES}" STREQUAL "")
set(facades ${arg_IMPLEMENTS_FACADES})
list(TRANSFORM facades APPEND ".facade")
- target_link_libraries("${NAME}" PUBLIC ${facades})
- endif()
-
- # Libraries require at least one source file.
- if(NOT arg_SOURCES)
- target_sources("${NAME}" PRIVATE $<TARGET_PROPERTY:pw_build.empty,SOURCES>)
- endif()
+ target_link_libraries("${NAME}" ${public_or_interface} ${facades})
+ endif(NOT "${arg_IMPLEMENTS_FACADES}" STREQUAL "")
if(NOT "${arg_PUBLIC_DEFINES}" STREQUAL "")
- target_compile_definitions("${NAME}" PUBLIC ${arg_PUBLIC_DEFINES})
- endif()
+ target_compile_definitions("${NAME}"
+ ${public_or_interface}
+ ${arg_PUBLIC_DEFINES}
+ )
+ endif(NOT "${arg_PUBLIC_DEFINES}" STREQUAL "")
if(NOT "${arg_PRIVATE_DEFINES}" STREQUAL "")
target_compile_definitions("${NAME}" PRIVATE ${arg_PRIVATE_DEFINES})
- endif()
+ endif(NOT "${arg_PRIVATE_DEFINES}" STREQUAL "")
if(NOT "${arg_PUBLIC_COMPILE_OPTIONS}" STREQUAL "")
- target_compile_options("${NAME}" PUBLIC ${arg_PUBLIC_COMPILE_OPTIONS})
- endif()
+ target_compile_options("${NAME}"
+ ${public_or_interface}
+ ${arg_PUBLIC_COMPILE_OPTIONS}
+ )
+ endif(NOT "${arg_PUBLIC_COMPILE_OPTIONS}" STREQUAL "")
if(NOT "${arg_PRIVATE_COMPILE_OPTIONS}" STREQUAL "")
target_compile_options("${NAME}" PRIVATE ${arg_PRIVATE_COMPILE_OPTIONS})
- endif()
+ endif(NOT "${arg_PRIVATE_COMPILE_OPTIONS}" STREQUAL "")
if(NOT "${arg_PUBLIC_LINK_OPTIONS}" STREQUAL "")
- target_link_options("${NAME}" PUBLIC ${arg_PUBLIC_LINK_OPTIONS})
- endif()
+ target_link_options("${NAME}"
+ ${public_or_interface}
+ ${arg_PUBLIC_LINK_OPTIONS}
+ )
+ endif(NOT "${arg_PUBLIC_LINK_OPTIONS}" STREQUAL "")
if(NOT "${arg_PRIVATE_LINK_OPTIONS}" STREQUAL "")
target_link_options("${NAME}" PRIVATE ${arg_PRIVATE_LINK_OPTIONS})
- endif()
+ endif(NOT "${arg_PRIVATE_LINK_OPTIONS}" STREQUAL "")
endfunction(pw_add_module_library)
# Declares a module as a facade.
diff --git a/pw_build/py/BUILD.gn b/pw_build/py/BUILD.gn
index 7fc0eada7..5341a92b9 100644
--- a/pw_build/py/BUILD.gn
+++ b/pw_build/py/BUILD.gn
@@ -31,6 +31,7 @@ pw_python_package("py") {
"pw_build/exec.py",
"pw_build/file_prefix_map.py",
"pw_build/generate_cc_blob_library.py",
+ "pw_build/generate_modules_lists.py",
"pw_build/generate_python_package.py",
"pw_build/generate_python_package_gn.py",
"pw_build/generated_tests.py",
diff --git a/pw_build/py/pw_build/create_python_tree.py b/pw_build/py/pw_build/create_python_tree.py
index abfeed2f3..90ddee52a 100644
--- a/pw_build/py/pw_build/create_python_tree.py
+++ b/pw_build/py/pw_build/create_python_tree.py
@@ -17,18 +17,14 @@ import argparse
import configparser
from datetime import datetime
import io
-import json
-import os
from pathlib import Path
import re
import shutil
import subprocess
import tempfile
-from typing import Iterable, List
+from typing import Iterable
-import setuptools # type: ignore
-
-from pw_build.python_package import PythonPackage
+from pw_build.python_package import PythonPackage, load_packages
def _parse_args():
@@ -203,32 +199,11 @@ def write_config(
setup_cfg_file.write_text(comment_block_text + setup_cfg_text.getvalue())
-def load_packages(input_list_files: Iterable[Path]) -> List[PythonPackage]:
- """Load Python package metadata and configs."""
-
- packages = []
- for input_path in input_list_files:
-
- with input_path.open() as input_file:
- # Each line contains the path to a json file.
- for json_file in input_file.readlines():
- # Load the json as a dict.
- json_file_path = Path(json_file.strip()).resolve()
- with json_file_path.open() as json_fp:
- json_dict = json.load(json_fp)
-
- packages.append(PythonPackage.from_dict(**json_dict))
- return packages
-
-
def build_python_tree(python_packages: Iterable[PythonPackage],
tree_destination_dir: Path,
include_tests: bool = False) -> None:
"""Install PythonPackages to a destination directory."""
- # Save the current out directory
- out_dir = Path.cwd()
-
# Create the root destination directory.
destination_path = tree_destination_dir.resolve()
# Delete any existing files
@@ -238,33 +213,10 @@ def build_python_tree(python_packages: Iterable[PythonPackage],
# Define a temporary location to run setup.py build in.
with tempfile.TemporaryDirectory() as build_base_name:
build_base = Path(build_base_name)
- lib_dir_path = build_base / 'lib'
for pkg in python_packages:
- # Create the temp install dir
- lib_dir_path.mkdir(parents=True, exist_ok=True)
-
- # cd to the location of setup.py
- setup_dir_path = out_dir / pkg.setup_dir
- os.chdir(setup_dir_path)
- # Run build with temp build-base location
- # Note: New files will be placed inside lib_dir_path
- setuptools.setup(script_args=[
- 'build',
- '--force',
- '--build-base',
- str(build_base),
- ])
-
- new_pkg_dir = lib_dir_path / pkg.package_name
-
- # If tests should be included, copy them to the tests dir
- if include_tests and pkg.tests:
- test_dir_path = new_pkg_dir / 'tests'
- test_dir_path.mkdir(parents=True, exist_ok=True)
-
- for test_source_path in pkg.tests:
- shutil.copy(out_dir / test_source_path, test_dir_path)
+ lib_dir_path = pkg.setuptools_build_with_base(
+ build_base, include_tests=include_tests)
# Move installed files from the temp build-base into
# destination_path.
@@ -277,9 +229,6 @@ def build_python_tree(python_packages: Iterable[PythonPackage],
# Clean build base lib folder for next install
shutil.rmtree(lib_dir_path, ignore_errors=True)
- # cd back to out directory
- os.chdir(out_dir)
-
def copy_extra_files(extra_file_strings: Iterable[str]) -> None:
"""Copy extra files to their destinations."""
diff --git a/pw_build/py/pw_build/generate_modules_lists.py b/pw_build/py/pw_build/generate_modules_lists.py
new file mode 100644
index 000000000..7026b229f
--- /dev/null
+++ b/pw_build/py/pw_build/generate_modules_lists.py
@@ -0,0 +1,254 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Manages the list of Pigweed modules.
+
+Used by modules.gni to generate:
+
+- a build arg for each module,
+- a list of module paths (pw_modules),
+- a list of module tests (pw_module_tests), and
+- a list of module docs (pw_module_docs).
+"""
+
+import argparse
+import difflib
+import io
+import os
+from pathlib import Path
+import sys
+import subprocess
+from typing import Iterator, List, Optional, Sequence, Tuple
+
+_COPYRIGHT_NOTICE = '''\
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.'''
+
+_WARNING = '\033[31m\033[1mWARNING:\033[0m ' # Red WARNING: prefix
+_ERROR = '\033[41m\033[37m\033[1mERROR:\033[0m ' # Red background ERROR: prefix
+
+_MISSING_MODULES_WARNING = _WARNING + '''\
+The PIGWEED_MODULES list is missing the following modules:
+{modules}
+
+If the listed modules are Pigweed modules, add them to PIGWEED_MODULES.
+
+If the listed modules are not actual Pigweed modules, remove any stray pw_*
+directories in the Pigweed repository (git clean -fd).
+'''
+
+_OUT_OF_DATE_WARNING = _ERROR + '''\
+The generated Pigweed modules list .gni file is out of date!
+
+Regenerate the modules lists and commit it to fix this:
+
+ ninja -C {out_dir} update_modules
+
+ git add {file}
+'''
+
+_FORMAT_FAILED_WARNING = _ERROR + '''\
+Failed to generate a valid .gni from PIGWEED_MODULES!
+
+This may be a Pigweed bug; please report this to the Pigweed team.
+'''
+
+_DO_NOT_SET = 'DO NOT SET THIS BUILD ARGUMENT!'
+
+
+def _module_list_warnings(root: Path, modules: Sequence[str]) -> Iterator[str]:
+ missing = _missing_modules(root, modules)
+ if missing:
+ yield _MISSING_MODULES_WARNING.format(modules=''.join(
+ f'\n - {module}' for module in missing))
+
+ if any(modules[i] > modules[i + 1] for i in range(len(modules) - 1)):
+ yield _WARNING + 'The PIGWEED_MODULES list is not sorted!'
+ yield ''
+ yield 'Apply the following diff to fix the order:'
+ yield ''
+ yield from difflib.unified_diff(modules,
+ sorted(modules),
+ lineterm='',
+ n=1,
+ fromfile='PIGWEED_MODULES',
+ tofile='PIGWEED_MODULES')
+
+ yield ''
+
+
+# TODO(hepler): Add tests and docs targets to all modules.
+def _find_tests_and_docs(
+ root: Path, modules: Sequence[str]) -> Tuple[List[str], List[str]]:
+ """Lists "tests" and "docs" targets for modules that declare them."""
+ tests = []
+ docs = []
+
+ for module in modules:
+ build_gn_contents = root.joinpath(module, 'BUILD.gn').read_bytes()
+ if b'group("tests")' in build_gn_contents:
+ tests.append(f'"$dir_{module}:tests",')
+
+ if b'group("docs")' in build_gn_contents:
+ docs.append(f'"$dir_{module}:docs",')
+
+ return tests, docs
+
+
+def _generate_modules_gni(root: Path, prefix: Path,
+ modules: Sequence[str]) -> Iterator[str]:
+ """Generates a .gni file with variables and lists for Pigweed modules."""
+ script = Path(__file__).resolve().relative_to(root.resolve()).as_posix()
+
+ yield _COPYRIGHT_NOTICE
+ yield ''
+ yield '# Build args and lists for all modules in Pigweed.'
+ yield '#'
+ yield f'# DO NOT EDIT! Generated by {script}.'
+ yield '#'
+ yield '# To add modules here, list them in PIGWEED_MODULES and build the'
+ yield '# update_modules target and commit the updated version of this file:'
+ yield '#'
+ yield '# ninja -C out update_modules'
+ yield '#'
+ yield '# DO NOT IMPORT THIS FILE DIRECTLY!'
+ yield '#'
+ yield '# Import it through //build_overrides/pigweed.gni instead.'
+ yield ''
+ yield '# Declare a build arg for each module.'
+ yield 'declare_args() {'
+
+ for module in modules:
+ module_path = prefix.joinpath(module).as_posix()
+ yield f'dir_{module} = get_path_info("{module_path}", "abspath")'
+
+ yield '}'
+ yield ''
+ yield '# Declare these as GN args in case this is imported in args.gni.'
+ yield '# Use a separate block so variables in the prior block can be used.'
+ yield 'declare_args() {'
+ yield f'# A list with paths to all Pigweed module. {_DO_NOT_SET}'
+ yield 'pw_modules = ['
+
+ for module in modules:
+ yield f'dir_{module},'
+
+ yield ']'
+ yield ''
+
+ tests, docs = _find_tests_and_docs(root, modules)
+
+ yield f'# A list with all Pigweed module test groups. {_DO_NOT_SET}'
+ yield 'pw_module_tests = ['
+ yield from tests
+ yield ']'
+ yield ''
+ yield f'# A list with all Pigweed modules docs groups. {_DO_NOT_SET}'
+ yield 'pw_module_docs = ['
+ yield from docs
+ yield ']'
+ yield ''
+ yield '}'
+
+
+def _missing_modules(root: Path, modules: Sequence[str]) -> Sequence[str]:
+ return sorted(
+ frozenset(
+ str(p.relative_to(root))
+ for p in root.glob('pw_*') if p.is_dir()) - frozenset(modules))
+
+
+def _parse_args() -> dict:
+ parser = argparse.ArgumentParser(
+ description=__doc__,
+ formatter_class=argparse.RawDescriptionHelpFormatter)
+ parser.add_argument('root', type=Path, help='Root build dir')
+ parser.add_argument('modules_list', type=Path, help='Input modules list')
+ parser.add_argument('modules_gni_file', type=Path, help='Output .gni file')
+ parser.add_argument(
+ '--warn-only',
+ type=Path,
+ help='Only check PIGWEED_MODULES; takes a path to a stamp file to use')
+
+ return vars(parser.parse_args())
+
+
+def _main(root: Path, modules_list: Path, modules_gni_file: Path,
+ warn_only: Optional[Path]) -> int:
+ prefix = Path(os.path.relpath(root, modules_gni_file.parent))
+ modules = modules_list.read_text().splitlines()
+
+ # Detect any problems with the modules list.
+ warnings = list(_module_list_warnings(root, modules))
+ errors = []
+
+ modules.sort() # Sort in case the modules list in case it wasn't sorted.
+
+ # Check if the contents of the .gni file are out of date.
+ if warn_only:
+ text = io.StringIO()
+ for line in _generate_modules_gni(root, prefix, modules):
+ print(line, file=text)
+
+ process = subprocess.run(['gn', 'format', '--stdin'],
+ input=text.getvalue().encode('utf-8'),
+ stdout=subprocess.PIPE)
+ if process.returncode != 0:
+ errors.append(_FORMAT_FAILED_WARNING)
+ elif modules_gni_file.read_bytes() != process.stdout:
+ errors.append(
+ _OUT_OF_DATE_WARNING.format(
+ out_dir=os.path.relpath(os.curdir, root),
+ file=os.path.relpath(modules_gni_file, root)))
+ elif not warnings: # Update the modules .gni file.
+ with modules_gni_file.open('w', encoding='utf-8') as file:
+ for line in _generate_modules_gni(root, prefix, modules):
+ print(line, file=file)
+
+ process = subprocess.run(['gn', 'format', modules_gni_file],
+ stdout=subprocess.DEVNULL)
+ if process.returncode != 0:
+ errors.append(_FORMAT_FAILED_WARNING)
+
+ # If there are errors, display them and abort.
+ if warnings or errors:
+ for line in warnings + errors:
+ print(line, file=sys.stderr)
+
+ # Delete the stamp so this always reruns. Deleting is necessary since
+ # some of the checks do not depend on input files.
+ if warn_only and warn_only.exists():
+ warn_only.unlink()
+
+ # Warnings are non-fatal if warn_only is True.
+ return 1 if errors or not warn_only else 0
+
+ if warn_only:
+ warn_only.touch()
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(_main(**_parse_args()))
diff --git a/pw_build/py/pw_build/python_package.py b/pw_build/py/pw_build/python_package.py
index 26c6a65c5..fa3be5407 100644
--- a/pw_build/py/pw_build/python_package.py
+++ b/pw_build/py/pw_build/python_package.py
@@ -14,10 +14,26 @@
"""Dataclass for a Python package."""
import configparser
+from contextlib import contextmanager
import copy
from dataclasses import dataclass
+import json
+import os
from pathlib import Path
-from typing import Dict, List, Optional
+import shutil
+from typing import Dict, List, Optional, Iterable
+
+import setuptools # type: ignore
+
+
+@contextmanager
+def change_working_dir(directory: Path):
+ original_dir = Path.cwd()
+ try:
+ os.chdir(directory)
+ yield directory
+ finally:
+ os.chdir(original_dir)
@dataclass
@@ -92,3 +108,60 @@ class PythonPackage:
config.read_file(config_file)
return config
return None
+
+ def setuptools_build_with_base(self,
+ build_base: Path,
+ include_tests: bool = False) -> Path:
+ # Create the lib install dir in case it doesn't exist.
+ lib_dir_path = build_base / 'lib'
+ lib_dir_path.mkdir(parents=True, exist_ok=True)
+
+ starting_directory = Path.cwd()
+ # cd to the location of setup.py
+ with change_working_dir(self.setup_dir):
+ # Run build with temp build-base location
+ # Note: New files will be placed inside lib_dir_path
+ setuptools.setup(script_args=[
+ 'build',
+ '--force',
+ '--build-base',
+ str(build_base),
+ ])
+
+ new_pkg_dir = lib_dir_path / self.package_name
+ # If tests should be included, copy them to the tests dir
+ if include_tests and self.tests:
+ test_dir_path = new_pkg_dir / 'tests'
+ test_dir_path.mkdir(parents=True, exist_ok=True)
+
+ for test_source_path in self.tests:
+ shutil.copy(starting_directory / test_source_path,
+ test_dir_path)
+
+ return lib_dir_path
+
+ def setuptools_develop(self) -> None:
+ with change_working_dir(self.setup_dir):
+ setuptools.setup(script_args=['develop'])
+
+ def setuptools_install(self) -> None:
+ with change_working_dir(self.setup_dir):
+ setuptools.setup(script_args=['install'])
+
+
+def load_packages(input_list_files: Iterable[Path]) -> List[PythonPackage]:
+ """Load Python package metadata and configs."""
+
+ packages = []
+ for input_path in input_list_files:
+
+ with input_path.open() as input_file:
+ # Each line contains the path to a json file.
+ for json_file in input_file.readlines():
+ # Load the json as a dict.
+ json_file_path = Path(json_file.strip()).resolve()
+ with json_file_path.open() as json_fp:
+ json_dict = json.load(json_fp)
+
+ packages.append(PythonPackage.from_dict(**json_dict))
+ return packages
diff --git a/pw_build/py/pw_build/python_runner.py b/pw_build/py/pw_build/python_runner.py
index 67a105ade..8591c4ffc 100755
--- a/pw_build/py/pw_build/python_runner.py
+++ b/pw_build/py/pw_build/python_runner.py
@@ -18,6 +18,7 @@ the command.
"""
import argparse
+import atexit
from dataclasses import dataclass
import enum
import logging
@@ -27,10 +28,16 @@ import re
import shlex
import subprocess
import sys
+import time
from typing import Callable, Dict, Iterable, Iterator, List, NamedTuple
from typing import Optional, Tuple
+if sys.platform != 'win32':
+ import fcntl # pylint: disable=import-error
+ # TODO(b/227670947): Support Windows.
+
_LOG = logging.getLogger(__name__)
+_LOCK_ACQUISITION_TIMEOUT = 30 * 60 # 30 minutes in seconds
def _parse_args() -> argparse.Namespace:
@@ -76,6 +83,12 @@ def _parse_args() -> argparse.Namespace:
nargs=argparse.REMAINDER,
help='Python script with arguments to run',
)
+ parser.add_argument(
+ '--lockfile',
+ type=Path,
+ required=True,
+ help=('Path to a pip lockfile. Any pip execution will acquire an '
+ 'exclusive lock on it, any other module a shared lock.'))
return parser.parse_args()
@@ -442,7 +455,57 @@ def expand_expressions(paths: GnPaths, arg: str) -> Iterable[str]:
return (''.join(arg) for arg in expanded_args if arg)
-def main(
+class LockAcquisitionTimeoutError(Exception):
+ """Raised on a timeout."""
+
+
+def acquire_lock(lockfile: Path, exclusive: bool):
+ """Attempts to acquire the lock.
+
+ Args:
+ lockfile: pathlib.Path to the lock.
+ exclusive: whether this needs to be an exclusive lock.
+
+ Raises:
+ LockAcquisitionTimeoutError: If the lock is not acquired after a
+ reasonable time.
+ """
+ if sys.platform == 'win32':
+ # No-op on Windows, which doesn't have POSIX file locking.
+ # TODO(b/227670947): Get this working on Windows, too.
+ return
+
+ start_time = time.monotonic()
+ if exclusive:
+ lock_type = fcntl.LOCK_EX # type: ignore[name-defined]
+ else:
+ lock_type = fcntl.LOCK_SH # type: ignore[name-defined]
+ fd = os.open(lockfile, os.O_RDWR | os.O_CREAT)
+
+ # Make sure we close the file when the process exits. If we manage to
+ # acquire the lock below, closing the file will release it.
+ def cleanup():
+ os.close(fd)
+
+ atexit.register(cleanup)
+
+ backoff = 1
+ while time.monotonic() - start_time < _LOCK_ACQUISITION_TIMEOUT:
+ try:
+ fcntl.flock( # type: ignore[name-defined]
+ fd, lock_type | fcntl.LOCK_NB) # type: ignore[name-defined]
+ return # Lock acquired!
+ except BlockingIOError:
+ pass # Keep waiting.
+
+ time.sleep(backoff * 0.05)
+ backoff += 1
+
+ raise LockAcquisitionTimeoutError(
+ f"Failed to acquire lock {lockfile} in {_LOCK_ACQUISITION_TIMEOUT}")
+
+
+def main( # pylint: disable=too-many-arguments
gn_root: Path,
current_path: Path,
original_cmd: List[str],
@@ -453,6 +516,7 @@ def main(
capture_output: bool,
touch: Optional[Path],
working_directory: Optional[Path],
+ lockfile: Path,
) -> int:
"""Script entry point."""
@@ -497,6 +561,12 @@ def main(
if working_directory:
run_args['cwd'] = working_directory
+ try:
+ acquire_lock(lockfile, module == 'pip')
+ except LockAcquisitionTimeoutError as exception:
+ _LOG.error('%s', exception)
+ return 1
+
_LOG.debug('RUN %s', ' '.join(shlex.quote(arg) for arg in command))
completed_process = subprocess.run(command, **run_args)
diff --git a/pw_build/python_action.gni b/pw_build/python_action.gni
index 75973cd6b..1110535cf 100644
--- a/pw_build/python_action.gni
+++ b/pw_build/python_action.gni
@@ -65,6 +65,11 @@ template("pw_python_action") {
"--current-path",
rebase_path(".", root_build_dir),
+ # pip lockfile, prevents pip from running in parallel with other Python
+ # actions.
+ "--lockfile",
+ rebase_path("$root_out_dir/pip.lock", root_build_dir),
+
"--default-toolchain=$default_toolchain",
"--current-toolchain=$current_toolchain",
]
diff --git a/pw_build/python_dist.gni b/pw_build/python_dist.gni
index 8015e8964..36f797ab0 100644
--- a/pw_build/python_dist.gni
+++ b/pw_build/python_dist.gni
@@ -110,18 +110,18 @@ template("pw_python_zip_with_setup") {
if (defined(invoker.dirs)) {
_dirs = invoker.dirs
}
+ _public_deps = []
+ if (defined(invoker.public_deps)) {
+ _public_deps = invoker.public_deps
+ }
pw_python_wheels("$target_name.wheels") {
packages = invoker.packages
- forward_variables_from(invoker,
- [
- "deps",
- "public_deps",
- ])
+ forward_variables_from(invoker, [ "deps" ])
}
pw_zip(target_name) {
- forward_variables_from(invoker, [ "public_deps" ])
+ forward_variables_from(invoker, [ "deps" ])
inputs = _inputs + [
"$dir_pw_build/python_dist/setup.bat > /${target_name}/",
"$dir_pw_build/python_dist/setup.sh > /${target_name}/",
@@ -131,7 +131,8 @@ template("pw_python_zip_with_setup") {
output = _zip_path
- deps = [ ":${_outer_name}.wheels" ]
+ # TODO(pwbug/634): Remove the plumbing-through of invoker's public_deps.
+ public_deps = _public_deps + [ ":${_outer_name}.wheels" ]
}
}
diff --git a/pw_bytes/docs.rst b/pw_bytes/docs.rst
index c2868b470..6313abb89 100644
--- a/pw_bytes/docs.rst
+++ b/pw_bytes/docs.rst
@@ -45,8 +45,8 @@ Functions for converting the endianness of integral values.
pw_bytes/units.h
----------------
-Constants and helper user-defined literals for specifying a number of bytes in
-powers of two, as defined by IEC 60027-2 A.2 and ISO/IEC 80000:13-2008.
+Constants, functions and user-defined literals for specifying a number of bytes
+in powers of two, as defined by IEC 60027-2 A.2 and ISO/IEC 80000:13-2008.
The supported suffixes include:
* ``_B`` for bytes (1024^0)
@@ -67,6 +67,17 @@ In order to use these you must use a using namespace directive, for example:
constexpr size_t kRandomBufferSizeBytes = 1_MiB + 42_KiB;
+In some cases, the use of user-defined literals is not permitted because of the
+required using namespace directive. One example of this is in header files,
+where it is undesirable to pollute the namespace. For this situation, there are
+also similar functions:
+
+.. code-block:: cpp
+
+ #include "pw_bytes/units.h"
+
+ constexpr size_t kBufferSizeBytes = pw::bytes::MiB(1) + pw::bytes::KiB(42);
+
Zephyr
======
To enable ``pw_bytes`` for Zephyr add ``CONFIG_PIGWEED_BYTES=y`` to the
diff --git a/pw_bytes/public/pw_bytes/units.h b/pw_bytes/public/pw_bytes/units.h
index 67327e558..97bb8d635 100644
--- a/pw_bytes/public/pw_bytes/units.h
+++ b/pw_bytes/public/pw_bytes/units.h
@@ -36,10 +36,46 @@ inline constexpr unsigned long long int kBytesInPebibyte = 1ull << 50;
// Exbibytes (EiB): 1024^6 or 2^60
inline constexpr unsigned long long int kBytesInExbibyte = 1ull << 60;
+// Functions for specifying a number of bytes in powers of two, as defined by
+// IEC 60027-2 A.2 and ISO/IEC 80000:13-2008.
+//
+// These are useful in headers where user-defined literals are disallowed.
+//
+// #include "pw_bytes/units.h"
+//
+// constexpr size_t kBufferSizeBytes = pw::bytes::MiB(1) + pw::bytes::KiB(42);
+inline constexpr unsigned long long int B(unsigned long long int bytes) {
+ return bytes;
+}
+
+inline constexpr unsigned long long int KiB(unsigned long long int kibibytes) {
+ return kibibytes * kBytesInKibibyte;
+}
+
+inline constexpr unsigned long long int MiB(unsigned long long int mibibytes) {
+ return mibibytes * kBytesInMibibyte;
+}
+
+inline constexpr unsigned long long int GiB(unsigned long long int gibibytes) {
+ return gibibytes * kBytesInGibibyte;
+}
+
+inline constexpr unsigned long long int TiB(unsigned long long int tebibytes) {
+ return tebibytes * kBytesInTebibyte;
+}
+
+inline constexpr unsigned long long int PiB(unsigned long long int pebibytes) {
+ return pebibytes * kBytesInPebibyte;
+}
+
+inline constexpr unsigned long long int EiB(unsigned long long int exbibytes) {
+ return exbibytes * kBytesInExbibyte;
+}
+
namespace unit_literals {
-// Helper user-defined literals for specifying a number of bytes in powers of
-// two, as defined by IEC 60027-2 A.2 and ISO/IEC 80000:13-2008.
+// User-defined literals for specifying a number of bytes in powers of two, as
+// defined by IEC 60027-2 A.2 and ISO/IEC 80000:13-2008.
//
// The supported prefixes include:
// _B for bytes (1024^0)
diff --git a/pw_bytes/units_test.cc b/pw_bytes/units_test.cc
index abb63588f..a857eafab 100644
--- a/pw_bytes/units_test.cc
+++ b/pw_bytes/units_test.cc
@@ -21,6 +21,29 @@ namespace {
using namespace pw::bytes::unit_literals;
+// Byte Function tests
+static_assert(B(1) == 1ull);
+static_assert(B(42) == 42ull);
+
+static_assert(KiB(1) == 1'024ull);
+static_assert(KiB(42) == 43'008ull);
+
+static_assert(MiB(1) == 1'048'576ull);
+static_assert(MiB(42) == 44'040'192ull);
+
+static_assert(GiB(1) == 1'073'741'824ull);
+static_assert(GiB(42) == 45'097'156'608ull);
+
+static_assert(TiB(1) == 1'099'511'627'776ull);
+static_assert(TiB(42) == 46'179'488'366'592ull);
+
+static_assert(PiB(1) == 1'125'899'906'842'624ull);
+static_assert(PiB(42) == 47'287'796'087'390'208ull);
+
+static_assert(EiB(1) == 1'152'921'504'606'846'976ull);
+static_assert(EiB(4) == 4'611'686'018'427'387'904ull);
+
+// User-defined literal tests
static_assert(1_B == 1ull);
static_assert(42_B == 42ull);
diff --git a/pw_cli/docs.rst b/pw_cli/docs.rst
index 42144f1bd..90197643f 100644
--- a/pw_cli/docs.rst
+++ b/pw_cli/docs.rst
@@ -226,16 +226,6 @@ However, there are some tools to make the process faster and easier.
into a file, and then point ``PW_BRANDING_BANNER`` at it. Most of the fonts
use normal ASCII characters; and fonts with extended ASCII characters use the
Unicode versions of them (needed for modern terminals).
-* `Online ANSII Edit by Andy Herbert
- <http://andyherbert.github.io/ansiedit/public/index.html>`_ - Browser based
- editor that can export to mixed UTF-8 and ANSII color. It's also `open source
- <https://github.com/andyherbert/ansiedit>`_. What's nice about this editor is
- that you can create a multi-color banner, and save it with the ``File`` -->
- ``Export as ANSi (UTF-8)`` option, and use it directly as a Pigweed banner.
- One caveat is that the editor uses UTF-8 box drawing characters, which don't
- work well with all terminals. However, the box drawing characters look so
- slick on terminals that support them that we feel this is a worthwhile
- tradeoff.
There are other options, but these require additional work to put into Pigweed
since they only export in the traditional ANS or ICE formats. The old ANS
diff --git a/pw_cli/py/pw_cli/arguments.py b/pw_cli/py/pw_cli/arguments.py
index ed612aac6..86ff6e266 100644
--- a/pw_cli/py/pw_cli/arguments.py
+++ b/pw_cli/py/pw_cli/arguments.py
@@ -36,7 +36,7 @@ def parse_args() -> argparse.Namespace:
def print_banner() -> None:
"""Prints the PIGWEED (or project specific) banner to stderr."""
- print(banner(), file=sys.stderr)
+ print(banner() + '\n', file=sys.stderr)
def format_help(registry: plugins.Registry) -> str:
diff --git a/pw_cli/py/pw_cli/branding.py b/pw_cli/py/pw_cli/branding.py
index 87a424ccc..81274eced 100644
--- a/pw_cli/py/pw_cli/branding.py
+++ b/pw_cli/py/pw_cli/branding.py
@@ -13,6 +13,7 @@
# the License.
"""Facilities for accessing the current Pigweed branding"""
+import operator
from typing import Optional
from pathlib import Path
@@ -31,7 +32,7 @@ _PIGWEED_BANNER = '''
'''
-def banner():
+def banner() -> str:
global _memoized_banner # pylint: disable=global-statement
if _memoized_banner is not None:
return _memoized_banner
@@ -46,10 +47,8 @@ def banner():
# Color the banner if requested.
banner_color = parsed_env.PW_BRANDING_BANNER_COLOR
if banner_color != '':
- _memoized_banner = getattr(
- pw_cli.color.colors(),
- banner_color,
- str,
- )(_memoized_banner)
+ set_color = operator.attrgetter(banner_color)(pw_cli.color.colors())
+ _memoized_banner = '\n'.join(
+ set_color(line) for line in _memoized_banner.splitlines())
return _memoized_banner
diff --git a/pw_cli/py/pw_cli/log.py b/pw_cli/py/pw_cli/log.py
index efe4f4034..3ef755470 100644
--- a/pw_cli/py/pw_cli/log.py
+++ b/pw_cli/py/pw_cli/log.py
@@ -139,7 +139,10 @@ def install(level: Union[str, int] = logging.INFO,
_setup_handler(_STDERR_HANDLER, formatter, level, logger)
if log_file:
- _setup_handler(logging.FileHandler(log_file), formatter, level, logger)
+ # Set utf-8 encoding for the log file. Encoding errors may come up on
+ # Windows if the default system encoding is set to cp1250.
+ _setup_handler(logging.FileHandler(log_file, encoding='utf-8'),
+ formatter, level, logger)
# Since we're using a file, filter logs out of the stderr handler.
_STDERR_HANDLER.setLevel(logging.CRITICAL + 1)
diff --git a/pw_console/BUILD.gn b/pw_console/BUILD.gn
index 119377c4b..712e74a6b 100644
--- a/pw_console/BUILD.gn
+++ b/pw_console/BUILD.gn
@@ -18,12 +18,15 @@ import("$dir_pw_docgen/docs.gni")
pw_doc_group("docs") {
inputs = [
- "images/serial_debug.svg",
"images/calculator_plugin.png",
"images/clock_plugin1.png",
"images/clock_plugin2.png",
- "py/pw_console/plugins/clock_pane.py",
+ "images/command_runner_main_menu.svg",
+ "images/pw_system_boot.png",
+ "images/python_completion.png",
+ "images/serial_debug.svg",
"py/pw_console/plugins/calc_pane.py",
+ "py/pw_console/plugins/clock_pane.py",
]
sources = [
"docs.rst",
diff --git a/pw_console/docs.rst b/pw_console/docs.rst
index 52f882d9d..0c7ffc3e0 100644
--- a/pw_console/docs.rst
+++ b/pw_console/docs.rst
@@ -6,12 +6,15 @@ pw_console
:bdg-primary:`host`
:bdg-secondary:`Python`
-:bdg-warning:`unstable`
+:bdg-success:`stable`
The Pigweed Console provides a Python repl (read eval print loop) using
`ptpython`_ and a log message viewer in a single-window terminal based
interface. It is designed to be a replacement for `IPython's embed()`_ function.
+.. figure:: images/pw_system_boot.png
+ :alt: Pigweed Console screenshot with serial debug log messages.
+
Features
========
@@ -22,6 +25,9 @@ devices using :ref:`module-pw_rpc` over a :ref:`module-pw_hdlc` transport.
provides interactive RPC sending while the log viewer provides immediate
feedback on device status.
+ .. figure:: images/python_completion.png
+ :alt: Pigweed Console screenshot showing RPC Python repl completions.
+
- Easily embeddable within a project's own custom console. This should allow
users to define their own transport layer.
diff --git a/pw_console/embedding.rst b/pw_console/embedding.rst
index bd5973b37..aad4d106e 100644
--- a/pw_console/embedding.rst
+++ b/pw_console/embedding.rst
@@ -8,13 +8,21 @@ Embedding Guide
Using embed()
-------------
``pw console`` is invoked by calling ``PwConsoleEmbed().embed()`` in your
-own Python script.
+own Python script. For a complete example of an embedded device console script see
+:bdg-link-primary-line:`pw_system/py/pw_system/console.py <https://cs.opensource.google/pigweed/pigweed/+/main:pw_system/py/pw_system/console.py>`.
.. automodule:: pw_console.embed
:members: PwConsoleEmbed
:undoc-members:
:show-inheritance:
+.. _module-pw_console-embedding-logstore:
+
+.. autoclass:: pw_console.log_store.LogStore
+ :members: __init__
+ :undoc-members:
+ :show-inheritance:
+
.. _module-pw_console-embedding-plugins:
Adding Plugins
diff --git a/pw_console/images/command_runner_main_menu.svg b/pw_console/images/command_runner_main_menu.svg
new file mode 100644
index 000000000..4ef98ef2a
--- /dev/null
+++ b/pw_console/images/command_runner_main_menu.svg
@@ -0,0 +1,1984 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="1428pt" height="682pt" viewBox="0 0 1428 682" version="1.1">
+<defs>
+<g>
+<symbol overflow="visible" id="glyph0-0">
+<path style="stroke:none;" d="M 3.203125 -1.046875 L 10.796875 -1.046875 L 10.796875 -19.8125 L 3.203125 -19.8125 Z M 1.265625 0.875 L 1.265625 -21.734375 L 12.734375 -21.734375 L 12.734375 0.875 Z M 7.59375 -9.09375 L 7.59375 -8.375 L 5.671875 -8.375 L 5.671875 -9.109375 C 5.671875 -9.796875 5.878906 -10.441406 6.296875 -11.046875 L 7.828125 -13.296875 C 8.097656 -13.679688 8.234375 -14.035156 8.234375 -14.359375 C 8.234375 -14.816406 8.097656 -15.207031 7.828125 -15.53125 C 7.566406 -15.851562 7.210938 -16.015625 6.765625 -16.015625 C 5.960938 -16.015625 5.34375 -15.484375 4.90625 -14.421875 L 3.34375 -15.34375 C 4.1875 -16.957031 5.332031 -17.765625 6.78125 -17.765625 C 7.75 -17.765625 8.550781 -17.4375 9.1875 -16.78125 C 9.820312 -16.132812 10.140625 -15.320312 10.140625 -14.34375 C 10.140625 -13.644531 9.898438 -12.941406 9.421875 -12.234375 L 7.8125 -9.890625 C 7.664062 -9.679688 7.59375 -9.414062 7.59375 -9.09375 Z M 6.78125 -6.75 C 7.144531 -6.75 7.457031 -6.617188 7.71875 -6.359375 C 7.988281 -6.109375 8.125 -5.796875 8.125 -5.421875 C 8.125 -5.046875 7.988281 -4.726562 7.71875 -4.46875 C 7.457031 -4.207031 7.144531 -4.078125 6.78125 -4.078125 C 6.394531 -4.078125 6.070312 -4.207031 5.8125 -4.46875 C 5.5625 -4.726562 5.4375 -5.046875 5.4375 -5.421875 C 5.4375 -5.796875 5.5625 -6.109375 5.8125 -6.359375 C 6.070312 -6.617188 6.394531 -6.75 6.78125 -6.75 Z M 6.78125 -6.75 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-1">
+<path style="stroke:none;" d=""/>
+</symbol>
+<symbol overflow="visible" id="glyph0-2">
+<path style="stroke:none;" d="M 11.546875 1.8125 L 4.296875 1.8125 L 4.296875 -22.671875 L 11.546875 -22.671875 L 11.546875 -20.609375 L 6.625 -20.609375 L 6.625 -0.265625 L 11.546875 -0.265625 Z M 11.546875 1.8125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-3">
+<path style="stroke:none;" d="M 12.8125 -18.875 L 5.25 -18.875 L 5.25 -12.125 L 11.328125 -12.125 L 11.328125 -10.140625 L 5.25 -10.140625 L 5.25 0 L 2.921875 0 L 2.921875 -20.859375 L 12.8125 -20.859375 Z M 12.8125 -18.875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-4">
+<path style="stroke:none;" d="M 8.484375 -20.734375 C 8.484375 -20.234375 8.3125 -19.804688 7.96875 -19.453125 C 7.632812 -19.109375 7.210938 -18.9375 6.703125 -18.9375 C 6.191406 -18.9375 5.765625 -19.109375 5.421875 -19.453125 C 5.078125 -19.804688 4.90625 -20.234375 4.90625 -20.734375 C 4.90625 -21.222656 5.078125 -21.640625 5.421875 -21.984375 C 5.765625 -22.335938 6.191406 -22.515625 6.703125 -22.515625 C 7.210938 -22.515625 7.632812 -22.335938 7.96875 -21.984375 C 8.3125 -21.640625 8.484375 -21.222656 8.484375 -20.734375 Z M 12 0 L 2.25 0 L 2.25 -1.984375 L 5.96875 -1.984375 L 5.96875 -13.5 L 3.296875 -13.5 L 3.296875 -15.484375 L 8.296875 -15.484375 L 8.296875 -1.984375 L 12 -1.984375 Z M 12 0 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-5">
+<path style="stroke:none;" d="M 11.875 0 L 2.125 0 L 2.125 -1.984375 L 5.84375 -1.984375 L 5.84375 -18.875 L 3.1875 -18.875 L 3.1875 -20.859375 L 8.171875 -20.859375 L 8.171875 -1.984375 L 11.875 -1.984375 Z M 11.875 0 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-6">
+<path style="stroke:none;" d="M 12.203125 -7.28125 L 4.109375 -7.28125 L 4.109375 -6.8125 C 4.109375 -4.8125 4.398438 -3.484375 4.984375 -2.828125 C 5.566406 -2.171875 6.359375 -1.84375 7.359375 -1.84375 C 8.628906 -1.84375 9.644531 -2.488281 10.40625 -3.78125 L 12.296875 -2.578125 C 11.109375 -0.679688 9.410156 0.265625 7.203125 0.265625 C 5.628906 0.265625 4.332031 -0.296875 3.3125 -1.421875 C 2.289062 -2.554688 1.78125 -4.351562 1.78125 -6.8125 L 1.78125 -8.6875 C 1.78125 -11.132812 2.289062 -12.925781 3.3125 -14.0625 C 4.332031 -15.195312 5.5625 -15.765625 7 -15.765625 C 8.53125 -15.765625 9.78125 -15.238281 10.75 -14.1875 C 11.71875 -13.144531 12.203125 -11.441406 12.203125 -9.078125 Z M 9.890625 -9.328125 C 9.890625 -10.898438 9.617188 -12.015625 9.078125 -12.671875 C 8.535156 -13.328125 7.84375 -13.65625 7 -13.65625 C 6.195312 -13.65625 5.515625 -13.328125 4.953125 -12.671875 C 4.390625 -12.015625 4.109375 -10.898438 4.109375 -9.328125 Z M 9.890625 -9.328125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-7">
+<path style="stroke:none;" d="M 9.703125 1.8125 L 2.46875 1.8125 L 2.46875 -0.265625 L 7.375 -0.265625 L 7.375 -20.609375 L 2.46875 -20.609375 L 2.46875 -22.671875 L 9.703125 -22.671875 Z M 9.703125 1.8125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-8">
+<path style="stroke:none;" d="M 12.203125 0 L 2.578125 0 L 2.578125 -20.859375 L 12.203125 -20.859375 L 12.203125 -18.875 L 4.90625 -18.875 L 4.90625 -12.125 L 10.296875 -12.125 L 10.296875 -10.140625 L 4.90625 -10.140625 L 4.90625 -1.984375 L 12.203125 -1.984375 Z M 12.203125 0 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-9">
+<path style="stroke:none;" d="M 1.59375 -8.8125 C 1.59375 -11.28125 2.03125 -13.054688 2.90625 -14.140625 C 3.789062 -15.222656 4.847656 -15.765625 6.078125 -15.765625 C 7.671875 -15.765625 8.875 -15.046875 9.6875 -13.609375 L 9.6875 -20.859375 L 12 -20.859375 L 12 0 L 9.6875 0 L 9.6875 -1.859375 C 8.875 -0.441406 7.671875 0.265625 6.078125 0.265625 C 4.847656 0.265625 3.789062 -0.269531 2.90625 -1.34375 C 2.03125 -2.425781 1.59375 -4.203125 1.59375 -6.671875 Z M 3.984375 -6.671875 C 3.984375 -4.878906 4.207031 -3.625 4.65625 -2.90625 C 5.113281 -2.195312 5.78125 -1.84375 6.65625 -1.84375 C 8.1875 -1.84375 9.195312 -2.867188 9.6875 -4.921875 L 9.6875 -10.4375 C 9.207031 -12.582031 8.195312 -13.65625 6.65625 -13.65625 C 5.78125 -13.65625 5.113281 -13.296875 4.65625 -12.578125 C 4.207031 -11.867188 3.984375 -10.613281 3.984375 -8.8125 Z M 3.984375 -6.671875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-10">
+<path style="stroke:none;" d="M 12.953125 -2.15625 C 11.867188 -0.539062 10.414062 0.265625 8.59375 0.265625 C 7.394531 0.265625 6.382812 -0.140625 5.5625 -0.953125 C 4.75 -1.773438 4.34375 -2.984375 4.34375 -4.578125 L 4.34375 -13.5 L 1.75 -13.5 L 1.75 -15.484375 L 4.34375 -15.484375 L 4.34375 -20.859375 L 6.671875 -20.859375 L 6.671875 -15.484375 L 11.890625 -15.484375 L 11.890625 -13.5 L 6.671875 -13.5 L 6.671875 -4.4375 C 6.671875 -3.582031 6.851562 -2.9375 7.21875 -2.5 C 7.582031 -2.0625 8.085938 -1.84375 8.734375 -1.84375 C 9.804688 -1.84375 10.625 -2.378906 11.1875 -3.453125 Z M 12.953125 -2.15625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-11">
+<path style="stroke:none;" d="M 5.6875 0 L 3.109375 -9.609375 C 1.941406 -13.910156 1.359375 -17.273438 1.359375 -19.703125 L 1.359375 -20.859375 L 3.6875 -20.859375 L 3.6875 -19.703125 C 3.6875 -17.691406 4.125 -14.804688 5 -11.046875 L 7 -2.609375 L 9 -11.046875 C 9.875 -14.804688 10.3125 -17.691406 10.3125 -19.703125 L 10.3125 -20.859375 L 12.640625 -20.859375 L 12.640625 -19.703125 C 12.640625 -17.273438 12.054688 -13.910156 10.890625 -9.609375 L 8.3125 0 Z M 5.6875 0 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-12">
+<path style="stroke:none;" d="M 9.578125 -1.28125 L 10.125 -6.296875 C 10.351562 -8.390625 10.46875 -10.53125 10.46875 -12.71875 L 10.46875 -15.484375 L 12.75 -15.484375 L 12.75 -13.421875 C 12.75 -11.554688 12.484375 -9.179688 11.953125 -6.296875 L 10.796875 0 L 8.609375 0 L 7 -6.96875 L 5.390625 0 L 3.203125 0 L 2.046875 -6.296875 C 1.523438 -9.179688 1.265625 -11.554688 1.265625 -13.421875 L 1.265625 -15.484375 L 3.53125 -15.484375 L 3.53125 -12.71875 C 3.53125 -10.53125 3.644531 -8.390625 3.875 -6.296875 L 4.421875 -1.28125 L 6.265625 -9.25 L 7.734375 -9.25 Z M 9.578125 -1.28125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-13">
+<path style="stroke:none;" d="M 9.734375 -1.265625 L 10.09375 -6.1875 C 10.226562 -8.007812 10.296875 -10.070312 10.296875 -12.375 L 10.296875 -20.859375 L 12.625 -20.859375 L 12.625 -15.828125 C 12.625 -12.398438 12.375 -9.1875 11.875 -6.1875 L 10.90625 0 L 8.71875 0 L 7 -8.078125 L 5.28125 0 L 3.09375 0 L 2.125 -6.1875 C 1.625 -9.1875 1.375 -12.398438 1.375 -15.828125 L 1.375 -20.859375 L 3.703125 -20.859375 L 3.703125 -12.375 C 3.703125 -10.070312 3.769531 -8.007812 3.90625 -6.1875 L 4.265625 -1.265625 L 6.265625 -10.671875 L 7.734375 -10.671875 Z M 9.734375 -1.265625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-14">
+<path style="stroke:none;" d="M 12.015625 0 L 9.6875 0 L 9.6875 -10.03125 C 9.6875 -11.375 9.476562 -12.304688 9.0625 -12.828125 C 8.644531 -13.359375 8.070312 -13.625 7.34375 -13.625 C 5.8125 -13.625 4.800781 -12.570312 4.3125 -10.46875 L 4.3125 0 L 1.984375 0 L 1.984375 -15.484375 L 4.3125 -15.484375 L 4.3125 -13.5 C 5.300781 -15.007812 6.503906 -15.765625 7.921875 -15.765625 C 9.066406 -15.765625 10.035156 -15.335938 10.828125 -14.484375 C 11.617188 -13.628906 12.015625 -12.191406 12.015625 -10.171875 Z M 12.015625 0 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-15">
+<path style="stroke:none;" d="M 12.21875 -6.703125 C 12.21875 -4.328125 11.726562 -2.570312 10.75 -1.4375 C 9.78125 -0.300781 8.53125 0.265625 7 0.265625 C 5.46875 0.265625 4.21875 -0.300781 3.25 -1.4375 C 2.28125 -2.570312 1.796875 -4.328125 1.796875 -6.703125 L 1.796875 -8.8125 C 1.796875 -11.175781 2.28125 -12.925781 3.25 -14.0625 C 4.21875 -15.195312 5.46875 -15.765625 7 -15.765625 C 8.53125 -15.765625 9.78125 -15.195312 10.75 -14.0625 C 11.726562 -12.925781 12.21875 -11.175781 12.21875 -8.8125 Z M 9.90625 -6.703125 L 9.90625 -8.8125 C 9.90625 -10.632812 9.628906 -11.894531 9.078125 -12.59375 C 8.535156 -13.300781 7.84375 -13.65625 7 -13.65625 C 6.15625 -13.65625 5.460938 -13.300781 4.921875 -12.59375 C 4.378906 -11.894531 4.109375 -10.632812 4.109375 -8.8125 L 4.109375 -6.703125 C 4.109375 -4.867188 4.378906 -3.597656 4.921875 -2.890625 C 5.460938 -2.191406 6.15625 -1.84375 7 -1.84375 C 7.84375 -1.84375 8.535156 -2.191406 9.078125 -2.890625 C 9.628906 -3.597656 9.90625 -4.867188 9.90625 -6.703125 Z M 9.90625 -6.703125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-16">
+<path style="stroke:none;" d="M 7.0625 -1.796875 C 8.019531 -1.796875 8.75 -2.023438 9.25 -2.484375 C 9.75 -2.941406 10 -3.5 10 -4.15625 C 10 -5.0625 9.445312 -5.765625 8.34375 -6.265625 L 5.359375 -7.625 C 3.503906 -8.5 2.578125 -9.75 2.578125 -11.375 C 2.578125 -12.644531 3.03125 -13.691406 3.9375 -14.515625 C 4.851562 -15.347656 6.035156 -15.765625 7.484375 -15.765625 C 9.546875 -15.765625 11.128906 -14.8125 12.234375 -12.90625 L 10.34375 -11.765625 C 9.757812 -13.054688 8.804688 -13.703125 7.484375 -13.703125 C 6.691406 -13.703125 6.0625 -13.5 5.59375 -13.09375 C 5.132812 -12.695312 4.90625 -12.203125 4.90625 -11.609375 C 4.90625 -10.816406 5.421875 -10.175781 6.453125 -9.6875 L 9.265625 -8.375 C 11.296875 -7.425781 12.3125 -5.984375 12.3125 -4.046875 C 12.3125 -2.898438 11.835938 -1.894531 10.890625 -1.03125 C 9.941406 -0.164062 8.65625 0.265625 7.03125 0.265625 C 4.75 0.265625 2.96875 -0.789062 1.6875 -2.90625 L 3.59375 -4.0625 C 4.351562 -2.550781 5.507812 -1.796875 7.0625 -1.796875 Z M 7.0625 -1.796875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-17">
+<path style="stroke:none;" d="M 12.015625 0 L 9.6875 0 L 9.6875 -10.4375 L 4.3125 -10.4375 L 4.3125 0 L 1.984375 0 L 1.984375 -20.859375 L 4.3125 -20.859375 L 4.3125 -12.421875 L 9.6875 -12.421875 L 9.6875 -20.859375 L 12.015625 -20.859375 Z M 12.015625 0 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-18">
+<path style="stroke:none;" d="M 12.421875 -6.65625 C 12.421875 -4.21875 11.953125 -2.453125 11.015625 -1.359375 C 10.085938 -0.273438 9.054688 0.265625 7.921875 0.265625 C 6.328125 0.265625 5.128906 -0.457031 4.328125 -1.90625 L 4.328125 4.828125 L 2 4.828125 L 2 -15.484375 L 4.328125 -15.484375 L 4.328125 -13.640625 C 5.128906 -15.054688 6.328125 -15.765625 7.921875 -15.765625 C 9.054688 -15.765625 10.085938 -15.222656 11.015625 -14.140625 C 11.953125 -13.066406 12.421875 -11.289062 12.421875 -8.8125 Z M 10.015625 -8.8125 C 10.015625 -10.625 9.765625 -11.882812 9.265625 -12.59375 C 8.765625 -13.300781 8.125 -13.65625 7.34375 -13.65625 C 5.8125 -13.65625 4.804688 -12.59375 4.328125 -10.46875 L 4.328125 -5.046875 C 4.796875 -2.910156 5.800781 -1.84375 7.34375 -1.84375 C 8.125 -1.84375 8.765625 -2.203125 9.265625 -2.921875 C 9.765625 -3.640625 10.015625 -4.882812 10.015625 -6.65625 Z M 10.015625 -8.8125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-19">
+<path style="stroke:none;" d="M 12.953125 -3.09375 C 11.648438 -0.851562 9.878906 0.265625 7.640625 0.265625 C 6.128906 0.265625 4.851562 -0.238281 3.8125 -1.25 C 2.769531 -2.269531 2.25 -4.132812 2.25 -6.84375 L 2.25 -14.046875 C 2.25 -16.679688 2.769531 -18.523438 3.8125 -19.578125 C 4.851562 -20.640625 6.117188 -21.171875 7.609375 -21.171875 C 9.953125 -21.171875 11.6875 -20.039062 12.8125 -17.78125 L 10.90625 -16.625 C 10.21875 -18.238281 9.125 -19.046875 7.625 -19.046875 C 6.757812 -19.046875 6.035156 -18.726562 5.453125 -18.09375 C 4.878906 -17.457031 4.59375 -16.109375 4.59375 -14.046875 L 4.59375 -6.84375 C 4.59375 -4.78125 4.878906 -3.429688 5.453125 -2.796875 C 6.035156 -2.160156 6.757812 -1.84375 7.625 -1.84375 C 9.125 -1.84375 10.265625 -2.6875 11.046875 -4.375 Z M 12.953125 -3.09375 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-20">
+<path style="stroke:none;" d="M 12.453125 -18.875 L 8.15625 -18.875 L 8.15625 0 L 5.84375 0 L 5.84375 -18.875 L 1.546875 -18.875 L 1.546875 -20.859375 L 12.453125 -20.859375 Z M 12.453125 -18.875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-21">
+<path style="stroke:none;" d="M 12.328125 0 L 10.015625 0 L 10.015625 -5.515625 C 10.015625 -9.023438 10.148438 -13.148438 10.421875 -17.890625 L 7.71875 -9.25 L 6.28125 -9.25 L 3.578125 -17.890625 C 3.859375 -13.148438 4 -9.023438 4 -5.515625 L 4 0 L 1.671875 0 L 1.671875 -20.859375 L 4 -20.859375 L 7 -12.203125 L 10.015625 -20.859375 L 12.328125 -20.859375 Z M 12.328125 0 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-22">
+<path style="stroke:none;" d="M 12.53125 -4.875 C 12.53125 -3.457031 12.054688 -2.242188 11.109375 -1.234375 C 10.171875 -0.234375 8.867188 0.265625 7.203125 0.265625 C 4.578125 0.265625 2.660156 -0.878906 1.453125 -3.171875 L 3.171875 -4.46875 C 4.078125 -2.71875 5.414062 -1.84375 7.1875 -1.84375 C 8.007812 -1.84375 8.71875 -2.09375 9.3125 -2.59375 C 9.90625 -3.101562 10.203125 -3.859375 10.203125 -4.859375 C 10.203125 -5.671875 9.691406 -6.554688 8.671875 -7.515625 L 4.1875 -11.703125 C 2.9375 -12.878906 2.3125 -14.359375 2.3125 -16.140625 C 2.3125 -17.554688 2.769531 -18.75 3.6875 -19.71875 C 4.613281 -20.6875 5.878906 -21.171875 7.484375 -21.171875 C 9.566406 -21.171875 11.253906 -20.21875 12.546875 -18.3125 L 10.765625 -16.90625 C 9.992188 -18.332031 8.90625 -19.046875 7.5 -19.046875 C 6.6875 -19.046875 6.007812 -18.8125 5.46875 -18.34375 C 4.925781 -17.882812 4.65625 -17.148438 4.65625 -16.140625 C 4.65625 -15.109375 5.0625 -14.210938 5.875 -13.453125 L 10.34375 -9.296875 C 11.800781 -7.910156 12.53125 -6.4375 12.53125 -4.875 Z M 12.53125 -4.875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-23">
+<path style="stroke:none;" d="M 11.671875 0 L 9.359375 0 L 9.359375 -1.75 C 8.367188 -0.40625 7.125 0.265625 5.625 0.265625 C 4.351562 0.265625 3.328125 -0.164062 2.546875 -1.03125 C 1.765625 -1.90625 1.375 -3 1.375 -4.3125 C 1.375 -5.65625 1.863281 -6.78125 2.84375 -7.6875 C 3.820312 -8.601562 5.253906 -9.0625 7.140625 -9.0625 L 9.359375 -9.0625 L 9.359375 -10.734375 C 9.359375 -12.679688 8.429688 -13.65625 6.578125 -13.65625 C 5.210938 -13.65625 4.191406 -13.054688 3.515625 -11.859375 L 1.75 -13.171875 C 2.96875 -14.898438 4.582031 -15.765625 6.59375 -15.765625 C 8.070312 -15.765625 9.285156 -15.335938 10.234375 -14.484375 C 11.191406 -13.640625 11.671875 -12.390625 11.671875 -10.734375 Z M 9.359375 -3.640625 L 9.359375 -7.1875 L 7.140625 -7.1875 C 6.023438 -7.1875 5.1875 -6.925781 4.625 -6.40625 C 4.0625 -5.894531 3.78125 -5.195312 3.78125 -4.3125 C 3.78125 -2.664062 4.582031 -1.84375 6.1875 -1.84375 C 7.457031 -1.84375 8.515625 -2.441406 9.359375 -3.640625 Z M 9.359375 -3.640625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-24">
+<path style="stroke:none;" d="M 13 -12.859375 L 10.984375 -11.96875 C 10.523438 -13.09375 9.753906 -13.65625 8.671875 -13.65625 C 6.390625 -13.65625 5.25 -11.535156 5.25 -7.296875 L 5.25 0 L 2.921875 0 L 2.921875 -15.484375 L 5.25 -15.484375 L 5.25 -13 C 6.164062 -14.84375 7.425781 -15.765625 9.03125 -15.765625 C 10.894531 -15.765625 12.21875 -14.796875 13 -12.859375 Z M 13 -12.859375 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-25">
+<path style="stroke:none;" d="M 12.84375 -2.578125 C 11.65625 -0.679688 9.957031 0.265625 7.75 0.265625 C 6.164062 0.265625 4.863281 -0.296875 3.84375 -1.421875 C 2.820312 -2.554688 2.3125 -4.347656 2.3125 -6.796875 L 2.3125 -8.703125 C 2.3125 -11.140625 2.820312 -12.925781 3.84375 -14.0625 C 4.863281 -15.195312 6.164062 -15.765625 7.75 -15.765625 C 9.957031 -15.765625 11.65625 -14.8125 12.84375 -12.90625 L 10.953125 -11.71875 C 10.179688 -13.007812 9.160156 -13.65625 7.890625 -13.65625 C 6.898438 -13.65625 6.113281 -13.328125 5.53125 -12.671875 C 4.945312 -12.015625 4.65625 -10.691406 4.65625 -8.703125 L 4.65625 -6.796875 C 4.65625 -4.804688 4.945312 -3.484375 5.53125 -2.828125 C 6.113281 -2.171875 6.898438 -1.84375 7.890625 -1.84375 C 9.160156 -1.84375 10.179688 -2.488281 10.953125 -3.78125 Z M 12.84375 -2.578125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-26">
+<path style="stroke:none;" d="M 12.015625 0 L 9.6875 0 L 9.6875 -10.015625 C 9.6875 -11.398438 9.476562 -12.351562 9.0625 -12.875 C 8.644531 -13.394531 8.070312 -13.65625 7.34375 -13.65625 C 5.8125 -13.65625 4.800781 -12.59375 4.3125 -10.46875 L 4.3125 0 L 1.984375 0 L 1.984375 -20.859375 L 4.3125 -20.859375 L 4.3125 -13.625 C 5.300781 -15.050781 6.503906 -15.765625 7.921875 -15.765625 C 9.066406 -15.765625 10.035156 -15.335938 10.828125 -14.484375 C 11.617188 -13.628906 12.015625 -12.179688 12.015625 -10.140625 Z M 12.015625 0 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-27">
+<path style="stroke:none;" d="M 12.015625 0 L 9.6875 0 L 9.6875 -2.015625 C 8.695312 -0.492188 7.492188 0.265625 6.078125 0.265625 C 4.929688 0.265625 3.960938 -0.164062 3.171875 -1.03125 C 2.378906 -1.90625 1.984375 -3.460938 1.984375 -5.703125 L 1.984375 -15.484375 L 4.3125 -15.484375 L 4.3125 -5.703125 C 4.3125 -4.203125 4.519531 -3.179688 4.9375 -2.640625 C 5.351562 -2.109375 5.925781 -1.84375 6.65625 -1.84375 C 8.1875 -1.84375 9.195312 -2.851562 9.6875 -4.875 L 9.6875 -15.484375 L 12.015625 -15.484375 Z M 12.015625 0 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-28">
+<path style="stroke:none;" d="M 12.28125 -7.296875 L 1.71875 -7.296875 L 1.71875 -9.28125 L 12.28125 -9.28125 Z M 12.28125 -7.296875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-29">
+<path style="stroke:none;" d="M 12.8125 0 L 10.484375 0 C 10.410156 -1.132812 9.851562 -2.460938 8.8125 -3.984375 L 6.5 -7.28125 L 4.765625 -5.1875 L 4.765625 0 L 2.453125 0 L 2.453125 -20.859375 L 4.765625 -20.859375 L 4.765625 -8.046875 L 8.1875 -12.25 C 9.332031 -13.65625 9.914062 -14.734375 9.9375 -15.484375 L 12.265625 -15.484375 L 12.234375 -14.96875 C 12.191406 -14.15625 11.421875 -12.835938 9.921875 -11.015625 L 8.0625 -8.734375 L 10.46875 -5.25 C 11.9375 -3.125 12.703125 -1.554688 12.765625 -0.546875 Z M 12.8125 0 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-30">
+<path style="stroke:none;" d="M 4.96875 0 L 2.640625 0 L 2.640625 -20.859375 L 7.1875 -20.859375 C 9.039062 -20.859375 10.441406 -20.351562 11.390625 -19.34375 C 12.335938 -18.34375 12.8125 -17.003906 12.8125 -15.328125 C 12.8125 -13.441406 12.289062 -11.992188 11.25 -10.984375 C 10.207031 -9.972656 8.851562 -9.46875 7.1875 -9.46875 L 4.96875 -9.46875 Z M 4.96875 -11.453125 L 7.171875 -11.453125 C 8.242188 -11.453125 9.050781 -11.722656 9.59375 -12.265625 C 10.132812 -12.804688 10.40625 -13.828125 10.40625 -15.328125 C 10.40625 -16.609375 10.15625 -17.519531 9.65625 -18.0625 C 9.164062 -18.601562 8.335938 -18.875 7.171875 -18.875 L 4.96875 -18.875 Z M 4.96875 -11.453125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-31">
+<path style="stroke:none;" d="M 12.671875 -8.5625 C 12.671875 -5.832031 12.046875 -3.722656 10.796875 -2.234375 C 9.554688 -0.742188 7.875 0 5.75 0 L 2.296875 0 L 2.296875 -20.859375 L 5.75 -20.859375 C 7.875 -20.859375 9.554688 -20.15625 10.796875 -18.75 C 12.046875 -17.351562 12.671875 -15.300781 12.671875 -12.59375 Z M 10.28125 -8.5625 L 10.28125 -12.59375 C 10.28125 -14.71875 9.875 -16.296875 9.0625 -17.328125 C 8.25 -18.359375 7.144531 -18.875 5.75 -18.875 L 4.609375 -18.875 L 4.609375 -1.984375 L 5.75 -1.984375 C 7.144531 -1.984375 8.25 -2.535156 9.0625 -3.640625 C 9.875 -4.742188 10.28125 -6.382812 10.28125 -8.5625 Z M 10.28125 -8.5625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-32">
+<path style="stroke:none;" d="M 12.421875 -6.671875 C 12.421875 -4.203125 11.976562 -2.425781 11.09375 -1.34375 C 10.207031 -0.269531 9.148438 0.265625 7.921875 0.265625 C 6.328125 0.265625 5.128906 -0.441406 4.328125 -1.859375 L 4.328125 0 L 2 0 L 2 -20.859375 L 4.328125 -20.859375 L 4.328125 -13.609375 C 5.128906 -15.046875 6.328125 -15.765625 7.921875 -15.765625 C 9.148438 -15.765625 10.207031 -15.222656 11.09375 -14.140625 C 11.976562 -13.054688 12.421875 -11.28125 12.421875 -8.8125 Z M 10.015625 -6.671875 L 10.015625 -8.8125 C 10.015625 -10.613281 9.785156 -11.867188 9.328125 -12.578125 C 8.878906 -13.296875 8.21875 -13.65625 7.34375 -13.65625 C 5.800781 -13.65625 4.796875 -12.625 4.328125 -10.5625 L 4.328125 -4.921875 C 4.804688 -2.867188 5.8125 -1.84375 7.34375 -1.84375 C 8.21875 -1.84375 8.878906 -2.195312 9.328125 -2.90625 C 9.785156 -3.625 10.015625 -4.878906 10.015625 -6.671875 Z M 10.015625 -6.671875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-33">
+<path style="stroke:none;" d="M 13.046875 -13.625 L 12.234375 -13.625 L 10.984375 -13.75 C 11.671875 -12.90625 12.015625 -12.046875 12.015625 -11.171875 C 12.015625 -9.972656 11.582031 -8.976562 10.71875 -8.1875 C 9.851562 -7.394531 8.671875 -7 7.171875 -7 C 6.609375 -7 6.097656 -7 5.640625 -7 C 4.898438 -6.519531 4.53125 -6.054688 4.53125 -5.609375 C 4.53125 -5.140625 4.675781 -4.789062 4.96875 -4.5625 C 5.257812 -4.34375 5.738281 -4.234375 6.40625 -4.234375 L 7.703125 -4.234375 C 9.628906 -4.234375 11.023438 -3.820312 11.890625 -3 C 12.753906 -2.1875 13.1875 -1.15625 13.1875 0.09375 C 13.1875 1.5 12.632812 2.660156 11.53125 3.578125 C 10.4375 4.492188 8.988281 4.953125 7.1875 4.953125 C 5.3125 4.953125 3.882812 4.523438 2.90625 3.671875 C 1.9375 2.828125 1.453125 1.800781 1.453125 0.59375 C 1.453125 -0.988281 2.25 -2.1875 3.84375 -3 C 2.664062 -3.488281 2.078125 -4.238281 2.078125 -5.25 C 2.078125 -6.269531 2.703125 -7.132812 3.953125 -7.84375 C 2.671875 -8.707031 2.03125 -9.898438 2.03125 -11.421875 C 2.03125 -12.679688 2.484375 -13.707031 3.390625 -14.5 C 4.296875 -15.300781 5.554688 -15.703125 7.171875 -15.703125 C 7.648438 -15.703125 8.804688 -15.628906 10.640625 -15.484375 L 13.046875 -15.484375 Z M 9.78125 -11.4375 C 9.78125 -12.050781 9.550781 -12.59375 9.09375 -13.0625 C 8.632812 -13.53125 7.953125 -13.765625 7.046875 -13.765625 C 6.109375 -13.765625 5.414062 -13.539062 4.96875 -13.09375 C 4.519531 -12.65625 4.296875 -12.085938 4.296875 -11.390625 C 4.296875 -10.585938 4.53125 -9.972656 5 -9.546875 C 5.476562 -9.128906 6.144531 -8.921875 7 -8.921875 C 7.925781 -8.921875 8.617188 -9.140625 9.078125 -9.578125 C 9.546875 -10.015625 9.78125 -10.632812 9.78125 -11.4375 Z M 10.78125 0.09375 C 10.78125 -0.59375 10.554688 -1.132812 10.109375 -1.53125 C 9.660156 -1.9375 8.859375 -2.140625 7.703125 -2.140625 L 6.765625 -2.140625 C 5.710938 -2.140625 4.960938 -1.898438 4.515625 -1.421875 C 4.066406 -0.941406 3.84375 -0.328125 3.84375 0.421875 C 3.84375 1.203125 
4.132812 1.800781 4.71875 2.21875 C 5.300781 2.644531 6.070312 2.859375 7.03125 2.859375 C 9.53125 2.859375 10.78125 1.9375 10.78125 0.09375 Z M 10.78125 0.09375 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-34">
+<path style="stroke:none;" d="M 4.140625 2.5 L 5.921875 -2.125 L 2.703125 -10.015625 C 1.941406 -11.910156 1.53125 -13.332031 1.46875 -14.28125 L 1.390625 -15.484375 L 3.71875 -15.484375 L 3.78125 -14.375 C 3.820312 -13.738281 4.179688 -12.503906 4.859375 -10.671875 L 7 -4.828125 L 9.140625 -10.59375 C 9.804688 -12.457031 10.164062 -13.71875 10.21875 -14.375 L 10.28125 -15.484375 L 12.609375 -15.484375 L 12.53125 -14.28125 C 12.476562 -13.4375 12.066406 -12.015625 11.296875 -10.015625 L 6.359375 2.796875 C 6.179688 3.234375 6.078125 3.742188 6.046875 4.328125 L 6.015625 4.828125 L 3.6875 4.828125 L 3.71875 4.328125 C 3.757812 3.742188 3.898438 3.132812 4.140625 2.5 Z M 4.140625 2.5 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-35">
+<path style="stroke:none;" d="M 7.265625 -9.515625 L 4.84375 -9.515625 L 4.84375 0 L 2.515625 0 L 2.515625 -20.859375 L 7.0625 -20.859375 C 8.914062 -20.859375 10.289062 -20.359375 11.1875 -19.359375 C 12.09375 -18.359375 12.546875 -17.015625 12.546875 -15.328125 C 12.546875 -13.898438 12.253906 -12.722656 11.671875 -11.796875 C 11.097656 -10.878906 10.382812 -10.257812 9.53125 -9.9375 L 9.78125 -9.5625 C 11.507812 -6.863281 12.460938 -3.988281 12.640625 -0.9375 L 12.703125 0 L 10.375 0 L 10.34375 -0.71875 C 10.164062 -3.632812 9.253906 -6.378906 7.609375 -8.953125 Z M 4.84375 -11.484375 L 7.0625 -11.484375 C 8.132812 -11.484375 8.914062 -11.800781 9.40625 -12.4375 C 9.894531 -13.082031 10.140625 -14.050781 10.140625 -15.34375 C 10.140625 -16.613281 9.894531 -17.519531 9.40625 -18.0625 C 8.914062 -18.601562 8.132812 -18.875 7.0625 -18.875 L 4.84375 -18.875 Z M 4.84375 -11.484375 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-36">
+<path style="stroke:none;" d="M 3.8125 -1.15625 L 3.8125 0 L 1.484375 0 L 1.484375 -1.15625 C 1.484375 -3.15625 2.082031 -6.519531 3.28125 -11.25 L 5.6875 -20.859375 L 8.3125 -20.859375 L 10.71875 -11.25 C 11.914062 -6.519531 12.515625 -3.15625 12.515625 -1.15625 L 12.515625 0 L 10.1875 0 L 10.1875 -1.15625 C 10.1875 -2.101562 10.054688 -3.34375 9.796875 -4.875 L 4.203125 -4.875 C 3.941406 -3.34375 3.8125 -2.101562 3.8125 -1.15625 Z M 7 -17.171875 L 5.265625 -9.953125 C 4.992188 -8.835938 4.757812 -7.804688 4.5625 -6.859375 L 9.421875 -6.859375 C 9.234375 -7.804688 9.003906 -8.835938 8.734375 -9.953125 Z M 7 -17.171875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-37">
+<path style="stroke:none;" d="M 12.8125 0 L 3.328125 0 L 3.328125 -20.859375 L 5.640625 -20.859375 L 5.640625 -1.984375 L 12.8125 -1.984375 Z M 12.8125 0 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-38">
+<path style="stroke:none;" d="M 8.34375 0 L 5.65625 0 L 2.984375 -8.09375 C 2.054688 -10.863281 1.550781 -12.957031 1.46875 -14.375 L 1.390625 -15.484375 L 3.71875 -15.484375 L 3.78125 -14.375 C 3.863281 -13.03125 4.304688 -11.046875 5.109375 -8.421875 L 7 -2.1875 L 8.890625 -8.421875 C 9.691406 -11.046875 10.132812 -13.03125 10.21875 -14.375 L 10.28125 -15.484375 L 12.609375 -15.484375 L 12.53125 -14.375 C 12.445312 -12.957031 11.941406 -10.863281 11.015625 -8.09375 Z M 8.34375 0 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-39">
+<path style="stroke:none;" d="M 12.609375 -8.21875 L 10.125 -8.21875 L 6.984375 -17.765625 L 3.765625 -8.21875 L 1.375 -8.21875 L 5.765625 -20.859375 L 8.171875 -20.859375 Z M 12.609375 -8.21875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-40">
+<path style="stroke:none;" d="M 14 -13.703125 L 14 -7.125 L 10.296875 -7.125 L 10.296875 4.953125 L 3.703125 4.953125 L 3.703125 -13.703125 Z M 14 -13.703125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-41">
+<path style="stroke:none;" d="M 14.265625 -13.703125 L 14.265625 -7.125 L -0.265625 -7.125 L -0.265625 -13.703125 Z M 14.265625 -13.703125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-42">
+<path style="stroke:none;" d="M 11.453125 0 L 2.53125 0 L 2.53125 -1.984375 L 5.828125 -1.984375 L 5.828125 -18.875 L 3.234375 -18.875 L 3.234375 -20.859375 L 10.75 -20.859375 L 10.75 -18.875 L 8.15625 -18.875 L 8.15625 -1.984375 L 11.453125 -1.984375 Z M 11.453125 0 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-43">
+<path style="stroke:none;" d="M 12.65625 0 L 10.765625 0 L 10.765625 -11.6875 C 10.765625 -13.007812 10.375 -13.671875 9.59375 -13.671875 C 8.914062 -13.671875 8.363281 -13.160156 7.9375 -12.140625 L 7.9375 0 L 6.0625 0 L 6.0625 -11.6875 C 6.039062 -13.007812 5.726562 -13.671875 5.125 -13.671875 C 4.351562 -13.671875 3.726562 -13.160156 3.25 -12.140625 L 3.25 0 L 1.34375 0 L 1.34375 -15.484375 L 3.25 -15.484375 L 3.25 -13.953125 C 3.945312 -15.160156 4.804688 -15.765625 5.828125 -15.765625 C 6.742188 -15.765625 7.375 -15.160156 7.71875 -13.953125 C 8.375 -15.160156 9.195312 -15.765625 10.1875 -15.765625 C 11.832031 -15.765625 12.65625 -14.441406 12.65625 -11.796875 Z M 12.65625 0 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-44">
+<path style="stroke:none;" d="M 10.296875 -13.703125 L 10.296875 4.953125 L 3.703125 4.953125 L 3.703125 -7.125 L 0 -7.125 L 0 -13.703125 Z M 10.296875 -13.703125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-45">
+<path style="stroke:none;" d="M 10.296875 4.953125 L 3.703125 4.953125 L 3.703125 -25.765625 L 10.296875 -25.765625 Z M 10.296875 4.953125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-46">
+<path style="stroke:none;" d="M 1.59375 -13.4375 L 1.59375 -15.9375 L 12.9375 -9.765625 L 12.9375 -6.828125 L 1.59375 -0.640625 L 1.59375 -3.140625 L 11.234375 -8.296875 Z M 1.59375 -13.4375 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-47">
+<path style="stroke:none;" d="M 12.171875 -6.515625 C 12.171875 -4.359375 11.648438 -2.6875 10.609375 -1.5 C 9.566406 -0.320312 8.300781 0.265625 6.8125 0.265625 C 4.550781 0.265625 2.847656 -0.804688 1.703125 -2.953125 L 3.609375 -4.21875 C 4.253906 -2.632812 5.316406 -1.84375 6.796875 -1.84375 C 7.597656 -1.84375 8.300781 -2.203125 8.90625 -2.921875 C 9.519531 -3.640625 9.828125 -4.835938 9.828125 -6.515625 C 9.828125 -8.203125 9.523438 -9.382812 8.921875 -10.0625 C 8.328125 -10.738281 7.351562 -11.078125 6 -11.078125 L 2.90625 -11.078125 L 2.90625 -20.859375 L 11.734375 -20.859375 L 11.734375 -18.875 L 5.234375 -18.875 L 5.234375 -13.1875 L 6.40625 -13.1875 C 8.132812 -13.1875 9.523438 -12.664062 10.578125 -11.625 C 11.640625 -10.59375 12.171875 -8.890625 12.171875 -6.515625 Z M 12.171875 -6.515625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-48">
+<path style="stroke:none;" d="M 9.6875 -2.421875 C 9.6875 -1.710938 9.429688 -1.085938 8.921875 -0.546875 C 8.410156 -0.00390625 7.765625 0.265625 6.984375 0.265625 C 6.203125 0.265625 5.5625 -0.00390625 5.0625 -0.546875 C 4.570312 -1.085938 4.328125 -1.710938 4.328125 -2.421875 C 4.328125 -3.117188 4.570312 -3.738281 5.0625 -4.28125 C 5.5625 -4.832031 6.203125 -5.109375 6.984375 -5.109375 C 7.765625 -5.109375 8.410156 -4.832031 8.921875 -4.28125 C 9.429688 -3.726562 9.6875 -3.109375 9.6875 -2.421875 Z M 9.6875 -13.078125 C 9.6875 -12.359375 9.429688 -11.726562 8.921875 -11.1875 C 8.410156 -10.65625 7.765625 -10.390625 6.984375 -10.390625 C 6.203125 -10.390625 5.5625 -10.65625 5.0625 -11.1875 C 4.570312 -11.726562 4.328125 -12.359375 4.328125 -13.078125 C 4.328125 -13.785156 4.570312 -14.410156 5.0625 -14.953125 C 5.5625 -15.492188 6.203125 -15.765625 6.984375 -15.765625 C 7.765625 -15.765625 8.410156 -15.488281 8.921875 -14.9375 C 9.429688 -14.394531 9.6875 -13.773438 9.6875 -13.078125 Z M 9.6875 -13.078125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-49">
+<path style="stroke:none;" d="M 3.5625 -7.390625 C 3.90625 -7.390625 4.191406 -7.1875 4.421875 -6.78125 C 4.984375 -5.882812 5.367188 -5.4375 5.578125 -5.4375 C 5.679688 -5.4375 5.75 -5.503906 5.78125 -5.640625 C 6.664062 -8.816406 7.515625 -11.3125 8.328125 -13.125 C 9.148438 -14.945312 9.847656 -16.269531 10.421875 -17.09375 C 11.078125 -18 11.707031 -18.453125 12.3125 -18.453125 C 12.46875 -18.453125 12.597656 -18.410156 12.703125 -18.328125 C 12.816406 -18.253906 12.875 -18.140625 12.875 -17.984375 C 12.875 -17.796875 12.753906 -17.507812 12.515625 -17.125 C 10.628906 -13.84375 8.894531 -9.082031 7.3125 -2.84375 C 7.1875 -2.425781 6.875 -2.117188 6.375 -1.921875 C 5.875 -1.722656 5.484375 -1.625 5.203125 -1.625 C 4.640625 -1.625 3.957031 -2.203125 3.15625 -3.359375 C 2.351562 -4.515625 1.953125 -5.25 1.953125 -5.5625 C 1.953125 -5.957031 2.148438 -6.359375 2.546875 -6.765625 C 2.953125 -7.179688 3.289062 -7.390625 3.5625 -7.390625 Z M 3.5625 -7.390625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-50">
+<path style="stroke:none;" d="M 12.5 -20.859375 L 3.75 1.75 L 1.484375 1.75 L 10.25 -20.859375 Z M 12.5 -20.859375 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-51">
+<path style="stroke:none;" d="M 4.90625 -3.234375 C 4.175781 -2.210938 3.773438 -1.132812 3.703125 0 L 1.375 0 L 1.40625 -0.53125 C 1.46875 -1.550781 2.050781 -2.835938 3.15625 -4.390625 L 5.703125 -7.984375 L 3.515625 -11.09375 C 2.410156 -12.632812 1.828125 -13.921875 1.765625 -14.953125 L 1.75 -15.484375 L 4.078125 -15.484375 C 4.140625 -14.359375 4.535156 -13.28125 5.265625 -12.25 L 7 -9.8125 L 8.734375 -12.25 C 9.460938 -13.28125 9.859375 -14.359375 9.921875 -15.484375 L 12.25 -15.484375 L 12.234375 -14.953125 C 12.171875 -13.921875 11.585938 -12.632812 10.484375 -11.09375 L 8.28125 -7.984375 L 10.84375 -4.390625 C 11.945312 -2.835938 12.53125 -1.550781 12.59375 -0.53125 L 12.625 0 L 10.296875 0 C 10.222656 -1.132812 9.820312 -2.210938 9.09375 -3.234375 L 7 -6.171875 Z M 4.90625 -3.234375 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-52">
+<path style="stroke:none;" d="M 11.0625 -17.859375 C 10.695312 -18.640625 10.019531 -19.03125 9.03125 -19.03125 C 8.382812 -19.03125 7.878906 -18.820312 7.515625 -18.40625 C 7.160156 -17.988281 6.984375 -17.242188 6.984375 -16.171875 L 6.984375 -14.609375 L 12.203125 -14.609375 L 12.203125 -12.640625 L 6.984375 -12.640625 L 6.984375 0 L 4.65625 0 L 4.65625 -12.640625 L 2.046875 -12.640625 L 2.046875 -14.609375 L 4.65625 -14.609375 L 4.65625 -16.03125 C 4.65625 -17.789062 5.0625 -19.082031 5.875 -19.90625 C 6.695312 -20.738281 7.703125 -21.15625 8.890625 -21.15625 C 10.710938 -21.15625 12.019531 -20.523438 12.8125 -19.265625 Z M 11.0625 -17.859375 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-53">
+<path style="stroke:none;" d="M 3.703125 -7.140625 L 3.703125 -25.765625 L 10.296875 -25.765625 L 10.296875 -13.703125 L 14 -13.703125 L 14 -7.125 Z M 3.703125 -7.140625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-54">
+<path style="stroke:none;" d="M 0 -7.125 L 0 -13.703125 L 3.703125 -13.703125 L 3.703125 -25.765625 L 10.296875 -25.765625 L 10.296875 -7.125 Z M 0 -7.125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-55">
+<path style="stroke:none;" d="M -0.265625 -9.421875 L -0.265625 -11.390625 L 14.265625 -11.390625 L 14.265625 -9.421875 Z M -0.265625 -9.421875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-56">
+<path style="stroke:none;" d="M 0 -7.125 L 0 -9.109375 L 14 -9.109375 L 14 -7.125 Z M 0 -11.71875 L 0 -13.703125 L 14 -13.703125 L 14 -11.71875 Z M 0 -11.71875 "/>
+</symbol>
+</g>
+</defs>
+<g id="surface89756">
+<rect x="0" y="0" width="1428" height="682" style="fill:rgb(16.078431%,17.647059%,24.313725%);fill-opacity:1;stroke:none;"/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 434 124 L 448 124 L 448 155 L 434 155 Z M 434 124 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 0 0 L 854 0 L 854 31 L 0 31 Z M 0 0 "/>
+<g style="fill:rgb(74.901961%,75.294118%,76.862745%);fill-opacity:1;">
+ <use xlink:href="#glyph0-1" x="0" y="26"/>
+ <use xlink:href="#glyph0-2" x="14" y="26"/>
+ <use xlink:href="#glyph0-3" x="28" y="26"/>
+ <use xlink:href="#glyph0-4" x="42" y="26"/>
+ <use xlink:href="#glyph0-5" x="56" y="26"/>
+ <use xlink:href="#glyph0-6" x="70" y="26"/>
+ <use xlink:href="#glyph0-7" x="84" y="26"/>
+ <use xlink:href="#glyph0-1" x="98" y="26"/>
+ <use xlink:href="#glyph0-2" x="112" y="26"/>
+ <use xlink:href="#glyph0-8" x="126" y="26"/>
+ <use xlink:href="#glyph0-9" x="140" y="26"/>
+ <use xlink:href="#glyph0-4" x="154" y="26"/>
+ <use xlink:href="#glyph0-10" x="168" y="26"/>
+ <use xlink:href="#glyph0-7" x="182" y="26"/>
+ <use xlink:href="#glyph0-1" x="196" y="26"/>
+ <use xlink:href="#glyph0-2" x="210" y="26"/>
+ <use xlink:href="#glyph0-11" x="224" y="26"/>
+ <use xlink:href="#glyph0-4" x="238" y="26"/>
+ <use xlink:href="#glyph0-6" x="252" y="26"/>
+ <use xlink:href="#glyph0-12" x="266" y="26"/>
+ <use xlink:href="#glyph0-7" x="280" y="26"/>
+ <use xlink:href="#glyph0-1" x="294" y="26"/>
+ <use xlink:href="#glyph0-2" x="308" y="26"/>
+ <use xlink:href="#glyph0-13" x="322" y="26"/>
+ <use xlink:href="#glyph0-4" x="336" y="26"/>
+ <use xlink:href="#glyph0-14" x="350" y="26"/>
+ <use xlink:href="#glyph0-9" x="364" y="26"/>
+ <use xlink:href="#glyph0-15" x="378" y="26"/>
+ <use xlink:href="#glyph0-12" x="392" y="26"/>
+ <use xlink:href="#glyph0-16" x="406" y="26"/>
+ <use xlink:href="#glyph0-7" x="420" y="26"/>
+ <use xlink:href="#glyph0-1" x="434" y="26"/>
+ <use xlink:href="#glyph0-2" x="448" y="26"/>
+ <use xlink:href="#glyph0-17" x="462" y="26"/>
+ <use xlink:href="#glyph0-6" x="476" y="26"/>
+ <use xlink:href="#glyph0-5" x="490" y="26"/>
+ <use xlink:href="#glyph0-18" x="504" y="26"/>
+ <use xlink:href="#glyph0-7" x="518" y="26"/>
+ <use xlink:href="#glyph0-1" x="532" y="26"/>
+ <use xlink:href="#glyph0-1" x="546" y="26"/>
+ <use xlink:href="#glyph0-1" x="560" y="26"/>
+ <use xlink:href="#glyph0-1" x="574" y="26"/>
+ <use xlink:href="#glyph0-1" x="588" y="26"/>
+ <use xlink:href="#glyph0-1" x="602" y="26"/>
+ <use xlink:href="#glyph0-1" x="616" y="26"/>
+ <use xlink:href="#glyph0-1" x="630" y="26"/>
+ <use xlink:href="#glyph0-1" x="644" y="26"/>
+ <use xlink:href="#glyph0-1" x="658" y="26"/>
+ <use xlink:href="#glyph0-1" x="672" y="26"/>
+ <use xlink:href="#glyph0-1" x="686" y="26"/>
+ <use xlink:href="#glyph0-1" x="700" y="26"/>
+ <use xlink:href="#glyph0-1" x="714" y="26"/>
+ <use xlink:href="#glyph0-1" x="728" y="26"/>
+ <use xlink:href="#glyph0-1" x="742" y="26"/>
+ <use xlink:href="#glyph0-1" x="756" y="26"/>
+ <use xlink:href="#glyph0-1" x="770" y="26"/>
+ <use xlink:href="#glyph0-1" x="784" y="26"/>
+ <use xlink:href="#glyph0-1" x="798" y="26"/>
+ <use xlink:href="#glyph0-1" x="812" y="26"/>
+ <use xlink:href="#glyph0-1" x="826" y="26"/>
+ <use xlink:href="#glyph0-1" x="840" y="26"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 854 0 L 1092 0 L 1092 31 L 854 31 Z M 854 0 "/>
+<g style="fill:rgb(100%,72.156863%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-19" x="854" y="26"/>
+ <use xlink:href="#glyph0-15" x="868" y="26"/>
+ <use xlink:href="#glyph0-14" x="882" y="26"/>
+ <use xlink:href="#glyph0-16" x="896" y="26"/>
+ <use xlink:href="#glyph0-15" x="910" y="26"/>
+ <use xlink:href="#glyph0-5" x="924" y="26"/>
+ <use xlink:href="#glyph0-6" x="938" y="26"/>
+ <use xlink:href="#glyph0-1" x="952" y="26"/>
+ <use xlink:href="#glyph0-20" x="966" y="26"/>
+ <use xlink:href="#glyph0-6" x="980" y="26"/>
+ <use xlink:href="#glyph0-16" x="994" y="26"/>
+ <use xlink:href="#glyph0-10" x="1008" y="26"/>
+ <use xlink:href="#glyph0-1" x="1022" y="26"/>
+ <use xlink:href="#glyph0-21" x="1036" y="26"/>
+ <use xlink:href="#glyph0-15" x="1050" y="26"/>
+ <use xlink:href="#glyph0-9" x="1064" y="26"/>
+ <use xlink:href="#glyph0-6" x="1078" y="26"/>
+</g>
+<g style="fill:rgb(100%,72.156863%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-19" x="855" y="26"/>
+ <use xlink:href="#glyph0-15" x="869" y="26"/>
+ <use xlink:href="#glyph0-14" x="883" y="26"/>
+ <use xlink:href="#glyph0-16" x="897" y="26"/>
+ <use xlink:href="#glyph0-15" x="911" y="26"/>
+ <use xlink:href="#glyph0-5" x="925" y="26"/>
+ <use xlink:href="#glyph0-6" x="939" y="26"/>
+ <use xlink:href="#glyph0-1" x="953" y="26"/>
+ <use xlink:href="#glyph0-20" x="967" y="26"/>
+ <use xlink:href="#glyph0-6" x="981" y="26"/>
+ <use xlink:href="#glyph0-16" x="995" y="26"/>
+ <use xlink:href="#glyph0-10" x="1009" y="26"/>
+ <use xlink:href="#glyph0-1" x="1023" y="26"/>
+ <use xlink:href="#glyph0-21" x="1037" y="26"/>
+ <use xlink:href="#glyph0-15" x="1051" y="26"/>
+ <use xlink:href="#glyph0-9" x="1065" y="26"/>
+ <use xlink:href="#glyph0-6" x="1079" y="26"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 1092 0 L 1120 0 L 1120 31 L 1092 31 Z M 1092 0 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 1120 0 L 1134 0 L 1134 31 L 1120 31 Z M 1120 0 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 1134 0 L 1302 0 L 1302 31 L 1134 31 Z M 1134 0 "/>
+<g style="fill:rgb(87.843137%,90.196078%,94.117647%);fill-opacity:1;">
+ <use xlink:href="#glyph0-22" x="1134" y="26"/>
+ <use xlink:href="#glyph0-6" x="1148" y="26"/>
+ <use xlink:href="#glyph0-23" x="1162" y="26"/>
+ <use xlink:href="#glyph0-24" x="1176" y="26"/>
+ <use xlink:href="#glyph0-25" x="1190" y="26"/>
+ <use xlink:href="#glyph0-26" x="1204" y="26"/>
+ <use xlink:href="#glyph0-1" x="1218" y="26"/>
+ <use xlink:href="#glyph0-21" x="1232" y="26"/>
+ <use xlink:href="#glyph0-6" x="1246" y="26"/>
+ <use xlink:href="#glyph0-14" x="1260" y="26"/>
+ <use xlink:href="#glyph0-27" x="1274" y="26"/>
+ <use xlink:href="#glyph0-1" x="1288" y="26"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 1302 0 L 1386 0 L 1386 31 L 1302 31 Z M 1302 0 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-19" x="1302" y="26"/>
+ <use xlink:href="#glyph0-10" x="1316" y="26"/>
+ <use xlink:href="#glyph0-24" x="1330" y="26"/>
+ <use xlink:href="#glyph0-5" x="1344" y="26"/>
+ <use xlink:href="#glyph0-28" x="1358" y="26"/>
+ <use xlink:href="#glyph0-18" x="1372" y="26"/>
+</g>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-19" x="1303" y="26"/>
+ <use xlink:href="#glyph0-10" x="1317" y="26"/>
+ <use xlink:href="#glyph0-24" x="1331" y="26"/>
+ <use xlink:href="#glyph0-5" x="1345" y="26"/>
+ <use xlink:href="#glyph0-28" x="1359" y="26"/>
+ <use xlink:href="#glyph0-18" x="1373" y="26"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 1386 0 L 1400 0 L 1400 31 L 1386 31 Z M 1386 0 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 1400 0 L 1414 0 L 1414 31 L 1400 31 Z M 1400 0 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(14.117647%,15.686275%,21.568627%);fill-opacity:1;" d="M 1414 0 L 1428 0 L 1428 31 L 1414 31 Z M 1414 0 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(6.27451%,5.882353%,6.27451%);fill-opacity:1;" d="M 0 31 L 14 31 L 14 62 L 0 62 Z M 0 31 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 14 31 L 112 31 L 112 62 L 14 62 Z M 14 31 "/>
+<g style="fill:rgb(74.901961%,75.294118%,76.862745%);fill-opacity:1;">
+ <use xlink:href="#glyph0-1" x="14" y="57"/>
+ <use xlink:href="#glyph0-19" x="28" y="57"/>
+ <use xlink:href="#glyph0-5" x="42" y="57"/>
+ <use xlink:href="#glyph0-15" x="56" y="57"/>
+ <use xlink:href="#glyph0-25" x="70" y="57"/>
+ <use xlink:href="#glyph0-29" x="84" y="57"/>
+ <use xlink:href="#glyph0-1" x="98" y="57"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(6.27451%,5.882353%,6.27451%);fill-opacity:1;" d="M 112 31 L 126 31 L 126 62 L 112 62 Z M 112 31 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 126 31 L 364 31 L 364 62 L 126 62 Z M 126 31 "/>
+<g style="fill:rgb(74.901961%,75.294118%,76.862745%);fill-opacity:1;">
+ <use xlink:href="#glyph0-1" x="126" y="57"/>
+ <use xlink:href="#glyph0-30" x="140" y="57"/>
+ <use xlink:href="#glyph0-12" x="154" y="57"/>
+ <use xlink:href="#glyph0-19" x="168" y="57"/>
+ <use xlink:href="#glyph0-15" x="182" y="57"/>
+ <use xlink:href="#glyph0-14" x="196" y="57"/>
+ <use xlink:href="#glyph0-16" x="210" y="57"/>
+ <use xlink:href="#glyph0-15" x="224" y="57"/>
+ <use xlink:href="#glyph0-5" x="238" y="57"/>
+ <use xlink:href="#glyph0-6" x="252" y="57"/>
+ <use xlink:href="#glyph0-1" x="266" y="57"/>
+ <use xlink:href="#glyph0-31" x="280" y="57"/>
+ <use xlink:href="#glyph0-6" x="294" y="57"/>
+ <use xlink:href="#glyph0-32" x="308" y="57"/>
+ <use xlink:href="#glyph0-27" x="322" y="57"/>
+ <use xlink:href="#glyph0-33" x="336" y="57"/>
+ <use xlink:href="#glyph0-1" x="350" y="57"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(6.27451%,5.882353%,6.27451%);fill-opacity:1;" d="M 364 31 L 378 31 L 378 62 L 364 62 Z M 364 31 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 378 31 L 560 31 L 560 62 L 378 62 Z M 378 31 "/>
+<g style="fill:rgb(37.647059%,90.588235%,87.843137%);fill-opacity:1;">
+ <use xlink:href="#glyph0-1" x="378" y="57"/>
+ <use xlink:href="#glyph0-30" x="392" y="57"/>
+ <use xlink:href="#glyph0-34" x="406" y="57"/>
+ <use xlink:href="#glyph0-10" x="420" y="57"/>
+ <use xlink:href="#glyph0-26" x="434" y="57"/>
+ <use xlink:href="#glyph0-15" x="448" y="57"/>
+ <use xlink:href="#glyph0-14" x="462" y="57"/>
+ <use xlink:href="#glyph0-1" x="476" y="57"/>
+ <use xlink:href="#glyph0-35" x="490" y="57"/>
+ <use xlink:href="#glyph0-6" x="504" y="57"/>
+ <use xlink:href="#glyph0-18" x="518" y="57"/>
+ <use xlink:href="#glyph0-5" x="532" y="57"/>
+ <use xlink:href="#glyph0-1" x="546" y="57"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(6.27451%,5.882353%,6.27451%);fill-opacity:1;" d="M 560 31 L 574 31 L 574 62 L 560 62 Z M 560 31 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 574 31 L 714 31 L 714 62 L 574 62 Z M 574 31 "/>
+<g style="fill:rgb(74.901961%,75.294118%,76.862745%);fill-opacity:1;">
+ <use xlink:href="#glyph0-1" x="574" y="57"/>
+ <use xlink:href="#glyph0-36" x="588" y="57"/>
+ <use xlink:href="#glyph0-5" x="602" y="57"/>
+ <use xlink:href="#glyph0-5" x="616" y="57"/>
+ <use xlink:href="#glyph0-1" x="630" y="57"/>
+ <use xlink:href="#glyph0-37" x="644" y="57"/>
+ <use xlink:href="#glyph0-15" x="658" y="57"/>
+ <use xlink:href="#glyph0-33" x="672" y="57"/>
+ <use xlink:href="#glyph0-16" x="686" y="57"/>
+ <use xlink:href="#glyph0-1" x="700" y="57"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(6.27451%,5.882353%,6.27451%);fill-opacity:1;" d="M 714 31 L 728 31 L 728 62 L 714 62 Z M 714 31 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 728 31 L 980 31 L 980 62 L 728 62 Z M 728 31 "/>
+<g style="fill:rgb(74.901961%,75.294118%,76.862745%);fill-opacity:1;">
+ <use xlink:href="#glyph0-1" x="728" y="57"/>
+ <use xlink:href="#glyph0-3" x="742" y="57"/>
+ <use xlink:href="#glyph0-23" x="756" y="57"/>
+ <use xlink:href="#glyph0-29" x="770" y="57"/>
+ <use xlink:href="#glyph0-6" x="784" y="57"/>
+ <use xlink:href="#glyph0-1" x="798" y="57"/>
+ <use xlink:href="#glyph0-31" x="812" y="57"/>
+ <use xlink:href="#glyph0-6" x="826" y="57"/>
+ <use xlink:href="#glyph0-38" x="840" y="57"/>
+ <use xlink:href="#glyph0-4" x="854" y="57"/>
+ <use xlink:href="#glyph0-25" x="868" y="57"/>
+ <use xlink:href="#glyph0-6" x="882" y="57"/>
+ <use xlink:href="#glyph0-1" x="896" y="57"/>
+ <use xlink:href="#glyph0-37" x="910" y="57"/>
+ <use xlink:href="#glyph0-15" x="924" y="57"/>
+ <use xlink:href="#glyph0-33" x="938" y="57"/>
+ <use xlink:href="#glyph0-16" x="952" y="57"/>
+ <use xlink:href="#glyph0-1" x="966" y="57"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(6.27451%,5.882353%,6.27451%);fill-opacity:1;" d="M 980 31 L 994 31 L 994 62 L 980 62 Z M 980 31 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 994 31 L 1162 31 L 1162 62 L 994 62 Z M 994 31 "/>
+<g style="fill:rgb(74.901961%,75.294118%,76.862745%);fill-opacity:1;">
+ <use xlink:href="#glyph0-1" x="994" y="57"/>
+ <use xlink:href="#glyph0-19" x="1008" y="57"/>
+ <use xlink:href="#glyph0-23" x="1022" y="57"/>
+ <use xlink:href="#glyph0-5" x="1036" y="57"/>
+ <use xlink:href="#glyph0-25" x="1050" y="57"/>
+ <use xlink:href="#glyph0-27" x="1064" y="57"/>
+ <use xlink:href="#glyph0-5" x="1078" y="57"/>
+ <use xlink:href="#glyph0-23" x="1092" y="57"/>
+ <use xlink:href="#glyph0-10" x="1106" y="57"/>
+ <use xlink:href="#glyph0-15" x="1120" y="57"/>
+ <use xlink:href="#glyph0-24" x="1134" y="57"/>
+ <use xlink:href="#glyph0-1" x="1148" y="57"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(6.27451%,5.882353%,6.27451%);fill-opacity:1;" d="M 1162 31 L 1414 31 L 1414 62 L 1162 62 Z M 1162 31 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(14.117647%,15.686275%,21.568627%);fill-opacity:1;" d="M 1414 31 L 1428 31 L 1428 62 L 1414 62 Z M 1414 31 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 0 62 L 1400 62 L 1400 93 L 0 93 Z M 0 62 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(6.27451%,5.882353%,6.27451%);fill-opacity:1;" d="M 1400 62 L 1414 62 L 1414 93 L 1400 93 Z M 1400 62 "/>
+<g style="fill:rgb(57.254902%,85.098039%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-39" x="1400" y="88"/>
+</g>
+<g style="fill:rgb(57.254902%,85.098039%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-39" x="1401" y="88"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(14.117647%,15.686275%,21.568627%);fill-opacity:1;" d="M 1414 62 L 1428 62 L 1428 93 L 1414 93 Z M 1414 62 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 0 93 L 140 93 L 140 124 L 0 124 Z M 0 93 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 140 93 L 1260 93 L 1260 124 L 140 124 Z M 140 93 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-40" x="140" y="119"/>
+ <use xlink:href="#glyph0-41" x="154" y="119"/>
+ <use xlink:href="#glyph0-41" x="168" y="119"/>
+ <use xlink:href="#glyph0-1" x="182" y="119"/>
+ <use xlink:href="#glyph0-21" x="196" y="119"/>
+ <use xlink:href="#glyph0-6" x="210" y="119"/>
+ <use xlink:href="#glyph0-14" x="224" y="119"/>
+ <use xlink:href="#glyph0-27" x="238" y="119"/>
+ <use xlink:href="#glyph0-1" x="252" y="119"/>
+ <use xlink:href="#glyph0-42" x="266" y="119"/>
+ <use xlink:href="#glyph0-10" x="280" y="119"/>
+ <use xlink:href="#glyph0-6" x="294" y="119"/>
+ <use xlink:href="#glyph0-43" x="308" y="119"/>
+ <use xlink:href="#glyph0-16" x="322" y="119"/>
+ <use xlink:href="#glyph0-1" x="336" y="119"/>
+ <use xlink:href="#glyph0-41" x="350" y="119"/>
+ <use xlink:href="#glyph0-41" x="364" y="119"/>
+ <use xlink:href="#glyph0-41" x="378" y="119"/>
+ <use xlink:href="#glyph0-41" x="392" y="119"/>
+ <use xlink:href="#glyph0-41" x="406" y="119"/>
+ <use xlink:href="#glyph0-41" x="420" y="119"/>
+ <use xlink:href="#glyph0-41" x="434" y="119"/>
+ <use xlink:href="#glyph0-41" x="448" y="119"/>
+ <use xlink:href="#glyph0-41" x="462" y="119"/>
+ <use xlink:href="#glyph0-41" x="476" y="119"/>
+ <use xlink:href="#glyph0-41" x="490" y="119"/>
+ <use xlink:href="#glyph0-41" x="504" y="119"/>
+ <use xlink:href="#glyph0-41" x="518" y="119"/>
+ <use xlink:href="#glyph0-41" x="532" y="119"/>
+ <use xlink:href="#glyph0-41" x="546" y="119"/>
+ <use xlink:href="#glyph0-41" x="560" y="119"/>
+ <use xlink:href="#glyph0-41" x="574" y="119"/>
+ <use xlink:href="#glyph0-41" x="588" y="119"/>
+ <use xlink:href="#glyph0-41" x="602" y="119"/>
+ <use xlink:href="#glyph0-41" x="616" y="119"/>
+ <use xlink:href="#glyph0-41" x="630" y="119"/>
+ <use xlink:href="#glyph0-41" x="644" y="119"/>
+ <use xlink:href="#glyph0-41" x="658" y="119"/>
+ <use xlink:href="#glyph0-41" x="672" y="119"/>
+ <use xlink:href="#glyph0-41" x="686" y="119"/>
+ <use xlink:href="#glyph0-41" x="700" y="119"/>
+ <use xlink:href="#glyph0-41" x="714" y="119"/>
+ <use xlink:href="#glyph0-41" x="728" y="119"/>
+ <use xlink:href="#glyph0-41" x="742" y="119"/>
+ <use xlink:href="#glyph0-41" x="756" y="119"/>
+ <use xlink:href="#glyph0-41" x="770" y="119"/>
+ <use xlink:href="#glyph0-41" x="784" y="119"/>
+ <use xlink:href="#glyph0-41" x="798" y="119"/>
+ <use xlink:href="#glyph0-41" x="812" y="119"/>
+ <use xlink:href="#glyph0-41" x="826" y="119"/>
+ <use xlink:href="#glyph0-41" x="840" y="119"/>
+ <use xlink:href="#glyph0-41" x="854" y="119"/>
+ <use xlink:href="#glyph0-41" x="868" y="119"/>
+ <use xlink:href="#glyph0-41" x="882" y="119"/>
+ <use xlink:href="#glyph0-41" x="896" y="119"/>
+ <use xlink:href="#glyph0-41" x="910" y="119"/>
+ <use xlink:href="#glyph0-41" x="924" y="119"/>
+ <use xlink:href="#glyph0-41" x="938" y="119"/>
+ <use xlink:href="#glyph0-41" x="952" y="119"/>
+ <use xlink:href="#glyph0-41" x="966" y="119"/>
+ <use xlink:href="#glyph0-41" x="980" y="119"/>
+ <use xlink:href="#glyph0-41" x="994" y="119"/>
+ <use xlink:href="#glyph0-41" x="1008" y="119"/>
+ <use xlink:href="#glyph0-41" x="1022" y="119"/>
+ <use xlink:href="#glyph0-41" x="1036" y="119"/>
+ <use xlink:href="#glyph0-41" x="1050" y="119"/>
+ <use xlink:href="#glyph0-41" x="1064" y="119"/>
+ <use xlink:href="#glyph0-41" x="1078" y="119"/>
+ <use xlink:href="#glyph0-41" x="1092" y="119"/>
+ <use xlink:href="#glyph0-41" x="1106" y="119"/>
+ <use xlink:href="#glyph0-41" x="1120" y="119"/>
+ <use xlink:href="#glyph0-41" x="1134" y="119"/>
+ <use xlink:href="#glyph0-41" x="1148" y="119"/>
+ <use xlink:href="#glyph0-41" x="1162" y="119"/>
+ <use xlink:href="#glyph0-41" x="1176" y="119"/>
+ <use xlink:href="#glyph0-41" x="1190" y="119"/>
+ <use xlink:href="#glyph0-41" x="1204" y="119"/>
+ <use xlink:href="#glyph0-41" x="1218" y="119"/>
+ <use xlink:href="#glyph0-41" x="1232" y="119"/>
+ <use xlink:href="#glyph0-44" x="1246" y="119"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 1260 93 L 1400 93 L 1400 124 L 1260 124 Z M 1260 93 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;" d="M 1400 93 L 1414 93 L 1414 124 L 1400 124 Z M 1400 93 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(14.117647%,15.686275%,21.568627%);fill-opacity:1;" d="M 1414 93 L 1428 93 L 1428 124 L 1414 124 Z M 1414 93 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 0 124 L 140 124 L 140 155 L 0 155 Z M 0 124 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 140 124 L 196 124 L 196 155 L 140 155 Z M 140 124 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-45" x="140" y="150"/>
+ <use xlink:href="#glyph0-1" x="154" y="150"/>
+ <use xlink:href="#glyph0-46" x="168" y="150"/>
+ <use xlink:href="#glyph0-1" x="182" y="150"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 196 124 L 854 124 L 854 155 L 196 155 Z M 196 124 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-9" x="196" y="150"/>
+ <use xlink:href="#glyph0-6" x="210" y="150"/>
+ <use xlink:href="#glyph0-38" x="224" y="150"/>
+ <use xlink:href="#glyph0-4" x="238" y="150"/>
+ <use xlink:href="#glyph0-25" x="252" y="150"/>
+ <use xlink:href="#glyph0-6" x="266" y="150"/>
+ <use xlink:href="#glyph0-1" x="280" y="150"/>
+ <use xlink:href="#glyph0-5" x="294" y="150"/>
+ <use xlink:href="#glyph0-15" x="308" y="150"/>
+ <use xlink:href="#glyph0-33" x="322" y="150"/>
+ <use xlink:href="#glyph0-1" x="336" y="150"/>
+ <use xlink:href="#glyph0-12" x="350" y="150"/>
+ <use xlink:href="#glyph0-4" x="364" y="150"/>
+ <use xlink:href="#glyph0-14" x="378" y="150"/>
+ <use xlink:href="#glyph0-9" x="392" y="150"/>
+ <use xlink:href="#glyph0-15" x="406" y="150"/>
+ <use xlink:href="#glyph0-12" x="420" y="150"/>
+ <use xlink:href="#glyph0-1" x="434" y="150"/>
+ <use xlink:href="#glyph0-1" x="448" y="150"/>
+ <use xlink:href="#glyph0-1" x="462" y="150"/>
+ <use xlink:href="#glyph0-1" x="476" y="150"/>
+ <use xlink:href="#glyph0-1" x="490" y="150"/>
+ <use xlink:href="#glyph0-1" x="504" y="150"/>
+ <use xlink:href="#glyph0-1" x="518" y="150"/>
+ <use xlink:href="#glyph0-1" x="532" y="150"/>
+ <use xlink:href="#glyph0-1" x="546" y="150"/>
+ <use xlink:href="#glyph0-1" x="560" y="150"/>
+ <use xlink:href="#glyph0-1" x="574" y="150"/>
+ <use xlink:href="#glyph0-1" x="588" y="150"/>
+ <use xlink:href="#glyph0-1" x="602" y="150"/>
+ <use xlink:href="#glyph0-1" x="616" y="150"/>
+ <use xlink:href="#glyph0-1" x="630" y="150"/>
+ <use xlink:href="#glyph0-1" x="644" y="150"/>
+ <use xlink:href="#glyph0-1" x="658" y="150"/>
+ <use xlink:href="#glyph0-1" x="672" y="150"/>
+ <use xlink:href="#glyph0-1" x="686" y="150"/>
+ <use xlink:href="#glyph0-1" x="700" y="150"/>
+ <use xlink:href="#glyph0-1" x="714" y="150"/>
+ <use xlink:href="#glyph0-1" x="728" y="150"/>
+ <use xlink:href="#glyph0-1" x="742" y="150"/>
+ <use xlink:href="#glyph0-1" x="756" y="150"/>
+ <use xlink:href="#glyph0-1" x="770" y="150"/>
+ <use xlink:href="#glyph0-1" x="784" y="150"/>
+ <use xlink:href="#glyph0-1" x="798" y="150"/>
+ <use xlink:href="#glyph0-1" x="812" y="150"/>
+ <use xlink:href="#glyph0-1" x="826" y="150"/>
+ <use xlink:href="#glyph0-1" x="840" y="150"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 854 124 L 868 124 L 868 155 L 854 155 Z M 854 124 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 868 124 L 966 124 L 966 155 L 868 155 Z M 868 124 "/>
+<g style="fill:rgb(87.843137%,90.196078%,94.117647%);fill-opacity:1;">
+ <use xlink:href="#glyph0-19" x="868" y="150"/>
+ <use xlink:href="#glyph0-23" x="882" y="150"/>
+ <use xlink:href="#glyph0-14" x="896" y="150"/>
+ <use xlink:href="#glyph0-25" x="910" y="150"/>
+ <use xlink:href="#glyph0-6" x="924" y="150"/>
+ <use xlink:href="#glyph0-5" x="938" y="150"/>
+ <use xlink:href="#glyph0-1" x="952" y="150"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 966 124 L 1050 124 L 1050 155 L 966 155 Z M 966 124 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-19" x="966" y="150"/>
+ <use xlink:href="#glyph0-10" x="980" y="150"/>
+ <use xlink:href="#glyph0-24" x="994" y="150"/>
+ <use xlink:href="#glyph0-5" x="1008" y="150"/>
+ <use xlink:href="#glyph0-28" x="1022" y="150"/>
+ <use xlink:href="#glyph0-25" x="1036" y="150"/>
+</g>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-19" x="967" y="150"/>
+ <use xlink:href="#glyph0-10" x="981" y="150"/>
+ <use xlink:href="#glyph0-24" x="995" y="150"/>
+ <use xlink:href="#glyph0-5" x="1009" y="150"/>
+ <use xlink:href="#glyph0-28" x="1023" y="150"/>
+ <use xlink:href="#glyph0-25" x="1037" y="150"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 1050 124 L 1064 124 L 1064 155 L 1050 155 Z M 1050 124 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 1064 124 L 1078 124 L 1078 155 L 1064 155 Z M 1064 124 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 1078 124 L 1092 124 L 1092 155 L 1078 155 Z M 1078 124 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 1092 124 L 1148 124 L 1148 155 L 1092 155 Z M 1092 124 "/>
+<g style="fill:rgb(87.843137%,90.196078%,94.117647%);fill-opacity:1;">
+ <use xlink:href="#glyph0-35" x="1092" y="150"/>
+ <use xlink:href="#glyph0-27" x="1106" y="150"/>
+ <use xlink:href="#glyph0-14" x="1120" y="150"/>
+ <use xlink:href="#glyph0-1" x="1134" y="150"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 1148 124 L 1218 124 L 1218 155 L 1148 155 Z M 1148 124 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-8" x="1148" y="150"/>
+ <use xlink:href="#glyph0-14" x="1162" y="150"/>
+ <use xlink:href="#glyph0-10" x="1176" y="150"/>
+ <use xlink:href="#glyph0-6" x="1190" y="150"/>
+ <use xlink:href="#glyph0-24" x="1204" y="150"/>
+</g>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-8" x="1149" y="150"/>
+ <use xlink:href="#glyph0-14" x="1163" y="150"/>
+ <use xlink:href="#glyph0-10" x="1177" y="150"/>
+ <use xlink:href="#glyph0-6" x="1191" y="150"/>
+ <use xlink:href="#glyph0-24" x="1205" y="150"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 1218 124 L 1232 124 L 1232 155 L 1218 155 Z M 1218 124 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 1232 124 L 1260 124 L 1260 155 L 1232 155 Z M 1232 124 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-1" x="1232" y="150"/>
+ <use xlink:href="#glyph0-45" x="1246" y="150"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 1260 124 L 1400 124 L 1400 155 L 1260 155 Z M 1260 124 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;" d="M 1400 124 L 1414 124 L 1414 155 L 1400 155 Z M 1400 124 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(14.117647%,15.686275%,21.568627%);fill-opacity:1;" d="M 1414 124 L 1428 124 L 1428 155 L 1414 155 Z M 1414 124 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 0 155 L 140 155 L 140 186 L 0 186 Z M 0 155 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 140 155 L 168 155 L 168 186 L 140 186 Z M 140 155 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-45" x="140" y="181"/>
+ <use xlink:href="#glyph0-1" x="154" y="181"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(30.588235%,30.588235%,30.588235%);fill-opacity:1;" d="M 168 155 L 182 155 L 182 186 L 168 186 Z M 168 155 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-2" x="168" y="181"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(30.588235%,30.588235%,30.588235%);fill-opacity:1;" d="M 182 155 L 266 155 L 266 186 L 182 186 Z M 182 155 "/>
+<g style="fill:rgb(53.333333%,93.72549%,53.333333%);fill-opacity:1;">
+ <use xlink:href="#glyph0-13" x="182" y="181"/>
+ <use xlink:href="#glyph0-4" x="196" y="181"/>
+ <use xlink:href="#glyph0-14" x="210" y="181"/>
+ <use xlink:href="#glyph0-9" x="224" y="181"/>
+ <use xlink:href="#glyph0-15" x="238" y="181"/>
+ <use xlink:href="#glyph0-12" x="252" y="181"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(30.588235%,30.588235%,30.588235%);fill-opacity:1;" d="M 266 155 L 448 155 L 448 186 L 266 186 Z M 266 155 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-16" x="266" y="181"/>
+ <use xlink:href="#glyph0-7" x="280" y="181"/>
+ <use xlink:href="#glyph0-1" x="294" y="181"/>
+ <use xlink:href="#glyph0-46" x="308" y="181"/>
+ <use xlink:href="#glyph0-1" x="322" y="181"/>
+ <use xlink:href="#glyph0-47" x="336" y="181"/>
+ <use xlink:href="#glyph0-48" x="350" y="181"/>
+ <use xlink:href="#glyph0-1" x="364" y="181"/>
+ <use xlink:href="#glyph0-3" x="378" y="181"/>
+ <use xlink:href="#glyph0-23" x="392" y="181"/>
+ <use xlink:href="#glyph0-29" x="406" y="181"/>
+ <use xlink:href="#glyph0-6" x="420" y="181"/>
+ <use xlink:href="#glyph0-1" x="434" y="181"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(30.588235%,30.588235%,30.588235%);fill-opacity:1;" d="M 448 155 L 532 155 L 532 186 L 448 186 Z M 448 155 "/>
+<g style="fill:rgb(57.254902%,85.098039%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-31" x="448" y="181"/>
+ <use xlink:href="#glyph0-6" x="462" y="181"/>
+ <use xlink:href="#glyph0-38" x="476" y="181"/>
+ <use xlink:href="#glyph0-4" x="490" y="181"/>
+ <use xlink:href="#glyph0-25" x="504" y="181"/>
+ <use xlink:href="#glyph0-6" x="518" y="181"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(30.588235%,30.588235%,30.588235%);fill-opacity:1;" d="M 532 155 L 546 155 L 546 186 L 532 186 Z M 532 155 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(30.588235%,30.588235%,30.588235%);fill-opacity:1;" d="M 546 155 L 588 155 L 588 186 L 546 186 Z M 546 155 "/>
+<g style="fill:rgb(37.647059%,90.588235%,87.843137%);fill-opacity:1;">
+ <use xlink:href="#glyph0-37" x="546" y="181"/>
+ <use xlink:href="#glyph0-15" x="560" y="181"/>
+ <use xlink:href="#glyph0-33" x="574" y="181"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(30.588235%,30.588235%,30.588235%);fill-opacity:1;" d="M 588 155 L 840 155 L 840 186 L 588 186 Z M 588 155 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-16" x="588" y="181"/>
+ <use xlink:href="#glyph0-1" x="602" y="181"/>
+ <use xlink:href="#glyph0-46" x="616" y="181"/>
+ <use xlink:href="#glyph0-1" x="630" y="181"/>
+ <use xlink:href="#glyph0-2" x="644" y="181"/>
+ <use xlink:href="#glyph0-49" x="658" y="181"/>
+ <use xlink:href="#glyph0-7" x="672" y="181"/>
+ <use xlink:href="#glyph0-1" x="686" y="181"/>
+ <use xlink:href="#glyph0-22" x="700" y="181"/>
+ <use xlink:href="#glyph0-26" x="714" y="181"/>
+ <use xlink:href="#glyph0-15" x="728" y="181"/>
+ <use xlink:href="#glyph0-12" x="742" y="181"/>
+ <use xlink:href="#glyph0-50" x="756" y="181"/>
+ <use xlink:href="#glyph0-17" x="770" y="181"/>
+ <use xlink:href="#glyph0-4" x="784" y="181"/>
+ <use xlink:href="#glyph0-9" x="798" y="181"/>
+ <use xlink:href="#glyph0-6" x="812" y="181"/>
+ <use xlink:href="#glyph0-1" x="826" y="181"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(30.588235%,30.588235%,30.588235%);fill-opacity:1;" d="M 840 155 L 924 155 L 924 186 L 840 186 Z M 840 155 "/>
+<g style="fill:rgb(53.333333%,93.72549%,53.333333%);fill-opacity:1;">
+ <use xlink:href="#glyph0-13" x="840" y="181"/>
+ <use xlink:href="#glyph0-4" x="854" y="181"/>
+ <use xlink:href="#glyph0-14" x="868" y="181"/>
+ <use xlink:href="#glyph0-9" x="882" y="181"/>
+ <use xlink:href="#glyph0-15" x="896" y="181"/>
+ <use xlink:href="#glyph0-12" x="910" y="181"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(30.588235%,30.588235%,30.588235%);fill-opacity:1;" d="M 924 155 L 1232 155 L 1232 186 L 924 186 Z M 924 155 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 1232 155 L 1260 155 L 1260 186 L 1232 186 Z M 1232 155 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-1" x="1232" y="181"/>
+ <use xlink:href="#glyph0-45" x="1246" y="181"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 1260 155 L 1400 155 L 1400 186 L 1260 186 Z M 1260 155 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;" d="M 1400 155 L 1414 155 L 1414 186 L 1400 186 Z M 1400 155 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(14.117647%,15.686275%,21.568627%);fill-opacity:1;" d="M 1414 155 L 1428 155 L 1428 186 L 1414 186 Z M 1414 155 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 0 186 L 140 186 L 140 217 L 0 217 Z M 0 186 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 140 186 L 168 186 L 168 217 L 140 217 Z M 140 186 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-45" x="140" y="212"/>
+ <use xlink:href="#glyph0-1" x="154" y="212"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 168 186 L 182 186 L 182 217 L 168 217 Z M 168 186 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-2" x="168" y="212"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 182 186 L 266 186 L 266 217 L 182 217 Z M 182 186 "/>
+<g style="fill:rgb(53.333333%,93.72549%,53.333333%);fill-opacity:1;">
+ <use xlink:href="#glyph0-13" x="182" y="212"/>
+ <use xlink:href="#glyph0-4" x="196" y="212"/>
+ <use xlink:href="#glyph0-14" x="210" y="212"/>
+ <use xlink:href="#glyph0-9" x="224" y="212"/>
+ <use xlink:href="#glyph0-15" x="238" y="212"/>
+ <use xlink:href="#glyph0-12" x="252" y="212"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 266 186 L 448 186 L 448 217 L 266 217 Z M 266 186 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-16" x="266" y="212"/>
+ <use xlink:href="#glyph0-7" x="280" y="212"/>
+ <use xlink:href="#glyph0-1" x="294" y="212"/>
+ <use xlink:href="#glyph0-46" x="308" y="212"/>
+ <use xlink:href="#glyph0-1" x="322" y="212"/>
+ <use xlink:href="#glyph0-47" x="336" y="212"/>
+ <use xlink:href="#glyph0-48" x="350" y="212"/>
+ <use xlink:href="#glyph0-1" x="364" y="212"/>
+ <use xlink:href="#glyph0-3" x="378" y="212"/>
+ <use xlink:href="#glyph0-23" x="392" y="212"/>
+ <use xlink:href="#glyph0-29" x="406" y="212"/>
+ <use xlink:href="#glyph0-6" x="420" y="212"/>
+ <use xlink:href="#glyph0-1" x="434" y="212"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 448 186 L 532 186 L 532 217 L 448 217 Z M 448 186 "/>
+<g style="fill:rgb(57.254902%,85.098039%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-31" x="448" y="212"/>
+ <use xlink:href="#glyph0-6" x="462" y="212"/>
+ <use xlink:href="#glyph0-38" x="476" y="212"/>
+ <use xlink:href="#glyph0-4" x="490" y="212"/>
+ <use xlink:href="#glyph0-25" x="504" y="212"/>
+ <use xlink:href="#glyph0-6" x="518" y="212"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 532 186 L 546 186 L 546 217 L 532 217 Z M 532 186 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 546 186 L 588 186 L 588 217 L 546 217 Z M 546 186 "/>
+<g style="fill:rgb(37.647059%,90.588235%,87.843137%);fill-opacity:1;">
+ <use xlink:href="#glyph0-37" x="546" y="212"/>
+ <use xlink:href="#glyph0-15" x="560" y="212"/>
+ <use xlink:href="#glyph0-33" x="574" y="212"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 588 186 L 1232 186 L 1232 217 L 588 217 Z M 588 186 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-16" x="588" y="212"/>
+ <use xlink:href="#glyph0-1" x="602" y="212"/>
+ <use xlink:href="#glyph0-46" x="616" y="212"/>
+ <use xlink:href="#glyph0-1" x="630" y="212"/>
+ <use xlink:href="#glyph0-22" x="644" y="212"/>
+ <use xlink:href="#glyph0-23" x="658" y="212"/>
+ <use xlink:href="#glyph0-38" x="672" y="212"/>
+ <use xlink:href="#glyph0-6" x="686" y="212"/>
+ <use xlink:href="#glyph0-50" x="700" y="212"/>
+ <use xlink:href="#glyph0-8" x="714" y="212"/>
+ <use xlink:href="#glyph0-51" x="728" y="212"/>
+ <use xlink:href="#glyph0-18" x="742" y="212"/>
+ <use xlink:href="#glyph0-15" x="756" y="212"/>
+ <use xlink:href="#glyph0-24" x="770" y="212"/>
+ <use xlink:href="#glyph0-10" x="784" y="212"/>
+ <use xlink:href="#glyph0-1" x="798" y="212"/>
+ <use xlink:href="#glyph0-23" x="812" y="212"/>
+ <use xlink:href="#glyph0-1" x="826" y="212"/>
+ <use xlink:href="#glyph0-25" x="840" y="212"/>
+ <use xlink:href="#glyph0-15" x="854" y="212"/>
+ <use xlink:href="#glyph0-18" x="868" y="212"/>
+ <use xlink:href="#glyph0-34" x="882" y="212"/>
+ <use xlink:href="#glyph0-1" x="896" y="212"/>
+ <use xlink:href="#glyph0-1" x="910" y="212"/>
+ <use xlink:href="#glyph0-1" x="924" y="212"/>
+ <use xlink:href="#glyph0-1" x="938" y="212"/>
+ <use xlink:href="#glyph0-1" x="952" y="212"/>
+ <use xlink:href="#glyph0-1" x="966" y="212"/>
+ <use xlink:href="#glyph0-1" x="980" y="212"/>
+ <use xlink:href="#glyph0-1" x="994" y="212"/>
+ <use xlink:href="#glyph0-1" x="1008" y="212"/>
+ <use xlink:href="#glyph0-1" x="1022" y="212"/>
+ <use xlink:href="#glyph0-1" x="1036" y="212"/>
+ <use xlink:href="#glyph0-1" x="1050" y="212"/>
+ <use xlink:href="#glyph0-1" x="1064" y="212"/>
+ <use xlink:href="#glyph0-1" x="1078" y="212"/>
+ <use xlink:href="#glyph0-1" x="1092" y="212"/>
+ <use xlink:href="#glyph0-1" x="1106" y="212"/>
+ <use xlink:href="#glyph0-1" x="1120" y="212"/>
+ <use xlink:href="#glyph0-1" x="1134" y="212"/>
+ <use xlink:href="#glyph0-1" x="1148" y="212"/>
+ <use xlink:href="#glyph0-1" x="1162" y="212"/>
+ <use xlink:href="#glyph0-1" x="1176" y="212"/>
+ <use xlink:href="#glyph0-1" x="1190" y="212"/>
+ <use xlink:href="#glyph0-1" x="1204" y="212"/>
+ <use xlink:href="#glyph0-1" x="1218" y="212"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 1232 186 L 1260 186 L 1260 217 L 1232 217 Z M 1232 186 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-1" x="1232" y="212"/>
+ <use xlink:href="#glyph0-45" x="1246" y="212"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 1260 186 L 1400 186 L 1400 217 L 1260 217 Z M 1260 186 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;" d="M 1400 186 L 1414 186 L 1414 217 L 1400 217 Z M 1400 186 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(14.117647%,15.686275%,21.568627%);fill-opacity:1;" d="M 1414 186 L 1428 186 L 1428 217 L 1414 217 Z M 1414 186 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 0 217 L 140 217 L 140 248 L 0 248 Z M 0 217 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 140 217 L 168 217 L 168 248 L 140 248 Z M 140 217 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-45" x="140" y="243"/>
+ <use xlink:href="#glyph0-1" x="154" y="243"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 168 217 L 182 217 L 182 248 L 168 248 Z M 168 217 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-2" x="168" y="243"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 182 217 L 266 217 L 266 248 L 182 248 Z M 182 217 "/>
+<g style="fill:rgb(53.333333%,93.72549%,53.333333%);fill-opacity:1;">
+ <use xlink:href="#glyph0-13" x="182" y="243"/>
+ <use xlink:href="#glyph0-4" x="196" y="243"/>
+ <use xlink:href="#glyph0-14" x="210" y="243"/>
+ <use xlink:href="#glyph0-9" x="224" y="243"/>
+ <use xlink:href="#glyph0-15" x="238" y="243"/>
+ <use xlink:href="#glyph0-12" x="252" y="243"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 266 217 L 448 217 L 448 248 L 266 248 Z M 266 217 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-16" x="266" y="243"/>
+ <use xlink:href="#glyph0-7" x="280" y="243"/>
+ <use xlink:href="#glyph0-1" x="294" y="243"/>
+ <use xlink:href="#glyph0-46" x="308" y="243"/>
+ <use xlink:href="#glyph0-1" x="322" y="243"/>
+ <use xlink:href="#glyph0-47" x="336" y="243"/>
+ <use xlink:href="#glyph0-48" x="350" y="243"/>
+ <use xlink:href="#glyph0-1" x="364" y="243"/>
+ <use xlink:href="#glyph0-3" x="378" y="243"/>
+ <use xlink:href="#glyph0-23" x="392" y="243"/>
+ <use xlink:href="#glyph0-29" x="406" y="243"/>
+ <use xlink:href="#glyph0-6" x="420" y="243"/>
+ <use xlink:href="#glyph0-1" x="434" y="243"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 448 217 L 532 217 L 532 248 L 448 248 Z M 448 217 "/>
+<g style="fill:rgb(57.254902%,85.098039%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-31" x="448" y="243"/>
+ <use xlink:href="#glyph0-6" x="462" y="243"/>
+ <use xlink:href="#glyph0-38" x="476" y="243"/>
+ <use xlink:href="#glyph0-4" x="490" y="243"/>
+ <use xlink:href="#glyph0-25" x="504" y="243"/>
+ <use xlink:href="#glyph0-6" x="518" y="243"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 532 217 L 546 217 L 546 248 L 532 248 Z M 532 217 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 546 217 L 588 217 L 588 248 L 546 248 Z M 546 217 "/>
+<g style="fill:rgb(37.647059%,90.588235%,87.843137%);fill-opacity:1;">
+ <use xlink:href="#glyph0-37" x="546" y="243"/>
+ <use xlink:href="#glyph0-15" x="560" y="243"/>
+ <use xlink:href="#glyph0-33" x="574" y="243"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 588 217 L 1232 217 L 1232 248 L 588 248 Z M 588 217 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-16" x="588" y="243"/>
+ <use xlink:href="#glyph0-1" x="602" y="243"/>
+ <use xlink:href="#glyph0-46" x="616" y="243"/>
+ <use xlink:href="#glyph0-1" x="630" y="243"/>
+ <use xlink:href="#glyph0-2" x="644" y="243"/>
+ <use xlink:href="#glyph0-1" x="658" y="243"/>
+ <use xlink:href="#glyph0-7" x="672" y="243"/>
+ <use xlink:href="#glyph0-1" x="686" y="243"/>
+ <use xlink:href="#glyph0-37" x="700" y="243"/>
+ <use xlink:href="#glyph0-4" x="714" y="243"/>
+ <use xlink:href="#glyph0-14" x="728" y="243"/>
+ <use xlink:href="#glyph0-6" x="742" y="243"/>
+ <use xlink:href="#glyph0-1" x="756" y="243"/>
+ <use xlink:href="#glyph0-12" x="770" y="243"/>
+ <use xlink:href="#glyph0-24" x="784" y="243"/>
+ <use xlink:href="#glyph0-23" x="798" y="243"/>
+ <use xlink:href="#glyph0-18" x="812" y="243"/>
+ <use xlink:href="#glyph0-18" x="826" y="243"/>
+ <use xlink:href="#glyph0-4" x="840" y="243"/>
+ <use xlink:href="#glyph0-14" x="854" y="243"/>
+ <use xlink:href="#glyph0-33" x="868" y="243"/>
+ <use xlink:href="#glyph0-1" x="882" y="243"/>
+ <use xlink:href="#glyph0-1" x="896" y="243"/>
+ <use xlink:href="#glyph0-1" x="910" y="243"/>
+ <use xlink:href="#glyph0-1" x="924" y="243"/>
+ <use xlink:href="#glyph0-1" x="938" y="243"/>
+ <use xlink:href="#glyph0-1" x="952" y="243"/>
+ <use xlink:href="#glyph0-1" x="966" y="243"/>
+ <use xlink:href="#glyph0-1" x="980" y="243"/>
+ <use xlink:href="#glyph0-1" x="994" y="243"/>
+ <use xlink:href="#glyph0-1" x="1008" y="243"/>
+ <use xlink:href="#glyph0-1" x="1022" y="243"/>
+ <use xlink:href="#glyph0-1" x="1036" y="243"/>
+ <use xlink:href="#glyph0-1" x="1050" y="243"/>
+ <use xlink:href="#glyph0-1" x="1064" y="243"/>
+ <use xlink:href="#glyph0-1" x="1078" y="243"/>
+ <use xlink:href="#glyph0-1" x="1092" y="243"/>
+ <use xlink:href="#glyph0-1" x="1106" y="243"/>
+ <use xlink:href="#glyph0-1" x="1120" y="243"/>
+ <use xlink:href="#glyph0-1" x="1134" y="243"/>
+ <use xlink:href="#glyph0-1" x="1148" y="243"/>
+ <use xlink:href="#glyph0-1" x="1162" y="243"/>
+ <use xlink:href="#glyph0-1" x="1176" y="243"/>
+ <use xlink:href="#glyph0-1" x="1190" y="243"/>
+ <use xlink:href="#glyph0-1" x="1204" y="243"/>
+ <use xlink:href="#glyph0-1" x="1218" y="243"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 1232 217 L 1260 217 L 1260 248 L 1232 248 Z M 1232 217 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-1" x="1232" y="243"/>
+ <use xlink:href="#glyph0-45" x="1246" y="243"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 1260 217 L 1400 217 L 1400 248 L 1260 248 Z M 1260 217 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;" d="M 1400 217 L 1414 217 L 1414 248 L 1400 248 Z M 1400 217 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(14.117647%,15.686275%,21.568627%);fill-opacity:1;" d="M 1414 217 L 1428 217 L 1428 248 L 1414 248 Z M 1414 217 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 0 248 L 140 248 L 140 279 L 0 279 Z M 0 248 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 140 248 L 168 248 L 168 279 L 140 279 Z M 140 248 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-45" x="140" y="274"/>
+ <use xlink:href="#glyph0-1" x="154" y="274"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 168 248 L 182 248 L 182 279 L 168 279 Z M 168 248 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-2" x="168" y="274"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 182 248 L 266 248 L 266 279 L 182 279 Z M 182 248 "/>
+<g style="fill:rgb(53.333333%,93.72549%,53.333333%);fill-opacity:1;">
+ <use xlink:href="#glyph0-13" x="182" y="274"/>
+ <use xlink:href="#glyph0-4" x="196" y="274"/>
+ <use xlink:href="#glyph0-14" x="210" y="274"/>
+ <use xlink:href="#glyph0-9" x="224" y="274"/>
+ <use xlink:href="#glyph0-15" x="238" y="274"/>
+ <use xlink:href="#glyph0-12" x="252" y="274"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 266 248 L 448 248 L 448 279 L 266 279 Z M 266 248 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-16" x="266" y="274"/>
+ <use xlink:href="#glyph0-7" x="280" y="274"/>
+ <use xlink:href="#glyph0-1" x="294" y="274"/>
+ <use xlink:href="#glyph0-46" x="308" y="274"/>
+ <use xlink:href="#glyph0-1" x="322" y="274"/>
+ <use xlink:href="#glyph0-47" x="336" y="274"/>
+ <use xlink:href="#glyph0-48" x="350" y="274"/>
+ <use xlink:href="#glyph0-1" x="364" y="274"/>
+ <use xlink:href="#glyph0-3" x="378" y="274"/>
+ <use xlink:href="#glyph0-23" x="392" y="274"/>
+ <use xlink:href="#glyph0-29" x="406" y="274"/>
+ <use xlink:href="#glyph0-6" x="420" y="274"/>
+ <use xlink:href="#glyph0-1" x="434" y="274"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 448 248 L 532 248 L 532 279 L 448 279 Z M 448 248 "/>
+<g style="fill:rgb(57.254902%,85.098039%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-31" x="448" y="274"/>
+ <use xlink:href="#glyph0-6" x="462" y="274"/>
+ <use xlink:href="#glyph0-38" x="476" y="274"/>
+ <use xlink:href="#glyph0-4" x="490" y="274"/>
+ <use xlink:href="#glyph0-25" x="504" y="274"/>
+ <use xlink:href="#glyph0-6" x="518" y="274"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 532 248 L 546 248 L 546 279 L 532 279 Z M 532 248 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 546 248 L 588 248 L 588 279 L 546 279 Z M 546 248 "/>
+<g style="fill:rgb(37.647059%,90.588235%,87.843137%);fill-opacity:1;">
+ <use xlink:href="#glyph0-37" x="546" y="274"/>
+ <use xlink:href="#glyph0-15" x="560" y="274"/>
+ <use xlink:href="#glyph0-33" x="574" y="274"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 588 248 L 1232 248 L 1232 279 L 588 279 Z M 588 248 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-16" x="588" y="274"/>
+ <use xlink:href="#glyph0-1" x="602" y="274"/>
+ <use xlink:href="#glyph0-46" x="616" y="274"/>
+ <use xlink:href="#glyph0-1" x="630" y="274"/>
+ <use xlink:href="#glyph0-2" x="644" y="274"/>
+ <use xlink:href="#glyph0-49" x="658" y="274"/>
+ <use xlink:href="#glyph0-7" x="672" y="274"/>
+ <use xlink:href="#glyph0-1" x="686" y="274"/>
+ <use xlink:href="#glyph0-20" x="700" y="274"/>
+ <use xlink:href="#glyph0-23" x="714" y="274"/>
+ <use xlink:href="#glyph0-32" x="728" y="274"/>
+ <use xlink:href="#glyph0-5" x="742" y="274"/>
+ <use xlink:href="#glyph0-6" x="756" y="274"/>
+ <use xlink:href="#glyph0-1" x="770" y="274"/>
+ <use xlink:href="#glyph0-38" x="784" y="274"/>
+ <use xlink:href="#glyph0-4" x="798" y="274"/>
+ <use xlink:href="#glyph0-6" x="812" y="274"/>
+ <use xlink:href="#glyph0-12" x="826" y="274"/>
+ <use xlink:href="#glyph0-1" x="840" y="274"/>
+ <use xlink:href="#glyph0-1" x="854" y="274"/>
+ <use xlink:href="#glyph0-1" x="868" y="274"/>
+ <use xlink:href="#glyph0-1" x="882" y="274"/>
+ <use xlink:href="#glyph0-1" x="896" y="274"/>
+ <use xlink:href="#glyph0-1" x="910" y="274"/>
+ <use xlink:href="#glyph0-1" x="924" y="274"/>
+ <use xlink:href="#glyph0-1" x="938" y="274"/>
+ <use xlink:href="#glyph0-1" x="952" y="274"/>
+ <use xlink:href="#glyph0-1" x="966" y="274"/>
+ <use xlink:href="#glyph0-1" x="980" y="274"/>
+ <use xlink:href="#glyph0-1" x="994" y="274"/>
+ <use xlink:href="#glyph0-1" x="1008" y="274"/>
+ <use xlink:href="#glyph0-1" x="1022" y="274"/>
+ <use xlink:href="#glyph0-1" x="1036" y="274"/>
+ <use xlink:href="#glyph0-1" x="1050" y="274"/>
+ <use xlink:href="#glyph0-1" x="1064" y="274"/>
+ <use xlink:href="#glyph0-1" x="1078" y="274"/>
+ <use xlink:href="#glyph0-1" x="1092" y="274"/>
+ <use xlink:href="#glyph0-1" x="1106" y="274"/>
+ <use xlink:href="#glyph0-1" x="1120" y="274"/>
+ <use xlink:href="#glyph0-1" x="1134" y="274"/>
+ <use xlink:href="#glyph0-1" x="1148" y="274"/>
+ <use xlink:href="#glyph0-1" x="1162" y="274"/>
+ <use xlink:href="#glyph0-1" x="1176" y="274"/>
+ <use xlink:href="#glyph0-1" x="1190" y="274"/>
+ <use xlink:href="#glyph0-1" x="1204" y="274"/>
+ <use xlink:href="#glyph0-1" x="1218" y="274"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 1232 248 L 1260 248 L 1260 279 L 1232 279 Z M 1232 248 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-1" x="1232" y="274"/>
+ <use xlink:href="#glyph0-45" x="1246" y="274"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 1260 248 L 1400 248 L 1400 279 L 1260 279 Z M 1260 248 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;" d="M 1400 248 L 1414 248 L 1414 279 L 1400 279 Z M 1400 248 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(14.117647%,15.686275%,21.568627%);fill-opacity:1;" d="M 1414 248 L 1428 248 L 1428 279 L 1414 279 Z M 1414 248 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 0 279 L 140 279 L 140 310 L 0 310 Z M 0 279 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 140 279 L 168 279 L 168 310 L 140 310 Z M 140 279 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-45" x="140" y="305"/>
+ <use xlink:href="#glyph0-1" x="154" y="305"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 168 279 L 182 279 L 182 310 L 168 310 Z M 168 279 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-2" x="168" y="305"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 182 279 L 266 279 L 266 310 L 182 310 Z M 182 279 "/>
+<g style="fill:rgb(53.333333%,93.72549%,53.333333%);fill-opacity:1;">
+ <use xlink:href="#glyph0-13" x="182" y="305"/>
+ <use xlink:href="#glyph0-4" x="196" y="305"/>
+ <use xlink:href="#glyph0-14" x="210" y="305"/>
+ <use xlink:href="#glyph0-9" x="224" y="305"/>
+ <use xlink:href="#glyph0-15" x="238" y="305"/>
+ <use xlink:href="#glyph0-12" x="252" y="305"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 266 279 L 448 279 L 448 310 L 266 310 Z M 266 279 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-16" x="266" y="305"/>
+ <use xlink:href="#glyph0-7" x="280" y="305"/>
+ <use xlink:href="#glyph0-1" x="294" y="305"/>
+ <use xlink:href="#glyph0-46" x="308" y="305"/>
+ <use xlink:href="#glyph0-1" x="322" y="305"/>
+ <use xlink:href="#glyph0-47" x="336" y="305"/>
+ <use xlink:href="#glyph0-48" x="350" y="305"/>
+ <use xlink:href="#glyph0-1" x="364" y="305"/>
+ <use xlink:href="#glyph0-3" x="378" y="305"/>
+ <use xlink:href="#glyph0-23" x="392" y="305"/>
+ <use xlink:href="#glyph0-29" x="406" y="305"/>
+ <use xlink:href="#glyph0-6" x="420" y="305"/>
+ <use xlink:href="#glyph0-1" x="434" y="305"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 448 279 L 532 279 L 532 310 L 448 310 Z M 448 279 "/>
+<g style="fill:rgb(57.254902%,85.098039%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-31" x="448" y="305"/>
+ <use xlink:href="#glyph0-6" x="462" y="305"/>
+ <use xlink:href="#glyph0-38" x="476" y="305"/>
+ <use xlink:href="#glyph0-4" x="490" y="305"/>
+ <use xlink:href="#glyph0-25" x="504" y="305"/>
+ <use xlink:href="#glyph0-6" x="518" y="305"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 532 279 L 546 279 L 546 310 L 532 310 Z M 532 279 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 546 279 L 588 279 L 588 310 L 546 310 Z M 546 279 "/>
+<g style="fill:rgb(37.647059%,90.588235%,87.843137%);fill-opacity:1;">
+ <use xlink:href="#glyph0-37" x="546" y="305"/>
+ <use xlink:href="#glyph0-15" x="560" y="305"/>
+ <use xlink:href="#glyph0-33" x="574" y="305"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 588 279 L 1232 279 L 1232 310 L 588 310 Z M 588 279 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-16" x="588" y="305"/>
+ <use xlink:href="#glyph0-1" x="602" y="305"/>
+ <use xlink:href="#glyph0-46" x="616" y="305"/>
+ <use xlink:href="#glyph0-1" x="630" y="305"/>
+ <use xlink:href="#glyph0-2" x="644" y="305"/>
+ <use xlink:href="#glyph0-49" x="658" y="305"/>
+ <use xlink:href="#glyph0-7" x="672" y="305"/>
+ <use xlink:href="#glyph0-1" x="686" y="305"/>
+ <use xlink:href="#glyph0-3" x="700" y="305"/>
+ <use xlink:href="#glyph0-15" x="714" y="305"/>
+ <use xlink:href="#glyph0-5" x="728" y="305"/>
+ <use xlink:href="#glyph0-5" x="742" y="305"/>
+ <use xlink:href="#glyph0-15" x="756" y="305"/>
+ <use xlink:href="#glyph0-12" x="770" y="305"/>
+ <use xlink:href="#glyph0-1" x="784" y="305"/>
+ <use xlink:href="#glyph0-1" x="798" y="305"/>
+ <use xlink:href="#glyph0-1" x="812" y="305"/>
+ <use xlink:href="#glyph0-1" x="826" y="305"/>
+ <use xlink:href="#glyph0-1" x="840" y="305"/>
+ <use xlink:href="#glyph0-1" x="854" y="305"/>
+ <use xlink:href="#glyph0-1" x="868" y="305"/>
+ <use xlink:href="#glyph0-1" x="882" y="305"/>
+ <use xlink:href="#glyph0-1" x="896" y="305"/>
+ <use xlink:href="#glyph0-1" x="910" y="305"/>
+ <use xlink:href="#glyph0-1" x="924" y="305"/>
+ <use xlink:href="#glyph0-1" x="938" y="305"/>
+ <use xlink:href="#glyph0-1" x="952" y="305"/>
+ <use xlink:href="#glyph0-1" x="966" y="305"/>
+ <use xlink:href="#glyph0-1" x="980" y="305"/>
+ <use xlink:href="#glyph0-1" x="994" y="305"/>
+ <use xlink:href="#glyph0-1" x="1008" y="305"/>
+ <use xlink:href="#glyph0-1" x="1022" y="305"/>
+ <use xlink:href="#glyph0-1" x="1036" y="305"/>
+ <use xlink:href="#glyph0-1" x="1050" y="305"/>
+ <use xlink:href="#glyph0-1" x="1064" y="305"/>
+ <use xlink:href="#glyph0-1" x="1078" y="305"/>
+ <use xlink:href="#glyph0-1" x="1092" y="305"/>
+ <use xlink:href="#glyph0-1" x="1106" y="305"/>
+ <use xlink:href="#glyph0-1" x="1120" y="305"/>
+ <use xlink:href="#glyph0-1" x="1134" y="305"/>
+ <use xlink:href="#glyph0-1" x="1148" y="305"/>
+ <use xlink:href="#glyph0-1" x="1162" y="305"/>
+ <use xlink:href="#glyph0-1" x="1176" y="305"/>
+ <use xlink:href="#glyph0-1" x="1190" y="305"/>
+ <use xlink:href="#glyph0-1" x="1204" y="305"/>
+ <use xlink:href="#glyph0-1" x="1218" y="305"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 1232 279 L 1260 279 L 1260 310 L 1232 310 Z M 1232 279 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-1" x="1232" y="305"/>
+ <use xlink:href="#glyph0-45" x="1246" y="305"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 1260 279 L 1400 279 L 1400 310 L 1260 310 Z M 1260 279 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;" d="M 1400 279 L 1414 279 L 1414 310 L 1400 310 Z M 1400 279 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(14.117647%,15.686275%,21.568627%);fill-opacity:1;" d="M 1414 279 L 1428 279 L 1428 310 L 1414 310 Z M 1414 279 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 0 310 L 140 310 L 140 341 L 0 341 Z M 0 310 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 140 310 L 168 310 L 168 341 L 140 341 Z M 140 310 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-45" x="140" y="336"/>
+ <use xlink:href="#glyph0-1" x="154" y="336"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 168 310 L 182 310 L 182 341 L 168 341 Z M 168 310 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-2" x="168" y="336"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 182 310 L 266 310 L 266 341 L 182 341 Z M 182 310 "/>
+<g style="fill:rgb(53.333333%,93.72549%,53.333333%);fill-opacity:1;">
+ <use xlink:href="#glyph0-13" x="182" y="336"/>
+ <use xlink:href="#glyph0-4" x="196" y="336"/>
+ <use xlink:href="#glyph0-14" x="210" y="336"/>
+ <use xlink:href="#glyph0-9" x="224" y="336"/>
+ <use xlink:href="#glyph0-15" x="238" y="336"/>
+ <use xlink:href="#glyph0-12" x="252" y="336"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 266 310 L 448 310 L 448 341 L 266 341 Z M 266 310 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-16" x="266" y="336"/>
+ <use xlink:href="#glyph0-7" x="280" y="336"/>
+ <use xlink:href="#glyph0-1" x="294" y="336"/>
+ <use xlink:href="#glyph0-46" x="308" y="336"/>
+ <use xlink:href="#glyph0-1" x="322" y="336"/>
+ <use xlink:href="#glyph0-47" x="336" y="336"/>
+ <use xlink:href="#glyph0-48" x="350" y="336"/>
+ <use xlink:href="#glyph0-1" x="364" y="336"/>
+ <use xlink:href="#glyph0-3" x="378" y="336"/>
+ <use xlink:href="#glyph0-23" x="392" y="336"/>
+ <use xlink:href="#glyph0-29" x="406" y="336"/>
+ <use xlink:href="#glyph0-6" x="420" y="336"/>
+ <use xlink:href="#glyph0-1" x="434" y="336"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 448 310 L 532 310 L 532 341 L 448 341 Z M 448 310 "/>
+<g style="fill:rgb(57.254902%,85.098039%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-31" x="448" y="336"/>
+ <use xlink:href="#glyph0-6" x="462" y="336"/>
+ <use xlink:href="#glyph0-38" x="476" y="336"/>
+ <use xlink:href="#glyph0-4" x="490" y="336"/>
+ <use xlink:href="#glyph0-25" x="504" y="336"/>
+ <use xlink:href="#glyph0-6" x="518" y="336"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 532 310 L 546 310 L 546 341 L 532 341 Z M 532 310 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 546 310 L 588 310 L 588 341 L 546 341 Z M 546 310 "/>
+<g style="fill:rgb(37.647059%,90.588235%,87.843137%);fill-opacity:1;">
+ <use xlink:href="#glyph0-37" x="546" y="336"/>
+ <use xlink:href="#glyph0-15" x="560" y="336"/>
+ <use xlink:href="#glyph0-33" x="574" y="336"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 588 310 L 1232 310 L 1232 341 L 588 341 Z M 588 310 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-16" x="588" y="336"/>
+ <use xlink:href="#glyph0-1" x="602" y="336"/>
+ <use xlink:href="#glyph0-46" x="616" y="336"/>
+ <use xlink:href="#glyph0-1" x="630" y="336"/>
+ <use xlink:href="#glyph0-19" x="644" y="336"/>
+ <use xlink:href="#glyph0-5" x="658" y="336"/>
+ <use xlink:href="#glyph0-6" x="672" y="336"/>
+ <use xlink:href="#glyph0-23" x="686" y="336"/>
+ <use xlink:href="#glyph0-24" x="700" y="336"/>
+ <use xlink:href="#glyph0-1" x="714" y="336"/>
+ <use xlink:href="#glyph0-26" x="728" y="336"/>
+ <use xlink:href="#glyph0-4" x="742" y="336"/>
+ <use xlink:href="#glyph0-16" x="756" y="336"/>
+ <use xlink:href="#glyph0-10" x="770" y="336"/>
+ <use xlink:href="#glyph0-15" x="784" y="336"/>
+ <use xlink:href="#glyph0-24" x="798" y="336"/>
+ <use xlink:href="#glyph0-34" x="812" y="336"/>
+ <use xlink:href="#glyph0-1" x="826" y="336"/>
+ <use xlink:href="#glyph0-1" x="840" y="336"/>
+ <use xlink:href="#glyph0-1" x="854" y="336"/>
+ <use xlink:href="#glyph0-1" x="868" y="336"/>
+ <use xlink:href="#glyph0-1" x="882" y="336"/>
+ <use xlink:href="#glyph0-1" x="896" y="336"/>
+ <use xlink:href="#glyph0-1" x="910" y="336"/>
+ <use xlink:href="#glyph0-1" x="924" y="336"/>
+ <use xlink:href="#glyph0-1" x="938" y="336"/>
+ <use xlink:href="#glyph0-1" x="952" y="336"/>
+ <use xlink:href="#glyph0-1" x="966" y="336"/>
+ <use xlink:href="#glyph0-1" x="980" y="336"/>
+ <use xlink:href="#glyph0-1" x="994" y="336"/>
+ <use xlink:href="#glyph0-1" x="1008" y="336"/>
+ <use xlink:href="#glyph0-1" x="1022" y="336"/>
+ <use xlink:href="#glyph0-1" x="1036" y="336"/>
+ <use xlink:href="#glyph0-1" x="1050" y="336"/>
+ <use xlink:href="#glyph0-1" x="1064" y="336"/>
+ <use xlink:href="#glyph0-1" x="1078" y="336"/>
+ <use xlink:href="#glyph0-1" x="1092" y="336"/>
+ <use xlink:href="#glyph0-1" x="1106" y="336"/>
+ <use xlink:href="#glyph0-1" x="1120" y="336"/>
+ <use xlink:href="#glyph0-1" x="1134" y="336"/>
+ <use xlink:href="#glyph0-1" x="1148" y="336"/>
+ <use xlink:href="#glyph0-1" x="1162" y="336"/>
+ <use xlink:href="#glyph0-1" x="1176" y="336"/>
+ <use xlink:href="#glyph0-1" x="1190" y="336"/>
+ <use xlink:href="#glyph0-1" x="1204" y="336"/>
+ <use xlink:href="#glyph0-1" x="1218" y="336"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 1232 310 L 1260 310 L 1260 341 L 1232 341 Z M 1232 310 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-1" x="1232" y="336"/>
+ <use xlink:href="#glyph0-45" x="1246" y="336"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 1260 310 L 1400 310 L 1400 341 L 1260 341 Z M 1260 310 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(6.27451%,5.882353%,6.27451%);fill-opacity:1;" d="M 1400 310 L 1414 310 L 1414 341 L 1400 341 Z M 1400 310 "/>
+<g style="fill:rgb(57.254902%,85.098039%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-38" x="1400" y="336"/>
+</g>
+<g style="fill:rgb(57.254902%,85.098039%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-38" x="1401" y="336"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(14.117647%,15.686275%,21.568627%);fill-opacity:1;" d="M 1414 310 L 1428 310 L 1428 341 L 1414 341 Z M 1414 310 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 0 341 L 14 341 L 14 372 L 0 372 Z M 0 341 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 14 341 L 168 341 L 168 372 L 14 372 Z M 14 341 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-1" x="14" y="367"/>
+ <use xlink:href="#glyph0-30" x="28" y="367"/>
+ <use xlink:href="#glyph0-34" x="42" y="367"/>
+ <use xlink:href="#glyph0-10" x="56" y="367"/>
+ <use xlink:href="#glyph0-26" x="70" y="367"/>
+ <use xlink:href="#glyph0-15" x="84" y="367"/>
+ <use xlink:href="#glyph0-14" x="98" y="367"/>
+ <use xlink:href="#glyph0-1" x="112" y="367"/>
+ <use xlink:href="#glyph0-35" x="126" y="367"/>
+ <use xlink:href="#glyph0-45" x="140" y="367"/>
+ <use xlink:href="#glyph0-1" x="154" y="367"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 168 341 L 182 341 L 182 372 L 168 372 Z M 168 341 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-2" x="168" y="367"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 182 341 L 266 341 L 266 372 L 182 372 Z M 182 341 "/>
+<g style="fill:rgb(53.333333%,93.72549%,53.333333%);fill-opacity:1;">
+ <use xlink:href="#glyph0-13" x="182" y="367"/>
+ <use xlink:href="#glyph0-4" x="196" y="367"/>
+ <use xlink:href="#glyph0-14" x="210" y="367"/>
+ <use xlink:href="#glyph0-9" x="224" y="367"/>
+ <use xlink:href="#glyph0-15" x="238" y="367"/>
+ <use xlink:href="#glyph0-12" x="252" y="367"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 266 341 L 448 341 L 448 372 L 266 372 Z M 266 341 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-16" x="266" y="367"/>
+ <use xlink:href="#glyph0-7" x="280" y="367"/>
+ <use xlink:href="#glyph0-1" x="294" y="367"/>
+ <use xlink:href="#glyph0-46" x="308" y="367"/>
+ <use xlink:href="#glyph0-1" x="322" y="367"/>
+ <use xlink:href="#glyph0-47" x="336" y="367"/>
+ <use xlink:href="#glyph0-48" x="350" y="367"/>
+ <use xlink:href="#glyph0-1" x="364" y="367"/>
+ <use xlink:href="#glyph0-3" x="378" y="367"/>
+ <use xlink:href="#glyph0-23" x="392" y="367"/>
+ <use xlink:href="#glyph0-29" x="406" y="367"/>
+ <use xlink:href="#glyph0-6" x="420" y="367"/>
+ <use xlink:href="#glyph0-1" x="434" y="367"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 448 341 L 532 341 L 532 372 L 448 372 Z M 448 341 "/>
+<g style="fill:rgb(57.254902%,85.098039%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-31" x="448" y="367"/>
+ <use xlink:href="#glyph0-6" x="462" y="367"/>
+ <use xlink:href="#glyph0-38" x="476" y="367"/>
+ <use xlink:href="#glyph0-4" x="490" y="367"/>
+ <use xlink:href="#glyph0-25" x="504" y="367"/>
+ <use xlink:href="#glyph0-6" x="518" y="367"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 532 341 L 546 341 L 546 372 L 532 372 Z M 532 341 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 546 341 L 588 341 L 588 372 L 546 372 Z M 546 341 "/>
+<g style="fill:rgb(37.647059%,90.588235%,87.843137%);fill-opacity:1;">
+ <use xlink:href="#glyph0-37" x="546" y="367"/>
+ <use xlink:href="#glyph0-15" x="560" y="367"/>
+ <use xlink:href="#glyph0-33" x="574" y="367"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 588 341 L 1232 341 L 1232 372 L 588 372 Z M 588 341 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-16" x="588" y="367"/>
+ <use xlink:href="#glyph0-1" x="602" y="367"/>
+ <use xlink:href="#glyph0-46" x="616" y="367"/>
+ <use xlink:href="#glyph0-1" x="630" y="367"/>
+ <use xlink:href="#glyph0-31" x="644" y="367"/>
+ <use xlink:href="#glyph0-27" x="658" y="367"/>
+ <use xlink:href="#glyph0-18" x="672" y="367"/>
+ <use xlink:href="#glyph0-5" x="686" y="367"/>
+ <use xlink:href="#glyph0-4" x="700" y="367"/>
+ <use xlink:href="#glyph0-25" x="714" y="367"/>
+ <use xlink:href="#glyph0-23" x="728" y="367"/>
+ <use xlink:href="#glyph0-10" x="742" y="367"/>
+ <use xlink:href="#glyph0-6" x="756" y="367"/>
+ <use xlink:href="#glyph0-1" x="770" y="367"/>
+ <use xlink:href="#glyph0-18" x="784" y="367"/>
+ <use xlink:href="#glyph0-23" x="798" y="367"/>
+ <use xlink:href="#glyph0-14" x="812" y="367"/>
+ <use xlink:href="#glyph0-6" x="826" y="367"/>
+ <use xlink:href="#glyph0-1" x="840" y="367"/>
+ <use xlink:href="#glyph0-1" x="854" y="367"/>
+ <use xlink:href="#glyph0-1" x="868" y="367"/>
+ <use xlink:href="#glyph0-1" x="882" y="367"/>
+ <use xlink:href="#glyph0-1" x="896" y="367"/>
+ <use xlink:href="#glyph0-1" x="910" y="367"/>
+ <use xlink:href="#glyph0-1" x="924" y="367"/>
+ <use xlink:href="#glyph0-1" x="938" y="367"/>
+ <use xlink:href="#glyph0-1" x="952" y="367"/>
+ <use xlink:href="#glyph0-1" x="966" y="367"/>
+ <use xlink:href="#glyph0-1" x="980" y="367"/>
+ <use xlink:href="#glyph0-1" x="994" y="367"/>
+ <use xlink:href="#glyph0-1" x="1008" y="367"/>
+ <use xlink:href="#glyph0-1" x="1022" y="367"/>
+ <use xlink:href="#glyph0-1" x="1036" y="367"/>
+ <use xlink:href="#glyph0-1" x="1050" y="367"/>
+ <use xlink:href="#glyph0-1" x="1064" y="367"/>
+ <use xlink:href="#glyph0-1" x="1078" y="367"/>
+ <use xlink:href="#glyph0-1" x="1092" y="367"/>
+ <use xlink:href="#glyph0-1" x="1106" y="367"/>
+ <use xlink:href="#glyph0-1" x="1120" y="367"/>
+ <use xlink:href="#glyph0-1" x="1134" y="367"/>
+ <use xlink:href="#glyph0-1" x="1148" y="367"/>
+ <use xlink:href="#glyph0-1" x="1162" y="367"/>
+ <use xlink:href="#glyph0-1" x="1176" y="367"/>
+ <use xlink:href="#glyph0-1" x="1190" y="367"/>
+ <use xlink:href="#glyph0-1" x="1204" y="367"/>
+ <use xlink:href="#glyph0-1" x="1218" y="367"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 1232 341 L 1260 341 L 1260 372 L 1232 372 Z M 1232 341 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-1" x="1232" y="367"/>
+ <use xlink:href="#glyph0-45" x="1246" y="367"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 1260 341 L 1358 341 L 1358 372 L 1260 372 Z M 1260 341 "/>
+<g style="fill:rgb(87.843137%,90.196078%,94.117647%);fill-opacity:1;">
+ <use xlink:href="#glyph0-15" x="1260" y="367"/>
+ <use xlink:href="#glyph0-1" x="1274" y="367"/>
+ <use xlink:href="#glyph0-52" x="1288" y="367"/>
+ <use xlink:href="#glyph0-15" x="1302" y="367"/>
+ <use xlink:href="#glyph0-25" x="1316" y="367"/>
+ <use xlink:href="#glyph0-27" x="1330" y="367"/>
+ <use xlink:href="#glyph0-16" x="1344" y="367"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 1358 341 L 1372 341 L 1372 372 L 1358 372 Z M 1358 341 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 1372 341 L 1414 341 L 1414 372 L 1372 372 Z M 1372 341 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(14.117647%,15.686275%,21.568627%);fill-opacity:1;" d="M 1414 341 L 1428 341 L 1428 372 L 1414 372 Z M 1414 341 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 0 372 L 56 372 L 56 403 L 0 403 Z M 0 372 "/>
+<g style="fill:rgb(87.843137%,90.196078%,94.117647%);fill-opacity:1;">
+ <use xlink:href="#glyph0-46" x="0" y="398"/>
+ <use xlink:href="#glyph0-46" x="14" y="398"/>
+ <use xlink:href="#glyph0-46" x="28" y="398"/>
+ <use xlink:href="#glyph0-1" x="42" y="398"/>
+</g>
+<g style="fill:rgb(87.843137%,90.196078%,94.117647%);fill-opacity:1;">
+ <use xlink:href="#glyph0-46" x="1" y="398"/>
+ <use xlink:href="#glyph0-46" x="15" y="398"/>
+ <use xlink:href="#glyph0-46" x="29" y="398"/>
+ <use xlink:href="#glyph0-1" x="43" y="398"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 56 372 L 140 372 L 140 403 L 56 403 Z M 56 372 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 140 372 L 168 372 L 168 403 L 140 403 Z M 140 372 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-45" x="140" y="398"/>
+ <use xlink:href="#glyph0-1" x="154" y="398"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 168 372 L 182 372 L 182 403 L 168 403 Z M 168 372 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-2" x="168" y="398"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 182 372 L 266 372 L 266 403 L 182 403 Z M 182 372 "/>
+<g style="fill:rgb(53.333333%,93.72549%,53.333333%);fill-opacity:1;">
+ <use xlink:href="#glyph0-13" x="182" y="398"/>
+ <use xlink:href="#glyph0-4" x="196" y="398"/>
+ <use xlink:href="#glyph0-14" x="210" y="398"/>
+ <use xlink:href="#glyph0-9" x="224" y="398"/>
+ <use xlink:href="#glyph0-15" x="238" y="398"/>
+ <use xlink:href="#glyph0-12" x="252" y="398"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 266 372 L 448 372 L 448 403 L 266 403 Z M 266 372 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-16" x="266" y="398"/>
+ <use xlink:href="#glyph0-7" x="280" y="398"/>
+ <use xlink:href="#glyph0-1" x="294" y="398"/>
+ <use xlink:href="#glyph0-46" x="308" y="398"/>
+ <use xlink:href="#glyph0-1" x="322" y="398"/>
+ <use xlink:href="#glyph0-47" x="336" y="398"/>
+ <use xlink:href="#glyph0-48" x="350" y="398"/>
+ <use xlink:href="#glyph0-1" x="364" y="398"/>
+ <use xlink:href="#glyph0-3" x="378" y="398"/>
+ <use xlink:href="#glyph0-23" x="392" y="398"/>
+ <use xlink:href="#glyph0-29" x="406" y="398"/>
+ <use xlink:href="#glyph0-6" x="420" y="398"/>
+ <use xlink:href="#glyph0-1" x="434" y="398"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 448 372 L 532 372 L 532 403 L 448 403 Z M 448 372 "/>
+<g style="fill:rgb(57.254902%,85.098039%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-31" x="448" y="398"/>
+ <use xlink:href="#glyph0-6" x="462" y="398"/>
+ <use xlink:href="#glyph0-38" x="476" y="398"/>
+ <use xlink:href="#glyph0-4" x="490" y="398"/>
+ <use xlink:href="#glyph0-25" x="504" y="398"/>
+ <use xlink:href="#glyph0-6" x="518" y="398"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 532 372 L 546 372 L 546 403 L 532 403 Z M 532 372 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 546 372 L 588 372 L 588 403 L 546 403 Z M 546 372 "/>
+<g style="fill:rgb(37.647059%,90.588235%,87.843137%);fill-opacity:1;">
+ <use xlink:href="#glyph0-37" x="546" y="398"/>
+ <use xlink:href="#glyph0-15" x="560" y="398"/>
+ <use xlink:href="#glyph0-33" x="574" y="398"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 588 372 L 1232 372 L 1232 403 L 588 403 Z M 588 372 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-16" x="588" y="398"/>
+ <use xlink:href="#glyph0-1" x="602" y="398"/>
+ <use xlink:href="#glyph0-46" x="616" y="398"/>
+ <use xlink:href="#glyph0-1" x="630" y="398"/>
+ <use xlink:href="#glyph0-17" x="644" y="398"/>
+ <use xlink:href="#glyph0-4" x="658" y="398"/>
+ <use xlink:href="#glyph0-9" x="672" y="398"/>
+ <use xlink:href="#glyph0-6" x="686" y="398"/>
+ <use xlink:href="#glyph0-1" x="700" y="398"/>
+ <use xlink:href="#glyph0-16" x="714" y="398"/>
+ <use xlink:href="#glyph0-6" x="728" y="398"/>
+ <use xlink:href="#glyph0-23" x="742" y="398"/>
+ <use xlink:href="#glyph0-24" x="756" y="398"/>
+ <use xlink:href="#glyph0-25" x="770" y="398"/>
+ <use xlink:href="#glyph0-26" x="784" y="398"/>
+ <use xlink:href="#glyph0-1" x="798" y="398"/>
+ <use xlink:href="#glyph0-26" x="812" y="398"/>
+ <use xlink:href="#glyph0-4" x="826" y="398"/>
+ <use xlink:href="#glyph0-33" x="840" y="398"/>
+ <use xlink:href="#glyph0-26" x="854" y="398"/>
+ <use xlink:href="#glyph0-5" x="868" y="398"/>
+ <use xlink:href="#glyph0-4" x="882" y="398"/>
+ <use xlink:href="#glyph0-33" x="896" y="398"/>
+ <use xlink:href="#glyph0-26" x="910" y="398"/>
+ <use xlink:href="#glyph0-10" x="924" y="398"/>
+ <use xlink:href="#glyph0-4" x="938" y="398"/>
+ <use xlink:href="#glyph0-14" x="952" y="398"/>
+ <use xlink:href="#glyph0-33" x="966" y="398"/>
+ <use xlink:href="#glyph0-1" x="980" y="398"/>
+ <use xlink:href="#glyph0-1" x="994" y="398"/>
+ <use xlink:href="#glyph0-1" x="1008" y="398"/>
+ <use xlink:href="#glyph0-1" x="1022" y="398"/>
+ <use xlink:href="#glyph0-1" x="1036" y="398"/>
+ <use xlink:href="#glyph0-1" x="1050" y="398"/>
+ <use xlink:href="#glyph0-1" x="1064" y="398"/>
+ <use xlink:href="#glyph0-1" x="1078" y="398"/>
+ <use xlink:href="#glyph0-1" x="1092" y="398"/>
+ <use xlink:href="#glyph0-1" x="1106" y="398"/>
+ <use xlink:href="#glyph0-1" x="1120" y="398"/>
+ <use xlink:href="#glyph0-1" x="1134" y="398"/>
+ <use xlink:href="#glyph0-1" x="1148" y="398"/>
+ <use xlink:href="#glyph0-1" x="1162" y="398"/>
+ <use xlink:href="#glyph0-1" x="1176" y="398"/>
+ <use xlink:href="#glyph0-1" x="1190" y="398"/>
+ <use xlink:href="#glyph0-1" x="1204" y="398"/>
+ <use xlink:href="#glyph0-1" x="1218" y="398"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 1232 372 L 1260 372 L 1260 403 L 1232 403 Z M 1232 372 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-1" x="1232" y="398"/>
+ <use xlink:href="#glyph0-45" x="1246" y="398"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 1260 372 L 1414 372 L 1414 403 L 1260 403 Z M 1260 372 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(14.117647%,15.686275%,21.568627%);fill-opacity:1;" d="M 1414 372 L 1428 372 L 1428 403 L 1414 403 Z M 1414 372 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 0 403 L 140 403 L 140 434 L 0 434 Z M 0 403 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 140 403 L 168 403 L 168 434 L 140 434 Z M 140 403 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-45" x="140" y="429"/>
+ <use xlink:href="#glyph0-1" x="154" y="429"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 168 403 L 182 403 L 182 434 L 168 434 Z M 168 403 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-2" x="168" y="429"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 182 403 L 266 403 L 266 434 L 182 434 Z M 182 403 "/>
+<g style="fill:rgb(53.333333%,93.72549%,53.333333%);fill-opacity:1;">
+ <use xlink:href="#glyph0-13" x="182" y="429"/>
+ <use xlink:href="#glyph0-4" x="196" y="429"/>
+ <use xlink:href="#glyph0-14" x="210" y="429"/>
+ <use xlink:href="#glyph0-9" x="224" y="429"/>
+ <use xlink:href="#glyph0-15" x="238" y="429"/>
+ <use xlink:href="#glyph0-12" x="252" y="429"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 266 403 L 448 403 L 448 434 L 266 434 Z M 266 403 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-16" x="266" y="429"/>
+ <use xlink:href="#glyph0-7" x="280" y="429"/>
+ <use xlink:href="#glyph0-1" x="294" y="429"/>
+ <use xlink:href="#glyph0-46" x="308" y="429"/>
+ <use xlink:href="#glyph0-1" x="322" y="429"/>
+ <use xlink:href="#glyph0-47" x="336" y="429"/>
+ <use xlink:href="#glyph0-48" x="350" y="429"/>
+ <use xlink:href="#glyph0-1" x="364" y="429"/>
+ <use xlink:href="#glyph0-3" x="378" y="429"/>
+ <use xlink:href="#glyph0-23" x="392" y="429"/>
+ <use xlink:href="#glyph0-29" x="406" y="429"/>
+ <use xlink:href="#glyph0-6" x="420" y="429"/>
+ <use xlink:href="#glyph0-1" x="434" y="429"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 448 403 L 532 403 L 532 434 L 448 434 Z M 448 403 "/>
+<g style="fill:rgb(57.254902%,85.098039%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-31" x="448" y="429"/>
+ <use xlink:href="#glyph0-6" x="462" y="429"/>
+ <use xlink:href="#glyph0-38" x="476" y="429"/>
+ <use xlink:href="#glyph0-4" x="490" y="429"/>
+ <use xlink:href="#glyph0-25" x="504" y="429"/>
+ <use xlink:href="#glyph0-6" x="518" y="429"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 532 403 L 546 403 L 546 434 L 532 434 Z M 532 403 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 546 403 L 588 403 L 588 434 L 546 434 Z M 546 403 "/>
+<g style="fill:rgb(37.647059%,90.588235%,87.843137%);fill-opacity:1;">
+ <use xlink:href="#glyph0-37" x="546" y="429"/>
+ <use xlink:href="#glyph0-15" x="560" y="429"/>
+ <use xlink:href="#glyph0-33" x="574" y="429"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 588 403 L 1232 403 L 1232 434 L 588 434 Z M 588 403 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-16" x="588" y="429"/>
+ <use xlink:href="#glyph0-1" x="602" y="429"/>
+ <use xlink:href="#glyph0-46" x="616" y="429"/>
+ <use xlink:href="#glyph0-1" x="630" y="429"/>
+ <use xlink:href="#glyph0-19" x="644" y="429"/>
+ <use xlink:href="#glyph0-24" x="658" y="429"/>
+ <use xlink:href="#glyph0-6" x="672" y="429"/>
+ <use xlink:href="#glyph0-23" x="686" y="429"/>
+ <use xlink:href="#glyph0-10" x="700" y="429"/>
+ <use xlink:href="#glyph0-6" x="714" y="429"/>
+ <use xlink:href="#glyph0-1" x="728" y="429"/>
+ <use xlink:href="#glyph0-52" x="742" y="429"/>
+ <use xlink:href="#glyph0-4" x="756" y="429"/>
+ <use xlink:href="#glyph0-5" x="770" y="429"/>
+ <use xlink:href="#glyph0-10" x="784" y="429"/>
+ <use xlink:href="#glyph0-6" x="798" y="429"/>
+ <use xlink:href="#glyph0-24" x="812" y="429"/>
+ <use xlink:href="#glyph0-1" x="826" y="429"/>
+ <use xlink:href="#glyph0-52" x="840" y="429"/>
+ <use xlink:href="#glyph0-24" x="854" y="429"/>
+ <use xlink:href="#glyph0-15" x="868" y="429"/>
+ <use xlink:href="#glyph0-43" x="882" y="429"/>
+ <use xlink:href="#glyph0-1" x="896" y="429"/>
+ <use xlink:href="#glyph0-16" x="910" y="429"/>
+ <use xlink:href="#glyph0-6" x="924" y="429"/>
+ <use xlink:href="#glyph0-23" x="938" y="429"/>
+ <use xlink:href="#glyph0-24" x="952" y="429"/>
+ <use xlink:href="#glyph0-25" x="966" y="429"/>
+ <use xlink:href="#glyph0-26" x="980" y="429"/>
+ <use xlink:href="#glyph0-1" x="994" y="429"/>
+ <use xlink:href="#glyph0-24" x="1008" y="429"/>
+ <use xlink:href="#glyph0-6" x="1022" y="429"/>
+ <use xlink:href="#glyph0-16" x="1036" y="429"/>
+ <use xlink:href="#glyph0-27" x="1050" y="429"/>
+ <use xlink:href="#glyph0-5" x="1064" y="429"/>
+ <use xlink:href="#glyph0-10" x="1078" y="429"/>
+ <use xlink:href="#glyph0-16" x="1092" y="429"/>
+ <use xlink:href="#glyph0-1" x="1106" y="429"/>
+ <use xlink:href="#glyph0-1" x="1120" y="429"/>
+ <use xlink:href="#glyph0-1" x="1134" y="429"/>
+ <use xlink:href="#glyph0-1" x="1148" y="429"/>
+ <use xlink:href="#glyph0-1" x="1162" y="429"/>
+ <use xlink:href="#glyph0-1" x="1176" y="429"/>
+ <use xlink:href="#glyph0-1" x="1190" y="429"/>
+ <use xlink:href="#glyph0-1" x="1204" y="429"/>
+ <use xlink:href="#glyph0-1" x="1218" y="429"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 1232 403 L 1260 403 L 1260 434 L 1232 434 Z M 1232 403 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-1" x="1232" y="429"/>
+ <use xlink:href="#glyph0-45" x="1246" y="429"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 1260 403 L 1414 403 L 1414 434 L 1260 434 Z M 1260 403 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(14.117647%,15.686275%,21.568627%);fill-opacity:1;" d="M 1414 403 L 1428 403 L 1428 434 L 1414 434 Z M 1414 403 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 0 434 L 140 434 L 140 465 L 0 465 Z M 0 434 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 140 434 L 168 434 L 168 465 L 140 465 Z M 140 434 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-45" x="140" y="460"/>
+ <use xlink:href="#glyph0-1" x="154" y="460"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 168 434 L 182 434 L 182 465 L 168 465 Z M 168 434 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-2" x="168" y="460"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 182 434 L 266 434 L 266 465 L 182 465 Z M 182 434 "/>
+<g style="fill:rgb(53.333333%,93.72549%,53.333333%);fill-opacity:1;">
+ <use xlink:href="#glyph0-13" x="182" y="460"/>
+ <use xlink:href="#glyph0-4" x="196" y="460"/>
+ <use xlink:href="#glyph0-14" x="210" y="460"/>
+ <use xlink:href="#glyph0-9" x="224" y="460"/>
+ <use xlink:href="#glyph0-15" x="238" y="460"/>
+ <use xlink:href="#glyph0-12" x="252" y="460"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 266 434 L 448 434 L 448 465 L 266 465 Z M 266 434 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-16" x="266" y="460"/>
+ <use xlink:href="#glyph0-7" x="280" y="460"/>
+ <use xlink:href="#glyph0-1" x="294" y="460"/>
+ <use xlink:href="#glyph0-46" x="308" y="460"/>
+ <use xlink:href="#glyph0-1" x="322" y="460"/>
+ <use xlink:href="#glyph0-47" x="336" y="460"/>
+ <use xlink:href="#glyph0-48" x="350" y="460"/>
+ <use xlink:href="#glyph0-1" x="364" y="460"/>
+ <use xlink:href="#glyph0-3" x="378" y="460"/>
+ <use xlink:href="#glyph0-23" x="392" y="460"/>
+ <use xlink:href="#glyph0-29" x="406" y="460"/>
+ <use xlink:href="#glyph0-6" x="420" y="460"/>
+ <use xlink:href="#glyph0-1" x="434" y="460"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 448 434 L 532 434 L 532 465 L 448 465 Z M 448 434 "/>
+<g style="fill:rgb(57.254902%,85.098039%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-31" x="448" y="460"/>
+ <use xlink:href="#glyph0-6" x="462" y="460"/>
+ <use xlink:href="#glyph0-38" x="476" y="460"/>
+ <use xlink:href="#glyph0-4" x="490" y="460"/>
+ <use xlink:href="#glyph0-25" x="504" y="460"/>
+ <use xlink:href="#glyph0-6" x="518" y="460"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 532 434 L 546 434 L 546 465 L 532 465 Z M 532 434 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 546 434 L 588 434 L 588 465 L 546 465 Z M 546 434 "/>
+<g style="fill:rgb(37.647059%,90.588235%,87.843137%);fill-opacity:1;">
+ <use xlink:href="#glyph0-37" x="546" y="460"/>
+ <use xlink:href="#glyph0-15" x="560" y="460"/>
+ <use xlink:href="#glyph0-33" x="574" y="460"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 588 434 L 1232 434 L 1232 465 L 588 465 Z M 588 434 "/>
+<g style="fill:rgb(100%,100%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-16" x="588" y="460"/>
+ <use xlink:href="#glyph0-1" x="602" y="460"/>
+ <use xlink:href="#glyph0-46" x="616" y="460"/>
+ <use xlink:href="#glyph0-1" x="630" y="460"/>
+ <use xlink:href="#glyph0-19" x="644" y="460"/>
+ <use xlink:href="#glyph0-5" x="658" y="460"/>
+ <use xlink:href="#glyph0-6" x="672" y="460"/>
+ <use xlink:href="#glyph0-23" x="686" y="460"/>
+ <use xlink:href="#glyph0-24" x="700" y="460"/>
+ <use xlink:href="#glyph0-50" x="714" y="460"/>
+ <use xlink:href="#glyph0-35" x="728" y="460"/>
+ <use xlink:href="#glyph0-6" x="742" y="460"/>
+ <use xlink:href="#glyph0-16" x="756" y="460"/>
+ <use xlink:href="#glyph0-6" x="770" y="460"/>
+ <use xlink:href="#glyph0-10" x="784" y="460"/>
+ <use xlink:href="#glyph0-1" x="798" y="460"/>
+ <use xlink:href="#glyph0-23" x="812" y="460"/>
+ <use xlink:href="#glyph0-25" x="826" y="460"/>
+ <use xlink:href="#glyph0-10" x="840" y="460"/>
+ <use xlink:href="#glyph0-4" x="854" y="460"/>
+ <use xlink:href="#glyph0-38" x="868" y="460"/>
+ <use xlink:href="#glyph0-6" x="882" y="460"/>
+ <use xlink:href="#glyph0-1" x="896" y="460"/>
+ <use xlink:href="#glyph0-52" x="910" y="460"/>
+ <use xlink:href="#glyph0-4" x="924" y="460"/>
+ <use xlink:href="#glyph0-5" x="938" y="460"/>
+ <use xlink:href="#glyph0-10" x="952" y="460"/>
+ <use xlink:href="#glyph0-6" x="966" y="460"/>
+ <use xlink:href="#glyph0-24" x="980" y="460"/>
+ <use xlink:href="#glyph0-16" x="994" y="460"/>
+ <use xlink:href="#glyph0-1" x="1008" y="460"/>
+ <use xlink:href="#glyph0-1" x="1022" y="460"/>
+ <use xlink:href="#glyph0-1" x="1036" y="460"/>
+ <use xlink:href="#glyph0-1" x="1050" y="460"/>
+ <use xlink:href="#glyph0-1" x="1064" y="460"/>
+ <use xlink:href="#glyph0-1" x="1078" y="460"/>
+ <use xlink:href="#glyph0-1" x="1092" y="460"/>
+ <use xlink:href="#glyph0-1" x="1106" y="460"/>
+ <use xlink:href="#glyph0-1" x="1120" y="460"/>
+ <use xlink:href="#glyph0-1" x="1134" y="460"/>
+ <use xlink:href="#glyph0-1" x="1148" y="460"/>
+ <use xlink:href="#glyph0-1" x="1162" y="460"/>
+ <use xlink:href="#glyph0-1" x="1176" y="460"/>
+ <use xlink:href="#glyph0-1" x="1190" y="460"/>
+ <use xlink:href="#glyph0-1" x="1204" y="460"/>
+ <use xlink:href="#glyph0-1" x="1218" y="460"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 1232 434 L 1260 434 L 1260 465 L 1232 465 Z M 1232 434 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-1" x="1232" y="460"/>
+ <use xlink:href="#glyph0-45" x="1246" y="460"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 1260 434 L 1414 434 L 1414 465 L 1260 465 Z M 1260 434 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(14.117647%,15.686275%,21.568627%);fill-opacity:1;" d="M 1414 434 L 1428 434 L 1428 465 L 1414 465 Z M 1414 434 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 0 465 L 140 465 L 140 496 L 0 496 Z M 0 465 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 140 465 L 1260 465 L 1260 496 L 140 496 Z M 140 465 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-53" x="140" y="491"/>
+ <use xlink:href="#glyph0-41" x="154" y="491"/>
+ <use xlink:href="#glyph0-41" x="168" y="491"/>
+ <use xlink:href="#glyph0-41" x="182" y="491"/>
+ <use xlink:href="#glyph0-41" x="196" y="491"/>
+ <use xlink:href="#glyph0-41" x="210" y="491"/>
+ <use xlink:href="#glyph0-41" x="224" y="491"/>
+ <use xlink:href="#glyph0-41" x="238" y="491"/>
+ <use xlink:href="#glyph0-41" x="252" y="491"/>
+ <use xlink:href="#glyph0-41" x="266" y="491"/>
+ <use xlink:href="#glyph0-41" x="280" y="491"/>
+ <use xlink:href="#glyph0-41" x="294" y="491"/>
+ <use xlink:href="#glyph0-41" x="308" y="491"/>
+ <use xlink:href="#glyph0-41" x="322" y="491"/>
+ <use xlink:href="#glyph0-41" x="336" y="491"/>
+ <use xlink:href="#glyph0-41" x="350" y="491"/>
+ <use xlink:href="#glyph0-41" x="364" y="491"/>
+ <use xlink:href="#glyph0-41" x="378" y="491"/>
+ <use xlink:href="#glyph0-41" x="392" y="491"/>
+ <use xlink:href="#glyph0-41" x="406" y="491"/>
+ <use xlink:href="#glyph0-41" x="420" y="491"/>
+ <use xlink:href="#glyph0-41" x="434" y="491"/>
+ <use xlink:href="#glyph0-41" x="448" y="491"/>
+ <use xlink:href="#glyph0-41" x="462" y="491"/>
+ <use xlink:href="#glyph0-41" x="476" y="491"/>
+ <use xlink:href="#glyph0-41" x="490" y="491"/>
+ <use xlink:href="#glyph0-41" x="504" y="491"/>
+ <use xlink:href="#glyph0-41" x="518" y="491"/>
+ <use xlink:href="#glyph0-41" x="532" y="491"/>
+ <use xlink:href="#glyph0-41" x="546" y="491"/>
+ <use xlink:href="#glyph0-41" x="560" y="491"/>
+ <use xlink:href="#glyph0-41" x="574" y="491"/>
+ <use xlink:href="#glyph0-41" x="588" y="491"/>
+ <use xlink:href="#glyph0-41" x="602" y="491"/>
+ <use xlink:href="#glyph0-41" x="616" y="491"/>
+ <use xlink:href="#glyph0-41" x="630" y="491"/>
+ <use xlink:href="#glyph0-41" x="644" y="491"/>
+ <use xlink:href="#glyph0-41" x="658" y="491"/>
+ <use xlink:href="#glyph0-41" x="672" y="491"/>
+ <use xlink:href="#glyph0-41" x="686" y="491"/>
+ <use xlink:href="#glyph0-41" x="700" y="491"/>
+ <use xlink:href="#glyph0-41" x="714" y="491"/>
+ <use xlink:href="#glyph0-41" x="728" y="491"/>
+ <use xlink:href="#glyph0-41" x="742" y="491"/>
+ <use xlink:href="#glyph0-41" x="756" y="491"/>
+ <use xlink:href="#glyph0-41" x="770" y="491"/>
+ <use xlink:href="#glyph0-41" x="784" y="491"/>
+ <use xlink:href="#glyph0-41" x="798" y="491"/>
+ <use xlink:href="#glyph0-41" x="812" y="491"/>
+ <use xlink:href="#glyph0-41" x="826" y="491"/>
+ <use xlink:href="#glyph0-41" x="840" y="491"/>
+ <use xlink:href="#glyph0-41" x="854" y="491"/>
+ <use xlink:href="#glyph0-41" x="868" y="491"/>
+ <use xlink:href="#glyph0-41" x="882" y="491"/>
+ <use xlink:href="#glyph0-41" x="896" y="491"/>
+ <use xlink:href="#glyph0-41" x="910" y="491"/>
+ <use xlink:href="#glyph0-41" x="924" y="491"/>
+ <use xlink:href="#glyph0-41" x="938" y="491"/>
+ <use xlink:href="#glyph0-41" x="952" y="491"/>
+ <use xlink:href="#glyph0-41" x="966" y="491"/>
+ <use xlink:href="#glyph0-41" x="980" y="491"/>
+ <use xlink:href="#glyph0-41" x="994" y="491"/>
+ <use xlink:href="#glyph0-41" x="1008" y="491"/>
+ <use xlink:href="#glyph0-41" x="1022" y="491"/>
+ <use xlink:href="#glyph0-41" x="1036" y="491"/>
+ <use xlink:href="#glyph0-41" x="1050" y="491"/>
+ <use xlink:href="#glyph0-41" x="1064" y="491"/>
+ <use xlink:href="#glyph0-41" x="1078" y="491"/>
+ <use xlink:href="#glyph0-41" x="1092" y="491"/>
+ <use xlink:href="#glyph0-41" x="1106" y="491"/>
+ <use xlink:href="#glyph0-41" x="1120" y="491"/>
+ <use xlink:href="#glyph0-41" x="1134" y="491"/>
+ <use xlink:href="#glyph0-41" x="1148" y="491"/>
+ <use xlink:href="#glyph0-41" x="1162" y="491"/>
+ <use xlink:href="#glyph0-41" x="1176" y="491"/>
+ <use xlink:href="#glyph0-41" x="1190" y="491"/>
+ <use xlink:href="#glyph0-41" x="1204" y="491"/>
+ <use xlink:href="#glyph0-41" x="1218" y="491"/>
+ <use xlink:href="#glyph0-41" x="1232" y="491"/>
+ <use xlink:href="#glyph0-54" x="1246" y="491"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 1260 465 L 1414 465 L 1414 496 L 1260 496 Z M 1260 465 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(14.117647%,15.686275%,21.568627%);fill-opacity:1;" d="M 1414 465 L 1428 465 L 1428 496 L 1414 496 Z M 1414 465 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 0 496 L 1414 496 L 1414 527 L 0 527 Z M 0 496 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(14.117647%,15.686275%,21.568627%);fill-opacity:1;" d="M 1414 496 L 1428 496 L 1428 527 L 1414 527 Z M 1414 496 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 0 527 L 1414 527 L 1414 558 L 0 558 Z M 0 527 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(14.117647%,15.686275%,21.568627%);fill-opacity:1;" d="M 1414 527 L 1428 527 L 1428 558 L 1414 558 Z M 1414 527 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 0 558 L 1414 558 L 1414 589 L 0 589 Z M 0 558 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(14.117647%,15.686275%,21.568627%);fill-opacity:1;" d="M 1414 558 L 1428 558 L 1428 589 L 1414 589 Z M 1414 558 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(0%,0%,0%);fill-opacity:1;" d="M 0 589 L 1414 589 L 1414 620 L 0 620 Z M 0 589 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(14.117647%,15.686275%,21.568627%);fill-opacity:1;" d="M 1414 589 L 1428 589 L 1428 620 L 1414 620 Z M 1414 589 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 0 620 L 14 620 L 14 651 L 0 651 Z M 0 620 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 14 620 L 196 620 L 196 651 L 14 651 Z M 14 620 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-1" x="14" y="646"/>
+ <use xlink:href="#glyph0-30" x="28" y="646"/>
+ <use xlink:href="#glyph0-34" x="42" y="646"/>
+ <use xlink:href="#glyph0-10" x="56" y="646"/>
+ <use xlink:href="#glyph0-26" x="70" y="646"/>
+ <use xlink:href="#glyph0-15" x="84" y="646"/>
+ <use xlink:href="#glyph0-14" x="98" y="646"/>
+ <use xlink:href="#glyph0-1" x="112" y="646"/>
+ <use xlink:href="#glyph0-35" x="126" y="646"/>
+ <use xlink:href="#glyph0-6" x="140" y="646"/>
+ <use xlink:href="#glyph0-18" x="154" y="646"/>
+ <use xlink:href="#glyph0-5" x="168" y="646"/>
+ <use xlink:href="#glyph0-1" x="182" y="646"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 196 620 L 224 620 L 224 651 L 196 651 Z M 196 620 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 224 620 L 238 620 L 238 651 L 224 651 Z M 224 620 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 238 620 L 322 620 L 322 651 L 238 651 Z M 238 620 "/>
+<g style="fill:rgb(87.843137%,90.196078%,94.117647%);fill-opacity:1;">
+ <use xlink:href="#glyph0-30" x="238" y="646"/>
+ <use xlink:href="#glyph0-23" x="252" y="646"/>
+ <use xlink:href="#glyph0-16" x="266" y="646"/>
+ <use xlink:href="#glyph0-10" x="280" y="646"/>
+ <use xlink:href="#glyph0-6" x="294" y="646"/>
+ <use xlink:href="#glyph0-1" x="308" y="646"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 322 620 L 406 620 L 406 651 L 322 651 Z M 322 620 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-19" x="322" y="646"/>
+ <use xlink:href="#glyph0-10" x="336" y="646"/>
+ <use xlink:href="#glyph0-24" x="350" y="646"/>
+ <use xlink:href="#glyph0-5" x="364" y="646"/>
+ <use xlink:href="#glyph0-28" x="378" y="646"/>
+ <use xlink:href="#glyph0-38" x="392" y="646"/>
+</g>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-19" x="323" y="646"/>
+ <use xlink:href="#glyph0-10" x="337" y="646"/>
+ <use xlink:href="#glyph0-24" x="351" y="646"/>
+ <use xlink:href="#glyph0-5" x="365" y="646"/>
+ <use xlink:href="#glyph0-28" x="379" y="646"/>
+ <use xlink:href="#glyph0-38" x="393" y="646"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 406 620 L 420 620 L 420 651 L 406 651 Z M 406 620 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 420 620 L 448 620 L 448 651 L 420 651 Z M 420 620 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 448 620 L 462 620 L 462 651 L 448 651 Z M 448 620 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 462 620 L 644 620 L 644 651 L 462 651 Z M 462 620 "/>
+<g style="fill:rgb(87.843137%,90.196078%,94.117647%);fill-opacity:1;">
+ <use xlink:href="#glyph0-19" x="462" y="646"/>
+ <use xlink:href="#glyph0-15" x="476" y="646"/>
+ <use xlink:href="#glyph0-18" x="490" y="646"/>
+ <use xlink:href="#glyph0-34" x="504" y="646"/>
+ <use xlink:href="#glyph0-1" x="518" y="646"/>
+ <use xlink:href="#glyph0-50" x="532" y="646"/>
+ <use xlink:href="#glyph0-1" x="546" y="646"/>
+ <use xlink:href="#glyph0-19" x="560" y="646"/>
+ <use xlink:href="#glyph0-5" x="574" y="646"/>
+ <use xlink:href="#glyph0-6" x="588" y="646"/>
+ <use xlink:href="#glyph0-23" x="602" y="646"/>
+ <use xlink:href="#glyph0-24" x="616" y="646"/>
+ <use xlink:href="#glyph0-1" x="630" y="646"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 644 620 L 728 620 L 728 651 L 644 651 Z M 644 620 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-19" x="644" y="646"/>
+ <use xlink:href="#glyph0-10" x="658" y="646"/>
+ <use xlink:href="#glyph0-24" x="672" y="646"/>
+ <use xlink:href="#glyph0-5" x="686" y="646"/>
+ <use xlink:href="#glyph0-28" x="700" y="646"/>
+ <use xlink:href="#glyph0-25" x="714" y="646"/>
+</g>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-19" x="645" y="646"/>
+ <use xlink:href="#glyph0-10" x="659" y="646"/>
+ <use xlink:href="#glyph0-24" x="673" y="646"/>
+ <use xlink:href="#glyph0-5" x="687" y="646"/>
+ <use xlink:href="#glyph0-28" x="701" y="646"/>
+ <use xlink:href="#glyph0-25" x="715" y="646"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 728 620 L 742 620 L 742 651 L 728 651 Z M 728 620 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 742 620 L 770 620 L 770 651 L 742 651 Z M 742 620 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 770 620 L 784 620 L 784 651 L 770 651 Z M 770 620 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 784 620 L 840 620 L 840 651 L 784 651 Z M 784 620 "/>
+<g style="fill:rgb(87.843137%,90.196078%,94.117647%);fill-opacity:1;">
+ <use xlink:href="#glyph0-35" x="784" y="646"/>
+ <use xlink:href="#glyph0-27" x="798" y="646"/>
+ <use xlink:href="#glyph0-14" x="812" y="646"/>
+ <use xlink:href="#glyph0-1" x="826" y="646"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 840 620 L 910 620 L 910 651 L 840 651 Z M 840 620 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-8" x="840" y="646"/>
+ <use xlink:href="#glyph0-14" x="854" y="646"/>
+ <use xlink:href="#glyph0-10" x="868" y="646"/>
+ <use xlink:href="#glyph0-6" x="882" y="646"/>
+ <use xlink:href="#glyph0-24" x="896" y="646"/>
+</g>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-8" x="841" y="646"/>
+ <use xlink:href="#glyph0-14" x="855" y="646"/>
+ <use xlink:href="#glyph0-10" x="869" y="646"/>
+ <use xlink:href="#glyph0-6" x="883" y="646"/>
+ <use xlink:href="#glyph0-24" x="897" y="646"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 910 620 L 924 620 L 924 651 L 910 651 Z M 910 620 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 924 620 L 952 620 L 952 651 L 924 651 Z M 924 620 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 952 620 L 966 620 L 966 651 L 952 651 Z M 952 620 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 966 620 L 1092 620 L 1092 651 L 966 651 Z M 966 620 "/>
+<g style="fill:rgb(87.843137%,90.196078%,94.117647%);fill-opacity:1;">
+ <use xlink:href="#glyph0-22" x="966" y="646"/>
+ <use xlink:href="#glyph0-6" x="980" y="646"/>
+ <use xlink:href="#glyph0-10" x="994" y="646"/>
+ <use xlink:href="#glyph0-10" x="1008" y="646"/>
+ <use xlink:href="#glyph0-4" x="1022" y="646"/>
+ <use xlink:href="#glyph0-14" x="1036" y="646"/>
+ <use xlink:href="#glyph0-33" x="1050" y="646"/>
+ <use xlink:href="#glyph0-16" x="1064" y="646"/>
+ <use xlink:href="#glyph0-1" x="1078" y="646"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 1092 620 L 1106 620 L 1106 651 L 1092 651 Z M 1092 620 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 1106 620 L 1302 620 L 1302 651 L 1106 651 Z M 1106 620 "/>
+<g style="fill:rgb(87.843137%,90.196078%,94.117647%);fill-opacity:1;">
+ <use xlink:href="#glyph0-25" x="1106" y="646"/>
+ <use xlink:href="#glyph0-5" x="1120" y="646"/>
+ <use xlink:href="#glyph0-4" x="1134" y="646"/>
+ <use xlink:href="#glyph0-25" x="1148" y="646"/>
+ <use xlink:href="#glyph0-29" x="1162" y="646"/>
+ <use xlink:href="#glyph0-1" x="1176" y="646"/>
+ <use xlink:href="#glyph0-10" x="1190" y="646"/>
+ <use xlink:href="#glyph0-15" x="1204" y="646"/>
+ <use xlink:href="#glyph0-1" x="1218" y="646"/>
+ <use xlink:href="#glyph0-52" x="1232" y="646"/>
+ <use xlink:href="#glyph0-15" x="1246" y="646"/>
+ <use xlink:href="#glyph0-25" x="1260" y="646"/>
+ <use xlink:href="#glyph0-27" x="1274" y="646"/>
+ <use xlink:href="#glyph0-16" x="1288" y="646"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(19.607843%,19.607843%,19.607843%);fill-opacity:1;" d="M 1302 620 L 1316 620 L 1316 651 L 1302 651 Z M 1302 620 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 1316 620 L 1358 620 L 1358 651 L 1316 651 Z M 1316 620 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(14.117647%,15.686275%,21.568627%);fill-opacity:1;" d="M 1414 620 L 1428 620 L 1428 651 L 1414 651 Z M 1414 620 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(11.764706%,11.764706%,11.764706%);fill-opacity:1;" d="M 1358 620 L 1414 620 L 1414 651 L 1358 651 Z M 1358 620 "/>
+<g style="fill:rgb(81.176471%,79.215686%,100%);fill-opacity:1;">
+ <use xlink:href="#glyph0-55" x="1358" y="646"/>
+ <use xlink:href="#glyph0-56" x="1372" y="646"/>
+ <use xlink:href="#glyph0-56" x="1386" y="646"/>
+ <use xlink:href="#glyph0-55" x="1400" y="646"/>
+</g>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(52.941176%,58.823529%,69.019608%);fill-opacity:1;" d="M 434 124 L 448 124 L 448 155 L 434 155 Z M 434 124 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(14.117647%,15.686275%,21.568627%);fill-opacity:1;" d="M 0 651 L 28 651 L 28 682 L 0 682 Z M 0 651 "/>
+<path style=" stroke:none;fill-rule:nonzero;fill:rgb(14.117647%,15.686275%,21.568627%);fill-opacity:1;" d="M 28 651 L 1429 651 L 1429 682 L 28 682 Z M 28 651 "/>
+</g>
+</svg>
diff --git a/pw_console/images/pw_system_boot.png b/pw_console/images/pw_system_boot.png
new file mode 100644
index 000000000..d1c391bd0
--- /dev/null
+++ b/pw_console/images/pw_system_boot.png
Binary files differ
diff --git a/pw_console/images/python_completion.png b/pw_console/images/python_completion.png
new file mode 100644
index 000000000..87a424cae
--- /dev/null
+++ b/pw_console/images/python_completion.png
Binary files differ
diff --git a/pw_console/py/BUILD.gn b/pw_console/py/BUILD.gn
index aa605b138..b7164c95b 100644
--- a/pw_console/py/BUILD.gn
+++ b/pw_console/py/BUILD.gn
@@ -25,6 +25,7 @@ pw_python_package("py") {
sources = [
"pw_console/__init__.py",
"pw_console/__main__.py",
+ "pw_console/command_runner.py",
"pw_console/console_app.py",
"pw_console/console_prefs.py",
"pw_console/embed.py",
@@ -70,8 +71,10 @@ pw_python_package("py") {
"pw_console/widgets/window_pane_toolbar.py",
"pw_console/window_list.py",
"pw_console/window_manager.py",
+ "pw_console/yaml_config_loader_mixin.py",
]
tests = [
+ "command_runner_test.py",
"console_app_test.py",
"console_prefs_test.py",
"help_window_test.py",
diff --git a/pw_console/py/command_runner_test.py b/pw_console/py/command_runner_test.py
new file mode 100644
index 000000000..45362fe90
--- /dev/null
+++ b/pw_console/py/command_runner_test.py
@@ -0,0 +1,258 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Tests for pw_console.command_runner dialog."""
+
+import logging
+import re
+import unittest
+from typing import Callable, List, Tuple
+
+from unittest.mock import MagicMock
+
+from prompt_toolkit.application import create_app_session
+from prompt_toolkit.output import ColorDepth
+# inclusive-language: ignore
+from prompt_toolkit.output import DummyOutput as FakeOutput
+
+from pw_console.console_app import ConsoleApp
+from pw_console.console_prefs import ConsolePrefs
+from pw_console.text_formatting import (
+ flatten_formatted_text_tuples,
+ join_adjacent_style_tuples,
+)
+from window_manager_test import target_list_and_pane, window_pane_titles
+
+
+def _create_console_app(log_pane_count=2):
+ console_app = ConsoleApp(color_depth=ColorDepth.DEPTH_8_BIT,
+ prefs=ConsolePrefs(project_file=False,
+ project_user_file=False,
+ user_file=False))
+ console_app.prefs.reset_config()
+
+ # Setup log panes
+ loggers = {}
+ for i in range(log_pane_count):
+ loggers['LogPane-{}'.format(i)] = [
+ logging.getLogger('test_log{}'.format(i))
+ ]
+ for window_title, logger_instances in loggers.items():
+ console_app.add_log_handler(window_title, logger_instances)
+
+ return console_app
+
+
+class TestCommandRunner(unittest.TestCase):
+ """Tests for CommandRunner."""
+ def setUp(self):
+ self.maxDiff = None # pylint: disable=invalid-name
+
+ def test_flatten_menu_items(self) -> None:
+ with create_app_session(output=FakeOutput()):
+ console_app = _create_console_app(log_pane_count=2)
+ flattened_menu_items = [
+ text for text, handler in
+ console_app.command_runner.load_menu_items()
+ ]
+
+ # Check some common menu items exist.
+ self.assertIn('[File] > Open Logger', flattened_menu_items)
+ self.assertIn('[File] > Themes > UI Themes > High Contrast',
+ flattened_menu_items)
+ self.assertIn('[Help] > User Guide', flattened_menu_items)
+ self.assertIn('[Help] > Keyboard Shortcuts', flattened_menu_items)
+ # Check for log windows
+ self.assertRegex(
+ '\n'.join(flattened_menu_items),
+ re.compile(r'^\[Windows\] > .* LogPane-[0-9]+ > .*$',
+ re.MULTILINE),
+ )
+
+ def test_filter_and_highlight_matches(self) -> None:
+ """Check filtering matches and highlighting works correctly."""
+ with create_app_session(output=FakeOutput()):
+ console_app = _create_console_app(log_pane_count=2)
+ command_runner = console_app.command_runner
+
+ command_runner.filter_completions = MagicMock(
+ wraps=command_runner.filter_completions)
+ command_runner.width = 20
+
+ # Define custom completion items
+ def empty_handler() -> None:
+ return None
+
+ def get_completions() -> List[Tuple[str, Callable]]:
+ return [
+ ('[File] > Open Logger', empty_handler),
+ ('[Windows] > 1: Host Logs > Show/Hide', empty_handler),
+ ('[Windows] > 2: Device Logs > Show/Hide', empty_handler),
+ ('[Help] > User Guide', empty_handler),
+ ]
+
+ command_runner.filter_completions.assert_not_called()
+ command_runner.set_completions(window_title='Test Completions',
+ load_completions=get_completions)
+ command_runner.filter_completions.assert_called_once()
+ command_runner.filter_completions.reset_mock()
+
+ # Input field should be empty
+ self.assertEqual(command_runner.input_field.buffer.text, '')
+ # Flatten resulting formatted text
+ result_items = join_adjacent_style_tuples(
+ flatten_formatted_text_tuples(
+ command_runner.completion_fragments))
+
+ # index 0: the selected line
+ # index 1: the rest of the completions with line breaks
+ self.assertEqual(len(result_items), 2)
+ first_item_style = result_items[0][0]
+ first_item_text = result_items[0][1]
+ second_item_text = result_items[1][1]
+ # Check expected number of lines are present
+ self.assertEqual(len(first_item_text.splitlines()), 1)
+ self.assertEqual(len(second_item_text.splitlines()), 3)
+ # First line is highlighted as a selected item
+ self.assertEqual(first_item_style,
+ 'class:command-runner-selected-item')
+ self.assertIn('[File] > Open Logger', first_item_text)
+
+ # Type: file open
+ command_runner.input_field.buffer.text = 'file open'
+ self.assertEqual(command_runner.input_field.buffer.text,
+ 'file open')
+ # Run the filter
+ command_runner.filter_completions()
+ # Flatten resulting formatted text
+ result_items = join_adjacent_style_tuples(
+ flatten_formatted_text_tuples(
+ command_runner.completion_fragments))
+ # Check file and open are highlighted
+ self.assertEqual(
+ result_items[:4],
+ [
+ ('class:command-runner-selected-item', '['),
+ ('class:command-runner-selected-item '
+ 'class:command-runner-fuzzy-highlight-0 ', 'File'),
+ ('class:command-runner-selected-item', '] > '),
+ ('class:command-runner-selected-item '
+ 'class:command-runner-fuzzy-highlight-1 ', 'Open'),
+ ],
+ )
+
+ # Type: open file
+ command_runner.input_field.buffer.text = 'open file'
+ # Run the filter
+ command_runner.filter_completions()
+ result_items = join_adjacent_style_tuples(
+ flatten_formatted_text_tuples(
+ command_runner.completion_fragments))
+ # Check file and open are highlighted, the fuzzy-highlight class
+ # should be swapped.
+ self.assertEqual(
+ result_items[:4],
+ [
+ ('class:command-runner-selected-item', '['),
+ ('class:command-runner-selected-item '
+ 'class:command-runner-fuzzy-highlight-1 ', 'File'),
+ ('class:command-runner-selected-item', '] > '),
+ ('class:command-runner-selected-item '
+ 'class:command-runner-fuzzy-highlight-0 ', 'Open'),
+ ],
+ )
+
+ # Clear input
+ command_runner._reset_selected_item() # pylint: disable=protected-access
+ command_runner.filter_completions()
+ result_items = join_adjacent_style_tuples(
+ flatten_formatted_text_tuples(
+ command_runner.completion_fragments))
+ self.assertEqual(len(first_item_text.splitlines()), 1)
+ self.assertEqual(len(second_item_text.splitlines()), 3)
+
+ # Press down (select the next item)
+ command_runner._next_item() # pylint: disable=protected-access
+ # Filter and check results
+ command_runner.filter_completions()
+ result_items = join_adjacent_style_tuples(
+ flatten_formatted_text_tuples(
+ command_runner.completion_fragments))
+ self.assertEqual(len(result_items), 3)
+ # First line - not selected
+ self.assertEqual(result_items[0], ('', '[File] > Open Logger\n'))
+ # Second line - is selected
+ self.assertEqual(result_items[1],
+ ('class:command-runner-selected-item',
+ '[Windows] > 1: Host Logs > Show/Hide\n'))
+ # Third and fourth lines separated by \n - not selected
+ self.assertEqual(result_items[2],
+ ('', '[Windows] > 2: Device Logs > Show/Hide\n'
+ '[Help] > User Guide'))
+
+ def test_run_action(self) -> None:
+ """Check running an action works correctly."""
+ with create_app_session(output=FakeOutput()):
+ console_app = _create_console_app(log_pane_count=2)
+ command_runner = console_app.command_runner
+ self.assertEqual(
+ window_pane_titles(console_app.window_manager),
+ [
+ # Split 1
+ [
+ 'LogPane-1 - test_log1',
+ 'LogPane-0 - test_log0',
+ 'Python Repl - ',
+ ],
+ ],
+ )
+ command_runner.open_dialog()
+ # Set LogPane-1 as the focused window pane
+ target_list_and_pane(console_app.window_manager, 0, 0)
+
+ command_runner.input_field.buffer.text = 'move right'
+
+ # pylint: disable=protected-access
+ command_runner._make_regexes = MagicMock(
+ wraps=command_runner._make_regexes)
+ # pylint: enable=protected-access
+ command_runner.filter_completions()
+ # Filter should only be re-run if input text changed
+ command_runner.filter_completions()
+ command_runner._make_regexes.assert_called_once() # pylint: disable=protected-access
+
+ self.assertIn('[View] > Move Window Right',
+ command_runner.selected_item_text)
+ # Run the Move Window Right action
+ command_runner._run_selected_item() # pylint: disable=protected-access
+ # Dialog should be closed
+ self.assertFalse(command_runner.show_dialog)
+ # LogPane-1 should be moved to the right in it's own split
+ self.assertEqual(
+ window_pane_titles(console_app.window_manager),
+ [
+ # Split 1
+ [
+ 'LogPane-0 - test_log0',
+ 'Python Repl - ',
+ ],
+ # Split 2
+ [
+ 'LogPane-1 - test_log1',
+ ],
+ ],
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/pw_console/py/console_app_test.py b/pw_console/py/console_app_test.py
index b4f7f368d..4dec65f79 100644
--- a/pw_console/py/console_app_test.py
+++ b/pw_console/py/console_app_test.py
@@ -22,6 +22,7 @@ from prompt_toolkit.output import ColorDepth
from prompt_toolkit.output import DummyOutput as FakeOutput
from pw_console.console_app import ConsoleApp
+from pw_console.console_prefs import ConsolePrefs
class TestConsoleApp(unittest.TestCase):
@@ -29,14 +30,22 @@ class TestConsoleApp(unittest.TestCase):
def test_instantiate(self) -> None:
"""Test init."""
with create_app_session(output=FakeOutput()):
- console_app = ConsoleApp(color_depth=ColorDepth.DEPTH_8_BIT)
+ console_app = ConsoleApp(color_depth=ColorDepth.DEPTH_8_BIT,
+ prefs=ConsolePrefs(
+ project_file=False,
+ project_user_file=False,
+ user_file=False))
self.assertIsNotNone(console_app)
def test_multiple_loggers_in_one_pane(self) -> None:
"""Test window resizing."""
# pylint: disable=protected-access
with create_app_session(output=FakeOutput()):
- console_app = ConsoleApp(color_depth=ColorDepth.DEPTH_8_BIT)
+ console_app = ConsoleApp(color_depth=ColorDepth.DEPTH_8_BIT,
+ prefs=ConsolePrefs(
+ project_file=False,
+ project_user_file=False,
+ user_file=False))
loggers = {
'Logs': [
diff --git a/pw_console/py/console_prefs_test.py b/pw_console/py/console_prefs_test.py
index b0dcff7bf..958c00f2a 100644
--- a/pw_console/py/console_prefs_test.py
+++ b/pw_console/py/console_prefs_test.py
@@ -45,12 +45,37 @@ class TestConsolePrefs(unittest.TestCase):
self.maxDiff = None # pylint: disable=invalid-name
def test_load_no_existing_files(self) -> None:
- prefs = ConsolePrefs(project_file=False, user_file=False)
- self.assertEqual(_DEFAULT_CONFIG['pw_console'], prefs._config)
+ prefs = ConsolePrefs(project_file=False,
+ project_user_file=False,
+ user_file=False)
+ self.assertEqual(_DEFAULT_CONFIG, prefs._config)
self.assertTrue(str(prefs.repl_history).endswith('pw_console_history'))
self.assertTrue(
str(prefs.search_history).endswith('pw_console_search'))
+ def test_load_empty_file(self) -> None:
+ # Create an empty file
+ project_config_file = _create_tempfile('')
+ try:
+ prefs = ConsolePrefs(project_file=project_config_file,
+ project_user_file=False,
+ user_file=False)
+ result_settings = {
+ k: v
+ for k, v in prefs._config.items()
+ if k in _DEFAULT_CONFIG.keys()
+ }
+ other_settings = {
+ k: v
+ for k, v in prefs._config.items()
+ if k not in _DEFAULT_CONFIG.keys()
+ }
+ # Check that only the default config was loaded.
+ self.assertEqual(_DEFAULT_CONFIG, result_settings)
+ self.assertEqual(0, len(other_settings))
+ finally:
+ project_config_file.unlink()
+
def test_load_project_file(self) -> None:
project_config = {
'pw_console': {
@@ -62,6 +87,7 @@ class TestConsolePrefs(unittest.TestCase):
project_config_file = _create_tempfile(yaml.dump(project_config))
try:
prefs = ConsolePrefs(project_file=project_config_file,
+ project_user_file=False,
user_file=False)
result_settings = {
k: v
@@ -91,6 +117,16 @@ class TestConsolePrefs(unittest.TestCase):
}
project_config_file = _create_tempfile(yaml.dump(project_config))
+ project_user_config = {
+ 'pw_console': {
+ 'ui_theme': 'nord',
+ 'repl_history': '~/project_user_history',
+ 'search_history': '~/project_user_search',
+ },
+ }
+ project_user_config_file = _create_tempfile(
+ yaml.dump(project_user_config))
+
user_config = {
'pw_console': {
'ui_theme': 'dark',
@@ -100,6 +136,7 @@ class TestConsolePrefs(unittest.TestCase):
user_config_file = _create_tempfile(yaml.dump(user_config))
try:
prefs = ConsolePrefs(project_file=project_config_file,
+ project_user_file=project_user_config_file,
user_file=user_config_file)
# Set by the project
self.assertEqual(project_config['pw_console']['code_theme'],
@@ -108,18 +145,28 @@ class TestConsolePrefs(unittest.TestCase):
project_config['pw_console']['swap_light_and_dark'],
prefs.swap_light_and_dark)
- history = project_config['pw_console']['repl_history']
+ # Project user setting, result should not be project only setting.
+ project_history = project_config['pw_console']['repl_history']
+ assert isinstance(project_history, str)
+ self.assertNotEqual(
+ Path(project_history).expanduser(), prefs.repl_history)
+
+ history = project_user_config['pw_console']['repl_history']
assert isinstance(history, str)
self.assertEqual(Path(history).expanduser(), prefs.repl_history)
- # User config overrides project
+ # User config overrides project and project_user
self.assertEqual(user_config['pw_console']['ui_theme'],
prefs.ui_theme)
self.assertEqual(
Path(user_config['pw_console']['search_history']).expanduser(),
prefs.search_history)
+ # ui_theme should not be the project_user file setting
+ project_user_theme = project_user_config['pw_console']['ui_theme']
+ self.assertNotEqual(project_user_theme, prefs.ui_theme)
finally:
project_config_file.unlink()
+ project_user_config_file.unlink()
user_config_file.unlink()
diff --git a/pw_console/py/help_window_test.py b/pw_console/py/help_window_test.py
index 14caa73b9..2f25cac31 100644
--- a/pw_console/py/help_window_test.py
+++ b/pw_console/py/help_window_test.py
@@ -50,21 +50,21 @@ class TestHelpWindow(unittest.TestCase):
# pylint: disable=unused-variable,unused-argument
def test_add_keybind_help_text(self) -> None:
- bindings = KeyBindings()
+ key_bindings = KeyBindings()
- @bindings.add('f1')
+ @key_bindings.add('f1')
def show_help(event):
"""Toggle help window."""
- @bindings.add('c-w')
- @bindings.add('c-q')
+ @key_bindings.add('c-w')
+ @key_bindings.add('c-q')
def exit_(event):
"""Quit the application."""
app = _create_app_mock()
help_window = HelpWindow(app)
- help_window.add_keybind_help_text('Global', bindings)
+ help_window.add_keybind_help_text('Global', key_bindings)
self.assertEqual(
help_window.help_text_sections,
diff --git a/pw_console/py/log_store_test.py b/pw_console/py/log_store_test.py
index 51dfad54c..37b628733 100644
--- a/pw_console/py/log_store_test.py
+++ b/pw_console/py/log_store_test.py
@@ -22,8 +22,8 @@ from pw_console.console_prefs import ConsolePrefs
def _create_log_store():
- log_store = LogStore(
- prefs=ConsolePrefs(project_file=False, user_file=False))
+ log_store = LogStore(prefs=ConsolePrefs(
+ project_file=False, project_user_file=False, user_file=False))
assert not log_store.table.prefs.show_python_file
viewer = MagicMock()
diff --git a/pw_console/py/log_view_test.py b/pw_console/py/log_view_test.py
index f6e71c944..a21a5647d 100644
--- a/pw_console/py/log_view_test.py
+++ b/pw_console/py/log_view_test.py
@@ -24,7 +24,11 @@ from prompt_toolkit.data_structures import Point
from pw_console.console_prefs import ConsolePrefs
from pw_console.log_view import LogView
-from pw_console.text_formatting import join_adjacent_style_tuples
+from pw_console.log_screen import ScreenLine
+from pw_console.text_formatting import (
+ flatten_formatted_text_tuples,
+ join_adjacent_style_tuples,
+)
_PYTHON_3_8 = sys.version_info >= (
3,
@@ -39,7 +43,9 @@ def _create_log_view():
log_pane.current_log_pane_height = 10
application = MagicMock()
- application.prefs = ConsolePrefs()
+ application.prefs = ConsolePrefs(project_file=False,
+ project_user_file=False,
+ user_file=False)
application.prefs.reset_config()
log_view = LogView(log_pane, application)
return log_view, log_pane
@@ -213,27 +219,31 @@ class TestLogView(unittest.TestCase):
self.assertEqual(log_view.get_cursor_position(), Point(x=0, y=9))
expected_formatted_text = [
- ('', '\n\n\n\n\n\n'),
+ ('', ''),
('class:log-time', '20210713 00:00:00'),
('', ' '),
('class:log-level-10', 'DEBUG'),
- ('', ' Test log 0\n'),
+ ('', ' Test log 0'),
+
('class:log-time', '20210713 00:00:00'),
('', ' '),
('class:log-level-10', 'DEBUG'),
- ('', ' Test log 1\n'),
+ ('', ' Test log 1'),
+
('class:log-time', '20210713 00:00:00'),
('', ' '),
('class:log-level-10', 'DEBUG'),
- ('', ' Test log 2\n'),
+ ('', ' Test log 2'),
+
('class:selected-log-line class:log-time', '20210713 00:00:00'),
('class:selected-log-line ', ' '),
('class:selected-log-line class:log-level-10', 'DEBUG'),
('class:selected-log-line ',
- ' Test log 3 \n')
+ ' Test log 3 ')
] # yapf: disable
- result_text = join_adjacent_style_tuples(log_view._line_fragment_cache) # pylint: disable=protected-access
+ result_text = join_adjacent_style_tuples(
+ flatten_formatted_text_tuples(log_view._line_fragment_cache)) # pylint: disable=protected-access
self.assertEqual(result_text, expected_formatted_text)
@@ -311,6 +321,107 @@ class TestLogView(unittest.TestCase):
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 7)
+ def test_get_line_at_cursor_position(self) -> None:
+ """Tests fuctions that rely on getting a log_index for the current
+ cursor position.
+
+ Including:
+ - LogScreen.fetch_subline_up
+ - LogScreen.fetch_subline_down
+ - LogView._update_log_index
+ """
+ # pylint: disable=protected-access
+ # Create log_view with 4 logs
+ starting_log_count = 4
+ log_view, _pane = self._create_log_view_with_logs(
+ log_count=starting_log_count)
+ log_view.render_content()
+
+ # Check setup is correct
+ self.assertTrue(log_view.follow)
+ self.assertEqual(log_view.get_current_line(), 3)
+ self.assertEqual(log_view.get_total_count(), 4)
+ self.assertEqual(
+ list(log.record.message
+ for log in log_view._get_visible_log_lines()),
+ ['Test log 0', 'Test log 1', 'Test log 2', 'Test log 3'])
+
+ self.assertEqual(log_view.log_screen.cursor_position, 9)
+ # Force the cursor_position to be larger than the log_screen
+ # line_buffer.
+ log_view.log_screen.cursor_position = 10
+ # Attempt to get the current line, no exception should be raised
+ result = log_view.log_screen.get_line_at_cursor_position()
+ # Log index should be None
+ self.assertEqual(result.log_index, None)
+
+ # Force the cursor_position to be < 0. This won't produce an error but
+ # would wrap around to the beginning.
+ log_view.log_screen.cursor_position = -1
+ # Attempt to get the current line, no exception should be raised
+ result = log_view.log_screen.get_line_at_cursor_position()
+ # Result should be a blank line
+ self.assertEqual(result, ScreenLine([('', '')]))
+ # Log index should be None
+ self.assertEqual(result.log_index, None)
+
+ def test_visual_select(self) -> None:
+ """Test log line selection."""
+ log_view, log_pane = self._create_log_view_with_logs(log_count=100)
+ self.assertEqual(100, log_view.get_total_count())
+
+ # Page scrolling needs to know the current window height.
+ log_pane.pane_resized = MagicMock(return_value=True)
+ log_pane.current_log_pane_width = 80
+ log_pane.current_log_pane_height = 10
+
+ log_view.log_screen.reset_logs = MagicMock(
+ wraps=log_view.log_screen.reset_logs)
+ log_view.log_screen.get_lines = MagicMock(
+ wraps=log_view.log_screen.get_lines)
+
+ log_view.render_content()
+ log_view.log_screen.reset_logs.assert_called_once()
+ log_view.log_screen.get_lines.assert_called_once_with(
+ marked_logs_start=None, marked_logs_end=None)
+ log_view.log_screen.get_lines.reset_mock()
+ log_view.log_screen.reset_logs.reset_mock()
+
+ self.assertIsNone(log_view.marked_logs_start)
+ self.assertIsNone(log_view.marked_logs_end)
+ log_view.visual_select_line(Point(0, 9))
+ self.assertEqual(
+ (99, 99), (log_view.marked_logs_start, log_view.marked_logs_end))
+
+ log_view.visual_select_line(Point(0, 8))
+ log_view.visual_select_line(Point(0, 7))
+ self.assertEqual(
+ (97, 99), (log_view.marked_logs_start, log_view.marked_logs_end))
+
+ log_view.clear_visual_selection()
+ self.assertIsNone(log_view.marked_logs_start)
+ self.assertIsNone(log_view.marked_logs_end)
+
+ log_view.visual_select_line(Point(0, 1))
+ log_view.visual_select_line(Point(0, 2))
+ log_view.visual_select_line(Point(0, 3))
+ log_view.visual_select_line(Point(0, 4))
+ self.assertEqual(
+ (91, 94), (log_view.marked_logs_start, log_view.marked_logs_end))
+
+ # Make sure the log screen was not re-generated.
+ log_view.log_screen.reset_logs.assert_not_called()
+ log_view.log_screen.reset_logs.reset_mock()
+
+ # Render the screen
+ log_view.render_content()
+ log_view.log_screen.reset_logs.assert_called_once()
+ # Check the visual selection was specified
+ log_view.log_screen.get_lines.assert_called_once_with(
+ marked_logs_start=91, marked_logs_end=94)
+ log_view.log_screen.get_lines.reset_mock()
+ log_view.log_screen.reset_logs.reset_mock()
+
if _PYTHON_3_8:
from unittest import IsolatedAsyncioTestCase # type: ignore # pylint: disable=no-name-in-module
@@ -337,17 +448,24 @@ if _PYTHON_3_8:
@parameterized.expand([
(
+ # Test name
'regex filter',
+ # Search input_text
'log.*item',
+ # input_logs
[
('Log some item', dict()),
('Log another item', dict()),
('Some exception', dict()),
],
+ # expected_matched_lines
[
'Log some item',
'Log another item',
],
+ # expected_match_line_numbers
+ {0: 0, 1: 1},
+ # expected_export_text
(
' DEBUG Log some item\n'
' DEBUG Log another item\n'
@@ -356,8 +474,11 @@ if _PYTHON_3_8:
False, # invert
),
(
+ # Test name
'regex filter with field',
+ # Search input_text
'earth',
+ # input_logs
[
('Log some item',
dict(extra_metadata_fields={'planet': 'Jupiter'})),
@@ -366,10 +487,14 @@ if _PYTHON_3_8:
('Some exception',
dict(extra_metadata_fields={'planet': 'Earth'})),
],
+ # expected_matched_lines
[
'Log another item',
'Some exception',
],
+ # expected_match_line_numbers
+ {1: 0, 2: 1},
+ # expected_export_text
(
' DEBUG Earth Log another item\n'
' DEBUG Earth Some exception\n'
@@ -378,8 +503,11 @@ if _PYTHON_3_8:
False, # invert
),
(
+ # Test name
'regex filter with field inverted',
+ # Search input_text
'earth',
+ # input_logs
[
('Log some item',
dict(extra_metadata_fields={'planet': 'Jupiter'})),
@@ -388,9 +516,13 @@ if _PYTHON_3_8:
('Some exception',
dict(extra_metadata_fields={'planet': 'Earth'})),
],
+ # expected_matched_lines
[
'Log some item',
],
+ # expected_match_line_numbers
+ {0: 0},
+ # expected_export_text
(
' DEBUG Jupiter Log some item\n'
),
@@ -400,20 +532,27 @@ if _PYTHON_3_8:
]) # yapf: disable
async def test_log_filtering(
self,
- _name,
+ _test_name,
input_text,
- input_lines,
+ input_logs,
expected_matched_lines,
+ expected_match_line_numbers,
expected_export_text,
field=None,
invert=False,
) -> None:
"""Test run log view filtering."""
- log_view, _log_pane = self._create_log_view_from_list(input_lines)
- self.assertEqual(log_view.get_total_count(), len(input_lines))
+ log_view, _log_pane = self._create_log_view_from_list(input_logs)
+ log_view.render_content()
- # Apply the filter and wait for the background task
+ self.assertEqual(log_view.get_total_count(), len(input_logs))
+ # Apply the search and wait for the match count background task
log_view.new_search(input_text, invert=invert, field=field)
+ await log_view.search_match_count_task
+ self.assertEqual(log_view.search_matched_lines,
+ expected_match_line_numbers)
+
+ # Apply the filter and wait for the filter background task
log_view.apply_filter()
await log_view.filter_existing_logs_task
@@ -447,7 +586,7 @@ if _PYTHON_3_8:
# Clear filters and check the numbe of lines is back to normal.
log_view.clear_filters()
- self.assertEqual(log_view.get_total_count(), len(input_lines))
+ self.assertEqual(log_view.get_total_count(), len(input_logs))
if __name__ == '__main__':
diff --git a/pw_console/py/pw_console/__init__.py b/pw_console/py/pw_console/__init__.py
index d5368985c..86b1519e2 100644
--- a/pw_console/py/pw_console/__init__.py
+++ b/pw_console/py/pw_console/__init__.py
@@ -14,3 +14,4 @@
"""Pigweed interactive console."""
from pw_console.embed import PwConsoleEmbed
+from pw_console.log_store import LogStore
diff --git a/pw_console/py/pw_console/__main__.py b/pw_console/py/pw_console/__main__.py
index b4356a59f..936bd32b5 100644
--- a/pw_console/py/pw_console/__main__.py
+++ b/pw_console/py/pw_console/__main__.py
@@ -24,10 +24,12 @@ import pw_cli.argument_types
import pw_console
import pw_console.python_logging
+from pw_console.log_store import LogStore
from pw_console.plugins.calc_pane import CalcPane
from pw_console.plugins.clock_pane import ClockPane
_LOG = logging.getLogger(__package__)
+_ROOT_LOG = logging.getLogger('')
# TODO(tonymd): Remove this when no downstream projects are using it.
@@ -86,6 +88,10 @@ def main() -> int:
global_vars = None
default_loggers = {}
if args.test_mode:
+ root_log_store = LogStore()
+ _ROOT_LOG.addHandler(root_log_store)
+ _ROOT_LOG.debug('pw_console test-mode starting...')
+
fake_logger = logging.getLogger(
pw_console.console_app.FAKE_DEVICE_LOGGER_NAME)
default_loggers = {
@@ -93,6 +99,7 @@ def main() -> int:
# Add the fake logger for test_mode.
'Fake Device Logs': [fake_logger],
'PwConsole Debug': [logging.getLogger('pw_console')],
+ 'All Logs': root_log_store,
}
# Give access to adding log messages from the repl via: `LOG.warning()`
global_vars = dict(LOG=fake_logger)
@@ -123,8 +130,11 @@ def main() -> int:
# Add example plugins used to validate behavior in the Pigweed Console
# manual test procedure: https://pigweed.dev/pw_console/testing.html
if args.test_mode:
+ _ROOT_LOG.debug('pw_console.PwConsoleEmbed init complete')
+ _ROOT_LOG.debug('Adding plugins...')
console.add_window_plugin(ClockPane())
console.add_window_plugin(CalcPane())
+ _ROOT_LOG.debug('Starting prompt_toolkit full-screen application...')
console.embed()
diff --git a/pw_console/py/pw_console/command_runner.py b/pw_console/py/pw_console/command_runner.py
new file mode 100644
index 000000000..88ff984b9
--- /dev/null
+++ b/pw_console/py/pw_console/command_runner.py
@@ -0,0 +1,515 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""CommandRunner dialog classes."""
+
+from __future__ import annotations
+import functools
+import logging
+import re
+from typing import (
+ Callable,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ TYPE_CHECKING,
+ Tuple,
+)
+
+from prompt_toolkit.buffer import Buffer
+from prompt_toolkit.filters import Condition
+from prompt_toolkit.formatted_text import StyleAndTextTuples
+from prompt_toolkit.formatted_text.utils import fragment_list_to_text
+from prompt_toolkit.layout.utils import explode_text_fragments
+from prompt_toolkit.history import InMemoryHistory
+from prompt_toolkit.key_binding import (
+ KeyBindings,
+ KeyBindingsBase,
+ KeyPressEvent,
+)
+from prompt_toolkit.layout import (
+ AnyContainer,
+ ConditionalContainer,
+ DynamicContainer,
+ FormattedTextControl,
+ HSplit,
+ VSplit,
+ Window,
+ WindowAlign,
+)
+from prompt_toolkit.widgets import MenuItem
+from prompt_toolkit.widgets import TextArea
+
+import pw_console.widgets.border
+import pw_console.widgets.checkbox
+import pw_console.widgets.mouse_handlers
+
+if TYPE_CHECKING:
+ from pw_console.console_app import ConsoleApp
+
+_LOG = logging.getLogger(__package__)
+
+
+def flatten_menu_items(items: List[MenuItem],
+ prefix: str = '') -> Iterator[Tuple[str, Callable]]:
+ """Flatten nested prompt_toolkit MenuItems into text and callable tuples."""
+ for item in items:
+ new_text = []
+ if prefix:
+ new_text.append(prefix)
+ new_text.append(item.text)
+ new_prefix = ' > '.join(new_text)
+
+ if item.children:
+ yield from flatten_menu_items(item.children, new_prefix)
+ elif item.handler:
+ # Skip this item if it's a separator or disabled.
+ if item.text == '-' or item.disabled:
+ continue
+ yield (new_prefix, item.handler)
+
+
+def highlight_matches(
+ regexes: Iterable[re.Pattern],
+ line_fragments: StyleAndTextTuples) -> StyleAndTextTuples:
+ """Highlight regex matches in prompt_toolkit FormattedTextTuples."""
+ line_text = fragment_list_to_text(line_fragments)
+ exploded_fragments = explode_text_fragments(line_fragments)
+
+ def apply_highlighting(fragments: StyleAndTextTuples,
+ index: int,
+ matching_regex_index: int = 0) -> None:
+ # Expand all fragments and apply the highlighting style.
+ old_style, _text, *_ = fragments[index]
+ # There are 6 fuzzy-highlight styles defined in style.py. Get an index
+ # from 0-5 to use one style after the other in turn.
+ style_index = matching_regex_index % 6
+ fragments[index] = (
+ old_style +
+ f' class:command-runner-fuzzy-highlight-{style_index} ',
+ fragments[index][1],
+ )
+
+ # Highlight each non-overlapping search match.
+ for regex_i, regex in enumerate(regexes):
+ for match in regex.finditer(line_text):
+ for fragment_i in range(match.start(), match.end()):
+ apply_highlighting(exploded_fragments, fragment_i, regex_i)
+
+ return exploded_fragments
+
+
+class CommandRunner:
+ """CommandRunner dialog box."""
+
+ # pylint: disable=too-many-instance-attributes
+
+ def __init__(
+ self,
+ application: ConsoleApp,
+ window_title: str = None,
+ load_completions: Optional[Callable[[],
+ List[Tuple[str,
+ Callable]]]] = None,
+ width: int = 80,
+ height: int = 10):
+ # Parent pw_console application
+ self.application = application
+ # Visibility toggle
+ self.show_dialog = False
+ # Tracks the last focused container, to enable restoring focus after
+ # closing the dialog.
+ self.last_focused_pane = None
+
+ # List of all possible completion items
+ self.completions: List[Tuple[str, Callable]] = []
+ # Formatted text fragments of matched items
+ self.completion_fragments: List[StyleAndTextTuples] = []
+
+ # Current selected item tracking variables
+ self.selected_item: int = 0
+ self.selected_item_text: str = ''
+ self.selected_item_handler: Optional[Callable] = None
+ # Previous input text
+ self.last_input_field_text: str = 'EMPTY'
+ # Previous selected item
+ self.last_selected_item: int = 0
+
+ # Dialog width, height and title
+ self.width = width
+ self.height = height
+ self.window_title: str
+
+ # Callable to fetch completion items
+ self.load_completions: Callable[[], List[Tuple[str, Callable]]]
+
+ # Command runner text input field
+ self.input_field = TextArea(
+ prompt=[
+ ('class:command-runner-setting', '> ',
+ functools.partial(pw_console.widgets.mouse_handlers.on_click,
+ self.focus_self))
+ ],
+ focusable=True,
+ focus_on_click=True,
+ scrollbar=False,
+ multiline=False,
+ height=1,
+ dont_extend_height=True,
+ dont_extend_width=False,
+ accept_handler=self._command_accept_handler,
+ history=InMemoryHistory(),
+ )
+ # Set additional keybindings for the input field
+ self.input_field.control.key_bindings = self._create_key_bindings()
+
+ # Container for the Cancel and Run buttons
+ input_field_buttons_container = ConditionalContainer(
+ Window(
+ content=FormattedTextControl(
+ self._get_input_field_button_fragments,
+ focusable=False,
+ show_cursor=False,
+ ),
+ height=1,
+ align=WindowAlign.RIGHT,
+ dont_extend_width=True,
+ ),
+ filter=Condition(lambda: self.content_width() > 40),
+ )
+
+ # Container for completion matches
+ command_items_window = Window(
+ content=FormattedTextControl(
+ self.render_completion_items,
+ show_cursor=False,
+ focusable=False,
+ ),
+ align=WindowAlign.LEFT,
+ dont_extend_width=False,
+ height=self.height,
+ )
+
+ # Main content HSplit
+ self.command_runner_content = HSplit(
+ [
+ # Input field and buttons on the same line
+ VSplit([
+ self.input_field,
+ input_field_buttons_container,
+ ]),
+ # Completion items below
+ command_items_window,
+ ],
+ style='class:command-runner class:theme-fg-default',
+ )
+
+ # Set completions if passed in.
+ self.set_completions(window_title, load_completions)
+
+ # bordered_content wraps the above command_runner_content in a border.
+ self.bordered_content: AnyContainer
+ # Root prompt_toolkit container
+ self.container = ConditionalContainer(
+ DynamicContainer(lambda: self.bordered_content),
+ filter=Condition(lambda: self.show_dialog),
+ )
+
+ def _create_bordered_content(self) -> None:
+ """Wrap self.command_runner_content in a border."""
+ # This should be called whenever the window_title changes.
+ self.bordered_content = pw_console.widgets.border.create_border(
+ self.command_runner_content,
+ title=self.window_title,
+ border_style='class:command-runner-border',
+ left_margin_columns=1,
+ right_margin_columns=1,
+ )
+
+ def __pt_container__(self) -> AnyContainer:
+ """Return the prompt_toolkit root container for this dialog."""
+ return self.container
+
+ def _create_key_bindings(self) -> KeyBindingsBase:
+ """Create additional key bindings for the command input field."""
+ key_bindings = KeyBindings()
+ register = self.application.prefs.register_keybinding
+
+ @register('command-runner.cancel', key_bindings)
+ def _cancel(_event: KeyPressEvent) -> None:
+ """Clear input or close command."""
+ if self._get_input_field_text() != '':
+ self._reset_selected_item()
+ return
+
+ self.close_dialog()
+
+ @register('command-runner.select-previous-item', key_bindings)
+ def _select_previous_item(_event: KeyPressEvent) -> None:
+ """Select previous completion item."""
+ self._previous_item()
+
+ @register('command-runner.select-next-item', key_bindings)
+ def _select_next_item(_event: KeyPressEvent) -> None:
+ """Select next completion item."""
+ self._next_item()
+
+ return key_bindings
+
+ def content_width(self) -> int:
+ """Return the smaller value of self.width and the available width."""
+ window_manager_width = (
+ self.application.window_manager.current_window_manager_width)
+ if not window_manager_width:
+ window_manager_width = self.width
+ return min(self.width, window_manager_width)
+
+ def focus_self(self) -> None:
+ self.application.layout.focus(self)
+
+ def close_dialog(self) -> None:
+ """Close command runner dialog box."""
+ self.show_dialog = False
+ self._reset_selected_item()
+
+ # Restore original focus if possible.
+ if self.last_focused_pane:
+ self.application.focus_on_container(self.last_focused_pane)
+ else:
+ # Fallback to focusing on the main menu.
+ self.application.focus_main_menu()
+
+ def open_dialog(self) -> None:
+ self.show_dialog = True
+ self.last_focused_pane = self.application.focused_window()
+ self.focus_self()
+ self.application.redraw_ui()
+
+ def set_completions(
+ self,
+ window_title: str = None,
+ load_completions: Optional[Callable[[], List[Tuple[str,
+ Callable]]]] = None,
+ ) -> None:
+ """Set window title and callable to fetch possible completions.
+
+ Call this function whenever new completion items need to be loaded.
+ """
+ self.window_title = window_title if window_title else 'Menu Items'
+ self.load_completions = (load_completions
+ if load_completions else self.load_menu_items)
+ self._reset_selected_item()
+
+ self.completions = []
+ self.completion_fragments = []
+
+ # Load and filter completions
+ self.filter_completions()
+
+ # (Re)create the bordered content with the window_title set.
+ self._create_bordered_content()
+
+ def reload_completions(self) -> None:
+ self.completions = self.load_completions()
+
+ def load_menu_items(self) -> List[Tuple[str, Callable]]:
+ # pylint: disable=no-self-use
+ return list(flatten_menu_items(self.application.menu_items))
+
+ def _get_input_field_text(self) -> str:
+ return self.input_field.buffer.text
+
+ def _make_regexes(self, input_text) -> List[re.Pattern]:
+ # pylint: disable=no-self-use
+ regexes: List[re.Pattern] = []
+ if not input_text:
+ return regexes
+
+ text_tokens = input_text.split(' ')
+ if len(text_tokens) > 0:
+ regexes = [
+ re.compile(re.escape(text), re.IGNORECASE)
+ for text in text_tokens
+ ]
+
+ return regexes
+
+ def _matches_orderless(self, regexes: List[re.Pattern], text) -> bool:
+ """Check if all supplied regexs match the input text."""
+ # pylint: disable=no-self-use
+ return all(regex.search(text) for regex in regexes)
+
+ def filter_completions(self) -> None:
+ """Filter completion items if new user input detected."""
+ if not self.input_text_changed() and not self.selected_item_changed():
+ return
+
+ self.reload_completions()
+
+ input_text = self._get_input_field_text()
+ self.completion_fragments = []
+
+ regexes = self._make_regexes(input_text)
+ check_match = self._matches_orderless
+
+ i = 0
+ for text, handler in self.completions:
+ if not (input_text == '' or check_match(regexes, text)):
+ continue
+ style = ''
+ if i == self.selected_item:
+ style = 'class:command-runner-selected-item'
+ self.selected_item_text = text
+ self.selected_item_handler = handler
+ text = text.ljust(self.content_width())
+ fragments: StyleAndTextTuples = highlight_matches(
+ regexes, [(style, text + '\n')])
+ self.completion_fragments.append(fragments)
+ i += 1
+
+ def input_text_changed(self) -> bool:
+ """Return True if text in the input field has changed."""
+ input_text = self._get_input_field_text()
+ if input_text != self.last_input_field_text:
+ self.last_input_field_text = input_text
+ self.selected_item = 0
+ return True
+ return False
+
+ def selected_item_changed(self) -> bool:
+ """Check if the user pressed up or down to select a different item."""
+ return self.last_selected_item != self.selected_item
+
+ def _next_item(self) -> None:
+ self.last_selected_item = self.selected_item
+ self.selected_item = min(
+ # Don't move past the height of the window or the length of possible
+ # items.
+ min(self.height, len(self.completion_fragments)) - 1,
+ self.selected_item + 1)
+ self.application.redraw_ui()
+
+ def _previous_item(self) -> None:
+ self.last_selected_item = self.selected_item
+ self.selected_item = max(0, self.selected_item - 1)
+ self.application.redraw_ui()
+
+ def _get_input_field_button_fragments(self) -> StyleAndTextTuples:
+ # Mouse handlers
+ focus = functools.partial(pw_console.widgets.mouse_handlers.on_click,
+ self.focus_self)
+ cancel = functools.partial(pw_console.widgets.mouse_handlers.on_click,
+ self.close_dialog)
+ select_item = functools.partial(
+ pw_console.widgets.mouse_handlers.on_click,
+ self._run_selected_item)
+
+ separator_text = ('', ' ', focus)
+
+ # Default button style
+ button_style = 'class:toolbar-button-inactive'
+
+ fragments: StyleAndTextTuples = []
+
+ # Cancel button
+ fragments.extend(
+ pw_console.widgets.checkbox.to_keybind_indicator(
+ key='Ctrl-c',
+ description='Cancel',
+ mouse_handler=cancel,
+ base_style=button_style,
+ ))
+ fragments.append(separator_text)
+
+ # Run button
+ fragments.extend(
+ pw_console.widgets.checkbox.to_keybind_indicator(
+ 'Enter', 'Run', select_item, base_style=button_style))
+ return fragments
+
+ def render_completion_items(self) -> StyleAndTextTuples:
+ """Render completion items."""
+ fragments: StyleAndTextTuples = []
+
+ # Update completions if any state change since the last render (new text
+ # entered or arrow keys pressed).
+ self.filter_completions()
+
+ for completion_item in self.completion_fragments:
+ fragments.extend(completion_item)
+
+ return fragments
+
+ def _reset_selected_item(self) -> None:
+ self.selected_item = 0
+ self.last_selected_item = 0
+ self.selected_item_text = ''
+ self.selected_item_handler = None
+ self.last_input_field_text = 'EMPTY'
+ self.input_field.buffer.reset()
+
+ def _run_selected_item(self) -> None:
+ """Run the selected action."""
+ if not self.selected_item_handler:
+ return
+ # Save the selected item handler. This is reset by self.close_dialog()
+ handler = self.selected_item_handler
+
+ # Depending on what action is run, the command runner dialog may need to
+ # be closed, left open, or closed before running the selected action.
+ close_dialog = True
+ close_dialog_first = False
+
+ # Actions that launch new command runners, close_dialog should not run.
+ for command_text in [
+ '[File] > Open Logger',
+ ]:
+ if command_text in self.selected_item_text:
+ close_dialog = False
+ break
+
+ # Actions that change what is in focus should be run after closing the
+ # command runner dialog.
+ for command_text in [
+ '[View] > Focus Next Window/Tab',
+ '[View] > Focus Prev Window/Tab',
+ # All help menu entries open popup windows.
+ '[Help] > ',
+ # This focuses on a save dialog bor.
+ 'Save/Export a copy',
+ ]:
+ if command_text in self.selected_item_text:
+ close_dialog_first = True
+ break
+
+ # Close first if needed
+ if close_dialog and close_dialog_first:
+ self.close_dialog()
+
+ # Run the selected item handler
+ handler()
+
+ # If not already closed earlier.
+ if close_dialog and not close_dialog_first:
+ self.close_dialog()
+
+ def _command_accept_handler(self, _buff: Buffer) -> bool:
+ """Function run when pressing Enter in the command runner input box."""
+ # If at least one match is available
+ if len(self.completion_fragments) > 0:
+ self._run_selected_item()
+ # Erase input text
+ return False
+ # Keep input text
+ return True
diff --git a/pw_console/py/pw_console/console_app.py b/pw_console/py/pw_console/console_app.py
index 5bb5fceee..560d94672 100644
--- a/pw_console/py/pw_console/console_app.py
+++ b/pw_console/py/pw_console/console_app.py
@@ -21,7 +21,7 @@ import os
from pathlib import Path
import sys
from threading import Thread
-from typing import Callable, Iterable, Optional, Union
+from typing import Any, Callable, Iterable, List, Optional, Tuple, Union
from jinja2 import Environment, FileSystemLoader, make_logging_undefined
from prompt_toolkit.clipboard.pyperclip import PyperclipClipboard
@@ -56,8 +56,10 @@ from ptpython.key_bindings import ( # type: ignore
from pw_console.console_prefs import ConsolePrefs
from pw_console.help_window import HelpWindow
+from pw_console.command_runner import CommandRunner
import pw_console.key_bindings
from pw_console.log_pane import LogPane
+from pw_console.log_store import LogStore
from pw_console.pw_ptpython_repl import PwPtPythonRepl
from pw_console.python_logging import all_loggers
from pw_console.quit_dialog import QuitDialog
@@ -75,6 +77,9 @@ _FAKE_DEVICE_LOG = logging.getLogger(FAKE_DEVICE_LOGGER_NAME)
# Don't send fake_device logs to the root Python logger.
_FAKE_DEVICE_LOG.propagate = False
+MAX_FPS = 15
+MIN_REDRAW_INTERVAL = (60.0 / MAX_FPS) / 60.0
+
class FloatingMessageBar(ConditionalContainer):
"""Floating message bar for showing status messages."""
@@ -119,7 +124,7 @@ def get_default_colordepth(
class ConsoleApp:
"""The main ConsoleApp class that glues everything together."""
- # pylint: disable=too-many-instance-attributes
+ # pylint: disable=too-many-instance-attributes,too-many-public-methods
def __init__(
self,
global_vars=None,
@@ -129,8 +134,9 @@ class ConsoleApp:
app_title=None,
color_depth=None,
extra_completers=None,
+ prefs=None,
):
- self.prefs = ConsolePrefs()
+ self.prefs = prefs if prefs else ConsolePrefs()
self.color_depth = get_default_colordepth(color_depth)
# Create a default global and local symbol table. Values are the same
@@ -182,10 +188,10 @@ class ConsoleApp:
self.message.extend(
pw_console.widgets.checkbox.to_keybind_indicator(
- 'F1',
- 'Help',
+ 'Ctrl-p',
+ 'Search Menu',
functools.partial(pw_console.widgets.mouse_handlers.on_click,
- self.user_guide_window.toggle_display),
+ self.open_command_runner_main_menu),
base_style='class:toolbar-button-inactive',
))
# One space separator
@@ -201,6 +207,13 @@ class ConsoleApp:
title=(self.app_title + ' Help'))
self.app_help_window.generate_help_text()
+ self.prefs_file_window = HelpWindow(self, title='.pw_console.yaml')
+ self.prefs_file_window.load_yaml_text(
+ self.prefs.current_config_as_yaml())
+
+ # Used for tracking which pane was in focus before showing help window.
+ self.last_focused_pane = None
+
# Create a ptpython repl instance.
self.pw_ptpython_repl = PwPtPythonRepl(
get_globals=lambda: global_vars,
@@ -236,63 +249,84 @@ class ConsoleApp:
# Create help window text based global key_bindings and active panes.
self._update_help_window()
+ self.command_runner = CommandRunner(
+ self,
+ width=self.prefs.command_runner_width,
+ height=self.prefs.command_runner_height,
+ )
+
+ self.floats = [
+ # Top message bar
+ Float(
+ content=FloatingMessageBar(self),
+ top=0,
+ right=0,
+ height=1,
+ ),
+ # Centered floating help windows
+ Float(
+ content=self.prefs_file_window,
+ top=2,
+ bottom=2,
+ # Callable to get width
+ width=self.prefs_file_window.content_width,
+ ),
+ Float(
+ content=self.app_help_window,
+ top=2,
+ bottom=2,
+ # Callable to get width
+ width=self.app_help_window.content_width,
+ ),
+ Float(
+ content=self.user_guide_window,
+ top=2,
+ bottom=2,
+ # Callable to get width
+ width=self.user_guide_window.content_width,
+ ),
+ Float(
+ content=self.keybind_help_window,
+ top=2,
+ bottom=2,
+ # Callable to get width
+ width=self.keybind_help_window.content_width,
+ ),
+ # Completion menu that can overlap other panes since it lives in
+ # the top level Float container.
+ Float(
+ xcursor=True,
+ ycursor=True,
+ content=ConditionalContainer(
+ content=CompletionsMenu(
+ scroll_offset=(lambda: self.pw_ptpython_repl.
+ completion_menu_scroll_offset),
+ max_height=16,
+ ),
+ # Only show our completion if ptpython's is disabled.
+ filter=Condition(
+ lambda: self.pw_ptpython_repl.completion_visualisation
+ == CompletionVisualisation.NONE),
+ ),
+ ),
+ Float(
+ content=self.command_runner,
+ # Callable to get width
+ width=self.command_runner.content_width,
+ **self.prefs.command_runner_position,
+ ),
+ Float(
+ content=self.quit_dialog,
+ top=2,
+ left=2,
+ ),
+ ]
+
# prompt_toolkit root container.
self.root_container = MenuContainer(
body=self.window_manager.create_root_container(),
menu_items=self.menu_items,
- floats=[
- # Top message bar
- Float(
- content=FloatingMessageBar(self),
- top=0,
- right=0,
- height=1,
- ),
- # Centered floating help windows
- Float(
- content=self.app_help_window,
- top=2,
- bottom=2,
- # Callable to get width
- width=self.app_help_window.content_width,
- ),
- Float(
- content=self.user_guide_window,
- top=2,
- bottom=2,
- # Callable to get width
- width=self.user_guide_window.content_width,
- ),
- Float(
- content=self.keybind_help_window,
- top=2,
- bottom=2,
- # Callable to get width
- width=self.keybind_help_window.content_width,
- ),
- Float(
- content=self.quit_dialog,
- top=2,
- left=2,
- ),
- # Completion menu that can overlap other panes since it lives in
- # the top level Float container.
- Float(
- xcursor=True,
- ycursor=True,
- content=ConditionalContainer(
- content=CompletionsMenu(
- scroll_offset=(lambda: self.pw_ptpython_repl.
- completion_menu_scroll_offset),
- max_height=16,
- ),
- # Only show our completion if ptpython's is disabled.
- filter=Condition(lambda: self.pw_ptpython_repl.
- completion_visualisation ==
- CompletionVisualisation.NONE),
- ),
- ),
- ],
+ floats=self.floats,
)
# NOTE: ptpython stores it's completion menus in this HSplit:
@@ -319,7 +353,6 @@ class ConsoleApp:
# Create the prompt_toolkit Application instance.
self.application: Application = Application(
layout=self.layout,
- after_render=self.run_after_render_hooks,
key_bindings=merge_key_bindings([
# Pull key bindings from ptpython
load_python_bindings(self.pw_ptpython_repl),
@@ -338,6 +371,7 @@ class ConsoleApp:
mouse_support=True,
color_depth=self.color_depth,
clipboard=PyperclipClipboard(),
+ min_redraw_interval=MIN_REDRAW_INTERVAL,
)
def get_template(self, file_name: str):
@@ -387,24 +421,37 @@ class ConsoleApp:
self.menu_items = self._create_menu_items()
self.root_container.menu_items = self.menu_items
- def _create_logger_submenu(self):
- submenu = [
- MenuItem(
+ def open_command_runner_main_menu(self) -> None:
+ self.command_runner.set_completions()
+ if not self.command_runner_is_open():
+ self.command_runner.open_dialog()
+
+ def open_command_runner_loggers(self) -> None:
+ self.command_runner.set_completions(
+ window_title='Open Logger',
+ load_completions=self._create_logger_completions)
+ if not self.command_runner_is_open():
+ self.command_runner.open_dialog()
+
+ def _create_logger_completions(self) -> List[Tuple[str, Callable]]:
+ completions: List[Tuple[str, Callable]] = [
+ (
'root',
- handler=functools.partial(self.open_new_log_pane_for_logger,
- '',
- window_title='root'),
- )
+ functools.partial(self.open_new_log_pane_for_logger,
+ '',
+ window_title='root'),
+ ),
]
+
all_logger_names = sorted([logger.name for logger in all_loggers()])
+
for logger_name in all_logger_names:
- submenu.append(
- MenuItem(
- logger_name,
- handler=functools.partial(
- self.open_new_log_pane_for_logger, logger_name),
- ))
- return submenu
+ completions.append((
+ logger_name,
+ functools.partial(self.open_new_log_pane_for_logger,
+ logger_name),
+ ))
+ return completions
def _create_menu_items(self):
themes_submenu = [
@@ -454,7 +501,7 @@ class ConsoleApp:
'[File]',
children=[
MenuItem('Open Logger',
- children=self._create_logger_submenu()),
+ handler=self.open_command_runner_loggers),
MenuItem(
'Log Table View',
children=[
@@ -471,6 +518,16 @@ class ConsoleApp:
'hide_date_from_log_time')),
),
MenuItem(
+ '{check} Show Source File'.format(
+ check=pw_console.widgets.checkbox.
+ to_checkbox_text(
+ self.prefs.show_source_file, end='')),
+ handler=functools.partial(
+ self.run_pane_menu_option,
+ functools.partial(self.toggle_pref_option,
+ 'show_source_file')),
+ ),
+ MenuItem(
'{check} Show Python File'.format(
check=pw_console.widgets.checkbox.
to_checkbox_text(
@@ -510,6 +567,11 @@ class ConsoleApp:
MenuItem('Paste to Python Input',
handler=self.repl_pane.
paste_system_clipboard_to_input_buffer),
+ MenuItem('-'),
+ MenuItem('Copy all Python Output',
+ handler=self.repl_pane.copy_all_output_text),
+ MenuItem('Copy all Python Input',
+ handler=self.repl_pane.copy_all_input_text),
],
),
]
@@ -588,6 +650,9 @@ class ConsoleApp:
handler=self.user_guide_window.toggle_display),
MenuItem(self.keybind_help_window.menu_title(),
handler=self.keybind_help_window.toggle_display),
+ MenuItem('-'),
+ MenuItem('View Key Binding Config',
+ handler=self.prefs_file_window.toggle_display),
]
if self.app_help_text:
@@ -613,7 +678,13 @@ class ConsoleApp:
def focus_on_container(self, pane):
"""Set application focus to a specific container."""
- self.application.layout.focus(pane)
+ # Try to focus on the given pane
+ try:
+ self.application.layout.focus(pane)
+ except ValueError:
+ # If the container can't be focused, focus on the first visible
+ # window pane.
+ self.window_manager.focus_first_visible_pane()
def toggle_light_theme(self):
"""Toggle light and dark theme colors."""
@@ -632,15 +703,19 @@ class ConsoleApp:
if theme_name:
self.prefs.set_ui_theme(theme_name)
- def _create_log_pane(self, title=None) -> 'LogPane':
+ def _create_log_pane(self,
+ title: str = '',
+ log_store: Optional[LogStore] = None) -> 'LogPane':
# Create one log pane.
- log_pane = LogPane(application=self, pane_title=title)
+ log_pane = LogPane(application=self,
+ pane_title=title,
+ log_store=log_store)
self.window_manager.add_pane(log_pane)
return log_pane
def load_clean_config(self, config_file: Path) -> None:
self.prefs.reset_config()
- self.prefs.load_config(config_file)
+ self.prefs.load_config_file(config_file)
def apply_window_config(self) -> None:
self.window_manager.apply_config(self.prefs)
@@ -653,7 +728,7 @@ class ConsoleApp:
def add_log_handler(
self,
window_title: str,
- logger_instances: Iterable[logging.Logger],
+ logger_instances: Union[Iterable[logging.Logger], LogStore],
separate_log_panes: bool = False,
log_level_name: Optional[str] = None) -> Optional[LogPane]:
"""Add the Log pane as a handler for this logger instance."""
@@ -665,11 +740,18 @@ class ConsoleApp:
existing_log_pane = pane
break
+ log_store: Optional[LogStore] = None
+ if isinstance(logger_instances, LogStore):
+ log_store = logger_instances
+
if not existing_log_pane or separate_log_panes:
- existing_log_pane = self._create_log_pane(title=window_title)
+ existing_log_pane = self._create_log_pane(title=window_title,
+ log_store=log_store)
- for logger in logger_instances:
- _add_log_handler_to_pane(logger, existing_log_pane, log_level_name)
+ if isinstance(logger_instances, list):
+ for logger in logger_instances:
+ _add_log_handler_to_pane(logger, existing_log_pane,
+ log_level_name)
self.refresh_layout()
return existing_log_pane
@@ -679,12 +761,6 @@ class ConsoleApp:
asyncio.set_event_loop(self.user_code_loop)
self.user_code_loop.run_forever()
- def run_after_render_hooks(self, *unused_args, **unused_kwargs):
- """Run each active pane's `after_render_hook` if defined."""
- for pane in self.window_manager.active_panes():
- if hasattr(pane, 'after_render_hook'):
- pane.after_render_hook()
-
def start_user_code_thread(self):
"""Create a thread for running user code so the UI isn't blocked."""
thread = Thread(target=self._user_code_thread_entry,
@@ -733,16 +809,26 @@ class ConsoleApp:
"""Return the currently focused window."""
return self.application.layout.current_window
+ def command_runner_is_open(self) -> bool:
+ return self.command_runner.show_dialog
+
+ def command_runner_last_focused_pane(self) -> Any:
+ return self.command_runner.last_focused_pane
+
def modal_window_is_open(self):
"""Return true if any modal window or dialog is open."""
if self.app_help_text:
return (self.app_help_window.show_window
or self.keybind_help_window.show_window
+ or self.prefs_file_window.show_window
or self.user_guide_window.show_window
- or self.quit_dialog.show_dialog)
+ or self.quit_dialog.show_dialog
+ or self.command_runner.show_dialog)
return (self.keybind_help_window.show_window
+ or self.prefs_file_window.show_window
or self.user_guide_window.show_window
- or self.quit_dialog.show_dialog)
+ or self.quit_dialog.show_dialog
+ or self.command_runner.show_dialog)
def exit_console(self):
"""Quit the console prompt_toolkit application UI."""
@@ -760,22 +846,17 @@ class ConsoleApp:
if test_mode:
background_log_task = asyncio.create_task(self.log_forever())
- background_menu_updater_task = asyncio.create_task(
- self.background_menu_updater())
+ # Repl pane has focus by default, if it's hidden switch focus to another
+ # visible pane.
+ if not self.repl_pane.show_pane:
+ self.window_manager.focus_first_visible_pane()
+
try:
unused_result = await self.application.run_async(
set_exception_handler=True)
finally:
if test_mode:
background_log_task.cancel()
- background_menu_updater_task.cancel()
-
- async def background_menu_updater(self):
- """Periodically update main menu items to capture new logger names."""
- while True:
- await asyncio.sleep(30)
- _LOG.debug('Update main menu items')
- self.update_menu_items()
async def log_forever(self):
"""Test mode async log generator coroutine that runs forever."""
@@ -810,9 +891,9 @@ class ConsoleApp:
' return "t({}) seconds done".format(s)\n\n')
module_name = module_names[message_count % len(module_names)]
- _FAKE_DEVICE_LOG.info(
- new_log_line,
- extra=dict(extra_metadata_fields=dict(module=module_name)))
+ _FAKE_DEVICE_LOG.info(new_log_line,
+ extra=dict(extra_metadata_fields=dict(
+ module=module_name, file='fake_app.cc')))
message_count += 1
diff --git a/pw_console/py/pw_console/console_prefs.py b/pw_console/py/pw_console/console_prefs.py
index 1a79ad7ec..1fda908f5 100644
--- a/pw_console/py/pw_console/console_prefs.py
+++ b/pw_console/py/pw_console/console_prefs.py
@@ -13,45 +13,62 @@
# the License.
"""pw_console preferences"""
-from dataclasses import dataclass, field
import os
from pathlib import Path
-from typing import Any, Dict, List, Union
+from typing import Dict, Callable, List, Union
+from prompt_toolkit.key_binding import KeyBindings
import yaml
from pw_console.style import get_theme_colors
+from pw_console.key_bindings import DEFAULT_KEY_BINDINGS
+from pw_console.yaml_config_loader_mixin import YamlConfigLoaderMixin
_DEFAULT_REPL_HISTORY: Path = Path.home() / '.pw_console_history'
_DEFAULT_SEARCH_HISTORY: Path = Path.home() / '.pw_console_search'
_DEFAULT_CONFIG = {
- 'pw_console': {
- # History files
- 'repl_history': _DEFAULT_REPL_HISTORY,
- 'search_history': _DEFAULT_SEARCH_HISTORY,
- # Appearance
- 'ui_theme': 'dark',
- 'code_theme': 'pigweed-code',
- 'swap_light_and_dark': False,
- 'spaces_between_columns': 2,
- 'column_order_omit_unspecified_columns': False,
- 'column_order': [],
- 'column_colors': {},
- 'show_python_file': False,
- 'show_python_logger': False,
- 'hide_date_from_log_time': False,
- # Window arrangement
- 'windows': {},
- 'window_column_split_method': 'vertical',
+ # History files
+ 'repl_history': _DEFAULT_REPL_HISTORY,
+ 'search_history': _DEFAULT_SEARCH_HISTORY,
+ # Appearance
+ 'ui_theme': 'dark',
+ 'code_theme': 'pigweed-code',
+ 'swap_light_and_dark': False,
+ 'spaces_between_columns': 2,
+ 'column_order_omit_unspecified_columns': False,
+ 'column_order': [],
+ 'column_colors': {},
+ 'show_python_file': False,
+ 'show_python_logger': False,
+ 'show_source_file': False,
+ 'hide_date_from_log_time': False,
+ # Window arrangement
+ 'windows': {},
+ 'window_column_split_method': 'vertical',
+ 'command_runner': {
+ 'width': 80,
+ 'height': 10,
+ 'position': {
+ 'top': 3
+ },
},
+ 'key_bindings': DEFAULT_KEY_BINDINGS,
}
+_DEFAULT_PROJECT_FILE = Path('$PW_PROJECT_ROOT/.pw_console.yaml')
+_DEFAULT_PROJECT_USER_FILE = Path('$PW_PROJECT_ROOT/.pw_console.user.yaml')
+_DEFAULT_USER_FILE = Path('$HOME/.pw_console.yaml')
+
class UnknownWindowTitle(Exception):
"""Exception for window titles not present in the window manager layout."""
+class EmptyWindowList(Exception):
+ """Exception for window lists with no content."""
+
+
def error_unknown_window(window_title: str,
existing_pane_titles: List[str]) -> None:
"""Raise an error when the window config has an unknown title.
@@ -74,52 +91,38 @@ def error_unknown_window(window_title: str,
'https://pigweed.dev/pw_console/docs/user_guide.html#example-config')
-@dataclass
-class ConsolePrefs:
+def error_empty_window_list(window_list_title: str, ) -> None:
+ """Raise an error if a window list is empty."""
+
+ raise EmptyWindowList(
+ f'\n\nError: The window layout heading "{window_list_title}" contains '
+ 'no windows.\n'
+ 'See also: '
+ 'https://pigweed.dev/pw_console/docs/user_guide.html#example-config')
+
+
+class ConsolePrefs(YamlConfigLoaderMixin):
"""Pigweed Console preferences storage class."""
- project_file: Union[Path, bool] = Path('$PW_PROJECT_ROOT/.pw_console.yaml')
- user_file: Union[Path, bool] = Path('$HOME/.pw_console.yaml')
- _config: Dict[Any, Any] = field(default_factory=dict)
-
- def __post_init__(self) -> None:
- self._update_config(_DEFAULT_CONFIG)
-
- if self.project_file:
- assert isinstance(self.project_file, Path)
- self.project_file = Path(
- os.path.expandvars(str(self.project_file.expanduser())))
- self.load_config(self.project_file)
-
- if self.user_file:
- assert isinstance(self.user_file, Path)
- self.user_file = Path(
- os.path.expandvars(str(self.user_file.expanduser())))
- self.load_config(self.user_file)
-
- # Check for a config file specified by an environment variable.
- environment_config = os.environ.get('PW_CONSOLE_CONFIG_FILE', None)
- if environment_config:
- env_file_path = Path(environment_config)
- if not env_file_path.is_file():
- raise FileNotFoundError(
- f'Cannot load config file: {env_file_path}')
- self.reset_config()
- self.load_config(env_file_path)
-
- def _update_config(self, cfg: Dict[Any, Any]) -> None:
- assert 'pw_console' in cfg
- self._config.update(cfg.get('pw_console', {}))
-
- def reset_config(self) -> None:
- self._config = {}
- self._update_config(_DEFAULT_CONFIG)
-
- def load_config(self, file_path: Path) -> None:
- if not file_path.is_file():
- return
- cfg = yaml.load(file_path.read_text(), Loader=yaml.Loader)
- self._update_config(cfg)
+ # pylint: disable=too-many-public-methods
+
+ def __init__(
+ self,
+ project_file: Union[Path, bool] = _DEFAULT_PROJECT_FILE,
+ project_user_file: Union[Path, bool] = _DEFAULT_PROJECT_USER_FILE,
+ user_file: Union[Path, bool] = _DEFAULT_USER_FILE,
+ ) -> None:
+ self.config_init(
+ config_section_title='pw_console',
+ project_file=project_file,
+ project_user_file=project_user_file,
+ user_file=user_file,
+ default_config=_DEFAULT_CONFIG,
+ environment_var='PW_CONSOLE_CONFIG_FILE',
+ )
+
+ self.registered_commands = DEFAULT_KEY_BINDINGS
+ self.registered_commands.update(self.user_key_bindings)
@property
def ui_theme(self) -> str:
@@ -171,6 +174,10 @@ class ConsolePrefs:
return self._config.get('show_python_file', False)
@property
+ def show_source_file(self) -> bool:
+ return self._config.get('show_source_file', False)
+
+ @property
def show_python_logger(self) -> bool:
return self._config.get('show_python_logger', False)
@@ -214,12 +221,74 @@ class ConsolePrefs:
return list(column_type for column_type in self.windows.keys())
@property
+ def command_runner_position(self) -> Dict[str, int]:
+ position = self._config.get('command_runner',
+ {}).get('position', {'top': 3})
+ return {
+ key: value
+ for key, value in position.items()
+ if key in ['top', 'bottom', 'left', 'right']
+ }
+
+ @property
+ def command_runner_width(self) -> int:
+ return self._config.get('command_runner', {}).get('width', 80)
+
+ @property
+ def command_runner_height(self) -> int:
+ return self._config.get('command_runner', {}).get('height', 10)
+
+ @property
+ def user_key_bindings(self) -> Dict[str, List[str]]:
+ return self._config.get('key_bindings', {})
+
+ def current_config_as_yaml(self) -> str:
+ yaml_options = dict(sort_keys=True,
+ default_style='',
+ default_flow_style=False)
+
+ title = {'config_title': 'pw_console'}
+ text = '\n'
+ text += yaml.safe_dump(title, **yaml_options) # type: ignore
+
+ keys = {'key_bindings': self.registered_commands}
+ text += '\n'
+ text += yaml.safe_dump(keys, **yaml_options) # type: ignore
+
+ return text
+
+ @property
def unique_window_titles(self) -> set:
titles = []
- for column in self.windows.values():
+ for window_list_title, column in self.windows.items():
+ if not column:
+ error_empty_window_list(window_list_title)
+
for window_key_title, window_dict in column.items():
window_options = window_dict if window_dict else {}
# Use 'duplicate_of: Title' if it exists, otherwise use the key.
titles.append(
window_options.get('duplicate_of', window_key_title))
return set(titles)
+
+ def get_function_keys(self, name: str) -> List:
+ """Return the keys for the named function."""
+ try:
+ return self.registered_commands[name]
+ except KeyError as error:
+ raise KeyError('Unbound key function: {}'.format(name)) from error
+
+ def register_named_key_function(self, name: str,
+ default_bindings: List[str]) -> None:
+ self.registered_commands[name] = default_bindings
+
+ def register_keybinding(self, name: str, key_bindings: KeyBindings,
+ **kwargs) -> Callable:
+ """Apply registered keys for the given named function."""
+ def decorator(handler: Callable) -> Callable:
+ "`handler` is a callable or Binding."
+ for keys in self.get_function_keys(name):
+ key_bindings.add(*keys.split(' '), **kwargs)(handler)
+ return handler
+
+ return decorator
diff --git a/pw_console/py/pw_console/docs/user_guide.rst b/pw_console/py/pw_console/docs/user_guide.rst
index be4ff40eb..3f1edcb15 100644
--- a/pw_console/py/pw_console/docs/user_guide.rst
+++ b/pw_console/py/pw_console/docs/user_guide.rst
@@ -5,7 +5,9 @@ User Guide
.. seealso::
- This guide can be viewed online at: https://pigweed.dev/pw_console/
+ This guide can be viewed online at:
+ https://pigweed.dev/pw_console/py/pw_console/docs/user_guide.html
+
The Pigweed Console provides a Python repl (read eval print loop) and log viewer
in a single-window terminal based interface.
@@ -27,9 +29,11 @@ There are a few ways to exit the Pigweed Console user interface:
1. Click the :guilabel:`[File]` menu and then :guilabel:`Exit`.
2. Type ``quit`` or ``exit`` in the Python Input window and press :kbd:`Enter`.
3. Press :kbd:`Ctrl-d` once to show the quit confirmation dialog. From there
- pressing :kbd:`Ctrl-d` a second time or :kbd:`y` will exit.
-4. Pressing :kbd:`Ctrl-x` quickly followed by :kbd:`Ctrl-c` will exit without
+   press :kbd:`Ctrl-d` a second time or :kbd:`y` to exit.
+4. Press :kbd:`Ctrl-x` quickly followed by :kbd:`Ctrl-c` to exit without
confirmation.
+5. Press :kbd:`Ctrl-p` to search for commands, type ``exit``, then press
+ :kbd:`Enter`.
Interface Layout
@@ -64,8 +68,36 @@ mouse wheel should work too. This requires that your terminal is able to send
mouse events.
-Main Menu Navigation with the Keyboard
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Navigation with the Keyboard
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The main menu can be searched by pressing :kbd:`Ctrl-p`. This opens a fuzzy
+search box containing all main menu item actions.
+
+Words separated by spaces are used to narrow down the match results. The order
+in which each word is entered does not matter.
+
+.. figure:: /pw_console/images/command_runner_main_menu.svg
+ :alt: Main menu item search dialog.
+
+============================================ =====================
+Function Keys
+============================================ =====================
+Open main menu search :kbd:`Ctrl-p`
+Cancel search :kbd:`Ctrl-c`
+Run selected item :kbd:`Enter`
+
+Select next item :kbd:`Tab`
+ :kbd:`Down`
+Select previous item :kbd:`Shift-Tab`
+ :kbd:`Up`
+============================================ =====================
+
+Switching Focus
+~~~~~~~~~~~~~~~
+
+Clicking on any window will focus on it. Alternatively, the key bindings below
+will switch focus.
============================================ =====================
Function Keys
@@ -130,6 +162,9 @@ Select the next log line :kbd:`Shift-Down`
Select the previous log line :kbd:`Shift-Up`
Select a range of log lines :guilabel:`Left Mouse Drag`
+
+Select all lines :kbd:`Ctrl-a`
+Clear Selection :kbd:`Ctrl-c`
============================================ =====================
When making log line selections a popup will appear in the upper right of the log
@@ -186,15 +221,13 @@ Here is a view of the search bar:
::
- +-------------------------------------------------------------------------------+
- | Enter : Search Ctrl-Alt-f : Add Filter Ctrl-Alt-r : Clear Filters |
- | Search Ctrl-t : Column:All Ctrl-v : [ ] Invert Ctrl-n : Matcher:REGEX |
- | / |
- +-------------------------------------------------------------------------------+
+ +--------------------------------------------------------------------------+
+ | Search Column:All Ctrl-t [ ] Invert Ctrl-v Matcher:REGEX Ctrl-n |
+ | / Search Enter Cancel Ctrl-c |
+ +--------------------------------------------------------------------------+
Across the top are various functions with keyboard shortcuts listed. Each of
-these are clickable with the mouse. The second line shows configurable search
-parameters.
+these are clickable with the mouse.
**Search Parameters**
@@ -234,7 +267,7 @@ Move to the next search result :kbd:`n`
:kbd:`Ctrl-s`
Move to the previous search result :kbd:`N`
:kbd:`Ctrl-r`
-Removes search highlighting :kbd:`Ctrl-l`
+Clear active search :kbd:`Ctrl-c`
Creates a filter using the active search :kbd:`Ctrl-Alt-f`
Reset all active filters. :kbd:`Ctrl-Alt-r`
============================================ =====================
@@ -520,16 +553,40 @@ For Windows command prompt (``cmd.exe``) use the ``set`` command:
Configuration
-------------
-Pigweed Console supports loading project and user specific settings stored in a
-YAML file. By default these files will be loaded one after the other:
+Pigweed Console supports loading project and user specific settings stored in
+YAML files. Each file follows the same format and are loaded one after the
+other. Any setting specified multiple locations will be overridden by files
+loaded later in the startup sequence.
+
+1. ``$PW_PROJECT_ROOT/.pw_console.yaml``
-- ``$PW_PROJECT_ROOT/.pw_console.yaml``
-- ``$HOME/.pw_console.yaml``
+ Project level config file. This is intended to be a file living somewhere
+ under a project folder and is checked into version control. It serves as a
+ base config for all users to inherit from.
-Each file follows the same format with settings in ``$HOME`` overriding ones in
-``$PW_PROJECT_ROOT``.
+2. ``$PW_PROJECT_ROOT/.pw_console.user.yaml``
-It's also possible to specify a config file via a shell environment variable.
+ User's personal config file for a specific project. This can be a file that
+ lives in a project folder but is git-ignored and not checked into version
+ control. This lets users change settings applicable to this project only.
+
+3. ``$HOME/.pw_console.yaml``
+
+ A global user based config file. This file is located in the user's home
+ directory and settings here apply to all projects. This is a good location to
+ set appearance options such as:
+
+ .. code-block:: yaml
+
+ ui_theme: nord
+ code_theme: pigweed-code
+ swap_light_and_dark: False
+ spaces_between_columns: 2
+ hide_date_from_log_time: False
+
+It's also possible to specify a config file via a shell environment variable.
+If this method is used, only this config file is applied. Project and user
+config file options will not be set.
::
@@ -540,137 +597,329 @@ Example Config
.. code-block:: yaml
- pw_console:
-
- # Repl and Search History files
- # Setting these to a file located $PW_PROJECT_ROOT is a
- # good way to make Python repl history project specific.
-
- # Default: $HOME/.pw_console_history
- repl_history: $PW_PROJECT_ROOT/.pw_console_history
-
- # Default: $HOME/.pw_console_search
- search_history: $PW_PROJECT_ROOT/.pw_console_search
-
- # Theme Settings
-
- # Default: dark
- ui_theme: high-contrast-dark
-
- # Default: pigweed-code
- code_theme: material
-
- # Default: False
- swap_light_and_dark: False
-
- # Log Table View Settings
-
- # Number of spaces to insert between columns
- # Default: 2
- spaces_between_columns: 2
-
- # Hide the year month and day from the time column.
- hide_date_from_log_time: False
-
- # Custom Column Ordering
- # By default columns are ordered as:
- # time, level, metadata1, metadata2, ..., message
- # The log message is always the last value and not required in this list.
- column_order:
- # Column name
- - time
- - level
- - metadata1
- - metadata2
- # If Any metadata field not listed above will be hidden in table view.
- column_order_omit_unspecified_columns: False
-
- # Unique Colors for Column Values
- # Color format: 'bg:#BG-HEX #FG-HEX STYLE'
- # All parts are optional.
- # Empty strings will leave styling unchanged.
- # See prompt_toolkit style format docs here:
- # https://python-prompt-toolkit.readthedocs.io/en/latest/pages/advanced_topics/styling.html
- column_colors:
- # Column name
- time:
- level:
- metadata1:
- # Field values
- # Default will be applied if no match found
- default: '#98be65'
- BATTERY: 'bg:#6699cc #000000 bold'
- CORE1: 'bg:#da8548 #000000 bold'
- CORE2: 'bg:#66cccc #000000 bold'
- metadata2:
- default: '#ffcc66'
- APP: 'bg:#ff6c6b #000000 bold'
- WIFI: '#555555'
-
- # Each window column is normally aligned side by side in vertical
- # splits. You can change this to one group of windows on top of the other
- # with horizontal splits using this method
- # Default: vertical
- window_column_split_method: vertical
-
- # Window Layout
- windows:
- # First window column (vertical split)
- # Each split should have a unique name and include either
- # 'stacked' or 'tabbed' to select a window pane display method.
- Split 1 stacked:
- # Items here are window titles, each should be unique.
- # Window 1
- Device Logs:
- height: 33 # Weighted value for window height
- hidden: False # Hide this window if True
- # Window 2
- Python Repl:
- height: 67
- # Window 3
- Host Logs:
- hidden: True
-
- # Second window column
- Split 2 tabbed:
- # This is a duplicate of the existing 'Device Logs' window with a new title
- NEW DEVICE:
- duplicate_of: Device Logs
- # Log filters are defined here
- filters:
- # Metadata column names here or 'all'
- source_name:
- # Matching method name here
- # regex, regex-inverted, string, string-inverted
- regex: 'USB'
- module:
- # An inverted match will remove matching log lines
- regex-inverted: 'keyboard'
- NEW HOST:
- duplicate_of: Host Logs
- filters:
- all:
- string: 'FLASH'
-
- # Third window column
- Split 3 tabbed:
- # This is a brand new log Window
- Keyboard Logs - IBM:
- loggers:
- # Python logger names to include in this log window
- my_cool_keyboard_device:
- # Level the logger should be set to.
- level: DEBUG
- filters:
- all:
- regex: 'IBM Model M'
- Keyboard Logs - Apple:
- loggers:
- my_cool_keyboard_device:
- level: DEBUG
- filters:
- all:
- regex: 'Apple.*USB'
+ ---
+ config_title: pw_console
+
+ # Repl and Search History files
+ # Setting these to a file located $PW_PROJECT_ROOT is a
+ # good way to make Python repl history project specific.
+
+ # Default: $HOME/.pw_console_history
+ repl_history: $PW_PROJECT_ROOT/.pw_console_history
+
+ # Default: $HOME/.pw_console_search
+ search_history: $PW_PROJECT_ROOT/.pw_console_search
+
+ # Theme Settings
+
+ # Default: dark
+ ui_theme: high-contrast-dark
+
+ # Default: pigweed-code
+ code_theme: material
+
+ # Default: False
+ swap_light_and_dark: False
+
+ # Log Table View Settings
+
+ # Number of spaces to insert between columns
+ # Default: 2
+ spaces_between_columns: 2
+
+ # Hide the year month and day from the time column.
+ hide_date_from_log_time: False
+
+ # Show the Python file and line number responsible for creating log messages.
+ show_python_file: False
+ # Show the Python logger responsible for creating log messages.
+ show_python_logger: False
+ # Show the 'file' metadata column.
+ show_source_file: False
+
+ # Custom Column Ordering
+ # By default columns are ordered as:
+ # time, level, metadata1, metadata2, ..., message
+ # The log message is always the last value and not required in this list.
+ column_order:
+ # Column name
+ - time
+ - level
+ - metadata1
+ - metadata2
+
+ # If True, any metadata field not listed above in 'column_order'
+ # will be hidden in table view.
+ column_order_omit_unspecified_columns: False
+
+ # Unique Colors for Column Values
+ # Color format: 'bg:#BG-HEX #FG-HEX STYLE'
+ # All parts are optional.
+ # Empty strings will leave styling unchanged.
+ column_colors:
+ # Column name
+ time:
+ level:
+ metadata1:
+ # Field values
+ # Default will be applied if no match found
+ default: '#98be65'
+ BATTERY: 'bg:#6699cc #000000 bold'
+ CORE1: 'bg:#da8548 #000000 bold'
+ CORE2: 'bg:#66cccc #000000 bold'
+ metadata2:
+ default: '#ffcc66'
+ APP: 'bg:#ff6c6b #000000 bold'
+ WIFI: '#555555'
+
+ # Each window column is normally aligned side by side in vertical splits. You
+ # can change this to one group of windows on top of the other with horizontal
+ # splits using this method
+
+ # Default: vertical
+ window_column_split_method: vertical
+
+ # Window Layout
+ windows:
+ # First window column (vertical split)
+ # Each split should have a unique name and include either
+ # 'stacked' or 'tabbed' to select a window pane display method.
+ Split 1 stacked:
+ # Items here are window titles, each should be unique.
+ # Window 1
+ Device Logs:
+ height: 33 # Weighted value for window height
+ hidden: False # Hide this window if True
+ # Window 2
+ Python Repl:
+ height: 67
+ # Window 3
+ Host Logs:
+ hidden: True
+
+ # Second window column
+ Split 2 tabbed:
+ # This is a duplicate of the existing 'Device Logs' window.
+ # The title is 'NEW DEVICE'
+ NEW DEVICE:
+ duplicate_of: Device Logs
+ # Log filters are defined here
+ filters:
+ # Metadata column names here or 'all'
+ source_name:
+ # Matching method name here
+ # regex, regex-inverted, string, string-inverted
+ regex: 'USB'
+ module:
+ # An inverted match will remove matching log lines
+ regex-inverted: 'keyboard'
+ NEW HOST:
+ duplicate_of: Host Logs
+ filters:
+ all:
+ string: 'FLASH'
+
+ # Third window column
+ Split 3 tabbed:
+ # This is a brand new log Window
+ Keyboard Logs - IBM:
+ loggers:
+ # Python logger names to include in this log window
+ my_cool_keyboard_device:
+ # Level the logger should be set to.
+ level: DEBUG
+ # The empty string logger name is the root Python logger.
+ # In most cases this should capture all log messages.
+ '':
+ level: DEBUG
+ filters:
+ all:
+ regex: 'IBM Model M'
+ Keyboard Logs - Apple:
+ loggers:
+ my_cool_keyboard_device:
+ level: DEBUG
+ filters:
+ all:
+ regex: 'Apple.*USB'
+
+ # Command Runner dialog size and position
+ command_runner:
+ width: 80
+ height: 10
+ position:
+ top: 3 # 3 lines below the top edge of the screen
+ # Alternatively one of these options can be used instead:
+ # bottom: 2 # 2 lines above the bottom edge of the screen
+ # left: 2 # 2 lines away from the left edge of the screen
+ # right: 2 # 2 lines away from the right edge of the screen
+
+ # Key bindings can be changed as well with the following format:
+ # named-command: [ list_of_keys ]
+ # Where list_of_keys is a string of keys one for each alternate key
+ # To see all named commands open '[Help] > View Key Binding Config'
+ # See below for the names of special keys
+ key_bindings:
+ log-pane.move-cursor-up:
+ - j
+ - up
+ log-pane.move-cursor-down:
+ - k
+ - down
+ log-pane.search-next-match:
+ - n
+ log-pane.search-previous-match:
+ - N
+
+ # Chorded keys are supported.
+ # For example, 'z t' means pressing z quickly followed by t.
+ log-pane.shift-line-to-top:
+ - z t
+ log-pane.shift-line-to-center:
+ - z z
+
+Changing Keyboard Shortcuts
+---------------------------
+
+Pigweed Console uses `prompt_toolkit
+<https://python-prompt-toolkit.readthedocs.io/en/latest/>`_ to manage its
+keybindings.
+
+Bindings can be changed in the YAML config file under the ``key_bindings:``
+section by adding a named function followed by a list of keys to bind. For
+example this config sets the keys for log pane cursor movement.
+
+- Moving down is set to :kbd:`j` or the :kbd:`Down` arrow.
+- Moving up is set to :kbd:`k` or the :kbd:`Up` arrow.
+
+.. code-block:: yaml
+
+ key_bindings:
+ log-pane.move-cursor-down:
+ - j
+ - down
+ log-pane.move-cursor-up:
+ - k
+ - up
+
+List of Special Key Names
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This table is from prompt_toolkit's :bdg-link-primary-line:`List of special keys
+<https://python-prompt-toolkit.readthedocs.io/en/latest/pages/advanced_topics/key_bindings.html#list-of-special-keys>`.
+
+.. list-table::
+ :widths: 30 70
+ :header-rows: 1
+
+ * - Keyboard Function
+ - Key Values
+
+ * - Literal characters
+ - ``a b c d e f g h i j k l m n o p q r s t u v w x y z``
+ ``A B C D E F G H I J K L M N O P Q R S T U V W X Y Z``
+ ``1 2 3 4 5 6 7 8 9 0``
+ ``! @ # $ % ^ & * ( )``
+ ``- _ + = ~``
+
+ * - Escape and Shift-Escape
+ - ``escape`` ``s-escape``
+
+ * - Arrows
+ - ``left`` ``right`` ``up`` ``down``
+
+ * - Navigation
+ - ``home`` ``end`` ``delete`` ``pageup`` ``pagedown`` ``insert``
+
+ * - Control-letter
+ - ``c-a c-b c-c c-d c-e c-f c-g c-h c-i c-j c-k c-l c-m``
+ ``c-n c-o c-p c-q c-r c-s c-t c-u c-v c-w c-x c-y c-z``
+
+ * - Control-number
+ - ``c-1`` ``c-2`` ``c-3`` ``c-4`` ``c-5`` ``c-6`` ``c-7`` ``c-8`` ``c-9`` ``c-0``
+
+ * - Control-arrow
+ - ``c-left`` ``c-right`` ``c-up`` ``c-down``
+
+ * - Other control keys
+ - ``c-@`` ``c-\`` ``c-]`` ``c-^`` ``c-_`` ``c-delete``
+
+ * - Shift-arrow
+ - ``s-left`` ``s-right`` ``s-up`` ``s-down``
+
+ * - Control-Shift-arrow
+ - ``c-s-left`` ``c-s-right`` ``c-s-up`` ``c-s-down``
+
+ * - Other Shift keys
+ - ``s-delete`` ``s-tab``
+
+ * - F Keys
+ - ``f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12``
+ ``f13 f14 f15 f16 f17 f18 f19 f20 f21 f22 f23 f24``
+
+There are some key aliases as well. Most of these exist due to how keys are
+processed in VT100 terminals. For example when pressing :kbd:`Tab` terminal
+emulators receive :kbd:`Ctrl-i`.
+
+.. list-table::
+ :widths: 40 60
+ :header-rows: 1
+
+ * - Key
+ - Key Value Alias
+
+ * - Space
+ - ``space``
+
+ * - ``c-h``
+ - ``backspace``
+
+ * - ``c-@``
+ - ``c-space``
+
+ * - ``c-m``
+ - ``enter``
+
+ * - ``c-i``
+ - ``tab``
+
+Binding Alt / Option / Meta
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In terminals the :kbd:`Alt` key is converted into a leading :kbd:`Escape` key
+press. For example pressing :kbd:`Alt-t` actually sends the :kbd:`Escape` key
+followed by the :kbd:`t` key. Similarly :kbd:`Ctrl-Alt-t` sends :kbd:`Escape`
+followed by :kbd:`Ctrl-t`.
+
+To bind :kbd:`Alt` (or :kbd:`Option` on MacOS) add ``escape`` before the key
+that should be modified.
+
+.. code-block:: yaml
+
+ key_bindings:
+ window-manager.move-pane-down:
+ - escape c-up # Alt-Ctrl-up
+ window-manager.move-pane-left:
+ - escape c-left # Alt-Ctrl-left
+ window-manager.move-pane-right:
+ - escape c-right # Alt-Ctrl-right
+ window-manager.move-pane-up:
+ - escape c-down # Alt-Ctrl-down
+
+Key Sequence Bindings
+~~~~~~~~~~~~~~~~~~~~~
+
+Bindings can consist of multiple key presses in sequence. This is also known as
+chorded keys. Multiple keys separated by spaces define a chorded key
+binding. For example to bind :kbd:`z` quickly followed by :kbd:`t` use ``z t``.
+
+.. code-block:: yaml
+
+ key_bindings:
+ log-pane.shift-line-to-top:
+ - z t
+ log-pane.shift-line-to-center:
+ - z z
Known Issues
@@ -680,7 +929,8 @@ Log Window
~~~~~~~~~~
- Tab character rendering will not work in the log pane view. They will
- appear as ``^I`` since prompt_toolkit can't render them. See this issue for details:
+ appear as ``^I`` since prompt_toolkit can't render them. See this issue for
+ details:
https://github.com/prompt-toolkit/python-prompt-toolkit/issues/556
diff --git a/pw_console/py/pw_console/embed.py b/pw_console/py/pw_console/embed.py
index 0146ead3e..5ef83f065 100644
--- a/pw_console/py/pw_console/embed.py
+++ b/pw_console/py/pw_console/embed.py
@@ -16,7 +16,7 @@
import asyncio
import logging
from pathlib import Path
-from typing import Dict, List, Iterable, Optional, Union
+from typing import Any, Dict, List, Iterable, Optional, Union
from prompt_toolkit.completion import WordCompleter
@@ -27,6 +27,13 @@ import pw_console.python_logging
from pw_console.widgets import WindowPane, WindowPaneToolbar
+def _set_console_app_instance(plugin: Any, console_app: ConsoleApp) -> None:
+ if hasattr(plugin, 'pw_console_init'):
+ plugin.pw_console_init(console_app)
+ else:
+ plugin.application = console_app
+
+
class PwConsoleEmbed:
"""Embed class for customizing the console before startup."""
@@ -43,7 +50,9 @@ class PwConsoleEmbed:
config_file_path: Optional[Union[str, Path]] = None) -> None:
"""Call this to embed pw console at the call point within your program.
- Example usage: ::
+ Example usage:
+
+ .. code-block:: python
import logging
@@ -56,10 +65,10 @@ class PwConsoleEmbed:
loggers={
'Host Logs': [
logging.getLogger(__package__),
- logging.getLogger(__file__)
+ logging.getLogger(__file__),
],
'Device Logs': [
- logging.getLogger('usb_gadget')
+ logging.getLogger('usb_gadget'),
],
},
app_title='My Awesome Console',
@@ -84,9 +93,13 @@ class PwConsoleEmbed:
table. Similar to what is returned by `globals()`.
local_vars: Dictionary representing the desired local symbol
table. Similar to what is returned by `locals()`.
- loggers: Dict with keys of log window titles and values of
- `logging.getLogger()` instances in lists. Each key that should
- be shown in the pw console user interface.
+ loggers: Dict with keys of log window titles and values of either:
+
+ 1. List of `logging.getLogger()
+ <https://docs.python.org/3/library/logging.html#logging.getLogger>`_
+ instances.
+ 2. A single pw_console.log_store.LogStore instance.
+
app_title: Custom title text displayed in the user interface.
repl_startup_message: Custom text shown by default in the repl
output pane.
@@ -105,7 +118,7 @@ class PwConsoleEmbed:
self.config_file_path = Path(
config_file_path) if config_file_path else None
- self.console_app = None
+ self.console_app: Optional[ConsoleApp] = None
self.extra_completers: List = []
self.setup_python_logging_called = False
@@ -146,7 +159,7 @@ class PwConsoleEmbed:
def add_sentence_completer(self,
word_meta_dict: Dict[str, str],
- ignore_case=True):
+ ignore_case=True) -> None:
"""Include a custom completer that matches on the entire repl input.
Args:
@@ -170,11 +183,13 @@ class PwConsoleEmbed:
self.extra_completers.append(word_completer)
- def _setup_log_panes(self):
+ def _setup_log_panes(self) -> None:
"""Add loggers to ConsoleApp log pane(s)."""
if not self.loggers:
return
+ assert isinstance(self.console_app, ConsoleApp)
+
if isinstance(self.loggers, list):
self.console_app.add_log_handler('Logs', self.loggers)
@@ -183,27 +198,49 @@ class PwConsoleEmbed:
window_pane = self.console_app.add_log_handler(
window_title, logger_instances)
- if window_pane.pane_title() in self.hidden_by_default_windows:
+ if (window_pane and window_pane.pane_title()
+ in self.hidden_by_default_windows):
window_pane.show_pane = False
- def setup_python_logging(self, last_resort_filename: Optional[str] = None):
- """Disable log handlers for full screen prompt_toolkit applications.
+ def setup_python_logging(
+ self,
+ last_resort_filename: Optional[str] = None,
+ loggers_with_no_propagation: Optional[Iterable[logging.Logger]] = None
+ ) -> None:
+ """Setup friendly logging for full-screen prompt_toolkit applications.
+
+ This function sets up Python log handlers to be friendly for full-screen
+ prompt_toolkit applications. That is, logging to terminal STDOUT and
+ STDERR is disabled so the terminal user interface can be drawn.
+
+ Specifically, all Python STDOUT and STDERR log handlers are
+ disabled. It also sets `log propagation to True
+    <https://docs.python.org/3/library/logging.html#logging.Logger.propagate>`_
+ to ensure that all log messages are sent to the root logger.
Args:
last_resort_filename: If specified use this file as a fallback for
- unhandled python logging messages. Normally Python will output
+ unhandled Python logging messages. Normally Python will output
any log messages with no handlers to STDERR as a fallback. If
- none, a temp file will be created instead.
+ None, a temp file will be created instead. See Python
+ documentation on `logging.lastResort
+ <https://docs.python.org/3/library/logging.html#logging.lastResort>`_
+ for more info.
+ loggers_with_no_propagation: List of logger instances to skip
+ setting ``propagate = True``. This is useful if you would like
+ log messages from a particular source to not appear in the root
+ logger.
"""
self.setup_python_logging_called = True
- pw_console.python_logging.setup_python_logging(last_resort_filename)
+ pw_console.python_logging.setup_python_logging(
+ last_resort_filename, loggers_with_no_propagation)
- def hide_windows(self, *window_titles):
+ def hide_windows(self, *window_titles) -> None:
"""Hide window panes specified by title on console startup."""
for window_title in window_titles:
self.hidden_by_default_windows.append(window_title)
- def embed(self):
+ def embed(self) -> None:
"""Start the console."""
# Create the ConsoleApp instance.
@@ -215,7 +252,7 @@ class PwConsoleEmbed:
app_title=self.app_title,
extra_completers=self.extra_completers,
)
- PW_CONSOLE_APP_CONTEXTVAR.set(self.console_app)
+ PW_CONSOLE_APP_CONTEXTVAR.set(self.console_app) # type: ignore
# Setup Python logging and log panes.
if not self.setup_python_logging_called:
self.setup_python_logging()
@@ -223,7 +260,7 @@ class PwConsoleEmbed:
# Add window pane plugins to the layout.
for window_pane in self.window_plugins:
- window_pane.application = self.console_app
+ _set_console_app_instance(window_pane, self.console_app)
# Hide window plugins if the title is hidden by default.
if window_pane.pane_title() in self.hidden_by_default_windows:
window_pane.show_pane = False
@@ -231,10 +268,10 @@ class PwConsoleEmbed:
# Add toolbar plugins to the layout.
for toolbar in self.top_toolbar_plugins:
- toolbar.application = self.console_app
+ _set_console_app_instance(toolbar, self.console_app)
self.console_app.window_manager.add_top_toolbar(toolbar)
for toolbar in self.bottom_toolbar_plugins:
- toolbar.application = self.console_app
+ _set_console_app_instance(toolbar, self.console_app)
self.console_app.window_manager.add_bottom_toolbar(toolbar)
# Rebuild prompt_toolkit containers, menu items, and help content with
@@ -247,6 +284,10 @@ class PwConsoleEmbed:
self.console_app.apply_window_config()
+ # Hide the repl pane if it's in the hidden windows list.
+ if 'Python Repl' in self.hidden_by_default_windows:
+ self.console_app.repl_pane.show_pane = False
+
# Start a thread for running user code.
self.console_app.start_user_code_thread()
diff --git a/pw_console/py/pw_console/help_window.py b/pw_console/py/pw_console/help_window.py
index 758a5f68f..d8ca7ad55 100644
--- a/pw_console/py/pw_console/help_window.py
+++ b/pw_console/py/pw_console/help_window.py
@@ -36,6 +36,7 @@ from prompt_toolkit.lexers import PygmentsLexer
from prompt_toolkit.widgets import Box, TextArea
from pygments.lexers.markup import RstLexer # type: ignore
+from pygments.lexers.data import YamlLexer # type: ignore
import pw_console.widgets.mouse_handlers
if TYPE_CHECKING:
@@ -64,18 +65,24 @@ class HelpWindow(ConditionalContainer):
focus_on_click=True,
scrollbar=True,
style='class:help_window_content',
+ wrap_lines=False,
**kwargs,
)
# Additional keybindings for the text area.
key_bindings = KeyBindings()
+ register = self.application.prefs.register_keybinding
- @key_bindings.add('q')
- @key_bindings.add('f1')
+ @register('help-window.close', key_bindings)
def _close_window(_event: KeyPressEvent) -> None:
"""Close the current dialog window."""
self.toggle_display()
+ @register('help-window.copy-all', key_bindings)
+ def _copy_all(_event: KeyPressEvent) -> None:
+ """Close the current dialog window."""
+ self.copy_all_text()
+
help_text_area.control.key_bindings = key_bindings
return help_text_area
@@ -109,11 +116,27 @@ class HelpWindow(ConditionalContainer):
close_mouse_handler = functools.partial(
pw_console.widgets.mouse_handlers.on_click, self.toggle_display)
+ copy_mouse_handler = functools.partial(
+ pw_console.widgets.mouse_handlers.on_click, self.copy_all_text)
toolbar_padding = 1
toolbar_title = ' ' * toolbar_padding
toolbar_title += self.pane_title()
+ buttons = []
+ buttons.extend(
+ pw_console.widgets.checkbox.to_keybind_indicator(
+ 'Ctrl-c',
+ 'Copy All',
+ copy_mouse_handler,
+ base_style='class:toolbar-button-active'))
+ buttons.append(('', ' '))
+ buttons.extend(
+ pw_console.widgets.checkbox.to_keybind_indicator(
+ 'q',
+ 'Close',
+ close_mouse_handler,
+ base_style='class:toolbar-button-active'))
top_toolbar = VSplit(
[
Window(
@@ -130,12 +153,7 @@ class HelpWindow(ConditionalContainer):
dont_extend_width=False,
),
Window(
- content=FormattedTextControl(
- pw_console.widgets.checkbox.to_keybind_indicator(
- 'q',
- 'Close',
- close_mouse_handler,
- base_style='class:toolbar-button-active')),
+ content=FormattedTextControl(buttons),
align=WindowAlign.RIGHT,
dont_extend_width=True,
),
@@ -175,6 +193,11 @@ class HelpWindow(ConditionalContainer):
object."""
return self.container
+ def copy_all_text(self):
+ """Copy all text in the Python input to the system clipboard."""
+ self.application.application.clipboard.set_text(
+ self.help_text_area.buffer.text)
+
def toggle_display(self):
"""Toggle visibility of this help window."""
# Toggle state variable.
@@ -203,9 +226,17 @@ class HelpWindow(ConditionalContainer):
scrollbar_padding = 1
scrollbar_width = 1
- return self.max_line_length + (left_side_frame_and_padding_width +
- right_side_frame_and_padding_width +
- scrollbar_padding + scrollbar_width)
+ desired_width = self.max_line_length + (
+ left_side_frame_and_padding_width +
+ right_side_frame_and_padding_width + scrollbar_padding +
+ scrollbar_width)
+ desired_width = max(60, desired_width)
+
+ window_manager_width = (
+ self.application.window_manager.current_window_manager_width)
+ if not window_manager_width:
+ window_manager_width = 80
+ return min(desired_width, window_manager_width)
def load_user_guide(self):
rstdoc = Path(__file__).parent / 'docs/user_guide.rst'
@@ -223,6 +254,18 @@ class HelpWindow(ConditionalContainer):
text=rst_text,
)
+ def load_yaml_text(self, content: str):
+ max_line_length = 0
+ for line in content.splitlines():
+ if 'https://' not in line and len(line) > max_line_length:
+ max_line_length = len(line)
+ self.max_line_length = max_line_length
+
+ self.help_text_area = self._create_help_text_area(
+ lexer=PygmentsLexer(YamlLexer),
+ text=content,
+ )
+
def generate_help_text(self):
"""Generate help text based on added key bindings."""
@@ -279,11 +322,12 @@ class HelpWindow(ConditionalContainer):
description, list())
# Save the name of the key e.g. F1, q, ControlQ, ControlUp
- key_name = '-'.join(
+ key_name = ' '.join(
[getattr(key, 'name', str(key)) for key in binding.keys])
key_name = key_name.replace('Control', 'Ctrl-')
key_name = key_name.replace('Shift', 'Shift-')
- key_name = key_name.replace('Escape-', 'Alt-')
+ key_name = key_name.replace('Escape ', 'Alt-')
+ key_name = key_name.replace('Alt-Ctrl-', 'Ctrl-Alt-')
key_name = key_name.replace('BackTab', 'Shift-Tab')
key_list.append(key_name)
diff --git a/pw_console/py/pw_console/key_bindings.py b/pw_console/py/pw_console/key_bindings.py
index cec1f613b..c44d8663c 100644
--- a/pw_console/py/pw_console/key_bindings.py
+++ b/pw_console/py/pw_console/key_bindings.py
@@ -14,6 +14,7 @@
# pylint: skip-file
"""Console key bindings."""
import logging
+from typing import Dict, List
from prompt_toolkit.filters import (
Condition,
@@ -21,6 +22,7 @@ from prompt_toolkit.filters import (
)
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.key_binding.bindings.focus import focus_next, focus_previous
+from prompt_toolkit.key_binding.key_bindings import Binding
import pw_console.pw_ptpython_repl
@@ -28,8 +30,72 @@ __all__ = ('create_key_bindings', )
_LOG = logging.getLogger(__package__)
-
-def create_key_bindings(console_app):
+DEFAULT_KEY_BINDINGS: Dict[str, List[str]] = {
+ 'global.open-user-guide': ['f1'],
+ 'global.open-menu-search': ['c-p'],
+ 'global.focus-previous-widget': ['c-left'],
+ 'global.focus-next-widget': ['c-right', 's-tab'],
+ 'global.exit-no-confirmation': ['c-x c-c'],
+ 'global.exit-with-confirmation': ['c-d'],
+ 'log-pane.shift-line-to-top': ['z t'],
+ 'log-pane.shift-line-to-center': ['z z'],
+ 'log-pane.toggle-follow': ['f'],
+ 'log-pane.toggle-wrap-lines': ['w'],
+ 'log-pane.toggle-table-view': ['t'],
+ 'log-pane.duplicate-log-pane': ['insert'],
+ 'log-pane.remove-duplicated-log-pane': ['delete'],
+ 'log-pane.clear-history': ['C'],
+ 'log-pane.toggle-follow': ['f'],
+ 'log-pane.move-cursor-up': ['up', 'k'],
+ 'log-pane.move-cursor-down': ['down', 'j'],
+ 'log-pane.visual-select-up': ['s-up'],
+ 'log-pane.visual-select-down': ['s-down'],
+ 'log-pane.visual-select-all': ['N', 'c-r'],
+ 'log-pane.deselect-cancel-search': ['c-c'],
+ 'log-pane.scroll-page-up': ['pageup'],
+ 'log-pane.scroll-page-down': ['pagedown'],
+ 'log-pane.scroll-to-top': ['g'],
+ 'log-pane.scroll-to-bottom': ['G'],
+ 'log-pane.save-copy': ['c-o'],
+ 'log-pane.search': ['/', 'c-f'],
+ 'log-pane.search-next-match': ['n', 'c-s', 'c-g'],
+ 'log-pane.search-previous-match': ['N', 'c-r'],
+ 'log-pane.search-apply-filter': ['escape c-f'],
+ 'log-pane.clear-filters': ['escape c-r'],
+ 'search-toolbar.toggle-column': ['c-t'],
+ 'search-toolbar.toggle-invert': ['c-v'],
+ 'search-toolbar.toggle-matcher': ['c-n'],
+ 'search-toolbar.cancel': ['escape', 'c-c', 'c-d'],
+ 'search-toolbar.create-filter': ['escape c-f'],
+ 'window-manager.move-pane-left': ['escape c-left'], # Alt-Ctrl-
+ 'window-manager.move-pane-right': ['escape c-right'], # Alt-Ctrl-
+ # NOTE: c-up and c-down seem swapped in prompt-toolkit
+ 'window-manager.move-pane-down': ['escape c-up'], # Alt-Ctrl-
+ 'window-manager.move-pane-up': ['escape c-down'], # Alt-Ctrl-
+ 'window-manager.enlarge-pane': ['escape ='], # Alt-= (mnemonic: Alt Plus)
+ 'window-manager.shrink-pane':
+ ['escape -'], # Alt-minus (mnemonic: Alt Minus)
+ 'window-manager.shrink-split': ['escape ,'], # Alt-, (mnemonic: Alt <)
+ 'window-manager.enlarge-split': ['escape .'], # Alt-. (mnemonic: Alt >)
+ 'window-manager.focus-prev-pane': ['escape c-p'], # Ctrl-Alt-p
+ 'window-manager.focus-next-pane': ['escape c-n'], # Ctrl-Alt-n
+ 'window-manager.balance-window-panes': ['c-u'],
+ 'python-repl.copy-output-selection': ['c-c'],
+ 'python-repl.copy-all-output': ['escape c-c'],
+ 'python-repl.copy-clear-or-cancel': ['c-c'],
+ 'python-repl.paste-to-input': ['c-v'],
+ 'save-as-dialog.cancel': ['escape', 'c-c', 'c-d'],
+ 'quit-dialog.no': ['escape', 'n', 'c-c'],
+ 'quit-dialog.yes': ['y', 'c-d'],
+ 'command-runner.cancel': ['escape', 'c-c'],
+ 'command-runner.select-previous-item': ['up', 's-tab'],
+ 'command-runner.select-next-item': ['down', 'tab'],
+ 'help-window.close': ['q', 'f1', 'escape'],
+ 'help-window.copy-all': ['c-c'],
+}
+
+
+def create_key_bindings(console_app) -> KeyBindings:
"""Create custom key bindings.
This starts with the key bindings, defined by `prompt-toolkit`, but adds the
@@ -37,10 +103,12 @@ def create_key_bindings(console_app):
reference is passed in so key bind functions can access it.
"""
- bindings = KeyBindings()
+ key_bindings = KeyBindings()
+ register = console_app.prefs.register_keybinding
- @bindings.add(
- 'f1', filter=Condition(lambda: not console_app.modal_window_is_open()))
+ @register('global.open-user-guide',
+ key_bindings,
+ filter=Condition(lambda: not console_app.modal_window_is_open()))
def show_help(event):
"""Toggle user guide window."""
console_app.user_guide_window.toggle_display()
@@ -48,13 +116,19 @@ def create_key_bindings(console_app):
# F2 is ptpython settings
# F3 is ptpython history
- @bindings.add('c-left')
+ @register('global.open-menu-search',
+ key_bindings,
+ filter=Condition(lambda: not console_app.modal_window_is_open()))
+ def show_command_runner(event):
+ """Open command runner window."""
+ console_app.open_command_runner_main_menu()
+
+ @register('global.focus-previous-widget', key_bindings)
def app_focus_previous(event):
"""Move focus to the previous widget."""
focus_previous(event)
- @bindings.add('s-tab')
- @bindings.add('c-right')
+ @register('global.focus-next-widget', key_bindings)
def app_focus_next(event):
"""Move focus to the next widget."""
focus_next(event)
@@ -62,18 +136,21 @@ def create_key_bindings(console_app):
# Bindings for when the ReplPane input field is in focus.
# These are hidden from help window global keyboard shortcuts since the
# method names end with `_hidden`.
- @bindings.add('c-c', filter=has_focus(console_app.pw_ptpython_repl))
+ @register('python-repl.copy-clear-or-cancel',
+ key_bindings,
+ filter=has_focus(console_app.pw_ptpython_repl))
def handle_ctrl_c_hidden(event):
"""Reset the python repl on Ctrl-c"""
console_app.repl_pane.ctrl_c()
- @bindings.add('c-x', 'c-c')
+ @register('global.exit-no-confirmation', key_bindings)
def quit_no_confirm(event):
"""Quit without confirmation."""
event.app.exit()
- @bindings.add(
- 'c-d',
+ @register(
+ 'global.exit-with-confirmation',
+ key_bindings,
filter=console_app.pw_ptpython_repl.input_empty_if_in_focus_condition(
) | has_focus(console_app.quit_dialog))
def quit(event):
@@ -82,17 +159,18 @@ def create_key_bindings(console_app):
# delete forward characters instead.
console_app.quit_dialog.open_dialog()
- @bindings.add('c-v', filter=has_focus(console_app.pw_ptpython_repl))
+ @register('python-repl.paste-to-input',
+ key_bindings,
+ filter=has_focus(console_app.pw_ptpython_repl))
def paste_into_repl(event):
"""Reset the python repl on Ctrl-c"""
console_app.repl_pane.paste_system_clipboard_to_input_buffer()
- @bindings.add(
- 'escape',
- 'c-c', # Alt-Ctrl-c
- filter=console_app.repl_pane.input_or_output_has_focus())
- def paste_into_repl(event):
+ @register('python-repl.copy-all-output',
+ key_bindings,
+ filter=console_app.repl_pane.input_or_output_has_focus())
+ def copy_repl_output_text(event):
"""Copy all Python output to the system clipboard."""
- console_app.repl_pane.copy_text()
+ console_app.repl_pane.copy_all_output_text()
- return bindings
+ return key_bindings
diff --git a/pw_console/py/pw_console/log_pane.py b/pw_console/py/pw_console/log_pane.py
index 474db9c26..eb4fbfda4 100644
--- a/pw_console/py/pw_console/log_pane.py
+++ b/pw_console/py/pw_console/log_pane.py
@@ -23,13 +23,18 @@ from prompt_toolkit.filters import (
Condition,
has_focus,
)
-from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent
+from prompt_toolkit.formatted_text import StyleAndTextTuples
+from prompt_toolkit.key_binding import (
+ KeyBindings,
+ KeyPressEvent,
+ KeyBindingsBase,
+)
from prompt_toolkit.layout import (
ConditionalContainer,
Float,
FloatContainer,
- FormattedTextControl,
UIContent,
+ UIControl,
VerticalAlign,
Window,
)
@@ -44,6 +49,7 @@ from pw_console.log_pane_toolbars import (
)
from pw_console.log_pane_saveas_dialog import LogPaneSaveAsDialog
from pw_console.log_pane_selection_dialog import LogPaneSelectionDialog
+from pw_console.log_store import LogStore
from pw_console.search_toolbar import SearchToolbar
from pw_console.filter_toolbar import FilterToolbar
from pw_console.widgets import (
@@ -60,140 +66,188 @@ _LOG_OUTPUT_SCROLL_AMOUNT = 5
_LOG = logging.getLogger(__package__)
-class LogContentControl(FormattedTextControl):
+class LogContentControl(UIControl):
"""LogPane prompt_toolkit UIControl for displaying LogContainer lines."""
- def create_content(self, width: int, height: Optional[int]) -> UIContent:
- # Save redered height
- if height:
- self.log_pane.last_log_content_height = height
- return super().create_content(width, height)
-
- def __init__(self, log_pane: 'LogPane', *args, **kwargs) -> None:
+ def __init__(self, log_pane: 'LogPane') -> None:
# pylint: disable=too-many-locals
self.log_pane = log_pane
+ self.log_view = log_pane.log_view
# Mouse drag visual selection flags.
self.visual_select_mode_drag_start = False
self.visual_select_mode_drag_stop = False
+ self.uicontent: Optional[UIContent] = None
+ self.lines: List[StyleAndTextTuples] = []
+
# Key bindings.
key_bindings = KeyBindings()
+ register = log_pane.application.prefs.register_keybinding
+
+ @register('log-pane.shift-line-to-top', key_bindings)
+ def _shift_log_to_top(_event: KeyPressEvent) -> None:
+ """Shift the selected log line to the top."""
+ self.log_view.move_selected_line_to_top()
+
+ @register('log-pane.shift-line-to-center', key_bindings)
+ def _shift_log_to_center(_event: KeyPressEvent) -> None:
+ """Shift the selected log line to the center."""
+ self.log_view.center_log_line()
- @key_bindings.add('w')
+ @register('log-pane.toggle-wrap-lines', key_bindings)
def _toggle_wrap_lines(_event: KeyPressEvent) -> None:
"""Toggle log line wrapping."""
self.log_pane.toggle_wrap_lines()
- @key_bindings.add('t')
+ @register('log-pane.toggle-table-view', key_bindings)
def _toggle_table_view(_event: KeyPressEvent) -> None:
"""Toggle table view."""
self.log_pane.toggle_table_view()
- @key_bindings.add('insert')
+ @register('log-pane.duplicate-log-pane', key_bindings)
def _duplicate(_event: KeyPressEvent) -> None:
"""Duplicate this log pane."""
self.log_pane.duplicate()
- @key_bindings.add('delete')
+ @register('log-pane.remove-duplicated-log-pane', key_bindings)
def _delete(_event: KeyPressEvent) -> None:
"""Remove log pane."""
if self.log_pane.is_a_duplicate:
self.log_pane.application.window_manager.remove_pane(
self.log_pane)
- @key_bindings.add('C')
+ @register('log-pane.clear-history', key_bindings)
def _clear_history(_event: KeyPressEvent) -> None:
"""Clear log pane history."""
self.log_pane.clear_history()
- @key_bindings.add('g')
+ @register('log-pane.scroll-to-top', key_bindings)
def _scroll_to_top(_event: KeyPressEvent) -> None:
"""Scroll to top."""
- self.log_pane.log_view.scroll_to_top()
+ self.log_view.scroll_to_top()
- @key_bindings.add('G')
+ @register('log-pane.scroll-to-bottom', key_bindings)
def _scroll_to_bottom(_event: KeyPressEvent) -> None:
"""Scroll to bottom."""
- self.log_pane.log_view.scroll_to_bottom()
+ self.log_view.scroll_to_bottom()
- @key_bindings.add('f')
+ @register('log-pane.toggle-follow', key_bindings)
def _toggle_follow(_event: KeyPressEvent) -> None:
"""Toggle log line following."""
self.log_pane.toggle_follow()
- @key_bindings.add('up')
- @key_bindings.add('k')
+ @register('log-pane.move-cursor-up', key_bindings)
def _up(_event: KeyPressEvent) -> None:
"""Move cursor up."""
- self.log_pane.log_view.scroll_up()
+ self.log_view.scroll_up()
- @key_bindings.add('down')
- @key_bindings.add('j')
+ @register('log-pane.move-cursor-down', key_bindings)
def _down(_event: KeyPressEvent) -> None:
"""Move cursor down."""
- self.log_pane.log_view.scroll_down()
+ self.log_view.scroll_down()
- @key_bindings.add('s-up')
+ @register('log-pane.visual-select-up', key_bindings)
def _visual_select_up(_event: KeyPressEvent) -> None:
"""Select previous log line."""
- self.log_pane.log_view.visual_select_up()
+ self.log_view.visual_select_up()
- @key_bindings.add('s-down')
+ @register('log-pane.visual-select-down', key_bindings)
def _visual_select_down(_event: KeyPressEvent) -> None:
"""Select next log line."""
- self.log_pane.log_view.visual_select_down()
+ self.log_view.visual_select_down()
- @key_bindings.add('pageup')
+ @register('log-pane.scroll-page-up', key_bindings)
def _pageup(_event: KeyPressEvent) -> None:
"""Scroll the logs up by one page."""
- self.log_pane.log_view.scroll_up_one_page()
+ self.log_view.scroll_up_one_page()
- @key_bindings.add('pagedown')
+ @register('log-pane.scroll-page-down', key_bindings)
def _pagedown(_event: KeyPressEvent) -> None:
"""Scroll the logs down by one page."""
- self.log_pane.log_view.scroll_down_one_page()
+ self.log_view.scroll_down_one_page()
- @key_bindings.add('c-o')
+ @register('log-pane.save-copy', key_bindings)
def _start_saveas(_event: KeyPressEvent) -> None:
"""Save logs to a file."""
self.log_pane.start_saveas()
- @key_bindings.add('/')
- @key_bindings.add('c-f')
+ @register('log-pane.search', key_bindings)
def _start_search(_event: KeyPressEvent) -> None:
"""Start searching."""
self.log_pane.start_search()
- @key_bindings.add('n')
- @key_bindings.add('c-s')
- @key_bindings.add('c-g')
+ @register('log-pane.search-next-match', key_bindings)
def _next_search(_event: KeyPressEvent) -> None:
"""Next search match."""
- self.log_pane.log_view.search_forwards()
+ self.log_view.search_forwards()
- @key_bindings.add('N')
- @key_bindings.add('c-r')
+ @register('log-pane.search-previous-match', key_bindings)
def _previous_search(_event: KeyPressEvent) -> None:
"""Previous search match."""
- self.log_pane.log_view.search_backwards()
+ self.log_view.search_backwards()
- @key_bindings.add('c-l')
- def _clear_search_highlight(_event: KeyPressEvent) -> None:
- """Remove search highlighting."""
- self.log_pane.log_view.clear_search_highlighting()
+ @register('log-pane.visual-select-all', key_bindings)
+ def _select_all_logs(_event: KeyPressEvent) -> None:
+ """Clear search."""
+ self.log_pane.log_view.visual_select_all()
- @key_bindings.add('escape', 'c-f') # Alt-Ctrl-f
+ @register('log-pane.deselect-cancel-search', key_bindings)
+ def _clear_search_and_selection(_event: KeyPressEvent) -> None:
+ """Clear selection or search."""
+ if self.log_pane.log_view.visual_select_mode:
+ self.log_pane.log_view.clear_visual_selection()
+ elif self.log_pane.search_bar_active:
+ self.log_pane.search_toolbar.cancel_search()
+
+ @register('log-pane.search-apply-filter', key_bindings)
def _apply_filter(_event: KeyPressEvent) -> None:
"""Apply current search as a filter."""
- self.log_pane.log_view.apply_filter()
+ self.log_pane.search_toolbar.close_search_bar()
+ self.log_view.apply_filter()
- @key_bindings.add('escape', 'c-r') # Alt-Ctrl-r
+ @register('log-pane.clear-filters', key_bindings)
def _clear_filter(_event: KeyPressEvent) -> None:
"""Reset / erase active filters."""
- self.log_pane.log_view.clear_filters()
+ self.log_view.clear_filters()
+
+ self.key_bindings: KeyBindingsBase = key_bindings
+
+ def is_focusable(self) -> bool:
+ return True
+
+ def get_key_bindings(self) -> Optional[KeyBindingsBase]:
+ return self.key_bindings
- kwargs['key_bindings'] = key_bindings
- super().__init__(*args, **kwargs)
+ def preferred_width(self, max_available_width: int) -> int:
+ """Return the width of the longest line."""
+ line_lengths = [len(l) for l in self.lines]
+ return max(line_lengths)
+
+ def preferred_height(
+ self,
+ width: int,
+ max_available_height: int,
+ wrap_lines: bool,
+ get_line_prefix,
+ ) -> Optional[int]:
+ """Return the preferred height for the log lines."""
+ content = self.create_content(width, None)
+ return content.line_count
+
+ def create_content(self, width: int, height: Optional[int]) -> UIContent:
+ # Update lines to render
+ self.lines = self.log_view.render_content()
+
+ # Create a UIContent instance if none exists
+ if self.uicontent is None:
+ self.uicontent = UIContent(get_line=lambda i: self.lines[i],
+ line_count=len(self.lines),
+ show_cursor=False)
+
+ # Update line_count
+ self.uicontent.line_count = len(self.lines)
+
+ return self.uicontent
def mouse_handler(self, mouse_event: MouseEvent):
"""Mouse handler for this control."""
@@ -267,10 +321,12 @@ class LogPane(WindowPane):
"""LogPane class."""
# pylint: disable=too-many-instance-attributes,too-many-public-methods
+
def __init__(
self,
application: Any,
pane_title: str = 'Logs',
+ log_store: Optional[LogStore] = None,
):
super().__init__(application, pane_title)
@@ -280,7 +336,9 @@ class LogPane(WindowPane):
self.is_a_duplicate = False
# Create the log container which stores and handles incoming logs.
- self.log_view: LogView = LogView(self, self.application)
+ self.log_view: LogView = LogView(self,
+ self.application,
+ log_store=log_store)
# Log pane size variables. These are updated just befor rendering the
# pane by the LogLineHSplit class.
@@ -288,7 +346,6 @@ class LogPane(WindowPane):
self.current_log_pane_height = 0
self.last_log_pane_width = None
self.last_log_pane_height = None
- self.last_log_content_height = 0
# Search tracking
self.search_bar_active = False
@@ -297,7 +354,7 @@ class LogPane(WindowPane):
self.saveas_dialog = LogPaneSaveAsDialog(self)
self.saveas_dialog_active = False
- self.visual_selection_bar = LogPaneSelectionDialog(self)
+ self.visual_selection_dialog = LogPaneSelectionDialog(self)
# Table header bar, only shown if table view is active.
self.table_header_toolbar = TableToolbar(self)
@@ -329,16 +386,7 @@ class LogPane(WindowPane):
self.bottom_toolbar.add_button(
ToolbarButton('C', 'Clear', self.clear_history))
- self.log_content_control = LogContentControl(
- self, # parent LogPane
- # FormattedTextControl args:
- self.log_view.render_content,
- # Hide the cursor, use cursorline=True in self.log_display_window to
- # indicate currently selected line.
- show_cursor=False,
- focusable=True,
- get_cursor_position=self.log_content_control_get_cursor_position,
- )
+ self.log_content_control = LogContentControl(self)
self.log_display_window = Window(
content=self.log_content_control,
@@ -382,12 +430,11 @@ class LogPane(WindowPane):
self),
),
floats=[
- # Floating LineInfoBar
Float(top=0, right=0, height=1, content=LineInfoBar(self)),
Float(top=0,
right=0,
height=LogPaneSelectionDialog.DIALOG_HEIGHT,
- content=self.visual_selection_bar),
+ content=self.visual_selection_dialog),
Float(top=3,
left=2,
right=2,
@@ -424,7 +471,7 @@ class LogPane(WindowPane):
else:
self._pane_subtitle = self._pane_subtitle + ', ' + text
- def pane_subtitle(self):
+ def pane_subtitle(self) -> str:
if not self._pane_subtitle:
return ', '.join(self.log_view.log_store.channel_counts.keys())
logger_names = self._pane_subtitle.split(', ')
@@ -508,7 +555,7 @@ class LogPane(WindowPane):
# Menu separator
('-', None),
(
- 'Save a copy',
+ 'Save/Export a copy',
self.start_saveas,
),
('-', None),
@@ -543,7 +590,7 @@ class LogPane(WindowPane):
]
if self.is_a_duplicate:
options += [(
- 'Remove pane',
+ 'Remove/Delete pane',
functools.partial(self.application.window_manager.remove_pane,
self),
)]
@@ -561,24 +608,13 @@ class LogPane(WindowPane):
self.log_view.apply_filter,
),
(
- 'Reset active filters',
+ 'Clear/Reset active filters',
self.log_view.clear_filters,
),
]
return options
- def after_render_hook(self):
- """Run tasks after the last UI render."""
- self.reset_log_content_height()
-
- def reset_log_content_height(self):
- """Reset log line pane content height."""
- self.last_log_content_height = 0
-
- def log_content_control_get_cursor_position(self):
- return self.log_view.get_cursor_position()
-
def apply_filters_from_config(self, window_options) -> None:
if 'filters' not in window_options:
return
@@ -594,6 +630,7 @@ class LogPane(WindowPane):
invert=inverted,
field=field,
search_matcher=matcher_name,
+ interactive=False,
):
self.log_view.install_new_filter()
diff --git a/pw_console/py/pw_console/log_pane_saveas_dialog.py b/pw_console/py/pw_console/log_pane_saveas_dialog.py
index 2974abf7f..f142a8af8 100644
--- a/pw_console/py/pw_console/log_pane_saveas_dialog.py
+++ b/pw_console/py/pw_console/log_pane_saveas_dialog.py
@@ -117,10 +117,9 @@ class LogPaneSaveAsDialog(ConditionalContainer):
# Add additional keybindings for the input_field text area.
key_bindings = KeyBindings()
+ register = self.log_pane.application.prefs.register_keybinding
- @key_bindings.add('escape')
- @key_bindings.add('c-c')
- @key_bindings.add('c-d')
+ @register('save-as-dialog.cancel', key_bindings)
def _close_saveas_dialog(_event: KeyPressEvent) -> None:
"""Close save as dialog."""
self.close_dialog()
diff --git a/pw_console/py/pw_console/log_pane_selection_dialog.py b/pw_console/py/pw_console/log_pane_selection_dialog.py
index 0f72e27d7..9c76f5131 100644
--- a/pw_console/py/pw_console/log_pane_selection_dialog.py
+++ b/pw_console/py/pw_console/log_pane_selection_dialog.py
@@ -153,7 +153,7 @@ class LogPaneSelectionDialog(ConditionalContainer):
fragments.extend(
pw_console.widgets.checkbox.to_keybind_indicator(
- key='',
+ key='Ctrl-c',
description='Cancel',
mouse_handler=select_none,
base_style=button_style,
@@ -162,7 +162,7 @@ class LogPaneSelectionDialog(ConditionalContainer):
fragments.extend(
pw_console.widgets.checkbox.to_keybind_indicator(
- key='',
+ key='Ctrl-a',
description='Select All',
mouse_handler=select_all,
base_style=button_style,
diff --git a/pw_console/py/pw_console/log_pane_toolbars.py b/pw_console/py/pw_console/log_pane_toolbars.py
index 2892032b5..038b9530f 100644
--- a/pw_console/py/pw_console/log_pane_toolbars.py
+++ b/pw_console/py/pw_console/log_pane_toolbars.py
@@ -39,7 +39,7 @@ class LineInfoBar(ConditionalContainer):
"""One line bar for showing current and total log lines."""
def get_tokens(self):
"""Return formatted text tokens for display."""
- tokens = ' Log {} / {} '.format(
+ tokens = ' {} / {} '.format(
self.log_pane.log_view.get_current_line() + 1,
self.log_pane.log_view.get_total_count(),
)
diff --git a/pw_console/py/pw_console/log_screen.py b/pw_console/py/pw_console/log_screen.py
index ef1c95414..e3a0f0583 100644
--- a/pw_console/py/pw_console/log_screen.py
+++ b/pw_console/py/pw_console/log_screen.py
@@ -17,7 +17,7 @@ from __future__ import annotations
import collections
import dataclasses
import logging
-from typing import Callable, Dict, Optional, Tuple, TYPE_CHECKING
+from typing import Callable, List, Optional, Tuple, TYPE_CHECKING
from prompt_toolkit.formatted_text import (
to_formatted_text,
@@ -156,7 +156,7 @@ class LogScreen:
def _fill_top_with_empty_lines(self) -> None:
"""Add empty lines to fill the remaining empty screen space."""
for _ in range(self.height - len(self.line_buffer)):
- self.line_buffer.appendleft(ScreenLine([('', '\n')]))
+ self.line_buffer.appendleft(ScreenLine([('', '')]))
def clear_screen(self) -> None:
"""Erase all lines and fill with empty lines."""
@@ -207,21 +207,26 @@ class LogScreen:
self.height = height
def get_lines(
- self,
- marked_logs: Optional[Dict[int,
- int]] = None) -> StyleAndTextTuples:
+ self,
+ marked_logs_start: Optional[int] = None,
+ marked_logs_end: Optional[int] = None,
+ ) -> List[StyleAndTextTuples]:
"""Return lines for final display.
Styling is added for the line under the cursor."""
- if not marked_logs:
- marked_logs = {}
- all_lines: StyleAndTextTuples = []
+ if not marked_logs_start:
+ marked_logs_start = -1
+ if not marked_logs_end:
+ marked_logs_end = -1
+
+ all_lines: List[StyleAndTextTuples] = []
# Loop through a copy of the line_buffer in case it is mutated before
# this function is complete.
for i, line in enumerate(list(self.line_buffer)):
# Is this line the cursor_position? Apply line highlighting
if (i == self.cursor_position
+ and (self.cursor_position < len(self.line_buffer))
and not self.line_buffer[self.cursor_position].empty()):
# Fill in empty charaters to the width of the screen. This
# ensures the backgound is highlighted to the edge of the
@@ -233,10 +238,11 @@ class LogScreen:
)
# Apply a style to highlight this line.
- all_lines.extend(
+ all_lines.append(
to_formatted_text(new_fragments,
style='class:selected-log-line'))
- elif line.log_index in marked_logs:
+ elif line.log_index is not None and (
+ marked_logs_start <= line.log_index <= marked_logs_end):
new_fragments = fill_character_width(
line.fragments,
len(line.fragments) - 1, # -1 for the ending line break
@@ -244,12 +250,12 @@ class LogScreen:
)
# Apply a style to highlight this line.
- all_lines.extend(
+ all_lines.append(
to_formatted_text(new_fragments,
style='class:marked-log-line'))
else:
- all_lines.extend(line.fragments)
+ all_lines.append(line.fragments)
return all_lines
@@ -295,7 +301,8 @@ class LogScreen:
new_index = self.cursor_position - 1
if new_index < 0:
break
- if self.line_buffer[new_index].empty():
+ if (new_index < len(self.line_buffer)
+ and self.line_buffer[new_index].empty()):
# The next line is empty and has no content.
break
self.cursor_position -= 1
@@ -317,7 +324,8 @@ class LogScreen:
new_index = self.cursor_position + 1
if new_index >= self.height:
break
- if self.line_buffer[new_index].empty():
+ if (new_index < len(self.line_buffer)
+ and self.line_buffer[new_index].empty()):
# The next line is empty and has no content.
break
self.cursor_position += 1
@@ -337,6 +345,8 @@ class LogScreen:
def move_cursor_to_position(self, window_row: int) -> None:
"""Move the cursor to a line if there is a log message there."""
+ if window_row >= len(self.line_buffer):
+ return
if 0 <= window_row < self.height:
current_line = self.line_buffer[window_row]
if current_line.log_index is not None:
@@ -355,6 +365,9 @@ class LogScreen:
This moves the lines on screen and keeps the originally selected line
highlighted. Example use case: when jumping to a search match the
matched line will be shown at the top of the screen."""
+ if not 0 <= self.cursor_position < len(self.line_buffer):
+ return
+
current_line = self.line_buffer[self.cursor_position]
amount = max(self.cursor_position, current_line.height)
amount -= current_line.subline
@@ -373,6 +386,9 @@ class LogScreen:
This moves the lines on screen and keeps the originally selected line
highlighted. Example use case: when jumping to a search match the
matched line will be shown at the center of the screen."""
+ if not 0 <= self.cursor_position < len(self.line_buffer):
+ return
+
half_height = int(self.height / 2)
current_line = self.line_buffer[self.cursor_position]
@@ -459,6 +475,9 @@ class LogScreen:
def get_line_at_cursor_position(self) -> ScreenLine:
"""Returns the ScreenLine under the cursor."""
+ if (self.cursor_position >= len(self.line_buffer)
+ or self.cursor_position < 0):
+ return ScreenLine([('', '')])
return self.line_buffer[self.cursor_position]
def fetch_subline_down(self, line_count: int = 1) -> int:
@@ -510,6 +529,8 @@ class LogScreen:
"""Scan the screen for the first valid log_index and return it."""
log_index = None
for i in range(self.height):
+ if i >= len(self.line_buffer):
+ break
if self.line_buffer[i].log_index is not None:
log_index = self.line_buffer[i].log_index
break
@@ -518,17 +539,21 @@ class LogScreen:
def last_rendered_log_index(self) -> Optional[int]:
"""Return the last log_index shown on screen."""
log_index = None
+ if len(self.line_buffer) == 0:
+ return None
if self.line_buffer[-1].log_index is not None:
log_index = self.line_buffer[-1].log_index
return log_index
def _get_fragments_per_line(self,
- log_index: int) -> list[StyleAndTextTuples]:
+ log_index: int) -> List[StyleAndTextTuples]:
"""Return a list of lines wrapped to the screen width for a log.
Before fetching the log message this function updates the log_source and
formatting options."""
_start_log_index, log_source = self.get_log_source()
+ if log_index >= len(log_source):
+ return []
log = log_source[log_index]
table_formatter = self.get_log_formatter()
truncate_lines = not self.get_line_wrapping()
diff --git a/pw_console/py/pw_console/log_store.py b/pw_console/py/pw_console/log_store.py
index 8034d9d8f..ed6ebb447 100644
--- a/pw_console/py/pw_console/log_store.py
+++ b/pw_console/py/pw_console/log_store.py
@@ -18,7 +18,7 @@ import collections
import logging
import sys
from datetime import datetime
-from typing import Dict, List, TYPE_CHECKING
+from typing import Dict, List, Optional, TYPE_CHECKING
import pw_cli.color
@@ -32,8 +32,60 @@ if TYPE_CHECKING:
class LogStore(logging.Handler):
- """Class to hold many log events."""
- def __init__(self, prefs: ConsolePrefs):
+ """Pigweed Console logging handler.
+
+ This is a `Python logging.Handler
+ <https://docs.python.org/3/library/logging.html#handler-objects>`_ class
+ used to store logs for display in the pw_console user interface.
+
+ You may optionally add this as a handler to an existing logger
+ instances. This will be required if logs need to be captured for display in
+ the pw_console UI before the user interface is running.
+
+ Example usage:
+
+ .. code-block:: python
+
+ import logging
+
+ from pw_console import PwConsoleEmbed, LogStore
+
+ _DEVICE_LOG = logging.getLogger('usb_gadget')
+
+ # Create a log store and add as a handler.
+ device_log_store = LogStore()
+ _DEVICE_LOG.addHandler(device_log_store)
+
+ # Start communication with your device here, before embedding
+ # pw_console.
+
+ # Create the pw_console embed instance
+ console = PwConsoleEmbed(
+ global_vars=globals(),
+ local_vars=locals(),
+ loggers={
+ 'Host Logs': [
+ logging.getLogger(__package__),
+ logging.getLogger(__file__),
+ ],
+ # Set the LogStore as the value of this logger window.
+ 'Device Logs': device_log_store,
+ },
+ app_title='My Awesome Console',
+ )
+
+ console.setup_python_logging()
+ console.embed()
+ """
+ def __init__(self, prefs: Optional[ConsolePrefs] = None):
+ """Initializes the LogStore instance."""
+
+ # ConsolePrefs may not be passed on init. For example, if the user is
+ # creating a LogStore to capture log messages before console startup.
+ if not prefs:
+ prefs = ConsolePrefs(project_file=False,
+ project_user_file=False,
+ user_file=False)
self.prefs = prefs
# Log storage deque for fast addition and deletion from the beginning
# and end of the iterable.
@@ -66,10 +118,16 @@ class LogStore(logging.Handler):
# Set formatting after logging.Handler init.
self.set_formatting()
- def register_viewer(self, viewer: 'LogView'):
+ def set_prefs(self, prefs: ConsolePrefs) -> None:
+ """Set the ConsolePrefs for this LogStore."""
+ self.prefs = prefs
+ self.table.set_prefs(prefs)
+
+ def register_viewer(self, viewer: 'LogView') -> None:
+ """Register this LogStore with a LogView."""
self.registered_viewers.append(viewer)
- def set_formatting(self):
+ def set_formatting(self) -> None:
"""Setup log formatting."""
# Copy of pw_cli log formatter
colors = pw_cli.color.colors(True)
@@ -83,7 +141,7 @@ class LogStore(logging.Handler):
# Update log time character width.
example_time_string = datetime.now().strftime(timestamp_format)
- self.table.column_width_time = len(example_time_string)
+ self.table.column_widths['time'] = len(example_time_string)
def clear_logs(self):
"""Erase all stored pane lines."""
@@ -175,7 +233,7 @@ class LogStore(logging.Handler):
if self.get_total_count() > self.max_history_size:
self.byte_size -= sys.getsizeof(self.logs.popleft())
- def emit(self, record):
+ def emit(self, record) -> None:
"""Process a new log record.
This defines the logging.Handler emit() fuction which is called by
@@ -185,7 +243,6 @@ class LogStore(logging.Handler):
self._append_log(record)
# Notify viewers of new logs
for viewer in self.registered_viewers:
- # TODO(tonymd): Type of viewer does not seem to be checked
viewer.new_logs_arrived()
def render_table_header(self):
diff --git a/pw_console/py/pw_console/log_view.py b/pw_console/py/pw_console/log_view.py
index 6d12f2605..2b54f9eb2 100644
--- a/pw_console/py/pw_console/log_view.py
+++ b/pw_console/py/pw_console/log_view.py
@@ -17,13 +17,14 @@ from __future__ import annotations
import asyncio
import collections
import copy
+from enum import Enum
import itertools
import logging
import operator
from pathlib import Path
import re
import time
-from typing import Callable, Dict, Optional, Tuple, TYPE_CHECKING
+from typing import Callable, Dict, List, Optional, Tuple, TYPE_CHECKING
from prompt_toolkit.data_structures import Point
from prompt_toolkit.formatted_text import StyleAndTextTuples
@@ -47,6 +48,12 @@ if TYPE_CHECKING:
_LOG = logging.getLogger(__package__)
+class FollowEvent(Enum):
+ """Follow mode scroll event types."""
+ SEARCH_MATCH = 'scroll_to_bottom'
+ STICKY_FOLLOW = 'scroll_to_bottom_with_sticky_follow'
+
+
class LogView:
"""Viewing window into a LogStore."""
@@ -62,9 +69,12 @@ class LogView:
self.log_pane = log_pane
self.log_store = log_store if log_store else LogStore(
prefs=application.prefs)
+ self.log_store.set_prefs(application.prefs)
self.log_store.register_viewer(self)
- self.marked_logs: Dict[int, int] = {}
+ self.marked_logs_start: Optional[int] = None
+ self.marked_logs_end: Optional[int] = None
+
# Search variables
self.search_text: Optional[str] = None
self.search_filter: Optional[LogFilter] = None
@@ -72,6 +82,21 @@ class LogView:
self.search_matcher = DEFAULT_SEARCH_MATCHER
self.search_validator = RegexValidator()
+ # Container for each log_index matched by active searches.
+ self.search_matched_lines: Dict[int, int] = {}
+ # Background task to find historical matched lines.
+ self.search_match_count_task: Optional[asyncio.Task] = None
+
+ # Flag for automatically jumping to each new search match as they
+ # appear.
+ self.follow_search_match: bool = False
+ self.last_search_matched_log: Optional[int] = None
+
+ # Follow event flag. This is set by the new_logs_arrived() function
+ # as a signal that the log screen should be scrolled to the bottom.
+ # This is read by render_content() whenever the screen is drawn.
+ self.follow_event: Optional[FollowEvent] = None
+
self.log_screen = LogScreen(
get_log_source=self._get_log_lines,
get_line_wrapping=self.wrap_lines_enabled,
@@ -85,7 +110,7 @@ class LogView:
self.filters: 'collections.OrderedDict[str, LogFilter]' = (
collections.OrderedDict())
self.filtered_logs: collections.deque = collections.deque()
- self.filter_existing_logs_task = None
+ self.filter_existing_logs_task: Optional[asyncio.Task] = None
# Current log line index state variables:
self._last_log_index = -1
@@ -105,7 +130,7 @@ class LogView:
# Max frequency in seconds of prompt_toolkit UI redraws triggered by new
# log lines.
- self._ui_update_frequency = 0.1
+ self._ui_update_frequency = 0.05
self._last_ui_update_time = time.time()
self._last_log_store_index = 0
self._new_logs_since_last_render = True
@@ -116,7 +141,7 @@ class LogView:
self.visual_select_mode: bool = False
# Cache of formatted text tuples used in the last UI render.
- self._line_fragment_cache: StyleAndTextTuples = []
+ self._line_fragment_cache: List[StyleAndTextTuples] = []
def view_mode_changed(self) -> None:
self._reset_log_screen_on_next_render = True
@@ -145,7 +170,9 @@ class LogView:
def _set_match_position(self, position: int):
self.follow = False
self.log_index = position
+ self.save_search_matched_line(position)
self.log_screen.reset_logs(log_index=self.log_index)
+ self.log_screen.shift_selected_log_to_center()
self._user_scroll_event = True
self.log_pane.application.redraw_ui()
@@ -205,11 +232,11 @@ class LogView:
self._set_match_position(i)
return
- def _set_search_regex(self,
- text,
- invert,
- field,
- matcher: Optional[SearchMatcher] = None) -> bool:
+ def set_search_regex(self,
+ text,
+ invert,
+ field,
+ matcher: Optional[SearchMatcher] = None) -> bool:
search_matcher = matcher if matcher else self.search_matcher
_LOG.debug(search_matcher)
@@ -239,6 +266,7 @@ class LogView:
invert=False,
field: Optional[str] = None,
search_matcher: Optional[str] = None,
+ interactive: bool = True,
) -> bool:
"""Start a new search for the given text."""
valid_matchers = list(s.name for s in SearchMatcher)
@@ -247,11 +275,32 @@ class LogView:
and search_matcher.upper() in valid_matchers):
selected_matcher = SearchMatcher(search_matcher.upper())
- if self._set_search_regex(text, invert, field, selected_matcher):
- # Default search direction when hitting enter in the search bar.
- self.search_backwards()
- return True
- return False
+ if not self.set_search_regex(text, invert, field, selected_matcher):
+ return False
+
+ # Clear matched lines
+ self.search_matched_lines = {}
+
+ if interactive:
+ # Start count historical search matches task.
+ self.search_match_count_task = asyncio.create_task(
+ self.count_search_matches())
+
+ # Default search direction when hitting enter in the search bar.
+ if interactive:
+ self.search_forwards()
+ return True
+
+ def save_search_matched_line(self, log_index: int) -> None:
+ """Save the log_index at position as a matched line."""
+ self.search_matched_lines[log_index] = 0
+ # Keep matched lines sorted by position
+ self.search_matched_lines = {
+ # Save this log_index and its match number.
+ log_index: match_number
+ for match_number, log_index in enumerate(
+ sorted(self.search_matched_lines.keys()))
+ }
def disable_search_highlighting(self):
self.log_pane.log_view.search_highlight = False
@@ -282,11 +331,12 @@ class LogView:
"""Set a filter using the current search_regex."""
if not self.search_filter:
return
- self.search_highlight = False
self.filtering_on = True
self.filters[self.search_text] = copy.deepcopy(self.search_filter)
+ self.clear_search()
+
def apply_filter(self):
"""Set new filter and schedule historical log filter asyncio task."""
self.install_new_filter()
@@ -297,6 +347,7 @@ class LogView:
self._reset_log_screen_on_next_render = True
def clear_search(self):
+ self.search_matched_lines = {}
self.search_text = None
self.search_filter = None
self.search_highlight = False
@@ -348,6 +399,24 @@ class LogView:
if not self.follow:
self.toggle_follow()
+ async def count_search_matches(self):
+ """Count search matches and save their locations."""
+ # Wait for any filter_existing_logs_task to finish.
+ if self.filtering_on and self.filter_existing_logs_task:
+ await self.filter_existing_logs_task
+
+ starting_index = self.get_last_log_index()
+ ending_index, logs = self._get_log_lines()
+
+ # From the end of the log store to the beginning.
+ for i in range(starting_index, ending_index - 1, -1):
+ # Is this log a match?
+ if self.search_filter.matches(logs[i]):
+ self.save_search_matched_line(i)
+ # Pause every 100 lines or so
+ if i % 100 == 0:
+ await asyncio.sleep(.1)
+
async def filter_past_logs(self):
"""Filter past log lines."""
starting_index = self.log_store.get_last_log_index()
@@ -415,6 +484,8 @@ class LogView:
"""Toggle auto line following."""
self.follow = not self.follow
if self.follow:
+ # Disable search match follow mode.
+ self.follow_search_match = False
self.scroll_to_bottom()
def filter_scan(self, log: 'LogLine'):
@@ -430,7 +501,14 @@ class LogView:
return False
def new_logs_arrived(self):
- # If follow is on, scroll to the last line.
+ """Check newly arrived log messages.
+
+ Depending on where log statements occur ``new_logs_arrived`` may be in a
+ separate thread since it is triggerd by the Python log handler
+ ``emit()`` function. In this case the log handler is the LogStore
+ instance ``self.log_store``. This function should not redraw the screen
+ or scroll.
+ """
latest_total = self.log_store.get_total_count()
if self.filtering_on:
@@ -439,11 +517,24 @@ class LogView:
if self.filter_scan(self.log_store.logs[i]):
self.filtered_logs.append(self.log_store.logs[i])
+ if self.search_filter:
+ last_matched_log: Optional[int] = None
+ # Scan newly arrived log lines
+ for i in range(self._last_log_store_index, latest_total):
+ if self.search_filter.matches(self.log_store.logs[i]):
+ self.save_search_matched_line(i)
+ last_matched_log = i
+ if last_matched_log and self.follow_search_match:
+ # Set the follow event flag for the next render_content call.
+ self.follow_event = FollowEvent.SEARCH_MATCH
+ self.last_search_matched_log = last_matched_log
+
self._last_log_store_index = latest_total
self._new_logs_since_last_render = True
if self.follow:
- self.scroll_to_bottom()
+ # Set the follow event flag for the next render_content call.
+ self.follow_event = FollowEvent.STICKY_FOLLOW
# Trigger a UI update
self._update_prompt_toolkit_ui()
@@ -494,14 +585,15 @@ class LogView:
self.log_screen.shift_selected_log_to_center()
self._user_scroll_event = True
- def scroll_to_bottom(self):
+ def scroll_to_bottom(self, with_sticky_follow: bool = True):
"""Move selected index to the end."""
# Don't change following state like scroll_to_top.
self.log_index = max(0, self.get_last_log_index())
self.log_screen.reset_logs(log_index=self.log_index)
# Sticky follow mode
- self.follow = True
+ if with_sticky_follow:
+ self.follow = True
self._user_scroll_event = True
def scroll(self, lines) -> None:
@@ -529,17 +621,21 @@ class LogView:
self.follow = True
def visual_selected_log_count(self) -> int:
- return len(self.marked_logs)
+ if self.marked_logs_start is None or self.marked_logs_end is None:
+ return 0
+ return (self.marked_logs_end - self.marked_logs_start) + 1
def clear_visual_selection(self) -> None:
- self.marked_logs = {}
+ self.marked_logs_start = None
+ self.marked_logs_end = None
self.visual_select_mode = False
self._user_scroll_event = True
self.log_pane.application.redraw_ui()
def visual_select_all(self) -> None:
- for i in range(self._scrollback_start_index, self.get_total_count()):
- self.marked_logs[i] = 1
+ self.marked_logs_start = self._scrollback_start_index
+ self.marked_logs_end = self.get_total_count() - 1
+
self.visual_select_mode = True
self._user_scroll_event = True
self.log_pane.application.redraw_ui()
@@ -562,8 +658,7 @@ class LogView:
def visual_select_line(self,
mouse_position: Point,
- deselect: bool = False,
- autoscroll: bool = True):
+ autoscroll: bool = True) -> None:
"""Mark the log under mouse_position as visually selected."""
# Check mouse_position is valid
if not 0 <= mouse_position.y < len(self.log_screen.line_buffer):
@@ -576,15 +671,15 @@ class LogView:
if screen_line.log_index is None:
return
- # If deselecting
- if deselect:
- self.marked_logs[screen_line.log_index] = 0
- if screen_line.log_index in self.marked_logs:
- del self.marked_logs[screen_line.log_index]
- # Selecting
- else:
- self.marked_logs[screen_line.log_index] = self.marked_logs.get(
- screen_line.log_index, 0) + 1
+ if self.marked_logs_start is None:
+ self.marked_logs_start = screen_line.log_index
+ if self.marked_logs_end is None:
+ self.marked_logs_end = screen_line.log_index
+
+ if screen_line.log_index < self.marked_logs_start:
+ self.marked_logs_start = screen_line.log_index
+ elif screen_line.log_index > self.marked_logs_end:
+ self.marked_logs_end = screen_line.log_index
# Update cursor position
self.log_screen.move_cursor_to_position(mouse_position.y)
@@ -596,10 +691,6 @@ class LogView:
elif mouse_position.y == self._window_height - 1:
self.scroll_down(1)
- # If no selection left, turn off visual_select_mode flag.
- if len(self.marked_logs) == 0:
- self.visual_select_mode = False
-
# Trigger a rerender.
self._user_scroll_event = True
self.log_pane.application.redraw_ui()
@@ -661,6 +752,20 @@ class LogView:
self.log_screen.resize(self._window_width, self._window_height)
self._reset_log_screen_on_next_render = True
+ if self.follow_event is not None:
+ if (self.follow_event == FollowEvent.SEARCH_MATCH
+ and self.last_search_matched_log):
+ self.log_index = self.last_search_matched_log
+ self.last_search_matched_log = None
+ self._reset_log_screen_on_next_render = True
+
+ elif self.follow_event == FollowEvent.STICKY_FOLLOW:
+ # Jump to the last log message
+ self.log_index = max(0, self.get_last_log_index())
+
+ self.follow_event = None
+ screen_update_needed = True
+
if self._reset_log_screen_on_next_render or self.log_screen.empty():
# Clear the reset flag.
self._reset_log_screen_on_next_render = False
@@ -698,7 +803,9 @@ class LogView:
if screen_update_needed:
self._line_fragment_cache = self.log_screen.get_lines(
- marked_logs=self.marked_logs)
+ marked_logs_start=self.marked_logs_start,
+ marked_logs_end=self.marked_logs_end,
+ )
return self._line_fragment_cache
def _logs_to_text(
@@ -717,13 +824,15 @@ class LogView:
_start_log_index, log_source = self._get_log_lines()
- log_indexes = (i for i in range(self._scrollback_start_index,
- self.get_total_count()))
- if selected_lines_only:
- log_indexes = (i for i in sorted(self.marked_logs.keys()))
+ log_index_range = range(self._scrollback_start_index,
+ self.get_total_count())
+ if (selected_lines_only and self.marked_logs_start is not None
+ and self.marked_logs_end is not None):
+ log_index_range = range(self.marked_logs_start,
+ self.marked_logs_end + 1)
text_output = ''
- for i in log_indexes:
+ for i in log_index_range:
log_text = formatter(log_source[i])
text_output += log_text
if not log_text.endswith('\n'):
diff --git a/pw_console/py/pw_console/plugins/calc_pane.py b/pw_console/py/pw_console/plugins/calc_pane.py
index a44a25b30..f85a60072 100644
--- a/pw_console/py/pw_console/plugins/calc_pane.py
+++ b/pw_console/py/pw_console/plugins/calc_pane.py
@@ -13,6 +13,8 @@
# the License.
"""Example text input-output Plugin."""
+from typing import TYPE_CHECKING
+
from prompt_toolkit.document import Document
from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent
from prompt_toolkit.layout import Window
@@ -20,6 +22,9 @@ from prompt_toolkit.widgets import SearchToolbar, TextArea
from pw_console.widgets import ToolbarButton, WindowPane, WindowPaneToolbar
+if TYPE_CHECKING:
+ from pw_console.console_app import ConsoleApp
+
class CalcPane(WindowPane):
"""Example plugin that accepts text input and displays output.
@@ -87,18 +92,6 @@ class CalcPane(WindowPane):
# handler defined in this CalcPane class.
self.input_field.accept_handler = self.accept_input
- # Add some additional keybindings for the output_field.
- key_bindings = KeyBindings()
-
- # Copy selected text in the output buffer when pressing ctrl-c.
- @key_bindings.add('c-c')
- def _copy_all_output(_event: KeyPressEvent) -> None:
- """Copy selected text from the output buffer."""
- self.copy_selected_output()
-
- # Set the output_field control's key_bindings to the new bindings.
- self.output_field.control.key_bindings = key_bindings
-
# Create a toolbar for display at the bottom of this window. It will
# show the window title and toolbar buttons.
self.bottom_toolbar = WindowPaneToolbar(self)
@@ -133,6 +126,38 @@ class CalcPane(WindowPane):
self.bottom_toolbar,
)
+ def pw_console_init(self, app: 'ConsoleApp') -> None:
+ """Set the Pigweed Console application instance.
+
+ This function is called after the Pigweed Console starts up and allows
+ access to the user preferences. Prefs is required for creating new
+ user-remappable keybinds."""
+ self.application = app
+ self.set_custom_keybinds()
+
+ def set_custom_keybinds(self) -> None:
+ # Fetch ConsoleApp preferences to load user keybindings
+ prefs = self.application.prefs
+ # Register a named keybind function that is user re-mappable
+ prefs.register_named_key_function(
+ 'calc-pane.copy-selected-text',
+ # default bindings
+ ['c-c'])
+
+ # For setting additional keybindings to the output_field.
+ key_bindings = KeyBindings()
+
+ # Map the 'calc-pane.copy-selected-text' function keybind to the
+ # _copy_all_output function below. This will set the user-remappable keybinding.
+ @prefs.register_keybinding('calc-pane.copy-selected-text',
+ key_bindings)
+ def _copy_all_output(_event: KeyPressEvent) -> None:
+ """Copy selected text from the output buffer."""
+ self.copy_selected_output()
+
+ # Set the output_field control's key_bindings to the new bindings.
+ self.output_field.control.key_bindings = key_bindings
+
def run_calculation(self):
"""Trigger the input_field's accept_handler.
diff --git a/pw_console/py/pw_console/python_logging.py b/pw_console/py/pw_console/python_logging.py
index 1d494a8f2..7f5d9beaa 100644
--- a/pw_console/py/pw_console/python_logging.py
+++ b/pw_console/py/pw_console/python_logging.py
@@ -17,7 +17,7 @@ import copy
import logging
import tempfile
from datetime import datetime
-from typing import Iterator, Optional
+from typing import Iterable, Iterator, Optional
def all_loggers() -> Iterator[logging.Logger]:
@@ -28,19 +28,25 @@ def all_loggers() -> Iterator[logging.Logger]:
yield logging.getLogger(logger_name)
-def create_temp_log_file():
+def create_temp_log_file(prefix: Optional[str] = None,
+ add_time: bool = True) -> str:
"""Create a unique tempfile for saving logs.
Example format: /tmp/pw_console_2021-05-04_151807_8hem6iyq
"""
+ if not prefix:
+ prefix = str(__package__)
# Grab the current system timestamp as a string.
isotime = datetime.now().isoformat(sep='_', timespec='seconds')
# Timestamp string should not have colons in it.
isotime = isotime.replace(':', '')
+ if add_time:
+ prefix += f'_{isotime}'
+
log_file_name = None
- with tempfile.NamedTemporaryFile(prefix=f'{__package__}_{isotime}_',
+ with tempfile.NamedTemporaryFile(prefix=f'{prefix}_',
delete=False) as log_file:
log_file_name = log_file.name
@@ -62,18 +68,25 @@ def disable_stdout_handlers(logger: logging.Logger) -> None:
logger.removeHandler(handler)
-def setup_python_logging(last_resort_filename: Optional[str] = None) -> None:
+def setup_python_logging(
+ last_resort_filename: Optional[str] = None,
+ loggers_with_no_propagation: Optional[Iterable[logging.Logger]] = None
+) -> None:
"""Disable log handlers for full screen prompt_toolkit applications."""
+ if not loggers_with_no_propagation:
+ loggers_with_no_propagation = []
disable_stdout_handlers(logging.getLogger())
if logging.lastResort is not None:
set_logging_last_resort_file_handler(last_resort_filename)
- for logger in all_loggers():
- # Make sure all known loggers propagate to the root logger.
- logger.propagate = True
+ for logger in list(all_loggers()):
# Prevent stdout handlers from corrupting the prompt_toolkit UI.
disable_stdout_handlers(logger)
+ if logger in loggers_with_no_propagation:
+ continue
+ # Make sure all known loggers propagate to the root logger.
+ logger.propagate = True
# Prevent these loggers from propagating to the root logger.
hidden_host_loggers = [
diff --git a/pw_console/py/pw_console/quit_dialog.py b/pw_console/py/pw_console/quit_dialog.py
index 68100f02a..b466580eb 100644
--- a/pw_console/py/pw_console/quit_dialog.py
+++ b/pw_console/py/pw_console/quit_dialog.py
@@ -16,7 +16,8 @@
from __future__ import annotations
import functools
import logging
-from typing import TYPE_CHECKING
+import sys
+from typing import Optional, Callable, TYPE_CHECKING
from prompt_toolkit.data_structures import Point
from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent
@@ -44,25 +45,28 @@ class QuitDialog(ConditionalContainer):
DIALOG_HEIGHT = 2
- def __init__(self, application: ConsoleApp):
+ def __init__(self,
+ application: ConsoleApp,
+ on_quit: Optional[Callable] = None):
self.application = application
self.show_dialog = False
# Tracks the last focused container, to enable restoring focus after
# closing the dialog.
self.last_focused_pane = None
+ self.on_quit_function = (on_quit if on_quit else
+ self._default_on_quit_function)
+
# Quit keybindings are active when this dialog is in focus
key_bindings = KeyBindings()
+ register = self.application.prefs.register_keybinding
- @key_bindings.add('y')
- @key_bindings.add('c-d')
+ @register('quit-dialog.yes', key_bindings)
def _quit(_event: KeyPressEvent) -> None:
"""Close save as bar."""
self.quit_action()
- @key_bindings.add('escape')
- @key_bindings.add('n')
- @key_bindings.add('c-c')
+ @register('quit-dialog.no', key_bindings)
def _cancel(_event: KeyPressEvent) -> None:
"""Close save as bar."""
self.close_dialog()
@@ -116,8 +120,14 @@ class QuitDialog(ConditionalContainer):
self.focus_self()
self.application.redraw_ui()
+ def _default_on_quit_function(self):
+ if hasattr(self.application, 'application'):
+ self.application.application.exit()
+ else:
+ sys.exit()
+
def quit_action(self):
- self.application.application.exit()
+ self.on_quit_function()
def get_action_fragments(self):
"""Return FormattedText with action buttons."""
diff --git a/pw_console/py/pw_console/repl_pane.py b/pw_console/py/pw_console/repl_pane.py
index 5746dafe8..10d52a5e5 100644
--- a/pw_console/py/pw_console/repl_pane.py
+++ b/pw_console/py/pw_console/repl_pane.py
@@ -99,7 +99,7 @@ class UserCodeExecution:
class ReplPane(WindowPane):
"""Pane for reading Python input."""
- # pylint: disable=too-many-instance-attributes,too-few-public-methods
+ # pylint: disable=too-many-instance-attributes,too-many-public-methods
def __init__(
self,
application: 'ConsoleApp',
@@ -130,8 +130,9 @@ class ReplPane(WindowPane):
# Additional keybindings for the text area.
key_bindings = KeyBindings()
+ register = self.application.prefs.register_keybinding
- @key_bindings.add('c-c')
+ @register('python-repl.copy-output-selection', key_bindings)
def _copy_selection(_event: KeyPressEvent) -> None:
"""Copy selected text."""
self.copy_output_selection()
@@ -256,7 +257,8 @@ class ReplPane(WindowPane):
ToolbarButton('Ctrl-v', 'Paste',
self.paste_system_clipboard_to_input_buffer))
bottom_toolbar.add_button(
- ToolbarButton('Ctrl-c', 'Clear', self.clear_input_buffer))
+ ToolbarButton('Ctrl-c', 'Copy / Clear',
+ self.copy_or_clear_input_buffer))
bottom_toolbar.add_button(ToolbarButton('Enter', 'Run', self.run_code))
bottom_toolbar.add_button(ToolbarButton('F2', 'Settings'))
bottom_toolbar.add_button(ToolbarButton('F3', 'History'))
@@ -276,7 +278,8 @@ class ReplPane(WindowPane):
is_checkbox=True,
checked=lambda: self.wrap_output_lines))
results_toolbar.add_button(
- ToolbarButton('Ctrl-Alt-c', 'Copy All Output', self.copy_text))
+ ToolbarButton('Ctrl-Alt-c', 'Copy All Output',
+ self.copy_all_output_text))
results_toolbar.add_button(
ToolbarButton('Ctrl-c', 'Copy Selected Text',
self.copy_output_selection))
@@ -287,16 +290,25 @@ class ReplPane(WindowPane):
return results_toolbar
def copy_output_selection(self):
- """Copy the highlighted text the python output buffer to the system
- clipboard."""
+ """Copy highlighted output text to the system clipboard."""
clipboard_data = self.output_field.buffer.copy_selection()
self.application.application.clipboard.set_data(clipboard_data)
- def copy_text(self):
- """Copy visible text in this window pane to the system clipboard."""
+ def copy_input_selection(self):
+ """Copy highlighted input text to the system clipboard."""
+ clipboard_data = self.pw_ptpython_repl.default_buffer.copy_selection()
+ self.application.application.clipboard.set_data(clipboard_data)
+
+ def copy_all_output_text(self):
+ """Copy all text in the Python output to the system clipboard."""
self.application.application.clipboard.set_text(
self.output_field.buffer.text)
+ def copy_all_input_text(self):
+ """Copy all text in the Python input to the system clipboard."""
+ self.application.application.clipboard.set_text(
+ self.pw_ptpython_repl.default_buffer.text)
+
# pylint: disable=no-self-use
def get_all_key_bindings(self) -> List:
"""Return all keybinds for this plugin."""
@@ -305,9 +317,9 @@ class ReplPane(WindowPane):
# Hand-crafted bindings for display in the HelpWindow:
return [{
- 'Execute code': ['Enter', 'Option-Enter', 'Meta-Enter'],
- 'Reverse search history': ['Ctrl-R'],
- 'Erase input buffer.': ['Ctrl-C'],
+ 'Execute code': ['Enter', 'Option-Enter', 'Alt-Enter'],
+ 'Reverse search history': ['Ctrl-r'],
+ 'Erase input buffer.': ['Ctrl-c'],
'Show settings.': ['F2'],
'Show history.': ['F3'],
}]
@@ -315,18 +327,15 @@ class ReplPane(WindowPane):
def get_all_menu_options(self):
return []
- def after_render_hook(self):
- """Run tasks after the last UI render."""
-
def run_code(self):
"""Trigger a repl code execution on mouse click."""
self.pw_ptpython_repl.default_buffer.validate_and_handle()
def ctrl_c(self):
"""Ctrl-C keybinding behavior."""
- # If there is text in the input buffer, clear it.
+ # If there is text in the input buffer
if self.pw_ptpython_repl.default_buffer.text:
- self.clear_input_buffer()
+ self.copy_or_clear_input_buffer()
else:
self.interrupt_last_code_execution()
@@ -343,6 +352,14 @@ class ReplPane(WindowPane):
# Clear any displayed function signatures.
self.pw_ptpython_repl.on_reset()
+ def copy_or_clear_input_buffer(self):
+ # Copy selected text if a selection is active.
+ if self.pw_ptpython_repl.default_buffer.selection_state:
+ self.copy_input_selection()
+ return
+ # Otherwise, clear the input buffer
+ self.clear_input_buffer()
+
def interrupt_last_code_execution(self):
code = self._get_currently_running_code()
if code:
diff --git a/pw_console/py/pw_console/search_toolbar.py b/pw_console/py/pw_console/search_toolbar.py
index 912f44a19..72ad91794 100644
--- a/pw_console/py/pw_console/search_toolbar.py
+++ b/pw_console/py/pw_console/search_toolbar.py
@@ -18,13 +18,18 @@ import functools
from typing import TYPE_CHECKING
from prompt_toolkit.buffer import Buffer
-from prompt_toolkit.filters import (
- Condition, )
-from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent
+from prompt_toolkit.filters import Condition, has_focus
+from prompt_toolkit.formatted_text import StyleAndTextTuples
+from prompt_toolkit.key_binding import (
+ KeyBindings,
+ KeyBindingsBase,
+ KeyPressEvent,
+)
from prompt_toolkit.layout import (
ConditionalContainer,
FormattedTextControl,
HSplit,
+ VSplit,
Window,
WindowAlign,
)
@@ -40,54 +45,224 @@ if TYPE_CHECKING:
class SearchToolbar(ConditionalContainer):
- """One line toolbar for entering search text."""
+ """Toolbar for entering search text and viewing match counts."""
+
+ TOOLBAR_HEIGHT = 2
+
+ def __init__(self, log_pane: 'LogPane'):
+ self.log_pane = log_pane
+ self.log_view = log_pane.log_view
+ self.search_validator = RegexValidator()
+ self._search_successful = False
+ self._search_invert = False
+ self._search_field = None
+
+ self.input_field = TextArea(
+ prompt=[
+ ('class:search-bar-setting', '/',
+ functools.partial(pw_console.widgets.mouse_handlers.on_click,
+ self.focus_self))
+ ],
+ focusable=True,
+ focus_on_click=True,
+ scrollbar=False,
+ multiline=False,
+ height=1,
+ dont_extend_height=True,
+ dont_extend_width=False,
+ accept_handler=self._search_accept_handler,
+ validator=DynamicValidator(self.get_search_matcher),
+ history=self.log_pane.application.search_history,
+ )
+
+ self.input_field.control.key_bindings = self._create_key_bindings()
+
+ match_count_window = Window(
+ content=FormattedTextControl(self.get_match_count_fragments),
+ height=1,
+ align=WindowAlign.LEFT,
+ dont_extend_width=True,
+ style='class:search-match-count-dialog',
+ )
+
+ match_buttons_window = Window(
+ content=FormattedTextControl(self.get_button_fragments),
+ height=1,
+ align=WindowAlign.LEFT,
+ dont_extend_width=False,
+ style='class:search-match-count-dialog',
+ )
+
+ input_field_buttons_window = Window(
+ content=FormattedTextControl(self.get_search_help_fragments),
+ height=1,
+ align=WindowAlign.RIGHT,
+ dont_extend_width=True,
+ )
+
+ settings_bar_window = Window(
+ content=FormattedTextControl(self.get_search_settings_fragments),
+ height=1,
+ align=WindowAlign.LEFT,
+ dont_extend_width=False,
+ )
+
+ super().__init__(
+ HSplit(
+ [
+ # Top row
+ VSplit([
+ # Search Settings toggles, only show if the search input
+ # field is in focus.
+ ConditionalContainer(settings_bar_window,
+ filter=has_focus(
+ self.input_field)),
+
+ # Match count numbers and buttons, only show if the
+ # search input is NOT in focus.
+ ConditionalContainer(
+ match_count_window,
+ filter=~has_focus(self.input_field)), # pylint: disable=invalid-unary-operand-type
+ ConditionalContainer(
+ match_buttons_window,
+ filter=~has_focus(self.input_field)), # pylint: disable=invalid-unary-operand-type
+ ]),
+ # Bottom row
+ VSplit([
+ self.input_field,
+ ConditionalContainer(input_field_buttons_window,
+ filter=has_focus(self))
+ ])
+ ],
+ height=SearchToolbar.TOOLBAR_HEIGHT,
+ style='class:search-bar',
+ ),
+ filter=Condition(lambda: log_pane.search_bar_active),
+ )
+
+ def _create_key_bindings(self) -> KeyBindingsBase:
+ """Create additional key bindings for the search input."""
+ # Clear filter keybind is handled by the parent log_pane.
+
+ key_bindings = KeyBindings()
+ register = self.log_pane.application.prefs.register_keybinding
+
+ @register('search-toolbar.cancel', key_bindings)
+ def _close_search_bar(_event: KeyPressEvent) -> None:
+ """Close search bar."""
+ self.cancel_search()
- TOOLBAR_HEIGHT = 3
+ @register('search-toolbar.toggle-matcher', key_bindings)
+ def _select_next_search_matcher(_event: KeyPressEvent) -> None:
+ """Select the next search matcher."""
+ self.log_pane.log_view.select_next_search_matcher()
- def focus_self(self):
+ @register('search-toolbar.create-filter', key_bindings)
+ def _create_filter(_event: KeyPressEvent) -> None:
+ """Create a filter."""
+ self.create_filter()
+
+ @register('search-toolbar.toggle-invert', key_bindings)
+ def _toggle_search_invert(_event: KeyPressEvent) -> None:
+ """Toggle inverted search matching."""
+ self._invert_search()
+
+ @register('search-toolbar.toggle-column', key_bindings)
+ def _select_next_field(_event: KeyPressEvent) -> None:
+ """Select next search field/column."""
+ self._next_field()
+
+ return key_bindings
+
+ def focus_self(self) -> None:
self.log_pane.application.application.layout.focus(self)
- def close_search_bar(self):
+ def focus_log_pane(self) -> None:
+ self.log_pane.application.focus_on_container(self.log_pane)
+
+ def _create_filter(self) -> None:
+ self.input_field.buffer.reset()
+ self.close_search_bar()
+ self.log_view.apply_filter()
+
+ def _next_match(self) -> None:
+ self.log_view.search_forwards()
+
+ def _previous_match(self) -> None:
+ self.log_view.search_backwards()
+
+ def cancel_search(self) -> None:
+ self.input_field.buffer.reset()
+ self.close_search_bar()
+ self.log_view.clear_search()
+
+ def close_search_bar(self) -> None:
"""Close search bar."""
# Reset invert setting for the next search
self._search_invert = False
+ self.log_view.follow_search_match = False
# Hide the search bar
self.log_pane.search_bar_active = False
# Focus on the log_pane.
self.log_pane.application.focus_on_container(self.log_pane)
self.log_pane.redraw_ui()
- def _start_search(self):
+ def _start_search(self) -> None:
self.input_field.buffer.validate_and_handle()
- def _invert_search(self):
+ def _invert_search(self) -> None:
self._search_invert = not self._search_invert
- def _next_field(self):
+ def _toggle_search_follow(self) -> None:
+ self.log_view.follow_search_match = (
+ not self.log_view.follow_search_match)
+ # If automatically jumping to the next search match, disable normal
+ # follow mode.
+ if self.log_view.follow_search_match:
+ self.log_view.follow = False
+
+ def _next_field(self) -> None:
fields = self.log_pane.log_view.log_store.table.all_column_names()
fields.append(None)
current_index = fields.index(self._search_field)
next_index = (current_index + 1) % len(fields)
self._search_field = fields[next_index]
- def create_filter(self):
+ def create_filter(self) -> None:
self._start_search()
if self._search_successful:
self.log_pane.log_view.apply_filter()
+ def _search_accept_handler(self, buff: Buffer) -> bool:
+ """Function run when hitting Enter in the search bar."""
+ self._search_successful = False
+ if len(buff.text) == 0:
+ self.close_search_bar()
+ # Don't apply an empty search.
+ return False
+
+ if self.log_pane.log_view.new_search(buff.text,
+ invert=self._search_invert,
+ field=self._search_field):
+ self._search_successful = True
+
+ # Don't close the search bar, instead focus on the log content.
+ self.log_pane.application.focus_on_container(
+ self.log_pane.log_display_window)
+ # Keep existing search text.
+ return True
+
+ # Keep existing text if regex error
+ return True
+
def get_search_help_fragments(self):
"""Return FormattedText with search general help keybinds."""
focus = functools.partial(pw_console.widgets.mouse_handlers.on_click,
self.focus_self)
start_search = functools.partial(
pw_console.widgets.mouse_handlers.on_click, self._start_search)
- add_filter = functools.partial(
- pw_console.widgets.mouse_handlers.on_click, self.create_filter)
- clear_filters = functools.partial(
- pw_console.widgets.mouse_handlers.on_click,
- self.log_pane.log_view.clear_filters)
close_search = functools.partial(
- pw_console.widgets.mouse_handlers.on_click, self.close_search_bar)
+ pw_console.widgets.mouse_handlers.on_click, self.cancel_search)
# Search toolbar is darker than pane toolbars, use the darker button
# style here.
@@ -108,25 +283,8 @@ class SearchToolbar(ConditionalContainer):
fragments.extend(
pw_console.widgets.checkbox.to_keybind_indicator(
- 'Ctrl-Alt-f',
- 'Add Filter',
- add_filter,
- base_style=button_style))
- fragments.extend(separator_text)
+ 'Ctrl-c', 'Cancel', close_search, base_style=button_style))
- fragments.extend(
- pw_console.widgets.checkbox.to_keybind_indicator(
- 'Ctrl-Alt-r',
- 'Clear Filters',
- clear_filters,
- base_style=button_style))
- fragments.extend(separator_text)
-
- fragments.extend(
- pw_console.widgets.checkbox.to_keybind_indicator(
- 'Ctrl-c', 'Close', close_search, base_style=button_style))
-
- fragments.extend(separator_text)
return fragments
def get_search_settings_fragments(self):
@@ -199,109 +357,97 @@ class SearchToolbar(ConditionalContainer):
return self.log_pane.log_view.search_validator
return False
- def __init__(self, log_pane: 'LogPane'):
- self.log_pane = log_pane
- self.search_validator = RegexValidator()
- self._search_successful = False
- self._search_invert = False
- self._search_field = None
-
- # FormattedText of the search column headers.
- self.input_field = TextArea(
- prompt=[
- ('class:search-bar-setting', '/',
- functools.partial(pw_console.widgets.mouse_handlers.on_click,
- self.focus_self))
- ],
- focusable=True,
- focus_on_click=True,
- scrollbar=False,
- multiline=False,
- height=1,
- dont_extend_height=True,
- dont_extend_width=False,
- accept_handler=self._search_accept_handler,
- validator=DynamicValidator(self.get_search_matcher),
- history=self.log_pane.application.search_history,
- )
-
- search_help_bar_control = FormattedTextControl(
- self.get_search_help_fragments)
- search_help_bar_window = Window(content=search_help_bar_control,
- height=1,
- align=WindowAlign.LEFT,
- dont_extend_width=False)
-
- search_settings_bar_control = FormattedTextControl(
- self.get_search_settings_fragments)
- search_settings_bar_window = Window(
- content=search_settings_bar_control,
- height=1,
- align=WindowAlign.LEFT,
- dont_extend_width=False)
-
- # Additional keybindings for the text area.
- key_bindings = KeyBindings()
-
- @key_bindings.add('escape')
- @key_bindings.add('c-c')
- @key_bindings.add('c-d')
- def _close_search_bar(_event: KeyPressEvent) -> None:
- """Close search bar."""
- self.close_search_bar()
-
- @key_bindings.add('c-n')
- def _select_next_search_matcher(_event: KeyPressEvent) -> None:
- """Select the next search matcher."""
- self.log_pane.log_view.select_next_search_matcher()
-
- @key_bindings.add('escape', 'c-f') # Alt-Ctrl-f
- def _create_filter(_event: KeyPressEvent) -> None:
- """Create a filter."""
- self.create_filter()
+ def get_match_count_fragments(self):
+ """Return formatted text for the match count indicator."""
+ focus = functools.partial(pw_console.widgets.mouse_handlers.on_click,
+ self.focus_log_pane)
+ two_spaces = ('', ' ', focus)
+
+ # Check if this line is a search match
+ match_number = self.log_view.search_matched_lines.get(
+ self.log_view.log_index, -1)
+
+ # If valid, increment the zero indexed value by one for better human
+ # readability.
+ if match_number >= 0:
+ match_number += 1
+ # If no match, mark as zero
+ else:
+ match_number = 0
+
+ return [
+ ('class:search-match-count-dialog-title', ' Match ', focus),
+ ('', '{} / {}'.format(match_number,
+ len(self.log_view.search_matched_lines)),
+ focus),
+ two_spaces,
+ ]
- @key_bindings.add('c-v')
- def _toggle_search_invert(_event: KeyPressEvent) -> None:
- """Toggle inverted search matching."""
- self._invert_search()
+ def get_button_fragments(self) -> StyleAndTextTuples:
+ """Return formatted text for the action buttons."""
+ focus = functools.partial(pw_console.widgets.mouse_handlers.on_click,
+ self.focus_log_pane)
+
+ one_space = ('', ' ', focus)
+ two_spaces = ('', ' ', focus)
+ cancel = functools.partial(pw_console.widgets.mouse_handlers.on_click,
+ self.cancel_search)
+ create_filter = functools.partial(
+ pw_console.widgets.mouse_handlers.on_click, self._create_filter)
+ next_match = functools.partial(
+ pw_console.widgets.mouse_handlers.on_click, self._next_match)
+ previous_match = functools.partial(
+ pw_console.widgets.mouse_handlers.on_click, self._previous_match)
+ toggle_search_follow = functools.partial(
+ pw_console.widgets.mouse_handlers.on_click,
+ self._toggle_search_follow)
- @key_bindings.add('c-t')
- def _select_next_field(_event: KeyPressEvent) -> None:
- """Select next search field/column."""
- self._next_field()
+ button_style = 'class:toolbar-button-inactive'
- # Clear filter keybind is handled by the parent log_pane.
+ fragments = []
+ fragments.extend(
+ pw_console.widgets.checkbox.to_keybind_indicator(
+ key='n',
+ description='Next',
+ mouse_handler=next_match,
+ base_style=button_style,
+ ))
+ fragments.append(two_spaces)
- self.input_field.control.key_bindings = key_bindings
+ fragments.extend(
+ pw_console.widgets.checkbox.to_keybind_indicator(
+ key='N',
+ description='Previous',
+ mouse_handler=previous_match,
+ base_style=button_style,
+ ))
+ fragments.append(two_spaces)
- super().__init__(
- HSplit(
- [
- search_help_bar_window,
- search_settings_bar_window,
- self.input_field,
- ],
- height=SearchToolbar.TOOLBAR_HEIGHT,
- style='class:search-bar',
- ),
- filter=Condition(lambda: log_pane.search_bar_active),
- )
+ fragments.extend(
+ pw_console.widgets.checkbox.to_keybind_indicator(
+ key='Ctrl-c',
+ description='Cancel',
+ mouse_handler=cancel,
+ base_style=button_style,
+ ))
+ fragments.append(two_spaces)
- def _search_accept_handler(self, buff: Buffer) -> bool:
- """Function run when hitting Enter in the search bar."""
- self._search_successful = False
- if len(buff.text) == 0:
- self.close_search_bar()
- # Don't apply an empty search.
- return False
+ fragments.extend(
+ pw_console.widgets.checkbox.to_keybind_indicator(
+ key='Ctrl-Alt-f',
+ description='Add Filter',
+ mouse_handler=create_filter,
+ base_style=button_style,
+ ))
+ fragments.append(two_spaces)
- if self.log_pane.log_view.new_search(buff.text,
- invert=self._search_invert,
- field=self._search_field):
- self._search_successful = True
- self.close_search_bar()
- # Erase existing search text.
- return False
+ fragments.extend(
+ pw_console.widgets.checkbox.to_checkbox_with_keybind_indicator(
+ checked=self.log_view.follow_search_match,
+ key='',
+ description='Jump to new matches',
+ mouse_handler=toggle_search_follow,
+ base_style=button_style))
+ fragments.append(one_space)
- # Keep existing text if regex error
- return True
+ return fragments
diff --git a/pw_console/py/pw_console/style.py b/pw_console/py/pw_console/style.py
index a867a386f..da252b478 100644
--- a/pw_console/py/pw_console/style.py
+++ b/pw_console/py/pw_console/style.py
@@ -16,6 +16,8 @@
import logging
from dataclasses import dataclass
+from prompt_toolkit.formatted_text import StyleAndTextTuples
+from prompt_toolkit.formatted_text.base import OneStyleAndTextTuple
from prompt_toolkit.styles import Style
from prompt_toolkit.filters import has_focus
@@ -41,6 +43,7 @@ class HighContrastDarkColors:
inactive_fg = '#bfc0c4'
line_highlight_bg = '#2f2f2f'
+ selected_line_bg = '#4e4e4e'
dialog_bg = '#3c3c3c'
red_accent = '#ffc0bf'
@@ -72,6 +75,7 @@ class DarkColors:
inactive_fg = '#bfbfbf'
line_highlight_bg = '#525252'
+ selected_line_bg = '#626262'
dialog_bg = '#3c3c3c'
red_accent = '#ff6c6b'
@@ -103,6 +107,7 @@ class NordColors:
inactive_fg = '#d8dee9'
line_highlight_bg = '#191c25'
+ selected_line_bg = '#4c566a'
dialog_bg = '#2c333f'
red_accent = '#bf616a'
@@ -129,6 +134,7 @@ class NordLightColors:
inactive_bg = '#c2d0e7'
inactive_fg = '#60728c'
line_highlight_bg = '#f0f4fc'
+ selected_line_bg = '#f0f4fc'
dialog_bg = '#d8dee9'
red_accent = '#99324b'
@@ -155,6 +161,7 @@ class MoonlightColors:
inactive_bg = '#222436'
inactive_fg = '#a9b8e8'
line_highlight_bg = '#383e5c'
+ selected_line_bg = '#444a73'
dialog_bg = '#1e2030'
red_accent = '#d95468'
@@ -167,13 +174,46 @@ class MoonlightColors:
magenta_accent = '#e27e8d'
+@dataclass
+class AnsiTerm:
+ # pylint: disable=too-many-instance-attributes
+ default_bg = 'default'
+ default_fg = 'default'
+
+ dim_bg = 'default'
+ dim_fg = 'default'
+
+ button_active_bg = 'default underline'
+ button_inactive_bg = 'default'
+
+ active_bg = 'default'
+ active_fg = 'default'
+
+ inactive_bg = 'default'
+ inactive_fg = 'default'
+
+ line_highlight_bg = 'ansidarkgray white'
+ selected_line_bg = 'default reverse'
+ dialog_bg = 'default'
+
+ red_accent = 'ansired'
+ orange_accent = 'orange'
+ yellow_accent = 'ansiyellow'
+ green_accent = 'ansigreen'
+ cyan_accent = 'ansicyan'
+ blue_accent = 'ansiblue'
+ purple_accent = 'ansipurple'
+ magenta_accent = 'ansimagenta'
+
+
_THEME_NAME_MAPPING = {
'moonlight': MoonlightColors(),
'nord': NordColors(),
'nord-light': NordLightColors(),
'dark': DarkColors(),
'high-contrast-dark': HighContrastDarkColors(),
-} # yapf: disable
+ 'ansi': AnsiTerm(),
+} # yapf: disable
def get_theme_colors(theme_name=''):
@@ -266,7 +306,7 @@ def generate_styles(theme_name='dark'):
# Highlighted line styles
'selected-log-line': 'bg:{}'.format(theme.line_highlight_bg),
- 'marked-log-line': 'bg:{}'.format(theme.button_active_bg),
+ 'marked-log-line': 'bg:{}'.format(theme.selected_line_bg),
'cursor-line': 'bg:{} nounderline'.format(theme.line_highlight_bg),
# Messages like 'Window too small'
@@ -302,6 +342,12 @@ def generate_styles(theme_name='dark'):
'search-bar-setting': '{}'.format(theme.cyan_accent),
'search-bar-border': 'bg:{} {}'.format(theme.inactive_bg,
theme.cyan_accent),
+ 'search-match-count-dialog': 'bg:{}'.format(theme.inactive_bg),
+ 'search-match-count-dialog-title': '{}'.format(theme.cyan_accent),
+ 'search-match-count-dialog-default-fg': '{}'.format(theme.default_fg),
+ 'search-match-count-dialog-border': 'bg:{} {}'.format(
+ theme.inactive_bg,
+ theme.cyan_accent),
'filter-bar': 'bg:{}'.format(theme.inactive_bg),
'filter-bar-title': 'bg:{} {}'.format(theme.red_accent,
@@ -329,6 +375,20 @@ def generate_styles(theme_name='dark'):
'quit-dialog-border': 'bg:{} {}'.format(theme.inactive_bg,
theme.red_accent),
+ 'command-runner': 'bg:{}'.format(theme.inactive_bg),
+ 'command-runner-title': 'bg:{} {}'.format(theme.inactive_bg,
+ theme.default_fg),
+ 'command-runner-setting': '{}'.format(theme.purple_accent),
+ 'command-runner-border': 'bg:{} {}'.format(theme.inactive_bg,
+ theme.purple_accent),
+ 'command-runner-selected-item': 'bg:{}'.format(theme.selected_line_bg),
+ 'command-runner-fuzzy-highlight-0': '{}'.format(theme.blue_accent),
+ 'command-runner-fuzzy-highlight-1': '{}'.format(theme.cyan_accent),
+ 'command-runner-fuzzy-highlight-2': '{}'.format(theme.green_accent),
+ 'command-runner-fuzzy-highlight-3': '{}'.format(theme.yellow_accent),
+ 'command-runner-fuzzy-highlight-4': '{}'.format(theme.orange_accent),
+ 'command-runner-fuzzy-highlight-5': '{}'.format(theme.red_accent),
+
# Progress Bar Styles
# Entire set of ProgressBars - no title is used in pw_console
'title': '',
@@ -383,7 +443,7 @@ def generate_styles(theme_name='dark'):
'theme-bg-button-active': 'bg:{}'.format(theme.button_active_bg),
'theme-bg-button-inactive': 'bg:{}'.format(theme.button_inactive_bg),
- } # yapf: disable
+ } # yapf: disable
return Style.from_dict(pw_console_styles)
@@ -412,8 +472,14 @@ def get_pane_style(pt_container) -> str:
def get_pane_indicator(pt_container,
title,
mouse_handler=None,
- hide_indicator=False):
+ hide_indicator=False) -> StyleAndTextTuples:
"""Return formatted text for a pane indicator and title."""
+
+ inactive_indicator: OneStyleAndTextTuple
+ active_indicator: OneStyleAndTextTuple
+ inactive_title: OneStyleAndTextTuple
+ active_title: OneStyleAndTextTuple
+
if mouse_handler:
inactive_indicator = ('class:pane_indicator_inactive', ' ',
mouse_handler)
@@ -426,7 +492,7 @@ def get_pane_indicator(pt_container,
inactive_title = ('class:pane_title_inactive', title)
active_title = ('class:pane_title_active', title)
- fragments = []
+ fragments: StyleAndTextTuples = []
if has_focus(pt_container.__pt_container__())():
if not hide_indicator:
fragments.append(active_indicator)
diff --git a/pw_console/py/pw_console/text_formatting.py b/pw_console/py/pw_console/text_formatting.py
index 58f99971d..6fb53f98b 100644
--- a/pw_console/py/pw_console/text_formatting.py
+++ b/pw_console/py/pw_console/text_formatting.py
@@ -15,7 +15,7 @@
import copy
import re
-from typing import Iterable, List
+from typing import Iterable, List, Tuple
from prompt_toolkit.formatted_text import StyleAndTextTuples
from prompt_toolkit.formatted_text.base import OneStyleAndTextTuple
@@ -33,21 +33,26 @@ def split_lines(
input_fragments: StyleAndTextTuples) -> List[StyleAndTextTuples]:
"""Break a flattened list of StyleAndTextTuples into a list of lines.
- Ending line breaks are preserved."""
+ Ending line breaks are not preserved."""
lines: List[StyleAndTextTuples] = []
this_line: StyleAndTextTuples = []
for item in input_fragments:
- this_line.append(item)
if item[1].endswith('\n'):
+ # If there are no elements in this line except for a linebreak add
+ # an empty StyleAndTextTuple so this line isn't an empty list.
+ if len(this_line) == 0 and item[1] == '\n':
+ this_line.append((item[0], item[1][:-1]))
lines.append(this_line)
this_line = []
+ else:
+ this_line.append(item)
return lines
def insert_linebreaks(
input_fragments: StyleAndTextTuples,
max_line_width: int,
- truncate_long_lines: bool = True) -> tuple[StyleAndTextTuples, int]:
+ truncate_long_lines: bool = True) -> Tuple[StyleAndTextTuples, int]:
"""Add line breaks at max_line_width if truncate_long_lines is True.
Returns input_fragments with each character as it's own formatted text
diff --git a/pw_console/py/pw_console/widgets/border.py b/pw_console/py/pw_console/widgets/border.py
index d4624e9c9..0cf1170ef 100644
--- a/pw_console/py/pw_console/widgets/border.py
+++ b/pw_console/py/pw_console/widgets/border.py
@@ -13,10 +13,11 @@
# the License.
"""Wrapper fuctions to add borders around prompt_toolkit containers."""
-from typing import List
+from typing import Callable, List, Optional, Union
from prompt_toolkit.layout import (
AnyContainer,
+ FormattedTextControl,
HSplit,
VSplit,
Window,
@@ -26,9 +27,10 @@ from prompt_toolkit.layout import (
def create_border(
# pylint: disable=too-many-arguments
content: AnyContainer,
- content_height: int,
- border_style: str = '',
- base_style: str = '',
+ content_height: Optional[int] = None,
+ title: str = '',
+ border_style: Union[Callable[[], str], str] = '',
+ base_style: Union[Callable[[], str], str] = '',
top: bool = True,
bottom: bool = True,
left: bool = True,
@@ -48,9 +50,17 @@ def create_border(
if left:
top_border_items.append(
Window(width=1, height=1, char=top_left_char, style=border_style))
+
+ title_text = None
+ if title:
+ title_text = FormattedTextControl([
+ ('', f'{horizontal_char}{horizontal_char} {title} ')
+ ])
+
top_border_items.append(
Window(
- char='━',
+ title_text,
+ char=horizontal_char,
# Expand width to max available space
dont_extend_width=False,
style=border_style))
diff --git a/pw_console/py/pw_console/widgets/mouse_handlers.py b/pw_console/py/pw_console/widgets/mouse_handlers.py
index 6a84cf3ae..af3c58644 100644
--- a/pw_console/py/pw_console/widgets/mouse_handlers.py
+++ b/pw_console/py/pw_console/widgets/mouse_handlers.py
@@ -13,10 +13,15 @@
# the License.
"""Functions for handling mouse events."""
-from typing import Callable
+from typing import Callable, TYPE_CHECKING
+from prompt_toolkit.layout.mouse_handlers import MouseHandlers
from prompt_toolkit.mouse_events import MouseEvent, MouseEventType
+if TYPE_CHECKING:
+ # pylint: disable=ungrouped-imports
+ from prompt_toolkit.key_binding.key_bindings import NotImplementedOrNone
+
def on_click(on_click_function: Callable, mouse_event: MouseEvent):
"""Run a function on mouse click.
@@ -44,3 +49,16 @@ def on_click(on_click_function: Callable, mouse_event: MouseEvent):
on_click_function()
return None
return NotImplemented
+
+
+class EmptyMouseHandler(MouseHandlers):
+ """MouseHandler that does not propagate events."""
+ def set_mouse_handler_for_range(
+ self,
+ x_min: int,
+ x_max: int,
+ y_min: int,
+ y_max: int,
+ handler: Callable[[MouseEvent], 'NotImplementedOrNone'],
+ ) -> None:
+ return
diff --git a/pw_console/py/pw_console/widgets/table.py b/pw_console/py/pw_console/widgets/table.py
index 6a5d2aa76..ea3154392 100644
--- a/pw_console/py/pw_console/widgets/table.py
+++ b/pw_console/py/pw_console/widgets/table.py
@@ -30,13 +30,10 @@ class TableView:
# Should allow for string format, column color, and column ordering.
FLOAT_FORMAT = '%.3f'
INT_FORMAT = '%s'
- LAST_TABLE_COLUMN_NAMES = ['msg', 'message', 'file']
+ LAST_TABLE_COLUMN_NAMES = ['msg', 'message']
def __init__(self, prefs: ConsolePrefs):
- self.prefs = prefs
- # Max column widths of each log field
- self.column_padding = ' ' * self.prefs.spaces_between_columns
-
+ self.set_prefs(prefs)
self.column_widths: collections.OrderedDict = collections.OrderedDict()
self._header_fragment_cache = None
@@ -45,13 +42,15 @@ class TableView:
self.column_widths['time'] = self._default_time_width
self.column_widths['level'] = 3
self._year_month_day_width: int = 9
- if self.prefs.hide_date_from_log_time:
- self.column_widths['time'] = (self._default_time_width -
- self._year_month_day_width)
# Width of all columns except the final message
self.column_width_prefix_total = 0
+ def set_prefs(self, prefs: ConsolePrefs) -> None:
+ self.prefs = prefs
+ # Max column widths of each log field
+ self.column_padding = ' ' * self.prefs.spaces_between_columns
+
def all_column_names(self):
columns_names = [
name for name, _width in self._ordered_column_widths()
@@ -90,6 +89,8 @@ class TableView:
del ordered_columns['py_file']
if not self.prefs.show_python_logger and 'py_logger' in ordered_columns:
del ordered_columns['py_logger']
+ if not self.prefs.show_source_file and 'file' in ordered_columns:
+ del ordered_columns['file']
return ordered_columns.items()
@@ -121,9 +122,15 @@ class TableView:
default_style = 'bold'
fragments: collections.deque = collections.deque()
+ # Update time column width to current prefs setting
+ self.column_widths['time'] = self._default_time_width
+ if self.prefs.hide_date_from_log_time:
+ self.column_widths['time'] = (self._default_time_width -
+ self._year_month_day_width)
+
for name, width in self._ordered_column_widths():
# These fields will be shown at the end
- if name in ['msg', 'message', 'file']:
+ if name in ['msg', 'message']:
continue
fragments.append(
(default_style, name.title()[:width].ljust(width)))
@@ -156,7 +163,7 @@ class TableView:
columns = {}
for name, width in self._ordered_column_widths():
# Skip these modifying these fields
- if name in ['msg', 'message', 'file']:
+ if name in ['msg', 'message']:
continue
# hasattr checks are performed here since a log record may not have
@@ -223,9 +230,6 @@ class TableView:
# Add to columns
columns['message'] = message
- # TODO(tonymd): Display 'file' metadata right justified after the
- # message? It could also appear in the column section.
-
index_modifier = 0
# Go through columns and convert to FormattedText where needed.
for i, column in enumerate(columns.items()):
diff --git a/pw_console/py/pw_console/widgets/window_pane.py b/pw_console/py/pw_console/widgets/window_pane.py
index 3b418486f..ab2484a74 100644
--- a/pw_console/py/pw_console/widgets/window_pane.py
+++ b/pw_console/py/pw_console/widgets/window_pane.py
@@ -21,9 +21,11 @@ from prompt_toolkit.layout.dimension import AnyDimension
from prompt_toolkit.filters import Condition
from prompt_toolkit.layout import (
+ AnyContainer,
ConditionalContainer,
Dimension,
HSplit,
+ walk,
)
from pw_console.get_pw_console_app import get_pw_console_app
@@ -82,7 +84,7 @@ class WindowPane(ABC):
self.application = get_pw_console_app()
self._pane_title = pane_title
- self._pane_subtitle = None
+ self._pane_subtitle: str = ''
# Default width and height to 10 lines each. They will be resized by the
# WindowManager later.
@@ -199,3 +201,11 @@ class WindowPane(ABC):
style=functools.partial(pw_console.style.get_pane_style, self),
),
filter=Condition(lambda: self.show_pane))
+
+ def has_child_container(self, child_container: AnyContainer) -> bool:
+ if not child_container:
+ return False
+ for container in walk(self.__pt_container__()):
+ if container == child_container:
+ return True
+ return False
diff --git a/pw_console/py/pw_console/widgets/window_pane_toolbar.py b/pw_console/py/pw_console/widgets/window_pane_toolbar.py
index e594afe45..3c16beb62 100644
--- a/pw_console/py/pw_console/widgets/window_pane_toolbar.py
+++ b/pw_console/py/pw_console/widgets/window_pane_toolbar.py
@@ -39,8 +39,8 @@ import pw_console.widgets.mouse_handlers
_LOG = logging.getLogger(__package__)
-class ResizeHandle(FormattedTextControl):
- """Button to initiate window resize drag events."""
+class WindowPaneResizeHandle(FormattedTextControl):
+ """Button to initiate window pane resize drag events."""
def __init__(self, parent_window_pane: Any, *args, **kwargs) -> None:
self.parent_window_pane = parent_window_pane
super().__init__(*args, **kwargs)
@@ -66,9 +66,13 @@ class WindowPaneToolbar:
def get_left_text_tokens(self):
"""Return toolbar indicator and title."""
- title = ' {} '.format(self.title)
+ title = self.title
+ if not title and self.parent_window_pane:
+ # No title was set, fetch the parent window pane title if available.
+ parent_pane_title = self.parent_window_pane.pane_title()
+ title = parent_pane_title if parent_pane_title else title
return pw_console.style.get_pane_indicator(self.focus_check_container,
- title,
+ f' {title} ',
self.focus_mouse_handler)
def get_center_text_tokens(self):
@@ -134,7 +138,7 @@ class WindowPaneToolbar:
def get_resize_handle(self):
return pw_console.style.get_pane_indicator(self.focus_check_container,
- '====',
+ '─══─',
hide_indicator=True)
def add_button(self, button: ToolbarButton):
@@ -161,8 +165,6 @@ class WindowPaneToolbar:
# Set parent_window_pane related options
if self.parent_window_pane:
- if not title:
- self.title = self.parent_window_pane.pane_title()
if not subtitle:
self.subtitle = self.parent_window_pane.pane_subtitle
self.focus_check_container = self.parent_window_pane
@@ -219,7 +221,7 @@ class WindowPaneToolbar:
]
if self.parent_window_pane and include_resize_handle:
resize_handle = Window(
- content=ResizeHandle(
+ content=WindowPaneResizeHandle(
self.parent_window_pane,
self.get_resize_handle,
),
diff --git a/pw_console/py/pw_console/window_list.py b/pw_console/py/pw_console/window_list.py
index a38eb9a76..ec21b5810 100644
--- a/pw_console/py/pw_console/window_list.py
+++ b/pw_console/py/pw_console/window_list.py
@@ -17,7 +17,7 @@ import collections
from enum import Enum
import functools
import logging
-from typing import Any, Callable, List, Optional, TYPE_CHECKING
+from typing import Any, List, Optional, TYPE_CHECKING
from prompt_toolkit.filters import has_focus
from prompt_toolkit.layout import (
@@ -29,7 +29,6 @@ from prompt_toolkit.layout import (
Window,
WindowAlign,
)
-from prompt_toolkit.layout.mouse_handlers import MouseHandlers
from prompt_toolkit.mouse_events import MouseEvent, MouseEventType, MouseButton
import pw_console.style
@@ -37,7 +36,6 @@ import pw_console.widgets.mouse_handlers
if TYPE_CHECKING:
# pylint: disable=ungrouped-imports
- from prompt_toolkit.key_binding.key_bindings import NotImplementedOrNone
from pw_console.window_manager import WindowManager
_LOG = logging.getLogger(__package__)
@@ -55,21 +53,6 @@ DEFAULT_DISPLAY_MODE = DisplayMode.STACK
_WINDOW_HEIGHT_ADJUST = 1
-class MouseHandlerWithOverride(MouseHandlers):
- """
- Two dimensional raster of callbacks for mouse events.
- """
- def set_mouse_handler_for_range(
- self,
- x_min: int,
- x_max: int,
- y_min: int,
- y_max: int,
- handler: Callable[[MouseEvent], 'NotImplementedOrNone'],
- ) -> None:
- return
-
-
class WindowListHSplit(HSplit):
"""PromptToolkit HSplit class with some additions for size and mouse resize.
@@ -95,7 +78,8 @@ class WindowListHSplit(HSplit):
# Is resize mode active?
if self.parent_window_list.resize_mode:
# Ignore future mouse_handler updates.
- new_mouse_handlers = MouseHandlerWithOverride()
+ new_mouse_handlers = (
+ pw_console.widgets.mouse_handlers.EmptyMouseHandler())
# Set existing mouse_handlers to the parent_window_list's
# mouse_handler. This will handle triggering resize events.
mouse_handlers.set_mouse_handler_for_range(
@@ -105,10 +89,10 @@ class WindowListHSplit(HSplit):
write_position.ypos + write_position.height,
self.parent_window_list.mouse_handler)
- # Save the width and height for the current render pass. This will be
- # used by the log pane to render the correct amount of log lines.
+ # Save the width, height, and draw position for the current render pass.
self.parent_window_list.update_window_list_size(
- write_position.width, write_position.height)
+ write_position.width, write_position.height, write_position.xpos,
+ write_position.ypos)
# Continue writing content to the screen.
super().write_to_screen(screen, new_mouse_handlers, write_position,
parent_style, erase_bg, z_index)
@@ -130,6 +114,11 @@ class WindowList:
self.last_window_list_width: int = 0
self.last_window_list_height: int = 0
+ self.current_window_list_xposition: int = 0
+ self.last_window_list_xposition: int = 0
+ self.current_window_list_yposition: int = 0
+ self.last_window_list_yposition: int = 0
+
self.display_mode = DEFAULT_DISPLAY_MODE
self.active_panes: collections.deque = collections.deque()
self.focused_pane_index: Optional[int] = None
@@ -153,6 +142,7 @@ class WindowList:
available_height = self.current_window_list_height
remaining_rows = available_height - sum(heights)
window_index = 0
+
# Distribute remaining unaccounted rows to each window in turn.
while remaining_rows > 0:
# 0 heights are hiden windows, only add +1 to visible windows.
@@ -213,8 +203,21 @@ class WindowList:
def get_current_active_pane(self):
"""Return the current active window pane."""
focused_pane = None
+
+ command_runner_focused_pane = None
+ if self.application.command_runner_is_open():
+ command_runner_focused_pane = (
+ self.application.command_runner_last_focused_pane())
+
for index, pane in enumerate(self.active_panes):
+ in_focus = False
if has_focus(pane)():
+ in_focus = True
+ elif command_runner_focused_pane and pane.has_child_container(
+ command_runner_focused_pane):
+ in_focus = True
+
+ if in_focus:
focused_pane = pane
self.focused_pane_index = index
break
@@ -297,7 +300,8 @@ class WindowList:
self._set_window_heights(new_heights)
- def update_window_list_size(self, width, height):
+ def update_window_list_size(self, width, height, xposition,
+ yposition) -> None:
"""Save width and height of the repl pane for the current UI render
pass."""
if width:
@@ -306,6 +310,14 @@ class WindowList:
if height:
self.last_window_list_height = self.current_window_list_height
self.current_window_list_height = height
+ if xposition:
+ self.last_window_list_xposition = (
+ self.current_window_list_xposition)
+ self.current_window_list_xposition = xposition
+ if yposition:
+ self.last_window_list_yposition = (
+ self.current_window_list_yposition)
+ self.current_window_list_yposition = yposition
if (self.current_window_list_width != self.last_window_list_width
or self.current_window_list_height !=
@@ -449,17 +461,29 @@ class WindowList:
if pane:
self.adjust_pane_size(pane, -_WINDOW_HEIGHT_ADJUST)
- def mouse_resize(self, _xpos, ypos):
+ def mouse_resize(self, _xpos, ypos) -> None:
+ if self.resize_target_pane_index is None:
+ return
+
target_pane = self.active_panes[self.resize_target_pane_index]
diff = ypos - self.resize_current_row
+ if not self.window_manager.vertical_window_list_spliting():
+ # The mouse ypos value includes rows from other window lists. If
+ # horizontal splitting is active we need to check the diff relative
+ # to the starting y position row. Subtract the start y position and
+ # an additional 1 for the top menu bar.
+ diff -= self.current_window_list_yposition - 1
+
if diff == 0:
return
self.adjust_pane_size(target_pane, diff)
self._update_resize_current_row()
self.application.redraw_ui()
- def adjust_pane_size(self, pane, diff: int = _WINDOW_HEIGHT_ADJUST):
+ def adjust_pane_size(self,
+ pane,
+ diff: int = _WINDOW_HEIGHT_ADJUST) -> None:
"""Increase or decrease a given pane's height."""
# Placeholder next_pane value to allow setting width and height without
# any consequences if there is no next visible pane.
diff --git a/pw_console/py/pw_console/window_manager.py b/pw_console/py/pw_console/window_manager.py
index 091e857d2..044751bf8 100644
--- a/pw_console/py/pw_console/window_manager.py
+++ b/pw_console/py/pw_console/window_manager.py
@@ -19,35 +19,148 @@ import functools
from itertools import chain
import logging
import operator
-from typing import Any, Dict, Iterable, List
+from typing import Any, Dict, Iterable, List, Optional
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.layout import (
Dimension,
HSplit,
VSplit,
+ FormattedTextControl,
+ Window,
+ WindowAlign,
)
+from prompt_toolkit.mouse_events import MouseEvent, MouseEventType, MouseButton
from prompt_toolkit.widgets import MenuItem
from pw_console.console_prefs import ConsolePrefs, error_unknown_window
from pw_console.log_pane import LogPane
import pw_console.widgets.checkbox
from pw_console.widgets import WindowPaneToolbar
+import pw_console.widgets.mouse_handlers
from pw_console.window_list import WindowList, DisplayMode
_LOG = logging.getLogger(__package__)
-# Weighted amount for adjusting window dimensions when enlarging and shrinking.
+# Amount for adjusting window dimensions when enlarging and shrinking.
_WINDOW_SPLIT_ADJUST = 1
+class WindowListResizeHandle(FormattedTextControl):
+ """Button to initiate window list resize drag events."""
+ def __init__(self, window_manager, window_list: Any, *args,
+ **kwargs) -> None:
+ self.window_manager = window_manager
+ self.window_list = window_list
+ super().__init__(*args, **kwargs)
+
+ def mouse_handler(self, mouse_event: MouseEvent):
+ """Mouse handler for this control."""
+ # Start resize mouse drag event
+ if mouse_event.event_type == MouseEventType.MOUSE_DOWN:
+ self.window_manager.start_resize(self.window_list)
+ # Mouse event handled, return None.
+ return None
+
+ # Mouse event not handled, return NotImplemented.
+ return NotImplemented
+
+
+class WindowManagerVSplit(VSplit):
+ """PromptToolkit VSplit class with some additions for size and mouse resize.
+
+ This VSplit has a write_to_screen function that saves the width and height
+ of the container for the current render pass. It also handles overriding
+ mouse handlers for triggering window resize adjustments.
+ """
+ def __init__(self, parent_window_manager, *args, **kwargs):
+ # Save a reference to the parent window pane.
+ self.parent_window_manager = parent_window_manager
+ super().__init__(*args, **kwargs)
+
+ def write_to_screen(
+ self,
+ screen,
+ mouse_handlers,
+ write_position,
+ parent_style: str,
+ erase_bg: bool,
+ z_index: Optional[int],
+ ) -> None:
+ new_mouse_handlers = mouse_handlers
+ # Is resize mode active?
+ if self.parent_window_manager.resize_mode:
+ # Ignore future mouse_handler updates.
+ new_mouse_handlers = (
+ pw_console.widgets.mouse_handlers.EmptyMouseHandler())
+ # Set existing mouse_handlers to the parent_window_managers's
+ # mouse_handler. This will handle triggering resize events.
+ mouse_handlers.set_mouse_handler_for_range(
+ write_position.xpos,
+ write_position.xpos + write_position.width,
+ write_position.ypos,
+ write_position.ypos + write_position.height,
+ self.parent_window_manager.mouse_handler)
+
+ # Save the width and height for the current render pass.
+ self.parent_window_manager.update_window_manager_size(
+ write_position.width, write_position.height)
+ # Continue writing content to the screen.
+ super().write_to_screen(screen, new_mouse_handlers, write_position,
+ parent_style, erase_bg, z_index)
+
+
+class WindowManagerHSplit(HSplit):
+ """PromptToolkit HSplit class with some additions for size and mouse resize.
+
+ This HSplit has a write_to_screen function that saves the width and height
+ of the container for the current render pass. It also handles overriding
+ mouse handlers for triggering window resize adjustments.
+ """
+ def __init__(self, parent_window_manager, *args, **kwargs):
+ # Save a reference to the parent window pane.
+ self.parent_window_manager = parent_window_manager
+ super().__init__(*args, **kwargs)
+
+ def write_to_screen(
+ self,
+ screen,
+ mouse_handlers,
+ write_position,
+ parent_style: str,
+ erase_bg: bool,
+ z_index: Optional[int],
+ ) -> None:
+ new_mouse_handlers = mouse_handlers
+ # Is resize mode active?
+ if self.parent_window_manager.resize_mode:
+ # Ignore future mouse_handler updates.
+ new_mouse_handlers = (
+ pw_console.widgets.mouse_handlers.EmptyMouseHandler())
+ # Set existing mouse_handlers to the parent_window_managers's
+ # mouse_handler. This will handle triggering resize events.
+ mouse_handlers.set_mouse_handler_for_range(
+ write_position.xpos,
+ write_position.xpos + write_position.width,
+ write_position.ypos,
+ write_position.ypos + write_position.height,
+ self.parent_window_manager.mouse_handler)
+
+ # Save the width and height for the current render pass.
+ self.parent_window_manager.update_window_manager_size(
+ write_position.width, write_position.height)
+ # Continue writing content to the screen.
+ super().write_to_screen(screen, new_mouse_handlers, write_position,
+ parent_style, erase_bg, z_index)
+
+
class WindowManager:
"""WindowManager class
This class handles adding/removing/resizing windows and rendering the
prompt_toolkit split layout."""
- # pylint: disable=too-many-public-methods
+ # pylint: disable=too-many-public-methods,too-many-instance-attributes
def __init__(
self,
@@ -60,67 +173,139 @@ class WindowManager:
self.top_toolbars: List[WindowPaneToolbar] = []
self.bottom_toolbars: List[WindowPaneToolbar] = []
+ self.resize_mode: bool = False
+ self.resize_target_window_list_index: Optional[int] = None
+ self.resize_target_window_list: Optional[int] = None
+ self.resize_current_row: int = 0
+ self.resize_current_column: int = 0
+
+ self.current_window_manager_width: int = 0
+ self.current_window_manager_height: int = 0
+ self.last_window_manager_width: int = 0
+ self.last_window_manager_height: int = 0
+
+ def update_window_manager_size(self, width, height):
+ """Save width and height for the current UI render pass."""
+ if width:
+ self.last_window_manager_width = self.current_window_manager_width
+ self.current_window_manager_width = width
+ if height:
+ self.last_window_manager_height = self.current_window_manager_height
+ self.current_window_manager_height = height
+
+ if (self.current_window_manager_width != self.last_window_manager_width
+ or self.current_window_manager_height !=
+ self.last_window_manager_height):
+ self.rebalance_window_list_sizes()
+
+ def _set_window_list_sizes(self, new_heights: List[int],
+ new_widths: List[int]) -> None:
+ for window_list in self.window_lists:
+ window_list.height = Dimension(preferred=new_heights[0])
+ new_heights = new_heights[1:]
+ window_list.width = Dimension(preferred=new_widths[0])
+ new_widths = new_widths[1:]
+
+ def vertical_window_list_spliting(self) -> bool:
+ return self.application.prefs.window_column_split_method == 'vertical'
+
+ def rebalance_window_list_sizes(self) -> None:
+ """Adjust relative split sizes to fill available space."""
+ available_height = self.current_window_manager_height
+ available_width = self.current_window_manager_width
+
+ old_heights = [w.height.preferred for w in self.window_lists]
+ old_widths = [w.width.preferred for w in self.window_lists]
+
+ # Make sure the old totals are not zero.
+ old_height_total = max(sum(old_heights), 1)
+ old_width_total = max(sum(old_widths), 1)
+
+ height_percentages = [
+ value / old_height_total for value in old_heights
+ ]
+ width_percentages = [value / old_width_total for value in old_widths]
+
+ new_heights = [
+ int(available_height * percentage)
+ for percentage in height_percentages
+ ]
+ new_widths = [
+ int(available_width * percentage)
+ for percentage in width_percentages
+ ]
+
+ if self.vertical_window_list_spliting():
+ new_heights = [
+ self.current_window_manager_height for h in new_heights
+ ]
+ else:
+ new_widths = [
+ self.current_window_manager_width for h in new_widths
+ ]
+
+ self._set_window_list_sizes(new_heights, new_widths)
+
def _create_key_bindings(self) -> KeyBindings:
- bindings = KeyBindings()
+ key_bindings = KeyBindings()
+ register = self.application.prefs.register_keybinding
- @bindings.add('escape', 'c-left') # Alt-Ctrl-
+ @register('window-manager.move-pane-left', key_bindings)
def move_pane_left(_event):
"""Move window pane left."""
self.move_pane_left()
- @bindings.add('escape', 'c-right') # Alt-Ctrl-
+ @register('window-manager.move-pane-right', key_bindings)
def move_pane_right(_event):
"""Move window pane right."""
self.move_pane_right()
- # NOTE: c-up and c-down seem swapped in prompt_toolkit
- @bindings.add('escape', 'c-up') # Alt-Ctrl-
+ @register('window-manager.move-pane-down', key_bindings)
def move_pane_down(_event):
"""Move window pane down."""
self.move_pane_down()
- # NOTE: c-up and c-down seem swapped in prompt_toolkit
- @bindings.add('escape', 'c-down') # Alt-Ctrl-
+ @register('window-manager.move-pane-up', key_bindings)
def move_pane_up(_event):
"""Move window pane up."""
self.move_pane_up()
- @bindings.add('escape', '=') # Alt-= (mnemonic: Alt Plus)
+ @register('window-manager.enlarge-pane', key_bindings)
def enlarge_pane(_event):
"""Enlarge the active window pane."""
self.enlarge_pane()
- @bindings.add('escape', '-') # Alt-minus (mnemonic: Alt Minus)
+ @register('window-manager.shrink-pane', key_bindings)
def shrink_pane(_event):
"""Shrink the active window pane."""
self.shrink_pane()
- @bindings.add('escape', ',') # Alt-, (mnemonic: Alt <)
+ @register('window-manager.shrink-split', key_bindings)
def shrink_split(_event):
"""Shrink the current window split."""
self.shrink_split()
- @bindings.add('escape', '.') # Alt-. (mnemonic: Alt >)
+ @register('window-manager.enlarge-split', key_bindings)
def enlarge_split(_event):
"""Enlarge the current window split."""
self.enlarge_split()
- @bindings.add('escape', 'c-p') # Ctrl-Alt-p
+ @register('window-manager.focus-prev-pane', key_bindings)
def focus_prev_pane(_event):
"""Switch focus to the previous window pane or tab."""
self.focus_previous_pane()
- @bindings.add('escape', 'c-n') # Ctrl-Alt-n
+ @register('window-manager.focus-next-pane', key_bindings)
def focus_next_pane(_event):
"""Switch focus to the next window pane or tab."""
self.focus_next_pane()
- @bindings.add('c-u')
+ @register('window-manager.balance-window-panes', key_bindings)
def balance_window_panes(_event):
"""Balance all window sizes."""
self.balance_window_sizes()
- return bindings
+ return key_bindings
def delete_empty_window_lists(self):
empty_lists = [
@@ -137,31 +322,56 @@ class WindowManager:
self.bottom_toolbars.append(toolbar)
def create_root_container(self):
- """Create a vertical or horizontal split container for all active
- panes."""
+ """Create vertical or horizontal splits for all active panes."""
self.delete_empty_window_lists()
for window_list in self.window_lists:
window_list.update_container()
- window_containers = [
- window_list.container for window_list in self.window_lists
- ]
-
- if self.application.prefs.window_column_split_method == 'horizontal':
- split = HSplit(
- window_containers,
- padding=1,
- padding_char='─',
- padding_style='class:pane_separator',
- )
- else: # vertical
- split = VSplit(
- window_containers,
- padding=1,
- padding_char='│',
- padding_style='class:pane_separator',
- )
+ vertical_split = self.vertical_window_list_spliting()
+
+ window_containers = []
+ for i, window_list in enumerate(self.window_lists):
+ window_containers.append(window_list.container)
+ if (i + 1) >= len(self.window_lists):
+ continue
+
+ if vertical_split:
+ separator_padding = Window(
+ content=WindowListResizeHandle(self, window_list, "│"),
+ char='│',
+ width=1,
+ dont_extend_height=False,
+ )
+ resize_separator = HSplit(
+ [
+ separator_padding,
+ Window(
+ content=WindowListResizeHandle(
+ self, window_list, "║\n║\n║"),
+ char='│',
+ width=1,
+ dont_extend_height=True,
+ ),
+ separator_padding,
+ ],
+ style='class:pane_separator',
+ )
+ else:
+ resize_separator = Window(
+ content=WindowListResizeHandle(self, window_list, "════"),
+ char='─',
+ height=1,
+ align=WindowAlign.CENTER,
+ dont_extend_width=False,
+ style='class:pane_separator',
+ )
+ window_containers.append(resize_separator)
+
+ if vertical_split:
+ split = WindowManagerVSplit(self, window_containers)
+ else:
+ split = WindowManagerHSplit(self, window_containers)
split_items = []
split_items.extend(self.top_toolbars)
@@ -184,7 +394,7 @@ class WindowManager:
break
return active_window_list, active_pane
- def window_list_index(self, window_list: WindowList):
+ def window_list_index(self, window_list: WindowList) -> Optional[int]:
index = None
try:
index = self.window_lists.index(window_list)
@@ -211,7 +421,7 @@ class WindowManager:
"""Focus on the next visible window pane or tab."""
active_window_list, active_pane = (
self._get_active_window_list_and_pane())
- if not active_window_list:
+ if active_window_list is None:
return
# Total count of window lists and panes
@@ -220,6 +430,8 @@ class WindowManager:
# Get currently focused indices
active_window_list_index = self.window_list_index(active_window_list)
+ if active_window_list_index is None:
+ return
active_pane_index = active_window_list.pane_index(active_pane)
increment = -1 if reverse_order else 1
@@ -286,6 +498,7 @@ class WindowManager:
# Add the new WindowList
target_window_list = WindowList(self)
self.window_lists.appendleft(target_window_list)
+ self.reset_split_sizes()
# New index is 0
target_window_list_index = 0
@@ -295,6 +508,7 @@ class WindowManager:
# Move the pane
active_window_list.remove_pane_no_checks(active_pane)
target_window_list.add_pane(active_pane, add_at_beginning=True)
+ target_window_list.reset_pane_sizes()
self.delete_empty_window_lists()
def move_pane_right(self):
@@ -312,6 +526,7 @@ class WindowManager:
# Add a new WindowList
target_window_list = WindowList(self)
self.window_lists.append(target_window_list)
+ self.reset_split_sizes()
# Get the destination window_list
target_window_list = self.window_lists[target_window_list_index]
@@ -319,6 +534,7 @@ class WindowManager:
# Move the pane
active_window_list.remove_pane_no_checks(active_pane)
target_window_list.add_pane(active_pane, add_at_beginning=True)
+ target_window_list.reset_pane_sizes()
self.delete_empty_window_lists()
def move_pane_up(self):
@@ -379,20 +595,22 @@ class WindowManager:
def reset_split_sizes(self):
"""Reset all active pane width and height to defaults"""
- for window_list in self.window_lists:
- window_list.height = Dimension(preferred=10)
- window_list.width = Dimension(preferred=10)
-
- def adjust_split_size(self,
- window_list: WindowList,
- diff: int = _WINDOW_SPLIT_ADJUST):
- """Increase or decrease a given window_list's vertical split width."""
- # No need to resize if only one split.
- if len(self.window_lists) < 2:
- return
-
- # Get the next split to subtract a weight value from.
+ available_height = self.current_window_manager_height
+ available_width = self.current_window_manager_width
+ old_heights = [w.height.preferred for w in self.window_lists]
+ old_widths = [w.width.preferred for w in self.window_lists]
+ new_heights = [int(available_height / len(old_heights))
+ ] * len(old_heights)
+ new_widths = [int(available_width / len(old_widths))] * len(old_widths)
+
+ self._set_window_list_sizes(new_heights, new_widths)
+
+ def _get_next_window_list_for_resizing(
+ self, window_list: WindowList) -> Optional[WindowList]:
window_list_index = self.window_list_index(window_list)
+ if window_list_index is None:
+ return None
+
next_window_list_index = ((window_list_index + 1) %
len(self.window_lists))
@@ -401,31 +619,62 @@ class WindowManager:
next_window_list_index = window_list_index - 1
next_window_list = self.window_lists[next_window_list_index]
+ return next_window_list
+
+ def adjust_split_size(self,
+ window_list: WindowList,
+ diff: int = _WINDOW_SPLIT_ADJUST) -> None:
+ """Increase or decrease a given window_list's vertical split width."""
+ # No need to resize if only one split.
+ if len(self.window_lists) < 2:
+ return
+
+ # Get the next split to subtract from.
+ next_window_list = self._get_next_window_list_for_resizing(window_list)
+ if not next_window_list:
+ return
- # Get current weight values
- old_width = window_list.width.preferred
- next_old_width = next_window_list.width.preferred # type: ignore
+ if self.vertical_window_list_spliting():
+ # Get current width
+ old_value = window_list.width.preferred
+ next_old_value = next_window_list.width.preferred # type: ignore
+ else:
+ # Get current height
+ old_value = window_list.height.preferred
+ next_old_value = next_window_list.height.preferred # type: ignore
# Add to the current split
- new_width = old_width + diff
- if new_width <= 0:
- new_width = old_width
+ new_value = old_value + diff
+ if new_value <= 0:
+ new_value = old_value
# Subtract from the next split
- next_new_width = next_old_width - diff
- if next_new_width <= 0:
- next_new_width = next_old_width
+ next_new_value = next_old_value - diff
+ if next_new_value <= 0:
+ next_new_value = next_old_value
+
+ # If new height is too small or no change, make no adjustments.
+ if new_value < 3 or next_new_value < 3 or old_value == new_value:
+ return
- # Set new weight values
- window_list.width.preferred = new_width
- next_window_list.width.preferred = next_new_width # type: ignore
+ if self.vertical_window_list_spliting():
+ # Set new width
+ window_list.width.preferred = new_value
+ next_window_list.width.preferred = next_new_value # type: ignore
+ else:
+ # Set new height
+ window_list.height.preferred = new_value
+ next_window_list.height.preferred = next_new_value # type: ignore
+ window_list.rebalance_window_heights()
+ next_window_list.rebalance_window_heights()
def toggle_pane(self, pane):
"""Toggle a pane on or off."""
window_list, _pane_index = (
self._find_window_list_and_pane_index(pane))
- # Don't hide if tabbed mode is enabled, the container can't be rendered.
+ # Don't hide the window if tabbed mode is enabled. Switching to a
+ # separate tab is preffered.
if window_list.display_mode == DisplayMode.TABBED:
return
pane.show_pane = not pane.show_pane
@@ -440,7 +689,7 @@ class WindowManager:
"""Focus on the first visible container."""
for pane in self.active_panes():
if pane.show_pane:
- self.application.focus_on_container(pane)
+ self.application.application.layout.focus(pane)
break
def check_for_all_hidden_panes_and_unhide(self) -> None:
@@ -470,6 +719,116 @@ class WindowManager:
window_list, pane_index = self._find_window_list_and_pane_index(pane)
window_list.start_resize(pane, pane_index)
+ def mouse_resize(self, xpos, ypos):
+ if self.resize_target_window_list_index is None:
+ return
+ target_window_list = self.window_lists[
+ self.resize_target_window_list_index]
+
+ diff = ypos - self.resize_current_row
+ if self.vertical_window_list_spliting():
+ diff = xpos - self.resize_current_column
+ if diff == 0:
+ return
+
+ self.adjust_split_size(target_window_list, diff)
+ self._resize_update_current_row_column()
+ self.application.redraw_ui()
+
+ def mouse_handler(self, mouse_event: MouseEvent):
+ """MouseHandler used when resize_mode == True."""
+ mouse_position = mouse_event.position
+
+ if (mouse_event.event_type == MouseEventType.MOUSE_MOVE
+ and mouse_event.button == MouseButton.LEFT):
+ self.mouse_resize(mouse_position.x, mouse_position.y)
+ elif mouse_event.event_type == MouseEventType.MOUSE_UP:
+ self.stop_resize()
+ # Mouse event handled, return None.
+ return None
+ else:
+ self.stop_resize()
+
+ # Mouse event not handled, return NotImplemented.
+ return NotImplemented
+
+ def _calculate_actual_widths(self) -> List[int]:
+ widths = [w.width.preferred for w in self.window_lists]
+
+ available_width = self.current_window_manager_width
+ # Subtract 1 for each separator
+ available_width -= len(self.window_lists) - 1
+ remaining_rows = available_width - sum(widths)
+ window_list_index = 0
+ # Distribute remaining unaccounted columns to each window in turn.
+ while remaining_rows > 0:
+ widths[window_list_index] += 1
+ remaining_rows -= 1
+ window_list_index = (window_list_index + 1) % len(widths)
+
+ return widths
+
+ def _calculate_actual_heights(self) -> List[int]:
+ heights = [w.height.preferred for w in self.window_lists]
+
+ available_height = self.current_window_manager_height
+ # Subtract 1 for each vertical separator
+ available_height -= len(self.window_lists) - 1
+ remaining_rows = available_height - sum(heights)
+ window_list_index = 0
+ # Distribute remaining unaccounted columns to each window in turn.
+ while remaining_rows > 0:
+ heights[window_list_index] += 1
+ remaining_rows -= 1
+ window_list_index = (window_list_index + 1) % len(heights)
+
+ return heights
+
+ def _resize_update_current_row_column(self) -> None:
+ if self.resize_target_window_list_index is None:
+ return
+
+ widths = self._calculate_actual_widths()
+ heights = self._calculate_actual_heights()
+
+ start_column = 0
+ start_row = 0
+
+ # Find the starting column
+ for i in range(self.resize_target_window_list_index + 1):
+ # If we are past the target window_list, exit the loop.
+ if i > self.resize_target_window_list_index:
+ break
+ start_column += widths[i]
+ start_row += heights[i]
+ if i < self.resize_target_window_list_index - 1:
+ start_column += 1
+ start_row += 1
+
+ self.resize_current_column = start_column
+ self.resize_current_row = start_row
+
+ def start_resize(self, window_list):
+ # Check the target window_list isn't the last one.
+ if window_list == self.window_lists[-1]:
+ return
+
+ list_index = self.window_list_index(window_list)
+ if list_index is None:
+ return
+
+ self.resize_mode = True
+ self.resize_target_window_list = window_list
+ self.resize_target_window_list_index = list_index
+ self._resize_update_current_row_column()
+
+ def stop_resize(self):
+ self.resize_mode = False
+ self.resize_target_window_list = None
+ self.resize_target_window_list_index = None
+ self.resize_current_row = 0
+ self.resize_current_column = 0
+
def _find_window_list_and_pane_index(self, pane: Any):
pane_index = None
parent_window_list = None
@@ -640,13 +999,13 @@ class WindowManager:
))
menu_items.extend(
MenuItem(
- '{index}: {title} {subtitle}'.format(
+ '{index}: {title}'.format(
index=pane_index + 1,
title=pane.menu_title(),
- subtitle=pane.pane_subtitle()),
+ ),
children=[
MenuItem(
- '{check} Show Window'.format(
+ '{check} Show/Hide Window'.format(
check=pw_console.widgets.checkbox.
to_checkbox_text(pane.show_pane, end='')),
handler=functools.partial(self.toggle_pane, pane),
diff --git a/pw_console/py/pw_console/yaml_config_loader_mixin.py b/pw_console/py/pw_console/yaml_config_loader_mixin.py
new file mode 100644
index 000000000..ff009a0f9
--- /dev/null
+++ b/pw_console/py/pw_console/yaml_config_loader_mixin.py
@@ -0,0 +1,154 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Yaml config file loader mixin."""
+
+import os
+import logging
+from pathlib import Path
+from typing import Any, Dict, Optional, Union
+
+import yaml
+
+_LOG = logging.getLogger(__package__)
+
+
+class MissingConfigTitle(Exception):
+ """Exception for when an existing YAML file is missing config_title."""
+
+
+class YamlConfigLoaderMixin:
+ """Yaml Config file loader mixin.
+
+ Use this mixin to load yaml file settings and save them into
+ ``self._config``. For example:
+
+ ::
+
+ class ConsolePrefs(YamlConfigLoaderMixin):
+            def __init__(self) -> None:
+                self.config_init(
+ config_section_title='pw_console',
+ project_file=Path('project_file.yaml'),
+ project_user_file=Path('project_user_file.yaml'),
+ user_file=Path('~/user_file.yaml'),
+ default_config={},
+ environment_var='PW_CONSOLE_CONFIG_FILE',
+ )
+
+ """
+ def config_init(
+ self,
+ config_section_title: str,
+ project_file: Union[Path, bool] = None,
+ project_user_file: Union[Path, bool] = None,
+ user_file: Union[Path, bool] = None,
+ default_config: Optional[Dict[Any, Any]] = None,
+ environment_var: Optional[str] = None,
+ ) -> None:
+ """Call this to load YAML config files in order of precedence.
+
+ The following files are loaded in this order:
+ 1. project_file
+ 2. project_user_file
+ 3. user_file
+
+ Lastly, if a valid file path is specified at
+ ``os.environ[environment_var]`` then load that file overriding all
+ config options.
+
+ Args:
+ config_section_title: String name of this config section. For
+ example: ``pw_console`` or ``pw_watch``. In the YAML file this
+ is represented by a ``config_title`` key.
+
+ ::
+
+ ---
+ config_title: pw_console
+
+ project_file: Project level config file. This is intended to be a
+ file living somewhere under a project folder and is checked into
+ the repo. It serves as a base config all developers can inherit
+ from.
+ project_user_file: User's personal config file for a specific
+ project. This can be a file that lives in a project folder that
+ is git-ignored and not checked into the repo.
+ user_file: A global user based config file. This is typically a file
+ in the users home directory and settings here apply to all
+ projects.
+ default_config: A Python dict representing the base default
+ config. This dict will be applied as a starting point before
+ loading any yaml files.
+ environment_var: The name of an environment variable to check for a
+ config file. If a config file exists there it will be loaded on
+ top of the default_config ignoring project and user files.
+ """
+
+ self._config_section_title: str = config_section_title
+ self.default_config = default_config if default_config else {}
+ self.reset_config()
+
+ if project_file and isinstance(project_file, Path):
+ self.project_file = Path(
+ os.path.expandvars(str(project_file.expanduser())))
+ self.load_config_file(self.project_file)
+
+ if project_user_file and isinstance(project_user_file, Path):
+ self.project_user_file = Path(
+ os.path.expandvars(str(project_user_file.expanduser())))
+ self.load_config_file(self.project_user_file)
+
+ if user_file and isinstance(user_file, Path):
+ self.user_file = Path(
+ os.path.expandvars(str(user_file.expanduser())))
+ self.load_config_file(self.user_file)
+
+ # Check for a config file specified by an environment variable.
+ if environment_var is None:
+ return
+ environment_config = os.environ.get(environment_var, None)
+ if environment_config:
+ env_file_path = Path(environment_config)
+ if not env_file_path.is_file():
+ raise FileNotFoundError(
+ f'Cannot load config file: {env_file_path}')
+ self.reset_config()
+ self.load_config_file(env_file_path)
+
+ def _update_config(self, cfg: Dict[Any, Any]) -> None:
+ if cfg is None:
+ cfg = {}
+ self._config.update(cfg)
+
+ def reset_config(self) -> None:
+ self._config: Dict[Any, Any] = {}
+ self._update_config(self.default_config)
+
+ def load_config_file(self, file_path: Path) -> None:
+ if not file_path.is_file():
+ return
+
+ cfgs = yaml.safe_load_all(file_path.read_text())
+
+ for cfg in cfgs:
+ if self._config_section_title in cfg:
+ self._update_config(cfg[self._config_section_title])
+
+ elif cfg.get('config_title', False) == self._config_section_title:
+ self._update_config(cfg)
+ else:
+ raise MissingConfigTitle(
+ '\n\nThe YAML config file "{}" is missing the expected '
+ '"config_title: {}" setting.'.format(
+ str(file_path), self._config_section_title))
diff --git a/pw_console/py/repl_pane_test.py b/pw_console/py/repl_pane_test.py
index 274e3e6fb..86f41cea7 100644
--- a/pw_console/py/repl_pane_test.py
+++ b/pw_console/py/repl_pane_test.py
@@ -30,6 +30,7 @@ from prompt_toolkit.output import (
)
from pw_console.console_app import ConsoleApp
+from pw_console.console_prefs import ConsolePrefs
from pw_console.repl_pane import ReplPane
from pw_console.pw_ptpython_repl import PwPtPythonRepl
@@ -113,7 +114,11 @@ if _PYTHON_3_8:
with create_app_session(output=FakeOutput()):
# Setup Mocks
- app = ConsoleApp(color_depth=ColorDepth.DEPTH_8_BIT)
+ app = ConsoleApp(color_depth=ColorDepth.DEPTH_8_BIT,
+ prefs=ConsolePrefs(project_file=False,
+ project_user_file=False,
+ user_file=False))
+
app.start_user_code_thread()
pw_ptpython_repl = app.pw_ptpython_repl
diff --git a/pw_console/py/table_test.py b/pw_console/py/table_test.py
index 06343d8b1..d8896c926 100644
--- a/pw_console/py/table_test.py
+++ b/pw_console/py/table_test.py
@@ -72,7 +72,9 @@ class TestTableView(unittest.TestCase):
def setUp(self):
# Show large diffs
self.maxDiff = None # pylint: disable=invalid-name
- self.prefs = ConsolePrefs(project_file=False, user_file=False)
+ self.prefs = ConsolePrefs(project_file=False,
+ project_user_file=False,
+ user_file=False)
self.prefs.reset_config()
@parameterized.expand([
diff --git a/pw_console/py/text_formatting_test.py b/pw_console/py/text_formatting_test.py
index 4ff8c7b80..340cc5816 100644
--- a/pw_console/py/text_formatting_test.py
+++ b/pw_console/py/text_formatting_test.py
@@ -212,9 +212,9 @@ class TestTextFormatting(unittest.TestCase):
'elit.\n'
).__pt_formatted_text__(),
[
- ANSI('Lorem\n').__pt_formatted_text__(),
- ANSI(' ipsum dolor\n').__pt_formatted_text__(),
- ANSI('elit.\n').__pt_formatted_text__(),
+ ANSI('Lorem').__pt_formatted_text__(),
+ ANSI(' ipsum dolor').__pt_formatted_text__(),
+ ANSI('elit.').__pt_formatted_text__(),
], # expected_lines
),
(
@@ -227,13 +227,26 @@ class TestTextFormatting(unittest.TestCase):
# [0] for the fragments, [1] is line_height
truncate_long_lines=False)[0],
[
- ANSI('Lorem\n').__pt_formatted_text__(),
- ANSI(' ipsum dolor si\n').__pt_formatted_text__(),
- ANSI('t amet, consect\n').__pt_formatted_text__(),
- ANSI('etur adipiscing\n').__pt_formatted_text__(),
- ANSI(' elit.\n').__pt_formatted_text__(),
+ ANSI('Lorem').__pt_formatted_text__(),
+ ANSI(' ipsum dolor si').__pt_formatted_text__(),
+ ANSI('t amet, consect').__pt_formatted_text__(),
+ ANSI('etur adipiscing').__pt_formatted_text__(),
+ ANSI(' elit.').__pt_formatted_text__(),
],
),
+ (
+ 'empty lines',
+ # Each line should have at least one StyleAndTextTuple but without
+ # an ending line break.
+ [
+ ('', '\n'),
+ ('', '\n'),
+ ],
+ [
+ [('', '')],
+ [('', '')],
+ ],
+ )
]) # yapf: disable
def test_split_lines(
self,
diff --git a/pw_console/py/window_manager_test.py b/pw_console/py/window_manager_test.py
index 7fd76c8f4..676d07cb4 100644
--- a/pw_console/py/window_manager_test.py
+++ b/pw_console/py/window_manager_test.py
@@ -23,12 +23,16 @@ from prompt_toolkit.output import ColorDepth
from prompt_toolkit.output import DummyOutput as FakeOutput
from pw_console.console_app import ConsoleApp
+from pw_console.console_prefs import ConsolePrefs
from pw_console.window_manager import _WINDOW_SPLIT_ADJUST
from pw_console.window_list import _WINDOW_HEIGHT_ADJUST, DisplayMode
def _create_console_app(logger_count=2):
- console_app = ConsoleApp(color_depth=ColorDepth.DEPTH_8_BIT)
+ console_app = ConsoleApp(color_depth=ColorDepth.DEPTH_8_BIT,
+ prefs=ConsolePrefs(project_file=False,
+ project_user_file=False,
+ user_file=False))
console_app.focus_on_container = MagicMock()
loggers = {}
@@ -41,18 +45,46 @@ def _create_console_app(logger_count=2):
return console_app
+_WINDOW_MANAGER_WIDTH = 80
+_WINDOW_MANAGER_HEIGHT = 30
_DEFAULT_WINDOW_WIDTH = 10
_DEFAULT_WINDOW_HEIGHT = 10
def _window_list_widths(window_manager):
+ window_manager.update_window_manager_size(_WINDOW_MANAGER_WIDTH,
+ _WINDOW_MANAGER_HEIGHT)
+
return [
window_list.width.preferred
for window_list in window_manager.window_lists
]
+def _window_list_heights(window_manager):
+ window_manager.update_window_manager_size(_WINDOW_MANAGER_WIDTH,
+ _WINDOW_MANAGER_HEIGHT)
+
+ return [
+ window_list.height.preferred
+ for window_list in window_manager.window_lists
+ ]
+
+
+def _window_pane_widths(window_manager, window_list_index=0):
+ window_manager.update_window_manager_size(_WINDOW_MANAGER_WIDTH,
+ _WINDOW_MANAGER_HEIGHT)
+
+ return [
+ pane.width.preferred
+ for pane in window_manager.window_lists[window_list_index].active_panes
+ ]
+
+
def _window_pane_heights(window_manager, window_list_index=0):
+ window_manager.update_window_manager_size(_WINDOW_MANAGER_WIDTH,
+ _WINDOW_MANAGER_HEIGHT)
+
return [
pane.height.preferred
for pane in window_manager.window_lists[window_list_index].active_panes
@@ -66,14 +98,14 @@ def _window_pane_counts(window_manager):
]
-def _window_pane_titles(window_manager):
+def window_pane_titles(window_manager):
return [[
pane.pane_title() + ' - ' + pane.pane_subtitle()
for pane in window_list.active_panes
] for window_list in window_manager.window_lists]
-def _target_list_and_pane(window_manager, list_index, pane_index):
+def target_list_and_pane(window_manager, list_index, pane_index):
# pylint: disable=protected-access
# Bypass prompt_toolkit has_focus()
window_manager._get_active_window_list_and_pane = (
@@ -100,23 +132,23 @@ class TestWindowManager(unittest.TestCase):
self.assertEqual([4], _window_pane_counts(window_manager))
# Move 2 windows to the right into their own splits
- _target_list_and_pane(window_manager, 0, 0)
+ target_list_and_pane(window_manager, 0, 0)
window_manager.move_pane_right()
- _target_list_and_pane(window_manager, 0, 0)
+ target_list_and_pane(window_manager, 0, 0)
window_manager.move_pane_right()
- _target_list_and_pane(window_manager, 1, 0)
+ target_list_and_pane(window_manager, 1, 0)
window_manager.move_pane_right()
# 3 splits, first split has 2 windows
self.assertEqual([2, 1, 1], _window_pane_counts(window_manager))
# Move the first window in the first split left
- _target_list_and_pane(window_manager, 0, 0)
+ target_list_and_pane(window_manager, 0, 0)
window_manager.move_pane_left()
# 4 splits, each with their own window
self.assertEqual([1, 1, 1, 1], _window_pane_counts(window_manager))
# Move the first window to the right
- _target_list_and_pane(window_manager, 0, 0)
+ target_list_and_pane(window_manager, 0, 0)
window_manager.move_pane_right()
# 3 splits, first split has 2 windows
self.assertEqual([2, 1, 1], _window_pane_counts(window_manager))
@@ -139,36 +171,37 @@ class TestWindowManager(unittest.TestCase):
window_manager = console_app.window_manager
- _target_list_and_pane(window_manager, 0, 0)
+ target_list_and_pane(window_manager, 0, 0)
# Should have one window list split of size 50.
self.assertEqual(
_window_list_widths(window_manager),
- [_DEFAULT_WINDOW_WIDTH],
+ [_WINDOW_MANAGER_WIDTH],
)
# Move one pane to the right, creating a new window_list split.
window_manager.move_pane_right()
- self.assertEqual(
- _window_list_widths(window_manager),
- [_DEFAULT_WINDOW_WIDTH, _DEFAULT_WINDOW_WIDTH],
- )
+
+ self.assertEqual(_window_list_widths(window_manager), [
+ int(_WINDOW_MANAGER_WIDTH / 2),
+ int(_WINDOW_MANAGER_WIDTH / 2),
+ ])
# Move another pane to the right twice, creating a third
# window_list split.
- _target_list_and_pane(window_manager, 0, 0)
+ target_list_and_pane(window_manager, 0, 0)
window_manager.move_pane_right()
# Above window pane is at a new location
- _target_list_and_pane(window_manager, 1, 0)
+ target_list_and_pane(window_manager, 1, 0)
window_manager.move_pane_right()
# Should have 3 splits now
self.assertEqual(
_window_list_widths(window_manager),
[
- _DEFAULT_WINDOW_WIDTH,
- _DEFAULT_WINDOW_WIDTH,
- _DEFAULT_WINDOW_WIDTH,
+ int(_WINDOW_MANAGER_WIDTH / 3),
+ int(_WINDOW_MANAGER_WIDTH / 3),
+ int(_WINDOW_MANAGER_WIDTH / 3),
],
)
@@ -176,44 +209,50 @@ class TestWindowManager(unittest.TestCase):
self.assertEqual(len(list(window_manager.active_panes())), 4)
# Target the middle split
- _target_list_and_pane(window_manager, 1, 0)
+ target_list_and_pane(window_manager, 1, 0)
# Shrink the middle split twice
window_manager.shrink_split()
window_manager.shrink_split()
self.assertEqual(
_window_list_widths(window_manager),
[
- _DEFAULT_WINDOW_WIDTH,
- _DEFAULT_WINDOW_WIDTH - (2 * _WINDOW_SPLIT_ADJUST),
- _DEFAULT_WINDOW_WIDTH + (2 * _WINDOW_SPLIT_ADJUST),
+ int(_WINDOW_MANAGER_WIDTH / 3),
+ int(_WINDOW_MANAGER_WIDTH / 3) -
+ (2 * _WINDOW_SPLIT_ADJUST),
+ int(_WINDOW_MANAGER_WIDTH / 3) +
+ (2 * _WINDOW_SPLIT_ADJUST),
],
)
# Target the first split
- _target_list_and_pane(window_manager, 0, 0)
+ target_list_and_pane(window_manager, 0, 0)
window_manager.reset_split_sizes()
# Shrink the first split twice
window_manager.shrink_split()
self.assertEqual(
_window_list_widths(window_manager),
[
- _DEFAULT_WINDOW_WIDTH - (1 * _WINDOW_SPLIT_ADJUST),
- _DEFAULT_WINDOW_WIDTH + (1 * _WINDOW_SPLIT_ADJUST),
- _DEFAULT_WINDOW_WIDTH,
+ int(_WINDOW_MANAGER_WIDTH / 3) -
+ (1 * _WINDOW_SPLIT_ADJUST),
+ int(_WINDOW_MANAGER_WIDTH / 3) +
+ (1 * _WINDOW_SPLIT_ADJUST),
+ int(_WINDOW_MANAGER_WIDTH / 3),
],
)
# Target the third (last) split
- _target_list_and_pane(window_manager, 2, 0)
+ target_list_and_pane(window_manager, 2, 0)
window_manager.reset_split_sizes()
# Shrink the third split once
window_manager.shrink_split()
self.assertEqual(
_window_list_widths(window_manager),
[
- _DEFAULT_WINDOW_WIDTH,
- _DEFAULT_WINDOW_WIDTH + (1 * _WINDOW_SPLIT_ADJUST),
- _DEFAULT_WINDOW_WIDTH - (1 * _WINDOW_SPLIT_ADJUST),
+ int(_WINDOW_MANAGER_WIDTH / 3),
+ int(_WINDOW_MANAGER_WIDTH / 3) +
+ (1 * _WINDOW_SPLIT_ADJUST),
+ int(_WINDOW_MANAGER_WIDTH / 3) -
+ (1 * _WINDOW_SPLIT_ADJUST),
],
)
@@ -225,23 +264,29 @@ class TestWindowManager(unittest.TestCase):
self.assertEqual(
_window_list_widths(window_manager),
[
- _DEFAULT_WINDOW_WIDTH,
- _DEFAULT_WINDOW_WIDTH - (3 * _WINDOW_SPLIT_ADJUST),
- _DEFAULT_WINDOW_WIDTH + (3 * _WINDOW_SPLIT_ADJUST),
+ int(_WINDOW_MANAGER_WIDTH / 3),
+ int(_WINDOW_MANAGER_WIDTH / 3) -
+ (3 * _WINDOW_SPLIT_ADJUST),
+ int(_WINDOW_MANAGER_WIDTH / 3) +
+ (3 * _WINDOW_SPLIT_ADJUST),
],
)
# Target the middle split
- _target_list_and_pane(window_manager, 1, 0)
+ target_list_and_pane(window_manager, 1, 0)
# Move the middle window pane left
window_manager.move_pane_left()
+ # This is called on the next render pass
+ window_manager.rebalance_window_list_sizes()
# Middle split should be removed
self.assertEqual(
_window_list_widths(window_manager),
[
- _DEFAULT_WINDOW_WIDTH,
+ int(_WINDOW_MANAGER_WIDTH / 2) -
+ (3 * _WINDOW_SPLIT_ADJUST),
# This split is removed
- _DEFAULT_WINDOW_WIDTH + (3 * _WINDOW_SPLIT_ADJUST),
+ int(_WINDOW_MANAGER_WIDTH / 2) +
+ (2 * _WINDOW_SPLIT_ADJUST),
],
)
@@ -250,8 +295,8 @@ class TestWindowManager(unittest.TestCase):
self.assertEqual(
_window_list_widths(window_manager),
[
- _DEFAULT_WINDOW_WIDTH,
- _DEFAULT_WINDOW_WIDTH,
+ int(_WINDOW_MANAGER_WIDTH / 2),
+ int(_WINDOW_MANAGER_WIDTH / 2),
],
)
@@ -360,7 +405,7 @@ class TestWindowManager(unittest.TestCase):
# Check window pane ordering
self.assertEqual(
- _window_pane_titles(window_manager),
+ window_pane_titles(window_manager),
[
[
'Log2 - test_log2',
@@ -374,7 +419,7 @@ class TestWindowManager(unittest.TestCase):
target_window_pane(0)
window_manager.move_pane_down()
self.assertEqual(
- _window_pane_titles(window_manager),
+ window_pane_titles(window_manager),
[
[
'Log1 - test_log1',
@@ -389,7 +434,7 @@ class TestWindowManager(unittest.TestCase):
target_window_pane(1)
window_manager.move_pane_up()
self.assertEqual(
- _window_pane_titles(window_manager),
+ window_pane_titles(window_manager),
[
[
'Log0 - test_log0',
@@ -402,7 +447,7 @@ class TestWindowManager(unittest.TestCase):
target_window_pane(0)
window_manager.move_pane_up()
self.assertEqual(
- _window_pane_titles(window_manager),
+ window_pane_titles(window_manager),
[
[
'Log0 - test_log0',
@@ -414,13 +459,13 @@ class TestWindowManager(unittest.TestCase):
)
def test_focus_next_and_previous_pane(self) -> None:
- """Test getting the window list for a given pane."""
+ """Test switching focus to next and previous window panes."""
with create_app_session(output=FakeOutput()):
console_app = _create_console_app(logger_count=4)
window_manager = console_app.window_manager
self.assertEqual(
- _window_pane_titles(window_manager),
+ window_pane_titles(window_manager),
[
[
'Log3 - test_log3',
@@ -435,7 +480,7 @@ class TestWindowManager(unittest.TestCase):
# Scenario: Move between panes with a single stacked window list.
# Set the first pane in focus.
- _target_list_and_pane(window_manager, 0, 0)
+ target_list_and_pane(window_manager, 0, 0)
# Switch focus to the next pane
window_manager.focus_next_pane()
# Pane index 1 should now be focused.
@@ -444,7 +489,7 @@ class TestWindowManager(unittest.TestCase):
console_app.focus_on_container.reset_mock()
# Set the first pane in focus.
- _target_list_and_pane(window_manager, 0, 0)
+ target_list_and_pane(window_manager, 0, 0)
# Switch focus to the previous pane
window_manager.focus_previous_pane()
# Previous pane should wrap around to the last pane in the first
@@ -454,7 +499,7 @@ class TestWindowManager(unittest.TestCase):
console_app.focus_on_container.reset_mock()
# Set the last pane in focus.
- _target_list_and_pane(window_manager, 0, 4)
+ target_list_and_pane(window_manager, 0, 4)
# Switch focus to the next pane
window_manager.focus_next_pane()
# Next pane should wrap around to the first pane in the first
@@ -475,7 +520,7 @@ class TestWindowManager(unittest.TestCase):
wraps=window_manager.window_lists[0].switch_to_tab)
# Set the first pane/tab in focus.
- _target_list_and_pane(window_manager, 0, 0)
+ target_list_and_pane(window_manager, 0, 0)
# Switch focus to the next pane/tab
window_manager.focus_next_pane()
# Check switch_to_tab is called
@@ -488,7 +533,7 @@ class TestWindowManager(unittest.TestCase):
window_manager.window_lists[0].switch_to_tab.reset_mock()
# Set the last pane/tab in focus.
- _target_list_and_pane(window_manager, 0, 4)
+ target_list_and_pane(window_manager, 0, 4)
# Switch focus to the next pane/tab
window_manager.focus_next_pane()
# Check switch_to_tab is called
@@ -501,7 +546,7 @@ class TestWindowManager(unittest.TestCase):
window_manager.window_lists[0].switch_to_tab.reset_mock()
# Set the first pane/tab in focus.
- _target_list_and_pane(window_manager, 0, 0)
+ target_list_and_pane(window_manager, 0, 0)
# Switch focus to the prev pane/tab
window_manager.focus_previous_pane()
# Check switch_to_tab is called
@@ -518,12 +563,12 @@ class TestWindowManager(unittest.TestCase):
# Setup: Move two panes to the right into their own stacked
# window_list.
- _target_list_and_pane(window_manager, 0, 4)
+ target_list_and_pane(window_manager, 0, 4)
window_manager.move_pane_right()
- _target_list_and_pane(window_manager, 0, 3)
+ target_list_and_pane(window_manager, 0, 3)
window_manager.move_pane_right()
self.assertEqual(
- _window_pane_titles(window_manager),
+ window_pane_titles(window_manager),
[
[
'Log3 - test_log3',
@@ -542,7 +587,7 @@ class TestWindowManager(unittest.TestCase):
wraps=window_manager.window_lists[1].switch_to_tab)
# Set Log1 in focus
- _target_list_and_pane(window_manager, 0, 2)
+ target_list_and_pane(window_manager, 0, 2)
window_manager.focus_next_pane()
# Log0 should now have focus
console_app.focus_on_container.assert_called_once_with(
@@ -550,7 +595,7 @@ class TestWindowManager(unittest.TestCase):
console_app.focus_on_container.reset_mock()
# Set Log0 in focus
- _target_list_and_pane(window_manager, 1, 0)
+ target_list_and_pane(window_manager, 1, 0)
window_manager.focus_previous_pane()
# Log1 should now have focus
console_app.focus_on_container.assert_called_once_with(
@@ -564,7 +609,7 @@ class TestWindowManager(unittest.TestCase):
console_app.focus_on_container.reset_mock()
# Set Python Repl in focus
- _target_list_and_pane(window_manager, 1, 1)
+ target_list_and_pane(window_manager, 1, 1)
window_manager.focus_next_pane()
# Log3 should now have focus
console_app.focus_on_container.assert_called_once_with(
@@ -576,7 +621,7 @@ class TestWindowManager(unittest.TestCase):
console_app.focus_on_container.reset_mock()
# Set Log3 in focus
- _target_list_and_pane(window_manager, 0, 0)
+ target_list_and_pane(window_manager, 0, 0)
window_manager.focus_next_pane()
# Log2 should now have focus
console_app.focus_on_container.assert_called_once_with(
@@ -588,7 +633,7 @@ class TestWindowManager(unittest.TestCase):
console_app.focus_on_container.reset_mock()
# Set Python Repl in focus
- _target_list_and_pane(window_manager, 1, 1)
+ target_list_and_pane(window_manager, 1, 1)
window_manager.focus_previous_pane()
# Log0 should now have focus
console_app.focus_on_container.assert_called_once_with(
@@ -600,6 +645,160 @@ class TestWindowManager(unittest.TestCase):
window_manager.window_lists[1].switch_to_tab.reset_mock()
console_app.focus_on_container.reset_mock()
+ def test_resize_vertical_splits(self) -> None:
+ """Test resizing window splits."""
+ with create_app_session(output=FakeOutput()):
+ console_app = _create_console_app(logger_count=4)
+ window_manager = console_app.window_manager
+
+ # Required before moving windows
+ window_manager.update_window_manager_size(_WINDOW_MANAGER_WIDTH,
+ _WINDOW_MANAGER_HEIGHT)
+ window_manager.create_root_container()
+
+ # Vertical split by default
+ self.assertTrue(window_manager.vertical_window_list_spliting())
+
+ # Move windows to create 3 splits
+ target_list_and_pane(window_manager, 0, 0)
+ window_manager.move_pane_right()
+ target_list_and_pane(window_manager, 0, 0)
+ window_manager.move_pane_right()
+ target_list_and_pane(window_manager, 1, 1)
+ window_manager.move_pane_right()
+
+ # Check windows are where expected
+ self.assertEqual(
+ window_pane_titles(window_manager),
+ [
+ [
+ 'Log1 - test_log1',
+ 'Log0 - test_log0',
+ 'Python Repl - ',
+ ],
+ [
+ 'Log2 - test_log2',
+ ],
+ [
+ 'Log3 - test_log3',
+ ],
+ ],
+ )
+
+ # Check initial split widths
+ widths = [
+ int(_WINDOW_MANAGER_WIDTH / 3),
+ int(_WINDOW_MANAGER_WIDTH / 3),
+ int(_WINDOW_MANAGER_WIDTH / 3),
+ ]
+ self.assertEqual(_window_list_widths(window_manager), widths)
+
+ # Decrease size of first split
+ window_manager.adjust_split_size(window_manager.window_lists[0],
+ -4)
+ widths = [
+ widths[0] - (4 * _WINDOW_SPLIT_ADJUST),
+ widths[1] + (4 * _WINDOW_SPLIT_ADJUST),
+ widths[2],
+ ]
+ self.assertEqual(_window_list_widths(window_manager), widths)
+
+ # Increase size of last split
+ widths = [
+ widths[0],
+ widths[1] - (4 * _WINDOW_SPLIT_ADJUST),
+ widths[2] + (4 * _WINDOW_SPLIT_ADJUST),
+ ]
+ window_manager.adjust_split_size(window_manager.window_lists[2], 4)
+ self.assertEqual(_window_list_widths(window_manager), widths)
+
+ # Check heights are all the same
+ window_manager.rebalance_window_list_sizes()
+ heights = [
+ int(_WINDOW_MANAGER_HEIGHT),
+ int(_WINDOW_MANAGER_HEIGHT),
+ int(_WINDOW_MANAGER_HEIGHT),
+ ]
+ self.assertEqual(_window_list_heights(window_manager), heights)
+
+ def test_resize_horizontal_splits(self) -> None:
+ """Test resizing window splits."""
+ with create_app_session(output=FakeOutput()):
+ console_app = _create_console_app(logger_count=4)
+ window_manager = console_app.window_manager
+
+ # We want horizontal window splits
+ window_manager.vertical_window_list_spliting = (MagicMock(
+ return_value=False))
+ self.assertFalse(window_manager.vertical_window_list_spliting())
+
+ # Required before moving windows
+ window_manager.update_window_manager_size(_WINDOW_MANAGER_WIDTH,
+ _WINDOW_MANAGER_HEIGHT)
+ window_manager.create_root_container()
+
+ # Move windows to create 3 splits
+ target_list_and_pane(window_manager, 0, 0)
+ window_manager.move_pane_right()
+ target_list_and_pane(window_manager, 0, 0)
+ window_manager.move_pane_right()
+ target_list_and_pane(window_manager, 1, 1)
+ window_manager.move_pane_right()
+
+ # Check windows are where expected
+ self.assertEqual(
+ window_pane_titles(window_manager),
+ [
+ [
+ 'Log1 - test_log1',
+ 'Log0 - test_log0',
+ 'Python Repl - ',
+ ],
+ [
+ 'Log2 - test_log2',
+ ],
+ [
+ 'Log3 - test_log3',
+ ],
+ ],
+ )
+
+            # Check initial split heights
+ heights = [
+ int(_WINDOW_MANAGER_HEIGHT / 3),
+ int(_WINDOW_MANAGER_HEIGHT / 3),
+ int(_WINDOW_MANAGER_HEIGHT / 3),
+ ]
+ self.assertEqual(_window_list_heights(window_manager), heights)
+
+ # Decrease size of first split
+ window_manager.adjust_split_size(window_manager.window_lists[0],
+ -4)
+ heights = [
+ heights[0] - (4 * _WINDOW_SPLIT_ADJUST),
+ heights[1] + (4 * _WINDOW_SPLIT_ADJUST),
+ heights[2],
+ ]
+ self.assertEqual(_window_list_heights(window_manager), heights)
+
+ # Increase size of last split
+ heights = [
+ heights[0],
+ heights[1] - (4 * _WINDOW_SPLIT_ADJUST),
+ heights[2] + (4 * _WINDOW_SPLIT_ADJUST),
+ ]
+ window_manager.adjust_split_size(window_manager.window_lists[2], 4)
+ self.assertEqual(_window_list_heights(window_manager), heights)
+
+ # Check widths are all the same
+ window_manager.rebalance_window_list_sizes()
+ widths = [
+ int(_WINDOW_MANAGER_WIDTH),
+ int(_WINDOW_MANAGER_WIDTH),
+ int(_WINDOW_MANAGER_WIDTH),
+ ]
+ self.assertEqual(_window_list_widths(window_manager), widths)
+
if __name__ == '__main__':
unittest.main()
diff --git a/pw_console/testing.rst b/pw_console/testing.rst
index 6aaf42a41..0f9b719f2 100644
--- a/pw_console/testing.rst
+++ b/pw_console/testing.rst
@@ -16,7 +16,8 @@ Begin each section below by running the console in test mode:
.. code-block:: shell
- pw console --test-mode
+ touch /tmp/empty.yaml
+ env PW_CONSOLE_CONFIG_FILE='/tmp/empty.yaml' pw console --test-mode
Test Sections
=============
@@ -39,75 +40,85 @@ Log Pane: Basic Actions
- |checkbox|
* - 2
+ - In the main menu enable :guilabel:`[File] > Log Table View > Hide Date`
+ - The time column shows only the time. E.g. ``09:34:53``.
+ - |checkbox|
+
+ * - 3
+ - In the main menu turn off :guilabel:`[File] > Log Table View > Hide Date`
+ - The time column shows the date and time. E.g. ``20220208 09:34:53``.
+ - |checkbox|
+
+ * - 4
- Click :guilabel:`Search` on the log toolbar
- | The search bar appears
| The cursor should appear after the ``/``
- |checkbox|
- * - 3
+ * - 5
- Press :kbd:`Ctrl-c`
- The search bar disappears
- |checkbox|
- * - 4
+ * - 6
- Click :guilabel:`Follow` on the log toolbar
- Logs stop following
- |checkbox|
- * - 5
+ * - 7
- Click :guilabel:`Table` on the log toolbar
- Table mode is disabled
- |checkbox|
- * - 6
+ * - 8
- Click :guilabel:`Wrap` on the log toolbar
- Line wrapping is enabled
- |checkbox|
- * - 7
+ * - 9
- Click :guilabel:`Clear` on the log toolbar
- | All log lines are erased
| Follow mode is on
| New lines start appearing
- |checkbox|
- * - 8
+ * - 10
- | Mouse drag across a few log messages
- | Entire logs are highlighted and a dialog
| box appears in the upper right
- |checkbox|
- * - 9
+ * - 11
- | Without scrolling mouse drag across a set
| of different log messages.
- | The old selection disappears leaving only the new selection.
- |checkbox|
- * - 10
+ * - 12
- | Click the :guilabel:`Cancel` button
| in the selection dialog box.
- | The selection and the dialog box disappears.
- |checkbox|
- * - 11
+ * - 13
- | Mouse drag across a few log messages and
| click the :guilabel:`Save as File` button.
- | The save as file dialog appears with the
+        | :guilabel:`[x] Selected Lines Only` option checked.
- |checkbox|
- * - 12
+ * - 14
- | Press :kbd:`Cancel`
- | The save dialog closes
- |checkbox|
- * - 13
+ * - 15
- | Click the :guilabel:`Save` button on the log toolbar.
| A dialog appears prompting for a file.
- | The current working directory should be pre-filled.
- |checkbox|
- * - 14
+ * - 16
- | Check :guilabel:`[x] Table Formatting`
| Uncheck :guilabel:`[ ] Selected Lines Only`
| Add ``/log.txt`` to the end and press :kbd:`Enter`
@@ -143,54 +154,94 @@ Log Pane: Search and Filtering
- | Type ``lorem``
| Press :kbd:`Enter`
- | Logs stop following
- | The previous ``Lorem`` word is highlighted in yellow
- | All other ``Lorem`` words are highlighted in cyan
+ | ``Lorem`` words are highlighted in cyan
+ | The cursor on the first log message
+ | The search toolbar is un-focused and displays:
+ | ``Match 1 / 10`` where the second number (the total match count)
+ | increases once every 10 seconds when new logs arrive.
- |checkbox|
* - 4
- Press :kbd:`Ctrl-f`
- - | The search bar appears
- | The cursor should appear after the ``/``
+ - | The search bar is focused
+ | The cursor should appear after ``/Lorem``
- |checkbox|
* - 5
+ - Press :kbd:`Ctrl-c`
+ - | The search bar disappears
+ | ``Lorem`` words are no longer highlighted
+ - |checkbox|
+
+ * - 6
+ - Press :kbd:`/`
+ - | The search bar appears and is empty
+ | The cursor should appear after ``/``
+ - |checkbox|
+
+ * - 7
- Click :guilabel:`Matcher:` once
- ``Matcher:STRING`` is shown
- |checkbox|
- * - 6
+ * - 8
- | Type ``[=``
| Press :kbd:`Enter`
- - All instances of ``[=`` should be highlighted
+ - | All instances of ``[=`` should be highlighted
+ | The cursor should be on log message 2
- |checkbox|
* - 7
- Press :kbd:`/`
- - | The search bar appears
- | The cursor should appear after the ``/``
+ - | The search bar is focused
+ | The cursor should appear after the ``/[=``
- |checkbox|
* - 8
+ - Press :kbd:`Ctrl-c`
+ - | The search bar disappears
+ | ``[=`` matches are no longer highlighted
+ - |checkbox|
+
+ * - 9
+ - Press :kbd:`/`
+ - | The search bar appears and is empty
+ | The cursor should appear after ``/``
+ - |checkbox|
+
+ * - 10
- Press :kbd:`Up`
- The text ``[=`` should appear in the search input field
- |checkbox|
- * - 9
+ * - 11
+ - Click :guilabel:`Search Enter`
+ - | All instances of ``[=`` should be highlighted
+ | The cursor should be on log message 12
+ - |checkbox|
+
+ * - 12
- Click :guilabel:`Add Filter`
- | A ``Filters`` toolbar will appear
| showing the new filter: ``<\[= (X)>``.
| Only log messages matching ``[=`` appear in the logs.
+ | Follow mode is enabled
- |checkbox|
- * - 10
+ * - 13
- | Press :kbd:`/`
- | Type ``# 1``
+ - | The search bar appears and is empty
+ | The cursor should appear after ``/``
+ - |checkbox|
+
+ * - 14
+ - | Type ``# 1`` and press :kbd:`Enter`
| Click :guilabel:`Add Filter`
- | The ``Filters`` toolbar shows a new filter: ``<\#\ 1 (X)>``.
| Only log messages matching both filters will appear in the logs.
- |checkbox|
- * - 11
+ * - 15
- | Click the first :guilabel:`(X)`
| in the filter toolbar.
- | The ``Filters`` toolbar shows only one filter: ``<\#\ 1 (X)>``.
@@ -198,40 +249,64 @@ Log Pane: Search and Filtering
| Lines all end in: ``# 1.*``
- |checkbox|
- * - 12
+ * - 16
- Click :guilabel:`Clear Filters`
- | The ``Filters`` toolbar will disappear.
| All log messages will be shown in the log window.
- |checkbox|
- * - 13
+ * - 17
- | Press :kbd:`/`
| Type ``BAT``
- | Click :guilabel:`Column`
- - ``Column:Module`` is shown
+ | Click :guilabel:`Column` until ``Column:Module`` is shown
+ | Press :kbd:`Enter`
+ - | Logs stop following
+ | ``BAT`` is highlighted in cyan
+ | The cursor is on the 3rd log message
+ | The search toolbar is un-focused and displays:
+ | ``Match 1 / 10`` where the second number (the total match count)
- |checkbox|
- * - 14
+ * - 18
+ - Press :kbd:`n`
+ - | ``BAT`` is highlighted in cyan
+ | The cursor is on the 7th log message and is in the center of the
+ | log window (not the bottom).
+ - |checkbox|
+
+ * - 19
+ - Click :guilabel:`Jump to new matches`
+ - | :guilabel:`Jump to new matches` is checked and every 5 seconds
+ | the cursor jumps to the latest matching log message.
+ - |checkbox|
+
+ * - 20
+ - Click :guilabel:`Follow`
+ - | :guilabel:`Jump to new matches` is unchecked
+ | The cursor jumps to every new log message once a second.
+ - |checkbox|
+
+ * - 21
- | Click :guilabel:`Add Filter`
- | The Filters toolbar appears with one filter: ``<module BAT (X)>``
| Only logs with Module matching ``BAT`` appear.
- |checkbox|
- * - 15
+ * - 22
- Click :guilabel:`Clear Filters`
- | The ``Filters`` toolbar will disappear.
| All log messages will be shown in the log window.
- |checkbox|
- * - 16
+ * - 23
- | Press :kbd:`/`
| Type ``BAT``
| Click :guilabel:`Invert`
- ``[x] Invert`` setting is shown
- |checkbox|
- * - 17
- - | Click :guilabel:`Add Filter`
+ * - 24
+ - | Press :kbd:`Enter` then click :guilabel:`Add Filter`
- | The Filters toolbar appears
| One filter is shown: ``<NOT module BAT (X)>``
| Only logs with Modules other than ``BAT`` appear.
@@ -417,19 +492,50 @@ Mouse Window Resizing
- |checkbox|
* - 2
- - | Left click and hold the :guilabel:`====` of that window
+ - | Left click and hold the :guilabel:`-==-` of that window
| Drag the mouse up and down
- This log pane is resized
- |checkbox|
* - 3
- - | Left click and hold the :guilabel:`====`
+ - | Left click and hold the :guilabel:`-==-`
| of the :guilabel:`PwConsole Debug` window
| Drag the mouse up and down
- | The :guilabel:`PwConsole Debug` should NOT be focused
| The window should be resized as expected
- |checkbox|
+ * - 4
+ - Click the :guilabel:`View > Move Window Right`
+ - :guilabel:`Fake Device Logs` should appear in a right side split
+ - |checkbox|
+
+ * - 5
+ - | Left click and hold anywhere on the vertical separator
+ | Drag the mouse left and right
+ - | The window splits should be resized as expected
+ - |checkbox|
+
+ * - 6
+ - Click the :guilabel:`View > Balance Window Sizes`
+ - Window split sizes should reset to equal widths
+ - |checkbox|
+
+ * - 7
+ - | Focus on the :guilabel:`Python Repl` window
+ | Click the :guilabel:`View > Move Window Left`
+ - | :guilabel:`Python Repl` should appear in a left side split
+ | There should be 3 vertical splits in total
+ - |checkbox|
+
+ * - 8
+ - | Left click and hold anywhere on the vertical separator
+ | between the first two splits (Python Repl and the middle split)
+ | Drag the mouse left and right
+ - | The first two window splits should be resized.
+ | The 3rd split size should not change.
+ - |checkbox|
+
Copy Paste
^^^^^^^^^^
@@ -513,6 +619,15 @@ Copy Paste
- Python Input is focused
- |checkbox|
+ * - 11
+ - | Type ``print('hello there')`` into the Python input.
+ | Mouse drag select that text
+ | Press :kbd:`Ctrl-c`
+ - | The selection should disappear.
+ | Try pasting into a separate text editor, the paste should
+ | match the text you drag selected.
+ - |checkbox|
+
Incremental Stdout
^^^^^^^^^^^^^^^^^^
@@ -594,6 +709,34 @@ Python Input & Output
| the way to the beginning and end of the buffer.
- |checkbox|
+Early Startup
+^^^^^^^^^^^^^
+
+.. list-table::
+ :widths: 5 45 45 5
+ :header-rows: 1
+
+ * - #
+ - Test Action
+ - Expected Result
+ - ✅
+
+ * - 1
+ - | Start the pw console test mode by
+ | running ``pw console --test-mode``
+ - | Console starts up showing an ``All Logs`` window.
+ - |checkbox|
+
+ * - 2
+ - | Click the :guilabel:`All Logs` window title
+ | Press :kbd:`g` to jump to the top of the log history
+ - | These log messages should be at the top:
+ | ``DBG Adding plugins...``
+ | ``DBG Starting prompt_toolkit full-screen application...``
+ | ``DBG pw_console test-mode starting...``
+ | ``DBG pw_console.PwConsoleEmbed init complete``
+ - |checkbox|
+
Quit Confirmation Dialog
^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/pw_containers/CMakeLists.txt b/pw_containers/CMakeLists.txt
index 06d173c00..e960e3d5e 100644
--- a/pw_containers/CMakeLists.txt
+++ b/pw_containers/CMakeLists.txt
@@ -14,11 +14,124 @@
include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
-pw_auto_add_simple_module(pw_containers
+pw_add_module_library(pw_containers
PUBLIC_DEPS
- pw_assert
- pw_status
+ pw_containers.flat_map
+ pw_containers.intrusive_list
+ pw_containers.vector
)
if(Zephyr_FOUND AND CONFIG_PIGWEED_CONTAINERS)
zephyr_link_libraries(pw_containers)
endif()
+
+pw_add_module_library(pw_containers.filtered_view
+ HEADERS
+ public/pw_containers/filtered_view.h
+ PUBLIC_INCLUDES
+ public
+)
+
+pw_add_module_library(pw_containers.flat_map
+ HEADERS
+ public/pw_containers/flat_map.h
+ PUBLIC_INCLUDES
+ public
+)
+
+pw_add_module_library(pw_containers.to_array
+ HEADERS
+ public/pw_containers/to_array.h
+ PUBLIC_INCLUDES
+ public
+)
+
+pw_add_module_library(pw_containers.vector
+ HEADERS
+ public/pw_containers/vector.h
+ PUBLIC_INCLUDES
+ public
+ PUBLIC_DEPS
+ pw_assert
+)
+
+pw_add_module_library(pw_containers.wrapped_iterator
+ HEADERS
+ public/pw_containers/wrapped_iterator.h
+ PUBLIC_INCLUDES
+ public
+)
+
+pw_add_module_library(pw_containers.intrusive_list
+ HEADERS
+ public/pw_containers/internal/intrusive_list_impl.h
+ public/pw_containers/intrusive_list.h
+ PUBLIC_INCLUDES
+ public
+ SOURCES
+ intrusive_list.cc
+ PRIVATE_DEPS
+ pw_assert
+)
+
+pw_add_test(pw_containers.filtered_view_test
+ SOURCES
+ filtered_view_test.cc
+ DEPS
+ pw_containers.filtered_view
+ pw_containers.intrusive_list
+ pw_polyfill.span
+ GROUPS
+ modules
+ pw_containers
+)
+
+pw_add_test(pw_containers.flat_map_test
+ SOURCES
+ flat_map_test.cc
+ DEPS
+ pw_containers.flat_map
+ GROUPS
+ modules
+ pw_containers
+)
+
+pw_add_test(pw_containers.to_array_test
+ SOURCES
+ to_array_test.cc
+ DEPS
+ pw_containers.to_array
+ GROUPS
+ modules
+ pw_containers
+)
+
+pw_add_test(pw_containers.vector_test
+ SOURCES
+ vector_test.cc
+ DEPS
+ pw_containers.vector
+ GROUPS
+ modules
+ pw_containers
+)
+
+pw_add_test(pw_containers.wrapped_iterator_test
+ SOURCES
+ wrapped_iterator_test.cc
+ DEPS
+ pw_containers.wrapped_iterator
+ GROUPS
+ modules
+ pw_containers
+)
+
+pw_add_test(pw_containers.intrusive_list_test
+ SOURCES
+ intrusive_list_test.cc
+ DEPS
+ pw_containers.intrusive_list
+ pw_preprocessor
+ GROUPS
+ modules
+ pw_containers
+)
diff --git a/pw_cpu_exception_cortex_m/BUILD.bazel b/pw_cpu_exception_cortex_m/BUILD.bazel
index 2f52bbea1..012331082 100644
--- a/pw_cpu_exception_cortex_m/BUILD.bazel
+++ b/pw_cpu_exception_cortex_m/BUILD.bazel
@@ -101,6 +101,7 @@ pw_cc_library(
":cortex_m_constants",
":proto_dump",
":support",
+ ":util",
# TODO(pwbug/101): Need to add support for facades/backends to Bazel.
"//pw_cpu_exception",
"//pw_preprocessor",
@@ -118,6 +119,7 @@ pw_cc_library(
":cpu_state",
":cpu_state_protos",
":proto_dump",
+ ":util",
"//pw_log",
"//pw_protobuf",
"//pw_status",
@@ -143,3 +145,14 @@ pw_cc_test(
":cpu_state",
],
)
+
+pw_cc_test(
+ name = "util_test",
+ srcs = [
+ "util_test.cc",
+ ],
+ deps = [
+ ":cpu_state",
+ ":util",
+ ],
+)
diff --git a/pw_cpu_exception_cortex_m/BUILD.gn b/pw_cpu_exception_cortex_m/BUILD.gn
index 7c2920757..dd91dea27 100644
--- a/pw_cpu_exception_cortex_m/BUILD.gn
+++ b/pw_cpu_exception_cortex_m/BUILD.gn
@@ -145,6 +145,7 @@ pw_source_set("cpu_exception.impl") {
":config",
":cortex_m_constants",
":cpu_state",
+ ":util",
"$dir_pw_cpu_exception:entry.facade",
"$dir_pw_cpu_exception:handler",
"$dir_pw_preprocessor:arch",
@@ -175,6 +176,7 @@ pw_source_set("snapshot") {
":config",
":cortex_m_constants",
":proto_dump",
+ ":util",
dir_pw_log,
]
}
@@ -222,6 +224,16 @@ pw_test("cpu_exception_entry_test") {
sources = [ "exception_entry_test.cc" ]
}
+pw_test("util_test") {
+ enable_if = pw_cpu_exception_ENTRY_BACKEND ==
+ "$dir_pw_cpu_exception_cortex_m:cpu_exception"
+ deps = [
+ ":cpu_state",
+ ":util",
+ ]
+ sources = [ "util_test.cc" ]
+}
+
pw_doc_group("docs") {
sources = [ "docs.rst" ]
}
diff --git a/pw_cpu_exception_cortex_m/CMakeLists.txt b/pw_cpu_exception_cortex_m/CMakeLists.txt
index 49a1bad41..82d12b45d 100644
--- a/pw_cpu_exception_cortex_m/CMakeLists.txt
+++ b/pw_cpu_exception_cortex_m/CMakeLists.txt
@@ -49,6 +49,7 @@ pw_add_module_library(pw_cpu_exception_cortex_m.cpu_exception
pw_cpu_exception.handler
pw_cpu_exception_cortex_m.config
pw_cpu_exception_cortex_m.constants
+ pw_cpu_exception_cortex_m.util
SOURCES
entry.cc
)
@@ -118,6 +119,7 @@ pw_add_module_library(pw_cpu_exception_cortex_m.snapshot
pw_cpu_exception_cortex_m.config
pw_cpu_exception_cortex_m.constants
pw_cpu_exception_cortex_m.proto_dump
+ pw_cpu_exception_cortex_m.util
pw_log
pw_polyfill.span
SOURCES
@@ -151,4 +153,15 @@ if("${pw_cpu_exception.entry_BACKEND}" STREQUAL
modules
pw_cpu_exception_cortex_m
)
+
+ pw_add_test(pw_cpu_exception_cortex_m.util_test
+ SOURCES
+ util_test.cc
+ DEPS
+ pw_cpu_exception_cortex_m.cpu_state
+ pw_cpu_exception_cortex_m.util
+ GROUPS
+ modules
+ pw_cpu_exception_cortex_m
+ )
endif()
diff --git a/pw_cpu_exception_cortex_m/entry.cc b/pw_cpu_exception_cortex_m/entry.cc
index d7b76b43c..0edff8258 100644
--- a/pw_cpu_exception_cortex_m/entry.cc
+++ b/pw_cpu_exception_cortex_m/entry.cc
@@ -19,6 +19,7 @@
#include "pw_cpu_exception/handler.h"
#include "pw_cpu_exception_cortex_m/cpu_state.h"
+#include "pw_cpu_exception_cortex_m/util.h"
#include "pw_cpu_exception_cortex_m_private/cortex_m_constants.h"
#include "pw_preprocessor/arch.h"
#include "pw_preprocessor/compiler.h"
@@ -30,12 +31,6 @@ pw_CpuExceptionEntry(void);
namespace pw::cpu_exception::cortex_m {
namespace {
-// Checks exc_return in the captured CPU state to determine which stack pointer
-// was in use prior to entering the exception handler.
-bool PspWasActive(const pw_cpu_exception_State& cpu_state) {
- return cpu_state.extended.exc_return & kExcReturnStackMask;
-}
-
// Checks exc_return to determine if FPU state was pushed to the stack in
// addition to the base CPU context frame.
bool FpuStateWasPushed(const pw_cpu_exception_State& cpu_state) {
@@ -113,7 +108,8 @@ uint32_t CalculatePspDelta(const pw_cpu_exception_State& cpu_state) {
// If CPU context was not pushed to program stack (because program stack
// wasn't in use, or an error occurred when pushing context), the PSP doesn't
// need to be shifted.
- if (!PspWasActive(cpu_state) || (cpu_state.extended.cfsr & kCfsrStkerrMask) ||
+ if (!ProcessStackActive(cpu_state) ||
+ (cpu_state.extended.cfsr & kCfsrStkerrMask) ||
#if _PW_ARCH_ARM_V8M_MAINLINE
(cpu_state.extended.cfsr & kCfsrStkofMask) ||
#endif // _PW_ARCH_ARM_V8M_MAINLINE
@@ -128,7 +124,7 @@ uint32_t CalculatePspDelta(const pw_cpu_exception_State& cpu_state) {
// at exception-time. On exception return, it is restored to the appropriate
// location. This calculates the delta that is used for these patch operations.
uint32_t CalculateMspDelta(const pw_cpu_exception_State& cpu_state) {
- if (PspWasActive(cpu_state)) {
+ if (ProcessStackActive(cpu_state)) {
// TODO(amontanez): Since FPU state isn't captured at this time, we ignore
// it when patching MSP. To add FPU capture support,
// delete this if block as CpuContextSize() will include
@@ -159,7 +155,7 @@ PW_USED void pw_PackageAndHandleCpuException(
// the values can be copied into in the pw_cpu_exception_State struct that is
// passed to HandleCpuException(). The cpu_state passed to the handler is
// ALWAYS stored on the main stack (MSP).
- if (PspWasActive(*cpu_state)) {
+ if (ProcessStackActive(*cpu_state)) {
CloneBaseRegistersFromPsp(cpu_state);
// If PSP wasn't active, this delta is 0.
cpu_state->extended.psp += CalculatePspDelta(*cpu_state);
@@ -182,7 +178,7 @@ PW_USED void pw_PackageAndHandleCpuException(
// If PSP was active and the CPU pushed a context frame, we must copy the
// potentially modified state from cpu_state back to the PSP so the CPU can
// resume execution with the modified values.
- if (PspWasActive(*cpu_state)) {
+ if (ProcessStackActive(*cpu_state)) {
// In this case, there's no need to touch the MSP as it's at the location
// before we entering the exception (effectively popping the state initially
// pushed to the main stack).
diff --git a/pw_cpu_exception_cortex_m/public/pw_cpu_exception_cortex_m/util.h b/pw_cpu_exception_cortex_m/public/pw_cpu_exception_cortex_m/util.h
index a760bb530..3c37c4aac 100644
--- a/pw_cpu_exception_cortex_m/public/pw_cpu_exception_cortex_m/util.h
+++ b/pw_cpu_exception_cortex_m/public/pw_cpu_exception_cortex_m/util.h
@@ -19,4 +19,18 @@ namespace pw::cpu_exception::cortex_m {
void LogExceptionAnalysis(const pw_cpu_exception_State& cpu_state);
+enum class ProcessorMode {
+ kHandlerMode, // Handling interrupts/exceptions (msp).
+ kThreadMode, // May be on either psp or msp.
+};
+ProcessorMode ActiveProcessorMode(const pw_cpu_exception_State& cpu_state);
+
+// Returns whether the msp was active in thread or handler modes.
+bool MainStackActive(const pw_cpu_exception_State& cpu_state);
+
+// Returns whether the psp was active in thread mode.
+inline bool ProcessStackActive(const pw_cpu_exception_State& cpu_state) {
+ return !MainStackActive(cpu_state);
+}
+
} // namespace pw::cpu_exception::cortex_m
diff --git a/pw_cpu_exception_cortex_m/snapshot.cc b/pw_cpu_exception_cortex_m/snapshot.cc
index f2c690d41..f4925ed49 100644
--- a/pw_cpu_exception_cortex_m/snapshot.cc
+++ b/pw_cpu_exception_cortex_m/snapshot.cc
@@ -17,6 +17,7 @@
#include "pw_cpu_exception_cortex_m/snapshot.h"
#include "pw_cpu_exception_cortex_m/proto_dump.h"
+#include "pw_cpu_exception_cortex_m/util.h"
#include "pw_cpu_exception_cortex_m_private/config.h"
#include "pw_cpu_exception_cortex_m_private/cortex_m_constants.h"
#include "pw_cpu_exception_cortex_m_protos/cpu_state.pwpb.h"
@@ -32,11 +33,6 @@ namespace {
constexpr char kMainStackHandlerModeName[] = "Main Stack (Handler Mode)";
constexpr char kMainStackThreadModeName[] = "Main Stack (Thread Mode)";
-enum class ProcessorMode {
- kHandlerMode,
- kThreadMode,
-};
-
Status CaptureMainStack(
ProcessorMode mode,
uintptr_t stack_low_addr,
@@ -136,30 +132,14 @@ Status SnapshotMainStackThread(
uintptr_t stack_high_addr,
thread::SnapshotThreadInfo::StreamEncoder& encoder,
thread::ProcessThreadStackCallback& thread_stack_callback) {
- const uint32_t exc_return = cpu_state.extended.exc_return;
-
- // See ARMv7-M Architecture Reference Manual Section B1.5.8 for the exception
- // return values, in particular bits 0:3.
- // Bits 0:3 of EXC_RETURN:
- // 0b0001 - 0x1 Handler mode Main
- // 0b1001 - 0x9 Thread mode Main
- // 0b1101 - 0xD Thread mode Process
-
- // First check whether the CPU state shows the main stack was active.
- if ((exc_return & kExcReturnStackMask) != 0) {
- return OkStatus(); // Main stack is not currently active.
+ if (!MainStackActive(cpu_state)) {
+ return OkStatus(); // Main stack wasn't active, nothing to capture.
}
- const uintptr_t stack_pointer = cpu_state.extended.msp;
-
- // Second, check if we're in Handler mode, AKA handling exceptions/interrupts.
- const ProcessorMode mode = ((exc_return & kExcReturnModeMask) == 0)
- ? ProcessorMode::kHandlerMode
- : ProcessorMode::kThreadMode;
- return CaptureMainStack(mode,
+ return CaptureMainStack(ActiveProcessorMode(cpu_state),
stack_low_addr,
stack_high_addr,
- stack_pointer,
+ cpu_state.extended.msp,
encoder,
thread_stack_callback);
}
diff --git a/pw_cpu_exception_cortex_m/util.cc b/pw_cpu_exception_cortex_m/util.cc
index ff817b34f..fc2925288 100644
--- a/pw_cpu_exception_cortex_m/util.cc
+++ b/pw_cpu_exception_cortex_m/util.cc
@@ -115,10 +115,10 @@ void LogExceptionAnalysis(const pw_cpu_exception_State& cpu_state) {
}
#if _PW_ARCH_ARM_V8M_MAINLINE
if (cpu_state.extended.cfsr & kCfsrStkofMask) {
- if (cpu_state.extended.exc_return & kExcReturnStackMask) {
- PW_LOG_CRITICAL("Encountered stack overflow in thread mode");
+ if (ProcessStackActive(cpu_state)) {
+ PW_LOG_CRITICAL("Encountered process stack overflow (psp)");
} else {
- PW_LOG_CRITICAL("Encountered main (interrupt handler) stack overflow");
+ PW_LOG_CRITICAL("Encountered main stack overflow (msp)");
}
}
#endif // _PW_ARCH_ARM_V8M_MAINLINE
@@ -150,4 +150,29 @@ void LogExceptionAnalysis(const pw_cpu_exception_State& cpu_state) {
#endif // PW_CPU_EXCEPTION_CORTEX_M_EXTENDED_CFSR_DUMP
}
+ProcessorMode ActiveProcessorMode(const pw_cpu_exception_State& cpu_state) {
+ // See ARMv7-M Architecture Reference Manual Section B1.5.8 for the exception
+ // return values, in particular bits 0:3.
+ // Bits 0:3 of EXC_RETURN:
+ // 0b0001 - 0x1 Handler mode Main
+ // 0b1001 - 0x9 Thread mode Main
+ // 0b1101 - 0xD Thread mode Process
+ // ^
+ if (cpu_state.extended.exc_return & kExcReturnModeMask) {
+ return ProcessorMode::kThreadMode;
+ }
+ return ProcessorMode::kHandlerMode;
+}
+
+bool MainStackActive(const pw_cpu_exception_State& cpu_state) {
+ // See ARMv7-M Architecture Reference Manual Section B1.5.8 for the exception
+ // return values, in particular bits 0:3.
+ // Bits 0:3 of EXC_RETURN:
+ // 0b0001 - 0x1 Handler mode Main
+ // 0b1001 - 0x9 Thread mode Main
+ // 0b1101 - 0xD Thread mode Process
+ // ^
+ return (cpu_state.extended.exc_return & kExcReturnStackMask) == 0;
+}
+
} // namespace pw::cpu_exception::cortex_m
diff --git a/pw_cpu_exception_cortex_m/util_test.cc b/pw_cpu_exception_cortex_m/util_test.cc
new file mode 100644
index 000000000..eec46a7c2
--- /dev/null
+++ b/pw_cpu_exception_cortex_m/util_test.cc
@@ -0,0 +1,114 @@
+// Copyright 2021 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_cpu_exception_cortex_m/util.h"
+
+#include "gtest/gtest.h"
+#include "pw_cpu_exception_cortex_m/cpu_state.h"
+
+namespace pw::cpu_exception::cortex_m {
+namespace {
+
+TEST(ActiveProcessorMode, HandlerModeMain) {
+  pw_cpu_exception_State cpu_state = {};
+  // See ARMv7-M Architecture Reference Manual Section B1.5.8 for the exception
+  // return values, in particular bits 0:3.
+  // Bits 0:3 of EXC_RETURN:
+  // 0b0001 - 0x1 Handler mode Main
+  cpu_state.extended.exc_return = 0b0001;
+  EXPECT_EQ(ActiveProcessorMode(cpu_state), ProcessorMode::kHandlerMode);
+}
+
+TEST(ActiveProcessorMode, ThreadModeMain) {
+  pw_cpu_exception_State cpu_state = {};
+  // See ARMv7-M Architecture Reference Manual Section B1.5.8 for the exception
+  // return values, in particular bits 0:3.
+  // Bits 0:3 of EXC_RETURN:
+  // 0b1001 - 0x9 Thread mode Main
+  cpu_state.extended.exc_return = 0b1001;
+  EXPECT_EQ(ActiveProcessorMode(cpu_state), ProcessorMode::kThreadMode);
+}
+
+TEST(ActiveProcessorMode, ThreadModeProcess) {
+  pw_cpu_exception_State cpu_state = {};
+  // See ARMv7-M Architecture Reference Manual Section B1.5.8 for the exception
+  // return values, in particular bits 0:3.
+  // Bits 0:3 of EXC_RETURN:
+  // 0b1101 - 0xD Thread mode Process
+  cpu_state.extended.exc_return = 0b1101;
+  EXPECT_EQ(ActiveProcessorMode(cpu_state), ProcessorMode::kThreadMode);
+}
+
+TEST(MainStackActive, HandlerModeMain) {
+  pw_cpu_exception_State cpu_state = {};
+  // See ARMv7-M Architecture Reference Manual Section B1.5.8 for the exception
+  // return values, in particular bits 0:3.
+  // Bits 0:3 of EXC_RETURN:
+  // 0b0001 - 0x1 Handler mode Main
+  cpu_state.extended.exc_return = 0b0001;
+  EXPECT_TRUE(MainStackActive(cpu_state));
+}
+
+TEST(MainStackActive, ThreadModeMain) {
+  pw_cpu_exception_State cpu_state = {};
+  // See ARMv7-M Architecture Reference Manual Section B1.5.8 for the exception
+  // return values, in particular bits 0:3.
+  // Bits 0:3 of EXC_RETURN:
+  // 0b1001 - 0x9 Thread mode Main
+  cpu_state.extended.exc_return = 0b1001;
+  EXPECT_TRUE(MainStackActive(cpu_state));
+}
+
+TEST(MainStackActive, ThreadModeProcess) {
+  pw_cpu_exception_State cpu_state = {};
+  // See ARMv7-M Architecture Reference Manual Section B1.5.8 for the exception
+  // return values, in particular bits 0:3.
+  // Bits 0:3 of EXC_RETURN:
+  // 0b1101 - 0xD Thread mode Process
+  cpu_state.extended.exc_return = 0b1101;
+  EXPECT_FALSE(MainStackActive(cpu_state));
+}
+
+TEST(ProcessStackActive, HandlerModeMain) {
+  pw_cpu_exception_State cpu_state = {};
+  // See ARMv7-M Architecture Reference Manual Section B1.5.8 for the exception
+  // return values, in particular bits 0:3.
+  // Bits 0:3 of EXC_RETURN:
+  // 0b0001 - 0x1 Handler mode Main
+  cpu_state.extended.exc_return = 0b0001;
+  EXPECT_FALSE(ProcessStackActive(cpu_state));
+}
+
+TEST(ProcessStackActive, ThreadModeMain) {
+  pw_cpu_exception_State cpu_state = {};
+  // See ARMv7-M Architecture Reference Manual Section B1.5.8 for the exception
+  // return values, in particular bits 0:3.
+  // Bits 0:3 of EXC_RETURN:
+  // 0b1001 - 0x9 Thread mode Main
+  cpu_state.extended.exc_return = 0b1001;
+  EXPECT_FALSE(ProcessStackActive(cpu_state));
+}
+
+TEST(ProcessStackActive, ThreadModeProcess) {
+  pw_cpu_exception_State cpu_state = {};
+  // See ARMv7-M Architecture Reference Manual Section B1.5.8 for the exception
+  // return values, in particular bits 0:3.
+  // Bits 0:3 of EXC_RETURN:
+  // 0b1101 - 0xD Thread mode Process
+  cpu_state.extended.exc_return = 0b1101;
+  EXPECT_TRUE(ProcessStackActive(cpu_state));
+}
+
+}  // namespace
+}  // namespace pw::cpu_exception::cortex_m
diff --git a/pw_crypto/ecdsa_boringssl.cc b/pw_crypto/ecdsa_boringssl.cc
index 4556950f9..4d361e3fa 100644
--- a/pw_crypto/ecdsa_boringssl.cc
+++ b/pw_crypto/ecdsa_boringssl.cc
@@ -11,7 +11,7 @@
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
-#define PW_LOG_MODULE_NAME "ECDSA"
+#define PW_LOG_MODULE_NAME "ECDSA-BSSL"
#define PW_LOG_LEVEL PW_LOG_LEVEL_WARN
#include "openssl/bn.h"
diff --git a/pw_crypto/ecdsa_mbedtls.cc b/pw_crypto/ecdsa_mbedtls.cc
index 92e6258ff..dd68eace9 100644
--- a/pw_crypto/ecdsa_mbedtls.cc
+++ b/pw_crypto/ecdsa_mbedtls.cc
@@ -11,7 +11,7 @@
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
-#define PW_LOG_MODULE_NAME "ECDSA"
+#define PW_LOG_MODULE_NAME "ECDSA-MTLS"
#define PW_LOG_LEVEL PW_LOG_LEVEL_WARN
#include "mbedtls/ecdsa.h"
diff --git a/pw_crypto/ecdsa_uecc.cc b/pw_crypto/ecdsa_uecc.cc
index 25e37b1b1..937d79d56 100644
--- a/pw_crypto/ecdsa_uecc.cc
+++ b/pw_crypto/ecdsa_uecc.cc
@@ -11,7 +11,7 @@
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
-#define PW_LOG_MODULE_NAME "ECDSA"
+#define PW_LOG_MODULE_NAME "ECDSA-UECC"
#define PW_LOG_LEVEL PW_LOG_LEVEL_WARN
#include "pw_crypto/ecdsa.h"
diff --git a/pw_crypto/public/pw_crypto/sha256.h b/pw_crypto/public/pw_crypto/sha256.h
index cce159a9d..1389e28df 100644
--- a/pw_crypto/public/pw_crypto/sha256.h
+++ b/pw_crypto/public/pw_crypto/sha256.h
@@ -14,9 +14,6 @@
#pragma once
-#define PW_LOG_MODULE_NAME "SHA256"
-#define PW_LOG_LEVEL PW_LOG_LEVEL_WARN
-
#include <cstdint>
#include "pw_bytes/span.h"
diff --git a/pw_crypto/sha256_boringssl.cc b/pw_crypto/sha256_boringssl.cc
index ba558b1eb..fb9c783d1 100644
--- a/pw_crypto/sha256_boringssl.cc
+++ b/pw_crypto/sha256_boringssl.cc
@@ -11,6 +11,8 @@
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
+#define PW_LOG_MODULE_NAME "SHA256-BSSL"
+#define PW_LOG_LEVEL PW_LOG_LEVEL_WARN
#include "pw_crypto/sha256.h"
#include "pw_status/status.h"
diff --git a/pw_crypto/sha256_mbedtls.cc b/pw_crypto/sha256_mbedtls.cc
index 0e9c48984..8cb6595ba 100644
--- a/pw_crypto/sha256_mbedtls.cc
+++ b/pw_crypto/sha256_mbedtls.cc
@@ -11,6 +11,8 @@
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
+#define PW_LOG_MODULE_NAME "SHA256-MTLS"
+#define PW_LOG_LEVEL PW_LOG_LEVEL_WARN
#include "pw_crypto/sha256.h"
#include "pw_status/status.h"
diff --git a/pw_docgen/docs.gni b/pw_docgen/docs.gni
index 575994673..b0ae93a16 100644
--- a/pw_docgen/docs.gni
+++ b/pw_docgen/docs.gni
@@ -20,6 +20,9 @@ import("$dir_pw_build/python_action.gni")
declare_args() {
# Whether or not the current target should build docs.
pw_docgen_BUILD_DOCS = false
+
+ # Set to enable Google Analytics tracking of generated docs.
+ pw_docs_google_analytics_id = ""
}
# Defines a group of documentation files and assets.
@@ -102,10 +105,18 @@ template("pw_doc_gen") {
rebase_path(invoker.conf, root_build_dir),
"--out-dir",
rebase_path(invoker.output_directory, root_build_dir),
- "--metadata",
]
+ # Enable Google Analytics if a measurement ID is provided
+ if (pw_docs_google_analytics_id != "") {
+ _script_args += [
+ "--google-analytics-id",
+ pw_docs_google_analytics_id,
+ ]
+ }
+
# Metadata JSON file path.
+ _script_args += [ "--metadata" ]
_script_args +=
rebase_path(get_target_outputs(":$_metadata_file_target"), root_build_dir)
diff --git a/pw_docgen/docs.rst b/pw_docgen/docs.rst
index 595be1ce3..c722d0a17 100644
--- a/pw_docgen/docs.rst
+++ b/pw_docgen/docs.rst
@@ -20,7 +20,7 @@ depend on other build targets, such as report cards for binary size/profiling.
Any time the code is changed, documentation will be regenerated with the updated
reports.
-Documentation overview
+Documentation Overview
======================
Each Pigweed module provides documentation describing its functionality, use
cases, and programming API.
@@ -29,7 +29,7 @@ Included in a module's documentation are report cards which show an overview of
the module's size cost and performance benchmarks. These allow prospective users
to evaluate the impact of including the module in their projects.
-Build integration
+Build Integration
=================
Pigweed documentation files are written in `reStructuredText`_ format and
@@ -42,14 +42,6 @@ rendered to HTML using `Sphinx`_ through Pigweed's GN build system.
There are additonal Sphinx plugins used for rendering diagrams within
reStructuredText files including:
-* `Blockdiag <http://blockdiag.com/>`_ via these sphinxcontrib packages:
-
- * `sphinxcontrib-blockdiag
- <https://pypi.org/project/sphinxcontrib-blockdiag/>`_
- * `sphinxcontrib-actdiag <https://pypi.org/project/sphinxcontrib-actdiag/>`_
- * `sphinxcontrib-nwdiag <https://pypi.org/project/sphinxcontrib-nwdiag/>`_
- * `sphinxcontrib-seqdiag <https://pypi.org/project/sphinxcontrib-seqdiag/>`_
-
* `mermaid <https://mermaid-js.github.io/>`_ via the `sphinxcontrib-mermaid
<https://pypi.org/project/sphinxcontrib-mermaid/>`_ package.
@@ -60,12 +52,11 @@ target, which accumulates all of them and renders the resulting HTML. This
system can either be used directly within Pigweed, or integrated into a
downstream project.
-GN templates
+GN Templates
------------
pw_doc_group
____________
-
The main template for defining documentation files is ``pw_doc_group``. It is
used to logically group a collection of documentation source files and assets.
Each Pigweed module is expected to provide at least one ``pw_doc_group`` target
@@ -94,7 +85,6 @@ groups, causing them to be built with it.
pw_doc_gen
__________
-
The ``pw_doc_gen`` template creates a target which renders complete HTML
documentation for a project. It depends on registered ``pw_doc_group`` targets
and creates an action which collects and renders them.
@@ -124,9 +114,8 @@ to tie everything together.
]
}
-Generating documentation
+Generating Documentation
------------------------
-
All source files listed under a ``pw_doc_gen`` target and its ``pw_doc_group``
dependencies get copied out into a directory structure mirroring the original
layout of the modules in which the sources appear. This is demonstrated below
@@ -173,3 +162,20 @@ practice, relative imports from within modules' documentation groups are
identical to the project's directory structure. The only special case is the
top-level ``index.rst`` file's imports; they must start from the project's build
root.
+
+Sphinx Extensions
+=================
+This module houses Pigweed-specific extensions for the Sphinx documentation
+generator. Extensions are included and configured in ``docs/conf.py``.
+
+google_analytics
+----------------
+When this extension is included and a ``google_analytics_id`` is set in the
+Sphinx configuration, a Google Analytics tracking tag will be added to each
+page of the documentation when it is rendered to HTML.
+
+By default, the Sphinx configuration's ``google_analytics_id`` is set
+automatically based on the value of the GN argument
+``pw_docs_google_analytics_id``, allowing you to control whether tracking is
+enabled or not in your build configuration. Typically, you would only enable
+this for documentation builds intended for deployment on the web.
diff --git a/pw_docgen/py/BUILD.gn b/pw_docgen/py/BUILD.gn
index 81ff9bf25..dc23ed42f 100644
--- a/pw_docgen/py/BUILD.gn
+++ b/pw_docgen/py/BUILD.gn
@@ -25,6 +25,8 @@ pw_python_package("py") {
sources = [
"pw_docgen/__init__.py",
"pw_docgen/docgen.py",
+ "pw_docgen/sphinx/__init__.py",
+ "pw_docgen/sphinx/google_analytics.py",
]
pylintrc = "$dir_pigweed/.pylintrc"
}
diff --git a/pw_docgen/py/pw_docgen/docgen.py b/pw_docgen/py/pw_docgen/docgen.py
index 355bb34f5..7b65e7754 100644
--- a/pw_docgen/py/pw_docgen/docgen.py
+++ b/pw_docgen/py/pw_docgen/docgen.py
@@ -25,7 +25,7 @@ import subprocess
import sys
from pathlib import Path
-from typing import Dict, List, Tuple
+from typing import Dict, List, Optional, Tuple
SCRIPT_HEADER: str = '''
██████╗ ██╗ ██████╗ ██╗ ██╗███████╗███████╗██████╗ ██████╗ ██████╗ ██████╗███████╗
@@ -63,18 +63,26 @@ def parse_args() -> argparse.Namespace:
required=True,
type=argparse.FileType('r'),
help='Metadata JSON file')
+ parser.add_argument('--google-analytics-id',
+ const=None,
+ help='Enables Google Analytics with the provided ID')
return parser.parse_args()
-def build_docs(src_dir: str, dst_dir: str) -> int:
+def build_docs(src_dir: str,
+ dst_dir: str,
+ google_analytics_id: Optional[str] = None) -> int:
"""Runs Sphinx to render HTML documentation from a doc tree."""
# TODO(frolv): Specify the Sphinx script from a prebuilts path instead of
# requiring it in the tree.
- command = [
- 'sphinx-build', '-W', '-b', 'html', '-d', f'{dst_dir}/help', src_dir,
- f'{dst_dir}/html'
- ]
+ command = ['sphinx-build', '-W', '-b', 'html', '-d', f'{dst_dir}/help']
+
+ if google_analytics_id is not None:
+ command.append(f'-Dgoogle_analytics_id={google_analytics_id}')
+
+ command.extend([src_dir, f'{dst_dir}/html'])
+
return subprocess.call(command)
@@ -128,7 +136,8 @@ def main() -> int:
# Flush all script output before running Sphinx.
print('-' * 80, flush=True)
- return build_docs(args.sphinx_build_dir, args.out_dir)
+ return build_docs(args.sphinx_build_dir, args.out_dir,
+ args.google_analytics_id)
if __name__ == '__main__':
diff --git a/pw_docgen/py/pw_docgen/sphinx/__init__.py b/pw_docgen/py/pw_docgen/sphinx/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pw_docgen/py/pw_docgen/sphinx/__init__.py
diff --git a/pw_docgen/py/pw_docgen/sphinx/google_analytics.py b/pw_docgen/py/pw_docgen/sphinx/google_analytics.py
new file mode 100644
index 000000000..3816fa25f
--- /dev/null
+++ b/pw_docgen/py/pw_docgen/sphinx/google_analytics.py
@@ -0,0 +1,39 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""A Sphinx extension to add a Google Analytics tag to generated docs"""
+
+
+def add_google_analytics_tag(app, pagename, templatename, context, doctree): # pylint: disable=unused-argument
+ if app.config.google_analytics_id is None:
+ return
+
+ if 'metatags' not in context:
+ context['metatags'] = ''
+
+ # pylint: disable=line-too-long
+ context['metatags'] += (
+ f"""<script async src="https://www.googletagmanager.com/gtag/js?id={app.config.google_analytics_id}"></script>
+<script>
+ window.dataLayer = window.dataLayer || [];
+ function gtag(){{dataLayer.push(arguments);}}
+ gtag('js', new Date());
+
+ gtag('config', '{app.config.google_analytics_id}');
+</script>""")
+
+
+def setup(app):
+ app.add_config_value('google_analytics_id', None, 'html')
+ app.connect('html-page-context', add_google_analytics_tag)
+ return {'parallel_read_safe': True}
diff --git a/pw_docgen/py/setup.cfg b/pw_docgen/py/setup.cfg
index d7d1293bf..62d65c378 100644
--- a/pw_docgen/py/setup.cfg
+++ b/pw_docgen/py/setup.cfg
@@ -24,11 +24,6 @@ zip_safe = False
install_requires =
sphinx >3
sphinx-rtd-theme
- Pillow >=6.2.2, <=8.2.0
- sphinxcontrib-actdiag
- sphinxcontrib-blockdiag
- sphinxcontrib-nwdiag
- sphinxcontrib-seqdiag
sphinxcontrib-mermaid >=0.7.1
[options.package_data]
diff --git a/pw_doctor/docs.rst b/pw_doctor/docs.rst
index 1bd65783c..6fb524e1c 100644
--- a/pw_doctor/docs.rst
+++ b/pw_doctor/docs.rst
@@ -7,10 +7,19 @@ pw_doctor
it checks that things exactly match what is expected and it checks that things
look compatible without.
-Currently pw_doctor expects the running python to be Python 3.8 or 3.9.
-
Projects that adjust the behavior of pw_env_setup may need to customize
these checks, but unfortunately this is not supported yet.
+Checks carried out by pw_doctor include:
+
+* The bootstrapped OS matches the current OS.
+* ``PW_ROOT`` is defined and points to the root of the Pigweed repo.
+* The presubmit git hook is installed.
+* The current Python version is 3.8 or 3.9.
+* The Pigweed virtual env is active.
+* CIPD is set up correctly and in use.
+* The CIPD packages required by Pigweed are up to date.
+* The platform supports symlinks.
+
.. note::
The documentation for this module is currently incomplete.
diff --git a/pw_doctor/py/pw_doctor/doctor.py b/pw_doctor/py/pw_doctor/doctor.py
index f91800520..1d2d37345 100755
--- a/pw_doctor/py/pw_doctor/doctor.py
+++ b/pw_doctor/py/pw_doctor/doctor.py
@@ -27,6 +27,7 @@ import tempfile
from typing import Callable, Iterable, List, Set
import pw_cli.pw_command_plugins
+import pw_env_setup.cipd_setup.update as cipd_update
def call_stdout(*args, **kwargs):
@@ -288,32 +289,52 @@ def cipd_versions(ctx: DoctorContext):
if os.environ.get('PW_DOCTOR_SKIP_CIPD_CHECKS'):
return
- try:
- root = pathlib.Path(os.environ['PW_ROOT']).resolve()
- except KeyError:
- return # This case is handled elsewhere.
+ if 'PW_CIPD_INSTALL_DIR' not in os.environ:
+ ctx.error('PW_CIPD_INSTALL_DIR not set')
+ cipd_dir = pathlib.Path(os.environ['PW_CIPD_INSTALL_DIR'])
- if 'PW_PIGWEED_CIPD_INSTALL_DIR' not in os.environ:
- ctx.error('PW_PIGWEED_CIPD_INSTALL_DIR not set')
- cipd_dir = pathlib.Path(os.environ['PW_PIGWEED_CIPD_INSTALL_DIR'])
+ with open(cipd_dir / '_all_package_files.json', 'r') as ins:
+ json_paths = [pathlib.Path(x) for x in json.load(ins)]
+
+ platform = cipd_update.platform()
+
+ def check_cipd(package, install_path):
+ if platform not in package['platforms']:
+ ctx.debug("skipping %s because it doesn't apply to %s",
+ package['path'], platform)
+ return
- versions_path = cipd_dir / '.versions'
- # Deliberately not checking luci.json--it's not required to be up-to-date.
- json_path = root.joinpath('pw_env_setup', 'py', 'pw_env_setup',
- 'cipd_setup', 'pigweed.json')
+ tags_without_refs = [x for x in package['tags'] if ':' in x]
+ if not tags_without_refs:
+ ctx.debug('skipping %s because it tracks a ref, not a tag (%s)',
+ package['path'], ', '.join(package['tags']))
+ return
- def check_cipd(package):
ctx.debug('checking version of %s', package['path'])
+
name = [
part for part in package['path'].split('/') if '{' not in part
][-1]
- path = versions_path.joinpath(f'{name}.cipd_version')
- if not path.is_file():
- ctx.debug('no version file')
- return
+
+ # If the exact path is specified in the JSON file use it, and require it
+ # exist.
+ if 'version_file' in package:
+ path = install_path / package['version_file']
+ if not path.is_file():
+ ctx.error(f'no version file for {name} at {path}')
+ return
+
+ # Otherwise, follow a heuristic to find the file but don't require the
+ # file to exist.
+ else:
+ path = install_path / '.versions' / f'{name}.cipd_version'
+ if not path.is_file():
+ ctx.debug(f'no version file for {name} at {path}')
+ return
with path.open() as ins:
installed = json.load(ins)
+ ctx.debug(f'found version file for {name} at {path}')
describe = (
'cipd',
@@ -330,11 +351,19 @@ def cipd_versions(ctx: DoctorContext):
for tag in package['tags']:
if tag not in output:
ctx.error(
- 'CIPD package %s is out of date, please rerun bootstrap',
- installed['package_name'])
-
- for package in json.loads(json_path.read_text()).get('packages', ()):
- ctx.submit(check_cipd, package)
+ 'CIPD package %s in %s is out of date, please rerun '
+ 'bootstrap', installed['package_name'], install_path)
+
+ else:
+ ctx.debug('CIPD package %s in %s is current',
+ installed['package_name'], install_path)
+
+ for json_path in json_paths:
+ ctx.debug(f'Checking packages in {json_path}')
+ install_path = pathlib.Path(
+ cipd_update.package_installation_path(cipd_dir, json_path))
+ for package in json.loads(json_path.read_text()).get('packages', ()):
+ ctx.submit(check_cipd, package, install_path)
@register_into(CHECKS)
diff --git a/pw_env_setup/bazel/cipd_setup/internal/cipd_internal.bzl b/pw_env_setup/bazel/cipd_setup/internal/cipd_internal.bzl
index 4c434f091..bbd2c95e8 100644
--- a/pw_env_setup/bazel/cipd_setup/internal/cipd_internal.bzl
+++ b/pw_env_setup/bazel/cipd_setup/internal/cipd_internal.bzl
@@ -43,6 +43,8 @@ def platform_normalized(rctx):
else:
fail("Could not normalize os:", rctx.os.name)
+# TODO(pwbug/388): Enable unused variable check.
+# buildifier: disable=unused-variable
def arch_normalized(rctx):
"""Normalizes the architecture string to match CIPDs naming system.
diff --git a/pw_env_setup/config.json b/pw_env_setup/config.json
index 9eba3e4c1..5d3e5b0a9 100644
--- a/pw_env_setup/config.json
+++ b/pw_env_setup/config.json
@@ -9,5 +9,6 @@
":python.install"
]
},
- "pw_packages": []
+ "pw_packages": [],
+ "gni_file": "build_overrides/pigweed_environment.gni"
}
diff --git a/pw_env_setup/docs.rst b/pw_env_setup/docs.rst
index 49c261b86..ecaecb303 100644
--- a/pw_env_setup/docs.rst
+++ b/pw_env_setup/docs.rst
@@ -218,8 +218,12 @@ here.
``cipd_package_files``
CIPD package file. JSON file consisting of a list of additional CIPD package
- files to import and a list of dictionaries with "path", "platforms", and
- "tags" keys. Both top-level lists are optional. An example is below.
+ files to import and a list of dictionaries with "path", "platforms", "subdir",
+ "tags", and "version_file" keys. Both top-level lists are optional. An
+ example is below. Only "path", "platforms", and "tags" are required. If
+ "version_file" is specified then ``pw doctor`` will fail if that version file
+is not present. If "subdir" is specified then this package will be installed
+ in a subdirectory of the directory created for packages in this file.
.. code-block:: json
@@ -236,9 +240,11 @@ here.
"mac-amd64",
"windows-amd64"
],
+ "subdir": "pa/th",
"tags": [
"version:2@1.16.3"
- ]
+ ],
+ "version_file": ".versions/go.cipd_version"
}
]
}
@@ -273,6 +279,16 @@ here.
A list of packages to install using :ref:`pw_package <module-pw_package>`
after the rest of bootstrap completes.
+``gni_file``
+ Location to write a ``.gni`` file containing paths to many things within the
+ environment directory. Defaults to
+ ``build_overrides/pigweed_environment.gni``.
+
+``json_file``
+ Location to write a ``.json`` file containing step-by-step modifications to
+ the environment, for reading by tools that don't inherit an environment from
+ a sourced ``bootstrap.sh``.
+
An example of a config file is below.
.. code-block:: json
@@ -295,7 +311,9 @@ An example of a config file is below.
"optional_submodules": [
"optional/submodule/one",
"optional/submodule/two"
- ]
+ ],
+ "gni_file": "tools/environment.gni",
+ "json_file": "tools/environment.json"
}
In case the CIPD packages need to be referenced from other scripts, variables
@@ -309,6 +327,11 @@ set the following environment variables.
- ``PW_MYPROJECTNAME_CIPD_INSTALL_DIR``
- ``PW_PIGWEED_CIPD_INSTALL_DIR``
+These directories are also referenced in the gni_file specified by the
+environment config file as ``dir_cipd_${BASENAME}``. This allows the GN build to
+reliably reference these directories without using GN ``getenv()`` calls or
+hardcoding paths.
+
In addition, ``PW_${BASENAME}_CIPD_INSTALL_DIR`` and
``PW_${BASENAME}_CIPD_INSTALL_DIR/bin`` are both added to ``PATH`` for each
package directory.
@@ -357,6 +380,8 @@ again, and run ``pw presubmit``.
Environment Variables
*********************
+Input Variables
+---------------
The following environment variables affect env setup behavior. Most users will
never need to set these.
@@ -402,6 +427,8 @@ never need to set these.
The absolute path to the Pigweed repository within ``PW_PROJECT_ROOT``. This
should be set by the project's bootstrap script.
+Output Variables
+----------------
The following environment variables are set by env setup.
``PATH``
@@ -429,9 +456,9 @@ Non-Shell Environments
**********************
If using this outside of bash—for example directly from an IDE or CI
system—users can process the ``actions.json`` file that's generated in the
-environment directory. It lists variables to set, clear, and modify. An
-example ``actions.json`` is shown below. The "append" and "prepend" actions
-are listed in the order they should be applied, so the
+location specified by the environment config. It lists variables to set, clear,
+and modify. An example ``actions.json`` is shown below. The "append" and
+"prepend" actions are listed in the order they should be applied, so the
``<pigweed-root>/out/host/host_tools`` entry should be at the beginning of
``PATH`` and not in the middle somewhere.
@@ -458,7 +485,7 @@ are listed in the order they should be applied, so the
"PW_ROOT": "<pigweed-root>",
"_PW_ACTUAL_ENVIRONMENT_ROOT": "<pigweed-root>/.environment",
"PW_CIPD_INSTALL_DIR": "<pigweed-root>/.environment/cipd",
- "CIPD_CACHE_DIR": "/usr/local/google/home/mohrr/.cipd-cache-dir",
+ "CIPD_CACHE_DIR": "<home>/.cipd-cache-dir",
"PW_PIGWEED_CIPD_INSTALL_DIR": "<pigweed-root>/.environment/cipd/pigweed",
"PW_LUCI_CIPD_INSTALL_DIR": "<pigweed-root>/.environment/cipd/luci",
"VIRTUAL_ENV": "<pigweed-root>/.environment/pigweed-venv",
@@ -467,6 +494,25 @@ are listed in the order they should be applied, so the
}
}
+Many of these variables are directly exposed to the GN build as well, through
+the GNI file specified in the environment config file.
+
+.. code-block::
+
+ declare_args() {
+ dir_cipd_pigweed = "<pigweed-root>/.environment/cipd/packages/pigweed"
+ dir_cipd_luci = "<pigweed-root>/.environment/cipd/packages/luci"
+ dir_virtual_env = "<pigweed-root>/.environment/pigweed-venv"
+ }
+
+It's straightforward to use these variables.
+
+.. code-block:: cpp
+
+ import("//build_overrides/pigweed_environment.gni")
+
+ deps = [ "$dir_cipd_pigweed/..." ]
+
Implementation
**************
diff --git a/pw_env_setup/get_pw_env_setup.sh b/pw_env_setup/get_pw_env_setup.sh
index 0317b2e7a..ff74220bd 100755
--- a/pw_env_setup/get_pw_env_setup.sh
+++ b/pw_env_setup/get_pw_env_setup.sh
@@ -46,6 +46,11 @@ if [ "$ARCH" = "x86_64" ]; then
ARCH="amd64"
fi
+# Support `mac-arm64` through Rosetta until `mac-arm64` binaries are ready
+if [ "$OS" = "mac" ] && [ "$ARCH" = "arm64" ]; then
+ ARCH="amd64"
+fi
+
for HASH in $(git --git-dir="$PW_ROOT/.git" --no-pager log --max-count=10 --format=format:%H); do
URL="https://storage.googleapis.com/pigweed-envsetup/$OS-$ARCH"
URL="$URL/$HASH/pw_env_setup"
diff --git a/pw_env_setup/post-checkout-hook-helper.sh b/pw_env_setup/post-checkout-hook-helper.sh
new file mode 100755
index 000000000..91d106488
--- /dev/null
+++ b/pw_env_setup/post-checkout-hook-helper.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# If we're not in a bootstrapped shell exit immediately. We won't know where
+# the config file is.
+if [ -z "$PW_PROJECT_ROOT" ]; then
+ exit 0
+fi
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+echo -n "Updating CIPD packages..."
+
+"$PW_ROOT/pw_env_setup/py/pw_env_setup/env_setup.py" \
+ --project-root "$PW_PROJECT_ROOT" \
+ --pw-root "$PW_ROOT" \
+ --config-file "$_PW_ENVIRONMENT_CONFIG_FILE" \
+ --shell-file "$_PW_ACTUAL_ENVIRONMENT_ROOT/unused.sh" \
+ --install-dir "$_PW_ACTUAL_ENVIRONMENT_ROOT" \
+ --quiet \
+ --trust-cipd-hash \
+ --cipd-only
+
+echo "done."
diff --git a/pw_env_setup/post-checkout-hook.sh b/pw_env_setup/post-checkout-hook.sh
new file mode 100755
index 000000000..065e328d1
--- /dev/null
+++ b/pw_env_setup/post-checkout-hook.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# The logic for the post-checkout hook is kept in another file so it can be
+# updated without requiring users to install updated hooks.
+
+# If we're not in a bootstrapped shell exit immediately. We won't know where
+# the helper or env_setup scripts are.
+if [ -z "$PW_ROOT" ]; then
+ exit 0
+fi
+
+"$PW_ROOT/pw_env_setup/post-checkout-hook-helper.sh"
diff --git a/pw_env_setup/py/BUILD.gn b/pw_env_setup/py/BUILD.gn
index 1775ff953..cce9a21b8 100644
--- a/pw_env_setup/py/BUILD.gn
+++ b/pw_env_setup/py/BUILD.gn
@@ -32,6 +32,7 @@ pw_python_package("py") {
"pw_env_setup/colors.py",
"pw_env_setup/env_setup.py",
"pw_env_setup/environment.py",
+ "pw_env_setup/gni_visitor.py",
"pw_env_setup/json_visitor.py",
"pw_env_setup/python_packages.py",
"pw_env_setup/shell_visitor.py",
diff --git a/pw_env_setup/py/pw_env_setup/cipd_setup/arm.json b/pw_env_setup/py/pw_env_setup/cipd_setup/arm.json
index dbe0164bf..ff12cd23f 100644
--- a/pw_env_setup/py/pw_env_setup/cipd_setup/arm.json
+++ b/pw_env_setup/py/pw_env_setup/cipd_setup/arm.json
@@ -9,7 +9,8 @@
],
"tags": [
"version:10-2020-q4-major"
- ]
+ ],
+ "version_file": ".versions/gcc-arm-none-eabi.cipd_version"
}
]
}
diff --git a/pw_env_setup/py/pw_env_setup/cipd_setup/bazel.json b/pw_env_setup/py/pw_env_setup/cipd_setup/bazel.json
index ece4ad26f..c053f272e 100644
--- a/pw_env_setup/py/pw_env_setup/cipd_setup/bazel.json
+++ b/pw_env_setup/py/pw_env_setup/cipd_setup/bazel.json
@@ -9,7 +9,8 @@
],
"tags": [
"version:5.0.0.1"
- ]
+ ],
+ "version_file": ".versions/bazel.cipd_version"
}
]
}
diff --git a/pw_env_setup/py/pw_env_setup/cipd_setup/luci.json b/pw_env_setup/py/pw_env_setup/cipd_setup/luci.json
index fa30083a7..e7519557a 100644
--- a/pw_env_setup/py/pw_env_setup/cipd_setup/luci.json
+++ b/pw_env_setup/py/pw_env_setup/cipd_setup/luci.json
@@ -11,7 +11,8 @@
],
"tags": [
"latest"
- ]
+ ],
+ "version_file": ".versions/bb.cipd_version"
},
{
"path": "infra/tools/luci-auth/${platform}",
@@ -24,7 +25,8 @@
],
"tags": [
"latest"
- ]
+ ],
+ "version_file": ".versions/luci-auth.cipd_version"
},
{
"path": "infra/tools/luci/gerrit/${platform}",
@@ -37,7 +39,8 @@
],
"tags": [
"latest"
- ]
+ ],
+ "version_file": ".versions/gerrit.cipd_version"
},
{
"path": "infra/tools/luci/gitiles/${platform}",
@@ -50,7 +53,8 @@
],
"tags": [
"latest"
- ]
+ ],
+ "version_file": ".versions/gitiles.cipd_version"
},
{
"path": "infra/tools/luci/cas/${platform}",
@@ -63,7 +67,8 @@
],
"tags": [
"latest"
- ]
+ ],
+ "version_file": ".versions/cas.cipd_version"
},
{
"path": "infra/tools/luci/led/${platform}",
@@ -76,7 +81,8 @@
],
"tags": [
"latest"
- ]
+ ],
+ "version_file": ".versions/led.cipd_version"
},
{
"path": "infra/tools/luci/logdog/logdog/${platform}",
@@ -88,7 +94,8 @@
],
"tags": [
"latest"
- ]
+ ],
+ "version_file": ".versions/logdog.cipd_version"
}
]
}
diff --git a/pw_env_setup/py/pw_env_setup/cipd_setup/pigweed.json b/pw_env_setup/py/pw_env_setup/cipd_setup/pigweed.json
index 2f5e25ef9..6d893858d 100644
--- a/pw_env_setup/py/pw_env_setup/cipd_setup/pigweed.json
+++ b/pw_env_setup/py/pw_env_setup/cipd_setup/pigweed.json
@@ -10,8 +10,9 @@
"windows-amd64"
],
"tags": [
- "git_revision:0725d7827575b239594fbc8fd5192873a1d62f44"
- ]
+ "git_revision:f27bae882b2178ccc3c24f314c88db9a34118992"
+ ],
+ "version_file": ".versions/gn.cipd_version"
},
{
"path": "infra/3pp/tools/ninja/${platform}",
@@ -35,8 +36,9 @@
"windows-amd64"
],
"tags": [
- "version:3.22.20211026-geeb4540"
- ]
+ "version:3.23.20220402-g6733ad4"
+ ],
+ "version_file": ".versions/cmake.cipd_version"
},
{
"path": "pigweed/third_party/bloaty-embedded/${platform}",
@@ -46,7 +48,8 @@
],
"tags": [
"git_revision:2d87d204057b419f5290f8d38b61b9c2c5b4fb52-2"
- ]
+ ],
+ "version_file": ".versions/bloaty-embedded.cipd_version"
},
{
"path": "infra/3pp/tools/protoc/${platform}",
@@ -80,7 +83,8 @@
],
"tags": [
"git_revision:1aa59ff2f789776ebfa2d4b315fd3ea589652b4a"
- ]
+ ],
+ "version_file": ".versions/clang.cipd_version"
},
{
"path": "infra/3pp/tools/go/${platform}",
@@ -92,7 +96,7 @@
"windows-amd64"
],
"tags": [
- "version:2@1.17.6"
+ "version:2@1.18"
]
},
{
@@ -107,14 +111,16 @@
]
},
{
- "_comment": "TODO(pwbug/66) Put openocd in cipd for Windows.",
- "path": "pigweed/third_party/openocd/${platform}",
+ "path": "infra/3pp/tools/openocd/${platform}",
"platforms": [
"linux-amd64",
- "mac-amd64"
+ "linux-arm64",
+ "mac-amd64",
+ "mac-arm64",
+ "windows-amd64"
],
"tags": [
- "git_revision:e41c0f4906e46d1076ce62a0da5518aa1ca280b8"
+ "version:2@0.11.0-3"
]
},
{
@@ -134,8 +140,9 @@
"windows-amd64"
],
"tags": [
- "git_revision:12fa59f95bf64147252f4fc60f923cfae8c75dc9"
- ]
+ "git_revision:3f30a2e1ac848bd3dde1322ee2ace4d4f935c29d"
+ ],
+ "version_file": ".versions/host_tools.cipd_version"
},
{
"path": "infra/rbe/client/${platform}",
@@ -155,8 +162,9 @@
"mac-amd64"
],
"tags": [
- "git_revision:d80b96f149ebdd2ee530da1cc6245e0888080e7e"
- ]
+ "git_revision:44f28df24767cf9dca1ddc9b23157737c4cbb645"
+ ],
+ "version_file": ".versions/qemu.cipd_version"
},
{
"path": "fuchsia/third_party/kythe",
@@ -198,7 +206,7 @@
"windows-amd64"
],
"tags": [
- "version:2@4.2.5"
+ "version:2@5.0.1"
]
}
]
diff --git a/pw_env_setup/py/pw_env_setup/cipd_setup/python.json b/pw_env_setup/py/pw_env_setup/cipd_setup/python.json
index 6ecbff5f9..210e597f7 100644
--- a/pw_env_setup/py/pw_env_setup/cipd_setup/python.json
+++ b/pw_env_setup/py/pw_env_setup/cipd_setup/python.json
@@ -11,7 +11,8 @@
],
"tags": [
"version:2@3.9.5.chromium.19"
- ]
+ ],
+ "version_file": ".versions/cpython3.cipd_version"
},
{
"_comment": "TODO(pwbug/455) Use 3.9 for Macs too.",
@@ -21,7 +22,8 @@
],
"tags": [
"version:2@3.8.10.chromium.21"
- ]
+ ],
+ "version_file": ".versions/cpython3.cipd_version"
}
]
}
diff --git a/pw_env_setup/py/pw_env_setup/cipd_setup/update.py b/pw_env_setup/py/pw_env_setup/cipd_setup/update.py
index 380ff5742..eb2b835b1 100755
--- a/pw_env_setup/py/pw_env_setup/cipd_setup/update.py
+++ b/pw_env_setup/py/pw_env_setup/cipd_setup/update.py
@@ -21,45 +21,15 @@ The stdout of this script is meant to be executed by the invoking shell.
from __future__ import print_function
-import argparse
+import hashlib
import json
import os
-import platform
+import platform as platform_module
import re
import subprocess
import sys
-def parse(argv=None):
- """Parse arguments."""
-
- script_root = os.path.join(os.environ['PW_ROOT'], 'pw_env_setup', 'py',
- 'pw_env_setup', 'cipd_setup')
- git_root = subprocess.check_output(
- ('git', 'rev-parse', '--show-toplevel'),
- cwd=script_root,
- ).decode('utf-8').strip()
-
- parser = argparse.ArgumentParser(description=__doc__.splitlines()[0])
- parser.add_argument(
- '--install-dir',
- dest='root_install_dir',
- default=os.path.join(git_root, '.cipd'),
- )
- parser.add_argument('--package-file',
- dest='package_files',
- metavar='PACKAGE_FILE',
- action='append')
- parser.add_argument('--cipd',
- default=os.path.join(script_root, 'wrapper.py'))
- parser.add_argument('--cache-dir',
- default=os.environ.get(
- 'CIPD_CACHE_DIR',
- os.path.expanduser('~/.cipd-cache-dir')))
-
- return parser.parse_args(argv)
-
-
def check_auth(cipd, package_files, spin):
"""Check have access to CIPD pigweed directory."""
@@ -148,23 +118,29 @@ def check_auth(cipd, package_files, spin):
return True
-def _platform():
+def platform():
osname = {
'darwin': 'mac',
'linux': 'linux',
'windows': 'windows',
- }[platform.system().lower()]
+ }[platform_module.system().lower()]
- if platform.machine().startswith(('aarch64', 'armv8')):
+ if platform_module.machine().startswith(('aarch64', 'armv8')):
arch = 'arm64'
- elif platform.machine() == 'x86_64':
+ elif platform_module.machine() == 'x86_64':
arch = 'amd64'
- elif platform.machine() == 'i686':
+ elif platform_module.machine() == 'i686':
arch = 'i386'
else:
- arch = platform.machine()
+ arch = platform_module.machine()
+
+ platform_arch = '{}-{}'.format(osname, arch).lower()
+
+ # Support `mac-arm64` through Rosetta until `mac-arm64` binaries are ready
+ if platform_arch == 'mac-arm64':
+ return 'mac-amd64'
- return '{}-{}'.format(osname, arch).lower()
+ return platform_arch
def all_package_files(env_vars, package_files):
@@ -202,9 +178,19 @@ def all_package_files(env_vars, package_files):
return result
-def write_ensure_file(package_file, ensure_file):
- with open(package_file, 'r') as ins:
- packages = json.load(ins).get('packages', ())
+def write_ensure_file(package_files, ensure_file):
+ packages = []
+
+ for package_file in package_files:
+ name = package_file_name(package_file)
+ with open(package_file, 'r') as ins:
+ file_packages = json.load(ins).get('packages', ())
+ for package in file_packages:
+ if 'subdir' in package:
+ package['subdir'] = os.path.join(name, package['subdir'])
+ else:
+ package['subdir'] = name
+ packages.extend(file_packages)
with open(ensure_file, 'w') as outs:
outs.write('$VerifiedPlatform linux-amd64\n'
@@ -214,13 +200,28 @@ def write_ensure_file(package_file, ensure_file):
for pkg in packages:
# If this is a new-style package manifest platform handling must
# be done here instead of by the cipd executable.
- if 'platforms' in pkg and _platform() not in pkg['platforms']:
+ if 'platforms' in pkg and platform() not in pkg['platforms']:
continue
outs.write('@Subdir {}\n'.format(pkg.get('subdir', '')))
outs.write('{} {}\n'.format(pkg['path'], ' '.join(pkg['tags'])))
+def package_file_name(package_file):
+ return os.path.basename(os.path.splitext(package_file)[0])
+
+
+def package_installation_path(root_install_dir, package_file):
+ """Returns the package installation path.
+
+ Args:
+ root_install_dir: The CIPD installation directory.
+ package_file: The path to the .json package definition file.
+ """
+ return os.path.join(root_install_dir, 'packages',
+ package_file_name(package_file))
+
+
def update(
cipd,
package_files,
@@ -228,89 +229,114 @@ def update(
cache_dir,
env_vars=None,
spin=None,
+ trust_hash=False,
):
"""Grab the tools listed in ensure_files."""
package_files = all_package_files(env_vars, package_files)
- if not check_auth(cipd, package_files, spin):
- return False
-
# TODO(mohrr) use os.makedirs(..., exist_ok=True).
if not os.path.isdir(root_install_dir):
os.makedirs(root_install_dir)
+ # This file is read by 'pw doctor' which needs to know which package files
+ # were used in the environment.
+ package_files_file = os.path.join(root_install_dir,
+ '_all_package_files.json')
+ with open(package_files_file, 'w') as outs:
+ json.dump(package_files, outs, indent=2)
+
if env_vars:
env_vars.prepend('PATH', root_install_dir)
env_vars.set('PW_CIPD_INSTALL_DIR', root_install_dir)
env_vars.set('CIPD_CACHE_DIR', cache_dir)
pw_root = None
+
if env_vars:
pw_root = env_vars.get('PW_ROOT', None)
if not pw_root:
pw_root = os.environ['PW_ROOT']
- # Run cipd for each json file.
- for package_file in package_files:
- if os.path.splitext(package_file)[1] == '.ensure':
- ensure_file = package_file
- else:
- ensure_file = os.path.join(
- root_install_dir,
- os.path.basename(
- os.path.splitext(package_file)[0] + '.ensure'))
- write_ensure_file(package_file, ensure_file)
-
- install_dir = os.path.join(
- root_install_dir,
- os.path.basename(os.path.splitext(package_file)[0]))
-
- name = os.path.basename(install_dir)
-
- cmd = [
- cipd,
- 'ensure',
- '-ensure-file', ensure_file,
- '-root', install_dir,
- '-log-level', 'debug',
- '-json-output',
- os.path.join(root_install_dir, '{}-output.json'.format(name)),
- '-cache-dir', cache_dir,
- '-max-threads', '0', # 0 means use CPU count.
- ] # yapf: disable
-
- # TODO(pwbug/135) Use function from common utility module.
- log = os.path.join(root_install_dir, '{}.log'.format(name))
- try:
- with open(log, 'w') as outs:
- print(*cmd, file=outs)
- subprocess.check_call(cmd,
- stdout=outs,
- stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError:
- with open(log, 'r') as ins:
- sys.stderr.write(ins.read())
- raise
-
- # Set environment variables so tools can later find things under, for
- # example, 'share'.
- if env_vars:
+ ensure_file = os.path.join(root_install_dir, 'packages.ensure')
+ write_ensure_file(package_files, ensure_file)
+
+ install_dir = os.path.join(root_install_dir, 'packages')
+
+ cmd = [
+ cipd,
+ 'ensure',
+ '-ensure-file', ensure_file,
+ '-root', install_dir,
+ '-log-level', 'debug',
+ '-json-output', os.path.join(root_install_dir, 'packages.json'),
+ '-cache-dir', cache_dir,
+ '-max-threads', '0', # 0 means use CPU count.
+ ] # yapf: disable
+
+ hasher = hashlib.sha256()
+ encoded = '\0'.join(cmd)
+ if hasattr(encoded, 'encode'):
+ encoded = encoded.encode()
+ hasher.update(encoded)
+ with open(ensure_file, 'rb') as ins:
+ hasher.update(ins.read())
+ digest = hasher.hexdigest()
+
+    hash_file = os.path.join(root_install_dir, 'packages.sha256')
+
+    with open(os.path.join(root_install_dir, 'hash.log'), 'w') as hashlog:
+        print('calculated digest:', digest, file=hashlog)
+        print('hash file path:', hash_file, file=hashlog)
+        print('exists:', os.path.isfile(hash_file), file=hashlog)
+        print('trust_hash:', trust_hash, file=hashlog)
+        if trust_hash and os.path.isfile(hash_file):
+            with open(hash_file, 'r') as ins:
+                digest_file = ins.read().strip()
+            print('contents:', digest_file, file=hashlog)
+            print('equal:', digest == digest_file, file=hashlog)
+            if digest == digest_file:
+                return True
+
+ if not check_auth(cipd, package_files, spin):
+ return False
+
+ # TODO(pwbug/135) Use function from common utility module.
+ log = os.path.join(root_install_dir, 'packages.log')
+ try:
+ with open(log, 'w') as outs:
+ print(*cmd, file=outs)
+ subprocess.check_call(cmd, stdout=outs, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError:
+ with open(log, 'r') as ins:
+ sys.stderr.write(ins.read())
+ raise
+
+ with open(hash_file, 'w') as outs:
+ print(digest, file=outs)
+
+ # Set environment variables so tools can later find things under, for
+ # example, 'share'.
+ if env_vars:
+ for package_file in package_files:
+ name = package_file_name(package_file)
+ file_install_dir = os.path.join(install_dir, name)
# Some executables get installed at top-level and some get
- # installed under 'bin'.
- env_vars.prepend('PATH', install_dir)
- env_vars.prepend('PATH', os.path.join(install_dir, 'bin'))
+ # installed under 'bin'. A small number of old packages prefix the
+ # entire tree with the platform (e.g., chromium/third_party/tcl).
+ for bin_dir in (
+ file_install_dir,
+ os.path.join(file_install_dir, 'bin'),
+ os.path.join(file_install_dir, platform(), 'bin'),
+ ):
+ if os.path.isdir(bin_dir):
+ env_vars.prepend('PATH', bin_dir)
env_vars.set('PW_{}_CIPD_INSTALL_DIR'.format(name.upper()),
- install_dir)
+ file_install_dir)
# Windows has its own special toolchain.
if os.name == 'nt':
- env_vars.prepend('PATH',
- os.path.join(install_dir, 'mingw64', 'bin'))
+ env_vars.prepend(
+ 'PATH', os.path.join(file_install_dir, 'mingw64', 'bin'))
return True
-
-
-if __name__ == '__main__':
- update(**vars(parse()))
- sys.exit(0)
diff --git a/pw_env_setup/py/pw_env_setup/cipd_setup/wrapper.py b/pw_env_setup/py/pw_env_setup/cipd_setup/wrapper.py
index 466f2911e..6ca7db3c9 100755
--- a/pw_env_setup/py/pw_env_setup/cipd_setup/wrapper.py
+++ b/pw_env_setup/py/pw_env_setup/cipd_setup/wrapper.py
@@ -133,6 +133,16 @@ def arch_normalized():
raise Exception('unrecognized arch: {}'.format(machine))
+def platform_arch_normalized():
+ platform_arch = '{}-{}'.format(platform_normalized(), arch_normalized())
+
+ # Support `mac-arm64` through Rosetta until `mac-arm64` binaries are ready
+ if platform_arch == 'mac-arm64':
+ platform_arch = 'mac-amd64'
+
+ return platform_arch
+
+
def user_agent():
"""Generate a user-agent based on the project name and current hash."""
@@ -160,7 +170,7 @@ def actual_hash(path):
def expected_hash():
"""Pulls expected hash from digests file."""
- expected_plat = '{}-{}'.format(platform_normalized(), arch_normalized())
+ expected_plat = platform_arch_normalized()
with open(DIGESTS_FILE, 'r') as ins:
for line in ins:
@@ -223,7 +233,7 @@ brew uninstall python && brew install python
print('=' * 70)
raise
- full_platform = '{}-{}'.format(platform_normalized(), arch_normalized())
+ full_platform = platform_arch_normalized()
if full_platform not in SUPPORTED_PLATFORMS:
raise UnsupportedPlatform(full_platform)
@@ -293,8 +303,8 @@ def bootstrap(client, silent=('PW_ENVSETUP_QUIET' in os.environ)):
os.makedirs(client_dir)
if not silent:
- print('Bootstrapping cipd client for {}-{}'.format(
- platform_normalized(), arch_normalized()))
+ print('Bootstrapping cipd client for {}'.format(
+ platform_arch_normalized()))
tmp_path = client + '.tmp'
with open(tmp_path, 'wb') as tmp:
diff --git a/pw_env_setup/py/pw_env_setup/env_setup.py b/pw_env_setup/py/pw_env_setup/env_setup.py
index 102a7c1cc..d742e4f5b 100755
--- a/pw_env_setup/py/pw_env_setup/env_setup.py
+++ b/pw_env_setup/py/pw_env_setup/env_setup.py
@@ -174,7 +174,7 @@ class EnvSetup(object):
def __init__(self, pw_root, cipd_cache_dir, shell_file, quiet, install_dir,
virtualenv_root, strict, virtualenv_gn_out_dir, json_file,
project_root, config_file, use_existing_cipd,
- use_pinned_pip_packages):
+ use_pinned_pip_packages, cipd_only, trust_cipd_hash):
self._env = environment.Environment()
self._project_root = project_root
self._pw_root = pw_root
@@ -188,6 +188,8 @@ class EnvSetup(object):
self._virtualenv_root = (virtualenv_root
or os.path.join(install_dir, 'pigweed-venv'))
self._strict = strict
+ self._cipd_only = cipd_only
+ self._trust_cipd_hash = trust_cipd_hash
if os.path.isfile(shell_file):
os.unlink(shell_file)
@@ -206,16 +208,16 @@ class EnvSetup(object):
self._pw_packages = []
self._root_variable = None
+ self._json_file = json_file
+ self._gni_file = None
+
self._config_file_name = getattr(config_file, 'name', 'config file')
+ self._env.set('_PW_ENVIRONMENT_CONFIG_FILE', self._config_file_name)
if config_file:
self._parse_config_file(config_file)
self._check_submodules()
- self._json_file = json_file
- if not self._json_file:
- self._json_file = os.path.join(self._install_dir, 'actions.json')
-
self._use_existing_cipd = use_existing_cipd
self._virtualenv_gn_out_dir = virtualenv_gn_out_dir
@@ -258,6 +260,11 @@ class EnvSetup(object):
self._root_variable = config.pop('root_variable', None)
+ if 'json_file' in config:
+ self._json_file = config.pop('json_file')
+
+ self._gni_file = config.pop('gni_file', None)
+
self._optional_submodules.extend(config.pop('optional_submodules', ()))
self._required_submodules.extend(config.pop('required_submodules', ()))
@@ -351,6 +358,15 @@ class EnvSetup(object):
raise MissingSubmodulesError(', '.join(sorted(missing)))
+ def _write_gni_file(self):
+ gni_file = os.path.join(self._project_root, 'build_overrides',
+ 'pigweed_environment.gni')
+ if self._gni_file:
+ gni_file = os.path.join(self._project_root, self._gni_file)
+
+ with open(gni_file, 'w') as outs:
+ self._env.gni(outs, self._project_root)
+
def _log(self, *args, **kwargs):
# Not using logging module because it's awkward to flush a log handler.
if self._quiet:
@@ -378,6 +394,9 @@ class EnvSetup(object):
if self._is_windows:
steps.append(("Windows scripts", self.win_scripts))
+ if self._cipd_only:
+ steps = [('CIPD package manager', self.cipd)]
+
self._log(
Color.bold('Downloading and installing packages into local '
'source directory:\n'))
@@ -438,6 +457,12 @@ Then use `set +x` to go back to normal.
with open(actions_json, 'w') as outs:
self._env.json(outs)
+ # This file needs to be written after the CIPD step and before the
+ # Python virtualenv step. It also needs to be rewritten after the
+ # Python virtualenv step, so it's easiest to just write it after
+ # every step.
+ self._write_gni_file()
+
self._log('')
self._env.echo('')
@@ -453,6 +478,10 @@ Then use `set +x` to go back to normal.
Color.bold('Environment looks good, you are ready to go!'))
self._env.echo()
+ # Don't write new files if all we did was update CIPD packages.
+ if self._cipd_only:
+ return 0
+
with open(self._shell_file, 'w') as outs:
self._env.write(outs)
@@ -475,9 +504,10 @@ Then use `set +x` to go back to normal.
outs.write(
json.dumps(config, indent=4, separators=(',', ': ')) + '\n')
- if self._json_file is not None:
- with open(self._json_file, 'w') as outs:
- self._env.json(outs)
+ json_file = (self._json_file
+ or os.path.join(self._install_dir, 'actions.json'))
+ with open(json_file, 'w') as outs:
+ self._env.json(outs)
return 0
@@ -513,7 +543,8 @@ Then use `set +x` to go back to normal.
package_files=package_files,
cache_dir=self._cipd_cache_dir,
env_vars=self._env,
- spin=spin):
+ spin=spin,
+ trust_hash=self._trust_cipd_hash):
return result(_Result.Status.FAILED)
return result(_Result.Status.DONE)
@@ -642,6 +673,14 @@ def parse(argv=None):
)
parser.add_argument(
+ '--trust-cipd-hash',
+ action='store_true',
+ help='Only run the cipd executable if the ensure file or command-line '
+ 'has changed. Defaults to false since files could have been deleted '
+ 'from the installation directory and cipd would add them back.',
+ )
+
+ parser.add_argument(
'--shell-file',
help='Where to write the file for shells to source.',
required=True,
@@ -680,12 +719,7 @@ def parse(argv=None):
default=None,
)
- parser.add_argument(
- '--json-file',
- help=('Dump environment variable operations to a JSON file. Default: '
- '<install_dir>/actions.json'),
- default=None,
- )
+ parser.add_argument('--json-file', help=argparse.SUPPRESS, default=None)
parser.add_argument(
'--use-existing-cipd',
@@ -706,6 +740,12 @@ def parse(argv=None):
action='store_false',
)
+ parser.add_argument(
+ '--cipd-only',
+ help='Skip non-CIPD steps.',
+ action='store_true',
+ )
+
args = parser.parse_args(argv)
return args
diff --git a/pw_env_setup/py/pw_env_setup/environment.py b/pw_env_setup/py/pw_env_setup/environment.py
index aada82a28..39a601ad4 100644
--- a/pw_env_setup/py/pw_env_setup/environment.py
+++ b/pw_env_setup/py/pw_env_setup/environment.py
@@ -28,6 +28,7 @@ except ImportError:
from . import apply_visitor
from . import batch_visitor
+from . import gni_visitor
from . import json_visitor
from . import shell_visitor
@@ -429,6 +430,9 @@ class Environment(object):
for action in self._actions:
action.accept(visitor)
+ def gni(self, outs, project_root):
+ gni_visitor.GNIVisitor(project_root).serialize(self, outs)
+
def json(self, outs):
json_visitor.JSONVisitor().serialize(self, outs)
diff --git a/pw_env_setup/py/pw_env_setup/gni_visitor.py b/pw_env_setup/py/pw_env_setup/gni_visitor.py
new file mode 100644
index 000000000..cd60d80ed
--- /dev/null
+++ b/pw_env_setup/py/pw_env_setup/gni_visitor.py
@@ -0,0 +1,100 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Serializes an Environment into a JSON file."""
+
+from __future__ import print_function
+
+import re
+
+# Disable super() warnings since this file must be Python 2 compatible.
+# pylint: disable=super-with-arguments
+
+
+class GNIVisitor(object): # pylint: disable=useless-object-inheritance
+ """Serializes portions of an Environment into a gni file.
+
+ Example gni file:
+
+ declare_args() {
+ dir_cipd_default = "<ENVIRONMENT_DIR>/cipd/packages/default"
+ dir_cipd_pigweed = "<ENVIRONMENT_DIR>/cipd/packages/pigweed"
+ dir_cipd_arm = "<ENVIRONMENT_DIR>/cipd/packages/arm"
+ dir_cipd_python = "<ENVIRONMENT_DIR>/cipd/packages/python"
+ dir_cipd_bazel = "<ENVIRONMENT_DIR>/cipd/packages/bazel"
+ dir_cipd_luci = "<ENVIRONMENT_DIR>/cipd/packages/luci"
+ dir_virtual_env = "<ENVIRONMENT_DIR>/pigweed-venv"
+ }
+ """
+ def __init__(self, project_root, *args, **kwargs):
+ super(GNIVisitor, self).__init__(*args, **kwargs)
+ self._project_root = project_root
+ self._lines = []
+
+ def serialize(self, env, outs):
+ self._lines.append("""
+# This file is automatically generated by Pigweed's environment setup. Do not
+# edit it manually or check it in.
+""".strip())
+
+ self._lines.append('declare_args() {')
+
+ env.accept(self)
+
+ self._lines.append('}')
+
+ for line in self._lines:
+ print(line, file=outs)
+ self._lines = []
+
+ def visit_set(self, set): # pylint: disable=redefined-builtin
+ match = re.search(r'PW_(.*)_CIPD_INSTALL_DIR', set.name)
+ if match:
+ name = 'dir_cipd_{}'.format(match.group(1).lower())
+ self._lines.append(' {} = "{}"'.format(name, set.value))
+
+ if set.name == 'VIRTUAL_ENV':
+ self._lines.append(' dir_virtual_env = "{}"'.format(set.value))
+
+ def visit_clear(self, clear):
+ pass
+
+ def visit_remove(self, remove):
+ pass
+
+ def visit_prepend(self, prepend):
+ pass
+
+ def visit_append(self, append):
+ pass
+
+ def visit_echo(self, echo):
+ pass
+
+ def visit_comment(self, comment):
+ pass
+
+ def visit_command(self, command):
+ pass
+
+ def visit_doctor(self, doctor):
+ pass
+
+ def visit_blank_line(self, blank_line):
+ pass
+
+ def visit_function(self, function):
+ pass
+
+ def visit_hash(self, hash): # pylint: disable=redefined-builtin
+ pass
diff --git a/pw_env_setup/py/pw_env_setup/virtualenv_setup/constraint.list b/pw_env_setup/py/pw_env_setup/virtualenv_setup/constraint.list
index 71ed6aba4..ef1d395a4 100644
--- a/pw_env_setup/py/pw_env_setup/virtualenv_setup/constraint.list
+++ b/pw_env_setup/py/pw_env_setup/virtualenv_setup/constraint.list
@@ -1,11 +1,9 @@
-actdiag==3.0.0
alabaster==0.7.12
appdirs==1.4.4
astroid==2.6.6
Babel==2.9.1
backcall==0.2.0
beautifulsoup4==4.10.0
-blockdiag==3.0.0
build==0.7.0
certifi==2021.10.8
cffi==1.15.0
@@ -15,7 +13,6 @@ coverage==6.3
cryptography==36.0.1
decorator==5.1.1
docutils==0.17.1
-funcparserlib==1.0.0a0
furo==2022.1.2
future==0.18.2
grpcio==1.43.0
@@ -35,14 +32,12 @@ mccabe==0.6.1
mypy==0.910
mypy-extensions==0.4.3
mypy-protobuf==2.9
-nwdiag==3.0.0
packaging==21.3
parameterized==0.8.1
parso==0.8.3
pep517==0.12.0
pexpect==4.8.0
pickleshare==0.7.5
-Pillow==8.2.0
prompt-toolkit==3.0.26
protobuf==3.19.1
psutil==5.9.0
@@ -62,23 +57,18 @@ PyYAML==6.0
requests==2.27.1
robotframework==3.1
scan-build==2.0.19
-seqdiag==3.0.0
six==1.16.0
snowballstemmer==2.2.0
soupsieve==2.3.1
Sphinx==4.3.2
sphinx-design==0.0.13
sphinx-rtd-theme==1.0.0
-sphinxcontrib-actdiag==3.0.0
sphinxcontrib-applehelp==1.0.2
-sphinxcontrib-blockdiag==3.0.0
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-mermaid==0.7.1
-sphinxcontrib-nwdiag==2.0.0
sphinxcontrib-qthelp==1.0.3
-sphinxcontrib-seqdiag==3.0.0
sphinxcontrib-serializinghtml==1.1.5
toml==0.10.2
tomli==2.0.0
@@ -95,6 +85,5 @@ typing_extensions==4.0.1
urllib3==1.26.8
watchdog==2.1.6
wcwidth==0.2.5
-webcolors==1.11.1
wrapt==1.12.1
yapf==0.31.0
diff --git a/pw_env_setup/pypi_common_setup.cfg b/pw_env_setup/pypi_common_setup.cfg
index d4f71c2cd..f4ad8fbda 100644
--- a/pw_env_setup/pypi_common_setup.cfg
+++ b/pw_env_setup/pypi_common_setup.cfg
@@ -13,7 +13,7 @@
# the License.
[metadata]
name = pigweed
-version = 0.0.5
+version = 0.0.7
author = Pigweed Authors
author_email = pigweed-developers@googlegroups.com
description = Pigweed Python modules
diff --git a/pw_env_setup/util.sh b/pw_env_setup/util.sh
index d43c0504c..d0aa2e787 100644
--- a/pw_env_setup/util.sh
+++ b/pw_env_setup/util.sh
@@ -245,7 +245,8 @@ pw_bootstrap() {
_PW_ENV_SETUP_STATUS="$?"
fi
- cp "$PW_ROOT/pw_env_setup/destination.md" "$_PW_ACTUAL_ENVIRONMENT_ROOT/README.md"
+ # Create the environment README file. Use quotes to prevent alias expansion.
+ "cp" "$PW_ROOT/pw_env_setup/destination.md" "$_PW_ACTUAL_ENVIRONMENT_ROOT/README.md"
}
pw_activate() {
diff --git a/pw_file/docs.rst b/pw_file/docs.rst
index 2029cd0f3..c96146619 100644
--- a/pw_file/docs.rst
+++ b/pw_file/docs.rst
@@ -15,7 +15,7 @@ systems, or by virtual file systems that provide a file-system like interface
with no true underlying file system.
pw_file does not define a protocol for file transfers.
-`pw_transfer <module-pw_transfer>`_ provides a generalized mechanism for
+:ref:`module-pw_transfer` provides a generalized mechanism for
performing file transfers, and is recommended to be used in tandem with pw_file.
-----------
diff --git a/pw_file/flat_file_system.cc b/pw_file/flat_file_system.cc
index ea2447895..48aaf6952 100644
--- a/pw_file/flat_file_system.cc
+++ b/pw_file/flat_file_system.cc
@@ -45,11 +45,13 @@ Status FlatFileSystemService::EnumerateFile(
{
pw::file::Path::StreamEncoder encoder = output_encoder.GetPathsEncoder();
- encoder.WritePath(reinterpret_cast<const char*>(file_name_buffer_.data()),
- sws.size());
- encoder.WriteSizeBytes(entry.SizeBytes());
- encoder.WritePermissions(entry.Permissions());
- encoder.WriteFileId(entry.FileId());
+ encoder
+ .WritePath(reinterpret_cast<const char*>(file_name_buffer_.data()),
+ sws.size())
+ .IgnoreError();
+ encoder.WriteSizeBytes(entry.SizeBytes()).IgnoreError();
+ encoder.WritePermissions(entry.Permissions()).IgnoreError();
+ encoder.WriteFileId(entry.FileId()).IgnoreError();
}
return output_encoder.status();
}
@@ -70,11 +72,13 @@ void FlatFileSystemService::EnumerateAllFiles(RawServerWriter& writer) {
Status write_status = writer.Write(encoder);
if (!write_status.ok()) {
- writer.Finish(write_status);
+ writer.Finish(write_status)
+ .IgnoreError(); // TODO(pwbug/387): Handle Status properly
return;
}
}
- writer.Finish(OkStatus());
+ writer.Finish(OkStatus())
+ .IgnoreError(); // TODO(pwbug/387): Handle Status properly
}
void FlatFileSystemService::List(ConstByteSpan request,
@@ -90,25 +94,29 @@ void FlatFileSystemService::List(ConstByteSpan request,
std::string_view file_name_view;
if (!decoder.ReadString(&file_name_view).ok() ||
file_name_view.length() == 0) {
- writer.Finish(Status::DataLoss());
+ writer.Finish(Status::DataLoss())
+ .IgnoreError(); // TODO(pwbug/387): Handle Status properly
return;
}
// Find and enumerate the file requested.
Result<Entry*> result = FindFile(file_name_view);
if (!result.ok()) {
- writer.Finish(result.status());
+ writer.Finish(result.status())
+ .IgnoreError(); // TODO(pwbug/387): Handle Status properly
return;
}
pw::file::ListResponse::MemoryEncoder encoder(encoding_buffer_);
Status proto_encode_status = EnumerateFile(*result.value(), encoder);
if (!proto_encode_status.ok()) {
- writer.Finish(proto_encode_status);
+ writer.Finish(proto_encode_status)
+ .IgnoreError(); // TODO(pwbug/387): Handle Status properly
return;
}
- writer.Finish(writer.Write(encoder));
+ writer.Finish(writer.Write(encoder))
+ .IgnoreError(); // TODO(pwbug/387): Handle Status properly
return;
}
@@ -116,7 +124,8 @@ void FlatFileSystemService::List(ConstByteSpan request,
EnumerateAllFiles(writer);
}
-StatusWithSize FlatFileSystemService::Delete(ConstByteSpan request, ByteSpan) {
+void FlatFileSystemService::Delete(ConstByteSpan request,
+ rpc::RawUnaryResponder& responder) {
protobuf::Decoder decoder(request);
while (decoder.Next().ok()) {
if (decoder.FieldNumber() !=
@@ -126,11 +135,13 @@ StatusWithSize FlatFileSystemService::Delete(ConstByteSpan request, ByteSpan) {
std::string_view file_name_view;
if (!decoder.ReadString(&file_name_view).ok()) {
- return StatusWithSize(Status::DataLoss(), 0);
+ responder.Finish({}, Status::DataLoss()).IgnoreError();
+ return;
}
- return StatusWithSize(FindAndDeleteFile(file_name_view), 0);
+ responder.Finish({}, FindAndDeleteFile(file_name_view)).IgnoreError();
+ return;
}
- return StatusWithSize(Status::InvalidArgument(), 0);
+ responder.Finish({}, Status::InvalidArgument()).IgnoreError();
}
Result<Entry*> FlatFileSystemService::FindFile(std::string_view file_name) {
diff --git a/pw_file/public/pw_file/flat_file_system.h b/pw_file/public/pw_file/flat_file_system.h
index 4efd98d07..1035dbdba 100644
--- a/pw_file/public/pw_file/flat_file_system.h
+++ b/pw_file/public/pw_file/flat_file_system.h
@@ -107,7 +107,7 @@ class FlatFileSystemService
// Returns:
// OK - File successfully deleted.
// NOT_FOUND - Could not find
- StatusWithSize Delete(ConstByteSpan request, ByteSpan);
+ void Delete(ConstByteSpan request, rpc::RawUnaryResponder& responder);
private:
// Returns the maximum size of a single encoded Path proto.
diff --git a/pw_function/function_test.cc b/pw_function/function_test.cc
index 541a4c7e3..8ba01d93e 100644
--- a/pw_function/function_test.cc
+++ b/pw_function/function_test.cc
@@ -210,6 +210,13 @@ TEST(Function, MoveAssign_Inline) {
#endif // __clang_analyzer__
}
+TEST(Function, MoveAssign_Callable) {
+ Function<int(int, int)> operation = Multiply;
+ EXPECT_EQ(operation(3, 3), 9);
+ operation = [](int a, int b) -> int { return a + b; };
+ EXPECT_EQ(operation(3, 3), 6);
+}
+
class MoveTracker {
public:
MoveTracker() : move_count_(0) {}
diff --git a/pw_function/public/pw_function/function.h b/pw_function/public/pw_function/function.h
index 0550559ae..5fd5fc259 100644
--- a/pw_function/public/pw_function/function.h
+++ b/pw_function/public/pw_function/function.h
@@ -88,7 +88,11 @@ class Function<Return(Args...)> {
template <typename Callable>
Function& operator=(Callable callable) {
holder_.DestructTarget();
- InitializeTarget(std::move(callable));
+ if (function_internal::IsNull(callable)) {
+ holder_.InitializeNullTarget();
+ } else {
+ holder_.InitializeInlineTarget(std::move(callable));
+ }
return *this;
}
diff --git a/pw_fuzzer/fuzzer.bzl b/pw_fuzzer/fuzzer.bzl
index a9df60e0a..2c82feea6 100644
--- a/pw_fuzzer/fuzzer.bzl
+++ b/pw_fuzzer/fuzzer.bzl
@@ -15,7 +15,7 @@
load("@rules_fuzzing//fuzzing:cc_defs.bzl", "cc_fuzz_test")
load(
- "//pw_build:bazel_internal/pigweed_internal.bzl",
+ "//pw_build/bazel_internal:pigweed_internal.bzl",
_add_cc_and_c_targets = "add_cc_and_c_targets",
_has_pw_assert_dep = "has_pw_assert_dep",
)
diff --git a/pw_hdlc/py/pw_hdlc/rpc.py b/pw_hdlc/py/pw_hdlc/rpc.py
index 831861b46..b41275331 100644
--- a/pw_hdlc/py/pw_hdlc/rpc.py
+++ b/pw_hdlc/py/pw_hdlc/rpc.py
@@ -139,7 +139,8 @@ class HdlcRpcClient:
output: Callable[[bytes], Any] = write_to_file,
client_impl: pw_rpc.client.ClientImpl = None,
*,
- _incoming_packet_filter_for_testing: '_PacketFilter' = None):
+ _incoming_packet_filter_for_testing: pw_rpc.
+ ChannelManipulator = None):
"""Creates an RPC client configured to communicate using HDLC.
Args:
@@ -159,10 +160,13 @@ class HdlcRpcClient:
self.client = pw_rpc.Client.from_modules(client_impl, channels,
self.protos.modules())
- self._test_filter = _incoming_packet_filter_for_testing
+ rpc_output: Callable[[bytes], Any] = self._handle_rpc_packet
+ if _incoming_packet_filter_for_testing is not None:
+ _incoming_packet_filter_for_testing.send_packet = rpc_output
+ rpc_output = _incoming_packet_filter_for_testing
frame_handlers: FrameHandlers = {
- DEFAULT_ADDRESS: self._handle_rpc_packet,
+ DEFAULT_ADDRESS: lambda frame: rpc_output(frame.data),
STDOUT_ADDRESS: lambda frame: output(frame.data),
}
@@ -184,15 +188,12 @@ class HdlcRpcClient:
return self.client.channel(channel_id).rpcs
- def _handle_rpc_packet(self, frame: Frame) -> None:
- if self._test_filter and not self._test_filter.keep_packet(frame.data):
- return
+ def _handle_rpc_packet(self, packet: bytes) -> None:
+ if not self.client.process_packet(packet):
+ _LOG.error('Packet not handled by RPC client: %s', packet)
- if not self.client.process_packet(frame.data):
- _LOG.error('Packet not handled by RPC client: %s', frame.data)
-
-def _try_connect(sock: socket.socket, port: int, attempts: int = 10) -> None:
+def _try_connect(port: int, attempts: int = 10) -> socket.socket:
"""Tries to connect to the specified port up to the given number of times.
This is helpful when connecting to a process that was started by this
@@ -205,9 +206,11 @@ def _try_connect(sock: socket.socket, port: int, attempts: int = 10) -> None:
time.sleep(0.001)
try:
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('localhost', port))
- return
+ return sock
except ConnectionRefusedError:
+ sock.close()
if attempts <= 0:
raise
@@ -218,21 +221,13 @@ class SocketSubprocess:
self._server_process = subprocess.Popen(command, stdin=subprocess.PIPE)
self.stdin = self._server_process.stdin
- sock = None
-
try:
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- _try_connect(sock, port)
+ self.socket: socket.socket = _try_connect(port) # 🧦
except:
- if sock:
- sock.close()
-
self._server_process.terminate()
self._server_process.communicate()
raise
- self.socket: socket.socket = sock # 🧦
-
def close(self) -> None:
try:
self.socket.close()
@@ -247,16 +242,21 @@ class SocketSubprocess:
self.close()
-class _PacketFilter:
+class PacketFilter(pw_rpc.ChannelManipulator):
"""Determines if a packet should be kept or dropped for testing purposes."""
_Action = Callable[[int], Tuple[bool, bool]]
_KEEP = lambda _: (True, False)
_DROP = lambda _: (False, False)
def __init__(self, name: str) -> None:
+ super().__init__()
self.name = name
self.packet_count = 0
- self._actions: Deque[_PacketFilter._Action] = collections.deque()
+ self._actions: Deque[PacketFilter._Action] = collections.deque()
+
+ def process_and_send(self, packet: bytes):
+ if self.keep_packet(packet):
+ self.send_packet(packet)
def reset(self) -> None:
self.packet_count = 0
@@ -264,11 +264,11 @@ class _PacketFilter:
def keep(self, count: int) -> None:
"""Keeps the next count packets."""
- self._actions.extend(_PacketFilter._KEEP for _ in range(count))
+ self._actions.extend(PacketFilter._KEEP for _ in range(count))
def drop(self, count: int) -> None:
"""Drops the next count packets."""
- self._actions.extend(_PacketFilter._DROP for _ in range(count))
+ self._actions.extend(PacketFilter._DROP for _ in range(count))
def drop_every(self, every: int) -> None:
"""Drops every Nth packet forever."""
@@ -296,33 +296,21 @@ class _PacketFilter:
return keep
-class _TestChannelOutput:
- def __init__(self, send: Callable[[bytes], Any]) -> None:
- self._send = send
- self.packets = _PacketFilter('outgoing RPC')
-
- def __call__(self, data: bytes) -> None:
- if self.packets.keep_packet(data):
- self._send(data)
-
-
class HdlcRpcLocalServerAndClient:
"""Runs an RPC server in a subprocess and connects to it over a socket.
This can be used to run a local RPC server in an integration test.
"""
- def __init__(self,
- server_command: Sequence,
- port: int,
- protos: PathsModulesOrProtoLibrary,
- *,
- for_testing: bool = False) -> None:
- """Creates a new HdlcRpcLocalServerAndClient.
-
- If for_testing=True, the HdlcRpcLocalServerAndClient will have
- outgoing_packets and incoming_packets _PacketFilter members that can be
- used to program packet loss for testing purposes.
- """
+ def __init__(
+ self,
+ server_command: Sequence,
+ port: int,
+ protos: PathsModulesOrProtoLibrary,
+ *,
+ incoming_processor: Optional[pw_rpc.ChannelManipulator] = None,
+ outgoing_processor: Optional[pw_rpc.ChannelManipulator] = None
+ ) -> None:
+ """Creates a new HdlcRpcLocalServerAndClient."""
self.server = SocketSubprocess(server_command, port)
@@ -333,20 +321,18 @@ class HdlcRpcLocalServerAndClient:
self.output = io.BytesIO()
self.channel_output: Any = self.server.socket.sendall
- if for_testing:
- self.channel_output = _TestChannelOutput(self.channel_output)
- self.outgoing_packets = self.channel_output.packets
- self.incoming_packets = _PacketFilter('incoming RPC')
- incoming_filter: Optional[_PacketFilter] = self.incoming_packets
- else:
- incoming_filter = None
+
+ self._incoming_processor = incoming_processor
+ if outgoing_processor is not None:
+ outgoing_processor.send_packet = self.channel_output
+ self.channel_output = outgoing_processor
self.client = HdlcRpcClient(
self._bytes_queue.get,
protos,
default_channels(self.channel_output),
self.output.write,
- _incoming_packet_filter_for_testing=incoming_filter).client
+ _incoming_packet_filter_for_testing=incoming_processor).client
def _read_from_socket(self):
while True:
diff --git a/pw_hdlc/py/pw_hdlc/rpc_console.py b/pw_hdlc/py/pw_hdlc/rpc_console.py
index 27d402880..38bc6e0f5 100644
--- a/pw_hdlc/py/pw_hdlc/rpc_console.py
+++ b/pw_hdlc/py/pw_hdlc/rpc_console.py
@@ -256,7 +256,11 @@ def console(device: str,
serial_impl = SerialWithLogging
if socket_addr is None:
- serial_device = serial_impl(device, baudrate, timeout=1)
+ serial_device = serial_impl(
+ device,
+ baudrate,
+ timeout=0, # Non-blocking mode
+ )
read = lambda: serial_device.read(8192)
write = serial_device.write
else:
diff --git a/pw_hdlc/rpc_example/docs.rst b/pw_hdlc/rpc_example/docs.rst
index 8021f0a27..0db8bfa5e 100644
--- a/pw_hdlc/rpc_example/docs.rst
+++ b/pw_hdlc/rpc_example/docs.rst
@@ -129,6 +129,6 @@ Run pw_rpc server
.. code-block:: sh
- out/host_clang_debug/obj/pw_hdlc/rpc_example/bin/rpc_example
+ out/pw_strict_host_clang_debug/obj/pw_hdlc/rpc_example/bin/rpc_example
Then you can invoke RPCs from the interactive console on the client side.
diff --git a/pw_hdlc/rpc_example/hdlc_rpc_server.cc b/pw_hdlc/rpc_example/hdlc_rpc_server.cc
index ef5efb369..b878e64b7 100644
--- a/pw_hdlc/rpc_example/hdlc_rpc_server.cc
+++ b/pw_hdlc/rpc_example/hdlc_rpc_server.cc
@@ -16,6 +16,7 @@
#include <span>
#include <string_view>
+#include "pw_assert/check.h"
#include "pw_hdlc/encoder.h"
#include "pw_hdlc/rpc_packets.h"
#include "pw_log/log.h"
@@ -41,7 +42,7 @@ void Start() {
RegisterServices();
PW_LOG_INFO("Starting pw_rpc server");
- pw::rpc::system_server::Start();
+ PW_CHECK_OK(pw::rpc::system_server::Start());
}
} // namespace hdlc_example
diff --git a/pw_i2c_mcuxpresso/BUILD.bazel b/pw_i2c_mcuxpresso/BUILD.bazel
new file mode 100644
index 000000000..baaa7f147
--- /dev/null
+++ b/pw_i2c_mcuxpresso/BUILD.bazel
@@ -0,0 +1,36 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+load(
+ "//pw_build:pigweed.bzl",
+ "pw_cc_library",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+pw_cc_library(
+ name = "pw_i2c_mcuxpresso",
+ srcs = ["initiator.cc"],
+ hdrs = ["public/pw_i2c_mcuxpresso/initiator.h"],
+ deps = [
+ "//pw_chrono:system_clock",
+ "//pw_i2c:initiator",
+ "//pw_status",
+ "//pw_sync:interrupt_spin_lock",
+ "//pw_sync:lock_annotations",
+ "//pw_sync:mutex",
+ "//pw_sync:timed_thread_notification",
+ ],
+)
diff --git a/pw_i2c_mcuxpresso/BUILD.gn b/pw_i2c_mcuxpresso/BUILD.gn
new file mode 100644
index 000000000..b3b62b401
--- /dev/null
+++ b/pw_i2c_mcuxpresso/BUILD.gn
@@ -0,0 +1,44 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+import("$dir_pw_docgen/docs.gni")
+import("$dir_pw_third_party/mcuxpresso/mcuxpresso.gni")
+
+config("default_config") {
+ include_dirs = [ "public" ]
+}
+
+if (pw_third_party_mcuxpresso_SDK != "") {
+ pw_source_set("pw_i2c_mcuxpresso") {
+ public_configs = [ ":default_config" ]
+ public = [ "public/pw_i2c_mcuxpresso/initiator.h" ]
+ public_deps = [
+ "$dir_pw_chrono:system_clock",
+ "$dir_pw_i2c:initiator",
+ "$dir_pw_status",
+ "$dir_pw_sync:interrupt_spin_lock",
+ "$dir_pw_sync:lock_annotations",
+ "$dir_pw_sync:mutex",
+ "$dir_pw_sync:timed_thread_notification",
+ "$pw_third_party_mcuxpresso_SDK",
+ ]
+ sources = [ "initiator.cc" ]
+ }
+}
+
+pw_doc_group("docs") {
+ sources = [ "docs.rst" ]
+}
diff --git a/pw_i2c_mcuxpresso/OWNERS b/pw_i2c_mcuxpresso/OWNERS
new file mode 100644
index 000000000..504a930e5
--- /dev/null
+++ b/pw_i2c_mcuxpresso/OWNERS
@@ -0,0 +1,2 @@
+ewout@google.com
+swatiwagh@google.com \ No newline at end of file
diff --git a/pw_i2c_mcuxpresso/docs.rst b/pw_i2c_mcuxpresso/docs.rst
new file mode 100644
index 000000000..449f6e266
--- /dev/null
+++ b/pw_i2c_mcuxpresso/docs.rst
@@ -0,0 +1,26 @@
+.. _module-pw_i2c_mcuxpresso:
+
+-----------------
+pw_i2c_mcuxpresso
+-----------------
+
+``pw_i2c_mcuxpresso`` implements the ``pw_i2c`` interface using the
+NXP MCUXpresso SDK.
+
+The implementation is based on the i2c driver in SDK. I2C transfers use
+non-blocking driver API.
+
+Setup
+=====
+
+This module requires following setup:
+
+ 1. Use ``pw_build_mcuxpresso`` to create a ``pw_source_set`` for an
+ MCUXpresso SDK.
+ 2. Include the i2c driver component in this SDK definition.
+ 3. Specify the ``pw_third_party_mcuxpresso_SDK`` GN global variable to specify
+ the name of this source set.
+ 4. Use ``pw::i2c::McuxpressoInitiator`` implementation of
+ ``pw::i2c::Initiator`` while creating ``pw::i2c::Device`` or
+ ``pw::i2c::RegisterDevice`` interface to access the I2C devices connected to
+ target. \ No newline at end of file
diff --git a/pw_i2c_mcuxpresso/initiator.cc b/pw_i2c_mcuxpresso/initiator.cc
new file mode 100644
index 000000000..55b5717a6
--- /dev/null
+++ b/pw_i2c_mcuxpresso/initiator.cc
@@ -0,0 +1,156 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#include "pw_i2c_mcuxpresso/initiator.h"
+
+#include <mutex>
+
+#include "fsl_i2c.h"
+#include "pw_chrono/system_clock.h"
+#include "pw_status/status.h"
+#include "pw_status/try.h"
+
+namespace pw::i2c {
+namespace {
+
+Status HalStatusToPwStatus(status_t status) {
+ switch (status) {
+ case kStatus_Success:
+ return OkStatus();
+ case kStatus_I2C_Nak:
+ case kStatus_I2C_Addr_Nak:
+ return Status::Unavailable();
+ case kStatus_I2C_InvalidParameter:
+ return Status::InvalidArgument();
+ case kStatus_I2C_Timeout:
+ return Status::DeadlineExceeded();
+ default:
+ return Status::Unknown();
+ }
+}
+} // namespace
+
+// inclusive-language: disable
+McuxpressoInitiator::McuxpressoInitiator(I2C_Type* base,
+ uint32_t baud_rate_bps,
+ uint32_t src_clock_hz)
+ : base_(base) {
+ i2c_master_config_t master_config;
+ I2C_MasterGetDefaultConfig(&master_config);
+ master_config.baudRate_Bps = baud_rate_bps;
+ I2C_MasterInit(base_, &master_config, src_clock_hz);
+
+ // Create the handle for the non-blocking transfer and register callback.
+ I2C_MasterTransferCreateHandle(
+ base_, &handle_, McuxpressoInitiator::TransferCompleteCallback, this);
+}
+
+McuxpressoInitiator::~McuxpressoInitiator() { I2C_MasterDeinit(base_); }
+
+void McuxpressoInitiator::TransferCompleteCallback(I2C_Type* base,
+ i2c_master_handle_t* handle,
+ status_t status,
+ void* initiator_ptr) {
+ McuxpressoInitiator& initiator =
+ *static_cast<McuxpressoInitiator*>(initiator_ptr);
+ initiator.callback_isl_.lock();
+ initiator.transfer_status_ = status;
+ initiator.callback_isl_.unlock();
+ initiator.callback_complete_notification_.release();
+}
+
+Status McuxpressoInitiator::InitiateNonBlockingTransfer(
+ chrono::SystemClock::duration rw_timeout, i2c_master_transfer_t* transfer) {
+ const status_t status =
+ I2C_MasterTransferNonBlocking(base_, &handle_, transfer);
+ if (status != kStatus_Success) {
+ return HalStatusToPwStatus(status);
+ }
+
+ if (!callback_complete_notification_.try_acquire_for(rw_timeout)) {
+ I2C_MasterTransferAbort(base_, &handle_);
+ return Status::DeadlineExceeded();
+ }
+
+ callback_isl_.lock();
+ const status_t transfer_status = transfer_status_;
+ callback_isl_.unlock();
+
+ return HalStatusToPwStatus(transfer_status);
+}
+
+// Performs non-blocking I2C write, read and read-after-write depending on the
+// tx and rx buffer states.
+Status McuxpressoInitiator::DoWriteReadFor(
+ Address device_address,
+ ConstByteSpan tx_buffer,
+ ByteSpan rx_buffer,
+ chrono::SystemClock::duration timeout) {
+ if (timeout <= chrono::SystemClock::duration::zero()) {
+ return Status::DeadlineExceeded();
+ }
+
+ const uint8_t address = device_address.GetSevenBit();
+ std::lock_guard lock(mutex_);
+
+ if (!tx_buffer.empty() && rx_buffer.empty()) {
+ i2c_master_transfer_t transfer{kI2C_TransferDefaultFlag,
+ address,
+ kI2C_Write,
+ 0,
+ 0,
+ const_cast<std::byte*>(tx_buffer.data()),
+ tx_buffer.size()};
+ return InitiateNonBlockingTransfer(timeout, &transfer);
+ } else if (tx_buffer.empty() && !rx_buffer.empty()) {
+ i2c_master_transfer_t transfer{kI2C_TransferDefaultFlag,
+ address,
+ kI2C_Read,
+ 0,
+ 0,
+ rx_buffer.data(),
+ rx_buffer.size()};
+ return InitiateNonBlockingTransfer(timeout, &transfer);
+ } else if (!tx_buffer.empty() && !rx_buffer.empty()) {
+ i2c_master_transfer_t w_transfer{kI2C_TransferNoStopFlag,
+ address,
+ kI2C_Write,
+ 0,
+ 0,
+ const_cast<std::byte*>(tx_buffer.data()),
+ tx_buffer.size()};
+ const chrono::SystemClock::time_point deadline =
+ chrono::SystemClock::TimePointAfterAtLeast(timeout);
+ PW_TRY(InitiateNonBlockingTransfer(timeout, &w_transfer));
+ i2c_master_transfer_t r_transfer{kI2C_TransferRepeatedStartFlag,
+ address,
+ kI2C_Read,
+ 0,
+ 0,
+ rx_buffer.data(),
+ rx_buffer.size()};
+ const chrono::SystemClock::duration time_remaining =
+ deadline - chrono::SystemClock::now();
+ if (time_remaining <= chrono::SystemClock::duration::zero()) {
+ // Abort transfer in an unlikely scenario of timeout even with
+ // successful write.
+ I2C_MasterTransferAbort(base_, &handle_);
+ return Status::DeadlineExceeded();
+ }
+ return InitiateNonBlockingTransfer(time_remaining, &r_transfer);
+ } else {
+ return Status::InvalidArgument();
+ }
+}
+// inclusive-language: enable
+} // namespace pw::i2c
diff --git a/pw_i2c_mcuxpresso/public/pw_i2c_mcuxpresso/initiator.h b/pw_i2c_mcuxpresso/public/pw_i2c_mcuxpresso/initiator.h
new file mode 100644
index 000000000..ab32beb90
--- /dev/null
+++ b/pw_i2c_mcuxpresso/public/pw_i2c_mcuxpresso/initiator.h
@@ -0,0 +1,68 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "fsl_i2c.h"
+#include "pw_i2c/initiator.h"
+#include "pw_sync/interrupt_spin_lock.h"
+#include "pw_sync/lock_annotations.h"
+#include "pw_sync/mutex.h"
+#include "pw_sync/timed_thread_notification.h"
+
+namespace pw::i2c {
+
+// Initiator interface implementation based on I2C driver in NXP MCUXpresso SDK.
+// Currently supports only devices with 7 bit addresses.
+class McuxpressoInitiator final : public Initiator {
+ public:
+ McuxpressoInitiator(I2C_Type* base,
+ uint32_t baud_rate_bps,
+ uint32_t src_clock_hz);
+
+ ~McuxpressoInitiator();
+
+ private:
+ Status DoWriteReadFor(Address device_address,
+ ConstByteSpan tx_buffer,
+ ByteSpan rx_buffer,
+ chrono::SystemClock::duration timeout) override
+ PW_LOCKS_EXCLUDED(mutex_);
+
+ // inclusive-language: disable
+ Status InitiateNonBlockingTransfer(chrono::SystemClock::duration rw_timeout,
+ i2c_master_transfer_t* transfer)
+ PW_LOCKS_EXCLUDED(callback_isl_);
+
+ // Non-blocking I2C transfer callback.
+ static void TransferCompleteCallback(I2C_Type* base,
+ i2c_master_handle_t* handle,
+ status_t status,
+ void* initiator_ptr)
+ PW_GUARDED_BY(callback_isl_);
+ // inclusive-language: enable
+
+ sync::Mutex mutex_;
+ I2C_Type* base_ PW_GUARDED_BY(mutex_);
+
+ // Transfer completion status for non-blocking I2C transfer.
+ sync::TimedThreadNotification callback_complete_notification_;
+ sync::InterruptSpinLock callback_isl_;
+ status_t transfer_status_ PW_GUARDED_BY(callback_isl_);
+
+ // inclusive-language: disable
+ i2c_master_handle_t handle_;
+ // inclusive-language: enable
+};
+
+} // namespace pw::i2c \ No newline at end of file
diff --git a/pw_interrupt/BUILD.gn b/pw_interrupt/BUILD.gn
index fb3581e6c..37604bbe3 100644
--- a/pw_interrupt/BUILD.gn
+++ b/pw_interrupt/BUILD.gn
@@ -17,11 +17,7 @@ import("//build_overrides/pigweed.gni")
import("$dir_pw_build/facade.gni")
import("$dir_pw_docgen/docs.gni")
import("$dir_pw_unit_test/test.gni")
-
-declare_args() {
- # Backend for the pw_interrupt module.
- pw_interrupt_CONTEXT_BACKEND = ""
-}
+import("backend.gni")
config("public_include_path") {
include_dirs = [ "public" ]
diff --git a/pw_interrupt/backend.gni b/pw_interrupt/backend.gni
new file mode 100644
index 000000000..535648a2d
--- /dev/null
+++ b/pw_interrupt/backend.gni
@@ -0,0 +1,18 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+declare_args() {
+ # Backend for the pw_interrupt module.
+ pw_interrupt_CONTEXT_BACKEND = ""
+}
diff --git a/pw_kvs/BUILD.bazel b/pw_kvs/BUILD.bazel
index e36bcdada..41359d7b8 100644
--- a/pw_kvs/BUILD.bazel
+++ b/pw_kvs/BUILD.bazel
@@ -59,8 +59,10 @@ pw_cc_library(
"//pw_containers",
"//pw_log",
"//pw_log:facade",
+ "//pw_polyfill",
"//pw_span",
"//pw_status",
+ "//pw_stream",
],
)
@@ -262,6 +264,18 @@ pw_cc_test(
)
pw_cc_test(
+ name = "flash_partition_stream_test",
+ srcs = ["flash_partition_stream_test.cc"],
+ deps = [
+ ":fake_flash",
+ ":pw_kvs",
+ "//pw_log",
+ "//pw_random",
+ "//pw_unit_test",
+ ],
+)
+
+pw_cc_test(
name = "flash_partition_1_alignment_test",
srcs = ["flash_partition_test.cc"],
defines = [
diff --git a/pw_kvs/BUILD.gn b/pw_kvs/BUILD.gn
index 3f1a7618f..10b054532 100644
--- a/pw_kvs/BUILD.gn
+++ b/pw_kvs/BUILD.gn
@@ -18,6 +18,7 @@ import("$dir_pw_bloat/bloat.gni")
import("$dir_pw_build/module_config.gni")
import("$dir_pw_build/target_types.gni")
import("$dir_pw_docgen/docs.gni")
+import("$dir_pw_toolchain/generate_toolchain.gni")
import("$dir_pw_unit_test/test.gni")
declare_args() {
@@ -65,6 +66,7 @@ pw_source_set("pw_kvs") {
dir_pw_bytes,
dir_pw_containers,
dir_pw_status,
+ dir_pw_stream,
dir_pw_string,
]
deps = [
@@ -208,6 +210,30 @@ pw_source_set("fake_flash_test_key_value_store") {
]
}
+pw_source_set("flash_partition_stream_test") {
+ public_configs = [ ":public_include_path" ]
+ public = [ "public/pw_kvs/flash_memory.h" ]
+ sources = [ "flash_partition_stream_test.cc" ]
+ public_deps = [
+ "$dir_pw_sync:borrow",
+ dir_pw_bytes,
+ dir_pw_kvs,
+ dir_pw_polyfill,
+ dir_pw_preprocessor,
+ dir_pw_status,
+ dir_pw_stream,
+ ]
+ deps = [
+ ":config",
+ ":fake_flash",
+ ":flash_test_partition",
+ dir_pw_kvs,
+ dir_pw_log,
+ dir_pw_random,
+ dir_pw_unit_test,
+ ]
+}
+
pw_source_set("flash_partition_test_100_iterations") {
deps = [
":config",
@@ -322,28 +348,41 @@ pw_test_group("tests") {
":alignment_test",
":checksum_test",
":converts_to_span_test",
- ":entry_test",
- ":entry_cache_test",
- ":flash_partition_1_alignment_test",
- ":flash_partition_16_alignment_test",
- ":flash_partition_64_alignment_test",
- ":flash_partition_256_alignment_test",
- ":flash_partition_256_write_size_test",
- ":key_value_store_test",
- ":key_value_store_1_alignment_flash_test",
- ":key_value_store_16_alignment_flash_test",
- ":key_value_store_64_alignment_flash_test",
- ":key_value_store_256_alignment_flash_test",
- ":key_value_store_fuzz_1_alignment_flash_test",
- ":key_value_store_fuzz_64_alignment_flash_test",
- ":key_value_store_binary_format_test",
- ":key_value_store_put_test",
- ":key_value_store_map_test",
- ":fake_flash_test_key_value_store_test",
- ":sectors_test",
":key_test",
- ":key_value_store_wear_test",
]
+
+ if (defined(pw_toolchain_SCOPE.is_host_toolchain) &&
+ pw_toolchain_SCOPE.is_host_toolchain) {
+ # TODO(pwbug/196): KVS tests are not compatible with device builds as they
+ # use features such as std::map and are computationally expensive. Solving
+ # this requires a more complex capabilities-based build and configuration
+ # system which allows enabling specific tests for targets that support
+ # them and modifying test parameters for different targets.
+
+ tests += [
+ ":entry_test",
+ ":entry_cache_test",
+ ":flash_partition_1_stream_test",
+ ":flash_partition_1_alignment_test",
+ ":flash_partition_16_alignment_test",
+ ":flash_partition_64_alignment_test",
+ ":flash_partition_256_alignment_test",
+ ":flash_partition_256_write_size_test",
+ ":key_value_store_test",
+ ":key_value_store_1_alignment_flash_test",
+ ":key_value_store_16_alignment_flash_test",
+ ":key_value_store_64_alignment_flash_test",
+ ":key_value_store_256_alignment_flash_test",
+ ":key_value_store_fuzz_1_alignment_flash_test",
+ ":key_value_store_fuzz_64_alignment_flash_test",
+ ":key_value_store_binary_format_test",
+ ":key_value_store_put_test",
+ ":key_value_store_map_test",
+ ":key_value_store_wear_test",
+ ":fake_flash_test_key_value_store_test",
+ ":sectors_test",
+ ]
+ }
}
pw_test("alignment_test") {
@@ -384,6 +423,15 @@ pw_test("entry_cache_test") {
sources = [ "entry_cache_test.cc" ]
}
+pw_test("flash_partition_1_stream_test") {
+ deps = [
+ ":fake_flash",
+ ":fake_flash_1_aligned_partition",
+ ":flash_partition_stream_test",
+ dir_pw_log,
+ ]
+}
+
pw_test("flash_partition_1_alignment_test") {
deps = [
":fake_flash",
diff --git a/pw_kvs/CMakeLists.txt b/pw_kvs/CMakeLists.txt
index 9b2775fa6..8e20e5b13 100644
--- a/pw_kvs/CMakeLists.txt
+++ b/pw_kvs/CMakeLists.txt
@@ -27,6 +27,8 @@ pw_auto_add_simple_module(pw_kvs
pw_checksum
${pw_kvs_CONFIG}
pw_log
+ pw_random
+ pw_stream
pw_string
)
diff --git a/pw_kvs/docs.rst b/pw_kvs/docs.rst
index a8f56bc66..fd2199654 100644
--- a/pw_kvs/docs.rst
+++ b/pw_kvs/docs.rst
@@ -85,6 +85,8 @@ they are part of. Partition logical sectors may be smaller due to partition
overhead (encryption, wear tracking, etc) or larger due to combining raw
sectors into larger logical sectors.
+FlashPartition supports access via NonSeekableWriter and SeekableReader.
+
Size report
-----------
The following size report showcases the memory usage of the KVS and
diff --git a/pw_kvs/flash_memory.cc b/pw_kvs/flash_memory.cc
index 4f28e07e8..217ad16ef 100644
--- a/pw_kvs/flash_memory.cc
+++ b/pw_kvs/flash_memory.cc
@@ -31,6 +31,44 @@ namespace pw::kvs {
using std::byte;
+#if PW_CXX_STANDARD_IS_SUPPORTED(17)
+
+Status FlashPartition::Writer::DoWrite(ConstByteSpan data) {
+ if (partition_.size_bytes() <= position_) {
+ return Status::OutOfRange();
+ }
+ if (data.size_bytes() > (partition_.size_bytes() - position_)) {
+ return Status::ResourceExhausted();
+ }
+ if (data.size_bytes() == 0) {
+ return OkStatus();
+ }
+
+ const StatusWithSize sws = partition_.Write(position_, data);
+ if (sws.ok()) {
+ position_ += data.size_bytes();
+ }
+ return sws.status();
+}
+
+StatusWithSize FlashPartition::Reader::DoRead(ByteSpan data) {
+ if (position_ >= partition_.size_bytes()) {
+ return StatusWithSize::OutOfRange();
+ }
+
+ size_t bytes_to_read =
+ std::min(data.size_bytes(), partition_.size_bytes() - position_);
+
+ const StatusWithSize sws =
+ partition_.Read(position_, data.first(bytes_to_read));
+ if (sws.ok()) {
+ position_ += bytes_to_read;
+ }
+ return sws;
+}
+
+#endif // PW_CXX_STANDARD_IS_SUPPORTED(17)
+
StatusWithSize FlashPartition::Output::DoWrite(std::span<const byte> data) {
PW_TRY_WITH_SIZE(flash_.Write(address_, data));
address_ += data.size();
diff --git a/pw_kvs/flash_partition_stream_test.cc b/pw_kvs/flash_partition_stream_test.cc
new file mode 100644
index 000000000..47fd974db
--- /dev/null
+++ b/pw_kvs/flash_partition_stream_test.cc
@@ -0,0 +1,396 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <algorithm>
+#include <array>
+#include <cstddef>
+#include <cstring>
+#include <span>
+
+#include "gtest/gtest.h"
+#include "public/pw_kvs/flash_memory.h"
+#include "pw_kvs/fake_flash_memory.h"
+#include "pw_kvs/flash_memory.h"
+#include "pw_kvs_private/config.h"
+#include "pw_log/log.h"
+#include "pw_random/xor_shift.h"
+
+#if PW_CXX_STANDARD_IS_SUPPORTED(17)
+
+#ifndef PW_FLASH_TEST_ALIGNMENT
+#define PW_FLASH_TEST_ALIGNMENT 1
+#endif
+
+namespace pw::kvs {
+namespace {
+
+class FlashStreamTest : public ::testing::Test {
+ protected:
+ FlashStreamTest() : flash_(kFlashAlignment), partition_(&flash_) {}
+
+ void InitBufferToFill(ByteSpan buffer_span, char fill) {
+ std::memset(buffer_span.data(), fill, buffer_span.size_bytes());
+ }
+
+ void InitBufferToRandom(ByteSpan buffer_span, uint64_t seed) {
+ random::XorShiftStarRng64 rng(seed);
+
+ std::memset(buffer_span.data(),
+ static_cast<int>(flash_.erased_memory_content()),
+ buffer_span.size());
+ ASSERT_EQ(OkStatus(), rng.Get(buffer_span).status());
+ }
+
+ void VerifyFlash(ConstByteSpan verify_bytes, size_t offset = 0) {
+ // Should be defined as same size.
+ EXPECT_EQ(source_buffer_.size(), flash_.buffer().size_bytes());
+
+ // Can't allow it to march off the end of source_buffer_.
+ ASSERT_LE((verify_bytes.size_bytes() + offset), source_buffer_.size());
+
+ for (size_t i = 0; i < verify_bytes.size_bytes(); i++) {
+ ASSERT_EQ(source_buffer_[i + offset], verify_bytes[i]);
+ }
+ }
+
+ void VerifyFlashContent(ConstByteSpan verify_bytes, size_t offset = 0) {
+ // Can't allow it to march off the end of source_buffer_.
+ ASSERT_LE((verify_bytes.size_bytes() + offset),
+ flash_.buffer().size_bytes());
+
+ for (size_t i = 0; i < verify_bytes.size_bytes(); i++) {
+ ASSERT_EQ(flash_.buffer()[i + offset], verify_bytes[i]);
+ }
+ }
+
+ void DoWriteInChunks(size_t chunk_write_size_bytes, uint64_t seed) {
+ InitBufferToRandom(std::span(source_buffer_), seed);
+ ConstByteSpan write_data = std::span(source_buffer_);
+
+ ASSERT_EQ(OkStatus(), partition_.Erase());
+
+ FlashPartition::Writer writer(partition_);
+
+ while (write_data.size_bytes() > 0) {
+ size_t offset_before_write = writer.Tell();
+ size_t write_chunk_size =
+ std::min(chunk_write_size_bytes, write_data.size_bytes());
+
+ ConstByteSpan write_chunk = write_data.first(write_chunk_size);
+ ASSERT_EQ(OkStatus(), writer.Write(write_chunk));
+ VerifyFlashContent(write_chunk, offset_before_write);
+
+ write_data = write_data.subspan(write_chunk_size);
+
+ ASSERT_EQ(writer.ConservativeWriteLimit(), write_data.size_bytes());
+ }
+
+ VerifyFlashContent(std::span(source_buffer_));
+ }
+
+ void DoReadInChunks(size_t chunk_read_size_bytes,
+ uint64_t seed,
+ size_t start_offset,
+ size_t bytes_to_read) {
+ InitBufferToRandom(flash_.buffer(), seed);
+
+ ASSERT_LE((start_offset + bytes_to_read), flash_.buffer().size_bytes());
+
+ FlashPartition::Reader reader(partition_);
+ ASSERT_EQ(reader.ConservativeReadLimit(), flash_.buffer().size_bytes());
+
+ ASSERT_EQ(reader.Seek(start_offset), OkStatus());
+ ASSERT_EQ(reader.ConservativeReadLimit(),
+ flash_.buffer().size_bytes() - start_offset);
+
+ while (bytes_to_read > 0) {
+ ASSERT_EQ(start_offset, reader.Tell());
+
+ size_t chunk_size = std::min(chunk_read_size_bytes, bytes_to_read);
+
+ ByteSpan read_chunk = std::span(source_buffer_).first(chunk_size);
+ InitBufferToFill(read_chunk, 0);
+ ASSERT_EQ(read_chunk.size_bytes(), chunk_size);
+
+ auto result = reader.Read(read_chunk);
+ ASSERT_EQ(result.status(), OkStatus());
+ ASSERT_EQ(result.value().size_bytes(), chunk_size);
+ VerifyFlashContent(read_chunk, start_offset);
+
+ start_offset += chunk_size;
+ bytes_to_read -= chunk_size;
+
+ ASSERT_EQ(reader.ConservativeReadLimit(),
+ flash_.buffer().size_bytes() - start_offset);
+ }
+ }
+
+ static constexpr size_t kFlashAlignment = PW_FLASH_TEST_ALIGNMENT;
+ static constexpr size_t kSectorSize = 2048;
+ static constexpr size_t kSectorCount = 2;
+ static constexpr size_t kFPDataSize = (kSectorCount * kSectorSize);
+
+ FakeFlashMemoryBuffer<kSectorSize, kSectorCount> flash_;
+ FlashPartition partition_;
+ std::array<std::byte, kFPDataSize> source_buffer_;
+ size_t size_bytes_;
+};
+
+TEST_F(FlashStreamTest, Write_1_Byte_Chunks) {
+ // Write in 1 byte chunks.
+ DoWriteInChunks(1, 0xab1234);
+}
+
+TEST_F(FlashStreamTest, Write_5_Byte_Chunks) {
+ // Write in 5 byte chunks.
+ DoWriteInChunks(5, 0xdc2274);
+}
+
+TEST_F(FlashStreamTest, Write_16_Byte_Chunks) {
+ // Write in 16 byte chunks.
+ DoWriteInChunks(16, 0xef8224);
+}
+
+TEST_F(FlashStreamTest, Write_255_Byte_Chunks) {
+ // Write in 255 byte chunks.
+ DoWriteInChunks(255, 0xffe1348);
+}
+
+TEST_F(FlashStreamTest, Write_256_Byte_Chunks) {
+ // Write in 256 byte chunks.
+ DoWriteInChunks(256, 0xe11234);
+}
+
+TEST_F(FlashStreamTest, Read_1_Byte_Chunks) {
+ // Read in 1 byte chunks.
+ DoReadInChunks(1, 0x7643ff, 0, flash_.buffer().size_bytes());
+}
+
+TEST_F(FlashStreamTest, Read_16_Byte_Chunks) {
+ // Read in 16 byte chunks.
+ DoReadInChunks(16, 0x61e234, 0, flash_.buffer().size_bytes());
+}
+
+TEST_F(FlashStreamTest, Read_255_Byte_Chunks) {
+ // Read in 255 byte chunks.
+ DoReadInChunks(255, 0xe13514, 0, flash_.buffer().size_bytes());
+}
+
+TEST_F(FlashStreamTest, Read_256_Byte_Chunks) {
+ // Read in 256 byte chunks.
+ DoReadInChunks(256, 0xe11234, 0, flash_.buffer().size_bytes());
+}
+
+TEST_F(FlashStreamTest, Read_256_Byte_Chunks_With_Offset) {
+ // Read in 256 byte chunks.
+ DoReadInChunks(256, 0xfffe34, 1024, (flash_.buffer().size_bytes() - 1024));
+}
+
+TEST_F(FlashStreamTest, Read_Multiple_Seeks) {
+ static const size_t kSeekReadSizeBytes = 512;
+ static const size_t kSeekReadIterations = 4;
+ ASSERT_GE(flash_.buffer().size_bytes(),
+ (kSeekReadIterations * (2 * kSeekReadSizeBytes)));
+
+ InitBufferToRandom(flash_.buffer(), 0xffde176);
+ FlashPartition::Reader reader(partition_);
+
+ for (size_t i = 0; i < kSeekReadIterations; i++) {
+ size_t start_offset = kSeekReadSizeBytes + (i * 2 * kSeekReadSizeBytes);
+ ASSERT_EQ(reader.Seek(start_offset), OkStatus());
+ ASSERT_EQ(start_offset, reader.Tell());
+
+ ByteSpan read_chunk = std::span(source_buffer_).first(kSeekReadSizeBytes);
+ InitBufferToFill(read_chunk, 0);
+
+ auto result = reader.Read(read_chunk);
+ ASSERT_EQ(result.status(), OkStatus());
+ ASSERT_EQ(result.value().size_bytes(), kSeekReadSizeBytes);
+ VerifyFlashContent(read_chunk, start_offset);
+ ASSERT_EQ(start_offset + kSeekReadSizeBytes, reader.Tell());
+ }
+}
+
+TEST_F(FlashStreamTest, Read_Seek_Forward_and_Back) {
+ static const size_t kSeekReadSizeBytes = 256;
+ static const size_t kTotalIterations = 3;
+ static const size_t kSeekReadIterations =
+ flash_.buffer().size_bytes() / (2 * kSeekReadSizeBytes);
+
+ InitBufferToRandom(flash_.buffer(), 0xffde176);
+ FlashPartition::Reader reader(partition_);
+
+ for (size_t outer_count = 0; outer_count < kTotalIterations; outer_count++) {
+ // Seek and read going forward.
+ for (size_t i = 0; i < kSeekReadIterations; i++) {
+ size_t start_offset = kSeekReadSizeBytes + (i * 2 * kSeekReadSizeBytes);
+ ASSERT_EQ(reader.Seek(start_offset), OkStatus());
+ ASSERT_EQ(start_offset, reader.Tell());
+
+ ByteSpan read_chunk = std::span(source_buffer_).first(kSeekReadSizeBytes);
+ InitBufferToFill(read_chunk, 0);
+
+ auto result = reader.Read(read_chunk);
+ ASSERT_EQ(result.status(), OkStatus());
+ ASSERT_EQ(result.value().size_bytes(), kSeekReadSizeBytes);
+ VerifyFlashContent(read_chunk, start_offset);
+ ASSERT_EQ(start_offset + kSeekReadSizeBytes, reader.Tell());
+ }
+
+ // Seek and read going backward.
+ for (size_t j = (kSeekReadIterations * 2); j > 0; j--) {
+ size_t start_offset = (j - 1) * kSeekReadSizeBytes;
+ ASSERT_EQ(reader.Seek(start_offset), OkStatus());
+ ASSERT_EQ(start_offset, reader.Tell());
+ ASSERT_GE(reader.ConservativeReadLimit(), kSeekReadSizeBytes);
+
+ ByteSpan read_chunk = std::span(source_buffer_).first(kSeekReadSizeBytes);
+ InitBufferToFill(read_chunk, 0);
+
+ auto result = reader.Read(read_chunk);
+ ASSERT_EQ(result.status(), OkStatus());
+ ASSERT_EQ(result.value().size_bytes(), kSeekReadSizeBytes);
+ VerifyFlashContent(read_chunk, start_offset);
+ ASSERT_EQ(start_offset + kSeekReadSizeBytes, reader.Tell());
+ }
+ }
+}
+
+TEST_F(FlashStreamTest, Read_Past_End) {
+ InitBufferToRandom(flash_.buffer(), 0xcccde176);
+ FlashPartition::Reader reader(partition_);
+
+ static const size_t kBytesForFinalRead = 50;
+
+ ByteSpan read_chunk = std::span(source_buffer_)
+ .first(source_buffer_.size() - kBytesForFinalRead);
+
+ auto result = reader.Read(read_chunk);
+ ASSERT_EQ(result.status(), OkStatus());
+ ASSERT_EQ(result.value().size_bytes(), read_chunk.size_bytes());
+ ASSERT_EQ(reader.Tell(), read_chunk.size_bytes());
+ ASSERT_EQ(reader.ConservativeReadLimit(), kBytesForFinalRead);
+ ASSERT_EQ(result.value().data(), read_chunk.data());
+ VerifyFlashContent(read_chunk);
+
+ result = reader.Read(read_chunk);
+ ASSERT_EQ(result.status(), OkStatus());
+ ASSERT_EQ(result.value().size_bytes(), kBytesForFinalRead);
+ ASSERT_EQ(reader.Tell(), flash_.buffer().size_bytes());
+ ASSERT_EQ(reader.ConservativeReadLimit(), 0U);
+ ASSERT_EQ(result.value().data(), read_chunk.data());
+ VerifyFlashContent(result.value(), read_chunk.size_bytes());
+}
+
+TEST_F(FlashStreamTest, Read_Past_End_After_Seek) {
+ InitBufferToRandom(flash_.buffer(), 0xddcde176);
+ FlashPartition::Reader reader(partition_);
+
+ static const size_t kBytesForFinalRead = 50;
+ size_t start_offset = flash_.buffer().size_bytes() - kBytesForFinalRead;
+ ASSERT_EQ(reader.Seek(start_offset), OkStatus());
+
+ ASSERT_EQ(start_offset, reader.Tell());
+ ASSERT_EQ(reader.ConservativeReadLimit(), kBytesForFinalRead);
+
+ ByteSpan read_chunk = std::span(source_buffer_);
+ auto result = reader.Read(read_chunk);
+ ASSERT_EQ(result.status(), OkStatus());
+ ASSERT_EQ(result.value().size_bytes(), kBytesForFinalRead);
+ ASSERT_EQ(reader.Tell(), flash_.buffer().size_bytes());
+ ASSERT_EQ(reader.ConservativeReadLimit(), 0U);
+ ASSERT_EQ(result.value().data(), read_chunk.data());
+ VerifyFlashContent(result.value(), start_offset);
+}
+
+TEST_F(FlashStreamTest, Read_Out_Of_Range) {
+ InitBufferToRandom(flash_.buffer(), 0x531de176);
+ FlashPartition::Reader reader(partition_);
+
+ ByteSpan read_chunk = std::span(source_buffer_);
+
+ auto result = reader.Read(read_chunk);
+ ASSERT_EQ(result.status(), OkStatus());
+ ASSERT_EQ(result.value().size_bytes(), read_chunk.size_bytes());
+ ASSERT_EQ(reader.Tell(), read_chunk.size_bytes());
+ ASSERT_EQ(reader.ConservativeReadLimit(), 0U);
+ ASSERT_EQ(result.value().data(), read_chunk.data());
+ VerifyFlashContent(read_chunk);
+
+ result = reader.Read(read_chunk);
+ ASSERT_EQ(result.status(), Status::OutOfRange());
+ ASSERT_EQ(reader.Tell(), flash_.buffer().size_bytes());
+ ASSERT_EQ(reader.ConservativeReadLimit(), 0U);
+}
+
+TEST_F(FlashStreamTest, Read_Out_Of_Range_After_Seek) {
+ InitBufferToRandom(flash_.buffer(), 0x8c94566);
+ FlashPartition::Reader reader(partition_);
+
+ ByteSpan read_chunk = std::span(source_buffer_);
+
+ ASSERT_EQ(reader.Seek(flash_.buffer().size_bytes()), OkStatus());
+ ASSERT_EQ(reader.Tell(), flash_.buffer().size_bytes());
+ ASSERT_EQ(reader.ConservativeReadLimit(), 0U);
+
+ auto result = reader.Read(read_chunk);
+ ASSERT_EQ(result.status(), Status::OutOfRange());
+ ASSERT_EQ(reader.Tell(), flash_.buffer().size_bytes());
+ ASSERT_EQ(reader.ConservativeReadLimit(), 0U);
+}
+
+TEST_F(FlashStreamTest, Reader_Seek_Ops) {
+ size_t kPartitionSizeBytes = flash_.buffer().size_bytes();
+ FlashPartition::Reader reader(partition_);
+
+ // Seek from 0 to past end.
+ ASSERT_EQ(reader.Seek(kPartitionSizeBytes + 5), Status::OutOfRange());
+ ASSERT_EQ(reader.Tell(), 0U);
+
+ // Seek to end then seek again going past end.
+ ASSERT_EQ(reader.Seek(0), OkStatus());
+ ASSERT_EQ(reader.Tell(), 0U);
+ ASSERT_EQ(reader.ConservativeReadLimit(), kPartitionSizeBytes);
+
+ ASSERT_EQ(reader.Seek(kPartitionSizeBytes,
+ FlashPartition::Reader::Whence::kCurrent),
+ OkStatus());
+ ASSERT_EQ(reader.Tell(), kPartitionSizeBytes);
+ ASSERT_EQ(reader.ConservativeReadLimit(), 0U);
+
+ ASSERT_EQ(reader.Seek(5, FlashPartition::Reader::Whence::kCurrent),
+ Status::OutOfRange());
+ ASSERT_EQ(reader.Tell(), kPartitionSizeBytes);
+ ASSERT_EQ(reader.ConservativeReadLimit(), 0U);
+
+ // Seek to beginning then seek backwards going past start.
+ ASSERT_EQ(reader.Seek(0), OkStatus());
+ ASSERT_EQ(reader.Seek(-5, FlashPartition::Reader::Whence::kCurrent),
+ Status::OutOfRange());
+ ASSERT_EQ(reader.Tell(), 0U);
+ ASSERT_EQ(reader.ConservativeReadLimit(), kPartitionSizeBytes);
+}
+
+TEST_F(FlashStreamTest, Invalid_Ops) {
+ FlashPartition::Reader reader(partition_);
+ ASSERT_EQ(reader.ConservativeWriteLimit(), 0U);
+
+ FlashPartition::Writer writer(partition_);
+ ASSERT_EQ(writer.ConservativeReadLimit(), 0U);
+}
+
+} // namespace
+} // namespace pw::kvs
+
+#endif // PW_CXX_STANDARD_IS_SUPPORTED(17)
diff --git a/pw_kvs/key_value_store_initialized_test.cc b/pw_kvs/key_value_store_initialized_test.cc
index 9a840e73d..431456458 100644
--- a/pw_kvs/key_value_store_initialized_test.cc
+++ b/pw_kvs/key_value_store_initialized_test.cc
@@ -409,6 +409,34 @@ TEST_F(EmptyInitializedKvs, Iteration_EmptyAfterDeletion) {
}
}
+TEST_F(EmptyInitializedKvs, Iterator) {
+ ASSERT_EQ(OkStatus(), kvs_.Put("kEy", std::as_bytes(std::span("123"))));
+
+ for (KeyValueStore::iterator it = kvs_.begin(); it != kvs_.end(); ++it) {
+ EXPECT_STREQ(it->key(), "kEy");
+
+ char temp[sizeof("123")] = {};
+ EXPECT_EQ(OkStatus(), it->Get(&temp));
+ EXPECT_STREQ("123", temp);
+ }
+}
+
+TEST_F(EmptyInitializedKvs, Iterator_PostIncrement) {
+ ASSERT_EQ(OkStatus(), kvs_.Put("kEy", std::as_bytes(std::span("123"))));
+
+ KeyValueStore::iterator it = kvs_.begin();
+ EXPECT_EQ(it++, kvs_.begin());
+ EXPECT_EQ(it, kvs_.end());
+}
+
+TEST_F(EmptyInitializedKvs, Iterator_PreIncrement) {
+ ASSERT_EQ(OkStatus(), kvs_.Put("kEy", std::as_bytes(std::span("123"))));
+
+ KeyValueStore::iterator it = kvs_.begin();
+ EXPECT_EQ(++it, kvs_.end());
+ EXPECT_EQ(it, kvs_.end());
+}
+
TEST_F(EmptyInitializedKvs, Basic) {
// Add some data
uint8_t value1 = 0xDA;
diff --git a/pw_kvs/public/pw_kvs/flash_memory.h b/pw_kvs/public/pw_kvs/flash_memory.h
index 8fe6076c3..073db84da 100644
--- a/pw_kvs/public/pw_kvs/flash_memory.h
+++ b/pw_kvs/public/pw_kvs/flash_memory.h
@@ -20,9 +20,15 @@
#include "pw_assert/assert.h"
#include "pw_kvs/alignment.h"
+#include "pw_polyfill/standard.h"
#include "pw_status/status.h"
#include "pw_status/status_with_size.h"
+#if PW_CXX_STANDARD_IS_SUPPORTED(17) // Requires C++17 for pw::Result
+#include "pw_stream/seek.h"
+#include "pw_stream/stream.h"
+#endif // PW_CXX_STANDARD_IS_SUPPORTED(17)
+
namespace pw {
namespace kvs {
@@ -140,6 +146,52 @@ class FlashPartition {
// The flash address is in the range of: 0 to PartitionSize.
using Address = uint32_t;
+#if PW_CXX_STANDARD_IS_SUPPORTED(17) // Requires C++17 for pw::Result
+ class Writer final : public stream::NonSeekableWriter {
+ public:
+ constexpr Writer(kvs::FlashPartition& partition)
+ : partition_(partition), position_(0) {}
+
+ private:
+ Status DoWrite(ConstByteSpan data) override;
+
+ size_t DoTell() const override { return position_; }
+
+ size_t ConservativeLimit(LimitType type) const override {
+ return type == LimitType::kWrite ? partition_.size_bytes() - position_
+ : 0;
+ }
+
+ FlashPartition& partition_;
+ size_t position_;
+ };
+
+ class Reader final : public stream::SeekableReader {
+ public:
+ constexpr Reader(kvs::FlashPartition& partition)
+ : partition_(partition), position_(0) {}
+
+ Reader(const Reader&) = delete;
+ Reader& operator=(const Reader&) = delete;
+
+ private:
+ StatusWithSize DoRead(ByteSpan data) override;
+
+ size_t DoTell() const override { return position_; }
+
+ Status DoSeek(ptrdiff_t offset, Whence origin) override {
+ return CalculateSeek(offset, origin, partition_.size_bytes(), position_);
+ }
+
+ size_t ConservativeLimit(LimitType type) const override {
+ return type == LimitType::kRead ? partition_.size_bytes() - position_ : 0;
+ }
+
+ FlashPartition& partition_;
+ size_t position_;
+ };
+#endif // PW_CXX_STANDARD_IS_SUPPORTED(17)
+
// Implement Output for the Write method.
class Output final : public pw::Output {
public:
diff --git a/pw_kvs/public/pw_kvs/key_value_store.h b/pw_kvs/public/pw_kvs/key_value_store.h
index b3c135dce..086dcb8eb 100644
--- a/pw_kvs/public/pw_kvs/key_value_store.h
+++ b/pw_kvs/public/pw_kvs/key_value_store.h
@@ -66,7 +66,8 @@ struct Options {
// garbage collection is attempted if space for an entry cannot be found. This
// is a relatively lengthy operation. If kDisabled, Put calls that would
// require garbage collection fail with RESOURCE_EXHAUSTED.
- GargbageCollectOnWrite gc_on_write = GargbageCollectOnWrite::kOneSector;
+ GargbageCollectOnWrite gc_on_write =
+ GargbageCollectOnWrite::kAsManySectorsNeeded;
// When the KVS handles errors that are discovered, such as corrupt entries,
 // not enough redundant copies of an entry, etc.
@@ -261,7 +262,11 @@ class KeyValueStore {
public:
iterator& operator++();
- iterator& operator++(int) { return operator++(); }
+ iterator operator++(int) {
+ const iterator original(item_.kvs_, item_.iterator_);
+ operator++();
+ return original;
+ }
// Reads the entry's key from flash.
const Item& operator*() {
diff --git a/pw_transfer/chunk_data_buffer.cc b/pw_log/Android.bp
index a8a5e1dda..d9cadd4b8 100644
--- a/pw_transfer/chunk_data_buffer.cc
+++ b/pw_log/Android.bp
@@ -1,4 +1,4 @@
-// Copyright 2021 The Pigweed Authors
+// Copyright 2022 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
@@ -12,22 +12,18 @@
// License for the specific language governing permissions and limitations under
// the License.
-#include "pw_transfer/internal/chunk_data_buffer.h"
-
-#include <algorithm>
-#include <cstring>
-
-#include "pw_assert/assert.h"
-
-namespace pw::transfer::internal {
-
-void ChunkDataBuffer::Write(ConstByteSpan data, bool last_chunk) {
- PW_DASSERT(data.size() <= buffer_.size());
-
- std::copy(data.begin(), data.end(), buffer_.begin());
- size_ = data.size();
-
- last_chunk_ = last_chunk;
+cc_library {
+ name: "libpw_log",
+ vendor_available: true,
+ cpp_std: "c++2a",
+ export_include_dirs: [
+ "public",
+ ],
}
-} // namespace pw::transfer::internal
+android_library {
+ name: "pw_log_android_java",
+ srcs: ["java/android_main/dev/pigweed/pw_log/*.java"],
+ visibility: ["//visibility:public"],
+ sdk_version: "current",
+}
diff --git a/pw_log/AndroidManifest.xml b/pw_log/AndroidManifest.xml
new file mode 100644
index 000000000..6b359c274
--- /dev/null
+++ b/pw_log/AndroidManifest.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+ Copyright 2022 The Pigweed Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ use this file except in compliance with the License. You may obtain a copy of
+ the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations under
+ the License.
+-->
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+ package="dev.pigweed.pw_log" >
+
+ <uses-sdk
+ android:minSdkVersion="14"
+ android:targetSdkVersion="32" />
+</manifest>
diff --git a/pw_log/BUILD.bazel b/pw_log/BUILD.bazel
index ee9bff004..3afe0afd3 100644
--- a/pw_log/BUILD.bazel
+++ b/pw_log/BUILD.bazel
@@ -49,6 +49,22 @@ pw_cc_library(
)
pw_cc_library(
+ name = "glog_adapter",
+ hdrs = [
+ "public/pw_log/glog_adapter.h",
+ "public/pw_log/glog_adapter_config.h",
+ "public/pw_log/internal/glog_adapter.h",
+ ],
+ includes = ["public"],
+ deps = [
+ "//pw_assert",
+ "//pw_log",
+ "//pw_preprocessor",
+ "//pw_string",
+ ],
+)
+
+pw_cc_library(
name = "proto_utils",
srcs = [
"proto_utils.cc",
@@ -58,9 +74,10 @@ pw_cc_library(
],
deps = [
":facade",
- ":log_pwpb",
+ ":log_proto_cc.pwpb",
"//pw_bytes",
"//pw_log_tokenized:headers",
+ "//pw_protobuf",
"//pw_result",
],
)
@@ -79,7 +96,7 @@ proto_library(
)
pw_proto_library(
- name = "log_pwpb",
+ name = "log_proto_cc",
deps = [":log_proto"],
)
@@ -104,12 +121,24 @@ pw_cc_test(
)
pw_cc_test(
+ name = "glog_adapter_test",
+ srcs = [
+ "glog_adapter_test.cc",
+ ],
+ deps = [
+ ":glog_adapter",
+ "//pw_unit_test",
+ ],
+)
+
+pw_cc_test(
name = "proto_utils_test",
srcs = [
"proto_utils_test.cc",
],
deps = [
":facade",
+ ":log_proto_cc.pwpb",
":proto_utils",
"//pw_preprocessor",
"//pw_protobuf",
diff --git a/pw_log/BUILD.gn b/pw_log/BUILD.gn
index 0da366b7d..1fbeee110 100644
--- a/pw_log/BUILD.gn
+++ b/pw_log/BUILD.gn
@@ -27,6 +27,10 @@ declare_args() {
# module. This should point to a source set that provides defines through a
# public config (which may -include a file or add defines directly).
pw_log_CONFIG = pw_build_DEFAULT_MODULE_CONFIG
+
+ # The build target that overrides the default configuration options for the
+ # glog adapter portion of this module.
+ pw_log_GLOG_ADAPTER_CONFIG = pw_build_DEFAULT_MODULE_CONFIG
}
config("public_include_path") {
@@ -55,16 +59,34 @@ pw_facade("pw_log") {
require_link_deps = [ ":impl" ]
}
+pw_source_set("glog_adapter") {
+ public_configs = [ ":public_include_path" ]
+ public = [
+ "public/pw_log/glog_adapter.h",
+ "public/pw_log/glog_adapter_config.h",
+ ]
+ public_deps = [
+ ":config",
+ "$dir_pw_assert",
+ "$dir_pw_log",
+ "$dir_pw_preprocessor",
+ "$dir_pw_string",
+ pw_log_GLOG_ADAPTER_CONFIG,
+ ]
+ sources = [ "public/pw_log/internal/glog_adapter.h" ]
+}
+
pw_source_set("proto_utils") {
public_configs = [ ":public_include_path" ]
public = [ "public/pw_log/proto_utils.h" ]
public_deps = [
":pw_log.facade",
"$dir_pw_bytes",
+ "$dir_pw_log:protos.pwpb",
"$dir_pw_log_tokenized:metadata",
"$dir_pw_result",
]
- deps = [ "$dir_pw_log:protos.pwpb" ]
+ deps = [ "$dir_pw_protobuf" ]
sources = [ "proto_utils.cc" ]
}
@@ -90,6 +112,7 @@ group("impl") {
pw_test_group("tests") {
tests = [
":basic_log_test",
+ ":glog_adapter_test",
":proto_utils_test",
]
}
@@ -108,11 +131,21 @@ pw_test("basic_log_test") {
]
}
+pw_test("glog_adapter_test") {
+ enable_if = pw_log_BACKEND != ""
+ deps = [
+ ":glog_adapter",
+ pw_log_BACKEND,
+ ]
+ sources = [ "glog_adapter_test.cc" ]
+}
+
pw_test("proto_utils_test") {
enable_if = pw_log_BACKEND != ""
deps = [
":proto_utils",
":pw_log.facade",
+ "$dir_pw_log:protos.pwpb",
"$dir_pw_preprocessor",
"$dir_pw_protobuf",
"$dir_pw_protobuf:bytes_utils",
diff --git a/pw_log/CMakeLists.txt b/pw_log/CMakeLists.txt
index a6c77c155..9ee4eeaf8 100644
--- a/pw_log/CMakeLists.txt
+++ b/pw_log/CMakeLists.txt
@@ -17,6 +17,8 @@ include($ENV{PW_ROOT}/pw_protobuf_compiler/proto.cmake)
pw_add_module_config(pw_log_CONFIG)
+pw_add_module_config(pw_log_GLOG_ADAPTER_CONFIG)
+
pw_add_module_library(pw_log.config
HEADERS
public/pw_log/config.h
@@ -39,6 +41,22 @@ pw_add_facade(pw_log
pw_log.config
)
+pw_add_module_library(pw_log.glog_adapter
+ HEADERS
+ public/pw_log/glog_adapter.h
+ public/pw_log/glog_adapter_config.h
+ public/pw_log/internal/glog_adapter.h
+ PUBLIC_INCLUDES
+ public
+ PUBLIC_DEPS
+ pw_assert
+ pw_log
+ pw_log.config
+ pw_preprocessor
+ pw_string
+ ${pw_log_GLOG_ADAPTER_CONFIG}
+)
+
pw_add_module_library(pw_log.proto_utils
HEADERS
public/pw_log/proto_utils.h
@@ -47,10 +65,11 @@ pw_add_module_library(pw_log.proto_utils
PUBLIC_DEPS
pw_log.facade
pw_bytes
+ pw_log.protos.pwpb
pw_log_tokenized.metadata
pw_result
PRIVATE_DEPS
- pw_log.protos.pwpb
+ pw_protobuf
SOURCES
proto_utils.cc
)
@@ -77,15 +96,24 @@ if(NOT "${pw_log_BACKEND}" STREQUAL "pw_log.NO_BACKEND_SET")
modules
pw_log
)
-endif()
-if(NOT "${pw_log_BACKEND}" STREQUAL "pw_log.NO_BACKEND_SET")
+ pw_add_test(pw_log.glog_adapter_test
+ SOURCES
+ glog_adapter_test.cc
+ DEPS
+ pw_log.glog_adapter
+ GROUPS
+ modules
+ pw_log
+ )
+
pw_add_test(pw_log.proto_utils_test
SOURCES
proto_utils_test.cc
DEPS
pw_log
pw_log.proto_utils
+ pw_log.protos.pwpb
pw_preprocessor
pw_protobuf
pw_protobuf.bytes_utils
diff --git a/pw_log/docs.rst b/pw_log/docs.rst
index 406c08e0f..b3eac4376 100644
--- a/pw_log/docs.rst
+++ b/pw_log/docs.rst
@@ -310,10 +310,36 @@ to directly provide dependencies through include paths only, rather than GN
``public_deps``. In this case, GN header checking can be disabled with
``check_includes = false``.
+----------------------
+Google Logging Adapter
+----------------------
+Pigweed provides a minimal C++ stream-style Google Log set of adapter
+macros around PW_LOG under ``pw_log/glog_adapter.h`` for compatibility with
+non-embedded code. While it is effective for porting server code to
+microcontrollers quickly, we do not advise embedded projects use that approach
+unless absolutely necessary.
+
+Configuration
+==============
+
+.. c:macro:: PW_LOG_CFG_GLOG_BUFFER_SIZE_BYTES
+
+ The size of the stack-allocated buffer used by the Google Logging (glog)
+  macros. This only affects the glog macros provided through
+  pw_log/glog_adapter.h.
+
+ Pigweed strongly recommends sticking to printf-style logging instead
+ of C++ stream-style Google Log logging unless absolutely necessary. The glog
+ macros are only provided for compatibility with non-embedded code. See
+ :ref:`module-pw_log-design-discussion` for more details.
+
+ Undersizing this buffer will result in truncated log messages.
+
-----------------
Design discussion
-----------------
+.. _module-pw_log-design-discussion:
+
Why not use C++ style stream logging operators like Google Log?
===============================================================
There are multiple reasons to avoid the C++ stream logging style in embedded,
@@ -361,15 +387,11 @@ because it:
- is C compatible
- has smaller call sites
-The Pigweed authors additionally maintain a C++ stream-style embedded logging
-library for compatibility with non-embedded code. While it is effective for
-porting server code to microcontrollers quickly, we do not advise embedded
-projects use that approach unless absolutely necessary.
+See also :ref:`module-pw_log_tokenized` for details on leveraging Pigweed's
+tokenizer module for logging.
-- See also :ref:`module-pw_log_tokenized` for details on leveraging Pigweed's
- tokenizer module for logging.
-- See also :ref:`module-pw_tokenizer` for details on Pigweed's tokenizer,
- which is useful for more than just logging.
+See also :ref:`module-pw_tokenizer` for details on Pigweed's tokenizer,
+which is useful for more than just logging.
Why does the facade use header redirection instead of C functions?
==================================================================
@@ -390,3 +412,14 @@ argument to each macro call seemed like too much. On the other hand, flags are
something that are typically added on a per-log-statement basis, and is why the
flags are added on a per-call basis (though hidden through the high-level
macros).
+
+--------------
+pw_log in Java
+--------------
+``pw_log`` provides a thin Java logging class that uses Google's `Flogger
+<https://google.github.io/flogger/>`_ API. The purpose of this wrapper is to
+support logging on platforms that do not support Flogger. The main
+implementation in ``pw_log/java/main`` simply wraps a
+``com.google.common.flogger.FluentLogger``. An implementation that logs to
+Android's ``android.util.Log`` instead is provided in
+``pw_log/java/android_main``.
diff --git a/pw_log/glog_adapter_test.cc b/pw_log/glog_adapter_test.cc
new file mode 100644
index 000000000..f8265031f
--- /dev/null
+++ b/pw_log/glog_adapter_test.cc
@@ -0,0 +1,102 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+// TODO(pwbug/88): Add verification of the actually logged statements.
+
+// clang-format off
+#define PW_LOG_MODULE_NAME "TST"
+#define PW_LOG_LEVEL PW_LOG_LEVEL_DEBUG
+
+#include "pw_log/glog_adapter.h"
+
+#include "gtest/gtest.h"
+// clang-format on
+
+namespace pw::log {
+namespace {
+
+volatile bool conditional;
+
+TEST(Glog, Debug) { LOG(DEBUG) << "LOG(DEBUG) works"; }
+
+TEST(Glog, ConditionalDebug) {
+ conditional = true;
+ LOG_IF(DEBUG, conditional) << "LOG_IF(DEBUG, true) works";
+ conditional = false;
+ LOG_IF(DEBUG, conditional) << "You should not see this log";
+}
+
+TEST(Glog, Info) { LOG(INFO) << "LOG(INFO) works"; }
+
+TEST(Glog, ConditionalInfo) {
+ conditional = true;
+ LOG_IF(INFO, conditional) << "LOG_IF(INFO, true) works";
+ conditional = false;
+ LOG_IF(INFO, conditional) << "You should not see this log";
+}
+
+TEST(Glog, Warning) { LOG(WARNING) << "LOG(WARNING) works"; }
+
+TEST(Glog, ConditionalWarning) {
+ conditional = true;
+ LOG_IF(WARNING, conditional) << "LOG_IF(WARNING, true) works";
+ conditional = false;
+ LOG_IF(WARNING, conditional) << "You should not see this log";
+}
+
+TEST(Glog, Error) { LOG(ERROR) << "LOG(ERROR) works"; }
+
+TEST(Glog, ConditionalError) {
+ conditional = true;
+ LOG_IF(ERROR, conditional) << "LOG_IF(ERROR, true) works";
+ conditional = false;
+ LOG_IF(ERROR, conditional) << "You should not see this log";
+}
+
+TEST(Glog, Fatal) {
+ conditional = false;
+ if (conditional) {
+ LOG(FATAL) << "LOG(FATAL) compiles but you should not see this log";
+ }
+}
+
+TEST(Glog, ConditionalFatal) {
+ conditional = false;
+ LOG_IF(FATAL, conditional) << "LOG_IF(FATAL, false) compiles but you should "
+ << "not see this log";
+}
+
+TEST(Glog, Dfatal) {
+ conditional = false;
+ if (conditional) {
+#if defined(NDEBUG)
+ LOG(DFATAL) << "LOG(DFATAL) works through PW_LOG_ERROR as NDEBUG is set";
+#else // !defined(NDEBUG)
+ LOG(DFATAL) << "LOG(DFATAL) compiles but you should not see this log";
+#endif // defined(NDEBUG)
+ }
+}
+
+TEST(Glog, ConditionalDfatal) {
+#if defined(NDEBUG)
+ conditional = true;
+ LOG_IF(DFATAL, conditional) << "LOG_IF(DFATAL, true) works through "
+ << "PW_LOG_ERROR as NDEBUG is set";
+#endif // defined(NDEBUG)
+ conditional = false;
+ LOG_IF(DFATAL, conditional) << "LOG_IF(DFATAL, false) compiles but you "
+ << "should not see this log";
+}
+
+} // namespace
+} // namespace pw::log
diff --git a/pw_log/java/android_main/dev/pigweed/pw_log/Logger.java b/pw_log/java/android_main/dev/pigweed/pw_log/Logger.java
new file mode 100644
index 000000000..3390011a4
--- /dev/null
+++ b/pw_log/java/android_main/dev/pigweed/pw_log/Logger.java
@@ -0,0 +1,103 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+package dev.pigweed.pw_log;
+
+import android.util.Log;
+import java.util.logging.Level;
+
+/**
+ * Partial implementation of the com.google.common.flogger.FluentLogger API that
+ * logs to android.util.Log.
+ */
+public final class Logger {
+ private final String tag;
+
+ public final class AndroidLogApi {
+ private final int level;
+
+ private Throwable cause = null;
+
+ private AndroidLogApi(Level level) {
+ if (level == Level.FINEST || level == Level.FINER) {
+ this.level = Log.VERBOSE;
+ } else if (level == Level.FINE || level == Level.CONFIG) {
+ this.level = Log.DEBUG;
+ } else if (level == Level.WARNING) {
+ this.level = Log.WARN;
+ } else if (level == Level.SEVERE) {
+ this.level = Log.ERROR;
+ } else {
+ this.level = Log.INFO;
+ }
+ }
+
+ public AndroidLogApi withCause(Throwable cause) {
+ this.cause = cause;
+ return this;
+ }
+
+ public void log(String message) {
+ if (cause != null) {
+ message = String.format("%s: %s", cause, message);
+ }
+
+ Log.println(level, tag, message);
+ }
+
+ public void log(String message, Object... args) {
+ log(String.format(message, args));
+ }
+ }
+
+ public static Logger forClass(Class<?> enclosingClass) {
+ return new Logger(enclosingClass.getSimpleName());
+ }
+
+ private Logger(String tag) {
+ this.tag = tag;
+ }
+
+ public AndroidLogApi at(Level level) {
+ return new AndroidLogApi(level);
+ }
+
+ public AndroidLogApi atSevere() {
+ return at(Level.SEVERE);
+ }
+
+ public AndroidLogApi atWarning() {
+ return at(Level.WARNING);
+ }
+
+ public AndroidLogApi atInfo() {
+ return at(Level.INFO);
+ }
+
+ public AndroidLogApi atConfig() {
+ return at(Level.CONFIG);
+ }
+
+ public AndroidLogApi atFine() {
+ return at(Level.FINE);
+ }
+
+ public AndroidLogApi atFiner() {
+ return at(Level.FINER);
+ }
+
+ public AndroidLogApi atFinest() {
+ return at(Level.FINEST);
+ }
+}
diff --git a/pw_log/java/main/dev/pigweed/pw_log/BUILD.bazel b/pw_log/java/main/dev/pigweed/pw_log/BUILD.bazel
new file mode 100644
index 000000000..8ebacb05a
--- /dev/null
+++ b/pw_log/java/main/dev/pigweed/pw_log/BUILD.bazel
@@ -0,0 +1,25 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# Logging API that maps to Google's Flogger (https://google.github.io/flogger/),
+# or an alternate API if Flogger is not supported.
+
+java_library(
+ name = "pw_log",
+ srcs = ["Logger.java"],
+ visibility = ["//visibility:public"],
+ deps = [
+ "@maven//:com_google_flogger_flogger",
+ ],
+)
diff --git a/pw_log/java/main/dev/pigweed/pw_log/Logger.java b/pw_log/java/main/dev/pigweed/pw_log/Logger.java
new file mode 100644
index 000000000..f1ba7449c
--- /dev/null
+++ b/pw_log/java/main/dev/pigweed/pw_log/Logger.java
@@ -0,0 +1,71 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+package dev.pigweed.pw_log;
+
+import com.google.common.flogger.FluentLogger;
+import java.util.logging.Level;
+
+/**
+ * Partial implementation of the com.google.common.flogger.FluentLogger API that
+ * wraps a FluentLogger instance.
+ *
+ * This class is used instead of directly logging to FluentLogger to support
+ * swapping the implementation on systems that don't support FluentLogger (i.e.
+ * Android).
+ */
+@SuppressWarnings("FloggerSplitLogStatement")
+public final class Logger {
+ private final FluentLogger wrappedLogger;
+
+ public static Logger forClass(Class<?> enclosingClass) {
+ return new Logger(FluentLogger.forEnclosingClass());
+ }
+
+ private Logger(FluentLogger fluentLogger) {
+ this.wrappedLogger = fluentLogger;
+ }
+
+ public FluentLogger.Api at(Level level) {
+ return wrappedLogger.at(level);
+ }
+
+ public FluentLogger.Api atSevere() {
+ return at(Level.SEVERE);
+ }
+
+ public FluentLogger.Api atWarning() {
+ return at(Level.WARNING);
+ }
+
+ public FluentLogger.Api atInfo() {
+ return at(Level.INFO);
+ }
+
+ public FluentLogger.Api atConfig() {
+ return at(Level.CONFIG);
+ }
+
+ public FluentLogger.Api atFine() {
+ return at(Level.FINE);
+ }
+
+ public FluentLogger.Api atFiner() {
+ return at(Level.FINER);
+ }
+
+ public FluentLogger.Api atFinest() {
+ return at(Level.FINEST);
+ }
+}
diff --git a/pw_log/log.proto b/pw_log/log.proto
index ff2490bce..611724a26 100644
--- a/pw_log/log.proto
+++ b/pw_log/log.proto
@@ -29,10 +29,10 @@ option java_outer_classname = "Log";
//
// Size analysis for tokenized log messages, including each field's proto tag:
//
-// - message - 6-12 bytes, depending on number and value of arguments
+// - message - 6-12 bytes; depending on number and value of arguments
// - line_level - 3 bytes; 4 bytes if line > 2048 (uncommon)
// - timestamp - 3 bytes; assuming delta encoding
-// - thread_name - 6 bytes
+// - thread - 2-6 bytes; depending on whether value is a token or string
//
// Adding the fields gives the total proto message size:
//
@@ -138,6 +138,10 @@ message LogEntry {
// The file path where this log was created, if not encoded in the message.
optional bytes file = 8 [(tokenizer.format) = TOKENIZATION_OPTIONAL];
+ // The task or thread name that created the log message. If the log was not
+ // created on a thread, it should use a name appropriate to that context.
+ optional bytes thread = 9 [(tokenizer.format) = TOKENIZATION_OPTIONAL];
+
// The following fields are planned but will not be added until they are
// needed. Protobuf field numbers over 15 use an extra byte, so these fields
// are left out for now to avoid reserving field numbers unnecessarily.
@@ -146,11 +150,6 @@ message LogEntry {
// field is implementation defined
// optional uint32 source_id = ?;
- // The task or thread name that created the log message. If the log was not
- // created on a thread, it should use a name appropriate to that context.
- // optional bytes thread_name = ?
- // [(tokenizer.format) = TOKENIZATION_OPTIONAL];
-
// Some messages are associated with trace events, which may carry additional
// contextual data. This is a tuple of a data format string which could be
// used by the decoder to identify the data (e.g. printf-style tokens) and the
diff --git a/pw_log/proto_utils.cc b/pw_log/proto_utils.cc
index ff8eec251..23739aaf9 100644
--- a/pw_log/proto_utils.cc
+++ b/pw_log/proto_utils.cc
@@ -19,7 +19,6 @@
#include "pw_bytes/endian.h"
#include "pw_log/levels.h"
-#include "pw_log/proto/log.pwpb.h"
#include "pw_log_tokenized/metadata.h"
#include "pw_protobuf/wire_format.h"
@@ -28,6 +27,7 @@ namespace pw::log {
Result<ConstByteSpan> EncodeLog(int level,
unsigned int flags,
std::string_view module_name,
+ std::string_view thread_name,
std::string_view file_name,
int line_number,
int64_t ticks_since_epoch,
@@ -55,14 +55,18 @@ Result<ConstByteSpan> EncodeLog(int level,
if (!file_name.empty()) {
status = encoder.WriteFile(std::as_bytes(std::span(file_name)));
}
+ if (!thread_name.empty()) {
+ status = encoder.WriteThread(std::as_bytes(std::span(thread_name)));
+ }
PW_TRY(encoder.status());
return ConstByteSpan(encoder);
}
-Result<ConstByteSpan> EncodeTokenizedLog(pw::log_tokenized::Metadata metadata,
- ConstByteSpan tokenized_data,
- int64_t ticks_since_epoch,
- ByteSpan encode_buffer) {
+LogEntry::MemoryEncoder CreateEncoderAndEncodeTokenizedLog(
+ pw::log_tokenized::Metadata metadata,
+ ConstByteSpan tokenized_data,
+ int64_t ticks_since_epoch,
+ ByteSpan encode_buffer) {
// Encode message to the LogEntry protobuf.
LogEntry::MemoryEncoder encoder(encode_buffer);
@@ -80,8 +84,7 @@ Result<ConstByteSpan> EncodeTokenizedLog(pw::log_tokenized::Metadata metadata,
status =
encoder.WriteModule(std::as_bytes(std::span(&little_endian_module, 1)));
}
- PW_TRY(encoder.status());
- return ConstByteSpan(encoder);
+ return encoder;
}
} // namespace pw::log
diff --git a/pw_log/proto_utils_test.cc b/pw_log/proto_utils_test.cc
index 9e3c22043..7b018006d 100644
--- a/pw_log/proto_utils_test.cc
+++ b/pw_log/proto_utils_test.cc
@@ -17,6 +17,7 @@
#include "gtest/gtest.h"
#include "pw_bytes/span.h"
#include "pw_log/levels.h"
+#include "pw_log/proto/log.pwpb.h"
#include "pw_protobuf/bytes_utils.h"
#include "pw_protobuf/decoder.h"
@@ -25,10 +26,12 @@ namespace pw::log {
void VerifyTokenizedLogEntry(pw::protobuf::Decoder& entry_decoder,
pw::log_tokenized::Metadata expected_metadata,
ConstByteSpan expected_tokenized_data,
- const int64_t expected_timestamp) {
+ const int64_t expected_timestamp,
+ ConstByteSpan expected_thread_name) {
ConstByteSpan tokenized_data;
EXPECT_TRUE(entry_decoder.Next().ok()); // message [tokenized]
- EXPECT_EQ(1U, entry_decoder.FieldNumber());
+ EXPECT_EQ(entry_decoder.FieldNumber(),
+ static_cast<uint32_t>(log::LogEntry::Fields::MESSAGE));
EXPECT_TRUE(entry_decoder.ReadBytes(&tokenized_data).ok());
EXPECT_TRUE(std::memcmp(tokenized_data.begin(),
expected_tokenized_data.begin(),
@@ -36,7 +39,8 @@ void VerifyTokenizedLogEntry(pw::protobuf::Decoder& entry_decoder,
uint32_t line_level;
EXPECT_TRUE(entry_decoder.Next().ok()); // line_level
- EXPECT_EQ(2U, entry_decoder.FieldNumber());
+ EXPECT_EQ(entry_decoder.FieldNumber(),
+ static_cast<uint32_t>(log::LogEntry::Fields::LINE_LEVEL));
EXPECT_TRUE(entry_decoder.ReadUint32(&line_level).ok());
uint32_t line_number;
@@ -48,38 +52,57 @@ void VerifyTokenizedLogEntry(pw::protobuf::Decoder& entry_decoder,
if (expected_metadata.flags() != 0) {
uint32_t flags;
EXPECT_TRUE(entry_decoder.Next().ok()); // flags
- EXPECT_EQ(3U, entry_decoder.FieldNumber());
+ EXPECT_EQ(entry_decoder.FieldNumber(),
+ static_cast<uint32_t>(log::LogEntry::Fields::FLAGS));
EXPECT_TRUE(entry_decoder.ReadUint32(&flags).ok());
EXPECT_EQ(expected_metadata.flags(), flags);
}
int64_t timestamp;
EXPECT_TRUE(entry_decoder.Next().ok()); // timestamp
- EXPECT_EQ(4U, entry_decoder.FieldNumber());
+ EXPECT_TRUE(
+ entry_decoder.FieldNumber() ==
+ static_cast<uint32_t>(log::LogEntry::Fields::TIMESTAMP) ||
+ entry_decoder.FieldNumber() ==
+ static_cast<uint32_t>(log::LogEntry::Fields::TIME_SINCE_LAST_ENTRY));
EXPECT_TRUE(entry_decoder.ReadInt64(&timestamp).ok());
EXPECT_EQ(expected_timestamp, timestamp);
if (expected_metadata.module() != 0) {
EXPECT_TRUE(entry_decoder.Next().ok()); // module name
- EXPECT_EQ(7U, entry_decoder.FieldNumber());
+ EXPECT_EQ(entry_decoder.FieldNumber(),
+ static_cast<uint32_t>(log::LogEntry::Fields::MODULE));
const Result<uint32_t> module =
protobuf::DecodeBytesToUint32(entry_decoder);
ASSERT_TRUE(module.ok());
EXPECT_EQ(expected_metadata.module(), module.value());
}
+
+ if (!expected_thread_name.empty()) {
+ ConstByteSpan tokenized_thread_name;
+ EXPECT_TRUE(entry_decoder.Next().ok()); // thread [tokenized]
+ EXPECT_EQ(entry_decoder.FieldNumber(),
+ static_cast<uint32_t>(log::LogEntry::Fields::THREAD));
+ EXPECT_TRUE(entry_decoder.ReadBytes(&tokenized_thread_name).ok());
+ EXPECT_TRUE(std::memcmp(tokenized_thread_name.begin(),
+ expected_thread_name.begin(),
+ expected_thread_name.size()) == 0);
+ }
}
void VerifyLogEntry(pw::protobuf::Decoder& entry_decoder,
int expected_level,
unsigned int expected_flags,
std::string_view expected_module,
+ std::string_view expected_thread_name,
std::string_view expected_file_name,
int expected_line_number,
int64_t expected_ticks_since_epoch,
std::string_view expected_message) {
std::string_view message;
EXPECT_TRUE(entry_decoder.Next().ok()); // message
- EXPECT_EQ(1U, entry_decoder.FieldNumber());
+ EXPECT_EQ(entry_decoder.FieldNumber(),
+ static_cast<uint32_t>(log::LogEntry::Fields::MESSAGE));
EXPECT_TRUE(entry_decoder.ReadString(&message).ok());
EXPECT_TRUE(std::equal(message.begin(),
message.end(),
@@ -88,7 +111,8 @@ void VerifyLogEntry(pw::protobuf::Decoder& entry_decoder,
uint32_t line_level;
EXPECT_TRUE(entry_decoder.Next().ok()); // line_level
- EXPECT_EQ(2U, entry_decoder.FieldNumber());
+ EXPECT_EQ(entry_decoder.FieldNumber(),
+ static_cast<uint32_t>(log::LogEntry::Fields::LINE_LEVEL));
EXPECT_TRUE(entry_decoder.ReadUint32(&line_level).ok());
uint32_t line_number;
uint8_t level;
@@ -99,21 +123,27 @@ void VerifyLogEntry(pw::protobuf::Decoder& entry_decoder,
if (expected_flags != 0) {
uint32_t flags;
EXPECT_TRUE(entry_decoder.Next().ok()); // flags
- EXPECT_EQ(3U, entry_decoder.FieldNumber());
+ EXPECT_EQ(entry_decoder.FieldNumber(),
+ static_cast<uint32_t>(log::LogEntry::Fields::FLAGS));
EXPECT_TRUE(entry_decoder.ReadUint32(&flags).ok());
EXPECT_EQ(expected_flags, flags);
}
int64_t timestamp;
EXPECT_TRUE(entry_decoder.Next().ok()); // timestamp
- EXPECT_EQ(4U, entry_decoder.FieldNumber());
+ EXPECT_TRUE(
+ entry_decoder.FieldNumber() ==
+ static_cast<uint32_t>(log::LogEntry::Fields::TIMESTAMP) ||
+ entry_decoder.FieldNumber() ==
+ static_cast<uint32_t>(log::LogEntry::Fields::TIME_SINCE_LAST_ENTRY));
EXPECT_TRUE(entry_decoder.ReadInt64(&timestamp).ok());
EXPECT_EQ(expected_ticks_since_epoch, timestamp);
if (!expected_module.empty()) {
std::string_view module_name;
EXPECT_TRUE(entry_decoder.Next().ok()); // module
- EXPECT_EQ(7U, entry_decoder.FieldNumber());
+ EXPECT_EQ(entry_decoder.FieldNumber(),
+ static_cast<uint32_t>(log::LogEntry::Fields::MODULE));
EXPECT_TRUE(entry_decoder.ReadString(&module_name).ok());
EXPECT_TRUE(std::equal(module_name.begin(),
module_name.end(),
@@ -124,13 +154,26 @@ void VerifyLogEntry(pw::protobuf::Decoder& entry_decoder,
if (!expected_file_name.empty()) {
std::string_view file_name;
EXPECT_TRUE(entry_decoder.Next().ok()); // file
- EXPECT_EQ(8U, entry_decoder.FieldNumber());
+ EXPECT_EQ(entry_decoder.FieldNumber(),
+ static_cast<uint32_t>(log::LogEntry::Fields::FILE));
EXPECT_TRUE(entry_decoder.ReadString(&file_name).ok());
EXPECT_TRUE(std::equal(file_name.begin(),
file_name.end(),
expected_file_name.begin(),
expected_file_name.end()));
}
+
+ if (!expected_thread_name.empty()) {
+ std::string_view thread_name;
+    EXPECT_TRUE(entry_decoder.Next().ok());  // thread
+ EXPECT_EQ(entry_decoder.FieldNumber(),
+ static_cast<uint32_t>(log::LogEntry::Fields::THREAD));
+ EXPECT_TRUE(entry_decoder.ReadString(&thread_name).ok());
+ EXPECT_TRUE(std::equal(thread_name.begin(),
+ thread_name.end(),
+ expected_thread_name.begin(),
+ expected_thread_name.end()));
+ }
}
TEST(UtilsTest, LineLevelPacking) {
@@ -172,61 +215,83 @@ TEST(UtilsTest, LineLevelPackAndUnpack) {
}
TEST(UtilsTest, EncodeTokenizedLog) {
- constexpr std::byte kTokenizedData[1] = {(std::byte)0x01};
+ constexpr std::byte kTokenizedData[1] = {std::byte(0x01)};
constexpr int64_t kExpectedTimestamp = 1;
+ constexpr std::byte kExpectedThreadName[1] = {std::byte(0x02)};
std::byte encode_buffer[32];
pw::log_tokenized::Metadata metadata =
pw::log_tokenized::Metadata::Set<1, 2, 3, 4>();
- Result<ConstByteSpan> result = EncodeTokenizedLog(
- metadata, kTokenizedData, kExpectedTimestamp, encode_buffer);
+ Result<ConstByteSpan> result = EncodeTokenizedLog(metadata,
+ kTokenizedData,
+ kExpectedTimestamp,
+ kExpectedThreadName,
+ encode_buffer);
EXPECT_TRUE(result.ok());
pw::protobuf::Decoder log_decoder(result.value());
- VerifyTokenizedLogEntry(
- log_decoder, metadata, kTokenizedData, kExpectedTimestamp);
+ VerifyTokenizedLogEntry(log_decoder,
+ metadata,
+ kTokenizedData,
+ kExpectedTimestamp,
+ kExpectedThreadName);
result = EncodeTokenizedLog(metadata,
reinterpret_cast<const uint8_t*>(kTokenizedData),
sizeof(kTokenizedData),
kExpectedTimestamp,
+ kExpectedThreadName,
encode_buffer);
EXPECT_TRUE(result.ok());
log_decoder.Reset(result.value());
- VerifyTokenizedLogEntry(
- log_decoder, metadata, kTokenizedData, kExpectedTimestamp);
+ VerifyTokenizedLogEntry(log_decoder,
+ metadata,
+ kTokenizedData,
+ kExpectedTimestamp,
+ kExpectedThreadName);
}
TEST(UtilsTest, EncodeTokenizedLog_EmptyFlags) {
- constexpr std::byte kTokenizedData[1] = {(std::byte)0x01};
+ constexpr std::byte kTokenizedData[1] = {std::byte(0x01)};
constexpr int64_t kExpectedTimestamp = 1;
+ constexpr std::byte kExpectedThreadName[1] = {std::byte(0x02)};
std::byte encode_buffer[32];
// Create an empty flags set.
pw::log_tokenized::Metadata metadata =
pw::log_tokenized::Metadata::Set<1, 2, 0, 4>();
- Result<ConstByteSpan> result = EncodeTokenizedLog(
- metadata, kTokenizedData, kExpectedTimestamp, encode_buffer);
+ Result<ConstByteSpan> result = EncodeTokenizedLog(metadata,
+ kTokenizedData,
+ kExpectedTimestamp,
+ kExpectedThreadName,
+ encode_buffer);
EXPECT_TRUE(result.ok());
pw::protobuf::Decoder log_decoder(result.value());
- VerifyTokenizedLogEntry(
- log_decoder, metadata, kTokenizedData, kExpectedTimestamp);
+ VerifyTokenizedLogEntry(log_decoder,
+ metadata,
+ kTokenizedData,
+ kExpectedTimestamp,
+ kExpectedThreadName);
}
TEST(UtilsTest, EncodeTokenizedLog_InsufficientSpace) {
- constexpr std::byte kTokenizedData[1] = {(std::byte)0x01};
+ constexpr std::byte kTokenizedData[1] = {std::byte(0x01)};
constexpr int64_t kExpectedTimestamp = 1;
+ constexpr std::byte kExpectedThreadName[1] = {std::byte(0x02)};
std::byte encode_buffer[1];
pw::log_tokenized::Metadata metadata =
pw::log_tokenized::Metadata::Set<1, 2, 3, 4>();
- Result<ConstByteSpan> result = EncodeTokenizedLog(
- metadata, kTokenizedData, kExpectedTimestamp, encode_buffer);
+ Result<ConstByteSpan> result = EncodeTokenizedLog(metadata,
+ kTokenizedData,
+ kExpectedTimestamp,
+ kExpectedThreadName,
+ encode_buffer);
EXPECT_TRUE(result.status().IsResourceExhausted());
}
@@ -234,6 +299,7 @@ TEST(UtilsTest, EncodeLog) {
constexpr int kExpectedLevel = PW_LOG_LEVEL_INFO;
constexpr unsigned int kExpectedFlags = 2;
constexpr std::string_view kExpectedModule("TST");
+ constexpr std::string_view kExpectedThread("thread");
constexpr std::string_view kExpectedFile("proto_test.cc");
constexpr int kExpectedLine = 14;
constexpr int64_t kExpectedTimestamp = 1;
@@ -243,6 +309,7 @@ TEST(UtilsTest, EncodeLog) {
Result<ConstByteSpan> result = EncodeLog(kExpectedLevel,
kExpectedFlags,
kExpectedModule,
+ kExpectedThread,
kExpectedFile,
kExpectedLine,
kExpectedTimestamp,
@@ -255,6 +322,7 @@ TEST(UtilsTest, EncodeLog) {
kExpectedLevel,
kExpectedFlags,
kExpectedModule,
+ kExpectedThread,
kExpectedFile,
kExpectedLine,
kExpectedTimestamp,
@@ -265,6 +333,7 @@ TEST(UtilsTest, EncodeLog_EmptyFlags) {
constexpr int kExpectedLevel = PW_LOG_LEVEL_INFO;
constexpr unsigned int kExpectedFlags = 0;
constexpr std::string_view kExpectedModule("TST");
+ constexpr std::string_view kExpectedThread("thread");
constexpr std::string_view kExpectedFile("proto_test.cc");
constexpr int kExpectedLine = 14;
constexpr int64_t kExpectedTimestamp = 1;
@@ -274,6 +343,7 @@ TEST(UtilsTest, EncodeLog_EmptyFlags) {
Result<ConstByteSpan> result = EncodeLog(kExpectedLevel,
kExpectedFlags,
kExpectedModule,
+ kExpectedThread,
kExpectedFile,
kExpectedLine,
kExpectedTimestamp,
@@ -286,6 +356,7 @@ TEST(UtilsTest, EncodeLog_EmptyFlags) {
kExpectedLevel,
kExpectedFlags,
kExpectedModule,
+ kExpectedThread,
kExpectedFile,
kExpectedLine,
kExpectedTimestamp,
@@ -296,6 +367,7 @@ TEST(UtilsTest, EncodeLog_EmptyFile) {
constexpr int kExpectedLevel = PW_LOG_LEVEL_INFO;
constexpr unsigned int kExpectedFlags = 0;
constexpr std::string_view kExpectedModule("TST");
+ constexpr std::string_view kExpectedThread("thread");
constexpr std::string_view kExpectedFile;
constexpr int kExpectedLine = 14;
constexpr int64_t kExpectedTimestamp = 1;
@@ -305,6 +377,7 @@ TEST(UtilsTest, EncodeLog_EmptyFile) {
Result<ConstByteSpan> result = EncodeLog(kExpectedLevel,
kExpectedFlags,
kExpectedModule,
+ kExpectedThread,
kExpectedFile,
kExpectedLine,
kExpectedTimestamp,
@@ -317,6 +390,7 @@ TEST(UtilsTest, EncodeLog_EmptyFile) {
kExpectedLevel,
kExpectedFlags,
kExpectedModule,
+ kExpectedThread,
kExpectedFile,
kExpectedLine,
kExpectedTimestamp,
@@ -327,6 +401,7 @@ TEST(UtilsTest, EncodeLog_EmptyModule) {
constexpr int kExpectedLevel = PW_LOG_LEVEL_INFO;
constexpr unsigned int kExpectedFlags = 3;
constexpr std::string_view kExpectedModule;
+ constexpr std::string_view kExpectedThread("thread");
constexpr std::string_view kExpectedFile("test.cc");
constexpr int kExpectedLine = 14;
constexpr int64_t kExpectedTimestamp = 1;
@@ -336,6 +411,41 @@ TEST(UtilsTest, EncodeLog_EmptyModule) {
Result<ConstByteSpan> result = EncodeLog(kExpectedLevel,
kExpectedFlags,
kExpectedModule,
+ kExpectedThread,
+ kExpectedFile,
+ kExpectedLine,
+ kExpectedTimestamp,
+ kExpectedMessage,
+ encode_buffer);
+ EXPECT_TRUE(result.ok());
+
+ pw::protobuf::Decoder log_decoder(result.value());
+ VerifyLogEntry(log_decoder,
+ kExpectedLevel,
+ kExpectedFlags,
+ kExpectedModule,
+ kExpectedThread,
+ kExpectedFile,
+ kExpectedLine,
+ kExpectedTimestamp,
+ kExpectedMessage);
+}
+
+TEST(UtilsTest, EncodeLog_EmptyThread) {
+ constexpr int kExpectedLevel = PW_LOG_LEVEL_INFO;
+ constexpr unsigned int kExpectedFlags = 2;
+ constexpr std::string_view kExpectedModule("TST");
+ constexpr std::string_view kExpectedThread;
+ constexpr std::string_view kExpectedFile("proto_test.cc");
+ constexpr int kExpectedLine = 14;
+ constexpr int64_t kExpectedTimestamp = 1;
+ constexpr std::string_view kExpectedMessage("msg");
+ std::byte encode_buffer[64];
+
+ Result<ConstByteSpan> result = EncodeLog(kExpectedLevel,
+ kExpectedFlags,
+ kExpectedModule,
+ kExpectedThread,
kExpectedFile,
kExpectedLine,
kExpectedTimestamp,
@@ -348,6 +458,7 @@ TEST(UtilsTest, EncodeLog_EmptyModule) {
kExpectedLevel,
kExpectedFlags,
kExpectedModule,
+ kExpectedThread,
kExpectedFile,
kExpectedLine,
kExpectedTimestamp,
@@ -358,6 +469,7 @@ TEST(UtilsTest, EncodeLog_EmptyMessage) {
constexpr int kExpectedLevel = PW_LOG_LEVEL_INFO;
constexpr unsigned int kExpectedFlags = 0;
constexpr std::string_view kExpectedModule;
+ constexpr std::string_view kExpectedThread;
constexpr std::string_view kExpectedFile;
constexpr int kExpectedLine = 14;
constexpr int64_t kExpectedTimestamp = 1;
@@ -367,6 +479,7 @@ TEST(UtilsTest, EncodeLog_EmptyMessage) {
Result<ConstByteSpan> result = EncodeLog(kExpectedLevel,
kExpectedFlags,
kExpectedModule,
+ kExpectedThread,
kExpectedFile,
kExpectedLine,
kExpectedTimestamp,
@@ -380,6 +493,7 @@ TEST(UtilsTest, EncodeLog_InsufficientSpace) {
constexpr int kExpectedLevel = PW_LOG_LEVEL_INFO;
constexpr unsigned int kExpectedFlags = 0;
constexpr std::string_view kExpectedModule;
+ constexpr std::string_view kExpectedThread;
constexpr std::string_view kExpectedFile;
constexpr int kExpectedLine = 14;
constexpr int64_t kExpectedTimestamp = 1;
@@ -389,6 +503,7 @@ TEST(UtilsTest, EncodeLog_InsufficientSpace) {
Result<ConstByteSpan> result = EncodeLog(kExpectedLevel,
kExpectedFlags,
kExpectedModule,
+ kExpectedThread,
kExpectedFile,
kExpectedLine,
kExpectedTimestamp,
diff --git a/pw_log/protobuf.rst b/pw_log/protobuf.rst
index 52f630b5c..32f9ba2e0 100644
--- a/pw_log/protobuf.rst
+++ b/pw_log/protobuf.rst
@@ -43,11 +43,11 @@ at a time:
Optionally tokenized text fields
--------------------------------
-Several fields in the ``pw_log`` proto store text. Examples include ``message``
-and ``thread_name``. These fields may contain either plain or tokenized text,
-either of which is represented as a single bytes field. These fields are marked
-with a protocol buffer option so the ``pw_tokenizer.proto`` module can detect
-and detokenize tokenized fields as appropriate.
+Several fields in the ``pw_log`` proto store text. Examples include ``message``,
+``module``, and ``thread``. These fields may contain either plain or tokenized
+text, either of which is represented as a single bytes field. These fields are
+marked with a protocol buffer option so the ``pw_tokenizer.proto`` module can
+detect and detokenize tokenized fields as appropriate.
See :ref:`module-pw_tokenizer-proto` for details.
diff --git a/pw_log/public/pw_log/glog_adapter.h b/pw_log/public/pw_log/glog_adapter.h
new file mode 100644
index 000000000..05116588d
--- /dev/null
+++ b/pw_log/public/pw_log/glog_adapter.h
@@ -0,0 +1,43 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_log/internal/glog_adapter.h"
+#include "pw_preprocessor/concat.h"
+
+// WARNING: Pigweed strongly recommends sticking to printf-style logging instead
+// of C++ stream-style Google Log logging unless absolutely necessary. These
+// macros are only provided for compatibility with non-embedded code. See
+// https://pigweed.dev/pw_log/ for more details.
+
+// A subset of the streaming Google logging (glog) macros are supported:
+// - LOG(glog_level)
+// - LOG_IF(glog_level, condition)
+//
+// The supported glog levels are DEBUG, INFO, WARNING, ERROR, FATAL & DFATAL
+//
+// This means the following are NOT supported:
+// - glog levels other than those listed above
+// - {D,P,SYS}LOG*
+// - {,D}VLOG*
+// - {,D}CHECK*
+// - LOG_EVERY_*, LOG_IF_EVERY_*, LOG_FIRST_N
+#define LOG(glog_level) \
+ _PW_LOG_GLOG(_PW_LOG_GLOG_DECLARATION_##glog_level, \
+ PW_CONCAT(GlogStreamingLog, __COUNTER__))
+
+#define LOG_IF(glog_level, expr) \
+ _PW_LOG_GLOG_IF(_PW_LOG_GLOG_DECLARATION_##glog_level, \
+ expr, \
+ PW_CONCAT(GlogStreamingLog, __COUNTER__))
diff --git a/pw_log/public/pw_log/glog_adapter_config.h b/pw_log/public/pw_log/glog_adapter_config.h
new file mode 100644
index 000000000..3907fd354
--- /dev/null
+++ b/pw_log/public/pw_log/glog_adapter_config.h
@@ -0,0 +1,27 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+// The size of the stack-allocated buffer used by the Google Logging (glog)
+// macros. This only affects the macros provided through pw_log/glog_adapter.h.
+//
+// Pigweed strongly recommends sticking to printf-style logging instead
+// of C++ stream-style Google Log logging unless absolutely necessary. The glog
+// macros are only provided for compatibility with non-embedded code. See
+// https://pigweed.dev/pw_log/ for more details.
+//
+// Undersizing this buffer will result in truncated log messages.
+#ifndef PW_LOG_CFG_GLOG_BUFFER_SIZE_BYTES
+#define PW_LOG_CFG_GLOG_BUFFER_SIZE_BYTES 128
+#endif // PW_LOG_CFG_GLOG_BUFFER_SIZE_BYTES
diff --git a/pw_log/public/pw_log/internal/glog_adapter.h b/pw_log/public/pw_log/internal/glog_adapter.h
new file mode 100644
index 000000000..3e63b90df
--- /dev/null
+++ b/pw_log/public/pw_log/internal/glog_adapter.h
@@ -0,0 +1,91 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_assert/check.h"
+#include "pw_log/glog_adapter_config.h"
+#include "pw_log/levels.h"
+#include "pw_log/log.h"
+#include "pw_string/string_builder.h"
+
+namespace pw::log::internal {
+
+class GlogStreamingLog {
+ public:
+ GlogStreamingLog() = default;
+
+ template <typename T>
+ GlogStreamingLog& operator<<(const T& value) {
+ string_builder_ << value;
+ return *this;
+ }
+
+ protected:
+ pw::StringBuffer<PW_LOG_CFG_GLOG_BUFFER_SIZE_BYTES> string_builder_;
+};
+
+} // namespace pw::log::internal
+
+// Declares a unique GlogStreamingLog class definition with a destructor which
+// matches the desired pw_log_level.
+#define _PW_LOG_GLOG_DECLARATION_PW_LOG(pw_log_level, unique) \
+ class unique : public ::pw::log::internal::GlogStreamingLog { \
+ public: \
+ ~unique() { \
+ PW_HANDLE_LOG( \
+ pw_log_level, PW_LOG_FLAGS, "%s", string_builder_.c_str()); \
+ } \
+ }
+
+// Declares a unique GlogStreamingLog class definition with a destructor which
+// invokes PW_CRASH.
+#define _PW_LOG_GLOG_DECLARATION_PW_CRASH(unique) \
+ class unique : public ::pw::log::internal::GlogStreamingLog { \
+ public: \
+ ~unique() { PW_CRASH("%s", string_builder_.c_str()); } \
+ }
+
+// Dispatching macros to translate the glog level to PW_LOG and PW_CRASH.
+#define _PW_LOG_GLOG_DECLARATION_DEBUG(unique) \
+ _PW_LOG_GLOG_DECLARATION_PW_LOG(PW_LOG_LEVEL_DEBUG, unique)
+
+#define _PW_LOG_GLOG_DECLARATION_INFO(unique) \
+ _PW_LOG_GLOG_DECLARATION_PW_LOG(PW_LOG_LEVEL_INFO, unique)
+
+#define _PW_LOG_GLOG_DECLARATION_WARNING(unique) \
+ _PW_LOG_GLOG_DECLARATION_PW_LOG(PW_LOG_LEVEL_WARN, unique)
+
+#define _PW_LOG_GLOG_DECLARATION_ERROR(unique) \
+ _PW_LOG_GLOG_DECLARATION_PW_LOG(PW_LOG_LEVEL_ERROR, unique)
+
+#define _PW_LOG_GLOG_DECLARATION_FATAL(unique) \
+ _PW_LOG_GLOG_DECLARATION_PW_CRASH(unique)
+
+#if defined(NDEBUG)
+#define _PW_LOG_GLOG_DECLARATION_DFATAL(unique) \
+ _PW_LOG_GLOG_DECLARATION_PW_LOG(PW_LOG_LEVEL_ERROR, unique)
+#else // !defined(NDEBUG)
+#define _PW_LOG_GLOG_DECLARATION_DFATAL(unique) \
+ _PW_LOG_GLOG_DECLARATION_PW_CRASH(unique)
+#endif // defined(NDEBUG)
+
+#define _PW_LOG_GLOG(glog_declaration, unique) \
+ glog_declaration(unique); \
+ unique()
+
+#define _PW_LOG_GLOG_IF(glog_declaration, expr, unique) \
+ glog_declaration(unique); \
+ if (!(expr)) { \
+ } else \
+ unique()
diff --git a/pw_log/public/pw_log/proto_utils.h b/pw_log/public/pw_log/proto_utils.h
index 93a9bdddb..a9bada5d5 100644
--- a/pw_log/public/pw_log/proto_utils.h
+++ b/pw_log/public/pw_log/proto_utils.h
@@ -17,8 +17,10 @@
#include "pw_bytes/span.h"
#include "pw_log/levels.h"
+#include "pw_log/proto/log.pwpb.h"
#include "pw_log_tokenized/metadata.h"
#include "pw_result/result.h"
+#include "pw_status/try.h"
namespace pw::log {
@@ -53,12 +55,22 @@ constexpr inline std::tuple<uint32_t, uint8_t> UnpackLineLevel(
Result<ConstByteSpan> EncodeLog(int level,
unsigned int flags,
std::string_view module_name,
+ std::string_view thread_name,
std::string_view file_name,
int line_number,
int64_t ticks_since_epoch,
std::string_view message,
ByteSpan encode_buffer);
+// Encodes a tokenized message, its metadata, and a timestamp as a log proto.
+// Extra fields can be encoded into the returned encoder. The caller must check
+// the encoder status.
+LogEntry::MemoryEncoder CreateEncoderAndEncodeTokenizedLog(
+ log_tokenized::Metadata metadata,
+ ConstByteSpan tokenized_data,
+ int64_t ticks_since_epoch,
+ ByteSpan encode_buffer);
+
// Convenience functions to convert from tokenized metadata to the log proto
// format.
//
@@ -66,10 +78,16 @@ Result<ConstByteSpan> EncodeLog(int level,
// OK - A byte span containing the encoded log proto.
// RESOURCE_EXHAUSTED - The provided buffer was not large enough to store the
// proto.
-Result<ConstByteSpan> EncodeTokenizedLog(log_tokenized::Metadata metadata,
- ConstByteSpan tokenized_data,
- int64_t ticks_since_epoch,
- ByteSpan encode_buffer);
+inline Result<ConstByteSpan> EncodeTokenizedLog(
+ log_tokenized::Metadata metadata,
+ ConstByteSpan tokenized_data,
+ int64_t ticks_since_epoch,
+ ByteSpan encode_buffer) {
+ LogEntry::MemoryEncoder encoder = CreateEncoderAndEncodeTokenizedLog(
+ metadata, tokenized_data, ticks_since_epoch, encode_buffer);
+ PW_TRY(encoder.status());
+ return ConstByteSpan(encoder);
+}
inline Result<ConstByteSpan> EncodeTokenizedLog(
log_tokenized::Metadata metadata,
@@ -84,4 +102,51 @@ inline Result<ConstByteSpan> EncodeTokenizedLog(
encode_buffer);
}
+// Encodes tokenized message (passed as pointer and size), tokenized metadata,
+// timestamp, and thread name as a log proto.
+//
+// Returns:
+// OK - A byte span containing the encoded log proto.
+// RESOURCE_EXHAUSTED - The provided buffer was not large enough to store the
+// proto.
+inline Result<ConstByteSpan> EncodeTokenizedLog(
+ log_tokenized::Metadata metadata,
+ const uint8_t* tokenized_data,
+ size_t tokenized_data_size,
+ int64_t ticks_since_epoch,
+ ConstByteSpan thread_name,
+ ByteSpan encode_buffer) {
+ LogEntry::MemoryEncoder encoder = CreateEncoderAndEncodeTokenizedLog(
+ metadata,
+ std::as_bytes(std::span(tokenized_data, tokenized_data_size)),
+ ticks_since_epoch,
+ encode_buffer);
+ if (!thread_name.empty()) {
+ encoder.WriteThread(thread_name).IgnoreError();
+ }
+ PW_TRY(encoder.status());
+ return ConstByteSpan(encoder);
+}
+
+// Encodes tokenized message (passed as a byte span), tokenized metadata,
+// timestamp, and thread name as a log proto.
+//
+// Returns:
+// OK - A byte span containing the encoded log proto.
+// RESOURCE_EXHAUSTED - The provided buffer was not large enough to store the
+// proto.
+inline Result<ConstByteSpan> EncodeTokenizedLog(
+ log_tokenized::Metadata metadata,
+ ConstByteSpan tokenized_data,
+ int64_t ticks_since_epoch,
+ ConstByteSpan thread_name,
+ ByteSpan encode_buffer) {
+ LogEntry::MemoryEncoder encoder = CreateEncoderAndEncodeTokenizedLog(
+ metadata, tokenized_data, ticks_since_epoch, encode_buffer);
+ if (!thread_name.empty()) {
+ encoder.WriteThread(thread_name).IgnoreError();
+ }
+ PW_TRY(encoder.status());
+ return ConstByteSpan(encoder);
+}
} // namespace pw::log
diff --git a/pw_log_android/Android.bp b/pw_log_android/Android.bp
new file mode 100644
index 000000000..179249e1a
--- /dev/null
+++ b/pw_log_android/Android.bp
@@ -0,0 +1,29 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+cc_library {
+ name: "libpw_log_android",
+ vendor_available: true,
+ cpp_std: "c++2a",
+ export_include_dirs: [
+ "public",
+ "public_overrides",
+ ],
+ export_static_lib_headers: [
+ "libpw_log",
+ ],
+ static_libs: [
+ "libpw_log",
+ ],
+}
diff --git a/pw_log_android/BUILD.gn b/pw_log_android/BUILD.gn
new file mode 100644
index 000000000..8316fd84a
--- /dev/null
+++ b/pw_log_android/BUILD.gn
@@ -0,0 +1,15 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# Android only uses Soong Blueprints, so this file is empty.
diff --git a/pw_log_android/public/pw_log_android/log_android.h b/pw_log_android/public/pw_log_android/log_android.h
new file mode 100644
index 000000000..410f149e6
--- /dev/null
+++ b/pw_log_android/public/pw_log_android/log_android.h
@@ -0,0 +1,43 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <log/log.h>
+
+// This backend supports PW_LOG_MODULE_NAME as a fallback for Android logging's
+// LOG_TAG if and only if LOG_TAG is not already set.
+#if !defined(LOG_TAG) && defined(PW_LOG_MODULE_NAME)
+#define LOG_TAG PW_LOG_MODULE_NAME
+#endif
+
+// #define PW_LOG_LEVEL_DEBUG 1
+#define _PW_LOG_ANDROID_LEVEL_1(...) ALOGD(__VA_ARGS__)
+
+// #define PW_LOG_LEVEL_INFO 2
+#define _PW_LOG_ANDROID_LEVEL_2(...) ALOGI(__VA_ARGS__)
+
+// #define PW_LOG_LEVEL_WARN 3
+#define _PW_LOG_ANDROID_LEVEL_3(...) ALOGW(__VA_ARGS__)
+
+// #define PW_LOG_LEVEL_ERROR 4
+#define _PW_LOG_ANDROID_LEVEL_4(...) ALOGE(__VA_ARGS__)
+
+// #define PW_LOG_LEVEL_CRITICAL 5
+#define _PW_LOG_ANDROID_LEVEL_5(...) ALOGE(__VA_ARGS__)
+
+// #define PW_LOG_LEVEL_FATAL 7
+#define _PW_LOG_ANDROID_LEVEL_7(...) LOG_ALWAYS_FATAL(__VA_ARGS__)
+
+#define PW_HANDLE_LOG(level, flags, ...) \
+ _PW_LOG_ANDROID_LEVEL_##level(__VA_ARGS__)
diff --git a/pw_log_android/public_overrides/pw_log_backend/log_backend.h b/pw_log_android/public_overrides/pw_log_backend/log_backend.h
new file mode 100644
index 000000000..dab27b48d
--- /dev/null
+++ b/pw_log_android/public_overrides/pw_log_backend/log_backend.h
@@ -0,0 +1,16 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_log_android/log_android.h"
diff --git a/pw_log_rpc/BUILD.bazel b/pw_log_rpc/BUILD.bazel
index 78031bff2..dab90830d 100644
--- a/pw_log_rpc/BUILD.bazel
+++ b/pw_log_rpc/BUILD.bazel
@@ -34,8 +34,8 @@ pw_cc_library(
":log_filter",
":rpc_log_drain",
"//pw_log",
- "//pw_log:log_pwpb",
- "//pw_log:protos.raw_rpc",
+ "//pw_log:log_proto_cc.pwpb",
+ "//pw_log:log_proto_cc.raw_rpc",
],
)
@@ -47,8 +47,8 @@ pw_cc_library(
deps = [
":log_filter",
"//pw_log",
- "//pw_log:log_pwpb",
- "//pw_log:protos.raw_rpc",
+ "//pw_log:log_proto_cc.pwpb",
+ "//pw_log:log_proto_cc.raw_rpc",
"//pw_protobuf",
"//pw_protobuf:bytes_utils",
],
@@ -56,20 +56,21 @@ pw_cc_library(
pw_cc_library(
name = "log_filter",
- srcs = ["log_filter.cc"],
+ srcs = [
+ "log_filter.cc",
+ "public/pw_log_rpc/internal/config.h",
+ ],
hdrs = [
"public/pw_log_rpc/log_filter.h",
"public/pw_log_rpc/log_filter_map.h",
],
includes = ["public"],
deps = [
- "public/pw_log_rpc/internal/config.h",
"//pw_assert",
"//pw_bytes",
"//pw_containers:vector",
"//pw_log",
- "//pw_log:log_pwpb",
- "//pw_log:protos.pwpb",
+ "//pw_log:log_proto_cc.pwpb",
"//pw_protobuf",
"//pw_status",
],
@@ -77,7 +78,10 @@ pw_cc_library(
pw_cc_library(
name = "rpc_log_drain",
- srcs = ["rpc_log_drain.cc"],
+ srcs = [
+ "public/pw_log_rpc/internal/config.h",
+ "rpc_log_drain.cc",
+ ],
hdrs = [
"public/pw_log_rpc/rpc_log_drain.h",
"public/pw_log_rpc/rpc_log_drain_map.h",
@@ -86,8 +90,9 @@ pw_cc_library(
deps = [
":log_filter",
"//pw_assert",
- "//pw_log:log_pwpb",
- "//pw_log:protos.raw_rpc",
+ "//pw_function",
+ "//pw_log:log_proto_cc.pwpb",
+ "//pw_log:log_proto_cc.raw_rpc",
"//pw_multisink",
"//pw_protobuf",
"//pw_result",
@@ -139,9 +144,9 @@ pw_cc_test(
":test_utils",
"//pw_containers:vector",
"//pw_log",
- "//pw_log:log_pwpb",
+ "//pw_log:log_proto_cc.pwpb",
"//pw_log:proto_utils",
- "//pw_log_tokenized:metadata",
+ "//pw_log_tokenized:headers",
"//pw_protobuf",
"//pw_protobuf:bytes_utils",
"//pw_result",
@@ -157,7 +162,7 @@ pw_cc_test(
deps = [
":log_filter",
":log_filter_service",
- "//pw_log:log_pwpb",
+ "//pw_log:log_proto_cc.pwpb",
"//pw_protobuf",
"//pw_protobuf:bytes_utils",
"//pw_result",
@@ -171,9 +176,9 @@ pw_cc_test(
srcs = ["log_filter_test.cc"],
deps = [
":log_filter",
- "//pw_log:log_pwpb",
+ "//pw_log:log_proto_cc.pwpb",
"//pw_log:proto_utils",
- "//pw_log_tokenized:metadata",
+ "//pw_log_tokenized:headers",
"//pw_result",
"//pw_status",
"//pw_unit_test",
diff --git a/pw_log_rpc/BUILD.gn b/pw_log_rpc/BUILD.gn
index 79486fd5c..bfe2f6adf 100644
--- a/pw_log_rpc/BUILD.gn
+++ b/pw_log_rpc/BUILD.gn
@@ -99,9 +99,11 @@ pw_source_set("rpc_log_drain") {
]
sources = [ "rpc_log_drain.cc" ]
public_deps = [
+ ":config",
":log_filter",
"$dir_pw_assert",
"$dir_pw_chrono:system_clock",
+ "$dir_pw_function",
"$dir_pw_log:protos.pwpb",
"$dir_pw_log:protos.raw_rpc",
"$dir_pw_multisink",
diff --git a/pw_log_rpc/OWNERS b/pw_log_rpc/OWNERS
index 601cb2994..df7095dd1 100644
--- a/pw_log_rpc/OWNERS
+++ b/pw_log_rpc/OWNERS
@@ -1,3 +1,4 @@
+cachinchilla@google.com
frolv@google.com
hepler@google.com
keir@google.com
diff --git a/pw_log_rpc/docs.rst b/pw_log_rpc/docs.rst
index c34b9bd9f..cf98d85e1 100644
--- a/pw_log_rpc/docs.rst
+++ b/pw_log_rpc/docs.rst
@@ -47,7 +47,9 @@ log should be kept or dropped. This callback can be ``Filter::ShouldDropLog``.
Depending on the product's requirements, create a thread to flush all
``RpcLogDrain``\s or one thread per drain. The thread(s) must continuously call
``RpcLogDrain::Flush()`` to pull entries from the ``MultiSink`` and send them to
-the log listeners.
+the log listeners. Alternatively, use ``RpcLogDrain::Trickle`` to control the
+rate of log entries streamed. Optionally, set up a callback to notify the
+thread(s) when a drain is open.
Logging over RPC diagrams
=========================
@@ -190,6 +192,9 @@ the output buffers if they don't have sufficient headroom.
Calling ``OpenUnrequestedLogStream()`` is a convenient way to set up a log
stream that is started without the need to receive an RCP request for logs.
+The ``RpcLogDrainThread`` sets up a callback for each drain, to be notified when
+a drain is opened and flushing must resume.
+
---------
Log Drops
---------
@@ -200,21 +205,50 @@ possible. Logs can be dropped when
- They don't pass a filter. This is the expected behavior, so filtered logs will
not be tracked as dropped logs.
- The drains are too slow to keep up. In this case, the ring buffer is full of
- undrained entries; when new logs come in, old entries are dropped. [#f1]_
+ undrained entries; when new logs come in, old entries are dropped. The log
+ stream will contain a ``LogEntry`` message with the number of dropped logs.
+ E.g.
+
+ Dropped 15 logs due to slow reader
+
- There is an error creating or adding a new log entry, and the ring buffer is
- notified that the log had to be dropped. [#f1]_
-- A log entry is too large for the outbound buffer. [#f2]_
-- There are detected errors transmitting log entries. [#f2]_
+ notified that the log had to be dropped. The log stream will contain a
+ ``LogEntry`` message with the number of dropped logs.
+ E.g.
+
+ Dropped 15 logs due to slow reader
+
+- A log entry is too large for the stack buffer. The log stream will contain
+ an error message with the drop count. Provide a log buffer that fits the
+ largest entry added to the MultiSink to avoid this error.
+ E.g.
+
+ Dropped 1 log due to stack buffer too small
+
+- A log entry is too large for the outbound buffer. The log stream will contain
+ an error message with the drop count. Provide a log buffer that fits the
+ largest entry added to the MultiSink to avoid this error.
+ E.g.
+
+ Dropped 1 log due to outbound buffer too small
+
+- There are detected errors transmitting log entries. The log stream will
+ contain a ``LogEntry`` with an error message and the number of dropped logs
+ the next time the stream is flushed only if the drain's error handling is set
+ to close the stream on error.
+ E.g.
+
+ Dropped 10 logs due to writer error
+
- There are undetected errors transmitting or receiving log entries, such as an
- interface interruption. [#f3]_
-
-.. [#f1] The log stream will contain a ``LogEntry`` message with the number of
- dropped logs.
-.. [#f2] The log stream will contain a ``LogEntry`` message with the number of
- dropped logs the next time the stream is flushed only if the drain's
- error handling is set to close the stream on error.
-.. [#f3] Clients can calculate the number of logs lost in transit using the
- sequence ID and number of entries in each stream packet.
+ interface interruption. Clients can calculate the number of logs lost in
+ transit using the sequence ID and number of entries in each stream packet.
+ E.g.
+
+ Dropped 50 logs due to transmission error
+
+The drop count is combined when possible, and reported only when an entry that
+passes any filters is going to be sent.
-------------
Log Filtering
diff --git a/pw_log_rpc/log_filter_service.cc b/pw_log_rpc/log_filter_service.cc
index b56f3968e..65e397411 100644
--- a/pw_log_rpc/log_filter_service.cc
+++ b/pw_log_rpc/log_filter_service.cc
@@ -19,33 +19,34 @@
#include "pw_protobuf/decoder.h"
namespace pw::log_rpc {
-StatusWithSize FilterService::SetFilter(ConstByteSpan request, ByteSpan) {
+
+Status FilterService::SetFilterImpl(ConstByteSpan request) {
protobuf::Decoder decoder(request);
- PW_TRY_WITH_SIZE(decoder.Next());
+ PW_TRY(decoder.Next());
if (static_cast<log::SetFilterRequest::Fields>(decoder.FieldNumber()) !=
log::SetFilterRequest::Fields::FILTER_ID) {
- return StatusWithSize::InvalidArgument();
+ return Status::InvalidArgument();
}
ConstByteSpan filter_id;
- PW_TRY_WITH_SIZE(decoder.ReadBytes(&filter_id));
+ PW_TRY(decoder.ReadBytes(&filter_id));
Result<Filter*> filter = filter_map_.GetFilterFromId(filter_id);
if (!filter.ok()) {
- return StatusWithSize::NotFound();
+ return Status::NotFound();
}
- PW_TRY_WITH_SIZE(decoder.Next());
+ PW_TRY(decoder.Next());
ConstByteSpan filter_buffer;
if (static_cast<log::SetFilterRequest::Fields>(decoder.FieldNumber()) !=
log::SetFilterRequest::Fields::FILTER) {
- return StatusWithSize::InvalidArgument();
+ return Status::InvalidArgument();
}
- PW_TRY_WITH_SIZE(decoder.ReadBytes(&filter_buffer));
- PW_TRY_WITH_SIZE(filter.value()->UpdateRulesFromProto(filter_buffer));
- return StatusWithSize();
+ PW_TRY(decoder.ReadBytes(&filter_buffer));
+
+ return filter.value()->UpdateRulesFromProto(filter_buffer);
}
-StatusWithSize FilterService::GetFilter(ConstByteSpan request,
- ByteSpan response) {
+StatusWithSize FilterService::GetFilterImpl(ConstByteSpan request,
+ ByteSpan response) {
protobuf::Decoder decoder(request);
PW_TRY_WITH_SIZE(decoder.Next());
if (static_cast<log::GetFilterRequest::Fields>(decoder.FieldNumber()) !=
@@ -75,7 +76,7 @@ StatusWithSize FilterService::GetFilter(ConstByteSpan request,
return StatusWithSize(encoder.size());
}
-StatusWithSize FilterService::ListFilterIds(ConstByteSpan, ByteSpan response) {
+StatusWithSize FilterService::ListFilterIdsImpl(ByteSpan response) {
log::FilterIdListResponse::MemoryEncoder encoder(response);
for (auto& filter : filter_map_.filters()) {
PW_TRY_WITH_SIZE(encoder.WriteFilterId(filter.id()));
diff --git a/pw_log_rpc/log_filter_service_test.cc b/pw_log_rpc/log_filter_service_test.cc
index c42777974..d0d9afc24 100644
--- a/pw_log_rpc/log_filter_service_test.cc
+++ b/pw_log_rpc/log_filter_service_test.cc
@@ -60,6 +60,7 @@ TEST_F(FilterServiceTest, GetFilterIds) {
PW_RAW_TEST_METHOD_CONTEXT(FilterService, ListFilterIds, 1)
context(filter_map_);
context.call({});
+ ASSERT_EQ(OkStatus(), context.status());
ASSERT_TRUE(context.done());
ASSERT_EQ(context.responses().size(), 1u);
protobuf::Decoder decoder(context.responses()[0]);
@@ -80,6 +81,7 @@ TEST_F(FilterServiceTest, GetFilterIds) {
PW_RAW_TEST_METHOD_CONTEXT(FilterService, ListFilterIds, 1)
no_filter_context(empty_filter_map);
no_filter_context.call({});
+ ASSERT_EQ(OkStatus(), no_filter_context.status());
ASSERT_TRUE(no_filter_context.done());
ASSERT_EQ(no_filter_context.responses().size(), 1u);
protobuf::Decoder no_filter_decoder(no_filter_context.responses()[0]);
@@ -169,6 +171,7 @@ TEST_F(FilterServiceTest, SetFilterRules) {
PW_RAW_TEST_METHOD_CONTEXT(FilterService, SetFilter, 1)
context(filter_map_);
context.call(request.value());
+ ASSERT_EQ(OkStatus(), context.status());
size_t i = 0;
for (const auto& rule : filters_[0].rules()) {
@@ -214,6 +217,7 @@ TEST_F(FilterServiceTest, SetFilterRulesWhenUsedByDrain) {
PW_RAW_TEST_METHOD_CONTEXT(FilterService, SetFilter, 1)
context(filter_map_);
context.call(request.value());
+ ASSERT_EQ(OkStatus(), context.status());
size_t i = 0;
for (const auto& rule : filter.rules()) {
@@ -224,6 +228,8 @@ TEST_F(FilterServiceTest, SetFilterRulesWhenUsedByDrain) {
PW_RAW_TEST_METHOD_CONTEXT(FilterService, SetFilter, 1)
context_no_filter(filter_map_);
context_no_filter.call({});
+ EXPECT_EQ(Status::OutOfRange(), context_no_filter.status());
+
i = 0;
for (const auto& rule : filter.rules()) {
VerifyRule(rule, new_filter_rules[i++]);
@@ -267,6 +273,7 @@ TEST_F(FilterServiceTest, SetFilterRulesWhenUsedByDrain) {
PW_RAW_TEST_METHOD_CONTEXT(FilterService, SetFilter, 1)
context_new_filter(filter_map_);
context_new_filter.call(second_filter_request.value());
+ ASSERT_EQ(OkStatus(), context.status());
i = 0;
for (const auto& rule : filter.rules()) {
@@ -331,9 +338,10 @@ TEST_F(FilterServiceTest, GetFilterRules) {
std::byte request_buffer[64];
log::GetFilterRequest::MemoryEncoder encoder(request_buffer);
- encoder.WriteFilterId(filter_id1_);
+ ASSERT_EQ(OkStatus(), encoder.WriteFilterId(filter_id1_));
const auto request = ConstByteSpan(encoder);
context.call(request);
+ ASSERT_EQ(OkStatus(), context.status());
ASSERT_TRUE(context.done());
ASSERT_EQ(context.responses().size(), 1u);
@@ -354,6 +362,7 @@ TEST_F(FilterServiceTest, GetFilterRules) {
PW_RAW_TEST_METHOD_CONTEXT(FilterService, GetFilter, 1)
context2(filter_map_);
context2.call(request);
+ ASSERT_EQ(OkStatus(), context2.status());
ASSERT_EQ(context2.responses().size(), 1u);
protobuf::Decoder decoder2(context2.responses()[0]);
VerifyFilterRules(decoder2, rules1_);
@@ -369,6 +378,7 @@ TEST_F(FilterServiceTest, GetFilterRules) {
PW_RAW_TEST_METHOD_CONTEXT(FilterService, GetFilter, 1)
context3(filter_map_);
context3.call(request);
+ ASSERT_EQ(OkStatus(), context3.status());
ASSERT_EQ(context3.responses().size(), 1u);
protobuf::Decoder decoder3(context3.responses()[0]);
VerifyFilterRules(decoder3, rules1_);
diff --git a/pw_log_rpc/log_filter_test.cc b/pw_log_rpc/log_filter_test.cc
index fa412158e..be1fcdd20 100644
--- a/pw_log_rpc/log_filter_test.cc
+++ b/pw_log_rpc/log_filter_test.cc
@@ -47,6 +47,7 @@ Result<ConstByteSpan> EncodeLogEntry(std::string_view message,
return log::EncodeTokenizedLog(metadata,
std::as_bytes(std::span(message)),
/*ticks_since_epoch=*/0,
+ /*thread_name=*/{},
buffer);
}
diff --git a/pw_log_rpc/log_service_test.cc b/pw_log_rpc/log_service_test.cc
index dc8fe44ac..1072ec55b 100644
--- a/pw_log_rpc/log_service_test.cc
+++ b/pw_log_rpc/log_service_test.cc
@@ -46,7 +46,7 @@ using log::pw_rpc::raw::Logs;
constexpr size_t kMaxMessageSize = 50;
constexpr size_t kMaxLogEntrySize =
- RpcLogDrain::kMinEntrySizeWithoutPayload + kMaxMessageSize;
+ RpcLogDrain::kMinEntryBufferSize + kMaxMessageSize;
static_assert(RpcLogDrain::kMinEntryBufferSize < kMaxLogEntrySize);
constexpr size_t kMultiSinkBufferSize = kMaxLogEntrySize * 10;
constexpr size_t kMaxDrains = 3;
@@ -57,7 +57,8 @@ constexpr char kMessage[] = "message";
constexpr char kLongMessage[] =
"This is a long log message that will be dropped.";
static_assert(sizeof(kLongMessage) < kMaxMessageSize);
-static_assert(sizeof(kLongMessage) > RpcLogDrain::kMinEntryBufferSize);
+static_assert(sizeof(kLongMessage) + RpcLogDrain::kMinEntrySizeWithoutPayload >
+ RpcLogDrain::kMinEntryBufferSize);
std::array<std::byte, 1> rpc_request_buffer;
constexpr auto kSampleMetadata =
log_tokenized::Metadata::Set<PW_LOG_LEVEL_INFO, 123, 0x03, __LINE__>();
@@ -94,6 +95,7 @@ class LogServiceTest : public ::testing::Test {
log::EncodeTokenizedLog(metadata,
std::as_bytes(std::span(message)),
timestamp,
+ /*thread_name=*/{},
entry_encode_buffer_);
PW_TRY_WITH_SIZE(encoded_log_result.status());
multisink_.HandleEntry(encoded_log_result.value());
@@ -209,7 +211,7 @@ TEST_F(LogServiceTest, StartAndEndStream) {
// Not done until the stream is finished.
ASSERT_FALSE(context.done());
- active_drain.Close();
+ EXPECT_EQ(OkStatus(), active_drain.Close());
ASSERT_TRUE(context.done());
EXPECT_EQ(context.status(), OkStatus());
@@ -217,19 +219,22 @@ TEST_F(LogServiceTest, StartAndEndStream) {
EXPECT_GE(context.responses().size(), 1u);
// Verify data in responses.
- Vector<TestLogEntry, total_entries> message_stack;
+ Vector<TestLogEntry, total_entries> expected_messages;
for (size_t i = 0; i < total_entries; ++i) {
- message_stack.push_back({.metadata = kSampleMetadata,
- .timestamp = kSampleTimestamp,
- .tokenized_data = std::as_bytes(
- std::span(std::string_view(kMessage)))});
+ expected_messages.push_back({.metadata = kSampleMetadata,
+ .timestamp = kSampleTimestamp,
+ .tokenized_data = std::as_bytes(
+ std::span(std::string_view(kMessage)))});
}
size_t entries_found = 0;
uint32_t drop_count_found = 0;
for (auto& response : context.responses()) {
protobuf::Decoder entry_decoder(response);
- entries_found += VerifyLogEntries(
- entry_decoder, message_stack, entries_found, drop_count_found);
+ VerifyLogEntries(entry_decoder,
+ expected_messages,
+ entries_found,
+ entries_found,
+ drop_count_found);
}
EXPECT_EQ(entries_found, total_entries);
EXPECT_EQ(drop_count_found, 0u);
@@ -243,28 +248,44 @@ TEST_F(LogServiceTest, HandleDropped) {
// Add log entries.
const size_t total_entries = 5;
+ const size_t entries_before_drop = 1;
const uint32_t total_drop_count = 2;
- AddLogEntries(total_entries, kMessage, kSampleMetadata, kSampleTimestamp);
+
+ // Force a drop entry in between entries.
+ AddLogEntries(
+ entries_before_drop, kMessage, kSampleMetadata, kSampleTimestamp);
multisink_.HandleDropped(total_drop_count);
+ AddLogEntries(total_entries - entries_before_drop,
+ kMessage,
+ kSampleMetadata,
+ kSampleTimestamp);
// Request logs.
context.call(rpc_request_buffer);
EXPECT_EQ(active_drain.Flush(encoding_buffer_), OkStatus());
- active_drain.Close();
+ EXPECT_EQ(OkStatus(), active_drain.Close());
ASSERT_EQ(context.status(), OkStatus());
// There is at least 1 response with multiple log entries packed.
ASSERT_GE(context.responses().size(), 1u);
- // Add create expected messages in a stack to match the order they arrive
- // in.
- Vector<TestLogEntry, total_entries + 1> message_stack;
- message_stack.push_back(
- {.metadata = kDropMessageMetadata, .dropped = total_drop_count});
- for (size_t i = 0; i < total_entries; ++i) {
- message_stack.push_back({.metadata = kSampleMetadata,
- .timestamp = kSampleTimestamp,
- .tokenized_data = std::as_bytes(
- std::span(std::string_view(kMessage)))});
+ Vector<TestLogEntry, total_entries + 1> expected_messages;
+ size_t i = 0;
+ for (; i < entries_before_drop; ++i) {
+ expected_messages.push_back({.metadata = kSampleMetadata,
+ .timestamp = kSampleTimestamp,
+ .tokenized_data = std::as_bytes(
+ std::span(std::string_view(kMessage)))});
+ }
+ expected_messages.push_back(
+ {.metadata = kDropMessageMetadata,
+ .dropped = total_drop_count,
+ .tokenized_data = std::as_bytes(
+ std::span(std::string_view(RpcLogDrain::kIngressErrorMessage)))});
+ for (; i < total_entries; ++i) {
+ expected_messages.push_back({.metadata = kSampleMetadata,
+ .timestamp = kSampleTimestamp,
+ .tokenized_data = std::as_bytes(
+ std::span(std::string_view(kMessage)))});
}
// Verify data in responses.
@@ -272,45 +293,123 @@ TEST_F(LogServiceTest, HandleDropped) {
uint32_t drop_count_found = 0;
for (auto& response : context.responses()) {
protobuf::Decoder entry_decoder(response);
- entries_found += VerifyLogEntries(
- entry_decoder, message_stack, entries_found, drop_count_found);
+ VerifyLogEntries(entry_decoder,
+ expected_messages,
+ entries_found,
+ entries_found,
+ drop_count_found);
}
EXPECT_EQ(entries_found, total_entries);
EXPECT_EQ(drop_count_found, total_drop_count);
}
-TEST_F(LogServiceTest, HandleSmallBuffer) {
+TEST_F(LogServiceTest, HandleDroppedBetweenFilteredOutLogs) {
+ RpcLogDrain& active_drain = drains_[0];
+ const uint32_t drain_channel_id = active_drain.channel_id();
+ LOG_SERVICE_METHOD_CONTEXT context(drain_map_);
+ context.set_channel_id(drain_channel_id);
+ // Set filter to drop INFO+ and keep DEBUG logs
+ rules1_[0].action = Filter::Rule::Action::kDrop;
+ rules1_[0].level_greater_than_or_equal = log::FilterRule::Level::INFO_LEVEL;
+
+ // Add log entries.
+ const size_t total_entries = 5;
+ const uint32_t total_drop_count = total_entries - 1;
+
+ // Force a drop entry in between entries that will be filtered out.
+ for (size_t i = 1; i < total_entries; ++i) {
+ ASSERT_EQ(
+ OkStatus(),
+ AddLogEntry(kMessage, kSampleMetadata, kSampleTimestamp).status());
+ multisink_.HandleDropped(1);
+ }
+ // Add message that won't be filtered out.
+ constexpr auto metadata =
+ log_tokenized::Metadata::Set<PW_LOG_LEVEL_DEBUG, 0, 0, __LINE__>();
+ ASSERT_EQ(OkStatus(),
+ AddLogEntry(kMessage, metadata, kSampleTimestamp).status());
+
+ // Request logs.
+ context.call(rpc_request_buffer);
+ EXPECT_EQ(active_drain.Flush(encoding_buffer_), OkStatus());
+ EXPECT_EQ(OkStatus(), active_drain.Close());
+ ASSERT_EQ(context.status(), OkStatus());
+ // There is at least 1 response with multiple log entries packed.
+ ASSERT_GE(context.responses().size(), 1u);
+
+ Vector<TestLogEntry, 2> expected_messages;
+ expected_messages.push_back(
+ {.metadata = kDropMessageMetadata,
+ .dropped = total_drop_count,
+ .tokenized_data = std::as_bytes(
+ std::span(std::string_view(RpcLogDrain::kIngressErrorMessage)))});
+ expected_messages.push_back(
+ {.metadata = metadata,
+ .timestamp = kSampleTimestamp,
+ .tokenized_data = std::as_bytes(std::span(std::string_view(kMessage)))});
+
+ // Verify data in responses.
+ size_t entries_found = 0;
+ uint32_t drop_count_found = 0;
+ for (auto& response : context.responses()) {
+ protobuf::Decoder entry_decoder(response);
+ VerifyLogEntries(entry_decoder,
+ expected_messages,
+ entries_found,
+ entries_found,
+ drop_count_found);
+ }
+ EXPECT_EQ(entries_found, 1u);
+ EXPECT_EQ(drop_count_found, total_drop_count);
+}
+
+TEST_F(LogServiceTest, HandleSmallLogEntryBuffer) {
LOG_SERVICE_METHOD_CONTEXT context(drain_map_);
context.set_channel_id(kSmallBufferDrainId);
auto small_buffer_drain =
drain_map_.GetDrainFromChannelId(kSmallBufferDrainId);
ASSERT_TRUE(small_buffer_drain.ok());
- // Add log entries.
+ // Add long entries that don't fit the drain's log entry buffer, except for
+ // one, since drop count messages are only sent when a log entry can be sent.
const size_t total_entries = 5;
- const uint32_t total_drop_count = total_entries;
- AddLogEntries(total_entries, kLongMessage, kSampleMetadata, kSampleTimestamp);
+ const uint32_t total_drop_count = total_entries - 1;
+ AddLogEntries(
+ total_drop_count, kLongMessage, kSampleMetadata, kSampleTimestamp);
+ EXPECT_EQ(OkStatus(),
+ AddLogEntry(kMessage, kSampleMetadata, kSampleTimestamp).status());
+
// Request logs.
context.call(rpc_request_buffer);
EXPECT_EQ(small_buffer_drain.value()->Flush(encoding_buffer_), OkStatus());
EXPECT_EQ(small_buffer_drain.value()->Close(), OkStatus());
ASSERT_EQ(context.status(), OkStatus());
- ASSERT_GE(context.responses().size(), 1u);
+ ASSERT_EQ(context.responses().size(), 1u);
- Vector<TestLogEntry, total_entries + 1> message_stack;
- message_stack.push_back(
- {.metadata = kDropMessageMetadata, .dropped = total_drop_count});
+ Vector<TestLogEntry, total_entries + 1> expected_messages;
+ expected_messages.push_back(
+ {.metadata = kDropMessageMetadata,
+ .dropped = total_drop_count,
+ .tokenized_data = std::as_bytes(std::span(
+ std::string_view(RpcLogDrain::kSmallStackBufferErrorMessage)))});
+ expected_messages.push_back(
+ {.metadata = kSampleMetadata,
+ .timestamp = kSampleTimestamp,
+ .tokenized_data = std::as_bytes(std::span(std::string_view(kMessage)))});
- // Verify data in responses.
+ // Expect one drop message with the total drop count, and the only message
+ // that fits the buffer.
size_t entries_found = 0;
uint32_t drop_count_found = 0;
for (auto& response : context.responses()) {
protobuf::Decoder entry_decoder(response);
- entries_found += VerifyLogEntries(
- entry_decoder, message_stack, entries_found, drop_count_found);
+ VerifyLogEntries(entry_decoder,
+ expected_messages,
+ entries_found,
+ entries_found,
+ drop_count_found);
}
- // No messages fit the buffer, expect a drop message.
- EXPECT_EQ(entries_found, 0u);
+ EXPECT_EQ(entries_found, 1u);
EXPECT_EQ(drop_count_found, total_drop_count);
}
@@ -366,7 +465,7 @@ TEST_F(LogServiceTest, LargeLogEntry) {
context.set_channel_id(drain_channel_id);
context.call(rpc_request_buffer);
ASSERT_EQ(active_drain.Flush(encoding_buffer_), OkStatus());
- active_drain.Close();
+ EXPECT_EQ(OkStatus(), active_drain.Close());
ASSERT_EQ(context.status(), OkStatus());
ASSERT_EQ(context.responses().size(), 1u);
@@ -423,19 +522,22 @@ TEST_F(LogServiceTest, InterruptedLogStreamSendsDropCount) {
ASSERT_EQ(output.payloads<Logs::Listen>().size(), successful_packets_sent);
// Verify data in responses.
- Vector<TestLogEntry, max_entries> message_stack;
+ Vector<TestLogEntry, max_entries> expected_messages;
for (size_t i = 0; i < total_entries; ++i) {
- message_stack.push_back({.metadata = kSampleMetadata,
- .timestamp = kSampleTimestamp,
- .tokenized_data = std::as_bytes(
- std::span(std::string_view(kMessage)))});
+ expected_messages.push_back({.metadata = kSampleMetadata,
+ .timestamp = kSampleTimestamp,
+ .tokenized_data = std::as_bytes(
+ std::span(std::string_view(kMessage)))});
}
size_t entries_found = 0;
uint32_t drop_count_found = 0;
for (auto& response : output.payloads<Logs::Listen>()) {
protobuf::Decoder entry_decoder(response);
- entries_found += VerifyLogEntries(
- entry_decoder, message_stack, entries_found, drop_count_found);
+ VerifyLogEntries(entry_decoder,
+ expected_messages,
+ entries_found,
+ entries_found,
+ drop_count_found);
}
// Verify that not all the entries were sent.
@@ -451,30 +553,37 @@ TEST_F(LogServiceTest, InterruptedLogStreamSendsDropCount) {
EXPECT_EQ(drain.value()->Open(writer), OkStatus());
EXPECT_EQ(drain.value()->Flush(encoding_buffer_), OkStatus());
- // Add expected messages to the stack in the reverse order they are
- // received.
- message_stack.clear();
// One full packet was dropped. Since all messages are the same length,
// there are entries_found / successful_packets_sent per packet.
const uint32_t total_drop_count = entries_found / successful_packets_sent;
+ Vector<TestLogEntry, max_entries> expected_messages_after_reset;
+ expected_messages_after_reset.push_back(
+ {.metadata = kDropMessageMetadata,
+ .dropped = total_drop_count,
+ .tokenized_data = std::as_bytes(
+ std::span(std::string_view(RpcLogDrain::kWriterErrorMessage)))});
+
const uint32_t remaining_entries = total_entries - total_drop_count;
for (size_t i = 0; i < remaining_entries; ++i) {
- message_stack.push_back({.metadata = kSampleMetadata,
- .timestamp = kSampleTimestamp,
- .tokenized_data = std::as_bytes(
- std::span(std::string_view(kMessage)))});
+ expected_messages_after_reset.push_back(
+ {.metadata = kSampleMetadata,
+ .timestamp = kSampleTimestamp,
+ .tokenized_data =
+ std::as_bytes(std::span(std::string_view(kMessage)))});
}
- message_stack.push_back(
- {.metadata = kDropMessageMetadata, .dropped = total_drop_count});
+ size_t entries_found_after_reset = 0;
for (auto& response : output.payloads<Logs::Listen>()) {
protobuf::Decoder entry_decoder(response);
- entries_found += VerifyLogEntries(entry_decoder,
- message_stack,
- entries_found + total_drop_count,
- drop_count_found);
+ uint32_t expected_sequence_id =
+ entries_found + entries_found_after_reset + total_drop_count;
+ VerifyLogEntries(entry_decoder,
+ expected_messages_after_reset,
+ expected_sequence_id,
+ entries_found_after_reset,
+ drop_count_found);
}
- EXPECT_EQ(entries_found, remaining_entries);
+ EXPECT_EQ(entries_found + entries_found_after_reset, remaining_entries);
EXPECT_EQ(drop_count_found, total_drop_count);
}
@@ -529,12 +638,12 @@ TEST_F(LogServiceTest, InterruptedLogStreamIgnoresErrors) {
// Verify that all messages were sent.
const uint32_t total_drop_count = total_entries - entries_found;
- Vector<TestLogEntry, max_entries> message_stack;
+ Vector<TestLogEntry, max_entries> expected_messages;
for (size_t i = 0; i < entries_found; ++i) {
- message_stack.push_back({.metadata = kSampleMetadata,
- .timestamp = kSampleTimestamp,
- .tokenized_data = std::as_bytes(
- std::span(std::string_view(kMessage)))});
+ expected_messages.push_back({.metadata = kSampleMetadata,
+ .timestamp = kSampleTimestamp,
+ .tokenized_data = std::as_bytes(
+ std::span(std::string_view(kMessage)))});
}
entries_found = 0;
@@ -542,15 +651,19 @@ TEST_F(LogServiceTest, InterruptedLogStreamIgnoresErrors) {
uint32_t i = 0;
for (; i < error_on_packet_count; ++i) {
protobuf::Decoder entry_decoder(output.payloads<Logs::Listen>()[i]);
- entries_found += VerifyLogEntries(
- entry_decoder, message_stack, entries_found, drop_count_found);
+ VerifyLogEntries(entry_decoder,
+ expected_messages,
+ entries_found,
+ entries_found,
+ drop_count_found);
}
for (; i < output.payloads<Logs::Listen>().size(); ++i) {
protobuf::Decoder entry_decoder(output.payloads<Logs::Listen>()[i]);
- entries_found += VerifyLogEntries(entry_decoder,
- message_stack,
- entries_found + total_drop_count,
- drop_count_found);
+ VerifyLogEntries(entry_decoder,
+ expected_messages,
+ entries_found + total_drop_count,
+ entries_found,
+ drop_count_found);
}
// This drain ignores errors and thus doesn't report drops on its own.
EXPECT_EQ(drop_count_found, 0u);
@@ -595,20 +708,17 @@ TEST_F(LogServiceTest, FilterLogs) {
ASSERT_TRUE(
AddLogEntry(kMessage, different_module_metadata, kSampleTimestamp).ok());
- // Add messages to the stack in the reverse order they are sent.
- Vector<TestLogEntry, 3> message_stack;
- message_stack.push_back(
- {.metadata = error_metadata,
+ Vector<TestLogEntry, 3> expected_messages{
+ {.metadata = info_metadata,
.timestamp = kSampleTimestamp,
- .tokenized_data = std::as_bytes(std::span(std::string_view(kMessage)))});
- message_stack.push_back(
+ .tokenized_data = std::as_bytes(std::span(std::string_view(kMessage)))},
{.metadata = warn_metadata,
.timestamp = kSampleTimestamp,
- .tokenized_data = std::as_bytes(std::span(std::string_view(kMessage)))});
- message_stack.push_back(
- {.metadata = info_metadata,
+ .tokenized_data = std::as_bytes(std::span(std::string_view(kMessage)))},
+ {.metadata = error_metadata,
.timestamp = kSampleTimestamp,
- .tokenized_data = std::as_bytes(std::span(std::string_view(kMessage)))});
+ .tokenized_data = std::as_bytes(std::span(std::string_view(kMessage)))},
+ };
// Set up filter rules for drain at drains_[1].
RpcLogDrain& drain = drains_[1];
@@ -639,8 +749,11 @@ TEST_F(LogServiceTest, FilterLogs) {
uint32_t drop_count_found = 0;
for (auto& response : context.responses()) {
protobuf::Decoder entry_decoder(response);
- entries_found += VerifyLogEntries(
- entry_decoder, message_stack, entries_found, drop_count_found);
+ VerifyLogEntries(entry_decoder,
+ expected_messages,
+ entries_found,
+ entries_found,
+ drop_count_found);
}
EXPECT_EQ(entries_found, 3u);
EXPECT_EQ(drop_count_found, 0u);
diff --git a/pw_log_rpc/public/pw_log_rpc/internal/config.h b/pw_log_rpc/public/pw_log_rpc/internal/config.h
index 89a051d91..9fbcfbfc7 100644
--- a/pw_log_rpc/public/pw_log_rpc/internal/config.h
+++ b/pw_log_rpc/public/pw_log_rpc/internal/config.h
@@ -43,6 +43,37 @@
#define PW_LOG_RPC_CONFIG_LOG_MODULE_NAME "PW_LOG_RPC"
#endif // PW_LOG_RPC_CONFIG_LOG_MODULE_NAME
+// Messages to describe the log drop reasons.
+// See https://pigweed.dev/pw_log_rpc/#log-drops
+//
+// Message for when an entry could not be added to the MultiSink.
+#ifndef PW_LOG_RPC_INGRESS_ERROR_MSG
+#define PW_LOG_RPC_INGRESS_ERROR_MSG "Ingress error"
+#endif // PW_LOG_RPC_INGRESS_ERROR_MSG
+
+// Message for when a drain drains too slow and has to be advanced, dropping
+// logs.
+#ifndef PW_LOG_RPC_SLOW_DRAIN_MSG
+#define PW_LOG_RPC_SLOW_DRAIN_MSG "Slow drain"
+#endif // PW_LOG_RPC_SLOW_DRAIN_MSG
+
+// Message for when a log entry is too large to fit in the outbound buffer, so
+// it is dropped.
+#ifndef PW_LOG_RPC_SMALL_OUTBOUND_BUFFER_MSG
+#define PW_LOG_RPC_SMALL_OUTBOUND_BUFFER_MSG "Outbound log buffer too small"
+#endif // PW_LOG_RPC_SMALL_OUTBOUND_BUFFER_MSG
+
+// Message for when the log entry in the MultiSink is too large to be peeked or
+// popped out, so it is dropped.
+#ifndef PW_LOG_RPC_SMALL_STACK_BUFFER_MSG
+#define PW_LOG_RPC_SMALL_STACK_BUFFER_MSG "Stack log buffer too small"
+#endif // PW_LOG_RPC_SMALL_STACK_BUFFER_MSG
+
+// Message for when a bulk of logs cannot be sent due to a writer error.
+#ifndef PW_LOG_RPC_WRITER_ERROR_MSG
+#define PW_LOG_RPC_WRITER_ERROR_MSG "Writer error"
+#endif // PW_LOG_RPC_WRITER_ERROR_MSG
+
namespace pw::log_rpc::cfg {
inline constexpr size_t kMaxModuleNameBytes =
PW_LOG_RPC_CONFIG_MAX_FILTER_RULE_MODULE_NAME_SIZE;
diff --git a/pw_log_rpc/public/pw_log_rpc/log_filter_service.h b/pw_log_rpc/public/pw_log_rpc/log_filter_service.h
index 88b4fe298..bb074afbd 100644
--- a/pw_log_rpc/public/pw_log_rpc/log_filter_service.h
+++ b/pw_log_rpc/public/pw_log_rpc/log_filter_service.h
@@ -28,15 +28,50 @@ class FilterService final
// Modifies a log filter and its rules. The filter must be registered in the
// provided filter map.
- StatusWithSize SetFilter(ConstByteSpan request, ByteSpan);
+ void SetFilter(ConstByteSpan request, rpc::RawUnaryResponder& responder) {
+ responder.Finish({}, SetFilterImpl(request)).IgnoreError();
+ }
// Retrieves a log filter and its rules. The filter must be registered in the
// provided filter map.
- StatusWithSize GetFilter(ConstByteSpan request, ByteSpan response);
+ void GetFilter(ConstByteSpan request, rpc::RawUnaryResponder& responder) {
+ std::byte buffer[kFilterResponseBufferSize] = {};
+ StatusWithSize result = GetFilterImpl(request, buffer);
+ responder.Finish(std::span(buffer).first(result.size()), result.status())
+ .IgnoreError();
+ }
- StatusWithSize ListFilterIds(ConstByteSpan, ByteSpan response);
+ void ListFilterIds(ConstByteSpan, rpc::RawUnaryResponder& responder) {
+ std::byte buffer[kFilterIdsResponseBufferSize] = {};
+ StatusWithSize result = ListFilterIdsImpl(buffer);
+ responder.Finish(std::span(buffer).first(result.size()), result.status())
+ .IgnoreError();
+ }
private:
+ static constexpr size_t kMinSupportedFilters = 4;
+
+ static constexpr size_t kFilterResponseBufferSize =
+ protobuf::FieldNumberSizeBytes(log::Filter::Fields::RULE) +
+ protobuf::kMaxSizeOfLength +
+ kMinSupportedFilters *
+ (protobuf::SizeOfFieldEnum(
+ log::FilterRule::Fields::LEVEL_GREATER_THAN_OR_EQUAL, 7) +
+ protobuf::SizeOfFieldBytes(log::FilterRule::Fields::MODULE_EQUALS,
+ 6) +
+ protobuf::SizeOfFieldUint32(log::FilterRule::Fields::ANY_FLAGS_SET,
+ 1) +
+ protobuf::SizeOfFieldEnum(log::FilterRule::Fields::ACTION, 2));
+
+ static constexpr size_t kFilterIdsResponseBufferSize =
+ kMinSupportedFilters *
+ protobuf::SizeOfFieldBytes(log::FilterIdListResponse::Fields::FILTER_ID,
+ 4);
+
+ Status SetFilterImpl(ConstByteSpan request);
+ StatusWithSize GetFilterImpl(ConstByteSpan request, ByteSpan response);
+ StatusWithSize ListFilterIdsImpl(ByteSpan response);
+
FilterMap& filter_map_;
};
diff --git a/pw_log_rpc/public/pw_log_rpc/rpc_log_drain.h b/pw_log_rpc/public/pw_log_rpc/rpc_log_drain.h
index 518539f4b..76dd7e372 100644
--- a/pw_log_rpc/public/pw_log_rpc/rpc_log_drain.h
+++ b/pw_log_rpc/public/pw_log_rpc/rpc_log_drain.h
@@ -14,15 +14,19 @@
#pragma once
+#include <algorithm>
#include <array>
#include <cstdint>
#include <limits>
#include <optional>
+#include <string_view>
#include "pw_assert/assert.h"
#include "pw_bytes/span.h"
#include "pw_chrono/system_clock.h"
+#include "pw_function/function.h"
#include "pw_log/proto/log.pwpb.h"
+#include "pw_log_rpc/internal/config.h"
#include "pw_log_rpc/log_filter.h"
#include "pw_multisink/multisink.h"
#include "pw_protobuf/serialized_size.h"
@@ -64,10 +68,32 @@ class RpcLogDrain : public multisink::MultiSink::Drain {
protobuf::SizeOfFieldUint32(log::LogEntry::Fields::LINE_LEVEL) +
protobuf::SizeOfFieldUint32(log::LogEntry::Fields::FLAGS) +
protobuf::SizeOfFieldInt64(log::LogEntry::Fields::TIMESTAMP) +
- protobuf::SizeOfFieldBytes(log::LogEntry::Fields::MODULE, 0);
-
- // The smallest buffer size must be able to fit a typical token size: 4 bytes.
- static constexpr size_t kMinEntryBufferSize = kMinEntrySizeWithoutPayload + 4;
+ protobuf::SizeOfFieldBytes(log::LogEntry::Fields::MODULE, 0) +
+ protobuf::SizeOfFieldBytes(log::LogEntry::Fields::FILE, 0) +
+ protobuf::SizeOfFieldBytes(log::LogEntry::Fields::THREAD, 0);
+
+ // Error messages sent when logs are dropped.
+ static constexpr std::string_view kIngressErrorMessage{
+ PW_LOG_RPC_INGRESS_ERROR_MSG};
+ static constexpr std::string_view kSlowDrainErrorMessage{
+ PW_LOG_RPC_SLOW_DRAIN_MSG};
+ static constexpr std::string_view kSmallOutboundBufferErrorMessage{
+ PW_LOG_RPC_SMALL_OUTBOUND_BUFFER_MSG};
+ static constexpr std::string_view kSmallStackBufferErrorMessage{
+ PW_LOG_RPC_SMALL_STACK_BUFFER_MSG};
+ static constexpr std::string_view kWriterErrorMessage{
+ PW_LOG_RPC_WRITER_ERROR_MSG};
+ // The smallest entry buffer must fit the largest error message, or a typical
+ // token size (4B), whichever is largest.
+ static constexpr size_t kLargestErrorMessageOrTokenSize =
+ std::max({size_t(4),
+ kIngressErrorMessage.size(),
+ kSlowDrainErrorMessage.size(),
+ kSmallOutboundBufferErrorMessage.size(),
+ kSmallStackBufferErrorMessage.size(),
+ kWriterErrorMessage.size()});
+ static constexpr size_t kMinEntryBufferSize =
+ kMinEntrySizeWithoutPayload + sizeof(kLargestErrorMessageOrTokenSize);
// When encoding LogEntry in LogEntries, there are kLogEntriesEncodeFrameSize
// bytes added to the encoded LogEntry. This constant and kMinEntryBufferSize
@@ -96,13 +122,18 @@ class RpcLogDrain : public multisink::MultiSink::Drain {
error_handling_(error_handling),
server_writer_(),
log_entry_buffer_(log_entry_buffer),
- committed_entry_drop_count_(0),
+ drop_count_ingress_error_(0),
+ drop_count_slow_drain_(0),
+ drop_count_small_outbound_buffer_(0),
+ drop_count_small_stack_buffer_(0),
+ drop_count_writer_error_(0),
mutex_(mutex),
filter_(filter),
sequence_id_(0),
max_bundles_per_trickle_(max_bundles_per_trickle),
trickle_delay_(trickle_delay),
- no_writes_until_(chrono::SystemClock::now()) {
+ no_writes_until_(chrono::SystemClock::now()),
+ on_open_callback_(nullptr) {
PW_ASSERT(log_entry_buffer.size_bytes() >= kMinEntryBufferSize);
}
@@ -161,6 +192,13 @@ class RpcLogDrain : public multisink::MultiSink::Drain {
trickle_delay_ = trickle_delay;
}
+ // Stores a function that is called when Open() is successful. Pass nullptr to
+ // clear it. This is useful in cases where the owner of the drain needs to be
+ // notified that the drain was opened.
+ void set_on_open_callback(pw::Function<void()>&& callback) {
+ on_open_callback_ = std::move(callback);
+ }
+
private:
enum class LogDrainState {
kCaughtUp,
@@ -180,13 +218,18 @@ class RpcLogDrain : public multisink::MultiSink::Drain {
const LogDrainErrorHandling error_handling_;
rpc::RawServerWriter server_writer_ PW_GUARDED_BY(mutex_);
const ByteSpan log_entry_buffer_ PW_GUARDED_BY(mutex_);
- uint32_t committed_entry_drop_count_ PW_GUARDED_BY(mutex_);
+ uint32_t drop_count_ingress_error_ PW_GUARDED_BY(mutex_);
+ uint32_t drop_count_slow_drain_ PW_GUARDED_BY(mutex_);
+ uint32_t drop_count_small_outbound_buffer_ PW_GUARDED_BY(mutex_);
+ uint32_t drop_count_small_stack_buffer_ PW_GUARDED_BY(mutex_);
+ uint32_t drop_count_writer_error_ PW_GUARDED_BY(mutex_);
sync::Mutex& mutex_;
Filter* filter_;
uint32_t sequence_id_;
size_t max_bundles_per_trickle_;
pw::chrono::SystemClock::duration trickle_delay_;
pw::chrono::SystemClock::time_point no_writes_until_;
+ pw::Function<void()> on_open_callback_;
};
} // namespace pw::log_rpc
diff --git a/pw_log_rpc/public/pw_log_rpc/rpc_log_drain_thread.h b/pw_log_rpc/public/pw_log_rpc/rpc_log_drain_thread.h
index fbf6b0c0f..7b8600730 100644
--- a/pw_log_rpc/public/pw_log_rpc/rpc_log_drain_thread.h
+++ b/pw_log_rpc/public/pw_log_rpc/rpc_log_drain_thread.h
@@ -46,13 +46,15 @@ class RpcLogDrainThread : public thread::ThreadCore,
encoding_buffer_(encoding_buffer) {}
void OnNewEntryAvailable() override {
- new_log_available_notification_.release();
+ ready_to_flush_notification_.release();
}
// Sequentially flushes each log stream.
void Run() override {
for (auto& drain : drain_map_.drains()) {
multisink_.AttachDrain(drain);
+ drain.set_on_open_callback(
+ [this]() { this->ready_to_flush_notification_.release(); });
}
multisink_.AttachListener(*this);
@@ -61,9 +63,9 @@ class RpcLogDrainThread : public thread::ThreadCore,
chrono::SystemClock::duration::zero();
while (true) {
if (drains_pending && min_delay.has_value()) {
- new_log_available_notification_.try_acquire_for(min_delay.value());
+ ready_to_flush_notification_.try_acquire_for(min_delay.value());
} else {
- new_log_available_notification_.acquire();
+ ready_to_flush_notification_.acquire();
}
drains_pending = false;
min_delay = std::nullopt;
@@ -93,7 +95,7 @@ class RpcLogDrainThread : public thread::ThreadCore,
}
private:
- sync::TimedThreadNotification new_log_available_notification_;
+ sync::TimedThreadNotification ready_to_flush_notification_;
RpcLogDrainMap& drain_map_;
multisink::MultiSink& multisink_;
std::span<std::byte> encoding_buffer_;
diff --git a/pw_log_rpc/pw_log_rpc_private/test_utils.h b/pw_log_rpc/pw_log_rpc_private/test_utils.h
index bdc901167..f89f229af 100644
--- a/pw_log_rpc/pw_log_rpc_private/test_utils.h
+++ b/pw_log_rpc/pw_log_rpc_private/test_utils.h
@@ -27,6 +27,8 @@ struct TestLogEntry {
int64_t timestamp = 0;
uint32_t dropped = 0;
ConstByteSpan tokenized_data = {};
+ ConstByteSpan file = {};
+ ConstByteSpan thread = {};
};
// Unpacks a `LogEntry` proto buffer to compare it with the expected data and
@@ -35,11 +37,12 @@ void VerifyLogEntry(protobuf::Decoder& entry_decoder,
const TestLogEntry& expected_entry,
uint32_t& drop_count_out);
-// Verifies a stream of log entries and updates the total drop count found.
-size_t VerifyLogEntries(protobuf::Decoder& entries_decoder,
- Vector<TestLogEntry>& expected_entries_stack,
- uint32_t expected_first_entry_sequence_id,
- uint32_t& drop_count_out);
+// Verifies a stream of log entries and updates the total entry and drop counts.
+void VerifyLogEntries(protobuf::Decoder& entries_decoder,
+ const Vector<TestLogEntry>& expected_entries,
+ uint32_t expected_first_entry_sequence_id,
+ size_t& entries_count_out,
+ uint32_t& drop_count_out);
size_t CountLogEntries(protobuf::Decoder& entries_decoder);
diff --git a/pw_log_rpc/rpc_log_drain.cc b/pw_log_rpc/rpc_log_drain.cc
index 05884f404..fe12ba0c8 100644
--- a/pw_log_rpc/rpc_log_drain.cc
+++ b/pw_log_rpc/rpc_log_drain.cc
@@ -17,6 +17,8 @@
#include <limits>
#include <mutex>
#include <optional>
+#include <span>
+#include <string_view>
#include "pw_assert/check.h"
#include "pw_chrono/system_clock.h"
@@ -29,14 +31,29 @@
namespace pw::log_rpc {
namespace {
-// Creates an encoded drop message on the provided buffer.
-Result<ConstByteSpan> CreateEncodedDropMessage(
- uint32_t drop_count, ByteSpan encoded_drop_message_buffer) {
- // Encode message in protobuf.
+// Creates an encoded drop message on the provided buffer and adds it to the
+// bulk log entries. Resets the drop count when successfull.
+void TryEncodeDropMessage(ByteSpan encoded_drop_message_buffer,
+ std::string_view reason,
+ uint32_t& drop_count,
+ log::LogEntries::MemoryEncoder& entries_encoder) {
+ // Encode drop count and reason, if any, in log proto.
log::LogEntry::MemoryEncoder encoder(encoded_drop_message_buffer);
- encoder.WriteDropped(drop_count);
- PW_TRY(encoder.status());
- return ConstByteSpan(encoder);
+ if (!reason.empty()) {
+ encoder.WriteMessage(std::as_bytes(std::span(reason))).IgnoreError();
+ }
+ encoder.WriteDropped(drop_count).IgnoreError();
+ if (!encoder.status().ok()) {
+ return;
+ }
+ // Add encoded drop messsage if fits in buffer.
+ ConstByteSpan drop_message(encoder);
+ if (drop_message.size() + RpcLogDrain::kLogEntriesEncodeFrameSize <
+ entries_encoder.ConservativeWriteLimit()) {
+ PW_CHECK_OK(entries_encoder.WriteBytes(
+ static_cast<uint32_t>(log::LogEntries::Fields::ENTRIES), drop_message));
+ drop_count = 0;
+ }
}
} // namespace
@@ -50,6 +67,9 @@ Status RpcLogDrain::Open(rpc::RawServerWriter& writer) {
return Status::AlreadyExists();
}
server_writer_ = std::move(writer);
+ if (on_open_callback_ != nullptr) {
+ on_open_callback_();
+ }
return OkStatus();
}
@@ -102,7 +122,8 @@ RpcLogDrain::LogDrainState RpcLogDrain::SendLogs(size_t max_num_bundles,
continue;
}
- encoder.WriteFirstEntrySequenceId(sequence_id_);
+ encoder.WriteFirstEntrySequenceId(sequence_id_)
+ .IgnoreError(); // TODO(pwbug/387): Handle Status properly
sequence_id_ += packed_entry_count;
const Status status = server_writer_.Write(encoder);
sent_bundle_count++;
@@ -110,7 +131,7 @@ RpcLogDrain::LogDrainState RpcLogDrain::SendLogs(size_t max_num_bundles,
if (!status.ok() &&
error_handling_ == LogDrainErrorHandling::kCloseStreamOnWriterError) {
// Only update this drop count when writer errors are not ignored.
- committed_entry_drop_count_ += packed_entry_count;
+ drop_count_writer_error_ += packed_entry_count;
server_writer_.Finish().IgnoreError();
encoding_status_out = Status::Aborted();
return log_sink_state;
@@ -123,64 +144,106 @@ RpcLogDrain::LogDrainState RpcLogDrain::EncodeOutgoingPacket(
log::LogEntries::MemoryEncoder& encoder, uint32_t& packed_entry_count_out) {
const size_t total_buffer_size = encoder.ConservativeWriteLimit();
do {
- // Get entry and drop count from drain.
+ // Peek entry and get drop count from multisink.
uint32_t drop_count = 0;
+ uint32_t ingress_drop_count = 0;
Result<multisink::MultiSink::Drain::PeekedEntry> possible_entry =
- PeekEntry(log_entry_buffer_, drop_count);
+ PeekEntry(log_entry_buffer_, drop_count, ingress_drop_count);
+ drop_count_ingress_error_ += ingress_drop_count;
+
+ // Check if the entry fits in the entry buffer.
if (possible_entry.status().IsResourceExhausted()) {
+ ++drop_count_small_stack_buffer_;
continue;
}
- // Report drop count if messages were dropped.
- if (committed_entry_drop_count_ > 0 || drop_count > 0) {
- // Reuse the log_entry_buffer_ to send a drop message.
- const Result<ConstByteSpan> drop_message_result =
- CreateEncodedDropMessage(committed_entry_drop_count_ + drop_count,
- log_entry_buffer_);
- // Add encoded drop messsage if fits in buffer.
- if (drop_message_result.ok() &&
- drop_message_result.value().size() + kLogEntriesEncodeFrameSize <
- encoder.ConservativeWriteLimit()) {
- PW_CHECK_OK(encoder.WriteBytes(
- static_cast<uint32_t>(log::LogEntries::Fields::ENTRIES),
- drop_message_result.value()));
- committed_entry_drop_count_ = 0;
- }
- if (possible_entry.ok()) {
- PW_CHECK_OK(PeekEntry(log_entry_buffer_, drop_count).status());
- }
- }
-
+ // Check if there are any entries left.
if (possible_entry.status().IsOutOfRange()) {
+ // Stash multisink's reported drop count that will be reported later with
+ // any other drop counts.
+ drop_count_slow_drain_ += drop_count;
return LogDrainState::kCaughtUp; // There are no more entries.
}
- // At this point all expected error modes have been handled.
+ // At this point all expected errors have been handled.
PW_CHECK_OK(possible_entry.status());
- // TODO(pwbug/559): avoid sending multiple drop counts between filtered out
- // log entries.
+ // Check if the entry passes any set filter rules.
if (filter_ != nullptr &&
filter_->ShouldDropLog(possible_entry.value().entry())) {
+ // Add the drop count from the multisink peek, stored in `drop_count`, to
+ // the total drop count. Then drop the entry without counting it towards
+ // the total drop count. Drops will be reported later all together.
+ drop_count_slow_drain_ += drop_count;
PW_CHECK_OK(PopEntry(possible_entry.value()));
- return LogDrainState::kMoreEntriesRemaining;
+ continue;
}
- // Check if the entry fits in encoder buffer.
+ // Check if the entry fits in the encoder buffer by itself.
const size_t encoded_entry_size =
possible_entry.value().entry().size() + kLogEntriesEncodeFrameSize;
if (encoded_entry_size + kLogEntriesEncodeFrameSize > total_buffer_size) {
// Entry is larger than the entire available buffer.
- ++committed_entry_drop_count_;
+ ++drop_count_small_outbound_buffer_;
PW_CHECK_OK(PopEntry(possible_entry.value()));
continue;
- } else if (encoded_entry_size > encoder.ConservativeWriteLimit()) {
- // Entry does not fit in the partially filled encoder buffer. Notify the
- // caller there are more entries to send.
+ }
+
+ // At this point, we have a valid entry that may fit in the encode buffer.
+ // Report any drop counts combined reusing the log_entry_buffer_ to encode a
+ // drop message.
+ drop_count_slow_drain_ += drop_count;
+ // Account for dropped entries too large for stack buffer, which PeekEntry()
+ // also reports.
+ drop_count_slow_drain_ -= drop_count_small_stack_buffer_;
+ bool log_entry_buffer_has_valid_entry = possible_entry.ok();
+ if (drop_count_slow_drain_ > 0) {
+ TryEncodeDropMessage(log_entry_buffer_,
+ std::string_view(kSlowDrainErrorMessage),
+ drop_count_slow_drain_,
+ encoder);
+ log_entry_buffer_has_valid_entry = false;
+ }
+ if (drop_count_ingress_error_ > 0) {
+ TryEncodeDropMessage(log_entry_buffer_,
+ std::string_view(kIngressErrorMessage),
+ drop_count_ingress_error_,
+ encoder);
+ log_entry_buffer_has_valid_entry = false;
+ }
+ if (drop_count_small_stack_buffer_ > 0) {
+ TryEncodeDropMessage(log_entry_buffer_,
+ std::string_view(kSmallStackBufferErrorMessage),
+ drop_count_small_stack_buffer_,
+ encoder);
+ log_entry_buffer_has_valid_entry = false;
+ }
+ if (drop_count_small_outbound_buffer_ > 0) {
+ TryEncodeDropMessage(log_entry_buffer_,
+ std::string_view(kSmallOutboundBufferErrorMessage),
+ drop_count_small_outbound_buffer_,
+ encoder);
+ log_entry_buffer_has_valid_entry = false;
+ }
+ if (drop_count_writer_error_ > 0) {
+ TryEncodeDropMessage(log_entry_buffer_,
+ std::string_view(kWriterErrorMessage),
+ drop_count_writer_error_,
+ encoder);
+ log_entry_buffer_has_valid_entry = false;
+ }
+ if (possible_entry.ok() && !log_entry_buffer_has_valid_entry) {
+ PW_CHECK_OK(PeekEntry(log_entry_buffer_, drop_count, ingress_drop_count)
+ .status());
+ }
+
+ // Check if the entry fits in the partially filled encoder buffer.
+ if (encoded_entry_size > encoder.ConservativeWriteLimit()) {
+ // Notify the caller there are more entries to send.
return LogDrainState::kMoreEntriesRemaining;
}
- // Encode log entry and remove it from multisink.
+ // Encode the entry and remove it from multisink.
PW_CHECK_OK(encoder.WriteBytes(
static_cast<uint32_t>(log::LogEntries::Fields::ENTRIES),
possible_entry.value().entry()));
diff --git a/pw_log_rpc/rpc_log_drain_test.cc b/pw_log_rpc/rpc_log_drain_test.cc
index 83918a7ac..157e1158d 100644
--- a/pw_log_rpc/rpc_log_drain_test.cc
+++ b/pw_log_rpc/rpc_log_drain_test.cc
@@ -43,48 +43,6 @@ namespace {
static constexpr size_t kBufferSize =
RpcLogDrain::kMinEntrySizeWithoutPayload + 32;
-// Verifies a stream of log entries and updates the total drop count found.
-// expected_entries is expected to be in the same order that messages were
-// added to the multisink.
-void VerifyLogEntriesInCorrectOrder(
- protobuf::Decoder& entries_decoder,
- const Vector<TestLogEntry>& expected_entries,
- uint32_t expected_first_entry_sequence_id,
- uint32_t& drop_count_out) {
- size_t entries_found = 0;
-
- while (entries_decoder.Next().ok()) {
- if (static_cast<pw::log::LogEntries::Fields>(
- entries_decoder.FieldNumber()) ==
- log::LogEntries::Fields::ENTRIES) {
- ConstByteSpan entry;
- EXPECT_EQ(entries_decoder.ReadBytes(&entry), OkStatus());
- protobuf::Decoder entry_decoder(entry);
- if (expected_entries.empty()) {
- break;
- }
-
- ASSERT_LT(entries_found, expected_entries.size());
-
- // Keep track of entries and drops respective counts.
- uint32_t current_drop_count = 0;
- VerifyLogEntry(
- entry_decoder, expected_entries[entries_found], current_drop_count);
- drop_count_out += current_drop_count;
- if (current_drop_count == 0) {
- ++entries_found;
- }
- } else if (static_cast<pw::log::LogEntries::Fields>(
- entries_decoder.FieldNumber()) ==
- log::LogEntries::Fields::FIRST_ENTRY_SEQUENCE_ID) {
- uint32_t first_entry_sequence_id = 0;
- EXPECT_EQ(entries_decoder.ReadUint32(&first_entry_sequence_id),
- OkStatus());
- EXPECT_EQ(expected_first_entry_sequence_id, first_entry_sequence_id);
- }
- }
-}
-
TEST(RpcLogDrain, TryFlushDrainWithClosedWriter) {
// Drain without a writer.
const uint32_t drain_id = 1;
@@ -245,12 +203,11 @@ class TrickleTest : public ::testing::Test {
server_(std::span(&channel_, 1)) {}
TestLogEntry BasicLog(std::string_view message) {
- constexpr log_tokenized::Metadata kSampleMetadata =
- log_tokenized::Metadata::Set<PW_LOG_LEVEL_INFO, 123, 0x03, __LINE__>();
return {.metadata = kSampleMetadata,
- .timestamp = 123,
+ .timestamp = kSampleTimestamp,
.dropped = 0,
- .tokenized_data = std::as_bytes(std::span(message))};
+ .tokenized_data = std::as_bytes(std::span(message)),
+ .thread = std::as_bytes(std::span(kSampleThreadName))};
}
void AttachDrain() { multisink_.AttachDrain(drains_[0]); }
@@ -264,14 +221,13 @@ class TrickleTest : public ::testing::Test {
log::EncodeTokenizedLog(entry.metadata,
entry.tokenized_data,
entry.timestamp,
+ entry.thread,
log_message_encode_buffer_);
ASSERT_EQ(encoded_log_result.status(), OkStatus());
EXPECT_LE(encoded_log_result.value().size(), kMaxMessageSize);
multisink_.HandleEntry(encoded_log_result.value());
}
- // VerifyLogEntriesInCorrectOrder() expects logs to be in the opposite
- // direction compared to when they were added to the multisink.
void AddLogEntries(const Vector<TestLogEntry>& entries) {
for (const TestLogEntry& entry : entries) {
AddLogEntry(entry);
@@ -280,8 +236,29 @@ class TrickleTest : public ::testing::Test {
static constexpr uint32_t kDrainChannelId = 1;
static constexpr size_t kMaxMessageSize = 60;
+
+ // Use the size of the encoded BasicLog entry to calculate buffer sizes and
+ // better control the number of entries in each sent bulk.
+ static constexpr log_tokenized::Metadata kSampleMetadata =
+ log_tokenized::Metadata::Set<PW_LOG_LEVEL_INFO, 123, 0x03, 300>();
+ static constexpr uint64_t kSampleTimestamp = 9000;
+ static constexpr std::string_view kSampleThreadName = "thread";
+ static constexpr size_t kBasicLogSizeWithoutPayload =
+ protobuf::SizeOfFieldBytes(log::LogEntry::Fields::MESSAGE, 0) +
+ protobuf::SizeOfFieldUint32(
+ log::LogEntry::Fields::LINE_LEVEL,
+ log::PackLineLevel(kSampleMetadata.line_number(),
+ kSampleMetadata.level())) +
+ protobuf::SizeOfFieldUint32(log::LogEntry::Fields::FLAGS,
+ kSampleMetadata.flags()) +
+ protobuf::SizeOfFieldInt64(log::LogEntry::Fields::TIMESTAMP,
+ kSampleTimestamp) +
+ protobuf::SizeOfFieldBytes(log::LogEntry::Fields::MODULE,
+ sizeof(kSampleMetadata.module())) +
+ protobuf::SizeOfFieldBytes(log::LogEntry::Fields::THREAD,
+ kSampleThreadName.size());
static constexpr size_t kDrainEncodeBufferSize =
- RpcLogDrain::kMinEntrySizeWithoutPayload + kMaxMessageSize;
+ kBasicLogSizeWithoutPayload + kMaxMessageSize;
static constexpr size_t kChannelEncodeBufferSize = kDrainEncodeBufferSize * 2;
std::array<std::byte, kMaxMessageSize> log_message_encode_buffer_;
std::array<std::byte, kDrainEncodeBufferSize> drain_encode_buffer_;
@@ -325,11 +302,13 @@ TEST_F(TrickleTest, EntriesAreFlushedToSinglePayload) {
EXPECT_EQ(payloads.size(), 1u);
uint32_t drop_count = 0;
+ size_t entries_count = 0;
protobuf::Decoder payload_decoder(payloads[0]);
payload_decoder.Reset(payloads[0]);
- VerifyLogEntriesInCorrectOrder(
- payload_decoder, kExpectedEntries, 0, drop_count);
+ VerifyLogEntries(
+ payload_decoder, kExpectedEntries, 0, entries_count, drop_count);
EXPECT_EQ(drop_count, 0u);
+ EXPECT_EQ(entries_count, 3u);
}
TEST_F(TrickleTest, ManyLogsOverflowToNextPayload) {
@@ -361,16 +340,20 @@ TEST_F(TrickleTest, ManyLogsOverflowToNextPayload) {
ASSERT_EQ(payloads.size(), 2u);
uint32_t drop_count = 0;
+ size_t entries_count = 0;
protobuf::Decoder payload_decoder(payloads[0]);
payload_decoder.Reset(payloads[0]);
- VerifyLogEntriesInCorrectOrder(
- payload_decoder, kFirstFlushedBundle, 0, drop_count);
+ VerifyLogEntries(
+ payload_decoder, kFirstFlushedBundle, 0, entries_count, drop_count);
EXPECT_EQ(drop_count, 0u);
+ EXPECT_EQ(entries_count, 3u);
+ entries_count = 0;
payload_decoder.Reset(payloads[1]);
- VerifyLogEntriesInCorrectOrder(
- payload_decoder, kSecondFlushedBundle, 3, drop_count);
+ VerifyLogEntries(
+ payload_decoder, kSecondFlushedBundle, 3, entries_count, drop_count);
EXPECT_EQ(drop_count, 0u);
+ EXPECT_EQ(entries_count, 3u);
}
TEST_F(TrickleTest, LimitedFlushOverflowsToNextPayload) {
@@ -406,22 +389,78 @@ TEST_F(TrickleTest, LimitedFlushOverflowsToNextPayload) {
output_.payloads<log::pw_rpc::raw::Logs::Listen>(kDrainChannelId);
ASSERT_EQ(first_flush_payloads.size(), 1u);
uint32_t drop_count = 0;
+ size_t entries_count = 0;
protobuf::Decoder payload_decoder(first_flush_payloads[0]);
payload_decoder.Reset(first_flush_payloads[0]);
- VerifyLogEntriesInCorrectOrder(
- payload_decoder, kFirstFlushedBundle, 0, drop_count);
+ VerifyLogEntries(
+ payload_decoder, kFirstFlushedBundle, 0, entries_count, drop_count);
+ EXPECT_EQ(entries_count, 3u);
// An additional flush should produce another payload.
min_delay = drains_[0].Trickle(channel_encode_buffer_);
EXPECT_EQ(min_delay.has_value(), false);
drop_count = 0;
+ entries_count = 0;
+
rpc::PayloadsView second_flush_payloads =
output_.payloads<log::pw_rpc::raw::Logs::Listen>(kDrainChannelId);
ASSERT_EQ(second_flush_payloads.size(), 2u);
payload_decoder.Reset(second_flush_payloads[1]);
- VerifyLogEntriesInCorrectOrder(
- payload_decoder, kSecondFlushedBundle, 3, drop_count);
+ VerifyLogEntries(
+ payload_decoder, kSecondFlushedBundle, 3, entries_count, drop_count);
EXPECT_EQ(drop_count, 0u);
+ EXPECT_EQ(entries_count, 3u);
+}
+
+TEST(RpcLogDrain, OnOpenCallbackCalled) {
+ // Create drain and log components.
+ const uint32_t drain_id = 1;
+ std::array<std::byte, kBufferSize> buffer;
+ sync::Mutex mutex;
+ RpcLogDrain drain(
+ drain_id,
+ buffer,
+ mutex,
+ RpcLogDrain::LogDrainErrorHandling::kCloseStreamOnWriterError,
+ nullptr);
+ RpcLogDrainMap drain_map(std::span(&drain, 1));
+ LogService log_service(drain_map);
+ std::array<std::byte, kBufferSize * 2> multisink_buffer;
+ multisink::MultiSink multisink(multisink_buffer);
+ multisink.AttachDrain(drain);
+
+ // Create server writer.
+ rpc::RawFakeChannelOutput<3> output;
+ rpc::Channel channel(rpc::Channel::Create<drain_id>(&output));
+ rpc::Server server(std::span(&channel, 1));
+ rpc::RawServerWriter writer =
+ rpc::RawServerWriter::Open<log::pw_rpc::raw::Logs::Listen>(
+ server, drain_id, log_service);
+
+ int callback_call_times = 0;
+ Function<void()> callback = [&callback_call_times]() {
+ ++callback_call_times;
+ };
+
+ // Callback not called when not set.
+ ASSERT_TRUE(writer.active());
+ ASSERT_EQ(drain.Open(writer), OkStatus());
+ EXPECT_EQ(callback_call_times, 0);
+
+ drain.set_on_open_callback(std::move(callback));
+
+ // Callback called when writer is open.
+ writer = rpc::RawServerWriter::Open<log::pw_rpc::raw::Logs::Listen>(
+ server, drain_id, log_service);
+ ASSERT_TRUE(writer.active());
+ ASSERT_EQ(drain.Open(writer), OkStatus());
+ EXPECT_EQ(callback_call_times, 1);
+
+ // Callback not called when writer is closed.
+ rpc::RawServerWriter closed_writer;
+ ASSERT_FALSE(closed_writer.active());
+ ASSERT_EQ(drain.Open(closed_writer), Status::FailedPrecondition());
+ EXPECT_EQ(callback_call_times, 1);
}
} // namespace
diff --git a/pw_log_rpc/test_utils.cc b/pw_log_rpc/test_utils.cc
index 452156b4b..3a06ec2b2 100644
--- a/pw_log_rpc/test_utils.cc
+++ b/pw_log_rpc/test_utils.cc
@@ -26,30 +26,35 @@
#include "pw_protobuf/decoder.h"
namespace pw::log_rpc {
+namespace {
+void VerifyOptionallyTokenizedField(protobuf::Decoder& entry_decoder,
+ log::LogEntry::Fields field_number,
+ ConstByteSpan expected_data) {
+ if (expected_data.empty()) {
+ return;
+ }
+ ConstByteSpan tokenized_data;
+ ASSERT_EQ(entry_decoder.Next(), OkStatus());
+ ASSERT_EQ(entry_decoder.FieldNumber(), static_cast<uint32_t>(field_number));
+ ASSERT_EQ(entry_decoder.ReadBytes(&tokenized_data), OkStatus());
+ std::string_view data_as_string(
+ reinterpret_cast<const char*>(tokenized_data.begin()),
+ tokenized_data.size());
+ std::string_view expected_data_as_string(
+ reinterpret_cast<const char*>(expected_data.begin()),
+ expected_data.size());
+ EXPECT_EQ(data_as_string, expected_data_as_string);
+}
+} // namespace
// Unpacks a `LogEntry` proto buffer to compare it with the expected data and
// updates the total drop count found.
void VerifyLogEntry(protobuf::Decoder& entry_decoder,
const TestLogEntry& expected_entry,
uint32_t& drop_count_out) {
- ConstByteSpan tokenized_data;
- if (!expected_entry.tokenized_data.empty()) {
- ASSERT_EQ(entry_decoder.Next(), OkStatus());
- ASSERT_EQ(entry_decoder.FieldNumber(),
- static_cast<uint32_t>(log::LogEntry::Fields::MESSAGE));
- ASSERT_TRUE(entry_decoder.ReadBytes(&tokenized_data).ok());
- if (tokenized_data.size() != expected_entry.tokenized_data.size()) {
- PW_LOG_ERROR(
- "actual: '%s', expected: '%s'",
- reinterpret_cast<const char*>(tokenized_data.begin()),
- reinterpret_cast<const char*>(expected_entry.tokenized_data.begin()));
- }
- EXPECT_EQ(tokenized_data.size(), expected_entry.tokenized_data.size());
- EXPECT_EQ(std::memcmp(tokenized_data.begin(),
- expected_entry.tokenized_data.begin(),
- expected_entry.tokenized_data.size()),
- 0);
- }
+ VerifyOptionallyTokenizedField(entry_decoder,
+ log::LogEntry::Fields::MESSAGE,
+ expected_entry.tokenized_data);
if (expected_entry.metadata.level()) {
ASSERT_EQ(entry_decoder.Next(), OkStatus());
ASSERT_EQ(entry_decoder.FieldNumber(),
@@ -98,14 +103,22 @@ void VerifyLogEntry(protobuf::Decoder& entry_decoder,
ASSERT_EQ(module.status(), OkStatus());
EXPECT_EQ(expected_entry.metadata.module(), module.value());
}
+ VerifyOptionallyTokenizedField(
+ entry_decoder, log::LogEntry::Fields::FILE, expected_entry.file);
+ VerifyOptionallyTokenizedField(
+ entry_decoder, log::LogEntry::Fields::THREAD, expected_entry.thread);
}
-// Verifies a stream of log entries and updates the total drop count found.
-size_t VerifyLogEntries(protobuf::Decoder& entries_decoder,
- Vector<TestLogEntry>& expected_entries_stack,
- uint32_t expected_first_entry_sequence_id,
- uint32_t& drop_count_out) {
- size_t entries_found = 0;
+// Compares an encoded LogEntry's fields against the expected sequence ID and
+// LogEntries, and updates the total entry and drop counts. Starts comparing at
+// `expected_entries[entries_count_out]`. `expected_entries` must be in the same
+// order that messages were added to the MultiSink.
+void VerifyLogEntries(protobuf::Decoder& entries_decoder,
+ const Vector<TestLogEntry>& expected_entries,
+ uint32_t expected_first_entry_sequence_id,
+ size_t& entries_count_out,
+ uint32_t& drop_count_out) {
+ size_t entry_index = entries_count_out;
while (entries_decoder.Next().ok()) {
if (static_cast<pw::log::LogEntries::Fields>(
entries_decoder.FieldNumber()) ==
@@ -113,18 +126,21 @@ size_t VerifyLogEntries(protobuf::Decoder& entries_decoder,
ConstByteSpan entry;
EXPECT_EQ(entries_decoder.ReadBytes(&entry), OkStatus());
protobuf::Decoder entry_decoder(entry);
- if (expected_entries_stack.empty()) {
+ if (expected_entries.empty()) {
break;
}
+
+ ASSERT_LT(entry_index, expected_entries.size());
+
// Keep track of entries and drops respective counts.
uint32_t current_drop_count = 0;
VerifyLogEntry(
- entry_decoder, expected_entries_stack.back(), current_drop_count);
+ entry_decoder, expected_entries[entry_index], current_drop_count);
+ ++entry_index;
drop_count_out += current_drop_count;
if (current_drop_count == 0) {
- ++entries_found;
+ ++entries_count_out;
}
- expected_entries_stack.pop_back();
} else if (static_cast<pw::log::LogEntries::Fields>(
entries_decoder.FieldNumber()) ==
log::LogEntries::Fields::FIRST_ENTRY_SEQUENCE_ID) {
@@ -134,7 +150,6 @@ size_t VerifyLogEntries(protobuf::Decoder& entries_decoder,
EXPECT_EQ(expected_first_entry_sequence_id, first_entry_sequence_id);
}
}
- return entries_found;
}
size_t CountLogEntries(protobuf::Decoder& entries_decoder) {
diff --git a/pw_log_string/BUILD.bazel b/pw_log_string/BUILD.bazel
index 933e3b91d..74deb510e 100644
--- a/pw_log_string/BUILD.bazel
+++ b/pw_log_string/BUILD.bazel
@@ -14,6 +14,7 @@
load(
"//pw_build:pigweed.bzl",
+ "pw_cc_facade",
"pw_cc_library",
)
@@ -32,6 +33,23 @@ pw_cc_library(
"public_overrides",
],
deps = [
+ ":handler",
"//pw_preprocessor",
],
)
+
+pw_cc_facade(
+ name = "handler_facade",
+ hdrs = ["public/pw_log_string/handler.h"],
+ includes = ["public"],
+ deps = ["//pw_preprocessor"],
+)
+
+pw_cc_library(
+ name = "handler",
+ srcs = ["handler.cc"],
+ deps = [
+ ":handler_facade",
+ "@pigweed_config//:pw_log_string_handler_backend",
+ ],
+)
diff --git a/pw_log_string/BUILD.gn b/pw_log_string/BUILD.gn
index 67ab9031f..0909a2a0e 100644
--- a/pw_log_string/BUILD.gn
+++ b/pw_log_string/BUILD.gn
@@ -15,6 +15,7 @@
import("//build_overrides/pigweed.gni")
import("$dir_pw_build/error.gni")
+import("$dir_pw_build/facade.gni")
import("$dir_pw_build/target_types.gni")
import("$dir_pw_docgen/docs.gni")
import("backend.gni")
@@ -27,8 +28,8 @@ config("backend_config") {
include_dirs = [ "public_overrides" ]
}
-# This source set only provides pw_log_string's backend interface. The implementation is
-# pulled in through pw_build_LINK_DEPS.
+# This source set only provides pw_log's backend interface by invoking the
+# :handler facade.
pw_source_set("pw_log_string") {
public_configs = [
":backend_config",
@@ -38,24 +39,53 @@ pw_source_set("pw_log_string") {
"public/pw_log_string/log_string.h",
"public_overrides/pw_log_backend/log_backend.h",
]
- public_deps = [ dir_pw_preprocessor ]
+ public_deps = [
+ ":handler",
+ "$dir_pw_preprocessor",
+ ]
+}
+
+pw_source_set("pw_log_string.impl") {
+ deps = [ ":handler.impl" ]
+}
+
+# This facade is a C API for string based logging which may be used to back
+# pw_log or for example to mix tokenized and string based logging.
+pw_facade("handler") {
+ backend = pw_log_string_HANDLER_BACKEND
+ public_configs = [ ":public_include_path" ]
+ public = [ "public/pw_log_string/handler.h" ]
+ public_deps = [ "$dir_pw_preprocessor" ]
+ sources = [ "handler.cc" ]
+
+ require_link_deps = [ ":handler.impl" ]
}
-# The log backend deps that might cause circular dependencies, since
-# pw_log is so ubiquitous. These deps are kept separate so they can be
-# depended on from elsewhere.
-if (pw_log_string_BACKEND != "") {
- pw_source_set("pw_log_string.impl") {
- deps = [ pw_log_string_BACKEND ]
+# Logging is low-level and ubiquitous. Because of this, it can often cause
+# circular dependencies. This target collects dependencies from the backend that
+# cannot be used because they would cause circular deps.
+#
+# This group ("$dir_pw_log_string:handler_impl") must be listed in
+# pw_build_LINK_DEPS if pw_log_string_HANDLER_BACKEND is set.
+#
+# pw_log_string:handler backends must provide their own "impl" target that
+# collects their actual dependencies. The backend "impl" group may be empty
+# if everything can go directly in the backend target without causing circular
+# dependencies.
+if (pw_log_string_HANDLER_BACKEND != "") {
+ pw_source_set("handler.impl") {
+ deps = [ get_label_info(pw_log_string_HANDLER_BACKEND,
+ "label_no_toolchain") + ".impl" ]
}
} else {
- pw_error("pw_log_string.impl") {
- message = string_join(
- " ",
- [
- "To use pw_log_string, please direct pw_log_string_BACKEND",
- "to the source set that implements the C API.",
- ])
+ pw_error("handler.impl") {
+ message =
+ string_join(" ",
+ [
+ "To use pw_log_string:handler, please direct",
+ "pw_log_string_HANDLER_BACKEND to the source set that",
+ "implements the C API.",
+ ])
}
}
diff --git a/pw_log_string/CMakeLists.txt b/pw_log_string/CMakeLists.txt
index dfb18108d..87dfabdf6 100644
--- a/pw_log_string/CMakeLists.txt
+++ b/pw_log_string/CMakeLists.txt
@@ -17,6 +17,24 @@ include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
pw_add_module_library(pw_log_string
IMPLEMENTS_FACADES
pw_log
+ HEADERS
+ public/pw_log_string/log_string.h
+ public_overrides/pw_log_backend/log_backend.h
+ PUBLIC_INCLUDES
+ public
+ public_overrides
PUBLIC_DEPS
pw_preprocessor
+ pw_log_string.handler
+)
+
+pw_add_facade(pw_log_string.handler
+ HEADERS
+ public/pw_log_string/handler.h
+ PUBLIC_INCLUDES
+ public
+ PUBLIC_DEPS
+ pw_preprocessor
+ SOURCES
+ handler.cc
)
diff --git a/pw_log_string/OWNERS b/pw_log_string/OWNERS
new file mode 100644
index 000000000..307b1deb5
--- /dev/null
+++ b/pw_log_string/OWNERS
@@ -0,0 +1 @@
+amontanez@google.com
diff --git a/pw_log_string/backend.gni b/pw_log_string/backend.gni
index b6dd80ebc..7d1cc1b8f 100644
--- a/pw_log_string/backend.gni
+++ b/pw_log_string/backend.gni
@@ -15,7 +15,8 @@
import("//build_overrides/pigweed.gni")
declare_args() {
- # The pw_log_string backend implements the pw_Log C API laid out by the
- # pw_log_string module's implementation of the pw_log facade.
- pw_log_string_BACKEND = ""
+ # The pw_log_string:message_handler backend implements the pw_Log C API
+ # which is also used by the pw_log_string module's implementation of the
+ # pw_log facade.
+ pw_log_string_HANDLER_BACKEND = ""
}
diff --git a/pw_log_string/docs.rst b/pw_log_string/docs.rst
index 63d7fadbf..d8f440917 100644
--- a/pw_log_string/docs.rst
+++ b/pw_log_string/docs.rst
@@ -3,28 +3,34 @@
=============
pw_log_string
=============
-``pw_log_string`` is a partial backend for ``pw_log``. This backend defines a
-C API that the ``PW_LOG_*`` macros will call out to. ``pw_log_string`` does not
-implement the C API, leaving projects to provide their own implementation.
-See ``pw_log_basic`` for a similar ``pw_log`` backend that also provides an
-implementation.
+``pw_log_string`` is a partial backend for ``pw_log``. This backend fowards the
+``PW_LOG_*`` macros to the ``pw_log_string:handler`` facade which is backed by
+a C API. ``pw_log_string:handler`` does not implement the full C API, leaving
+projects to provide their own implementation of
+``pw_log_string_HandleMessageVaList``. See ``pw_log_basic`` for a similar
+``pw_log`` backend that also provides an implementation.
As this module passes the log message, file name, and module name as a string to
the handler function, it's relatively expensive and not well suited for
space-constrained devices. This module is oriented towards usage on a host
(e.g. a simulated device).
+Note that ``pw_log_string:handler`` may be used even when it's not used
+as the backend for ``pw_log`` via ``pw_log_string``. For example it can be
+useful to mix tokenized and string based logging in case you have a C ABI where
+tokenization can not be used on the other side.
+
---------------
Getting started
---------------
This module is extremely minimal to set up:
-1. Implement ``pw_log_string_HandleMessage()``
+1. Implement ``pw_log_string_HandleMessageVaList()``
2. Set ``pw_log_BACKEND`` to ``"$dir_pw_log_string"``
-3. Set ``pw_log_string_BACKEND`` to point to the source set that implements
- ``pw_log_string_HandleMessage()``
+3. Set ``pw_log_string_HANDLER_BACKEND`` to point to the source set that
+ implements ``pw_log_string_HandleMessageVaList()``
-What exactly ``pw_log_string_HandleMessage()`` should do is entirely up to the
-implementation. ``pw_log_basic``'s log handler is one example, but it's also
+What exactly ``pw_log_string_HandleMessageVaList()`` should do is entirely up to
+the implementation. ``pw_log_basic``'s log handler is one example, but it's also
possible to encode as protobuf and send over a TCP port, write to a file, or
blink an LED to log as morse code.
diff --git a/pw_log_string/handler.cc b/pw_log_string/handler.cc
new file mode 100644
index 000000000..0578a51e5
--- /dev/null
+++ b/pw_log_string/handler.cc
@@ -0,0 +1,35 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_log_string/handler.h"
+
+#include <cstdarg>
+
+namespace pw::log_string {
+
+extern "C" void pw_log_string_HandleMessage(int level,
+ unsigned int flags,
+ const char* module_name,
+ const char* file_name,
+ int line_number,
+ const char* message,
+ ...) {
+ va_list args;
+ va_start(args, message);
+ pw_log_string_HandleMessageVaList(
+ level, flags, module_name, file_name, line_number, message, args);
+ va_end(args);
+}
+
+} // namespace pw::log_string
diff --git a/pw_log_string/public/pw_log_string/handler.h b/pw_log_string/public/pw_log_string/handler.h
new file mode 100644
index 000000000..f8c84432f
--- /dev/null
+++ b/pw_log_string/public/pw_log_string/handler.h
@@ -0,0 +1,42 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <stdarg.h>
+
+#include "pw_preprocessor/compiler.h"
+#include "pw_preprocessor/util.h"
+
+PW_EXTERN_C_START
+
+// Invokes pw_log_string_HandleMessageVaList, this is implemented by the facade.
+void pw_log_string_HandleMessage(int level,
+ unsigned int flags,
+ const char* module_name,
+ const char* file_name,
+ int line_number,
+ const char* message,
+ ...) PW_PRINTF_FORMAT(6, 7);
+
+// Log a message with the listed attributes, this must be implemented by the
+// backend.
+void pw_log_string_HandleMessageVaList(int level,
+ unsigned int flags,
+ const char* module_name,
+ const char* file_name,
+ int line_number,
+ const char* message,
+ va_list args);
+
+PW_EXTERN_C_END
diff --git a/pw_log_string/public/pw_log_string/log_string.h b/pw_log_string/public/pw_log_string/log_string.h
index 7200d8a02..4f529198c 100644
--- a/pw_log_string/public/pw_log_string/log_string.h
+++ b/pw_log_string/public/pw_log_string/log_string.h
@@ -13,29 +13,15 @@
// the License.
#pragma once
+#include "pw_log_string/handler.h"
#include "pw_preprocessor/arguments.h"
-#include "pw_preprocessor/compiler.h"
-#include "pw_preprocessor/util.h"
-
-PW_EXTERN_C_START
-
-// Log a message with the listed attributes.
-void pw_log_string_HandleMessage(int level,
- unsigned int flags,
- const char* module_name,
- const char* file_name,
- int line_number,
- const char* message,
- ...) PW_PRINTF_FORMAT(6, 7);
-
-PW_EXTERN_C_END
// Log a message with many attributes included. This is a backend implementation
// for the logging facade in pw_log/log.h.
//
-// This is the log macro frontend that funnels everything into the C handler
-// above, pw_log_string_HandleMessage. It's not efficient at the callsite, since
-// it passes many arguments.
+// This is the log macro frontend that funnels everything into the C-based
+// message handler facade, i.e. pw_log_string_HandleMessage. It's not efficient
+// at the callsite, since it passes many arguments.
#define PW_HANDLE_LOG(level, flags, message, ...) \
do { \
pw_log_string_HandleMessage((level), \
diff --git a/pw_log_zephyr/BUILD.gn b/pw_log_zephyr/BUILD.gn
new file mode 100644
index 000000000..68ffb756b
--- /dev/null
+++ b/pw_log_zephyr/BUILD.gn
@@ -0,0 +1,15 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# Zephyr only uses CMake, so this file is empty.
diff --git a/pw_metric/metric_service_nanopb.cc b/pw_metric/metric_service_nanopb.cc
index a69bd847b..0a1e7ccb3 100644
--- a/pw_metric/metric_service_nanopb.cc
+++ b/pw_metric/metric_service_nanopb.cc
@@ -71,7 +71,8 @@ class MetricWriter {
void Flush() {
if (response_.metrics_count) {
- response_writer_.Write(response_);
+ response_writer_.Write(response_)
+ .IgnoreError(); // TODO(pwbug/387): Handle Status properly
response_ = pw_metric_MetricResponse_init_zero;
}
}
diff --git a/pw_minimal_cpp_stdlib/BUILD.gn b/pw_minimal_cpp_stdlib/BUILD.gn
index 332e18c5c..bda41782c 100644
--- a/pw_minimal_cpp_stdlib/BUILD.gn
+++ b/pw_minimal_cpp_stdlib/BUILD.gn
@@ -70,10 +70,14 @@ pw_source_set("pw_minimal_cpp_stdlib") {
}
pw_test_group("tests") {
- tests = [
- ":minimal_cpp_stdlib_test",
- ":standard_library_test",
- ]
+ tests = []
+ if (host_os != "win") {
+ # TODO(amontanez): pw_minimal_cpp_stdlib tests do not build on windows.
+ tests += [
+ ":minimal_cpp_stdlib_test",
+ ":standard_library_test",
+ ]
+ }
}
pw_source_set("minimal_cpp_stdlib_isolated_test") {
diff --git a/pw_multisink/BUILD.bazel b/pw_multisink/BUILD.bazel
index 56b6e9c39..acd90027e 100644
--- a/pw_multisink/BUILD.bazel
+++ b/pw_multisink/BUILD.bazel
@@ -61,7 +61,7 @@ pw_cc_library(
"//pw_bytes",
"//pw_function",
"//pw_log",
- "//pw_log:log_pwpb",
+ "//pw_log:log_proto_cc.pwpb",
"//pw_status",
],
)
diff --git a/pw_multisink/CMakeLists.txt b/pw_multisink/CMakeLists.txt
index 25a01ac18..4d751805d 100644
--- a/pw_multisink/CMakeLists.txt
+++ b/pw_multisink/CMakeLists.txt
@@ -16,7 +16,112 @@ include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
pw_add_module_config(pw_multisink_CONFIG)
-pw_auto_add_simple_module(pw_multisink
+pw_add_module_library(pw_multisink.config
+ HEADERS
+ public/pw_multisink/config.h
+ PUBLIC_INCLUDES
+ public
PUBLIC_DEPS
${pw_multisink_CONFIG}
)
+
+pw_add_module_library(pw_multisink
+ HEADERS
+ public/pw_multisink/multisink.h
+ PUBLIC_INCLUDES
+ public
+ PUBLIC_DEPS
+ pw_bytes
+ pw_containers
+ pw_function
+ pw_multisink.config
+ pw_result
+ pw_ring_buffer
+ pw_status
+ pw_sync.interrupt_spin_lock
+ pw_sync.lock_annotations
+ pw_sync.mutex
+ SOURCES
+ multisink.cc
+ PRIVATE_DEPS
+ pw_assert
+ pw_log
+ pw_varint
+)
+
+pw_add_module_library(pw_multisink.util
+ HEADERS
+ public/pw_multisink/util.h
+ PUBLIC_INCLUDES
+ public
+ PUBLIC_DEPS
+ pw_log.protos.pwpb
+ pw_multisink
+ pw_status
+ SOURCES
+ util.cc
+ PRIVATE_DEPS
+ pw_bytes
+ pw_function
+)
+
+pw_add_module_library(pw_multisink.test_thread
+ HEADERS
+ public/pw_multisink/test_thread.h
+ PUBLIC_INCLUDES
+ public
+ PUBLIC_DEPS
+ pw_thread.thread
+)
+
+# Tests that use threads.
+# To instantiate this test based on a thread backend, create a pw_add_test
+# target that depends on this pw_add_module_library and a pw_add_module_library
+# that provides the implementation of pw_multisink.test_thread. See
+# pw_multisink.stl_multisink_test as an example.
+pw_add_module_library(pw_multisink.multisink_threaded_test
+ SOURCES
+ multisink_threaded_test.cc
+ PRIVATE_DEPS
+ pw_multisink
+ pw_multisink.test_thread
+ pw_thread.thread
+ pw_thread.yield
+ pw_unit_test
+)
+
+pw_add_test(pw_multisink.multisink_test
+ SOURCES
+ multisink_test.cc
+ DEPS
+ pw_function
+ pw_multisink
+ pw_polyfill.cstddef
+ pw_polyfill.span
+ pw_status
+ GROUPS
+ modules
+ pw_multisink
+)
+
+pw_add_module_library(pw_multisink.stl_test_thread
+ SOURCES
+ stl_test_thread.cc
+ PRIVATE_DEPS
+ pw_multisink.test_thread
+ pw_thread.thread
+ pw_thread_stl.thread
+)
+
+if("${pw_thread.thread_BACKEND}" STREQUAL "pw_thread_stl.thread")
+ pw_add_test(pw_multisink.stl_multisink_threaded_test
+ DEPS
+ pw_polyfill.cstddef
+ pw_polyfill.span
+ pw_multisink.multisink_threaded_test
+ pw_multisink.stl_test_thread
+ GROUPS
+ modules
+ pw_multisink
+ )
+endif()
diff --git a/pw_multisink/docs.rst b/pw_multisink/docs.rst
index 3dcd81330..c8cb8f31b 100644
--- a/pw_multisink/docs.rst
+++ b/pw_multisink/docs.rst
@@ -179,3 +179,10 @@ the drain to remove the peeked entry from the multisink and advance one entry.
// ... Handle send error ...
}
}
+
+Drop Counts
+===========
+`PeekEntry` and `PopEntry` return two different drop counts: one counts the
+entries a drain was skipped past because its buffer was too small or it
+drained too slowly, and the other counts entries that failed to be added to
+the MultiSink.
diff --git a/pw_multisink/multisink.cc b/pw_multisink/multisink.cc
index d6fe13942..b1b95f769 100644
--- a/pw_multisink/multisink.cc
+++ b/pw_multisink/multisink.cc
@@ -36,7 +36,10 @@ void MultiSink::HandleEntry(ConstByteSpan entry) {
void MultiSink::HandleDropped(uint32_t drop_count) {
std::lock_guard lock(lock_);
+ // Updating the sequence ID helps identify where the ingress drop happened when
+ // a drain peeks or pops.
sequence_id_ += drop_count;
+ total_ingress_drops_ += drop_count;
NotifyListeners();
}
@@ -73,10 +76,12 @@ Result<ConstByteSpan> MultiSink::PeekOrPopEntry(
ByteSpan buffer,
Request request,
uint32_t& drop_count_out,
+ uint32_t& ingress_drop_count_out,
uint32_t& entry_sequence_id_out) {
size_t bytes_read = 0;
entry_sequence_id_out = 0;
drop_count_out = 0;
+ ingress_drop_count_out = 0;
std::lock_guard lock(lock_);
PW_DCHECK_PTR_EQ(drain.multisink_, this);
@@ -106,6 +111,22 @@ Result<ConstByteSpan> MultiSink::PeekOrPopEntry(
drop_count_out = entry_sequence_id_out - drain.last_handled_sequence_id_ -
(peek_status.ok() ? 1 : 0);
+ // Only report the ingress drop count when the drain catches up to where the
+ // drop happened, accounting only for the drops found and no more, as
+ // indicated by the gap in sequence IDs.
+ if (drop_count_out > 0) {
+ ingress_drop_count_out =
+ std::min(drop_count_out,
+ total_ingress_drops_ - drain.last_handled_ingress_drop_count_);
+ // Remove the ingress drop count duplicated in drop_count_out.
+ drop_count_out -= ingress_drop_count_out;
+ // Check if all the ingress drops were reported.
+ drain.last_handled_ingress_drop_count_ =
+ total_ingress_drops_ > ingress_drop_count_out
+ ? total_ingress_drops_ - ingress_drop_count_out
+ : total_ingress_drops_;
+ }
+
// The Peek above may have failed due to OutOfRange, now that we've set the
// drop count see if we should return before attempting to pop.
if (peek_status.IsOutOfRange()) {
@@ -133,6 +154,7 @@ void MultiSink::AttachDrain(Drain& drain) {
oldest_entry_drain_.last_handled_sequence_id_;
}
drain.last_peek_sequence_id_ = drain.last_handled_sequence_id_;
+ drain.last_handled_ingress_drop_count_ = 0;
}
void MultiSink::DetachDrain(Drain& drain) {
@@ -202,23 +224,36 @@ Status MultiSink::Drain::PopEntry(const PeekedEntry& entry) {
}
Result<MultiSink::Drain::PeekedEntry> MultiSink::Drain::PeekEntry(
- ByteSpan buffer, uint32_t& drop_count_out) {
+ ByteSpan buffer,
+ uint32_t& drop_count_out,
+ uint32_t& ingress_drop_count_out) {
PW_DCHECK_NOTNULL(multisink_);
uint32_t entry_sequence_id_out;
- Result<ConstByteSpan> peek_result = multisink_->PeekOrPopEntry(
- *this, buffer, Request::kPeek, drop_count_out, entry_sequence_id_out);
+ Result<ConstByteSpan> peek_result =
+ multisink_->PeekOrPopEntry(*this,
+ buffer,
+ Request::kPeek,
+ drop_count_out,
+ ingress_drop_count_out,
+ entry_sequence_id_out);
if (!peek_result.ok()) {
return peek_result.status();
}
return PeekedEntry(peek_result.value(), entry_sequence_id_out);
}
-Result<ConstByteSpan> MultiSink::Drain::PopEntry(ByteSpan buffer,
- uint32_t& drop_count_out) {
+Result<ConstByteSpan> MultiSink::Drain::PopEntry(
+ ByteSpan buffer,
+ uint32_t& drop_count_out,
+ uint32_t& ingress_drop_count_out) {
PW_DCHECK_NOTNULL(multisink_);
uint32_t entry_sequence_id_out;
- return multisink_->PeekOrPopEntry(
- *this, buffer, Request::kPop, drop_count_out, entry_sequence_id_out);
+ return multisink_->PeekOrPopEntry(*this,
+ buffer,
+ Request::kPop,
+ drop_count_out,
+ ingress_drop_count_out,
+ entry_sequence_id_out);
}
} // namespace multisink
diff --git a/pw_multisink/multisink_test.cc b/pw_multisink/multisink_test.cc
index 4a6dc6b68..1f5bea91e 100644
--- a/pw_multisink/multisink_test.cc
+++ b/pw_multisink/multisink_test.cc
@@ -16,7 +16,9 @@
#include <array>
#include <cstdint>
+#include <cstring>
#include <optional>
+#include <span>
#include <string_view>
#include "gtest/gtest.h"
@@ -55,10 +57,12 @@ class MultiSinkTest : public ::testing::Test {
// Expects the peeked or popped message to equal the provided non-empty
// message, and the drop count to match. If `expected_message` is empty, the
// Pop call status expected is OUT_OF_RANGE.
- void ExpectMessageAndDropCount(Result<ConstByteSpan>& result,
- uint32_t result_drop_count,
- std::optional<ConstByteSpan> expected_message,
- uint32_t expected_drop_count) {
+ void ExpectMessageAndDropCounts(Result<ConstByteSpan>& result,
+ uint32_t result_drop_count,
+ uint32_t result_ingress_drop_count,
+ std::optional<ConstByteSpan> expected_message,
+ uint32_t expected_drop_count,
+ uint32_t expected_ingress_drop_count) {
if (!expected_message.has_value()) {
EXPECT_EQ(Status::OutOfRange(), result.status());
} else {
@@ -74,28 +78,40 @@ class MultiSinkTest : public ::testing::Test {
}
}
EXPECT_EQ(result_drop_count, expected_drop_count);
+ EXPECT_EQ(result_ingress_drop_count, expected_ingress_drop_count);
}
void VerifyPopEntry(Drain& drain,
std::optional<ConstByteSpan> expected_message,
- uint32_t expected_drop_count) {
+ uint32_t expected_drop_count,
+ uint32_t expected_ingress_drop_count) {
uint32_t drop_count = 0;
- Result<ConstByteSpan> result = drain.PopEntry(entry_buffer_, drop_count);
- ExpectMessageAndDropCount(
- result, drop_count, expected_message, expected_drop_count);
+ uint32_t ingress_drop_count = 0;
+ Result<ConstByteSpan> result =
+ drain.PopEntry(entry_buffer_, drop_count, ingress_drop_count);
+ ExpectMessageAndDropCounts(result,
+ drop_count,
+ ingress_drop_count,
+ expected_message,
+ expected_drop_count,
+ expected_ingress_drop_count);
}
void VerifyPeekResult(const Result<Drain::PeekedEntry>& peek_result,
uint32_t result_drop_count,
+ uint32_t result_ingress_drop_count,
std::optional<ConstByteSpan> expected_message,
- uint32_t expected_drop_count) {
+ uint32_t expected_drop_count,
+ uint32_t expected_ingress_drop_count) {
if (peek_result.ok()) {
ASSERT_FALSE(peek_result.value().entry().empty());
Result<ConstByteSpan> verify_result(peek_result.value().entry());
- ExpectMessageAndDropCount(verify_result,
- result_drop_count,
- expected_message,
- expected_drop_count);
+ ExpectMessageAndDropCounts(verify_result,
+ result_drop_count,
+ result_ingress_drop_count,
+ expected_message,
+ expected_drop_count,
+ expected_ingress_drop_count);
return;
}
if (expected_message.has_value()) {
@@ -126,29 +142,28 @@ TEST_F(MultiSinkTest, SingleDrain) {
// Single entry push and pop.
ExpectNotificationCount(listeners_[0], 1u);
- VerifyPopEntry(drains_[0], kMessage, 0u);
-
+ VerifyPopEntry(drains_[0], kMessage, 0u, 0u);
// Single empty entry push and pop.
multisink_.HandleEntry(ConstByteSpan());
ExpectNotificationCount(listeners_[0], 1u);
- VerifyPopEntry(drains_[0], ConstByteSpan(), 0u);
+ VerifyPopEntry(drains_[0], ConstByteSpan(), 0u, 0u);
// Multiple entries with intermittent drops.
multisink_.HandleEntry(kMessage);
multisink_.HandleDropped();
multisink_.HandleEntry(kMessage);
ExpectNotificationCount(listeners_[0], 3u);
- VerifyPopEntry(drains_[0], kMessage, 0u);
- VerifyPopEntry(drains_[0], kMessage, 1u);
+ VerifyPopEntry(drains_[0], kMessage, 0u, 0u);
+ VerifyPopEntry(drains_[0], kMessage, 0u, 1u);
// Send drops only.
multisink_.HandleDropped();
ExpectNotificationCount(listeners_[0], 1u);
- VerifyPopEntry(drains_[0], std::nullopt, 1u);
+ VerifyPopEntry(drains_[0], std::nullopt, 0u, 1u);
// Confirm out-of-range if no entries are expected.
ExpectNotificationCount(listeners_[0], 0u);
- VerifyPopEntry(drains_[0], std::nullopt, 0u);
+ VerifyPopEntry(drains_[0], std::nullopt, 0u, 0u);
}
TEST_F(MultiSinkTest, MultipleDrain) {
@@ -168,20 +183,20 @@ TEST_F(MultiSinkTest, MultipleDrain) {
// Drain one drain entirely.
ExpectNotificationCount(listeners_[0], 5u);
ExpectNotificationCount(listeners_[1], 5u);
- VerifyPopEntry(drains_[0], kMessage, 0u);
- VerifyPopEntry(drains_[0], kMessage, 0u);
- VerifyPopEntry(drains_[0], kMessage, 1u);
- VerifyPopEntry(drains_[0], std::nullopt, 1u);
- VerifyPopEntry(drains_[0], std::nullopt, 0u);
+ VerifyPopEntry(drains_[0], kMessage, 0u, 0u);
+ VerifyPopEntry(drains_[0], kMessage, 0u, 0u);
+ VerifyPopEntry(drains_[0], kMessage, 0u, 1u);
+ VerifyPopEntry(drains_[0], std::nullopt, 0u, 1u);
+ VerifyPopEntry(drains_[0], std::nullopt, 0u, 0u);
// Confirm the other drain can be drained separately.
ExpectNotificationCount(listeners_[0], 0u);
ExpectNotificationCount(listeners_[1], 0u);
- VerifyPopEntry(drains_[1], kMessage, 0u);
- VerifyPopEntry(drains_[1], kMessage, 0u);
- VerifyPopEntry(drains_[1], kMessage, 1u);
- VerifyPopEntry(drains_[1], std::nullopt, 1u);
- VerifyPopEntry(drains_[1], std::nullopt, 0u);
+ VerifyPopEntry(drains_[1], kMessage, 0u, 0u);
+ VerifyPopEntry(drains_[1], kMessage, 0u, 0u);
+ VerifyPopEntry(drains_[1], kMessage, 0u, 1u);
+ VerifyPopEntry(drains_[1], std::nullopt, 0u, 1u);
+ VerifyPopEntry(drains_[1], std::nullopt, 0u, 0u);
}
TEST_F(MultiSinkTest, LateDrainRegistration) {
@@ -192,13 +207,13 @@ TEST_F(MultiSinkTest, LateDrainRegistration) {
multisink_.AttachDrain(drains_[0]);
multisink_.AttachListener(listeners_[0]);
ExpectNotificationCount(listeners_[0], 1u);
- VerifyPopEntry(drains_[0], kMessage, 0u);
- VerifyPopEntry(drains_[0], std::nullopt, 0u);
+ VerifyPopEntry(drains_[0], kMessage, 0u, 0u);
+ VerifyPopEntry(drains_[0], std::nullopt, 0u, 0u);
multisink_.HandleEntry(kMessage);
ExpectNotificationCount(listeners_[0], 1u);
- VerifyPopEntry(drains_[0], kMessage, 0u);
- VerifyPopEntry(drains_[0], std::nullopt, 0u);
+ VerifyPopEntry(drains_[0], kMessage, 0u, 0u);
+ VerifyPopEntry(drains_[0], std::nullopt, 0u, 0u);
}
TEST_F(MultiSinkTest, DynamicDrainRegistration) {
@@ -213,7 +228,7 @@ TEST_F(MultiSinkTest, DynamicDrainRegistration) {
// Drain out one message and detach it.
ExpectNotificationCount(listeners_[0], 4u);
- VerifyPopEntry(drains_[0], kMessage, 1u);
+ VerifyPopEntry(drains_[0], kMessage, 0u, 1u);
multisink_.DetachDrain(drains_[0]);
multisink_.DetachListener(listeners_[0]);
@@ -223,14 +238,14 @@ TEST_F(MultiSinkTest, DynamicDrainRegistration) {
multisink_.AttachDrain(drains_[0]);
multisink_.AttachListener(listeners_[0]);
ExpectNotificationCount(listeners_[0], 1u);
- VerifyPopEntry(drains_[0], kMessage, 1u);
- VerifyPopEntry(drains_[0], kMessage, 1u);
- VerifyPopEntry(drains_[0], std::nullopt, 0u);
+ VerifyPopEntry(drains_[0], kMessage, 0u, 1u);
+ VerifyPopEntry(drains_[0], kMessage, 0u, 1u);
+ VerifyPopEntry(drains_[0], std::nullopt, 0u, 0u);
multisink_.HandleEntry(kMessage);
ExpectNotificationCount(listeners_[0], 1u);
- VerifyPopEntry(drains_[0], kMessage, 0u);
- VerifyPopEntry(drains_[0], std::nullopt, 0u);
+ VerifyPopEntry(drains_[0], kMessage, 0u, 0u);
+ VerifyPopEntry(drains_[0], std::nullopt, 0u, 0u);
}
TEST_F(MultiSinkTest, TooSmallBuffer) {
@@ -238,16 +253,17 @@ TEST_F(MultiSinkTest, TooSmallBuffer) {
// Insert an entry and a drop, then try to read into an insufficient buffer.
uint32_t drop_count = 0;
+ uint32_t ingress_drop_count = 0;
multisink_.HandleDropped();
multisink_.HandleEntry(kMessage);
// Attempting to acquire an entry with a small buffer should result in
// RESOURCE_EXHAUSTED and remove it.
- Result<ConstByteSpan> result =
- drains_[0].PopEntry(std::span(entry_buffer_, 1), drop_count);
+ Result<ConstByteSpan> result = drains_[0].PopEntry(
+ std::span(entry_buffer_, 1), drop_count, ingress_drop_count);
EXPECT_EQ(result.status(), Status::ResourceExhausted());
- VerifyPopEntry(drains_[0], std::nullopt, 2u);
+ VerifyPopEntry(drains_[0], std::nullopt, 1u, 1u);
}
TEST_F(MultiSinkTest, Iterator) {
@@ -258,9 +274,9 @@ TEST_F(MultiSinkTest, Iterator) {
multisink_.HandleEntry(kMessage);
multisink_.HandleEntry(kMessage);
- VerifyPopEntry(drains_[0], kMessage, 0u);
- VerifyPopEntry(drains_[0], kMessage, 0u);
- VerifyPopEntry(drains_[0], kMessage, 0u);
+ VerifyPopEntry(drains_[0], kMessage, 0u, 0u);
+ VerifyPopEntry(drains_[0], kMessage, 0u, 0u);
+ VerifyPopEntry(drains_[0], kMessage, 0u, 0u);
// Confirm that the iterator still observes the messages in the ring buffer.
size_t iterated_entries = 0;
@@ -301,8 +317,10 @@ TEST_F(MultiSinkTest, PeekEntryNoEntries) {
// Peek empty multisink.
uint32_t drop_count = 0;
- auto peek_result = drains_[0].PeekEntry(entry_buffer_, drop_count);
- VerifyPeekResult(peek_result, drop_count, std::nullopt, 0);
+ uint32_t ingress_drop_count = 0;
+ auto peek_result =
+ drains_[0].PeekEntry(entry_buffer_, drop_count, ingress_drop_count);
+ VerifyPeekResult(peek_result, 0, drop_count, std::nullopt, 0, 0);
}
TEST_F(MultiSinkTest, PeekAndPop) {
@@ -313,42 +331,176 @@ TEST_F(MultiSinkTest, PeekAndPop) {
multisink_.HandleEntry(kMessage);
multisink_.HandleEntry(kMessageOther);
uint32_t drop_count = 0;
- auto first_peek_result = drains_[0].PeekEntry(entry_buffer_, drop_count);
- VerifyPeekResult(first_peek_result, drop_count, kMessage, 0);
+ uint32_t ingress_drop_count = 0;
+ auto first_peek_result =
+ drains_[0].PeekEntry(entry_buffer_, drop_count, ingress_drop_count);
+ VerifyPeekResult(
+ first_peek_result, drop_count, ingress_drop_count, kMessage, 0, 0);
// Multiple peeks must return the front message.
- auto peek_duplicate = drains_[0].PeekEntry(entry_buffer_, drop_count);
- VerifyPeekResult(peek_duplicate, drop_count, kMessage, 0);
+ auto peek_duplicate =
+ drains_[0].PeekEntry(entry_buffer_, drop_count, ingress_drop_count);
+ VerifyPeekResult(
+ peek_duplicate, drop_count, ingress_drop_count, kMessage, 0, 0);
// A second drain must peek the front message.
- auto peek_other_drain = drains_[1].PeekEntry(entry_buffer_, drop_count);
- VerifyPeekResult(peek_other_drain, drop_count, kMessage, 0);
+ auto peek_other_drain =
+ drains_[1].PeekEntry(entry_buffer_, drop_count, ingress_drop_count);
+ VerifyPeekResult(
+ peek_other_drain, drop_count, ingress_drop_count, kMessage, 0, 0);
// After a drain pops a peeked entry, the next peek call must return the next
// message.
ASSERT_EQ(drains_[0].PopEntry(first_peek_result.value()), OkStatus());
- auto second_peek_result = drains_[0].PeekEntry(entry_buffer_, drop_count);
- VerifyPeekResult(second_peek_result, drop_count, kMessageOther, 0);
+ auto second_peek_result =
+ drains_[0].PeekEntry(entry_buffer_, drop_count, ingress_drop_count);
+ VerifyPeekResult(
+ second_peek_result, drop_count, ingress_drop_count, kMessageOther, 0, 0);
// Slower readers must be unchanged.
auto peek_other_drain_duplicate =
- drains_[1].PeekEntry(entry_buffer_, drop_count);
- VerifyPeekResult(peek_other_drain_duplicate, drop_count, kMessage, 0);
+ drains_[1].PeekEntry(entry_buffer_, drop_count, ingress_drop_count);
+ VerifyPeekResult(peek_other_drain_duplicate,
+ drop_count,
+ ingress_drop_count,
+ kMessage,
+ 0,
+ 0);
// PopEntry prior to popping the previously peeked entry.
- VerifyPopEntry(drains_[0], kMessageOther, 0);
+ VerifyPopEntry(drains_[0], kMessageOther, 0, 0);
// Popping an entry already handled must not trigger errors.
ASSERT_EQ(drains_[0].PopEntry(second_peek_result.value()), OkStatus());
// Popping with an old peek context must not trigger errors.
ASSERT_EQ(drains_[0].PopEntry(first_peek_result.value()), OkStatus());
// Multisink is empty, pops and peeks should trigger OUT_OF_RANGE.
- VerifyPopEntry(drains_[0], std::nullopt, 0);
- auto empty_peek_result = drains_[0].PeekEntry(entry_buffer_, drop_count);
- VerifyPeekResult(empty_peek_result, drop_count, std::nullopt, 0);
+ VerifyPopEntry(drains_[0], std::nullopt, 0, 0);
+ auto empty_peek_result =
+ drains_[0].PeekEntry(entry_buffer_, drop_count, ingress_drop_count);
+ VerifyPeekResult(
+ empty_peek_result, drop_count, ingress_drop_count, std::nullopt, 0, 0);
// // Slower readers must be unchanged.
auto peek_other_drain_unchanged =
- drains_[1].PeekEntry(entry_buffer_, drop_count);
- VerifyPeekResult(peek_other_drain_unchanged, drop_count, kMessage, 0);
+ drains_[1].PeekEntry(entry_buffer_, drop_count, ingress_drop_count);
+ VerifyPeekResult(peek_other_drain_unchanged,
+ drop_count,
+ ingress_drop_count,
+ kMessage,
+ 0,
+ 0);
+}
+
+TEST_F(MultiSinkTest, PeekReportsIngressDropCount) {
+ multisink_.AttachDrain(drains_[0]);
+
+ // Peek entry after multisink has some entries.
+ multisink_.HandleEntry(kMessage);
+ const uint32_t ingress_drops = 10;
+ multisink_.HandleDropped(ingress_drops);
+
+ uint32_t drop_count = 0;
+ uint32_t ingress_drop_count = 0;
+ auto peek_result1 =
+ drains_[0].PeekEntry(entry_buffer_, drop_count, ingress_drop_count);
+ // No drops reported until the drain finds a gap in the sequence IDs.
+ VerifyPeekResult(
+ peek_result1, drop_count, ingress_drop_count, kMessage, 0, 0);
+
+ // Popping the peeked entry advances the drain, and a new peek will find the
+ // gap in sequence IDs.
+ ASSERT_EQ(drains_[0].PopEntry(peek_result1.value()), OkStatus());
+ auto peek_result2 =
+ drains_[0].PeekEntry(entry_buffer_, drop_count, ingress_drop_count);
+ ASSERT_EQ(peek_result2.status(), Status::OutOfRange());
+ EXPECT_EQ(drop_count, 0u);
+ EXPECT_EQ(ingress_drop_count, ingress_drops);
+}
+
+TEST_F(MultiSinkTest, PeekReportsSlowDrainDropCount) {
+ multisink_.AttachDrain(drains_[0]);
+
+ // Add entries until buffer is full and drain has to be advanced.
+ // The sequence ID takes 1 byte when less than 128.
+ const size_t max_multisink_messages = 128;
+ const size_t buffer_entry_size = kBufferSize / max_multisink_messages;
+ // Account for 1 byte of preamble (sequence ID) and 1 byte of data size.
+ const size_t message_size = buffer_entry_size - 2;
+ std::array<std::byte, message_size> message;
+ std::memset(message.data(), 'a', message.size());
+ for (size_t i = 0; i < max_multisink_messages; ++i) {
+ multisink_.HandleEntry(message);
+ }
+
+ // At this point the buffer is full, but the sequence ID will take 1 more byte
+ // in the preamble, meaning that adding N new entries, drops N + 1 entries.
+ // Account for that offset.
+ const size_t expected_drops = 5;
+ for (size_t i = 1; i < expected_drops; ++i) {
+ multisink_.HandleEntry(message);
+ }
+
+ uint32_t drop_count = 0;
+ uint32_t ingress_drop_count = 0;
+ auto peek_result =
+ drains_[0].PeekEntry(entry_buffer_, drop_count, ingress_drop_count);
+ VerifyPeekResult(
+ peek_result, drop_count, ingress_drop_count, message, expected_drops, 0);
+}
+
+TEST_F(MultiSinkTest, IngressDropCountOverflow) {
+ multisink_.AttachDrain(drains_[0]);
+
+ // Make drain's last handled drop larger than multisink drop count, which
+ // overflowed.
+ const uint32_t drop_count_close_to_overflow =
+ std::numeric_limits<uint32_t>::max() - 3;
+ multisink_.HandleDropped(drop_count_close_to_overflow);
+ multisink_.HandleEntry(kMessage);
+
+ // Catch up drain's drop count.
+ uint32_t drop_count = 0;
+ uint32_t ingress_drop_count = 0;
+ auto peek_result1 =
+ drains_[0].PeekEntry(entry_buffer_, drop_count, ingress_drop_count);
+ VerifyPeekResult(peek_result1,
+ drop_count,
+ ingress_drop_count,
+ kMessage,
+ 0,
+ drop_count_close_to_overflow);
+ // Popping the peeked entry advances the drain, and a new peek will find the
+ // gap in sequence IDs.
+ ASSERT_EQ(drains_[0].PopEntry(peek_result1.value()), OkStatus());
+
+ // Overflow multisink's drop count.
+ const uint32_t expected_ingress_drop_count = 10;
+ multisink_.HandleDropped(expected_ingress_drop_count);
+
+ auto peek_result2 =
+ drains_[0].PeekEntry(entry_buffer_, drop_count, ingress_drop_count);
+ ASSERT_EQ(peek_result2.status(), Status::OutOfRange());
+ EXPECT_EQ(drop_count, 0u);
+ EXPECT_EQ(ingress_drop_count, expected_ingress_drop_count);
+
+ multisink_.HandleEntry(kMessage);
+ auto peek_result3 =
+ drains_[0].PeekEntry(entry_buffer_, drop_count, ingress_drop_count);
+ VerifyPeekResult(
+ peek_result3, drop_count, ingress_drop_count, kMessage, 0, 0);
+}
+
+TEST_F(MultiSinkTest, DetachedDrainReportsDropCount) {
+ multisink_.AttachDrain(drains_[0]);
+
+ const uint32_t ingress_drops = 10;
+ multisink_.HandleDropped(ingress_drops);
+ multisink_.HandleEntry(kMessage);
+ VerifyPopEntry(drains_[0], kMessage, 0, ingress_drops);
+
+ // Detaching and attaching drain should report the same drops.
+ multisink_.DetachDrain(drains_[0]);
+ multisink_.AttachDrain(drains_[0]);
+ VerifyPopEntry(drains_[0], kMessage, 0, ingress_drops);
}
TEST(UnsafeIteration, NoLimit) {
diff --git a/pw_multisink/multisink_threaded_test.cc b/pw_multisink/multisink_threaded_test.cc
index 88eb1c45c..f9941b55a 100644
--- a/pw_multisink/multisink_threaded_test.cc
+++ b/pw_multisink/multisink_threaded_test.cc
@@ -12,7 +12,9 @@
// License for the specific language governing permissions and limitations under
// the License.
+#include <cstddef>
#include <cstdint>
+#include <span>
#include "gtest/gtest.h"
#include "pw_containers/vector.h"
@@ -98,9 +100,10 @@ class LogPopReaderThread : public thread::ThreadCore {
virtual void ReadAllEntries() {
do {
uint32_t drop_count = 0;
+ uint32_t ingress_drop_count = 0;
const Result<ConstByteSpan> possible_entry =
- drain_.PopEntry(entry_buffer_, drop_count);
- total_drop_count_ += drop_count;
+ drain_.PopEntry(entry_buffer_, drop_count, ingress_drop_count);
+ total_drop_count_ += drop_count + ingress_drop_count;
if (possible_entry.status().IsOutOfRange()) {
pw::this_thread::yield();
continue;
@@ -136,9 +139,10 @@ class LogPeekAndCommitReaderThread : public LogPopReaderThread {
virtual void ReadAllEntries() {
do {
uint32_t drop_count = 0;
+ uint32_t ingress_drop_count = 0;
const Result<MultiSink::Drain::PeekedEntry> possible_entry =
- drain_.PeekEntry(entry_buffer_, drop_count);
- total_drop_count_ += drop_count;
+ drain_.PeekEntry(entry_buffer_, drop_count, ingress_drop_count);
+ total_drop_count_ += drop_count + ingress_drop_count;
if (possible_entry.status().IsOutOfRange()) {
pw::this_thread::yield();
continue;
diff --git a/pw_multisink/public/pw_multisink/multisink.h b/pw_multisink/public/pw_multisink/multisink.h
index 6627840d3..5942304f4 100644
--- a/pw_multisink/public/pw_multisink/multisink.h
+++ b/pw_multisink/public/pw_multisink/multisink.h
@@ -63,15 +63,18 @@ class MultiSink {
constexpr Drain()
: last_handled_sequence_id_(0),
last_peek_sequence_id_(0),
+ last_handled_ingress_drop_count_(0),
multisink_(nullptr) {}
// Returns the next available entry if it exists and acquires the latest
// drop count in parallel.
//
- // The `drop_count_out` is set to the number of entries that were dropped
- // since the last call to PopEntry, if the read operation was successful or
- // returned OutOfRange (i.e. no entries to read). Otherwise, it is set to
- // zero, so should always be processed.
+ // If the read operation was successful or returned OutOfRange (i.e. no
+ // entries to read) then the `drop_count_out` is set to the number of
+ // entries that were dropped since the last call to PopEntry due to
+ // advancing the drain, and `ingress_drop_count_out` is set to the number of
+ // logs that were dropped before being added to the MultiSink. Otherwise,
+ // the drop counts are set to zero, so should always be processed.
//
// Drop counts are internally maintained with a 32-bit counter. If
// UINT32_MAX entries have been handled by the attached multisink between
@@ -129,8 +132,22 @@ class MultiSink {
// FAILED_PRECONDITION - The drain must be attached to a sink.
// RESOURCE_EXHAUSTED - The provided buffer was not large enough to store
// the next available entry, which was discarded.
- Result<ConstByteSpan> PopEntry(ByteSpan buffer, uint32_t& drop_count_out)
+ Result<ConstByteSpan> PopEntry(ByteSpan buffer,
+ uint32_t& drop_count_out,
+ uint32_t& ingress_drop_count)
PW_LOCKS_EXCLUDED(multisink_->lock_);
+ // Overload that combines drop counts.
+ // TODO(cachinchilla): remove when downstream projects migrated to new API.
+ [[deprecated("Use PopEntry with different drop count outputs")]] Result<
+ ConstByteSpan>
+ PopEntry(ByteSpan buffer, uint32_t& drop_count_out)
+ PW_LOCKS_EXCLUDED(multisink_->lock_) {
+ uint32_t ingress_drop_count = 0;
+ Result<ConstByteSpan> result =
+ PopEntry(buffer, drop_count_out, ingress_drop_count);
+ drop_count_out += ingress_drop_count;
+ return result;
+ }
// Removes the previously peeked entry from the multisink.
//
@@ -159,7 +176,8 @@ class MultiSink {
PW_LOCKS_EXCLUDED(multisink_->lock_);
// Returns a copy of the next available entry if it exists and acquires the
- // latest drop count, without moving the drain forward, except if there is a
+ // latest drop count if the drain was advanced, and the latest ingress drop
+ // count, without moving the drain forward, except if there is a
// RESOURCE_EXHAUSTED error when peeking, in which case the drain is
// automatically advanced.
// The `drop_count_out` follows the same logic as `PopEntry`. The user must
@@ -174,7 +192,9 @@ class MultiSink {
// FAILED_PRECONDITION - The drain must be attached to a sink.
// RESOURCE_EXHAUSTED - The provided buffer was not large enough to store
// the next available entry, which was discarded.
- Result<PeekedEntry> PeekEntry(ByteSpan buffer, uint32_t& drop_count_out)
+ Result<PeekedEntry> PeekEntry(ByteSpan buffer,
+ uint32_t& drop_count_out,
+ uint32_t& ingress_drop_count)
PW_LOCKS_EXCLUDED(multisink_->lock_);
// Drains are not copyable or movable.
@@ -191,6 +211,7 @@ class MultiSink {
ring_buffer::PrefixedEntryRingBufferMulti::Reader reader_;
uint32_t last_handled_sequence_id_;
uint32_t last_peek_sequence_id_;
+ uint32_t last_handled_ingress_drop_count_;
MultiSink* multisink_;
};
@@ -289,7 +310,8 @@ class MultiSink {
}
// Constructs a multisink using a ring buffer backed by the provided buffer.
- MultiSink(ByteSpan buffer) : ring_buffer_(true), sequence_id_(0) {
+ MultiSink(ByteSpan buffer)
+ : ring_buffer_(true), sequence_id_(0), total_ingress_drops_(0) {
ring_buffer_.SetBuffer(buffer)
.IgnoreError(); // TODO(pwbug/387): Handle Status properly
AttachDrain(oldest_entry_drain_);
@@ -385,6 +407,7 @@ class MultiSink {
ByteSpan buffer,
Request request,
uint32_t& drop_count_out,
+ uint32_t& ingress_drop_count_out,
uint32_t& entry_sequence_id_out)
PW_LOCKS_EXCLUDED(lock_);
@@ -396,6 +419,7 @@ class MultiSink {
ring_buffer::PrefixedEntryRingBufferMulti ring_buffer_ PW_GUARDED_BY(lock_);
Drain oldest_entry_drain_ PW_GUARDED_BY(lock_);
uint32_t sequence_id_ PW_GUARDED_BY(lock_);
+ uint32_t total_ingress_drops_ PW_GUARDED_BY(lock_);
LockType lock_;
};
diff --git a/pw_package/docs.rst b/pw_package/docs.rst
index 24037e55a..78d99b8f4 100644
--- a/pw_package/docs.rst
+++ b/pw_package/docs.rst
@@ -16,6 +16,13 @@ this module instead are listed below.
* The dependency needs to be "installed" into the system in some manner beyond
just extraction and thus isn't a good match for distribution with CIPD.
+Pigweed itself includes a number of packages that simply clone git repositories.
+In general, these should not be used by projects using Pigweed. Pigweed uses
+these packages to avoid using submodules so downstream projects don't have
+multiple copies of a given repository in their source tree. Projects using
+Pigweed should use submodules instead of packages because submodules are
+supported by much more mature tooling: git.
+
-----
Usage
-----
diff --git a/pw_package/py/BUILD.gn b/pw_package/py/BUILD.gn
index 3f86f8709..1af696e78 100644
--- a/pw_package/py/BUILD.gn
+++ b/pw_package/py/BUILD.gn
@@ -32,10 +32,13 @@ pw_python_package("py") {
"pw_package/packages/chromium_verifier.py",
"pw_package/packages/crlset.py",
"pw_package/packages/freertos.py",
+ "pw_package/packages/googletest.py",
"pw_package/packages/mbedtls.py",
"pw_package/packages/micro_ecc.py",
"pw_package/packages/nanopb.py",
+ "pw_package/packages/pico_sdk.py",
"pw_package/packages/protobuf.py",
+ "pw_package/packages/smartfusion_mss.py",
"pw_package/packages/stm32cube.py",
"pw_package/pigweed_packages.py",
]
diff --git a/pw_package/py/pw_package/packages/googletest.py b/pw_package/py/pw_package/packages/googletest.py
new file mode 100644
index 000000000..adfb4e8d8
--- /dev/null
+++ b/pw_package/py/pw_package/packages/googletest.py
@@ -0,0 +1,40 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Install and check status of googletest."""
+
+import pathlib
+from typing import Sequence
+
+import pw_package.git_repo
+import pw_package.package_manager
+
+
+class Googletest(pw_package.git_repo.GitRepo):
+ """Install and check status of googletest."""
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args,
+ name='googletest',
+ url='https://github.com/google/googletest',
+ commit='073293463e1733c5e931313da1c3f1de044e1db3',
+ **kwargs)
+
+ def info(self, path: pathlib.Path) -> Sequence[str]:
+ return (
+ f'{self.name} installed in: {path}',
+ "Enable by running 'gn args out' and adding this line:",
+ f' dir_pw_third_party_googletest = "{path}"',
+ )
+
+
+pw_package.package_manager.register(Googletest)
diff --git a/pw_package/py/pw_package/packages/pico_sdk.py b/pw_package/py/pw_package/packages/pico_sdk.py
new file mode 100644
index 000000000..2a68fc6e1
--- /dev/null
+++ b/pw_package/py/pw_package/packages/pico_sdk.py
@@ -0,0 +1,40 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Install and check status of the Raspberry Pi Pico SDK."""
+
+import pathlib
+from typing import Sequence
+
+import pw_package.git_repo
+import pw_package.package_manager
+
+
+class PiPicoSdk(pw_package.git_repo.GitRepo):
+ """Install and check status of the Raspberry Pi Pico SDK."""
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args,
+ name='pico_sdk',
+ url='https://github.com/raspberrypi/pico-sdk',
+ commit='2062372d203b372849d573f252cf7c6dc2800c0a',
+ **kwargs)
+
+ def info(self, path: pathlib.Path) -> Sequence[str]:
+ return (
+ f'{self.name} installed in: {path}',
+ "Enable by running 'gn args out' and adding this line:",
+ f' PICO_SRC_DIR = "{path}"',
+ )
+
+
+pw_package.package_manager.register(PiPicoSdk)
diff --git a/pw_package/py/pw_package/packages/smartfusion_mss.py b/pw_package/py/pw_package/packages/smartfusion_mss.py
new file mode 100644
index 000000000..b9dbe49c7
--- /dev/null
+++ b/pw_package/py/pw_package/packages/smartfusion_mss.py
@@ -0,0 +1,40 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Install and check status of SmartFusion MSS."""
+
+import pathlib
+from typing import Sequence
+
+import pw_package.git_repo
+import pw_package.package_manager
+
+
+class SmartfusionMss(pw_package.git_repo.GitRepo):
+ """Install and check status of SmartFusion MSS."""
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args,
+ name='smartfusion_mss',
+ url='https://github.com/seank/smartfusion_mss',
+ commit='9f47db73d3df786eab04d082645da5e735e63d28',
+ **kwargs)
+
+ def info(self, path: pathlib.Path) -> Sequence[str]:
+ return (
+ f'{self.name} installed in: {path}',
+ "Enable by running 'gn args out' and adding this line:",
+ f' dir_pw_third_party_smartfusion_mss = "{path}"',
+ )
+
+
+pw_package.package_manager.register(SmartfusionMss)
diff --git a/pw_package/py/pw_package/pigweed_packages.py b/pw_package/py/pw_package/pigweed_packages.py
index de4a9e0f7..c60468c9b 100644
--- a/pw_package/py/pw_package/pigweed_packages.py
+++ b/pw_package/py/pw_package/pigweed_packages.py
@@ -21,10 +21,13 @@ from pw_package.packages import boringssl # pylint: disable=unused-import
from pw_package.packages import chromium_verifier # pylint: disable=unused-import
from pw_package.packages import crlset # pylint: disable=unused-import
from pw_package.packages import freertos # pylint: disable=unused-import
+from pw_package.packages import googletest # pylint: disable=unused-import
from pw_package.packages import mbedtls # pylint: disable=unused-import
from pw_package.packages import micro_ecc # pylint: disable=unused-import
from pw_package.packages import nanopb
+from pw_package.packages import pico_sdk # pylint: disable=unused-import
from pw_package.packages import protobuf # pylint: disable=unused-import
+from pw_package.packages import smartfusion_mss # pylint: disable=unused-import
from pw_package.packages import stm32cube # pylint: disable=unused-import
diff --git a/pw_preprocessor/docs.rst b/pw_preprocessor/docs.rst
index ac6fd9991..15e9104e4 100644
--- a/pw_preprocessor/docs.rst
+++ b/pw_preprocessor/docs.rst
@@ -166,6 +166,12 @@ Macros for compiler-specific features, such as attributes or builtins.
evaluates to a non zero constant integer if the attribute is supported or 0
if not.
+.. c:macro:: PW_HAVE_CPP_ATTRIBUTE(x)
+
+ Wrapper around `__has_cpp_attribute`, which was introduced in the C++20
+ standard. It is supported by compilers even if C++20 is not in use. Evaluates
+ to a non-zero constant integer if the C++ attribute is supported or 0 if not.
+
.. c:macro:: PW_PRAGMA(contents)
Expands to a _Pragma with the contents as a string. _Pragma must take a
@@ -197,6 +203,25 @@ Macros for compiler-specific features, such as attributes or builtins.
// Driver handler replaced with default unless overridden.
void USART_DriverHandler(void) PW_ALIAS(DefaultDriverHandler);
+.. c:macro:: PW_ATTRIBUTE_LIFETIME_BOUND
+
+ PW_ATTRIBUTE_LIFETIME_BOUND indicates that a resource owned by a function
+ parameter or implicit object parameter is retained by the return value of the
+ annotated function (or, for a parameter of a constructor, in the value of the
+ constructed object). This attribute causes warnings to be produced if a
+ temporary object does not live long enough.
+
+ When applied to a reference parameter, the referenced object is assumed to be
+ retained by the return value of the function. When applied to a non-reference
+ parameter (for example, a pointer or a class type), all temporaries
+ referenced by the parameter are assumed to be retained by the return value of
+ the function.
+
+ See also the upstream documentation:
+ https://clang.llvm.org/docs/AttributeReference.html#lifetimebound
+
+ This is a copy of ABSL_ATTRIBUTE_LIFETIME_BOUND.
+
Modifying compiler diagnostics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``pw_preprocessor/compiler.h`` provides macros for enabling or disabling
diff --git a/pw_preprocessor/public/pw_preprocessor/compiler.h b/pw_preprocessor/public/pw_preprocessor/compiler.h
index d682260b4..690a061b9 100644
--- a/pw_preprocessor/public/pw_preprocessor/compiler.h
+++ b/pw_preprocessor/public/pw_preprocessor/compiler.h
@@ -127,7 +127,17 @@
#define PW_HAVE_ATTRIBUTE(x) __has_attribute(x)
#else
#define PW_HAVE_ATTRIBUTE(x) 0
-#endif
+#endif // __has_attribute
+
+// A function-like feature checking macro that accepts C++11 style attributes.
+// It's a wrapper around __has_cpp_attribute
+// (https://en.cppreference.com/w/cpp/feature_test), borrowed from
+// ABSL_HAVE_CPP_ATTRIBUTE. If there is no __has_cpp_attribute, evaluates to 0.
+#if defined(__cplusplus) && defined(__has_cpp_attribute)
+#define PW_HAVE_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
+#else
+#define PW_HAVE_CPP_ATTRIBUTE(x) 0
+#endif // defined(__cplusplus) && defined(__has_cpp_attribute)
#define _PW_REQUIRE_SEMICOLON \
static_assert(1, "This macro must be terminated with a semicolon")
@@ -182,3 +192,27 @@
// // Driver handler replaced with default unless overridden.
// void USART_DriverHandler(void) PW_ALIAS(DefaultDriverHandler);
#define PW_ALIAS(aliased_to) __attribute__((weak, alias(#aliased_to)))
+
+// PW_ATTRIBUTE_LIFETIME_BOUND indicates that a resource owned by a function
+// parameter or implicit object parameter is retained by the return value of the
+// annotated function (or, for a parameter of a constructor, in the value of the
+// constructed object). This attribute causes warnings to be produced if a
+// temporary object does not live long enough.
+//
+// When applied to a reference parameter, the referenced object is assumed to be
+// retained by the return value of the function. When applied to a non-reference
+// parameter (for example, a pointer or a class type), all temporaries
+// referenced by the parameter are assumed to be retained by the return value of
+// the function.
+//
+// See also the upstream documentation:
+// https://clang.llvm.org/docs/AttributeReference.html#lifetimebound
+//
+// This is a copy of ABSL_ATTRIBUTE_LIFETIME_BOUND.
+#if PW_HAVE_CPP_ATTRIBUTE(clang::lifetimebound)
+#define PW_ATTRIBUTE_LIFETIME_BOUND [[clang::lifetimebound]]
+#elif PW_HAVE_ATTRIBUTE(lifetimebound)
+#define PW_ATTRIBUTE_LIFETIME_BOUND __attribute__((lifetimebound))
+#else
+#define PW_ATTRIBUTE_LIFETIME_BOUND
+#endif // PW_ATTRIBUTE_LIFETIME_BOUND
diff --git a/pw_presubmit/py/pw_presubmit/build.py b/pw_presubmit/py/pw_presubmit/build.py
index 1af34e928..9da43e13f 100644
--- a/pw_presubmit/py/pw_presubmit/build.py
+++ b/pw_presubmit/py/pw_presubmit/build.py
@@ -52,7 +52,7 @@ def bazel(ctx: PresubmitContext, cmd: str, *args: str) -> None:
'--verbose_failures',
'--verbose_explanations',
'--worker_verbose',
- f'--symlink_prefix={ctx.output_dir / "bazel-"}',
+ f'--symlink_prefix={ctx.output_dir / ".bazel-"}',
*args,
cwd=ctx.root,
env=env_with_clang_vars())
diff --git a/pw_presubmit/py/pw_presubmit/cli.py b/pw_presubmit/py/pw_presubmit/cli.py
index 3cb363eed..c03f5cba2 100644
--- a/pw_presubmit/py/pw_presubmit/cli.py
+++ b/pw_presubmit/py/pw_presubmit/cli.py
@@ -27,7 +27,8 @@ DEFAULT_PATH = Path('out', 'presubmit')
_OUTPUT_PATH_README = '''\
This directory was created by pw_presubmit to run presubmit checks for the
-{repo} repository. This directory may be deleted safely.
+{repo} repository. This directory is not used by the regular GN or CMake Ninja
+builds. It may be deleted safely.
'''
diff --git a/pw_presubmit/py/pw_presubmit/format_code.py b/pw_presubmit/py/pw_presubmit/format_code.py
index bce7b7055..623b5444d 100755
--- a/pw_presubmit/py/pw_presubmit/format_code.py
+++ b/pw_presubmit/py/pw_presubmit/format_code.py
@@ -41,6 +41,7 @@ except ImportError:
os.path.abspath(__file__))))
import pw_presubmit
+import pw_cli.env
from pw_presubmit import cli, git_repo
from pw_presubmit.tools import exclude_paths, file_summary, log_run, plural
@@ -433,9 +434,11 @@ def format_paths_in_repo(paths: Collection[Union[Path, str]],
# If this is a Git repo, list the original paths with git ls-files or diff.
if repo:
+ project_root = Path(pw_cli.env.pigweed_environment().PW_PROJECT_ROOT)
_LOG.info(
'Formatting %s',
- git_repo.describe_files(repo, Path.cwd(), base, paths, exclude))
+ git_repo.describe_files(repo, Path.cwd(), base, paths, exclude,
+ project_root))
# Add files from Git and remove duplicates.
files = sorted(
diff --git a/pw_presubmit/py/pw_presubmit/git_repo.py b/pw_presubmit/py/pw_presubmit/git_repo.py
index e1afff892..25b3afa86 100644
--- a/pw_presubmit/py/pw_presubmit/git_repo.py
+++ b/pw_presubmit/py/pw_presubmit/git_repo.py
@@ -178,16 +178,24 @@ def _describe_constraints(git_root: Path, repo_path: Path,
', '.join(p.pattern for p in exclude) + ')')
-def describe_files(git_root: Path, repo_path: Path, commit: Optional[str],
+def describe_files(git_root: Path,
+ repo_path: Path,
+ commit: Optional[str],
pathspecs: Collection[PathOrStr],
- exclude: Collection[Pattern]) -> str:
+ exclude: Collection[Pattern],
+ project_root: Path = None) -> str:
"""Completes 'Doing something to ...' for a set of files in a Git repo."""
constraints = list(
_describe_constraints(git_root, repo_path, commit, pathspecs, exclude))
+
+ name = git_root.name
+ if project_root and project_root != git_root:
+ name = str(git_root.relative_to(project_root))
+
if not constraints:
- return f'all files in the {git_root.name} repo'
+ return f'all files in the {name} repo'
- msg = f'files in the {git_root.name} repo'
+ msg = f'files in the {name} repo'
if len(constraints) == 1:
return f'{msg} {constraints[0]}'
diff --git a/pw_presubmit/py/pw_presubmit/pigweed_presubmit.py b/pw_presubmit/py/pw_presubmit/pigweed_presubmit.py
index 57d3c1c0c..d7b65d32c 100755
--- a/pw_presubmit/py/pw_presubmit/pigweed_presubmit.py
+++ b/pw_presubmit/py/pw_presubmit/pigweed_presubmit.py
@@ -242,6 +242,23 @@ def gn_software_update_build(ctx: PresubmitContext):
@filter_paths(endswith=_BUILD_EXTENSIONS)
+def gn_pw_system_demo_build(ctx: PresubmitContext):
+ build.install_package(ctx.package_root, 'freertos')
+ build.install_package(ctx.package_root, 'nanopb')
+ build.install_package(ctx.package_root, 'stm32cube_f4')
+ build.gn_gen(
+ ctx.root,
+ ctx.output_dir,
+ dir_pw_third_party_freertos='"{}"'.format(ctx.package_root /
+ 'freertos'),
+ dir_pw_third_party_nanopb='"{}"'.format(ctx.package_root / 'nanopb'),
+ dir_pw_third_party_stm32cube_f4='"{}"'.format(ctx.package_root /
+ 'stm32cube_f4'),
+ )
+ build.ninja(ctx.output_dir, 'pw_system_demo')
+
+
+@filter_paths(endswith=_BUILD_EXTENSIONS)
def gn_qemu_build(ctx: PresubmitContext):
build.gn_gen(ctx.root, ctx.output_dir)
build.ninja(ctx.output_dir, *_at_all_optimization_levels('qemu_gcc'))
@@ -503,6 +520,7 @@ _EXCLUDE_FROM_COPYRIGHT_NOTICE: Sequence[str] = (
r'\bAUTHORS$',
r'\bLICENSE$',
r'\bOWNERS$',
+ r'\bPIGWEED_MODULES$',
r'\brequirements.txt$',
r'\bgo.(mod|sum)$',
r'\bpackage.json$',
@@ -514,6 +532,7 @@ _EXCLUDE_FROM_COPYRIGHT_NOTICE: Sequence[str] = (
r'\.json$',
r'\.png$',
r'\.svg$',
+ r'\.xml$',
# Documentation
r'\.md$',
r'\.rst$',
@@ -625,7 +644,7 @@ _GN_SOURCES_IN_BUILD = ('setup.cfg', '.toml', '.rst', '.py',
@filter_paths(endswith=(*_GN_SOURCES_IN_BUILD, 'BUILD', '.bzl', '.gn', '.gni'),
- exclude=['zephyr.*/'])
+ exclude=['zephyr.*/', 'android.*/'])
def source_is_in_build_files(ctx: PresubmitContext):
"""Checks that source files are in the GN and Bazel builds."""
missing = build.check_builds_for_files(
@@ -767,6 +786,7 @@ OTHER_CHECKS = (
gn_full_qemu_check,
gn_clang_build,
gn_gcc_build,
+ gn_pw_system_demo_build,
renode_check,
stm32f429i,
)
diff --git a/pw_presubmit/py/pw_presubmit/presubmit.py b/pw_presubmit/py/pw_presubmit/presubmit.py
index 599ef1736..08585d04e 100644
--- a/pw_presubmit/py/pw_presubmit/presubmit.py
+++ b/pw_presubmit/py/pw_presubmit/presubmit.py
@@ -444,7 +444,8 @@ def run(program: Sequence[Callable],
_LOG.info(
'Checking %s',
- git_repo.describe_files(repo, repo, base, pathspecs, exclude))
+ git_repo.describe_files(repo, repo, base, pathspecs, exclude,
+ root))
if output_directory is None:
output_directory = root / '.presubmit'
diff --git a/pw_protobuf/BUILD.bazel b/pw_protobuf/BUILD.bazel
index 247ba156b..316576e2b 100644
--- a/pw_protobuf/BUILD.bazel
+++ b/pw_protobuf/BUILD.bazel
@@ -58,6 +58,7 @@ pw_cc_library(
":config",
"//pw_assert",
"//pw_bytes",
+ "//pw_containers:vector",
"//pw_polyfill:bit",
"//pw_polyfill:overrides",
"//pw_result",
@@ -180,17 +181,33 @@ proto_library(
)
pw_proto_library(
- name = "codegen_test_protos_pwpb",
- deps = [":codegen_test_proto"],
+ name = "codegen_test_proto_cc",
+ deps = [
+ ":codegen_test_proto",
+ ":common_protos",
+ ],
+)
+
+pw_cc_test(
+ name = "codegen_decoder_test",
+ srcs = [
+ "codegen_decoder_test.cc",
+ ],
+ deps = [
+ ":codegen_test_proto_cc.pwpb",
+ ":pw_protobuf",
+ "//pw_span",
+ "//pw_unit_test",
+ ],
)
pw_cc_test(
- name = "codegen_test",
+ name = "codegen_encoder_test",
srcs = [
- "codegen_test.cc",
+ "codegen_encoder_test.cc",
],
deps = [
- ":codegen_test_protos_pwpb",
+ ":codegen_test_proto_cc.pwpb",
":pw_protobuf",
"//pw_span",
"//pw_unit_test",
diff --git a/pw_protobuf/BUILD.gn b/pw_protobuf/BUILD.gn
index e5b06cd78..626e5cc5d 100644
--- a/pw_protobuf/BUILD.gn
+++ b/pw_protobuf/BUILD.gn
@@ -45,6 +45,7 @@ pw_source_set("pw_protobuf") {
public_configs = [ ":public_include_path" ]
public_deps = [
":config",
+ "$dir_pw_containers:vector",
"$dir_pw_stream:interval_reader",
"$dir_pw_varint:stream",
dir_pw_assert,
@@ -97,7 +98,8 @@ pw_doc_group("docs") {
pw_test_group("tests") {
tests = [
- ":codegen_test",
+ ":codegen_decoder_test",
+ ":codegen_encoder_test",
":decoder_test",
":encoder_test",
":encoder_fuzzer",
@@ -125,9 +127,14 @@ pw_test("find_test") {
sources = [ "find_test.cc" ]
}
-pw_test("codegen_test") {
+pw_test("codegen_decoder_test") {
deps = [ ":codegen_test_protos.pwpb" ]
- sources = [ "codegen_test.cc" ]
+ sources = [ "codegen_decoder_test.cc" ]
+}
+
+pw_test("codegen_encoder_test") {
+ deps = [ ":codegen_test_protos.pwpb" ]
+ sources = [ "codegen_encoder_test.cc" ]
}
pw_test("serialized_size_test") {
diff --git a/pw_protobuf/CMakeLists.txt b/pw_protobuf/CMakeLists.txt
index 271c06a75..73987db21 100644
--- a/pw_protobuf/CMakeLists.txt
+++ b/pw_protobuf/CMakeLists.txt
@@ -42,6 +42,7 @@ pw_add_module_library(pw_protobuf
PUBLIC_DEPS
pw_assert
pw_bytes
+ pw_containers.vector
pw_protobuf.config
pw_polyfill.span
pw_polyfill.cstddef
@@ -102,9 +103,20 @@ pw_add_test(pw_protobuf.find_test
pw_protobuf
)
-pw_add_test(pw_protobuf.codegen_test
+pw_add_test(pw_protobuf.codegen_decoder_test
SOURCES
- codegen_test.cc
+ codegen_decoder_test.cc
+ DEPS
+ pw_protobuf
+ pw_protobuf.codegen_test_protos.pwpb
+ GROUPS
+ modules
+ pw_protobuf
+)
+
+pw_add_test(pw_protobuf.codegen_encoder_test
+ SOURCES
+ codegen_encoder_test.cc
DEPS
pw_protobuf
pw_protobuf.codegen_test_protos.pwpb
diff --git a/pw_protobuf/codegen_decoder_test.cc b/pw_protobuf/codegen_decoder_test.cc
new file mode 100644
index 000000000..657e6e9d0
--- /dev/null
+++ b/pw_protobuf/codegen_decoder_test.cc
@@ -0,0 +1,936 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#include <array>
+#include <span>
+#include <stdexcept>
+#include <string_view>
+
+#include "gtest/gtest.h"
+#include "pw_bytes/span.h"
+#include "pw_containers/vector.h"
+#include "pw_status/status.h"
+#include "pw_status/status_with_size.h"
+#include "pw_stream/memory_stream.h"
+
+// These header files contain the code generated by the pw_protobuf plugin.
+// They are re-generated every time the tests are built and are used by the
+// tests to ensure that the interface remains consistent.
+//
+// The purpose of the tests in this file is primarily to verify that the
+// generated C++ interface is valid rather than the correctness of the
+// low-level encoder.
+#include "pw_protobuf_test_protos/full_test.pwpb.h"
+#include "pw_protobuf_test_protos/importer.pwpb.h"
+#include "pw_protobuf_test_protos/non_pw_package.pwpb.h"
+#include "pw_protobuf_test_protos/proto2.pwpb.h"
+#include "pw_protobuf_test_protos/repeated.pwpb.h"
+
+namespace pw::protobuf {
+namespace {
+
+using namespace pw::protobuf::test;
+
+TEST(Codegen, StreamDecoder) {
+ // clang-format off
+ constexpr uint8_t proto_data[] = {
+ // pigweed.magic_number
+ 0x08, 0x49,
+ // pigweed.ziggy
+ 0x10, 0xdd, 0x01,
+ // pigweed.error_message
+ 0x2a, 0x10, 'n', 'o', 't', ' ', 'a', ' ',
+ 't', 'y', 'p', 'e', 'w', 'r', 'i', 't', 'e', 'r',
+ // pigweed.bin
+ 0x40, 0x01,
+ // pigweed.pigweed
+ 0x3a, 0x02,
+ // pigweed.pigweed.status
+ 0x08, 0x02,
+ // pigweed.proto
+ 0x4a, 0x56,
+ // pigweed.proto.bin
+ 0x10, 0x00,
+ // pigweed.proto.pigweed_pigweed_bin
+ 0x18, 0x00,
+ // pigweed.proto.pigweed_protobuf_bin
+ 0x20, 0x01,
+ // pigweed.proto.meta
+ 0x2a, 0x0f,
+ // pigweed.proto.meta.file_name
+ 0x0a, 0x0b, '/', 'e', 't', 'c', '/', 'p', 'a', 's', 's', 'w', 'd',
+ // pigweed.proto.meta.status
+ 0x10, 0x02,
+ // pigweed.proto.nested_pigweed
+ 0x0a, 0x3d,
+ // pigweed.proto.nested_pigweed.error_message
+ 0x2a, 0x10, 'h', 'e', 'r', 'e', ' ', 'w', 'e', ' ',
+ 'g', 'o', ' ', 'a', 'g', 'a', 'i', 'n',
+ // pigweed.proto.nested_pigweed.magic_number
+ 0x08, 0xe8, 0x04,
+ // pigweed.proto.nested_pigweed.device_info
+ 0x32, 0x26,
+ // pigweed.proto.nested_pigweed.device_info.attributes[0]
+ 0x22, 0x10,
+ // pigweed.proto.nested_pigweed.device_info.attributes[0].key
+ 0x0a, 0x07, 'v', 'e', 'r', 's', 'i', 'o', 'n',
+ // pigweed.proto.nested_pigweed.device_info.attributes[0].value
+ 0x12, 0x05, '5', '.', '3', '.', '1',
+ // pigweed.proto.nested_pigweed.device_info.attributes[1]
+ 0x22, 0x10,
+ // pigweed.proto.nested_pigweed.device_info.attributes[1].key
+ 0x0a, 0x04, 'c', 'h', 'i', 'p',
+ // pigweed.proto.nested_pigweed.device_info.attributes[1].value
+ 0x12, 0x08, 'l', 'e', 'f', 't', '-', 's', 'o', 'c',
+ // pigweed.proto.nested_pigweed.device_info.status
+ 0x18, 0x03,
+ // pigweed.id[0]
+ 0x52, 0x02,
+ // pigweed.id[0].id
+ 0x08, 0x31,
+ // pigweed.id[1]
+ 0x52, 0x02,
+ // pigweed.id[1].id
+ 0x08, 0x39,
+ // pigweed.id[2]
+ 0x52, 0x02,
+ // pigweed.id[2].id
+ 0x08, 0x4b,
+ // pigweed.id[3]
+ 0x52, 0x02,
+ // pigweed.id[3].id
+ 0x08, 0x67,
+ // pigweed.id[4]
+ 0x52, 0x03,
+ // pigweed.id[4].id
+ 0x08, 0x8d, 0x01
+
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(proto_data)));
+ Pigweed::StreamDecoder pigweed(reader);
+
+ EXPECT_EQ(pigweed.Next(), OkStatus());
+ EXPECT_EQ(pigweed.Field().value(), Pigweed::Fields::MAGIC_NUMBER);
+ Result<uint32_t> magic_number = pigweed.ReadMagicNumber();
+ EXPECT_EQ(magic_number.status(), OkStatus());
+ EXPECT_EQ(magic_number.value(), 0x49u);
+
+ EXPECT_EQ(pigweed.Next(), OkStatus());
+ EXPECT_EQ(pigweed.Field().value(), Pigweed::Fields::ZIGGY);
+ Result<int32_t> ziggy = pigweed.ReadZiggy();
+ EXPECT_EQ(ziggy.status(), OkStatus());
+ EXPECT_EQ(ziggy.value(), -111);
+
+ constexpr std::string_view kExpectedErrorMessage{"not a typewriter"};
+
+ EXPECT_EQ(pigweed.Next(), OkStatus());
+ EXPECT_EQ(pigweed.Field().value(), Pigweed::Fields::ERROR_MESSAGE);
+ std::array<char, 32> error_message{};
+ StatusWithSize error_message_status = pigweed.ReadErrorMessage(error_message);
+ EXPECT_EQ(error_message_status.status(), OkStatus());
+ EXPECT_EQ(error_message_status.size(), kExpectedErrorMessage.size());
+ EXPECT_EQ(std::memcmp(error_message.data(),
+ kExpectedErrorMessage.data(),
+ kExpectedErrorMessage.size()),
+ 0);
+
+ EXPECT_EQ(pigweed.Next(), OkStatus());
+ EXPECT_EQ(pigweed.Field().value(), Pigweed::Fields::BIN);
+ Result<Pigweed::Protobuf::Binary> bin = pigweed.ReadBin();
+ EXPECT_EQ(bin.status(), OkStatus());
+ EXPECT_EQ(bin.value(), Pigweed::Protobuf::Binary::ZERO);
+
+ EXPECT_EQ(pigweed.Next(), OkStatus());
+ EXPECT_EQ(pigweed.Field().value(), Pigweed::Fields::PIGWEED);
+ {
+ Pigweed::Pigweed::StreamDecoder pigweed_pigweed =
+ pigweed.GetPigweedDecoder();
+
+ EXPECT_EQ(pigweed_pigweed.Next(), OkStatus());
+ EXPECT_EQ(pigweed_pigweed.Field().value(),
+ Pigweed::Pigweed::Fields::STATUS);
+ Result<Bool> pigweed_status = pigweed_pigweed.ReadStatus();
+ EXPECT_EQ(pigweed_status.status(), OkStatus());
+ EXPECT_EQ(pigweed_status.value(), Bool::FILE_NOT_FOUND);
+
+ EXPECT_EQ(pigweed_pigweed.Next(), Status::OutOfRange());
+ }
+
+ EXPECT_EQ(pigweed.Next(), OkStatus());
+ EXPECT_EQ(pigweed.Field().value(), Pigweed::Fields::PROTO);
+ {
+ Proto::StreamDecoder proto = pigweed.GetProtoDecoder();
+
+ EXPECT_EQ(proto.Next(), OkStatus());
+ EXPECT_EQ(proto.Field().value(), Proto::Fields::BIN);
+ Result<Proto::Binary> proto_bin = proto.ReadBin();
+ EXPECT_EQ(proto_bin.status(), OkStatus());
+ EXPECT_EQ(proto_bin.value(), Proto::Binary::OFF);
+
+ EXPECT_EQ(proto.Next(), OkStatus());
+ EXPECT_EQ(proto.Field().value(), Proto::Fields::PIGWEED_PIGWEED_BIN);
+ Result<Pigweed::Pigweed::Binary> proto_pigweed_bin =
+ proto.ReadPigweedPigweedBin();
+ EXPECT_EQ(proto_pigweed_bin.status(), OkStatus());
+ EXPECT_EQ(proto_pigweed_bin.value(), Pigweed::Pigweed::Binary::ZERO);
+
+ EXPECT_EQ(proto.Next(), OkStatus());
+ EXPECT_EQ(proto.Field().value(), Proto::Fields::PIGWEED_PROTOBUF_BIN);
+ Result<Pigweed::Protobuf::Binary> proto_protobuf_bin =
+ proto.ReadPigweedProtobufBin();
+ EXPECT_EQ(proto_protobuf_bin.status(), OkStatus());
+ EXPECT_EQ(proto_protobuf_bin.value(), Pigweed::Protobuf::Binary::ZERO);
+
+ EXPECT_EQ(proto.Next(), OkStatus());
+ EXPECT_EQ(proto.Field().value(), Proto::Fields::META);
+ {
+ Pigweed::Protobuf::Compiler::StreamDecoder meta = proto.GetMetaDecoder();
+
+ constexpr std::string_view kExpectedFileName{"/etc/passwd"};
+
+ EXPECT_EQ(meta.Next(), OkStatus());
+ EXPECT_EQ(meta.Field().value(),
+ Pigweed::Protobuf::Compiler::Fields::FILE_NAME);
+ std::array<char, 32> meta_file_name{};
+ StatusWithSize meta_file_name_status = meta.ReadFileName(meta_file_name);
+ EXPECT_EQ(meta_file_name_status.status(), OkStatus());
+ EXPECT_EQ(meta_file_name_status.size(), kExpectedFileName.size());
+ EXPECT_EQ(std::memcmp(meta_file_name.data(),
+ kExpectedFileName.data(),
+ kExpectedFileName.size()),
+ 0);
+
+ EXPECT_EQ(meta.Next(), OkStatus());
+ EXPECT_EQ(meta.Field().value(),
+ Pigweed::Protobuf::Compiler::Fields::STATUS);
+ Result<Pigweed::Protobuf::Compiler::Status> meta_status =
+ meta.ReadStatus();
+ EXPECT_EQ(meta_status.status(), OkStatus());
+ EXPECT_EQ(meta_status.value(),
+ Pigweed::Protobuf::Compiler::Status::FUBAR);
+
+ EXPECT_EQ(meta.Next(), Status::OutOfRange());
+ }
+
+ EXPECT_EQ(proto.Next(), OkStatus());
+ EXPECT_EQ(proto.Field().value(), Proto::Fields::PIGWEED);
+ {
+ Pigweed::StreamDecoder proto_pigweed = proto.GetPigweedDecoder();
+
+ constexpr std::string_view kExpectedProtoErrorMessage{"here we go again"};
+
+ EXPECT_EQ(proto_pigweed.Next(), OkStatus());
+ EXPECT_EQ(proto_pigweed.Field().value(), Pigweed::Fields::ERROR_MESSAGE);
+ std::array<char, 32> proto_pigweed_error_message{};
+ StatusWithSize proto_pigweed_error_message_status =
+ proto_pigweed.ReadErrorMessage(proto_pigweed_error_message);
+ EXPECT_EQ(proto_pigweed_error_message_status.status(), OkStatus());
+ EXPECT_EQ(proto_pigweed_error_message_status.size(),
+ kExpectedProtoErrorMessage.size());
+ EXPECT_EQ(std::memcmp(proto_pigweed_error_message.data(),
+ kExpectedProtoErrorMessage.data(),
+ kExpectedProtoErrorMessage.size()),
+ 0);
+
+ EXPECT_EQ(proto_pigweed.Next(), OkStatus());
+ EXPECT_EQ(proto_pigweed.Field().value(), Pigweed::Fields::MAGIC_NUMBER);
+ Result<uint32_t> proto_pigweed_magic_number =
+ proto_pigweed.ReadMagicNumber();
+ EXPECT_EQ(proto_pigweed_magic_number.status(), OkStatus());
+ EXPECT_EQ(proto_pigweed_magic_number.value(), 616u);
+
+ EXPECT_EQ(proto_pigweed.Next(), OkStatus());
+ EXPECT_EQ(proto_pigweed.Field().value(), Pigweed::Fields::DEVICE_INFO);
+ {
+ DeviceInfo::StreamDecoder device_info =
+ proto_pigweed.GetDeviceInfoDecoder();
+
+ EXPECT_EQ(device_info.Next(), OkStatus());
+ EXPECT_EQ(device_info.Field().value(), DeviceInfo::Fields::ATTRIBUTES);
+ {
+ KeyValuePair::StreamDecoder key_value_pair =
+ device_info.GetAttributesDecoder();
+
+ constexpr std::string_view kExpectedKey{"version"};
+ constexpr std::string_view kExpectedValue{"5.3.1"};
+
+ EXPECT_EQ(key_value_pair.Next(), OkStatus());
+ EXPECT_EQ(key_value_pair.Field().value(), KeyValuePair::Fields::KEY);
+ std::array<char, 32> key{};
+ StatusWithSize key_status = key_value_pair.ReadKey(key);
+ EXPECT_EQ(key_status.status(), OkStatus());
+ EXPECT_EQ(key_status.size(), kExpectedKey.size());
+ EXPECT_EQ(
+ std::memcmp(key.data(), kExpectedKey.data(), kExpectedKey.size()),
+ 0);
+
+ EXPECT_EQ(key_value_pair.Next(), OkStatus());
+ EXPECT_EQ(key_value_pair.Field().value(),
+ KeyValuePair::Fields::VALUE);
+ std::array<char, 32> value{};
+ StatusWithSize value_status = key_value_pair.ReadValue(value);
+ EXPECT_EQ(value_status.status(), OkStatus());
+ EXPECT_EQ(value_status.size(), kExpectedValue.size());
+ EXPECT_EQ(
+ std::memcmp(
+ value.data(), kExpectedValue.data(), kExpectedValue.size()),
+ 0);
+
+ EXPECT_EQ(key_value_pair.Next(), Status::OutOfRange());
+ }
+
+ EXPECT_EQ(device_info.Next(), OkStatus());
+ EXPECT_EQ(device_info.Field().value(), DeviceInfo::Fields::ATTRIBUTES);
+ {
+ KeyValuePair::StreamDecoder key_value_pair =
+ device_info.GetAttributesDecoder();
+
+ constexpr std::string_view kExpectedKey{"chip"};
+ constexpr std::string_view kExpectedValue{"left-soc"};
+
+ EXPECT_EQ(key_value_pair.Next(), OkStatus());
+ EXPECT_EQ(key_value_pair.Field().value(), KeyValuePair::Fields::KEY);
+ std::array<char, 32> key{};
+ StatusWithSize key_status = key_value_pair.ReadKey(key);
+ EXPECT_EQ(key_status.status(), OkStatus());
+ EXPECT_EQ(key_status.size(), kExpectedKey.size());
+ EXPECT_EQ(
+ std::memcmp(key.data(), kExpectedKey.data(), kExpectedKey.size()),
+ 0);
+
+ EXPECT_EQ(key_value_pair.Next(), OkStatus());
+ EXPECT_EQ(key_value_pair.Field().value(),
+ KeyValuePair::Fields::VALUE);
+ std::array<char, 32> value{};
+ StatusWithSize value_status = key_value_pair.ReadValue(value);
+ EXPECT_EQ(value_status.status(), OkStatus());
+ EXPECT_EQ(value_status.size(), kExpectedValue.size());
+ EXPECT_EQ(
+ std::memcmp(
+ value.data(), kExpectedValue.data(), kExpectedValue.size()),
+ 0);
+
+ EXPECT_EQ(key_value_pair.Next(), Status::OutOfRange());
+ }
+
+ EXPECT_EQ(device_info.Next(), OkStatus());
+ EXPECT_EQ(device_info.Field().value(), DeviceInfo::Fields::STATUS);
+ Result<DeviceInfo::DeviceStatus> device_info_status =
+ device_info.ReadStatus();
+ EXPECT_EQ(device_info_status.status(), OkStatus());
+ EXPECT_EQ(device_info_status.value(), DeviceInfo::DeviceStatus::PANIC);
+
+ EXPECT_EQ(device_info.Next(), Status::OutOfRange());
+ }
+
+ EXPECT_EQ(proto_pigweed.Next(), Status::OutOfRange());
+ }
+
+ EXPECT_EQ(proto.Next(), Status::OutOfRange());
+ }
+
+ for (int i = 0; i < 5; ++i) {
+ EXPECT_EQ(pigweed.Next(), OkStatus());
+ EXPECT_EQ(pigweed.Field().value(), Pigweed::Fields::ID);
+
+ Proto::ID::StreamDecoder id = pigweed.GetIdDecoder();
+
+ EXPECT_EQ(id.Next(), OkStatus());
+ EXPECT_EQ(id.Field().value(), Proto::ID::Fields::ID);
+ Result<uint32_t> id_id = id.ReadId();
+ EXPECT_EQ(id_id.status(), OkStatus());
+ EXPECT_EQ(id_id.value(), 5u * i * i + 3 * i + 49);
+
+ EXPECT_EQ(id.Next(), Status::OutOfRange());
+ }
+
+ EXPECT_EQ(pigweed.Next(), Status::OutOfRange());
+}
+
+TEST(Codegen, ResourceExhausted) {
+ // clang-format off
+ constexpr uint8_t proto_data[] = {
+ // pigweed.error_message
+ 0x2a, 0x10, 'n', 'o', 't', ' ', 'a', ' ',
+ 't', 'y', 'p', 'e', 'w', 'r', 'i', 't', 'e', 'r',
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(proto_data)));
+ Pigweed::StreamDecoder pigweed(reader);
+
+ EXPECT_EQ(pigweed.Next(), OkStatus());
+ EXPECT_EQ(pigweed.Field().value(), Pigweed::Fields::ERROR_MESSAGE);
+ std::array<char, 8> error_message{};
+ StatusWithSize error_message_status = pigweed.ReadErrorMessage(error_message);
+ EXPECT_EQ(error_message_status.status(), Status::ResourceExhausted());
+ EXPECT_EQ(error_message_status.size(), 0u);
+
+ EXPECT_EQ(pigweed.Next(), Status::OutOfRange());
+}
+
+TEST(Codegen, BytesReader) {
+ // clang-format off
+ constexpr uint8_t proto_data[] = {
+ // pigweed.error_message
+ 0x2a, 0x10, 'n', 'o', 't', ' ', 'a', ' ',
+ 't', 'y', 'p', 'e', 'w', 'r', 'i', 't', 'e', 'r',
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(proto_data)));
+ Pigweed::StreamDecoder pigweed(reader);
+
+ constexpr std::string_view kExpectedErrorMessage{"not a typewriter"};
+
+ EXPECT_EQ(pigweed.Next(), OkStatus());
+ EXPECT_EQ(pigweed.Field().value(), Pigweed::Fields::ERROR_MESSAGE);
+ {
+ StreamDecoder::BytesReader bytes_reader = pigweed.GetErrorMessageReader();
+ EXPECT_EQ(bytes_reader.field_size(), kExpectedErrorMessage.size());
+
+ std::array<std::byte, 32> error_message{};
+ Result<ByteSpan> result = bytes_reader.Read(error_message);
+ EXPECT_EQ(result.status(), OkStatus());
+ EXPECT_EQ(result.value().size(), kExpectedErrorMessage.size());
+ EXPECT_EQ(std::memcmp(result.value().data(),
+ kExpectedErrorMessage.data(),
+ kExpectedErrorMessage.size()),
+ 0);
+
+ result = bytes_reader.Read(error_message);
+ EXPECT_EQ(result.status(), Status::OutOfRange());
+ }
+
+ EXPECT_EQ(pigweed.Next(), Status::OutOfRange());
+}
+
+TEST(Codegen, Enum) {
+ // clang-format off
+ constexpr uint8_t proto_data[] = {
+    // pigweed.bin (valid value)
+ 0x40, 0x01,
+ // pigweed.bin (invalid value)
+ 0x40, 0xff,
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(proto_data)));
+ Pigweed::StreamDecoder pigweed(reader);
+
+ EXPECT_EQ(pigweed.Next(), OkStatus());
+ EXPECT_EQ(pigweed.Field().value(), Pigweed::Fields::BIN);
+ Result<Pigweed::Protobuf::Binary> bin = pigweed.ReadBin();
+ EXPECT_EQ(bin.status(), OkStatus());
+ EXPECT_EQ(bin.value(), Pigweed::Protobuf::Binary::ZERO);
+
+ EXPECT_EQ(pigweed.Next(), OkStatus());
+ EXPECT_EQ(pigweed.Field().value(), Pigweed::Fields::BIN);
+ bin = pigweed.ReadBin();
+ EXPECT_EQ(bin.status(), Status::DataLoss());
+}
+
+TEST(Codegen, ImportedEnum) {
+ // clang-format off
+ constexpr uint8_t proto_data[] = {
+    // result.status (valid value)
+ 0x08, 0x01,
+ // result.status (invalid value)
+ 0x08, 0xff,
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(proto_data)));
+ TestResult::StreamDecoder test_result(reader);
+
+ EXPECT_EQ(test_result.Next(), OkStatus());
+ EXPECT_EQ(test_result.Field().value(), TestResult::Fields::STATUS);
+ Result<imported::Status> status = test_result.ReadStatus();
+ EXPECT_EQ(status.status(), OkStatus());
+ EXPECT_EQ(status.value(), imported::Status::NOT_OK);
+
+ EXPECT_EQ(test_result.Next(), OkStatus());
+ EXPECT_EQ(test_result.Field().value(), TestResult::Fields::STATUS);
+ status = test_result.ReadStatus();
+ EXPECT_EQ(status.status(), Status::DataLoss());
+}
+
+TEST(CodegenRepeated, NonPackedScalar) {
+ // clang-format off
+ constexpr uint8_t proto_data[] = {
+ // uint32s[], v={0, 16, 32, 48}
+ 0x08, 0x00,
+ 0x08, 0x10,
+ 0x08, 0x20,
+ 0x08, 0x30,
+ // fixed32s[]. v={0, 16, 32, 48}
+ 0x35, 0x00, 0x00, 0x00, 0x00,
+ 0x35, 0x10, 0x00, 0x00, 0x00,
+ 0x35, 0x20, 0x00, 0x00, 0x00,
+ 0x35, 0x30, 0x00, 0x00, 0x00,
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(proto_data)));
+ RepeatedTest::StreamDecoder repeated_test(reader);
+
+ for (int i = 0; i < 4; ++i) {
+ EXPECT_EQ(repeated_test.Next(), OkStatus());
+ EXPECT_EQ(repeated_test.Field().value(), RepeatedTest::Fields::UINT32S);
+
+ Result<uint32_t> result = repeated_test.ReadUint32s();
+ EXPECT_EQ(result.status(), OkStatus());
+ EXPECT_EQ(result.value(), i * 16u);
+ }
+
+ for (int i = 0; i < 4; ++i) {
+ EXPECT_EQ(repeated_test.Next(), OkStatus());
+ EXPECT_EQ(repeated_test.Field().value(), RepeatedTest::Fields::FIXED32S);
+
+ Result<uint32_t> result = repeated_test.ReadFixed32s();
+ EXPECT_EQ(result.status(), OkStatus());
+ EXPECT_EQ(result.value(), i * 16u);
+ }
+
+ EXPECT_EQ(repeated_test.Next(), Status::OutOfRange());
+}
+
+TEST(CodegenRepeated, NonPackedScalarVector) {
+ // clang-format off
+ constexpr uint8_t proto_data[] = {
+ // uint32s[], v={0, 16, 32, 48}
+ 0x08, 0x00,
+ 0x08, 0x10,
+ 0x08, 0x20,
+ 0x08, 0x30,
+ // fixed32s[]. v={0, 16, 32, 48}
+ 0x35, 0x00, 0x00, 0x00, 0x00,
+ 0x35, 0x10, 0x00, 0x00, 0x00,
+ 0x35, 0x20, 0x00, 0x00, 0x00,
+ 0x35, 0x30, 0x00, 0x00, 0x00,
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(proto_data)));
+ RepeatedTest::StreamDecoder repeated_test(reader);
+
+ pw::Vector<uint32_t, 8> uint32s{};
+
+ for (int i = 0; i < 4; ++i) {
+ EXPECT_EQ(repeated_test.Next(), OkStatus());
+ EXPECT_EQ(repeated_test.Field().value(), RepeatedTest::Fields::UINT32S);
+
+ Status status = repeated_test.ReadUint32s(uint32s);
+ EXPECT_EQ(status, OkStatus());
+ EXPECT_EQ(uint32s.size(), i + 1u);
+ }
+
+ for (int i = 0; i < 4; ++i) {
+ EXPECT_EQ(uint32s[i], i * 16u);
+ }
+
+ pw::Vector<uint32_t, 8> fixed32s{};
+
+ for (int i = 0; i < 4; ++i) {
+ EXPECT_EQ(repeated_test.Next(), OkStatus());
+ EXPECT_EQ(repeated_test.Field().value(), RepeatedTest::Fields::FIXED32S);
+
+ Status status = repeated_test.ReadFixed32s(fixed32s);
+ EXPECT_EQ(status, OkStatus());
+ EXPECT_EQ(fixed32s.size(), i + 1u);
+ }
+
+ for (int i = 0; i < 4; ++i) {
+ EXPECT_EQ(fixed32s[i], i * 16u);
+ }
+
+ EXPECT_EQ(repeated_test.Next(), Status::OutOfRange());
+}
+
+TEST(CodegenRepeated, NonPackedVarintScalarVectorFull) {
+ // clang-format off
+ constexpr uint8_t proto_data[] = {
+ // uint32s[], v={0, 16, 32, 48}
+ 0x08, 0x00,
+ 0x08, 0x10,
+ 0x08, 0x20,
+ 0x08, 0x30,
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(proto_data)));
+ RepeatedTest::StreamDecoder repeated_test(reader);
+
+ pw::Vector<uint32_t, 2> uint32s{};
+
+ EXPECT_EQ(repeated_test.Next(), OkStatus());
+ EXPECT_EQ(repeated_test.Field().value(), RepeatedTest::Fields::UINT32S);
+ Status status = repeated_test.ReadUint32s(uint32s);
+ EXPECT_EQ(status, OkStatus());
+ EXPECT_EQ(uint32s.size(), 1u);
+
+ EXPECT_EQ(repeated_test.Next(), OkStatus());
+ EXPECT_EQ(repeated_test.Field().value(), RepeatedTest::Fields::UINT32S);
+ status = repeated_test.ReadUint32s(uint32s);
+ EXPECT_EQ(status, OkStatus());
+ EXPECT_EQ(uint32s.size(), 2u);
+
+ EXPECT_EQ(repeated_test.Next(), OkStatus());
+ EXPECT_EQ(repeated_test.Field().value(), RepeatedTest::Fields::UINT32S);
+ status = repeated_test.ReadUint32s(uint32s);
+ EXPECT_EQ(status, Status::ResourceExhausted());
+ EXPECT_EQ(uint32s.size(), 2u);
+
+ for (int i = 0; i < 2; ++i) {
+ EXPECT_EQ(uint32s[i], i * 16u);
+ }
+}
+
+TEST(CodegenRepeated, NonPackedFixedScalarVectorFull) {
+ // clang-format off
+ constexpr uint8_t proto_data[] = {
+ // fixed32s[]. v={0, 16, 32, 48}
+ 0x35, 0x00, 0x00, 0x00, 0x00,
+ 0x35, 0x10, 0x00, 0x00, 0x00,
+ 0x35, 0x20, 0x00, 0x00, 0x00,
+ 0x35, 0x30, 0x00, 0x00, 0x00,
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(proto_data)));
+ RepeatedTest::StreamDecoder repeated_test(reader);
+
+ pw::Vector<uint32_t, 2> fixed32s{};
+
+ EXPECT_EQ(repeated_test.Next(), OkStatus());
+ EXPECT_EQ(repeated_test.Field().value(), RepeatedTest::Fields::FIXED32S);
+ Status status = repeated_test.ReadFixed32s(fixed32s);
+ EXPECT_EQ(status, OkStatus());
+ EXPECT_EQ(fixed32s.size(), 1u);
+
+ EXPECT_EQ(repeated_test.Next(), OkStatus());
+ EXPECT_EQ(repeated_test.Field().value(), RepeatedTest::Fields::FIXED32S);
+ status = repeated_test.ReadFixed32s(fixed32s);
+ EXPECT_EQ(status, OkStatus());
+ EXPECT_EQ(fixed32s.size(), 2u);
+
+ EXPECT_EQ(repeated_test.Next(), OkStatus());
+ EXPECT_EQ(repeated_test.Field().value(), RepeatedTest::Fields::FIXED32S);
+ status = repeated_test.ReadFixed32s(fixed32s);
+ EXPECT_EQ(status, Status::ResourceExhausted());
+ EXPECT_EQ(fixed32s.size(), 2u);
+
+ for (int i = 0; i < 2; ++i) {
+ EXPECT_EQ(fixed32s[i], i * 16u);
+ }
+}
+
+TEST(CodegenRepeated, PackedScalar) {
+ // clang-format off
+ constexpr uint8_t proto_data[] = {
+ // uint32s[], v={0, 16, 32, 48}
+ 0x0a, 0x04,
+ 0x00,
+ 0x10,
+ 0x20,
+ 0x30,
+ // fixed32s[]. v={0, 16, 32, 48}
+ 0x32, 0x10,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x10, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00,
+ 0x30, 0x00, 0x00, 0x00,
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(proto_data)));
+ RepeatedTest::StreamDecoder repeated_test(reader);
+
+ EXPECT_EQ(repeated_test.Next(), OkStatus());
+ EXPECT_EQ(repeated_test.Field().value(), RepeatedTest::Fields::UINT32S);
+ std::array<uint32_t, 8> uint32s{};
+ StatusWithSize sws = repeated_test.ReadUint32s(uint32s);
+ EXPECT_EQ(sws.status(), OkStatus());
+ EXPECT_EQ(sws.size(), 4u);
+
+ for (int i = 0; i < 4; ++i) {
+ EXPECT_EQ(uint32s[i], i * 16u);
+ }
+
+ EXPECT_EQ(repeated_test.Next(), OkStatus());
+ EXPECT_EQ(repeated_test.Field().value(), RepeatedTest::Fields::FIXED32S);
+ std::array<uint32_t, 8> fixed32s{};
+ sws = repeated_test.ReadFixed32s(fixed32s);
+ EXPECT_EQ(sws.status(), OkStatus());
+ EXPECT_EQ(sws.size(), 4u);
+
+ for (int i = 0; i < 4; ++i) {
+ EXPECT_EQ(fixed32s[i], i * 16u);
+ }
+
+ EXPECT_EQ(repeated_test.Next(), Status::OutOfRange());
+}
+
+TEST(CodegenRepeated, PackedVarintScalarExhausted) {
+ // clang-format off
+ constexpr uint8_t proto_data[] = {
+ // uint32s[], v={0, 16, 32, 48}
+ 0x0a, 0x04,
+ 0x00,
+ 0x10,
+ 0x20,
+ 0x30,
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(proto_data)));
+ RepeatedTest::StreamDecoder repeated_test(reader);
+
+ EXPECT_EQ(repeated_test.Next(), OkStatus());
+ EXPECT_EQ(repeated_test.Field().value(), RepeatedTest::Fields::UINT32S);
+ std::array<uint32_t, 2> uint32s{};
+ StatusWithSize sws = repeated_test.ReadUint32s(uint32s);
+ EXPECT_EQ(sws.status(), Status::ResourceExhausted());
+ EXPECT_EQ(sws.size(), 2u);
+
+ for (int i = 0; i < 2; ++i) {
+ EXPECT_EQ(uint32s[i], i * 16u);
+ }
+}
+
+TEST(CodegenRepeated, PackedFixedScalarExhausted) {
+ // clang-format off
+ constexpr uint8_t proto_data[] = {
+ // fixed32s[]. v={0, 16, 32, 48}
+ 0x32, 0x10,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x10, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00,
+ 0x30, 0x00, 0x00, 0x00,
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(proto_data)));
+ RepeatedTest::StreamDecoder repeated_test(reader);
+
+ EXPECT_EQ(repeated_test.Next(), OkStatus());
+ EXPECT_EQ(repeated_test.Field().value(), RepeatedTest::Fields::FIXED32S);
+ std::array<uint32_t, 2> fixed32s{};
+ StatusWithSize sws = repeated_test.ReadFixed32s(fixed32s);
+ EXPECT_EQ(sws.status(), Status::ResourceExhausted());
+ EXPECT_EQ(sws.size(), 0u);
+}
+
+TEST(CodegenRepeated, PackedScalarVector) {
+ // clang-format off
+ constexpr uint8_t proto_data[] = {
+ // uint32s[], v={0, 16, 32, 48}
+ 0x0a, 0x04,
+ 0x00,
+ 0x10,
+ 0x20,
+ 0x30,
+ // fixed32s[]. v={0, 16, 32, 48}
+ 0x32, 0x10,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x10, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00,
+ 0x30, 0x00, 0x00, 0x00,
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(proto_data)));
+ RepeatedTest::StreamDecoder repeated_test(reader);
+
+ EXPECT_EQ(repeated_test.Next(), OkStatus());
+ EXPECT_EQ(repeated_test.Field().value(), RepeatedTest::Fields::UINT32S);
+ pw::Vector<uint32_t, 8> uint32s{};
+ Status status = repeated_test.ReadUint32s(uint32s);
+ EXPECT_EQ(status, OkStatus());
+ EXPECT_EQ(uint32s.size(), 4u);
+
+ for (int i = 0; i < 4; ++i) {
+ EXPECT_EQ(uint32s[i], i * 16u);
+ }
+
+ EXPECT_EQ(repeated_test.Next(), OkStatus());
+ EXPECT_EQ(repeated_test.Field().value(), RepeatedTest::Fields::FIXED32S);
+ pw::Vector<uint32_t, 8> fixed32s{};
+ status = repeated_test.ReadFixed32s(fixed32s);
+ EXPECT_EQ(status, OkStatus());
+ EXPECT_EQ(fixed32s.size(), 4u);
+
+ for (int i = 0; i < 4; ++i) {
+ EXPECT_EQ(fixed32s[i], i * 16u);
+ }
+
+ EXPECT_EQ(repeated_test.Next(), Status::OutOfRange());
+}
+
+TEST(CodegenRepeated, PackedVarintScalarVectorFull) {
+ // clang-format off
+ constexpr uint8_t proto_data[] = {
+ // uint32s[], v={0, 16, 32, 48}
+ 0x0a, 0x04,
+ 0x00,
+ 0x10,
+ 0x20,
+ 0x30,
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(proto_data)));
+ RepeatedTest::StreamDecoder repeated_test(reader);
+
+ EXPECT_EQ(repeated_test.Next(), OkStatus());
+ EXPECT_EQ(repeated_test.Field().value(), RepeatedTest::Fields::UINT32S);
+ pw::Vector<uint32_t, 2> uint32s{};
+ Status status = repeated_test.ReadUint32s(uint32s);
+ EXPECT_EQ(status, Status::ResourceExhausted());
+ EXPECT_EQ(uint32s.size(), 2u);
+
+ for (int i = 0; i < 2; ++i) {
+ EXPECT_EQ(uint32s[i], i * 16u);
+ }
+}
+
+TEST(CodegenRepeated, PackedFixedScalarVectorFull) {
+ // clang-format off
+ constexpr uint8_t proto_data[] = {
+ // fixed32s[]. v={0, 16, 32, 48}
+ 0x32, 0x10,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x10, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00,
+ 0x30, 0x00, 0x00, 0x00,
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(proto_data)));
+ RepeatedTest::StreamDecoder repeated_test(reader);
+
+ EXPECT_EQ(repeated_test.Next(), OkStatus());
+ EXPECT_EQ(repeated_test.Field().value(), RepeatedTest::Fields::FIXED32S);
+ pw::Vector<uint32_t, 2> fixed32s{};
+ Status status = repeated_test.ReadFixed32s(fixed32s);
+ EXPECT_EQ(status, Status::ResourceExhausted());
+ EXPECT_EQ(fixed32s.size(), 0u);
+}
+
+TEST(CodegenRepeated, PackedScalarVectorRepeated) {
+ // clang-format off
+ constexpr uint8_t proto_data[] = {
+ // uint32s[], v={0, 16, 32, 48}
+ 0x0a, 0x04,
+ 0x00,
+ 0x10,
+ 0x20,
+ 0x30,
+ // uint32s[], v={64, 80, 96, 112}
+ 0x0a, 0x04,
+ 0x40,
+ 0x50,
+ 0x60,
+ 0x70,
+ // fixed32s[]. v={0, 16, 32, 48}
+ 0x32, 0x10,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x10, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00,
+ 0x30, 0x00, 0x00, 0x00,
+ // fixed32s[]. v={64, 80, 96, 112}
+ 0x32, 0x10,
+ 0x40, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x60, 0x00, 0x00, 0x00,
+ 0x70, 0x00, 0x00, 0x00,
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(proto_data)));
+ RepeatedTest::StreamDecoder repeated_test(reader);
+
+ EXPECT_EQ(repeated_test.Next(), OkStatus());
+ EXPECT_EQ(repeated_test.Field().value(), RepeatedTest::Fields::UINT32S);
+ pw::Vector<uint32_t, 8> uint32s{};
+ Status status = repeated_test.ReadUint32s(uint32s);
+ EXPECT_EQ(status, OkStatus());
+ EXPECT_EQ(uint32s.size(), 4u);
+
+ EXPECT_EQ(repeated_test.Next(), OkStatus());
+ EXPECT_EQ(repeated_test.Field().value(), RepeatedTest::Fields::UINT32S);
+ status = repeated_test.ReadUint32s(uint32s);
+ EXPECT_EQ(status, OkStatus());
+ EXPECT_EQ(uint32s.size(), 8u);
+
+ for (int i = 0; i < 8; ++i) {
+ EXPECT_EQ(uint32s[i], i * 16u);
+ }
+
+ EXPECT_EQ(repeated_test.Next(), OkStatus());
+ EXPECT_EQ(repeated_test.Field().value(), RepeatedTest::Fields::FIXED32S);
+ pw::Vector<uint32_t, 8> fixed32s{};
+ status = repeated_test.ReadFixed32s(fixed32s);
+ EXPECT_EQ(status, OkStatus());
+ EXPECT_EQ(fixed32s.size(), 4u);
+
+ EXPECT_EQ(repeated_test.Next(), OkStatus());
+ EXPECT_EQ(repeated_test.Field().value(), RepeatedTest::Fields::FIXED32S);
+ status = repeated_test.ReadFixed32s(fixed32s);
+ EXPECT_EQ(status, OkStatus());
+ EXPECT_EQ(fixed32s.size(), 8u);
+
+ for (int i = 0; i < 8; ++i) {
+ EXPECT_EQ(fixed32s[i], i * 16u);
+ }
+
+ EXPECT_EQ(repeated_test.Next(), Status::OutOfRange());
+}
+
+TEST(CodegenRepeated, NonScalar) {
+ // clang-format off
+ constexpr uint8_t proto_data[] = {
+ // strings[], v={"the", "quick", "brown", "fox"}
+ 0x1a, 0x03, 't', 'h', 'e',
+ 0x1a, 0x5, 'q', 'u', 'i', 'c', 'k',
+ 0x1a, 0x5, 'b', 'r', 'o', 'w', 'n',
+ 0x1a, 0x3, 'f', 'o', 'x'
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(proto_data)));
+ RepeatedTest::StreamDecoder repeated_test(reader);
+
+ constexpr std::array<std::string_view, 4> kExpectedString{
+ {{"the"}, {"quick"}, {"brown"}, {"fox"}}};
+
+ for (int i = 0; i < 4; ++i) {
+ EXPECT_EQ(repeated_test.Next(), OkStatus());
+ EXPECT_EQ(repeated_test.Field().value(), RepeatedTest::Fields::STRINGS);
+ std::array<char, 32> string{};
+ StatusWithSize sws = repeated_test.ReadStrings(string);
+ EXPECT_EQ(sws.status(), OkStatus());
+ EXPECT_EQ(sws.size(), kExpectedString[i].size());
+ EXPECT_EQ(std::memcmp(string.data(),
+ kExpectedString[i].data(),
+ kExpectedString[i].size()),
+ 0);
+ }
+
+ EXPECT_EQ(repeated_test.Next(), Status::OutOfRange());
+}
+
+} // namespace
+} // namespace pw::protobuf
diff --git a/pw_protobuf/codegen_test.cc b/pw_protobuf/codegen_encoder_test.cc
index b1cc7dae9..93bb02db3 100644
--- a/pw_protobuf/codegen_test.cc
+++ b/pw_protobuf/codegen_encoder_test.cc
@@ -258,8 +258,25 @@ TEST(CodegenRepeated, NonPackedScalar) {
.IgnoreError(); // TODO(pwbug/387): Handle Status properly
}
+ for (int i = 0; i < 4; ++i) {
+ repeated_test.WriteFixed32s(i * 16)
+ .IgnoreError(); // TODO(pwbug/387): Handle Status properly
+ }
+
+ // clang-format off
constexpr uint8_t expected_proto[] = {
- 0x08, 0x00, 0x08, 0x10, 0x08, 0x20, 0x08, 0x30};
+ // uint32s[], v={0, 16, 32, 48}
+ 0x08, 0x00,
+ 0x08, 0x10,
+ 0x08, 0x20,
+ 0x08, 0x30,
+ // fixed32s[]. v={0, 16, 32, 48}
+ 0x35, 0x00, 0x00, 0x00, 0x00,
+ 0x35, 0x10, 0x00, 0x00, 0x00,
+ 0x35, 0x20, 0x00, 0x00, 0x00,
+ 0x35, 0x30, 0x00, 0x00, 0x00,
+ };
+ // clang-format on
ConstByteSpan result = writer.WrittenData();
ASSERT_EQ(repeated_test.status(), OkStatus());
@@ -276,8 +293,84 @@ TEST(CodegenRepeated, PackedScalar) {
constexpr uint32_t values[] = {0, 16, 32, 48};
repeated_test.WriteUint32s(values)
.IgnoreError(); // TODO(pwbug/387): Handle Status properly
+ repeated_test.WriteFixed32s(values)
+ .IgnoreError(); // TODO(pwbug/387): Handle Status properly
+
+ // clang-format off
+ constexpr uint8_t expected_proto[] = {
+ // uint32s[], v={0, 16, 32, 48}
+ 0x0a, 0x04,
+ 0x00,
+ 0x10,
+ 0x20,
+ 0x30,
+ // fixed32s[]. v={0, 16, 32, 48}
+ 0x32, 0x10,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x10, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00,
+ 0x30, 0x00, 0x00, 0x00,
+ };
+ // clang-format on
+
+ ConstByteSpan result = writer.WrittenData();
+ ASSERT_EQ(repeated_test.status(), OkStatus());
+ EXPECT_EQ(result.size(), sizeof(expected_proto));
+ EXPECT_EQ(std::memcmp(result.data(), expected_proto, sizeof(expected_proto)),
+ 0);
+}
+
+TEST(CodegenRepeated, PackedBool) {
+ std::byte encode_buffer[32];
+
+ stream::MemoryWriter writer(encode_buffer);
+ RepeatedTest::StreamEncoder repeated_test(writer, ByteSpan());
+ constexpr bool values[] = {true, false, true, true, false};
+ repeated_test.WriteBools(std::span(values))
+ .IgnoreError(); // TODO(pwbug/387): Handle Status properly
+
+ // clang-format off
+ constexpr uint8_t expected_proto[] = {
+ // bools[], v={true, false, true, true, false}
+ 0x3a, 0x05, 0x01, 0x00, 0x01, 0x01, 0x00,
+ };
+ // clang-format on
+
+ ConstByteSpan result = writer.WrittenData();
+ ASSERT_EQ(repeated_test.status(), OkStatus());
+ EXPECT_EQ(result.size(), sizeof(expected_proto));
+ EXPECT_EQ(std::memcmp(result.data(), expected_proto, sizeof(expected_proto)),
+ 0);
+}
+
+TEST(CodegenRepeated, PackedScalarVector) {
+ std::byte encode_buffer[32];
+
+ stream::MemoryWriter writer(encode_buffer);
+ RepeatedTest::StreamEncoder repeated_test(writer, ByteSpan());
+ const pw::Vector<uint32_t, 4> values = {0, 16, 32, 48};
+ repeated_test.WriteUint32s(values)
+ .IgnoreError(); // TODO(pwbug/387): Handle Status properly
+ repeated_test.WriteFixed32s(values)
+ .IgnoreError(); // TODO(pwbug/387): Handle Status properly
+
+ // clang-format off
+ constexpr uint8_t expected_proto[] = {
+ // uint32s[], v={0, 16, 32, 48}
+ 0x0a, 0x04,
+ 0x00,
+ 0x10,
+ 0x20,
+ 0x30,
+ // fixed32s[]. v={0, 16, 32, 48}
+ 0x32, 0x10,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x10, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00,
+ 0x30, 0x00, 0x00, 0x00,
+ };
+ // clang-format on
- constexpr uint8_t expected_proto[] = {0x0a, 0x04, 0x00, 0x10, 0x20, 0x30};
ConstByteSpan result = writer.WrittenData();
ASSERT_EQ(repeated_test.status(), OkStatus());
EXPECT_EQ(result.size(), sizeof(expected_proto));
@@ -335,7 +428,7 @@ TEST(Codegen, Proto2) {
std::byte encode_buffer[64];
Foo::MemoryEncoder foo(encode_buffer);
- foo.WriteInt(3).IgnoreError(); // TODO(pwbug/387): Handle Status properly
+ foo.WriteInteger(3).IgnoreError(); // TODO(pwbug/387): Handle Status properly
{
constexpr std::byte data[] = {
diff --git a/pw_protobuf/docs.rst b/pw_protobuf/docs.rst
index cbc1792fa..ecc3f5593 100644
--- a/pw_protobuf/docs.rst
+++ b/pw_protobuf/docs.rst
@@ -10,7 +10,7 @@ the Protocol Buffer wire format.
The protobuf module is a work in progress. Wire format encoding and decoding
is supported, though the APIs are not final. C++ code generation exists for
- encoding, but not decoding.
+ encoding and decoding, but does not cover all message types.
------
Design
@@ -166,107 +166,34 @@ space to allocate to account for nested submessage encoding overhead.
created the nested encoder will trigger a crash. To resume using the parent
encoder, destroy the submessage encoder first.
+Repeated Fields
+===============
+Repeated fields can be encoded a value at a time by repeatedly calling
+`WriteInt32` etc., or as a packed field by calling e.g. `WritePackedInt32` with
+a `std::span<Type>` or `WriteRepeatedInt32` with a `pw::Vector<Type>` (see
+:ref:`module-pw_containers` for details).
+
Error Handling
==============
While individual write calls on a proto encoder return pw::Status objects, the
encoder tracks all status returns and "latches" onto the first error
encountered. This status can be accessed via ``StreamEncoder::status()``.
-Codegen
-=======
-pw_protobuf encoder codegen integration is supported in GN, Bazel, and CMake.
-The codegen is just a light wrapper around the ``StreamEncoder`` and
-``MemoryEncoder`` objects, providing named helper functions to write proto
-fields rather than requiring that field numbers are directly passed to an
-encoder. Namespaced proto enums are also generated, and used as the arguments
-when writing enum fields of a proto message.
-
-All generated messages provide a ``Fields`` enum that can be used directly for
-out-of-band encoding, or with the ``pw::protobuf::Decoder``.
-
-This module's codegen is available through the ``*.pwpb`` sub-target of a
-``pw_proto_library`` in GN, CMake, and Bazel. See :ref:`pw_protobuf_compiler's
-documentation <module-pw_protobuf_compiler>` for more information on build
-system integration for pw_protobuf codegen.
-
-Example ``BUILD.gn``:
-
-.. Code:: none
-
- import("//build_overrides/pigweed.gni")
-
- import("$dir_pw_build/target_types.gni")
- import("$dir_pw_protobuf_compiler/proto.gni")
-
- # This target controls where the *.pwpb.h headers end up on the include path.
- # In this example, it's at "pet_daycare_protos/client.pwpb.h".
- pw_proto_library("pet_daycare_protos") {
- sources = [
- "pet_daycare_protos/client.proto",
- ]
- }
-
- pw_source_set("example_client") {
- sources = [ "example_client.cc" ]
- deps = [
- ":pet_daycare_protos.pwpb",
- dir_pw_bytes,
- dir_pw_stream,
- ]
- }
-
-Example ``pet_daycare_protos/client.proto``:
-
-.. Code:: none
-
- syntax = "proto3";
- // The proto package controls the namespacing of the codegen. If this package
- // were fuzzy.friends, the namespace for codegen would be fuzzy::friends::*.
- package fuzzy_friends;
-
- message Pet {
- string name = 1;
- string pet_type = 2;
- }
-
- message Client {
- repeated Pet pets = 1;
- }
-
-Example ``example_client.cc``:
-
-.. Code:: cpp
-
- #include "pet_daycare_protos/client.pwpb.h"
- #include "pw_protobuf/encoder.h"
- #include "pw_stream/sys_io_stream.h"
- #include "pw_bytes/span.h"
-
- pw::stream::SysIoWriter sys_io_writer;
- std::byte submessage_scratch_buffer[64];
- // The constructor is the same as a pw::protobuf::StreamEncoder.
- fuzzy_friends::Client::StreamEncoder client(sys_io_writer,
- submessage_scratch_buffer);
- {
- fuzzy_friends::Pet::StreamEncoder pet1 = client.GetPetsEncoder();
- pet1.WriteName("Spot");
- pet1.WritePetType("dog");
- }
+Proto map encoding utils
+========================
- {
- fuzzy_friends::Pet::StreamEncoder pet2 = client.GetPetsEncoder();
- pet2.WriteName("Slippers");
- pet2.WritePetType("rabbit");
- }
+Some additional helpers for encoding more complex but common protobuf
+submessages (e.g. map<string, bytes>) are provided in
+``pw_protobuf/map_utils.h``.
- if (!client.status().ok()) {
- PW_LOG_INFO("Failed to encode proto; %s", client.status().str());
- }
+.. Note::
+  The helper APIs are currently in development and may not remain stable.
--------
Decoding
--------
-``pw_protobuf`` provides two decoder implementations, which are described below.
+``pw_protobuf`` provides three decoder implementations, which are described
+below.
Decoder
=======
@@ -425,15 +352,17 @@ its parent decoder cannot be used.
// parent decoder can be used again.
}
-Proto map encoding utils
-========================
-
-Some additional helpers for encoding more complex but common protobuf
-submessages (e.g. map<string, bytes>) are provided in
-``pw_protobuf/map_utils.h``.
+Repeated Fields
+---------------
+The ``StreamDecoder`` supports two encoded forms of repeated fields: value at a
+time, by repeatedly calling `ReadInt32` etc., and packed fields by calling
+e.g. `ReadPackedInt32`.
-.. Note::
- The helper API are currently in-development and may not remain stable.
+Since protobuf encoders are permitted to choose either format, including
+splitting repeated fields up into multiple packed fields, ``StreamDecoder``
+also provides `ReadRepeatedInt32` etc. methods that accept a
+``pw::Vector`` (see :ref:`module-pw_containers` for details). These methods
+correctly extend the vector for either encoding.
Message
=======
@@ -599,11 +528,172 @@ single fields directly.
.. Note::
The helper API are currently in-development and may not remain stable.
+-------
+Codegen
+-------
+
+pw_protobuf codegen integration is supported in GN, Bazel, and CMake.
+The codegen is a light wrapper around the ``StreamEncoder``, ``MemoryEncoder``,
+and ``StreamDecoder`` objects, providing named helper functions to write and
+read proto fields rather than requiring that field numbers are directly passed
+to an encoder.
+
+All generated messages provide a ``Fields`` enum that can be used directly for
+out-of-band encoding, or with the ``pw::protobuf::Decoder``.
+
+This module's codegen is available through the ``*.pwpb`` sub-target of a
+``pw_proto_library`` in GN, CMake, and Bazel. See :ref:`pw_protobuf_compiler's
+documentation <module-pw_protobuf_compiler>` for more information on build
+system integration for pw_protobuf codegen.
+
+Example ``BUILD.gn``:
+
+.. Code:: none
+
+ import("//build_overrides/pigweed.gni")
+
+ import("$dir_pw_build/target_types.gni")
+ import("$dir_pw_protobuf_compiler/proto.gni")
+
+ # This target controls where the *.pwpb.h headers end up on the include path.
+ # In this example, it's at "pet_daycare_protos/client.pwpb.h".
+ pw_proto_library("pet_daycare_protos") {
+ sources = [
+ "pet_daycare_protos/client.proto",
+ ]
+ }
+
+ pw_source_set("example_client") {
+ sources = [ "example_client.cc" ]
+ deps = [
+ ":pet_daycare_protos.pwpb",
+ dir_pw_bytes,
+ dir_pw_stream,
+ ]
+ }
+
+ pw_source_set("example_server") {
+ sources = [ "example_server.cc" ]
+ deps = [
+ ":pet_daycare_protos.pwpb",
+ dir_pw_bytes,
+ dir_pw_stream,
+ ]
+ }
+
+Example ``pet_daycare_protos/client.proto``:
+
+.. Code:: none
+
+ syntax = "proto3";
+ // The proto package controls the namespacing of the codegen. If this package
+ // were fuzzy.friends, the namespace for codegen would be fuzzy::friends::*.
+ package fuzzy_friends;
+
+ message Pet {
+ string name = 1;
+ string pet_type = 2;
+ }
+
+ message Client {
+ repeated Pet pets = 1;
+ }
+
+Example ``example_client.cc``:
+
+.. Code:: cpp
+
+ #include "pet_daycare_protos/client.pwpb.h"
+ #include "pw_protobuf/encoder.h"
+ #include "pw_stream/sys_io_stream.h"
+ #include "pw_bytes/span.h"
+
+ pw::stream::SysIoWriter sys_io_writer;
+ std::byte submessage_scratch_buffer[64];
+ // The constructor is the same as a pw::protobuf::StreamEncoder.
+ fuzzy_friends::Client::StreamEncoder client(sys_io_writer,
+ submessage_scratch_buffer);
+ {
+ fuzzy_friends::Pet::StreamEncoder pet1 = client.GetPetsEncoder();
+ pet1.WriteName("Spot");
+ pet1.WritePetType("dog");
+ }
+
+ {
+ fuzzy_friends::Pet::StreamEncoder pet2 = client.GetPetsEncoder();
+ pet2.WriteName("Slippers");
+ pet2.WritePetType("rabbit");
+ }
+
+ if (!client.status().ok()) {
+ PW_LOG_INFO("Failed to encode proto; %s", client.status().str());
+ }
+
+Example ``example_server.cc``:
+
+.. Code:: cpp
+
+ #include "pet_daycare_protos/client.pwpb.h"
+ #include "pw_protobuf/stream_decoder.h"
+ #include "pw_stream/sys_io_stream.h"
+ #include "pw_bytes/span.h"
+
+ pw::stream::SysIoReader sys_io_reader;
+ // The constructor is the same as a pw::protobuf::StreamDecoder.
+ fuzzy_friends::Client::StreamDecoder client(sys_io_reader);
+ while (client.Next().ok()) {
+    switch (client.Field().value()) {
+      case fuzzy_friends::Client::Fields::PETS: {
+        std::array<char, 32> name{};
+        std::array<char, 32> pet_type{};
+
+        fuzzy_friends::Pet::StreamDecoder pet = client.GetPetsDecoder();
+        while (pet.Next().ok()) {
+          switch (pet.Field().value()) {
+            case fuzzy_friends::Pet::Fields::NAME:
+              pet.ReadName(name);
+              break;
+            case fuzzy_friends::Pet::Fields::PET_TYPE:
+              pet.ReadPetType(pet_type);
+              break;
+          }
+ }
+
+ break;
+ }
+ }
+ }
+
+ if (!client.status().ok()) {
+ PW_LOG_INFO("Failed to decode proto; %s", client.status().str());
+ }
+
+Enums
+=====
+Namespaced proto enums are generated, and used as the arguments when writing
+enum fields of a proto message. When reading enum fields of a proto message,
+the enum value is validated and returned as the correct type, or
+``Status::DataLoss()`` if the decoded enum value was not given in the proto.
+
+Repeated Fields
+===============
+For encoding, the wrappers provide a `WriteFieldName` method with three
+signatures. One that encodes a single value at a time, one that encodes a packed
+field from a `std::span<Type>`, and one that encodes a packed field from a
+`pw::Vector<Type>`. All three return `Status`.
+
+For decoding, the wrappers provide a `ReadFieldName` method with three
+signatures. One that reads a single value at a time, returning a `Result<Type>`,
+one that reads a packed field into a `std::span<Type>` and returning a
+`StatusWithSize`, and one that supports all formats reading into a
+`pw::Vector<Type>` and returning `Status`.
+
+-----------
Size report
-===========
+-----------
Full size report
-----------------
+================
This report demonstrates the size of using the entire decoder with all of its
decode methods and a decode callback for a proto message containing each of the
@@ -613,7 +703,7 @@ protobuf field types.
Incremental size report
------------------------
+=======================
This report is generated using the full report as a base and adding some int32
fields to the decode callback to demonstrate the incremental cost of decoding
diff --git a/pw_protobuf/encoder_test.cc b/pw_protobuf/encoder_test.cc
index 1ab005aca..6417f699f 100644
--- a/pw_protobuf/encoder_test.cc
+++ b/pw_protobuf/encoder_test.cc
@@ -307,6 +307,57 @@ TEST(StreamEncoder, PackedVarintInsufficientSpace) {
EXPECT_EQ(encoder.status(), Status::ResourceExhausted());
}
+TEST(StreamEncoder, PackedVarintVector) {
+ std::byte encode_buffer[32];
+ MemoryEncoder encoder(encode_buffer);
+
+ // repeated uint32 values = 1;
+ const pw::Vector<uint32_t, 5> values = {0, 50, 100, 150, 200};
+ encoder.WriteRepeatedUint32(1, values)
+ .IgnoreError(); // TODO(pwbug/387): Handle Status properly
+
+ constexpr uint8_t encoded_proto[] = {
+ 0x0a, 0x07, 0x00, 0x32, 0x64, 0x96, 0x01, 0xc8, 0x01};
+ // key size v[0] v[1] v[2] v[3] v[4]
+
+ ASSERT_EQ(encoder.status(), OkStatus());
+ ConstByteSpan result(encoder);
+ EXPECT_EQ(result.size(), sizeof(encoded_proto));
+ EXPECT_EQ(std::memcmp(result.data(), encoded_proto, sizeof(encoded_proto)),
+ 0);
+}
+
+TEST(StreamEncoder, PackedVarintVectorInsufficientSpace) {
+ std::byte encode_buffer[8];
+ MemoryEncoder encoder(encode_buffer);
+
+ const pw::Vector<uint32_t, 5> values = {0, 50, 100, 150, 200};
+ encoder.WriteRepeatedUint32(1, values)
+ .IgnoreError(); // TODO(pwbug/387): Handle Status properly
+
+ EXPECT_EQ(encoder.status(), Status::ResourceExhausted());
+}
+
+TEST(StreamEncoder, PackedBool) {
+ std::byte encode_buffer[32];
+ MemoryEncoder encoder(encode_buffer);
+
+ // repeated bool values = 1;
+ constexpr bool values[] = {true, false, true, true, false};
+ encoder.WritePackedBool(1, values)
+ .IgnoreError(); // TODO(pwbug/387): Handle Status properly
+
+ constexpr uint8_t encoded_proto[] = {
+ 0x0a, 0x05, 0x01, 0x00, 0x01, 0x01, 0x00};
+ // key size v[0] v[1] v[2] v[3] v[4]
+
+ ASSERT_EQ(encoder.status(), OkStatus());
+ ConstByteSpan result(encoder);
+ EXPECT_EQ(result.size(), sizeof(encoded_proto));
+ EXPECT_EQ(std::memcmp(result.data(), encoded_proto, sizeof(encoded_proto)),
+ 0);
+}
+
TEST(StreamEncoder, PackedFixed) {
std::byte encode_buffer[32];
MemoryEncoder encoder(encode_buffer);
@@ -333,6 +384,32 @@ TEST(StreamEncoder, PackedFixed) {
0);
}
+TEST(StreamEncoder, PackedFixedVector) {
+ std::byte encode_buffer[32];
+ MemoryEncoder encoder(encode_buffer);
+
+ // repeated fixed32 values = 1;
+ const pw::Vector<uint32_t, 5> values = {0, 50, 100, 150, 200};
+ encoder.WriteRepeatedFixed32(1, values)
+ .IgnoreError(); // TODO(pwbug/387): Handle Status properly
+
+ // repeated fixed64 values64 = 2;
+ const pw::Vector<uint64_t, 1> values64 = {0x0102030405060708};
+ encoder.WriteRepeatedFixed64(2, values64)
+ .IgnoreError(); // TODO(pwbug/387): Handle Status properly
+
+ constexpr uint8_t encoded_proto[] = {
+ 0x0a, 0x14, 0x00, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00, 0x64,
+ 0x00, 0x00, 0x00, 0x96, 0x00, 0x00, 0x00, 0xc8, 0x00, 0x00, 0x00,
+ 0x12, 0x08, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01};
+
+ ASSERT_EQ(encoder.status(), OkStatus());
+ ConstByteSpan result(encoder);
+ EXPECT_EQ(result.size(), sizeof(encoded_proto));
+ EXPECT_EQ(std::memcmp(result.data(), encoded_proto, sizeof(encoded_proto)),
+ 0);
+}
+
TEST(StreamEncoder, PackedZigzag) {
std::byte encode_buffer[32];
MemoryEncoder encoder(encode_buffer);
@@ -352,6 +429,25 @@ TEST(StreamEncoder, PackedZigzag) {
0);
}
+TEST(StreamEncoder, PackedZigzagVector) {
+ std::byte encode_buffer[32];
+ MemoryEncoder encoder(encode_buffer);
+
+ // repeated sint32 values = 1;
+ const pw::Vector<int32_t, 7> values = {-100, -25, -1, 0, 1, 25, 100};
+ encoder.WriteRepeatedSint32(1, values)
+ .IgnoreError(); // TODO(pwbug/387): Handle Status properly
+
+ constexpr uint8_t encoded_proto[] = {
+ 0x0a, 0x09, 0xc7, 0x01, 0x31, 0x01, 0x00, 0x02, 0x32, 0xc8, 0x01};
+
+ ASSERT_EQ(encoder.status(), OkStatus());
+ ConstByteSpan result(encoder);
+ EXPECT_EQ(result.size(), sizeof(encoded_proto));
+ EXPECT_EQ(std::memcmp(result.data(), encoded_proto, sizeof(encoded_proto)),
+ 0);
+}
+
TEST(StreamEncoder, ParentUnavailable) {
std::byte encode_buffer[32];
MemoryEncoder parent(encode_buffer);
diff --git a/pw_protobuf/public/pw_protobuf/encoder.h b/pw_protobuf/public/pw_protobuf/encoder.h
index c3d68ad6b..f5b427382 100644
--- a/pw_protobuf/public/pw_protobuf/encoder.h
+++ b/pw_protobuf/public/pw_protobuf/encoder.h
@@ -24,6 +24,7 @@
#include "pw_assert/assert.h"
#include "pw_bytes/endian.h"
#include "pw_bytes/span.h"
+#include "pw_containers/vector.h"
#include "pw_protobuf/config.h"
#include "pw_protobuf/wire_format.h"
#include "pw_status/status.h"
@@ -177,6 +178,16 @@ class StreamEncoder {
return WritePackedVarints(field_number, values, VarintEncodeType::kNormal);
}
+ // Writes a repeated uint32 using packed encoding.
+ //
+ // Precondition: Encoder has no active child encoder.
+ Status WriteRepeatedUint32(uint32_t field_number,
+ const pw::Vector<uint32_t>& values) {
+ return WritePackedVarints(field_number,
+ std::span(values.data(), values.size()),
+ VarintEncodeType::kNormal);
+ }
+
// Writes a proto uint64 key-value pair.
//
// Precondition: Encoder has no active child encoder.
@@ -192,6 +203,16 @@ class StreamEncoder {
return WritePackedVarints(field_number, values, VarintEncodeType::kNormal);
}
+ // Writes a repeated uint64 using packed encoding.
+ //
+ // Precondition: Encoder has no active child encoder.
+ Status WriteRepeatedUint64(uint32_t field_number,
+ const pw::Vector<uint64_t>& values) {
+ return WritePackedVarints(field_number,
+ std::span(values.data(), values.size()),
+ VarintEncodeType::kNormal);
+ }
+
// Writes a proto int32 key-value pair.
//
// Precondition: Encoder has no active child encoder.
@@ -211,6 +232,18 @@ class StreamEncoder {
VarintEncodeType::kNormal);
}
+ // Writes a repeated int32 using packed encoding.
+ //
+ // Precondition: Encoder has no active child encoder.
+ Status WriteRepeatedInt32(uint32_t field_number,
+ const pw::Vector<int32_t>& values) {
+ return WritePackedVarints(
+ field_number,
+ std::span(reinterpret_cast<const uint32_t*>(values.data()),
+ values.size()),
+ VarintEncodeType::kNormal);
+ }
+
// Writes a proto int64 key-value pair.
//
// Precondition: Encoder has no active child encoder.
@@ -230,6 +263,18 @@ class StreamEncoder {
VarintEncodeType::kNormal);
}
+ // Writes a repeated int64 using packed encoding.
+ //
+ // Precondition: Encoder has no active child encoder.
+ Status WriteRepeatedInt64(uint32_t field_number,
+ const pw::Vector<int64_t>& values) {
+ return WritePackedVarints(
+ field_number,
+ std::span(reinterpret_cast<const uint64_t*>(values.data()),
+ values.size()),
+ VarintEncodeType::kNormal);
+ }
+
// Writes a proto sint32 key-value pair.
//
// Precondition: Encoder has no active child encoder.
@@ -249,6 +294,18 @@ class StreamEncoder {
VarintEncodeType::kZigZag);
}
+ // Writes a repeated sint32 using packed encoding.
+ //
+ // Precondition: Encoder has no active child encoder.
+ Status WriteRepeatedSint32(uint32_t field_number,
+ const pw::Vector<int32_t>& values) {
+ return WritePackedVarints(
+ field_number,
+ std::span(reinterpret_cast<const uint32_t*>(values.data()),
+ values.size()),
+ VarintEncodeType::kZigZag);
+ }
+
// Writes a proto sint64 key-value pair.
//
// Precondition: Encoder has no active child encoder.
@@ -268,6 +325,18 @@ class StreamEncoder {
VarintEncodeType::kZigZag);
}
+ // Writes a repeated sint64 using packed encoding.
+ //
+ // Precondition: Encoder has no active child encoder.
+ Status WriteRepeatedSint64(uint32_t field_number,
+ const pw::Vector<int64_t>& values) {
+ return WritePackedVarints(
+ field_number,
+ std::span(reinterpret_cast<const uint64_t*>(values.data()),
+ values.size()),
+ VarintEncodeType::kZigZag);
+ }
+
// Writes a proto bool key-value pair.
//
// Precondition: Encoder has no active child encoder.
@@ -275,6 +344,34 @@ class StreamEncoder {
return WriteUint32(field_number, static_cast<uint32_t>(value));
}
+ // Writes a repeated bool using packed encoding.
+ //
+ // Precondition: Encoder has no active child encoder.
+ Status WritePackedBool(uint32_t field_number, std::span<const bool> values) {
+ static_assert(sizeof(bool) == sizeof(uint8_t),
+ "bool must be same size as uint8_t");
+ return WritePackedVarints(
+ field_number,
+ std::span(reinterpret_cast<const uint8_t*>(values.data()),
+ values.size()),
+ VarintEncodeType::kNormal);
+ }
+
+ // Writes a repeated bool using packed encoding.
+ //
+ // Precondition: Encoder has no active child encoder.
+ Status WriteRepeatedBool(uint32_t field_number,
+ const pw::Vector<bool>& values) {
+ static_assert(sizeof(bool) == sizeof(uint8_t),
+ "bool must be same size as uint8_t");
+
+ return WritePackedVarints(
+ field_number,
+ std::span(reinterpret_cast<const uint8_t*>(values.data()),
+ values.size()),
+ VarintEncodeType::kNormal);
+ }
+
// Writes a proto fixed32 key-value pair.
//
// Precondition: Encoder has no active child encoder.
@@ -293,6 +390,17 @@ class StreamEncoder {
field_number, std::as_bytes(values), sizeof(uint32_t));
}
+ // Writes a repeated fixed32 field using packed encoding.
+ //
+ // Precondition: Encoder has no active child encoder.
+ Status WriteRepeatedFixed32(uint32_t field_number,
+ const pw::Vector<uint32_t>& values) {
+ return WritePackedFixed(
+ field_number,
+ std::as_bytes(std::span(values.data(), values.size())),
+ sizeof(uint32_t));
+ }
+
// Writes a proto fixed64 key-value pair.
//
// Precondition: Encoder has no active child encoder.
@@ -311,6 +419,17 @@ class StreamEncoder {
field_number, std::as_bytes(values), sizeof(uint64_t));
}
+ // Writes a repeated fixed64 field using packed encoding.
+ //
+ // Precondition: Encoder has no active child encoder.
+ Status WriteRepeatedFixed64(uint32_t field_number,
+ const pw::Vector<uint64_t>& values) {
+ return WritePackedFixed(
+ field_number,
+ std::as_bytes(std::span(values.data(), values.size())),
+ sizeof(uint64_t));
+ }
+
// Writes a proto sfixed32 key-value pair.
//
// Precondition: Encoder has no active child encoder.
@@ -327,6 +446,17 @@ class StreamEncoder {
field_number, std::as_bytes(values), sizeof(int32_t));
}
+ // Writes a repeated sfixed32 field using packed encoding.
+ //
+ // Precondition: Encoder has no active child encoder.
+ Status WriteRepeatedSfixed32(uint32_t field_number,
+ const pw::Vector<int32_t>& values) {
+ return WritePackedFixed(
+ field_number,
+ std::as_bytes(std::span(values.data(), values.size())),
+ sizeof(int32_t));
+ }
+
// Writes a proto sfixed64 key-value pair.
//
// Precondition: Encoder has no active child encoder.
@@ -343,6 +473,17 @@ class StreamEncoder {
field_number, std::as_bytes(values), sizeof(int64_t));
}
+ // Writes a repeated sfixed64 field using packed encoding.
+ //
+ // Precondition: Encoder has no active child encoder.
+ Status WriteRepeatedFixed64(uint32_t field_number,
+ const pw::Vector<int64_t>& values) {
+ return WritePackedFixed(
+ field_number,
+ std::as_bytes(std::span(values.data(), values.size())),
+ sizeof(int64_t));
+ }
+
// Writes a proto float key-value pair.
//
// Precondition: Encoder has no active child encoder.
@@ -364,6 +505,17 @@ class StreamEncoder {
return WritePackedFixed(field_number, std::as_bytes(values), sizeof(float));
}
+ // Writes a repeated float field using packed encoding.
+ //
+ // Precondition: Encoder has no active child encoder.
+ Status WriteRepeatedFloat(uint32_t field_number,
+ const pw::Vector<float>& values) {
+ return WritePackedFixed(
+ field_number,
+ std::as_bytes(std::span(values.data(), values.size())),
+ sizeof(float));
+ }
+
// Writes a proto double key-value pair.
//
// Precondition: Encoder has no active child encoder.
@@ -386,6 +538,17 @@ class StreamEncoder {
field_number, std::as_bytes(values), sizeof(double));
}
+ // Writes a repeated double field using packed encoding.
+ //
+ // Precondition: Encoder has no active child encoder.
+ Status WriteRepeatedDouble(uint32_t field_number,
+ const pw::Vector<double>& values) {
+ return WritePackedFixed(
+ field_number,
+ std::as_bytes(std::span(values.data(), values.size())),
+ sizeof(double));
+ }
+
// Writes a proto `bytes` field as a key-value pair. This can also be used to
// write a pre-encoded nested submessage directly without using a nested
// encoder.
@@ -531,12 +694,13 @@ class StreamEncoder {
Status WritePackedVarints(uint32_t field_number,
std::span<T> values,
VarintEncodeType encode_type) {
- static_assert(std::is_same<T, const uint32_t>::value ||
+ static_assert(std::is_same<T, const uint8_t>::value ||
+ std::is_same<T, const uint32_t>::value ||
std::is_same<T, const int32_t>::value ||
std::is_same<T, const uint64_t>::value ||
std::is_same<T, const int64_t>::value,
- "Packed varints must be of type uint32_t, int32_t, uint64_t, "
- "or int64_t");
+ "Packed varints must be of type bool, uint32_t, int32_t, "
+ "uint64_t, or int64_t");
size_t payload_size = 0;
for (T val : values) {
diff --git a/pw_protobuf/public/pw_protobuf/stream_decoder.h b/pw_protobuf/public/pw_protobuf/stream_decoder.h
index fda6f4eb2..37869b3a4 100644
--- a/pw_protobuf/public/pw_protobuf/stream_decoder.h
+++ b/pw_protobuf/public/pw_protobuf/stream_decoder.h
@@ -16,12 +16,17 @@
#include <array>
#include <cstring>
#include <limits>
+#include <span>
+#include <type_traits>
#include "pw_assert/assert.h"
-#include "pw_bytes/endian.h"
+#include "pw_containers/vector.h"
#include "pw_protobuf/wire_format.h"
+#include "pw_status/status.h"
#include "pw_status/status_with_size.h"
#include "pw_stream/stream.h"
+#include "pw_varint/stream.h"
+#include "pw_varint/varint.h"
namespace pw::protobuf {
@@ -152,89 +157,285 @@ class StreamDecoder {
//
// Reads a proto int32 value from the current position.
- Result<int32_t> ReadInt32();
+ Result<int32_t> ReadInt32() {
+ return ReadVarintField<int32_t>(VarintDecodeType::kNormal);
+ }
+
+ // Reads repeated int32 values from the current position using packed
+ // encoding.
+ //
+ // Returns the number of values read. In the case of error, the return value
+ // indicates the number of values successfully read, in addition to the error.
+ StatusWithSize ReadPackedInt32(std::span<int32_t> out) {
+ return ReadPackedVarintField(std::as_writable_bytes(out),
+ sizeof(int32_t),
+ VarintDecodeType::kNormal);
+ }
+
+ // Reads repeated int32 values from the current position into the vector,
+ // supporting either repeated single field elements or packed encoding.
+ Status ReadRepeatedInt32(pw::Vector<int32_t>& out) {
+ return ReadRepeatedVarintField<int32_t>(out, VarintDecodeType::kNormal);
+ }
// Reads a proto uint32 value from the current position.
- Result<uint32_t> ReadUint32();
+ Result<uint32_t> ReadUint32() {
+ return ReadVarintField<uint32_t>(VarintDecodeType::kUnsigned);
+ }
+
+ // Reads repeated uint32 values from the current position using packed
+ // encoding.
+ //
+ // Returns the number of values read. In the case of error, the return value
+ // indicates the number of values successfully read, in addition to the error.
+ StatusWithSize ReadPackedUint32(std::span<uint32_t> out) {
+ return ReadPackedVarintField(std::as_writable_bytes(out),
+ sizeof(uint32_t),
+ VarintDecodeType::kUnsigned);
+ }
+
+ // Reads repeated uint32 values from the current position into the vector,
+ // supporting either repeated single field elements or packed encoding.
+ Status ReadRepeatedUint32(pw::Vector<uint32_t>& out) {
+ return ReadRepeatedVarintField<uint32_t>(out, VarintDecodeType::kUnsigned);
+ }
// Reads a proto int64 value from the current position.
- Result<int64_t> ReadInt64();
+ Result<int64_t> ReadInt64() {
+ return ReadVarintField<int64_t>(VarintDecodeType::kNormal);
+ }
+
+ // Reads repeated int64 values from the current position using packed
+ // encoding.
+ //
+ // Returns the number of values read. In the case of error, the return value
+ // indicates the number of values successfully read, in addition to the
+ // error.
+ StatusWithSize ReadPackedInt64(std::span<int64_t> out) {
+ return ReadPackedVarintField(std::as_writable_bytes(out),
+ sizeof(int64_t),
+ VarintDecodeType::kNormal);
+ }
+
+ // Reads repeated int64 values from the current position into the vector,
+ // supporting either repeated single field elements or packed encoding.
+ Status ReadRepeatedInt64(pw::Vector<int64_t>& out) {
+ return ReadRepeatedVarintField<int64_t>(out, VarintDecodeType::kNormal);
+ }
// Reads a proto uint64 value from the current position.
Result<uint64_t> ReadUint64() {
- uint64_t varint;
- if (Status status = ReadVarintField(&varint); !status.ok()) {
- return status;
- }
- return varint;
+ return ReadVarintField<uint64_t>(VarintDecodeType::kUnsigned);
+ }
+
+ // Reads repeated uint64 values from the current position using packed
+ // encoding.
+ //
+ // Returns the number of values read. In the case of error, the return value
+ // indicates the number of values successfully read, in addition to the
+ // error.
+ StatusWithSize ReadPackedUint64(std::span<uint64_t> out) {
+ return ReadPackedVarintField(std::as_writable_bytes(out),
+ sizeof(uint64_t),
+ VarintDecodeType::kUnsigned);
+ }
+
+ // Reads repeated uint64 values from the current position into the vector,
+ // supporting either repeated single field elements or packed encoding.
+ Status ReadRepeatedUint64(pw::Vector<uint64_t>& out) {
+ return ReadRepeatedVarintField<uint64_t>(out, VarintDecodeType::kUnsigned);
}
// Reads a proto sint32 value from the current position.
- Result<int32_t> ReadSint32();
+ Result<int32_t> ReadSint32() {
+ return ReadVarintField<int32_t>(VarintDecodeType::kZigZag);
+ }
+
+ // Reads repeated sint32 values from the current position using packed
+ // encoding.
+ //
+ // Returns the number of values read. In the case of error, the return value
+ // indicates the number of values successfully read, in addition to the
+ // error.
+ StatusWithSize ReadPackedSint32(std::span<int32_t> out) {
+ return ReadPackedVarintField(std::as_writable_bytes(out),
+ sizeof(int32_t),
+ VarintDecodeType::kZigZag);
+ }
+
+ // Reads repeated sint32 values from the current position into the vector,
+ // supporting either repeated single field elements or packed encoding.
+ Status ReadRepeatedSint32(pw::Vector<int32_t>& out) {
+ return ReadRepeatedVarintField<int32_t>(out, VarintDecodeType::kZigZag);
+ }
// Reads a proto sint64 value from the current position.
- Result<int64_t> ReadSint64();
+ Result<int64_t> ReadSint64() {
+ return ReadVarintField<int64_t>(VarintDecodeType::kZigZag);
+ }
+
+ // Reads repeated sint64 values from the current position using packed
+ // encoding.
+ //
+ // Returns the number of values read. In the case of error, the return value
+ // indicates the number of values successfully read, in addition to the
+ // error.
+ StatusWithSize ReadPackedSint64(std::span<int64_t> out) {
+ return ReadPackedVarintField(std::as_writable_bytes(out),
+ sizeof(int64_t),
+ VarintDecodeType::kZigZag);
+ }
+
+ // Reads repeated sint64 values from the current position into the vector,
+ // supporting either repeated single field elements or packed encoding.
+ Status ReadRepeatedSint64(pw::Vector<int64_t>& out) {
+ return ReadRepeatedVarintField<int64_t>(out, VarintDecodeType::kZigZag);
+ }
// Reads a proto bool value from the current position.
- Result<bool> ReadBool();
+ Result<bool> ReadBool() {
+ return ReadVarintField<bool>(VarintDecodeType::kUnsigned);
+ }
+
+ // Reads repeated bool values from the current position using packed
+ // encoding.
+ //
+ // Returns the number of values read. In the case of error, the return value
+ // indicates the number of values successfully read, in addition to the
+ // error.
+ StatusWithSize ReadPackedBool(std::span<bool> out) {
+ return ReadPackedVarintField(
+ std::as_writable_bytes(out), sizeof(bool), VarintDecodeType::kUnsigned);
+ }
+
+ // Reads repeated bool values from the current position into the vector,
+ // supporting either repeated single field elements or packed encoding.
+ Status ReadRepeatedBool(pw::Vector<bool>& out) {
+ return ReadRepeatedVarintField<bool>(out, VarintDecodeType::kUnsigned);
+ }
// Reads a proto fixed32 value from the current position.
Result<uint32_t> ReadFixed32() { return ReadFixedField<uint32_t>(); }
+ // Reads repeated fixed32 values from the current position using packed
+ // encoding.
+ //
+ // Returns the number of values read.
+ StatusWithSize ReadPackedFixed32(std::span<uint32_t> out) {
+ return ReadPackedFixedField(std::as_writable_bytes(out), sizeof(uint32_t));
+ }
+
+ // Reads repeated fixed32 values from the current position into the vector,
+ // supporting either repeated single field elements or packed encoding.
+ Status ReadRepeatedFixed32(pw::Vector<uint32_t>& out) {
+ return ReadRepeatedFixedField<uint32_t>(out);
+ }
+
// Reads a proto fixed64 value from the current position.
Result<uint64_t> ReadFixed64() { return ReadFixedField<uint64_t>(); }
+ // Reads repeated fixed64 values from the current position using packed
+ // encoding.
+ //
+ // Returns the number of values read.
+ StatusWithSize ReadPackedFixed64(std::span<uint64_t> out) {
+ return ReadPackedFixedField(std::as_writable_bytes(out), sizeof(uint64_t));
+ }
+
+ // Reads repeated fixed64 values from the current position into the vector,
+ // supporting either repeated single field elements or packed encoding.
+ Status ReadRepeatedFixed64(pw::Vector<uint64_t>& out) {
+ return ReadRepeatedFixedField<uint64_t>(out);
+ }
+
// Reads a proto sfixed32 value from the current position.
- Result<int32_t> ReadSfixed32() {
- Result<uint32_t> fixed32 = ReadFixed32();
- if (!fixed32.ok()) {
- return fixed32.status();
- }
- return fixed32.value();
+ Result<int32_t> ReadSfixed32() { return ReadFixedField<int32_t>(); }
+
+ // Reads repeated sfixed32 values from the current position using packed
+ // encoding.
+ //
+ // Returns the number of values read.
+ StatusWithSize ReadPackedSfixed32(std::span<int32_t> out) {
+ return ReadPackedFixedField(std::as_writable_bytes(out), sizeof(int32_t));
+ }
+
+ // Reads repeated sfixed32 values from the current position into the vector,
+ // supporting either repeated single field elements or packed encoding.
+ Status ReadRepeatedSfixed32(pw::Vector<int32_t>& out) {
+ return ReadRepeatedFixedField<int32_t>(out);
}
// Reads a proto sfixed64 value from the current position.
- Result<int64_t> ReadSfixed64() {
- Result<uint64_t> fixed64 = ReadFixed64();
- if (!fixed64.ok()) {
- return fixed64.status();
- }
- return fixed64.value();
+ Result<int64_t> ReadSfixed64() { return ReadFixedField<int64_t>(); }
+
+ // Reads repeated sfixed64 values from the current position using packed
+ // encoding.
+ //
+ // Returns the number of values read.
+ StatusWithSize ReadPackedSfixed64(std::span<int64_t> out) {
+ return ReadPackedFixedField(std::as_writable_bytes(out), sizeof(int64_t));
+ }
+
+ // Reads repeated sfixed64 values from the current position into the vector,
+ // supporting either repeated single field elements or packed encoding.
+ Status ReadRepeatedSfixed64(pw::Vector<int64_t>& out) {
+ return ReadRepeatedFixedField<int64_t>(out);
}
// Reads a proto float value from the current position.
Result<float> ReadFloat() {
static_assert(sizeof(float) == sizeof(uint32_t),
"Float and uint32_t must be the same size for protobufs");
- float f;
- if (Status status =
- ReadFixedField(std::as_writable_bytes(std::span(&f, 1)));
- !status.ok()) {
- return status;
- }
- return f;
+ return ReadFixedField<float>();
+ }
+
+ // Reads repeated float values from the current position using packed
+ // encoding.
+ //
+ // Returns the number of values read.
+ StatusWithSize ReadPackedFloat(std::span<float> out) {
+ static_assert(sizeof(float) == sizeof(uint32_t),
+ "Float and uint32_t must be the same size for protobufs");
+ return ReadPackedFixedField(std::as_writable_bytes(out), sizeof(float));
+ }
+
+ // Reads repeated float values from the current position into the vector,
+ // supporting either repeated single field elements or packed encoding.
+ Status ReadRepeatedFloat(pw::Vector<float>& out) {
+ return ReadRepeatedFixedField<float>(out);
}
// Reads a proto double value from the current position.
Result<double> ReadDouble() {
static_assert(sizeof(double) == sizeof(uint64_t),
"Double and uint64_t must be the same size for protobufs");
- double d;
- if (Status status =
- ReadFixedField(std::as_writable_bytes(std::span(&d, 1)));
- !status.ok()) {
- return status;
- }
- return d;
+ return ReadFixedField<double>();
+ }
+
+ // Reads repeated double values from the current position using packed
+ // encoding.
+ //
+ // Returns the number of values read.
+ StatusWithSize ReadPackedDouble(std::span<double> out) {
+ static_assert(sizeof(double) == sizeof(uint64_t),
+ "Double and uint64_t must be the same size for protobufs");
+ return ReadPackedFixedField(std::as_writable_bytes(out), sizeof(double));
}
- // Reads a proto string value from the current position. The string is copied
- // into the provided buffer and the read size is returned. The copied string
- // will NOT be null terminated; this should be done manually if desired.
+ // Reads repeated double values from the current position into the vector,
+ // supporting either repeated single field elements or packed encoding.
+ Status ReadRepeatedDouble(pw::Vector<double>& out) {
+ return ReadRepeatedFixedField<double>(out);
+ }
+
+ // Reads a proto string value from the current position. The string is
+ // copied into the provided buffer and the read size is returned. The copied
+ // string will NOT be null terminated; this should be done manually if
+ // desired.
//
// If the buffer is too small to fit the string value, RESOURCE_EXHAUSTED is
- // returned and no data is read. The decoder's position remains on the string
- // field.
+ // returned and no data is read. The decoder's position remains on the
+ // string field.
StatusWithSize ReadString(std::span<char> out) {
return ReadBytes(std::as_writable_bytes(out));
}
@@ -303,9 +504,40 @@ class StreamDecoder {
// relative to the given reader.
Result<Bounds> GetLengthDelimitedPayloadBounds();
+ protected:
+ // Specialized move constructor used only for codegen.
+ //
+ // Postcondition: The other decoder is invalidated and cannot be used as it
+ // acts like a parent decoder with an active child decoder.
+ constexpr StreamDecoder(StreamDecoder&& other)
+ : reader_(other.reader_),
+ stream_bounds_(other.stream_bounds_),
+ position_(other.position_),
+ current_field_(other.current_field_),
+ delimited_field_size_(other.delimited_field_size_),
+ delimited_field_offset_(other.delimited_field_offset_),
+ parent_(other.parent_),
+ field_consumed_(other.field_consumed_),
+ nested_reader_open_(other.nested_reader_open_),
+ status_(other.status_) {
+ PW_ASSERT(!nested_reader_open_);
+ // Make the nested decoder look like it has an open child to block reads for
+ // the remainder of the object's life, and an invalid status to ensure it
+ // doesn't advance the stream on destruction.
+ other.nested_reader_open_ = true;
+ other.parent_ = nullptr;
+ other.status_ = pw::Status::Cancelled();
+ }
+
private:
friend class BytesReader;
+ enum class VarintDecodeType {
+ kUnsigned,
+ kNormal,
+ kZigZag,
+ };
+
// The FieldKey class can't store an invalid key, so pick a random large key
// to set as the initial value. This will be overwritten the first time Next()
// is called, and FieldKey() fails if Next() is not called first -- ensuring
@@ -354,25 +586,112 @@ class StreamDecoder {
Status ReadFieldKey();
Status SkipField();
- Status ReadVarintField(uint64_t* out);
+ Status ReadVarintField(std::span<std::byte> out,
+ VarintDecodeType decode_type);
+
+ StatusWithSize ReadOneVarint(std::span<std::byte> out,
+ VarintDecodeType decode_type);
+
+ template <typename T>
+ Result<T> ReadVarintField(VarintDecodeType decode_type) {
+ static_assert(
+ std::is_same_v<T, bool> || std::is_same_v<T, uint32_t> ||
+ std::is_same_v<T, int32_t> || std::is_same_v<T, uint64_t> ||
+ std::is_same_v<T, int64_t>,
+ "Protobuf varints must be of type bool, uint32_t, int32_t, uint64_t, "
+ "or int64_t");
+
+ T result;
+ if (Status status = ReadVarintField(
+ std::as_writable_bytes(std::span(&result, 1)), decode_type);
+ !status.ok()) {
+ return status;
+ }
+
+ return result;
+ }
Status ReadFixedField(std::span<std::byte> out);
template <typename T>
Result<T> ReadFixedField() {
- static_assert(std::is_same_v<T, uint32_t> || std::is_same_v<T, uint64_t>,
- "Protobuf fixed-size fields must be 32- or 64-bit");
+ static_assert(
+ sizeof(T) == sizeof(uint32_t) || sizeof(T) == sizeof(uint64_t),
+ "Protobuf fixed-size fields must be 32- or 64-bit");
- std::array<std::byte, sizeof(T)> buffer;
- if (Status status = ReadFixedField(std::span(buffer)); !status.ok()) {
+ T result;
+ if (Status status =
+ ReadFixedField(std::as_writable_bytes(std::span(&result, 1)));
+ !status.ok()) {
return status;
}
- return bytes::ReadInOrder<T>(std::endian::little, buffer);
+ return result;
}
StatusWithSize ReadDelimitedField(std::span<std::byte> out);
+ StatusWithSize ReadPackedFixedField(std::span<std::byte> out,
+ size_t elem_size);
+
+ StatusWithSize ReadPackedVarintField(std::span<std::byte> out,
+ size_t elem_size,
+ VarintDecodeType decode_type);
+
+ template <typename T>
+ Status ReadRepeatedFixedField(pw::Vector<T>& out) {
+ if (out.full()) {
+ return Status::ResourceExhausted();
+ }
+ const size_t old_size = out.size();
+ if (current_field_.wire_type() == WireType::kDelimited) {
+ out.resize(out.capacity());
+ const auto sws = ReadPackedFixedField(
+ std::as_writable_bytes(
+ std::span(out.data() + old_size, out.size() - old_size)),
+ sizeof(T));
+ out.resize(old_size + sws.size());
+ return sws.status();
+ } else {
+ out.resize(old_size + 1);
+ const auto status = ReadFixedField(std::as_writable_bytes(
+ std::span(out.data() + old_size, out.size() - old_size)));
+ if (!status.ok()) {
+ out.resize(old_size);
+ }
+ return status;
+ }
+ }
+
+ template <typename T>
+ Status ReadRepeatedVarintField(pw::Vector<T>& out,
+ VarintDecodeType decode_type) {
+ if (out.full()) {
+ return Status::ResourceExhausted();
+ }
+ const size_t old_size = out.size();
+ if (current_field_.wire_type() == WireType::kDelimited) {
+ out.resize(out.capacity());
+ const auto sws = ReadPackedVarintField(
+ std::as_writable_bytes(
+ std::span(out.data() + old_size, out.size() - old_size)),
+ sizeof(T),
+ decode_type);
+ out.resize(old_size + sws.size());
+ return sws.status();
+ } else {
+ out.resize(old_size + 1);
+ const auto status =
+ ReadVarintField(std::as_writable_bytes(std::span(
+ out.data() + old_size, out.size() - old_size)),
+ decode_type);
+ if (!status.ok()) {
+ out.resize(old_size);
+ }
+ return status;
+ }
+ }
+
Status CheckOkToRead(WireType type);
stream::Reader& reader_;
diff --git a/pw_protobuf/pw_protobuf_test_protos/imported.proto b/pw_protobuf/pw_protobuf_test_protos/imported.proto
index c37538064..1ba1403e2 100644
--- a/pw_protobuf/pw_protobuf_test_protos/imported.proto
+++ b/pw_protobuf/pw_protobuf_test_protos/imported.proto
@@ -19,3 +19,9 @@ message Timestamp {
uint64 seconds = 1;
uint32 nanoseconds = 2;
}
+
+enum Status {
+ UNKNOWN = 0;
+ NOT_OK = 1;
+ OK = 2;
+}
diff --git a/pw_protobuf/pw_protobuf_test_protos/importer.proto b/pw_protobuf/pw_protobuf_test_protos/importer.proto
index 39ad8b928..00bec36cd 100644
--- a/pw_protobuf/pw_protobuf_test_protos/importer.proto
+++ b/pw_protobuf/pw_protobuf_test_protos/importer.proto
@@ -26,3 +26,7 @@ message Period {
message Nothing {
pw.protobuf.Empty nothing = 1;
}
+
+message TestResult {
+ imported.Status status = 1;
+}
diff --git a/pw_protobuf/pw_protobuf_test_protos/proto2.proto b/pw_protobuf/pw_protobuf_test_protos/proto2.proto
index c8d9e8e08..58d9a87d4 100644
--- a/pw_protobuf/pw_protobuf_test_protos/proto2.proto
+++ b/pw_protobuf/pw_protobuf_test_protos/proto2.proto
@@ -16,7 +16,7 @@ syntax = "proto2";
package pw.protobuf.test;
message Foo {
- required uint32 int = 1;
+ required uint32 integer = 1;
optional string str = 2;
repeated Bar bar = 3;
optional pb pb = 4;
diff --git a/pw_protobuf/pw_protobuf_test_protos/repeated.proto b/pw_protobuf/pw_protobuf_test_protos/repeated.proto
index c281db071..590d2e00c 100644
--- a/pw_protobuf/pw_protobuf_test_protos/repeated.proto
+++ b/pw_protobuf/pw_protobuf_test_protos/repeated.proto
@@ -21,6 +21,8 @@ message RepeatedTest {
repeated string strings = 3;
repeated double doubles = 4;
repeated Struct structs = 5;
+ repeated fixed32 fixed32s = 6;
+ repeated bool bools = 7;
};
message Struct {
diff --git a/pw_protobuf/py/pw_protobuf/codegen_pwpb.py b/pw_protobuf/py/pw_protobuf/codegen_pwpb.py
index b239c80e1..fc28fba72 100644
--- a/pw_protobuf/py/pw_protobuf/codegen_pwpb.py
+++ b/pw_protobuf/py/pw_protobuf/codegen_pwpb.py
@@ -37,27 +37,45 @@ PROTO_CC_EXTENSION = '.pwpb.cc'
PROTOBUF_NAMESPACE = '::pw::protobuf'
-class EncoderType(enum.Enum):
- MEMORY = 1
- STREAMING = 2
+class ClassType(enum.Enum):
+ """Type of class."""
+ MEMORY_ENCODER = 1
+ STREAMING_ENCODER = 2
+ # MEMORY_DECODER = 3
+ STREAMING_DECODER = 4
def base_class_name(self) -> str:
- """Returns the base class used by this encoder type."""
- if self is self.STREAMING:
+ """Returns the base class used by this class type."""
+ if self is self.STREAMING_ENCODER:
return 'StreamEncoder'
- if self is self.MEMORY:
+ if self is self.MEMORY_ENCODER:
return 'MemoryEncoder'
+ if self is self.STREAMING_DECODER:
+ return 'StreamDecoder'
- raise ValueError('Unknown encoder type')
+ raise ValueError('Unknown class type')
def codegen_class_name(self) -> str:
- """Returns the base class used by this encoder type."""
- if self is self.STREAMING:
+ """Returns the base class used by this class type."""
+ if self is self.STREAMING_ENCODER:
return 'StreamEncoder'
- if self is self.MEMORY:
+ if self is self.MEMORY_ENCODER:
return 'MemoryEncoder'
+ if self is self.STREAMING_DECODER:
+ return 'StreamDecoder'
- raise ValueError('Unknown encoder type')
+ raise ValueError('Unknown class type')
+
+ def is_encoder(self) -> bool:
+ """Returns True if this class type is an encoder."""
+ if self is self.STREAMING_ENCODER:
+ return True
+ if self is self.MEMORY_ENCODER:
+ return True
+ if self is self.STREAMING_DECODER:
+ return False
+
+ raise ValueError('Unknown class type')
# protoc captures stdout, so we need to printf debug to stderr.
@@ -152,29 +170,6 @@ class ProtoMethod(abc.ABC):
return namespace
-class SubMessageMethod(ProtoMethod):
- """Method which returns a sub-message encoder."""
- def name(self) -> str:
- return 'Get{}Encoder'.format(self._field.name())
-
- def return_type(self, from_root: bool = False) -> str:
- return '{}::StreamEncoder'.format(
- self._relative_type_namespace(from_root))
-
- def params(self) -> List[Tuple[str, str]]:
- return []
-
- def body(self) -> List[str]:
- line = 'return {}::StreamEncoder(GetNestedEncoder({}));'.format(
- self._relative_type_namespace(), self.field_cast())
- return [line]
-
- # Submessage methods are not defined within the class itself because the
- # submessage class may not yet have been defined.
- def in_class_definition(self) -> bool:
- return False
-
-
class WriteMethod(ProtoMethod):
"""Base class representing an encoder write method.
@@ -214,8 +209,8 @@ class WriteMethod(ProtoMethod):
raise NotImplementedError()
-class PackedMethod(WriteMethod):
- """A method for a packed repeated field.
+class PackedWriteMethod(WriteMethod):
+ """A method for writing a packed repeated field.
Same as a WriteMethod, but is only generated for repeated fields.
"""
@@ -226,13 +221,166 @@ class PackedMethod(WriteMethod):
raise NotImplementedError()
+class ReadMethod(ProtoMethod):
+ """Base class representing a decoder read method.
+
+ Read methods have following format (for the proto field foo):
+
+ Result<{ctype}> ReadFoo({params...}) {
+ Result<uint32_t> field_number = FieldNumber();
+ PW_ASSERT(field_number.ok());
+ PW_ASSERT(field_number.value() == static_cast<uint32_t>(Fields::FOO));
+ return decoder_->Read{type}({params...});
+ }
+
+ """
+ def name(self) -> str:
+ return 'Read{}'.format(self._field.name())
+
+ def return_type(self, from_root: bool = False) -> str:
+ return '::pw::Result<{}>'.format(self._result_type())
+
+ def _result_type(self) -> str:
+ """The type returned by the decoder function.
+
+ Defined in subclasses.
+
+ e.g. 'uint32_t', 'std::span<std::byte>', etc.
+ """
+ raise NotImplementedError()
+
+ def body(self) -> List[str]:
+ lines: List[str] = []
+ lines += ['::pw::Result<uint32_t> field_number = FieldNumber();']
+ lines += ['PW_ASSERT(field_number.ok());']
+ lines += [
+ 'PW_ASSERT(field_number.value() == {});'.format(self.field_cast())
+ ]
+ lines += self._decoder_body()
+ return lines
+
+ def _decoder_body(self) -> List[str]:
+ """Returns the decoder body part as a list of source code lines."""
+ params = ', '.join([pair[1] for pair in self.params()])
+ line = 'return {}({});'.format(self._decoder_fn(), params)
+ return [line]
+
+ def _decoder_fn(self) -> str:
+ """The decoder function to call.
+
+ Defined in subclasses.
+
+ e.g. 'ReadUint32', 'ReadBytes', etc.
+ """
+ raise NotImplementedError()
+
+ def params(self) -> List[Tuple[str, str]]:
+ """Method parameters, can be overridden in subclasses."""
+ return []
+
+ def in_class_definition(self) -> bool:
+ return True
+
+
+class PackedReadMethod(ReadMethod):
+ """A method for reading a packed repeated field.
+
+ Same as ReadMethod, but is only generated for repeated fields.
+ """
+ def should_appear(self) -> bool:
+ return self._field.is_repeated()
+
+ def return_type(self, from_root: bool = False) -> str:
+ return '::pw::StatusWithSize'
+
+ def params(self) -> List[Tuple[str, str]]:
+ return [('std::span<{}>'.format(self._result_type()), 'out')]
+
+
+class PackedReadVectorMethod(ReadMethod):
+ """A method for reading a packed repeated field.
+
+ An alternative to ReadMethod for repeated fields that appends values into
+ a pw::Vector.
+ """
+ def should_appear(self) -> bool:
+ return self._field.is_repeated()
+
+ def return_type(self, from_root: bool = False) -> str:
+ return '::pw::Status'
+
+ def params(self) -> List[Tuple[str, str]]:
+ return [('::pw::Vector<{}>&'.format(self._result_type()), 'out')]
+
+
#
-# The following code defines write methods for each of the
+# The following code defines write and read methods for each of the
+# complex protobuf types.
+#
+
+
+class SubMessageEncoderMethod(ProtoMethod):
+ """Method which returns a sub-message encoder."""
+ def name(self) -> str:
+ return 'Get{}Encoder'.format(self._field.name())
+
+ def return_type(self, from_root: bool = False) -> str:
+ return '{}::StreamEncoder'.format(
+ self._relative_type_namespace(from_root))
+
+ def params(self) -> List[Tuple[str, str]]:
+ return []
+
+ def body(self) -> List[str]:
+ line = 'return {}::StreamEncoder(GetNestedEncoder({}));'.format(
+ self._relative_type_namespace(), self.field_cast())
+ return [line]
+
+ # Submessage methods are not defined within the class itself because the
+ # submessage class may not yet have been defined.
+ def in_class_definition(self) -> bool:
+ return False
+
+
+class SubMessageDecoderMethod(ReadMethod):
+ """Method which returns a sub-message decoder."""
+ def name(self) -> str:
+ return 'Get{}Decoder'.format(self._field.name())
+
+ def return_type(self, from_root: bool = False) -> str:
+ return '{}::StreamDecoder'.format(
+ self._relative_type_namespace(from_root))
+
+ def _decoder_body(self) -> List[str]:
+ line = 'return {}::StreamDecoder(GetNestedDecoder());'.format(
+ self._relative_type_namespace())
+ return [line]
+
+ # Submessage methods are not defined within the class itself because the
+ # submessage class may not yet have been defined.
+ def in_class_definition(self) -> bool:
+ return False
+
+
+class BytesReaderMethod(ReadMethod):
+ """Method which returns a bytes reader."""
+ def name(self) -> str:
+ return 'Get{}Reader'.format(self._field.name())
+
+ def return_type(self, from_root: bool = False) -> str:
+ return '::pw::protobuf::StreamDecoder::BytesReader'
+
+ def _decoder_fn(self) -> str:
+ return 'GetBytesReader'
+
+
+#
+# The following code defines write and read methods for each of the
# primitive protobuf types.
#
-class DoubleMethod(WriteMethod):
+class DoubleWriteMethod(WriteMethod):
"""Method which writes a proto double value."""
def params(self) -> List[Tuple[str, str]]:
return [('double', 'value')]
@@ -241,7 +389,7 @@ class DoubleMethod(WriteMethod):
return 'WriteDouble'
-class PackedDoubleMethod(PackedMethod):
+class PackedDoubleWriteMethod(PackedWriteMethod):
"""Method which writes a packed list of doubles."""
def params(self) -> List[Tuple[str, str]]:
return [('std::span<const double>', 'values')]
@@ -250,7 +398,43 @@ class PackedDoubleMethod(PackedMethod):
return 'WritePackedDouble'
-class FloatMethod(WriteMethod):
+class PackedDoubleWriteVectorMethod(PackedWriteMethod):
+ """Method which writes a packed vector of doubles."""
+ def params(self) -> List[Tuple[str, str]]:
+ return [('const ::pw::Vector<double>&', 'values')]
+
+ def _encoder_fn(self) -> str:
+ return 'WriteRepeatedDouble'
+
+
+class DoubleReadMethod(ReadMethod):
+ """Method which reads a proto double value."""
+ def _result_type(self) -> str:
+ return 'double'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadDouble'
+
+
+class PackedDoubleReadMethod(PackedReadMethod):
+ """Method which reads packed double values."""
+ def _result_type(self) -> str:
+ return 'double'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadPackedDouble'
+
+
+class PackedDoubleReadVectorMethod(PackedReadVectorMethod):
+ """Method which reads packed double values."""
+ def _result_type(self) -> str:
+ return 'double'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadRepeatedDouble'
+
+
+class FloatWriteMethod(WriteMethod):
"""Method which writes a proto float value."""
def params(self) -> List[Tuple[str, str]]:
return [('float', 'value')]
@@ -259,7 +443,7 @@ class FloatMethod(WriteMethod):
return 'WriteFloat'
-class PackedFloatMethod(PackedMethod):
+class PackedFloatWriteMethod(PackedWriteMethod):
"""Method which writes a packed list of floats."""
def params(self) -> List[Tuple[str, str]]:
return [('std::span<const float>', 'values')]
@@ -268,7 +452,43 @@ class PackedFloatMethod(PackedMethod):
return 'WritePackedFloat'
-class Int32Method(WriteMethod):
+class PackedFloatWriteVectorMethod(PackedWriteMethod):
+ """Method which writes a packed vector of floats."""
+ def params(self) -> List[Tuple[str, str]]:
+ return [('const ::pw::Vector<float>&', 'values')]
+
+ def _encoder_fn(self) -> str:
+ return 'WriteRepeatedFloat'
+
+
+class FloatReadMethod(ReadMethod):
+ """Method which reads a proto float value."""
+ def _result_type(self) -> str:
+ return 'float'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadFloat'
+
+
+class PackedFloatReadMethod(PackedReadMethod):
+ """Method which reads packed float values."""
+ def _result_type(self) -> str:
+ return 'float'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadPackedFloat'
+
+
+class PackedFloatReadVectorMethod(PackedReadVectorMethod):
+ """Method which reads packed float values."""
+ def _result_type(self) -> str:
+ return 'float'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadRepeatedFloat'
+
+
+class Int32WriteMethod(WriteMethod):
"""Method which writes a proto int32 value."""
def params(self) -> List[Tuple[str, str]]:
return [('int32_t', 'value')]
@@ -277,7 +497,7 @@ class Int32Method(WriteMethod):
return 'WriteInt32'
-class PackedInt32Method(PackedMethod):
+class PackedInt32WriteMethod(PackedWriteMethod):
"""Method which writes a packed list of int32."""
def params(self) -> List[Tuple[str, str]]:
return [('std::span<const int32_t>', 'values')]
@@ -286,7 +506,43 @@ class PackedInt32Method(PackedMethod):
return 'WritePackedInt32'
-class Sint32Method(WriteMethod):
+class PackedInt32WriteVectorMethod(PackedWriteMethod):
+ """Method which writes a packed vector of int32."""
+ def params(self) -> List[Tuple[str, str]]:
+ return [('const ::pw::Vector<int32_t>&', 'values')]
+
+ def _encoder_fn(self) -> str:
+ return 'WriteRepeatedInt32'
+
+
+class Int32ReadMethod(ReadMethod):
+ """Method which reads a proto int32 value."""
+ def _result_type(self) -> str:
+ return 'int32_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadInt32'
+
+
+class PackedInt32ReadMethod(PackedReadMethod):
+ """Method which reads packed int32 values."""
+ def _result_type(self) -> str:
+ return 'int32_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadPackedInt32'
+
+
+class PackedInt32ReadVectorMethod(PackedReadVectorMethod):
+ """Method which reads packed int32 values."""
+ def _result_type(self) -> str:
+ return 'int32_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadRepeatedInt32'
+
+
+class Sint32WriteMethod(WriteMethod):
"""Method which writes a proto sint32 value."""
def params(self) -> List[Tuple[str, str]]:
return [('int32_t', 'value')]
@@ -295,7 +551,7 @@ class Sint32Method(WriteMethod):
return 'WriteSint32'
-class PackedSint32Method(PackedMethod):
+class PackedSint32WriteMethod(PackedWriteMethod):
"""Method which writes a packed list of sint32."""
def params(self) -> List[Tuple[str, str]]:
return [('std::span<const int32_t>', 'values')]
@@ -304,7 +560,43 @@ class PackedSint32Method(PackedMethod):
return 'WritePackedSint32'
-class Sfixed32Method(WriteMethod):
+class PackedSint32WriteVectorMethod(PackedWriteMethod):
+ """Method which writes a packed vector of sint32."""
+ def params(self) -> List[Tuple[str, str]]:
+ return [('const ::pw::Vector<int32_t>&', 'values')]
+
+ def _encoder_fn(self) -> str:
+ return 'WriteRepeatedSint32'
+
+
+class Sint32ReadMethod(ReadMethod):
+ """Method which reads a proto sint32 value."""
+ def _result_type(self) -> str:
+ return 'int32_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadSint32'
+
+
+class PackedSint32ReadMethod(PackedReadMethod):
+ """Method which reads packed sint32 values."""
+ def _result_type(self) -> str:
+ return 'int32_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadPackedSint32'
+
+
+class PackedSint32ReadVectorMethod(PackedReadVectorMethod):
+ """Method which reads packed sint32 values."""
+ def _result_type(self) -> str:
+ return 'int32_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadRepeatedSint32'
+
+
+class Sfixed32WriteMethod(WriteMethod):
"""Method which writes a proto sfixed32 value."""
def params(self) -> List[Tuple[str, str]]:
return [('int32_t', 'value')]
@@ -313,7 +605,7 @@ class Sfixed32Method(WriteMethod):
return 'WriteSfixed32'
-class PackedSfixed32Method(PackedMethod):
+class PackedSfixed32WriteMethod(PackedWriteMethod):
"""Method which writes a packed list of sfixed32."""
def params(self) -> List[Tuple[str, str]]:
return [('std::span<const int32_t>', 'values')]
@@ -322,7 +614,43 @@ class PackedSfixed32Method(PackedMethod):
return 'WritePackedSfixed32'
-class Int64Method(WriteMethod):
+class PackedSfixed32WriteVectorMethod(PackedWriteMethod):
+ """Method which writes a packed vector of sfixed32."""
+ def params(self) -> List[Tuple[str, str]]:
+ return [('const ::pw::Vector<int32_t>&', 'values')]
+
+ def _encoder_fn(self) -> str:
+ return 'WriteRepeatedSfixed32'
+
+
+class Sfixed32ReadMethod(ReadMethod):
+ """Method which reads a proto sfixed32 value."""
+ def _result_type(self) -> str:
+ return 'int32_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadSfixed32'
+
+
+class PackedSfixed32ReadMethod(PackedReadMethod):
+ """Method which reads packed sfixed32 values."""
+ def _result_type(self) -> str:
+ return 'int32_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadPackedSfixed32'
+
+
+class PackedSfixed32ReadVectorMethod(PackedReadVectorMethod):
+ """Method which reads packed sfixed32 values."""
+ def _result_type(self) -> str:
+ return 'int32_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadRepeatedSfixed32'
+
+
+class Int64WriteMethod(WriteMethod):
"""Method which writes a proto int64 value."""
def params(self) -> List[Tuple[str, str]]:
return [('int64_t', 'value')]
@@ -331,8 +659,8 @@ class Int64Method(WriteMethod):
return 'WriteInt64'
-class PackedInt64Method(PackedMethod):
- """Method which writes a proto int64 value."""
+class PackedInt64WriteMethod(PackedWriteMethod):
+ """Method which writes a packed list of int64."""
def params(self) -> List[Tuple[str, str]]:
return [('std::span<const int64_t>', 'values')]
@@ -340,7 +668,43 @@ class PackedInt64Method(PackedMethod):
return 'WritePackedInt64'
-class Sint64Method(WriteMethod):
+class PackedInt64WriteVectorMethod(PackedWriteMethod):
+ """Method which writes a packed vector of int64."""
+ def params(self) -> List[Tuple[str, str]]:
+ return [('const ::pw::Vector<int64_t>&', 'values')]
+
+ def _encoder_fn(self) -> str:
+ return 'WriteRepeatedInt64'
+
+
+class Int64ReadMethod(ReadMethod):
+ """Method which reads a proto int64 value."""
+ def _result_type(self) -> str:
+ return 'int64_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadInt64'
+
+
+class PackedInt64ReadMethod(PackedReadMethod):
+ """Method which reads packed int64 values."""
+ def _result_type(self) -> str:
+ return 'int64_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadPackedInt64'
+
+
+class PackedInt64ReadVectorMethod(PackedReadVectorMethod):
+ """Method which reads packed int64 values."""
+ def _result_type(self) -> str:
+ return 'int64_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadRepeatedInt64'
+
+
+class Sint64WriteMethod(WriteMethod):
"""Method which writes a proto sint64 value."""
def params(self) -> List[Tuple[str, str]]:
return [('int64_t', 'value')]
@@ -349,8 +713,8 @@ class Sint64Method(WriteMethod):
return 'WriteSint64'
-class PackedSint64Method(PackedMethod):
- """Method which writes a proto sint64 value."""
+class PackedSint64WriteMethod(PackedWriteMethod):
+ """Method which writes a packed list of sint64."""
def params(self) -> List[Tuple[str, str]]:
return [('std::span<const int64_t>', 'values')]
@@ -358,7 +722,43 @@ class PackedSint64Method(PackedMethod):
return 'WritePackedSint64'
-class Sfixed64Method(WriteMethod):
+class PackedSint64WriteVectorMethod(PackedWriteMethod):
+ """Method which writes a packed vector of sint64."""
+ def params(self) -> List[Tuple[str, str]]:
+ return [('const ::pw::Vector<int64_t>&', 'values')]
+
+ def _encoder_fn(self) -> str:
+ return 'WriteRepeatedSint64'
+
+
+class Sint64ReadMethod(ReadMethod):
+ """Method which reads a proto sint64 value."""
+ def _result_type(self) -> str:
+ return 'int64_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadSint64'
+
+
+class PackedSint64ReadMethod(PackedReadMethod):
+ """Method which reads packed sint64 values."""
+ def _result_type(self) -> str:
+ return 'int64_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadPackedSint64'
+
+
+class PackedSint64ReadVectorMethod(PackedReadVectorMethod):
+ """Method which reads packed sint64 values."""
+ def _result_type(self) -> str:
+ return 'int64_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadRepeatedSint64'
+
+
+class Sfixed64WriteMethod(WriteMethod):
"""Method which writes a proto sfixed64 value."""
def params(self) -> List[Tuple[str, str]]:
return [('int64_t', 'value')]
@@ -367,8 +767,8 @@ class Sfixed64Method(WriteMethod):
return 'WriteSfixed64'
-class PackedSfixed64Method(PackedMethod):
- """Method which writes a proto sfixed64 value."""
+class PackedSfixed64WriteMethod(PackedWriteMethod):
+ """Method which writes a packed list of sfixed64."""
def params(self) -> List[Tuple[str, str]]:
return [('std::span<const int64_t>', 'values')]
@@ -376,7 +776,43 @@ class PackedSfixed64Method(PackedMethod):
return 'WritePackedSfixed4'
-class Uint32Method(WriteMethod):
+class PackedSfixed64WriteVectorMethod(PackedWriteMethod):
+ """Method which writes a packed vector of sfixed64."""
+ def params(self) -> List[Tuple[str, str]]:
+ return [('const ::pw::Vector<int64_t>&', 'values')]
+
+ def _encoder_fn(self) -> str:
+ return 'WriteRepeatedSfixed4'
+
+
+class Sfixed64ReadMethod(ReadMethod):
+ """Method which reads a proto sfixed64 value."""
+ def _result_type(self) -> str:
+ return 'int64_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadSfixed64'
+
+
+class PackedSfixed64ReadMethod(PackedReadMethod):
+ """Method which reads packed sfixed64 values."""
+ def _result_type(self) -> str:
+ return 'int64_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadPackedSfixed64'
+
+
+class PackedSfixed64ReadVectorMethod(PackedReadVectorMethod):
+ """Method which reads packed sfixed64 values."""
+ def _result_type(self) -> str:
+ return 'int64_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadRepeatedSfixed64'
+
+
+class Uint32WriteMethod(WriteMethod):
"""Method which writes a proto uint32 value."""
def params(self) -> List[Tuple[str, str]]:
return [('uint32_t', 'value')]
@@ -385,8 +821,8 @@ class Uint32Method(WriteMethod):
return 'WriteUint32'
-class PackedUint32Method(PackedMethod):
- """Method which writes a proto uint32 value."""
+class PackedUint32WriteMethod(PackedWriteMethod):
+ """Method which writes a packed list of uint32."""
def params(self) -> List[Tuple[str, str]]:
return [('std::span<const uint32_t>', 'values')]
@@ -394,7 +830,43 @@ class PackedUint32Method(PackedMethod):
return 'WritePackedUint32'
-class Fixed32Method(WriteMethod):
+class PackedUint32WriteVectorMethod(PackedWriteMethod):
+ """Method which writes a packed vector of uint32."""
+ def params(self) -> List[Tuple[str, str]]:
+ return [('const ::pw::Vector<uint32_t>&', 'values')]
+
+ def _encoder_fn(self) -> str:
+ return 'WriteRepeatedUint32'
+
+
+class Uint32ReadMethod(ReadMethod):
+ """Method which reads a proto uint32 value."""
+ def _result_type(self) -> str:
+ return 'uint32_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadUint32'
+
+
+class PackedUint32ReadMethod(PackedReadMethod):
+ """Method which reads packed uint32 values."""
+ def _result_type(self) -> str:
+ return 'uint32_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadPackedUint32'
+
+
+class PackedUint32ReadVectorMethod(PackedReadVectorMethod):
+ """Method which reads packed uint32 values."""
+ def _result_type(self) -> str:
+ return 'uint32_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadRepeatedUint32'
+
+
+class Fixed32WriteMethod(WriteMethod):
"""Method which writes a proto fixed32 value."""
def params(self) -> List[Tuple[str, str]]:
return [('uint32_t', 'value')]
@@ -403,8 +875,8 @@ class Fixed32Method(WriteMethod):
return 'WriteFixed32'
-class PackedFixed32Method(PackedMethod):
- """Method which writes a proto fixed32 value."""
+class PackedFixed32WriteMethod(PackedWriteMethod):
+ """Method which writes a packed list of fixed32."""
def params(self) -> List[Tuple[str, str]]:
return [('std::span<const uint32_t>', 'values')]
@@ -412,7 +884,43 @@ class PackedFixed32Method(PackedMethod):
return 'WritePackedFixed32'
-class Uint64Method(WriteMethod):
+class PackedFixed32WriteVectorMethod(PackedWriteMethod):
+ """Method which writes a packed vector of fixed32."""
+ def params(self) -> List[Tuple[str, str]]:
+ return [('const ::pw::Vector<uint32_t>&', 'values')]
+
+ def _encoder_fn(self) -> str:
+ return 'WriteRepeatedFixed32'
+
+
+class Fixed32ReadMethod(ReadMethod):
+ """Method which reads a proto fixed32 value."""
+ def _result_type(self) -> str:
+ return 'uint32_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadFixed32'
+
+
+class PackedFixed32ReadMethod(PackedReadMethod):
+ """Method which reads packed fixed32 values."""
+ def _result_type(self) -> str:
+ return 'uint32_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadPackedFixed32'
+
+
+class PackedFixed32ReadVectorMethod(PackedReadVectorMethod):
+ """Method which reads packed fixed32 values."""
+ def _result_type(self) -> str:
+ return 'uint32_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadRepeatedFixed32'
+
+
+class Uint64WriteMethod(WriteMethod):
"""Method which writes a proto uint64 value."""
def params(self) -> List[Tuple[str, str]]:
return [('uint64_t', 'value')]
@@ -421,8 +929,8 @@ class Uint64Method(WriteMethod):
return 'WriteUint64'
-class PackedUint64Method(PackedMethod):
- """Method which writes a proto uint64 value."""
+class PackedUint64WriteMethod(PackedWriteMethod):
+ """Method which writes a packed list of uint64."""
def params(self) -> List[Tuple[str, str]]:
return [('std::span<const uint64_t>', 'values')]
@@ -430,7 +938,43 @@ class PackedUint64Method(PackedMethod):
return 'WritePackedUint64'
-class Fixed64Method(WriteMethod):
+class PackedUint64WriteVectorMethod(PackedWriteMethod):
+ """Method which writes a packed vector of uint64."""
+ def params(self) -> List[Tuple[str, str]]:
+ return [('const ::pw::Vector<uint64_t>&', 'values')]
+
+ def _encoder_fn(self) -> str:
+ return 'WriteRepeatedUint64'
+
+
+class Uint64ReadMethod(ReadMethod):
+ """Method which reads a proto uint64 value."""
+ def _result_type(self) -> str:
+ return 'uint64_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadUint64'
+
+
+class PackedUint64ReadMethod(PackedReadMethod):
+ """Method which reads packed uint64 values."""
+ def _result_type(self) -> str:
+ return 'uint64_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadPackedUint64'
+
+
+class PackedUint64ReadVectorMethod(PackedReadVectorMethod):
+ """Method which reads packed uint64 values."""
+ def _result_type(self) -> str:
+ return 'uint64_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadRepeatedUint64'
+
+
+class Fixed64WriteMethod(WriteMethod):
"""Method which writes a proto fixed64 value."""
def params(self) -> List[Tuple[str, str]]:
return [('uint64_t', 'value')]
@@ -439,8 +983,8 @@ class Fixed64Method(WriteMethod):
return 'WriteFixed64'
-class PackedFixed64Method(PackedMethod):
- """Method which writes a proto fixed64 value."""
+class PackedFixed64WriteMethod(PackedWriteMethod):
+ """Method which writes a packed list of fixed64."""
def params(self) -> List[Tuple[str, str]]:
return [('std::span<const uint64_t>', 'values')]
@@ -448,7 +992,43 @@ class PackedFixed64Method(PackedMethod):
return 'WritePackedFixed64'
-class BoolMethod(WriteMethod):
+class PackedFixed64WriteVectorMethod(PackedWriteMethod):
+ """Method which writes a packed vector of fixed64."""
+ def params(self) -> List[Tuple[str, str]]:
+ return [('const ::pw::Vector<uint64_t>&', 'values')]
+
+ def _encoder_fn(self) -> str:
+ return 'WriteRepeatedFixed64'
+
+
+class Fixed64ReadMethod(ReadMethod):
+ """Method which reads a proto fixed64 value."""
+ def _result_type(self) -> str:
+ return 'uint64_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadFixed64'
+
+
+class PackedFixed64ReadMethod(PackedReadMethod):
+ """Method which reads packed fixed64 values."""
+ def _result_type(self) -> str:
+ return 'uint64_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadPackedFixed64'
+
+
+class PackedFixed64ReadVectorMethod(PackedReadVectorMethod):
+ """Method which reads packed fixed64 values."""
+ def _result_type(self) -> str:
+ return 'uint64_t'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadRepeatedFixed64'
+
+
+class BoolWriteMethod(WriteMethod):
"""Method which writes a proto bool value."""
def params(self) -> List[Tuple[str, str]]:
return [('bool', 'value')]
@@ -457,7 +1037,43 @@ class BoolMethod(WriteMethod):
return 'WriteBool'
-class BytesMethod(WriteMethod):
+class PackedBoolWriteMethod(PackedWriteMethod):
+ """Method which writes a packed list of bools."""
+ def params(self) -> List[Tuple[str, str]]:
+ return [('std::span<const bool>', 'values')]
+
+ def _encoder_fn(self) -> str:
+ return 'WritePackedBool'
+
+
+class PackedBoolWriteVectorMethod(PackedWriteMethod):
+ """Method which writes a packed vector of bools."""
+ def params(self) -> List[Tuple[str, str]]:
+ return [('const ::pw::Vector<bool>&', 'values')]
+
+ def _encoder_fn(self) -> str:
+ return 'WriteRepeatedBool'
+
+
+class BoolReadMethod(ReadMethod):
+ """Method which reads a proto bool value."""
+ def _result_type(self) -> str:
+ return 'bool'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadBool'
+
+
+class PackedBoolReadMethod(PackedReadMethod):
+ """Method which reads packed bool values."""
+ def _result_type(self) -> str:
+ return 'bool'
+
+ def _decoder_fn(self) -> str:
+ return 'ReadPackedBool'
+
+
+class BytesWriteMethod(WriteMethod):
"""Method which writes a proto bytes value."""
def params(self) -> List[Tuple[str, str]]:
return [('std::span<const std::byte>', 'value')]
@@ -466,7 +1082,19 @@ class BytesMethod(WriteMethod):
return 'WriteBytes'
-class StringLenMethod(WriteMethod):
+class BytesReadMethod(ReadMethod):
+ """Method which reads a proto bytes value."""
+ def return_type(self, from_root: bool = False) -> str:
+ return '::pw::StatusWithSize'
+
+ def params(self) -> List[Tuple[str, str]]:
+ return [('std::span<std::byte>', 'out')]
+
+ def _decoder_fn(self) -> str:
+ return 'ReadBytes'
+
+
+class StringLenWriteMethod(WriteMethod):
"""Method which writes a proto string value with length."""
def params(self) -> List[Tuple[str, str]]:
return [('const char*', 'value'), ('size_t', 'len')]
@@ -475,7 +1103,7 @@ class StringLenMethod(WriteMethod):
return 'WriteString'
-class StringMethod(WriteMethod):
+class StringWriteMethod(WriteMethod):
"""Method which writes a proto string value."""
def params(self) -> List[Tuple[str, str]]:
return [('std::string_view', 'value')]
@@ -484,7 +1112,19 @@ class StringMethod(WriteMethod):
return 'WriteString'
-class EnumMethod(WriteMethod):
+class StringReadMethod(ReadMethod):
+ """Method which reads a proto string value."""
+ def return_type(self, from_root: bool = False) -> str:
+ return '::pw::StatusWithSize'
+
+ def params(self) -> List[Tuple[str, str]]:
+ return [('std::span<char>', 'out')]
+
+ def _decoder_fn(self) -> str:
+ return 'ReadString'
+
+
+class EnumWriteMethod(WriteMethod):
"""Method which writes a proto enum value."""
def params(self) -> List[Tuple[str, str]]:
return [(self._relative_type_namespace(), 'value')]
@@ -501,81 +1141,185 @@ class EnumMethod(WriteMethod):
raise NotImplementedError()
+class EnumReadMethod(ReadMethod):
+ """Method which reads a proto enum value."""
+ def _result_type(self):
+ return self._relative_type_namespace()
+
+ def _decoder_body(self) -> List[str]:
+ lines: List[str] = []
+ lines += ['::pw::Result<uint32_t> value = ReadUint32();']
+ lines += ['if (!value.ok()) {']
+ lines += [' return value.status();']
+ lines += ['}']
+
+ name_parts = self._relative_type_namespace().split('::')
+ enum_name = name_parts.pop()
+ function_name = '::'.join(name_parts + [f'Get{enum_name}'])
+
+ lines += [f'return {function_name}(value.value());']
+ return lines
+
+
# Mapping of protobuf field types to their method definitions.
-PROTO_FIELD_METHODS: Dict[int, List] = {
+PROTO_FIELD_WRITE_METHODS: Dict[int, List] = {
+ descriptor_pb2.FieldDescriptorProto.TYPE_DOUBLE: [
+ DoubleWriteMethod, PackedDoubleWriteMethod,
+ PackedDoubleWriteVectorMethod
+ ],
+ descriptor_pb2.FieldDescriptorProto.TYPE_FLOAT:
+ [FloatWriteMethod, PackedFloatWriteMethod, PackedFloatWriteVectorMethod],
+ descriptor_pb2.FieldDescriptorProto.TYPE_INT32:
+ [Int32WriteMethod, PackedInt32WriteMethod, PackedInt32WriteVectorMethod],
+ descriptor_pb2.FieldDescriptorProto.TYPE_SINT32: [
+ Sint32WriteMethod, PackedSint32WriteMethod,
+ PackedSint32WriteVectorMethod
+ ],
+ descriptor_pb2.FieldDescriptorProto.TYPE_SFIXED32: [
+ Sfixed32WriteMethod, PackedSfixed32WriteMethod,
+ PackedSfixed32WriteVectorMethod
+ ],
+ descriptor_pb2.FieldDescriptorProto.TYPE_INT64:
+ [Int64WriteMethod, PackedInt64WriteMethod, PackedInt64WriteVectorMethod],
+ descriptor_pb2.FieldDescriptorProto.TYPE_SINT64: [
+ Sint64WriteMethod, PackedSint64WriteMethod,
+ PackedSint64WriteVectorMethod
+ ],
+ descriptor_pb2.FieldDescriptorProto.TYPE_SFIXED64: [
+ Sfixed64WriteMethod, PackedSfixed64WriteMethod,
+ PackedSfixed64WriteVectorMethod
+ ],
+ descriptor_pb2.FieldDescriptorProto.TYPE_UINT32: [
+ Uint32WriteMethod, PackedUint32WriteMethod,
+ PackedUint32WriteVectorMethod
+ ],
+ descriptor_pb2.FieldDescriptorProto.TYPE_FIXED32: [
+ Fixed32WriteMethod, PackedFixed32WriteMethod,
+ PackedFixed32WriteVectorMethod
+ ],
+ descriptor_pb2.FieldDescriptorProto.TYPE_UINT64: [
+ Uint64WriteMethod, PackedUint64WriteMethod,
+ PackedUint64WriteVectorMethod
+ ],
+ descriptor_pb2.FieldDescriptorProto.TYPE_FIXED64: [
+ Fixed64WriteMethod, PackedFixed64WriteMethod,
+ PackedFixed64WriteVectorMethod
+ ],
+ descriptor_pb2.FieldDescriptorProto.TYPE_BOOL:
+ [BoolWriteMethod, PackedBoolWriteMethod, PackedBoolWriteVectorMethod],
+ descriptor_pb2.FieldDescriptorProto.TYPE_BYTES: [BytesWriteMethod],
+ descriptor_pb2.FieldDescriptorProto.TYPE_STRING:
+ [StringLenWriteMethod, StringWriteMethod],
+ descriptor_pb2.FieldDescriptorProto.TYPE_MESSAGE:
+ [SubMessageEncoderMethod],
+ descriptor_pb2.FieldDescriptorProto.TYPE_ENUM: [EnumWriteMethod],
+}
+
+PROTO_FIELD_READ_METHODS: Dict[int, List] = {
descriptor_pb2.FieldDescriptorProto.TYPE_DOUBLE:
- [DoubleMethod, PackedDoubleMethod],
+ [DoubleReadMethod, PackedDoubleReadMethod, PackedDoubleReadVectorMethod],
descriptor_pb2.FieldDescriptorProto.TYPE_FLOAT:
- [FloatMethod, PackedFloatMethod],
+ [FloatReadMethod, PackedFloatReadMethod, PackedFloatReadVectorMethod],
descriptor_pb2.FieldDescriptorProto.TYPE_INT32:
- [Int32Method, PackedInt32Method],
+ [Int32ReadMethod, PackedInt32ReadMethod, PackedInt32ReadVectorMethod],
descriptor_pb2.FieldDescriptorProto.TYPE_SINT32:
- [Sint32Method, PackedSint32Method],
- descriptor_pb2.FieldDescriptorProto.TYPE_SFIXED32:
- [Sfixed32Method, PackedSfixed32Method],
+ [Sint32ReadMethod, PackedSint32ReadMethod, PackedSint32ReadVectorMethod],
+ descriptor_pb2.FieldDescriptorProto.TYPE_SFIXED32: [
+ Sfixed32ReadMethod, PackedSfixed32ReadMethod,
+ PackedSfixed32ReadVectorMethod
+ ],
descriptor_pb2.FieldDescriptorProto.TYPE_INT64:
- [Int64Method, PackedInt64Method],
+ [Int64ReadMethod, PackedInt64ReadMethod, PackedInt64ReadVectorMethod],
descriptor_pb2.FieldDescriptorProto.TYPE_SINT64:
- [Sint64Method, PackedSint64Method],
- descriptor_pb2.FieldDescriptorProto.TYPE_SFIXED64:
- [Sfixed64Method, PackedSfixed64Method],
+ [Sint64ReadMethod, PackedSint64ReadMethod, PackedSint64ReadVectorMethod],
+ descriptor_pb2.FieldDescriptorProto.TYPE_SFIXED64: [
+ Sfixed64ReadMethod, PackedSfixed64ReadMethod,
+ PackedSfixed64ReadVectorMethod
+ ],
descriptor_pb2.FieldDescriptorProto.TYPE_UINT32:
- [Uint32Method, PackedUint32Method],
- descriptor_pb2.FieldDescriptorProto.TYPE_FIXED32:
- [Fixed32Method, PackedFixed32Method],
+ [Uint32ReadMethod, PackedUint32ReadMethod, PackedUint32ReadVectorMethod],
+ descriptor_pb2.FieldDescriptorProto.TYPE_FIXED32: [
+ Fixed32ReadMethod, PackedFixed32ReadMethod,
+ PackedFixed32ReadVectorMethod
+ ],
descriptor_pb2.FieldDescriptorProto.TYPE_UINT64:
- [Uint64Method, PackedUint64Method],
- descriptor_pb2.FieldDescriptorProto.TYPE_FIXED64:
- [Fixed64Method, PackedFixed64Method],
- descriptor_pb2.FieldDescriptorProto.TYPE_BOOL: [BoolMethod],
- descriptor_pb2.FieldDescriptorProto.TYPE_BYTES: [BytesMethod],
- descriptor_pb2.FieldDescriptorProto.TYPE_STRING: [
- StringLenMethod, StringMethod
+ [Uint64ReadMethod, PackedUint64ReadMethod, PackedUint64ReadVectorMethod],
+ descriptor_pb2.FieldDescriptorProto.TYPE_FIXED64: [
+ Fixed64ReadMethod, PackedFixed64ReadMethod,
+ PackedFixed64ReadVectorMethod
],
- descriptor_pb2.FieldDescriptorProto.TYPE_MESSAGE: [SubMessageMethod],
- descriptor_pb2.FieldDescriptorProto.TYPE_ENUM: [EnumMethod],
+ descriptor_pb2.FieldDescriptorProto.TYPE_BOOL:
+ [BoolReadMethod, PackedBoolReadMethod],
+ descriptor_pb2.FieldDescriptorProto.TYPE_BYTES:
+ [BytesReadMethod, BytesReaderMethod],
+ descriptor_pb2.FieldDescriptorProto.TYPE_STRING:
+ [StringReadMethod, BytesReaderMethod],
+ descriptor_pb2.FieldDescriptorProto.TYPE_MESSAGE:
+ [SubMessageDecoderMethod],
+ descriptor_pb2.FieldDescriptorProto.TYPE_ENUM: [EnumReadMethod],
}
-def generate_code_for_message(message: ProtoMessage, root: ProtoNode,
- output: OutputFile,
- encoder_type: EncoderType) -> None:
- """Creates a C++ class for a protobuf message."""
+def proto_field_methods(class_type: ClassType, field_type: int) -> List:
+ return (PROTO_FIELD_WRITE_METHODS[field_type] if class_type.is_encoder()
+ else PROTO_FIELD_READ_METHODS[field_type])
+
+
+def generate_class_for_message(message: ProtoMessage, root: ProtoNode,
+ output: OutputFile,
+ class_type: ClassType) -> None:
+ """Creates a C++ class to encode or decoder a protobuf message."""
assert message.type() == ProtoNode.Type.MESSAGE
- base_class_name = encoder_type.base_class_name()
- encoder_name = encoder_type.codegen_class_name()
+ base_class_name = class_type.base_class_name()
+ class_name = class_type.codegen_class_name()
# Message classes inherit from the base proto message class in codegen.h
# and use its constructor.
base_class = f'{PROTOBUF_NAMESPACE}::{base_class_name}'
output.write_line(
- f'class {message.cpp_namespace(root)}::{encoder_name} ' \
+ f'class {message.cpp_namespace(root)}::{class_name} ' \
f': public {base_class} {{'
)
output.write_line(' public:')
with output.indent():
- # Inherit the constructors from the base encoder.
+ # Inherit the constructors from the base class.
output.write_line(f'using {base_class}::{base_class_name};')
- # Declare a move constructor that takes a base encoder.
- output.write_line(f'constexpr {encoder_name}({base_class}&& parent) '
+ # Declare a move constructor that takes a base class.
+ output.write_line(f'constexpr {class_name}({base_class}&& parent) '
f': {base_class}(std::move(parent)) {{}}')
# Allow MemoryEncoder& to be converted to StreamEncoder&.
- if encoder_type == EncoderType.MEMORY:
- stream_type = (f'::{message.cpp_namespace()}::'
- f'{EncoderType.STREAMING.codegen_class_name()}')
+ if class_type == ClassType.MEMORY_ENCODER:
+ stream_type = (
+ f'::{message.cpp_namespace()}::'
+ f'{ClassType.STREAMING_ENCODER.codegen_class_name()}')
output.write_line(
f'operator {stream_type}&() '
f' {{ return static_cast<{stream_type}&>('
f'*static_cast<{PROTOBUF_NAMESPACE}::StreamEncoder*>(this));}}'
)
+ # Add a typed Field() member to StreamDecoder
+ if class_type == ClassType.STREAMING_DECODER:
+ output.write_line()
+ output.write_line('::pw::Result<Fields> Field() {')
+ with output.indent():
+ output.write_line('::pw::Result<uint32_t> result '
+ '= FieldNumber();')
+ output.write_line('if (!result.ok()) {')
+ with output.indent():
+ output.write_line('return result.status();')
+ output.write_line('}')
+ output.write_line(
+ 'return static_cast<Fields>(result.value());')
+ output.write_line('}')
+
# Generate methods for each of the message's fields.
for field in message.fields():
- for method_class in PROTO_FIELD_METHODS[field.type()]:
+ for method_class in proto_field_methods(class_type, field.type()):
method = method_class(field, message, root)
if not method.should_appear():
continue
@@ -602,19 +1346,19 @@ def generate_code_for_message(message: ProtoMessage, root: ProtoNode,
def define_not_in_class_methods(message: ProtoMessage, root: ProtoNode,
output: OutputFile,
- encoder_type: EncoderType) -> None:
+ class_type: ClassType) -> None:
"""Defines methods for a message class that were previously declared."""
assert message.type() == ProtoNode.Type.MESSAGE
for field in message.fields():
- for method_class in PROTO_FIELD_METHODS[field.type()]:
+ for method_class in proto_field_methods(class_type, field.type()):
method = method_class(field, message, root)
if not method.should_appear() or method.in_class_definition():
continue
output.write_line()
class_name = (f'{message.cpp_namespace(root)}::'
- f'{encoder_type.codegen_class_name()}')
+ f'{class_type.codegen_class_name()}')
method_signature = (
f'inline {method.return_type(from_root=True)} '
f'{class_name}::{method.name()}({method.param_string()})')
@@ -637,6 +1381,26 @@ def generate_code_for_enum(proto_enum: ProtoEnum, root: ProtoNode,
output.write_line('};')
+def generate_function_for_enum(proto_enum: ProtoEnum, root: ProtoNode,
+ output: OutputFile) -> None:
+ """Creates a C++ validation function for for a proto enum."""
+ assert proto_enum.type() == ProtoNode.Type.ENUM
+
+ enum_name = proto_enum.cpp_namespace(root)
+ output.write_line(
+ f'constexpr ::pw::Result<{enum_name}> Get{enum_name}(uint32_t value) {{'
+ )
+ with output.indent():
+ output.write_line('switch (value) {')
+ with output.indent():
+ for name, number in proto_enum.values():
+ output.write_line(
+ f'case {number}: return {enum_name}::{name};')
+ output.write_line('default: return ::pw::Status::DataLoss();')
+ output.write_line('}')
+ output.write_line('}')
+
+
def forward_declare(node: ProtoMessage, root: ProtoNode,
output: OutputFile) -> None:
"""Generates code forward-declaring entities in a message's namespace."""
@@ -651,34 +1415,41 @@ def forward_declare(node: ProtoMessage, root: ProtoNode,
output.write_line(f'{field.enum_name()} = {field.number()},')
output.write_line('};')
- # Declare the message's encoder class and all of its enums.
+ # Declare the message's encoder classes.
output.write_line()
output.write_line('class StreamEncoder;')
output.write_line('class MemoryEncoder;')
+ # Declare the message's decoder classes.
+ output.write_line()
+ output.write_line('class StreamDecoder;')
+
+ # Declare the message's enums.
for child in node.children():
if child.type() == ProtoNode.Type.ENUM:
output.write_line()
generate_code_for_enum(cast(ProtoEnum, child), node, output)
+ output.write_line()
+ generate_function_for_enum(cast(ProtoEnum, child), node, output)
output.write_line(f'}} // namespace {namespace}')
-def generate_encoder_wrappers(package: ProtoNode, encoder_type: EncoderType,
- output: OutputFile):
+def generate_class_wrappers(package: ProtoNode, class_type: ClassType,
+ output: OutputFile):
# Run through all messages in the file, generating a class for each.
for node in package:
if node.type() == ProtoNode.Type.MESSAGE:
output.write_line()
- generate_code_for_message(cast(ProtoMessage, node), package,
- output, encoder_type)
+ generate_class_for_message(cast(ProtoMessage, node), package,
+ output, class_type)
# Run a second pass through the classes, this time defining all of the
# methods which were previously only declared.
for node in package:
if node.type() == ProtoNode.Type.MESSAGE:
define_not_in_class_methods(cast(ProtoMessage, node), package,
- output, encoder_type)
+ output, class_type)
def _proto_filename_to_generated_header(proto_file: str) -> str:
@@ -700,7 +1471,13 @@ def generate_code_for_package(file_descriptor_proto, package: ProtoNode,
output.write_line('#include <cstdint>')
output.write_line('#include <span>')
output.write_line('#include <string_view>\n')
+ output.write_line('#include "pw_assert/assert.h"')
+ output.write_line('#include "pw_containers/vector.h"')
output.write_line('#include "pw_protobuf/encoder.h"')
+ output.write_line('#include "pw_protobuf/stream_decoder.h"')
+ output.write_line('#include "pw_result/result.h"')
+ output.write_line('#include "pw_status/status.h"')
+ output.write_line('#include "pw_status/status_with_size.h"')
for imported_file in file_descriptor_proto.dependency:
generated_header = _proto_filename_to_generated_header(imported_file)
@@ -722,9 +1499,13 @@ def generate_code_for_package(file_descriptor_proto, package: ProtoNode,
if node.type() == ProtoNode.Type.ENUM:
output.write_line()
generate_code_for_enum(cast(ProtoEnum, node), package, output)
+ output.write_line()
+ generate_function_for_enum(cast(ProtoEnum, node), package, output)
+
+ generate_class_wrappers(package, ClassType.STREAMING_ENCODER, output)
+ generate_class_wrappers(package, ClassType.MEMORY_ENCODER, output)
- generate_encoder_wrappers(package, EncoderType.STREAMING, output)
- generate_encoder_wrappers(package, EncoderType.MEMORY, output)
+ generate_class_wrappers(package, ClassType.STREAMING_DECODER, output)
if package.cpp_namespace():
output.write_line(f'\n}} // namespace {package.cpp_namespace()}')
diff --git a/pw_protobuf/stream_decoder.cc b/pw_protobuf/stream_decoder.cc
index 8df6f2860..e6a88aea0 100644
--- a/pw_protobuf/stream_decoder.cc
+++ b/pw_protobuf/stream_decoder.cc
@@ -14,6 +14,10 @@
#include "pw_protobuf/stream_decoder.h"
+#include <algorithm>
+#include <bit>
+#include <cstdint>
+#include <cstring>
#include <limits>
#include "pw_assert/check.h"
@@ -112,59 +116,6 @@ Status StreamDecoder::Next() {
return status_;
}
-Result<int32_t> StreamDecoder::ReadInt32() {
- uint64_t varint = 0;
- PW_TRY(ReadVarintField(&varint));
-
- int64_t signed_value = static_cast<int64_t>(varint);
- if (signed_value > std::numeric_limits<int32_t>::max() ||
- signed_value < std::numeric_limits<int32_t>::min()) {
- return Status::OutOfRange();
- }
-
- return signed_value;
-}
-
-Result<uint32_t> StreamDecoder::ReadUint32() {
- uint64_t varint = 0;
- PW_TRY(ReadVarintField(&varint));
-
- if (varint > std::numeric_limits<uint32_t>::max()) {
- return Status::OutOfRange();
- }
- return varint;
-}
-
-Result<int64_t> StreamDecoder::ReadInt64() {
- uint64_t varint = 0;
- PW_TRY(ReadVarintField(&varint));
- return varint;
-}
-
-Result<int32_t> StreamDecoder::ReadSint32() {
- uint64_t varint = 0;
- PW_TRY(ReadVarintField(&varint));
-
- int64_t signed_value = varint::ZigZagDecode(varint);
- if (signed_value > std::numeric_limits<int32_t>::max() ||
- signed_value < std::numeric_limits<int32_t>::min()) {
- return Status::OutOfRange();
- }
- return signed_value;
-}
-
-Result<int64_t> StreamDecoder::ReadSint64() {
- uint64_t varint = 0;
- PW_TRY(ReadVarintField(&varint));
- return varint::ZigZagDecode(varint);
-}
-
-Result<bool> StreamDecoder::ReadBool() {
- uint64_t varint = 0;
- PW_TRY(ReadVarintField(&varint));
- return varint;
-}
-
StreamDecoder::BytesReader StreamDecoder::GetBytesReader() {
Status status = CheckOkToRead(WireType::kDelimited);
@@ -328,24 +279,68 @@ Status StreamDecoder::SkipField() {
return OkStatus();
}
-Status StreamDecoder::ReadVarintField(uint64_t* out) {
+Status StreamDecoder::ReadVarintField(std::span<std::byte> out,
+ VarintDecodeType decode_type) {
+ PW_CHECK(out.size() == sizeof(bool) || out.size() == sizeof(uint32_t) ||
+ out.size() == sizeof(uint64_t),
+ "Protobuf varints must only be used with bool, int32_t, uint32_t, "
+ "int64_t, or uint64_t");
PW_TRY(CheckOkToRead(WireType::kVarint));
+ const StatusWithSize sws = ReadOneVarint(out, decode_type);
+ if (sws.status() != Status::DataLoss())
+ field_consumed_ = true;
+ return sws.status();
+}
+
+StatusWithSize StreamDecoder::ReadOneVarint(std::span<std::byte> out,
+ VarintDecodeType decode_type) {
uint64_t value;
StatusWithSize sws = varint::Read(reader_, &value);
if (sws.IsOutOfRange()) {
// Out of range indicates the end of the stream. As a value is expected
// here, report it as a data loss and terminate the decode operation.
status_ = Status::DataLoss();
- return status_;
+ return StatusWithSize(status_, sws.size());
+ }
+ if (!sws.ok()) {
+ return sws;
}
- PW_TRY(sws);
position_ += sws.size();
- field_consumed_ = true;
- *out = value;
- return OkStatus();
+ if (out.size() == sizeof(uint64_t)) {
+ if (decode_type == VarintDecodeType::kUnsigned) {
+ std::memcpy(out.data(), &value, out.size());
+ } else {
+ const int64_t signed_value = decode_type == VarintDecodeType::kZigZag
+ ? varint::ZigZagDecode(value)
+ : static_cast<int64_t>(value);
+ std::memcpy(out.data(), &signed_value, out.size());
+ }
+ } else if (out.size() == sizeof(uint32_t)) {
+ if (decode_type == VarintDecodeType::kUnsigned) {
+ if (value > std::numeric_limits<uint32_t>::max()) {
+ return StatusWithSize(Status::OutOfRange(), sws.size());
+ }
+ std::memcpy(out.data(), &value, out.size());
+ } else {
+ const int64_t signed_value = decode_type == VarintDecodeType::kZigZag
+ ? varint::ZigZagDecode(value)
+ : static_cast<int64_t>(value);
+ if (signed_value > std::numeric_limits<int32_t>::max() ||
+ signed_value < std::numeric_limits<int32_t>::min()) {
+ return StatusWithSize(Status::OutOfRange(), sws.size());
+ }
+ std::memcpy(out.data(), &signed_value, out.size());
+ }
+ } else if (out.size() == sizeof(bool)) {
+ PW_CHECK(decode_type == VarintDecodeType::kUnsigned,
+ "Protobuf bool can never be signed");
+ std::memcpy(out.data(), &value, out.size());
+ }
+
+ return sws;
}
Status StreamDecoder::ReadFixedField(std::span<std::byte> out) {
@@ -362,6 +357,10 @@ Status StreamDecoder::ReadFixedField(std::span<std::byte> out) {
position_ += out.size();
field_consumed_ = true;
+ if (std::endian::native != std::endian::little) {
+ std::reverse(out.begin(), out.end());
+ }
+
return OkStatus();
}
@@ -392,17 +391,91 @@ StatusWithSize StreamDecoder::ReadDelimitedField(std::span<std::byte> out) {
return StatusWithSize(result.value().size());
}
+StatusWithSize StreamDecoder::ReadPackedFixedField(std::span<std::byte> out,
+ size_t elem_size) {
+ if (Status status = CheckOkToRead(WireType::kDelimited); !status.ok()) {
+ return StatusWithSize(status, 0);
+ }
+
+ if (reader_.ConservativeReadLimit() < delimited_field_size_) {
+ status_ = Status::DataLoss();
+ return StatusWithSize(status_, 0);
+ }
+
+ if (out.size() < delimited_field_size_) {
+ // Value can't fit into the provided buffer. Don't advance the cursor so
+ // that the field can be re-read with a larger buffer or through the stream
+ // API.
+ return StatusWithSize::ResourceExhausted();
+ }
+
+ Result<ByteSpan> result = reader_.Read(out.first(delimited_field_size_));
+ if (!result.ok()) {
+ return StatusWithSize(result.status(), 0);
+ }
+
+ position_ += result.value().size();
+ field_consumed_ = true;
+
+ // Decode little-endian serialized packed fields.
+ if (std::endian::native != std::endian::little) {
+ for (auto out_start = out.begin(); out_start != out.end();
+ out_start += elem_size) {
+ std::reverse(out_start, out_start + elem_size);
+ }
+ }
+
+ return StatusWithSize(result.value().size() / elem_size);
+}
+
+StatusWithSize StreamDecoder::ReadPackedVarintField(
+ std::span<std::byte> out, size_t elem_size, VarintDecodeType decode_type) {
+ PW_CHECK(elem_size == sizeof(bool) || elem_size == sizeof(uint32_t) ||
+ elem_size == sizeof(uint64_t),
+ "Protobuf varints must only be used with bool, int32_t, uint32_t, "
+ "int64_t, or uint64_t");
+
+ if (Status status = CheckOkToRead(WireType::kDelimited); !status.ok()) {
+ return StatusWithSize(status, 0);
+ }
+
+ if (reader_.ConservativeReadLimit() < delimited_field_size_) {
+ status_ = Status::DataLoss();
+ return StatusWithSize(status_, 0);
+ }
+
+ size_t bytes_read = 0;
+ size_t number_out = 0;
+ while (bytes_read < delimited_field_size_ && !out.empty()) {
+ const StatusWithSize sws = ReadOneVarint(out.first(elem_size), decode_type);
+ if (!sws.ok()) {
+ return StatusWithSize(sws.status(), number_out);
+ }
+
+ bytes_read += sws.size();
+ out = out.subspan(elem_size);
+ ++number_out;
+ }
+
+ if (bytes_read < delimited_field_size_) {
+ return StatusWithSize(Status::ResourceExhausted(), number_out);
+ }
+
+ field_consumed_ = true;
+ return StatusWithSize(OkStatus(), number_out);
+}
+
Status StreamDecoder::CheckOkToRead(WireType type) {
PW_CHECK(!nested_reader_open_,
"Cannot read from a decoder while a nested decoder is open");
- PW_CHECK(
- !field_consumed_,
- "Attempting to read from protobuf decoder without first calling Next()");
-
- // Attempting to read the wrong type is typically a programmer error; however,
- // it could also occur due to data corruption. As we don't want to crash on
- // bad data, return NOT_FOUND here to distinguish it from other corruption
- // cases.
+ PW_CHECK(!field_consumed_,
+ "Attempting to read from protobuf decoder without first calling "
+ "Next()");
+
+ // Attempting to read the wrong type is typically a programmer error;
+ // however, it could also occur due to data corruption. As we don't want to
+ // crash on bad data, return NOT_FOUND here to distinguish it from other
+ // corruption cases.
if (current_field_.wire_type() != type) {
status_ = Status::NotFound();
}
diff --git a/pw_protobuf/stream_decoder_test.cc b/pw_protobuf/stream_decoder_test.cc
index f1385c4ca..0cff426b8 100644
--- a/pw_protobuf/stream_decoder_test.cc
+++ b/pw_protobuf/stream_decoder_test.cc
@@ -14,6 +14,8 @@
#include "pw_protobuf/stream_decoder.h"
+#include <array>
+
#include "gtest/gtest.h"
#include "pw_status/status.h"
#include "pw_status/status_with_size.h"
@@ -59,6 +61,12 @@ TEST(StreamDecoder, Decode) {
0x2d, 0xef, 0xbe, 0xad, 0xde,
// type=string, k=6, v="Hello world"
0x32, 0x0b, 'H', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd',
+ // type=sfixed32, k=7, v=-50
+ 0x3d, 0xce, 0xff, 0xff, 0xff,
+ // type=sfixed64, k=8, v=-1647993274
+ 0x41, 0x46, 0x9e, 0xc5, 0x9d, 0xff, 0xff, 0xff, 0xff,
+ // type=float, k=9, v=2.718
+ 0x4d, 0xb6, 0xf3, 0x2d, 0x40,
};
// clang-format on
@@ -103,6 +111,24 @@ TEST(StreamDecoder, Decode) {
buffer[sws.size()] = '\0';
EXPECT_STREQ(buffer, "Hello world");
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 7u);
+ Result<int32_t> sfixed32 = decoder.ReadSfixed32();
+ ASSERT_EQ(sfixed32.status(), OkStatus());
+ EXPECT_EQ(sfixed32.value(), -50);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 8u);
+ Result<int64_t> sfixed64 = decoder.ReadSfixed64();
+ ASSERT_EQ(sfixed64.status(), OkStatus());
+ EXPECT_EQ(sfixed64.value(), -1647993274);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 9u);
+ Result<float> flt = decoder.ReadFloat();
+ ASSERT_EQ(flt.status(), OkStatus());
+ EXPECT_EQ(flt.value(), 2.718f);
+
EXPECT_EQ(decoder.Next(), Status::OutOfRange());
}
@@ -761,5 +787,521 @@ TEST(StreamDecoder, Decode_WithLength_SkipsToEnd) {
EXPECT_EQ(reader.Tell(), 13u);
}
+TEST(StreamDecoder, RepeatedField) {
+ // clang-format off
+ constexpr uint8_t encoded_proto[] = {
+ // type=uint32, k=1, v=0
+ 0x08, 0x00,
+ // type=uint32, k=1, v=50
+ 0x08, 0x32,
+ // type=uint32, k=1, v=100
+ 0x08, 0x64,
+ // type=uint32, k=1, v=150
+ 0x08, 0x96, 0x01,
+ // type=uint32, k=1, v=200
+ 0x08, 0xc8, 0x01
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(encoded_proto)));
+ StreamDecoder decoder(reader);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 1u);
+ Result<uint32_t> uint32 = decoder.ReadUint32();
+ ASSERT_EQ(uint32.status(), OkStatus());
+ EXPECT_EQ(uint32.value(), 0u);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 1u);
+ uint32 = decoder.ReadUint32();
+ ASSERT_EQ(uint32.status(), OkStatus());
+ EXPECT_EQ(uint32.value(), 50u);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 1u);
+ uint32 = decoder.ReadUint32();
+ ASSERT_EQ(uint32.status(), OkStatus());
+ EXPECT_EQ(uint32.value(), 100u);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 1u);
+ uint32 = decoder.ReadUint32();
+ ASSERT_EQ(uint32.status(), OkStatus());
+ EXPECT_EQ(uint32.value(), 150u);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 1u);
+ uint32 = decoder.ReadUint32();
+ ASSERT_EQ(uint32.status(), OkStatus());
+ EXPECT_EQ(uint32.value(), 200u);
+
+ EXPECT_EQ(decoder.Next(), Status::OutOfRange());
+}
+
+TEST(StreamDecoder, RepeatedFieldVector) {
+ // clang-format off
+ constexpr uint8_t encoded_proto[] = {
+ // type=uint32, k=1, v=0
+ 0x08, 0x00,
+ // type=uint32, k=1, v=50
+ 0x08, 0x32,
+ // type=uint32, k=1, v=100
+ 0x08, 0x64,
+ // type=uint32, k=1, v=150
+ 0x08, 0x96, 0x01,
+ // type=uint32, k=1, v=200
+ 0x08, 0xc8, 0x01
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(encoded_proto)));
+ StreamDecoder decoder(reader);
+
+ pw::Vector<uint32_t, 8> uint32{};
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 1u);
+ Status status = decoder.ReadRepeatedUint32(uint32);
+ ASSERT_EQ(status, OkStatus());
+ EXPECT_EQ(uint32.size(), 1u);
+ EXPECT_EQ(uint32[0], 0u);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 1u);
+ status = decoder.ReadRepeatedUint32(uint32);
+ ASSERT_EQ(status, OkStatus());
+ EXPECT_EQ(uint32.size(), 2u);
+ EXPECT_EQ(uint32[1], 50u);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 1u);
+ status = decoder.ReadRepeatedUint32(uint32);
+ ASSERT_EQ(status, OkStatus());
+ EXPECT_EQ(uint32.size(), 3u);
+ EXPECT_EQ(uint32[2], 100u);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 1u);
+ status = decoder.ReadRepeatedUint32(uint32);
+ ASSERT_EQ(status, OkStatus());
+ EXPECT_EQ(uint32.size(), 4u);
+ EXPECT_EQ(uint32[3], 150u);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 1u);
+ status = decoder.ReadRepeatedUint32(uint32);
+ ASSERT_EQ(status, OkStatus());
+ EXPECT_EQ(uint32.size(), 5u);
+ EXPECT_EQ(uint32[4], 200u);
+
+ EXPECT_EQ(decoder.Next(), Status::OutOfRange());
+}
+
+TEST(StreamDecoder, RepeatedFieldVectorFull) {
+ // clang-format off
+ constexpr uint8_t encoded_proto[] = {
+ // type=uint32, k=1, v=0
+ 0x08, 0x00,
+ // type=uint32, k=1, v=50
+ 0x08, 0x32,
+ // type=uint32, k=1, v=100
+ 0x08, 0x64,
+ // type=uint32, k=1, v=150
+ 0x08, 0x96, 0x01,
+ // type=uint32, k=1, v=200
+ 0x08, 0xc8, 0x01
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(encoded_proto)));
+ StreamDecoder decoder(reader);
+
+ pw::Vector<uint32_t, 2> uint32{};
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 1u);
+ Status status = decoder.ReadRepeatedUint32(uint32);
+ ASSERT_EQ(status, OkStatus());
+ EXPECT_EQ(uint32.size(), 1u);
+ EXPECT_EQ(uint32[0], 0u);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 1u);
+ status = decoder.ReadRepeatedUint32(uint32);
+ ASSERT_EQ(status, OkStatus());
+ EXPECT_EQ(uint32.size(), 2u);
+ EXPECT_EQ(uint32[1], 50u);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 1u);
+ status = decoder.ReadRepeatedUint32(uint32);
+ ASSERT_EQ(status, Status::ResourceExhausted());
+ EXPECT_EQ(uint32.size(), 2u);
+}
+
+TEST(StreamDecoder, PackedVarint) {
+ // clang-format off
+ constexpr uint8_t encoded_proto[] = {
+ // type=uint32[], k=1, v={0, 50, 100, 150, 200}
+ 0x0a, 0x07,
+ 0x00,
+ 0x32,
+ 0x64,
+ 0x96, 0x01,
+ 0xc8, 0x01
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(encoded_proto)));
+ StreamDecoder decoder(reader);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 1u);
+ std::array<uint32_t, 8> uint32{};
+ StatusWithSize size = decoder.ReadPackedUint32(uint32);
+ ASSERT_EQ(size.status(), OkStatus());
+ EXPECT_EQ(size.size(), 5u);
+
+ EXPECT_EQ(uint32[0], 0u);
+ EXPECT_EQ(uint32[1], 50u);
+ EXPECT_EQ(uint32[2], 100u);
+ EXPECT_EQ(uint32[3], 150u);
+ EXPECT_EQ(uint32[4], 200u);
+}
+
+TEST(StreamDecoder, PackedVarintInsufficientSpace) {
+ // clang-format off
+ constexpr uint8_t encoded_proto[] = {
+ // type=uint32[], k=1, v={0, 50, 100, 150, 200}
+ 0x0a, 0x07,
+ 0x00,
+ 0x32,
+ 0x64,
+ 0x96, 0x01,
+ 0xc8, 0x01
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(encoded_proto)));
+ StreamDecoder decoder(reader);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 1u);
+ std::array<uint32_t, 2> uint32{};
+ StatusWithSize size = decoder.ReadPackedUint32(uint32);
+ ASSERT_EQ(size.status(), Status::ResourceExhausted());
+ EXPECT_EQ(size.size(), 2u);
+
+ // Still returns values in case of error.
+ EXPECT_EQ(uint32[0], 0u);
+ EXPECT_EQ(uint32[1], 50u);
+}
+
+TEST(StreamDecoder, PackedVarintVector) {
+ // clang-format off
+ constexpr uint8_t encoded_proto[] = {
+ // type=uint32[], k=1, v={0, 50, 100, 150, 200}
+ 0x0a, 0x07,
+ 0x00,
+ 0x32,
+ 0x64,
+ 0x96, 0x01,
+ 0xc8, 0x01
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(encoded_proto)));
+ StreamDecoder decoder(reader);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 1u);
+ pw::Vector<uint32_t, 8> uint32{};
+ Status status = decoder.ReadRepeatedUint32(uint32);
+ ASSERT_EQ(status, OkStatus());
+ EXPECT_EQ(uint32.size(), 5u);
+
+ EXPECT_EQ(uint32[0], 0u);
+ EXPECT_EQ(uint32[1], 50u);
+ EXPECT_EQ(uint32[2], 100u);
+ EXPECT_EQ(uint32[3], 150u);
+ EXPECT_EQ(uint32[4], 200u);
+}
+
+TEST(StreamDecoder, PackedVarintVectorFull) {
+ // clang-format off
+ constexpr uint8_t encoded_proto[] = {
+ // type=uint32[], k=1, v={0, 50, 100, 150, 200}
+ 0x0a, 0x07,
+ 0x00,
+ 0x32,
+ 0x64,
+ 0x96, 0x01,
+ 0xc8, 0x01
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(encoded_proto)));
+ StreamDecoder decoder(reader);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 1u);
+ pw::Vector<uint32_t, 2> uint32{};
+ Status status = decoder.ReadRepeatedUint32(uint32);
+ ASSERT_EQ(status, Status::ResourceExhausted());
+ EXPECT_EQ(uint32.size(), 2u);
+
+ // Still returns values in case of error.
+ EXPECT_EQ(uint32[0], 0u);
+ EXPECT_EQ(uint32[1], 50u);
+}
+
+TEST(StreamDecoder, PackedZigZag) {
+ // clang-format off
+ constexpr uint8_t encoded_proto[] = {
+ // type=sint32[], k=1, v={-100, -25, -1, 0, 1, 25, 100}
+ 0x0a, 0x09,
+ 0xc7, 0x01,
+ 0x31,
+ 0x01,
+ 0x00,
+ 0x02,
+ 0x32,
+ 0xc8, 0x01
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(encoded_proto)));
+ StreamDecoder decoder(reader);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 1u);
+ std::array<int32_t, 8> sint32{};
+ StatusWithSize size = decoder.ReadPackedSint32(sint32);
+ ASSERT_EQ(size.status(), OkStatus());
+ EXPECT_EQ(size.size(), 7u);
+
+ EXPECT_EQ(sint32[0], -100);
+ EXPECT_EQ(sint32[1], -25);
+ EXPECT_EQ(sint32[2], -1);
+ EXPECT_EQ(sint32[3], 0);
+ EXPECT_EQ(sint32[4], 1);
+ EXPECT_EQ(sint32[5], 25);
+ EXPECT_EQ(sint32[6], 100);
+}
+
+TEST(StreamDecoder, PackedZigZagVector) {
+ // clang-format off
+ constexpr uint8_t encoded_proto[] = {
+ // type=sint32[], k=1, v={-100, -25, -1, 0, 1, 25, 100}
+ 0x0a, 0x09,
+ 0xc7, 0x01,
+ 0x31,
+ 0x01,
+ 0x00,
+ 0x02,
+ 0x32,
+ 0xc8, 0x01
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(encoded_proto)));
+ StreamDecoder decoder(reader);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 1u);
+ pw::Vector<int32_t, 8> sint32{};
+ Status status = decoder.ReadRepeatedSint32(sint32);
+ ASSERT_EQ(status, OkStatus());
+ EXPECT_EQ(sint32.size(), 7u);
+
+ EXPECT_EQ(sint32[0], -100);
+ EXPECT_EQ(sint32[1], -25);
+ EXPECT_EQ(sint32[2], -1);
+ EXPECT_EQ(sint32[3], 0);
+ EXPECT_EQ(sint32[4], 1);
+ EXPECT_EQ(sint32[5], 25);
+ EXPECT_EQ(sint32[6], 100);
+}
+
+TEST(StreamDecoder, PackedFixed) {
+ // clang-format off
+ constexpr uint8_t encoded_proto[] = {
+ // type=fixed32[], k=1, v={0, 50, 100, 150, 200}
+ 0x0a, 0x14,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x32, 0x00, 0x00, 0x00,
+ 0x64, 0x00, 0x00, 0x00,
+ 0x96, 0x00, 0x00, 0x00,
+ 0xc8, 0x00, 0x00, 0x00,
+ // type=fixed64[], k=2, v={0x0102030405060708}
+ 0x12, 0x08,
+ 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01,
+ // type=sfixed32[], k=3, v={0, -50, 100, -150, 200}
+ 0x1a, 0x14,
+ 0x00, 0x00, 0x00, 0x00,
+ 0xce, 0xff, 0xff, 0xff,
+ 0x64, 0x00, 0x00, 0x00,
+ 0x6a, 0xff, 0xff, 0xff,
+ 0xc8, 0x00, 0x00, 0x00,
+ // type=sfixed64[], k=4, v={-1647993274}
+ 0x22, 0x08,
+ 0x46, 0x9e, 0xc5, 0x9d, 0xff, 0xff, 0xff, 0xff,
+ // type=double[], k=5, v=3.14159
+ 0x2a, 0x08,
+ 0x6e, 0x86, 0x1b, 0xf0, 0xf9, 0x21, 0x09, 0x40,
+ // type=float[], k=6, v=2.718
+ 0x32, 0x04,
+ 0xb6, 0xf3, 0x2d, 0x40,
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(encoded_proto)));
+ StreamDecoder decoder(reader);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 1u);
+ std::array<uint32_t, 8> fixed32{};
+ StatusWithSize size = decoder.ReadPackedFixed32(fixed32);
+ ASSERT_EQ(size.status(), OkStatus());
+ EXPECT_EQ(size.size(), 5u);
+
+ EXPECT_EQ(fixed32[0], 0u);
+ EXPECT_EQ(fixed32[1], 50u);
+ EXPECT_EQ(fixed32[2], 100u);
+ EXPECT_EQ(fixed32[3], 150u);
+ EXPECT_EQ(fixed32[4], 200u);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 2u);
+ std::array<uint64_t, 8> fixed64{};
+ size = decoder.ReadPackedFixed64(fixed64);
+ ASSERT_EQ(size.status(), OkStatus());
+ EXPECT_EQ(size.size(), 1u);
+
+ EXPECT_EQ(fixed64[0], 0x0102030405060708u);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 3u);
+ std::array<int32_t, 8> sfixed32{};
+ size = decoder.ReadPackedSfixed32(sfixed32);
+ ASSERT_EQ(size.status(), OkStatus());
+ EXPECT_EQ(size.size(), 5u);
+
+ EXPECT_EQ(sfixed32[0], 0);
+ EXPECT_EQ(sfixed32[1], -50);
+ EXPECT_EQ(sfixed32[2], 100);
+ EXPECT_EQ(sfixed32[3], -150);
+ EXPECT_EQ(sfixed32[4], 200);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 4u);
+ std::array<int64_t, 8> sfixed64{};
+ size = decoder.ReadPackedSfixed64(sfixed64);
+ ASSERT_EQ(size.status(), OkStatus());
+ EXPECT_EQ(size.size(), 1u);
+
+ EXPECT_EQ(sfixed64[0], -1647993274);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 5u);
+ std::array<double, 8> dbl{};
+ size = decoder.ReadPackedDouble(dbl);
+ ASSERT_EQ(size.status(), OkStatus());
+ EXPECT_EQ(size.size(), 1u);
+
+ EXPECT_EQ(dbl[0], 3.14159);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 6u);
+ std::array<float, 8> flt{};
+ size = decoder.ReadPackedFloat(flt);
+ ASSERT_EQ(size.status(), OkStatus());
+ EXPECT_EQ(size.size(), 1u);
+
+ EXPECT_EQ(flt[0], 2.718f);
+
+ EXPECT_EQ(decoder.Next(), Status::OutOfRange());
+}
+
+TEST(StreamDecoder, PackedFixedInsufficientSpace) {
+ // clang-format off
+ constexpr uint8_t encoded_proto[] = {
+ // type=fixed32[], k=1, v={0, 50, 100, 150, 200}
+ 0x0a, 0x14,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x32, 0x00, 0x00, 0x00,
+ 0x64, 0x00, 0x00, 0x00,
+ 0x96, 0x00, 0x00, 0x00,
+ 0xc8, 0x00, 0x00, 0x00,
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(encoded_proto)));
+ StreamDecoder decoder(reader);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 1u);
+ std::array<uint32_t, 2> fixed32{};
+ StatusWithSize size = decoder.ReadPackedFixed32(fixed32);
+ ASSERT_EQ(size.status(), Status::ResourceExhausted());
+}
+
+TEST(StreamDecoder, PackedFixedVector) {
+ // clang-format off
+ constexpr uint8_t encoded_proto[] = {
+ // type=sfixed32[], k=1, v={0, -50, 100, -150, 200}
+ 0x0a, 0x14,
+ 0x00, 0x00, 0x00, 0x00,
+ 0xce, 0xff, 0xff, 0xff,
+ 0x64, 0x00, 0x00, 0x00,
+ 0x6a, 0xff, 0xff, 0xff,
+ 0xc8, 0x00, 0x00, 0x00,
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(encoded_proto)));
+ StreamDecoder decoder(reader);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 1u);
+ pw::Vector<int32_t, 8> sfixed32{};
+ Status status = decoder.ReadRepeatedSfixed32(sfixed32);
+ ASSERT_EQ(status, OkStatus());
+ EXPECT_EQ(sfixed32.size(), 5u);
+
+ EXPECT_EQ(sfixed32[0], 0);
+ EXPECT_EQ(sfixed32[1], -50);
+ EXPECT_EQ(sfixed32[2], 100);
+ EXPECT_EQ(sfixed32[3], -150);
+ EXPECT_EQ(sfixed32[4], 200);
+
+ EXPECT_EQ(decoder.Next(), Status::OutOfRange());
+}
+
+TEST(StreamDecoder, PackedFixedVectorFull) {
+ // clang-format off
+ constexpr uint8_t encoded_proto[] = {
+ // type=sfixed32[], k=1, v={0, -50, 100, -150, 200}
+ 0x0a, 0x14,
+ 0x00, 0x00, 0x00, 0x00,
+ 0xce, 0xff, 0xff, 0xff,
+ 0x64, 0x00, 0x00, 0x00,
+ 0x6a, 0xff, 0xff, 0xff,
+ 0xc8, 0x00, 0x00, 0x00,
+ };
+ // clang-format on
+
+ stream::MemoryReader reader(std::as_bytes(std::span(encoded_proto)));
+ StreamDecoder decoder(reader);
+
+ EXPECT_EQ(decoder.Next(), OkStatus());
+ ASSERT_EQ(decoder.FieldNumber().value(), 1u);
+ pw::Vector<int32_t, 2> sfixed32{};
+ Status status = decoder.ReadRepeatedSfixed32(sfixed32);
+ ASSERT_EQ(status, Status::ResourceExhausted());
+ EXPECT_EQ(sfixed32.size(), 0u);
+}
+
} // namespace
} // namespace pw::protobuf
diff --git a/pw_protobuf_compiler/BUILD.bazel b/pw_protobuf_compiler/BUILD.bazel
index a96bed43c..045285e84 100644
--- a/pw_protobuf_compiler/BUILD.bazel
+++ b/pw_protobuf_compiler/BUILD.bazel
@@ -32,18 +32,11 @@ py_proto_library(
],
)
-filegroup(
- name = "protos",
- srcs = [
- "pw_protobuf_compiler_protos/nested/more_nesting/test.proto",
- "pw_protobuf_compiler_protos/test.proto",
- ],
-)
-
proto_library(
name = "test_protos",
srcs = [
- ":protos",
+ "pw_protobuf_compiler_protos/nested/more_nesting/test.proto",
+ "pw_protobuf_compiler_protos/test.proto",
],
)
diff --git a/pw_protobuf_compiler/docs.rst b/pw_protobuf_compiler/docs.rst
index 9362ca1bd..31160e865 100644
--- a/pw_protobuf_compiler/docs.rst
+++ b/pw_protobuf_compiler/docs.rst
@@ -375,13 +375,52 @@ compile them. e.g.
deps = [":my_proto"],
)
- # Library that depends on generated proto targets.
+ # Library that depends on only pw_protobuf generated proto targets.
+ pw_cc_library(
+ name = "my_proto_only_lib",
+ srcs = ["my/proto_only.cc"],
+ deps = [":my_cc_proto.pwpb"],
+ )
+
+ # Library that depends on only Nanopb generated proto targets.
+ pw_cc_library(
+ name = "my_nanopb_only_lib",
+ srcs = ["my/nanopb_only.cc"],
+ deps = [":my_cc_proto.nanopb"],
+ )
+
+ # Library that depends on pw_protobuf and pw_rpc/raw.
+ pw_cc_library(
+ name = "my_raw_rpc_lib",
+ srcs = ["my/raw_rpc.cc"],
+ deps = [
+ ":my_cc_proto.pwpb",
+ ":my_cc_proto.raw_rpc",
+ ],
+ )
+ pw_cc_library(
+ name = "my_nanopb_rpc_lib",
+ srcs = ["my/nanopb_rpc.cc"],
+ deps = [
+ ":my_cc_proto.nanopb_rpc",
+ ],
+ )
+
+
+ # Library that depends on generated proto targets. Prefer to depend only on
+ # those generated targets ("my_lib.pwpb", "my_lib.nanopb") that are actually
+ # required. Note that the .nanopb target may not compile for some proto
+ # messages, e.g. self-referring messages;
+ # see https://github.com/nanopb/nanopb/issues/433.
pw_cc_library(
name = "my_lib",
srcs = ["my/lib.cc"],
+ # This target depends on all generated proto targets
+ # e.g. name.{pwpb, nanopb, raw_rpc, nanopb_rpc}
deps = [":my_cc_proto"],
)
+
From ``my/lib.cc`` you can now include the generated headers.
e.g.
@@ -390,10 +429,10 @@ e.g.
#include "my_protos/bar.pwpb.h"
// and/or RPC headers
#include "my_protos/bar.raw_rpc.pb.h
+ // or
+ #include "my_protos/bar.nanopb_rpc.pb.h"
-.. note::
- Currently only raw RPC is supported by the Bazel build.
**Supported Codegen**
@@ -401,5 +440,8 @@ Bazel supports the following compiled proto libraries via the specified
sub-targets generated by a ``pw_proto_library``.
* ``${NAME}.pwpb`` - Generated C++ pw_protobuf code
+* ``${NAME}.nanopb`` - Generated C++ nanopb code
* ``${NAME}.raw_rpc`` - Generated C++ raw pw_rpc code (no protobuf library)
+* ``${NAME}.nanopb_rpc`` - Generated C++ Nanopb pw_rpc code
+
diff --git a/pw_protobuf_compiler/proto.bzl b/pw_protobuf_compiler/proto.bzl
index 37ac82d69..effe3de70 100644
--- a/pw_protobuf_compiler/proto.bzl
+++ b/pw_protobuf_compiler/proto.bzl
@@ -1,4 +1,4 @@
-# Copyright 2021 The Pigweed Authors
+# Copyright 2022 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
@@ -13,94 +13,10 @@
# the License.
"""Embedded-friendly replacement for native.cc_proto_library."""
-load("//pw_build:pigweed.bzl", "pw_cc_library")
-load("@rules_proto//proto:defs.bzl", "ProtoInfo")
load(
- "@rules_proto_grpc//:defs.bzl",
- "ProtoLibraryAspectNodeInfo",
- "ProtoPluginInfo",
- "proto_compile_aspect_attrs",
- "proto_compile_aspect_impl",
- "proto_compile_attrs",
- "proto_compile_impl",
+ "//third_party/rules_proto_grpc:internal_proto.bzl",
+ _pw_proto_library = "pw_proto_library",
)
-# Create aspect for cc_proto_compile
-cc_proto_compile_aspect = aspect(
- implementation = proto_compile_aspect_impl,
- provides = [ProtoLibraryAspectNodeInfo],
- attr_aspects = ["deps"],
- attrs = dict(
- proto_compile_aspect_attrs,
- _plugins = attr.label_list(
- doc = "List of protoc plugins to apply",
- providers = [ProtoPluginInfo],
- default = [
- Label("@pigweed//pw_protobuf:pw_cc_plugin"),
- Label("@pigweed//pw_rpc:pw_cc_plugin"),
- ],
- ),
- _prefix = attr.string(
- doc = "String used to disambiguate aspects when generating outputs",
- default = "cc_proto_compile_aspect",
- ),
- ),
- toolchains = [str(Label("@rules_proto_grpc//protobuf:toolchain_type"))],
-)
-
-# Create compile rule to apply aspect
-_rule = rule(
- implementation = proto_compile_impl,
- attrs = dict(
- proto_compile_attrs,
- deps = attr.label_list(
- mandatory = True,
- providers = [ProtoInfo, ProtoLibraryAspectNodeInfo],
- aspects = [cc_proto_compile_aspect],
- ),
- protos = attr.label_list(
- providers = [ProtoInfo],
- doc = "List of proto_library targets.",
- ),
- ),
-)
-
-# Create macro for converting attrs and passing to compile
-def _cc_proto_compile(**kwargs):
- _rule(
- verbose_string = "{}".format(kwargs.get("verbose", 0)),
- **kwargs
- )
-
-def pw_proto_library(**kwargs):
- """ Embedded friendly replacement for native.cc_proto_library
-
- This Protobuf implementation is designed to run on embedded
- computers. Because of this the cc API differs from the standard
- Protobuf cc plugin. The generated headers in this library are not a drop in
- replacement for the standard cc_proto_library.
-
- Args:
- **kwargs: Equivalent inputs to cc_proto_library
- """
-
- # Compile protos
- name_pb = kwargs.get("name") + "_pb"
- _cc_proto_compile(
- name = name_pb,
- # Forward deps and verbose tags to implementation
- **{k: v for (k, v) in kwargs.items() if k in ("deps", "verbose")}
- )
-
- # Create cc_library
- pw_cc_library(
- name = kwargs.get("name"),
- srcs = [name_pb],
- deps = [
- "@pigweed//pw_protobuf",
- ],
- includes = [name_pb],
- strip_include_prefix = ".",
- visibility = kwargs.get("visibility"),
- linkstatic = 1,
- )
+# Export internal symbols.
+pw_proto_library = _pw_proto_library
diff --git a/pw_protobuf_compiler/proto.gni b/pw_protobuf_compiler/proto.gni
index 0a3d8e9eb..e6fd4a8b6 100644
--- a/pw_protobuf_compiler/proto.gni
+++ b/pw_protobuf_compiler/proto.gni
@@ -154,7 +154,13 @@ template("_pw_pwpb_proto_library") {
forward_variables_from(invoker, _forwarded_vars)
public_configs = [ ":$target_name._include_path" ]
deps = [ ":$target_name._gen($pw_protobuf_compiler_TOOLCHAIN)" ]
- public_deps = [ dir_pw_protobuf ] + invoker.deps
+ public_deps = [
+ "$dir_pw_containers:vector",
+ dir_pw_assert,
+ dir_pw_protobuf,
+ dir_pw_result,
+ dir_pw_status,
+ ] + invoker.deps
sources = invoker.outputs
public = filter_include(sources, [ "*.pwpb.h" ])
}
diff --git a/pw_protobuf_compiler/pw_nanopb_cc_library.bzl b/pw_protobuf_compiler/pw_nanopb_cc_library.bzl
new file mode 100644
index 000000000..a5ae2d26a
--- /dev/null
+++ b/pw_protobuf_compiler/pw_nanopb_cc_library.bzl
@@ -0,0 +1,37 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""WORK IN PROGRESS!
+
+Nanopb C++ library generating targets.
+"""
+
+# TODO(pwbug/621) Enable unused variable check.
+# buildifier: disable=unused-variable
+def pw_nanopb_cc_library(
+ name,
+ deps,
+ options = None,
+ **kwargs):
+ """Generates the nanopb C++ library.
+
+ deps: proto_library targets to convert using nanopb.
+ options: Path to the nanopb .options file. See
+ https://jpa.kapsi.fi/nanopb/docs/reference.html#proto-file-options
+ for the syntax.
+ """
+
+ # TODO(tpudlik): Implement this rule. Just a placeholder for now.
+ native.cc_library(
+ name = name,
+ )
diff --git a/pw_protobuf_compiler/pw_proto_library.bzl b/pw_protobuf_compiler/pw_proto_library.bzl
new file mode 100644
index 000000000..f9a304846
--- /dev/null
+++ b/pw_protobuf_compiler/pw_proto_library.bzl
@@ -0,0 +1,289 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""WORK IN PROGRESS!
+
+This is intended to be a replacement for the proto codegen in proto.bzl, which
+relies on the transitive proto compilation support removed from newer versions
+of rules_proto_grpc. However, the version checked in here does not yet support,
+
+1. Proto libraries with dependencies in external repositories.
+2. Proto libraries with strip_import_prefix or import_prefix attributes.
+
+In addition, nanopb proto files are not yet generated.
+
+TODO(pwbug/621): Close these gaps and start using this implementation.
+
+# Overview of implementation
+
+(If you just want to use pw_proto_library, see its docstring; this section is
+intended to orient future maintainers.)
+
+Proto code generation is carried out by the _pw_proto_library,
+_pw_raw_rpc_proto_library and _pw_nanopb_rpc_proto_library rules using aspects
+(https://docs.bazel.build/versions/main/skylark/aspects.html). A
+_pw_proto_library has a single proto_library as a dependency, but that
+proto_library may depend on other proto_library targets; as a result, the
+generated .pwpb.h file #include's .pwpb.h files generated from the dependency
+proto_libraries. The aspect propagates along the proto_library dependency
+graph, running the proto compiler on each proto_library in the original
+target's transitive dependencies, ensuring that we're not missing any .pwpb.h
+files at C++ compile time.
+
+Although we have a separate rule for each protocol compiler plugin
+(_pw_proto_library, _pw_raw_rpc_proto_library, _pw_nanopb_rpc_proto_library),
+they actually share an implementation (_pw _impl_pw_proto_library) and use
+similar aspects, all generated by _proto_compiler_aspect. The only difference
+between the rules are captured in the PIGWEED_PLUGIN dictonary and the aspect
+instantiations (_pw_proto_compiler_aspect, etc).
+
+"""
+
+load("//pw_build:pigweed.bzl", "pw_cc_library")
+load("@rules_proto//proto:defs.bzl", "ProtoInfo")
+load("//pw_protobuf_compiler:pw_nanopb_cc_library", "pw_nanopb_cc_library")
+
+def pw_proto_library(name = "", deps = [], nanopb_options = None):
+ """Generate Pigweed proto C++ code.
+
+ This is the only public symbol in this file: everything else is
+ implementation details.
+
+ Args:
+ name: The name of the target.
+ deps: proto_library targets from which to generate Pigweed C++.
+ nanopb_options: path to file containing nanopb options, if any
+ (https://jpa.kapsi.fi/nanopb/docs/reference.html#proto-file-options).
+
+ Example usage:
+
+ proto_library(
+ name = "benchmark_proto",
+ srcs = [
+ "benchmark.proto",
+ ],
+ )
+
+ pw_proto_library(
+ name = "benchmark_pw_proto",
+ deps = [":benchmark_proto"],
+ )
+
+ pw_cc_binary(
+ name = "proto_user",
+ srcs = ["proto_user.cc"],
+ deps = [":benchmark_pw_proto.pwpb"],
+ )
+
+ The pw_proto_library generates the following targets in this example:
+
+ "benchmark_pw_proto.pwpb": C++ library exposing the "benchmark.pwpb.h" header.
+ "benchmark_pw_proto.raw_rpc": C++ library exposing the "benchmark.raw_rpc.h"
+ header.
+ "benchmark_pw_proto.nanopb": C++ library exposing the "benchmark.pb.h"
+ header.
+ "benchmark_pw_proto.nanopb_rpc": C++ library exposing the
+ "benchmark.rpc.pb.h" header.
+ """
+
+ # Use nanopb to generate the pb.h and pb.c files, and the target exposing
+ # them.
+ pw_nanopb_cc_library(name + ".nanopb", deps, options = nanopb_options)
+
+ # Use Pigweed proto plugins to generate the remaining files and targets.
+ for plugin_name, info in PIGWEED_PLUGIN.items():
+ name_pb = name + "_pb." + plugin_name
+ info["compiler"](
+ name = name_pb,
+ deps = deps,
+ )
+
+ # The rpc.pb.h header depends on the generated nanopb code.
+ if info["include_nanopb_dep"]:
+ lib_deps = info["deps"] + [":" + name + ".nanopb"]
+ else:
+ lib_deps = info["deps"]
+
+ pw_cc_library(
+ name = name + "." + plugin_name,
+ hdrs = [name_pb],
+ deps = lib_deps,
+ linkstatic = 1,
+ )
+
+PwProtoInfo = provider(
+ "Returned by PW proto compilation aspect",
+ fields = {
+ "genfiles": "generated C++ header files",
+ },
+)
+
+def _get_short_path(source):
+ return source.short_path
+
+def _get_path(file):
+ return file.path
+
+def _proto_compiler_aspect_impl(target, ctx):
+ # List the files we will generate for this proto_library target.
+ genfiles = []
+ for src in target[ProtoInfo].direct_sources:
+ path = src.basename[:-len("proto")] + ctx.attr._extension
+ genfiles.append(ctx.actions.declare_file(path, sibling = src))
+
+ args = ctx.actions.args()
+ args.add("--plugin=protoc-gen-pwpb={}".format(ctx.executable._protoc_plugin.path))
+ args.add("--pwpb_out={}".format(ctx.bin_dir.path))
+ args.add_joined(
+ "--descriptor_set_in",
+ target[ProtoInfo].transitive_descriptor_sets,
+ join_with = ctx.host_configuration.host_path_separator,
+ map_each = _get_path,
+ )
+ args.add_all(target[ProtoInfo].direct_sources, map_each = _get_short_path)
+
+ ctx.actions.run(
+ inputs = depset(target[ProtoInfo].transitive_sources.to_list(), transitive = [target[ProtoInfo].transitive_descriptor_sets]),
+ progress_message = "Generating %s C++ files for %s" % (ctx.attr._extension, ctx.label.name),
+ tools = [ctx.executable._protoc_plugin],
+ outputs = genfiles,
+ executable = ctx.executable._protoc,
+ arguments = [args],
+ )
+
+ transitive_genfiles = genfiles
+ for dep in ctx.rule.attr.deps:
+ transitive_genfiles += dep[PwProtoInfo].genfiles
+ return [PwProtoInfo(genfiles = transitive_genfiles)]
+
+def _proto_compiler_aspect(extension, protoc_plugin):
+ """Returns an aspect that runs the proto compiler.
+
+ The aspect propagates through the deps of proto_library targets, running
+ the proto compiler with the specified plugin for each of their source
+ files. The proto compiler is assumed to produce one output file per input
+ .proto file. That file is placed under bazel-bin at the same path as the
+ input file, but with the specified extension (i.e., with _extension =
+ .pwpb.h, the aspect converts pw_log/log.proto into
+ bazel-bin/pw_log/log.pwpb.h).
+
+ The aspect returns a provider exposing all the File objects generated from
+ the dependency graph.
+ """
+ return aspect(
+ attr_aspects = ["deps"],
+ attrs = {
+ "_extension": attr.string(default = extension),
+ "_protoc": attr.label(
+ default = Label("@com_google_protobuf//:protoc"),
+ executable = True,
+ cfg = "host",
+ ),
+ "_protoc_plugin": attr.label(
+ default = Label(protoc_plugin),
+ executable = True,
+ cfg = "host",
+ ),
+ },
+ implementation = _proto_compiler_aspect_impl,
+ )
+
+def _impl_pw_proto_library(ctx):
+ """Implementation of the proto codegen rule.
+
+ The work of actually generating the code is done by the aspect, so here we
+ just gather up all the generated files and return them.
+ """
+
+ # Note that we don't distinguish between the files generated from the
+ # target, and the files generated from its dependencies. We return all of
+ # them together, and in pw_proto_library expose all of them as hdrs.
+ # Pigweed's plugins happen to only generate .h files, so this works, but
+ # strictly speaking we should expose only the files generated from the
+ # target itself in hdrs, and place the headers generated from dependencies
+ # in srcs. We don't perform layering_check in Pigweed, so this is not a big
+ # deal.
+ #
+ # TODO(pwbug/621): Tidy this up.
+ all_genfiles = []
+ for dep in ctx.attr.deps:
+ for f in dep[PwProtoInfo].genfiles:
+ all_genfiles.append(f)
+
+ return [DefaultInfo(files = depset(all_genfiles))]
+
+# Instantiate the aspects and rules for generating code using specific plugins.
+_pw_proto_compiler_aspect = _proto_compiler_aspect("pwpb.h", "//pw_protobuf/py:plugin")
+
+_pw_proto_library = rule(
+ implementation = _impl_pw_proto_library,
+ attrs = {
+ "deps": attr.label_list(
+ providers = [ProtoInfo],
+ aspects = [_pw_proto_compiler_aspect],
+ ),
+ },
+)
+
+_pw_raw_rpc_proto_compiler_aspect = _proto_compiler_aspect("raw_rpc.pb.h", "//pw_rpc/py:plugin_raw")
+
+_pw_raw_rpc_proto_library = rule(
+ implementation = _impl_pw_proto_library,
+ attrs = {
+ "deps": attr.label_list(
+ providers = [ProtoInfo],
+ aspects = [_pw_raw_rpc_proto_compiler_aspect],
+ ),
+ },
+)
+
+_pw_nanopb_rpc_proto_compiler_aspect = _proto_compiler_aspect("rpc.pb.h", "//pw_rpc/py:plugin_nanopb")
+
+_pw_nanopb_rpc_proto_library = rule(
+ implementation = _impl_pw_proto_library,
+ attrs = {
+ "deps": attr.label_list(
+ providers = [ProtoInfo],
+ aspects = [_pw_nanopb_rpc_proto_compiler_aspect],
+ ),
+ },
+)
+
+PIGWEED_PLUGIN = {
+ "pwpb": {
+ "compiler": _pw_proto_library,
+ "deps": [
+ "//pw_span",
+ "//pw_protobuf:pw_protobuf",
+ ],
+ "include_nanopb_dep": False,
+ },
+ "raw_rpc": {
+ "compiler": _pw_raw_rpc_proto_library,
+ "deps": [
+ "//pw_rpc",
+ "//pw_rpc/raw:client_api",
+ "//pw_rpc/raw:server_api",
+ ],
+ "include_nanopb_dep": False,
+ },
+ "nanopb_rpc": {
+ "compiler": _pw_nanopb_rpc_proto_library,
+ "deps": [
+ "//pw_rpc",
+ "//pw_rpc/nanopb:client_api",
+ "//pw_rpc/nanopb:server_api",
+ ],
+ "include_nanopb_dep": True,
+ },
+}
diff --git a/pw_protobuf_compiler/ts/BUILD.bazel b/pw_protobuf_compiler/ts/BUILD.bazel
index 5e995915e..020da8fe4 100644
--- a/pw_protobuf_compiler/ts/BUILD.bazel
+++ b/pw_protobuf_compiler/ts/BUILD.bazel
@@ -56,7 +56,7 @@ ts_library(
deps = [
":test_proto_collection",
"//pw_protobuf_compiler:test_protos_tspb",
- "//pw_rpc:packet_proto_tspb",
+ "//pw_rpc/ts:packet_proto_tspb",
"@npm//@types/google-protobuf",
"@npm//@types/jasmine",
],
diff --git a/pw_protobuf_compiler/ts/codegen/BUILD.bazel b/pw_protobuf_compiler/ts/codegen/BUILD.bazel
index 80c0e7e84..f1959cfcf 100644
--- a/pw_protobuf_compiler/ts/codegen/BUILD.bazel
+++ b/pw_protobuf_compiler/ts/codegen/BUILD.bazel
@@ -23,7 +23,7 @@ ts_library(
"template_replacement.ts",
],
deps = [
- "@//pw_rpc:packet_proto_tspb",
+ "@//pw_rpc/ts:packet_proto_tspb",
"@npm//@types/argparse",
"@npm//@types/google-protobuf",
"@npm//@types/node",
diff --git a/pw_protobuf_compiler/ts/ts_proto_collection.bzl b/pw_protobuf_compiler/ts/ts_proto_collection.bzl
index fe57e28c5..fe9af959e 100644
--- a/pw_protobuf_compiler/ts/ts_proto_collection.bzl
+++ b/pw_protobuf_compiler/ts/ts_proto_collection.bzl
@@ -37,7 +37,7 @@ def _lib(name, proto_library, js_proto_library):
deps = [
js_proto_library,
"@//pw_protobuf_compiler/ts:pw_protobuf_compiler",
- "@//pw_rpc:packet_proto_tspb",
+ "@//pw_rpc/ts:packet_proto_tspb",
"@npm//@types/google-protobuf",
"@npm//@types/node",
"@npm//base64-js",
diff --git a/pw_result/BUILD.bazel b/pw_result/BUILD.bazel
index 867bf7b10..e5b05c2b2 100644
--- a/pw_result/BUILD.bazel
+++ b/pw_result/BUILD.bazel
@@ -24,9 +24,8 @@ licenses(["notice"])
pw_cc_library(
name = "pw_result",
- hdrs = [
- "public/pw_result/result.h",
- ],
+ srcs = ["public/pw_result/internal/result_internal.h"],
+ hdrs = ["public/pw_result/result.h"],
includes = ["public"],
deps = [
"//pw_assert:facade",
@@ -43,3 +42,13 @@ pw_cc_test(
"//pw_unit_test",
],
)
+
+pw_cc_test(
+ name = "statusor_test",
+ srcs = ["statusor_test.cc"],
+ deps = [
+ ":pw_result",
+ "//pw_status",
+ "//pw_unit_test",
+ ],
+)
diff --git a/pw_result/BUILD.gn b/pw_result/BUILD.gn
index aa09d7520..1fac1ad03 100644
--- a/pw_result/BUILD.gn
+++ b/pw_result/BUILD.gn
@@ -31,6 +31,7 @@ pw_source_set("pw_result") {
"$dir_pw_status",
]
public = [ "public/pw_result/result.h" ]
+ sources = [ "public/pw_result/internal/result_internal.h" ]
}
pw_test_group("tests") {
@@ -42,7 +43,10 @@ pw_test("result_test") {
":pw_result",
dir_pw_status,
]
- sources = [ "result_test.cc" ]
+ sources = [
+ "result_test.cc",
+ "statusor_test.cc",
+ ]
}
pw_doc_group("docs") {
diff --git a/pw_result/docs.rst b/pw_result/docs.rst
index 223e6b2c8..09f9e9852 100644
--- a/pw_result/docs.rst
+++ b/pw_result/docs.rst
@@ -1,11 +1,46 @@
.. _module-pw_result:
----------
+=========
pw_result
----------
-``pw::Result`` is a convenient wrapper around returning a Status along side some
-data when the status is OK. This is meant for returning lightweight result
-types or references to larger results.
+=========
+``pw::Result<T>`` is a class template for use in returning either a
+``pw::Status`` error or an object of type ``T``.
+
+.. inclusive-language: disable
+
+``pw::Result<T>``'s implementation is closely based on Abseil's `StatusOr<T>
+class <https://github.com/abseil/abseil-cpp/blob/master/absl/status/statusor.h>`_.
+There are a few differences:
+
+.. inclusive-language: enable
+
+* ``pw::Result<T>`` uses ``pw::Status``, which is much less sophisticated than
+ ``absl::Status``.
+* ``pw::Result<T>``'s functions are ``constexpr`` and ``pw::Result<T>`` may be
+ used in ``constexpr`` statements if ``T`` is trivially destructible.
+
+-----
+Usage
+-----
+Usage of ``pw::Result<T>`` is identical to Abseil's ``absl::StatusOr<T>``.
+See Abseil's `documentation
+<https://abseil.io/docs/cpp/guides/status#returning-a-status-or-a-value>`_ and
+`usage tips <https://abseil.io/tips/181>`_ for guidance.
+
+``pw::Result<T>`` is returned from a function that may return ``pw::OkStatus()``
+and a value or an error status and no value. If ``ok()`` is true, the
+``pw::Result<T>`` contains a valid ``T``. Otherwise, it does not contain a ``T``
+and attempting to access the value is an error.
+
+``pw::Result<T>`` can be used to directly access the contained type:
+
+.. code-block:: cpp
+
+ #include "pw_result/result.h"
+
+ if (pw::Result<Foo> foo = TryCreateFoo(); foo.ok()) {
+ foo->DoBar();
+ }
``pw::Result`` is compatible with ``PW_TRY`` and ``PW_TRY_ASSIGN``, for example:
@@ -44,28 +79,6 @@ types or references to larger results.
return pw::OkStatus();
}
-``pw::Result`` can be used to directly access the contained type:
-
-.. code-block:: cpp
-
- #include "pw_result/result.h"
-
- pw::Result<Foo> foo = TryCreateFoo();
- if (foo.ok()) {
- foo->DoBar();
- }
-
-or in C++17:
-
-.. code-block:: cpp
-
- if (pw::Result<Foo> foo = TryCreateFoo(); foo.ok()) {
- foo->DoBar();
- }
-
-See `Abseil's StatusOr <https://abseil.io/tips/181>`_ for guidance on using a
-similar type.
-
.. warning::
Be careful not to use larger types by value as this can quickly consume
@@ -76,12 +89,9 @@ similar type.
This module is experimental. Its impact on code size and stack usage has not
yet been profiled. Use at your own risk.
-Compatibility
-=============
-Works with C++14, but some features require C++17.
-
+-----------
Size report
-===========
+-----------
The table below showcases the difference in size between functions returning a
Status with an output pointer, and functions returning a Result, in various
situations.
@@ -92,7 +102,8 @@ check if Result is suitable for you.
.. include:: result_size
+------
Zephyr
-======
+------
To enable ``pw_result`` for Zephyr add ``CONFIG_PIGWEED_RESULT=y`` to the
project's configuration.
diff --git a/pw_result/public/pw_result/internal/result_internal.h b/pw_result/public/pw_result/internal/result_internal.h
new file mode 100644
index 000000000..895bfd934
--- /dev/null
+++ b/pw_result/public/pw_result/internal/result_internal.h
@@ -0,0 +1,379 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <type_traits>
+#include <utility>
+
+#include "pw_assert/assert.h"
+#include "pw_status/status.h"
+
+namespace pw {
+
+template <typename T>
+class [[nodiscard]] Result;
+
+namespace internal_result {
+
+// Detects whether `U` has conversion operator to `Result<T>`, i.e. `operator
+// Result<T>()`.
+template <typename T, typename U, typename = void>
+struct HasConversionOperatorToResult : std::false_type {};
+
+template <typename T, typename U>
+void test(char (*)[sizeof(std::declval<U>().operator Result<T>())]);
+
+template <typename T, typename U>
+struct HasConversionOperatorToResult<T, U, decltype(test<T, U>(0))>
+ : std::true_type {};
+
+// Detects whether `T` is constructible or convertible from `Result<U>`.
+template <typename T, typename U>
+using IsConstructibleOrConvertibleFromResult =
+ std::disjunction<std::is_constructible<T, Result<U>&>,
+ std::is_constructible<T, const Result<U>&>,
+ std::is_constructible<T, Result<U>&&>,
+ std::is_constructible<T, const Result<U>&&>,
+ std::is_convertible<Result<U>&, T>,
+ std::is_convertible<const Result<U>&, T>,
+ std::is_convertible<Result<U>&&, T>,
+ std::is_convertible<const Result<U>&&, T>>;
+
+// Detects whether `T` is constructible or convertible or assignable from
+// `Result<U>`.
+template <typename T, typename U>
+using IsConstructibleOrConvertibleOrAssignableFromResult =
+ std::disjunction<IsConstructibleOrConvertibleFromResult<T, U>,
+ std::is_assignable<T&, Result<U>&>,
+ std::is_assignable<T&, const Result<U>&>,
+ std::is_assignable<T&, Result<U>&&>,
+ std::is_assignable<T&, const Result<U>&&>>;
+
+// Detects whether direct initializing `Result<T>` from `U` is ambiguous, i.e.
+// when `U` is `Result<V>` and `T` is constructible or convertible from `V`.
+template <typename T, typename U>
+struct IsDirectInitializationAmbiguous
+ : public std::conditional_t<
+ std::is_same<std::remove_cv_t<std::remove_reference_t<U>>, U>::value,
+ std::false_type,
+ IsDirectInitializationAmbiguous<
+ T,
+ std::remove_cv_t<std::remove_reference_t<U>>>> {};
+
+template <typename T, typename V>
+struct IsDirectInitializationAmbiguous<T, Result<V>>
+ : public IsConstructibleOrConvertibleFromResult<T, V> {};
+
+// Checks against the constraints of the direct initialization, i.e. when
+// `Result<T>::Result(U&&)` should participate in overload resolution.
+template <typename T, typename U>
+using IsDirectInitializationValid = std::disjunction<
+ // Short circuits if T is basically U.
+ std::is_same<T, std::remove_cv_t<std::remove_reference_t<U>>>,
+ std::negation<std::disjunction<
+ std::is_same<Result<T>, std::remove_cv_t<std::remove_reference_t<U>>>,
+ std::is_same<Status, std::remove_cv_t<std::remove_reference_t<U>>>,
+ std::is_same<std::in_place_t,
+ std::remove_cv_t<std::remove_reference_t<U>>>,
+ IsDirectInitializationAmbiguous<T, U>>>>;
+
+// This trait detects whether `Result<T>::operator=(U&&)` is ambiguous, which
+// is equivalent to whether all the following conditions are met:
+// 1. `U` is `Result<V>`.
+// 2. `T` is constructible and assignable from `V`.
+// 3. `T` is constructible and assignable from `U` (i.e. `Result<V>`).
+// For example, the following code is considered ambiguous:
+// (`T` is `bool`, `U` is `Result<bool>`, `V` is `bool`)
+// Result<bool> s1 = true; // s1.ok() && s1.ValueOrDie() == true
+// Result<bool> s2 = false; // s2.ok() && s2.ValueOrDie() == false
+// s1 = s2; // ambiguous, `s1 = s2.ValueOrDie()` or `s1 = bool(s2)`?
+template <typename T, typename U>
+struct IsForwardingAssignmentAmbiguous
+ : public std::conditional_t<
+ std::is_same<std::remove_cv_t<std::remove_reference_t<U>>, U>::value,
+ std::false_type,
+ IsForwardingAssignmentAmbiguous<
+ T,
+ std::remove_cv_t<std::remove_reference_t<U>>>> {};
+
+template <typename T, typename U>
+struct IsForwardingAssignmentAmbiguous<T, Result<U>>
+ : public IsConstructibleOrConvertibleOrAssignableFromResult<T, U> {};
+
+// Checks against the constraints of the forwarding assignment, i.e. whether
+// `Result<T>::operator=(U&&)` should participate in overload resolution.
+template <typename T, typename U>
+using IsForwardingAssignmentValid = std::disjunction<
+ // Short circuits if T is basically U.
+ std::is_same<T, std::remove_cv_t<std::remove_reference_t<U>>>,
+ std::negation<std::disjunction<
+ std::is_same<Result<T>, std::remove_cv_t<std::remove_reference_t<U>>>,
+ std::is_same<Status, std::remove_cv_t<std::remove_reference_t<U>>>,
+ std::is_same<std::in_place_t,
+ std::remove_cv_t<std::remove_reference_t<U>>>,
+ IsForwardingAssignmentAmbiguous<T, U>>>>;
+
+PW_MODIFY_DIAGNOSTICS_PUSH();
+PW_MODIFY_DIAGNOSTIC_GCC(ignored, "-Wmaybe-uninitialized");
+
+// Construct an instance of T in `p` through placement new, passing Args... to
+// the constructor.
+// This abstraction is here mostly for the gcc performance fix.
+template <typename T, typename... Args>
+void PlacementNew(void* p, Args&&... args) {
+ new (p) T(std::forward<Args>(args)...);
+}
+
+// Helper base class to hold the data and all operations.
+// We move all this to a base class to allow mixing with the appropriate
+// TraitsBase specialization.
+//
+// Pigweed addition: Specialize StatusOrData for trivially destructible types.
+// This makes a Result usable in a constexpr statement.
+//
+// Note: in C++20, this entire file can be greatly simplified with the requires
+// statement.
+template <typename T, bool = std::is_trivially_destructible<T>::value>
+class StatusOrData;
+
+// Place the implementation of StatusOrData in a macro so it can be shared
+// between both specializations.
+#define PW_RESULT_STATUS_OR_DATA_IMPL \
+ template <typename U, bool> \
+ friend class StatusOrData; \
+ \
+ public: \
+ StatusOrData() = delete; \
+ \
+ constexpr StatusOrData(const StatusOrData& other) \
+ : status_(other.status_), unused_() { \
+ if (other.ok()) { \
+ MakeValue(other.data_); \
+ } \
+ } \
+ \
+ constexpr StatusOrData(StatusOrData&& other) noexcept \
+ : status_(std::move(other.status_)), unused_() { \
+ if (other.ok()) { \
+ MakeValue(std::move(other.data_)); \
+ } \
+ } \
+ \
+ template <typename U> \
+ explicit constexpr StatusOrData(const StatusOrData<U>& other) { \
+ if (other.ok()) { \
+ MakeValue(other.data_); \
+ status_ = OkStatus(); \
+ } else { \
+ status_ = other.status_; \
+ } \
+ } \
+ \
+ template <typename U> \
+ explicit constexpr StatusOrData(StatusOrData<U>&& other) { \
+ if (other.ok()) { \
+ MakeValue(std::move(other.data_)); \
+ status_ = OkStatus(); \
+ } else { \
+ status_ = std::move(other.status_); \
+ } \
+ } \
+ \
+ template <typename... Args> \
+ explicit constexpr StatusOrData(std::in_place_t, Args&&... args) \
+ : status_(), data_(std::forward<Args>(args)...) {} \
+ \
+ explicit constexpr StatusOrData(const T& value) : status_(), data_(value) {} \
+ explicit constexpr StatusOrData(T&& value) \
+ : status_(), data_(std::move(value)) {} \
+ \
+ template <typename U, \
+ std::enable_if_t<std::is_constructible<Status, U&&>::value, int> = \
+ 0> \
+ explicit constexpr StatusOrData(U&& v) \
+ : status_(std::forward<U>(v)), unused_() { \
+ PW_ASSERT(!status_.ok()); \
+ } \
+ \
+ constexpr StatusOrData& operator=(const StatusOrData& other) { \
+ if (this == &other) { \
+ return *this; \
+ } \
+ \
+ if (other.ok()) { \
+ Assign(other.data_); \
+ } else { \
+ AssignStatus(other.status_); \
+ } \
+ return *this; \
+ } \
+ \
+ constexpr StatusOrData& operator=(StatusOrData&& other) { \
+ if (this == &other) { \
+ return *this; \
+ } \
+ \
+ if (other.ok()) { \
+ Assign(std::move(other.data_)); \
+ } else { \
+ AssignStatus(std::move(other.status_)); \
+ } \
+ return *this; \
+ } \
+ \
+ template <typename U> \
+ constexpr void Assign(U&& value) { \
+ if (ok()) { \
+ data_ = std::forward<U>(value); \
+ } else { \
+ MakeValue(std::forward<U>(value)); \
+ status_ = OkStatus(); \
+ } \
+ } \
+ \
+ template <typename U> \
+ constexpr void AssignStatus(U&& v) { \
+ Clear(); \
+ status_ = static_cast<Status>(std::forward<U>(v)); \
+ PW_ASSERT(!status_.ok()); \
+ } \
+ \
+ constexpr bool ok() const { return status_.ok(); } \
+ \
+ protected: \
+ union { \
+ Status status_; \
+ }; \
+ \
+ struct Empty {}; \
+ union { \
+ Empty unused_; \
+ T data_; \
+ }; \
+ \
+ constexpr void Clear() { \
+ if (ok()) { \
+ data_.~T(); \
+ } \
+ } \
+ \
+ template <typename... Arg> \
+ void MakeValue(Arg&&... arg) { \
+ internal_result::PlacementNew<T>(&unused_, std::forward<Arg>(arg)...); \
+ }
+
+template <typename T>
+class StatusOrData<T, true> {
+ PW_RESULT_STATUS_OR_DATA_IMPL;
+};
+
+template <typename T>
+class StatusOrData<T, false> {
+ PW_RESULT_STATUS_OR_DATA_IMPL;
+
+ public:
+ // Add a destructor since T is not trivially destructible.
+ ~StatusOrData() {
+ if (ok()) {
+ data_.~T();
+ }
+ }
+};
+
+#undef PW_RESULT_STATUS_OR_DATA_IMPL
+
+PW_MODIFY_DIAGNOSTICS_POP();
+
+// Helper base classes to allow implicitly deleted constructors and assignment
+// operators in `Result`. For example, `CopyCtorBase` will explicitly delete
+// the copy constructor when T is not copy constructible and `Result` will
+// inherit that behavior implicitly.
+template <typename T, bool = std::is_copy_constructible<T>::value>
+struct CopyCtorBase {
+ CopyCtorBase() = default;
+ CopyCtorBase(const CopyCtorBase&) = default;
+ CopyCtorBase(CopyCtorBase&&) = default;
+ CopyCtorBase& operator=(const CopyCtorBase&) = default;
+ CopyCtorBase& operator=(CopyCtorBase&&) = default;
+};
+
+template <typename T>
+struct CopyCtorBase<T, false> {
+ CopyCtorBase() = default;
+ CopyCtorBase(const CopyCtorBase&) = delete;
+ CopyCtorBase(CopyCtorBase&&) = default;
+ CopyCtorBase& operator=(const CopyCtorBase&) = default;
+ CopyCtorBase& operator=(CopyCtorBase&&) = default;
+};
+
+template <typename T, bool = std::is_move_constructible<T>::value>
+struct MoveCtorBase {
+ MoveCtorBase() = default;
+ MoveCtorBase(const MoveCtorBase&) = default;
+ MoveCtorBase(MoveCtorBase&&) = default;
+ MoveCtorBase& operator=(const MoveCtorBase&) = default;
+ MoveCtorBase& operator=(MoveCtorBase&&) = default;
+};
+
+template <typename T>
+struct MoveCtorBase<T, false> {
+ MoveCtorBase() = default;
+ MoveCtorBase(const MoveCtorBase&) = default;
+ MoveCtorBase(MoveCtorBase&&) = delete;
+ MoveCtorBase& operator=(const MoveCtorBase&) = default;
+ MoveCtorBase& operator=(MoveCtorBase&&) = default;
+};
+
+template <typename T,
+ bool = std::is_copy_constructible<T>::value&&
+ std::is_copy_assignable<T>::value>
+struct CopyAssignBase {
+ CopyAssignBase() = default;
+ CopyAssignBase(const CopyAssignBase&) = default;
+ CopyAssignBase(CopyAssignBase&&) = default;
+ CopyAssignBase& operator=(const CopyAssignBase&) = default;
+ CopyAssignBase& operator=(CopyAssignBase&&) = default;
+};
+
+template <typename T>
+struct CopyAssignBase<T, false> {
+ CopyAssignBase() = default;
+ CopyAssignBase(const CopyAssignBase&) = default;
+ CopyAssignBase(CopyAssignBase&&) = default;
+ CopyAssignBase& operator=(const CopyAssignBase&) = delete;
+ CopyAssignBase& operator=(CopyAssignBase&&) = default;
+};
+
+template <typename T,
+ bool = std::is_move_constructible<T>::value&&
+ std::is_move_assignable<T>::value>
+struct MoveAssignBase {
+ MoveAssignBase() = default;
+ MoveAssignBase(const MoveAssignBase&) = default;
+ MoveAssignBase(MoveAssignBase&&) = default;
+ MoveAssignBase& operator=(const MoveAssignBase&) = default;
+ MoveAssignBase& operator=(MoveAssignBase&&) = default;
+};
+
+template <typename T>
+struct MoveAssignBase<T, false> {
+ MoveAssignBase() = default;
+ MoveAssignBase(const MoveAssignBase&) = default;
+ MoveAssignBase(MoveAssignBase&&) = default;
+ MoveAssignBase& operator=(const MoveAssignBase&) = default;
+ MoveAssignBase& operator=(MoveAssignBase&&) = delete;
+};
+
+} // namespace internal_result
+} // namespace pw
diff --git a/pw_result/public/pw_result/result.h b/pw_result/public/pw_result/result.h
index 2366e03e3..fe504dc76 100644
--- a/pw_result/public/pw_result/result.h
+++ b/pw_result/public/pw_result/result.h
@@ -1,4 +1,4 @@
-// Copyright 2020 The Pigweed Authors
+// Copyright 2022 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
@@ -11,130 +11,700 @@
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
+//
+// -----------------------------------------------------------------------------
+// File: result.h
+// -----------------------------------------------------------------------------
+//
+// A `Result<T>` represents a union of a `pw::Status` object and an object of
+// type `T`. The `Result<T>` will either contain an object of type `T`
+// (indicating a successful operation), or an error (of type `Status`)
+// explaining why such a value is not present.
+//
+// In general, check the success of an operation returning a `Result<T>` like
+// you would a `pw::Status` by using the `ok()` member function.
+//
+// Example:
+//
+// Result<Foo> result = Calculation();
+// if (result.ok()) {
+// result->DoSomethingCool();
+// } else {
+// PW_LOG_ERROR("Calculation failed: %s", result.status().str());
+// }
#pragma once
-#include <algorithm>
+#include <exception>
+#include <initializer_list>
+#include <new>
+#include <string>
+#include <type_traits>
#include <utility>
-#include "pw_assert/assert.h"
#include "pw_preprocessor/compiler.h"
+#include "pw_result/internal/result_internal.h"
#include "pw_status/status.h"
namespace pw {
-// A Result represents the result of an operation which can fail. It is a
-// convenient wrapper around returning a Status alongside some data when the
-// status is OK.
+// Returned Result objects may not be ignored.
+template <typename T>
+class [[nodiscard]] Result;
+
+// Result<T>
+//
+// The `Result<T>` class template is a union of a `pw::Status` object and an
+// object of type `T`. The `Result<T>` models an object that is either a usable
+// object, or an error (of type `Status`) explaining why such an object is not
+// present. A `Result<T>` is typically the return value of a function which may
+// fail.
+//
+// A `Result<T>` can never hold an "OK" status; instead, the presence of an
+// object of type `T` indicates success. Instead of checking for a `kOk` value,
+// use the `Result<T>::ok()` member function. (It is for this reason, and code
+// readability, that using the `ok()` function is preferred for `Status` as
+// well.)
+//
+// Example:
+//
+// Result<Foo> result = DoBigCalculationThatCouldFail();
+// if (result.ok()) {
+// result->DoSomethingCool();
+// } else {
+// PW_LOG_ERROR("Calculation failed: %s", result.status().str());
+// }
+//
+// Accessing the object held by an `Result<T>` should be performed via
+// `operator*` or `operator->`, after a call to `ok()` confirms that the
+// `Result<T>` holds an object of type `T`:
+//
+// Example:
+//
+// Result<int> i = GetCount();
+// if (i.ok()) {
+// updated_total += *i
+// }
+//
+// NOTE: using `Result<T>::value()` when no valid value is present will trigger
+// a PW_ASSERT.
+//
+// Example:
//
-// TODO(pwbug/363): Refactor pw::Result to properly support non-default move
-// and/or copy assignment operators and/or constructors.
+// Result<Foo> result = DoBigCalculationThatCouldFail();
+// const Foo& foo = result.value(); // Crash/exception if no value present
+// foo.DoSomethingCool();
+//
+// A `Result<T*>` can be constructed from a null pointer like any other pointer
+// value, and the result will be that `ok()` returns `true` and `value()`
+// returns `nullptr`. Checking the value of a pointer in a `Result<T>` generally
+// requires a bit more care, to ensure both that a value is present and that
+// value is not null:
+//
+// Result<Foo*> result = LookUpTheFoo(arg);
+// if (!result.ok()) {
+// PW_LOG_ERROR("Unable to look up the Foo: %s", result.status().str());
+// } else if (*result == nullptr) {
+// PW_LOG_ERROR("Unexpected null pointer");
+// } else {
+// (*result)->DoSomethingCool();
+// }
+//
+// Example factory implementation returning Result<T>:
+//
+// Result<Foo> FooFactory::MakeFoo(int arg) {
+// if (arg <= 0) {
+// return pw::Status::InvalidArgument();
+// }
+// return Foo(arg);
+// }
template <typename T>
-class [[nodiscard]] Result {
+class Result : private internal_result::StatusOrData<T>,
+ private internal_result::CopyCtorBase<T>,
+ private internal_result::MoveCtorBase<T>,
+ private internal_result::CopyAssignBase<T>,
+ private internal_result::MoveAssignBase<T> {
+ template <typename U>
+ friend class Result;
+
+ using Base = internal_result::StatusOrData<T>;
+
public:
- constexpr Result(T&& value) : value_(std::move(value)), status_(OkStatus()) {}
- constexpr Result(const T& value) : value_(value), status_(OkStatus()) {}
+ // Result<T>::value_type
+ //
+ // This instance data provides a generic `value_type` member for use within
+ // generic programming. This usage is analogous to that of
+ // `optional::value_type` in the case of `std::optional`.
+ typedef T value_type;
- template <typename... Args>
- constexpr Result(std::in_place_t, Args&&... args)
- : value_(std::forward<Args>(args)...), status_(OkStatus()) {}
+ // Constructors
- constexpr Result(Status status) : unused_({}), status_(status) {
- PW_ASSERT(!status_.ok());
- }
- constexpr Result(Status::Code code) : unused_({}), status_(code) {
- PW_ASSERT(!status_.ok());
- }
+ // Constructs a new `Result` with an `pw::Status::Unknown()` status. This
+ // constructor is marked 'explicit' to prevent usages in return values such as
+ // 'return {};', under the misconception that `Result<std::vector<int>>` will
+ // be initialized with an empty vector, instead of a `Status::Unknown()` error
+ // code.
+ explicit constexpr Result();
+ // `Result<T>` is copy constructible if `T` is copy constructible.
constexpr Result(const Result&) = default;
+ // `Result<T>` is copy assignable if `T` is copy constructible and copy
+ // assignable.
constexpr Result& operator=(const Result&) = default;
+ // `Result<T>` is move constructible if `T` is move constructible.
constexpr Result(Result&&) = default;
+  // `Result<T>` is move assignable if `T` is move constructible and move
+  // assignable.
constexpr Result& operator=(Result&&) = default;
- [[nodiscard]] constexpr Status status() const { return status_; }
- [[nodiscard]] constexpr bool ok() const { return status_.ok(); }
+ // Converting Constructors
- constexpr T& value() & {
- PW_ASSERT(status_.ok());
- return value_;
- }
+  // Constructs a new `Result<T>` from a `pw::Result<U>`, when `T` is
+  // constructible from `U`. To avoid ambiguity, these constructors are disabled
+ // if `T` is also constructible from `Result<U>.`. This constructor is
+ // explicit if and only if the corresponding construction of `T` from `U` is
+ // explicit. (This constructor inherits its explicitness from the underlying
+ // constructor.)
+ template <
+ typename U,
+ std::enable_if_t<
+ std::conjunction<
+ std::negation<std::is_same<T, U>>,
+ std::is_constructible<T, const U&>,
+ std::is_convertible<const U&, T>,
+ std::negation<internal_result::
+ IsConstructibleOrConvertibleFromResult<T, U>>>::
+ value,
+ int> = 0>
+ constexpr Result(const Result<U>& other) // NOLINT
+ : Base(static_cast<const typename Result<U>::Base&>(other)) {}
+ template <
+ typename U,
+ std::enable_if_t<
+ std::conjunction<
+ std::negation<std::is_same<T, U>>,
+ std::is_constructible<T, const U&>,
+ std::negation<std::is_convertible<const U&, T>>,
+ std::negation<internal_result::
+ IsConstructibleOrConvertibleFromResult<T, U>>>::
+ value,
+ int> = 0>
+ explicit constexpr Result(const Result<U>& other)
+ : Base(static_cast<const typename Result<U>::Base&>(other)) {}
- constexpr const T& value() const& {
- PW_ASSERT(status_.ok());
- return value_;
- }
+ template <
+ typename U,
+ std::enable_if_t<
+ std::conjunction<
+ std::negation<std::is_same<T, U>>,
+ std::is_constructible<T, U&&>,
+ std::is_convertible<U&&, T>,
+ std::negation<internal_result::
+ IsConstructibleOrConvertibleFromResult<T, U>>>::
+ value,
+ int> = 0>
+ constexpr Result(Result<U>&& other) // NOLINT
+ : Base(static_cast<typename Result<U>::Base&&>(other)) {}
+ template <
+ typename U,
+ std::enable_if_t<
+ std::conjunction<
+ std::negation<std::is_same<T, U>>,
+ std::is_constructible<T, U&&>,
+ std::negation<std::is_convertible<U&&, T>>,
+ std::negation<internal_result::
+ IsConstructibleOrConvertibleFromResult<T, U>>>::
+ value,
+ int> = 0>
+ explicit constexpr Result(Result<U>&& other)
+ : Base(static_cast<typename Result<U>::Base&&>(other)) {}
- constexpr T&& value() && {
- PW_ASSERT(status_.ok());
- return std::move(value_);
- }
+ // Converting Assignment Operators
- constexpr T& operator*() const& {
- PW_ASSERT(status_.ok());
- return value_;
+ // Creates an `Result<T>` through assignment from an
+ // `Result<U>` when:
+ //
+ // * Both `Result<T>` and `pw::Result<U>` are OK by assigning
+ // `U` to `T` directly.
+ // * `Result<T>` is OK and `pw::Result<U>` contains an error
+ // code by destroying `Result<T>`'s value and assigning from
+ // `Result<U>'
+ // * `Result<T>` contains an error code and `pw::Result<U>` is
+ // OK by directly initializing `T` from `U`.
+ // * Both `Result<T>` and `pw::Result<U>` contain an error
+ // code by assigning the `Status` in `Result<U>` to
+ // `Result<T>`
+ //
+ // These overloads only apply if `Result<T>` is constructible and
+ // assignable from `Result<U>` and `Result<T>` cannot be directly
+ // assigned from `Result<U>`.
+ template <typename U,
+ std::enable_if_t<
+ std::conjunction<
+ std::negation<std::is_same<T, U>>,
+ std::is_constructible<T, const U&>,
+ std::is_assignable<T, const U&>,
+ std::negation<
+ internal_result::
+ IsConstructibleOrConvertibleOrAssignableFromResult<
+ T,
+ U>>>::value,
+ int> = 0>
+ constexpr Result& operator=(const Result<U>& other) {
+ this->Assign(other);
+ return *this;
}
-
- T& operator*() & {
- PW_ASSERT(status_.ok());
- return value_;
+ template <typename U,
+ std::enable_if_t<
+ std::conjunction<
+ std::negation<std::is_same<T, U>>,
+ std::is_constructible<T, U&&>,
+ std::is_assignable<T, U&&>,
+ std::negation<
+ internal_result::
+ IsConstructibleOrConvertibleOrAssignableFromResult<
+ T,
+ U>>>::value,
+ int> = 0>
+ constexpr Result& operator=(Result<U>&& other) {
+ this->Assign(std::move(other));
+ return *this;
}
- constexpr T&& operator*() const&& {
- PW_ASSERT(status_.ok());
- return std::move(value_);
- }
+ // Constructs a new `Result<T>` with a non-ok status. After calling this
+ // constructor, `this->ok()` will be `false` and calls to `value()` will
+ // crash, or produce an exception if exceptions are enabled.
+ //
+ // The constructor also takes any type `U` that is convertible to `Status`.
+  // This constructor is explicit if and only if `U` is not of type `Status` and
+  // the conversion from `U` to `Status` is explicit.
+ //
+ // REQUIRES: !Status(std::forward<U>(v)).ok(). This requirement is DCHECKed.
+ // In optimized builds, passing OkStatus() here will have the effect of
+ // passing Status::Internal() as a fallback.
+ template <
+ typename U = Status,
+ std::enable_if_t<
+ std::conjunction<
+ std::is_convertible<U&&, Status>,
+ std::is_constructible<Status, U&&>,
+ std::negation<std::is_same<std::decay_t<U>, Result<T>>>,
+ std::negation<std::is_same<std::decay_t<U>, T>>,
+ std::negation<std::is_same<std::decay_t<U>, std::in_place_t>>,
+ std::negation<internal_result::
+ HasConversionOperatorToResult<T, U&&>>>::value,
+ int> = 0>
+ constexpr Result(U&& v) : Base(std::forward<U>(v)) {}
- T&& operator*() && {
- PW_ASSERT(status_.ok());
- return std::move(value_);
- }
+ template <
+ typename U = Status,
+ std::enable_if_t<
+ std::conjunction<
+ std::negation<std::is_convertible<U&&, Status>>,
+ std::is_constructible<Status, U&&>,
+ std::negation<std::is_same<std::decay_t<U>, Result<T>>>,
+ std::negation<std::is_same<std::decay_t<U>, T>>,
+ std::negation<std::is_same<std::decay_t<U>, std::in_place_t>>,
+ std::negation<internal_result::
+ HasConversionOperatorToResult<T, U&&>>>::value,
+ int> = 0>
+ constexpr explicit Result(U&& v) : Base(std::forward<U>(v)) {}
- constexpr T* operator->() const {
- PW_ASSERT(status_.ok());
- return &value_;
+ template <
+ typename U = Status,
+ std::enable_if_t<
+ std::conjunction<
+ std::is_convertible<U&&, Status>,
+ std::is_constructible<Status, U&&>,
+ std::negation<std::is_same<std::decay_t<U>, Result<T>>>,
+ std::negation<std::is_same<std::decay_t<U>, T>>,
+ std::negation<std::is_same<std::decay_t<U>, std::in_place_t>>,
+ std::negation<internal_result::
+ HasConversionOperatorToResult<T, U&&>>>::value,
+ int> = 0>
+ constexpr Result& operator=(U&& v) {
+ this->AssignStatus(std::forward<U>(v));
+ return *this;
}
- T* operator->() {
- PW_ASSERT(status_.ok());
- return &value_;
+ // Perfect-forwarding value assignment operator.
+
+ // If `*this` contains a `T` value before the call, the contained value is
+ // assigned from `std::forward<U>(v)`; Otherwise, it is directly-initialized
+ // from `std::forward<U>(v)`.
+ // This function does not participate in overload unless:
+ // 1. `std::is_constructible_v<T, U>` is true,
+ // 2. `std::is_assignable_v<T&, U>` is true.
+ // 3. `std::is_same_v<Result<T>, std::remove_cvref_t<U>>` is false.
+ // 4. Assigning `U` to `T` is not ambiguous:
+ // If `U` is `Result<V>` and `T` is constructible and assignable from
+ // both `Result<V>` and `V`, the assignment is considered bug-prone and
+ // ambiguous thus will fail to compile. For example:
+ // Result<bool> s1 = true; // s1.ok() && *s1 == true
+ // Result<bool> s2 = false; // s2.ok() && *s2 == false
+ // s1 = s2; // ambiguous, `s1 = *s2` or `s1 = bool(s2)`?
+ template <
+ typename U = T,
+ typename = typename std::enable_if<std::conjunction<
+ std::is_constructible<T, U&&>,
+ std::is_assignable<T&, U&&>,
+ std::disjunction<
+ std::is_same<std::remove_cv_t<std::remove_reference_t<U>>, T>,
+ std::conjunction<
+ std::negation<std::is_convertible<U&&, Status>>,
+ std::negation<
+ internal_result::HasConversionOperatorToResult<T, U&&>>>>,
+ internal_result::IsForwardingAssignmentValid<T, U&&>>::value>::type>
+ constexpr Result& operator=(U&& v) {
+ this->Assign(std::forward<U>(v));
+ return *this;
}
+ // Constructs the inner value `T` in-place using the provided args, using the
+ // `T(args...)` constructor.
+ template <typename... Args>
+ explicit constexpr Result(std::in_place_t, Args&&... args);
+ template <typename U, typename... Args>
+ explicit constexpr Result(std::in_place_t,
+ std::initializer_list<U> ilist,
+ Args&&... args);
+
+ // Constructs the inner value `T` in-place using the provided args, using the
+ // `T(U)` (direct-initialization) constructor. This constructor is only valid
+ // if `T` can be constructed from a `U`. Can accept move or copy constructors.
+ //
+ // This constructor is explicit if `U` is not convertible to `T`. To avoid
+ // ambiguity, this constructor is disabled if `U` is a `Result<J>`, where
+ // `J` is convertible to `T`.
+ template <
+ typename U = T,
+ std::enable_if_t<
+ std::conjunction<
+ internal_result::IsDirectInitializationValid<T, U&&>,
+ std::is_constructible<T, U&&>,
+ std::is_convertible<U&&, T>,
+ std::disjunction<
+ std::is_same<std::remove_cv_t<std::remove_reference_t<U>>, T>,
+ std::conjunction<
+ std::negation<std::is_convertible<U&&, Status>>,
+ std::negation<
+ internal_result::
+ HasConversionOperatorToResult<T, U&&>>>>>::value,
+ int> = 0>
+ constexpr Result(U&& u) // NOLINT
+ : Result(std::in_place, std::forward<U>(u)) {}
+
+ template <
+ typename U = T,
+ std::enable_if_t<
+ std::conjunction<
+ internal_result::IsDirectInitializationValid<T, U&&>,
+ std::disjunction<
+ std::is_same<std::remove_cv_t<std::remove_reference_t<U>>, T>,
+ std::conjunction<
+ std::negation<std::is_constructible<Status, U&&>>,
+ std::negation<
+ internal_result::
+ HasConversionOperatorToResult<T, U&&>>>>,
+ std::is_constructible<T, U&&>,
+ std::negation<std::is_convertible<U&&, T>>>::value,
+ int> = 0>
+ explicit constexpr Result(U&& u) // NOLINT
+ : Result(std::in_place, std::forward<U>(u)) {}
+
+ // Result<T>::ok()
+ //
+ // Returns whether or not this `Result<T>` holds a `T` value. This
+  // member function is analogous to `Status::ok()` and should be used
+  // similarly to check the status of return values.
+ //
+ // Example:
+ //
+ // Result<Foo> result = DoBigCalculationThatCouldFail();
+  //   if (result.ok()) {
+  //     // Handle result
+  //   } else {
+  //     // Handle error
+  //   }
+ [[nodiscard]] constexpr bool ok() const { return this->status_.ok(); }
+
+ // Result<T>::status()
+ //
+ // Returns a reference to the current `Status` contained within the
+ // `Result<T>`. If `pw::Result<T>` contains a `T`, then this function returns
+ // `OkStatus()`.
+ constexpr const Status& status() const&;
+ constexpr Status status() &&;
+
+ // Result<T>::value()
+ //
+ // Returns a reference to the held value if `this->ok()`. Otherwise,
+ // terminates the process.
+ //
+ // If you have already checked the status using `this->ok()`, you probably
+ // want to use `operator*()` or `operator->()` to access the value instead of
+ // `value`.
+ //
+ // Note: for value types that are cheap to copy, prefer simple code:
+ //
+ // T value = result.value();
+ //
+ // Otherwise, if the value type is expensive to copy, but can be left
+ // in the Result, simply assign to a reference:
+ //
+ // T& value = result.value(); // or `const T&`
+ //
+ // Otherwise, if the value type supports an efficient move, it can be
+ // used as follows:
+ //
+ // T value = std::move(result).value();
+ //
+ // The `std::move` on result instead of on the whole expression enables
+ // warnings about possible uses of the result object after the move.
+ constexpr const T& value() const& PW_ATTRIBUTE_LIFETIME_BOUND;
+ constexpr T& value() & PW_ATTRIBUTE_LIFETIME_BOUND;
+ constexpr const T&& value() const&& PW_ATTRIBUTE_LIFETIME_BOUND;
+ constexpr T&& value() && PW_ATTRIBUTE_LIFETIME_BOUND;
+
+ // Result<T>:: operator*()
+ //
+ // Returns a reference to the current value.
+ //
+ // REQUIRES: `this->ok() == true`, otherwise the behavior is undefined.
+ //
+ // Use `this->ok()` to verify that there is a current value within the
+ // `Result<T>`. Alternatively, see the `value()` member function for a
+ // similar API that guarantees crashing or throwing an exception if there is
+ // no current value.
+ constexpr const T& operator*() const& PW_ATTRIBUTE_LIFETIME_BOUND;
+ constexpr T& operator*() & PW_ATTRIBUTE_LIFETIME_BOUND;
+ constexpr const T&& operator*() const&& PW_ATTRIBUTE_LIFETIME_BOUND;
+ constexpr T&& operator*() && PW_ATTRIBUTE_LIFETIME_BOUND;
+
+ // Result<T>::operator->()
+ //
+ // Returns a pointer to the current value.
+ //
+ // REQUIRES: `this->ok() == true`, otherwise the behavior is undefined.
+ //
+ // Use `this->ok()` to verify that there is a current value.
+ constexpr const T* operator->() const PW_ATTRIBUTE_LIFETIME_BOUND;
+ constexpr T* operator->() PW_ATTRIBUTE_LIFETIME_BOUND;
+
+ // Result<T>::value_or()
+ //
+ // Returns the current value if `this->ok() == true`. Otherwise constructs a
+ // value using the provided `default_value`.
+ //
+ // Unlike `value`, this function returns by value, copying the current value
+ // if necessary. If the value type supports an efficient move, it can be used
+ // as follows:
+ //
+ // T value = std::move(result).value_or(def);
+ //
+ // Unlike with `value`, calling `std::move()` on the result of `value_or` will
+ // still trigger a copy.
template <typename U>
- constexpr T value_or(U&& default_value) const& {
+ constexpr T value_or(U&& default_value) const&;
+ template <typename U>
+ constexpr T value_or(U&& default_value) &&;
+
+ // Result<T>::IgnoreError()
+ //
+ // Ignores any errors. This method does nothing except potentially suppress
+ // complaints from any tools that are checking that errors are not dropped on
+ // the floor.
+ constexpr void IgnoreError() const;
+
+ // Result<T>::emplace()
+ //
+ // Reconstructs the inner value T in-place using the provided args, using the
+ // T(args...) constructor. Returns reference to the reconstructed `T`.
+ template <typename... Args>
+ T& emplace(Args&&... args) {
if (ok()) {
- PW_MODIFY_DIAGNOSTICS_PUSH();
- // GCC 10 emits -Wmaybe-uninitialized warnings about value_.
- PW_MODIFY_DIAGNOSTIC_GCC(ignored, "-Wmaybe-uninitialized");
- return value_;
- PW_MODIFY_DIAGNOSTICS_POP();
+ this->Clear();
+ this->MakeValue(std::forward<Args>(args)...);
+ } else {
+ this->MakeValue(std::forward<Args>(args)...);
+ this->status_ = OkStatus();
}
- return std::forward<U>(default_value);
+ return this->data_;
}
- template <typename U>
- constexpr T value_or(U&& default_value) && {
+ template <
+ typename U,
+ typename... Args,
+ std::enable_if_t<
+ std::is_constructible<T, std::initializer_list<U>&, Args&&...>::value,
+ int> = 0>
+ T& emplace(std::initializer_list<U> ilist, Args&&... args) {
if (ok()) {
- return std::move(value_);
+ this->Clear();
+ this->MakeValue(ilist, std::forward<Args>(args)...);
+ } else {
+ this->MakeValue(ilist, std::forward<Args>(args)...);
+ this->status_ = OkStatus();
}
- return std::forward<U>(default_value);
+ return this->data_;
}
- // Ignores any errors. This method does nothing except potentially suppress
- // complaints from any tools that are checking that errors are not dropped on
- // the floor.
- constexpr void IgnoreError() const {}
-
private:
- struct Unused {};
+ using Base::Assign;
+ template <typename U>
+ constexpr void Assign(const Result<U>& other);
+ template <typename U>
+ constexpr void Assign(Result<U>&& other);
+};
- union {
- T value_;
+// operator==()
+//
+// This operator checks the equality of two `Result<T>` objects.
+template <typename T>
+constexpr bool operator==(const Result<T>& lhs, const Result<T>& rhs) {
+ if (lhs.ok() && rhs.ok()) {
+ return *lhs == *rhs;
+ }
+ return lhs.status() == rhs.status();
+}
- // Ensure that there is always a trivial constructor for the union.
- Unused unused_;
- };
- Status status_;
-};
+// operator!=()
+//
+// This operator checks the inequality of two `Result<T>` objects.
+template <typename T>
+constexpr bool operator!=(const Result<T>& lhs, const Result<T>& rhs) {
+ return !(lhs == rhs);
+}
+
+//------------------------------------------------------------------------------
+// Implementation details for Result<T>
+//------------------------------------------------------------------------------
+
+template <typename T>
+constexpr Result<T>::Result() : Base(Status::Unknown()) {}
+
+template <typename T>
+template <typename U>
+constexpr inline void Result<T>::Assign(const Result<U>& other) {
+ if (other.ok()) {
+ this->Assign(*other);
+ } else {
+ this->AssignStatus(other.status());
+ }
+}
+
+template <typename T>
+template <typename U>
+constexpr inline void Result<T>::Assign(Result<U>&& other) {
+ if (other.ok()) {
+ this->Assign(*std::move(other));
+ } else {
+ this->AssignStatus(std::move(other).status());
+ }
+}
+template <typename T>
+template <typename... Args>
+constexpr Result<T>::Result(std::in_place_t, Args&&... args)
+ : Base(std::in_place, std::forward<Args>(args)...) {}
+
+template <typename T>
+template <typename U, typename... Args>
+constexpr Result<T>::Result(std::in_place_t,
+ std::initializer_list<U> ilist,
+ Args&&... args)
+ : Base(std::in_place, ilist, std::forward<Args>(args)...) {}
+
+template <typename T>
+constexpr const Status& Result<T>::status() const& {
+ return this->status_;
+}
+template <typename T>
+constexpr Status Result<T>::status() && {
+ return ok() ? OkStatus() : std::move(this->status_);
+}
+
+template <typename T>
+constexpr const T& Result<T>::value() const& {
+ PW_ASSERT(this->status_.ok());
+ return this->data_;
+}
+
+template <typename T>
+constexpr T& Result<T>::value() & {
+ PW_ASSERT(this->status_.ok());
+ return this->data_;
+}
+
+template <typename T>
+constexpr const T&& Result<T>::value() const&& {
+ PW_ASSERT(this->status_.ok());
+ return std::move(this->data_);
+}
+
+template <typename T>
+constexpr T&& Result<T>::value() && {
+ PW_ASSERT(this->status_.ok());
+ return std::move(this->data_);
+}
+
+template <typename T>
+constexpr const T& Result<T>::operator*() const& {
+ PW_ASSERT(this->status_.ok());
+ return this->data_;
+}
+
+template <typename T>
+constexpr T& Result<T>::operator*() & {
+ PW_ASSERT(this->status_.ok());
+ return this->data_;
+}
+
+template <typename T>
+constexpr const T&& Result<T>::operator*() const&& {
+ PW_ASSERT(this->status_.ok());
+ return std::move(this->data_);
+}
+
+template <typename T>
+constexpr T&& Result<T>::operator*() && {
+ PW_ASSERT(this->status_.ok());
+ return std::move(this->data_);
+}
+
+template <typename T>
+constexpr const T* Result<T>::operator->() const {
+ PW_ASSERT(this->status_.ok());
+ return &this->data_;
+}
+
+template <typename T>
+constexpr T* Result<T>::operator->() {
+ PW_ASSERT(this->status_.ok());
+ return &this->data_;
+}
+
+template <typename T>
+template <typename U>
+constexpr T Result<T>::value_or(U&& default_value) const& {
+ if (ok()) {
+ return this->data_;
+ }
+ return std::forward<U>(default_value);
+}
+
+template <typename T>
+template <typename U>
+constexpr T Result<T>::value_or(U&& default_value) && {
+ if (ok()) {
+ return std::move(this->data_);
+ }
+ return std::forward<U>(default_value);
+}
+
+template <typename T>
+constexpr void Result<T>::IgnoreError() const {
+ // no-op
+}
namespace internal {
diff --git a/pw_result/result_test.cc b/pw_result/result_test.cc
index 93f1200e1..b65a3cf70 100644
--- a/pw_result/result_test.cc
+++ b/pw_result/result_test.cc
@@ -1,4 +1,4 @@
-// Copyright 2020 The Pigweed Authors
+// Copyright 2022 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
@@ -12,6 +12,14 @@
// License for the specific language governing permissions and limitations under
// the License.
+// pw::Result is derived from absl::StatusOr, but has some small differences.
+// This test covers basic pw::Result functionality and as well as the features
+// supported by pw::Result that are not supported by absl::StatusOr (constexpr
+// use in particular).
+//
+// The complete, thorough pw::Result tests are in statusor_test.cc, which is
+// derived from Abseil's tests for absl::StatusOr.
+
#include "pw_result/result.h"
#include "gtest/gtest.h"
@@ -46,7 +54,23 @@ TEST(Result, Deref) {
constexpr bool False() { return false; };
};
- auto tester = Result(Tester());
+ auto tester = Result<Tester>(Tester());
+ EXPECT_TRUE(tester.ok());
+ EXPECT_TRUE(tester->True());
+ EXPECT_FALSE(tester->False());
+ EXPECT_TRUE((*tester).True());
+ EXPECT_FALSE((*tester).False());
+ EXPECT_EQ(tester.value().True(), tester->True());
+ EXPECT_EQ(tester.value().False(), tester->False());
+}
+
+TEST(Result, ConstDeref) {
+ struct Tester {
+ constexpr bool True() const { return true; };
+ constexpr bool False() const { return false; };
+ };
+
+ const auto tester = Result<Tester>(Tester());
EXPECT_TRUE(tester.ok());
EXPECT_TRUE(tester->True());
EXPECT_FALSE(tester->False());
@@ -112,5 +136,49 @@ TEST(Result, TryAssign) {
EXPECT_EQ(TryResultAssign(true), OkStatus());
}
+struct Value {
+ int number;
+};
+
+TEST(Result, ConstexprOk) {
+ static constexpr pw::Result<Value> kResult(Value{123});
+
+ static_assert(kResult.status() == pw::OkStatus());
+ static_assert(kResult.ok());
+
+ static_assert((*kResult).number == 123);
+ static_assert((*std::move(kResult)).number == 123);
+
+ static_assert(kResult->number == 123);
+ static_assert(std::move(kResult)->number == 123);
+
+ static_assert(kResult.value().number == 123);
+ static_assert(std::move(kResult).value().number == 123);
+
+ static_assert(kResult.value_or(Value{99}).number == 123);
+ static_assert(std::move(kResult).value_or(Value{99}).number == 123);
+}
+
+TEST(Result, ConstexprNotOk) {
+ static constexpr pw::Result<Value> kResult(pw::Status::NotFound());
+
+ static_assert(kResult.status() == pw::Status::NotFound());
+ static_assert(!kResult.ok());
+
+ static_assert(kResult.value_or(Value{99}).number == 99);
+ static_assert(std::move(kResult).value_or(Value{99}).number == 99);
+}
+
+TEST(Result, ConstexprNotOkCopy) {
+ static constexpr pw::Result<Value> kResult(pw::Status::NotFound());
+ constexpr pw::Result<Value> kResultCopy(kResult);
+
+ static_assert(kResultCopy.status() == pw::Status::NotFound());
+ static_assert(!kResultCopy.ok());
+
+ static_assert(kResultCopy.value_or(Value{99}).number == 99);
+ static_assert(std::move(kResultCopy).value_or(Value{99}).number == 99);
+}
+
} // namespace
} // namespace pw
diff --git a/pw_result/statusor_test.cc b/pw_result/statusor_test.cc
new file mode 100644
index 000000000..62dab32a9
--- /dev/null
+++ b/pw_result/statusor_test.cc
@@ -0,0 +1,1735 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// These tests are a modified version of the tests for absl::StatusOr:
+// inclusive-language: disable
+// https://github.com/abseil/abseil-cpp/blob/master/absl/status/statusor_test.cc
+// inclusive-language: enable
+
+#include <any>
+#include <array>
+#include <initializer_list>
+#include <map>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <type_traits>
+#include <utility>
+#include <variant>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "pw_result/result.h"
+
+namespace {
+
+#define EXPECT_OK(expression) EXPECT_EQ(::pw::OkStatus(), expression)
+#define ASSERT_OK(expression) ASSERT_EQ(::pw::OkStatus(), expression)
+
+struct CopyDetector {
+ CopyDetector() = default;
+ explicit CopyDetector(int xx) : x(xx) {}
+ CopyDetector(CopyDetector&& d) noexcept
+ : x(d.x), copied(false), moved(true) {}
+ CopyDetector(const CopyDetector& d) : x(d.x), copied(true), moved(false) {}
+ CopyDetector& operator=(const CopyDetector& c) {
+ x = c.x;
+ copied = true;
+ moved = false;
+ return *this;
+ }
+ CopyDetector& operator=(CopyDetector&& c) noexcept {
+ x = c.x;
+ copied = false;
+ moved = true;
+ return *this;
+ }
+ int x = 0;
+ bool copied = false;
+ bool moved = false;
+};
+
+// Define custom macros instead of the CopyDetectorHas matcher.
+#define EXPECT_COPY_DETECTOR_HAS( \
+ value, expected_x, expected_moved, expected_copied) \
+ EXPECT_EQ(value.x, expected_x); \
+ EXPECT_EQ(value.moved, expected_moved); \
+ EXPECT_EQ(value.copied, expected_copied)
+
+#define EXPECT_OK_AND_COPY_DETECTOR_HAS( \
+ statusor_expr, expected_x, expected_moved, expected_copied) \
+ do { \
+ auto&& temp_status_or = statusor_expr; \
+ ASSERT_EQ(::pw::OkStatus(), temp_status_or.status()); \
+ EXPECT_COPY_DETECTOR_HAS( \
+ temp_status_or.value(), expected_x, expected_moved, expected_copied); \
+ } while (0)
+
+#define EXPECT_OK_AND_ANY_WITH_COPY_DETECTOR_HAS( \
+ statusor_expr, expected_x, expected_moved, expected_copied) \
+ do { \
+ auto&& temp_status_or = statusor_expr; \
+ ASSERT_EQ(::pw::OkStatus(), temp_status_or.status()); \
+ const auto& temp_any_value = \
+ std::any_cast<const CopyDetector&>(temp_status_or.value()); \
+ EXPECT_COPY_DETECTOR_HAS( \
+ temp_any_value, expected_x, expected_moved, expected_copied); \
+ } while (0)
+
+class Base1 {
+ public:
+ virtual ~Base1() {}
+ int pad;
+};
+
+class Base2 {
+ public:
+ virtual ~Base2() {}
+ int yetotherpad;
+};
+
+class Derived : public Base1, public Base2 {
+ public:
+ virtual ~Derived() {}
+ int evenmorepad;
+};
+
+class CopyNoAssign {
+ public:
+ explicit CopyNoAssign(int value) : foo(value) {}
+ CopyNoAssign(const CopyNoAssign& other) : foo(other.foo) {}
+ const CopyNoAssign& operator=(const CopyNoAssign&) = delete;
+
+ int foo;
+};
+
+pw::Result<std::unique_ptr<int>> ReturnUniquePtr() {
+ // Uses implicit constructor from T&&
+ return std::make_unique<int>(0);
+}
+
+TEST(Result, ElementType) {
+ static_assert(std::is_same<pw::Result<int>::value_type, int>(), "");
+ static_assert(std::is_same<pw::Result<char>::value_type, char>(), "");
+}
+
+TEST(Result, TestMoveOnlyInitialization) {
+ pw::Result<std::unique_ptr<int>> thing(ReturnUniquePtr());
+ ASSERT_TRUE(thing.ok());
+ EXPECT_EQ(0, **thing);
+ int* previous = thing->get();
+
+ thing = ReturnUniquePtr();
+ EXPECT_TRUE(thing.ok());
+ EXPECT_EQ(0, **thing);
+ EXPECT_NE(previous, thing->get());
+}
+
+TEST(Result, TestMoveOnlyValueExtraction) {
+ pw::Result<std::unique_ptr<int>> thing(ReturnUniquePtr());
+ ASSERT_TRUE(thing.ok());
+ std::unique_ptr<int> ptr = *std::move(thing);
+ EXPECT_EQ(0, *ptr);
+
+ thing = std::move(ptr);
+ ptr = std::move(*thing);
+ EXPECT_EQ(0, *ptr);
+}
+
+TEST(Result, TestMoveOnlyInitializationFromTemporaryByValueOrDie) {
+ std::unique_ptr<int> ptr(*ReturnUniquePtr());
+ EXPECT_EQ(0, *ptr);
+}
+
+TEST(Result, TestValueOrDieOverloadForConstTemporary) {
+ static_assert(
+ std::is_same<const int&&,
+ decltype(std::declval<const pw::Result<int>&&>().value())>(),
+ "value() for const temporaries should return const T&&");
+}
+
+TEST(Result, TestMoveOnlyConversion) {
+ pw::Result<std::unique_ptr<const int>> const_thing(ReturnUniquePtr());
+ EXPECT_TRUE(const_thing.ok());
+ EXPECT_EQ(0, **const_thing);
+
+ // Test rvalue converting assignment
+ const int* const_previous = const_thing->get();
+ const_thing = ReturnUniquePtr();
+ EXPECT_TRUE(const_thing.ok());
+ EXPECT_EQ(0, **const_thing);
+ EXPECT_NE(const_previous, const_thing->get());
+}
+
+TEST(Result, TestMoveOnlyVector) {
+ // Check that pw::Result<MoveOnly> works in vector.
+ std::vector<pw::Result<std::unique_ptr<int>>> vec;
+ vec.push_back(ReturnUniquePtr());
+ vec.resize(2);
+ auto another_vec = std::move(vec);
+ EXPECT_EQ(0, **another_vec[0]);
+ EXPECT_EQ(pw::Status::Unknown(), another_vec[1].status());
+}
+
+TEST(Result, TestDefaultCtor) {
+ pw::Result<int> thing;
+ EXPECT_FALSE(thing.ok());
+ EXPECT_EQ(thing.status().code(), pw::Status::Unknown().code());
+}
+
+TEST(Result, StatusCtorForwards) {
+ pw::Status status = pw::Status::Internal();
+
+ EXPECT_EQ(pw::Result<int>(status).status(), pw::Status::Internal());
+
+ EXPECT_EQ(pw::Result<int>(std::move(status)).status(),
+ pw::Status::Internal());
+}
+
+#define EXPECT_DEATH_OR_THROW(statement, status) \
+ EXPECT_DEATH_IF_SUPPORTED(statement, status.str());
+
+TEST(ResultDeathTest, TestDefaultCtorValue) {
+ pw::Result<int> thing;
+ EXPECT_DEATH_OR_THROW(thing.value(), pw::Status::Unknown());
+ const pw::Result<int> thing2;
+ EXPECT_DEATH_OR_THROW(thing2.value(), pw::Status::Unknown());
+}
+
+TEST(ResultDeathTest, TestValueNotOk) {
+ pw::Result<int> thing(pw::Status::Cancelled());
+ EXPECT_DEATH_OR_THROW(thing.value(), pw::Status::Cancelled());
+}
+
+TEST(ResultDeathTest, TestValueNotOkConst) {
+ const pw::Result<int> thing(pw::Status::Unknown());
+ EXPECT_DEATH_OR_THROW(thing.value(), pw::Status::Unknown());
+}
+
+TEST(ResultDeathTest, TestPointerDefaultCtorValue) {
+ pw::Result<int*> thing;
+ EXPECT_DEATH_OR_THROW(thing.value(), pw::Status::Unknown());
+}
+
+TEST(ResultDeathTest, TestPointerValueNotOk) {
+ pw::Result<int*> thing(pw::Status::Cancelled());
+ EXPECT_DEATH_OR_THROW(thing.value(), pw::Status::Cancelled());
+}
+
+TEST(ResultDeathTest, TestPointerValueNotOkConst) {
+ const pw::Result<int*> thing(pw::Status::Cancelled());
+ EXPECT_DEATH_OR_THROW(thing.value(), pw::Status::Cancelled());
+}
+
+#if GTEST_HAS_DEATH_TEST
+TEST(ResultDeathTest, TestStatusCtorStatusOk) {
+ EXPECT_DEBUG_DEATH(
+ {
+ // This will DCHECK
+ pw::Result<int> thing(pw::OkStatus());
+ // In optimized mode, we are actually going to get error::INTERNAL for
+ // status here, rather than crashing, so check that.
+ EXPECT_FALSE(thing.ok());
+ EXPECT_EQ(thing.status().code(), pw::Status::Internal().code());
+ },
+ "An OK status is not a valid constructor argument");
+}
+
+TEST(ResultDeathTest, TestPointerStatusCtorStatusOk) {
+ EXPECT_DEBUG_DEATH(
+ {
+ pw::Result<int*> thing(pw::OkStatus());
+ // In optimized mode, we are actually going to get error::INTERNAL for
+ // status here, rather than crashing, so check that.
+ EXPECT_FALSE(thing.ok());
+ EXPECT_EQ(thing.status().code(), pw::Status::Internal().code());
+ },
+ "An OK status is not a valid constructor argument");
+}
+#endif
+
+TEST(Result, ValueAccessor) {
+ const int kIntValue = 110;
+ {
+ pw::Result<int> status_or(kIntValue);
+ EXPECT_EQ(kIntValue, status_or.value());
+ EXPECT_EQ(kIntValue, std::move(status_or).value());
+ }
+ {
+ pw::Result<CopyDetector> status_or(kIntValue);
+ EXPECT_OK_AND_COPY_DETECTOR_HAS(status_or, kIntValue, false, false);
+ CopyDetector copy_detector = status_or.value();
+ EXPECT_COPY_DETECTOR_HAS(copy_detector, kIntValue, false, true);
+ copy_detector = std::move(status_or).value();
+ EXPECT_COPY_DETECTOR_HAS(copy_detector, kIntValue, true, false);
+ }
+}
+
+TEST(Result, BadValueAccess) {
+ const pw::Status kError = pw::Status::Cancelled();
+ pw::Result<int> status_or(kError);
+ EXPECT_DEATH_OR_THROW(status_or.value(), kError);
+}
+
+TEST(Result, TestStatusCtor) {
+ pw::Result<int> thing(pw::Status::Cancelled());
+ EXPECT_FALSE(thing.ok());
+ EXPECT_EQ(thing.status().code(), pw::Status::Cancelled().code());
+}
+
+TEST(Result, TestValueCtor) {
+ const int kI = 4;
+ const pw::Result<int> thing(kI);
+ EXPECT_TRUE(thing.ok());
+ EXPECT_EQ(kI, *thing);
+}
+
+struct Foo {
+ const int x;
+ explicit Foo(int y) : x(y) {}
+};
+
+TEST(Result, InPlaceConstruction) {
+ pw::Result<Foo> status_or(std::in_place, 10);
+ ASSERT_TRUE(status_or.ok());
+ EXPECT_EQ(status_or->x, 10);
+}
+
+struct InPlaceHelper {
+ InPlaceHelper(std::initializer_list<int> xs, std::unique_ptr<int> yy)
+ : x(xs), y(std::move(yy)) {}
+ const std::vector<int> x;
+ std::unique_ptr<int> y;
+};
+
+TEST(Result, InPlaceInitListConstruction) {
+ pw::Result<InPlaceHelper> status_or(
+ std::in_place, {10, 11, 12}, std::make_unique<int>(13));
+ ASSERT_TRUE(status_or.ok());
+ ASSERT_EQ(status_or->x.size(), 3u);
+ EXPECT_EQ(status_or->x[0], 10);
+ EXPECT_EQ(status_or->x[1], 11);
+ EXPECT_EQ(status_or->x[2], 12);
+ EXPECT_EQ(*(status_or->y), 13);
+}
+
+TEST(Result, Emplace) {
+ pw::Result<Foo> status_or_foo(10);
+ status_or_foo.emplace(20);
+
+ ASSERT_TRUE(status_or_foo.ok());
+ EXPECT_EQ(status_or_foo->x, 20);
+
+ status_or_foo = pw::Status::InvalidArgument();
+ EXPECT_FALSE(status_or_foo.ok());
+ EXPECT_EQ(status_or_foo.status().code(),
+ pw::Status::InvalidArgument().code());
+ status_or_foo.emplace(20);
+ ASSERT_TRUE(status_or_foo.ok());
+ EXPECT_EQ(status_or_foo->x, 20);
+}
+
+TEST(Result, EmplaceInitializerList) {
+ pw::Result<InPlaceHelper> status_or(
+ std::in_place, {10, 11, 12}, std::make_unique<int>(13));
+ status_or.emplace({1, 2, 3}, std::make_unique<int>(4));
+ ASSERT_TRUE(status_or.ok());
+ ASSERT_EQ(status_or->x.size(), 3u);
+ EXPECT_EQ(status_or->x[0], 1);
+ EXPECT_EQ(status_or->x[1], 2);
+ EXPECT_EQ(status_or->x[2], 3);
+ EXPECT_EQ(*(status_or->y), 4);
+
+ status_or = pw::Status::InvalidArgument();
+ EXPECT_FALSE(status_or.ok());
+ EXPECT_EQ(status_or.status().code(), pw::Status::InvalidArgument().code());
+
+ status_or.emplace({1, 2, 3}, std::make_unique<int>(4));
+ ASSERT_TRUE(status_or.ok());
+ ASSERT_EQ(status_or->x.size(), 3u);
+ EXPECT_EQ(status_or->x[0], 1);
+ EXPECT_EQ(status_or->x[1], 2);
+ EXPECT_EQ(status_or->x[2], 3);
+ EXPECT_EQ(*(status_or->y), 4);
+}
+
+TEST(Result, TestCopyCtorStatusOk) {
+ const int kI = 4;
+ const pw::Result<int> original(kI);
+ const pw::Result<int> copy(original);
+ EXPECT_OK(copy.status());
+ EXPECT_EQ(*original, *copy);
+}
+
+TEST(Result, TestCopyCtorStatusNotOk) {
+ pw::Result<int> original(pw::Status::Cancelled());
+ pw::Result<int> copy(original);
+ EXPECT_EQ(copy.status().code(), pw::Status::Cancelled().code());
+}
+
+TEST(Result, TestCopyCtorNonAssignable) {
+ const int kI = 4;
+ CopyNoAssign value(kI);
+ pw::Result<CopyNoAssign> original(value);
+ pw::Result<CopyNoAssign> copy(original);
+ EXPECT_OK(copy.status());
+ EXPECT_EQ(original->foo, copy->foo);
+}
+
+TEST(Result, TestCopyCtorStatusOKConverting) {
+ const int kI = 4;
+ pw::Result<int> original(kI);
+ pw::Result<double> copy(original);
+ EXPECT_OK(copy.status());
+ EXPECT_EQ(*original, *copy);
+}
+
+TEST(Result, TestCopyCtorStatusNotOkConverting) {
+ pw::Result<int> original(pw::Status::Cancelled());
+ pw::Result<double> copy(original);
+ EXPECT_EQ(copy.status(), original.status());
+}
+
+TEST(Result, TestAssignmentStatusOk) {
+  // Copy assignment
+ {
+ const auto p = std::make_shared<int>(17);
+ pw::Result<std::shared_ptr<int>> source(p);
+
+ pw::Result<std::shared_ptr<int>> target;
+ target = source;
+
+ ASSERT_TRUE(target.ok());
+ EXPECT_OK(target.status());
+ EXPECT_EQ(p, *target);
+
+ ASSERT_TRUE(source.ok());
+ EXPECT_OK(source.status());
+ EXPECT_EQ(p, *source);
+ }
+
+  // Move assignment
+ {
+ const auto p = std::make_shared<int>(17);
+ pw::Result<std::shared_ptr<int>> source(p);
+
+ pw::Result<std::shared_ptr<int>> target;
+ target = std::move(source);
+
+ ASSERT_TRUE(target.ok());
+ EXPECT_OK(target.status());
+ EXPECT_EQ(p, *target);
+
+ ASSERT_TRUE(source.ok()); // NOLINT(bugprone-use-after-move)
+ EXPECT_OK(source.status());
+ EXPECT_EQ(nullptr, *source);
+ }
+}
+
+TEST(Result, TestAssignmentStatusNotOk) {
+ // Copy assignment
+ {
+ const pw::Status expected = pw::Status::Cancelled();
+ pw::Result<int> source(expected);
+
+ pw::Result<int> target;
+ target = source;
+
+ EXPECT_FALSE(target.ok());
+ EXPECT_EQ(expected, target.status());
+
+ EXPECT_FALSE(source.ok());
+ EXPECT_EQ(expected, source.status());
+ }
+
+ // Move assignment
+ {
+ const pw::Status expected = pw::Status::Cancelled();
+ pw::Result<int> source(expected);
+
+ pw::Result<int> target;
+ target = std::move(source);
+
+ EXPECT_FALSE(target.ok());
+ EXPECT_EQ(expected, target.status());
+
+ EXPECT_FALSE(source.ok()); // NOLINT(bugprone-use-after-move)
+ // absl::Status sets itself to INTERNAL when moved, but pw::Status does not.
+ // EXPECT_EQ(source.status().code(), pw::Status::Internal().code());
+ }
+}
+
+TEST(Result, TestAssignmentStatusOKConverting) {
+ // Copy assignment
+ {
+ const int kI = 4;
+ pw::Result<int> source(kI);
+
+ pw::Result<double> target;
+ target = source;
+
+ ASSERT_TRUE(target.ok());
+ EXPECT_OK(target.status());
+ EXPECT_EQ(kI, *target);
+
+ ASSERT_TRUE(source.ok());
+ EXPECT_OK(source.status());
+ EXPECT_EQ(kI, *source);
+ }
+
+ // Move assignment
+ {
+ const auto p = new int(17);
+ pw::Result<std::unique_ptr<int>> source(p);
+
+ pw::Result<std::shared_ptr<int>> target;
+ target = std::move(source);
+
+ ASSERT_TRUE(target.ok());
+ EXPECT_OK(target.status());
+ EXPECT_EQ(p, target->get());
+
+ ASSERT_TRUE(source.ok()); // NOLINT(bugprone-use-after-move)
+ EXPECT_OK(source.status());
+ EXPECT_EQ(nullptr, source->get());
+ }
+}
+
+// implicit_cast
+template <class T>
+struct type_identity {
+ using type = T;
+};
+
+template <typename To>
+constexpr To implicit_cast(typename type_identity<To>::type to) {
+ return to;
+}
+
+struct A {
+ int x;
+};
+
+struct ImplicitConstructibleFromA {
+ int x;
+ bool moved;
+ ImplicitConstructibleFromA(const A& a) // NOLINT
+ : x(a.x), moved(false) {}
+ ImplicitConstructibleFromA(A&& a) // NOLINT
+ : x(a.x), moved(true) {}
+};
+
+TEST(Result, ImplicitConvertingConstructor) {
+ auto status_or = implicit_cast<pw::Result<ImplicitConstructibleFromA>>(
+ pw::Result<A>(A{11}));
+ ASSERT_OK(status_or.status());
+ EXPECT_EQ(status_or->x, 11);
+ EXPECT_TRUE(status_or->moved);
+
+ pw::Result<A> a(A{12});
+ auto status_or_2 = implicit_cast<pw::Result<ImplicitConstructibleFromA>>(a);
+ ASSERT_OK(status_or_2.status());
+ EXPECT_EQ(status_or_2->x, 12);
+ EXPECT_FALSE(status_or_2->moved);
+}
+
+struct ExplicitConstructibleFromA {
+ int x;
+ bool moved;
+ explicit ExplicitConstructibleFromA(const A& a) : x(a.x), moved(false) {}
+ explicit ExplicitConstructibleFromA(A&& a) : x(a.x), moved(true) {}
+};
+
+TEST(Result, ExplicitConvertingConstructor) {
+ EXPECT_FALSE(
+ (std::is_convertible<const pw::Result<A>&,
+ pw::Result<ExplicitConstructibleFromA>>::value));
+ EXPECT_FALSE(
+ (std::is_convertible<pw::Result<A>&&,
+ pw::Result<ExplicitConstructibleFromA>>::value));
+ auto a1 = pw::Result<ExplicitConstructibleFromA>(pw::Result<A>(A{11}));
+ ASSERT_OK(a1.status());
+ EXPECT_EQ(a1->x, 11);
+ EXPECT_TRUE(a1->moved);
+
+ pw::Result<A> a(A{12});
+ auto a2 = pw::Result<ExplicitConstructibleFromA>(a);
+ ASSERT_OK(a2.status());
+ EXPECT_EQ(a2->x, 12);
+ EXPECT_FALSE(a2->moved);
+}
+
+struct ImplicitConstructibleFromBool {
+ ImplicitConstructibleFromBool(bool y) : x(y) {} // NOLINT
+ bool x = false;
+};
+
+struct ConvertibleToBool {
+ explicit ConvertibleToBool(bool y) : x(y) {}
+ operator bool() const { return x; } // NOLINT
+ bool x = false;
+};
+
+TEST(Result, ImplicitBooleanConstructionWithImplicitCasts) {
+ auto a = pw::Result<bool>(pw::Result<ConvertibleToBool>(true));
+ ASSERT_OK(a.status());
+ EXPECT_TRUE(*a);
+
+ auto b = pw::Result<bool>(pw::Result<ConvertibleToBool>(false));
+ ASSERT_OK(b.status());
+ EXPECT_FALSE(*b);
+
+ auto c = pw::Result<ImplicitConstructibleFromBool>(pw::Result<bool>(false));
+ ASSERT_OK(c.status());
+ EXPECT_EQ(c->x, false);
+ EXPECT_FALSE(
+ (std::is_convertible<pw::Result<ConvertibleToBool>,
+ pw::Result<ImplicitConstructibleFromBool>>::value));
+}
+
+TEST(Result, BooleanConstructionWithImplicitCasts) {
+ auto a = pw::Result<bool>(pw::Result<ConvertibleToBool>(true));
+ ASSERT_OK(a.status());
+ EXPECT_TRUE(*a);
+
+ auto b = pw::Result<bool>(pw::Result<ConvertibleToBool>(false));
+ ASSERT_OK(b.status());
+ EXPECT_FALSE(*b);
+
+ auto c = pw::Result<ImplicitConstructibleFromBool>{pw::Result<bool>(false)};
+ ASSERT_OK(c.status());
+ EXPECT_FALSE(c->x);
+
+ auto d = pw::Result<ImplicitConstructibleFromBool>{
+ pw::Result<bool>(pw::Status::InvalidArgument())};
+ EXPECT_FALSE(d.ok());
+
+ auto e = pw::Result<ImplicitConstructibleFromBool>{
+ pw::Result<ConvertibleToBool>(ConvertibleToBool{false})};
+ ASSERT_OK(e.status());
+ EXPECT_FALSE(e->x);
+
+ auto f = pw::Result<ImplicitConstructibleFromBool>{
+ pw::Result<ConvertibleToBool>(pw::Status::InvalidArgument())};
+ EXPECT_FALSE(f.ok());
+}
+
+TEST(Result, ConstImplicitCast) {
+ auto a = implicit_cast<pw::Result<bool>>(pw::Result<const bool>(true));
+ ASSERT_OK(a.status());
+ EXPECT_TRUE(*a);
+ auto b = implicit_cast<pw::Result<bool>>(pw::Result<const bool>(false));
+ ASSERT_OK(b.status());
+ EXPECT_FALSE(*b);
+ auto c = implicit_cast<pw::Result<const bool>>(pw::Result<bool>(true));
+ ASSERT_OK(c.status());
+ EXPECT_TRUE(*c);
+ auto d = implicit_cast<pw::Result<const bool>>(pw::Result<bool>(false));
+ ASSERT_OK(d.status());
+ EXPECT_FALSE(*d);
+ auto e = implicit_cast<pw::Result<const std::string>>(
+ pw::Result<std::string>("foo"));
+ ASSERT_OK(e.status());
+ EXPECT_EQ(*e, "foo");
+ auto f = implicit_cast<pw::Result<std::string>>(
+ pw::Result<const std::string>("foo"));
+ ASSERT_OK(f.status());
+ EXPECT_EQ(*f, "foo");
+ auto g = implicit_cast<pw::Result<std::shared_ptr<const std::string>>>(
+ pw::Result<std::shared_ptr<std::string>>(
+ std::make_shared<std::string>("foo")));
+ ASSERT_OK(g.status());
+ EXPECT_EQ(*(*g), "foo");
+}
+
+TEST(Result, ConstExplicitConstruction) {
+ auto a = pw::Result<bool>(pw::Result<const bool>(true));
+ ASSERT_OK(a.status());
+ EXPECT_TRUE(*a);
+ auto b = pw::Result<bool>(pw::Result<const bool>(false));
+ ASSERT_OK(b.status());
+ EXPECT_FALSE(*b);
+ auto c = pw::Result<const bool>(pw::Result<bool>(true));
+ ASSERT_OK(c.status());
+ EXPECT_TRUE(*c);
+ auto d = pw::Result<const bool>(pw::Result<bool>(false));
+ ASSERT_OK(d.status());
+ EXPECT_FALSE(*d);
+}
+
+struct ExplicitConstructibleFromInt {
+ int x;
+ explicit ExplicitConstructibleFromInt(int y) : x(y) {}
+};
+
+TEST(Result, ExplicitConstruction) {
+ auto a = pw::Result<ExplicitConstructibleFromInt>(10);
+ ASSERT_OK(a.status());
+ EXPECT_EQ(a->x, 10);
+}
+
+TEST(Result, ImplicitConstruction) {
+ // Check implicit casting works.
+ auto status_or =
+ implicit_cast<pw::Result<std::variant<int, std::string>>>(10);
+ ASSERT_OK(status_or.status());
+ EXPECT_EQ(std::get<int>(*status_or), 10);
+}
+
+TEST(Result, ImplicitConstructionFromInitliazerList) {
+ // Note: dropping the explicit std::initializer_list<int> is not supported
+ // by pw::Result or std::optional.
+ auto status_or = implicit_cast<pw::Result<std::vector<int>>>({{10, 20, 30}});
+ ASSERT_OK(status_or.status());
+ ASSERT_EQ(status_or->size(), 3u);
+ EXPECT_EQ((*status_or)[0], 10);
+ EXPECT_EQ((*status_or)[1], 20);
+ EXPECT_EQ((*status_or)[2], 30);
+}
+
+TEST(Result, UniquePtrImplicitConstruction) {
+ auto status_or = implicit_cast<pw::Result<std::unique_ptr<Base1>>>(
+ std::make_unique<Derived>());
+ ASSERT_OK(status_or.status());
+ EXPECT_NE(status_or->get(), nullptr);
+}
+
+TEST(Result, NestedResultCopyAndMoveConstructorTests) {
+ pw::Result<pw::Result<CopyDetector>> status_or = CopyDetector(10);
+ pw::Result<pw::Result<CopyDetector>> status_error =
+ pw::Status::InvalidArgument();
+ ASSERT_OK(status_or.status());
+ EXPECT_OK_AND_COPY_DETECTOR_HAS(*status_or, 10, true, false);
+ pw::Result<pw::Result<CopyDetector>> a = status_or;
+ EXPECT_OK_AND_COPY_DETECTOR_HAS(*a, 10, false, true);
+ pw::Result<pw::Result<CopyDetector>> a_err = status_error;
+ EXPECT_FALSE(a_err.ok());
+
+ const pw::Result<pw::Result<CopyDetector>>& cref = status_or;
+ pw::Result<pw::Result<CopyDetector>> b = cref; // NOLINT
+ ASSERT_OK(b.status());
+ EXPECT_OK_AND_COPY_DETECTOR_HAS(*b, 10, false, true);
+ const pw::Result<pw::Result<CopyDetector>>& cref_err = status_error;
+ pw::Result<pw::Result<CopyDetector>> b_err = cref_err; // NOLINT
+ EXPECT_FALSE(b_err.ok());
+
+ pw::Result<pw::Result<CopyDetector>> c = std::move(status_or);
+ ASSERT_OK(c.status());
+ EXPECT_OK_AND_COPY_DETECTOR_HAS(*c, 10, true, false);
+ pw::Result<pw::Result<CopyDetector>> c_err = std::move(status_error);
+ EXPECT_FALSE(c_err.ok());
+}
+
+TEST(Result, NestedResultCopyAndMoveAssignment) {
+ pw::Result<pw::Result<CopyDetector>> status_or = CopyDetector(10);
+ pw::Result<pw::Result<CopyDetector>> status_error =
+ pw::Status::InvalidArgument();
+ pw::Result<pw::Result<CopyDetector>> a;
+ a = status_or;
+ ASSERT_TRUE(a.ok());
+ EXPECT_OK_AND_COPY_DETECTOR_HAS(*a, 10, false, true);
+ a = status_error;
+ EXPECT_FALSE(a.ok());
+
+ const pw::Result<pw::Result<CopyDetector>>& cref = status_or;
+ a = cref;
+ ASSERT_TRUE(a.ok());
+ EXPECT_OK_AND_COPY_DETECTOR_HAS(*a, 10, false, true);
+ const pw::Result<pw::Result<CopyDetector>>& cref_err = status_error;
+ a = cref_err;
+ EXPECT_FALSE(a.ok());
+ a = std::move(status_or);
+ ASSERT_TRUE(a.ok());
+ EXPECT_OK_AND_COPY_DETECTOR_HAS(*a, 10, true, false);
+ a = std::move(status_error);
+ EXPECT_FALSE(a.ok());
+}
+
+struct Copyable {
+ Copyable() {}
+ Copyable(const Copyable&) {}
+ Copyable& operator=(const Copyable&) { return *this; }
+};
+
+struct MoveOnly {
+ MoveOnly() {}
+ MoveOnly(MoveOnly&&) {}
+ MoveOnly& operator=(MoveOnly&&) { return *this; }
+};
+
+struct NonMovable {
+ NonMovable() {}
+ NonMovable(const NonMovable&) = delete;
+ NonMovable(NonMovable&&) = delete;
+ NonMovable& operator=(const NonMovable&) = delete;
+ NonMovable& operator=(NonMovable&&) = delete;
+};
+
+TEST(Result, CopyAndMoveAbility) {
+ EXPECT_TRUE(std::is_copy_constructible<Copyable>::value);
+ EXPECT_TRUE(std::is_copy_assignable<Copyable>::value);
+ EXPECT_TRUE(std::is_move_constructible<Copyable>::value);
+ EXPECT_TRUE(std::is_move_assignable<Copyable>::value);
+ EXPECT_FALSE(std::is_copy_constructible<MoveOnly>::value);
+ EXPECT_FALSE(std::is_copy_assignable<MoveOnly>::value);
+ EXPECT_TRUE(std::is_move_constructible<MoveOnly>::value);
+ EXPECT_TRUE(std::is_move_assignable<MoveOnly>::value);
+ EXPECT_FALSE(std::is_copy_constructible<NonMovable>::value);
+ EXPECT_FALSE(std::is_copy_assignable<NonMovable>::value);
+ EXPECT_FALSE(std::is_move_constructible<NonMovable>::value);
+ EXPECT_FALSE(std::is_move_assignable<NonMovable>::value);
+}
+
+TEST(Result, ResultAnyCopyAndMoveConstructorTests) {
+ pw::Result<std::any> status_or = CopyDetector(10);
+ pw::Result<std::any> status_error = pw::Status::InvalidArgument();
+ EXPECT_OK_AND_ANY_WITH_COPY_DETECTOR_HAS(status_or, 10, true, false);
+ pw::Result<std::any> a = status_or;
+ EXPECT_OK_AND_ANY_WITH_COPY_DETECTOR_HAS(a, 10, false, true);
+ pw::Result<std::any> a_err = status_error;
+ EXPECT_FALSE(a_err.ok());
+
+ const pw::Result<std::any>& cref = status_or;
+ // No lint for no-change copy.
+ pw::Result<std::any> b = cref; // NOLINT
+ EXPECT_OK_AND_ANY_WITH_COPY_DETECTOR_HAS(b, 10, false, true);
+ const pw::Result<std::any>& cref_err = status_error;
+ // No lint for no-change copy.
+ pw::Result<std::any> b_err = cref_err; // NOLINT
+ EXPECT_FALSE(b_err.ok());
+
+ pw::Result<std::any> c = std::move(status_or);
+ EXPECT_OK_AND_ANY_WITH_COPY_DETECTOR_HAS(c, 10, true, false);
+ pw::Result<std::any> c_err = std::move(status_error);
+ EXPECT_FALSE(c_err.ok());
+}
+
+TEST(Result, ResultAnyCopyAndMoveAssignment) {
+ pw::Result<std::any> status_or = CopyDetector(10);
+ pw::Result<std::any> status_error = pw::Status::InvalidArgument();
+ pw::Result<std::any> a;
+ a = status_or;
+ EXPECT_OK_AND_ANY_WITH_COPY_DETECTOR_HAS(a, 10, false, true);
+ a = status_error;
+ EXPECT_FALSE(a.ok());
+
+ const pw::Result<std::any>& cref = status_or;
+ a = cref;
+ EXPECT_OK_AND_ANY_WITH_COPY_DETECTOR_HAS(a, 10, false, true);
+ const pw::Result<std::any>& cref_err = status_error;
+ a = cref_err;
+ EXPECT_FALSE(a.ok());
+ a = std::move(status_or);
+ EXPECT_OK_AND_ANY_WITH_COPY_DETECTOR_HAS(a, 10, true, false);
+ a = std::move(status_error);
+ EXPECT_FALSE(a.ok());
+}
+
+TEST(Result, ResultCopyAndMoveTestsConstructor) {
+ pw::Result<CopyDetector> status_or(10);
+ EXPECT_OK_AND_COPY_DETECTOR_HAS(status_or, 10, false, false);
+ pw::Result<CopyDetector> a(status_or);
+ EXPECT_OK_AND_COPY_DETECTOR_HAS(a, 10, false, true);
+ const pw::Result<CopyDetector>& cref = status_or;
+ pw::Result<CopyDetector> b(cref); // NOLINT
+ EXPECT_OK_AND_COPY_DETECTOR_HAS(b, 10, false, true);
+ pw::Result<CopyDetector> c(std::move(status_or));
+ EXPECT_OK_AND_COPY_DETECTOR_HAS(c, 10, true, false);
+}
+
+TEST(Result, ResultCopyAndMoveTestsAssignment) {
+ pw::Result<CopyDetector> status_or(10);
+ EXPECT_OK_AND_COPY_DETECTOR_HAS(status_or, 10, false, false);
+ pw::Result<CopyDetector> a;
+ a = status_or;
+ EXPECT_OK_AND_COPY_DETECTOR_HAS(a, 10, false, true);
+ const pw::Result<CopyDetector>& cref = status_or;
+ pw::Result<CopyDetector> b;
+ b = cref;
+ EXPECT_OK_AND_COPY_DETECTOR_HAS(b, 10, false, true);
+ pw::Result<CopyDetector> c;
+ c = std::move(status_or);
+ EXPECT_OK_AND_COPY_DETECTOR_HAS(c, 10, true, false);
+}
+
+TEST(Result, StdAnyAssignment) {
+ EXPECT_FALSE(
+ (std::is_assignable<pw::Result<std::any>, pw::Result<int>>::value));
+ pw::Result<std::any> status_or;
+ status_or = pw::Status::InvalidArgument();
+ EXPECT_FALSE(status_or.ok());
+}
+
+TEST(Result, ImplicitAssignment) {
+ pw::Result<std::variant<int, std::string>> status_or;
+ status_or = 10;
+ ASSERT_OK(status_or.status());
+ EXPECT_EQ(std::get<int>(*status_or), 10);
+}
+
+TEST(Result, SelfDirectInitAssignment) {
+ pw::Result<std::vector<int>> status_or = {{10, 20, 30}};
+ status_or = *status_or;
+ ASSERT_OK(status_or.status());
+ ASSERT_EQ(status_or->size(), 3u);
+ EXPECT_EQ((*status_or)[0], 10);
+ EXPECT_EQ((*status_or)[1], 20);
+ EXPECT_EQ((*status_or)[2], 30);
+}
+
+TEST(Result, ImplicitCastFromInitializerList) {
+ pw::Result<std::vector<int>> status_or = {{10, 20, 30}};
+ ASSERT_OK(status_or.status());
+ ASSERT_EQ(status_or->size(), 3u);
+ EXPECT_EQ((*status_or)[0], 10);
+ EXPECT_EQ((*status_or)[1], 20);
+ EXPECT_EQ((*status_or)[2], 30);
+}
+
+TEST(Result, UniquePtrImplicitAssignment) {
+ pw::Result<std::unique_ptr<Base1>> status_or;
+ status_or = std::make_unique<Derived>();
+ ASSERT_OK(status_or.status());
+ EXPECT_NE(status_or->get(), nullptr);
+}
+
+TEST(Result, Pointer) {
+ struct Base {};
+ struct B : public Base {};
+ struct C : private Base {};
+
+ EXPECT_TRUE((std::is_constructible<pw::Result<Base*>, B*>::value));
+ EXPECT_TRUE((std::is_convertible<B*, pw::Result<Base*>>::value));
+ EXPECT_FALSE((std::is_constructible<pw::Result<Base*>, C*>::value));
+ EXPECT_FALSE((std::is_convertible<C*, pw::Result<Base*>>::value));
+}
+
+TEST(Result, TestAssignmentStatusNotOkConverting) {
+ // Copy assignment
+ {
+ const pw::Status expected = pw::Status::Cancelled();
+ pw::Result<int> source(expected);
+
+ pw::Result<double> target;
+ target = source;
+
+ EXPECT_FALSE(target.ok());
+ EXPECT_EQ(expected, target.status());
+
+ EXPECT_FALSE(source.ok());
+ EXPECT_EQ(expected, source.status());
+ }
+
+ // Move assignment
+ {
+ const pw::Status expected = pw::Status::Cancelled();
+ pw::Result<int> source(expected);
+
+ pw::Result<double> target;
+ target = std::move(source);
+
+ EXPECT_FALSE(target.ok());
+ EXPECT_EQ(expected, target.status());
+
+ EXPECT_FALSE(source.ok()); // NOLINT(bugprone-use-after-move)
+
+ // absl::Status sets itself to INTERNAL when moved, but pw::Status does not.
+ // EXPECT_EQ(source.status().code(), pw::Status::Internal().code());
+ }
+}
+
+TEST(Result, SelfAssignment) {
+ // Copy-assignment, status OK
+ {
+ // A string long enough that it's likely to defeat any inline representation
+ // optimization.
+ const std::string long_str(128, 'a');
+
+ pw::Result<std::string> so = long_str;
+ so = *&so;
+
+ ASSERT_TRUE(so.ok());
+ EXPECT_OK(so.status());
+ EXPECT_EQ(long_str, *so);
+ }
+
+ // Copy-assignment, error status
+ {
+ pw::Result<int> so = pw::Status::NotFound();
+ so = *&so;
+
+ EXPECT_FALSE(so.ok());
+ EXPECT_EQ(so.status().code(), pw::Status::NotFound().code());
+ }
+
+ // Move-assignment with copyable type, status OK
+ {
+ pw::Result<int> so = 17;
+
+ // Fool the compiler, which otherwise complains.
+ auto& same = so;
+ so = std::move(same);
+
+ ASSERT_TRUE(so.ok());
+ EXPECT_OK(so.status());
+ EXPECT_EQ(17, *so);
+ }
+
+ // Move-assignment with copyable type, error status
+ {
+ pw::Result<int> so = pw::Status::NotFound();
+
+ // Fool the compiler, which otherwise complains.
+ auto& same = so;
+ so = std::move(same);
+
+ EXPECT_FALSE(so.ok());
+ EXPECT_EQ(so.status().code(), pw::Status::NotFound().code());
+ }
+
+ // Move-assignment with non-copyable type, status OK
+ {
+ const auto raw = new int(17);
+ pw::Result<std::unique_ptr<int>> so = std::unique_ptr<int>(raw);
+
+ // Fool the compiler, which otherwise complains.
+ auto& same = so;
+ so = std::move(same);
+
+ ASSERT_TRUE(so.ok());
+ EXPECT_OK(so.status());
+ EXPECT_EQ(raw, so->get());
+ }
+
+ // Move-assignment with non-copyable type, error status
+ {
+ pw::Result<std::unique_ptr<int>> so = pw::Status::NotFound();
+
+ // Fool the compiler, which otherwise complains.
+ auto& same = so;
+ so = std::move(same);
+
+ EXPECT_FALSE(so.ok());
+ EXPECT_EQ(so.status().code(), pw::Status::NotFound().code());
+ }
+}
+
+// These types form the overload sets of the constructors and the assignment
+// operators of `MockValue`. They distinguish construction from assignment,
+// lvalue from rvalue.
+struct FromConstructibleAssignableLvalue {};
+struct FromConstructibleAssignableRvalue {};
+struct FromImplicitConstructibleOnly {};
+struct FromAssignableOnly {};
+
+// This class is for testing the forwarding value assignments of `Result`.
+// `from_rvalue` indicates whether the constructor or the assignment taking
+// rvalue reference is called. `from_assignment` indicates whether any
+// assignment is called.
+struct MockValue {
+ // Constructs `MockValue` from `FromConstructibleAssignableLvalue`.
+ MockValue(const FromConstructibleAssignableLvalue&) // NOLINT
+ : from_rvalue(false), assigned(false) {}
+ // Constructs `MockValue` from `FromConstructibleAssignableRvalue`.
+ MockValue(FromConstructibleAssignableRvalue&&) // NOLINT
+ : from_rvalue(true), assigned(false) {}
+ // Constructs `MockValue` from `FromImplicitConstructibleOnly`.
+ // `MockValue` is not assignable from `FromImplicitConstructibleOnly`.
+ MockValue(const FromImplicitConstructibleOnly&) // NOLINT
+ : from_rvalue(false), assigned(false) {}
+ // Assigns `FromConstructibleAssignableLvalue`.
+ MockValue& operator=(const FromConstructibleAssignableLvalue&) {
+ from_rvalue = false;
+ assigned = true;
+ return *this;
+ }
+ // Assigns `FromConstructibleAssignableRvalue` (rvalue only).
+ MockValue& operator=(FromConstructibleAssignableRvalue&&) {
+ from_rvalue = true;
+ assigned = true;
+ return *this;
+ }
+ // Assigns `FromAssignableOnly`, but not constructible from
+ // `FromAssignableOnly`.
+ MockValue& operator=(const FromAssignableOnly&) {
+ from_rvalue = false;
+ assigned = true;
+ return *this;
+ }
+ bool from_rvalue;
+ bool assigned;
+};
+
+// operator=(U&&)
+TEST(Result, PerfectForwardingAssignment) {
+ // U == T
+ constexpr int kValue1 = 10, kValue2 = 20;
+ pw::Result<CopyDetector> status_or;
+ CopyDetector lvalue(kValue1);
+ status_or = lvalue;
+ EXPECT_OK_AND_COPY_DETECTOR_HAS(status_or, kValue1, false, true);
+ status_or = CopyDetector(kValue2);
+ EXPECT_OK_AND_COPY_DETECTOR_HAS(status_or, kValue2, true, false);
+
+ // U != T
+ EXPECT_TRUE(
+ (std::is_assignable<pw::Result<MockValue>&,
+ const FromConstructibleAssignableLvalue&>::value));
+ EXPECT_TRUE((std::is_assignable<pw::Result<MockValue>&,
+ FromConstructibleAssignableLvalue&&>::value));
+ EXPECT_FALSE(
+ (std::is_assignable<pw::Result<MockValue>&,
+ const FromConstructibleAssignableRvalue&>::value));
+ EXPECT_TRUE((std::is_assignable<pw::Result<MockValue>&,
+ FromConstructibleAssignableRvalue&&>::value));
+ EXPECT_TRUE(
+ (std::is_assignable<pw::Result<MockValue>&,
+ const FromImplicitConstructibleOnly&>::value));
+ EXPECT_FALSE((std::is_assignable<pw::Result<MockValue>&,
+ const FromAssignableOnly&>::value));
+
+ pw::Result<MockValue> from_lvalue(FromConstructibleAssignableLvalue{});
+ EXPECT_FALSE(from_lvalue->from_rvalue);
+ EXPECT_FALSE(from_lvalue->assigned);
+ from_lvalue = FromConstructibleAssignableLvalue{};
+ EXPECT_FALSE(from_lvalue->from_rvalue);
+ EXPECT_TRUE(from_lvalue->assigned);
+
+ pw::Result<MockValue> from_rvalue(FromConstructibleAssignableRvalue{});
+ EXPECT_TRUE(from_rvalue->from_rvalue);
+ EXPECT_FALSE(from_rvalue->assigned);
+ from_rvalue = FromConstructibleAssignableRvalue{};
+ EXPECT_TRUE(from_rvalue->from_rvalue);
+ EXPECT_TRUE(from_rvalue->assigned);
+
+ pw::Result<MockValue> from_implicit_constructible(
+ FromImplicitConstructibleOnly{});
+ EXPECT_FALSE(from_implicit_constructible->from_rvalue);
+ EXPECT_FALSE(from_implicit_constructible->assigned);
+ // construct a temporary `Result` object and invoke the `Result` move
+ // assignment operator.
+ from_implicit_constructible = FromImplicitConstructibleOnly{};
+ EXPECT_FALSE(from_implicit_constructible->from_rvalue);
+ EXPECT_FALSE(from_implicit_constructible->assigned);
+}
+
+TEST(Result, TestStatus) {
+ pw::Result<int> good(4);
+ EXPECT_TRUE(good.ok());
+ pw::Result<int> bad(pw::Status::Cancelled());
+ EXPECT_FALSE(bad.ok());
+ EXPECT_EQ(bad.status().code(), pw::Status::Cancelled().code());
+}
+
+TEST(Result, OperatorStarRefQualifiers) {
+ static_assert(
+ std::is_same<const int&,
+ decltype(*std::declval<const pw::Result<int>&>())>(),
+ "Unexpected ref-qualifiers");
+ static_assert(
+ std::is_same<int&, decltype(*std::declval<pw::Result<int>&>())>(),
+ "Unexpected ref-qualifiers");
+ static_assert(
+ std::is_same<const int&&,
+ decltype(*std::declval<const pw::Result<int>&&>())>(),
+ "Unexpected ref-qualifiers");
+ static_assert(
+ std::is_same<int&&, decltype(*std::declval<pw::Result<int>&&>())>(),
+ "Unexpected ref-qualifiers");
+}
+
+TEST(Result, OperatorStar) {
+ const pw::Result<std::string> const_lvalue("hello");
+ EXPECT_EQ("hello", *const_lvalue);
+
+ pw::Result<std::string> lvalue("hello");
+ EXPECT_EQ("hello", *lvalue);
+
+ // Note: Recall that std::move() is equivalent to a static_cast to an rvalue
+ // reference type.
+ const pw::Result<std::string> const_rvalue("hello");
+ EXPECT_EQ("hello", *std::move(const_rvalue)); // NOLINT
+
+ pw::Result<std::string> rvalue("hello");
+ EXPECT_EQ("hello", *std::move(rvalue));
+}
+
+TEST(Result, OperatorArrowQualifiers) {
+ static_assert(
+ std::is_same<
+ const int*,
+ decltype(std::declval<const pw::Result<int>&>().operator->())>(),
+ "Unexpected qualifiers");
+ static_assert(
+ std::is_same<int*,
+ decltype(std::declval<pw::Result<int>&>().operator->())>(),
+ "Unexpected qualifiers");
+ static_assert(
+ std::is_same<
+ const int*,
+ decltype(std::declval<const pw::Result<int>&&>().operator->())>(),
+ "Unexpected qualifiers");
+ static_assert(
+ std::is_same<int*,
+ decltype(std::declval<pw::Result<int>&&>().operator->())>(),
+ "Unexpected qualifiers");
+}
+
+TEST(Result, OperatorArrow) {
+ const pw::Result<std::string> const_lvalue("hello");
+ EXPECT_EQ(std::string("hello"), const_lvalue->c_str());
+
+ pw::Result<std::string> lvalue("hello");
+ EXPECT_EQ(std::string("hello"), lvalue->c_str());
+}
+
+TEST(Result, RValueStatus) {
+ pw::Result<int> so(pw::Status::NotFound());
+ const pw::Status s = std::move(so).status();
+
+ EXPECT_EQ(s.code(), pw::Status::NotFound().code());
+
+ // Check that !ok() still implies !status().ok(), even after moving out of the
+ // object. See the note on the rvalue ref-qualified status method.
+ EXPECT_FALSE(so.ok()); // NOLINT
+ EXPECT_FALSE(so.status().ok());
+
+ // absl::Status sets itself to INTERNAL when moved, but pw::Status does not.
+ // EXPECT_EQ(so.status().code(), pw::Status::Internal().code());
+}
+
+TEST(Result, TestValue) {
+ const int kI = 4;
+ pw::Result<int> thing(kI);
+ EXPECT_EQ(kI, *thing);
+}
+
+TEST(Result, TestValueConst) {
+ const int kI = 4;
+ const pw::Result<int> thing(kI);
+ EXPECT_EQ(kI, *thing);
+}
+
+TEST(Result, TestPointerDefaultCtor) {
+ pw::Result<int*> thing;
+ EXPECT_FALSE(thing.ok());
+ EXPECT_EQ(thing.status().code(), pw::Status::Unknown().code());
+}
+
+TEST(Result, TestPointerStatusCtor) {
+ pw::Result<int*> thing(pw::Status::Cancelled());
+ EXPECT_FALSE(thing.ok());
+ EXPECT_EQ(thing.status().code(), pw::Status::Cancelled().code());
+}
+
+TEST(Result, TestPointerValueCtor) {
+ const int kI = 4;
+
+ // Construction from a non-null pointer
+ {
+ pw::Result<const int*> so(&kI);
+ EXPECT_TRUE(so.ok());
+ EXPECT_OK(so.status());
+ EXPECT_EQ(&kI, *so);
+ }
+
+ // Construction from a null pointer constant
+ {
+ pw::Result<const int*> so(nullptr);
+ EXPECT_TRUE(so.ok());
+ EXPECT_OK(so.status());
+ EXPECT_EQ(nullptr, *so);
+ }
+
+ // Construction from a non-literal null pointer
+ {
+ const int* const p = nullptr;
+
+ pw::Result<const int*> so(p);
+ EXPECT_TRUE(so.ok());
+ EXPECT_OK(so.status());
+ EXPECT_EQ(nullptr, *so);
+ }
+}
+
+TEST(Result, TestPointerCopyCtorStatusOk) {
+ const int kI = 0;
+ pw::Result<const int*> original(&kI);
+ pw::Result<const int*> copy(original);
+ EXPECT_OK(copy.status());
+ EXPECT_EQ(*original, *copy);
+}
+
+TEST(Result, TestPointerCopyCtorStatusNotOk) {
+ pw::Result<int*> original(pw::Status::Cancelled());
+ pw::Result<int*> copy(original);
+ EXPECT_EQ(copy.status().code(), pw::Status::Cancelled().code());
+}
+
+TEST(Result, TestPointerCopyCtorStatusOKConverting) {
+ Derived derived;
+ pw::Result<Derived*> original(&derived);
+ pw::Result<Base2*> copy(original);
+ EXPECT_OK(copy.status());
+ EXPECT_EQ(static_cast<const Base2*>(*original), *copy);
+}
+
+TEST(Result, TestPointerCopyCtorStatusNotOkConverting) {
+ pw::Result<Derived*> original(pw::Status::Cancelled());
+ pw::Result<Base2*> copy(original);
+ EXPECT_EQ(copy.status().code(), pw::Status::Cancelled().code());
+}
+
+TEST(Result, TestPointerAssignmentStatusOk) {
+ const int kI = 0;
+ pw::Result<const int*> source(&kI);
+ pw::Result<const int*> target;
+ target = source;
+ EXPECT_OK(target.status());
+ EXPECT_EQ(*source, *target);
+}
+
+TEST(Result, TestPointerAssignmentStatusNotOk) {
+ pw::Result<int*> source(pw::Status::Cancelled());
+ pw::Result<int*> target;
+ target = source;
+ EXPECT_EQ(target.status().code(), pw::Status::Cancelled().code());
+}
+
+TEST(Result, TestPointerAssignmentStatusOKConverting) {
+ Derived derived;
+ pw::Result<Derived*> source(&derived);
+ pw::Result<Base2*> target;
+ target = source;
+ EXPECT_OK(target.status());
+ EXPECT_EQ(static_cast<const Base2*>(*source), *target);
+}
+
+TEST(Result, TestPointerAssignmentStatusNotOkConverting) {
+ pw::Result<Derived*> source(pw::Status::Cancelled());
+ pw::Result<Base2*> target;
+ target = source;
+ EXPECT_EQ(target.status(), source.status());
+}
+
+TEST(Result, TestPointerStatus) {
+ const int kI = 0;
+ pw::Result<const int*> good(&kI);
+ EXPECT_TRUE(good.ok());
+ pw::Result<const int*> bad(pw::Status::Cancelled());
+ EXPECT_EQ(bad.status().code(), pw::Status::Cancelled().code());
+}
+
+TEST(Result, TestPointerValue) {
+ const int kI = 0;
+ pw::Result<const int*> thing(&kI);
+ EXPECT_EQ(&kI, *thing);
+}
+
+TEST(Result, TestPointerValueConst) {
+ const int kI = 0;
+ const pw::Result<const int*> thing(&kI);
+ EXPECT_EQ(&kI, *thing);
+}
+
+TEST(Result, ResultVectorOfUniquePointerCanReserveAndResize) {
+ using EvilType = std::vector<std::unique_ptr<int>>;
+ static_assert(std::is_copy_constructible<EvilType>::value, "");
+ std::vector<::pw::Result<EvilType>> v(5);
+ v.reserve(v.capacity() + 10);
+ v.resize(v.capacity() + 10);
+}
+
+TEST(Result, ConstPayload) {
+ // A reduced version of a problematic type found in the wild. All of the
+ // operations below should compile.
+ pw::Result<const int> a;
+
+ // Copy-construction
+ pw::Result<const int> b(a);
+
+ // Copy-assignment
+ EXPECT_FALSE(std::is_copy_assignable<pw::Result<const int>>::value);
+
+ // Move-construction
+ pw::Result<const int> c(std::move(a));
+
+ // Move-assignment
+ EXPECT_FALSE(std::is_move_assignable<pw::Result<const int>>::value);
+}
+
+TEST(Result, MapToResultUniquePtr) {
+ // A reduced version of a problematic type found in the wild. All of the
+ // operations below should compile.
+ using MapType = std::map<std::string, pw::Result<std::unique_ptr<int>>>;
+
+ MapType a;
+
+ // Move-construction
+ MapType b(std::move(a));
+
+ // Move-assignment
+ a = std::move(b);
+}
+
+TEST(Result, ValueOrOk) {
+ const pw::Result<int> status_or = 0;
+ EXPECT_EQ(status_or.value_or(-1), 0);
+}
+
+TEST(Result, ValueOrDefault) {
+ const pw::Result<int> status_or = pw::Status::Cancelled();
+ EXPECT_EQ(status_or.value_or(-1), -1);
+}
+
+TEST(Result, MoveOnlyValueOrOk) {
+ pw::Result<std::unique_ptr<int>> status_or = std::make_unique<int>(0);
+ ASSERT_TRUE(status_or.ok());
+ auto value = std::move(status_or).value_or(std::make_unique<int>(-1));
+ EXPECT_EQ(*value, 0);
+}
+
+TEST(Result, MoveOnlyValueOrDefault) {
+ pw::Result<std::unique_ptr<int>> status_or(pw::Status::Cancelled());
+ ASSERT_FALSE(status_or.ok());
+ auto value = std::move(status_or).value_or(std::make_unique<int>(-1));
+ EXPECT_EQ(*value, -1);
+}
+
+static pw::Result<int> MakeStatus() { return 100; }
+
+TEST(Result, TestIgnoreError) { MakeStatus().IgnoreError(); }
+
+TEST(Result, EqualityOperator) {
+ constexpr int kNumCases = 4;
+ std::array<pw::Result<int>, kNumCases> group1 = {
+ pw::Result<int>(1),
+ pw::Result<int>(2),
+ pw::Result<int>(pw::Status::InvalidArgument()),
+ pw::Result<int>(pw::Status::Internal())};
+ std::array<pw::Result<int>, kNumCases> group2 = {
+ pw::Result<int>(1),
+ pw::Result<int>(2),
+ pw::Result<int>(pw::Status::InvalidArgument()),
+ pw::Result<int>(pw::Status::Internal())};
+ for (int i = 0; i < kNumCases; ++i) {
+ for (int j = 0; j < kNumCases; ++j) {
+ if (i == j) {
+ EXPECT_TRUE(group1[i] == group2[j]);
+ EXPECT_FALSE(group1[i] != group2[j]);
+ } else {
+ EXPECT_FALSE(group1[i] == group2[j]);
+ EXPECT_TRUE(group1[i] != group2[j]);
+ }
+ }
+ }
+}
+
+struct MyType {
+ bool operator==(const MyType&) const { return true; }
+};
+
+enum class ConvTraits { kNone = 0, kImplicit = 1, kExplicit = 2 };
+
+// This class has conversion operator to `Result<T>` based on value of
+// `conv_traits`.
+template <typename T, ConvTraits conv_traits = ConvTraits::kNone>
+struct ResultConversionBase {};
+
+template <typename T>
+struct ResultConversionBase<T, ConvTraits::kImplicit> {
+ operator pw::Result<T>() const& { // NOLINT
+ return pw::Status::InvalidArgument();
+ }
+ operator pw::Result<T>() && { // NOLINT
+ return pw::Status::InvalidArgument();
+ }
+};
+
+template <typename T>
+struct ResultConversionBase<T, ConvTraits::kExplicit> {
+ explicit operator pw::Result<T>() const& {
+ return pw::Status::InvalidArgument();
+ }
+ explicit operator pw::Result<T>() && { return pw::Status::InvalidArgument(); }
+};
+
+// This class has conversion operator to `T` based on the value of
+// `conv_traits`.
+template <typename T, ConvTraits conv_traits = ConvTraits::kNone>
+struct ConversionBase {};
+
+template <typename T>
+struct ConversionBase<T, ConvTraits::kImplicit> {
+ operator T() const& { return t; } // NOLINT
+ operator T() && { return std::move(t); } // NOLINT
+ T t;
+};
+
+template <typename T>
+struct ConversionBase<T, ConvTraits::kExplicit> {
+ explicit operator T() const& { return t; }
+ explicit operator T() && { return std::move(t); }
+ T t;
+};
+
+// This class has conversion operator to `pw::Status` based on the value of
+// `conv_traits`.
+template <ConvTraits conv_traits = ConvTraits::kNone>
+struct StatusConversionBase {};
+
+template <>
+struct StatusConversionBase<ConvTraits::kImplicit> {
+ operator pw::Status() const& { // NOLINT
+ return pw::Status::Internal();
+ }
+ operator pw::Status() && { // NOLINT
+ return pw::Status::Internal();
+ }
+};
+
+template <>
+struct StatusConversionBase<ConvTraits::kExplicit> {
+ explicit operator pw::Status() const& { // NOLINT
+ return pw::Status::Internal();
+ }
+ explicit operator pw::Status() && { // NOLINT
+ return pw::Status::Internal();
+ }
+};
+
+static constexpr int kConvToStatus = 1;
+static constexpr int kConvToResult = 2;
+static constexpr int kConvToT = 4;
+static constexpr int kConvExplicit = 8;
+
+constexpr ConvTraits GetConvTraits(int bit, int config) {
+ return (config & bit) == 0
+ ? ConvTraits::kNone
+ : ((config & kConvExplicit) == 0 ? ConvTraits::kImplicit
+ : ConvTraits::kExplicit);
+}
+
+// This class conditionally has conversion operator to `pw::Status`, `T`,
+// `Result<T>`, based on values of the template parameters.
+template <typename T, int config>
+struct CustomType
+ : ResultConversionBase<T, GetConvTraits(kConvToResult, config)>,
+ ConversionBase<T, GetConvTraits(kConvToT, config)>,
+ StatusConversionBase<GetConvTraits(kConvToStatus, config)> {};
+
+struct ConvertibleToAnyResult {
+ template <typename T>
+ operator pw::Result<T>() const { // NOLINT
+ return pw::Status::InvalidArgument();
+ }
+};
+
+// Test the rank of overload resolution for `Result<T>` constructor and
+// assignment, from highest to lowest:
+// 1. T/Status
+// 2. U that has conversion operator to pw::Result<T>
+// 3. U that is convertible to Status
+// 4. U that is convertible to T
+TEST(Result, ConstructionFromT) {
+ // Construct pw::Result<T> from T when T is convertible to
+ // pw::Result<T>
+ {
+ ConvertibleToAnyResult v;
+ pw::Result<ConvertibleToAnyResult> statusor(v);
+ EXPECT_TRUE(statusor.ok());
+ }
+ {
+ ConvertibleToAnyResult v;
+ pw::Result<ConvertibleToAnyResult> statusor = v;
+ EXPECT_TRUE(statusor.ok());
+ }
+ // Construct pw::Result<T> from T when T is explicitly convertible to
+ // Status
+ {
+ CustomType<MyType, kConvToStatus | kConvExplicit> v;
+ pw::Result<CustomType<MyType, kConvToStatus | kConvExplicit>> statusor(v);
+ EXPECT_TRUE(statusor.ok());
+ }
+ {
+ CustomType<MyType, kConvToStatus | kConvExplicit> v;
+ pw::Result<CustomType<MyType, kConvToStatus | kConvExplicit>> statusor = v;
+ EXPECT_TRUE(statusor.ok());
+ }
+}
+
+// Construct pw::Result<T> from U when U is explicitly convertible to T
+TEST(Result, ConstructionFromTypeConvertibleToT) {
+ {
+ CustomType<MyType, kConvToT | kConvExplicit> v;
+ pw::Result<MyType> statusor(v);
+ EXPECT_TRUE(statusor.ok());
+ }
+ {
+ CustomType<MyType, kConvToT> v;
+ pw::Result<MyType> statusor = v;
+ EXPECT_TRUE(statusor.ok());
+ }
+}
+
+// Construct pw::Result<T> from U when U has explicit conversion operator to
+// pw::Result<T>
+TEST(Result, ConstructionFromTypeWithConversionOperatorToResultT) {
+ {
+ CustomType<MyType, kConvToResult | kConvExplicit> v;
+ pw::Result<MyType> statusor(v);
+ EXPECT_EQ(statusor, v.operator pw::Result<MyType>());
+ }
+ {
+ CustomType<MyType, kConvToT | kConvToResult | kConvExplicit> v;
+ pw::Result<MyType> statusor(v);
+ EXPECT_EQ(statusor, v.operator pw::Result<MyType>());
+ }
+ {
+ CustomType<MyType, kConvToResult | kConvToStatus | kConvExplicit> v;
+ pw::Result<MyType> statusor(v);
+ EXPECT_EQ(statusor, v.operator pw::Result<MyType>());
+ }
+ {
+ CustomType<MyType, kConvToT | kConvToResult | kConvToStatus | kConvExplicit>
+ v;
+ pw::Result<MyType> statusor(v);
+ EXPECT_EQ(statusor, v.operator pw::Result<MyType>());
+ }
+ {
+ CustomType<MyType, kConvToResult> v;
+ pw::Result<MyType> statusor = v;
+ EXPECT_EQ(statusor, v.operator pw::Result<MyType>());
+ }
+ {
+ CustomType<MyType, kConvToT | kConvToResult> v;
+ pw::Result<MyType> statusor = v;
+ EXPECT_EQ(statusor, v.operator pw::Result<MyType>());
+ }
+ {
+ CustomType<MyType, kConvToResult | kConvToStatus> v;
+ pw::Result<MyType> statusor = v;
+ EXPECT_EQ(statusor, v.operator pw::Result<MyType>());
+ }
+ {
+ CustomType<MyType, kConvToT | kConvToResult | kConvToStatus> v;
+ pw::Result<MyType> statusor = v;
+ EXPECT_EQ(statusor, v.operator pw::Result<MyType>());
+ }
+}
+
+TEST(Result, ConstructionFromTypeConvertibleToStatus) {
+ // Construction fails because conversion to `Status` is explicit.
+ {
+ CustomType<MyType, kConvToStatus | kConvExplicit> v;
+ pw::Result<MyType> statusor(v);
+ EXPECT_FALSE(statusor.ok());
+ EXPECT_EQ(statusor.status(), static_cast<pw::Status>(v));
+ }
+ {
+ CustomType<MyType, kConvToT | kConvToStatus | kConvExplicit> v;
+ pw::Result<MyType> statusor(v);
+ EXPECT_FALSE(statusor.ok());
+ EXPECT_EQ(statusor.status(), static_cast<pw::Status>(v));
+ }
+ {
+ CustomType<MyType, kConvToStatus> v;
+ pw::Result<MyType> statusor = v;
+ EXPECT_FALSE(statusor.ok());
+ EXPECT_EQ(statusor.status(), static_cast<pw::Status>(v));
+ }
+ {
+ CustomType<MyType, kConvToT | kConvToStatus> v;
+ pw::Result<MyType> statusor = v;
+ EXPECT_FALSE(statusor.ok());
+ EXPECT_EQ(statusor.status(), static_cast<pw::Status>(v));
+ }
+}
+
+TEST(Result, AssignmentFromT) {
+ // Assign to pw::Result<T> from T when T is convertible to
+ // pw::Result<T>
+ {
+ ConvertibleToAnyResult v;
+ pw::Result<ConvertibleToAnyResult> statusor;
+ statusor = v;
+ EXPECT_TRUE(statusor.ok());
+ }
+ // Assign to pw::Result<T> from T when T is convertible to Status
+ {
+ CustomType<MyType, kConvToStatus> v;
+ pw::Result<CustomType<MyType, kConvToStatus>> statusor;
+ statusor = v;
+ EXPECT_TRUE(statusor.ok());
+ }
+}
+
+TEST(Result, AssignmentFromTypeConvertibleToT) {
+ // Assign to pw::Result<T> from U when U is convertible to T
+ {
+ CustomType<MyType, kConvToT> v;
+ pw::Result<MyType> statusor;
+ statusor = v;
+ EXPECT_TRUE(statusor.ok());
+ }
+}
+
+TEST(Result, AssignmentFromTypeWithConversionOperatortoResultT) {
+ // Assign to pw::Result<T> from U when U has conversion operator to
+ // pw::Result<T>
+ {
+ CustomType<MyType, kConvToResult> v;
+ pw::Result<MyType> statusor;
+ statusor = v;
+ EXPECT_EQ(statusor, v.operator pw::Result<MyType>());
+ }
+ {
+ CustomType<MyType, kConvToT | kConvToResult> v;
+ pw::Result<MyType> statusor;
+ statusor = v;
+ EXPECT_EQ(statusor, v.operator pw::Result<MyType>());
+ }
+ {
+ CustomType<MyType, kConvToResult | kConvToStatus> v;
+ pw::Result<MyType> statusor;
+ statusor = v;
+ EXPECT_EQ(statusor, v.operator pw::Result<MyType>());
+ }
+ {
+ CustomType<MyType, kConvToT | kConvToResult | kConvToStatus> v;
+ pw::Result<MyType> statusor;
+ statusor = v;
+ EXPECT_EQ(statusor, v.operator pw::Result<MyType>());
+ }
+}
+
+TEST(Result, AssignmentFromTypeConvertibleToStatus) {
+ // Assign to pw::Result<T> from U when U is convertible to Status
+ {
+ CustomType<MyType, kConvToStatus> v;
+ pw::Result<MyType> statusor;
+ statusor = v;
+ EXPECT_FALSE(statusor.ok());
+ EXPECT_EQ(statusor.status(), static_cast<pw::Status>(v));
+ }
+ {
+ CustomType<MyType, kConvToT | kConvToStatus> v;
+ pw::Result<MyType> statusor;
+ statusor = v;
+ EXPECT_FALSE(statusor.ok());
+ EXPECT_EQ(statusor.status(), static_cast<pw::Status>(v));
+ }
+}
+
+} // namespace
diff --git a/pw_ring_buffer/CMakeLists.txt b/pw_ring_buffer/CMakeLists.txt
index 28bed4af4..b866f0ad7 100644
--- a/pw_ring_buffer/CMakeLists.txt
+++ b/pw_ring_buffer/CMakeLists.txt
@@ -14,13 +14,31 @@
include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
-pw_auto_add_simple_module(pw_ring_buffer
+pw_add_module_library(pw_ring_buffer
+ HEADERS
+ public/pw_ring_buffer/prefixed_entry_ring_buffer.h
+ PUBLIC_INCLUDES
+ public
PUBLIC_DEPS
pw_containers
+ pw_polyfill.cstddef
+ pw_polyfill.span
pw_result
- pw_span
pw_status
+ SOURCES
+ prefixed_entry_ring_buffer.cc
PRIVATE_DEPS
pw_assert
pw_varint
)
+
+pw_add_test(pw_ring_buffer.prefixed_entry_ring_buffer_test
+ SOURCES
+ prefixed_entry_ring_buffer_test.cc
+ DEPS
+ pw_ring_buffer
+ pw_assert
+ GROUPS
+ modules
+ pw_ring_buffer
+)
diff --git a/pw_rpc/Android.bp b/pw_rpc/Android.bp
index aeca94b0f..3913566a8 100644
--- a/pw_rpc/Android.bp
+++ b/pw_rpc/Android.bp
@@ -14,7 +14,6 @@
java_library {
name: "pw_rpc_java_client",
- host_supported: true,
srcs: ["java/main/dev/pigweed/pw_rpc/*.java"],
visibility: ["//visibility:public"],
static_libs: [
@@ -25,6 +24,7 @@ java_library {
"guava",
"jsr305",
"libprotobuf-java-lite",
+ "pw_log_android_java",
],
plugins: ["auto_value_plugin"],
sdk_version: "current",
diff --git a/pw_rpc/BUILD.bazel b/pw_rpc/BUILD.bazel
index ebba4e744..e29ff15eb 100644
--- a/pw_rpc/BUILD.bazel
+++ b/pw_rpc/BUILD.bazel
@@ -17,7 +17,6 @@ load("//pw_protobuf_compiler:proto.bzl", "pw_proto_library")
load("@com_google_protobuf//:protobuf.bzl", "py_proto_library")
load("@rules_proto//proto:defs.bzl", "proto_library")
load("@rules_proto_grpc//:defs.bzl", "proto_plugin")
-load("@rules_proto_grpc//js:defs.bzl", "js_proto_library")
package(default_visibility = ["//visibility:public"])
@@ -31,7 +30,7 @@ proto_library(
)
pw_proto_library(
- name = "benchmark_pwpb",
+ name = "benchmark_cc",
deps = [":benchmark_proto"],
)
@@ -41,11 +40,8 @@ pw_cc_library(
hdrs = ["public/pw_rpc/benchmark.h"],
includes = ["public"],
deps = [
- ":benchmark_pwpb",
- # TODO(hepler): RPC deps not used directly should be provided by the proto library
- ":pw_rpc",
- "//pw_rpc/raw:server_api",
- "//pw_rpc/raw:client_api",
+ ":benchmark_cc.pwpb",
+ ":benchmark_cc.raw_rpc",
],
)
@@ -108,7 +104,7 @@ pw_cc_library(
],
includes = ["public"],
deps = [
- ":internal_packet_pwpb",
+ ":internal_packet_cc.pwpb",
"//pw_assert",
"//pw_bytes",
"//pw_containers",
@@ -123,6 +119,17 @@ pw_cc_library(
)
pw_cc_library(
+ name = "thread_testing",
+ hdrs = ["public/pw_rpc/thread_testing.h"],
+ includes = ["public"],
+ deps = [
+ ":internal_test_utils",
+ "//pw_assert",
+ "//pw_sync:counting_semaphore",
+ ],
+)
+
+pw_cc_library(
name = "internal_test_utils",
srcs = ["fake_channel_output.cc"],
hdrs = [
@@ -149,6 +156,7 @@ pw_cc_library(
"//pw_containers:wrapped_iterator",
"//pw_rpc/raw:fake_channel_output",
"//pw_span",
+ "//pw_sync:mutex",
],
)
@@ -180,41 +188,6 @@ filegroup(
srcs = ["client_integration_test.cc"],
)
-# TODO(pwbug/507): Cannot build nanopb-dependent code in Bazel at the moment. Need
-# to determine how best to support Nanopb builds and protobuf generation.
-filegroup(
- name = "nanopb",
- srcs = [
- "nanopb/client_call_test.cc",
- "nanopb/client_integration_test.cc",
- "nanopb/client_reader_writer_test.cc",
- "nanopb/codegen_test.cc",
- "nanopb/common.cc",
- "nanopb/echo_service_test.cc",
- "nanopb/fake_channel_output_test.cc",
- "nanopb/method.cc",
- "nanopb/method_info_test.cc",
- "nanopb/method_lookup_test.cc",
- "nanopb/method_test.cc",
- "nanopb/method_union_test.cc",
- "nanopb/public/pw_rpc/echo_service_nanopb.h",
- "nanopb/public/pw_rpc/nanopb/client_reader_writer.h",
- "nanopb/public/pw_rpc/nanopb/client_testing.h",
- "nanopb/public/pw_rpc/nanopb/fake_channel_output.h",
- "nanopb/public/pw_rpc/nanopb/internal/common.h",
- "nanopb/public/pw_rpc/nanopb/internal/method.h",
- "nanopb/public/pw_rpc/nanopb/internal/method_union.h",
- "nanopb/public/pw_rpc/nanopb/server_reader_writer.h",
- "nanopb/public/pw_rpc/nanopb/test_method_context.h",
- "nanopb/pw_rpc_nanopb_private/internal_test_utils.h",
- "nanopb/serde_test.cc",
- "nanopb/server_callback_test.cc",
- "nanopb/server_reader_writer.cc",
- "nanopb/server_reader_writer_test.cc",
- "nanopb/stub_generation_test.cc",
- ],
-)
-
pw_cc_test(
name = "call_test",
srcs = [
@@ -297,12 +270,7 @@ pw_cc_test(
proto_library(
name = "internal_packet_proto",
srcs = ["internal/packet.proto"],
- visibility = ["//visibility:private"],
-)
-
-js_proto_library(
- name = "packet_proto_tspb",
- protos = [":internal_packet_proto"],
+ visibility = [":__subpackages__"],
)
java_lite_proto_library(
@@ -316,7 +284,7 @@ py_proto_library(
)
pw_proto_library(
- name = "internal_packet_pwpb",
+ name = "internal_packet_cc",
deps = [":internal_packet_proto"],
)
@@ -327,24 +295,57 @@ proto_library(
)
pw_proto_library(
- name = "pw_rpc_test_pwpb",
+ name = "pw_rpc_test_cc",
deps = [":pw_rpc_test_proto"],
)
proto_plugin(
- name = "pw_cc_plugin",
+ name = "pw_cc_plugin_raw",
outputs = [
"{protopath}.raw_rpc.pb.h",
],
protoc_plugin_name = "raw_rpc",
- tool = "@pigweed//pw_rpc/py:plugin",
+ tool = "@pigweed//pw_rpc/py:plugin_raw",
use_built_in_shell_environment = True,
visibility = ["//visibility:public"],
)
-filegroup(
- name = "echo",
+proto_plugin(
+ name = "pw_cc_plugin_nanopb_rpc",
+ outputs = [
+ "{protopath}.rpc.pb.h",
+ ],
+ protoc_plugin_name = "nanopb_rpc",
+ tool = "@pigweed//pw_rpc/py:plugin_nanopb",
+ use_built_in_shell_environment = True,
+ visibility = ["//visibility:public"],
+)
+
+proto_plugin(
+ name = "nanopb_plugin",
+ options = [
+ "--library-include-format='#include\"%s\"'",
+ ],
+ outputs = [
+ "{protopath}.pb.h",
+ "{protopath}.pb.c",
+ ],
+ separate_options_flag = True,
+ tool = "@com_github_nanopb_nanopb//:bazel_generator",
+ use_built_in_shell_environment = True,
+ visibility = ["//visibility:public"],
+)
+
+proto_library(
+ name = "echo_proto",
srcs = [
"echo.proto",
],
)
+
+pw_proto_library(
+ name = "echo_cc",
+ deps = [":echo_proto"],
+ # TODO(tpudlik): We should provide echo.options to nanopb here, but the
+ # current proto codegen implementation provides no mechanism for doing so.
+)
diff --git a/pw_rpc/BUILD.gn b/pw_rpc/BUILD.gn
index d2c9bd0a7..14539c593 100644
--- a/pw_rpc/BUILD.gn
+++ b/pw_rpc/BUILD.gn
@@ -179,13 +179,24 @@ pw_source_set("fake_channel_output") {
"$dir_pw_containers:filtered_view",
"$dir_pw_containers:vector",
"$dir_pw_containers:wrapped_iterator",
+ "$dir_pw_sync:mutex",
dir_pw_assert,
dir_pw_bytes,
+ dir_pw_function,
]
deps = [ ":log_config" ]
visibility = [ "./*" ]
}
+pw_source_set("thread_testing") {
+ public = [ "public/pw_rpc/thread_testing.h" ]
+ public_deps = [
+ ":fake_channel_output",
+ "$dir_pw_sync:counting_semaphore",
+ dir_pw_assert,
+ ]
+}
+
pw_source_set("test_utils") {
public = [
"public/pw_rpc/internal/fake_channel_output.h",
diff --git a/pw_rpc/benchmark.cc b/pw_rpc/benchmark.cc
index 8980e59ff..b906e0c48 100644
--- a/pw_rpc/benchmark.cc
+++ b/pw_rpc/benchmark.cc
@@ -31,9 +31,12 @@ StatusWithSize CopyBuffer(ConstByteSpan input, ByteSpan output) {
} // namespace
-StatusWithSize BenchmarkService::UnaryEcho(ConstByteSpan request,
- ByteSpan response) {
- return CopyBuffer(request, response);
+void BenchmarkService::UnaryEcho(ConstByteSpan request,
+ RawUnaryResponder& responder) {
+ std::byte response[32];
+ StatusWithSize result = CopyBuffer(request, response);
+ responder.Finish(std::span(response).first(result.size()), result.status())
+ .IgnoreError();
}
void BenchmarkService::BidirectionalEcho(
diff --git a/pw_rpc/channel.cc b/pw_rpc/channel.cc
index 2167cbc96..ea74168cc 100644
--- a/pw_rpc/channel.cc
+++ b/pw_rpc/channel.cc
@@ -18,20 +18,42 @@
#include "pw_rpc/internal/channel.h"
// clang-format on
+#include "pw_bytes/span.h"
#include "pw_log/log.h"
+#include "pw_protobuf/decoder.h"
#include "pw_rpc/internal/config.h"
-namespace pw::rpc::internal {
-
+namespace pw::rpc {
namespace {
// TODO(pwbug/615): Dynamically allocate this buffer if
// PW_RPC_DYNAMIC_ALLOCATION is enabled.
std::array<std::byte, cfg::kEncodingBufferSizeBytes> encoding_buffer
- PW_GUARDED_BY(rpc_lock());
+ PW_GUARDED_BY(internal::rpc_lock());
} // namespace
+Result<uint32_t> ExtractChannelId(ConstByteSpan packet) {
+ protobuf::Decoder decoder(packet);
+
+ while (decoder.Next().ok()) {
+ switch (static_cast<internal::RpcPacket::Fields>(decoder.FieldNumber())) {
+ case internal::RpcPacket::Fields::CHANNEL_ID: {
+ uint32_t channel_id;
+ PW_TRY(decoder.ReadUint32(&channel_id));
+ return channel_id;
+ }
+
+ default:
+ continue;
+ }
+ }
+
+ return Status::DataLoss();
+}
+
+namespace internal {
+
ByteSpan GetPayloadBuffer() PW_EXCLUSIVE_LOCKS_REQUIRED(rpc_lock()) {
return ByteSpan(encoding_buffer)
.subspan(Packet::kMinEncodedSizeWithoutPayload);
@@ -61,4 +83,5 @@ Status Channel::Send(const Packet& packet) {
return OkStatus();
}
-} // namespace pw::rpc::internal
+} // namespace internal
+} // namespace pw::rpc
diff --git a/pw_rpc/channel_test.cc b/pw_rpc/channel_test.cc
index 930068704..b61fd8d75 100644
--- a/pw_rpc/channel_test.cc
+++ b/pw_rpc/channel_test.cc
@@ -14,6 +14,8 @@
#include "pw_rpc/channel.h"
+#include <cstddef>
+
#include "gtest/gtest.h"
#include "pw_rpc/internal/packet.h"
#include "pw_rpc/internal/test_utils.h"
@@ -21,8 +23,6 @@
namespace pw::rpc::internal {
namespace {
-using std::byte;
-
TEST(ChannelOutput, Name) {
class NameTester : public ChannelOutput {
public:
@@ -35,7 +35,7 @@ TEST(ChannelOutput, Name) {
}
constexpr Packet kTestPacket(
- PacketType::RESPONSE, 1, 42, 100, 0, {}, Status::NotFound());
+ PacketType::RESPONSE, 23, 42, 100, 0, {}, Status::NotFound());
const size_t kReservedSize = 2 /* type */ + 2 /* channel */ + 5 /* service */ +
5 /* method */ + 2 /* payload key */ +
2 /* status (if not OK) */;
@@ -56,5 +56,23 @@ TEST(Channel, TestPacket_ReservedSizeMatchesMinEncodedSizeBytes) {
EXPECT_EQ(kReservedSize, kTestPacket.MinEncodedSizeBytes());
}
+TEST(ExtractChannelId, ValidPacket) {
+ std::byte buffer[64] = {};
+ Result<ConstByteSpan> result = kTestPacket.Encode(buffer);
+ ASSERT_EQ(result.status(), OkStatus());
+
+ Result<uint32_t> channel_id = ExtractChannelId(*result);
+ ASSERT_EQ(channel_id.status(), OkStatus());
+ EXPECT_EQ(*channel_id, 23u);
+}
+
+TEST(ExtractChannelId, InvalidPacket) {
+ constexpr std::byte buffer[64] = {std::byte{1}, std::byte{2}};
+
+ Result<uint32_t> channel_id = ExtractChannelId(buffer);
+
+ EXPECT_EQ(channel_id.status(), Status::DataLoss());
+}
+
} // namespace
} // namespace pw::rpc::internal
diff --git a/pw_rpc/client.cc b/pw_rpc/client.cc
index 4aa740437..21a950d0a 100644
--- a/pw_rpc/client.cc
+++ b/pw_rpc/client.cc
@@ -32,9 +32,7 @@ using internal::PacketType;
} // namespace
Status Client::ProcessPacket(ConstByteSpan data) {
- PW_TRY_ASSIGN(Result<Packet> result,
- Endpoint::ProcessPacket(data, Packet::kClient));
- Packet& packet = *result;
+ PW_TRY_ASSIGN(Packet packet, Endpoint::ProcessPacket(data, Packet::kClient));
// Find an existing call for this RPC, if any.
internal::rpc_lock().lock();
diff --git a/pw_rpc/client_call.cc b/pw_rpc/client_call.cc
index 567b54863..d8e57e89a 100644
--- a/pw_rpc/client_call.cc
+++ b/pw_rpc/client_call.cc
@@ -18,7 +18,7 @@ namespace pw::rpc::internal {
void ClientCall::CloseClientCall() {
if (client_stream_open()) {
- CloseClientStreamLocked();
+ CloseClientStreamLocked().IgnoreError();
}
UnregisterAndMarkClosed();
}
diff --git a/pw_rpc/client_integration_test.cc b/pw_rpc/client_integration_test.cc
index f376efa16..94ce8070c 100644
--- a/pw_rpc/client_integration_test.cc
+++ b/pw_rpc/client_integration_test.cc
@@ -86,7 +86,7 @@ TEST(RawRpcIntegrationTest, BidirectionalStreaming) {
ASSERT_EQ(OkStatus(), call.Write(std::as_bytes(std::span("Dello"))));
EXPECT_STREQ(receiver.Wait(), "Dello");
- call.Cancel();
+ ASSERT_EQ(OkStatus(), call.Cancel());
}
}
diff --git a/pw_rpc/client_server_test.cc b/pw_rpc/client_server_test.cc
index 12c2daa55..bddcea849 100644
--- a/pw_rpc/client_server_test.cc
+++ b/pw_rpc/client_server_test.cc
@@ -31,8 +31,8 @@ constexpr uint32_t kFakeMethodId = 10;
RawFakeChannelOutput<1> output;
rpc::Channel channels[] = {Channel::Create<kFakeChannelId>(&output)};
-StatusWithSize FakeMethod(ConstByteSpan, ByteSpan) {
- return StatusWithSize::Unimplemented();
+void FakeMethod(ConstByteSpan, RawUnaryResponder& responder) {
+ ASSERT_EQ(OkStatus(), responder.Finish({}, Status::Unimplemented()));
}
class FakeService : public Service {
@@ -40,7 +40,7 @@ class FakeService : public Service {
FakeService(uint32_t id) : Service(id, kMethods) {}
static constexpr std::array<RawMethodUnion, 1> kMethods = {
- RawMethod::SynchronousUnary<FakeMethod>(kFakeMethodId),
+ RawMethod::AsynchronousUnary<FakeMethod>(kFakeMethodId),
};
};
diff --git a/pw_rpc/docs.rst b/pw_rpc/docs.rst
index 0f4efb683..25554f05a 100644
--- a/pw_rpc/docs.rst
+++ b/pw_rpc/docs.rst
@@ -303,13 +303,12 @@ channel output and the example service.
// Declare the pw_rpc server with the HDLC channel.
pw::rpc::Server server(channels);
- pw::rpc::TheService the_service;
+ foo::bar::TheService the_service;
+ pw::rpc::SomeOtherService some_other_service;
void RegisterServices() {
- // Register the foo.bar.TheService example service.
- server.Register(the_service);
-
- // Register other services
+ // Register the foo.bar.TheService example service and another service.
+ server.RegisterService(the_service, some_other_service);
}
int main() {
@@ -1121,6 +1120,7 @@ outgoing packets. The size of the buffer is set with
Users of ``pw_rpc`` must implement the :cpp:class:`pw::rpc::ChannelOutput`
interface.
+.. _module-pw_rpc-ChannelOutput:
.. cpp:class:: pw::rpc::ChannelOutput
``pw_rpc`` endpoints use :cpp:class:`ChannelOutput` instances to send packets.
diff --git a/pw_rpc/fake_channel_output.cc b/pw_rpc/fake_channel_output.cc
index a8008e632..530a03a6d 100644
--- a/pw_rpc/fake_channel_output.cc
+++ b/pw_rpc/fake_channel_output.cc
@@ -26,13 +26,14 @@
namespace pw::rpc::internal::test {
void FakeChannelOutput::clear() {
+ LockGuard lock(mutex_);
payloads_.clear();
packets_.clear();
send_status_ = OkStatus();
return_after_packet_count_ = -1;
}
-Status FakeChannelOutput::Send(std::span<const std::byte> buffer) {
+Status FakeChannelOutput::HandlePacket(std::span<const std::byte> buffer) {
// If the buffer is empty, this is just releasing an unused buffer.
if (buffer.empty()) {
return OkStatus();
@@ -42,7 +43,7 @@ Status FakeChannelOutput::Send(std::span<const std::byte> buffer) {
return send_status_;
}
if (return_after_packet_count_ > 0 &&
- return_after_packet_count_ == static_cast<int>(total_packets())) {
+ return_after_packet_count_ == static_cast<int>(packets_.size())) {
// Disable behavior.
return_after_packet_count_ = -1;
return send_status_;
@@ -107,6 +108,8 @@ void FakeChannelOutput::CopyPayloadToBuffer(Packet& packet) {
}
void FakeChannelOutput::LogPackets() const {
+ LockGuard lock(mutex_);
+
PW_LOG_INFO("%u packets have been sent through this FakeChannelOutput",
static_cast<unsigned>(packets_.size()));
diff --git a/pw_rpc/internal/packet.proto b/pw_rpc/internal/packet.proto
index 86fce960c..2862eb402 100644
--- a/pw_rpc/internal/packet.proto
+++ b/pw_rpc/internal/packet.proto
@@ -15,7 +15,7 @@ syntax = "proto3";
package pw.rpc.internal;
-option java_package = "dev.pigweed.pw.rpc.internal";
+option java_package = "dev.pigweed.pw_rpc.internal";
enum PacketType {
// To simplify identifying the origin of a packet, client-to-server packets
diff --git a/pw_rpc/java/main/dev/pigweed/pw_rpc/BUILD.bazel b/pw_rpc/java/main/dev/pigweed/pw_rpc/BUILD.bazel
index 5f834a029..4747b7f86 100644
--- a/pw_rpc/java/main/dev/pigweed/pw_rpc/BUILD.bazel
+++ b/pw_rpc/java/main/dev/pigweed/pw_rpc/BUILD.bazel
@@ -36,11 +36,11 @@ java_library(
],
visibility = ["//visibility:public"],
deps = [
+ "//pw_log/java/main/dev/pigweed/pw_log",
"//pw_rpc:packet_proto_java_lite",
"//third_party/google_auto:value",
"@com_google_protobuf//java/lite",
"@maven//:com_google_code_findbugs_jsr305",
- "@maven//:com_google_flogger_flogger",
"@maven//:com_google_guava_guava",
],
)
diff --git a/pw_rpc/java/main/dev/pigweed/pw_rpc/Client.java b/pw_rpc/java/main/dev/pigweed/pw_rpc/Client.java
index 64d934b6b..a62d77d76 100644
--- a/pw_rpc/java/main/dev/pigweed/pw_rpc/Client.java
+++ b/pw_rpc/java/main/dev/pigweed/pw_rpc/Client.java
@@ -14,12 +14,12 @@
package dev.pigweed.pw_rpc;
-// import com.google.common.flogger.FluentLogger;
import com.google.protobuf.ExtensionRegistryLite;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.MessageLite;
-import dev.pigweed.pw.rpc.internal.Packet.PacketType;
-import dev.pigweed.pw.rpc.internal.Packet.RpcPacket;
+import dev.pigweed.pw_log.Logger;
+import dev.pigweed.pw_rpc.internal.Packet.PacketType;
+import dev.pigweed.pw_rpc.internal.Packet.RpcPacket;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.List;
@@ -33,8 +33,7 @@ import javax.annotation.Nullable;
* through the processPacket function.
*/
public class Client {
- // TODO(pwbug/611): Restore logging without a mandatory Flogger dependency.
- // private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+ private static final Logger logger = Logger.forClass(Client.class);
private final Map<Integer, Channel> channels;
private final Map<Integer, Service> services;
@@ -81,17 +80,17 @@ public class Client {
return create(channels, services, (rpc) -> new StreamObserver<MessageLite>() {
@Override
public void onNext(MessageLite value) {
- // logger.atFine().log("%s received response: %s", rpc, value);
+ logger.atFine().log("%s received response: %s", rpc, value);
}
@Override
public void onCompleted(Status status) {
- // logger.atInfo().log("%s completed with status %s", rpc, status);
+ logger.atInfo().log("%s completed with status %s", rpc, status);
}
@Override
public void onError(Status status) {
- // logger.atWarning().log("%s terminated with error %s", rpc, status);
+ logger.atWarning().log("%s terminated with error %s", rpc, status);
}
});
}
@@ -167,31 +166,30 @@ public class Client {
try {
packet = RpcPacket.parseFrom(data, ExtensionRegistryLite.getEmptyRegistry());
} catch (InvalidProtocolBufferException e) {
- // logger.atWarning().withCause(e).log("Failed to decode packet");
+ logger.atWarning().withCause(e).log("Failed to decode packet");
return false;
}
if (packet.getChannelId() == 0 || packet.getServiceId() == 0 || packet.getMethodId() == 0) {
- // logger.atWarning().log("Received corrupt packet with unset IDs");
+ logger.atWarning().log("Received corrupt packet with unset IDs");
return false;
}
// Packets for the server use even type values.
if (packet.getTypeValue() % 2 == 0) {
- // logger.atFine().log("Ignoring %s packet for server", packet.getType().name());
+ logger.atFine().log("Ignoring %s packet for server", packet.getType().name());
return false;
}
Channel channel = channels.get(packet.getChannelId());
if (channel == null) {
- // logger.atWarning().log(
- // "Received packet for unrecognized channel %d", packet.getChannelId());
+ logger.atWarning().log("Received packet for unrecognized channel %d", packet.getChannelId());
return false;
}
PendingRpc rpc = lookupRpc(channel, packet);
if (rpc == null) {
- // logger.atInfo().log("Ignoring packet for unknown service method");
+ logger.atInfo().log("Ignoring packet for unknown service method");
sendError(channel, packet, Status.NOT_FOUND);
return true; // true since the packet was handled, even though it was invalid.
}
@@ -200,31 +198,42 @@ public class Client {
StreamObserverCall<?, ?> call =
packet.getType().equals(PacketType.SERVER_STREAM) ? rpcs.getPending(rpc) : rpcs.clear(rpc);
if (call == null) {
- // logger.atInfo().log(
- // "Ignoring packet for RPC (%s) that isn't pending. Pending RPCs are: %s", rpc, rpcs);
+ logger.atFine().log(
+ "Ignoring packet for %s, which isn't pending. Pending RPCs are %s", rpc, rpcs);
sendError(channel, packet, Status.FAILED_PRECONDITION);
return true;
}
switch (packet.getType()) {
- case SERVER_ERROR:
+ case SERVER_ERROR: {
Status status = decodeStatus(packet);
- // logger.atWarning().log("RPC %s failed with error %s", rpc, status);
+ logger.atWarning().log("%s failed with error %s", rpc, status);
call.onError(status);
break;
- case RESPONSE:
+ }
+ case RESPONSE: {
+ Status status = decodeStatus(packet);
// Server streaming an unary RPCs include a payload with their response packet.
if (!rpc.method().isServerStreaming()) {
+ logger.atFiner().log("%s completed with status %s and %d B payload",
+ rpc,
+ status,
+ packet.getPayload().size());
call.onNext(packet.getPayload());
+ } else {
+ logger.atFiner().log("%s completed with status %s", rpc, status);
}
- call.onCompleted(decodeStatus(packet));
+ call.onCompleted(status);
break;
+ }
case SERVER_STREAM:
+ logger.atFiner().log(
+ "%s received server stream with %d B payload", rpc, packet.getPayload().size());
call.onNext(packet.getPayload());
break;
default:
- // logger.atWarning().log(
- // "Unexpected PacketType %d for RPC %s", packet.getType().getNumber(), rpc);
+ logger.atWarning().log(
+ "%s received unexpected PacketType %d", rpc, packet.getType().getNumber());
}
return true;
@@ -234,7 +243,7 @@ public class Client {
try {
channel.send(Packets.error(packet, status));
} catch (ChannelOutputException e) {
- // logger.atWarning().withCause(e).log("Failed to send error packet");
+ logger.atWarning().withCause(e).log("Failed to send error packet");
}
}
@@ -254,8 +263,8 @@ public class Client {
private static Status decodeStatus(RpcPacket packet) {
Status status = Status.fromCode(packet.getStatus());
if (status == null) {
- // logger.atWarning().log(
- // "Illegal status code %d in packet; using Status.UNKNOWN ", packet.getStatus());
+ logger.atWarning().log(
+ "Illegal status code %d in packet; using Status.UNKNOWN ", packet.getStatus());
return Status.UNKNOWN;
}
return status;
diff --git a/pw_rpc/java/main/dev/pigweed/pw_rpc/Packets.java b/pw_rpc/java/main/dev/pigweed/pw_rpc/Packets.java
index 2af050c39..3260bb13f 100644
--- a/pw_rpc/java/main/dev/pigweed/pw_rpc/Packets.java
+++ b/pw_rpc/java/main/dev/pigweed/pw_rpc/Packets.java
@@ -15,8 +15,8 @@
package dev.pigweed.pw_rpc;
import com.google.protobuf.MessageLite;
-import dev.pigweed.pw.rpc.internal.Packet.PacketType;
-import dev.pigweed.pw.rpc.internal.Packet.RpcPacket;
+import dev.pigweed.pw_rpc.internal.Packet.PacketType;
+import dev.pigweed.pw_rpc.internal.Packet.RpcPacket;
/** Encodes pw_rpc packets of various types. */
/* package */ class Packets {
diff --git a/pw_rpc/java/main/dev/pigweed/pw_rpc/PendingRpc.java b/pw_rpc/java/main/dev/pigweed/pw_rpc/PendingRpc.java
index 6cc9b4719..59e210d8a 100644
--- a/pw_rpc/java/main/dev/pigweed/pw_rpc/PendingRpc.java
+++ b/pw_rpc/java/main/dev/pigweed/pw_rpc/PendingRpc.java
@@ -32,7 +32,6 @@ public abstract class PendingRpc {
@Override
public final String toString() {
- return String.format(
- Locale.ENGLISH, "PendingRpc(channel=%d, method=%s)", channel().id(), method());
+ return String.format(Locale.ENGLISH, "RpcCall[%s channel=%d]", method(), channel().id());
}
}
diff --git a/pw_rpc/java/main/dev/pigweed/pw_rpc/RpcManager.java b/pw_rpc/java/main/dev/pigweed/pw_rpc/RpcManager.java
index 12e6db1a6..bc690bb28 100644
--- a/pw_rpc/java/main/dev/pigweed/pw_rpc/RpcManager.java
+++ b/pw_rpc/java/main/dev/pigweed/pw_rpc/RpcManager.java
@@ -14,16 +14,16 @@
package dev.pigweed.pw_rpc;
-// import com.google.common.flogger.FluentLogger;
import com.google.protobuf.MessageLite;
+import dev.pigweed.pw_log.Logger;
import java.util.HashMap;
import java.util.Map;
import javax.annotation.Nullable;
/** Tracks the state of service method invocations. */
public class RpcManager {
- // TODO(pwbug/611): Restore logging without a mandatory Flogger dependency.
- // private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+ private static final Logger logger = Logger.forClass(RpcManager.class);
+
private final Map<PendingRpc, StreamObserverCall<?, ?>> pending = new HashMap<>();
/**
@@ -37,7 +37,7 @@ public class RpcManager {
public synchronized StreamObserverCall<?, ?> start(
PendingRpc rpc, StreamObserverCall<?, ?> call, @Nullable MessageLite payload)
throws ChannelOutputException {
- // logger.atFine().log("Start %s", rpc);
+ logger.atFine().log("%s starting", rpc);
rpc.channel().send(Packets.request(rpc, payload));
return pending.put(rpc, call);
}
@@ -51,12 +51,12 @@ public class RpcManager {
@Nullable
public synchronized StreamObserverCall<?, ?> open(
PendingRpc rpc, StreamObserverCall<?, ?> call, @Nullable MessageLite payload) {
- // logger.atFine().log("Open %s", rpc);
+ logger.atFine().log("%s opening", rpc);
try {
rpc.channel().send(Packets.request(rpc, payload));
} catch (ChannelOutputException e) {
- // logger.atFine().withCause(e).log(
- // "Ignoring error opening %s; listening for unrequested responses", rpc);
+ logger.atFiner().withCause(e).log(
+ "Ignoring error opening %s; listening for unrequested responses", rpc);
}
return pending.put(rpc, call);
}
@@ -67,7 +67,7 @@ public class RpcManager {
throws ChannelOutputException {
StreamObserverCall<?, ?> call = pending.remove(rpc);
if (call != null) {
- // logger.atFine().log("Cancel %s", rpc);
+ logger.atFine().log("%s was cancelled", rpc);
rpc.channel().send(Packets.cancel(rpc));
}
return call;
@@ -88,6 +88,7 @@ public class RpcManager {
throws ChannelOutputException {
StreamObserverCall<?, ?> call = pending.get(rpc);
if (call != null) {
+ logger.atFiner().log("%s client stream closed", rpc);
rpc.channel().send(Packets.clientStreamEnd(rpc));
}
return call;
@@ -95,11 +96,7 @@ public class RpcManager {
@Nullable
public synchronized StreamObserverCall<?, ?> clear(PendingRpc rpc) {
- StreamObserverCall<?, ?> call = pending.remove(rpc);
- if (call != null) {
- // logger.atFine().log("Clear %s", rpc);
- }
- return call;
+ return pending.remove(rpc);
}
@Nullable
diff --git a/pw_rpc/java/main/dev/pigweed/pw_rpc/Status.java b/pw_rpc/java/main/dev/pigweed/pw_rpc/Status.java
index 72a1cfe5c..58ef8b2ee 100644
--- a/pw_rpc/java/main/dev/pigweed/pw_rpc/Status.java
+++ b/pw_rpc/java/main/dev/pigweed/pw_rpc/Status.java
@@ -14,6 +14,8 @@
package dev.pigweed.pw_rpc;
+import javax.annotation.Nullable;
+
/** Status object for RPC statuses. Must match gRPC's status codes. */
public enum Status {
OK(0),
@@ -56,6 +58,7 @@ public enum Status {
return code == 0;
}
+ @Nullable
public static Status fromCode(int code) {
return code >= 0 && code < values.length ? values[code] : null;
}
diff --git a/pw_rpc/java/main/dev/pigweed/pw_rpc/StreamObserverCall.java b/pw_rpc/java/main/dev/pigweed/pw_rpc/StreamObserverCall.java
index f59d1518b..537362c6c 100644
--- a/pw_rpc/java/main/dev/pigweed/pw_rpc/StreamObserverCall.java
+++ b/pw_rpc/java/main/dev/pigweed/pw_rpc/StreamObserverCall.java
@@ -14,11 +14,11 @@
package dev.pigweed.pw_rpc;
-// import com.google.common.flogger.FluentLogger;
import com.google.common.util.concurrent.AbstractFuture;
import com.google.protobuf.ByteString;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.MessageLite;
+import dev.pigweed.pw_log.Logger;
import dev.pigweed.pw_rpc.Call.ClientStreaming;
import java.util.function.Consumer;
import javax.annotation.Nullable;
@@ -35,8 +35,7 @@ import javax.annotation.Nullable;
*/
class StreamObserverCall<RequestT extends MessageLite, ResponseT extends MessageLite>
implements ClientStreaming<RequestT> {
- // TODO(pwbug/611): Restore logging without a mandatory Flogger dependency.
- // private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+ private static final Logger logger = Logger.forClass(StreamObserverCall.class);
private final RpcManager rpcs;
private final PendingRpc rpc;
@@ -258,8 +257,8 @@ class StreamObserverCall<RequestT extends MessageLite, ResponseT extends Message
try {
return (ResponseT) rpc.method().decodeResponsePayload(payload);
} catch (InvalidProtocolBufferException e) {
- // logger.atWarning().withCause(e).log(
- // "Failed to decode response for method %s; skipping packet", rpc.method().name());
+ logger.atWarning().withCause(e).log(
+ "Failed to decode response for method %s; skipping packet", rpc.method().name());
return null;
}
}
diff --git a/pw_rpc/java/test/dev/pigweed/pw_rpc/ClientTest.java b/pw_rpc/java/test/dev/pigweed/pw_rpc/ClientTest.java
index f3142b4c1..affffa598 100644
--- a/pw_rpc/java/test/dev/pigweed/pw_rpc/ClientTest.java
+++ b/pw_rpc/java/test/dev/pigweed/pw_rpc/ClientTest.java
@@ -26,8 +26,8 @@ import com.google.common.collect.ImmutableList;
import com.google.protobuf.ExtensionRegistryLite;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.MessageLite;
-import dev.pigweed.pw.rpc.internal.Packet.PacketType;
-import dev.pigweed.pw.rpc.internal.Packet.RpcPacket;
+import dev.pigweed.pw_rpc.internal.Packet.PacketType;
+import dev.pigweed.pw_rpc.internal.Packet.RpcPacket;
import java.util.ArrayList;
import java.util.List;
import org.junit.Before;
diff --git a/pw_rpc/java/test/dev/pigweed/pw_rpc/PacketsTest.java b/pw_rpc/java/test/dev/pigweed/pw_rpc/PacketsTest.java
index a3dd4bfd4..6a1e771f8 100644
--- a/pw_rpc/java/test/dev/pigweed/pw_rpc/PacketsTest.java
+++ b/pw_rpc/java/test/dev/pigweed/pw_rpc/PacketsTest.java
@@ -17,8 +17,8 @@ package dev.pigweed.pw_rpc;
import static com.google.common.truth.Truth.assertThat;
import com.google.protobuf.ExtensionRegistryLite;
-import dev.pigweed.pw.rpc.internal.Packet.PacketType;
-import dev.pigweed.pw.rpc.internal.Packet.RpcPacket;
+import dev.pigweed.pw_rpc.internal.Packet.PacketType;
+import dev.pigweed.pw_rpc.internal.Packet.RpcPacket;
import org.junit.Test;
import org.junit.runner.RunWith;
diff --git a/pw_rpc/java/test/dev/pigweed/pw_rpc/RpcManagerTest.java b/pw_rpc/java/test/dev/pigweed/pw_rpc/RpcManagerTest.java
index 0c8af5307..f25c66283 100644
--- a/pw_rpc/java/test/dev/pigweed/pw_rpc/RpcManagerTest.java
+++ b/pw_rpc/java/test/dev/pigweed/pw_rpc/RpcManagerTest.java
@@ -22,8 +22,8 @@ import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import com.google.protobuf.MessageLite;
-import dev.pigweed.pw.rpc.internal.Packet.PacketType;
-import dev.pigweed.pw.rpc.internal.Packet.RpcPacket;
+import dev.pigweed.pw_rpc.internal.Packet.PacketType;
+import dev.pigweed.pw_rpc.internal.Packet.RpcPacket;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
diff --git a/pw_rpc/java/test/dev/pigweed/pw_rpc/StreamObserverCallTest.java b/pw_rpc/java/test/dev/pigweed/pw_rpc/StreamObserverCallTest.java
index 71c4ec3fa..f4eba688b 100644
--- a/pw_rpc/java/test/dev/pigweed/pw_rpc/StreamObserverCallTest.java
+++ b/pw_rpc/java/test/dev/pigweed/pw_rpc/StreamObserverCallTest.java
@@ -20,10 +20,10 @@ import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
-import dev.pigweed.pw.rpc.internal.Packet.PacketType;
-import dev.pigweed.pw.rpc.internal.Packet.RpcPacket;
import dev.pigweed.pw_rpc.StreamObserverCall.StreamResponseFuture;
import dev.pigweed.pw_rpc.StreamObserverCall.UnaryResponseFuture;
+import dev.pigweed.pw_rpc.internal.Packet.PacketType;
+import dev.pigweed.pw_rpc.internal.Packet.RpcPacket;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
diff --git a/pw_rpc/java/test/dev/pigweed/pw_rpc/TestClient.java b/pw_rpc/java/test/dev/pigweed/pw_rpc/TestClient.java
index 0b2f10a2d..6688e2422 100644
--- a/pw_rpc/java/test/dev/pigweed/pw_rpc/TestClient.java
+++ b/pw_rpc/java/test/dev/pigweed/pw_rpc/TestClient.java
@@ -19,8 +19,8 @@ import static java.util.Arrays.stream;
import com.google.common.collect.ImmutableList;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.MessageLite;
-import dev.pigweed.pw.rpc.internal.Packet.PacketType;
-import dev.pigweed.pw.rpc.internal.Packet.RpcPacket;
+import dev.pigweed.pw_rpc.internal.Packet.PacketType;
+import dev.pigweed.pw_rpc.internal.Packet.RpcPacket;
import java.util.ArrayList;
import java.util.EnumMap;
import java.util.List;
diff --git a/pw_rpc/nanopb/BUILD.bazel b/pw_rpc/nanopb/BUILD.bazel
new file mode 100644
index 000000000..f5a87f6ea
--- /dev/null
+++ b/pw_rpc/nanopb/BUILD.bazel
@@ -0,0 +1,262 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load(
+ "//pw_build:pigweed.bzl",
+ "pw_cc_library",
+ "pw_cc_test",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+pw_cc_library(
+ name = "server_api",
+ srcs = [
+ "method.cc",
+ "server_reader_writer.cc",
+ ],
+ hdrs = [
+ "public/pw_rpc/nanopb/internal/method.h",
+ "public/pw_rpc/nanopb/internal/method_union.h",
+ "public/pw_rpc/nanopb/server_reader_writer.h",
+ ],
+ includes = ["public"],
+ deps = [
+ ":common",
+ "//pw_rpc/raw:server_api",
+ ],
+)
+
+pw_cc_library(
+ name = "client_api",
+ hdrs = [
+ "public/pw_rpc/nanopb/client_reader_writer.h",
+ ],
+ includes = ["public"],
+ deps = [
+ ":common",
+ ],
+)
+
+pw_cc_library(
+ name = "common",
+ srcs = ["common.cc"],
+ hdrs = [
+ "public/pw_rpc/nanopb/internal/common.h",
+ "public/pw_rpc/nanopb/server_reader_writer.h",
+ ],
+ includes = ["public"],
+ deps = [
+ "//pw_rpc",
+ "@com_github_nanopb_nanopb//:nanopb",
+ ],
+)
+
+pw_cc_library(
+ name = "test_method_context",
+ hdrs = [
+ "public/pw_rpc/nanopb/fake_channel_output.h",
+ "public/pw_rpc/nanopb/test_method_context.h",
+ ],
+ includes = ["public"],
+ deps = [
+ "//pw_containers",
+ "//pw_rpc:internal_test_utils",
+ ],
+)
+
+pw_cc_library(
+ name = "client_testing",
+ hdrs = [
+ "public/pw_rpc/nanopb/client_testing.h",
+ ],
+ includes = ["public"],
+ deps = [
+ ":test_method_context",
+ "//pw_rpc",
+ "//pw_rpc/raw:client_testing",
+ ],
+)
+
+pw_cc_library(
+ name = "internal_test_utils",
+ hdrs = ["pw_rpc_nanopb_private/internal_test_utils.h"],
+ deps = ["//pw_rpc:internal_test_utils"],
+)
+
+pw_cc_library(
+ name = "echo_service",
+ hdrs = ["public/pw_rpc/echo_service_nanopb.h"],
+ deps = [
+ "//pw_rpc:echo_cc.nanopb_rpc",
+ ],
+)
+
+# TODO(pwbug/507): Enable this library when logging_event_handler can be used.
+filegroup(
+ name = "client_integration_test",
+ srcs = [
+ "client_integration_test.cc",
+ ],
+ #deps = [
+ # "//pw_rpc:integration_testing",
+ # "//pw_sync:binary_semaphore",
+ # "//pw_rpc:benchmark_cc.nanopb_rpc",
+ #]
+)
+
+pw_cc_test(
+ name = "client_call_test",
+ srcs = [
+ "client_call_test.cc",
+ ],
+ deps = [
+ ":client_api",
+ ":internal_test_utils",
+ "//pw_rpc",
+ "//pw_rpc:pw_rpc_test_cc.nanopb",
+ ],
+)
+
+pw_cc_test(
+ name = "client_reader_writer_test",
+ srcs = [
+ "client_reader_writer_test.cc",
+ ],
+ deps = [
+ ":client_api",
+ ":client_testing",
+ "//pw_rpc:pw_rpc_test_cc.nanopb_rpc",
+ ],
+)
+
+pw_cc_test(
+ name = "codegen_test",
+ srcs = [
+ "codegen_test.cc",
+ ],
+ deps = [
+ ":internal_test_utils",
+ ":test_method_context",
+ "//pw_preprocessor",
+ "//pw_rpc:internal_test_utils",
+ "//pw_rpc:pw_rpc_test_cc.nanopb_rpc",
+ ],
+)
+
+pw_cc_test(
+ name = "fake_channel_output_test",
+ srcs = ["fake_channel_output_test.cc"],
+ deps = [
+ ":common",
+ ":server_api",
+ ":test_method_context",
+ "//pw_rpc:internal_test_utils",
+ "//pw_rpc:pw_rpc_test_cc.nanopb_rpc",
+ ],
+)
+
+pw_cc_test(
+ name = "method_test",
+ srcs = ["method_test.cc"],
+ deps = [
+ ":internal_test_utils",
+ ":server_api",
+ "//pw_rpc",
+ "//pw_rpc:internal_test_utils",
+ "//pw_rpc:pw_rpc_test_cc.nanopb",
+ ],
+)
+
+pw_cc_test(
+ name = "method_info_test",
+ srcs = ["method_info_test.cc"],
+ deps = [
+ "//pw_rpc",
+ "//pw_rpc:internal_test_utils",
+ "//pw_rpc:pw_rpc_test_cc.nanopb_rpc",
+ ],
+)
+
+pw_cc_test(
+ name = "method_lookup_test",
+ srcs = ["method_lookup_test.cc"],
+ deps = [
+ ":test_method_context",
+ "//pw_rpc:pw_rpc_test_cc.nanopb_rpc",
+ "//pw_rpc/raw:test_method_context",
+ ],
+)
+
+pw_cc_test(
+ name = "method_union_test",
+ srcs = ["method_union_test.cc"],
+ deps = [
+ ":internal_test_utils",
+ ":server_api",
+ "//pw_rpc:internal_test_utils",
+ "//pw_rpc:pw_rpc_test_cc.nanopb",
+ ],
+)
+
+# TODO(pwbug/628): Requires nanopb options file support to compile.
+filegroup(
+ name = "echo_service_test",
+ srcs = ["echo_service_test.cc"],
+ # deps = [
+ # ":echo_service",
+ # ":test_method_context",
+ # ],
+)
+
+pw_cc_test(
+ name = "server_reader_writer_test",
+ srcs = ["server_reader_writer_test.cc"],
+ deps = [
+ ":server_api",
+ ":test_method_context",
+ "//pw_rpc",
+ "//pw_rpc:pw_rpc_test_cc.nanopb_rpc",
+ ],
+)
+
+pw_cc_test(
+ name = "serde_test",
+ srcs = ["serde_test.cc"],
+ deps = [
+ ":common",
+ "//pw_rpc:pw_rpc_test_cc.nanopb",
+ ],
+)
+
+pw_cc_test(
+ name = "server_callback_test",
+ srcs = ["server_callback_test.cc"],
+ deps = [
+ ":test_method_context",
+ "//pw_rpc",
+ "//pw_rpc:pw_rpc_test_cc.nanopb_rpc",
+ "@com_github_nanopb_nanopb//:nanopb",
+ ],
+)
+
+pw_cc_test(
+ name = "stub_generation_test",
+ srcs = ["stub_generation_test.cc"],
+ deps = [
+ "//pw_rpc:pw_rpc_test_cc.nanopb_rpc",
+ ],
+)
diff --git a/pw_rpc/nanopb/client_call_test.cc b/pw_rpc/nanopb/client_call_test.cc
index 4ed3715ee..837620557 100644
--- a/pw_rpc/nanopb/client_call_test.cc
+++ b/pw_rpc/nanopb/client_call_test.cc
@@ -138,7 +138,7 @@ TEST_F(UnaryClientCall, InvokesCallbackOnValidResponse) {
});
PW_ENCODE_PB(pw_rpc_test_TestResponse, response, .value = 42);
- context.SendResponse(OkStatus(), response);
+ EXPECT_EQ(OkStatus(), context.SendResponse(OkStatus(), response));
ASSERT_EQ(responses_received_, 1);
EXPECT_EQ(last_status_, OkStatus());
@@ -155,7 +155,7 @@ TEST_F(UnaryClientCall, DoesNothingOnNullCallback) {
nullptr);
PW_ENCODE_PB(pw_rpc_test_TestResponse, response, .value = 42);
- context.SendResponse(OkStatus(), response);
+ EXPECT_EQ(OkStatus(), context.SendResponse(OkStatus(), response));
ASSERT_EQ(responses_received_, 0);
}
@@ -176,7 +176,7 @@ TEST_F(UnaryClientCall, InvokesErrorCallbackOnInvalidResponse) {
constexpr std::byte bad_payload[]{
std::byte{0xab}, std::byte{0xcd}, std::byte{0xef}};
- context.SendResponse(OkStatus(), bad_payload);
+ EXPECT_EQ(OkStatus(), context.SendResponse(OkStatus(), bad_payload));
EXPECT_EQ(responses_received_, 0);
ASSERT_TRUE(last_error_.has_value());
@@ -197,7 +197,9 @@ TEST_F(UnaryClientCall, InvokesErrorCallbackOnServerError) {
},
[this](Status status) { last_error_ = status; });
- context.SendPacket(internal::PacketType::SERVER_ERROR, Status::NotFound());
+ EXPECT_EQ(OkStatus(),
+ context.SendPacket(internal::PacketType::SERVER_ERROR,
+ Status::NotFound()));
EXPECT_EQ(responses_received_, 0);
EXPECT_EQ(last_error_, Status::NotFound());
@@ -218,7 +220,7 @@ TEST_F(UnaryClientCall, DoesNothingOnErrorWithoutCallback) {
constexpr std::byte bad_payload[]{
std::byte{0xab}, std::byte{0xcd}, std::byte{0xef}};
- context.SendResponse(OkStatus(), bad_payload);
+ EXPECT_EQ(OkStatus(), context.SendResponse(OkStatus(), bad_payload));
EXPECT_EQ(responses_received_, 0);
}
@@ -237,11 +239,11 @@ TEST_F(UnaryClientCall, OnlyReceivesOneResponse) {
});
PW_ENCODE_PB(pw_rpc_test_TestResponse, r1, .value = 42);
- context.SendResponse(Status::Unimplemented(), r1);
+ EXPECT_EQ(OkStatus(), context.SendResponse(Status::Unimplemented(), r1));
PW_ENCODE_PB(pw_rpc_test_TestResponse, r2, .value = 44);
- context.SendResponse(Status::OutOfRange(), r2);
+ EXPECT_EQ(OkStatus(), context.SendResponse(Status::OutOfRange(), r2));
PW_ENCODE_PB(pw_rpc_test_TestResponse, r3, .value = 46);
- context.SendResponse(Status::Internal(), r3);
+ EXPECT_EQ(OkStatus(), context.SendResponse(Status::Internal(), r3));
EXPECT_EQ(responses_received_, 1);
EXPECT_EQ(last_status_, Status::Unimplemented());
@@ -294,19 +296,19 @@ TEST_F(ServerStreamingClientCall, InvokesCallbackOnValidResponse) {
});
PW_ENCODE_PB(pw_rpc_test_TestStreamResponse, r1, .chunk = {}, .number = 11u);
- context.SendServerStream(r1);
+ EXPECT_EQ(OkStatus(), context.SendServerStream(r1));
EXPECT_TRUE(active_);
EXPECT_EQ(responses_received_, 1);
EXPECT_EQ(last_response_number_, 11);
PW_ENCODE_PB(pw_rpc_test_TestStreamResponse, r2, .chunk = {}, .number = 22u);
- context.SendServerStream(r2);
+ EXPECT_EQ(OkStatus(), context.SendServerStream(r2));
EXPECT_TRUE(active_);
EXPECT_EQ(responses_received_, 2);
EXPECT_EQ(last_response_number_, 22);
PW_ENCODE_PB(pw_rpc_test_TestStreamResponse, r3, .chunk = {}, .number = 33u);
- context.SendServerStream(r3);
+ EXPECT_EQ(OkStatus(), context.SendServerStream(r3));
EXPECT_TRUE(active_);
EXPECT_EQ(responses_received_, 3);
EXPECT_EQ(last_response_number_, 33);
@@ -329,18 +331,18 @@ TEST_F(ServerStreamingClientCall, InvokesStreamEndOnFinish) {
});
PW_ENCODE_PB(pw_rpc_test_TestStreamResponse, r1, .chunk = {}, .number = 11u);
- context.SendServerStream(r1);
+ EXPECT_EQ(OkStatus(), context.SendServerStream(r1));
EXPECT_TRUE(active_);
PW_ENCODE_PB(pw_rpc_test_TestStreamResponse, r2, .chunk = {}, .number = 22u);
- context.SendServerStream(r2);
+ EXPECT_EQ(OkStatus(), context.SendServerStream(r2));
EXPECT_TRUE(active_);
// Close the stream.
- context.SendResponse(Status::NotFound());
+ EXPECT_EQ(OkStatus(), context.SendResponse(Status::NotFound()));
PW_ENCODE_PB(pw_rpc_test_TestStreamResponse, r3, .chunk = {}, .number = 33u);
- context.SendServerStream(r3);
+ EXPECT_EQ(OkStatus(), context.SendServerStream(r3));
EXPECT_FALSE(active_);
EXPECT_EQ(responses_received_, 2);
@@ -361,25 +363,27 @@ TEST_F(ServerStreamingClientCall, InvokesErrorCallbackOnInvalidResponses) {
[this](Status error) { rpc_error_ = error; });
PW_ENCODE_PB(pw_rpc_test_TestStreamResponse, r1, .chunk = {}, .number = 11u);
- context.SendServerStream(r1);
+ EXPECT_EQ(OkStatus(), context.SendServerStream(r1));
EXPECT_TRUE(active_);
EXPECT_EQ(responses_received_, 1);
EXPECT_EQ(last_response_number_, 11);
constexpr std::byte bad_payload[]{
std::byte{0xab}, std::byte{0xcd}, std::byte{0xef}};
- context.SendServerStream(bad_payload);
+ EXPECT_EQ(OkStatus(), context.SendServerStream(bad_payload));
EXPECT_EQ(responses_received_, 1);
ASSERT_TRUE(rpc_error_.has_value());
EXPECT_EQ(rpc_error_, Status::DataLoss());
PW_ENCODE_PB(pw_rpc_test_TestStreamResponse, r2, .chunk = {}, .number = 22u);
- context.SendServerStream(r2);
+ EXPECT_EQ(OkStatus(), context.SendServerStream(r2));
EXPECT_TRUE(active_);
EXPECT_EQ(responses_received_, 2);
EXPECT_EQ(last_response_number_, 22);
- context.SendPacket(internal::PacketType::SERVER_ERROR, Status::NotFound());
+ EXPECT_EQ(OkStatus(),
+ context.SendPacket(internal::PacketType::SERVER_ERROR,
+ Status::NotFound()));
EXPECT_EQ(responses_received_, 2);
EXPECT_EQ(rpc_error_, Status::NotFound());
}
diff --git a/pw_rpc/nanopb/codegen_test.cc b/pw_rpc/nanopb/codegen_test.cc
index 2b7d69ab5..5d27c1d56 100644
--- a/pw_rpc/nanopb/codegen_test.cc
+++ b/pw_rpc/nanopb/codegen_test.cc
@@ -39,17 +39,21 @@ class TestService final
const pw_rpc_test_TestRequest& request,
NanopbUnaryResponder<pw_rpc_test_TestResponse>& responder) {
pw_rpc_test_TestResponse response{};
- responder.Finish(response, TestUnaryRpc(request, response));
+ EXPECT_EQ(OkStatus(),
+ responder.Finish(response, TestUnaryRpc(request, response)));
}
static void TestServerStreamRpc(
const pw_rpc_test_TestRequest& request,
ServerWriter<pw_rpc_test_TestStreamResponse>& writer) {
for (int i = 0; i < request.integer; ++i) {
- writer.Write({.chunk = {}, .number = static_cast<uint32_t>(i)});
+ EXPECT_EQ(
+ OkStatus(),
+ writer.Write({.chunk = {}, .number = static_cast<uint32_t>(i)}));
}
- writer.Finish(static_cast<Status::Code>(request.status_code));
+ EXPECT_EQ(OkStatus(),
+ writer.Finish(static_cast<Status::Code>(request.status_code)));
}
void TestClientStreamRpc(
@@ -137,13 +141,13 @@ TEST(NanopbCodegen, Server_InvokeServerStreamingRpc_ManualWriting) {
auto writer = context.writer();
- writer.Write({.chunk = {}, .number = 3});
- writer.Write({.chunk = {}, .number = 6});
- writer.Write({.chunk = {}, .number = 9});
+ EXPECT_EQ(OkStatus(), writer.Write({.chunk = {}, .number = 3}));
+ EXPECT_EQ(OkStatus(), writer.Write({.chunk = {}, .number = 6}));
+ EXPECT_EQ(OkStatus(), writer.Write({.chunk = {}, .number = 9}));
EXPECT_FALSE(context.done());
- writer.Finish(Status::Cancelled());
+ EXPECT_EQ(OkStatus(), writer.Finish(Status::Cancelled()));
ASSERT_TRUE(context.done());
EXPECT_EQ(Status::Cancelled(), context.status());
@@ -237,7 +241,7 @@ TEST(NanopbCodegen, Client_InvokesUnaryRpcWithCallback) {
EXPECT_EQ(sent_proto.integer, 123);
PW_ENCODE_PB(pw_rpc_test_TestResponse, response, .value = 42);
- context.SendResponse(OkStatus(), response);
+ EXPECT_EQ(OkStatus(), context.SendResponse(OkStatus(), response));
EXPECT_EQ(result.last_status, OkStatus());
EXPECT_EQ(result.response_value, 42);
@@ -283,11 +287,11 @@ TEST(NanopbCodegen, Client_InvokesServerStreamingRpcWithCallback) {
PW_ENCODE_PB(
pw_rpc_test_TestStreamResponse, response, .chunk = {}, .number = 11u);
- context.SendServerStream(response);
+ EXPECT_EQ(OkStatus(), context.SendServerStream(response));
EXPECT_TRUE(result.active);
EXPECT_EQ(result.response_value, 11);
- context.SendResponse(Status::NotFound());
+ EXPECT_EQ(OkStatus(), context.SendResponse(Status::NotFound()));
EXPECT_FALSE(result.active);
EXPECT_EQ(result.stream_status, Status::NotFound());
}
@@ -325,7 +329,7 @@ TEST(NanopbCodegen, Client_StaticMethod_InvokesUnaryRpcWithCallback) {
EXPECT_EQ(sent_proto.integer, 123);
PW_ENCODE_PB(pw_rpc_test_TestResponse, response, .value = 42);
- context.SendResponse(OkStatus(), response);
+ EXPECT_EQ(OkStatus(), context.SendResponse(OkStatus(), response));
EXPECT_EQ(result.last_status, OkStatus());
EXPECT_EQ(result.response_value, 42);
}
@@ -369,11 +373,11 @@ TEST(NanopbCodegen, Client_StaticMethod_InvokesServerStreamingRpcWithCallback) {
PW_ENCODE_PB(
pw_rpc_test_TestStreamResponse, response, .chunk = {}, .number = 11u);
- context.SendServerStream(response);
+ EXPECT_EQ(OkStatus(), context.SendServerStream(response));
EXPECT_TRUE(result.active);
EXPECT_EQ(result.response_value, 11);
- context.SendResponse(Status::NotFound());
+ EXPECT_EQ(OkStatus(), context.SendResponse(Status::NotFound()));
EXPECT_FALSE(result.active);
EXPECT_EQ(result.stream_status, Status::NotFound());
}
diff --git a/pw_rpc/nanopb/method_lookup_test.cc b/pw_rpc/nanopb/method_lookup_test.cc
index 7cc9af3db..7d45879fd 100644
--- a/pw_rpc/nanopb/method_lookup_test.cc
+++ b/pw_rpc/nanopb/method_lookup_test.cc
@@ -23,8 +23,9 @@ namespace {
class MixedService1
: public test::pw_rpc::nanopb::TestService::Service<MixedService1> {
public:
- StatusWithSize TestUnaryRpc(ConstByteSpan, ByteSpan) {
- return StatusWithSize(5);
+ void TestUnaryRpc(ConstByteSpan, RawUnaryResponder& responder) {
+ std::byte response[5] = {};
+ ASSERT_EQ(OkStatus(), responder.Finish(response, OkStatus()));
}
void TestAnotherUnaryRpc(const pw_rpc_test_TestRequest&,
@@ -86,9 +87,9 @@ class MixedService2
TEST(MixedService1, CallRawMethod_SyncUnary) {
PW_RAW_TEST_METHOD_CONTEXT(MixedService1, TestUnaryRpc) context;
- StatusWithSize sws = context.call({});
- EXPECT_TRUE(sws.ok());
- EXPECT_EQ(5u, sws.size());
+ context.call({});
+ EXPECT_EQ(OkStatus(), context.status());
+ EXPECT_EQ(5u, context.response().size());
}
TEST(MixedService1, CallNanopbMethod_AsyncUnary) {
diff --git a/pw_rpc/nanopb/method_test.cc b/pw_rpc/nanopb/method_test.cc
index b82bf88a2..cf19fe051 100644
--- a/pw_rpc/nanopb/method_test.cc
+++ b/pw_rpc/nanopb/method_test.cc
@@ -158,9 +158,8 @@ class FakeService : public FakeServiceBase<FakeService> {
if (fail_to_encode_async_unary_response) {
pw_rpc_test_TestResponse response = pw_rpc_test_TestResponse_init_default;
- response.repeated_field.funcs.encode = [](pb_ostream_t*,
- const pb_field_iter_t*,
- void* const*) { return false; };
+ response.repeated_field.funcs.encode =
+ [](pb_ostream_t*, const pb_field_t*, void* const*) { return false; };
ASSERT_EQ(OkStatus(), responder.Finish(response, Status::NotFound()));
} else {
ASSERT_EQ(
@@ -337,7 +336,7 @@ TEST(NanopbMethod, ServerStreamingRpc_ResponseEncodingFails_InternalError) {
pw_rpc_test_TestResponse response = pw_rpc_test_TestResponse_init_default;
response.repeated_field.funcs.encode =
- [](pb_ostream_t*, const pb_field_iter_t*, void* const*) { return false; };
+ [](pb_ostream_t*, const pb_field_t*, void* const*) { return false; };
EXPECT_EQ(Status::Internal(), context.service().last_writer.Write(response));
}
diff --git a/pw_rpc/nanopb/method_union_test.cc b/pw_rpc/nanopb/method_union_test.cc
index 0891bfc7c..6eacb5978 100644
--- a/pw_rpc/nanopb/method_union_test.cc
+++ b/pw_rpc/nanopb/method_union_test.cc
@@ -76,8 +76,8 @@ class FakeGeneratedServiceImpl
return Status::Unauthenticated();
}
- StatusWithSize DoNothing(ConstByteSpan, ByteSpan) {
- return StatusWithSize::Unknown();
+ void DoNothing(ConstByteSpan, RawUnaryResponder& responder) {
+ ASSERT_EQ(OkStatus(), responder.Finish({}, Status::Unknown()));
}
void RawStream(ConstByteSpan, RawServerWriter& writer) {
diff --git a/pw_rpc/nanopb/public/pw_rpc/nanopb/fake_channel_output.h b/pw_rpc/nanopb/public/pw_rpc/nanopb/fake_channel_output.h
index b1681a266..360cfc1d5 100644
--- a/pw_rpc/nanopb/public/pw_rpc/nanopb/fake_channel_output.h
+++ b/pw_rpc/nanopb/public/pw_rpc/nanopb/fake_channel_output.h
@@ -104,6 +104,12 @@ class NanopbFakeChannelOutput final
NanopbFakeChannelOutput() = default;
// Iterates over request payloads from request or client stream packets.
+ //
+ // !!! WARNING !!!
+ //
+ // Access to the FakeChannelOutput through the NanopbPayloadsView is NOT
+ // synchronized! The NanopbPayloadsView is immediately invalidated if any
+ // thread accesses the FakeChannelOutput.
template <auto kMethod>
NanopbPayloadsView<Request<kMethod>> requests(
uint32_t channel_id = Channel::kUnassignedChannelId) const {
@@ -122,6 +128,12 @@ class NanopbFakeChannelOutput final
}
// Iterates over response payloads from response or server stream packets.
+ //
+ // !!! WARNING !!!
+ //
+ // Access to the FakeChannelOutput through the NanopbPayloadsView is NOT
+ // synchronized! The NanopbPayloadsView is immediately invalidated if any
+ // thread accesses the FakeChannelOutput.
template <auto kMethod>
NanopbPayloadsView<Response<kMethod>> responses(
uint32_t channel_id = Channel::kUnassignedChannelId) const {
diff --git a/pw_rpc/nanopb/server_callback_test.cc b/pw_rpc/nanopb/server_callback_test.cc
index d13ae45b8..41bfd5729 100644
--- a/pw_rpc/nanopb/server_callback_test.cc
+++ b/pw_rpc/nanopb/server_callback_test.cc
@@ -86,7 +86,7 @@ TEST(NanopbTestMethodContext, ResponseWithCallbacks) {
pw_rpc_test_TestResponse response = pw_rpc_test_TestResponse_init_default;
response.repeated_field.funcs.decode = +[](pb_istream_t* stream,
- const pb_field_iter_t* /* field */,
+ const pb_field_t* /* field */,
void** arg) -> bool {
DecoderContext* dec_ctx = static_cast<DecoderContext*>(*arg);
uint64_t value;
diff --git a/pw_rpc/nanopb/server_reader_writer_test.cc b/pw_rpc/nanopb/server_reader_writer_test.cc
index e7b43e645..98a3b52bc 100644
--- a/pw_rpc/nanopb/server_reader_writer_test.cc
+++ b/pw_rpc/nanopb/server_reader_writer_test.cc
@@ -189,7 +189,8 @@ TEST(NanopbUnaryResponder, Open_ReturnsUsableResponder) {
NanopbUnaryResponder<pw_rpc_test_TestResponse>::Open<
TestService::TestUnaryRpc>(ctx.server, ctx.channel.id(), ctx.service);
- responder.Finish({.value = 4321, .repeated_field = {}});
+ ASSERT_EQ(OkStatus(),
+ responder.Finish({.value = 4321, .repeated_field = {}}));
EXPECT_EQ(ctx.output.last_response<TestService::TestUnaryRpc>().value, 4321);
EXPECT_EQ(ctx.output.last_status(), OkStatus());
@@ -202,8 +203,8 @@ TEST(NanopbServerWriter, Open_ReturnsUsableWriter) {
TestService::TestServerStreamRpc>(
ctx.server, ctx.channel.id(), ctx.service);
- responder.Write({.chunk = {}, .number = 321});
- responder.Finish();
+ ASSERT_EQ(OkStatus(), responder.Write({.chunk = {}, .number = 321}));
+ ASSERT_EQ(OkStatus(), responder.Finish());
EXPECT_EQ(ctx.output.last_response<TestService::TestServerStreamRpc>().number,
321u);
@@ -218,7 +219,7 @@ TEST(NanopbServerReader, Open_ReturnsUsableReader) {
Open<TestService::TestClientStreamRpc>(
ctx.server, ctx.channel.id(), ctx.service);
- responder.Finish({.chunk = {}, .number = 321});
+ ASSERT_EQ(OkStatus(), responder.Finish({.chunk = {}, .number = 321}));
EXPECT_EQ(ctx.output.last_response<TestService::TestClientStreamRpc>().number,
321u);
@@ -232,8 +233,8 @@ TEST(NanopbServerReaderWriter, Open_ReturnsUsableReaderWriter) {
Open<TestService::TestBidirectionalStreamRpc>(
ctx.server, ctx.channel.id(), ctx.service);
- responder.Write({.chunk = {}, .number = 321});
- responder.Finish(Status::NotFound());
+ ASSERT_EQ(OkStatus(), responder.Write({.chunk = {}, .number = 321}));
+ ASSERT_EQ(OkStatus(), responder.Finish(Status::NotFound()));
EXPECT_EQ(ctx.output.last_response<TestService::TestBidirectionalStreamRpc>()
.number,
diff --git a/pw_rpc/packet.cc b/pw_rpc/packet.cc
index a96e79db2..de37ab1fc 100644
--- a/pw_rpc/packet.cc
+++ b/pw_rpc/packet.cc
@@ -89,28 +89,22 @@ Result<ConstByteSpan> Packet::Encode(ByteSpan buffer) const {
// The payload is encoded first, as it may share the encode buffer.
if (!payload_.empty()) {
- rpc_packet.WritePayload(payload_)
- .IgnoreError(); // TODO(pwbug/387): Handle Status properly
+ rpc_packet.WritePayload(payload_).IgnoreError();
}
- rpc_packet.WriteType(type_)
- .IgnoreError(); // TODO(pwbug/387): Handle Status properly
- rpc_packet.WriteChannelId(channel_id_)
- .IgnoreError(); // TODO(pwbug/387): Handle Status properly
- rpc_packet.WriteServiceId(service_id_)
- .IgnoreError(); // TODO(pwbug/387): Handle Status properly
- rpc_packet.WriteMethodId(method_id_)
- .IgnoreError(); // TODO(pwbug/387): Handle Status properly
+ rpc_packet.WriteType(type_).IgnoreError();
+ rpc_packet.WriteChannelId(channel_id_).IgnoreError();
+ rpc_packet.WriteServiceId(service_id_).IgnoreError();
+ rpc_packet.WriteMethodId(method_id_).IgnoreError();
// Status code 0 is OK. In protobufs, 0 is the default int value, so skip
// encoding it to save two bytes in the output.
if (status_.code() != 0) {
- rpc_packet.WriteStatus(status_.code())
- .IgnoreError(); // TODO(pwbug/387): Handle Status properly
+ rpc_packet.WriteStatus(status_.code()).IgnoreError();
}
if (call_id_ != 0) {
- rpc_packet.WriteCallId(call_id_);
+ rpc_packet.WriteCallId(call_id_).IgnoreError();
}
if (rpc_packet.status().ok()) {
diff --git a/pw_rpc/public/pw_rpc/benchmark.h b/pw_rpc/public/pw_rpc/benchmark.h
index be91016d7..7ac45133c 100644
--- a/pw_rpc/public/pw_rpc/benchmark.h
+++ b/pw_rpc/public/pw_rpc/benchmark.h
@@ -22,7 +22,7 @@ namespace pw::rpc {
class BenchmarkService
: public pw_rpc::raw::Benchmark::Service<BenchmarkService> {
public:
- static StatusWithSize UnaryEcho(ConstByteSpan request, ByteSpan response);
+ static void UnaryEcho(ConstByteSpan request, RawUnaryResponder& responder);
void BidirectionalEcho(RawServerReaderWriter& reader_writer);
diff --git a/pw_rpc/public/pw_rpc/channel.h b/pw_rpc/public/pw_rpc/channel.h
index 099a994b2..849595602 100644
--- a/pw_rpc/public/pw_rpc/channel.h
+++ b/pw_rpc/public/pw_rpc/channel.h
@@ -19,11 +19,17 @@
#include <type_traits>
#include "pw_assert/assert.h"
+#include "pw_bytes/span.h"
+#include "pw_result/result.h"
#include "pw_rpc/internal/lock.h"
#include "pw_status/status.h"
namespace pw::rpc {
+// Extracts the channel ID from a pw_rpc packet. Returns DATA_LOSS if the
+// packet is corrupt and the channel ID could not be found.
+Result<uint32_t> ExtractChannelId(ConstByteSpan packet);
+
class ChannelOutput {
public:
// Returned from MaximumTransmissionUnit() to indicate that this ChannelOutput
diff --git a/pw_rpc/public/pw_rpc/client.h b/pw_rpc/public/pw_rpc/client.h
index 86c448190..db782989a 100644
--- a/pw_rpc/public/pw_rpc/client.h
+++ b/pw_rpc/public/pw_rpc/client.h
@@ -20,6 +20,7 @@
#include "pw_rpc/channel.h"
#include "pw_rpc/internal/channel.h"
#include "pw_rpc/internal/endpoint.h"
+#include "pw_rpc/internal/lock.h"
namespace pw::rpc {
diff --git a/pw_rpc/public/pw_rpc/internal/fake_channel_output.h b/pw_rpc/public/pw_rpc/internal/fake_channel_output.h
index fd04c6038..cef049d4f 100644
--- a/pw_rpc/public/pw_rpc/internal/fake_channel_output.h
+++ b/pw_rpc/public/pw_rpc/internal/fake_channel_output.h
@@ -19,11 +19,14 @@
#include "pw_bytes/span.h"
#include "pw_containers/vector.h"
+#include "pw_function/function.h"
#include "pw_rpc/channel.h"
+#include "pw_rpc/internal/lock.h"
#include "pw_rpc/internal/method_info.h"
#include "pw_rpc/internal/packet.h"
#include "pw_rpc/method_type.h"
#include "pw_rpc/payloads_view.h"
+#include "pw_sync/lock_annotations.h"
namespace pw::rpc {
@@ -40,15 +43,23 @@ class FakeChannelOutput : public ChannelOutput {
FakeChannelOutput& operator=(const FakeChannelOutput&) = delete;
FakeChannelOutput& operator=(FakeChannelOutput&&) = delete;
- Status last_status() const {
- PW_ASSERT(done());
+ Status last_status() const PW_LOCKS_EXCLUDED(mutex_) {
+ LockGuard lock(mutex_);
+ PW_ASSERT(total_response_packets_ > 0);
return packets_.back().status();
}
// Returns a view of the payloads seen for this RPC.
+ //
+ // !!! WARNING !!!
+ //
+ // Access to the FakeChannelOutput through the PayloadsView is NOT
+ // synchronized! The PayloadsView is immediately invalidated if any thread
+ // accesses the FakeChannelOutput.
template <auto kMethod>
- PayloadsView payloads(
- uint32_t channel_id = Channel::kUnassignedChannelId) const {
+ PayloadsView payloads(uint32_t channel_id = Channel::kUnassignedChannelId)
+ const PW_LOCKS_EXCLUDED(mutex_) {
+ LockGuard lock(mutex_);
return PayloadsView(packets_,
MethodInfo<kMethod>::kType,
channel_id,
@@ -59,15 +70,23 @@ class FakeChannelOutput : public ChannelOutput {
PayloadsView payloads(MethodType type,
uint32_t channel_id,
uint32_t service_id,
- uint32_t method_id) const {
+ uint32_t method_id) const PW_LOCKS_EXCLUDED(mutex_) {
+ LockGuard lock(mutex_);
return PayloadsView(packets_, type, channel_id, service_id, method_id);
}
// Returns a view of the final statuses seen for this RPC. Only relevant for
// checking packets sent by a server.
+ //
+ // !!! WARNING !!!
+ //
+ // Access to the FakeChannelOutput through the StatusView is NOT
+ // synchronized! The StatusView is immediately invalidated if any thread
+ // accesses the FakeChannelOutput.
template <auto kMethod>
- StatusView completions(
- uint32_t channel_id = Channel::kUnassignedChannelId) const {
+ StatusView completions(uint32_t channel_id = Channel::kUnassignedChannelId)
+ const PW_LOCKS_EXCLUDED(mutex_) {
+ LockGuard lock(mutex_);
return StatusView(packets_,
internal::PacketType::RESPONSE,
internal::PacketType::RESPONSE,
@@ -76,8 +95,17 @@ class FakeChannelOutput : public ChannelOutput {
MethodInfo<kMethod>::kMethodId);
}
+ // Returns a view of the pw_rpc server or client errors seen for this RPC.
+ //
+ // !!! WARNING !!!
+ //
+ // Access to the FakeChannelOutput through the StatusView is NOT
+ // synchronized! The StatusView is immediately invalidated if any thread
+ // accesses the FakeChannelOutput.
template <auto kMethod>
- StatusView errors(uint32_t channel_id = Channel::kUnassignedChannelId) const {
+ StatusView errors(uint32_t channel_id = Channel::kUnassignedChannelId) const
+ PW_LOCKS_EXCLUDED(mutex_) {
+ LockGuard lock(mutex_);
return StatusView(packets_,
internal::PacketType::CLIENT_ERROR,
internal::PacketType::SERVER_ERROR,
@@ -86,9 +114,13 @@ class FakeChannelOutput : public ChannelOutput {
MethodInfo<kMethod>::kMethodId);
}
+ // Returns a view of the client stream end packets seen for this RPC. Only
+ // relevant for checking packets sent by a client.
template <auto kMethod>
size_t client_stream_end_packets(
- uint32_t channel_id = Channel::kUnassignedChannelId) const {
+ uint32_t channel_id = Channel::kUnassignedChannelId) const
+ PW_LOCKS_EXCLUDED(mutex_) {
+ LockGuard lock(mutex_);
return internal::test::PacketsView(
packets_,
internal::test::PacketFilter(
@@ -102,26 +134,38 @@ class FakeChannelOutput : public ChannelOutput {
// The maximum number of packets this FakeChannelOutput can store. Attempting
// to store more packets than this is an error.
- size_t max_packets() const { return packets_.max_size(); }
+ size_t max_packets() const PW_LOCKS_EXCLUDED(mutex_) {
+ LockGuard lock(mutex_);
+ return packets_.max_size();
+ }
// The total number of packets that have been sent.
- size_t total_packets() const { return packets_.size(); }
+ size_t total_packets() const PW_LOCKS_EXCLUDED(mutex_) {
+ LockGuard lock(mutex_);
+ return packets_.size();
+ }
// Set to true if a RESPONSE packet is seen.
- bool done() const { return total_response_packets_ > 0; }
+ bool done() const PW_LOCKS_EXCLUDED(mutex_) {
+ LockGuard lock(mutex_);
+ return total_response_packets_ > 0;
+ }
// Clears and resets the FakeChannelOutput.
- void clear();
+ void clear() PW_LOCKS_EXCLUDED(mutex_);
- // Returns `status` for all future SendAndReleaseBuffer calls. Enables packet
- // processing if `status` is OK.
- void set_send_status(Status status) {
+ // Returns `status` for all future Send calls. Enables packet processing if
+ // `status` is OK.
+ void set_send_status(Status status) PW_LOCKS_EXCLUDED(mutex_) {
+ LockGuard lock(mutex_);
send_status_ = status;
return_after_packet_count_ = status.ok() ? -1 : 0;
}
// Returns `status` once after the specified positive number of packets.
- void set_send_status(Status status, int return_after_packet_count) {
+ void set_send_status(Status status, int return_after_packet_count)
+ PW_LOCKS_EXCLUDED(mutex_) {
+ LockGuard lock(mutex_);
PW_ASSERT(!status.ok());
PW_ASSERT(return_after_packet_count > 0);
send_status_ = status;
@@ -129,25 +173,44 @@ class FakeChannelOutput : public ChannelOutput {
}
// Logs which packets have been sent for debugging purposes.
- void LogPackets() const;
+ void LogPackets() const PW_LOCKS_EXCLUDED(mutex_);
// Processes buffer according to packet type and `return_after_packet_count_`
// value as follows:
// When positive, returns `send_status_` once,
// When equals 0, returns `send_status_` in all future calls,
// When negative, ignores `send_status_` processes buffer.
- Status Send(ConstByteSpan buffer) final;
+ Status Send(ConstByteSpan buffer) final PW_LOCKS_EXCLUDED(mutex_) {
+ LockGuard lock(mutex_);
+ const Status status = HandlePacket(buffer);
+ if (on_send_ != nullptr) {
+ on_send_(buffer, status);
+ }
+ return status;
+ }
// Gives access to the last received internal::Packet. This is hidden by the
// raw/Nanopb implementations, since it gives access to an internal class.
- const Packet& last_packet() const {
+ const Packet& last_packet() const PW_LOCKS_EXCLUDED(mutex_) {
+ LockGuard lock(mutex_);
PW_ASSERT(!packets_.empty());
return packets_.back();
}
+ // The on_send callback is called every time Send() is called. It is passed
+ // the contents of the packet and the status to be returned from Send().
+ //
+ // DANGER: Do NOT call any FakeChannelOutput functions or functions that call
+ // FakeChannelOutput functions. That will result in infinite recursion or
+ // deadlocks.
+ void set_on_send(Function<void(ConstByteSpan, Status)>&& on_send)
+ PW_LOCKS_EXCLUDED(mutex_) {
+ LockGuard lock(mutex_);
+ on_send_ = std::move(on_send);
+ }
+
protected:
- constexpr FakeChannelOutput(Vector<Packet>& packets,
- Vector<std::byte>& payloads)
+ FakeChannelOutput(Vector<Packet>& packets, Vector<std::byte>& payloads)
: ChannelOutput("pw::rpc::internal::test::FakeChannelOutput"),
packets_(packets),
payloads_(payloads) {}
@@ -157,22 +220,25 @@ class FakeChannelOutput : public ChannelOutput {
private:
friend class rpc::FakeServer;
- Status HandlePacket(ConstByteSpan buffer);
- void CopyPayloadToBuffer(Packet& packet);
+ Status HandlePacket(ConstByteSpan buffer) PW_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ void CopyPayloadToBuffer(Packet& packet) PW_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+
+ int return_after_packet_count_ PW_GUARDED_BY(mutex_) = -1;
+ unsigned total_response_packets_ PW_GUARDED_BY(mutex_) = 0;
- int return_after_packet_count_ = -1;
- unsigned total_response_packets_ = 0;
+ Vector<Packet>& packets_ PW_GUARDED_BY(mutex_);
+ Vector<std::byte>& payloads_ PW_GUARDED_BY(mutex_);
+ Status send_status_ PW_GUARDED_BY(mutex_) = OkStatus();
+ Function<void(ConstByteSpan, Status)> on_send_ PW_GUARDED_BY(mutex_);
- Vector<Packet>& packets_;
- Vector<std::byte>& payloads_;
- Status send_status_ = OkStatus();
+ mutable RpcLock mutex_;
};
// Adds the packet output buffer to a FakeChannelOutput.
template <size_t kMaxPackets, size_t kPayloadsBufferSizeBytes>
class FakeChannelOutputBuffer : public FakeChannelOutput {
protected:
- constexpr FakeChannelOutputBuffer()
+ FakeChannelOutputBuffer()
: FakeChannelOutput(packets_array_, payloads_array_), payloads_array_ {}
{}
diff --git a/pw_rpc/public/pw_rpc/internal/lock.h b/pw_rpc/public/pw_rpc/internal/lock.h
index efb04433f..50c53d5c3 100644
--- a/pw_rpc/public/pw_rpc/internal/lock.h
+++ b/pw_rpc/public/pw_rpc/internal/lock.h
@@ -41,7 +41,9 @@ class PW_LOCKABLE("pw::rpc::internal::RpcLock") RpcLock {
class PW_SCOPED_LOCKABLE LockGuard {
public:
- constexpr LockGuard([[maybe_unused]] RpcLock& mutex)
+ // [[maybe_unused]] needs to be after the parameter to work around a gcc bug
+ // context: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81429
+ constexpr LockGuard(RpcLock& mutex [[maybe_unused]])
PW_EXCLUSIVE_LOCK_FUNCTION(mutex) {}
~LockGuard() PW_UNLOCK_FUNCTION() = default;
diff --git a/pw_rpc/public/pw_rpc/internal/method_impl_tester.h b/pw_rpc/public/pw_rpc/internal/method_impl_tester.h
index 6498a99c3..2b411d2a6 100644
--- a/pw_rpc/public/pw_rpc/internal/method_impl_tester.h
+++ b/pw_rpc/public/pw_rpc/internal/method_impl_tester.h
@@ -123,14 +123,12 @@ class MethodImplTests {
struct Type {
constexpr bool Pass() const { return true; }
+ // Don't check kSynchronous for Unary since not all method implementations
+ // support synchronous unary.
static_assert(MethodTraits<decltype(&TestService::Unary)>::kType ==
MethodType::kUnary);
- static_assert(MethodTraits<decltype(&TestService::Unary)>::kSynchronous);
static_assert(MethodTraits<decltype(&TestService::StaticUnary)>::kType ==
MethodType::kUnary);
- static_assert(
- MethodTraits<decltype(&TestService::StaticUnary)>::kSynchronous);
-
static_assert(MethodTraits<decltype(&TestService::AsyncUnary)>::kType ==
MethodType::kUnary);
static_assert(
@@ -169,8 +167,7 @@ class MethodImplTests {
public:
template <typename... Args>
constexpr bool Pass(const std::tuple<Args...>& args) const {
- return UnaryMethod(args).id() == 1 && StaticUnaryMethod(args).id() == 2 &&
- AsyncUnaryMethod(args).id() == 3 &&
+ return AsyncUnaryMethod(args).id() == 3 &&
StaticAsyncUnaryMethod(args).id() == 4 &&
ServerStreamingMethod(args).id() == 5 &&
StaticServerStreamingMethod(args).id() == 6 &&
@@ -182,20 +179,8 @@ class MethodImplTests {
}
private:
- template <typename... Args>
- constexpr MethodImpl UnaryMethod(const std::tuple<Args...>& args) const {
- return Call(
- MethodImpl::template SynchronousUnary<&TestService::Unary>, 1, args);
- }
-
- template <typename... Args>
- constexpr MethodImpl StaticUnaryMethod(
- const std::tuple<Args...>& args) const {
- return Call(
- MethodImpl::template SynchronousUnary<&TestService::StaticUnary>,
- 2,
- args);
- }
+ // Do not check synchronous unary since not all method implementations
+ // support it.
template <typename... Args>
constexpr MethodImpl AsyncUnaryMethod(
diff --git a/pw_rpc/public/pw_rpc/internal/test_method_context.h b/pw_rpc/public/pw_rpc/internal/test_method_context.h
index 459be69e4..a8e546194 100644
--- a/pw_rpc/public/pw_rpc/internal/test_method_context.h
+++ b/pw_rpc/public/pw_rpc/internal/test_method_context.h
@@ -88,6 +88,9 @@ class InvocationContext {
.ok());
}
+ const Output& output() const { return output_; }
+ Output& output() { return output_; }
+
protected:
// Constructs the invocation context. The args for the ChannelOutput type are
// passed in a std::tuple. The args for the Service are forwarded directly
@@ -104,9 +107,6 @@ class InvocationContext {
server_.RegisterService(service_);
}
- const Output& output() const { return output_; }
- Output& output() { return output_; }
-
uint32_t channel_id() const { return channel_.id(); }
template <size_t kMaxPayloadSize = 32>
diff --git a/pw_rpc/public/pw_rpc/payloads_view.h b/pw_rpc/public/pw_rpc/payloads_view.h
index 0177f56a0..3d4934f88 100644
--- a/pw_rpc/public/pw_rpc/payloads_view.h
+++ b/pw_rpc/public/pw_rpc/payloads_view.h
@@ -178,7 +178,13 @@ class PayloadsView {
case MethodType::kBidirectionalStreaming:
return {PacketType::CLIENT_STREAM, PacketType::SERVER_STREAM};
}
+
+// Workaround for GCC 8 bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86678
+#if defined(__GNUC__) && __GNUC__ < 9
+ return {};
+#else
PW_ASSERT(false);
+#endif // defined(__GNUC__) && __GNUC__ < 9
}
internal::test::PacketsView view_;
diff --git a/pw_rpc/public/pw_rpc/server.h b/pw_rpc/public/pw_rpc/server.h
index 9955e13a1..69eae07fc 100644
--- a/pw_rpc/public/pw_rpc/server.h
+++ b/pw_rpc/public/pw_rpc/server.h
@@ -21,6 +21,7 @@
#include "pw_rpc/channel.h"
#include "pw_rpc/internal/channel.h"
#include "pw_rpc/internal/endpoint.h"
+#include "pw_rpc/internal/lock.h"
#include "pw_rpc/internal/method.h"
#include "pw_rpc/internal/method_info.h"
#include "pw_rpc/internal/server_call.h"
@@ -33,9 +34,23 @@ class Server : public internal::Endpoint {
public:
_PW_RPC_CONSTEXPR Server(std::span<Channel> channels) : Endpoint(channels) {}
- // Registers a service with the server. This should not be called directly
- // with a Service; instead, use a generated class which inherits from it.
- void RegisterService(Service& service) { services_.push_front(service); }
+ // Registers one or more services with the server. This should not be called
+ // directly with a Service; instead, use a generated class which inherits
+ // from it.
+ //
+ // This function may be called with any number of services. Combining
+ // registration into fewer calls is preferred so the RPC mutex is only
+ // locked/unlocked once.
+ template <typename... OtherServices>
+ void RegisterService(Service& service, OtherServices&... services)
+ PW_LOCKS_EXCLUDED(internal::rpc_lock()) {
+ internal::LockGuard lock(internal::rpc_lock());
+ services_.push_front(service); // Register the first service
+
+ // Register any additional services by expanding the parameter pack. This
+ // is a fold expression of the comma operator.
+ (services_.push_front(services), ...);
+ }
// Processes an RPC packet. The packet may contain an RPC request or a control
// packet, the result of which is processed in this function. Returns whether
@@ -49,10 +64,12 @@ class Server : public internal::Endpoint {
// ProcessPacket optionally accepts a ChannelOutput as a second argument. If
// provided, the server respond on that interface if an unknown channel is
// requested.
- Status ProcessPacket(ConstByteSpan packet_data) {
+ Status ProcessPacket(ConstByteSpan packet_data)
+ PW_LOCKS_EXCLUDED(internal::rpc_lock()) {
return ProcessPacket(packet_data, nullptr);
}
- Status ProcessPacket(ConstByteSpan packet_data, ChannelOutput& interface) {
+ Status ProcessPacket(ConstByteSpan packet_data, ChannelOutput& interface)
+ PW_LOCKS_EXCLUDED(internal::rpc_lock()) {
return ProcessPacket(packet_data, &interface);
}
@@ -116,7 +133,8 @@ class Server : public internal::Endpoint {
PW_LOCKS_EXCLUDED(internal::rpc_lock());
std::tuple<Service*, const internal::Method*> FindMethod(
- const internal::Packet& packet);
+ const internal::Packet& packet)
+ PW_EXCLUSIVE_LOCKS_REQUIRED(internal::rpc_lock());
void HandleClientStreamPacket(const internal::Packet& packet,
internal::Channel& channel,
@@ -127,7 +145,7 @@ class Server : public internal::Endpoint {
using Endpoint::active_call_count;
using Endpoint::GetInternalChannel;
- IntrusiveList<Service> services_;
+ IntrusiveList<Service> services_ PW_GUARDED_BY(internal::rpc_lock());
};
} // namespace pw::rpc
diff --git a/pw_rpc/public/pw_rpc/thread_testing.h b/pw_rpc/public/pw_rpc/thread_testing.h
new file mode 100644
index 000000000..eb723a2cf
--- /dev/null
+++ b/pw_rpc/public/pw_rpc/thread_testing.h
@@ -0,0 +1,42 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <chrono>
+
+#include "pw_assert/assert.h"
+#include "pw_rpc/internal/fake_channel_output.h"
+#include "pw_sync/counting_semaphore.h"
+
+namespace pw::rpc::test {
+
+// Wait until the provided RawFakeChannelOutput or NanopbFakeChannelOutput
+// receives the specified number of packets.
+template <unsigned kTimeoutSeconds = 10, typename Function>
+void WaitForPackets(internal::test::FakeChannelOutput& output,
+ int count,
+ Function&& run_before) {
+ sync::CountingSemaphore sem;
+ output.set_on_send([&sem](ConstByteSpan, Status) { sem.release(); });
+
+ run_before();
+
+ for (int i = 0; i < count; ++i) {
+ PW_ASSERT(sem.try_acquire_for(std::chrono::seconds(kTimeoutSeconds)));
+ }
+
+ output.set_on_send(nullptr);
+}
+
+} // namespace pw::rpc::test
diff --git a/pw_rpc/py/BUILD.bazel b/pw_rpc/py/BUILD.bazel
index e077b6752..4d9d454fe 100644
--- a/pw_rpc/py/BUILD.bazel
+++ b/pw_rpc/py/BUILD.bazel
@@ -42,7 +42,7 @@ filegroup(
)
py_binary(
- name = "plugin",
+ name = "plugin_raw",
srcs = [":pw_rpc_common_sources"],
imports = ["."],
main = "pw_rpc/plugin_raw.py",
diff --git a/pw_rpc/py/docs.rst b/pw_rpc/py/docs.rst
index 9d29a107c..2c0e3060e 100644
--- a/pw_rpc/py/docs.rst
+++ b/pw_rpc/py/docs.rst
@@ -23,6 +23,13 @@ pw_rpc.callback_client
ClientStreamingCall,
BidirectionalStreamingCall,
+pw_rpc.descriptors
+==================
+.. automodule:: pw_rpc.descriptors
+ :members:
+ Channel,
+ ChannelManipulator,
+
pw_rpc.console_tools
====================
.. automodule:: pw_rpc.console_tools
diff --git a/pw_rpc/py/pw_rpc/__init__.py b/pw_rpc/py/pw_rpc/__init__.py
index 1f1e72e41..ff1f8713b 100644
--- a/pw_rpc/py/pw_rpc/__init__.py
+++ b/pw_rpc/py/pw_rpc/__init__.py
@@ -14,4 +14,4 @@
"""Package for calling Pigweed RPCs from Python."""
from pw_rpc.client import Client
-from pw_rpc.descriptors import Channel
+from pw_rpc.descriptors import Channel, ChannelManipulator
diff --git a/pw_rpc/py/pw_rpc/client.py b/pw_rpc/py/pw_rpc/client.py
index a70a1ebb4..182434d6b 100644
--- a/pw_rpc/py/pw_rpc/client.py
+++ b/pw_rpc/py/pw_rpc/client.py
@@ -16,8 +16,8 @@
import abc
from dataclasses import dataclass
import logging
-from typing import (Any, Collection, Dict, Iterable, Iterator, NamedTuple,
- Optional)
+from typing import (Any, Callable, Collection, Dict, Iterable, Iterator,
+ NamedTuple, Optional)
from google.protobuf.message import DecodeError, Message
from pw_status import Status
@@ -385,6 +385,9 @@ class Client:
"""Sends requests and handles responses for a set of channels.
RPC invocations occur through a ChannelClient.
+
+ Users may set an optional response_callback that is called before processing
+ every response or server stream RPC packet.
"""
@classmethod
def from_modules(cls, impl: ClientImpl, channels: Iterable[Channel],
@@ -409,6 +412,10 @@ class Client:
for channel in channels
}
+ # Optional function called before processing every non-error RPC packet.
+ self.response_callback: Optional[Callable[
+ [PendingRpc, Any, Optional[Status]], Any]] = None
+
def channel(self, channel_id: int = None) -> ChannelClient:
"""Returns a ChannelClient, which is used to call RPCs on a channel.
@@ -501,6 +508,10 @@ class Client:
packet.type = PacketType.SERVER_ERROR
status = Status.DATA_LOSS
+ # If set, call the response callback with non-error packets.
+ if self.response_callback and packet.type != PacketType.SERVER_ERROR:
+ self.response_callback(rpc, payload, status) # pylint: disable=not-callable
+
try:
context = self._impl.rpcs.get_pending(rpc, status)
except KeyError:
diff --git a/pw_rpc/py/pw_rpc/codegen_raw.py b/pw_rpc/py/pw_rpc/codegen_raw.py
index 6454cb4dd..15bc15ba3 100644
--- a/pw_rpc/py/pw_rpc/codegen_raw.py
+++ b/pw_rpc/py/pw_rpc/codegen_raw.py
@@ -136,16 +136,15 @@ class RawCodeGenerator(CodeGenerator):
class StubGenerator(codegen.StubGenerator):
def unary_signature(self, method: ProtoServiceMethod, prefix: str) -> str:
- return (f'pw::StatusWithSize {prefix}{method.name()}('
- 'pw::ConstByteSpan request, pw::ByteSpan response)')
+ return (f'void {prefix}{method.name()}(pw::ConstByteSpan request, '
+ 'pw::rpc::RawUnaryResponder& responder)')
def unary_stub(self, method: ProtoServiceMethod,
output: OutputFile) -> None:
output.write_line(codegen.STUB_REQUEST_TODO)
output.write_line('static_cast<void>(request);')
output.write_line(codegen.STUB_RESPONSE_TODO)
- output.write_line('static_cast<void>(response);')
- output.write_line('return pw::StatusWithSize::Unimplemented();')
+ output.write_line('static_cast<void>(responder);')
def server_streaming_signature(self, method: ProtoServiceMethod,
prefix: str) -> str:
diff --git a/pw_rpc/py/pw_rpc/descriptors.py b/pw_rpc/py/pw_rpc/descriptors.py
index fdae73211..57ba98458 100644
--- a/pw_rpc/py/pw_rpc/descriptors.py
+++ b/pw_rpc/py/pw_rpc/descriptors.py
@@ -13,6 +13,7 @@
# the License.
"""Types representing the basic pw_rpc concepts: channel, service, method."""
+import abc
from dataclasses import dataclass
import enum
from inspect import Parameter
@@ -37,6 +38,57 @@ class Channel:
return f'Channel({self.id})'
+class ChannelManipulator(abc.ABC):
+ """A pipe interface that may manipulate packets before they're sent.
+
+ ``ChannelManipulator``s allow application-specific packet handling to be
+ injected into the packet processing pipeline for an ingress or egress
+ channel-like pathway. This is particularly useful for integration testing
+ resilience to things like packet loss on a usually-reliable transport. RPC
+ server integrations (e.g. ``HdlcRpcLocalServerAndClient``) may provide an
+ opportunity to inject a ``ChannelManipulator`` for this use case.
+
+ A ``ChannelManipulator`` should not modify send_packet, as the consumer of a
+ ``ChannelManipulator`` will use ``send_packet`` to insert the provided
+ ``ChannelManipulator`` into a packet processing path.
+
+ For example:
+
+ .. code-block:: python
+
+ class PacketLogger(ChannelManipulator):
+ def process_and_send(self, packet: bytes) -> None:
+ _LOG.debug('Received packet with payload: %s', str(packet))
+ self.send_packet(packet)
+
+
+ packet_logger = PacketLogger()
+
+ # Configure actual send command.
+ packet_logger.send_packet = socket.sendall
+
+ # Route the output channel through the PacketLogger channel manipulator.
+ channels = tuple(Channel(_DEFAULT_CHANNEL, packet_logger))
+
+ # Create a RPC client.
+ client = HdlcRpcClient(socket.read, protos, channels, stdout)
+ """
+ def __init__(self):
+ self.send_packet: Callable[[bytes], Any] = lambda _: None
+
+ @abc.abstractmethod
+ def process_and_send(self, packet: bytes) -> None:
+ """Processes an incoming packet before optionally sending it.
+
+ Implementations of this method may send the processed packet, multiple
+ packets, or no packets at all via the registered `send_packet()`
+ handler.
+ """
+
+ def __call__(self, data: bytes) -> None:
+ self.process_and_send(data)
+
+
@dataclass(frozen=True, eq=False)
class Service:
"""Describes an RPC service."""
diff --git a/pw_rpc/py/tests/client_test.py b/pw_rpc/py/tests/client_test.py
index 6fcfd607f..e07125287 100755
--- a/pw_rpc/py/tests/client_test.py
+++ b/pw_rpc/py/tests/client_test.py
@@ -285,6 +285,26 @@ class ClientTest(unittest.TestCase):
method_id=method.id,
status=Status.FAILED_PRECONDITION.value))
+ def test_process_packet_non_pending_calls_response_callback(self) -> None:
+ method = self._client.method('pw.test1.PublicService.SomeUnary')
+ reply = method.response_type(payload='hello')
+
+ def response_callback(rpc: client.PendingRpc, message,
+ status: Optional[Status]) -> None:
+ self.assertEqual(
+ rpc,
+ client.PendingRpc(
+ self._client.channel(1).channel, method.service, method))
+ self.assertEqual(message, reply)
+ self.assertIs(status, Status.OK)
+
+ self._client.response_callback = response_callback
+
+ self.assertIs(
+ self._client.process_packet(
+ packets.encode_response((1, method.service, method), reply)),
+ Status.OK)
+
if __name__ == '__main__':
unittest.main()
diff --git a/pw_rpc/raw/BUILD.bazel b/pw_rpc/raw/BUILD.bazel
index 6e72fdd04..fcad0a628 100644
--- a/pw_rpc/raw/BUILD.bazel
+++ b/pw_rpc/raw/BUILD.bazel
@@ -36,6 +36,7 @@ pw_cc_library(
deps = [
"//pw_bytes",
"//pw_rpc",
+ "//pw_rpc:internal_packet_cc.pwpb",
"//pw_status",
],
)
@@ -47,7 +48,7 @@ pw_cc_library(
deps = [
"//pw_bytes",
"//pw_rpc",
- "//pw_rpc:internal_packet_pwpb",
+ "//pw_rpc:internal_packet_cc.pwpb",
],
)
@@ -108,7 +109,7 @@ pw_cc_test(
deps = [
":client_api",
":client_testing",
- "//pw_rpc:pw_rpc_test_pwpb",
+ "//pw_rpc:pw_rpc_test_cc.raw_rpc",
],
)
@@ -124,7 +125,8 @@ pw_cc_test(
":test_method_context",
"//pw_protobuf",
"//pw_rpc:internal_test_utils",
- "//pw_rpc:pw_rpc_test_pwpb",
+ "//pw_rpc:pw_rpc_test_cc.pwpb",
+ "//pw_rpc:pw_rpc_test_cc.raw_rpc",
],
)
@@ -137,7 +139,7 @@ pw_cc_test(
":server_api",
"//pw_protobuf",
"//pw_rpc:internal_test_utils",
- "//pw_rpc:pw_rpc_test_pwpb",
+ "//pw_rpc:pw_rpc_test_cc.pwpb",
],
)
@@ -147,12 +149,8 @@ pw_cc_test(
"method_info_test.cc",
],
deps = [
- "//pw_rpc",
"//pw_rpc:internal_test_utils",
- "//pw_rpc:pw_rpc_test_pwpb",
- # TODO(hepler): RPC deps not used directly should be provided by the proto library
- "//pw_rpc/raw:client_api",
- "//pw_rpc/raw:server_api",
+ "//pw_rpc:pw_rpc_test_cc.raw_rpc",
],
)
@@ -165,7 +163,7 @@ pw_cc_test(
":server_api",
"//pw_protobuf",
"//pw_rpc:internal_test_utils",
- "//pw_rpc:pw_rpc_test_pwpb",
+ "//pw_rpc:pw_rpc_test_cc.pwpb",
],
)
@@ -173,12 +171,10 @@ pw_cc_test(
name = "server_reader_writer_test",
srcs = ["server_reader_writer_test.cc"],
deps = [
- # TODO(hepler): RPC deps not used directly should be provided by the proto library
- ":client_api",
- ":server_api",
":test_method_context",
"//pw_rpc:internal_test_utils",
- "//pw_rpc:pw_rpc_test_pwpb",
+ "//pw_rpc:pw_rpc_test_cc.pwpb",
+ "//pw_rpc:pw_rpc_test_cc.raw_rpc",
],
)
@@ -186,10 +182,7 @@ pw_cc_test(
name = "stub_generation_test",
srcs = ["stub_generation_test.cc"],
deps = [
- # TODO(hepler): RPC deps not used directly should be provided by the proto library
- "//pw_rpc",
- "//pw_rpc:pw_rpc_test_pwpb",
- ":server_api",
- ":client_api",
+ "//pw_rpc:pw_rpc_test_cc.pwpb",
+ "//pw_rpc:pw_rpc_test_cc.raw_rpc",
],
)
diff --git a/pw_rpc/raw/client_testing.cc b/pw_rpc/raw/client_testing.cc
index 09502dbe4..e3b8f5363 100644
--- a/pw_rpc/raw/client_testing.cc
+++ b/pw_rpc/raw/client_testing.cc
@@ -52,7 +52,7 @@ Status FakeServer::ProcessPacket(internal::PacketType type,
ConstByteSpan payload,
Status status) const {
auto view = internal::test::PacketsView(
- output_.packets_,
+ output_.packets(),
internal::test::PacketFilter(internal::PacketType::REQUEST,
internal::PacketType::RESPONSE,
channel_id_,
diff --git a/pw_rpc/raw/codegen_test.cc b/pw_rpc/raw/codegen_test.cc
index bc24905b3..3318ac45d 100644
--- a/pw_rpc/raw/codegen_test.cc
+++ b/pw_rpc/raw/codegen_test.cc
@@ -55,18 +55,23 @@ namespace test {
class TestService final
: public pw_rpc::raw::TestService::Service<TestService> {
public:
- static StatusWithSize TestUnaryRpc(ConstByteSpan request, ByteSpan response) {
+ static void TestUnaryRpc(ConstByteSpan request,
+ RawUnaryResponder& responder) {
int64_t integer;
Status status;
if (!DecodeRequest(request, integer, status)) {
- return StatusWithSize::DataLoss();
+ ASSERT_EQ(OkStatus(), responder.Finish({}, Status::DataLoss()));
+ return;
}
+ std::byte response[64] = {};
TestResponse::MemoryEncoder test_response(response);
EXPECT_EQ(OkStatus(), test_response.WriteValue(integer + 1));
- return StatusWithSize(status, test_response.size());
+ ASSERT_EQ(OkStatus(),
+ responder.Finish(std::span(response).first(test_response.size()),
+ status));
}
void TestAnotherUnaryRpc(ConstByteSpan request,
@@ -74,10 +79,7 @@ class TestService final
if (request.empty()) {
last_responder_ = std::move(responder);
} else {
- std::byte response[32] = {};
- StatusWithSize sws = TestUnaryRpc(request, response);
-
- responder.Finish(std::span(response).first(sws.size()), sws.status());
+ TestUnaryRpc(request, responder);
}
}
@@ -187,8 +189,8 @@ TEST(RawCodegen, Server_CompilesProperly) {
TEST(RawCodegen, Server_InvokeUnaryRpc) {
PW_RAW_TEST_METHOD_CONTEXT(test::TestService, TestUnaryRpc) context;
- auto sws = context.call(EncodeRequest(123, OkStatus()));
- EXPECT_EQ(OkStatus(), sws.status());
+ context.call(EncodeRequest(123, OkStatus()));
+ EXPECT_EQ(OkStatus(), context.status());
protobuf::Decoder decoder(context.response());
@@ -247,7 +249,7 @@ TEST(RawCodegen, Server_Finish) {
ctx.call({});
ASSERT_TRUE(ctx.service().last_responder().active());
- ctx.service().last_responder().Finish({});
+ EXPECT_EQ(OkStatus(), ctx.service().last_responder().Finish({}));
EXPECT_FALSE(ctx.service().last_responder().active());
}
diff --git a/pw_rpc/raw/method_test.cc b/pw_rpc/raw/method_test.cc
index 2d7341b0d..43382dcbf 100644
--- a/pw_rpc/raw/method_test.cc
+++ b/pw_rpc/raw/method_test.cc
@@ -38,19 +38,15 @@ class TestRawService final : public Service {
public:
// Unary signatures
- StatusWithSize Unary(ConstByteSpan, ByteSpan) { return StatusWithSize(0); }
+ void Unary(ConstByteSpan, RawUnaryResponder&) {}
- static StatusWithSize StaticUnary(ConstByteSpan, ByteSpan) {
- return StatusWithSize(0);
- }
+ static void StaticUnary(ConstByteSpan, RawUnaryResponder&) {}
void AsyncUnary(ConstByteSpan, RawUnaryResponder&) {}
static void StaticAsyncUnary(ConstByteSpan, RawUnaryResponder&) {}
- StatusWithSize UnaryWrongArg(ConstByteSpan, ConstByteSpan) {
- return StatusWithSize(0);
- }
+ void UnaryWrongArg(ConstByteSpan, ConstByteSpan) {}
// Server streaming signatures
@@ -95,7 +91,7 @@ class FakeServiceBase : public Service {
FakeServiceBase(uint32_t id) : Service(id, kMethods) {}
static constexpr std::array<RawMethodUnion, 5> kMethods = {
- RawMethod::SynchronousUnary<&Impl::DoNothing>(10u),
+ RawMethod::AsynchronousUnary<&Impl::DoNothing>(10u),
RawMethod::AsynchronousUnary<&Impl::AddFive>(11u),
RawMethod::ServerStreaming<&Impl::StartStream>(12u),
RawMethod::ClientStreaming<&Impl::ClientStream>(13u),
@@ -107,8 +103,8 @@ class FakeService : public FakeServiceBase<FakeService> {
public:
FakeService(uint32_t id) : FakeServiceBase(id) {}
- StatusWithSize DoNothing(ConstByteSpan, ByteSpan) {
- return StatusWithSize::Unknown();
+ void DoNothing(ConstByteSpan, RawUnaryResponder& responder) {
+ ASSERT_EQ(OkStatus(), responder.Finish({}, Status::Unknown()));
}
void AddFive(ConstByteSpan request, RawUnaryResponder& responder) {
@@ -165,9 +161,9 @@ class FakeService : public FakeServiceBase<FakeService> {
RawServerReaderWriter last_reader_writer;
};
-constexpr const RawMethod& kSyncUnary =
+constexpr const RawMethod& kAsyncUnary0 =
std::get<0>(FakeServiceBase<FakeService>::kMethods).raw_method();
-constexpr const RawMethod& kAsyncUnary =
+constexpr const RawMethod& kAsyncUnary1 =
std::get<1>(FakeServiceBase<FakeService>::kMethods).raw_method();
constexpr const RawMethod& kServerStream =
std::get<2>(FakeServiceBase<FakeService>::kMethods).raw_method();
@@ -176,16 +172,16 @@ constexpr const RawMethod& kClientStream =
constexpr const RawMethod& kBidirectionalStream =
std::get<4>(FakeServiceBase<FakeService>::kMethods).raw_method();
-TEST(RawMethod, AsyncUnaryRpc_SendsResponse) {
+TEST(RawMethod, AsyncUnaryRpc1_SendsResponse) {
std::byte buffer[16];
stream::MemoryWriter writer(buffer);
TestRequest::StreamEncoder test_request(writer, ByteSpan());
ASSERT_EQ(OkStatus(), test_request.WriteInteger(456));
ASSERT_EQ(OkStatus(), test_request.WriteStatusCode(7));
- ServerContextForTest<FakeService> context(kAsyncUnary);
+ ServerContextForTest<FakeService> context(kAsyncUnary1);
rpc_lock().lock();
- kAsyncUnary.Invoke(context.get(), context.request(writer.WrittenData()));
+ kAsyncUnary1.Invoke(context.get(), context.request(writer.WrittenData()));
EXPECT_EQ(context.service().last_request.integer, 456);
EXPECT_EQ(context.service().last_request.status_code, 7u);
@@ -200,17 +196,17 @@ TEST(RawMethod, AsyncUnaryRpc_SendsResponse) {
EXPECT_EQ(value, 461);
}
-TEST(RawMethod, SyncUnaryRpc_SendsResponse) {
- ServerContextForTest<FakeService> context(kSyncUnary);
+TEST(RawMethod, AsyncUnaryRpc0_SendsResponse) {
+ ServerContextForTest<FakeService> context(kAsyncUnary0);
rpc_lock().lock();
- kSyncUnary.Invoke(context.get(), context.request({}));
+ kAsyncUnary0.Invoke(context.get(), context.request({}));
const Packet& packet = context.output().last_packet();
EXPECT_EQ(PacketType::RESPONSE, packet.type());
EXPECT_EQ(Status::Unknown(), packet.status());
EXPECT_EQ(context.service_id(), packet.service_id());
- EXPECT_EQ(kSyncUnary.id(), packet.method_id());
+ EXPECT_EQ(kAsyncUnary0.id(), packet.method_id());
}
TEST(RawMethod, ServerStreamingRpc_SendsNothingWhenInitiallyCalled) {
diff --git a/pw_rpc/raw/method_union_test.cc b/pw_rpc/raw/method_union_test.cc
index c45fcaa3a..a46478783 100644
--- a/pw_rpc/raw/method_union_test.cc
+++ b/pw_rpc/raw/method_union_test.cc
@@ -48,19 +48,18 @@ class FakeGeneratedServiceImpl
public:
FakeGeneratedServiceImpl(uint32_t id) : FakeGeneratedService(id) {}
- StatusWithSize DoNothing(ConstByteSpan, ByteSpan) {
- return StatusWithSize::Unknown();
- }
+ void DoNothing(ConstByteSpan, RawUnaryResponder&) {}
- StatusWithSize AddFive(ConstByteSpan request, ByteSpan response) {
+ void AddFive(ConstByteSpan request, RawUnaryResponder& responder) {
DecodeRawTestRequest(request);
+ std::byte response[32] = {};
TestResponse::MemoryEncoder test_response(response);
- test_response.WriteValue(last_request.integer + 5)
- .IgnoreError(); // TODO(pwbug/387): Handle Status properly
- ConstByteSpan payload(test_response);
+ ASSERT_EQ(OkStatus(), test_response.WriteValue(last_request.integer + 5));
- return StatusWithSize::Unauthenticated(payload.size());
+ ASSERT_EQ(OkStatus(),
+ responder.Finish(std::span(response).first(test_response.size()),
+ Status::Unauthenticated()));
}
void StartStream(ConstByteSpan request, RawServerWriter& writer) {
diff --git a/pw_rpc/raw/public/pw_rpc/raw/internal/method.h b/pw_rpc/raw/public/pw_rpc/raw/internal/method.h
index 6b81dd370..0ab022222 100644
--- a/pw_rpc/raw/public/pw_rpc/raw/internal/method.h
+++ b/pw_rpc/raw/public/pw_rpc/raw/internal/method.h
@@ -37,12 +37,12 @@ class RawMethod : public Method {
template <auto kMethod>
static constexpr RawMethod SynchronousUnary(uint32_t id) {
- constexpr SynchronousUnaryFunction wrapper =
- [](Service& service, ConstByteSpan req, ByteSpan res) {
- return CallMethodImplFunction<kMethod>(service, req, res);
- };
- return RawMethod(
- id, SynchronousUnaryInvoker, Function{.synchronous_unary = wrapper});
+ static_assert(sizeof(kMethod) != sizeof(kMethod),
+ "Raw synchronous unary methods are not supported. "
+ "Use an asynchronous unary method instead: "
+ "void MethodName(pw::ConstByteSpan request, "
+ "pw::rpc::RawUnaryResponder& responder)");
+ return {id, InvalidInvoker, {}};
}
template <auto kMethod>
diff --git a/pw_rpc/raw/server_reader_writer_test.cc b/pw_rpc/raw/server_reader_writer_test.cc
index 16d55292c..d6a35f803 100644
--- a/pw_rpc/raw/server_reader_writer_test.cc
+++ b/pw_rpc/raw/server_reader_writer_test.cc
@@ -26,9 +26,7 @@ namespace pw::rpc {
class TestServiceImpl final
: public test::pw_rpc::raw::TestService::Service<TestServiceImpl> {
public:
- static StatusWithSize TestUnaryRpc(ConstByteSpan, ByteSpan) {
- return StatusWithSize(0);
- }
+ static void TestUnaryRpc(ConstByteSpan, RawUnaryResponder&) {}
void TestAnotherUnaryRpc(ConstByteSpan, RawUnaryResponder&) {}
@@ -171,7 +169,8 @@ TEST(RawUnaryResponder, Open_ReturnsUsableResponder) {
ctx.server, ctx.channel.id(), ctx.service);
EXPECT_EQ(call.channel_id(), ctx.channel.id());
- call.Finish(std::as_bytes(std::span("hello from pw_rpc")));
+ EXPECT_EQ(OkStatus(),
+ call.Finish(std::as_bytes(std::span("hello from pw_rpc"))));
EXPECT_STREQ(
reinterpret_cast<const char*>(
@@ -223,7 +222,7 @@ TEST(RawServerWriter, Open_ReturnsUsableWriter) {
ctx.server, ctx.channel.id(), ctx.service);
EXPECT_EQ(call.channel_id(), ctx.channel.id());
- call.Write(std::as_bytes(std::span("321")));
+ EXPECT_EQ(OkStatus(), call.Write(std::as_bytes(std::span("321"))));
EXPECT_STREQ(reinterpret_cast<const char*>(
ctx.output.payloads<TestService::TestServerStreamRpc>()
@@ -239,7 +238,8 @@ TEST(RawServerReader, Open_ReturnsUsableReader) {
ctx.server, ctx.channel.id(), ctx.service);
EXPECT_EQ(call.channel_id(), ctx.channel.id());
- call.Finish(std::as_bytes(std::span("This is a message")));
+ EXPECT_EQ(OkStatus(),
+ call.Finish(std::as_bytes(std::span("This is a message"))));
EXPECT_STREQ(reinterpret_cast<const char*>(
ctx.output.payloads<TestService::TestClientStreamRpc>()
@@ -255,7 +255,7 @@ TEST(RawServerReaderWriter, Open_ReturnsUsableReaderWriter) {
ctx.server, ctx.channel.id(), ctx.service);
EXPECT_EQ(call.channel_id(), ctx.channel.id());
- call.Write(std::as_bytes(std::span("321")));
+ EXPECT_EQ(OkStatus(), call.Write(std::as_bytes(std::span("321"))));
EXPECT_STREQ(
reinterpret_cast<const char*>(
diff --git a/pw_rpc/server.cc b/pw_rpc/server.cc
index 78681b53f..d7634ab6b 100644
--- a/pw_rpc/server.cc
+++ b/pw_rpc/server.cc
@@ -34,9 +34,8 @@ using internal::PacketType;
Status Server::ProcessPacket(ConstByteSpan packet_data,
ChannelOutput* interface) {
- PW_TRY_ASSIGN(Result<Packet> result,
+ PW_TRY_ASSIGN(Packet packet,
Endpoint::ProcessPacket(packet_data, Packet::kServer));
- Packet& packet = *result;
internal::rpc_lock().lock();
internal::ServerCall* const call =
diff --git a/pw_rpc/server_test.cc b/pw_rpc/server_test.cc
index 8161df4c1..e51c3aec6 100644
--- a/pw_rpc/server_test.cc
+++ b/pw_rpc/server_test.cc
@@ -59,6 +59,14 @@ class TestService : public Service {
std::array<TestMethodUnion, 2> methods_;
};
+class EmptyService : public Service {
+ public:
+ constexpr EmptyService() : Service(200, methods_) {}
+
+ private:
+ static constexpr std::array<TestMethodUnion, 0> methods_ = {};
+};
+
class BasicServer : public ::testing::Test {
protected:
static constexpr byte kDefaultPayload[] = {
@@ -71,8 +79,9 @@ class BasicServer : public ::testing::Test {
Channel(), // available for assignment
},
server_(channels_),
- service_(42) {
- server_.RegisterService(service_);
+ service_1_(1),
+ service_42_(42) {
+ server_.RegisterService(service_1_, service_42_, empty_service_);
}
std::span<const byte> EncodePacket(
@@ -111,18 +120,34 @@ class BasicServer : public ::testing::Test {
RawFakeChannelOutput<2> output_;
std::array<Channel, 3> channels_;
Server server_;
- TestService service_;
+ TestService service_1_;
+ TestService service_42_;
+ EmptyService empty_service_;
private:
byte request_buffer_[64];
};
-TEST_F(BasicServer, ProcessPacket_ValidMethod_InvokesMethod) {
+TEST_F(BasicServer, ProcessPacket_ValidMethodInService1_InvokesMethod) {
EXPECT_EQ(OkStatus(),
- server_.ProcessPacket(EncodePacket(PacketType::REQUEST, 1, 42, 100),
+ server_.ProcessPacket(EncodePacket(PacketType::REQUEST, 1, 1, 100),
+ output_));
+
+ const TestMethod& method = service_1_.method(100);
+ EXPECT_EQ(1u, method.last_channel_id());
+ ASSERT_EQ(sizeof(kDefaultPayload), method.last_request().payload().size());
+ EXPECT_EQ(std::memcmp(kDefaultPayload,
+ method.last_request().payload().data(),
+ method.last_request().payload().size()),
+ 0);
+}
+
+TEST_F(BasicServer, ProcessPacket_ValidMethodInService42_InvokesMethod) {
+ EXPECT_EQ(OkStatus(),
+ server_.ProcessPacket(EncodePacket(PacketType::REQUEST, 1, 42, 200),
output_));
- const TestMethod& method = service_.method(100);
+ const TestMethod& method = service_42_.method(200);
EXPECT_EQ(1u, method.last_channel_id());
ASSERT_EQ(sizeof(kDefaultPayload), method.last_request().payload().size());
EXPECT_EQ(std::memcmp(kDefaultPayload,
@@ -142,8 +167,8 @@ TEST_F(BasicServer, ProcessPacket_IncompletePacket_NothingIsInvoked) {
server_.ProcessPacket(EncodePacket(PacketType::REQUEST, 1, 42, 0),
output_));
- EXPECT_EQ(0u, service_.method(100).last_channel_id());
- EXPECT_EQ(0u, service_.method(200).last_channel_id());
+ EXPECT_EQ(0u, service_42_.method(100).last_channel_id());
+ EXPECT_EQ(0u, service_42_.method(200).last_channel_id());
}
TEST_F(BasicServer, ProcessPacket_NoChannel_SendsNothing) {
@@ -175,8 +200,8 @@ TEST_F(BasicServer, ProcessPacket_InvalidMethod_NothingIsInvoked) {
server_.ProcessPacket(EncodePacket(PacketType::REQUEST, 1, 42, 101),
output_));
- EXPECT_EQ(0u, service_.method(100).last_channel_id());
- EXPECT_EQ(0u, service_.method(200).last_channel_id());
+ EXPECT_EQ(0u, service_42_.method(100).last_channel_id());
+ EXPECT_EQ(0u, service_42_.method(200).last_channel_id());
}
TEST_F(BasicServer, ProcessPacket_ClientErrorWithInvalidMethod_NoResponse) {
@@ -278,7 +303,7 @@ TEST_F(BasicServer, CloseChannel_PendingCall) {
EXPECT_EQ(static_cast<internal::Endpoint&>(server_).active_call_count(), 0u);
internal::TestMethod::FakeServerCall call;
- service_.method(100).keep_call_active(call);
+ service_42_.method(100).keep_call_active(call);
EXPECT_EQ(OkStatus(),
server_.ProcessPacket(EncodePacket(PacketType::REQUEST, 1, 42, 100),
@@ -332,8 +357,11 @@ TEST_F(BasicServer, OpenChannel_AdditionalSlot) {
class BidiMethod : public BasicServer {
protected:
BidiMethod()
- : responder_(internal::CallContext(
- server_, channels_[0].id(), service_, service_.method(100), 0)) {
+ : responder_(internal::CallContext(server_,
+ channels_[0].id(),
+ service_42_,
+ service_42_.method(100),
+ 0)) {
ASSERT_TRUE(responder_.active());
}
@@ -348,7 +376,7 @@ TEST_F(BidiMethod, DuplicateCall_CancelsExistingThenCallsAgain) {
}
});
- const TestMethod& method = service_.method(100);
+ const TestMethod& method = service_42_.method(100);
ASSERT_EQ(method.invocations(), 0u);
EXPECT_EQ(OkStatus(),
@@ -468,7 +496,11 @@ TEST_F(BidiMethod, ClientStreamEnd_ErrorWhenClosed) {
class ServerStreamingMethod : public BasicServer {
protected:
ServerStreamingMethod()
- : call_(server_, channels_[0].id(), service_, service_.method(100), 0),
+ : call_(server_,
+ channels_[0].id(),
+ service_42_,
+ service_42_.method(100),
+ 0),
responder_(call_) {
ASSERT_TRUE(responder_.active());
}
diff --git a/pw_rpc/ts/BUILD.bazel b/pw_rpc/ts/BUILD.bazel
index 18b6ba7c0..ef3bc6496 100644
--- a/pw_rpc/ts/BUILD.bazel
+++ b/pw_rpc/ts/BUILD.bazel
@@ -35,8 +35,8 @@ ts_project(
declaration = True,
source_map = True,
deps = [
+ ":packet_proto_tspb",
"//pw_protobuf_compiler/ts:pw_protobuf_compiler",
- "//pw_rpc:packet_proto_tspb",
"//pw_status/ts:pw_status",
"@npm//@types/google-protobuf",
"@npm//wait-queue",
@@ -52,8 +52,8 @@ js_library(
ts_proto_collection(
name = "rpc_proto_collection",
- js_proto_library = "@//pw_rpc/ts:test_protos_tspb",
- proto_library = "@//pw_rpc/ts:test_protos",
+ js_proto_library = "@pigweed//pw_rpc/ts:test_protos_tspb",
+ proto_library = "@pigweed//pw_rpc/ts:test_protos",
)
ts_library(
@@ -68,11 +68,11 @@ ts_library(
":test_protos",
],
deps = [
- "test_protos_tspb",
":lib",
+ ":packet_proto_tspb",
":rpc_proto_collection",
+ ":test_protos_tspb",
"//pw_protobuf_compiler/ts:pw_protobuf_compiler",
- "//pw_rpc:packet_proto_tspb",
"//pw_status/ts:pw_status",
"@npm//@types/google-protobuf",
"@npm//@types/jasmine",
@@ -99,3 +99,8 @@ js_proto_library(
name = "test_protos_tspb",
protos = [":test_protos"],
)
+
+js_proto_library(
+ name = "packet_proto_tspb",
+ protos = ["//pw_rpc:internal_packet_proto"],
+)
diff --git a/pw_snapshot/docs.rst b/pw_snapshot/docs.rst
index fcf2ebb42..dd4c3e938 100644
--- a/pw_snapshot/docs.rst
+++ b/pw_snapshot/docs.rst
@@ -59,5 +59,5 @@ like a file so it can be analyzed at a later time.
While Pigweed provides libraries for each part of a snapshot's lifecycle, the
glue that puts all these pieces together is project specific. Please see the
-section on `Setting up a Snapshot Pipeline <module-pw_snapshot-setup>`_ for more
-information on how to bring up snapshot support for your project.
+section on :ref:`Setting up a Snapshot Pipeline<module-pw_snapshot-setup>` for
+more information on how to bring up snapshot support for your project.
diff --git a/pw_snapshot/module_usage.rst b/pw_snapshot/module_usage.rst
index 11f495768..f020fd722 100644
--- a/pw_snapshot/module_usage.rst
+++ b/pw_snapshot/module_usage.rst
@@ -26,31 +26,29 @@ write a few fields in a snapshot, you can do so with minimal memory overhead.
.. code-block:: cpp
+ #include "pw_bytes/span.h"
#include "pw_protobuf/encoder.h"
#include "pw_snapshot_protos/snapshot.pwpb.h"
-
- Result<ConstByteSpan> EncodeSnapshot(pw::ByteSpan encode_buffer,
- const CrashInfo &crash_info) {
- // Instantiate a generic proto encoder.
- pw::protobuf::NestedEncoder<kMaxNestedProtoDepth> proto_encoder(
- encode_buffer);
- // Get a proto-specific wrapper for the encoder.
- pw::snapshot::Snapshot::Encoder snapshot_encoder(&proto_encoder);
+ #include "pw_status/status.h"
+ #include "pw_stream/stream.h"
+
+ pw::Status EncodeSnapshot(pw::stream::Writer& writer,
+ pw::ByteSpan submessage_encode_buffer,
+ const CrashInfo &crash_info) {
+ // Create a snapshot proto encoder.
+ pw::snapshot::Snapshot::StreamEncoder snapshot_encoder(
+ writer, submessage_encode_buffer);
{ // This scope is required to handle RAII behavior of the submessage.
// Start writing the Metadata submessage.
- pw::snapshot::Metadata::Encoder metadata_encoder =
+ pw::snapshot::Metadata::StreamEncoder metadata_encoder =
snapshot_encoder.GetMetadataEncoder();
metadata_encoder.WriteReason(EncodeReasonLog(crash_info));
metadata_encoder.WriteFatal(true);
metadata_encoder.WriteProjectName(std::as_bytes(std::span("smart-shoe")));
metadata_encoder.WriteDeviceName(
std::as_bytes(std::span("smart-shoe-p1")));
- metadata_encoder.WriteUptime(
- std::chrono::time_point_cast<std::chrono::milliseconds>(
- pw::chrono::SystemClock::now()));
}
- // Finalize the proto encode so it can be flushed somewhere.
- return proto_encoder.Encode();
+ return proto_encoder.status();
}
-------------------
@@ -69,7 +67,7 @@ pw_protobuf.
.. code-block:: cpp
{
- pw::Snapshot::TagsEntry::Encoder tags_encoder =
+ pw::Snapshot::TagsEntry::StreamEncoder tags_encoder =
snapshot_encoder.GetTagsEncoder();
tags_encoder.WriteKey("BtState");
tags_encoder.WriteValue("connected");
diff --git a/pw_software_update/BUILD.bazel b/pw_software_update/BUILD.bazel
index c4c53476b..ef586fa1a 100644
--- a/pw_software_update/BUILD.bazel
+++ b/pw_software_update/BUILD.bazel
@@ -52,6 +52,7 @@ pw_cc_library(
"//pw_protobuf",
"//pw_status",
"//pw_stream",
+ "//pw_string",
],
)
@@ -66,6 +67,7 @@ pw_cc_library(
"//pw_log",
"//pw_result",
"//pw_status",
+ "//pw_sync:borrow",
"//pw_sync:lock_annotations",
"//pw_sync:mutex",
"//pw_sync:string",
diff --git a/pw_software_update/BUILD.gn b/pw_software_update/BUILD.gn
index 3acad0407..f59438be0 100644
--- a/pw_software_update/BUILD.gn
+++ b/pw_software_update/BUILD.gn
@@ -102,6 +102,7 @@ if (pw_crypto_SHA256_BACKEND != "" && pw_crypto_ECDSA_BACKEND != "") {
":config",
":protos.pwpb",
dir_pw_log,
+ dir_pw_string,
]
sources = [
"manifest_accessor.cc",
@@ -126,6 +127,7 @@ if (dir_pw_third_party_nanopb != "" && dir_pw_third_party_protobuf != "") {
deps = [
":config",
":protos.pwpb",
+ "$dir_pw_sync:borrow",
"$dir_pw_sync:lock_annotations",
"$dir_pw_sync:mutex",
dir_pw_log,
diff --git a/pw_software_update/bundled_update_service.cc b/pw_software_update/bundled_update_service.cc
index dcb71048a..cbbc67e4f 100644
--- a/pw_software_update/bundled_update_service.cc
+++ b/pw_software_update/bundled_update_service.cc
@@ -12,79 +12,89 @@
// License for the specific language governing permissions and limitations under
// the License.
-#include "pw_software_update/config.h"
+#define PW_LOG_MODULE_NAME "PWSU"
+#define PW_LOG_LEVEL PW_LOG_LEVEL_WARN
-#define PW_LOG_LEVEL PW_SOFTWARE_UPDATE_CONFIG_LOG_LEVEL
+#include "pw_software_update/bundled_update_service.h"
#include <mutex>
#include <string_view>
#include "pw_log/log.h"
#include "pw_result/result.h"
-#include "pw_software_update/bundled_update_service.h"
+#include "pw_software_update/config.h"
#include "pw_software_update/manifest_accessor.h"
#include "pw_software_update/update_bundle.pwpb.h"
#include "pw_status/status.h"
#include "pw_status/status_with_size.h"
#include "pw_status/try.h"
+#include "pw_string/string_builder.h"
#include "pw_string/util.h"
+#include "pw_sync/borrow.h"
#include "pw_sync/mutex.h"
#include "pw_tokenizer/tokenize.h"
-// TODO(keir): Convert all the CHECKs in the RPC service to gracefully report
-// errors.
-#define SET_ERROR(res, message, ...) \
- do { \
- PW_LOG_ERROR(message, __VA_ARGS__); \
- if (!IsFinished()) { \
- Finish(res); \
- size_t note_size = sizeof(status_.note.bytes); \
- PW_TOKENIZE_TO_BUFFER( \
- status_.note.bytes, &(note_size), message, __VA_ARGS__); \
- status_.note.size = note_size; \
- status_.has_note = true; \
- } \
- } while (false)
-
namespace pw::software_update {
namespace {
+using BorrowedStatus =
+ sync::BorrowedPointer<pw_software_update_BundledUpdateStatus, sync::Mutex>;
+using BundledUpdateState = pw_software_update_BundledUpdateState_Enum;
+using BundledUpdateStatus = pw_software_update_BundledUpdateStatus;
-constexpr std::string_view kTopLevelTargetsName = "targets";
-
+// TODO(keir): Convert all the CHECKs in the RPC service to gracefully report
+// errors.
+#define SET_ERROR(res, message, ...) \
+ do { \
+ PW_LOG_ERROR(message, __VA_ARGS__); \
+ if (!IsFinished()) { \
+ Finish(res); \
+ { \
+ BorrowedStatus borrowed_status = status_.acquire(); \
+ size_t note_size = sizeof(borrowed_status->note.bytes); \
+ PW_TOKENIZE_TO_BUFFER( \
+ borrowed_status->note.bytes, &(note_size), message, __VA_ARGS__); \
+ borrowed_status->note.size = note_size; \
+ borrowed_status->has_note = true; \
+ } \
+ } \
+ } while (false)
} // namespace
-Status BundledUpdateService::GetStatus(
- const pw_protobuf_Empty&,
- pw_software_update_BundledUpdateStatus& response) {
- std::lock_guard lock(mutex_);
- response = status_;
+Status BundledUpdateService::GetStatus(const pw_protobuf_Empty&,
+ BundledUpdateStatus& response) {
+ response = *status_.acquire();
return OkStatus();
}
Status BundledUpdateService::Start(
const pw_software_update_StartRequest& request,
- pw_software_update_BundledUpdateStatus& response) {
+ BundledUpdateStatus& response) {
std::lock_guard lock(mutex_);
// Check preconditions.
- if (status_.state != pw_software_update_BundledUpdateState_Enum_INACTIVE) {
+ const BundledUpdateState state = status_.acquire()->state;
+ if (state != pw_software_update_BundledUpdateState_Enum_INACTIVE) {
SET_ERROR(pw_software_update_BundledUpdateResult_Enum_UNKNOWN_ERROR,
"Start() can only be called from INACTIVE state. "
"Current state: %d. Abort() then Reset() must be called first",
- static_cast<int>(status_.state));
- response = status_;
+ static_cast<int>(state));
+ response = *status_.acquire();
return Status::FailedPrecondition();
}
- PW_DCHECK(!status_.has_transfer_id);
- PW_DCHECK(!status_.has_result);
- PW_DCHECK(status_.current_state_progress_hundreth_percent == 0);
- PW_DCHECK(status_.bundle_filename[0] == '\0');
- PW_DCHECK(status_.note.size == 0);
+
+ {
+ BorrowedStatus borrowed_status = status_.acquire();
+ PW_DCHECK(!borrowed_status->has_transfer_id);
+ PW_DCHECK(!borrowed_status->has_result);
+ PW_DCHECK(borrowed_status->current_state_progress_hundreth_percent == 0);
+ PW_DCHECK(borrowed_status->bundle_filename[0] == '\0');
+ PW_DCHECK(borrowed_status->note.size == 0);
+ }
// Notify the backend of pending transfer.
if (const Status status = backend_.BeforeUpdateStart(); !status.ok()) {
SET_ERROR(pw_software_update_BundledUpdateResult_Enum_UNKNOWN_ERROR,
"Backend error on BeforeUpdateStart()");
- response = status_;
+ response = *status_.acquire();
return status;
}
@@ -95,60 +105,70 @@ Status BundledUpdateService::Start(
if (!possible_transfer_id.ok()) {
SET_ERROR(pw_software_update_BundledUpdateResult_Enum_TRANSFER_FAILED,
"Couldn't enable bundle transfer");
- response = status_;
+ response = *status_.acquire();
return possible_transfer_id.status();
}
// Update state.
- status_.transfer_id = possible_transfer_id.value();
- status_.has_transfer_id = true;
- if (request.has_bundle_filename) {
- const StatusWithSize sws = string::Copy(request.bundle_filename,
- status_.bundle_filename,
- sizeof(status_.bundle_filename));
- PW_DCHECK_OK(sws.status(),
- "bundle_filename options max_sizes do not match");
- status_.has_bundle_filename = true;
- }
- status_.state = pw_software_update_BundledUpdateState_Enum_TRANSFERRING;
- response = status_;
+ {
+ BorrowedStatus borrowed_status = status_.acquire();
+ borrowed_status->transfer_id = possible_transfer_id.value();
+ borrowed_status->has_transfer_id = true;
+ if (request.has_bundle_filename) {
+ const StatusWithSize sws =
+ string::Copy(request.bundle_filename,
+ borrowed_status->bundle_filename,
+ sizeof(borrowed_status->bundle_filename));
+ PW_DCHECK_OK(sws.status(),
+ "bundle_filename options max_sizes do not match");
+ borrowed_status->has_bundle_filename = true;
+ }
+ borrowed_status->state =
+ pw_software_update_BundledUpdateState_Enum_TRANSFERRING;
+ response = *borrowed_status;
+ }
return OkStatus();
}
-Status BundledUpdateService::SetTransferred(
- const pw_protobuf_Empty&,
- ::pw_software_update_BundledUpdateStatus& response) {
- if (status_.state !=
- pw_software_update_BundledUpdateState_Enum_TRANSFERRING &&
- status_.state != pw_software_update_BundledUpdateState_Enum_INACTIVE) {
+Status BundledUpdateService::SetTransferred(const pw_protobuf_Empty&,
+ BundledUpdateStatus& response) {
+ const BundledUpdateState state = status_.acquire()->state;
+
+ if (state != pw_software_update_BundledUpdateState_Enum_TRANSFERRING &&
+ state != pw_software_update_BundledUpdateState_Enum_INACTIVE) {
SET_ERROR(pw_software_update_BundledUpdateResult_Enum_UNKNOWN_ERROR,
"SetTransferred() can only be called from TRANSFERRING or "
"INACTIVE state. State: %d",
- static_cast<int>(status_.state));
- response = status_;
+ static_cast<int>(state));
+ response = *status_.acquire();
return OkStatus();
}
+
NotifyTransferSucceeded();
- response = status_;
+
+ response = *status_.acquire();
return OkStatus();
}
// TODO: Check for "ABORTING" state and bail if it's set.
void BundledUpdateService::DoVerify() {
std::lock_guard guard(mutex_);
- if (status_.state == pw_software_update_BundledUpdateState_Enum_VERIFIED) {
+ const BundledUpdateState state = status_.acquire()->state;
+
+ if (state == pw_software_update_BundledUpdateState_Enum_VERIFIED) {
return; // Already done!
}
// Ensure we're in the right state.
- if (status_.state != pw_software_update_BundledUpdateState_Enum_TRANSFERRED) {
+ if (state != pw_software_update_BundledUpdateState_Enum_TRANSFERRED) {
SET_ERROR(pw_software_update_BundledUpdateResult_Enum_VERIFY_FAILED,
"DoVerify() must be called from TRANSFERRED state. State: %d",
- static_cast<int>(status_.state));
+ static_cast<int>(state));
return;
}
- status_.state = pw_software_update_BundledUpdateState_Enum_VERIFYING;
+ status_.acquire()->state =
+ pw_software_update_BundledUpdateState_Enum_VERIFYING;
// Notify backend about pending verify.
if (const Status status = backend_.BeforeBundleVerify(); !status.ok()) {
@@ -167,7 +187,7 @@ void BundledUpdateService::DoVerify() {
bundle_open_ = true;
// Have the backend verify the user_manifest if present.
- if (!backend_.VerifyManifest(bundle_.GetManifestAccessor()).ok()) {
+ if (!backend_.VerifyManifest(bundle_.GetManifest()).ok()) {
SET_ERROR(pw_software_update_BundledUpdateResult_Enum_VERIFY_FAILED,
"Backend::VerifyUserManifest() failed");
return;
@@ -180,30 +200,29 @@ void BundledUpdateService::DoVerify() {
"Backend::AfterBundleVerified() failed");
return;
}
- status_.state = pw_software_update_BundledUpdateState_Enum_VERIFIED;
+ status_.acquire()->state =
+ pw_software_update_BundledUpdateState_Enum_VERIFIED;
}
-Status BundledUpdateService::Verify(
- const pw_protobuf_Empty&,
- pw_software_update_BundledUpdateStatus& response) {
+Status BundledUpdateService::Verify(const pw_protobuf_Empty&,
+ BundledUpdateStatus& response) {
std::lock_guard lock(mutex_);
+ const BundledUpdateState state = status_.acquire()->state;
// Already done? Bail.
- if (status_.state == pw_software_update_BundledUpdateState_Enum_VERIFIED) {
+ if (state == pw_software_update_BundledUpdateState_Enum_VERIFIED) {
PW_LOG_DEBUG("Skipping verify since already verified");
return OkStatus();
}
// TODO: Remove the transferring permitted state here ASAP.
// Ensure we're in the right state.
- if ((status_.state !=
- pw_software_update_BundledUpdateState_Enum_TRANSFERRING) &&
- (status_.state !=
- pw_software_update_BundledUpdateState_Enum_TRANSFERRED)) {
+ if ((state != pw_software_update_BundledUpdateState_Enum_TRANSFERRING) &&
+ (state != pw_software_update_BundledUpdateState_Enum_TRANSFERRED)) {
SET_ERROR(pw_software_update_BundledUpdateResult_Enum_VERIFY_FAILED,
"Verify() must be called from TRANSFERRED state. State: %d",
- static_cast<int>(status_.state));
- response = status_;
+ static_cast<int>(state));
+ response = *status_.acquire();
return Status::FailedPrecondition();
}
@@ -230,35 +249,34 @@ Status BundledUpdateService::Verify(
if (!status.ok()) {
SET_ERROR(pw_software_update_BundledUpdateResult_Enum_VERIFY_FAILED,
"Unable to equeue apply to work queue");
- response = status_;
+ response = *status_.acquire();
return status;
}
work_enqueued_ = true;
- response = status_;
+ response = *status_.acquire();
return OkStatus();
}
-Status BundledUpdateService::Apply(
- const pw_protobuf_Empty&,
- pw_software_update_BundledUpdateStatus& response) {
+Status BundledUpdateService::Apply(const pw_protobuf_Empty&,
+ BundledUpdateStatus& response) {
std::lock_guard lock(mutex_);
+ const BundledUpdateState state = status_.acquire()->state;
// We do not wait to go into a finished error state if we're already
// applying, instead just let them know that yes we are working on it --
// hold on.
- if (status_.state == pw_software_update_BundledUpdateState_Enum_APPLYING) {
+ if (state == pw_software_update_BundledUpdateState_Enum_APPLYING) {
PW_LOG_DEBUG("Apply is already active");
return OkStatus();
}
- if ((status_.state !=
- pw_software_update_BundledUpdateState_Enum_TRANSFERRED) &&
- (status_.state != pw_software_update_BundledUpdateState_Enum_VERIFIED)) {
+ if ((state != pw_software_update_BundledUpdateState_Enum_TRANSFERRED) &&
+ (state != pw_software_update_BundledUpdateState_Enum_VERIFIED)) {
SET_ERROR(pw_software_update_BundledUpdateResult_Enum_APPLY_FAILED,
"Apply() must be called from TRANSFERRED or VERIFIED state. "
"State: %d",
- static_cast<int>(status_.state));
+ static_cast<int>(state));
return Status::FailedPrecondition();
}
@@ -286,7 +304,7 @@ Status BundledUpdateService::Apply(
if (!status.ok()) {
SET_ERROR(pw_software_update_BundledUpdateResult_Enum_APPLY_FAILED,
"Unable to equeue apply to work queue");
- response = status_;
+ response = *status_.acquire();
return status;
}
work_enqueued_ = true;
@@ -296,59 +314,18 @@ Status BundledUpdateService::Apply(
void BundledUpdateService::DoApply() {
std::lock_guard guard(mutex_);
+ const BundledUpdateState state = status_.acquire()->state;
PW_LOG_DEBUG("Attempting to apply the update");
- if (status_.state != pw_software_update_BundledUpdateState_Enum_VERIFIED) {
+ if (state != pw_software_update_BundledUpdateState_Enum_VERIFIED) {
SET_ERROR(pw_software_update_BundledUpdateResult_Enum_APPLY_FAILED,
"Apply() must be called from VERIFIED state. State: %d",
- static_cast<int>(status_.state));
- return;
- }
- status_.state = pw_software_update_BundledUpdateState_Enum_APPLYING;
-
- protobuf::StringToMessageMap signed_targets_metadata_map =
- bundle_.GetDecoder().AsStringToMessageMap(static_cast<uint32_t>(
- pw::software_update::UpdateBundle::Fields::TARGETS_METADATA));
- if (const Status status = signed_targets_metadata_map.status();
- !status.ok()) {
- SET_ERROR(pw_software_update_BundledUpdateResult_Enum_APPLY_FAILED,
- "Update bundle does not contain the targets_metadata map: %d",
- static_cast<int>(status.code()));
- return;
- }
-
- // There should only be one element in the map, which is the top-level
- // targets metadata.
- protobuf::Message signed_targets_metadata =
- signed_targets_metadata_map[kTopLevelTargetsName];
- if (const Status status = signed_targets_metadata.status(); !status.ok()) {
- SET_ERROR(pw_software_update_BundledUpdateResult_Enum_APPLY_FAILED,
- "The targets_metadata map does not contain the targets entry: %d",
- static_cast<int>(status.code()));
+ static_cast<int>(state));
return;
}
- protobuf::Message targets_metadata = signed_targets_metadata.AsMessage(
- static_cast<uint32_t>(pw::software_update::SignedTargetsMetadata::Fields::
- SERIALIZED_TARGETS_METADATA));
- if (const Status status = targets_metadata.status(); !status.ok()) {
- SET_ERROR(pw_software_update_BundledUpdateResult_Enum_APPLY_FAILED,
- "The targets targets_metadata entry does not contain the "
- "serialized_target_metadata: %d",
- static_cast<int>(status.code()));
- return;
- }
-
- protobuf::RepeatedMessages target_files =
- targets_metadata.AsRepeatedMessages(static_cast<uint32_t>(
- pw::software_update::TargetsMetadata::Fields::TARGET_FILES));
- if (const Status status = target_files.status(); !status.ok()) {
- SET_ERROR(
- pw_software_update_BundledUpdateResult_Enum_APPLY_FAILED,
- "The serialized_target_metadata does not contain target_files: %d",
- static_cast<int>(status.code()));
- return;
- }
+ status_.acquire()->state =
+ pw_software_update_BundledUpdateState_Enum_APPLYING;
if (const Status status = backend_.BeforeApply(); !status.ok()) {
SET_ERROR(pw_software_update_BundledUpdateResult_Enum_APPLY_FAILED,
@@ -357,53 +334,25 @@ void BundledUpdateService::DoApply() {
return;
}
- // In order to report apply progress, quickly scan to see how many bytes will
- // be applied.
- size_t target_file_bytes_to_apply = 0;
- protobuf::StringToBytesMap target_payloads =
- bundle_.GetDecoder().AsStringToBytesMap(static_cast<uint32_t>(
- pw::software_update::UpdateBundle::Fields::TARGET_PAYLOADS));
- if (!target_payloads.status().ok()) {
- SET_ERROR(
- pw_software_update_BundledUpdateResult_Enum_APPLY_FAILED,
- "Failed to iterate the UpdateBundle target_payloads map entries: %d",
- static_cast<int>(target_payloads.status().code()));
- return;
- }
- for (pw::protobuf::StringToBytesMapEntry target_payload : target_payloads) {
- protobuf::Bytes target_payload_bytes = target_payload.Value();
- if (!target_payload_bytes.status().ok()) {
- SET_ERROR(pw_software_update_BundledUpdateResult_Enum_APPLY_FAILED,
- "Failed to read a UpdateBundle target_payloads map entry: %d",
- static_cast<int>(target_payload_bytes.status().code()));
- return;
- }
- target_file_bytes_to_apply +=
- target_payload_bytes.GetBytesReader().ConservativeReadLimit();
- }
+ // In order to report apply progress, quickly scan to see how many bytes
+ // will be applied.
+ Result<uint64_t> total_payload_bytes = bundle_.GetTotalPayloadSize();
+ PW_CHECK_OK(total_payload_bytes.status());
+ size_t target_file_bytes_to_apply =
+ static_cast<size_t>(total_payload_bytes.value());
+
+ protobuf::RepeatedMessages target_files =
+ bundle_.GetManifest().GetTargetFiles();
+ PW_CHECK_OK(target_files.status());
size_t target_file_bytes_applied = 0;
for (pw::protobuf::Message file_name : target_files) {
- // TODO: Use a config.h parameter for this.
- constexpr size_t kFileNameMaxSize = 32;
- std::array<std::byte, kFileNameMaxSize> buf = {};
+ std::array<std::byte, MAX_TARGET_NAME_LENGTH> buf = {};
protobuf::String name = file_name.AsString(static_cast<uint32_t>(
pw::software_update::TargetFile::Fields::FILE_NAME));
- if (!name.status().ok()) {
- SET_ERROR(
- pw_software_update_BundledUpdateResult_Enum_APPLY_FAILED,
- "The serialized_target_metadata failed to iterate target files: %d",
- static_cast<int>(name.status().code()));
- return;
- }
+ PW_CHECK_OK(name.status());
const Result<ByteSpan> read_result = name.GetBytesReader().Read(buf);
- if (!read_result.ok()) {
- SET_ERROR(
- pw_software_update_BundledUpdateResult_Enum_APPLY_FAILED,
- "The serialized_target_metadata failed to read target filename: %d",
- static_cast<int>(read_result.status().code()));
- return;
- }
+ PW_CHECK_OK(read_result.status());
const ConstByteSpan file_name_span = read_result.value();
const std::string_view file_name_view(
reinterpret_cast<const char*>(file_name_span.data()),
@@ -411,8 +360,23 @@ void BundledUpdateService::DoApply() {
if (file_name_view.compare(kUserManifestTargetFileName) == 0) {
continue; // user_manifest is not applied by the backend.
}
+ // Try to get an IntervalReader for the current file.
stream::IntervalReader file_reader =
bundle_.GetTargetPayload(file_name_view);
+ if (file_reader.status().IsNotFound()) {
+ PW_LOG_INFO(
+ "Contents of file %s missing from bundle; ignoring",
+ pw::MakeString<MAX_TARGET_NAME_LENGTH>(file_name_view).c_str());
+ continue;
+ }
+ if (!file_reader.ok()) {
+ SET_ERROR(pw_software_update_BundledUpdateResult_Enum_APPLY_FAILED,
+ "Could not open contents of file %s from bundle; "
+ "aborting update apply phase",
+ static_cast<int>(file_reader.status().code()));
+ return;
+ }
+
const size_t bundle_offset = file_reader.start();
if (const Status status = backend_.ApplyTargetFile(
file_name_view, file_reader, bundle_offset);
@@ -431,9 +395,10 @@ void BundledUpdateService::DoApply() {
target_file_bytes_to_apply,
static_cast<unsigned long>(progress_hundreth_percent / 100));
{
- status_.current_state_progress_hundreth_percent =
+ BorrowedStatus borrowed_status = status_.acquire();
+ borrowed_status->current_state_progress_hundreth_percent =
progress_hundreth_percent;
- status_.has_current_state_progress_hundreth_percent = true;
+ borrowed_status->has_current_state_progress_hundreth_percent = true;
}
}
@@ -452,49 +417,53 @@ void BundledUpdateService::DoApply() {
Finish(pw_software_update_BundledUpdateResult_Enum_SUCCESS);
}
-Status BundledUpdateService::Abort(
- const pw_protobuf_Empty&,
- pw_software_update_BundledUpdateStatus& response) {
+Status BundledUpdateService::Abort(const pw_protobuf_Empty&,
+ BundledUpdateStatus& response) {
std::lock_guard lock(mutex_);
- if (status_.state == pw_software_update_BundledUpdateState_Enum_APPLYING) {
+ const BundledUpdateState state = status_.acquire()->state;
+
+ if (state == pw_software_update_BundledUpdateState_Enum_APPLYING) {
return Status::FailedPrecondition();
}
- if (status_.state == pw_software_update_BundledUpdateState_Enum_INACTIVE ||
- status_.state == pw_software_update_BundledUpdateState_Enum_FINISHED) {
+ if (state == pw_software_update_BundledUpdateState_Enum_INACTIVE ||
+ state == pw_software_update_BundledUpdateState_Enum_FINISHED) {
SET_ERROR(pw_software_update_BundledUpdateResult_Enum_UNKNOWN_ERROR,
"Tried to abort when already INACTIVE or FINISHED");
return Status::FailedPrecondition();
}
// TODO: Switch abort to async; this state change isn't externally visible.
- status_.state = pw_software_update_BundledUpdateState_Enum_ABORTING;
+ status_.acquire()->state =
+ pw_software_update_BundledUpdateState_Enum_ABORTING;
SET_ERROR(pw_software_update_BundledUpdateResult_Enum_ABORTED,
"Update abort requested");
- response = status_;
+ response = *status_.acquire();
return OkStatus();
}
-Status BundledUpdateService::Reset(
- const pw_protobuf_Empty&,
- pw_software_update_BundledUpdateStatus& response) {
+Status BundledUpdateService::Reset(const pw_protobuf_Empty&,
+ BundledUpdateStatus& response) {
std::lock_guard lock(mutex_);
+ const BundledUpdateState state = status_.acquire()->state;
- if (status_.state == pw_software_update_BundledUpdateState_Enum_INACTIVE) {
+ if (state == pw_software_update_BundledUpdateState_Enum_INACTIVE) {
return OkStatus(); // Already done.
}
- if (status_.state != pw_software_update_BundledUpdateState_Enum_FINISHED) {
+ if (state != pw_software_update_BundledUpdateState_Enum_FINISHED) {
SET_ERROR(
pw_software_update_BundledUpdateResult_Enum_UNKNOWN_ERROR,
"Reset() must be called from FINISHED or INACTIVE state. State: %d",
- static_cast<int>(status_.state));
- response = status_;
+ static_cast<int>(state));
+ response = *status_.acquire();
return Status::FailedPrecondition();
}
- status_ = {};
- status_.state = pw_software_update_BundledUpdateState_Enum_INACTIVE;
+ {
+ *status_.acquire() = {
+ .state = pw_software_update_BundledUpdateState_Enum_INACTIVE};
+ }
// Reset the bundle.
if (bundle_open_) {
@@ -503,37 +472,41 @@ Status BundledUpdateService::Reset(
bundle_open_ = false;
}
- response = status_;
+ response = *status_.acquire();
return OkStatus();
}
void BundledUpdateService::NotifyTransferSucceeded() {
std::lock_guard lock(mutex_);
+ const BundledUpdateState state = status_.acquire()->state;
- if (status_.state !=
- pw_software_update_BundledUpdateState_Enum_TRANSFERRING) {
+ if (state != pw_software_update_BundledUpdateState_Enum_TRANSFERRING) {
// This can happen if the update gets Abort()'d during the transfer and
// the transfer completes successfully.
PW_LOG_WARN(
"Got transfer succeeded notification when not in TRANSFERRING state. "
"State: %d",
- static_cast<int>(status_.state));
+ static_cast<int>(state));
}
- if (status_.has_transfer_id) {
+
+ const bool transfer_ongoing = status_.acquire()->has_transfer_id;
+ if (transfer_ongoing) {
backend_.DisableBundleTransferHandler();
- status_.has_transfer_id = false;
+ status_.acquire()->has_transfer_id = false;
} else {
PW_LOG_WARN("No ongoing transfer found, forcefully set TRANSFERRED.");
}
- status_.state = pw_software_update_BundledUpdateState_Enum_TRANSFERRED;
+ status_.acquire()->state =
+ pw_software_update_BundledUpdateState_Enum_TRANSFERRED;
}
void BundledUpdateService::Finish(
pw_software_update_BundledUpdateResult_Enum result) {
if (result == pw_software_update_BundledUpdateResult_Enum_SUCCESS) {
- status_.current_state_progress_hundreth_percent = 0;
- status_.has_current_state_progress_hundreth_percent = false;
+ BorrowedStatus borrowed_status = status_.acquire();
+ borrowed_status->current_state_progress_hundreth_percent = 0;
+ borrowed_status->has_current_state_progress_hundreth_percent = false;
} else {
// In the case of error, notify backend that we're about to abort the
// software update.
@@ -541,10 +514,11 @@ void BundledUpdateService::Finish(
}
// Turn down the transfer if one is in progress.
- if (status_.has_transfer_id) {
+ const bool transfer_ongoing = status_.acquire()->has_transfer_id;
+ if (transfer_ongoing) {
backend_.DisableBundleTransferHandler();
}
- status_.has_transfer_id = false;
+ status_.acquire()->has_transfer_id = false;
// Close out any open bundles.
if (bundle_open_) {
@@ -552,9 +526,13 @@ void BundledUpdateService::Finish(
PW_CHECK_OK(bundle_.Close());
bundle_open_ = false;
}
- status_.state = pw_software_update_BundledUpdateState_Enum_FINISHED;
- status_.result = result;
- status_.has_result = true;
+ {
+ BorrowedStatus borrowed_status = status_.acquire();
+ borrowed_status->state =
+ pw_software_update_BundledUpdateState_Enum_FINISHED;
+ borrowed_status->result = result;
+ borrowed_status->has_result = true;
+ }
}
} // namespace pw::software_update
diff --git a/pw_software_update/manifest_accessor.cc b/pw_software_update/manifest_accessor.cc
index 5ec09f659..14e7ad1d1 100644
--- a/pw_software_update/manifest_accessor.cc
+++ b/pw_software_update/manifest_accessor.cc
@@ -13,24 +13,120 @@
// the License.
#include "pw_software_update/manifest_accessor.h"
-#include "pw_assert/check.h"
+#include "pw_software_update/config.h"
+#include "pw_software_update/update_bundle.pwpb.h"
#include "pw_software_update/update_bundle_accessor.h"
namespace pw::software_update {
-ManifestAccessor::ManifestAccessor(UpdateBundleAccessor* update_bundle_accessor)
- : update_bundle_accessor_(update_bundle_accessor) {
- PW_CHECK_NOTNULL(update_bundle_accessor_);
+ManifestAccessor ManifestAccessor::FromBundle(protobuf::Message bundle) {
+ protobuf::Message targets_metadata =
+ bundle
+ .AsStringToMessageMap(static_cast<uint32_t>(
+ UpdateBundle::Fields::TARGETS_METADATA))[kTopLevelTargetsName]
+ .AsMessage(static_cast<uint32_t>(
+ SignedTargetsMetadata::Fields::SERIALIZED_TARGETS_METADATA));
+
+ protobuf::Bytes user_manifest =
+ bundle.AsStringToBytesMap(static_cast<uint32_t>(
+ UpdateBundle::Fields::TARGET_PAYLOADS))[kUserManifestTargetFileName];
+
+ return ManifestAccessor(targets_metadata, user_manifest);
}
-Status ManifestAccessor::WriteManifest(stream::Writer& writer) {
- return update_bundle_accessor_->PersistManifest(writer);
-};
+ManifestAccessor ManifestAccessor::FromManifest(protobuf::Message manifest) {
+ protobuf::Message targets_metadata =
+ manifest.AsStringToMessageMap(static_cast<uint32_t>(
+ Manifest::Fields::TARGETS_METADATA))[kTopLevelTargetsName];
+
+ protobuf::Bytes user_manifest =
+ manifest.AsBytes(static_cast<uint32_t>(Manifest::Fields::USER_MANIFEST));
+
+ return ManifestAccessor(targets_metadata, user_manifest);
+}
+
+protobuf::RepeatedMessages ManifestAccessor::GetTargetFiles() {
+ PW_TRY(status());
+ return targets_metadata_.AsRepeatedMessages(
+ static_cast<uint32_t>(TargetsMetadata::Fields::TARGET_FILES));
+}
-pw::stream::IntervalReader ManifestAccessor::GetUserManifest() {
- stream::IntervalReader user_manifest =
- update_bundle_accessor_->GetTargetPayload("user_manifest");
- return user_manifest;
-};
+protobuf::Uint32 ManifestAccessor::GetVersion() {
+ PW_TRY(status());
+ return targets_metadata_
+ .AsMessage(
+ static_cast<uint32_t>(TargetsMetadata::Fields::COMMON_METADATA))
+ .AsUint32(static_cast<uint32_t>(CommonMetadata::Fields::VERSION));
+}
+
+Status ManifestAccessor::Export(stream::Writer& writer) {
+ PW_TRY(status());
+
+ // Write out the targets metadata map.
+ stream::MemoryReader name_reader(
+ std::as_bytes(std::span(kTopLevelTargetsName)));
+ stream::IntervalReader metadata_reader =
+ targets_metadata_.ToBytes().GetBytesReader();
+ std::byte stream_pipe_buffer[WRITE_MANIFEST_STREAM_PIPE_BUFFER_SIZE];
+ PW_TRY(protobuf::WriteProtoStringToBytesMapEntry(
+ static_cast<uint32_t>(Manifest::Fields::TARGETS_METADATA),
+ name_reader,
+ kTopLevelTargetsName.size(),
+ metadata_reader,
+ metadata_reader.interval_size(),
+ stream_pipe_buffer,
+ writer));
+
+  // The user manifest is optional; write it out if available.
+ stream::IntervalReader user_manifest_reader = user_manifest_.GetBytesReader();
+ if (user_manifest_reader.ok()) {
+ protobuf::StreamEncoder encoder(writer, {});
+ PW_TRY(encoder.WriteBytesFromStream(
+ static_cast<uint32_t>(Manifest::Fields::USER_MANIFEST),
+ user_manifest_reader,
+ user_manifest_reader.interval_size(),
+ stream_pipe_buffer));
+ }
+
+ return OkStatus();
+}
+
+protobuf::Message ManifestAccessor::GetTargetFile(protobuf::String name) {
+ PW_TRY(status());
+
+ std::array<std::byte, MAX_TARGET_NAME_LENGTH> name_buf = {};
+
+ stream::IntervalReader name_reader = name.GetBytesReader();
+ PW_TRY(name_reader.status());
+
+ if (name_reader.interval_size() > name_buf.size()) {
+ return Status::OutOfRange();
+ }
+
+ Result<ByteSpan> read_result = name_reader.Read(name_buf);
+ PW_TRY(read_result.status());
+
+ const ConstByteSpan name_span = read_result.value();
+ const std::string_view name_view(
+ reinterpret_cast<const char*>(name_span.data()), name_span.size_bytes());
+
+ return GetTargetFile(name_view);
+}
+
+protobuf::Message ManifestAccessor::GetTargetFile(std::string_view name) {
+ PW_TRY(status());
+
+ for (protobuf::Message target_file : GetTargetFiles()) {
+ protobuf::String target_name = target_file.AsString(
+ static_cast<uint32_t>(TargetFile::Fields::FILE_NAME));
+ Result<bool> compare_result = target_name.Equal(name);
+ PW_TRY(compare_result.status());
+ if (compare_result.value()) {
+ return target_file;
+ }
+ }
+
+ return Status::NotFound();
+}
} // namespace pw::software_update
diff --git a/pw_software_update/public/pw_software_update/bundled_update_backend.h b/pw_software_update/public/pw_software_update/bundled_update_backend.h
index 40b704a4b..ae80e065e 100644
--- a/pw_software_update/public/pw_software_update/bundled_update_backend.h
+++ b/pw_software_update/public/pw_software_update/bundled_update_backend.h
@@ -30,13 +30,17 @@ class BundledUpdateBackend {
public:
virtual ~BundledUpdateBackend() = default;
- // Optionally verify that the instance/content of the target file in use
- // on-device matches the metadata in the given manifest, called before apply.
- // (e.g. by checksum, if failed abort partial update and wipe/mark-invalid
- // running manifest)
+ // Perform optional, product-specific validations to the specified target
+ // file, using whatever metadata available in manifest.
+ //
+ // This is called for each target file after the standard verification has
+ // passed.
virtual Status VerifyTargetFile(
[[maybe_unused]] ManifestAccessor manifest,
[[maybe_unused]] std::string_view target_file_name) {
+ // TODO(backend): Perform any additional product-specific validations.
+ // It is safe to assume the target's payload has passed standard
+ // verification.
return OkStatus();
};
@@ -86,16 +90,99 @@ class BundledUpdateBackend {
stream::Reader& target_payload,
size_t update_bundle_offset) = 0;
- // Get reader of the device's current manifest.
+ // Backend to probe the device manifest and prepare a ready-to-go reader
+  // for it. See the comments to `GetCurrentManifestReader()` for more context.
+ virtual Status BeforeManifestRead() {
+ // Todo(backend):
+ // 1. Probe device to see if a well-formed manifest already exists.
+ // 2. If not, return `Status::NotFound()`. Note this will cause
+ // anti-rollback to skip. So please don't always return
+ // `Status::NotFound()`!
+ // 3. If yes, instantiate and activate a reader for the manifest!
+ // 4. Return any unexpected condition as errors but note this will cause
+ // the current software update session to abort.
+ return OkStatus();
+ }
+
+ // Backend to provide a ready-to-go reader for the on-device manifest blob.
+ // This function is called after a successful `BeforeManifestRead()`,
+ // potentially more than once.
+ //
+ // This manifest blob is a serialized `message Manifest{...}` as defined in
+ // update_bundle.proto.
+ //
+ // This manifest blob is ALWAYS and EXCLUSIVELY persisted by a successful
+  // software update. Thus it may not be available before the first software
+ // update, in which case `BeforeManifestRead()` should've returned
+ // `Status::NotFound()`.
+ //
+ // This manifest contains trusted metadata of all software currently running
+ // on the device and used for anti-rollback checks. It MUST NOT be tampered
+ // by factory resets, flashing, or any other means other than software
+ // updates.
virtual Result<stream::SeekableReader*> GetCurrentManifestReader() {
+ // Todo(backend):
+ // 1. Double check if a ready-to-go reader has been prepared by
+ // `BeforeManifestRead()`.
+ // 2. If yes (expected), return the reader.
+ // 3. If not (unexpected), return `Status::FailedPrecondition()`.
return Status::Unimplemented();
}
- // Use a reader that provides a new manifest for the device to save.
- virtual Status UpdateCurrentManifest(
- [[maybe_unused]] stream::Reader& manifest) {
+ // TODO(alizhang): Deprecate GetCurrentManifestReader in favor of
+ // `GetManifestReader()`.
+ virtual Result<stream::SeekableReader*> GetManifestReader() {
+ return GetCurrentManifestReader();
+ }
+
+ // Backend to prepare for on-device manifest update, e.g. make necessary
+ // efforts to ready the manifest writer. The manifest writer is used to
+ // persist a new manifest on-device following a successful software update.
+ // Manifest writing is never mixed with reading (i.e. reader and writer are
+ // used sequentially).
+ virtual Status BeforeManifestWrite() {
+ // Todo(backend):
+ // 1. Instantiate and activate a manifest writer pointing at a persistent
+ // storage that at least could survive a factory data reset (FDR), if not
+ // tamper-resistant.
return OkStatus();
- };
+ }
+
+ // Backend to provide a ready-to-go writer for the on-device manifest blob.
+ // This function is called after a successful `BeforeManifestWrite()`,
+ // potentially more than once.
+ //
+ // This manifest blob is a serialized `message Manifest{...}` as defined in
+ // update_bundle.proto.
+ //
+ // This manifest blob is ALWAYS and EXCLUSIVELY persisted by a successful
+ // software update.
+ //
+ // This manifest contains trusted metadata of all software currently running
+ // on the device and used for anti-rollback checks. It MUST NOT be tampered
+ // by factory resets, flashing, or any other means other than software
+ // updates.
+ virtual Result<stream::Writer*> GetManifestWriter() {
+ // Todo(backend):
+ // 1. Double check a writer is ready to go as result of
+ // `BeforeManifestWrite()`.
+ // 2. If yes (expected), simply return the writer.
+ // 3. If not (unexpected), return `Status::FailedPrecondition()`.
+ return Status::Unimplemented();
+ }
+
+ // Backend to finish up manifest writing.
+ virtual Status AfterManifestWrite() {
+ // Todo(backend):
+ // Protect the newly persisted manifest blob. This is to make manifest
+ // probing / reading easier and more reliable. This could involve taking
+ // a measurement (e.g. checksum) and storing that measurement in a
+ // FDR-safe tag, replicating the manifest in a backup location if the
+ // backing media is unreliable (e.g. raw NAND) etc.
+ //
+ // It is safe to assume the writing has been successful in this function.
+ return OkStatus();
+ }
// Do any work needed to finish the apply of the update and do a required
// reboot of the device!
diff --git a/pw_software_update/public/pw_software_update/bundled_update_service.h b/pw_software_update/public/pw_software_update/bundled_update_service.h
index 2493733a6..d517d2203 100644
--- a/pw_software_update/public/pw_software_update/bundled_update_service.h
+++ b/pw_software_update/public/pw_software_update/bundled_update_service.h
@@ -18,6 +18,7 @@
#include "pw_software_update/bundled_update_backend.h"
#include "pw_software_update/update_bundle_accessor.h"
#include "pw_status/status.h"
+#include "pw_sync/borrow.h"
#include "pw_sync/lock_annotations.h"
#include "pw_sync/mutex.h"
#include "pw_work_queue/work_queue.h"
@@ -32,14 +33,14 @@ class BundledUpdateService
BundledUpdateService(UpdateBundleAccessor& bundle,
BundledUpdateBackend& backend,
work_queue::WorkQueue& work_queue)
- : status_{},
+ : unsafe_status_{.state =
+ pw_software_update_BundledUpdateState_Enum_INACTIVE},
+ status_(unsafe_status_, status_mutex_),
backend_(backend),
bundle_(bundle),
bundle_open_(false),
work_queue_(work_queue),
- work_enqueued_(false) {
- status_.state = pw_software_update_BundledUpdateState_Enum_INACTIVE;
- }
+ work_enqueued_(false) {}
Status GetStatus(const pw_protobuf_Empty& request,
pw_software_update_BundledUpdateStatus& response);
@@ -85,20 +86,28 @@ class BundledUpdateService
// ApplyProgress - to update % complete.
private:
- pw_software_update_BundledUpdateStatus status_ PW_GUARDED_BY(mutex_);
+ // Top-level lock for OTA state coherency. May be held for extended periods.
+ sync::Mutex mutex_;
BundledUpdateBackend& backend_ PW_GUARDED_BY(mutex_);
UpdateBundleAccessor& bundle_ PW_GUARDED_BY(mutex_);
bool bundle_open_ PW_GUARDED_BY(mutex_);
work_queue::WorkQueue& work_queue_ PW_GUARDED_BY(mutex_);
bool work_enqueued_ PW_GUARDED_BY(mutex_);
- sync::Mutex mutex_;
- void DoVerify();
- void DoApply();
+ // Nested lock for safe status updates and queries.
+ sync::Mutex status_mutex_ PW_ACQUIRED_AFTER(mutex_);
+ pw_software_update_BundledUpdateStatus unsafe_status_
+ PW_GUARDED_BY(status_mutex_);
+ sync::Borrowable<pw_software_update_BundledUpdateStatus, sync::Mutex> status_;
+
+ void DoVerify() PW_LOCKS_EXCLUDED(status_mutex_);
+ void DoApply() PW_LOCKS_EXCLUDED(status_mutex_);
void Finish(_pw_software_update_BundledUpdateResult_Enum result)
- PW_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
- bool IsFinished() const PW_EXCLUSIVE_LOCKS_REQUIRED(mutex_) {
- return status_.state == pw_software_update_BundledUpdateState_Enum_FINISHED;
+ PW_EXCLUSIVE_LOCKS_REQUIRED(mutex_) PW_LOCKS_EXCLUDED(status_mutex_);
+ bool IsFinished() PW_EXCLUSIVE_LOCKS_REQUIRED(mutex_)
+ PW_LOCKS_EXCLUDED(status_mutex_) {
+ return status_.acquire()->state ==
+ pw_software_update_BundledUpdateState_Enum_FINISHED;
}
};
diff --git a/pw_software_update/public/pw_software_update/config.h b/pw_software_update/public/pw_software_update/config.h
index e6b2bc5aa..3eaf18c82 100644
--- a/pw_software_update/public/pw_software_update/config.h
+++ b/pw_software_update/public/pw_software_update/config.h
@@ -13,12 +13,6 @@
// the License.
#pragma once
-// The log level to use for this module. Logs below this level are omitted.
-#define PW_LOG_MODULE_NAME "PWSU"
-#ifndef PW_SOFTWARE_UPDATE_CONFIG_LOG_LEVEL
-#define PW_SOFTWARE_UPDATE_CONFIG_LOG_LEVEL PW_LOG_LEVEL_ERROR
-#endif // PW_SOFTWARE_UPDATE_CONFIG_LOG_LEVEL
-
// The size of the buffer to create on stack for streaming manifest data from
// the bundle reader.
#define WRITE_MANIFEST_STREAM_PIPE_BUFFER_SIZE 8
@@ -26,7 +20,20 @@
// The maximum allowed length of a target name.
#define MAX_TARGET_NAME_LENGTH 32
+// The maximum allowed payload size in bytes. This is used to mitigate DoS
+// attacks.
+#ifndef PW_SOFTWARE_UPDATE_MAX_TARGET_PAYLOAD_SIZE
+#define PW_SOFTWARE_UPDATE_MAX_TARGET_PAYLOAD_SIZE (100 * 1024 * 1024)
+#endif // PW_SOFTWARE_UPDATE_MAX_TARGET_PAYLOAD_SIZE
+
// Not recommended. Disable compilation of bundle verification.
#ifndef PW_SOFTWARE_UPDATE_DISABLE_BUNDLE_VERIFICATION
#define PW_SOFTWARE_UPDATE_DISABLE_BUNDLE_VERIFICATION (false)
#endif // PW_SOFTWARE_UPDATE_DISABLE_BUNDLE_VERIFICATION
+
+// Whether to support bundle "personalization", which is a feature that
+// strips some or all target files that a device claims to already have from an
+// incoming bundle in order to improve performance.
+#ifndef PW_SOFTWARE_UPDATE_WITH_PERSONALIZATION
+#define PW_SOFTWARE_UPDATE_WITH_PERSONALIZATION (true)
+#endif // PW_SOFTWARE_UPDATE_WITH_PERSONALIZATION \ No newline at end of file
diff --git a/pw_software_update/public/pw_software_update/manifest_accessor.h b/pw_software_update/public/pw_software_update/manifest_accessor.h
index eaf680633..05a75cc38 100644
--- a/pw_software_update/public/pw_software_update/manifest_accessor.h
+++ b/pw_software_update/public/pw_software_update/manifest_accessor.h
@@ -13,21 +13,65 @@
// the License.
#pragma once
-#include "pw_stream/interval_reader.h"
+#include "pw_protobuf/message.h"
+#include "pw_result/result.h"
#include "pw_stream/stream.h"
namespace pw::software_update {
-// TODO(pwbug/456): There may be bettter solution to using a forward
-// declaration here.
-class UpdateBundleAccessor;
+// ManifestAccessor exposes manifest information from either a *verified* update
+// bundle (`message UpdateBundle`) or a *trusted* on-device manifest
+// (`message Manifest`).
+//
+// Instantiation MUST go-through the UpdateBundleAccessor class. e.g.:
+//
+// ManifestAccessor manifest = bundle.GetManifest();
+// PW_TRY(manifest.status()); // Fails if `bundle` is not yet verified.
class ManifestAccessor {
public:
- explicit ManifestAccessor(UpdateBundleAccessor* update_bundle_accessor);
- pw::stream::IntervalReader GetUserManifest();
- Status WriteManifest(stream::Writer& writer);
+ ManifestAccessor() = default;
+
+ Status status() { return targets_metadata_.status(); }
+ bool ok() { return status().ok(); }
+
+ // Retrieves the "user manifest" blob, which is product specific and optional.
+ pw::stream::IntervalReader GetUserManifest() {
+ return user_manifest_.GetBytesReader();
+ }
+
+ // Enumerates all target files as a list of `message TargetFile{...}`.
+ protobuf::RepeatedMessages GetTargetFiles();
+
+ // Given a name, return a `message TargetFile{...}` descriptor.
+ protobuf::Message GetTargetFile(protobuf::String name);
+ protobuf::Message GetTargetFile(std::string_view name);
+
+ // Returns the manifest version number.
+ protobuf::Uint32 GetVersion();
+
+ // TODO(alizhang): Deprecate WriteManifest() once backend code has changed
+ // to UpdateBundleAccessor::PersistManifest() where the backend is given
+ // chances to prepare and release the manifest writer.
+ Status WriteManifest(stream::Writer& writer) { return Export(writer); }
private:
- UpdateBundleAccessor* update_bundle_accessor_;
+ friend class UpdateBundleAccessor;
+
+ protobuf::Message targets_metadata_;
+ protobuf::Bytes user_manifest_;
+
+ ManifestAccessor(Status status) : targets_metadata_(status) {}
+ ManifestAccessor(protobuf::Message targets_metadata,
+ protobuf::Bytes user_manifest)
+ : targets_metadata_(targets_metadata), user_manifest_(user_manifest){};
+
+ // Constructs a `ManifestAccessor` from an update bundle.
+ static ManifestAccessor FromBundle(protobuf::Message bundle);
+
+ // Constructs a `ManifestAccessor` from a saved `message Manifest{...}`.
+ static ManifestAccessor FromManifest(protobuf::Message manifest);
+
+ // Exports a serialized `message Manifest{...}`.
+ Status Export(stream::Writer& writer);
};
} // namespace pw::software_update
diff --git a/pw_software_update/public/pw_software_update/update_bundle_accessor.h b/pw_software_update/public/pw_software_update/update_bundle_accessor.h
index 83b014e12..d2b80c081 100644
--- a/pw_software_update/public/pw_software_update/update_bundle_accessor.h
+++ b/pw_software_update/public/pw_software_update/update_bundle_accessor.h
@@ -21,90 +21,65 @@
#include "pw_protobuf/message.h"
#include "pw_software_update/bundled_update_backend.h"
#include "pw_software_update/manifest_accessor.h"
-#include "pw_stream/memory_stream.h"
namespace pw::software_update {
class BundledUpdateBackend;
+// Name of the top-level Targets metadata.
+constexpr std::string_view kTopLevelTargetsName = "targets";
+
+// Name of the "user manifest" target file. The "user manifest" is a product
+// specific blob that is opaque to upstream but needs to be passed around in
+// manifest handling (for now).
constexpr std::string_view kUserManifestTargetFileName = "user_manifest";
-// UpdateBundleAccessor is responsible for parsing, verifying and providing
-// target payload access of a software update bundle. It takes the following as
-// inputs:
-//
-// 1. A software update bundle via `BlobStore`.
-// 2. A `BundledUpdateBackend`, which implements project-specific update
-// operations such as enforcing project update policies and
-// verifying/applying target files on device.
-//
-// The verification is done according to TUF process. Payload can only be
-// accessed after successful verification.
-//
-// Exmple of use:
-//
-// UpdateBundleAccessor bundle(blob,helper);
-// auto status = bundle.OpenAndVerify();
-// if (!status.ok()) {
-// // handle error
-// ...
-// }
+// UpdateBundleAccessor is the trusted custodian of a staged incoming update
+// bundle.
//
-// // Examine and use payload.
-// auto exist = bundle.IsTargetPayloadIncluded("audio");
-// if (!exist.ok() || !exist.value()) {
-// // handle error
-// ...
-// }
+// It takes exclusive ownership of the blob_store that represents a staged,
+// *untrusted* bundle, and presents convenient and *trusted* accessors.
//
-// auto payload_reader = bundle.GetTargetPayload("audio");
-// // Process payload
-// ...
-//
-// // Get bundle's manifest and write it to the given writer.
-// status = bundle.PersistManifest(staged_manifest_writer);
-// if (!status.ok()) {
-// // handle error
-// ...
-// }
-//
-// status = bundle.Close();
-// if (!status.ok()) {
-// // handle error
-// ...
-// }
+// ALL ACCESS to the staged update bundle MUST GO THROUGH the
+// `UpdateBundleAccessor`.
class UpdateBundleAccessor {
public:
// UpdateBundleAccessor
- // bundle - The software update bundle data on storage.
+ // blob_store - The staged incoming software update bundle.
// backend - Project-specific BundledUpdateBackend.
// disable_verification - Disable verification.
- constexpr UpdateBundleAccessor(blob_store::BlobStore& bundle,
+ constexpr UpdateBundleAccessor(blob_store::BlobStore& blob_store,
BundledUpdateBackend& backend,
bool disable_verification = false)
- : bundle_(bundle),
+ : blob_store_(blob_store),
+ blob_store_reader_(blob_store_),
backend_(backend),
- bundle_reader_(bundle_),
disable_verification_(disable_verification) {}
// Opens and verifies the software update bundle.
//
- // Specifically, the opening process opens a blob reader to the given bundle
- // and initializes the bundle proto parser. No write will be allowed to the
- // bundle until Close() is called.
+ // Verification covers the following:
//
- // If bundle verification is enabled (see the `option` argument in
- // the constructor), the verification process does the following:
+ // 1. If a Root metadata is included with the incoming bundle, the Root
+ // metadata will be verified and used as the new Root metadata to verify
+ // other metadata in the bundle.
//
- // 1. Check whether the bundle contains an incoming new root metadata. If it
- // does, it verifies the root against the current on-device root. If
- // successful, the on-device root will be updated to the new root.
+ // 2. The Targets metadata is verified using the Root metadata.
//
- // 2. Verify the targets metadata against the current trusted root.
+ // 3. All target payloads referenced in the Targets metadata are verified.
//
- // 3. Either verify all target payloads (size and hash) or defer that
- // verification till when a target is accessed.
+ // Limitations and customizations (compared to standard TUF):
//
- // 4. Invoke the backend to do downstream verification of the bundle.
+  // 1. Does not yet support arbitrary Root key rotations, which means
+  //    there is only one (reliable) chance to rotate the Root key for all
+ // devices. Rotation of the Targets key is still unlimited.
+ // 2. Timestamp and Snapshot metadata are not used or supported.
+ // 3. Assumes a single top-level Targets metadata and no delegations.
+ // 4. The top-level Targets metadata doubles as the software update
+ // "manifest". Anti-rollback IS supported via the Targets metadata version.
+ // 5. Supports "personalization", where the staged bundle may have been
+  //    stripped of any target payloads that the device already has. For those
+ // personalized-out targets, verification relies on the cached manifest of
+ // a previous successful update to verify target length and hash.
//
// Returns:
// OK - Bundle was successfully opened and verified.
@@ -118,44 +93,37 @@ class UpdateBundleAccessor {
// DATA_LOSS - Error writing data or fail to verify written data.
Status Close();
- // Writes the manifest of the staged bundle to the given writer.
- //
- // Returns:
- // FAILED_PRECONDITION - Bundle is not open and verified.
- // TODO(pwbug/456): Add other error codes if necessary.
- Status PersistManifest(stream::Writer& staged_manifest_writer);
-
- // Is the target payload present in the bundle (not personalized out).
+ // Writes out the manifest of the staged bundle via a backend-supplied writer.
//
// Returns:
- // OK - Whether or not the target_file was included in the UpdateBundle or
- // whether it was personalized out.
// FAILED_PRECONDITION - Bundle is not open and verified.
// TODO(pwbug/456): Add other error codes if necessary.
- Result<bool> IsTargetPayloadIncluded(std::string_view target_file);
+ Status PersistManifest();
- // Returns a reader for the target file by `target_file` in the update
- // bundle.
+ // Returns a reader for the (verified) payload bytes of a specified target
+ // file.
//
// Returns:
// A reader instance for the target file.
// TODO(pwbug/456): Figure out a way to propagate error.
- stream::IntervalReader GetTargetPayload(std::string_view target_file);
+ stream::IntervalReader GetTargetPayload(std::string_view target_name);
+ stream::IntervalReader GetTargetPayload(protobuf::String target_name);
- // Returns a protobuf::Message representation of the update bundle.
- //
- // Returns:
- // An instance of protobuf::Message of the udpate bundle.
- // FAILED_PRECONDITION - Bundle is not open and verified.
- protobuf::Message GetDecoder();
+ // Exposes "manifest" information from the incoming update bundle once it has
+ // passed verification.
+ ManifestAccessor GetManifest();
- ManifestAccessor GetManifestAccessor() { return ManifestAccessor(this); };
+ // Returns the total number of bytes of all target payloads listed in the
+ // manifest *AND* exists in the bundle.
+ Result<uint64_t> GetTotalPayloadSize();
private:
- blob_store::BlobStore& bundle_;
+ blob_store::BlobStore& blob_store_;
+ blob_store::BlobStore::BlobReader blob_store_reader_;
BundledUpdateBackend& backend_;
- blob_store::BlobStore::BlobReader bundle_reader_;
- protobuf::Message decoder_;
+ protobuf::Message bundle_;
+ // The current, cached, trusted `SignedRootMetadata{}`.
+ protobuf::Message trusted_root_;
bool disable_verification_;
bool bundle_verified_ = false;
@@ -170,8 +138,7 @@ class UpdateBundleAccessor {
// verification and upgrade flow:
//
// 1. Verify the signatures according to the on-device trusted
- // disable_verificationroot metadata
- // obtained from the backend.
+ // root metadata obtained from the backend.
// 2. Verify content of the new root metadata, including:
// 1) Check role magic field.
// 2) Check signature requirement. Specifically, check that no key is
@@ -192,21 +159,57 @@ class UpdateBundleAccessor {
// 2. Check the content of the targets metadata.
// 3. Check rollback against the version from on-device manifest, if one
// exists (the manifest may be reset in the case of key rotation).
- //
- // TODO(pwbug/456): Should manifest persisting be handled here? The current
- // API design of this class exposes a PersistManifest() method, which implies
- // that manifest persisting is handled by some higher level logic.
Status VerifyTargetsMetadata();
// A helper to get the on-device trusted root metadata. It returns an
// instance of SignedRootMetadata proto message.
protobuf::Message GetOnDeviceTrustedRoot();
- // The method performs verification of the target payloads. Specifically, it
- // 1. For target payloads found in the bundle, verify its size and hash.
- // 2. For target payloads not found in the bundle, call downstream to verify
- // it and report back.
+ // A helper to get an accessor to the on-device manifest. The on-device
+ // manifest is a serialized `message Manifest{...}` that represents the
+ // current running firmware of the device. The on-device manifest storage
+ // MUST meet the following requirements.
+ //
+ // 1. MUST NOT get wiped by a factory reset, otherwise a FDR can be used
+ // to circumvent anti-rollback check.
+ // 2. MUST be kept in-sync with the actual firmware on-device. If any
+ // mechanism is used to modify the firmware (e.g. via flashing), the
+ // on-device manifest MUST be updated to reflect the change as well.
+ // The on-device manifest CAN be erased if updating it is too cumbersome
+ // BUT ONLY ON DEV DEVICES as erasing the on-device manifest defeats
+ // anti-rollback.
+ // 3. MUST be integrity-protected and checked. Corrupted on-device manifest
+ // cannot be used as it may brick a device as a result of anti-rollback
+ // check. Integrity check is added and enforced by the backend via
+ // `BundledUpdateBackend` callbacks.
+ //
+ ManifestAccessor GetOnDeviceManifest();
+
+ // Verify all targets referenced in the manifest (Targets metadata) has a
+ // payload blob either within the bundle or on-device, in both cases
+ // measuring up to the length and hash recorded in the manifest.
Status VerifyTargetsPayloads();
+
+ // Verify a target specified by name measures up to the expected length and
+ // SHA256 hash. Additionally call the backend to perform any product-specific
+ // validations.
+ Status VerifyTargetPayload(ManifestAccessor manifest,
+ std::string_view name,
+ protobuf::Uint64 expected_length,
+ protobuf::Bytes expected_sha256);
+
+ // For a target the payload of which is included in the bundle, verify
+ // it measures up to the expected length and sha256 hash.
+ Status VerifyInBundleTargetPayload(protobuf::Uint64 expected_length,
+ protobuf::Bytes expected_sha256,
+ stream::IntervalReader payload_reader);
+
+ // For a target with no corresponding payload in the bundle, verify
+ // its on-device payload bytes measures up to the expected length and sha256
+ // hash.
+ Status VerifyOutOfBundleTargetPayload(std::string_view name,
+ protobuf::Uint64 expected_length,
+ protobuf::Bytes expected_sha256);
};
} // namespace pw::software_update
diff --git a/pw_software_update/py/pw_software_update/generate_test_bundle.py b/pw_software_update/py/pw_software_update/generate_test_bundle.py
index b52a0cf95..304e0bdc5 100644
--- a/pw_software_update/py/pw_software_update/generate_test_bundle.py
+++ b/pw_software_update/py/pw_software_update/generate_test_bundle.py
@@ -257,11 +257,17 @@ def main() -> int:
dev_signed_root = test_bundle.generate_dev_signed_root_metadata()
dev_signed_bundle = test_bundle.generate_dev_signed_bundle()
+ dev_signed_bundle_with_root = test_bundle.generate_dev_signed_bundle(
+ signed_root_metadata=dev_signed_root)
+ unsigned_bundle_with_root = test_bundle.generate_unsigned_bundle(
+ signed_root_metadata=dev_signed_root)
manifest_proto = test_bundle.generate_manifest()
prod_signed_root = \
test_bundle.generate_prod_signed_root_metadata()
prod_signed_bundle = test_bundle.generate_prod_signed_bundle(
None, prod_signed_root)
+ dev_signed_bundle_with_prod_root = test_bundle.generate_dev_signed_bundle(
+ signed_root_metadata=prod_signed_root)
# Generates a prod root metadata that fails signature verification against
# the dev root (i.e. it has a bad prod signature). This is done by making
@@ -364,6 +370,15 @@ def main() -> int:
header.write(
proto_array_declaration(dev_signed_bundle, 'kTestDevBundle'))
header.write(
+ proto_array_declaration(dev_signed_bundle_with_root,
+ 'kTestDevBundleWithRoot'))
+ header.write(
+ proto_array_declaration(unsigned_bundle_with_root,
+ 'kTestUnsignedBundleWithRoot'))
+ header.write(
+ proto_array_declaration(dev_signed_bundle_with_prod_root,
+ 'kTestDevBundleWithProdRoot'))
+ header.write(
proto_array_declaration(manifest_proto, 'kTestBundleManifest'))
header.write(proto_array_declaration(dev_signed_root,
'kDevSignedRoot'))
diff --git a/pw_software_update/update_bundle_accessor.cc b/pw_software_update/update_bundle_accessor.cc
index 41c4639d1..453f932e8 100644
--- a/pw_software_update/update_bundle_accessor.cc
+++ b/pw_software_update/update_bundle_accessor.cc
@@ -12,6 +12,9 @@
// License for the specific language governing permissions and limitations under
// the License.
+#define PW_LOG_MODULE_NAME "PWSU"
+#define PW_LOG_LEVEL PW_LOG_LEVEL_WARN
+
#include "pw_software_update/update_bundle_accessor.h"
#include <cstddef>
@@ -24,17 +27,15 @@
#include "pw_protobuf/message.h"
#include "pw_result/result.h"
#include "pw_software_update/config.h"
+#include "pw_software_update/manifest_accessor.h"
#include "pw_software_update/update_bundle.pwpb.h"
#include "pw_stream/interval_reader.h"
#include "pw_stream/memory_stream.h"
-
-#define PW_LOG_LEVEL PW_SOFTWARE_UPDATE_CONFIG_LOG_LEVEL
+#include "pw_string/string_builder.h"
namespace pw::software_update {
namespace {
-constexpr std::string_view kTopLevelTargetsName = "targets";
-
Result<bool> VerifyEcdsaSignature(protobuf::Bytes public_key,
ConstByteSpan digest,
protobuf::Bytes signature) {
@@ -73,11 +74,10 @@ void LogKeyId(ConstByteSpan key_id) {
}
// Verifies signatures of a TUF metadata.
-Result<bool> VerifyMetadataSignatures(
- protobuf::Bytes message,
- protobuf::RepeatedMessages signatures,
- protobuf::Message signature_requirement,
- protobuf::StringToMessageMap key_mapping) {
+Status VerifyMetadataSignatures(protobuf::Bytes message,
+ protobuf::RepeatedMessages signatures,
+ protobuf::Message signature_requirement,
+ protobuf::StringToMessageMap key_mapping) {
// Gets the threshold -- at least `threshold` number of signatures must
// pass verification in order to trust this metadata.
protobuf::Uint32 threshold = signature_requirement.AsUint32(
@@ -93,7 +93,9 @@ Result<bool> VerifyMetadataSignatures(
// Verifies the signatures. Check that at least `threshold` number of
// signatures can be verified using the allowed keys.
size_t verified_count = 0;
+ size_t total_signatures = 0;
for (protobuf::Message signature : signatures) {
+ total_signatures++;
protobuf::Bytes key_id =
signature.AsBytes(static_cast<uint32_t>(Signature::Fields::KEY_ID));
PW_TRY(key_id.status());
@@ -122,7 +124,7 @@ Result<bool> VerifyMetadataSignatures(
}
if (!key_id_is_allowed) {
- PW_LOG_DEBUG("Skipping a key id not listed in allowed key ids.");
+ PW_LOG_DEBUG("Skipping a key id not listed in allowed key ids");
LogKeyId(key_id_buf);
continue;
}
@@ -154,17 +156,20 @@ Result<bool> VerifyMetadataSignatures(
if (res.value()) {
verified_count++;
if (verified_count == threshold.value()) {
- return true;
+ return OkStatus();
}
}
}
- PW_LOG_DEBUG(
- "Not enough number of signatures verified. Requires at least %u, "
- "verified %u",
- threshold.value(),
- verified_count);
- return false;
+ if (total_signatures == 0) {
+ // For self verification to tell apart unsigned bundles.
+ return Status::NotFound();
+ }
+
+ PW_LOG_ERROR("Insufficient signatures. Requires at least %u, verified %u",
+ threshold.value(),
+ verified_count);
+ return Status::Unauthenticated();
}
// Verifies the signatures of a signed new root metadata against a given
@@ -201,8 +206,9 @@ Result<bool> VerifyRootMetadataSignatures(protobuf::Message trusted_root,
PW_TRY(signature_requirement.status());
// Verifies the signatures.
- return VerifyMetadataSignatures(
- serialized, signatures, signature_requirement, key_mapping);
+ PW_TRY(VerifyMetadataSignatures(
+ serialized, signatures, signature_requirement, key_mapping));
+ return true;
}
Result<uint32_t> GetMetadataVersion(protobuf::Message& metadata,
@@ -227,90 +233,6 @@ Result<uint32_t> GetMetadataVersion(protobuf::Message& metadata,
return res.value();
}
-// Gets the list of targets in the top-level targets metadata
-protobuf::RepeatedMessages GetTopLevelTargets(protobuf::Message bundle) {
- // Get signed targets metadata map.
- //
- // message UpdateBundle {
- // ...
- // map<string, SignedTargetsMetadata> target_metadata = <id>;
- // ...
- // }
- protobuf::StringToMessageMap signed_targets_metadata_map =
- bundle.AsStringToMessageMap(
- static_cast<uint32_t>(UpdateBundle::Fields::TARGETS_METADATA));
- PW_TRY(signed_targets_metadata_map.status());
-
- // Get the top-level signed targets metadata.
- protobuf::Message signed_targets_metadata =
- signed_targets_metadata_map[kTopLevelTargetsName];
- PW_TRY(signed_targets_metadata.status());
-
- // Get the targets metadata.
- //
- // message SignedTargetsMetadata {
- // ...
- // bytes serialized_target_metadata = <id>;
- // ...
- // }
- protobuf::Message targets_metadata =
- signed_targets_metadata.AsMessage(static_cast<uint32_t>(
- SignedTargetsMetadata::Fields::SERIALIZED_TARGETS_METADATA));
- PW_TRY(targets_metadata.status());
-
- // Return the target file list
- //
- // message TargetsMetadata {
- // ...
- // repeated TargetFile target_files = <id>;
- // ...
- // }
- return targets_metadata.AsRepeatedMessages(
- static_cast<uint32_t>(TargetsMetadata::Fields::TARGET_FILES));
-}
-
-// Verifies a given target payload against a given hash.
-Result<bool> VerifyTargetPayloadHash(protobuf::Message hash_info,
- protobuf::Bytes target_payload) {
- // Get the hash function field
- //
- // message Hash {
- // ...
- // HashFunction function = <id>;
- // ...
- // }
- protobuf::Uint32 hash_function =
- hash_info.AsUint32(static_cast<uint32_t>(Hash::Fields::FUNCTION));
- PW_TRY(hash_function.status());
-
- // enum HashFunction {
- // UNKNOWN_HASH_FUNCTION = 0;
- // SHA256 = 1;
- // }
- if (hash_function.value() != static_cast<uint32_t>(HashFunction::SHA256)) {
- // Unknown hash function
- PW_LOG_DEBUG("Unknown hash function, %d", hash_function.value());
- return Status::InvalidArgument();
- }
-
- // Get the hash bytes field
- //
- // message Hash {
- // ...
- // bytes hash = <id>;
- // ...
- // }
- protobuf::Bytes hash_bytes =
- hash_info.AsBytes(static_cast<uint32_t>(Hash::Fields::HASH));
- PW_TRY(hash_bytes.status());
-
- std::byte digest[crypto::sha256::kDigestSizeBytes];
- stream::IntervalReader target_payload_reader =
- target_payload.GetBytesReader();
- PW_TRY(crypto::sha256::Hash(target_payload_reader, digest));
- return hash_bytes.Equal(digest);
-}
-
// Reads a protobuf::String into a buffer and returns a std::string_view.
Result<std::string_view> ReadProtoString(protobuf::String str,
std::span<char> buffer) {
@@ -327,176 +249,146 @@ Result<std::string_view> ReadProtoString(protobuf::String str,
} // namespace
Status UpdateBundleAccessor::OpenAndVerify() {
- PW_TRY(DoOpen());
- PW_TRY(DoVerify());
- return OkStatus();
-}
-
-// Get the target element corresponding to `target_file`
-stream::IntervalReader UpdateBundleAccessor::GetTargetPayload(
- std::string_view target_file_name) {
- if (!bundle_verified_) {
- PW_LOG_DEBUG("Bundled has not passed verification yet");
- return Status::FailedPrecondition();
+ if (Status status = DoOpen(); !status.ok()) {
+ PW_LOG_ERROR("Failed to open staged bundle");
+ return status;
}
- protobuf::StringToBytesMap target_payloads =
- decoder_.AsStringToBytesMap(static_cast<uint32_t>(
- pw::software_update::UpdateBundle::Fields::TARGET_PAYLOADS));
- PW_TRY(target_payloads.status());
- protobuf::Bytes payload = target_payloads[target_file_name];
- PW_TRY(payload.status());
- return payload.GetBytesReader();
-}
-
-protobuf::Message UpdateBundleAccessor::GetDecoder() {
- if (!bundle_verified_) {
- PW_LOG_DEBUG("Bundled has not passed verification yet");
- return Status::FailedPrecondition();
+ if (Status status = DoVerify(); !status.ok()) {
+ PW_LOG_ERROR("Failed to verified staged bundle");
+ Close();
+ return status;
}
- return decoder_;
+ return OkStatus();
}
-Result<bool> UpdateBundleAccessor::IsTargetPayloadIncluded(
- std::string_view target_file_name) {
- if (!bundle_verified_) {
- PW_LOG_DEBUG("Bundled has not passed verification yet");
- return Status::FailedPrecondition();
- }
- // TODO(pwbug/456): Perform personalization check first. If the target
- // is personalized out. Don't need to proceed.
-
- protobuf::StringToMessageMap signed_targets_metadata_map =
- decoder_.AsStringToMessageMap(static_cast<uint32_t>(
- pw::software_update::UpdateBundle::Fields::TARGETS_METADATA));
- PW_TRY(signed_targets_metadata_map.status());
+Result<uint64_t> UpdateBundleAccessor::GetTotalPayloadSize() {
+ protobuf::RepeatedMessages manifested_targets =
+ GetManifest().GetTargetFiles();
+ PW_TRY(manifested_targets.status());
- // There should only be one element in the map, which is the top-level
- // targets metadata.
- protobuf::Message signed_targets_metadata =
- signed_targets_metadata_map[kTopLevelTargetsName];
- PW_TRY(signed_targets_metadata.status());
+ protobuf::StringToBytesMap bundled_payloads = bundle_.AsStringToBytesMap(
+ static_cast<uint32_t>(UpdateBundle::Fields::TARGET_PAYLOADS));
+ PW_TRY(bundled_payloads.status());
+
+ uint64_t total_bytes;
+ std::array<std::byte, MAX_TARGET_NAME_LENGTH> name_buffer = {};
+ for (protobuf::Message target : manifested_targets) {
+ protobuf::String target_name =
+ target.AsString(static_cast<uint32_t>(TargetFile::Fields::FILE_NAME));
+
+ stream::IntervalReader name_reader = target_name.GetBytesReader();
+ PW_TRY(name_reader.status());
+ if (name_reader.interval_size() > name_buffer.size()) {
+ return Status::OutOfRange();
+ }
- protobuf::Message metadata = signed_targets_metadata.AsMessage(
- static_cast<uint32_t>(pw::software_update::SignedTargetsMetadata::Fields::
- SERIALIZED_TARGETS_METADATA));
- PW_TRY(metadata.status());
+ Result<ByteSpan> read_result = name_reader.Read(name_buffer);
+ PW_TRY(read_result.status());
- protobuf::RepeatedMessages target_files =
- metadata.AsRepeatedMessages(static_cast<uint32_t>(
- pw::software_update::TargetsMetadata::Fields::TARGET_FILES));
- PW_TRY(target_files.status());
+ ConstByteSpan name_span = read_result.value();
+ std::string_view name_view(reinterpret_cast<const char*>(name_span.data()),
+ name_span.size_bytes());
- for (protobuf::Message target_file : target_files) {
- protobuf::String name = target_file.AsString(static_cast<uint32_t>(
- pw::software_update::TargetFile::Fields::FILE_NAME));
- PW_TRY(name.status());
- Result<bool> file_name_matches = name.Equal(target_file_name);
- PW_TRY(file_name_matches.status());
- if (file_name_matches.value()) {
- return true;
+ if (!bundled_payloads[name_view].ok()) {
+ continue;
}
+ protobuf::Uint64 target_length =
+ target.AsUint64(static_cast<uint32_t>(TargetFile::Fields::LENGTH));
+ PW_TRY(target_length.status());
+ total_bytes += target_length.value();
}
- return false;
+ return total_bytes;
}
-Status UpdateBundleAccessor::PersistManifest(
- stream::Writer& staged_manifest_writer) {
- if (!bundle_verified_) {
- PW_LOG_DEBUG(
- "Bundle has not passed verification. Refuse to write manifest");
- return Status::FailedPrecondition();
- }
+// Get the target element corresponding to `target_file`
+stream::IntervalReader UpdateBundleAccessor::GetTargetPayload(
+ std::string_view target_name) {
+ protobuf::Message manifest_entry = GetManifest().GetTargetFile(target_name);
+ PW_TRY(manifest_entry.status());
- protobuf::StringToMessageMap signed_targets_metadata_map =
- decoder_.AsStringToMessageMap(static_cast<uint32_t>(
- pw::software_update::UpdateBundle::Fields::TARGETS_METADATA));
- PW_TRY(signed_targets_metadata_map.status());
+ protobuf::StringToBytesMap payloads_map = bundle_.AsStringToBytesMap(
+ static_cast<uint32_t>(UpdateBundle::Fields::TARGET_PAYLOADS));
+ return payloads_map[target_name].GetBytesReader();
+}
- // There should only be one element in the map, which is the top-level
- // targets metadata.
- protobuf::Message signed_targets_metadata =
- signed_targets_metadata_map[kTopLevelTargetsName];
- PW_TRY(signed_targets_metadata.status());
+// Get the target element corresponding to `target_file`
+stream::IntervalReader UpdateBundleAccessor::GetTargetPayload(
+ protobuf::String target_name) {
+ char name_buf[MAX_TARGET_NAME_LENGTH] = {0};
+ Result<std::string_view> name_view = ReadProtoString(target_name, name_buf);
+ PW_TRY(name_view.status());
+ return GetTargetPayload(name_view.value());
+}
- protobuf::Bytes metadata = signed_targets_metadata.AsBytes(
- static_cast<uint32_t>(pw::software_update::SignedTargetsMetadata::Fields::
- SERIALIZED_TARGETS_METADATA));
- PW_TRY(metadata.status());
+Status UpdateBundleAccessor::PersistManifest() {
+ ManifestAccessor manifest = GetManifest();
+ // GetManifest() fails if the bundle is yet to be verified.
+ PW_TRY(manifest.status());
- stream::MemoryReader name_reader(
- std::as_bytes(std::span(kTopLevelTargetsName)));
- stream::IntervalReader metadata_reader = metadata.GetBytesReader();
+ // Notify backend to prepare to receive a new manifest.
+ PW_TRY(backend_.BeforeManifestWrite());
- std::byte stream_pipe_buffer[WRITE_MANIFEST_STREAM_PIPE_BUFFER_SIZE];
- PW_TRY(protobuf::WriteProtoStringToBytesMapEntry(
- static_cast<uint32_t>(
- pw::software_update::Manifest::Fields::TARGETS_METADATA),
- name_reader,
- kTopLevelTargetsName.size(),
- metadata_reader,
- metadata_reader.interval_size(),
- stream_pipe_buffer,
- staged_manifest_writer));
-
- // Write `user_manifest` file if there is one.
- Result<bool> user_manifest_exists =
- IsTargetPayloadIncluded(kUserManifestTargetFileName);
- PW_TRY(user_manifest_exists);
- if (user_manifest_exists.value()) {
- stream::IntervalReader user_manifest_reader =
- GetTargetPayload(kUserManifestTargetFileName);
- PW_TRY(user_manifest_reader.status());
- protobuf::StreamEncoder encoder(staged_manifest_writer, {});
- PW_TRY(encoder.WriteBytesFromStream(
- static_cast<uint32_t>(Manifest::Fields::USER_MANIFEST),
- user_manifest_reader,
- user_manifest_reader.interval_size(),
- stream_pipe_buffer));
- }
+ Result<stream::Writer*> writer = backend_.GetManifestWriter();
+ PW_TRY(writer.status());
+ PW_CHECK_NOTNULL(writer.value());
+
+ PW_TRY(manifest.Export(*writer.value()));
+
+ // Notify backend we are done writing. Backend should finalize
+ // (seal the box).
+ PW_TRY(backend_.AfterManifestWrite());
return OkStatus();
}
Status UpdateBundleAccessor::Close() {
bundle_verified_ = false;
- return bundle_reader_.IsOpen() ? bundle_reader_.Close() : OkStatus();
+ return blob_store_reader_.IsOpen() ? blob_store_reader_.Close() : OkStatus();
}
Status UpdateBundleAccessor::DoOpen() {
- PW_TRY(bundle_.Init());
- PW_TRY(bundle_reader_.Open());
- decoder_ =
- protobuf::Message(bundle_reader_, bundle_reader_.ConservativeReadLimit());
+ PW_TRY(blob_store_.Init());
+ PW_TRY(blob_store_reader_.Open());
+ bundle_ = protobuf::Message(blob_store_reader_,
+ blob_store_reader_.ConservativeReadLimit());
+ if (!bundle_.ok()) {
+ blob_store_reader_.Close();
+ return bundle_.status();
+ }
return OkStatus();
}
Status UpdateBundleAccessor::DoVerify() {
#if PW_SOFTWARE_UPDATE_DISABLE_BUNDLE_VERIFICATION
- PW_LOG_WARN("Update bundle verification is disabled.");
+ PW_LOG_WARN("Bundle verification is compiled out.");
bundle_verified_ = true;
return OkStatus();
#else // PW_SOFTWARE_UPDATE_DISABLE_BUNDLE_VERIFICATION
bundle_verified_ = false;
- if (disable_verification_) {
- PW_LOG_WARN("Update bundle verification is disabled.");
- bundle_verified_ = true;
- return OkStatus();
- }
// Verify and upgrade the on-device trust to the incoming root metadata if
// one is included.
- PW_TRY(UpgradeRoot());
+ if (Status status = UpgradeRoot(); !status.ok()) {
+ PW_LOG_ERROR("Failed to upgrade to Root in staged bundle");
+ return status;
+ }
// TODO(pwbug/456): Verify the targets metadata against the current trusted
// root.
- PW_TRY(VerifyTargetsMetadata());
+ if (Status status = VerifyTargetsMetadata(); !status.ok()) {
+ PW_LOG_ERROR("Failed to verify Targets metadata");
+ return status;
+ }
// TODO(pwbug/456): Investigate whether targets payload verification should
// be performed here or deferred until a specific target is requested.
- PW_TRY(VerifyTargetsPayloads());
+ if (Status status = VerifyTargetsPayloads(); !status.ok()) {
+ PW_LOG_ERROR("Failed to verify all manifested payloads");
+ return status;
+ }
// TODO(pwbug/456): Invoke the backend to do downstream verification of the
// bundle (e.g. compatibility and manifest completeness checks).
@@ -508,36 +400,62 @@ Status UpdateBundleAccessor::DoVerify() {
protobuf::Message UpdateBundleAccessor::GetOnDeviceTrustedRoot() {
Result<stream::SeekableReader*> res = backend_.GetRootMetadataReader();
- PW_TRY(res.status());
- PW_CHECK_NOTNULL(res.value());
+ if (!(res.ok() && res.value())) {
+ PW_LOG_ERROR("Failed to get on-device Root metadata");
+ return res.status();
+ }
// Seek to the beginning so that ConservativeReadLimit() returns the correct
// value.
PW_TRY(res.value()->Seek(0, stream::Stream::Whence::kBeginning));
return protobuf::Message(*res.value(), res.value()->ConservativeReadLimit());
}
+ManifestAccessor UpdateBundleAccessor::GetOnDeviceManifest() {
+ // Notify backend to check if an on-device manifest exists and is valid and if
+ // yes, prepare a ready-to-go reader.
+ PW_TRY(backend_.BeforeManifestRead());
+
+ Result<stream::SeekableReader*> manifest_reader =
+ backend_.GetManifestReader();
+ PW_TRY(manifest_reader.status());
+ PW_CHECK_NOTNULL(manifest_reader.value());
+
+ // In case `backend_.BeforeManifestRead()` forgot to reset the reader.
+ PW_TRY(manifest_reader.value()->Seek(0, stream::Stream::Whence::kBeginning));
+
+ return ManifestAccessor::FromManifest(
+ protobuf::Message(*manifest_reader.value(),
+ manifest_reader.value()->ConservativeReadLimit()));
+}
+
Status UpdateBundleAccessor::UpgradeRoot() {
- protobuf::Message new_root = decoder_.AsMessage(
+ protobuf::Message new_root = bundle_.AsMessage(
static_cast<uint32_t>(UpdateBundle::Fields::ROOT_METADATA));
- if (new_root.status().IsNotFound()) {
+
+ // Try self-verification even if verification is disabled by the caller. This
+ // minimizes surprises when the caller do decide to turn on verification.
+ bool self_verifying = disable_verification_;
+
+ // Choose and cache the root metadata to trust.
+ trusted_root_ = self_verifying ? new_root : GetOnDeviceTrustedRoot();
+
+ if (!new_root.status().ok()) {
+ // Don't bother upgrading if not found or invalid.
+ PW_LOG_WARN("Incoming root metadata not found or invalid");
return OkStatus();
}
- PW_TRY(new_root.status());
-
- // Get the trusted root and prepare for verification.
- protobuf::Message trusted_root = GetOnDeviceTrustedRoot();
- PW_TRY(trusted_root.status());
+ // A valid trust anchor is required onwards from here.
+ PW_TRY(trusted_root_.status());
// TODO(pwbug/456): Check whether the bundle contains a root metadata that
// is different from the on-device trusted root.
// Verify the signatures against the trusted root metadata.
Result<bool> verify_res =
- VerifyRootMetadataSignatures(trusted_root, new_root);
- PW_TRY(verify_res.status());
- if (!verify_res.value()) {
- PW_LOG_INFO("Fail to verify signatures against the current root");
+ VerifyRootMetadataSignatures(trusted_root_, new_root);
+ if (!(verify_res.status().ok() && verify_res.value())) {
+ PW_LOG_ERROR("Failed to verify incoming root against the current root");
return Status::Unauthenticated();
}
@@ -552,16 +470,15 @@ Status UpdateBundleAccessor::UpgradeRoot() {
// Verify the signatures against the new root metadata.
verify_res = VerifyRootMetadataSignatures(new_root, new_root);
- PW_TRY(verify_res.status());
- if (!verify_res.value()) {
- PW_LOG_INFO("Fail to verify signatures against the new root");
+ if (!(verify_res.status().ok() && verify_res.value())) {
+ PW_LOG_ERROR("Fail to verify incoming root against itself");
return Status::Unauthenticated();
}
// TODO(pwbug/456): Check rollback.
// Retrieves the trusted root metadata content message.
protobuf::Message trusted_root_content =
- trusted_root.AsMessage(static_cast<uint32_t>(
+ trusted_root_.AsMessage(static_cast<uint32_t>(
SignedRootMetadata::Fields::SERIALIZED_ROOT_METADATA));
PW_TRY(trusted_root_content.status());
Result<uint32_t> trusted_root_version = GetMetadataVersion(
@@ -579,20 +496,23 @@ Status UpdateBundleAccessor::UpgradeRoot() {
PW_TRY(new_root_version.status());
if (trusted_root_version.value() > new_root_version.value()) {
- PW_LOG_DEBUG("Root attempts to rollback from %u to %u.",
+ PW_LOG_ERROR("Root attempts to rollback from %u to %u",
trusted_root_version.value(),
new_root_version.value());
return Status::Unauthenticated();
}
- // Persist the root immediately after it is successfully verified. This is
- // to make sure the trust anchor is up-to-date in storage as soon as
- // we are confident. Although targets metadata and product-specific
- // verification have not been done yet. They should be independent from and
- // not gate the upgrade of root key. This allows timely revokation of
- // compromise keys.
- stream::IntervalReader new_root_reader = new_root.ToBytes().GetBytesReader();
- PW_TRY(backend_.SafelyPersistRootMetadata(new_root_reader));
+ if (!self_verifying) {
+ // Persist the root immediately after it is successfully verified. This is
+ // to make sure the trust anchor is up-to-date in storage as soon as
+ // we are confident. Although targets metadata and product-specific
+ // verification have not been done yet. They should be independent from and
+ // not gate the upgrade of root key. This allows timely revokation of
+ // compromise keys.
+ stream::IntervalReader new_root_reader =
+ new_root.ToBytes().GetBytesReader();
+ PW_TRY(backend_.SafelyPersistRootMetadata(new_root_reader));
+ }
// TODO(pwbug/456): Implement key change detection to determine whether
// rotation has occured or not. Delete the persisted targets metadata version
@@ -602,6 +522,18 @@ Status UpdateBundleAccessor::UpgradeRoot() {
}
Status UpdateBundleAccessor::VerifyTargetsMetadata() {
+ bool self_verifying = disable_verification_;
+
+ if (self_verifying && !trusted_root_.status().ok()) {
+ PW_LOG_WARN(
+ "Self-verification won't verify Targets metadata because there is no "
+ "root");
+ return OkStatus();
+ }
+
+ // A valid trust anchor is required from now on.
+ PW_TRY(trusted_root_.status());
+
// Retrieve the signed targets metadata map.
//
// message UpdateBundle {
@@ -610,7 +542,7 @@ Status UpdateBundleAccessor::VerifyTargetsMetadata() {
// ...
// }
protobuf::StringToMessageMap signed_targets_metadata_map =
- decoder_.AsStringToMessageMap(
+ bundle_.AsStringToMessageMap(
static_cast<uint32_t>(UpdateBundle::Fields::TARGETS_METADATA));
PW_TRY(signed_targets_metadata_map.status());
@@ -637,13 +569,9 @@ Status UpdateBundleAccessor::VerifyTargetsMetadata() {
static_cast<uint32_t>(SignedTargetsMetadata::Fields::SIGNATURES));
PW_TRY(signatures.status());
- // Get the trusted root and prepare for verification.
- protobuf::Message signed_trusted_root = GetOnDeviceTrustedRoot();
- PW_TRY(signed_trusted_root.status());
-
// Retrieve the trusted root metadata message.
protobuf::Message trusted_root =
- signed_trusted_root.AsMessage(static_cast<uint32_t>(
+ trusted_root_.AsMessage(static_cast<uint32_t>(
SignedRootMetadata::Fields::SERIALIZED_ROOT_METADATA));
PW_TRY(trusted_root.status());
@@ -659,71 +587,49 @@ Status UpdateBundleAccessor::VerifyTargetsMetadata() {
PW_TRY(signature_requirement.status());
// Verify the sigantures
- Result<bool> sig_res =
+ Status sig_res =
VerifyMetadataSignatures(top_level_targets_metadata.ToBytes(),
signatures,
signature_requirement,
key_mapping);
- PW_TRY(sig_res.status());
- if (!sig_res.value()) {
- PW_LOG_DEBUG("Fail to verify targets metadata signatures");
+ if (self_verifying && sig_res.IsNotFound()) {
+ PW_LOG_WARN("Self-verification ignoring unsigned bundle");
+ return OkStatus();
+ }
+
+ if (!sig_res.ok()) {
+ PW_LOG_ERROR("Targets Metadata failed signature verification");
return Status::Unauthenticated();
}
// TODO(pwbug/456): Check targets metadtata content.
- // Get on-device manifest.
- Result<stream::SeekableReader*> manifest_reader =
- backend_.GetCurrentManifestReader();
- PW_TRY(manifest_reader.status());
- PW_CHECK_NOTNULL(manifest_reader.value());
- protobuf::Message manifest(*manifest_reader.value(),
- manifest_reader.value()->ConservativeReadLimit());
+ if (self_verifying) {
+ // Don't bother because it does not matter.
+ PW_LOG_WARN("Self verification does not do Targets metadata anti-rollback");
+ return OkStatus();
+ }
- // Retrieves the targest metdata map from the manifest
- //
- // message Manifest {
- // ...
- // map<string, TargetsMetadata> targets_metadata = <id>;
- // ...
- // }
- protobuf::StringToMessageMap manifest_targets_metadata_map =
- manifest.AsStringToMessageMap(
- static_cast<uint32_t>(Manifest::Fields::TARGETS_METADATA));
- PW_TRY(manifest_targets_metadata_map.status());
-
- // Retrieves the top-level targets metadata from the map and get the version
- uint32_t current_ver;
- protobuf::Message manifest_top_level_targets_metadata =
- manifest_targets_metadata_map[kTopLevelTargetsName];
- if (manifest_top_level_targets_metadata.status().IsNotFound()) {
- // If the top-level targets metadata is missing, then either the device has
- // never received any prior update, or manifest has been reset in the case
- // of key rotation. In this case, current version is assumed to be 0.
- PW_LOG_DEBUG(
- "Cannot find top-level targets metadata from the current manifest. "
- "Current rollback index is treated as 0");
- current_ver = 0;
- } else {
- PW_TRY(manifest_top_level_targets_metadata.status());
- Result<uint32_t> version = GetMetadataVersion(
- manifest_top_level_targets_metadata,
- static_cast<uint32_t>(
- software_update::TargetsMetadata::Fields::COMMON_METADATA));
- PW_TRY(version.status());
- current_ver = version.value();
+ // Anti-rollback check.
+ ManifestAccessor device_manifest = GetOnDeviceManifest();
+ if (device_manifest.status().IsNotFound()) {
+ PW_LOG_WARN("Skipping OTA anti-rollback due to absent device manifest");
+ return OkStatus();
}
+ protobuf::Uint32 current_version = device_manifest.GetVersion();
+ PW_TRY(current_version.status());
+
// Retrieves the version from the new metadata
Result<uint32_t> new_version = GetMetadataVersion(
top_level_targets_metadata,
static_cast<uint32_t>(
software_update::TargetsMetadata::Fields::COMMON_METADATA));
PW_TRY(new_version.status());
- if (current_ver > new_version.value()) {
- PW_LOG_DEBUG("Targets attempt to rollback from %u to %u.",
- current_ver,
+ if (current_version.value() > new_version.value()) {
+ PW_LOG_ERROR("Blocking Targets metadata rollback from %u to %u",
+ current_version.value(),
new_version.value());
return Status::Unauthenticated();
}
@@ -732,97 +638,187 @@ Status UpdateBundleAccessor::VerifyTargetsMetadata() {
}
Status UpdateBundleAccessor::VerifyTargetsPayloads() {
- // Gets the list of targets.
- protobuf::RepeatedMessages target_files = GetTopLevelTargets(decoder_);
- PW_TRY(target_files.status());
+ ManifestAccessor bundle_manifest = ManifestAccessor::FromBundle(bundle_);
+ PW_TRY(bundle_manifest.status());
- // Gets the list of payloads.
- //
- // message UpdateBundle {
- // ...
- // map<string, bytes> target_payloads = <id>;
- // ...
- // }
- protobuf::StringToBytesMap target_payloads = decoder_.AsStringToBytesMap(
- static_cast<uint32_t>(UpdateBundle::Fields::TARGET_PAYLOADS));
- PW_TRY(target_payloads.status());
+ // Target file descriptors (pathname, length, hash, etc.) listed in the bundle
+ // manifest.
+ protobuf::RepeatedMessages target_files = bundle_manifest.GetTargetFiles();
+ PW_TRY(target_files.status());
- // Checks hashes for all targets.
+ // Verify length and SHA256 hash for each file listed in the manifest.
for (protobuf::Message target_file : target_files) {
- // Extract `file_name`, `length` and `hashes` for each target in the
- // metadata.
- //
- // message TargetFile {
- // ...
- // string file_name = <id>;
- // uint64 length = <id>;
- // ...
- // }
- protobuf::String target_name = target_file.AsString(
+ // Extract target file name in the form of a `std::string_view`.
+ protobuf::String name_proto = target_file.AsString(
static_cast<uint32_t>(TargetFile::Fields::FILE_NAME));
+ PW_TRY(name_proto.status());
+ char name_buf[MAX_TARGET_NAME_LENGTH] = {0};
+ Result<std::string_view> target_name =
+ ReadProtoString(name_proto, name_buf);
PW_TRY(target_name.status());
+ // Get target length.
protobuf::Uint64 target_length =
target_file.AsUint64(static_cast<uint32_t>(TargetFile::Fields::LENGTH));
PW_TRY(target_length.status());
-
- char target_name_read_buf[MAX_TARGET_NAME_LENGTH] = {0};
- Result<std::string_view> target_name_sv =
- ReadProtoString(target_name, target_name_read_buf);
- PW_TRY(target_name_sv.status());
-
- // Finds the target in the target payloads
- protobuf::Bytes target_payload = target_payloads[target_name_sv.value()];
- if (target_payload.status().IsNotFound()) {
- PW_LOG_DEBUG(
- "target payload for %s does not exist. Assumed personalized out",
- target_name_read_buf);
- // Invoke backend specific check
- PW_TRY(backend_.VerifyTargetFile(GetManifestAccessor(),
- target_name_sv.value()));
- continue;
- }
-
- PW_TRY(target_payload.status());
- // Payload size must matches file length
- if (target_payload.GetBytesReader().interval_size() !=
- target_length.value()) {
- PW_LOG_DEBUG("Target payload size mismatch");
- return Status::Unauthenticated();
+ if (target_length.value() > PW_SOFTWARE_UPDATE_MAX_TARGET_PAYLOAD_SIZE) {
+ PW_LOG_ERROR("Target payload too big. Maximum is %llu bytes",
+ PW_SOFTWARE_UPDATE_MAX_TARGET_PAYLOAD_SIZE);
+ return Status::OutOfRange();
}
- // Gets the list of hashes
- //
- // message TargetFile {
- // ...
- // repeated Hash hashes = <id>;
- // ...
- // }
+ // Get target SHA256 hash.
+ protobuf::Bytes target_sha256 = Status::NotFound();
protobuf::RepeatedMessages hashes = target_file.AsRepeatedMessages(
static_cast<uint32_t>(TargetFile::Fields::HASHES));
- PW_TRY(hashes.status());
-
- // Check all hashes
- size_t num_hashes = 0;
for (protobuf::Message hash : hashes) {
- num_hashes++;
- Result<bool> hash_verify_res =
- VerifyTargetPayloadHash(hash, target_payload);
- PW_TRY(hash_verify_res.status());
- if (!hash_verify_res.value()) {
- PW_LOG_DEBUG("sha256 hash mismatch for file %s", target_name_read_buf);
- return Status::Unauthenticated();
+ protobuf::Uint32 hash_function =
+ hash.AsUint32(static_cast<uint32_t>(Hash::Fields::FUNCTION));
+ PW_TRY(hash_function.status());
+
+ if (hash_function.value() ==
+ static_cast<uint32_t>(HashFunction::SHA256)) {
+ target_sha256 = hash.AsBytes(static_cast<uint32_t>(Hash::Fields::HASH));
+ break;
}
- } // for (protobuf::Message hash : hashes)
+ }
+ PW_TRY(target_sha256.status());
+
+ if (Status status = VerifyTargetPayload(
+ bundle_manifest, target_name.value(), target_length, target_sha256);
+ !status.ok()) {
+ PW_LOG_ERROR("Target: %s failed verification",
+ pw::MakeString(target_name.value()).c_str());
+ return status;
+ }
+ } // for each target file in manifest.
+
+ return OkStatus();
+}
+
+Status UpdateBundleAccessor::VerifyTargetPayload(
+ ManifestAccessor manifest,
+ std::string_view target_name,
+ protobuf::Uint64 expected_length,
+ protobuf::Bytes expected_sha256) {
+ protobuf::StringToBytesMap payloads_map = bundle_.AsStringToBytesMap(
+ static_cast<uint32_t>(UpdateBundle::Fields::TARGET_PAYLOADS));
+ stream::IntervalReader payload_reader =
+ payloads_map[target_name].GetBytesReader();
+
+ Status status;
+
+ if (payload_reader.ok()) {
+ status = VerifyInBundleTargetPayload(
+ expected_length, expected_sha256, payload_reader);
+ } else {
+ status = VerifyOutOfBundleTargetPayload(
+ target_name, expected_length, expected_sha256);
+ }
+
+ // TODO(alizhang): Notify backend to do additional checks by calling
+ // backend_.VerifyTargetFile(...).
+ return status;
+}
+
+// TODO(alizhang): Add unit tests for all failure conditions.
+Status UpdateBundleAccessor::VerifyOutOfBundleTargetPayload(
+ std::string_view target_name,
+ protobuf::Uint64 expected_length,
+ protobuf::Bytes expected_sha256) {
+#if PW_SOFTWARE_UPDATE_WITH_PERSONALIZATION
+ // The target payload is "personalized out". We we can't take a measurement
+ // without backend help. For now we will check against the device manifest
+ // which contains a cached measurement of the last software update.
+ ManifestAccessor device_manifest = GetOnDeviceManifest();
+ if (!device_manifest.ok()) {
+ PW_LOG_ERROR(
+ "Can't verify personalized-out target because on-device manifest is "
+ "not found");
+ return Status::Unauthenticated();
+ }
+
+ protobuf::Message cached = device_manifest.GetTargetFile(target_name);
+ if (!cached.ok()) {
+ PW_LOG_ERROR(
+ "Can't verify personalized-out target because it is not found from "
+ "on-device manifest");
+ return Status::Unauthenticated();
+ }
+
+ protobuf::Uint64 cached_length =
+ cached.AsUint64(static_cast<uint32_t>(TargetFile::Fields::LENGTH));
+ PW_TRY(cached_length.status());
+ if (cached_length.value() != expected_length.value()) {
+ PW_LOG_ERROR("Personalized-out target has bad length: %llu, expected: %llu",
+ cached_length.value(),
+ expected_length.value());
+ return Status::Unauthenticated();
+ }
- // The packet does not contain any hash
- if (!num_hashes) {
- PW_LOG_DEBUG("No hash for file %s", target_name_read_buf);
- return Status::Unauthenticated();
+ protobuf::Bytes cached_sha256 = Status::NotFound();
+ protobuf::RepeatedMessages hashes = cached.AsRepeatedMessages(
+ static_cast<uint32_t>(TargetFile::Fields::HASHES));
+ for (protobuf::Message hash : hashes) {
+ protobuf::Uint32 hash_function =
+ hash.AsUint32(static_cast<uint32_t>(Hash::Fields::FUNCTION));
+ PW_TRY(hash_function.status());
+
+ if (hash_function.value() == static_cast<uint32_t>(HashFunction::SHA256)) {
+ cached_sha256 = hash.AsBytes(static_cast<uint32_t>(Hash::Fields::HASH));
+ break;
}
- } // for (protobuf::Message target_file : target_files)
+ }
+ std::byte sha256[crypto::sha256::kDigestSizeBytes] = {};
+ PW_TRY(cached_sha256.GetBytesReader().Read(sha256));
+
+ Result<bool> hash_equal = expected_sha256.Equal(sha256);
+ PW_TRY(hash_equal.status());
+ if (!hash_equal.value()) {
+ PW_LOG_ERROR("Personalized-out target has a bad hash");
+ return Status::Unauthenticated();
+ }
+
+ return OkStatus();
+#else
+ PW_LOG_ERROR("Target file %s not found in bundle", target_name);
+ return Status::Unauthenticated();
+#endif // PW_SOFTWARE_UPDATE_WITH_PERSONALIZATION
+}
+
+Status UpdateBundleAccessor::VerifyInBundleTargetPayload(
+ protobuf::Uint64 expected_length,
+ protobuf::Bytes expected_sha256,
+ stream::IntervalReader payload_reader) {
+ // If the target payload is included in the bundle, simply take a
+ // measurement.
+ uint64_t actual_length = payload_reader.interval_size();
+ if (actual_length != expected_length.value()) {
+ PW_LOG_ERROR("Wrong payload length. Expected: %llu, actual: %llu",
+ expected_length.value(),
+ actual_length);
+ return Status::Unauthenticated();
+ }
+
+ std::byte actual_sha256[crypto::sha256::kDigestSizeBytes] = {};
+ PW_TRY(crypto::sha256::Hash(payload_reader, actual_sha256));
+ Result<bool> hash_equal = expected_sha256.Equal(actual_sha256);
+ PW_TRY(hash_equal.status());
+ if (!hash_equal.value()) {
+ PW_LOG_ERROR("Wrong payload sha256 hash");
+ return Status::Unauthenticated();
+ }
return OkStatus();
}
+ManifestAccessor UpdateBundleAccessor::GetManifest() {
+ if (!bundle_verified_) {
+ PW_LOG_DEBUG("Bundled has not passed verification yet");
+ return Status::FailedPrecondition();
+ }
+
+ return ManifestAccessor::FromBundle(bundle_);
+}
+
} // namespace pw::software_update
diff --git a/pw_software_update/update_bundle_test.cc b/pw_software_update/update_bundle_test.cc
index 83b5b151d..927c0e6ed 100644
--- a/pw_software_update/update_bundle_test.cc
+++ b/pw_software_update/update_bundle_test.cc
@@ -23,7 +23,7 @@
#include "test_bundles.h"
#define ASSERT_OK(status) ASSERT_EQ(OkStatus(), status)
-#define ASSERT_NOT_OK(status) ASSERT_NE(OkStatus(), status)
+#define ASSERT_FAIL(status) ASSERT_NE(OkStatus(), status)
namespace pw::software_update {
namespace {
@@ -38,18 +38,11 @@ constexpr size_t kMetadataBufferSize =
class TestBundledUpdateBackend final : public BundledUpdateBackend {
public:
TestBundledUpdateBackend()
- : current_manifest_reader_({}), trusted_root_memory_reader_({}) {}
+ : manifest_reader_({}), trusted_root_memory_reader_({}) {}
Status ApplyReboot() override { return Status::Unimplemented(); }
Status PostRebootFinalize() override { return OkStatus(); }
- Status VerifyTargetFile(
- [[maybe_unused]] ManifestAccessor manifest,
- [[maybe_unused]] std::string_view target_file_name) override {
- backend_verified_files_++;
- return verify_target_file_result_;
- };
-
Status ApplyTargetFile(std::string_view, stream::Reader&, size_t) override {
return OkStatus();
}
@@ -69,15 +62,45 @@ class TestBundledUpdateBackend final : public BundledUpdateBackend {
}
void SetCurrentManifest(ConstByteSpan current_manifest) {
- current_manifest_reader_ = stream::MemoryReader(current_manifest);
+ manifest_reader_ = stream::MemoryReader(current_manifest);
}
+ void SetManifestWriter(stream::Writer* writer) { manifest_writer_ = writer; }
+
virtual Result<stream::SeekableReader*> GetRootMetadataReader() override {
return &trusted_root_reader_;
};
- virtual Result<stream::SeekableReader*> GetCurrentManifestReader() {
- return &current_manifest_reader_;
+ Status BeforeManifestRead() override {
+ before_manifest_read_called_ = true;
+ if (manifest_reader_.ConservativeReadLimit() > 0) {
+ return OkStatus();
+ }
+ return Status::NotFound();
+ };
+
+ bool BeforeManifestReadCalled() { return before_manifest_read_called_; }
+
+ Result<stream::SeekableReader*> GetManifestReader() override {
+ return &manifest_reader_;
+ }
+
+ Status BeforeManifestWrite() override {
+ before_manifest_write_called_ = true;
+ return (manifest_writer_) ? OkStatus() : Status::NotFound();
+ }
+
+ bool BeforeManifestWriteCalled() { return before_manifest_write_called_; }
+
+ Status AfterManifestWrite() override {
+ after_manifest_write_called_ = true;
+ return OkStatus();
+ }
+
+ bool AfterManifestWriteCalled() { return after_manifest_write_called_; }
+
+ Result<stream::Writer*> GetManifestWriter() override {
+ return manifest_writer_;
}
virtual Status SafelyPersistRootMetadata(
@@ -89,18 +112,15 @@ class TestBundledUpdateBackend final : public BundledUpdateBackend {
bool IsNewRootPersisted() const { return new_root_persisted_; }
- size_t NumFilesVerified() const { return backend_verified_files_; }
-
- void SetVerifyTargetFileResult(Status status) {
- verify_target_file_result_ = status;
- }
-
private:
stream::IntervalReader trusted_root_reader_;
- stream::MemoryReader current_manifest_reader_;
+ stream::MemoryReader manifest_reader_;
+ stream::Writer* manifest_writer_ = nullptr;
+ bool before_manifest_read_called_ = false;
+ bool before_manifest_write_called_ = false;
+ bool after_manifest_write_called_ = false;
bool new_root_persisted_ = false;
size_t backend_verified_files_ = 0;
- Status verify_target_file_result_ = OkStatus();
// A memory reader for buffer passed by SetTrustedRoot(). This will be used
// to back `trusted_root_reader_`
@@ -139,16 +159,14 @@ class UpdateBundleTest : public testing::Test {
UpdateBundleAccessor& update_bundle) {
// We need to check specifically that failure is due to rejecting
// unverified/unopened bundle, not anything else.
- ASSERT_EQ(update_bundle.GetDecoder().status(),
+ ASSERT_EQ(update_bundle.GetManifest().status(),
Status::FailedPrecondition());
ASSERT_EQ(update_bundle.GetTargetPayload("any").status(),
Status::FailedPrecondition());
- ASSERT_EQ(update_bundle.IsTargetPayloadIncluded("any").status(),
+ ASSERT_EQ(update_bundle.GetTargetPayload(protobuf::String({})).status(),
Status::FailedPrecondition());
-
- std::byte manifest_buffer[sizeof(kTestBundleManifest)];
- stream::MemoryWriter manifest_writer(manifest_buffer);
- ASSERT_EQ(update_bundle.PersistManifest(manifest_writer),
+ ASSERT_EQ(update_bundle.PersistManifest(), Status::FailedPrecondition());
+ ASSERT_EQ(update_bundle.GetTotalPayloadSize().status(),
Status::FailedPrecondition());
}
@@ -158,7 +176,7 @@ class UpdateBundleTest : public testing::Test {
void CheckOpenAndVerifyFail(UpdateBundleAccessor& update_bundle,
bool expect_new_root_persisted) {
ASSERT_FALSE(backend().IsNewRootPersisted());
- ASSERT_NOT_OK(update_bundle.OpenAndVerify());
+ ASSERT_FAIL(update_bundle.OpenAndVerify());
ASSERT_EQ(backend().IsNewRootPersisted(), expect_new_root_persisted);
VerifyAllBundleOperationsDisallowed(update_bundle);
@@ -209,26 +227,6 @@ TEST_F(UpdateBundleTest, GetTargetPayload) {
}
}
-TEST_F(UpdateBundleTest, IsTargetPayloadIncluded) {
- backend().SetTrustedRoot(kDevSignedRoot);
- StageTestBundle(kTestDevBundle);
- UpdateBundleAccessor update_bundle(bundle_blob(), backend());
-
- ASSERT_OK(update_bundle.OpenAndVerify());
-
- Result<bool> res = update_bundle.IsTargetPayloadIncluded("file1");
- ASSERT_OK(res.status());
- ASSERT_TRUE(res.value());
-
- res = update_bundle.IsTargetPayloadIncluded("file2");
- ASSERT_OK(res.status());
- ASSERT_TRUE(res.value());
-
- res = update_bundle.IsTargetPayloadIncluded("non-exist");
- ASSERT_OK(res.status());
- ASSERT_FALSE(res.value());
-}
-
TEST_F(UpdateBundleTest, PersistManifest) {
backend().SetTrustedRoot(kDevSignedRoot);
StageTestBundle(kTestDevBundle);
@@ -236,9 +234,14 @@ TEST_F(UpdateBundleTest, PersistManifest) {
ASSERT_OK(update_bundle.OpenAndVerify());
- std::byte manifest_buffer[sizeof(kTestBundleManifest)];
+ std::byte manifest_buffer[sizeof(kTestBundleManifest)] = {};
stream::MemoryWriter manifest_writer(manifest_buffer);
- ASSERT_OK(update_bundle.PersistManifest(manifest_writer));
+ backend().SetManifestWriter(&manifest_writer);
+ ASSERT_FALSE(backend().BeforeManifestWriteCalled());
+ ASSERT_FALSE(backend().AfterManifestWriteCalled());
+ ASSERT_OK(update_bundle.PersistManifest());
+ ASSERT_TRUE(backend().BeforeManifestWriteCalled());
+ ASSERT_TRUE(backend().AfterManifestWriteCalled());
ASSERT_EQ(
memcmp(manifest_buffer, kTestBundleManifest, sizeof(kTestBundleManifest)),
@@ -250,32 +253,78 @@ TEST_F(UpdateBundleTest, PersistManifestFailIfNotVerified) {
StageTestBundle(kTestBadProdSignature);
UpdateBundleAccessor update_bundle(bundle_blob(), backend());
- ASSERT_NOT_OK(update_bundle.OpenAndVerify());
+ ASSERT_FAIL(update_bundle.OpenAndVerify());
std::byte manifest_buffer[sizeof(kTestBundleManifest)];
stream::MemoryWriter manifest_writer(manifest_buffer);
- ASSERT_NOT_OK(update_bundle.PersistManifest(manifest_writer));
+ backend().SetManifestWriter(&manifest_writer);
+ ASSERT_FALSE(backend().BeforeManifestWriteCalled());
+ ASSERT_FALSE(backend().AfterManifestWriteCalled());
+ ASSERT_FAIL(update_bundle.PersistManifest());
+ ASSERT_FALSE(backend().BeforeManifestWriteCalled());
+ ASSERT_FALSE(backend().AfterManifestWriteCalled());
}
-TEST_F(UpdateBundleTest, BundleVerificationDisabled) {
- backend().SetTrustedRoot(kDevSignedRoot);
- StageTestBundle(kTestBadProdSignature);
- UpdateBundleAccessor update_bundle(bundle_blob(), backend(), true);
+TEST_F(UpdateBundleTest, SelfVerificationWithIncomingRoot) {
+ StageTestBundle(kTestDevBundleWithRoot);
+ UpdateBundleAccessor update_bundle(
+ bundle_blob(), backend(), /* disable_verification = */ true);
- // Since bundle verification is disabled. The bad bundle should not report
- // error.
ASSERT_OK(update_bundle.OpenAndVerify());
+ // Self verification must not persist anything.
+ ASSERT_FALSE(backend().IsNewRootPersisted());
// Manifest persisting should be allowed as well.
std::byte manifest_buffer[sizeof(kTestBundleManifest)];
stream::MemoryWriter manifest_writer(manifest_buffer);
- ASSERT_OK(update_bundle.PersistManifest(manifest_writer));
+ backend().SetManifestWriter(&manifest_writer);
+ ASSERT_OK(update_bundle.PersistManifest());
ASSERT_EQ(
memcmp(manifest_buffer, kTestBundleManifest, sizeof(kTestBundleManifest)),
0);
}
+TEST_F(UpdateBundleTest, SelfVerificationWithoutIncomingRoot) {
+ StageTestBundle(kTestDevBundle);
+ UpdateBundleAccessor update_bundle(
+ bundle_blob(), backend(), /* disable_verification = */ true);
+
+ ASSERT_OK(update_bundle.OpenAndVerify());
+}
+
+TEST_F(UpdateBundleTest, SelfVerificationWithMessedUpRoot) {
+ StageTestBundle(kTestDevBundleWithProdRoot);
+ UpdateBundleAccessor update_bundle(
+ bundle_blob(), backend(), /* disable_verification = */ true);
+
+ ASSERT_FAIL(update_bundle.OpenAndVerify());
+}
+
+TEST_F(UpdateBundleTest, SelfVerificationChecksMissingHashes) {
+ StageTestBundle(kTestBundleMissingTargetHashFile0);
+ UpdateBundleAccessor update_bundle(
+ bundle_blob(), backend(), /* disable_verification = */ true);
+
+ ASSERT_FAIL(update_bundle.OpenAndVerify());
+}
+
+TEST_F(UpdateBundleTest, SelfVerificationChecksBadHashes) {
+ StageTestBundle(kTestBundleMismatchedTargetHashFile0);
+ UpdateBundleAccessor update_bundle(
+ bundle_blob(), backend(), /* disable_verification = */ true);
+
+ ASSERT_FAIL(update_bundle.OpenAndVerify());
+}
+
+TEST_F(UpdateBundleTest, SelfVerificationIgnoresUnsignedBundle) {
+ StageTestBundle(kTestUnsignedBundleWithRoot);
+ UpdateBundleAccessor update_bundle(
+ bundle_blob(), backend(), /* disable_verification = */ true);
+
+ ASSERT_OK(update_bundle.OpenAndVerify());
+}
+
TEST_F(UpdateBundleTest, OpenAndVerifySucceedsWithAllVerification) {
backend().SetTrustedRoot(kDevSignedRoot);
backend().SetCurrentManifest(kTestBundleManifest);
@@ -283,12 +332,10 @@ TEST_F(UpdateBundleTest, OpenAndVerifySucceedsWithAllVerification) {
UpdateBundleAccessor update_bundle(bundle_blob(), backend());
ASSERT_FALSE(backend().IsNewRootPersisted());
+ ASSERT_FALSE(backend().BeforeManifestReadCalled());
ASSERT_OK(update_bundle.OpenAndVerify());
ASSERT_TRUE(backend().IsNewRootPersisted());
-
- // No file is personalized out in kTestProdBundle. Backend verification
- // should not be invoked.
- ASSERT_EQ(backend().NumFilesVerified(), static_cast<size_t>(0));
+ ASSERT_TRUE(backend().BeforeManifestReadCalled());
ASSERT_OK(update_bundle.Close());
VerifyAllBundleOperationsDisallowed(update_bundle);
@@ -305,12 +352,10 @@ TEST_F(UpdateBundleTest,
UpdateBundleAccessor update_bundle(bundle_blob(), backend());
ASSERT_FALSE(backend().IsNewRootPersisted());
+ ASSERT_FALSE(backend().BeforeManifestReadCalled());
ASSERT_OK(update_bundle.OpenAndVerify());
ASSERT_FALSE(backend().IsNewRootPersisted());
-
- // No file is personalized out in kTestDevBundle. Backend verification
- // should not be invoked.
- ASSERT_EQ(backend().NumFilesVerified(), static_cast<size_t>(0));
+ ASSERT_TRUE(backend().BeforeManifestReadCalled());
ASSERT_OK(update_bundle.Close());
VerifyAllBundleOperationsDisallowed(update_bundle);
@@ -447,9 +492,6 @@ TEST_F(UpdateBundleTest, OpenAndVerifySucceedsWithPersonalizedOutFile0) {
UpdateBundleAccessor update_bundle(bundle_blob(), backend());
ASSERT_OK(update_bundle.OpenAndVerify());
- // Backend specific file check shall be performed only on files personalized
- // out.
- ASSERT_EQ(backend().NumFilesVerified(), static_cast<size_t>(1));
}
TEST_F(UpdateBundleTest, OpenAndVerifySucceedsWithPersonalizedOutFile1) {
@@ -463,18 +505,19 @@ TEST_F(UpdateBundleTest, OpenAndVerifySucceedsWithPersonalizedOutFile1) {
UpdateBundleAccessor update_bundle(bundle_blob(), backend());
ASSERT_OK(update_bundle.OpenAndVerify());
- // Backend specific file check shall be performed only on files personalized
- // out.
- ASSERT_EQ(backend().NumFilesVerified(), static_cast<size_t>(1));
}
-TEST_F(UpdateBundleTest, OpenAndVerifyFailsOnBackendVerification) {
+TEST_F(UpdateBundleTest,
+ PersonalizationVerificationFailsWithoutDeviceManifest) {
backend().SetTrustedRoot(kDevSignedRoot);
- backend().SetCurrentManifest(kTestBundleManifest);
- StageTestBundle(kTestBundlePersonalizedOutFile1);
+ // `kTestBundlePersonalizedOutFile0` is auto generated by
+ // pw_software_update/py/pw_software_update/generate_test_bundle.py
+ // The payload for file 0 is removed from the bundle to emulate being
+ // personalized out.
+ StageTestBundle(kTestBundlePersonalizedOutFile0);
UpdateBundleAccessor update_bundle(bundle_blob(), backend());
- backend().SetVerifyTargetFileResult(Status::Internal());
- CheckOpenAndVerifyFail(update_bundle, true);
+
+ ASSERT_FAIL(update_bundle.OpenAndVerify());
}
} // namespace pw::software_update
diff --git a/pw_span/CMakeLists.txt b/pw_span/CMakeLists.txt
index cd29e1047..a53fd0d52 100644
--- a/pw_span/CMakeLists.txt
+++ b/pw_span/CMakeLists.txt
@@ -19,7 +19,7 @@ pw_auto_add_simple_module(pw_span
pw_polyfill
pw_polyfill.standard_library
)
-target_include_directories(pw_span PUBLIC public_overrides)
+target_include_directories(pw_span INTERFACE public_overrides)
if(Zephyr_FOUND AND CONFIG_PIGWEED_SPAN)
zephyr_link_libraries(pw_span)
diff --git a/pw_spi/public/pw_spi/device.h b/pw_spi/public/pw_spi/device.h
index 07a9858ee..db07e9ce9 100644
--- a/pw_spi/public/pw_spi/device.h
+++ b/pw_spi/public/pw_spi/device.h
@@ -92,7 +92,8 @@ class Device {
if ((selector_ != nullptr) &&
(behavior_ == ChipSelectBehavior::kPerTransaction) &&
(!first_write_read_)) {
- selector_->Deactivate();
+ selector_->Deactivate()
+ .IgnoreError(); // TODO(pwbug/387): Handle Status properly
}
}
diff --git a/pw_status/docs.rst b/pw_status/docs.rst
index b4a89b6a1..997ca539c 100644
--- a/pw_status/docs.rst
+++ b/pw_status/docs.rst
@@ -214,9 +214,11 @@ Unused result warnings
If the ``PW_STATUS_CFG_CHECK_IF_USED`` option is enabled, ``pw::Status`` objects
returned from function calls must be used or it is a compilation error. To
silence these warnings call ``IgnoreError()`` on the returned status object.
-``PW_STATUS_CFG_CHECK_IF_USED`` defaults to off. Pigweed and projects that use
-it will be updated to compile with this option enabled. After all projects have
-migrated, unused result warnings will be enabled unconditionally.
+
+``PW_STATUS_CFG_CHECK_IF_USED`` defaults to off. Pigweed compiles with this
+option enabled, but projects that use Pigweed will need to be updated to compile
+with this option. After all projects have migrated, unused result warnings will
+be enabled unconditionally.
C compatibility
---------------
diff --git a/pw_stm32cube_build/py/pw_stm32cube_build/find_files.py b/pw_stm32cube_build/py/pw_stm32cube_build/find_files.py
index 903c05b1e..6e233ef88 100644
--- a/pw_stm32cube_build/py/pw_stm32cube_build/find_files.py
+++ b/pw_stm32cube_build/py/pw_stm32cube_build/find_files.py
@@ -209,6 +209,7 @@ _INCLUDE_DIRS = [
'hal_driver/Inc/Legacy',
'cmsis_device/Include',
'cmsis_core/Include',
+ 'cmsis_core/DSP/Include',
]
diff --git a/pw_stream/public/pw_stream/stream.h b/pw_stream/public/pw_stream/stream.h
index a307d67b8..31fdb333e 100644
--- a/pw_stream/public/pw_stream/stream.h
+++ b/pw_stream/public/pw_stream/stream.h
@@ -264,7 +264,7 @@ class Stream {
Seekability seekability_;
};
-// A Stream that supports writing but not reading. The Write() method is hidden.
+// A Stream that supports reading but not writing. The Write() method is hidden.
//
// Use in APIs when:
// * Must read from, but not write to, a stream.
diff --git a/pw_string/string_builder.cc b/pw_string/string_builder.cc
index 50dddc647..9317cc217 100644
--- a/pw_string/string_builder.cc
+++ b/pw_string/string_builder.cc
@@ -29,13 +29,13 @@ void StringBuilder::clear() {
}
StringBuilder& StringBuilder::append(size_t count, char ch) {
- char* const append_destination = buffer_.begin() + size_;
+ char* const append_destination = buffer_.data() + size_;
std::fill_n(append_destination, ResizeAndTerminate(count), ch);
return *this;
}
StringBuilder& StringBuilder::append(const char* str, size_t count) {
- char* const append_destination = buffer_.begin() + size_;
+ char* const append_destination = buffer_.data() + size_;
std::copy_n(str, ResizeAndTerminate(count), append_destination);
return *this;
}
diff --git a/pw_sync_embos/BUILD.gn b/pw_sync_embos/BUILD.gn
index 043ae729c..c3997484b 100644
--- a/pw_sync_embos/BUILD.gn
+++ b/pw_sync_embos/BUILD.gn
@@ -14,6 +14,7 @@
import("//build_overrides/pigweed.gni")
+import("$dir_pw_build/error.gni")
import("$dir_pw_build/target_types.gni")
import("$dir_pw_chrono/backend.gni")
import("$dir_pw_docgen/docs.gni")
@@ -28,6 +29,15 @@ config("backend_config") {
visibility = [ ":*" ]
}
+pw_build_assert("check_system_clock_backend") {
+ condition =
+ pw_chrono_SYSTEM_CLOCK_BACKEND == "" ||
+ pw_chrono_SYSTEM_CLOCK_BACKEND == "$dir_pw_chrono_embos:system_clock"
+ message = "The embOS pw_sync backends only work with the " +
+ "embOS pw::chrono::SystemClock backend."
+ visibility = [ ":*" ]
+}
+
# This target provides the backend for pw::sync::BinarySemaphore.
pw_source_set("binary_semaphore") {
public_configs = [
@@ -48,12 +58,10 @@ pw_source_set("binary_semaphore") {
"$dir_pw_third_party/embos",
]
sources = [ "binary_semaphore.cc" ]
- deps = [ "$dir_pw_sync:binary_semaphore.facade" ]
- assert(
- pw_chrono_SYSTEM_CLOCK_BACKEND == "" ||
- pw_chrono_SYSTEM_CLOCK_BACKEND == "$dir_pw_chrono_embos:system_clock",
- "The embOS pw::sync::BinarySemaphore backend only works with the " +
- "embOS pw::chrono::SystemClock backend.")
+ deps = [
+ ":check_system_clock_backend",
+ "$dir_pw_sync:binary_semaphore.facade",
+ ]
}
# This target provides the backend for pw::sync::CountingSemaphore.
@@ -76,12 +84,10 @@ pw_source_set("counting_semaphore") {
"$dir_pw_third_party/embos",
]
sources = [ "counting_semaphore.cc" ]
- deps = [ "$dir_pw_sync:counting_semaphore.facade" ]
- assert(
- pw_chrono_SYSTEM_CLOCK_BACKEND == "" ||
- pw_chrono_SYSTEM_CLOCK_BACKEND == "$dir_pw_chrono_embos:system_clock",
- "The embOS pw::sync::CountingSemaphore backend only works with " +
- "the embOS pw::chrono::SystemClock backend.")
+ deps = [
+ ":check_system_clock_backend",
+ "$dir_pw_sync:counting_semaphore.facade",
+ ]
}
# This target provides the backend for pw::sync::Mutex.
@@ -120,16 +126,12 @@ pw_source_set("timed_mutex") {
]
sources = [ "timed_mutex.cc" ]
deps = [
+ ":check_system_clock_backend",
"$dir_pw_assert",
"$dir_pw_chrono_embos:system_clock",
"$dir_pw_interrupt:context",
"$dir_pw_third_party/embos",
]
- assert(
- pw_chrono_SYSTEM_CLOCK_BACKEND == "" ||
- pw_chrono_SYSTEM_CLOCK_BACKEND == "$dir_pw_chrono_embos:system_clock",
- "The embOS pw::sync::Mutex backend only works with the embOS " +
- "pw::chrono::SystemClock backend.")
}
# This target provides the backend for pw::sync::InterruptSpinLock.
diff --git a/pw_sync_freertos/BUILD.gn b/pw_sync_freertos/BUILD.gn
index 82665f113..c560d907d 100644
--- a/pw_sync_freertos/BUILD.gn
+++ b/pw_sync_freertos/BUILD.gn
@@ -14,6 +14,7 @@
import("//build_overrides/pigweed.gni")
+import("$dir_pw_build/error.gni")
import("$dir_pw_build/module_config.gni")
import("$dir_pw_build/target_types.gni")
import("$dir_pw_chrono/backend.gni")
@@ -45,6 +46,15 @@ pw_source_set("config") {
]
}
+pw_build_assert("check_system_clock_backend") {
+ condition =
+ pw_chrono_SYSTEM_CLOCK_BACKEND == "" ||
+ pw_chrono_SYSTEM_CLOCK_BACKEND == "$dir_pw_chrono_freertos:system_clock"
+ message = "The FreeRTOS pw_sync backends only work with the FreeRTOS " +
+ "pw::chrono::SystemClock backend."
+ visibility = [ ":*" ]
+}
+
# This target provides the backend for pw::sync::BinarySemaphore.
pw_source_set("binary_semaphore") {
public_configs = [
@@ -65,12 +75,10 @@ pw_source_set("binary_semaphore") {
"$dir_pw_third_party/freertos",
]
sources = [ "binary_semaphore.cc" ]
- deps = [ "$dir_pw_sync:binary_semaphore.facade" ]
- assert(pw_chrono_SYSTEM_CLOCK_BACKEND == "" ||
- pw_chrono_SYSTEM_CLOCK_BACKEND ==
- "$dir_pw_chrono_freertos:system_clock",
- "The FreeRTOS pw::sync::BinarySemaphore backend only works with the " +
- "FreeRTOS pw::chrono::SystemClock backend.")
+ deps = [
+ ":check_system_clock_backend",
+ "$dir_pw_sync:binary_semaphore.facade",
+ ]
}
# This target provides the backend for pw::sync::CountingSemaphore.
@@ -93,12 +101,10 @@ pw_source_set("counting_semaphore") {
"$dir_pw_third_party/freertos",
]
sources = [ "counting_semaphore.cc" ]
- deps = [ "$dir_pw_sync:counting_semaphore.facade" ]
- assert(pw_chrono_SYSTEM_CLOCK_BACKEND == "" ||
- pw_chrono_SYSTEM_CLOCK_BACKEND ==
- "$dir_pw_chrono_freertos:system_clock",
- "The FreeRTOS pw::sync::CountingSemaphore backend only works with " +
- "the FreeRTOS pw::chrono::SystemClock backend.")
+ deps = [
+ ":check_system_clock_backend",
+ "$dir_pw_sync:counting_semaphore.facade",
+ ]
}
# This target provides the backend for pw::sync::Mutex.
@@ -121,6 +127,30 @@ pw_source_set("mutex") {
]
}
+# This target provides the backend for pw::sync::TimedMutex.
+pw_source_set("timed_mutex") {
+ public_configs = [
+ ":public_include_path",
+ ":backend_config",
+ ]
+ public = [
+ "public/pw_sync_freertos/timed_mutex_inline.h",
+ "public_overrides/pw_sync_backend/timed_mutex_inline.h",
+ ]
+ public_deps = [
+ "$dir_pw_chrono:system_clock",
+ "$dir_pw_sync:timed_mutex.facade",
+ ]
+ sources = [ "timed_mutex.cc" ]
+ deps = [
+ ":check_system_clock_backend",
+ "$dir_pw_assert",
+ "$dir_pw_chrono_freertos:system_clock",
+ "$dir_pw_interrupt:context",
+ "$dir_pw_third_party/freertos",
+ ]
+}
+
config("public_overrides_thread_notification_include_path") {
include_dirs = [ "public_overrides/thread_notification" ]
visibility = [ ":thread_notification" ]
@@ -168,51 +198,18 @@ pw_source_set("timed_thread_notification") {
"public_overrides/timed_thread_notification/pw_sync_backend/timed_thread_notification_inline.h",
]
public_deps = [
- ":thread_notification",
"$dir_pw_chrono:system_clock",
"$dir_pw_sync:timed_thread_notification.facade",
]
sources = [ "timed_thread_notification.cc" ]
deps = [
+ ":check_system_clock_backend",
":config",
"$dir_pw_assert",
"$dir_pw_chrono_freertos:system_clock",
"$dir_pw_interrupt:context",
"$dir_pw_third_party/freertos",
]
- assert(pw_chrono_SYSTEM_CLOCK_BACKEND == "" ||
- pw_chrono_SYSTEM_CLOCK_BACKEND ==
- "$dir_pw_chrono_freertos:system_clock",
- "The FreeRTOS pw::sync::Mutex backend only works with the FreeRTOS " +
- "pw::chrono::SystemClock backend.")
-}
-
-# This target provides the backend for pw::sync::TimedMutex.
-pw_source_set("timed_mutex") {
- public_configs = [
- ":public_include_path",
- ":backend_config",
- ]
- public = [
- "public/pw_sync_freertos/timed_mutex_inline.h",
- "public_overrides/pw_sync_backend/timed_mutex_inline.h",
- ]
- public_deps = [
- "$dir_pw_chrono:system_clock",
- "$dir_pw_sync:timed_mutex.facade",
- ]
- sources = [ "timed_mutex.cc" ]
- deps = [
- "$dir_pw_assert",
- "$dir_pw_chrono_freertos:system_clock",
- "$dir_pw_interrupt:context",
- "$dir_pw_third_party/freertos",
- ]
- assert(pw_chrono_SYSTEM_CLOCK_BACKEND == "" ||
- pw_chrono_SYSTEM_CLOCK_BACKEND ==
- "$dir_pw_chrono_freertos:system_clock",
- "The FreeRTOS pw::sync::Mutex backend only works with the FreeRTOS " +
- "pw::chrono::SystemClock backend.")
}
# This target provides the backend for pw::sync::InterruptSpinLock.
@@ -233,7 +230,6 @@ pw_source_set("interrupt_spin_lock") {
"$dir_pw_assert",
"$dir_pw_interrupt:context",
"$dir_pw_sync:interrupt_spin_lock.facade",
- "$dir_pw_third_party/freertos",
]
}
diff --git a/pw_sync_freertos/CMakeLists.txt b/pw_sync_freertos/CMakeLists.txt
new file mode 100644
index 000000000..6139c3966
--- /dev/null
+++ b/pw_sync_freertos/CMakeLists.txt
@@ -0,0 +1,177 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
+pw_add_module_config(pw_sync_freertos_CONFIG)
+
+pw_add_module_library(pw_sync_freertos.config
+ HEADERS
+ public/pw_sync_freertos/config.h
+ PUBLIC_INCLUDES
+ public
+ PUBLIC_DEPS
+ pw_third_party.freertos
+ ${pw_sync_freertos_CONFIG}
+)
+
+# TODO(ewout): Add system_clock backend compatibility check like in GN.
+
+# This target provides the backend for pw::sync::BinarySemaphore.
+pw_add_module_library(pw_sync_freertos.binary_semaphore
+ IMPLEMENTS_FACADES
+ pw_sync.binary_semaphore
+ HEADERS
+ public/pw_sync_freertos/binary_semaphore_inline.h
+ public/pw_sync_freertos/binary_semaphore_native.h
+ public_overrides/pw_sync_backend/binary_semaphore_inline.h
+ public_overrides/pw_sync_backend/binary_semaphore_native.h
+ PUBLIC_INCLUDES
+ public
+ public_overrides
+ PUBLIC_DEPS
+ pw_assert
+ pw_chrono.system_clock
+ pw_chrono_freertos.system_clock
+ pw_interrupt.context
+ pw_third_party.freertos
+ SOURCES
+ binary_semaphore.cc
+)
+
+# This target provides the backend for pw::sync::CountingSemaphore.
+pw_add_module_library(pw_sync_freertos.counting_semaphore
+ IMPLEMENTS_FACADES
+ pw_sync.counting_semaphore
+ HEADERS
+ public/pw_sync_freertos/counting_semaphore_inline.h
+ public/pw_sync_freertos/counting_semaphore_native.h
+ public_overrides/pw_sync_backend/counting_semaphore_inline.h
+ public_overrides/pw_sync_backend/counting_semaphore_native.h
+ PUBLIC_INCLUDES
+ public
+ public_overrides
+ PUBLIC_DEPS
+ pw_assert
+ pw_chrono.system_clock
+ pw_chrono_freertos.system_clock
+ pw_interrupt.context
+ pw_third_party.freertos
+ SOURCES
+ counting_semaphore.cc
+)
+
+# This target provides the backend for pw::sync::Mutex.
+pw_add_module_library(pw_sync_freertos.mutex
+ IMPLEMENTS_FACADES
+ pw_sync.mutex
+ HEADERS
+ public/pw_sync_freertos/mutex_inline.h
+ public/pw_sync_freertos/mutex_native.h
+ public_overrides/pw_sync_backend/mutex_inline.h
+ public_overrides/pw_sync_backend/mutex_native.h
+ PUBLIC_INCLUDES
+ public
+ public_overrides
+ PUBLIC_DEPS
+ pw_assert
+ pw_interrupt.context
+ pw_third_party.freertos
+)
+
+# This target provides the backend for pw::sync::TimedMutex.
+pw_add_module_library(pw_sync_freertos.timed_mutex
+ IMPLEMENTS_FACADES
+ pw_sync.timed_mutex
+ HEADERS
+ public/pw_sync_freertos/timed_mutex_inline.h
+ public_overrides/pw_sync_backend/timed_mutex_inline.h
+ PUBLIC_INCLUDES
+ public
+ public_overrides
+ PUBLIC_DEPS
+ pw_chrono.system_clock
+ SOURCES
+ timed_mutex.cc
+ PRIVATE_DEPS
+ pw_assert
+ pw_chrono_freertos.system_clock
+ pw_interrupt.context
+ pw_third_party.freertos
+)
+
+# This target provides the backend for pw::sync::ThreadNotification.
+pw_add_module_library(pw_sync_freertos.thread_notification
+ IMPLEMENTS_FACADES
+ pw_sync.thread_notification
+ HEADERS
+ public/pw_sync_freertos/thread_notification_inline.h
+ public/pw_sync_freertos/thread_notification_native.h
+ public_overrides/thread_notification/pw_sync_backend/thread_notification_inline.h
+ public_overrides/thread_notification/pw_sync_backend/thread_notification_native.h
+ PUBLIC_INCLUDES
+ public
+ public_overrides/thread_notification
+ PUBLIC_DEPS
+ pw_interrupt.context
+ pw_third_party.freertos
+ SOURCES
+ thread_notification.cc
+ PRIVATE_DEPS
+ pw_assert
+ pw_sync_freertos.config
+)
+
+# This target provides the backend for pw::sync::TimedThreadNotification.
+pw_add_module_library(pw_sync_freertos.timed_thread_notification
+ IMPLEMENTS_FACADES
+ pw_sync.timed_thread_notification
+ HEADERS
+ public/pw_sync_freertos/timed_thread_notification_inline.h
+ public_overrides/timed_thread_notification/pw_sync_backend/timed_thread_notification_inline.h
+ PUBLIC_INCLUDES
+ public
+ public_overrides/timed_thread_notification
+ PUBLIC_DEPS
+ pw_chrono.system_clock
+ SOURCES
+ timed_thread_notification.cc
+ PRIVATE_DEPS
+ pw_sync_freertos.config
+ pw_assert
+ pw_chrono_freertos.system_clock
+ pw_interrupt.context
+ pw_third_party.freertos
+)
+
+# This target provides the backend for pw::sync::InterruptSpinLock.
+pw_add_module_library(pw_sync_freertos.interrupt_spin_lock
+ IMPLEMENTS_FACADES
+ pw_sync.interrupt_spin_lock
+ HEADERS
+ public/pw_sync_freertos/interrupt_spin_lock_inline.h
+ public/pw_sync_freertos/interrupt_spin_lock_native.h
+ public_overrides/pw_sync_backend/interrupt_spin_lock_inline.h
+ public_overrides/pw_sync_backend/interrupt_spin_lock_native.h
+ PUBLIC_INCLUDES
+ public
+ public_overrides
+ PUBLIC_DEPS
+ pw_third_party.freertos
+ SOURCES
+ interrupt_spin_lock.cc
+ PRIVATE_DEPS
+ pw_assert
+ pw_interrupt.context
+)
diff --git a/pw_sync_freertos/thread_notification.cc b/pw_sync_freertos/thread_notification.cc
index 0c11b61be..9fc5f08ff 100644
--- a/pw_sync_freertos/thread_notification.cc
+++ b/pw_sync_freertos/thread_notification.cc
@@ -48,6 +48,10 @@ void ThreadNotification::acquire() {
// Enforce that only a single thread can block at a time.
PW_DCHECK(native_type_.blocked_thread == nullptr);
+ // Ensure that no one forgot to clean up nor corrupted the task notification
+ // state in the TCB.
+ PW_DCHECK(xTaskNotifyStateClear(nullptr) == pdFALSE);
+
taskENTER_CRITICAL();
if (native_type_.notified) {
native_type_.notified = false;
diff --git a/pw_sync_freertos/timed_thread_notification.cc b/pw_sync_freertos/timed_thread_notification.cc
index 906e1fc71..e6d82bb22 100644
--- a/pw_sync_freertos/timed_thread_notification.cc
+++ b/pw_sync_freertos/timed_thread_notification.cc
@@ -52,6 +52,10 @@ bool TimedThreadNotification::try_acquire_for(SystemClock::duration timeout) {
// Enforce that only a single thread can block at a time.
PW_DCHECK(native_handle().blocked_thread == nullptr);
+ // Ensure that no one forgot to clean up nor corrupted the task notification
+ // state in the TCB.
+ PW_DCHECK(xTaskNotifyStateClear(nullptr) == pdFALSE);
+
taskENTER_CRITICAL();
{
const bool notified = native_handle().notified;
diff --git a/pw_sync_stl/BUILD.gn b/pw_sync_stl/BUILD.gn
index f3f07c2d7..42aea99ec 100644
--- a/pw_sync_stl/BUILD.gn
+++ b/pw_sync_stl/BUILD.gn
@@ -14,6 +14,7 @@
import("//build_overrides/pigweed.gni")
+import("$dir_pw_build/error.gni")
import("$dir_pw_build/target_types.gni")
import("$dir_pw_chrono/backend.gni")
import("$dir_pw_docgen/docs.gni")
@@ -28,6 +29,15 @@ config("backend_config") {
visibility = [ ":*" ]
}
+pw_build_assert("check_system_clock_backend") {
+ condition =
+ pw_chrono_SYSTEM_CLOCK_BACKEND == "" ||
+ pw_chrono_SYSTEM_CLOCK_BACKEND == "$dir_pw_chrono_stl:system_clock"
+ message = "The STL pw_sync backends only work with the STL " +
+ "pw::chrono::SystemClock backend."
+ visibility = [ ":*" ]
+}
+
# This target provides the backend for pw::sync::BinarySemaphore.
pw_source_set("binary_semaphore_backend") {
public_configs = [
@@ -42,15 +52,11 @@ pw_source_set("binary_semaphore_backend") {
]
sources = [ "binary_semaphore.cc" ]
deps = [
+ ":check_system_clock_backend",
"$dir_pw_assert",
"$dir_pw_chrono:system_clock",
"$dir_pw_sync:binary_semaphore.facade",
]
- assert(
- pw_chrono_SYSTEM_CLOCK_BACKEND == "" ||
- pw_chrono_SYSTEM_CLOCK_BACKEND == "$dir_pw_chrono_stl:system_clock",
- "The STL pw::sync::BinarySemaphore backend only works with the " +
- "STL pw::chrono::SystemClock backend.")
}
# This target provides the backend for pw::sync::CountingSemaphore.
@@ -67,15 +73,11 @@ pw_source_set("counting_semaphore_backend") {
]
sources = [ "counting_semaphore.cc" ]
deps = [
+ ":check_system_clock_backend",
"$dir_pw_assert",
"$dir_pw_chrono:system_clock",
"$dir_pw_sync:counting_semaphore.facade",
]
- assert(
- pw_chrono_SYSTEM_CLOCK_BACKEND == "" ||
- pw_chrono_SYSTEM_CLOCK_BACKEND == "$dir_pw_chrono_stl:system_clock",
- "The STL pw::sync::CountingSemaphore backend only works with the " +
- "STL pw::chrono::SystemClock backend.")
}
# This target provides the backend for pw::sync::Mutex.
@@ -108,11 +110,7 @@ pw_source_set("timed_mutex_backend") {
"public_overrides/pw_sync_backend/timed_mutex_inline.h",
]
public_deps = [ "$dir_pw_chrono:system_clock" ]
- assert(
- pw_chrono_SYSTEM_CLOCK_BACKEND == "" ||
- pw_chrono_SYSTEM_CLOCK_BACKEND == "$dir_pw_chrono_stl:system_clock",
- "The STL pw::sync::TimedMutex backend only works with the STL " +
- "pw::chrono::SystemClock backend.")
+ deps = [ ":check_system_clock_backend" ]
}
# This target provides the backend for pw::sync::InterruptSpinLock.
diff --git a/pw_sync_threadx/BUILD.gn b/pw_sync_threadx/BUILD.gn
index afeebe397..524c47864 100644
--- a/pw_sync_threadx/BUILD.gn
+++ b/pw_sync_threadx/BUILD.gn
@@ -14,6 +14,7 @@
import("//build_overrides/pigweed.gni")
+import("$dir_pw_build/error.gni")
import("$dir_pw_build/target_types.gni")
import("$dir_pw_chrono/backend.gni")
import("$dir_pw_docgen/docs.gni")
@@ -29,6 +30,15 @@ config("backend_config") {
visibility = [ ":*" ]
}
+pw_build_assert("check_system_clock_backend") {
+ condition =
+ pw_sync_OVERRIDE_SYSTEM_CLOCK_BACKEND_CHECK ||
+ pw_chrono_SYSTEM_CLOCK_BACKEND == "$dir_pw_chrono_threadx:system_clock"
+  message = "The ThreadX pw_sync backends only work with " +
+            "the ThreadX pw::chrono::SystemClock backend."
+ visibility = [ ":*" ]
+}
+
# This target provides the backend for pw::sync::Mutex.
pw_source_set("mutex") {
public_configs = [
@@ -70,15 +80,10 @@ if (pw_chrono_SYSTEM_CLOCK_BACKEND != "") {
]
sources = [ "binary_semaphore.cc" ]
deps = [
+ ":check_system_clock_backend",
"$dir_pw_sync:binary_semaphore.facade",
pw_chrono_SYSTEM_CLOCK_BACKEND,
]
- assert(
- pw_sync_OVERRIDE_SYSTEM_CLOCK_BACKEND_CHECK ||
- pw_chrono_SYSTEM_CLOCK_BACKEND ==
- "$dir_pw_chrono_threadx:system_clock",
- "The ThreadX pw::sync::BinarySemaphore backend only works with the " +
- "ThreadX pw::chrono::SystemClock backend.")
}
# This target provides the backend for pw::sync::CountingSemaphore.
@@ -101,14 +106,10 @@ if (pw_chrono_SYSTEM_CLOCK_BACKEND != "") {
]
sources = [ "counting_semaphore.cc" ]
deps = [
+ ":check_system_clock_backend",
"$dir_pw_sync:counting_semaphore.facade",
pw_chrono_SYSTEM_CLOCK_BACKEND,
]
- assert(pw_sync_OVERRIDE_SYSTEM_CLOCK_BACKEND_CHECK ||
- pw_chrono_SYSTEM_CLOCK_BACKEND ==
- "$dir_pw_chrono_threadx:system_clock",
- "The ThreadX pw::sync::CountingSemaphore backend only works with " +
- "the ThreadX pw::chrono::SystemClock backend.")
}
# This target provides the backend for pw::sync::TimedMutex.
@@ -127,16 +128,12 @@ if (pw_chrono_SYSTEM_CLOCK_BACKEND != "") {
]
sources = [ "timed_mutex.cc" ]
deps = [
+ ":check_system_clock_backend",
"$dir_pw_assert",
"$dir_pw_interrupt:context",
"$dir_pw_third_party/threadx",
pw_chrono_SYSTEM_CLOCK_BACKEND,
]
- assert(pw_sync_OVERRIDE_SYSTEM_CLOCK_BACKEND_CHECK ||
- pw_chrono_SYSTEM_CLOCK_BACKEND ==
- "$dir_pw_chrono_threadx:system_clock",
- "The ThreadX pw::sync::Mutex backend only works with the ThreadX " +
- "pw::chrono::SystemClock backend.")
}
}
diff --git a/pw_sys_io_emcraft_sf2/BUILD.bazel b/pw_sys_io_emcraft_sf2/BUILD.bazel
new file mode 100644
index 000000000..fb10a78c4
--- /dev/null
+++ b/pw_sys_io_emcraft_sf2/BUILD.bazel
@@ -0,0 +1,39 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load(
+ "//pw_build:pigweed.bzl",
+ "pw_cc_library",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+pw_cc_library(
+ name = "pw_sys_io_emcraft_sf2",
+ srcs = [
+ "pw_sys_io_emcraft_sf2_private/config.h",
+ "sys_io_emcraft_sf2.cc",
+ ],
+ hdrs = ["public/pw_sys_io_emcraft_sf2/init.h"],
+ target_compatible_with = [
+ "@platforms//os:none",
+ ],
+ deps = [
+ "//pw_boot_cortex_m:armv7m",
+ "//pw_preprocessor",
+ "//pw_sys_io",
+ ],
+)
diff --git a/pw_sys_io_emcraft_sf2/BUILD.gn b/pw_sys_io_emcraft_sf2/BUILD.gn
new file mode 100644
index 000000000..236722f70
--- /dev/null
+++ b/pw_sys_io_emcraft_sf2/BUILD.gn
@@ -0,0 +1,60 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/module_config.gni")
+import("$dir_pw_build/target_types.gni")
+import("$dir_pw_docgen/docs.gni")
+import("$dir_pw_third_party/smartfusion_mss/mss.gni")
+
+declare_args() {
+ # The build target that overrides the default configuration options for this
+ # module. This should point to a source set that provides defines through a
+ # public config (which may -include a file or add defines directly).
+ pw_sys_io_emcraft_sf2_CONFIG = pw_build_DEFAULT_MODULE_CONFIG
+}
+
+config("public_includes") {
+ include_dirs = [ "public" ]
+}
+
+pw_source_set("config") {
+ public_deps = [ pw_sys_io_emcraft_sf2_CONFIG ]
+ public = [ "pw_sys_io_emcraft_sf2_private/config.h" ]
+ visibility = [ ":*" ]
+}
+
+pw_source_set("pw_sys_io_emcraft_sf2") {
+ public_configs = [ ":public_includes" ]
+ public_deps = [
+ "$dir_pw_preprocessor",
+ "$dir_pw_third_party/smartfusion_mss",
+ ]
+ if (dir_pw_third_party_smartfusion_mss != "") {
+ public_deps += [ "$dir_pw_third_party/smartfusion_mss" ]
+ }
+ public = [ "public/pw_sys_io_emcraft_sf2/init.h" ]
+ sources = [ "sys_io_emcraft_sf2.cc" ]
+ deps = [
+ ":config",
+ "$dir_pw_status",
+ "$dir_pw_sys_io:default_putget_bytes",
+ "$dir_pw_sys_io:facade",
+ ]
+}
+
+pw_doc_group("docs") {
+ sources = [ "docs.rst" ]
+}
diff --git a/pw_sys_io_emcraft_sf2/docs.rst b/pw_sys_io_emcraft_sf2/docs.rst
new file mode 100644
index 000000000..66ef6c940
--- /dev/null
+++ b/pw_sys_io_emcraft_sf2/docs.rst
@@ -0,0 +1,44 @@
+.. _module-pw_sys_io_emcraft_sf2:
+
+---------------------
+pw_sys_io_emcraft_sf2
+---------------------
+
+``pw_sys_io_emcraft_sf2`` implements the ``pw_sys_io`` facade over
+UART.
+
+The Emcraft SF2 sys IO backend provides a UART driver layer that allows
+applications built against the ``pw_sys_io`` interface to run on a
+SmartFusion/2 chip and do simple input/output via UART. However, this should
+work with all SmartFusion2 variants.
+
+This backend allows you to configure which UART to use. The point of it is to
+provide bare-minimum platform code needed to do UART reads/writes.
+
+Setup
+=====
+This module requires relatively minimal setup:
+
+ 1. Write code against the ``pw_sys_io`` facade.
+ 2. Specify the ``dir_pw_sys_io_backend`` GN global variable to point to this
+ backend.
+ 3. pw_sys_io_Init() provided by this module needs to be called in early boot
+ to get pw_sys_io into a working state.
+ 4. Build an executable with a main() function using a toolchain that
+ supports Cortex-M3.
+
+.. note::
+ This module provides early firmware init, so it will conflict with other
+ modules that do any early device init.
+
+Module usage
+============
+After building an executable that utilizes this backend, flash the
+produced .elf binary to the development board. Then, using a serial
+communication terminal like minicom/screen (Linux/Mac) or TeraTerm (Windows),
+connect to the device at a baud rate of 57600 (8N1).
+
+Dependencies
+============
+ * ``pw_sys_io`` facade
+ * ``pw_preprocessor`` module
diff --git a/pw_sys_io_emcraft_sf2/public/pw_sys_io_emcraft_sf2/init.h b/pw_sys_io_emcraft_sf2/public/pw_sys_io_emcraft_sf2/init.h
new file mode 100644
index 000000000..ccfd61e3c
--- /dev/null
+++ b/pw_sys_io_emcraft_sf2/public/pw_sys_io_emcraft_sf2/init.h
@@ -0,0 +1,23 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_preprocessor/util.h"
+
+PW_EXTERN_C_START
+
+// The actual implementation of PreMainInit() in sys_io_BACKEND.
+void pw_sys_io_Init(void);
+
+PW_EXTERN_C_END
diff --git a/pw_sys_io_emcraft_sf2/pw_sys_io_emcraft_sf2_private/config.h b/pw_sys_io_emcraft_sf2/pw_sys_io_emcraft_sf2_private/config.h
new file mode 100644
index 000000000..fb3db3a17
--- /dev/null
+++ b/pw_sys_io_emcraft_sf2/pw_sys_io_emcraft_sf2_private/config.h
@@ -0,0 +1,22 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#pragma once
+
+// Defaults to USART1 on the SmartFusion2, but can be overridden.
+
+// The USART peripheral number to use. (1 for USART1, 2 for USART2, etc.)
+#ifndef PW_SYS_IO_EMCRAFT_SF2_USART_NUM
+#define PW_SYS_IO_EMCRAFT_SF2_USART_NUM 1
+#endif // PW_SYS_IO_EMCRAFT_SF2_USART_NUM
diff --git a/pw_sys_io_emcraft_sf2/sys_io_emcraft_sf2.cc b/pw_sys_io_emcraft_sf2/sys_io_emcraft_sf2.cc
new file mode 100644
index 000000000..d94a621b5
--- /dev/null
+++ b/pw_sys_io_emcraft_sf2/sys_io_emcraft_sf2.cc
@@ -0,0 +1,104 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <cinttypes>
+
+#include "mss_gpio/mss_gpio.h"
+#include "mss_uart/mss_uart.h"
+#include "pw_preprocessor/concat.h"
+#include "pw_status/status.h"
+#include "pw_sys_io/sys_io.h"
+#include "pw_sys_io_emcraft_sf2_private/config.h"
+
+namespace {
+
+// LEDs GPIOs
+
+constexpr mss_gpio_id_t kDs3LedGPIO = MSS_GPIO_1;
+constexpr mss_gpio_id_t kDs4LEDGPIO = MSS_GPIO_2;
+constexpr uint32_t kDs3LedMask = MSS_GPIO_1_MASK;
+constexpr uint32_t kDs4LedMask = MSS_GPIO_2_MASK;
+
+constexpr uint32_t kReadDataReady = 0x1u;
+
+} // namespace
+
+extern "C" void pw_sys_io_Init() {
+ // Configure MSS GPIOs.
+#if SF2_MSS_NO_BOOTLOADER
+ MSS_GPIO_init();
+#endif
+
+ MSS_GPIO_config(kDs3LedGPIO, MSS_GPIO_OUTPUT_MODE);
+ MSS_GPIO_config(kDs4LEDGPIO, MSS_GPIO_OUTPUT_MODE);
+ // Set LEDs to initial app state
+ MSS_GPIO_set_outputs(MSS_GPIO_get_outputs() | kDs4LedMask);
+
+ // Initialize the UART0 controller (57600, 8N1)
+  // Due to a HW erratum in SF2, we need to run at 57600 for
+  // in-system-programming mode. If we are not upgrading FPGA or flash then we
+  // can use a faster baud rate.
+ MSS_UART_init(
+ &g_mss_uart0,
+ MSS_UART_57600_BAUD,
+ MSS_UART_DATA_8_BITS | MSS_UART_NO_PARITY | MSS_UART_ONE_STOP_BIT);
+}
+
+// This whole implementation is very inefficient because it uses the synchronous
+// polling UART API and only reads / writes 1 byte at a time.
+namespace pw::sys_io {
+
+Status ReadByte(std::byte* dest) {
+ while (true) {
+ if (TryReadByte(dest).ok()) {
+ return OkStatus();
+ }
+ }
+}
+
+Status TryReadByte(std::byte* dest) {
+ if (!(g_mss_uart0.hw_reg->LSR & kReadDataReady)) {
+ return Status::Unavailable();
+ }
+
+ *dest = static_cast<std::byte>(g_mss_uart0.hw_reg->RBR);
+ return OkStatus();
+}
+
+Status WriteByte(std::byte b) {
+  // MSS_UART_polled_tx performs a synchronous (polled) write, blocking until
+  // the byte has been handed to the UART.
+ const uint8_t pbuff = (uint8_t)b;
+
+ MSS_UART_polled_tx(&g_mss_uart0, &pbuff, 1);
+ return OkStatus();
+}
+
+// Writes a string using pw::sys_io, and adds newline characters at the end.
+StatusWithSize WriteLine(const std::string_view& s) {
+ size_t chars_written = 0;
+ StatusWithSize result = WriteBytes(std::as_bytes(std::span(s)));
+ if (!result.ok()) {
+ return result;
+ }
+ chars_written += result.size();
+
+ // Write trailing newline.
+ result = WriteBytes(std::as_bytes(std::span("\r\n", 2)));
+ chars_written += result.size();
+
+ return StatusWithSize(OkStatus(), chars_written);
+}
+
+} // namespace pw::sys_io
diff --git a/pw_sys_io_stm32cube/BUILD.gn b/pw_sys_io_stm32cube/BUILD.gn
index 9f89c101c..edf540a50 100644
--- a/pw_sys_io_stm32cube/BUILD.gn
+++ b/pw_sys_io_stm32cube/BUILD.gn
@@ -17,7 +17,7 @@ import("//build_overrides/pigweed.gni")
import("$dir_pw_build/module_config.gni")
import("$dir_pw_build/target_types.gni")
import("$dir_pw_docgen/docs.gni")
-import("$dir_pw_docgen/docs.gni")
+import("$dir_pw_third_party/stm32cube/stm32cube.gni")
declare_args() {
# The build target that overrides the default configuration options for this
@@ -41,8 +41,10 @@ pw_source_set("pw_sys_io_stm32cube") {
public_deps = [
"$dir_pw_preprocessor",
"$dir_pw_status",
- "$dir_pw_third_party/stm32cube",
]
+ if (dir_pw_third_party_stm32cube != "") {
+ public_deps += [ "$dir_pw_third_party/stm32cube" ]
+ }
public = [ "public/pw_sys_io_stm32cube/init.h" ]
sources = [ "sys_io.cc" ]
deps = [
diff --git a/pw_sys_io_zephyr/Kconfig b/pw_sys_io_zephyr/Kconfig
index efda27052..6fd75b763 100644
--- a/pw_sys_io_zephyr/Kconfig
+++ b/pw_sys_io_zephyr/Kconfig
@@ -16,3 +16,20 @@ config PIGWEED_SYS_IO
bool "Enable the Zephyr system IO module"
select PIGWEED_SPAN
select PIGWEED_STATUS
+ help
+ The system I/O module uses the Zephyr console under the hood to perform
+ I/O operations.
+
+if PIGWEED_SYS_IO
+
+config PIGWEED_SYS_IO_INIT_PRIORITY
+ int "The initialization priority of the system I/O module"
+ default 1
+ help
+ The system I/O module uses the APPLICATION initialization level. Use this
+ config to set the priority.
+
+config PIGWEED_SYS_IO_USB
+ bool "Use the USB for I/O"
+
+endif # PIGWEED_SYS_IO
diff --git a/pw_sys_io_zephyr/docs.rst b/pw_sys_io_zephyr/docs.rst
index b9820ea70..6ae4841ff 100644
--- a/pw_sys_io_zephyr/docs.rst
+++ b/pw_sys_io_zephyr/docs.rst
@@ -7,5 +7,11 @@ pw_sys_io_zephyr
--------
Overview
--------
-This sys IO backend implements the ``pw_sys_io`` facade. To enable, set
-``CONFIG_PIGWEED_SYS_IO=y``.
+This sys I/O backend implements the ``pw_sys_io`` facade. To enable, set
+``CONFIG_PIGWEED_SYS_IO=y``. Once enabled, I/O operations will be routed to
+Zephyr's console. Additionally, it is possible to enable the USB subsystem
+by setting ``CONFIG_PIGWEED_SYS_IO_USB=y``.
+
+The I/O backend initializes during Zephyr's ``APPLICATION`` level and uses
+``CONFIG_PIGWEED_SYS_IO_INIT_PRIORITY`` to set the priority level. This config
+value defaults to 1, but is configurable via Kconfig.
diff --git a/pw_sys_io_zephyr/sys_io.cc b/pw_sys_io_zephyr/sys_io.cc
index fe0faf57e..fd9255e63 100644
--- a/pw_sys_io_zephyr/sys_io.cc
+++ b/pw_sys_io_zephyr/sys_io.cc
@@ -14,17 +14,60 @@
#include "pw_sys_io/sys_io.h"
-#include <sys/printk.h>
+#include <console/console.h>
+#include <init.h>
+#include <usb/usb_device.h>
+#include <zephyr.h>
+
+static int sys_io_init(const struct device* dev) {
+ int err;
+ ARG_UNUSED(dev);
+
+ if (IS_ENABLED(CONFIG_PIGWEED_SYS_IO_USB)) {
+ err = usb_enable(nullptr);
+ if (err) {
+ return err;
+ }
+ }
+ err = console_init();
+ return err;
+}
+
+SYS_INIT(sys_io_init, APPLICATION, CONFIG_PIGWEED_SYS_IO_INIT_PRIORITY);
namespace pw::sys_io {
-Status ReadByte(std::byte*) { return Status::Unimplemented(); }
+Status ReadByte(std::byte* dest) {
+ if (dest == nullptr) {
+ return Status::InvalidArgument();
+ }
+
+ const int c = console_getchar();
+ *dest = static_cast<std::byte>(c);
-Status TryReadByte(std::byte*) { return Status::Unimplemented(); }
+ return c < 0 ? Status::FailedPrecondition() : OkStatus();
+}
+
+Status TryReadByte(std::byte* dest) {
+ if (dest == nullptr) {
+ return Status::InvalidArgument();
+ }
+
+ uint8_t byte;
+ int result = console_read(nullptr, &byte, 1);
+
+ if (result >= 0) {
+ *dest = static_cast<std::byte>(byte);
+ return OkStatus();
+ }
+
+ return Status::Unavailable();
+}
Status WriteByte(std::byte b) {
- printk("%c", static_cast<char>(b));
- return OkStatus();
+ return console_putchar(static_cast<char>(b)) < 0
+ ? Status::FailedPrecondition()
+ : OkStatus();
}
StatusWithSize WriteLine(const std::string_view& s) {
diff --git a/pw_system/BUILD.bazel b/pw_system/BUILD.bazel
index bec18f6d4..0a16c5722 100644
--- a/pw_system/BUILD.bazel
+++ b/pw_system/BUILD.bazel
@@ -22,6 +22,10 @@ package(default_visibility = ["//visibility:public"])
licenses(["notice"])
+# WARNING: Many of the dependencies in this file are missing and need to be
+# added/updated. This is provided as a starting point, but currently does not
+# work.
+
pw_cc_library(
name = "config",
hdrs = [
@@ -37,43 +41,66 @@ pw_cc_library(
hdrs = [
"pw_system_private/log.h",
],
- includes = ["public"],
deps = [
- ":logger",
- ":rpc",
- "//pw_bytes",
- "//pw_chrono:system_clock",
- "//pw_log",
- "//pw_log:proto_utils",
+ ":config",
+ ":rpc_server",
"//pw_log_rpc:log_service",
"//pw_log_rpc:rpc_log_drain",
"//pw_log_rpc:rpc_log_drain_thread",
+ "//pw_multisink",
+ "//pw_sync:lock_annotations",
+ "//pw_sync:mutex",
+ ],
+)
+
+pw_cc_library(
+ name = "log_backend",
+ srcs = [
+ "log_backend.cc",
+ ],
+ deps = [
+ ":config",
+ ":log",
+ "//pw_bytes",
+ "//pw_chrono:system_clock",
+ "//pw_log:facade",
+ "//pw_log:proto_utils",
+ "//pw_log_string:handler_facade",
"//pw_log_tokenized:metadata",
"//pw_multisink",
"//pw_result",
+ "//pw_string",
"//pw_sync:interrupt_spin_lock",
"//pw_sync:lock_annotations",
- "//pw_sync:mutex",
"//pw_tokenizer",
],
)
pw_cc_library(
- name = "rpc",
- srcs = [
- "rpc.cc",
- ],
+ name = "rpc_server",
hdrs = [
- "pw_system_private/rpc.h",
+ "public/pw_system/rpc_server.h",
+ ],
+ includes = ["public"],
+ deps = [
+ ":config",
+ ":hdlc_rpc_server",
+ ],
+)
+
+pw_cc_library(
+ name = "hdlc_rpc_server",
+ srcs = [
+ "hdlc_rpc_server.cc",
],
includes = ["public"],
deps = [
":io",
+ ":rpc_server",
":target_io",
"//pw_assert",
"//pw_hdlc:pw_rpc",
"//pw_hdlc:rpc_channel_output",
- "//pw_rpc/system_server:facade",
"//pw_sync:mutex",
"//pw_thread:thread_core",
],
@@ -100,7 +127,7 @@ pw_cc_library(
includes = ["public"],
deps = [
":log",
- ":rpc",
+ ":rpc_server",
"//pw_rpc/nanopb:echo_service",
"//pw_thread:thread",
],
diff --git a/pw_system/BUILD.gn b/pw_system/BUILD.gn
index ced870f0e..fd5cb5ad8 100644
--- a/pw_system/BUILD.gn
+++ b/pw_system/BUILD.gn
@@ -16,6 +16,7 @@ import("//build_overrides/pigweed.gni")
import("$dir_pigweed/third_party/freertos/freertos.gni")
import("$dir_pigweed/third_party/nanopb/nanopb.gni")
+import("$dir_pigweed/third_party/smartfusion_mss/mss.gni")
import("$dir_pigweed/third_party/stm32cube/stm32cube.gni")
import("$dir_pw_build/error.gni")
import("$dir_pw_build/facade.gni")
@@ -48,7 +49,7 @@ group("pw_system") {
":init",
":io",
":log",
- ":rpc",
+ ":rpc_server",
":work_queue",
]
deps = [ ":target_hooks" ]
@@ -61,53 +62,57 @@ pw_source_set("log") {
"pw_system_private/log.h",
]
public_deps = [
- ":config",
"$dir_pw_log_rpc:log_service",
"$dir_pw_log_rpc:rpc_log_drain_thread",
+ "$dir_pw_multisink",
+ ]
+ deps = [
+ ":config",
+ ":rpc_server",
+ "$dir_pw_log_rpc:rpc_log_drain",
+ "$dir_pw_sync:lock_annotations",
+ "$dir_pw_sync:mutex",
]
+}
+
+# There is no public part to this backend which does not cause circular
+# dependencies, there is only the pw_build_LINK_DEPS "log_backend.impl".
+pw_source_set("log_backend") {
+}
+
+pw_source_set("log_backend.impl") {
+ sources = [ "log_backend.cc" ]
deps = [
- ":rpc",
+ ":config",
+ ":log",
"$dir_pw_bytes",
"$dir_pw_chrono:system_clock",
- "$dir_pw_log",
"$dir_pw_log:proto_utils",
- "$dir_pw_log_rpc:rpc_log_drain",
+ "$dir_pw_log:pw_log.facade",
+ "$dir_pw_log_string:handler.facade",
"$dir_pw_log_tokenized:metadata",
"$dir_pw_multisink",
"$dir_pw_result",
"$dir_pw_string",
"$dir_pw_sync:interrupt_spin_lock",
"$dir_pw_sync:lock_annotations",
- "$dir_pw_sync:mutex",
"$dir_pw_tokenizer",
"$dir_pw_tokenizer:global_handler_with_payload.facade",
]
}
-pw_source_set("rpc") {
- visibility = [ ":*" ]
- sources = [
- "pw_system_private/rpc.h",
- "rpc.cc",
- ]
+pw_facade("rpc_server") {
+ backend = pw_system_RPC_SERVER_BACKEND
+ public = [ "public/pw_system/rpc_server.h" ]
public_configs = [ ":public_include_path" ]
public_deps = [
":config",
- "$dir_pw_rpc/system_server:facade",
"$dir_pw_thread:thread_core",
]
- deps = [
- ":io",
- "$dir_pw_assert",
- "$dir_pw_hdlc:pw_rpc",
- "$dir_pw_hdlc:rpc_channel_output",
- "$dir_pw_sync:mutex",
- ]
}
pw_facade("io") {
backend = pw_system_IO_BACKEND
- visibility = [ ":*" ]
public_configs = [ ":public_include_path" ]
public = [ "public/pw_system/io.h" ]
public_deps = [ "$dir_pw_stream" ]
@@ -119,7 +124,7 @@ pw_source_set("init") {
sources = [ "init.cc" ]
deps = [
":log",
- ":rpc",
+ ":rpc_server",
":target_hooks.facade",
":work_queue",
"$dir_pw_rpc/nanopb:echo_service",
@@ -127,8 +132,21 @@ pw_source_set("init") {
]
}
+pw_source_set("hdlc_rpc_server") {
+ sources = [ "hdlc_rpc_server.cc" ]
+ deps = [
+ ":config",
+ ":io",
+ ":rpc_server.facade",
+ "$dir_pw_assert",
+ "$dir_pw_hdlc:pw_rpc",
+ "$dir_pw_hdlc:rpc_channel_output",
+ "$dir_pw_log",
+ "$dir_pw_sync:mutex",
+ ]
+}
+
pw_source_set("work_queue") {
- visibility = [ ":*" ]
public_configs = [ ":public_include_path" ]
public = [ "public/pw_system/work_queue.h" ]
sources = [ "work_queue.cc" ]
@@ -209,6 +227,14 @@ if (dir_pw_third_party_nanopb != "") {
dir_pw_third_party_freertos != "") {
deps += [ ":system_example($dir_pigweed/targets/stm32f429i_disc1_stm32cube:stm32f429i_disc1_stm32cube.size_optimized)" ]
}
+ if (dir_pw_third_party_smartfusion_mss != "" &&
+ dir_pw_third_party_freertos != "") {
+ deps += [
+ ":system_example($dir_pigweed/targets/emcraft_sf2_som:emcraft_sf2_som.size_optimized)",
+ ":system_example($dir_pigweed/targets/emcraft_sf2_som:emcraft_sf2_som.speed_optimized)",
+ ":system_example($dir_pigweed/targets/emcraft_sf2_som:emcraft_sf2_som_debug.debug)",
+ ]
+ }
}
} else {
pw_error("system_examples") {
diff --git a/pw_system/CMakeLists.txt b/pw_system/CMakeLists.txt
index 4e538fcf8..e63e71f29 100644
--- a/pw_system/CMakeLists.txt
+++ b/pw_system/CMakeLists.txt
@@ -18,126 +18,142 @@ include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
# added/updated. This is provided as a starting point, but currently does not
# work.
-pw_add_module_library("pw_system.config"
+pw_add_module_library(pw_system.config
HEADERS
- "public/pw_system/config.h"
+ public/pw_system/config.h
)
-pw_add_module_library("pw_system.log"
+pw_add_module_library(pw_system.log
PUBLIC_DEPS
- "pw_system.config"
- "pw_log_rpc.log_service"
- "pw_log_rpc.rpc_log_drain_thread"
+ pw_log_rpc.log_service
+ pw_log_rpc.rpc_log_drain_thread
+ pw_multisink
PRIVATE_DEPS
- "pw_system.rpc"
- "pw_bytes"
- "pw_chrono.system_clock"
- "pw_log"
- "pw_log.proto_utils"
- "pw_log_rpc.rpc_log_drain"
- "pw_log_tokenized.metadata"
- "pw_multisink"
- "pw_result"
- "pw_sync.interrupt_spin_lock"
- "pw_sync.lock_annotations"
- "pw_sync.mutex"
- "pw_tokenizer"
- "pw_tokenizer.global_handler_with_payload.facade"
- SOURCES
- "log.cc"
+ pw_system.config
+ pw_system.rpc_server
+ pw_log_rpc.rpc_log_drain
+ pw_sync.lock_annotations
+ pw_sync.mutex
HEADERS
- "pw_system_private/log.h"
+ pw_system_private/log.h
+ SOURCES
+ log.cc
)
-pw_add_module_library("pw_system.rpc"
- PUBLIC_DEPS
- "pw_system.config"
- "pw_rpc.system_server.facade"
- "pw_threadthread_core"
+pw_add_module_library(pw_system.log_backend
PRIVATE_DEPS
- "pw_system.io"
- "pw_system.target_io"
- "pw_assert"
- "pw_hdlc.pw_rpc"
- "pw_hdlc.rpc_channel_output"
- "pw_sync.mutex"
+ pw_system.config
+ pw_system.log
+ pw_bytes
+ pw_chrono.system_clock
+ pw_log.facade
+ pw_log.proto_utils
+ pw_log_string.handler.facade
+ pw_log_tokenized.metadata
+ pw_multisink
+ pw_result
+ pw_sync.interrupt_spin_lock
+ pw_sync.lock_annotations
+ pw_tokenizer
+ pw_tokenizer.global_handler_with_payload.facade
SOURCES
- "rpc.cc"
+ log_backend.cc
+)
+
+pw_add_facade(pw_system.rpc_server
+ PUBLIC_DEPS
+ pw_system.config
+ pw_thread.thread_core
HEADERS
- "pw_system_private/rpc.h"
+ public/pw_system/rpc_server.h
+)
+
+pw_add_module_library(pw_system.hdlc_rpc_server
+ PRIVATE_DEPS
+ pw_assert
+ pw_hdlc.pw_rpc
+ pw_hdlc.rpc_channel_output
+ pw_sync.mutex
+ pw_system.config
+ pw_system.io
+ pw_system.rpc_server.facade
+ pw_system.target_io
+ pw_thread.thread_core
+ SOURCES
+ hdlc_rpc_server.cc
)
-pw_add_module_library("pw_system.io"
+pw_add_module_library(pw_system.io
HEADERS
- "public/pw_system/io.h"
+ public/pw_system/io.h
PUBLIC_DEPS
- "pw_stream"
+ pw_stream
)
-pw_add_module_library("pw_system.init"
+pw_add_module_library(pw_system.init
PRIVATE_DEPS
- "pw_system.log"
- "pw_system.rpc"
- "pw_rpc.nanopb.echo_service"
- "pw_thread.thread"
+ pw_system.log
+ pw_system.rpc_server
+ pw_rpc.nanopb.echo_service
+ pw_thread.thread
SOURCES
- "init.cc"
+ init.cc
HEADERS
- "public/pw_system/init.h"
+ public/pw_system/init.h
)
-pw_add_module_library("pw_system.work_queue"
+pw_add_module_library(pw_system.work_queue
PRIVATE_DEPS
- "pw_work_queue"
+ pw_work_queue
SOURCES
- "work_queue.cc"
+ work_queue.cc
HEADERS
- "public/pw_system/work_queue.h"
+ public/pw_system/work_queue.h
)
-pw_add_module_library("pw_system.target_io"
+pw_add_module_library(pw_system.target_io
PRIVATE_DEPS
- "pw_system.io"
- "pw_stream"
- "pw_stream.sys_io_stream"
+ pw_system.io
+ pw_stream
+ pw_stream.sys_io_stream
SOURCES
- "target_io.cc"
+ target_io.cc
)
-pw_add_module_library("pw_system.target_hooks"
+pw_add_module_library(pw_system.target_hooks
PUBLIC_DEPS
- "pw_thread"
+ pw_thread
HEADERS
- "public/pw_system/target_hooks.h"
+ public/pw_system/target_hooks.h
)
-pw_add_module_library("pw_system.stl_target_hooks"
+pw_add_module_library(pw_system.stl_target_hooks
PRIVATE_DEPS
- "pw_thread.sleep"
- "pw_thread.thread"
- "pw_thread_stl.thread"
+ pw_thread.sleep
+ pw_thread.thread
+ pw_thread_stl.thread
SOURCES
- "stl_target_hooks.cc"
+ stl_target_hooks.cc
)
-pw_add_module_library("pw_system.freertos_target_hooks"
+pw_add_module_library(pw_system.freertos_target_hooks
SOURCES
- "freertos_target_hooks.cc"
+ freertos_target_hooks.cc
PRIVATE_DEPS
- "pw_thread.thread"
- "pw_thread_freertos.thread"
+ pw_thread.thread
+ pw_thread_freertos.thread
# TODO(pwbug/317): This should depend on FreeRTOS but our third parties
# currently do not have CMake support.
)
-pw_add_module_library("pw_system.system_example"
+pw_add_module_library(pw_system.system_example
PRIVATE_DEPS
- "pw_system.init"
- "pw_system.io"
- "pw_system.target_hooks"
- "pw_stream"
- "pw_stream.sys_io_stream"
+ pw_system.init
+ pw_system.io
+ pw_system.target_hooks
+ pw_stream
+ pw_stream.sys_io_stream
SOURCES
- "example_user_app_init.cc"
+ example_user_app_init.cc
)
diff --git a/pw_system/backend.gni b/pw_system/backend.gni
index 727004ef0..3b4d470df 100644
--- a/pw_system/backend.gni
+++ b/pw_system/backend.gni
@@ -17,8 +17,20 @@ import("//build_overrides/pigweed.gni")
declare_args() {
# The pw_system backend that provides thread options for the appropriate
# scheduler.
+ #
+ # There's no default backend as this is target/os specific. pw_system_target
+ # can automatically configure this for your project.
pw_system_TARGET_HOOKS_BACKEND = ""
# The pw_system backend that provides read/write streams for RPC and logging.
+ #
+ # There's no default backend as this is target specific. pw_system_target
+ # can automatically configure this for your project.
pw_system_IO_BACKEND = ""
+
+ # The pw_system backend that provides the system RPC server.
+ #
+ # This defaults to a single-channel HDLC server provided by pw_system
+ # when using a pw_system_target.
+ pw_system_RPC_SERVER_BACKEND = ""
}
diff --git a/pw_system/docs.rst b/pw_system/docs.rst
index 41d54750f..b9572b9b9 100644
--- a/pw_system/docs.rst
+++ b/pw_system/docs.rst
@@ -4,7 +4,67 @@
pw_system
=========
.. warning::
- Under construction, stay tuned!
+ This module is an early work-in-progress towards an opinionated framework for
+ new projects built on Pigweed. It is under active development, so stay tuned!
+
+pw_system is quite different from typical Pigweed modules. Rather than providing
+a single slice of vertical functionality, pw_system pulls together many modules
+across Pigweed to construct a working system with RPC, Logging, an OS
+Abstraction layer, and more. pw_system exists to greatly simplify the process
+of starting a new project using Pigweed by drastically reducing the required
+configuration space required to go from first signs of on-device life to a more
+sophisticated production-ready system.
+
+Trying out pw_system
+====================
+If you'd like to give pw_system a spin and have a STM32F429I Discovery board,
+refer to the board's
+:ref:`target documentation<target-stm32f429i-disc1-stm32cube>` for instructions
+on how to build the demo and try things out.
+
+If you don't have a discovery board, there's a simulated device variation that
+you can run on your local machine with no additional hardware. Check out the
+steps for trying this out :ref:`here<target-host-device-simulator>`.
+
+Target Bringup
+==============
+Bringing up a new device is as easy as 1-2-3! (Kidding, this is a work in
+progress)
+
+#. **Create a ``pw_system_target`` in your GN build.**
+ This is what will control the configuration of your target from a build
+ system level. This includes which compiler will be used, what architecture
+ flags will be used, which backends will be used, and more. A large quantity
+ of configuration will be pre-set to work with pw_system after you select the
+ CPU and scheduler your target will use, but your target will likely need to
+ set a few other things to get to a fully working state.
+#. **Write target-specific initialization.**
+ Most embedded devices require a linker script, manual initialization of
+ memory, and some clock initialization. pw_system leaves this to users to
+ implement as the exact initialization sequence can be very project-specific.
+ All that's required is that after early memory initialization and clock
+ configuration is complete, your target initialization should call
+ ``pw::system::Init()`` and then start the RTOS scheduler (e.g.
+ ``vTaskStartScheduler()``).
+#. **Implement ``pw::system::UserAppInit()`` in your application.**
+ This is where most of your project's application-specific logic goes. This
+ could be starting threads, registering RPC services, turning on Bluetooth,
+ or more. In ``UserAppInit()``, the RTOS will be running so you're free to use
+ OS primitives and use features that rely on threading (e.g. RPC, logging).
+
+Pigweed's ``stm32f429i_disc1_stm32cube`` target demonstrates what's required by
+the first two steps. The third step is where you get to decide how to turn your
+new platform into a project that does something cool! It might be as simple as
+a blinking LED, or something more complex like a Bluetooth device that brews you
+a cup of coffee whenever ``pw watch`` kicks off a new build.
+
+.. note::
+ Because of the nature of the hard-coded conditions in ``pw_system_target``,
+ you may find that some options are missing for various RTOSes and
+ architectures. The design of the GN integration is still a work-in-progress
+ to improve the scalability of this, but in the meantime the Pigweed team
+ welcomes contributions to expand the breadth of RTOSes and architectures
+ supported as ``pw_system_target``\s.
GN Target Toolchain Template
============================
@@ -53,3 +113,36 @@ being foundational infrastructure.
]
}
}
+
+ # Example for the Emcraft SmartFusion2 system-on-module
+ pw_system_target("emcraft_sf2_som_size_optimized") {
+ cpu = PW_SYSTEM_CPU.CORTEX_M3
+ scheduler = PW_SYSTEM_SCHEDULER.FREERTOS
+
+ link_deps = [ "$dir_pigweed/targets/emcraft_sf2_som:pre_init" ]
+ build_args = {
+ pw_log_BACKEND = dir_pw_log_basic #dir_pw_log_tokenized
+ pw_tokenizer_GLOBAL_HANDLER_WITH_PAYLOAD_BACKEND = "//pw_system:log"
+ pw_third_party_freertos_CONFIG = "$dir_pigweed/targets/emcraft_sf2_som:sf2_freertos_config"
+ pw_third_party_freertos_PORT = "$dir_pw_third_party/freertos:arm_cm3"
+ pw_sys_io_BACKEND = dir_pw_sys_io_emcraft_sf2
+ dir_pw_third_party_smartfusion_mss = dir_pw_third_party_smartfusion_mss_exported
+ pw_third_party_stm32cube_CONFIG =
+ "//targets/emcraft_sf2_som:sf2_mss_hal_config"
+ pw_third_party_stm32cube_CORE_INIT = ""
+ pw_boot_cortex_m_LINK_CONFIG_DEFINES = [
+ "PW_BOOT_FLASH_BEGIN=0x00000200",
+ "PW_BOOT_FLASH_SIZE=200K",
+
+ # TODO(pwbug/219): Currently "pw_tokenizer/detokenize_test" requires at
+ # least 6K bytes in heap when using pw_malloc_freelist. The heap size
+ # required for tests should be investigated.
+ "PW_BOOT_HEAP_SIZE=7K",
+ "PW_BOOT_MIN_STACK_SIZE=1K",
+ "PW_BOOT_RAM_BEGIN=0x20000000",
+ "PW_BOOT_RAM_SIZE=64K",
+ "PW_BOOT_VECTOR_TABLE_BEGIN=0x00000000",
+ "PW_BOOT_VECTOR_TABLE_SIZE=512",
+ ]
+ }
+ }
diff --git a/pw_system/rpc.cc b/pw_system/hdlc_rpc_server.cc
index 27ef7d8e4..fe4582651 100644
--- a/pw_system/rpc.cc
+++ b/pw_system/hdlc_rpc_server.cc
@@ -12,8 +12,6 @@
// License for the specific language governing permissions and limitations under
// the License.
-#include "pw_system_private/rpc.h"
-
#include <array>
#include <cstddef>
#include <cstdint>
@@ -26,6 +24,7 @@
#include "pw_sync/mutex.h"
#include "pw_system/config.h"
#include "pw_system/io.h"
+#include "pw_system/rpc_server.h"
namespace pw::system {
namespace {
@@ -36,7 +35,7 @@ hdlc::RpcChannelOutput hdlc_channel_output(GetWriter(),
PW_SYSTEM_DEFAULT_RPC_HDLC_ADDRESS,
"HDLC channel");
rpc::Channel channels[] = {
- rpc::Channel::Create<kDefaultChannelId>(&hdlc_channel_output)};
+ rpc::Channel::Create<kDefaultRpcChannelId>(&hdlc_channel_output)};
rpc::Server server(channels);
// Declare a buffer for decoding incoming HDLC frames.
diff --git a/pw_system/init.cc b/pw_system/init.cc
index bc06ac17a..215778611 100644
--- a/pw_system/init.cc
+++ b/pw_system/init.cc
@@ -16,10 +16,10 @@
#include "pw_log/log.h"
#include "pw_rpc/echo_service_nanopb.h"
+#include "pw_system/rpc_server.h"
#include "pw_system/target_hooks.h"
#include "pw_system/work_queue.h"
#include "pw_system_private/log.h"
-#include "pw_system_private/rpc.h"
#include "pw_thread/detached_thread.h"
namespace pw::system {
@@ -31,7 +31,7 @@ void InitImpl() {
// Setup logging.
const Status status = GetLogThread().OpenUnrequestedLogStream(
- kDefaultChannelId, GetRpcServer(), GetLogService());
+ kDefaultRpcChannelId, GetRpcServer(), GetLogService());
if (!status.ok()) {
PW_LOG_ERROR("Error opening unrequested log streams %d",
static_cast<int>(status.code()));
diff --git a/pw_system/log.cc b/pw_system/log.cc
index ea17e4d0d..7cfbd828f 100644
--- a/pw_system/log.cc
+++ b/pw_system/log.cc
@@ -17,21 +17,13 @@
#include <array>
#include <cstddef>
-#include "pw_bytes/span.h"
-#include "pw_chrono/system_clock.h"
-#include "pw_log/proto_utils.h"
#include "pw_log_rpc/rpc_log_drain.h"
#include "pw_log_rpc/rpc_log_drain_map.h"
-#include "pw_log_tokenized/metadata.h"
#include "pw_multisink/multisink.h"
-#include "pw_result/result.h"
-#include "pw_string/string_builder.h"
-#include "pw_sync/interrupt_spin_lock.h"
#include "pw_sync/lock_annotations.h"
#include "pw_sync/mutex.h"
#include "pw_system/config.h"
-#include "pw_system_private/rpc.h"
-#include "pw_tokenizer/tokenize_to_global_handler_with_payload.h"
+#include "pw_system/rpc_server.h"
namespace pw::system {
namespace {
@@ -41,16 +33,6 @@ using log_rpc::RpcLogDrain;
// Storage container for MultiSink used for deferred logging.
std::array<std::byte, PW_SYSTEM_LOG_BUFFER_SIZE> log_buffer;
-// Buffer used to encode each log entry before saving into log buffer.
-sync::InterruptSpinLock log_encode_lock;
-std::array<std::byte, PW_SYSTEM_MAX_LOG_ENTRY_SIZE> log_encode_buffer
- PW_GUARDED_BY(log_encode_lock);
-
-// String-only logs may need to be formatted first. This buffer is required
-// so the format string may be passed to the proto log encode.
-std::array<std::byte, PW_SYSTEM_MAX_LOG_ENTRY_SIZE> log_format_buffer
- PW_GUARDED_BY(log_encode_lock);
-
// To save RAM, share the mutex and buffer between drains, since drains are
// flushed sequentially.
sync::Mutex drains_mutex;
@@ -59,7 +41,7 @@ std::array<std::byte, PW_SYSTEM_MAX_LOG_ENTRY_SIZE> log_decode_buffer
PW_GUARDED_BY(drains_mutex);
std::array<RpcLogDrain, 1> drains{{
- RpcLogDrain(kDefaultChannelId,
+ RpcLogDrain(kDefaultRpcChannelId,
log_decode_buffer,
drains_mutex,
RpcLogDrain::LogDrainErrorHandling::kIgnoreWriterErrors),
@@ -67,9 +49,6 @@ std::array<RpcLogDrain, 1> drains{{
log_rpc::RpcLogDrainMap drain_map(drains);
-const int64_t boot_time_count =
- pw::chrono::SystemClock::now().time_since_epoch().count();
-
// TODO(amontanez): Is there a helper to subtract RPC overhead?
constexpr size_t kMaxPackedLogMessagesSize =
PW_SYSTEM_MAX_TRANSMISSION_UNIT - 32;
@@ -96,61 +75,4 @@ log_rpc::LogService& GetLogService() {
return log_service;
}
-// Provides time since boot in units defined by the target's pw_chrono backend.
-int64_t GetTimestamp() {
- return pw::chrono::SystemClock::now().time_since_epoch().count() -
- boot_time_count;
-}
-
-// Implementation for tokenized log handling. This will be optimized out for
-// devices that only use string logging.
-extern "C" void pw_tokenizer_HandleEncodedMessageWithPayload(
- pw_tokenizer_Payload payload, const uint8_t message[], size_t size_bytes) {
- log_tokenized::Metadata metadata = payload;
- const int64_t timestamp = GetTimestamp();
-
- std::lock_guard lock(log_encode_lock);
- Result<ConstByteSpan> encoded_log_result = log::EncodeTokenizedLog(
- metadata, message, size_bytes, timestamp, log_encode_buffer);
- if (!encoded_log_result.ok()) {
- GetMultiSink().HandleDropped();
- return;
- }
- GetMultiSink().HandleEntry(encoded_log_result.value());
-}
-
-// Implementation for string log handling. This will be optimized out for
-// devices that only use tokenized logging.
-extern "C" void pw_log_string_HandleMessage(int level,
- unsigned int flags,
- const char* module_name,
- const char* file_name,
- int line_number,
- const char* message,
- ...) {
- const int64_t timestamp = GetTimestamp();
-
- std::lock_guard lock(log_encode_lock);
- StringBuilder message_builder(log_format_buffer);
- va_list args;
- va_start(args, message);
- message_builder.FormatVaList(message, args);
- va_end(args);
-
- Result<ConstByteSpan> encoded_log_result =
- log::EncodeLog(level,
- flags,
- module_name,
- file_name,
- line_number,
- timestamp,
- message_builder.view(),
- log_encode_buffer);
- if (!encoded_log_result.ok()) {
- GetMultiSink().HandleDropped();
- return;
- }
- GetMultiSink().HandleEntry(encoded_log_result.value());
-}
-
} // namespace pw::system
diff --git a/pw_system/log_backend.cc b/pw_system/log_backend.cc
new file mode 100644
index 000000000..e08ba2ecf
--- /dev/null
+++ b/pw_system/log_backend.cc
@@ -0,0 +1,106 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <array>
+#include <cstddef>
+#include <mutex>
+
+#include "pw_bytes/span.h"
+#include "pw_chrono/system_clock.h"
+#include "pw_log/proto_utils.h"
+#include "pw_log_string/handler.h"
+#include "pw_log_tokenized/metadata.h"
+#include "pw_multisink/multisink.h"
+#include "pw_result/result.h"
+#include "pw_string/string_builder.h"
+#include "pw_sync/interrupt_spin_lock.h"
+#include "pw_sync/lock_annotations.h"
+#include "pw_system/config.h"
+#include "pw_system_private/log.h"
+#include "pw_tokenizer/tokenize_to_global_handler_with_payload.h"
+
+namespace pw::system {
+namespace {
+
+// Buffer used to encode each log entry before saving into log buffer.
+sync::InterruptSpinLock log_encode_lock;
+std::array<std::byte, PW_SYSTEM_MAX_LOG_ENTRY_SIZE> log_encode_buffer
+ PW_GUARDED_BY(log_encode_lock);
+
+// String-only logs may need to be formatted first. This buffer is required
+// so the format string may be passed to the proto log encode.
+std::array<std::byte, PW_SYSTEM_MAX_LOG_ENTRY_SIZE> log_format_buffer
+ PW_GUARDED_BY(log_encode_lock);
+
+const int64_t boot_time_count =
+ pw::chrono::SystemClock::now().time_since_epoch().count();
+
+} // namespace
+
+// Provides time since boot in units defined by the target's pw_chrono backend.
+int64_t GetTimestamp() {
+ return pw::chrono::SystemClock::now().time_since_epoch().count() -
+ boot_time_count;
+}
+
+// Implementation for tokenized log handling. This will be optimized out for
+// devices that only use string logging.
+extern "C" void pw_tokenizer_HandleEncodedMessageWithPayload(
+ pw_tokenizer_Payload payload, const uint8_t message[], size_t size_bytes) {
+ log_tokenized::Metadata metadata = payload;
+ const int64_t timestamp = GetTimestamp();
+
+ std::lock_guard lock(log_encode_lock);
+ Result<ConstByteSpan> encoded_log_result = log::EncodeTokenizedLog(
+ metadata, message, size_bytes, timestamp, log_encode_buffer);
+ if (!encoded_log_result.ok()) {
+ GetMultiSink().HandleDropped();
+ return;
+ }
+ GetMultiSink().HandleEntry(encoded_log_result.value());
+}
+
+// Implementation for string log handling. This will be optimized out for
+// devices that only use tokenized logging.
+extern "C" void pw_log_string_HandleMessageVaList(int level,
+ unsigned int flags,
+ const char* module_name,
+ const char* file_name,
+ int line_number,
+ const char* message,
+ va_list args) {
+ const int64_t timestamp = GetTimestamp();
+
+ std::lock_guard lock(log_encode_lock);
+ StringBuilder message_builder(log_format_buffer);
+ message_builder.FormatVaList(message, args);
+
+ Result<ConstByteSpan> encoded_log_result =
+ log::EncodeLog(level,
+ flags,
+ module_name,
+ /*thread_name=*/{},
+ file_name,
+ line_number,
+ timestamp,
+ message_builder.view(),
+ log_encode_buffer);
+ if (!encoded_log_result.ok()) {
+ GetMultiSink().HandleDropped();
+ return;
+ }
+ GetMultiSink().HandleEntry(encoded_log_result.value());
+}
+
+} // namespace pw::system
diff --git a/pw_system/pw_system_private/rpc.h b/pw_system/public/pw_system/rpc_server.h
index cf3932696..3918ded3c 100644
--- a/pw_system/pw_system_private/rpc.h
+++ b/pw_system/public/pw_system/rpc_server.h
@@ -16,13 +16,15 @@
#include <cstdint>
-#include "pw_rpc_system_server/rpc_server.h"
#include "pw_system/config.h"
#include "pw_thread/thread_core.h"
namespace pw::system {
-inline constexpr uint32_t kDefaultChannelId = PW_SYSTEM_DEFAULT_CHANNEL_ID;
+// This is the default channel used by the pw_system RPC server. Some other
+// parts of pw_system (e.g. logging) use this channel ID as the default
+// destination for unrequested data streams.
+inline constexpr uint32_t kDefaultRpcChannelId = PW_SYSTEM_DEFAULT_CHANNEL_ID;
rpc::Server& GetRpcServer();
diff --git a/pw_system/pw_system_private/log.h b/pw_system/pw_system_private/log.h
index 6b7da9e35..85cf0c208 100644
--- a/pw_system/pw_system_private/log.h
+++ b/pw_system/pw_system_private/log.h
@@ -16,10 +16,12 @@
#include "pw_log_rpc/log_service.h"
#include "pw_log_rpc/rpc_log_drain_thread.h"
+#include "pw_multisink/multisink.h"
namespace pw::system {
-log_rpc::RpcLogDrainThread& GetLogThread();
log_rpc::LogService& GetLogService();
+log_rpc::RpcLogDrainThread& GetLogThread();
+multisink::MultiSink& GetMultiSink();
} // namespace pw::system
diff --git a/pw_system/py/pw_system/console.py b/pw_system/py/pw_system/console.py
index 2a676e0d8..7843837a4 100644
--- a/pw_system/py/pw_system/console.py
+++ b/pw_system/py/pw_system/console.py
@@ -61,9 +61,7 @@ from pw_console.plugins.bandwidth_toolbar import BandwidthToolbar
from pw_log.proto import log_pb2
from pw_rpc.console_tools.console import flattened_rpc_completions
from pw_system.device import Device
-from pw_tokenizer.database import LoadTokenDatabases
-from pw_tokenizer.detokenize import Detokenizer
-from pw_tokenizer import tokens
+from pw_tokenizer.detokenize import AutoUpdatingDetokenizer
_LOG = logging.getLogger('tools')
_DEVICE_LOG = logging.getLogger('rpc_device')
@@ -105,7 +103,7 @@ def _parse_args():
parser.add_argument("--token-databases",
metavar='elf_or_token_database',
nargs="+",
- action=LoadTokenDatabases,
+ type=Path,
help="Path to tokenizer database csv file(s).")
parser.add_argument('--config-file',
type=Path,
@@ -211,7 +209,7 @@ class SocketClientImpl:
def console(device: str,
baudrate: int,
proto_globs: Collection[str],
- token_databases: Collection[tokens.Database],
+ token_databases: Collection[Path],
socket_addr: str,
logfile: str,
output: Any,
@@ -235,8 +233,8 @@ def console(device: str,
detokenizer = None
if token_databases:
- detokenizer = Detokenizer(tokens.Database.merged(*token_databases),
- show_errors=True)
+ detokenizer = AutoUpdatingDetokenizer(*token_databases)
+ detokenizer.show_errors = True
if not proto_globs:
proto_globs = ['**/*.proto']
@@ -263,7 +261,11 @@ def console(device: str,
timestamp_decoder = None
if socket_addr is None:
- serial_device = serial_impl(device, baudrate, timeout=1)
+ serial_device = serial_impl(
+ device,
+ baudrate,
+ timeout=0, # Non-blocking mode
+ )
read = lambda: serial_device.read(8192)
write = serial_device.write
diff --git a/pw_system/py/pw_system/device.py b/pw_system/py/pw_system/device.py
index b99db21b4..1288f68b8 100644
--- a/pw_system/py/pw_system/device.py
+++ b/pw_system/py/pw_system/device.py
@@ -100,10 +100,9 @@ class Device:
if error != Status.CANCELLED:
self.listen_to_log_stream()
- def _handle_log_drop_count(self, drop_count: int):
- message = f'Dropped {drop_count} log'
- if drop_count > 1:
- message += 's'
+ def _handle_log_drop_count(self, drop_count: int, reason: str):
+ log_text = 'log' if drop_count == 1 else 'logs'
+ message = f'Dropped {drop_count} {log_text} due to {reason}'
self._emit_device_log(logging.WARNING, '', '', '', message)
def _check_for_dropped_logs(self, log_entries_proto: log_pb2.LogEntries):
@@ -115,7 +114,7 @@ class Device:
self._expected_log_sequence_id = (
log_entries_proto.first_entry_sequence_id + messages_received)
if dropped_log_count > 0:
- self._handle_log_drop_count(dropped_log_count)
+ self._handle_log_drop_count(dropped_log_count, 'loss at transport')
elif dropped_log_count < 0:
_LOG.error('Log sequence ID is smaller than expected')
@@ -135,8 +134,10 @@ class Device:
# Handle dropped count.
if log_proto.dropped:
- self._handle_log_drop_count(log_proto.dropped)
- return
+ drop_reason = log_proto.message.decode("utf-8").lower(
+ ) if log_proto.message else 'enqueue failure on device'
+ self._handle_log_drop_count(log_proto.dropped, drop_reason)
+ continue
self._emit_device_log(level, '', decoded_timestamp, log.module,
log.message, **dict(log.fields))
diff --git a/pw_system/system_target.gni b/pw_system/system_target.gni
index 45fafcb15..8fe9d431d 100644
--- a/pw_system/system_target.gni
+++ b/pw_system/system_target.gni
@@ -14,10 +14,23 @@
import("//build_overrides/pigweed.gni")
+import("$dir_pw_assert/backend.gni")
+import("$dir_pw_bloat/bloat.gni")
+import("$dir_pw_boot/backend.gni")
+import("$dir_pw_build/cc_library.gni")
+import("$dir_pw_chrono/backend.gni")
+import("$dir_pw_interrupt/backend.gni")
+import("$dir_pw_log/backend.gni")
+import("$dir_pw_log_string/backend.gni")
+import("$dir_pw_malloc/backend.gni")
+import("$dir_pw_sync/backend.gni")
+import("$dir_pw_sys_io/backend.gni")
+import("$dir_pw_thread/backend.gni")
import("$dir_pw_toolchain/arm_gcc/toolchains.gni")
import("$dir_pw_toolchain/generate_toolchain.gni")
import("$dir_pw_toolchain/host_clang/toolchains.gni")
import("$dir_pw_toolchain/host_gcc/toolchains.gni")
+import("$dir_pw_unit_test/test.gni")
import("backend.gni")
import("freertos_backends.gni")
import("stl_backends.gni")
@@ -25,6 +38,8 @@ import("stl_backends.gni")
# This scope is essentially an enum for pw_system_target's `cpu` selection.
PW_SYSTEM_CPU = {
CORTEX_M4F = "cortex-m4f"
+ CORTEX_M3 = "cortex-m3"
+ CORTEX_M7F = "cortex-m7f"
# Native builds for the host CPU.
NATIVE = "native"
@@ -45,7 +60,7 @@ PW_SYSTEM_SCHEDULER = {
#
# Args:
# cpu: (required) The architecture to target.
-# Supported choices: PW_SYSTEM_CPU.CORTEX_M4F, PW_SYSTEM_CPU.NATIVE
+# Supported choices: PW_SYSTEM_CPU.CORTEX_M7F, PW_SYSTEM_CPU.CORTEX_M4F, PW_SYSTEM_CPU.CORTEX_M3, PW_SYSTEM_CPU.NATIVE
# scheduler: (required) The scheduler implementation and API to use for this
# target.
# Supported choices: PW_SYSTEM_SCHEDULER.FREERTOS, PW_SYSTEM_SCHEDULER.NATIVE
@@ -54,6 +69,8 @@ PW_SYSTEM_SCHEDULER = {
# link_deps: Additional link-time dependencies required for all executables.
# This is a list of source sets.
# build_args: Additional overrides for GN build arguments.
+# global_configs: Configs that will be globally applied to all pw_source_set,
+# pw_static_library, and pw_executable targets.
template("pw_system_target") {
_OPTIMIZATION_LEVELS = {
SIZE_OPTIMIZED = "size_optimized"
@@ -63,6 +80,12 @@ template("pw_system_target") {
# Generic defaults.
_default_configs = [ "$dir_pw_build:extra_strict_warnings" ]
+ if (defined(invoker.global_configs)) {
+ foreach(cfg, invoker.global_configs) {
+ _default_configs += [ get_path_info(cfg, "abspath") ]
+ }
+ }
+
_link_deps = [
"$dir_pw_assert:impl",
"$dir_pw_log:impl",
@@ -73,6 +96,7 @@ template("pw_system_target") {
_final_binary_extension = ""
_default_build_args = {
+ pw_system_RPC_SERVER_BACKEND = "$dir_pw_system:hdlc_rpc_server"
pw_system_IO_BACKEND = "$dir_pw_system:sys_io_target_io"
# TODO(amontanez): This should be set to pw_assert_log ASAP.
@@ -92,8 +116,33 @@ template("pw_system_target") {
# Populate architecture-specific build args.
assert(
defined(invoker.cpu),
- "Please select a `cpu` for $target_name. Options: PW_SYSTEM_CPU.CORTEX_M4F, PW_SYSTEM_CPU.NATIVE")
- if (invoker.cpu == PW_SYSTEM_CPU.CORTEX_M4F) {
+      "Please select a `cpu` for $target_name. Options: PW_SYSTEM_CPU.CORTEX_M7F, PW_SYSTEM_CPU.CORTEX_M4F, PW_SYSTEM_CPU.CORTEX_M3, PW_SYSTEM_CPU.NATIVE")
+ if (invoker.cpu == PW_SYSTEM_CPU.CORTEX_M7F) {
+ _current_cpu = "arm"
+ _default_configs += [ "$dir_pw_toolchain/arm_gcc:enable_float_printf" ]
+ _arch_build_args = {
+ pw_bloat_BLOATY_CONFIG = "$dir_pw_boot_cortex_m/bloaty_config.bloaty"
+ pw_boot_BACKEND = "$dir_pw_boot_cortex_m:armv7m"
+ pw_interrupt_CONTEXT_BACKEND = "$dir_pw_interrupt_cortex_m:context_armv7m"
+ }
+
+ _final_binary_extension = ".elf"
+
+ _toolchains = [
+ {
+ toolchain_base = pw_toolchain_arm_gcc.cortex_m7f_debug
+ level_name = _OPTIMIZATION_LEVELS.DEBUG
+ },
+ {
+ toolchain_base = pw_toolchain_arm_gcc.cortex_m7f_size_optimized
+ level_name = _OPTIMIZATION_LEVELS.SIZE_OPTIMIZED
+ },
+ {
+ toolchain_base = pw_toolchain_arm_gcc.cortex_m7f_speed_optimized
+ level_name = _OPTIMIZATION_LEVELS.SPEED_OPTIMIZED
+ },
+ ]
+ } else if (invoker.cpu == PW_SYSTEM_CPU.CORTEX_M4F) {
_current_cpu = "arm"
_default_configs += [ "$dir_pw_toolchain/arm_gcc:enable_float_printf" ]
_arch_build_args = {
@@ -118,14 +167,39 @@ template("pw_system_target") {
level_name = _OPTIMIZATION_LEVELS.SPEED_OPTIMIZED
},
]
+ } else if (invoker.cpu == PW_SYSTEM_CPU.CORTEX_M3) {
+ _current_cpu = "arm"
+ _arch_build_args = {
+ pw_bloat_BLOATY_CONFIG = "$dir_pw_boot_cortex_m/bloaty_config.bloaty"
+ pw_boot_BACKEND = "$dir_pw_boot_cortex_m:armv7m"
+ pw_interrupt_CONTEXT_BACKEND = "$dir_pw_interrupt_cortex_m:context_armv7m"
+ }
+
+ _final_binary_extension = ".elf"
+
+ _toolchains = [
+ {
+ toolchain_base = pw_toolchain_arm_gcc.cortex_m3_debug
+ level_name = _OPTIMIZATION_LEVELS.DEBUG
+ },
+ {
+ toolchain_base = pw_toolchain_arm_gcc.cortex_m3_size_optimized
+ level_name = _OPTIMIZATION_LEVELS.SIZE_OPTIMIZED
+ },
+ {
+ toolchain_base = pw_toolchain_arm_gcc.cortex_m3_speed_optimized
+ level_name = _OPTIMIZATION_LEVELS.SPEED_OPTIMIZED
+ },
+ ]
} else if (invoker.cpu == PW_SYSTEM_CPU.NATIVE) {
_current_cpu = host_cpu
_arch_build_args = {
pw_log_BACKEND = dir_pw_log_string
- pw_log_string_BACKEND = "$dir_pw_system:log"
+ pw_log_string_HANDLER_BACKEND = "$dir_pw_system:log_backend"
pw_sys_io_BACKEND = "$dir_pw_sys_io_stdio"
pw_system_IO_BACKEND = "$dir_pw_system:socket_target_io"
}
+ _link_deps += [ "$dir_pw_log_string:handler.impl" ]
if (host_os != "win") {
_toolchains = [
diff --git a/pw_thread/CMakeLists.txt b/pw_thread/CMakeLists.txt
index 54509492f..0ad8e33eb 100644
--- a/pw_thread/CMakeLists.txt
+++ b/pw_thread/CMakeLists.txt
@@ -82,6 +82,7 @@ pw_add_module_library(pw_thread.snapshot
public
PUBLIC_DEPS
pw_bytes
+ pw_function
pw_protobuf
pw_status
pw_thread.protos.pwpb
diff --git a/pw_thread/pw_thread_protos/thread.proto b/pw_thread/pw_thread_protos/thread.proto
index 230a515d5..bcfbb09b7 100644
--- a/pw_thread/pw_thread_protos/thread.proto
+++ b/pw_thread/pw_thread_protos/thread.proto
@@ -50,13 +50,13 @@ message Thread {
// tokenized data (e.g. base-64 encoded or binary data).
bytes name = 1 [(tokenizer.format) = TOKENIZATION_OPTIONAL];
+  // This field has been deprecated in favor of using the state enum to report
+ // RUNNING or INTERRUPT_CONTEXT to mark them as active.
+ //
// Whether or not this thread is the thread is the currently active context
// at the time of capture. For multi-thread dumps, this field should only be
// set on ONE thread.
- //
- // Note: in interrupt contexts, the active thread may not be the thread that
- // is in the THREAD_STATE_RUNNING state.
- bool active = 2;
+ bool active = 2 [deprecated = true];
// A summarized thread state. RTOS-specific extensions of the Thread message
// may provide more specific thread state information.
diff --git a/pw_thread/py/pw_thread/thread_analyzer.py b/pw_thread/py/pw_thread/thread_analyzer.py
index 1cd6fb518..37a349570 100644
--- a/pw_thread/py/pw_thread/thread_analyzer.py
+++ b/pw_thread/py/pw_thread/thread_analyzer.py
@@ -189,14 +189,19 @@ class ThreadSnapshotAnalyzer:
def active_thread(self) -> Optional[thread_pb2.Thread]:
"""The thread that requested the snapshot capture."""
- running_thread = None
+ # First check if an interrupt handler was active.
for thread in self._threads:
- if thread.active:
+ if thread.state == thread_pb2.ThreadState.Enum.INTERRUPT_HANDLER:
return thread
+ if thread.active: # The deprecated legacy way to report this.
+ return thread
+
+ # If not, search for a running thread.
+ for thread in self._threads:
if thread.state == thread_pb2.ThreadState.Enum.RUNNING:
- running_thread = thread
+ return thread
- return running_thread
+ return None
def __str__(self) -> str:
"""outputs a pw.snapshot.Metadata proto as a multi-line string."""
diff --git a/pw_thread/py/thread_analyzer_test.py b/pw_thread/py/thread_analyzer_test.py
index 6b5b3dc51..0a6844f07 100644
--- a/pw_thread/py/thread_analyzer_test.py
+++ b/pw_thread/py/thread_analyzer_test.py
@@ -131,10 +131,10 @@ class ThreadSnapshotAnalyzerTest(unittest.TestCase):
snapshot.threads.append(temp_thread)
temp_thread = thread_pb2.Thread()
- temp_thread.name = 'Main/Handler'.encode()
+ temp_thread.name = 'Alice'.encode()
temp_thread.stack_start_pointer = 0x2001b000
temp_thread.stack_pointer = 0x2001ae20
- temp_thread.state = thread_pb2.ThreadState.Enum.INTERRUPT_HANDLER
+ temp_thread.state = thread_pb2.ThreadState.Enum.BLOCKED
snapshot.threads.append(temp_thread)
expected = '\n'.join((
@@ -148,7 +148,7 @@ class ThreadSnapshotAnalyzerTest(unittest.TestCase):
' Est peak usage: 512 bytes, 100.00%',
' Stack limits: 0x2001ac00 - 0x2001aa00 (512 bytes)',
'',
- 'Thread (INTERRUPT_HANDLER): Main/Handler',
+ 'Thread (BLOCKED): Alice',
'Est CPU usage: unknown',
'Stack info',
' Current usage: 0x2001b000 - 0x2001ae20 (480 bytes)',
@@ -160,6 +160,51 @@ class ThreadSnapshotAnalyzerTest(unittest.TestCase):
self.assertEqual(analyzer.active_thread(), None)
self.assertEqual(str(ThreadSnapshotAnalyzer(snapshot)), expected)
+ def test_interrupts_with_thread(self):
+ """Ensures interrupts are properly reported as active."""
+ snapshot = thread_pb2.SnapshotThreadInfo()
+
+ temp_thread = thread_pb2.Thread()
+ temp_thread.name = 'Idle'.encode()
+ temp_thread.state = thread_pb2.ThreadState.Enum.READY
+ temp_thread.stack_start_pointer = 0x2001ac00
+ temp_thread.stack_end_pointer = 0x2001aa00
+ temp_thread.stack_pointer = 0x2001ab0c
+ temp_thread.stack_pointer_est_peak = 0x2001aa00
+ snapshot.threads.append(temp_thread)
+
+ temp_thread = thread_pb2.Thread()
+ temp_thread.name = 'Main/Handler'.encode()
+ temp_thread.stack_start_pointer = 0x2001b000
+ temp_thread.stack_pointer = 0x2001ae20
+ temp_thread.state = thread_pb2.ThreadState.Enum.INTERRUPT_HANDLER
+ snapshot.threads.append(temp_thread)
+
+ expected = '\n'.join((
+ 'Thread State',
+ ' 2 threads running, Main/Handler active at the time of capture.',
+ ' ~~~~~~~~~~~~',
+ '',
+ # Ensure the active thread is moved to the top of the list.
+ 'Thread (INTERRUPT_HANDLER): Main/Handler <-- [ACTIVE]',
+ 'Est CPU usage: unknown',
+ 'Stack info',
+ ' Current usage: 0x2001b000 - 0x2001ae20 (480 bytes)',
+ ' Est peak usage: size unknown',
+ ' Stack limits: 0x2001b000 - 0x???????? (size unknown)',
+ '',
+ 'Thread (READY): Idle',
+ 'Est CPU usage: unknown',
+ 'Stack info',
+ ' Current usage: 0x2001ac00 - 0x2001ab0c (244 bytes, 47.66%)',
+ ' Est peak usage: 512 bytes, 100.00%',
+ ' Stack limits: 0x2001ac00 - 0x2001aa00 (512 bytes)',
+ '',
+ ))
+ analyzer = ThreadSnapshotAnalyzer(snapshot)
+ self.assertEqual(analyzer.active_thread(), temp_thread)
+ self.assertEqual(str(ThreadSnapshotAnalyzer(snapshot)), expected)
+
def test_active_thread(self):
"""Ensures the 'active' thread is highlighted."""
snapshot = thread_pb2.SnapshotThreadInfo()
diff --git a/pw_thread_embos/BUILD.gn b/pw_thread_embos/BUILD.gn
index 300a46bed..1b4875012 100644
--- a/pw_thread_embos/BUILD.gn
+++ b/pw_thread_embos/BUILD.gn
@@ -14,6 +14,7 @@
import("//build_overrides/pigweed.gni")
+import("$dir_pw_build/error.gni")
import("$dir_pw_build/module_config.gni")
import("$dir_pw_build/target_types.gni")
import("$dir_pw_chrono/backend.gni")
@@ -67,6 +68,15 @@ pw_source_set("id") {
deps = [ "$dir_pw_thread:id.facade" ]
}
+pw_build_assert("check_system_clock_backend") {
+ condition =
+ pw_thread_OVERRIDE_SYSTEM_CLOCK_BACKEND_CHECK ||
+ pw_chrono_SYSTEM_CLOCK_BACKEND == "$dir_pw_chrono_embos:system_clock"
+ message = "The embOS pw::thread::sleep_{for,until} backend only works with " +
+ "the embOS pw::chrono::SystemClock backend."
+ visibility = [ ":*" ]
+}
+
if (pw_chrono_SYSTEM_CLOCK_BACKEND != "" && pw_thread_SLEEP_BACKEND != "") {
# This target provides the backend for pw::thread::sleep_{for,until}.
pw_source_set("sleep") {
@@ -81,17 +91,13 @@ if (pw_chrono_SYSTEM_CLOCK_BACKEND != "" && pw_thread_SLEEP_BACKEND != "") {
public_deps = [ "$dir_pw_chrono:system_clock" ]
sources = [ "sleep.cc" ]
deps = [
+ ":check_system_clock_backend",
"$dir_pw_assert",
"$dir_pw_chrono_embos:system_clock",
"$dir_pw_third_party/embos",
"$dir_pw_thread:id",
"$dir_pw_thread:sleep.facade",
]
- assert(pw_thread_OVERRIDE_SYSTEM_CLOCK_BACKEND_CHECK ||
- pw_chrono_SYSTEM_CLOCK_BACKEND ==
- "$dir_pw_chrono_embos:system_clock",
- "The embOS pw::thread::sleep_{for,until} backend only works with " +
- "the embOS pw::chrono::SystemClock backend.")
}
}
diff --git a/pw_thread_freertos/BUILD.gn b/pw_thread_freertos/BUILD.gn
index 01d0a1783..c1027cc66 100644
--- a/pw_thread_freertos/BUILD.gn
+++ b/pw_thread_freertos/BUILD.gn
@@ -14,6 +14,7 @@
import("//build_overrides/pigweed.gni")
+import("$dir_pw_build/error.gni")
import("$dir_pw_build/facade.gni")
import("$dir_pw_build/module_config.gni")
import("$dir_pw_build/target_types.gni")
@@ -69,6 +70,17 @@ pw_source_set("id") {
deps = [ "$dir_pw_thread:id.facade" ]
}
+pw_build_assert("check_system_clock_backend") {
+ condition =
+ pw_chrono_SYSTEM_CLOCK_BACKEND == "" ||
+ pw_chrono_SYSTEM_CLOCK_BACKEND == "$dir_pw_chrono_freertos:system_clock"
+ message = "This FreeRTOS backend only works with the FreeRTOS " +
+ "pw::chrono::SystemClock backend " +
+ "(pw_chrono_SYSTEM_CLOCK_BACKEND = " +
+ "\"$dir_pw_chrono_freertos:system_clock\")"
+ visibility = [ ":*" ]
+}
+
# This target provides the backend for pw::this_thread::sleep_{for,until}.
pw_source_set("sleep") {
public_configs = [
@@ -82,19 +94,13 @@ pw_source_set("sleep") {
public_deps = [ "$dir_pw_chrono:system_clock" ]
sources = [ "sleep.cc" ]
deps = [
+ ":check_system_clock_backend",
"$dir_pw_assert",
"$dir_pw_chrono_freertos:system_clock",
"$dir_pw_third_party/freertos",
"$dir_pw_thread:id",
"$dir_pw_thread:sleep.facade",
]
- assert(pw_chrono_SYSTEM_CLOCK_BACKEND == "" ||
- pw_chrono_SYSTEM_CLOCK_BACKEND ==
- "$dir_pw_chrono_freertos:system_clock",
- "The FreeRTOS pw::this_thread::sleep_{for,until} backend only works " +
- "with the FreeRTOS pw::chrono::SystemClock backend " +
- "(pw_chrono_SYSTEM_CLOCK_BACKEND = " +
- "\"$dir_pw_chrono_freertos:system_clock\")")
}
# This target provides the backend for pw::thread::Thread and the headers needed
diff --git a/pw_thread_stl/BUILD.gn b/pw_thread_stl/BUILD.gn
index c4d47463e..0e4e6b663 100644
--- a/pw_thread_stl/BUILD.gn
+++ b/pw_thread_stl/BUILD.gn
@@ -14,6 +14,7 @@
import("//build_overrides/pigweed.gni")
+import("$dir_pw_build/error.gni")
import("$dir_pw_build/target_types.gni")
import("$dir_pw_chrono/backend.gni")
import("$dir_pw_docgen/docs.gni")
@@ -63,6 +64,17 @@ pw_source_set("thread") {
deps = [ "$dir_pw_thread:thread.facade" ]
}
+pw_build_assert("check_system_clock_backend") {
+ condition =
+ pw_thread_SLEEP_BACKEND != "$dir_pw_thread_stl:sleep" ||
+ pw_chrono_SYSTEM_CLOCK_BACKEND == "" ||
+ pw_chrono_SYSTEM_CLOCK_BACKEND == "$dir_pw_chrono_stl:system_clock"
+ message = "The STL pw::this_thread::sleep_{for,until} backend only works " +
+ "with the STL pw::chrono::SystemClock backend " +
+ "(pw_chrono_SYSTEM_CLOCK_BACKEND = " +
+ "\"$dir_pw_chrono_stl:system_clock\")"
+}
+
# This target provides the backend for pw::this_thread::sleep_{for,until}.
pw_source_set("sleep") {
public_configs = [
@@ -74,17 +86,10 @@ pw_source_set("sleep") {
"public_overrides/pw_thread_backend/sleep_inline.h",
]
deps = [
+ ":check_system_clock_backend",
"$dir_pw_chrono:system_clock",
"$dir_pw_thread:sleep.facade",
]
- assert(
- pw_thread_SLEEP_BACKEND != "$dir_pw_thread_stl:sleep" ||
- pw_chrono_SYSTEM_CLOCK_BACKEND == "" ||
- pw_chrono_SYSTEM_CLOCK_BACKEND == "$dir_pw_chrono_stl:system_clock",
- "The STL pw::this_thread::sleep_{for,until} backend only works with " +
- "the STL pw::chrono::SystemClock backend " +
- "(pw_chrono_SYSTEM_CLOCK_BACKEND = " +
- "\"$dir_pw_chrono_stl:system_clock\")")
}
# This target provides the backend for pw::this_thread::yield.
diff --git a/pw_thread_threadx/BUILD.gn b/pw_thread_threadx/BUILD.gn
index 892d2c8dd..291714790 100644
--- a/pw_thread_threadx/BUILD.gn
+++ b/pw_thread_threadx/BUILD.gn
@@ -14,6 +14,7 @@
import("//build_overrides/pigweed.gni")
+import("$dir_pw_build/error.gni")
import("$dir_pw_build/module_config.gni")
import("$dir_pw_build/target_types.gni")
import("$dir_pw_chrono/backend.gni")
@@ -68,6 +69,14 @@ pw_source_set("id") {
}
if (pw_chrono_SYSTEM_CLOCK_BACKEND != "" && pw_thread_SLEEP_BACKEND != "") {
+ pw_build_assert("check_system_clock_backend") {
+ condition =
+ pw_thread_OVERRIDE_SYSTEM_CLOCK_BACKEND_CHECK ||
+ pw_chrono_SYSTEM_CLOCK_BACKEND == "$dir_pw_chrono_threadx:system_clock"
+ message = "The ThreadX pw::this_thread::sleep_{for,until} backend only " +
+ "works with the ThreadX pw::chrono::SystemClock backend."
+ }
+
# This target provides the backend for pw::this_thread::sleep_{for,until}.
pw_source_set("sleep") {
public_configs = [
@@ -81,16 +90,12 @@ if (pw_chrono_SYSTEM_CLOCK_BACKEND != "" && pw_thread_SLEEP_BACKEND != "") {
public_deps = [ "$dir_pw_chrono:system_clock" ]
sources = [ "sleep.cc" ]
deps = [
+ ":check_system_clock_backend",
"$dir_pw_assert",
"$dir_pw_chrono_threadx:system_clock",
"$dir_pw_third_party/threadx",
"$dir_pw_thread:id",
]
- assert(
- pw_thread_OVERRIDE_SYSTEM_CLOCK_BACKEND_CHECK ||
- pw_chrono_SYSTEM_CLOCK_BACKEND ==
- "$dir_pw_chrono_threadx:system_clock",
- "The ThreadX pw::this_thread::sleep_{for,until} backend only works with " + "the ThreadX pw::chrono::SystemClock backend.")
}
}
diff --git a/pw_tokenizer/docs.rst b/pw_tokenizer/docs.rst
index e124e125e..dc908ea4b 100644
--- a/pw_tokenizer/docs.rst
+++ b/pw_tokenizer/docs.rst
@@ -954,6 +954,44 @@ functions.
TransmitLogMessage(base64_buffer, base64_size);
}
+Investigating undecoded messages
+--------------------------------
+Tokenized messages cannot be decoded if the token is not recognized. The Python
+package includes the ``parse_message`` tool, which parses tokenized Base64
+messages without looking up the token in a database. This tool attempts to guess
+the types of the arguments and displays potential ways to decode them.
+
+This tool can be used to extract argument information from an otherwise unusable
+message. It could help identify which statement in the code produced the
+message. This tool is not particularly helpful for tokenized messages without
+arguments, since all it can do is show the value of the unknown token.
+
+The tool is executed by passing Base64 tokenized messages, with or without the
+``$`` prefix, to ``pw_tokenizer.parse_message``. Pass ``-h`` or ``--help`` to
+see full usage information.
+
+Example
+^^^^^^^
+.. code-block::
+
+ $ python -m pw_tokenizer.parse_message '$329JMwA=' koSl524TRkFJTEVEX1BSRUNPTkRJVElPTgJPSw== --specs %s %d
+
+ INF Decoding arguments for '$329JMwA='
+ INF Binary: b'\xdfoI3\x00' [df 6f 49 33 00] (5 bytes)
+ INF Token: 0x33496fdf
+ INF Args: b'\x00' [00] (1 bytes)
+ INF Decoding with up to 8 %s or %d arguments
+ INF Attempt 1: [%s]
+ INF Attempt 2: [%d] 0
+
+ INF Decoding arguments for '$koSl524TRkFJTEVEX1BSRUNPTkRJVElPTgJPSw=='
+ INF Binary: b'\x92\x84\xa5\xe7n\x13FAILED_PRECONDITION\x02OK' [92 84 a5 e7 6e 13 46 41 49 4c 45 44 5f 50 52 45 43 4f 4e 44 49 54 49 4f 4e 02 4f 4b] (28 bytes)
+ INF Token: 0xe7a58492
+ INF Args: b'n\x13FAILED_PRECONDITION\x02OK' [6e 13 46 41 49 4c 45 44 5f 50 52 45 43 4f 4e 44 49 54 49 4f 4e 02 4f 4b] (24 bytes)
+ INF Decoding with up to 8 %s or %d arguments
+ INF Attempt 1: [%d %s %d %d %d] 55 FAILED_PRECONDITION 1 -40 -38
+ INF Attempt 2: [%d %s %s] 55 FAILED_PRECONDITION OK
+
Command line utilities
^^^^^^^^^^^^^^^^^^^^^^
``pw_tokenizer`` provides two standalone command line utilities for detokenizing
diff --git a/pw_tokenizer/generate_decoding_test_data.cc b/pw_tokenizer/generate_decoding_test_data.cc
index 9ab1a4446..83fda7f28 100644
--- a/pw_tokenizer/generate_decoding_test_data.cc
+++ b/pw_tokenizer/generate_decoding_test_data.cc
@@ -217,7 +217,7 @@ void GenerateEncodedStrings(TestDataFile* file) {
std::mt19937 random(6006411);
std::uniform_int_distribution<int64_t> big;
std::uniform_int_distribution<int32_t> medium;
- std::uniform_int_distribution<char> small(' ', '~');
+ std::uniform_int_distribution<int32_t> small(' ', '~');
std::uniform_real_distribution<float> real;
file->Section("Simple strings");
@@ -348,7 +348,7 @@ void GenerateEncodedStrings(TestDataFile* file) {
for (int i = 0; i < 100; ++i) {
unsigned long long n1 = big(random);
int n2 = medium(random);
- char ch = small(random);
+ char ch = static_cast<char>(small(random));
if (ch == '"' || ch == '\\') {
ch = '\t';
}
@@ -359,7 +359,7 @@ void GenerateEncodedStrings(TestDataFile* file) {
for (int i = 0; i < 100; ++i) {
const long long n1 = big(random);
const unsigned n2 = medium(random);
- const char ch = small(random);
+ const char ch = static_cast<char>(small(random));
MAKE_TEST_CASE(
"%s: %lld 0x%16u%08X %d", std::to_string(i).c_str(), n1, n2, n2, ch);
diff --git a/pw_tokenizer/py/BUILD.gn b/pw_tokenizer/py/BUILD.gn
index b0006ed10..2ac30448c 100644
--- a/pw_tokenizer/py/BUILD.gn
+++ b/pw_tokenizer/py/BUILD.gn
@@ -40,6 +40,7 @@ pw_python_package("py") {
"pw_tokenizer/detokenize.py",
"pw_tokenizer/elf_reader.py",
"pw_tokenizer/encode.py",
+ "pw_tokenizer/parse_message.py",
"pw_tokenizer/proto/__init__.py",
"pw_tokenizer/serial_detokenizer.py",
"pw_tokenizer/tokens.py",
diff --git a/pw_tokenizer/py/decode_test.py b/pw_tokenizer/py/decode_test.py
index c0c436616..be08eb824 100755
--- a/pw_tokenizer/py/decode_test.py
+++ b/pw_tokenizer/py/decode_test.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright 2020 The Pigweed Authors
+# Copyright 2022 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
@@ -14,6 +14,7 @@
# the License.
"""Tests the tokenized string decode module."""
+from datetime import datetime
import unittest
import tokenized_string_decoding_test_data as tokenized_string
@@ -21,7 +22,7 @@ import varint_test_data
from pw_tokenizer import decode
-def error(msg, value=None):
+def error(msg, value=None) -> str:
"""Formats msg as the message for an argument that failed to parse."""
if value is None:
return '<[{}]>'.format(msg)
@@ -30,13 +31,13 @@ def error(msg, value=None):
class TestDecodeTokenized(unittest.TestCase):
"""Tests decoding tokenized strings with various arguments."""
- def test_decode_generated_data(self):
+ def test_decode_generated_data(self) -> None:
self.assertGreater(len(tokenized_string.TEST_DATA), 100)
for fmt, decoded, encoded in tokenized_string.TEST_DATA:
self.assertEqual(decode.decode(fmt, encoded, True), decoded)
- def test_unicode_decode_errors(self):
+ def test_unicode_decode_errors(self) -> None:
"""Tests unicode errors, which do not occur in the C++ decoding code."""
self.assertEqual(decode.decode('Why, %c', b'\x01', True),
'Why, ' + error('%c ERROR', -1))
@@ -55,12 +56,12 @@ class TestDecodeTokenized(unittest.TestCase):
self.assertEqual(decode.decode('%c', b'\xff\xff\xff\xff\x0f', True),
error('%c ERROR', -2147483648))
- def test_ignore_errors(self):
+ def test_ignore_errors(self) -> None:
self.assertEqual(decode.decode('Why, %c', b'\x01'), 'Why, %c')
self.assertEqual(decode.decode('%s %d', b'\x01!'), '! %d')
- def test_pointer(self):
+ def test_pointer(self) -> None:
"""Tests pointer args, which are not natively supported in Python."""
self.assertEqual(decode.decode('Hello: %p', b'\x00', True),
'Hello: 0x00000000')
@@ -69,8 +70,8 @@ class TestDecodeTokenized(unittest.TestCase):
class TestIntegerDecoding(unittest.TestCase):
- """Test decoding variable-length integers."""
- def test_decode_generated_data(self):
+ """Tests decoding variable-length integers."""
+ def test_decode_generated_data(self) -> None:
test_data = varint_test_data.TEST_DATA
self.assertGreater(len(test_data), 100)
@@ -86,5 +87,44 @@ class TestIntegerDecoding(unittest.TestCase):
bytearray(encoded)).value)
+class TestFormattedString(unittest.TestCase):
+ """Tests scoring how successfully a formatted string decoded."""
+ def test_no_args(self) -> None:
+ result = decode.FormatString('string').format(b'')
+
+ self.assertTrue(result.ok())
+ self.assertEqual(result.score(), (True, True, 0, 0, datetime.max))
+
+ def test_one_arg(self) -> None:
+ result = decode.FormatString('%d').format(b'\0')
+
+ self.assertTrue(result.ok())
+ self.assertEqual(result.score(), (True, True, 0, 1, datetime.max))
+
+ def test_missing_args(self) -> None:
+ result = decode.FormatString('%p%d%d').format(b'\x02\x80')
+
+ self.assertFalse(result.ok())
+ self.assertEqual(result.score(), (False, True, -2, 3, datetime.max))
+ self.assertGreater(result.score(), result.score(datetime.now()))
+ self.assertGreater(result.score(datetime.now()),
+ result.score(datetime.min))
+
+ def test_compare_score(self) -> None:
+ all_args_ok = decode.FormatString('%d%d%d').format(b'\0\0\0')
+ missing_one_arg = decode.FormatString('%d%d%d').format(b'\0\0')
+ missing_two_args = decode.FormatString('%d%d%d').format(b'\0')
+ all_args_extra_data = decode.FormatString('%d%d%d').format(b'\0\0\0\1')
+ missing_one_arg_extra_data = decode.FormatString('%d%d%d').format(
+ b'\0' + b'\x80' * 100)
+
+ self.assertGreater(all_args_ok.score(), missing_one_arg.score())
+ self.assertGreater(missing_one_arg.score(), missing_two_args.score())
+ self.assertGreater(missing_two_args.score(),
+ all_args_extra_data.score())
+ self.assertGreater(all_args_extra_data.score(),
+ missing_one_arg_extra_data.score())
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/pw_tokenizer/py/pw_tokenizer/decode.py b/pw_tokenizer/py/pw_tokenizer/decode.py
index 30d87711c..f6ca50364 100644
--- a/pw_tokenizer/py/pw_tokenizer/decode.py
+++ b/pw_tokenizer/py/pw_tokenizer/decode.py
@@ -20,6 +20,7 @@ Missing, truncated, or otherwise corrupted arguments are handled and displayed
in the resulting string with an error message.
"""
+from datetime import datetime
import re
import struct
from typing import Iterable, List, NamedTuple, Match, Sequence, Tuple
@@ -275,7 +276,7 @@ class DecodedArg:
return self.format()
def __repr__(self) -> str:
- return 'DecodedArg({!r})'.format(self)
+ return f'DecodedArg({self})'
def parse_format_specifiers(format_string: str) -> Iterable[FormatSpec]:
@@ -288,6 +289,33 @@ class FormattedString(NamedTuple):
args: Sequence[DecodedArg]
remaining: bytes
+ def ok(self) -> bool:
+ """Arg data decoded successfully and all expected args were found."""
+ return all(arg.ok() for arg in self.args) and not self.remaining
+
+ def score(self, date_removed: datetime = None) -> tuple:
+ """Returns a key for sorting by how successful a decode was.
+
+ Decoded strings are sorted by whether they
+
+ 1. decoded all bytes for all arguments without errors,
+ 2. decoded all data,
+ 3. have the fewest decoding errors,
+ 4. decoded the most arguments successfully, or
+ 5. have the most recent removal date, if they were removed.
+
+ This must match the collision resolution logic in detokenize.cc.
+
+ To format a list of FormattedStrings from most to least successful,
+ use sort(key=FormattedString.score, reverse=True).
+ """
+ return (
+ self.ok(), # decocoded all data and all expected args were found
+ not self.remaining, # decoded all data
+ -sum(not arg.ok() for arg in self.args), # fewest errors
+ len(self.args), # decoded the most arguments
+ date_removed or datetime.max) # most recently present
+
class FormatString:
"""Represents a printf-style format string."""
diff --git a/pw_tokenizer/py/pw_tokenizer/detokenize.py b/pw_tokenizer/py/pw_tokenizer/detokenize.py
index ddd698dd8..8f94fa04e 100755
--- a/pw_tokenizer/py/pw_tokenizer/detokenize.py
+++ b/pw_tokenizer/py/pw_tokenizer/detokenize.py
@@ -34,7 +34,6 @@ messages from a file or stdin.
import argparse
import base64
import binascii
-from datetime import datetime
import io
import logging
import os
@@ -44,8 +43,9 @@ import string
import struct
import sys
import time
-from typing import (AnyStr, BinaryIO, Callable, Dict, List, Iterable, Iterator,
- Match, NamedTuple, Optional, Pattern, Tuple, Union)
+from typing import (AnyStr, BinaryIO, Callable, Dict, List, Iterable, IO,
+ Iterator, Match, NamedTuple, Optional, Pattern, Tuple,
+ Union)
try:
from pw_tokenizer import database, decode, encode, tokens
@@ -82,25 +82,7 @@ class DetokenizedString:
for entry, fmt in format_string_entries:
result = fmt.format(encoded_message[ENCODED_TOKEN.size:],
show_errors)
-
- # Sort competing entries so the most likely matches appear first.
- # Decoded strings are prioritized by whether they
- #
- # 1. decoded all bytes for all arguments without errors,
- # 2. decoded all data,
- # 3. have the fewest decoding errors,
- # 4. decoded the most arguments successfully, or
- # 5. have the most recent removal date, if they were removed.
- #
- # This must match the collision resolution logic in detokenize.cc.
- score: Tuple = (
- all(arg.ok() for arg in result.args) and not result.remaining,
- not result.remaining, # decoded all data
- -sum(not arg.ok() for arg in result.args), # fewest errors
- len(result.args), # decoded the most arguments
- entry.date_removed or datetime.max) # most recently present
-
- decode_attempts.append((score, result))
+ decode_attempts.append((result.score(entry.date_removed), result))
# Sort the attempts by the score so the most likely results are first.
decode_attempts.sort(key=lambda value: value[0], reverse=True)
@@ -299,11 +281,14 @@ class Detokenizer:
return decode_and_detokenize
+_PathOrFile = Union[IO, str, Path]
+
+
class AutoUpdatingDetokenizer(Detokenizer):
"""Loads and updates a detokenizer from database paths."""
class _DatabasePath:
"""Tracks the modified time of a path or file object."""
- def __init__(self, path):
+ def __init__(self, path: _PathOrFile) -> None:
self.path = path if isinstance(path, (str, Path)) else path.name
self._modified_time: Optional[float] = self._last_modified_time()
@@ -329,7 +314,7 @@ class AutoUpdatingDetokenizer(Detokenizer):
return database.load_token_database()
def __init__(self,
- *paths_or_files,
+ *paths_or_files: _PathOrFile,
min_poll_period_s: float = 1.0) -> None:
self.paths = tuple(self._DatabasePath(path) for path in paths_or_files)
self.min_poll_period_s = min_poll_period_s
diff --git a/pw_tokenizer/py/pw_tokenizer/parse_message.py b/pw_tokenizer/py/pw_tokenizer/parse_message.py
new file mode 100644
index 000000000..f8655e1f3
--- /dev/null
+++ b/pw_tokenizer/py/pw_tokenizer/parse_message.py
@@ -0,0 +1,182 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+"""Parses the arguments in a Base64-encoded tokenized message.
+
+This is useful for attempting to decode tokenized messages with arguments for
+which the token is not recognized.
+"""
+
+import argparse
+import base64
+from dataclasses import dataclass
+import logging
+import sys
+from typing import Collection, Iterable, Iterator, Sequence
+
+import pw_cli.log
+from pw_tokenizer.decode import FormatString, FormattedString
+
+_LOG: logging.Logger = logging.getLogger('pw_tokenizer')
+
+DEFAULT_FORMAT_SPECS = (
+ '%s',
+ '%d',
+ '%f',
+)
+
+DEFAULT_MAX_ARGS = 8
+PREFIX = '$'
+
+
+def attempt_to_decode(
+ arg_data: bytes,
+ format_specs: Collection[str] = DEFAULT_FORMAT_SPECS,
+ max_args: int = DEFAULT_MAX_ARGS,
+ yield_failures: bool = False) -> Iterator[FormattedString]:
+ """Attemps to decode arguments using the provided format specifiers."""
+ format_strings = [(0, '')] # (argument count, format string)
+
+ # Each argument requires at least 1 byte.
+ max_args = min(max_args, len(arg_data))
+
+ while format_strings:
+ arg_count, string = format_strings.pop(0)
+ decode_attempt = FormatString(string).format(arg_data)
+
+ if yield_failures or decode_attempt.ok():
+ yield decode_attempt
+
+ if arg_count < max_args:
+ format_strings.extend(
+ (arg_count + 1, string + spec) for spec in format_specs)
+
+
+@dataclass(frozen=True)
+class TokenizedMessage:
+ string: str
+ binary: bytes
+
+ @property
+ def token(self) -> int:
+ return int.from_bytes(self.binary[:4], 'little')
+
+ @property
+ def binary_args(self) -> bytes:
+ return self.binary[4:]
+
+ @classmethod
+ def parse(cls, message: str, prefix: str = '$') -> 'TokenizedMessage':
+ if not message.startswith(prefix):
+ raise ValueError(
+ f'{message} does not start wtih {prefix!r} as expected')
+
+ binary = base64.b64decode(message[1:])
+
+ if len(binary) < 4:
+ raise ValueError(f'{message} is only {len(binary)} bytes; '
+ 'tokenized messages must be at least 4 bytes')
+
+ return cls(message, binary)
+
+
+def _read_stdin():
+ try:
+ while True:
+ yield input()
+ except KeyboardInterrupt:
+ return
+
+
+def _text_list(items: Sequence, conjunction: str = 'or') -> str:
+ if len(items) == 1:
+ return str(items[0])
+
+ return f'{", ".join(str(i) for i in items[:-1])} {conjunction} {items[-1]}'
+
+
+def main(messages: Iterable[str], max_args: int, specs: Sequence[str],
+ show_failures: bool) -> int:
+ """Parses the arguments for a series of tokenized messages."""
+ exit_code = 0
+
+ for message in iter(messages) if messages else _read_stdin():
+ if not message:
+ continue
+
+ if not message.startswith(PREFIX):
+ message = PREFIX + message
+
+ _LOG.info('Decoding arguments for %r', message)
+ try:
+ parsed = TokenizedMessage.parse(message)
+ except ValueError as exc:
+ _LOG.error('%s', exc)
+ exit_code = 2
+ continue
+
+ _LOG.info('Binary: %r [%s] (%d bytes)', parsed.binary,
+ parsed.binary.hex(' ', 1), len(parsed.binary))
+ _LOG.info('Token: 0x%08x', parsed.token)
+ _LOG.info('Args: %r [%s] (%d bytes)', parsed.binary_args,
+ parsed.binary_args.hex(' ', 1), len(parsed.binary_args))
+ _LOG.info('Decoding with up to %d %s arguments', max_args,
+ _text_list(specs))
+
+ results = sorted(attempt_to_decode(parsed.binary_args, specs, max_args,
+ show_failures),
+ key=FormattedString.score,
+ reverse=True)
+
+ if not any(result.ok() for result in results):
+ _LOG.warning(
+ ' No combinations of up to %d %s arguments decoded '
+ 'successfully', max_args, _text_list(specs))
+ exit_code = 1
+
+ for i, result in enumerate(results, 1):
+ _LOG.info( # pylint: disable=logging-fstring-interpolation
+ f' Attempt %{len(str(len(results)))}d: [%s] %s', i,
+ ' '.join(str(a.specifier) for a in result.args),
+ ' '.join(str(a) for a in result.args))
+ print()
+
+ return exit_code
+
+
+def _parse_args() -> argparse.Namespace:
+ parser = argparse.ArgumentParser(
+ description=__doc__,
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ parser.add_argument('--max-args',
+ default=DEFAULT_MAX_ARGS,
+ type=int,
+ help='Maximum number of printf-style arguments')
+ parser.add_argument('--specs',
+ nargs='*',
+ default=DEFAULT_FORMAT_SPECS,
+ help='Which printf-style format specifiers to check')
+ parser.add_argument('--show-failures',
+ action='store_true',
+ help='Show argument combintations that fail to decode')
+ parser.add_argument(
+ 'messages',
+ nargs='*',
+ help=
+ 'Base64-encoded tokenized messages to decode; omit to read from stdin')
+ return parser.parse_args()
+
+
+if __name__ == '__main__':
+ pw_cli.log.install()
+ sys.exit(main(**vars(_parse_args())))
diff --git a/pw_toolchain/docs.rst b/pw_toolchain/docs.rst
index 875506724..81f60318c 100644
--- a/pw_toolchain/docs.rst
+++ b/pw_toolchain/docs.rst
@@ -109,9 +109,10 @@ provide ``"the_path/.*"`` to exclude all files in all directories under
The build argument ``pw_toolchain_STATIC_ANALYSIS_SKIP_INCLUDE_PATHS`` is used
used to exclude header files from the analysis. This argument must be a list of
-POSIX-style path suffixes for include paths. For example, passing
-``the_path/include`` excludes all header files that are accessed from include
-paths ending in ``the_path/include``.
+POSIX-style path suffixes for include paths, or regular expressions. For
+example, passing ``the_path/include`` excludes all header files that are
+accessed from include paths ending in ``the_path/include``, while passing
+``.*/third_party/.*`` excludes all third-party header files.
Provided toolchains
-------------------
diff --git a/pw_toolchain/generate_toolchain.gni b/pw_toolchain/generate_toolchain.gni
index ce925bedd..db057c24d 100644
--- a/pw_toolchain/generate_toolchain.gni
+++ b/pw_toolchain/generate_toolchain.gni
@@ -251,17 +251,11 @@ template("generate_toolchain") {
_link_flags += [
# Output a map file that shows symbols and their location.
"-Wl,-map,$_link_mapfile",
-
- # Delete unreferenced sections. Helpful with -ffunction-sections.
- "-Wl,-dead_strip",
]
} else {
_link_flags += [
# Output a map file that shows symbols and their location.
"-Wl,-Map,$_link_mapfile",
-
- # Delete unreferenced sections. Helpful with -ffunction-sections.
- "-Wl,--gc-sections",
]
}
diff --git a/pw_toolchain/host_clang/BUILD.gn b/pw_toolchain/host_clang/BUILD.gn
index 61d00aa9e..bed41fbc8 100644
--- a/pw_toolchain/host_clang/BUILD.gn
+++ b/pw_toolchain/host_clang/BUILD.gn
@@ -13,6 +13,7 @@
# the License.
import("//build_overrides/pigweed.gni")
+import("//build_overrides/pigweed_environment.gni")
# See https://github.com/google/sanitizers
config("sanitize_address") {
@@ -113,7 +114,7 @@ config("xcode_sysroot") {
# Pull the appropriate paths from our Pigweed env setup.
config("no_system_libcpp") {
if (current_os == "mac") {
- install_dir = getenv("PW_PIGWEED_CIPD_INSTALL_DIR")
+ install_dir = dir_cipd_pigweed
assert(install_dir != "",
"You forgot to activate the Pigweed environment; " +
"did you source pw_env_setup/setup.sh?")
@@ -122,7 +123,7 @@ config("no_system_libcpp") {
"-nostdlib++",
# Use the libc++ from CIPD.
- getenv("PW_PIGWEED_CIPD_INSTALL_DIR") + "/lib/libc++.a",
+ dir_cipd_pigweed + "/lib/libc++.a",
]
}
}
diff --git a/pw_toolchain/py/pw_toolchain/clang_tidy.py b/pw_toolchain/py/pw_toolchain/clang_tidy.py
index 12a707e08..fd7c625f0 100644
--- a/pw_toolchain/py/pw_toolchain/clang_tidy.py
+++ b/pw_toolchain/py/pw_toolchain/clang_tidy.py
@@ -104,7 +104,9 @@ def _filter_include_paths(args: Iterable[str],
for arg in args:
if arg.startswith('-I'):
path = Path(arg[2:]).as_posix()
- if any(path.endswith(f) for f in filters):
+ if any(
+ path.endswith(f) or re.match(f, str(path))
+ for f in filters):
yield '-isystem' + arg[2:]
continue
diff --git a/pw_toolchain/static_analysis_toolchain.gni b/pw_toolchain/static_analysis_toolchain.gni
index f23905bc0..732f2094d 100644
--- a/pw_toolchain/static_analysis_toolchain.gni
+++ b/pw_toolchain/static_analysis_toolchain.gni
@@ -28,14 +28,18 @@ declare_args() {
pw_toolchain_STATIC_ANALYSIS_SKIP_SOURCES_RES = []
# Disable clang-tidy for specific include paths. In the clang-tidy command,
- # include paths that end with one of these are switched from -I to -isystem,
- # which causes clang-tidy to ignore them. Unfortunately, clang-tidy provides
- # no other way to filter header files.
+ # include paths that end with one of these, or match as a regular expression,
+ # are switched from -I to -isystem, which causes clang-tidy to ignore them.
+ # Unfortunately, clang-tidy provides no other way to filter header files.
#
# For example, the following ignores header files in "mbedtls/include":
#
# pw_toolchain_STATIC_ANALYSIS_SKIP_INCLUDE_PATHS = ["mbedtls/include"]
#
+ # While the following ignores all third-party header files:
+ #
+ # pw_toolchain_STATIC_ANALYSIS_SKIP_INCLUDE_PATHS = [".*/third_party/.*"]
+ #
pw_toolchain_STATIC_ANALYSIS_SKIP_INCLUDE_PATHS = []
}
diff --git a/pw_trace_tokenized/example/basic.cc b/pw_trace_tokenized/example/basic.cc
index 0e020e53c..a3894e331 100644
--- a/pw_trace_tokenized/example/basic.cc
+++ b/pw_trace_tokenized/example/basic.cc
@@ -12,17 +12,17 @@
// License for the specific language governing permissions and limitations under
// the License.
//==============================================================================
-// BUID
+// BUILD
// ninja -C out
-// host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_basic
+// pw_strict_host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_basic
//
// RUN
-// ./out/host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_basic
+// ./out/pw_strict_host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_basic
// trace.bin
//
// DECODE
// python pw_trace_tokenized/py/trace_tokenized.py -i trace.bin -o trace.json
-// ./out/host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_basic#trace
+// ./out/pw_strict_host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_basic#trace
//
// VIEW
// In chrome navigate to chrome://tracing, and load the trace.json file.
@@ -46,4 +46,4 @@ int main(int argc, char** argv) { // Take filename as arg
PW_LOG_INFO("Running basic trace example...\n");
RunTraceSampleApp();
return 0;
-} \ No newline at end of file
+}
diff --git a/pw_trace_tokenized/example/filter.cc b/pw_trace_tokenized/example/filter.cc
index 5681cd770..6d68fb9e1 100644
--- a/pw_trace_tokenized/example/filter.cc
+++ b/pw_trace_tokenized/example/filter.cc
@@ -12,17 +12,17 @@
// License for the specific language governing permissions and limitations under
// the License.
//==============================================================================
-// BUID
+// BUILD
// ninja -C out
-// host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_filter
+// pw_strict_host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_filter
//
// RUN
-// ./out/host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_filter
+// ./out/pw_strict_host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_filter
// trace.bin
//
// DECODE
// python pw_trace_tokenized/py/trace_tokenized.py -i trace.bin -o trace.json
-// ./out/host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_basic#trace
+// ./out/pw_strict_host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_basic#trace
//
// VIEW
// In chrome navigate to chrome://tracing, and load the trace.json file.
diff --git a/pw_trace_tokenized/example/rpc.cc b/pw_trace_tokenized/example/rpc.cc
index 41a5e17f8..5041bc3c2 100644
--- a/pw_trace_tokenized/example/rpc.cc
+++ b/pw_trace_tokenized/example/rpc.cc
@@ -23,17 +23,17 @@ default_args = {
BUILD
ninja -C out
-host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_rpc
+pw_strict_host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_rpc
RUN
-./out/host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_rpc
+./out/pw_strict_host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_rpc
DECODE
python pw_trace_tokenized/py/pw_trace_tokenized/get_trace.py
-s localhost:33000
-o trace.json
-t
- out/host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_rpc
+ out/pw_strict_host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_rpc
pw_trace_tokenized/pw_trace_protos/trace_rpc.proto
VIEW
@@ -41,6 +41,7 @@ In chrome navigate to chrome://tracing, and load the trace.json file.
*/
#include <thread>
+#include "pw_assert/check.h"
#include "pw_log/log.h"
#include "pw_rpc/server.h"
#include "pw_rpc_system_server/rpc_server.h"
@@ -57,7 +58,7 @@ void RpcThread() {
// Set up the server and start processing data.
pw::rpc::system_server::Server().RegisterService(trace_service);
- pw::rpc::system_server::Start();
+ PW_CHECK_OK(pw::rpc::system_server::Start());
}
} // namespace
@@ -71,4 +72,4 @@ int main() {
PW_LOG_INFO("Running basic trace example...\n");
RunTraceSampleApp();
return 0;
-} \ No newline at end of file
+}
diff --git a/pw_trace_tokenized/example/trigger.cc b/pw_trace_tokenized/example/trigger.cc
index 44ebc93a3..1995aff42 100644
--- a/pw_trace_tokenized/example/trigger.cc
+++ b/pw_trace_tokenized/example/trigger.cc
@@ -12,17 +12,17 @@
// License for the specific language governing permissions and limitations under
// the License.
//==============================================================================
-// BUID
+// BUILD
// ninja -C out
-// host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_trigger
+// pw_strict_host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_trigger
//
// RUN
-// ./out/host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_trigger
+// ./out/pw_strict_host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_trigger
// trace.bin
//
// DECODE
// python pw_trace_tokenized/py/trace_tokenized.py -i trace.bin -o trace.json
-// ./out/host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_basic#trace
+// ./out/pw_strict_host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_basic#trace
//
// VIEW
// In chrome navigate to chrome://tracing, and load the trace.json file.
diff --git a/pw_trace_tokenized/py/pw_trace_tokenized/get_trace.py b/pw_trace_tokenized/py/pw_trace_tokenized/get_trace.py
index 116c9f284..1c3aa9b9d 100755
--- a/pw_trace_tokenized/py/pw_trace_tokenized/get_trace.py
+++ b/pw_trace_tokenized/py/pw_trace_tokenized/get_trace.py
@@ -12,16 +12,19 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
-r"""
+"""
Generates json trace files viewable using chrome://tracing using RPCs from a
connected HdlcRpcClient.
Example usage:
python pw_trace_tokenized/py/pw_trace_tokenized/get_trace.py -s localhost:33000
-o trace.json
- -t out/host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_rpc
+ -t
+ out/pw_strict_host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_rpc
pw_trace_tokenized/pw_trace_protos/trace_rpc.proto
-"""
+""" # pylint: disable=line-too-long
+# pylint: enable=line-too-long
+
import argparse
import logging
import glob
diff --git a/pw_trace_tokenized/py/pw_trace_tokenized/trace_tokenized.py b/pw_trace_tokenized/py/pw_trace_tokenized/trace_tokenized.py
index 7fb82db0c..25108a5be 100755
--- a/pw_trace_tokenized/py/pw_trace_tokenized/trace_tokenized.py
+++ b/pw_trace_tokenized/py/pw_trace_tokenized/trace_tokenized.py
@@ -12,14 +12,16 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
-r"""
+"""
Generates json trace files viewable using chrome://tracing from binary
trace files.
Example usage:
python pw_trace_tokenized/py/trace_tokenized.py -i trace.bin -o trace.json
-./out/host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_basic
-"""
+out/pw_strict_host_clang_debug/obj/pw_trace_tokenized/bin/trace_tokenized_example_basic
+""" # pylint: disable=line-too-long
+# pylint: enable=line-too-long
+
from enum import IntEnum
import argparse
import logging
diff --git a/pw_trace_tokenized/trace_buffer.cc b/pw_trace_tokenized/trace_buffer.cc
index a7da5a988..31314ec7a 100644
--- a/pw_trace_tokenized/trace_buffer.cc
+++ b/pw_trace_tokenized/trace_buffer.cc
@@ -73,7 +73,8 @@ class TraceBuffer {
};
ConstByteSpan DeringAndViewRawBuffer() {
- ring_buffer_.Dering();
+ ring_buffer_.Dering()
+ .IgnoreError(); // TODO(pwbug/387): Handle Status properly
return ByteSpan(raw_buffer_, ring_buffer_.TotalUsedBytes());
}
diff --git a/pw_trace_tokenized/trace_rpc_service_nanopb.cc b/pw_trace_tokenized/trace_rpc_service_nanopb.cc
index 2abd0ca20..55b3f43f2 100644
--- a/pw_trace_tokenized/trace_rpc_service_nanopb.cc
+++ b/pw_trace_tokenized/trace_rpc_service_nanopb.cc
@@ -45,7 +45,8 @@ void TraceService::GetTraceData(
while (trace_buffer->PeekFront(
std::as_writable_bytes(std::span(buffer.data.bytes)), &size) !=
pw::Status::OutOfRange()) {
- trace_buffer->PopFront();
+ trace_buffer->PopFront()
+ .IgnoreError(); // TODO(pwbug/387): Handle Status properly
buffer.data.size = size;
pw::Status status = writer.Write(buffer);
if (!status.ok()) {
@@ -54,6 +55,6 @@ void TraceService::GetTraceData(
break;
}
}
- writer.Finish();
+ writer.Finish().IgnoreError(); // TODO(pwbug/387): Handle Status properly
}
} // namespace pw::trace
diff --git a/pw_transfer/BUILD.bazel b/pw_transfer/BUILD.bazel
index ad9673c6a..3bae8c0b5 100644
--- a/pw_transfer/BUILD.bazel
+++ b/pw_transfer/BUILD.bazel
@@ -1,4 +1,4 @@
-# Copyright 2021 The Pigweed Authors
+# Copyright 2022 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
@@ -32,87 +32,86 @@ pw_cc_library(
)
pw_cc_library(
- name = "pw_transfer",
+ name = "core",
srcs = [
- "client_connection.cc",
- "public/pw_transfer/internal/client_connection.h",
+ "chunk.cc",
+ "client_context.cc",
+ "context.cc",
+ "public/pw_transfer/internal/chunk.h",
+ "public/pw_transfer/internal/client_context.h",
+ "public/pw_transfer/internal/context.h",
+ "public/pw_transfer/internal/event.h",
"public/pw_transfer/internal/server_context.h",
+ "rate_estimate.cc",
"server_context.cc",
- "transfer.cc",
+ "transfer_thread.cc",
],
hdrs = [
"public/pw_transfer/handler.h",
- "public/pw_transfer/transfer.h",
+ "public/pw_transfer/rate_estimate.h",
+ "public/pw_transfer/transfer_thread.h",
],
includes = ["public"],
deps = [
- ":context",
+ ":config",
":transfer_pwpb",
- "//pw_assert",
"//pw_bytes",
+ "//pw_chrono:system_clock",
"//pw_containers:intrusive_list",
"//pw_log",
+ "//pw_preprocessor",
"//pw_protobuf",
"//pw_result",
- "//pw_rpc:internal_packet_pwpb",
+ "//pw_rpc:client_server",
+ "//pw_rpc:internal_packet_cc.pwpb",
"//pw_rpc/raw:client_api",
"//pw_rpc/raw:server_api",
"//pw_status",
"//pw_stream",
+ "//pw_sync:binary_semaphore",
+ "//pw_sync:timed_thread_notification",
+ "//pw_thread:thread_core",
+ "//pw_varint",
],
)
pw_cc_library(
- name = "client",
+ name = "pw_transfer",
srcs = [
- "client.cc",
- "client_context.cc",
- "public/pw_transfer/internal/client_context.h",
+ "transfer.cc",
],
hdrs = [
- "public/pw_transfer/client.h",
+ "public/pw_transfer/transfer.h",
],
includes = ["public"],
deps = [
- ":context",
+ ":core",
"//pw_assert",
- "//pw_function",
+ "//pw_bytes",
"//pw_log",
- "//pw_rpc/raw:client_api",
+ "//pw_result",
+ "//pw_rpc:internal_packet_cc.pwpb",
"//pw_rpc/raw:server_api",
+ "//pw_status",
"//pw_stream",
- "//pw_sync:lock_annotations",
- "//pw_sync:mutex",
],
)
pw_cc_library(
- name = "context",
+ name = "client",
srcs = [
- "chunk.cc",
- "chunk_data_buffer.cc",
- "context.cc",
- "public/pw_transfer/internal/chunk.h",
- "public/pw_transfer/internal/chunk_data_buffer.h",
+ "client.cc",
],
hdrs = [
- "public/pw_transfer/internal/context.h",
+ "public/pw_transfer/client.h",
],
includes = ["public"],
deps = [
- ":config",
- ":transfer_pwpb",
- "//pw_bytes",
- "//pw_chrono:system_timer",
- "//pw_protobuf",
- "//pw_result",
- "//pw_rpc:client_server",
- "//pw_status",
+ ":core",
+ "//pw_assert",
+ "//pw_function",
+ "//pw_log",
"//pw_stream",
- "//pw_sync:interrupt_spin_lock",
- "//pw_sync:lock_annotations",
- "//pw_varint",
- "//pw_work_queue",
],
)
@@ -122,7 +121,7 @@ pw_cc_library(
"pw_transfer_private/chunk_testing.h",
],
deps = [
- ":context",
+ ":core",
"//pw_containers",
],
)
@@ -143,12 +142,28 @@ pw_cc_test(
)
pw_cc_test(
+ name = "transfer_thread_test",
+ srcs = ["transfer_thread_test.cc"],
+ deps = [
+ ":pw_transfer",
+ ":test_helpers",
+ "//pw_rpc:thread_testing",
+ "//pw_rpc/raw:client_testing",
+ "//pw_rpc/raw:test_method_context",
+ "//pw_thread:thread",
+ "//pw_unit_test",
+ ],
+)
+
+pw_cc_test(
name = "transfer_test",
srcs = ["transfer_test.cc"],
deps = [
":pw_transfer",
":test_helpers",
+ "//pw_rpc:thread_testing",
"//pw_rpc/raw:test_method_context",
+ "//pw_thread:thread",
"//pw_unit_test",
],
)
@@ -159,6 +174,7 @@ pw_cc_test(
deps = [
":client",
":test_helpers",
+ "//pw_rpc:thread_testing",
"//pw_rpc/raw:client_testing",
"//pw_thread:sleep",
"//pw_thread:thread",
diff --git a/pw_transfer/BUILD.gn b/pw_transfer/BUILD.gn
index 0ecf041ea..cf47f33fc 100644
--- a/pw_transfer/BUILD.gn
+++ b/pw_transfer/BUILD.gn
@@ -1,4 +1,4 @@
-# Copyright 2021 The Pigweed Authors
+# Copyright 2022 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
@@ -18,6 +18,8 @@ import("$dir_pw_build/module_config.gni")
import("$dir_pw_docgen/docs.gni")
import("$dir_pw_protobuf_compiler/proto.gni")
import("$dir_pw_rpc/internal/integration_test_ports.gni")
+import("$dir_pw_thread/backend.gni")
+import("$dir_pw_toolchain/generate_toolchain.gni")
import("$dir_pw_unit_test/test.gni")
declare_args() {
@@ -45,86 +47,82 @@ pw_source_set("config") {
pw_source_set("pw_transfer") {
public_configs = [ ":public_include_path" ]
public_deps = [
- ":context",
+ ":core",
":proto.raw_rpc",
- "$dir_pw_containers:intrusive_list",
dir_pw_assert,
+ dir_pw_bytes,
dir_pw_result,
dir_pw_status,
dir_pw_stream,
]
- deps = [
- ":proto.pwpb",
- dir_pw_log,
- ]
- public = [
- "public/pw_transfer/handler.h",
- "public/pw_transfer/transfer.h",
- ]
- sources = [
- "client_connection.cc",
- "public/pw_transfer/internal/client_connection.h",
- "public/pw_transfer/internal/server_context.h",
- "server_context.cc",
- "transfer.cc",
- ]
+ deps = [ dir_pw_log ]
+ public = [ "public/pw_transfer/transfer.h" ]
+ sources = [ "transfer.cc" ]
}
pw_source_set("client") {
public_configs = [ ":public_include_path" ]
public_deps = [
- ":context",
+ ":core",
":proto.raw_rpc",
- "$dir_pw_sync:lock_annotations",
- "$dir_pw_sync:mutex",
dir_pw_assert,
dir_pw_function,
dir_pw_stream,
]
- deps = [
- ":proto.pwpb",
- dir_pw_log,
- ]
+ deps = [ dir_pw_log ]
public = [ "public/pw_transfer/client.h" ]
- sources = [
- "client.cc",
- "client_context.cc",
- "public/pw_transfer/internal/client_context.h",
- ]
+ sources = [ "client.cc" ]
}
-pw_source_set("context") {
+pw_source_set("core") {
public_configs = [ ":public_include_path" ]
public_deps = [
":config",
+ "$dir_pw_chrono:system_clock",
+ "$dir_pw_preprocessor",
"$dir_pw_rpc:client",
- "$dir_pw_sync:interrupt_spin_lock",
- "$dir_pw_sync:lock_annotations",
+ "$dir_pw_rpc/raw:client_api",
+ "$dir_pw_rpc/raw:server_api",
+ "$dir_pw_sync:binary_semaphore",
+ "$dir_pw_sync:timed_thread_notification",
+ "$dir_pw_thread:thread_core",
+ dir_pw_assert,
dir_pw_bytes,
dir_pw_result,
dir_pw_status,
dir_pw_stream,
- dir_pw_work_queue,
]
deps = [
":proto.pwpb",
+ dir_pw_log,
dir_pw_protobuf,
dir_pw_varint,
]
+ public = [
+ "public/pw_transfer/handler.h",
+ "public/pw_transfer/rate_estimate.h",
+ "public/pw_transfer/transfer_thread.h",
+ ]
sources = [
"chunk.cc",
- "chunk_data_buffer.cc",
+ "client_context.cc",
"context.cc",
"public/pw_transfer/internal/chunk.h",
- "public/pw_transfer/internal/chunk_data_buffer.h",
+ "public/pw_transfer/internal/client_context.h",
"public/pw_transfer/internal/context.h",
+ "public/pw_transfer/internal/event.h",
+ "public/pw_transfer/internal/server_context.h",
+ "rate_estimate.cc",
+ "server_context.cc",
+ "transfer_thread.cc",
]
friend = [ ":*" ]
+ visibility = [ ":*" ]
}
pw_source_set("test_helpers") {
public_deps = [
- ":context",
+ ":core",
dir_pw_containers,
]
sources = [ "pw_transfer_private/chunk_testing.h" ]
@@ -139,14 +137,24 @@ pw_proto_library("proto") {
}
pw_test_group("tests") {
- tests = [
- # TODO(pwbug/562): These tests flake in CQ as they're timed-based. Rewrite
- # them to access transfer internals and avoid sleeping.
- # ":client_test",
+ tests = []
- ":handler_test",
- ":transfer_test",
- ]
+ # pw_transfer requires threading.
+ if (pw_thread_THREAD_BACKEND != "") {
+ tests = [
+ ":client_test",
+ ":transfer_thread_test",
+ ]
+
+ # TODO(pwbug/441): Fix transfer tests on Windows and non-host builds.
+ if (defined(pw_toolchain_SCOPE.is_host_toolchain) &&
+ pw_toolchain_SCOPE.is_host_toolchain && host_os != "win") {
+ tests += [
+ ":handler_test",
+ ":transfer_test",
+ ]
+ }
+ }
}
pw_test("handler_test") {
@@ -160,7 +168,23 @@ pw_test("transfer_test") {
":proto.pwpb",
":pw_transfer",
":test_helpers",
+ "$dir_pw_rpc:thread_testing",
+ "$dir_pw_rpc/raw:test_method_context",
+ "$dir_pw_thread:thread",
+ ]
+}
+
+pw_test("transfer_thread_test") {
+ sources = [ "transfer_thread_test.cc" ]
+ deps = [
+ ":core",
+ ":proto.raw_rpc",
+ ":pw_transfer",
+ ":test_helpers",
+ "$dir_pw_rpc:thread_testing",
+ "$dir_pw_rpc/raw:client_testing",
"$dir_pw_rpc/raw:test_method_context",
+ "$dir_pw_thread:thread",
]
}
@@ -169,6 +193,7 @@ pw_test("client_test") {
deps = [
":client",
":test_helpers",
+ "$dir_pw_rpc:thread_testing",
"$dir_pw_rpc/raw:client_testing",
"$dir_pw_thread:sleep",
"$dir_pw_thread:thread",
@@ -200,7 +225,6 @@ pw_executable("test_rpc_server") {
"$dir_pw_stream:std_file_stream",
"$dir_pw_thread:thread",
dir_pw_log,
- dir_pw_work_queue,
]
}
@@ -211,6 +235,7 @@ pw_executable("integration_test") {
":test_server_proto.raw_rpc",
"$dir_pw_rpc:integration_testing",
"$dir_pw_sync:binary_semaphore",
+ "$dir_pw_thread:thread",
dir_pw_assert,
dir_pw_log,
dir_pw_unit_test,
diff --git a/pw_transfer/CMakeLists.txt b/pw_transfer/CMakeLists.txt
index c15e83ffa..7f03af22d 100644
--- a/pw_transfer/CMakeLists.txt
+++ b/pw_transfer/CMakeLists.txt
@@ -27,11 +27,10 @@ pw_add_module_library(pw_transfer.config
pw_add_module_library(pw_transfer
PUBLIC_DEPS
pw_assert
- pw_containers.intrusive_list
pw_result
pw_status
pw_stream
- pw_transfer.context
+ pw_transfer.core
pw_transfer.proto.raw_rpc
PRIVATE_DEPS
pw_log
@@ -46,7 +45,7 @@ pw_add_module_library(pw_transfer.client
pw_function
pw_stream
pw_sync.mutex
- pw_transfer.context
+ pw_transfer.core
pw_transfer.proto.raw_rpc
PRIVATE_DEPS
pw_log
@@ -55,17 +54,18 @@ pw_add_module_library(pw_transfer.client
pw_rpc.test_utils
)
-pw_add_module_library(pw_transfer.context
+pw_add_module_library(pw_transfer.core
PUBLIC_DEPS
pw_bytes
+ pw_chrono.system_clock
+ pw_containers.intrusive_list
pw_result
pw_rpc.client
pw_status
pw_stream
- pw_sync.interrupt_spin_lock
- pw_sync.lock_annotations
+ pw_sync.binary_semaphore
+ pw_thread.thread_core
pw_transfer.config
- pw_work_queue
PRIVATE_DEPS
pw_protobuf
pw_transfer.proto.pwpb
diff --git a/pw_transfer/chunk.cc b/pw_transfer/chunk.cc
index 86c4a35fb..7066e251e 100644
--- a/pw_transfer/chunk.cc
+++ b/pw_transfer/chunk.cc
@@ -22,6 +22,28 @@ namespace pw::transfer::internal {
namespace ProtoChunk = transfer::Chunk;
+Result<uint32_t> ExtractTransferId(ConstByteSpan message) {
+ protobuf::Decoder decoder(message);
+
+ while (decoder.Next().ok()) {
+ ProtoChunk::Fields field =
+ static_cast<ProtoChunk::Fields>(decoder.FieldNumber());
+
+ switch (field) {
+ case ProtoChunk::Fields::TRANSFER_ID: {
+ uint32_t transfer_id;
+ PW_TRY(decoder.ReadUint32(&transfer_id));
+ return transfer_id;
+ }
+
+ default:
+ continue;
+ }
+ }
+
+ return Status::DataLoss();
+}
+
Status DecodeChunk(ConstByteSpan message, Chunk& chunk) {
protobuf::Decoder decoder(message);
Status status;
diff --git a/pw_transfer/client.cc b/pw_transfer/client.cc
index 7b1ed0156..73822c246 100644
--- a/pw_transfer/client.cc
+++ b/pw_transfer/client.cc
@@ -1,4 +1,4 @@
-// Copyright 2021 The Pigweed Authors
+// Copyright 2022 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
@@ -16,14 +16,7 @@
#include "pw_transfer/client.h"
-#include <algorithm>
-#include <cstring>
-#include <mutex>
-
-#include "pw_assert/check.h"
#include "pw_log/log.h"
-#include "pw_transfer/internal/chunk.h"
-#include "pw_transfer/transfer.pwpb.h"
namespace pw::transfer {
@@ -35,13 +28,27 @@ Status Client::Read(uint32_t transfer_id,
return Status::InvalidArgument();
}
- if (!read_stream_.active()) {
- read_stream_ =
- client_.Read([this](ConstByteSpan chunk) { OnChunk(chunk, kRead); });
+ if (!has_read_stream_) {
+ rpc::RawClientReaderWriter read_stream = client_.Read(
+ [this](ConstByteSpan chunk) {
+ transfer_thread_.ProcessClientChunk(chunk);
+ },
+ [this](Status status) {
+ OnRpcError(status, internal::TransferType::kReceive);
+ });
+ transfer_thread_.SetClientReadStream(read_stream);
+ has_read_stream_ = true;
}
- return StartNewTransfer(
- transfer_id, kRead, output, std::move(on_completion), timeout);
+ transfer_thread_.StartClientTransfer(internal::TransferType::kReceive,
+ transfer_id,
+ transfer_id,
+ &output,
+ max_parameters_,
+ std::move(on_completion),
+ timeout,
+ cfg::kDefaultMaxRetries);
+ return OkStatus();
}
Status Client::Write(uint32_t transfer_id,
@@ -52,138 +59,42 @@ Status Client::Write(uint32_t transfer_id,
return Status::InvalidArgument();
}
- if (!write_stream_.active()) {
- write_stream_ =
- client_.Write([this](ConstByteSpan chunk) { OnChunk(chunk, kWrite); });
+ if (!has_write_stream_) {
+ rpc::RawClientReaderWriter write_stream = client_.Write(
+ [this](ConstByteSpan chunk) {
+ transfer_thread_.ProcessClientChunk(chunk);
+ },
+ [this](Status status) {
+ OnRpcError(status, internal::TransferType::kTransmit);
+ });
+ transfer_thread_.SetClientWriteStream(write_stream);
+ has_write_stream_ = true;
}
- return StartNewTransfer(
- transfer_id, kWrite, input, std::move(on_completion), timeout);
-}
-
-Status Client::StartNewTransfer(uint32_t transfer_id,
- Type type,
- stream::Stream& stream,
- CompletionFunc&& on_completion,
- chrono::SystemClock::duration timeout) {
- std::lock_guard lock(transfer_context_mutex_);
- ClientContext* context = nullptr;
-
- // Check the transfer ID is already being used. If not, find an available
- // transfer slot.
- for (ClientContext& ctx : transfer_contexts_) {
- if (ctx.active()) {
- if (ctx.transfer_id() == transfer_id) {
- return Status::AlreadyExists();
- }
- } else {
- context = &ctx;
- }
- }
-
- if (context == nullptr) {
- return Status::ResourceExhausted();
- }
+ transfer_thread_.StartClientTransfer(internal::TransferType::kTransmit,
+ transfer_id,
+ transfer_id,
+ &input,
+ max_parameters_,
+ std::move(on_completion),
+ timeout,
+ cfg::kDefaultMaxRetries);
- if (type == kWrite) {
- PW_LOG_DEBUG("Starting new write transfer %u",
- static_cast<unsigned>(transfer_id));
- context->StartWrite(*this,
- transfer_id,
- work_queue_,
- encoding_buffer_,
- static_cast<stream::Reader&>(stream),
- write_stream_,
- std::move(on_completion),
- timeout);
- } else {
- PW_LOG_DEBUG("Starting new read transfer %u",
- static_cast<unsigned>(transfer_id));
- context->StartRead(*this,
- transfer_id,
- work_queue_,
- encoding_buffer_,
- static_cast<stream::Writer&>(stream),
- read_stream_,
- std::move(on_completion),
- timeout);
- }
-
- return context->InitiateTransfer(max_parameters_);
+ return OkStatus();
}
-// TODO(pwbug/592): This function should be updated to only return active
-// transfers. Calling ReadChunkData() / Finish() on inactive transfers is
-// unintuitive and has led to several bugs where not all cases are handled.
-Client::ClientContext* Client::GetTransferById(uint32_t transfer_id) {
- std::lock_guard lock(transfer_context_mutex_);
- auto it =
- std::find_if(transfer_contexts_.begin(),
- transfer_contexts_.end(),
- [&transfer_id](ClientContext& c) {
- return c.initialized() && c.transfer_id() == transfer_id;
- });
+void Client::OnRpcError(Status status, internal::TransferType type) {
+ bool is_write_error = type == internal::TransferType::kTransmit;
- if (it == transfer_contexts_.end()) {
- return nullptr;
- }
-
- return it;
-}
+ PW_LOG_ERROR("Client %s stream terminated with status %d",
+ is_write_error ? "Write()" : "Read()",
+ status.code());
-void Client::OnChunk(ConstByteSpan data, Type type) {
- internal::Chunk chunk;
- if (Status status = DecodeChunk(data, chunk); !status.ok()) {
- // TODO(frolv): Handle this error case.
- return;
- }
-
- ClientContext* ctx = GetTransferById(chunk.transfer_id);
- if (ctx == nullptr) {
- // TODO(frolv): Handle this error case.
- return;
- }
-
- if (type == kRead && !ctx->is_read_transfer()) {
- PW_LOG_ERROR(
- "Received a read chunk for transfer %u, but it is a write transfer",
- static_cast<unsigned>(ctx->transfer_id()));
- if (ctx->active()) {
- // TODO(pwbug/592): Remove the active() check.
- ctx->Finish(Status::Internal());
- }
- return;
- }
-
- if (type == kWrite && !ctx->is_write_transfer()) {
- PW_LOG_ERROR(
- "Received a write chunk for transfer %u, but it is a read transfer",
- static_cast<unsigned>(ctx->transfer_id()));
- if (ctx->active()) {
- // TODO(pwbug/592): Remove the active() check.
- ctx->Finish(Status::Internal());
- }
- return;
- }
-
- if (chunk.status.has_value() && ctx->active()) {
- // A status field indicates that the transfer has finished.
- //
- // TODO(frolv): This is invoked from the RPC client thread -- should it be
- // run in the work queue instead?
- //
- // TODO(pwbug/592): Remove the active() check.
- ctx->Finish(chunk.status.value());
- return;
- }
-
- if (ctx->ReadChunkData(chunk_data_buffer_, max_parameters_, chunk)) {
- // TODO(frolv): This should be run from work_queue_.
- ctx->ProcessChunk(chunk_data_buffer_, max_parameters_);
+ if (is_write_error) {
+ has_write_stream_ = false;
+ } else {
+ has_read_stream_ = false;
}
-
- // TODO(frolv): This silences the compiler. Actually use the work queue.
- static_cast<void>(work_queue_);
}
} // namespace pw::transfer
diff --git a/pw_transfer/client_connection.cc b/pw_transfer/client_connection.cc
deleted file mode 100644
index 901ce6de8..000000000
--- a/pw_transfer/client_connection.cc
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2021 The Pigweed Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-
-#define PW_LOG_MODULE_NAME "TRN"
-
-#include "pw_transfer/internal/client_connection.h"
-
-#include "pw_log/log.h"
-#include "pw_transfer/internal/chunk.h"
-
-namespace pw::transfer::internal {
-
-void ClientConnection::SendStatusChunk(TransferType type,
- uint32_t transfer_id,
- Status status) {
- internal::Chunk chunk = {};
- chunk.transfer_id = transfer_id;
- chunk.status = status.code();
-
- Result<ConstByteSpan> result = internal::EncodeChunk(chunk, encoding_buffer_);
-
- if (!result.ok()) {
- PW_LOG_ERROR("Failed to encode final chunk for transfer %u",
- static_cast<unsigned>(transfer_id));
- return;
- }
-
- if (!stream(type).Write(result.value()).ok()) {
- PW_LOG_ERROR("Failed to send final chunk for transfer %u",
- static_cast<unsigned>(transfer_id));
- return;
- }
-}
-
-} // namespace pw::transfer::internal
diff --git a/pw_transfer/client_context.cc b/pw_transfer/client_context.cc
index 02efd2cb3..33c1aeb9f 100644
--- a/pw_transfer/client_context.cc
+++ b/pw_transfer/client_context.cc
@@ -18,40 +18,12 @@
namespace pw::transfer::internal {
-void ClientContext::StartRead(Client& client,
- uint32_t transfer_id,
- work_queue::WorkQueue& work_queue,
- EncodingBuffer& encoding_buffer,
- stream::Writer& writer,
- rpc::RawClientReaderWriter& stream,
- Function<void(Status)>&& on_completion,
- chrono::SystemClock::duration timeout) {
- PW_DCHECK(!active());
- PW_DCHECK(on_completion != nullptr);
-
- client_ = &client;
- on_completion_ = std::move(on_completion);
-
- InitializeForReceive(
- transfer_id, work_queue, encoding_buffer, stream, writer, timeout);
-}
-
-void ClientContext::StartWrite(Client& client,
- uint32_t transfer_id,
- work_queue::WorkQueue& work_queue,
- EncodingBuffer& encoding_buffer,
- stream::Reader& reader,
- rpc::RawClientReaderWriter& stream,
- Function<void(Status)>&& on_completion,
- chrono::SystemClock::duration timeout) {
- PW_DCHECK(!active());
- PW_DCHECK(on_completion != nullptr);
-
- client_ = &client;
- on_completion_ = std::move(on_completion);
-
- InitializeForTransmit(
- transfer_id, work_queue, encoding_buffer, stream, reader, timeout);
+Status ClientContext::FinalCleanup(Status status) {
+ PW_DASSERT(active());
+ if (on_completion_ != nullptr) {
+ on_completion_(status);
+ }
+ return OkStatus();
}
} // namespace pw::transfer::internal
diff --git a/pw_transfer/client_test.cc b/pw_transfer/client_test.cc
index 2303bb826..cbf82d7ba 100644
--- a/pw_transfer/client_test.cc
+++ b/pw_transfer/client_test.cc
@@ -1,4 +1,4 @@
-// Copyright 2021 The Pigweed Authors
+// Copyright 2022 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
@@ -20,6 +20,7 @@
#include "pw_assert/check.h"
#include "pw_bytes/array.h"
#include "pw_rpc/raw/client_testing.h"
+#include "pw_rpc/thread_testing.h"
#include "pw_thread/sleep.h"
#include "pw_thread/thread.h"
#include "pw_thread_stl/options.h"
@@ -31,7 +32,12 @@ namespace {
using internal::Chunk;
using pw_rpc::raw::Transfer;
-thread::Options& WorkQueueThreadOptions() {
+using namespace std::chrono_literals;
+
+PW_MODIFY_DIAGNOSTICS_PUSH();
+PW_MODIFY_DIAGNOSTIC(ignored, "-Wmissing-field-initializers");
+
+thread::Options& TransferThreadOptions() {
static thread::stl::Options options;
return options;
}
@@ -39,25 +45,27 @@ thread::Options& WorkQueueThreadOptions() {
class ReadTransfer : public ::testing::Test {
protected:
ReadTransfer(size_t max_bytes_to_receive = 0)
- : client_(context_.client(),
+ : transfer_thread_(chunk_buffer_, encode_buffer_),
+ client_(context_.client(),
context_.channel().id(),
- work_queue_,
- data_buffer_,
+ transfer_thread_,
max_bytes_to_receive),
- work_queue_thread_(WorkQueueThreadOptions(), work_queue_) {}
+ system_thread_(TransferThreadOptions(), transfer_thread_) {}
~ReadTransfer() {
- work_queue_.RequestStop();
- work_queue_thread_.join();
+ transfer_thread_.Terminate();
+ system_thread_.join();
}
rpc::RawClientTestContext<> context_;
+ Thread<1, 1> transfer_thread_;
Client client_;
- std::array<std::byte, 64> data_buffer_;
- work_queue::WorkQueueWithBuffer<4> work_queue_;
- thread::Thread work_queue_thread_;
+ std::array<std::byte, 64> chunk_buffer_;
+ std::array<std::byte, 64> encode_buffer_;
+
+ thread::Thread system_thread_;
};
constexpr auto kData32 = bytes::Initialized<32>([](size_t i) { return i; });
@@ -67,9 +75,12 @@ TEST_F(ReadTransfer, SingleChunk) {
stream::MemoryWriterBuffer<64> writer;
Status transfer_status = Status::Unknown();
- client_.Read(3, writer, [&transfer_status](Status status) {
- transfer_status = status;
- });
+ ASSERT_EQ(OkStatus(),
+ client_.Read(3, writer, [&transfer_status](Status status) {
+ transfer_status = status;
+ }));
+
+ transfer_thread_.WaitUntilEventIsProcessed();
// First transfer parameters chunk is sent.
rpc::PayloadsView payloads =
@@ -84,6 +95,8 @@ TEST_F(ReadTransfer, SingleChunk) {
context_.server().SendServerStream<Transfer::Read>(EncodeChunk(
{.transfer_id = 3u, .offset = 0, .data = kData32, .remaining_bytes = 0}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(payloads.size(), 2u);
Chunk c1 = DecodeChunk(payloads[1]);
@@ -100,9 +113,12 @@ TEST_F(ReadTransfer, MultiChunk) {
stream::MemoryWriterBuffer<64> writer;
Status transfer_status = Status::Unknown();
- client_.Read(4, writer, [&transfer_status](Status status) {
- transfer_status = status;
- });
+ ASSERT_EQ(OkStatus(),
+ client_.Read(4, writer, [&transfer_status](Status status) {
+ transfer_status = status;
+ }));
+
+ transfer_thread_.WaitUntilEventIsProcessed();
// First transfer parameters chunk is sent.
rpc::PayloadsView payloads =
@@ -118,6 +134,8 @@ TEST_F(ReadTransfer, MultiChunk) {
constexpr ConstByteSpan data(kData32);
context_.server().SendServerStream<Transfer::Read>(
EncodeChunk({.transfer_id = 4u, .offset = 0, .data = data.first(16)}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(payloads.size(), 1u);
context_.server().SendServerStream<Transfer::Read>(
@@ -125,6 +143,8 @@ TEST_F(ReadTransfer, MultiChunk) {
.offset = 16,
.data = data.subspan(16),
.remaining_bytes = 0}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(payloads.size(), 2u);
Chunk c1 = DecodeChunk(payloads[1]);
@@ -145,9 +165,11 @@ TEST_F(ReadTransfer, MultipleTransfers) {
client_.Read(3, writer, [&transfer_status](Status status) {
transfer_status = status;
}));
+ transfer_thread_.WaitUntilEventIsProcessed();
context_.server().SendServerStream<Transfer::Read>(EncodeChunk(
{.transfer_id = 3u, .offset = 0, .data = kData32, .remaining_bytes = 0}));
+ transfer_thread_.WaitUntilEventIsProcessed();
ASSERT_EQ(transfer_status, OkStatus());
transfer_status = Status::Unknown();
@@ -156,20 +178,15 @@ TEST_F(ReadTransfer, MultipleTransfers) {
client_.Read(3, writer, [&transfer_status](Status status) {
transfer_status = status;
}));
+ transfer_thread_.WaitUntilEventIsProcessed();
context_.server().SendServerStream<Transfer::Read>(EncodeChunk(
{.transfer_id = 3u, .offset = 0, .data = kData32, .remaining_bytes = 0}));
+ transfer_thread_.WaitUntilEventIsProcessed();
EXPECT_EQ(transfer_status, OkStatus());
}
-TEST_F(ReadTransfer, BusyTransferReturnsAlreadyExists) {
- stream::MemoryWriterBuffer<64> writer;
- ASSERT_EQ(OkStatus(), client_.Read(3, writer, [](Status) {}));
-
- EXPECT_EQ(Status::AlreadyExists(), client_.Read(3, writer, [](Status) {}));
-}
-
class ReadTransferMaxBytes32 : public ReadTransfer {
protected:
ReadTransferMaxBytes32() : ReadTransfer(/*max_bytes_to_receive=*/32) {}
@@ -177,7 +194,8 @@ class ReadTransferMaxBytes32 : public ReadTransfer {
TEST_F(ReadTransferMaxBytes32, SetsPendingBytesFromConstructorArg) {
stream::MemoryWriterBuffer<64> writer;
- client_.Read(5, writer, [](Status) {});
+ EXPECT_EQ(OkStatus(), client_.Read(5, writer, [](Status) {}));
+ transfer_thread_.WaitUntilEventIsProcessed();
// First transfer parameters chunk is sent.
rpc::PayloadsView payloads =
@@ -192,7 +210,8 @@ TEST_F(ReadTransferMaxBytes32, SetsPendingBytesFromConstructorArg) {
TEST_F(ReadTransferMaxBytes32, SetsPendingBytesFromWriterLimit) {
stream::MemoryWriterBuffer<16> small_writer;
- client_.Read(5, small_writer, [](Status) {});
+ EXPECT_EQ(OkStatus(), client_.Read(5, small_writer, [](Status) {}));
+ transfer_thread_.WaitUntilEventIsProcessed();
// First transfer parameters chunk is sent.
rpc::PayloadsView payloads =
@@ -209,9 +228,11 @@ TEST_F(ReadTransferMaxBytes32, MultiParameters) {
stream::MemoryWriterBuffer<64> writer;
Status transfer_status = Status::Unknown();
- client_.Read(6, writer, [&transfer_status](Status status) {
- transfer_status = status;
- });
+ ASSERT_EQ(OkStatus(),
+ client_.Read(6, writer, [&transfer_status](Status status) {
+ transfer_status = status;
+ }));
+ transfer_thread_.WaitUntilEventIsProcessed();
// First transfer parameters chunk is sent.
rpc::PayloadsView payloads =
@@ -227,6 +248,8 @@ TEST_F(ReadTransferMaxBytes32, MultiParameters) {
constexpr ConstByteSpan data(kData64);
context_.server().SendServerStream<Transfer::Read>(
EncodeChunk({.transfer_id = 6u, .offset = 0, .data = data.first(32)}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(payloads.size(), 2u);
EXPECT_EQ(transfer_status, Status::Unknown());
@@ -241,6 +264,8 @@ TEST_F(ReadTransferMaxBytes32, MultiParameters) {
.offset = 32,
.data = data.subspan(32),
.remaining_bytes = 0}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(payloads.size(), 3u);
Chunk c2 = DecodeChunk(payloads[2]);
@@ -256,9 +281,11 @@ TEST_F(ReadTransfer, UnexpectedOffset) {
stream::MemoryWriterBuffer<64> writer;
Status transfer_status = Status::Unknown();
- client_.Read(7, writer, [&transfer_status](Status status) {
- transfer_status = status;
- });
+ ASSERT_EQ(OkStatus(),
+ client_.Read(7, writer, [&transfer_status](Status status) {
+ transfer_status = status;
+ }));
+ transfer_thread_.WaitUntilEventIsProcessed();
// First transfer parameters chunk is sent.
rpc::PayloadsView payloads =
@@ -274,6 +301,8 @@ TEST_F(ReadTransfer, UnexpectedOffset) {
constexpr ConstByteSpan data(kData32);
context_.server().SendServerStream<Transfer::Read>(
EncodeChunk({.transfer_id = 7u, .offset = 0, .data = data.first(16)}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(payloads.size(), 1u);
EXPECT_EQ(transfer_status, Status::Unknown());
@@ -283,6 +312,8 @@ TEST_F(ReadTransfer, UnexpectedOffset) {
.offset = 8, // wrong!
.data = data.subspan(16),
.remaining_bytes = 0}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(payloads.size(), 2u);
EXPECT_EQ(transfer_status, Status::Unknown());
@@ -297,6 +328,8 @@ TEST_F(ReadTransfer, UnexpectedOffset) {
.offset = 16,
.data = data.subspan(16),
.remaining_bytes = 0}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(payloads.size(), 3u);
Chunk c2 = DecodeChunk(payloads[2]);
@@ -310,12 +343,14 @@ TEST_F(ReadTransfer, UnexpectedOffset) {
}
TEST_F(ReadTransferMaxBytes32, TooMuchData) {
- stream::MemoryWriterBuffer<64> writer;
+ stream::MemoryWriterBuffer<32> writer;
Status transfer_status = Status::Unknown();
- client_.Read(8, writer, [&transfer_status](Status status) {
- transfer_status = status;
- });
+ ASSERT_EQ(OkStatus(),
+ client_.Read(8, writer, [&transfer_status](Status status) {
+ transfer_status = status;
+ }));
+ transfer_thread_.WaitUntilEventIsProcessed();
// First transfer parameters chunk is sent.
rpc::PayloadsView payloads =
@@ -341,10 +376,11 @@ TEST_F(ReadTransferMaxBytes32, TooMuchData) {
// pending_bytes == 8, send 16 instead.
context_.server().SendServerStream<Transfer::Read>(EncodeChunk(
{.transfer_id = 8u, .offset = 24, .data = data.subspan(24, 16)}));
+ transfer_thread_.WaitUntilEventIsProcessed();
- ASSERT_EQ(payloads.size(), 2u);
+ ASSERT_EQ(payloads.size(), 4u);
- Chunk c1 = DecodeChunk(payloads[1]);
+ Chunk c1 = DecodeChunk(payloads[3]);
EXPECT_EQ(c1.transfer_id, 8u);
ASSERT_TRUE(c1.status.has_value());
EXPECT_EQ(c1.status.value(), Status::Internal());
@@ -356,9 +392,11 @@ TEST_F(ReadTransfer, ServerError) {
stream::MemoryWriterBuffer<64> writer;
Status transfer_status = Status::Unknown();
- client_.Read(9, writer, [&transfer_status](Status status) {
- transfer_status = status;
- });
+ ASSERT_EQ(OkStatus(),
+ client_.Read(9, writer, [&transfer_status](Status status) {
+ transfer_status = status;
+ }));
+ transfer_thread_.WaitUntilEventIsProcessed();
// First transfer parameters chunk is sent.
rpc::PayloadsView payloads =
@@ -375,8 +413,9 @@ TEST_F(ReadTransfer, ServerError) {
// transfer.
context_.server().SendServerStream<Transfer::Read>(
EncodeChunk({.transfer_id = 9u, .status = Status::NotFound()}));
- ASSERT_EQ(payloads.size(), 1u);
+ transfer_thread_.WaitUntilEventIsProcessed();
+ ASSERT_EQ(payloads.size(), 1u);
EXPECT_EQ(transfer_status, Status::NotFound());
}
@@ -384,9 +423,11 @@ TEST_F(ReadTransfer, OnlySendsParametersOnceAfterDrop) {
stream::MemoryWriterBuffer<64> writer;
Status transfer_status = Status::Unknown();
- client_.Read(10, writer, [&transfer_status](Status status) {
- transfer_status = status;
- });
+ ASSERT_EQ(OkStatus(),
+ client_.Read(10, writer, [&transfer_status](Status status) {
+ transfer_status = status;
+ }));
+ transfer_thread_.WaitUntilEventIsProcessed();
// First transfer parameters chunk is sent.
rpc::PayloadsView payloads =
@@ -412,6 +453,7 @@ TEST_F(ReadTransfer, OnlySendsParametersOnceAfterDrop) {
.offset = offset,
.data = data.subspan(offset, 8)}));
}
+ transfer_thread_.WaitUntilEventIsProcessed();
// Only one parameters update should be sent, with the offset of the initial
// dropped packet.
@@ -428,6 +470,8 @@ TEST_F(ReadTransfer, OnlySendsParametersOnceAfterDrop) {
.offset = 8,
.data = data.subspan(8, 56),
.remaining_bytes = 0}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(payloads.size(), 3u);
Chunk c2 = DecodeChunk(payloads[2]);
@@ -442,9 +486,11 @@ TEST_F(ReadTransfer, ResendsParametersIfSentRepeatedChunkDuringRecovery) {
stream::MemoryWriterBuffer<64> writer;
Status transfer_status = Status::Unknown();
- client_.Read(11, writer, [&transfer_status](Status status) {
- transfer_status = status;
- });
+ ASSERT_EQ(OkStatus(),
+ client_.Read(11, writer, [&transfer_status](Status status) {
+ transfer_status = status;
+ }));
+ transfer_thread_.WaitUntilEventIsProcessed();
// First transfer parameters chunk is sent.
rpc::PayloadsView payloads =
@@ -470,6 +516,7 @@ TEST_F(ReadTransfer, ResendsParametersIfSentRepeatedChunkDuringRecovery) {
.offset = offset,
.data = data.subspan(offset, 8)}));
}
+ transfer_thread_.WaitUntilEventIsProcessed();
// Only one parameters update should be sent, with the offset of the initial
// dropped packet.
@@ -480,6 +527,7 @@ TEST_F(ReadTransfer, ResendsParametersIfSentRepeatedChunkDuringRecovery) {
// Re-send the final chunk of the block.
context_.server().SendServerStream<Transfer::Read>(EncodeChunk(last_chunk));
+ transfer_thread_.WaitUntilEventIsProcessed();
// The original drop parameters should be re-sent.
ASSERT_EQ(payloads.size(), 3u);
@@ -490,6 +538,8 @@ TEST_F(ReadTransfer, ResendsParametersIfSentRepeatedChunkDuringRecovery) {
// Do it again.
context_.server().SendServerStream<Transfer::Read>(EncodeChunk(last_chunk));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(payloads.size(), 4u);
Chunk c3 = DecodeChunk(payloads[3]);
EXPECT_EQ(c3.transfer_id, 11u);
@@ -502,6 +552,8 @@ TEST_F(ReadTransfer, ResendsParametersIfSentRepeatedChunkDuringRecovery) {
.offset = 8,
.data = data.subspan(8, 56),
.remaining_bytes = 0}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(payloads.size(), 5u);
Chunk c4 = DecodeChunk(payloads[4]);
@@ -514,19 +566,19 @@ TEST_F(ReadTransfer, ResendsParametersIfSentRepeatedChunkDuringRecovery) {
constexpr chrono::SystemClock::duration kTestTimeout =
std::chrono::milliseconds(50);
-constexpr chrono::SystemClock::duration kWaitForTimeout =
- kTestTimeout + std::chrono::milliseconds(10);
constexpr uint8_t kTestRetries = 3;
TEST_F(ReadTransfer, Timeout_ResendsCurrentParameters) {
stream::MemoryWriterBuffer<64> writer;
Status transfer_status = Status::Unknown();
- client_.Read(
- 12,
- writer,
- [&transfer_status](Status status) { transfer_status = status; },
- kTestTimeout);
+ ASSERT_EQ(OkStatus(),
+ client_.Read(
+ 12,
+ writer,
+ [&transfer_status](Status status) { transfer_status = status; },
+ kTestTimeout));
+ transfer_thread_.WaitUntilEventIsProcessed();
// First transfer parameters chunk is sent.
rpc::PayloadsView payloads =
@@ -541,7 +593,7 @@ TEST_F(ReadTransfer, Timeout_ResendsCurrentParameters) {
// Wait for the timeout to expire without doing anything. The client should
// resend its parameters chunk.
- this_thread::sleep_for(kWaitForTimeout);
+ transfer_thread_.SimulateClientTimeout(12);
ASSERT_EQ(payloads.size(), 2u);
Chunk c = DecodeChunk(payloads.back());
@@ -558,6 +610,8 @@ TEST_F(ReadTransfer, Timeout_ResendsCurrentParameters) {
.offset = 0,
.data = kData32,
.remaining_bytes = 0}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(payloads.size(), 3u);
Chunk c4 = DecodeChunk(payloads.back());
@@ -572,11 +626,13 @@ TEST_F(ReadTransfer, Timeout_ResendsUpdatedParameters) {
stream::MemoryWriterBuffer<64> writer;
Status transfer_status = Status::Unknown();
- client_.Read(
- 13,
- writer,
- [&transfer_status](Status status) { transfer_status = status; },
- kTestTimeout);
+ ASSERT_EQ(OkStatus(),
+ client_.Read(
+ 13,
+ writer,
+ [&transfer_status](Status status) { transfer_status = status; },
+ kTestTimeout));
+ transfer_thread_.WaitUntilEventIsProcessed();
// First transfer parameters chunk is sent.
rpc::PayloadsView payloads =
@@ -594,11 +650,13 @@ TEST_F(ReadTransfer, Timeout_ResendsUpdatedParameters) {
// Send some data, but not everything.
context_.server().SendServerStream<Transfer::Read>(
EncodeChunk({.transfer_id = 13u, .offset = 0, .data = data.first(16)}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(payloads.size(), 1u);
// Wait for the timeout to expire without sending more data. The client should
// send an updated parameters chunk, accounting for the data already received.
- this_thread::sleep_for(kWaitForTimeout);
+ transfer_thread_.SimulateClientTimeout(13);
ASSERT_EQ(payloads.size(), 2u);
Chunk c = DecodeChunk(payloads.back());
@@ -615,6 +673,8 @@ TEST_F(ReadTransfer, Timeout_ResendsUpdatedParameters) {
.offset = 16,
.data = data.subspan(16),
.remaining_bytes = 0}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(payloads.size(), 3u);
Chunk c4 = DecodeChunk(payloads.back());
@@ -629,11 +689,13 @@ TEST_F(ReadTransfer, Timeout_EndsTransferAfterMaxRetries) {
stream::MemoryWriterBuffer<64> writer;
Status transfer_status = Status::Unknown();
- client_.Read(
- 14,
- writer,
- [&transfer_status](Status status) { transfer_status = status; },
- kTestTimeout);
+ ASSERT_EQ(OkStatus(),
+ client_.Read(
+ 14,
+ writer,
+ [&transfer_status](Status status) { transfer_status = status; },
+ kTestTimeout));
+ transfer_thread_.WaitUntilEventIsProcessed();
// First transfer parameters chunk is sent.
rpc::PayloadsView payloads =
@@ -649,7 +711,7 @@ TEST_F(ReadTransfer, Timeout_EndsTransferAfterMaxRetries) {
for (unsigned retry = 1; retry <= kTestRetries; ++retry) {
// Wait for the timeout to expire without doing anything. The client should
// resend its parameters chunk.
- this_thread::sleep_for(kWaitForTimeout);
+ transfer_thread_.SimulateClientTimeout(14);
ASSERT_EQ(payloads.size(), retry + 1);
Chunk c = DecodeChunk(payloads.back());
@@ -663,7 +725,7 @@ TEST_F(ReadTransfer, Timeout_EndsTransferAfterMaxRetries) {
// Sleep one more time after the final retry. The client should cancel the
// transfer at this point and send a DEADLINE_EXCEEDED chunk.
- this_thread::sleep_for(kWaitForTimeout);
+ transfer_thread_.SimulateClientTimeout(14);
ASSERT_EQ(payloads.size(), 5u);
Chunk c4 = DecodeChunk(payloads.back());
@@ -679,36 +741,126 @@ TEST_F(ReadTransfer, Timeout_EndsTransferAfterMaxRetries) {
ASSERT_EQ(payloads.size(), 5u);
}
+TEST_F(ReadTransfer, Timeout_ReceivingDataResetsRetryCount) {
+ stream::MemoryWriterBuffer<64> writer;
+ Status transfer_status = Status::Unknown();
+
+ constexpr ConstByteSpan data(kData32);
+
+ ASSERT_EQ(OkStatus(),
+ client_.Read(
+ 14,
+ writer,
+ [&transfer_status](Status status) { transfer_status = status; },
+ kTestTimeout));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
+ // First transfer parameters chunk is sent.
+ rpc::PayloadsView payloads =
+ context_.output().payloads<Transfer::Read>(context_.channel().id());
+ ASSERT_EQ(payloads.size(), 1u);
+ EXPECT_EQ(transfer_status, Status::Unknown());
+
+ Chunk c0 = DecodeChunk(payloads.back());
+ EXPECT_EQ(c0.transfer_id, 14u);
+ EXPECT_EQ(c0.offset, 0u);
+ EXPECT_EQ(c0.window_end_offset, 64u);
+
+ // Simulate one less timeout than the maximum amount of retries.
+ for (unsigned retry = 1; retry <= kTestRetries - 1; ++retry) {
+ transfer_thread_.SimulateClientTimeout(14);
+ ASSERT_EQ(payloads.size(), retry + 1);
+
+ Chunk c = DecodeChunk(payloads.back());
+ EXPECT_EQ(c.transfer_id, 14u);
+ EXPECT_EQ(c.offset, 0u);
+ EXPECT_EQ(c.window_end_offset, 64u);
+
+ // Transfer has not yet completed.
+ EXPECT_EQ(transfer_status, Status::Unknown());
+ }
+
+ // Send some data.
+ context_.server().SendServerStream<Transfer::Read>(
+ EncodeChunk({.transfer_id = 14u, .offset = 0, .data = data.first(16)}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+ ASSERT_EQ(payloads.size(), 3u);
+
+ // Time out a couple more times. The context's retry count should have been
+ // reset, so it should go through the standard retry flow instead of
+ // terminating the transfer.
+ transfer_thread_.SimulateClientTimeout(14);
+ ASSERT_EQ(payloads.size(), 4u);
+
+ Chunk c = DecodeChunk(payloads.back());
+ EXPECT_FALSE(c.status.has_value());
+ EXPECT_EQ(c.transfer_id, 14u);
+ EXPECT_EQ(c.offset, 16u);
+ EXPECT_EQ(c.window_end_offset, 64u);
+
+ transfer_thread_.SimulateClientTimeout(14);
+ ASSERT_EQ(payloads.size(), 5u);
+
+ c = DecodeChunk(payloads.back());
+ EXPECT_FALSE(c.status.has_value());
+ EXPECT_EQ(c.transfer_id, 14u);
+ EXPECT_EQ(c.offset, 16u);
+ EXPECT_EQ(c.window_end_offset, 64u);
+}
+
+TEST_F(ReadTransfer, InitialPacketFails_OnCompletedCalledWithDataLoss) {
+ stream::MemoryWriterBuffer<64> writer;
+ Status transfer_status = Status::Unknown();
+
+ context_.output().set_send_status(Status::Unauthenticated());
+
+ ASSERT_EQ(OkStatus(),
+ client_.Read(
+ 14,
+ writer,
+ [&transfer_status](Status status) {
+ ASSERT_EQ(transfer_status,
+ Status::Unknown()); // Must only call once
+ transfer_status = status;
+ },
+ kTestTimeout));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
+ EXPECT_EQ(transfer_status, Status::Internal());
+}
+
class WriteTransfer : public ::testing::Test {
protected:
WriteTransfer()
- : client_(context_.client(),
- context_.channel().id(),
- work_queue_,
- data_buffer_),
- work_queue_thread_(WorkQueueThreadOptions(), work_queue_) {}
+ : transfer_thread_(chunk_buffer_, encode_buffer_),
+ client_(context_.client(), context_.channel().id(), transfer_thread_),
+ system_thread_(TransferThreadOptions(), transfer_thread_) {}
~WriteTransfer() {
- work_queue_.RequestStop();
- work_queue_thread_.join();
+ transfer_thread_.Terminate();
+ system_thread_.join();
}
rpc::RawClientTestContext<> context_;
+ Thread<1, 1> transfer_thread_;
Client client_;
- std::array<std::byte, 64> data_buffer_;
- work_queue::WorkQueueWithBuffer<4> work_queue_;
- thread::Thread work_queue_thread_;
+ std::array<std::byte, 64> chunk_buffer_;
+ std::array<std::byte, 64> encode_buffer_;
+
+ thread::Thread system_thread_;
};
TEST_F(WriteTransfer, SingleChunk) {
stream::MemoryReader reader(kData32);
Status transfer_status = Status::Unknown();
- client_.Write(3, reader, [&transfer_status](Status status) {
- transfer_status = status;
- });
+ ASSERT_EQ(OkStatus(),
+ client_.Write(3, reader, [&transfer_status](Status status) {
+ transfer_status = status;
+ }));
+ transfer_thread_.WaitUntilEventIsProcessed();
// The client begins by just sending the transfer ID.
rpc::PayloadsView payloads =
@@ -719,14 +871,16 @@ TEST_F(WriteTransfer, SingleChunk) {
Chunk c0 = DecodeChunk(payloads[0]);
EXPECT_EQ(c0.transfer_id, 3u);
- // Send transfer parameters.
- context_.server().SendServerStream<Transfer::Write>(
- EncodeChunk({.transfer_id = 3,
- .pending_bytes = 64,
- .max_chunk_size_bytes = 32,
- .offset = 0}));
+ // Send transfer parameters. Client should send a data chunk and the final
+ // chunk.
+ rpc::test::WaitForPackets(context_.output(), 2, [this] {
+ context_.server().SendServerStream<Transfer::Write>(
+ EncodeChunk({.transfer_id = 3,
+ .pending_bytes = 64,
+ .max_chunk_size_bytes = 32,
+ .offset = 0}));
+ });
- // Client should send a data chunk and the final chunk.
ASSERT_EQ(payloads.size(), 3u);
Chunk c1 = DecodeChunk(payloads[1]);
@@ -744,6 +898,8 @@ TEST_F(WriteTransfer, SingleChunk) {
// Send the final status chunk to complete the transfer.
context_.server().SendServerStream<Transfer::Write>(
EncodeChunk({.transfer_id = 3, .status = OkStatus()}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
EXPECT_EQ(payloads.size(), 3u);
EXPECT_EQ(transfer_status, OkStatus());
}
@@ -752,9 +908,11 @@ TEST_F(WriteTransfer, MultiChunk) {
stream::MemoryReader reader(kData32);
Status transfer_status = Status::Unknown();
- client_.Write(4, reader, [&transfer_status](Status status) {
- transfer_status = status;
- });
+ ASSERT_EQ(OkStatus(),
+ client_.Write(4, reader, [&transfer_status](Status status) {
+ transfer_status = status;
+ }));
+ transfer_thread_.WaitUntilEventIsProcessed();
// The client begins by just sending the transfer ID.
rpc::PayloadsView payloads =
@@ -766,13 +924,16 @@ TEST_F(WriteTransfer, MultiChunk) {
EXPECT_EQ(c0.transfer_id, 4u);
// Send transfer parameters with a chunk size smaller than the data.
- context_.server().SendServerStream<Transfer::Write>(
- EncodeChunk({.transfer_id = 4,
- .pending_bytes = 64,
- .max_chunk_size_bytes = 16,
- .offset = 0}));
// Client should send two data chunks and the final chunk.
+ rpc::test::WaitForPackets(context_.output(), 3, [this] {
+ context_.server().SendServerStream<Transfer::Write>(
+ EncodeChunk({.transfer_id = 4,
+ .pending_bytes = 64,
+ .max_chunk_size_bytes = 16,
+ .offset = 0}));
+ });
+
ASSERT_EQ(payloads.size(), 4u);
Chunk c1 = DecodeChunk(payloads[1]);
@@ -797,6 +958,8 @@ TEST_F(WriteTransfer, MultiChunk) {
// Send the final status chunk to complete the transfer.
context_.server().SendServerStream<Transfer::Write>(
EncodeChunk({.transfer_id = 4, .status = OkStatus()}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
EXPECT_EQ(payloads.size(), 4u);
EXPECT_EQ(transfer_status, OkStatus());
}
@@ -805,9 +968,11 @@ TEST_F(WriteTransfer, OutOfOrder_SeekSupported) {
stream::MemoryReader reader(kData32);
Status transfer_status = Status::Unknown();
- client_.Write(5, reader, [&transfer_status](Status status) {
- transfer_status = status;
- });
+ ASSERT_EQ(OkStatus(),
+ client_.Write(5, reader, [&transfer_status](Status status) {
+ transfer_status = status;
+ }));
+ transfer_thread_.WaitUntilEventIsProcessed();
// The client begins by just sending the transfer ID.
rpc::PayloadsView payloads =
@@ -819,13 +984,15 @@ TEST_F(WriteTransfer, OutOfOrder_SeekSupported) {
EXPECT_EQ(c0.transfer_id, 5u);
// Send transfer parameters with a nonzero offset, requesting a seek.
- context_.server().SendServerStream<Transfer::Write>(
- EncodeChunk({.transfer_id = 5,
- .pending_bytes = 64,
- .max_chunk_size_bytes = 32,
- .offset = 16}));
-
// Client should send a data chunk and the final chunk.
+ rpc::test::WaitForPackets(context_.output(), 2, [this] {
+ context_.server().SendServerStream<Transfer::Write>(
+ EncodeChunk({.transfer_id = 5,
+ .pending_bytes = 64,
+ .max_chunk_size_bytes = 32,
+ .offset = 16}));
+ });
+
ASSERT_EQ(payloads.size(), 3u);
Chunk c1 = DecodeChunk(payloads[1]);
@@ -845,6 +1012,8 @@ TEST_F(WriteTransfer, OutOfOrder_SeekSupported) {
// Send the final status chunk to complete the transfer.
context_.server().SendServerStream<Transfer::Write>(
EncodeChunk({.transfer_id = 5, .status = OkStatus()}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
EXPECT_EQ(payloads.size(), 3u);
EXPECT_EQ(transfer_status, OkStatus());
}
@@ -874,9 +1043,11 @@ TEST_F(WriteTransfer, OutOfOrder_SeekNotSupported) {
FakeNonSeekableReader reader(kData32);
Status transfer_status = Status::Unknown();
- client_.Write(6, reader, [&transfer_status](Status status) {
- transfer_status = status;
- });
+ ASSERT_EQ(OkStatus(),
+ client_.Write(6, reader, [&transfer_status](Status status) {
+ transfer_status = status;
+ }));
+ transfer_thread_.WaitUntilEventIsProcessed();
// The client begins by just sending the transfer ID.
rpc::PayloadsView payloads =
@@ -893,6 +1064,7 @@ TEST_F(WriteTransfer, OutOfOrder_SeekNotSupported) {
.pending_bytes = 64,
.max_chunk_size_bytes = 32,
.offset = 16}));
+ transfer_thread_.WaitUntilEventIsProcessed();
// Client should send a status chunk and end the transfer.
ASSERT_EQ(payloads.size(), 2u);
@@ -909,9 +1081,11 @@ TEST_F(WriteTransfer, ServerError) {
stream::MemoryReader reader(kData32);
Status transfer_status = Status::Unknown();
- client_.Write(7, reader, [&transfer_status](Status status) {
- transfer_status = status;
- });
+ ASSERT_EQ(OkStatus(),
+ client_.Write(7, reader, [&transfer_status](Status status) {
+ transfer_status = status;
+ }));
+ transfer_thread_.WaitUntilEventIsProcessed();
// The client begins by just sending the transfer ID.
rpc::PayloadsView payloads =
@@ -925,6 +1099,7 @@ TEST_F(WriteTransfer, ServerError) {
// Send an error from the server.
context_.server().SendServerStream<Transfer::Write>(
EncodeChunk({.transfer_id = 7, .status = Status::NotFound()}));
+ transfer_thread_.WaitUntilEventIsProcessed();
// Client should not respond and terminate the transfer.
EXPECT_EQ(payloads.size(), 1u);
@@ -935,9 +1110,11 @@ TEST_F(WriteTransfer, MalformedParametersChunk) {
stream::MemoryReader reader(kData32);
Status transfer_status = Status::Unknown();
- client_.Write(8, reader, [&transfer_status](Status status) {
- transfer_status = status;
- });
+ ASSERT_EQ(OkStatus(),
+ client_.Write(8, reader, [&transfer_status](Status status) {
+ transfer_status = status;
+ }));
+ transfer_thread_.WaitUntilEventIsProcessed();
// The client begins by just sending the transfer ID.
rpc::PayloadsView payloads =
@@ -951,6 +1128,7 @@ TEST_F(WriteTransfer, MalformedParametersChunk) {
// Send an invalid transfer parameters chunk without pending_bytes.
context_.server().SendServerStream<Transfer::Write>(
EncodeChunk({.transfer_id = 8, .max_chunk_size_bytes = 32}));
+ transfer_thread_.WaitUntilEventIsProcessed();
// Client should send a status chunk and end the transfer.
ASSERT_EQ(payloads.size(), 2u);
@@ -967,9 +1145,11 @@ TEST_F(WriteTransfer, AbortIfZeroBytesAreRequested) {
stream::MemoryReader reader(kData32);
Status transfer_status = Status::Unknown();
- client_.Write(9, reader, [&transfer_status](Status status) {
- transfer_status = status;
- });
+ ASSERT_EQ(OkStatus(),
+ client_.Write(9, reader, [&transfer_status](Status status) {
+ transfer_status = status;
+ }));
+ transfer_thread_.WaitUntilEventIsProcessed();
// The client begins by just sending the transfer ID.
rpc::PayloadsView payloads =
@@ -983,6 +1163,7 @@ TEST_F(WriteTransfer, AbortIfZeroBytesAreRequested) {
// Send an invalid transfer parameters chunk with 0 pending_bytes.
context_.server().SendServerStream<Transfer::Write>(EncodeChunk(
{.transfer_id = 9, .pending_bytes = 0, .max_chunk_size_bytes = 32}));
+ transfer_thread_.WaitUntilEventIsProcessed();
// Client should send a status chunk and end the transfer.
ASSERT_EQ(payloads.size(), 2u);
@@ -990,20 +1171,22 @@ TEST_F(WriteTransfer, AbortIfZeroBytesAreRequested) {
Chunk c1 = DecodeChunk(payloads[1]);
EXPECT_EQ(c1.transfer_id, 9u);
ASSERT_TRUE(c1.status.has_value());
- EXPECT_EQ(c1.status.value(), Status::Internal());
+ EXPECT_EQ(c1.status.value(), Status::ResourceExhausted());
- EXPECT_EQ(transfer_status, Status::Internal());
+ EXPECT_EQ(transfer_status, Status::ResourceExhausted());
}
TEST_F(WriteTransfer, Timeout_RetriesWithInitialChunk) {
stream::MemoryReader reader(kData32);
Status transfer_status = Status::Unknown();
- client_.Write(
- 10,
- reader,
- [&transfer_status](Status status) { transfer_status = status; },
- kTestTimeout);
+ ASSERT_EQ(OkStatus(),
+ client_.Write(
+ 10,
+ reader,
+ [&transfer_status](Status status) { transfer_status = status; },
+ kTestTimeout));
+ transfer_thread_.WaitUntilEventIsProcessed();
// The client begins by just sending the transfer ID.
rpc::PayloadsView payloads =
@@ -1016,7 +1199,7 @@ TEST_F(WriteTransfer, Timeout_RetriesWithInitialChunk) {
// Wait for the timeout to expire without doing anything. The client should
// resend the initial transmit chunk.
- this_thread::sleep_for(kWaitForTimeout);
+ transfer_thread_.SimulateClientTimeout(10);
ASSERT_EQ(payloads.size(), 2u);
Chunk c = DecodeChunk(payloads.back());
@@ -1030,11 +1213,13 @@ TEST_F(WriteTransfer, Timeout_RetriesWithMostRecentChunk) {
stream::MemoryReader reader(kData32);
Status transfer_status = Status::Unknown();
- client_.Write(
- 11,
- reader,
- [&transfer_status](Status status) { transfer_status = status; },
- kTestTimeout);
+ ASSERT_EQ(OkStatus(),
+ client_.Write(
+ 11,
+ reader,
+ [&transfer_status](Status status) { transfer_status = status; },
+ kTestTimeout));
+ transfer_thread_.WaitUntilEventIsProcessed();
// The client begins by just sending the transfer ID.
rpc::PayloadsView payloads =
@@ -1046,13 +1231,15 @@ TEST_F(WriteTransfer, Timeout_RetriesWithMostRecentChunk) {
EXPECT_EQ(c0.transfer_id, 11u);
// Send the first parameters chunk.
- context_.server().SendServerStream<Transfer::Write>(
- EncodeChunk({.transfer_id = 11,
- .pending_bytes = 16,
- .max_chunk_size_bytes = 8,
- .offset = 0}));
-
+ rpc::test::WaitForPackets(context_.output(), 2, [this] {
+ context_.server().SendServerStream<Transfer::Write>(
+ EncodeChunk({.transfer_id = 11,
+ .pending_bytes = 16,
+ .max_chunk_size_bytes = 8,
+ .offset = 0}));
+ });
ASSERT_EQ(payloads.size(), 3u);
+
EXPECT_EQ(transfer_status, Status::Unknown());
Chunk c1 = DecodeChunk(payloads[1]);
@@ -1071,7 +1258,7 @@ TEST_F(WriteTransfer, Timeout_RetriesWithMostRecentChunk) {
// Wait for the timeout to expire without doing anything. The client should
// resend the most recently sent chunk.
- this_thread::sleep_for(kWaitForTimeout);
+ transfer_thread_.SimulateClientTimeout(11);
ASSERT_EQ(payloads.size(), 4u);
Chunk c3 = DecodeChunk(payloads[3]);
@@ -1088,11 +1275,13 @@ TEST_F(WriteTransfer, Timeout_RetriesWithSingleChunkTransfer) {
stream::MemoryReader reader(kData32);
Status transfer_status = Status::Unknown();
- client_.Write(
- 12,
- reader,
- [&transfer_status](Status status) { transfer_status = status; },
- kTestTimeout);
+ ASSERT_EQ(OkStatus(),
+ client_.Write(
+ 12,
+ reader,
+ [&transfer_status](Status status) { transfer_status = status; },
+ kTestTimeout));
+ transfer_thread_.WaitUntilEventIsProcessed();
// The client begins by just sending the transfer ID.
rpc::PayloadsView payloads =
@@ -1105,13 +1294,15 @@ TEST_F(WriteTransfer, Timeout_RetriesWithSingleChunkTransfer) {
// Send the first parameters chunk, requesting all the data. The client should
// respond with one data chunk and a remaining_bytes = 0 chunk.
- context_.server().SendServerStream<Transfer::Write>(
- EncodeChunk({.transfer_id = 12,
- .pending_bytes = 64,
- .max_chunk_size_bytes = 64,
- .offset = 0}));
-
+ rpc::test::WaitForPackets(context_.output(), 2, [this] {
+ context_.server().SendServerStream<Transfer::Write>(
+ EncodeChunk({.transfer_id = 12,
+ .pending_bytes = 64,
+ .max_chunk_size_bytes = 64,
+ .offset = 0}));
+ });
ASSERT_EQ(payloads.size(), 3u);
+
EXPECT_EQ(transfer_status, Status::Unknown());
Chunk c1 = DecodeChunk(payloads[1]);
@@ -1127,7 +1318,7 @@ TEST_F(WriteTransfer, Timeout_RetriesWithSingleChunkTransfer) {
// Wait for the timeout to expire without doing anything. The client should
// resend the data chunk.
- this_thread::sleep_for(kWaitForTimeout);
+ transfer_thread_.SimulateClientTimeout(12);
ASSERT_EQ(payloads.size(), 4u);
Chunk c3 = DecodeChunk(payloads[3]);
@@ -1142,6 +1333,8 @@ TEST_F(WriteTransfer, Timeout_RetriesWithSingleChunkTransfer) {
.pending_bytes = 64,
.max_chunk_size_bytes = 64,
.offset = 32}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(payloads.size(), 5u);
Chunk c4 = DecodeChunk(payloads[4]);
@@ -1151,6 +1344,7 @@ TEST_F(WriteTransfer, Timeout_RetriesWithSingleChunkTransfer) {
context_.server().SendServerStream<Transfer::Write>(
EncodeChunk({.transfer_id = 12, .status = OkStatus()}));
+ transfer_thread_.WaitUntilEventIsProcessed();
EXPECT_EQ(transfer_status, OkStatus());
}
@@ -1159,11 +1353,13 @@ TEST_F(WriteTransfer, Timeout_EndsTransferAfterMaxRetries) {
stream::MemoryReader reader(kData32);
Status transfer_status = Status::Unknown();
- client_.Write(
- 13,
- reader,
- [&transfer_status](Status status) { transfer_status = status; },
- kTestTimeout);
+ ASSERT_EQ(OkStatus(),
+ client_.Write(
+ 13,
+ reader,
+ [&transfer_status](Status status) { transfer_status = status; },
+ kTestTimeout));
+ transfer_thread_.WaitUntilEventIsProcessed();
// The client begins by just sending the transfer ID.
rpc::PayloadsView payloads =
@@ -1177,7 +1373,7 @@ TEST_F(WriteTransfer, Timeout_EndsTransferAfterMaxRetries) {
for (unsigned retry = 1; retry <= kTestRetries; ++retry) {
// Wait for the timeout to expire without doing anything. The client should
// resend the initial transmit chunk.
- this_thread::sleep_for(kWaitForTimeout);
+ transfer_thread_.SimulateClientTimeout(13);
ASSERT_EQ(payloads.size(), retry + 1);
Chunk c = DecodeChunk(payloads.back());
@@ -1189,7 +1385,7 @@ TEST_F(WriteTransfer, Timeout_EndsTransferAfterMaxRetries) {
// Sleep one more time after the final retry. The client should cancel the
// transfer at this point and send a DEADLINE_EXCEEDED chunk.
- this_thread::sleep_for(kWaitForTimeout);
+ transfer_thread_.SimulateClientTimeout(13);
ASSERT_EQ(payloads.size(), 5u);
Chunk c4 = DecodeChunk(payloads.back());
@@ -1209,11 +1405,13 @@ TEST_F(WriteTransfer, Timeout_NonSeekableReaderEndsTransfer) {
FakeNonSeekableReader reader(kData32);
Status transfer_status = Status::Unknown();
- client_.Write(
- 14,
- reader,
- [&transfer_status](Status status) { transfer_status = status; },
- kTestTimeout);
+ ASSERT_EQ(OkStatus(),
+ client_.Write(
+ 14,
+ reader,
+ [&transfer_status](Status status) { transfer_status = status; },
+ kTestTimeout));
+ transfer_thread_.WaitUntilEventIsProcessed();
// The client begins by just sending the transfer ID.
rpc::PayloadsView payloads =
@@ -1225,13 +1423,15 @@ TEST_F(WriteTransfer, Timeout_NonSeekableReaderEndsTransfer) {
EXPECT_EQ(c0.transfer_id, 14u);
// Send the first parameters chunk.
- context_.server().SendServerStream<Transfer::Write>(
- EncodeChunk({.transfer_id = 14,
- .pending_bytes = 16,
- .max_chunk_size_bytes = 8,
- .offset = 0}));
-
+ rpc::test::WaitForPackets(context_.output(), 2, [this] {
+ context_.server().SendServerStream<Transfer::Write>(
+ EncodeChunk({.transfer_id = 14,
+ .pending_bytes = 16,
+ .max_chunk_size_bytes = 8,
+ .offset = 0}));
+ });
ASSERT_EQ(payloads.size(), 3u);
+
EXPECT_EQ(transfer_status, Status::Unknown());
Chunk c1 = DecodeChunk(payloads[1]);
@@ -1250,7 +1450,7 @@ TEST_F(WriteTransfer, Timeout_NonSeekableReaderEndsTransfer) {
// Wait for the timeout to expire without doing anything. The client should
// fail to seek back and end the transfer.
- this_thread::sleep_for(kWaitForTimeout);
+ transfer_thread_.SimulateClientTimeout(14);
ASSERT_EQ(payloads.size(), 4u);
Chunk c3 = DecodeChunk(payloads[3]);
@@ -1261,5 +1461,7 @@ TEST_F(WriteTransfer, Timeout_NonSeekableReaderEndsTransfer) {
EXPECT_EQ(transfer_status, Status::DeadlineExceeded());
}
+PW_MODIFY_DIAGNOSTICS_POP();
+
} // namespace
} // namespace pw::transfer::test
diff --git a/pw_transfer/context.cc b/pw_transfer/context.cc
index 2d1f4e50f..04027036e 100644
--- a/pw_transfer/context.cc
+++ b/pw_transfer/context.cc
@@ -1,4 +1,4 @@
-// Copyright 2021 The Pigweed Authors
+// Copyright 2022 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
@@ -16,105 +16,289 @@
#include "pw_transfer/internal/context.h"
+#include <chrono>
#include <mutex>
#include "pw_assert/check.h"
+#include "pw_chrono/system_clock.h"
#include "pw_log/log.h"
#include "pw_status/try.h"
#include "pw_transfer/transfer.pwpb.h"
+#include "pw_transfer/transfer_thread.h"
#include "pw_varint/varint.h"
+PW_MODIFY_DIAGNOSTICS_PUSH();
+PW_MODIFY_DIAGNOSTIC(ignored, "-Wmissing-field-initializers");
+
namespace pw::transfer::internal {
-Status Context::InitiateTransfer(const TransferParameters& max_parameters) {
+void Context::HandleEvent(const Event& event) {
+ switch (event.type) {
+ case EventType::kNewClientTransfer:
+ case EventType::kNewServerTransfer: {
+ if (active()) {
+ Finish(Status::Aborted());
+ }
+
+ Initialize(event.new_transfer);
+
+ if (event.type == EventType::kNewClientTransfer) {
+ InitiateTransferAsClient();
+ } else {
+ StartTransferAsServer(event.new_transfer);
+ }
+ return;
+ }
+
+ case EventType::kClientChunk:
+ case EventType::kServerChunk:
+ PW_CHECK(initialized());
+ HandleChunkEvent(event.chunk);
+ return;
+
+ case EventType::kClientTimeout:
+ case EventType::kServerTimeout:
+ HandleTimeout();
+ return;
+
+ case EventType::kSendStatusChunk:
+ case EventType::kSetTransferStream:
+ case EventType::kAddTransferHandler:
+ case EventType::kRemoveTransferHandler:
+ case EventType::kTerminate:
+ // These events are intended for the transfer thread and should never be
+ // forwarded through to a context.
+ PW_CRASH("Transfer context received a transfer thread event");
+ }
+}
+
+void Context::InitiateTransferAsClient() {
PW_DCHECK(active());
- if (type() == kReceive) {
+ SetTimeout(chunk_timeout_);
+
+ if (type() == TransferType::kReceive) {
// A receiver begins a new transfer with a parameters chunk telling the
// transmitter what to send.
- PW_TRY(UpdateAndSendTransferParameters(max_parameters, kRetransmit));
+ UpdateAndSendTransferParameters(TransmitAction::kBegin);
} else {
- PW_TRY(SendInitialTransmitChunk());
+ SendInitialTransmitChunk();
}
- timer_.InvokeAfter(chunk_timeout_);
- return OkStatus();
+ // Don't send an error packet. If the transfer failed to start, then there's
+ // nothing to tell the server about.
}
-bool Context::ReadChunkData(ChunkDataBuffer& buffer,
- const TransferParameters& max_parameters,
- const Chunk& chunk) {
- CancelTimer();
-
- if (type() == kTransmit) {
- return ReadTransmitChunk(max_parameters, chunk);
+void Context::StartTransferAsServer(const NewTransferEvent& new_transfer) {
+ PW_LOG_INFO("Starting transfer %u with handler %u",
+ static_cast<unsigned>(new_transfer.transfer_id),
+ static_cast<unsigned>(new_transfer.handler_id));
+
+ if (const Status status = new_transfer.handler->Prepare(new_transfer.type);
+ !status.ok()) {
+ PW_LOG_WARN("Transfer handler %u prepare failed with status %u",
+ static_cast<unsigned>(new_transfer.handler->id()),
+ status.code());
+ Finish(status.IsPermissionDenied() ? status : Status::DataLoss());
+ // Do not send the final status packet here! On the server, a start event
+ // will immediately be followed by the server chunk event. Sending the final
+ // chunk will be handled then.
+ return;
}
- return ReadReceiveChunk(buffer, max_parameters, chunk);
-}
-void Context::ProcessChunk(ChunkDataBuffer& buffer,
- const TransferParameters& max_parameters) {
- if (type() == kTransmit) {
- ProcessTransmitChunk();
- } else {
- ProcessReceiveChunk(buffer, max_parameters);
- }
+ // Initialize doesn't set the handler since it's specific to server transfers.
+ static_cast<ServerContext&>(*this).set_handler(*new_transfer.handler);
- if (active()) {
- timer_.InvokeAfter(chunk_timeout_);
- }
+ // Server transfers use the stream provided by the handler rather than the
+ // stream included in the NewTransferEvent.
+ stream_ = &new_transfer.handler->stream();
}
-Status Context::SendInitialTransmitChunk() {
+void Context::SendInitialTransmitChunk() {
// A transmitter begins a transfer by just sending its ID.
internal::Chunk chunk = {};
chunk.transfer_id = transfer_id_;
+ chunk.type = Chunk::Type::kTransferStart;
+
+ EncodeAndSendChunk(chunk);
+}
- Result<ConstByteSpan> result = EncodeChunk(chunk, *encoding_buffer_);
- if (!result.ok()) {
- return result.status();
+void Context::SendTransferParameters(TransmitAction action) {
+ internal::Chunk parameters = {
+ .transfer_id = transfer_id_,
+ .window_end_offset = window_end_offset_,
+ .pending_bytes = pending_bytes_,
+ .max_chunk_size_bytes = max_chunk_size_bytes_,
+ .min_delay_microseconds = kDefaultChunkDelayMicroseconds,
+ .offset = offset_,
+ };
+
+ switch (action) {
+ case TransmitAction::kBegin:
+ parameters.type = internal::Chunk::Type::kTransferStart;
+ break;
+ case TransmitAction::kRetransmit:
+ parameters.type = internal::Chunk::Type::kParametersRetransmit;
+ break;
+ case TransmitAction::kExtend:
+ parameters.type = internal::Chunk::Type::kParametersContinue;
+ break;
}
- return rpc_writer_->Write(*result);
+ PW_LOG_DEBUG(
+ "Transfer %u sending transfer parameters: "
+ "offset=%u, window_end_offset=%u, pending_bytes=%u, chunk_size=%u",
+ static_cast<unsigned>(transfer_id_),
+ static_cast<unsigned>(offset_),
+ static_cast<unsigned>(window_end_offset_),
+ static_cast<unsigned>(pending_bytes_),
+ static_cast<unsigned>(max_chunk_size_bytes_));
+
+ EncodeAndSendChunk(parameters);
}
-bool Context::ReadTransmitChunk(const TransferParameters& max_parameters,
- const Chunk& chunk) {
- {
- std::lock_guard lock(state_lock_);
+void Context::EncodeAndSendChunk(const Chunk& chunk) {
+ Result<ConstByteSpan> data =
+ internal::EncodeChunk(chunk, thread_->encode_buffer());
+ if (!data.ok()) {
+ PW_LOG_ERROR("Failed to encode chunk for transfer %u: %d",
+ static_cast<unsigned>(chunk.transfer_id),
+ data.status().code());
+ if (active()) {
+ Finish(Status::Internal());
+ }
+ return;
+ }
- switch (transfer_state_) {
- case TransferState::kInactive:
- PW_CRASH("Never should handle chunk while in kInactive state");
+ if (const Status status = rpc_writer_->Write(*data); !status.ok()) {
+ PW_LOG_ERROR("Failed to write chunk for transfer %u: %d",
+ static_cast<unsigned>(chunk.transfer_id),
+ status.code());
+ if (active()) {
+ Finish(Status::Internal());
+ }
+ return;
+ }
+}
- case TransferState::kRecovery:
- PW_CRASH("Transmit transfer should not enter recovery state.");
+void Context::UpdateAndSendTransferParameters(TransmitAction action) {
+ size_t pending_bytes =
+ std::min(max_parameters_->pending_bytes(),
+ static_cast<uint32_t>(writer().ConservativeWriteLimit()));
- case TransferState::kCompleted:
- // Transfer is not pending; notify the sender.
- SendStatusChunk(Status::FailedPrecondition());
- transfer_state_ = TransferState::kInactive;
- return false;
+ window_size_ = pending_bytes;
+ window_end_offset_ = offset_ + pending_bytes;
+ pending_bytes_ = pending_bytes;
+
+ max_chunk_size_bytes_ = MaxWriteChunkSize(
+ max_parameters_->max_chunk_size_bytes(), rpc_writer_->channel_id());
+
+ PW_LOG_INFO("Transfer rate: %u B/s",
+ static_cast<unsigned>(transfer_rate_.GetRateBytesPerSecond()));
+
+ return SendTransferParameters(action);
+}
+
+void Context::Initialize(const NewTransferEvent& new_transfer) {
+ PW_DCHECK(!active());
+
+ transfer_id_ = new_transfer.transfer_id;
+ flags_ = static_cast<uint8_t>(new_transfer.type);
+ transfer_state_ = TransferState::kWaiting;
+ retries_ = 0;
+ max_retries_ = new_transfer.max_retries;
+
+ rpc_writer_ = new_transfer.rpc_writer;
+ stream_ = new_transfer.stream;
+
+ offset_ = 0;
+ window_size_ = 0;
+ window_end_offset_ = 0;
+ pending_bytes_ = 0;
+ max_chunk_size_bytes_ = new_transfer.max_parameters->max_chunk_size_bytes();
- case TransferState::kData:
- // Continue with reading the chunk.
- break;
+ max_parameters_ = new_transfer.max_parameters;
+ thread_ = new_transfer.transfer_thread;
- case TransferState::kTimedOut:
- // Drop the chunk to let the timeout handler run.
- return false;
+ last_chunk_offset_ = 0;
+ chunk_timeout_ = new_transfer.timeout;
+ interchunk_delay_ = chrono::SystemClock::for_at_least(
+ std::chrono::microseconds(kDefaultChunkDelayMicroseconds));
+ next_timeout_ = kNoTimeout;
+
+ transfer_rate_.Reset();
+}
+
+void Context::HandleChunkEvent(const ChunkEvent& event) {
+ PW_DCHECK(event.transfer_id == transfer_id_);
+
+ Chunk chunk;
+ if (!DecodeChunk(ConstByteSpan(event.data, event.size), chunk).ok()) {
+ return;
+ }
+
+ // Received some data. Reset the retry counter.
+ retries_ = 0;
+
+ if (chunk.status.has_value()) {
+ if (active()) {
+ Finish(chunk.status.value());
+ } else {
+ PW_LOG_DEBUG("Got final status %d for completed transfer %d",
+ static_cast<int>(chunk.status.value().code()),
+ static_cast<int>(transfer_id_));
}
+ return;
}
+ if (type() == TransferType::kTransmit) {
+ HandleTransmitChunk(chunk);
+ } else {
+ HandleReceiveChunk(chunk);
+ }
+}
+
+void Context::HandleTransmitChunk(const Chunk& chunk) {
+ switch (transfer_state_) {
+ case TransferState::kInactive:
+ case TransferState::kRecovery:
+ PW_CRASH("Never should handle chunk while inactive");
+
+ case TransferState::kCompleted:
+ // If the transfer has already completed and another chunk is received,
+ // tell the other end that the transfer is over.
+ //
+ // TODO(frolv): Final status chunks should be ACKed by the other end. When
+ // that is added, this case should be updated to check if the received
+ // chunk is an ACK. If so, the transfer state can be reset to INACTIVE.
+ // Otherwise, the final status should be re-sent.
+ if (!chunk.IsInitialChunk()) {
+ status_ = Status::FailedPrecondition();
+ }
+ SendFinalStatusChunk();
+ return;
+
+ case TransferState::kWaiting:
+ case TransferState::kTransmitting:
+ HandleTransferParametersUpdate(chunk);
+ if (transfer_state_ == TransferState::kCompleted) {
+ SendFinalStatusChunk();
+ }
+ return;
+ }
+}
+
+void Context::HandleTransferParametersUpdate(const Chunk& chunk) {
if (!chunk.pending_bytes.has_value()) {
// Malformed chunk.
- FinishAndSendStatus(Status::InvalidArgument());
- return false;
+ Finish(Status::InvalidArgument());
+ return;
}
bool retransmit = true;
if (chunk.type.has_value()) {
- retransmit = chunk.type == Chunk::Type::kParametersRetransmit;
+ retransmit = chunk.type == Chunk::Type::kParametersRetransmit ||
+ chunk.type == Chunk::Type::kTransferStart;
}
if (retransmit) {
@@ -140,8 +324,8 @@ bool Context::ReadTransmitChunk(const TransferParameters& max_parameters,
seek_status = Status::DataLoss();
}
- FinishAndSendStatus(seek_status);
- return false;
+ Finish(seek_status);
+ return;
}
}
@@ -157,150 +341,29 @@ bool Context::ReadTransmitChunk(const TransferParameters& max_parameters,
if (chunk.max_chunk_size_bytes.has_value()) {
max_chunk_size_bytes_ = std::min(chunk.max_chunk_size_bytes.value(),
- max_parameters.max_chunk_size_bytes());
- }
-
- return true;
-}
-
-void Context::ProcessTransmitChunk() {
- Status status;
-
- while ((status = SendNextDataChunk()).ok()) {
- // Continue until all requested bytes are sent.
- }
-
- // If all bytes are successfully sent, SendNextChunk will return OUT_OF_RANGE.
- if (!status.IsOutOfRange()) {
- FinishAndSendStatus(status);
- }
-}
-
-bool Context::ReadReceiveChunk(ChunkDataBuffer& buffer,
- const TransferParameters& max_parameters,
- const Chunk& chunk) {
- state_lock_.lock();
-
- switch (transfer_state_) {
- case TransferState::kInactive:
- PW_CRASH("Never should handle chunk while in kInactive state");
-
- case TransferState::kTimedOut:
- // Drop the chunk to let the timeout handler run.
- state_lock_.unlock();
- return false;
-
- case TransferState::kCompleted: {
- // If the chunk is a repeat of the final chunk, resend the status chunk,
- // which apparently was lost. Otherwise, send FAILED_PRECONDITION since
- // this is for a non-pending transfer.
- Status response = status_;
- if (!chunk.IsFinalTransmitChunk()) {
- response = Status::FailedPrecondition();
- // Sender should only should retry with final chunk
- transfer_state_ = TransferState::kInactive;
- }
- state_lock_.unlock();
- SendStatusChunk(response);
- return false;
- }
-
- case TransferState::kData:
- state_lock_.unlock();
- if (!HandleDataChunk(buffer, max_parameters, chunk)) {
- return false;
- }
- break;
-
- case TransferState::kRecovery:
- if (chunk.offset != offset_) {
- state_lock_.unlock();
-
- if (last_chunk_offset_ == chunk.offset) {
- PW_LOG_DEBUG(
- "Transfer %u received repeated offset %u; retry detected, "
- "resending transfer parameters",
- static_cast<unsigned>(transfer_id_),
- static_cast<unsigned>(chunk.offset));
- if (!UpdateAndSendTransferParameters(max_parameters, kRetransmit)
- .ok()) {
- return false;
- }
- } else {
- PW_LOG_DEBUG("Transfer %u waiting for offset %u, ignoring %u",
- static_cast<unsigned>(transfer_id_),
- static_cast<unsigned>(offset_),
- static_cast<unsigned>(chunk.offset));
- }
-
- last_chunk_offset_ = chunk.offset;
- return false;
- }
-
- PW_LOG_DEBUG("Transfer %u received expected offset %u, resuming transfer",
- static_cast<unsigned>(transfer_id_),
- static_cast<unsigned>(offset_));
-
- transfer_state_ = TransferState::kData;
- state_lock_.unlock();
-
- if (!HandleDataChunk(buffer, max_parameters, chunk)) {
- return false;
- }
- break;
- }
-
- // Update the last offset seen so that retries can be detected.
- last_chunk_offset_ = chunk.offset;
- return true;
-}
-
-void Context::ProcessReceiveChunk(ChunkDataBuffer& buffer,
- const TransferParameters& max_parameters) {
- // Write staged data from the buffer to the stream.
- if (!buffer.empty()) {
- if (Status status = writer().Write(buffer); !status.ok()) {
- PW_LOG_ERROR(
- "Transfer %u write of %u B chunk failed with status %u; aborting "
- "with DATA_LOSS",
- static_cast<unsigned>(transfer_id_),
- static_cast<unsigned>(buffer.size()),
- status.code());
- FinishAndSendStatus(Status::DataLoss());
- return;
- }
+ max_parameters_->max_chunk_size_bytes());
}
- // When the client sets remaining_bytes to 0, it indicates completion of the
- // transfer. Acknowledge the completion through a status chunk and clean up.
- if (buffer.last_chunk()) {
- FinishAndSendStatus(OkStatus());
- return;
+ if (chunk.min_delay_microseconds.has_value()) {
+ interchunk_delay_ = chrono::SystemClock::for_at_least(
+ std::chrono::microseconds(chunk.min_delay_microseconds.value()));
}
- // Update the transfer state.
- offset_ += buffer.size();
- pending_bytes_ -= buffer.size();
+ PW_LOG_DEBUG(
+ "Transfer %u received parameters type=%s offset=%u window_end_offset=%u",
+ static_cast<unsigned>(transfer_id_),
+ retransmit ? "RETRANSMIT" : "CONTINUE",
+ static_cast<unsigned>(chunk.offset),
+ static_cast<unsigned>(window_end_offset_));
- // TODO(frolv): Release the buffer.
+ // Parsed all of the parameters; start sending the window.
+ set_transfer_state(TransferState::kTransmitting);
- // Once the transmitter has sent a sufficient amount of data, try to extend
- // the window to allow it to continue sending data without blocking.
- uint32_t remaining_window_size = window_end_offset_ - offset_;
- bool extend_window = remaining_window_size <=
- window_size_ / TransferParameters::kExtendWindowDivisor;
-
- if (pending_bytes_ == 0u) {
- // First chunk of a receive transfer (transfer_id only). This condition
- // should be updated to explicitly check for the first chunk.
- UpdateAndSendTransferParameters(max_parameters, kRetransmit);
- } else if (extend_window) {
- UpdateAndSendTransferParameters(max_parameters, kExtend);
- }
+ TransmitNextChunk(retransmit);
}
-Status Context::SendNextDataChunk() {
- ByteSpan buffer = *encoding_buffer_;
+void Context::TransmitNextChunk(bool retransmit_requested) {
+ ByteSpan buffer = thread_->encode_buffer();
// Begin by doing a partial encode of all the metadata fields, leaving the
// buffer with usable space for the chunk data at the end.
@@ -308,6 +371,12 @@ Status Context::SendNextDataChunk() {
encoder.WriteTransferId(transfer_id_).IgnoreError();
encoder.WriteOffset(offset_).IgnoreError();
+ // TODO(frolv): Type field presence is currently meaningful, so this type must
+ // be serialized. Once all users of transfer always set chunk types, the field
+ // can be made non-optional and this write can be removed as TRANSFER_DATA has
+ // the default proto value of 0.
+ encoder.WriteType(transfer::Chunk::Type::TRANSFER_DATA).IgnoreError();
+
// Reserve space for the data proto field overhead and use the remainder of
// the buffer for the chunk data.
size_t reserved_size = encoder.size() + 1 /* data key */ + 5 /* data size */;
@@ -326,15 +395,32 @@ Status Context::SendNextDataChunk() {
encoder.WriteRemainingBytes(0).IgnoreError();
window_end_offset_ = offset_;
pending_bytes_ = 0;
+
+ PW_LOG_DEBUG("Transfer %u sending final chunk with remaining_bytes=0",
+ static_cast<unsigned>(transfer_id_));
} else if (data.ok()) {
if (offset_ == window_end_offset_) {
- PW_LOG_DEBUG(
- "Transfer %u is not finished, but the receiver cannot accept any "
- "more data (offset == window_end_offset)",
- static_cast<unsigned>(transfer_id_));
- return Status::ResourceExhausted();
+ if (retransmit_requested) {
+ PW_LOG_DEBUG(
+ "Transfer %u: received an empty retransmit request, but there is "
+ "still data to send; aborting with RESOURCE_EXHAUSTED",
+ id_for_log());
+ Finish(Status::ResourceExhausted());
+ } else {
+ PW_LOG_DEBUG(
+ "Transfer %u: ignoring continuation packet for transfer window "
+ "that has already been sent",
+ id_for_log());
+ SetTimeout(chunk_timeout_);
+ }
+ return; // No data was requested, so there is nothing else to do.
}
+ PW_LOG_DEBUG("Transfer %u sending chunk offset=%u size=%u",
+ static_cast<unsigned>(transfer_id_),
+ static_cast<unsigned>(offset_),
+ static_cast<unsigned>(data.value().size()));
+
encoder.WriteData(data.value()).IgnoreError();
last_chunk_offset_ = offset_;
offset_ += data.value().size();
@@ -343,42 +429,112 @@ Status Context::SendNextDataChunk() {
PW_LOG_ERROR("Transfer %u Read() failed with status %u",
static_cast<unsigned>(transfer_id_),
data.status().code());
- return Status::DataLoss();
+ Finish(Status::DataLoss());
+ return;
}
if (!encoder.status().ok()) {
PW_LOG_ERROR("Transfer %u failed to encode transmit chunk",
static_cast<unsigned>(transfer_id_));
- return Status::Internal();
+ Finish(Status::Internal());
+ return;
}
if (const Status status = rpc_writer_->Write(encoder); !status.ok()) {
PW_LOG_ERROR("Transfer %u failed to send transmit chunk, status %u",
static_cast<unsigned>(transfer_id_),
status.code());
- return Status::DataLoss();
+ Finish(Status::DataLoss());
+ return;
}
flags_ |= kFlagsDataSent;
if (offset_ == window_end_offset_) {
- return Status::OutOfRange();
+ // Sent all requested data. Must now wait for next parameters from the
+ // receiver.
+ set_transfer_state(TransferState::kWaiting);
+ SetTimeout(chunk_timeout_);
+ } else {
+ // More data is to be sent. Set a timeout to send the next chunk following
+ // the chunk delay.
+ SetTimeout(chrono::SystemClock::for_at_least(interchunk_delay_));
}
+}
- return data.status();
+void Context::HandleReceiveChunk(const Chunk& chunk) {
+ switch (transfer_state_) {
+ case TransferState::kInactive:
+ PW_CRASH("Never should handle chunk while inactive");
+
+ case TransferState::kTransmitting:
+ PW_CRASH("Receive transfer somehow entered TRANSMITTING state");
+
+ case TransferState::kCompleted:
+ // If the transfer has already completed and another chunk is received,
+ // re-send the final status chunk.
+ //
+ // TODO(frolv): Final status chunks should be ACKed by the other end. When
+ // that is added, this case should be updated to check if the received
+ // chunk is an ACK. If so, the transfer state can be reset to INACTIVE.
+ // Otherwise, the final status should be re-sent.
+ SendFinalStatusChunk();
+ return;
+
+ case TransferState::kRecovery:
+ if (chunk.offset != offset_) {
+ if (last_chunk_offset_ == chunk.offset) {
+ PW_LOG_DEBUG(
+ "Transfer %u received repeated offset %u; retry detected, "
+ "resending transfer parameters",
+ static_cast<unsigned>(transfer_id_),
+ static_cast<unsigned>(chunk.offset));
+
+ UpdateAndSendTransferParameters(TransmitAction::kRetransmit);
+ if (transfer_state_ == TransferState::kCompleted) {
+ SendFinalStatusChunk();
+ return;
+ }
+ PW_LOG_DEBUG("Transfer %u waiting for offset %u, ignoring %u",
+ static_cast<unsigned>(transfer_id_),
+ static_cast<unsigned>(offset_),
+ static_cast<unsigned>(chunk.offset));
+ }
+
+ last_chunk_offset_ = chunk.offset;
+ SetTimeout(chunk_timeout_);
+ return;
+ }
+
+ PW_LOG_DEBUG("Transfer %u received expected offset %u, resuming transfer",
+ static_cast<unsigned>(transfer_id_),
+ static_cast<unsigned>(offset_));
+ set_transfer_state(TransferState::kWaiting);
+
+ // The correct chunk was received; process it normally.
+ [[fallthrough]];
+ case TransferState::kWaiting:
+ HandleReceivedData(chunk);
+ if (transfer_state_ == TransferState::kCompleted) {
+ SendFinalStatusChunk();
+ }
+ return;
+ }
}
-bool Context::HandleDataChunk(ChunkDataBuffer& buffer,
- const TransferParameters& max_parameters,
- const Chunk& chunk) {
+void Context::HandleReceivedData(const Chunk& chunk) {
if (chunk.data.size() > pending_bytes_) {
- // End the transfer, as this indcates a bug with the client implementation
+ // End the transfer, as this indicates a bug with the client implementation
// where it doesn't respect pending_bytes. Trying to recover from here
// could potentially result in an infinite transfer loop.
PW_LOG_ERROR(
- "Received more data than what was requested; terminating transfer.");
- FinishAndSendStatus(Status::Internal());
- return false;
+ "Transfer %u received more data than what was requested (%u received "
+ "for %u pending); terminating transfer.",
+ id_for_log(),
+ static_cast<unsigned>(chunk.data.size()),
+ static_cast<unsigned>(pending_bytes_));
+ Finish(Status::Internal());
+ return;
}
if (chunk.offset != offset_) {
@@ -388,204 +544,180 @@ bool Context::HandleDataChunk(ChunkDataBuffer& buffer,
static_cast<unsigned>(transfer_id_),
static_cast<unsigned>(offset_),
static_cast<unsigned>(chunk.offset));
- UpdateAndSendTransferParameters(max_parameters, kRetransmit);
+
set_transfer_state(TransferState::kRecovery);
+ SetTimeout(chunk_timeout_);
- // Return false as there is no immediate deferred work to complete. The
- // transfer must wait for the next data chunk to be sent by the transmitter.
- return false;
+ UpdateAndSendTransferParameters(TransmitAction::kRetransmit);
+ return;
}
- // Write the chunk data to the buffer to be processed later. If the chunk has
- // no data, this will clear the buffer.
- buffer.Write(chunk.data, chunk.IsFinalTransmitChunk());
- return true;
-}
-
-Status Context::SendTransferParameters(TransmitAction action) {
- const internal::Chunk parameters = {
- .transfer_id = transfer_id_,
- .window_end_offset = window_end_offset_,
- .pending_bytes = pending_bytes_,
- .max_chunk_size_bytes = max_chunk_size_bytes_,
- .min_delay_microseconds = kDefaultChunkDelayMicroseconds,
- .offset = offset_,
- .type = action == kRetransmit
- ? internal::Chunk::Type::kParametersRetransmit
- : internal::Chunk::Type::kParametersContinue,
- };
+ // Update the last offset seen so that retries can be detected.
+ last_chunk_offset_ = chunk.offset;
- PW_LOG_DEBUG(
- "Transfer %u sending transfer parameters: "
- "offset=%u, window_end_offset=%u, pending_bytes=%u, chunk_size=%u",
- static_cast<unsigned>(transfer_id_),
- static_cast<unsigned>(offset_),
- static_cast<unsigned>(window_end_offset_),
- static_cast<unsigned>(pending_bytes_),
- static_cast<unsigned>(max_chunk_size_bytes_));
+ // Write staged data from the buffer to the stream.
+ if (!chunk.data.empty()) {
+ if (Status status = writer().Write(chunk.data); !status.ok()) {
+ PW_LOG_ERROR(
+ "Transfer %u write of %u B chunk failed with status %u; aborting "
+ "with DATA_LOSS",
+ static_cast<unsigned>(transfer_id_),
+ static_cast<unsigned>(chunk.data.size()),
+ status.code());
+ Finish(Status::DataLoss());
+ return;
+ }
- // If the parameters can't be encoded or sent, it most likely indicates a
- // transport-layer issue, so there isn't much that can be done by the transfer
- // service. The client will time out and can try to restart the transfer.
- Result<ConstByteSpan> data =
- internal::EncodeChunk(parameters, *encoding_buffer_);
- if (!data.ok()) {
- PW_LOG_ERROR("Failed to encode parameters for transfer %u: %d",
- static_cast<unsigned>(parameters.transfer_id),
- data.status().code());
- FinishAndSendStatus(Status::Internal());
- return Status::Internal();
+ transfer_rate_.Update(chunk.data.size());
}
- if (const Status status = rpc_writer_->Write(*data); !status.ok()) {
- PW_LOG_ERROR("Failed to write parameters for transfer %u: %d",
- static_cast<unsigned>(parameters.transfer_id),
- status.code());
- if (on_completion_) {
- on_completion_(*this, Status::Internal());
- }
- return Status::Internal();
+ // When the client sets remaining_bytes to 0, it indicates completion of the
+ // transfer. Acknowledge the completion through a status chunk and clean up.
+ if (chunk.IsFinalTransmitChunk()) {
+ Finish(OkStatus());
+ return;
}
- return OkStatus();
-}
-
-Status Context::UpdateAndSendTransferParameters(
- const TransferParameters& max_parameters, TransmitAction action) {
- size_t pending_bytes =
- std::min(max_parameters.pending_bytes(),
- static_cast<uint32_t>(writer().ConservativeWriteLimit()));
-
- window_size_ = pending_bytes;
- window_end_offset_ = offset_ + pending_bytes;
- pending_bytes_ = pending_bytes;
-
- max_chunk_size_bytes_ = MaxWriteChunkSize(
- max_parameters.max_chunk_size_bytes(), rpc_writer_->channel_id());
+ // Update the transfer state.
+ offset_ += chunk.data.size();
+ pending_bytes_ -= chunk.data.size();
- return SendTransferParameters(action);
-}
+ if (chunk.window_end_offset != 0) {
+ if (chunk.window_end_offset < offset_) {
+ PW_LOG_ERROR(
+ "Transfer %u got invalid end offset of %u (current offset %u)",
+ id_for_log(),
+ static_cast<unsigned>(chunk.window_end_offset),
+ static_cast<unsigned>(offset_));
+ Finish(Status::Internal());
+ return;
+ }
-void Context::Initialize(Type type,
- uint32_t transfer_id,
- work_queue::WorkQueue& work_queue,
- EncodingBuffer& encoding_buffer,
- rpc::Writer& rpc_writer,
- stream::Stream& stream,
- chrono::SystemClock::duration chunk_timeout,
- uint8_t max_retries) {
- PW_DCHECK(!active());
- PW_CHECK(state_lock_.try_lock());
+ if (chunk.window_end_offset > window_end_offset_) {
+ // A transmitter should never send a larger end offset than what the
+ // receiver has advertised. If this occurs, there is a bug in the
+ // transmitter implementation. Terminate the transfer.
+ PW_LOG_ERROR(
+ "Transfer %u transmitter sent invalid end offset of %u, "
+ "greater than receiver offset %u",
+ id_for_log(),
+ static_cast<unsigned>(chunk.window_end_offset),
+ static_cast<unsigned>(window_end_offset_));
+ Finish(Status::Internal());
+ return;
+ }
- transfer_id_ = transfer_id;
- flags_ = static_cast<uint8_t>(type);
- transfer_state_ = TransferState::kData;
- retries_ = 0;
- max_retries_ = max_retries;
+ window_end_offset_ = chunk.window_end_offset;
+ pending_bytes_ = chunk.window_end_offset - offset_;
+ }
- rpc_writer_ = &rpc_writer;
- stream_ = &stream;
+ SetTimeout(chunk_timeout_);
- offset_ = 0;
- window_size_ = 0;
- window_end_offset_ = 0;
- pending_bytes_ = 0;
- max_chunk_size_bytes_ = std::numeric_limits<uint32_t>::max();
+ if (pending_bytes_ == 0u) {
+ // Received all pending data. Advance the transfer parameters.
+ UpdateAndSendTransferParameters(TransmitAction::kRetransmit);
+ return;
+ }
- last_chunk_offset_ = 0;
- chunk_timeout_ = chunk_timeout;
- work_queue_ = &work_queue;
- encoding_buffer_ = &encoding_buffer;
+ // Once the transmitter has sent a sufficient amount of data, try to extend
+ // the window to allow it to continue sending data without blocking.
+ uint32_t remaining_window_size = window_end_offset_ - offset_;
+ bool extend_window = remaining_window_size <=
+ window_size_ / max_parameters_->extend_window_divisor();
- state_lock_.unlock();
+ if (extend_window) {
+ UpdateAndSendTransferParameters(TransmitAction::kExtend);
+ return;
+ }
}
-void Context::SendStatusChunk(Status status) {
+void Context::SendFinalStatusChunk() {
+ PW_DCHECK(transfer_state_ == TransferState::kCompleted);
+
internal::Chunk chunk = {};
chunk.transfer_id = transfer_id_;
- chunk.status = status.code();
-
- Result<ConstByteSpan> result =
- internal::EncodeChunk(chunk, *encoding_buffer_);
+ chunk.status = status_.code();
+ chunk.type = Chunk::Type::kTransferCompletion;
- if (!result.ok()) {
- PW_LOG_ERROR("Failed to encode final chunk for transfer %u",
- static_cast<unsigned>(transfer_id_));
- return;
- }
-
- if (!rpc_writer_->Write(result.value()).ok()) {
- PW_LOG_ERROR("Failed to send final chunk for transfer %u",
- static_cast<unsigned>(transfer_id_));
- return;
- }
+ PW_LOG_DEBUG("Sending final chunk for transfer %u with status %u",
+ static_cast<unsigned>(transfer_id_),
+ status_.code());
+ EncodeAndSendChunk(chunk);
}
-void Context::FinishAndSendStatus(Status status) {
- CancelTimer();
+void Context::Finish(Status status) {
+ PW_DCHECK(active());
- PW_LOG_INFO("Transfer %u completed with status %u; sending final chunk",
+ PW_LOG_INFO("Transfer %u completed with status %u",
static_cast<unsigned>(transfer_id_),
status.code());
- status_ = status;
+ status.Update(FinalCleanup(status));
- if (on_completion_ != nullptr) {
- status.Update(on_completion_(*this, status));
- }
-
- SendStatusChunk(status);
set_transfer_state(TransferState::kCompleted);
+ SetTimeout(kFinalChunkAckTimeout);
+ status_ = status;
}
-void Context::OnTimeout() {
- std::lock_guard lock(state_lock_);
- transfer_state_ = TransferState::kTimedOut;
+void Context::SetTimeout(chrono::SystemClock::duration timeout) {
+ next_timeout_ = chrono::SystemClock::TimePointAfterAtLeast(timeout);
+}
- const Status status = work_queue_->PushWork([this]() {
- HandleTimeout();
- if (active()) {
- timer_.InvokeAfter(chunk_timeout_);
- }
- });
+void Context::HandleTimeout() {
+ ClearTimeout();
- if (!status.ok()) {
- PW_LOG_ERROR("Transfer %u failed to push timeout handler to work queue",
- static_cast<unsigned>(transfer_id_));
+ switch (transfer_state_) {
+ case TransferState::kCompleted:
+ // A timeout occurring in a completed state indicates that the other side
+ // never ACKed the final status packet. Reset the context to inactive.
+ set_transfer_state(TransferState::kInactive);
+ return;
+
+ case TransferState::kTransmitting:
+ // A timeout occurring in a TRANSMITTING state indicates that the transfer
+ // has waited for its inter-chunk delay and should transmit its next
+ // chunk.
+ TransmitNextChunk(/*retransmit_requested=*/false);
+ break;
+
+ case TransferState::kWaiting:
+ case TransferState::kRecovery:
+ // A timeout occurring in a WAITING or RECOVERY state indicates that no
+ // chunk has been received from the other side. The transfer should retry
+ // its previous operation.
+ SetTimeout(chunk_timeout_); // Finish() clears the timeout if retry fails
+ Retry();
+ break;
- // If the work queue is full, there is no way to keep the transfer alive or
- // notify the other end of the failure. Simply end the transfer; if any more
- // chunks are received, an error will be sent then.
- status_ = Status::DeadlineExceeded();
- transfer_state_ = TransferState::kCompleted;
+ case TransferState::kInactive:
+ PW_LOG_ERROR("Timeout occurred in INACTIVE state");
+ return;
}
-}
-void Context::HandleTimeout() {
- state_lock_.lock();
- PW_DCHECK(transfer_state_ == TransferState::kTimedOut);
+ if (transfer_state_ == TransferState::kCompleted) {
+ SendFinalStatusChunk();
+ }
+}
+void Context::Retry() {
if (retries_ == max_retries_) {
PW_LOG_ERROR("Transfer %u failed to receive a chunk after %u retries.",
static_cast<unsigned>(transfer_id_),
static_cast<unsigned>(retries_));
PW_LOG_ERROR("Canceling transfer.");
- state_lock_.unlock();
- FinishAndSendStatus(Status::DeadlineExceeded());
+ Finish(Status::DeadlineExceeded());
return;
}
++retries_;
- transfer_state_ = TransferState::kData;
- state_lock_.unlock();
- if (type() == kReceive) {
- // Resend the most recent transfer parameters. SendTransferParameters()
- // internally handles failures, so its status can be ignored.
+ if (type() == TransferType::kReceive) {
+ // Resend the most recent transfer parameters.
PW_LOG_DEBUG(
"Receive transfer %u timed out waiting for chunk; resending parameters",
static_cast<unsigned>(transfer_id_));
- SendTransferParameters(kRetransmit).IgnoreError();
+
+ SendTransferParameters(TransmitAction::kRetransmit);
return;
}
@@ -605,7 +737,7 @@ void Context::HandleTimeout() {
PW_LOG_ERROR("Transmit transfer %d timed out waiting for new parameters.",
static_cast<unsigned>(transfer_id_));
PW_LOG_ERROR("Retrying requires a seekable reader. Alas, ours is not.");
- FinishAndSendStatus(Status::DeadlineExceeded());
+ Finish(Status::DeadlineExceeded());
return;
}
@@ -614,7 +746,7 @@ void Context::HandleTimeout() {
offset_ = last_chunk_offset_;
pending_bytes_ += last_size_sent;
- ProcessTransmitChunk();
+ TransmitNextChunk(/*retransmit_requested=*/false);
}
uint32_t Context::MaxWriteChunkSize(uint32_t max_chunk_size_bytes,
@@ -670,3 +802,5 @@ uint32_t Context::MaxWriteChunkSize(uint32_t max_chunk_size_bytes,
}
} // namespace pw::transfer::internal
+
+PW_MODIFY_DIAGNOSTICS_POP();
diff --git a/pw_transfer/docs.rst b/pw_transfer/docs.rst
index 229faa9e7..b0a068b58 100644
--- a/pw_transfer/docs.rst
+++ b/pw_transfer/docs.rst
@@ -79,7 +79,30 @@ transfer service using their transfer IDs.
Module Configuration Options
----------------------------
-todo
+The following configurations can be adjusted via compile-time configuration of
+this module, see the
+:ref:`module documentation <module-structure-compile-time-configuration>` for
+more details.
+
+.. c:macro:: PW_TRANSFER_DEFAULT_MAX_RETRIES
+
+ The default maximum number of times a transfer should retry sending a chunk
+ when no response is received. This can later be configured per-transfer.
+
+.. c:macro:: PW_TRANSFER_DEFAULT_TIMEOUT_MS
+
+ The default amount of time, in milliseconds, to wait for a chunk to arrive
+ before retrying. This can later be configured per-transfer.
+
+.. c:macro:: PW_TRANSFER_DEFAULT_EXTEND_WINDOW_DIVISOR
+
+ The fractional position within a window at which a receive transfer should
+ extend its window size to minimize the amount of time the transmitter
+ spends blocked.
+
+ For example, a divisor of 2 will extend the window when half of the
+ requested data has been received, a divisor of three will extend at a third
+ of the window, and so on.
Python
======
diff --git a/pw_transfer/integration_test.cc b/pw_transfer/integration_test.cc
index c328905a9..3f6f829e4 100644
--- a/pw_transfer/integration_test.cc
+++ b/pw_transfer/integration_test.cc
@@ -35,12 +35,11 @@ namespace {
using namespace std::chrono_literals;
-// TODO(hepler): Use more iterations when the pw_transfer synchronization issues
-// that make this flaky are fixed.
-constexpr int kIterations = 1;
+constexpr int kIterations = 5;
constexpr auto kData512 = bytes::Initialized<512>([](size_t i) { return i; });
constexpr auto kData8192 = bytes::Initialized<8192>([](size_t i) { return i; });
+constexpr auto kDataHdlcEscape = bytes::Initialized<8192>(0x7e);
std::filesystem::path directory;
@@ -66,27 +65,31 @@ ConstByteSpan AsByteSpan(const char (&data)[kLengthWithNull]) {
constexpr ConstByteSpan AsByteSpan(ConstByteSpan data) { return data; }
+thread::Options& TransferThreadOptions() {
+ static thread::stl::Options options;
+ return options;
+}
+
// Test fixture for pw_transfer tests. Clears the transfer files before and
// after each test.
class TransferIntegration : public ::testing::Test {
protected:
TransferIntegration()
- : client_(rpc::integration_test::client(),
+ : transfer_thread_(chunk_buffer_, encode_buffer_),
+ system_thread_(TransferThreadOptions(), transfer_thread_),
+ client_(rpc::integration_test::client(),
rpc::integration_test::kChannelId,
- work_queue_,
- client_buffer_,
+ transfer_thread_,
256),
test_server_client_(rpc::integration_test::client(),
- rpc::integration_test::kChannelId),
- work_queue_thread_(kThreadOptions, work_queue_) {
+ rpc::integration_test::kChannelId) {
ClearFiles();
}
~TransferIntegration() {
- work_queue_.RequestStop();
- work_queue_thread_.join();
-
ClearFiles();
+ transfer_thread_.Terminate();
+ system_thread_.join();
}
// Sets the content of a transfer ID and returns a MemoryReader for that data.
@@ -163,18 +166,16 @@ class TransferIntegration : public ::testing::Test {
}
}
- static constexpr thread::stl::Options kThreadOptions;
-
- work_queue::WorkQueueWithBuffer<4> work_queue_;
+ std::byte chunk_buffer_[512];
+ std::byte encode_buffer_[512];
+ transfer::Thread<2, 2> transfer_thread_;
+ thread::Thread system_thread_;
Client client_;
pw_rpc::raw::TestServer::Client test_server_client_;
Status last_status_ = Status::Unknown();
sync::BinarySemaphore completed_;
- std::byte client_buffer_[512];
-
- thread::Thread work_queue_thread_;
};
TEST_F(TransferIntegration, Read_UnknownId) {
@@ -203,6 +204,7 @@ PW_TRANSFER_TEST_READ(SingleByte_1, "\0");
PW_TRANSFER_TEST_READ(SingleByte_2, "?");
PW_TRANSFER_TEST_READ(SmallData, "hunter2");
PW_TRANSFER_TEST_READ(LargeData, kData512);
+PW_TRANSFER_TEST_READ(VeryLargeData, kData8192);
TEST_F(TransferIntegration, Write_UnknownId) {
constexpr std::byte kData[] = {std::byte{0}, std::byte{1}, std::byte{2}};
@@ -233,6 +235,8 @@ PW_TRANSFER_TEST_WRITE(SingleByte_1, "\0");
PW_TRANSFER_TEST_WRITE(SingleByte_2, "?");
PW_TRANSFER_TEST_WRITE(SmallData, "hunter2");
PW_TRANSFER_TEST_WRITE(LargeData, kData512);
+PW_TRANSFER_TEST_WRITE(HdlcEscape, kDataHdlcEscape);
+PW_TRANSFER_TEST_WRITE(VeryLargeData, kData8192);
} // namespace
} // namespace pw::transfer
diff --git a/pw_transfer/public/pw_transfer/client.h b/pw_transfer/public/pw_transfer/client.h
index 27e65ea9b..ae3622392 100644
--- a/pw_transfer/public/pw_transfer/client.h
+++ b/pw_transfer/public/pw_transfer/client.h
@@ -1,4 +1,4 @@
-// Copyright 2021 The Pigweed Authors
+// Copyright 2022 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
@@ -18,11 +18,10 @@
#include "pw_function/function.h"
#include "pw_status/status.h"
#include "pw_stream/stream.h"
-#include "pw_sync/lock_annotations.h"
-#include "pw_sync/mutex.h"
#include "pw_transfer/internal/client_context.h"
+#include "pw_transfer/internal/config.h"
#include "pw_transfer/transfer.raw_rpc.pb.h"
-#include "pw_work_queue/work_queue.h"
+#include "pw_transfer/transfer_thread.h"
namespace pw::transfer {
@@ -55,15 +54,18 @@ class Client {
// recover.
Client(rpc::Client& rpc_client,
uint32_t channel_id,
- work_queue::WorkQueue& work_queue,
- ByteSpan transfer_data_buffer,
- size_t max_bytes_to_receive = 0)
+ TransferThread& transfer_thread,
+ size_t max_bytes_to_receive = 0,
+ uint32_t extend_window_divisor = cfg::kDefaultExtendWindowDivisor)
: client_(rpc_client, channel_id),
- work_queue_(work_queue),
- max_parameters_(max_bytes_to_receive > 0 ? max_bytes_to_receive
- : transfer_data_buffer.size(),
- transfer_data_buffer.size()),
- chunk_data_buffer_(transfer_data_buffer) {}
+ transfer_thread_(transfer_thread),
+ max_parameters_(max_bytes_to_receive > 0
+ ? max_bytes_to_receive
+ : transfer_thread.max_chunk_size(),
+ transfer_thread.max_chunk_size(),
+ extend_window_divisor),
+ has_read_stream_(false),
+ has_write_stream_(false) {}
// Begins a new read transfer for the given transfer ID. The data read from
// the server is written to the provided writer. Returns OK if the transfer is
@@ -85,39 +87,26 @@ class Client {
CompletionFunc&& on_completion,
chrono::SystemClock::duration timeout = cfg::kDefaultChunkTimeout);
- private:
- using Transfer = pw_rpc::raw::Transfer;
- using ClientContext = internal::ClientContext;
+ Status set_extend_window_divisor(uint32_t extend_window_divisor) {
+ if (extend_window_divisor <= 1) {
+ return Status::InvalidArgument();
+ }
- enum Type : bool { kRead, kWrite };
+ max_parameters_.set_extend_window_divisor(extend_window_divisor);
+ return OkStatus();
+ }
- Status StartNewTransfer(uint32_t transfer_id,
- Type type,
- stream::Stream& stream,
- CompletionFunc&& on_completion,
- chrono::SystemClock::duration timeout);
-
- ClientContext* GetTransferById(uint32_t transfer_id);
+ private:
+ using Transfer = pw_rpc::raw::Transfer;
- // Function called when a chunk is received, from the context of the RPC
- // client thread.
- void OnChunk(ConstByteSpan data, Type type);
+ void OnRpcError(Status status, internal::TransferType type);
Transfer::Client client_;
- work_queue::WorkQueue& work_queue_;
-
- rpc::RawClientReaderWriter read_stream_;
- rpc::RawClientReaderWriter write_stream_;
-
- // TODO(frolv): Make this size configurable.
- std::array<ClientContext, 1> transfer_contexts_
- PW_GUARDED_BY(transfer_context_mutex_);
- sync::Mutex transfer_context_mutex_;
-
+ TransferThread& transfer_thread_;
internal::TransferParameters max_parameters_;
- internal::ChunkDataBuffer chunk_data_buffer_;
- internal::EncodingBuffer encoding_buffer_;
+ bool has_read_stream_;
+ bool has_write_stream_;
};
} // namespace pw::transfer
diff --git a/pw_transfer/public/pw_transfer/handler.h b/pw_transfer/public/pw_transfer/handler.h
index 00e10a9de..9680fb5e4 100644
--- a/pw_transfer/public/pw_transfer/handler.h
+++ b/pw_transfer/public/pw_transfer/handler.h
@@ -17,7 +17,7 @@
#include "pw_containers/intrusive_list.h"
#include "pw_status/status.h"
#include "pw_stream/stream.h"
-#include "pw_transfer/internal/client_connection.h"
+#include "pw_transfer/internal/event.h"
namespace pw::transfer {
namespace internal {
@@ -70,25 +70,20 @@ class Handler : public IntrusiveList<Handler>::Item {
void set_writer(stream::Writer& writer) { writer_ = &writer; }
private:
- friend class ServerContext;
+ friend class Context;
// Prepares for either a read or write transfer.
Status Prepare(internal::TransferType type) {
- return type == internal::kRead ? PrepareRead() : PrepareWrite();
+ return type == internal::TransferType::kTransmit ? PrepareRead()
+ : PrepareWrite();
}
- // Only valid after a PrepareRead() call that returns OK.
- stream::Reader& reader() const {
+ // Only valid after a PrepareRead() or PrepareWrite() call that returns OK.
+ stream::Stream& stream() const {
PW_DASSERT(reader_ != nullptr);
return *reader_;
}
- // Only valid after a PrepareWrite() call that returns OK.
- stream::Writer& writer() const {
- PW_DASSERT(writer_ != nullptr);
- return *writer_;
- }
-
uint32_t transfer_id_;
// Use a union to support constexpr construction.
@@ -116,6 +111,9 @@ class ReadOnlyHandler : public internal::Handler {
Status PrepareWrite() final { return Status::PermissionDenied(); }
using internal::Handler::set_reader;
+
+ private:
+ using internal::Handler::set_writer;
};
class WriteOnlyHandler : public internal::Handler {
@@ -134,6 +132,9 @@ class WriteOnlyHandler : public internal::Handler {
Status PrepareWrite() override { return OkStatus(); }
using internal::Handler::set_writer;
+
+ private:
+ using internal::Handler::set_reader;
};
class ReadWriteHandler : public internal::Handler {
diff --git a/pw_transfer/public/pw_transfer/internal/chunk.h b/pw_transfer/public/pw_transfer/internal/chunk.h
index 56f475daf..b20acf44f 100644
--- a/pw_transfer/public/pw_transfer/internal/chunk.h
+++ b/pw_transfer/public/pw_transfer/internal/chunk.h
@@ -28,13 +28,17 @@ struct Chunk {
kTransferStart = 1,
kParametersRetransmit = 2,
kParametersContinue = 3,
+ kTransferCompletion = 4,
+ kTransferCompletionAck = 5, // Currently unused.
};
// The initial chunk always has an offset of 0 and no data or status.
//
- // Pending bytes is required in all read chunks, so that is checked elsewhere.
+ // TODO(frolv): Going forward, all users of transfer should set a type for
+ // all chunks. This initial chunk assumption should be removed.
constexpr bool IsInitialChunk() const {
- return offset == 0 && data.empty() && !status.has_value();
+ return type == Type::kTransferStart ||
+ (offset == 0 && data.empty() && !status.has_value());
}
// The final chunk from the transmitter sets remaining_bytes to 0 in both Read
@@ -53,6 +57,9 @@ struct Chunk {
std::optional<Type> type;
};
+// Partially decodes a transfer chunk to find its transfer ID field.
+Result<uint32_t> ExtractTransferId(ConstByteSpan message);
+
Status DecodeChunk(ConstByteSpan message, Chunk& chunk);
Result<ConstByteSpan> EncodeChunk(const Chunk& chunk, ByteSpan buffer);
diff --git a/pw_transfer/public/pw_transfer/internal/chunk_data_buffer.h b/pw_transfer/public/pw_transfer/internal/chunk_data_buffer.h
deleted file mode 100644
index 92cb3a48e..000000000
--- a/pw_transfer/public/pw_transfer/internal/chunk_data_buffer.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2021 The Pigweed Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-#pragma once
-
-#include "pw_bytes/span.h"
-
-namespace pw::transfer::internal {
-
-// Stores deferred write chunk data for consumption in a work queue context.
-//
-// To avoid blocking an RPC thread, transfer data in receive transfer is not
-// written directly to a stream::Writer from the RPC callback. Instead, it is
-// copied into this buffer and later drained by a job in a work queue. This
-// buffer must be locked when it is written to, and unlocked when drained.
-class ChunkDataBuffer {
- public:
- constexpr ChunkDataBuffer(ByteSpan buffer)
- : buffer_(buffer), size_(0), last_chunk_(false) {}
-
- constexpr std::byte* data() const { return buffer_.data(); }
-
- constexpr size_t size() const { return size_; }
- constexpr size_t max_size() const { return buffer_.size(); }
-
- constexpr bool empty() const { return size() == 0u; }
-
- constexpr bool last_chunk() const { return last_chunk_; }
-
- void Write(ConstByteSpan data, bool last_chunk);
-
- private:
- // TODO(frolv): This should be locked for use between an RPC thread and work
- // queue.
- ByteSpan buffer_;
- size_t size_;
- bool last_chunk_;
-};
-
-} // namespace pw::transfer::internal
diff --git a/pw_transfer/public/pw_transfer/internal/client_connection.h b/pw_transfer/public/pw_transfer/internal/client_connection.h
deleted file mode 100644
index c21441579..000000000
--- a/pw_transfer/public/pw_transfer/internal/client_connection.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2021 The Pigweed Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-#pragma once
-
-#include <cstddef>
-#include <cstdint>
-
-#include "pw_assert/assert.h"
-#include "pw_rpc/raw/server_reader_writer.h"
-#include "pw_transfer/internal/context.h"
-
-namespace pw::transfer::internal {
-
-struct Chunk;
-
-enum TransferType : bool { kRead, kWrite };
-
-// Stores the read/write streams and transfer parameters for communicating with
-// a pw_transfer client.
-class ClientConnection {
- public:
- constexpr ClientConnection(EncodingBuffer& encoding_buffer,
- uint32_t max_pending_bytes,
- uint32_t max_chunk_size_bytes)
- : encoding_buffer_(encoding_buffer),
- max_parameters_(max_pending_bytes, max_chunk_size_bytes) {}
-
- void InitializeRead(rpc::RawServerReaderWriter& reader_writer,
- Function<void(ConstByteSpan)>&& callback) {
- read_stream_ = std::move(reader_writer);
- read_stream_.set_on_next(std::move(callback));
- }
-
- void InitializeWrite(rpc::RawServerReaderWriter& reader_writer,
- Function<void(ConstByteSpan)>&& callback) {
- write_stream_ = std::move(reader_writer);
- write_stream_.set_on_next(std::move(callback));
- }
-
- const TransferParameters& max_parameters() const { return max_parameters_; }
-
- rpc::RawServerReaderWriter& read_stream() { return read_stream_; }
- rpc::RawServerReaderWriter& write_stream() { return write_stream_; }
-
- rpc::RawServerReaderWriter& stream(TransferType type) {
- return type == kRead ? read_stream_ : write_stream_;
- }
-
- void SendStatusChunk(TransferType type, uint32_t transfer_id, Status status);
-
- private:
- EncodingBuffer& encoding_buffer_;
-
- // Persistent streams for read and write transfers. The server never closes
- // these streams -- they remain open until the client ends them.
- rpc::RawServerReaderWriter read_stream_;
- rpc::RawServerReaderWriter write_stream_;
-
- // Cannot exceed these parameters, even if the client requests a larger
- // pending bytes or chunk size.
- TransferParameters max_parameters_;
-};
-
-} // namespace pw::transfer::internal
diff --git a/pw_transfer/public/pw_transfer/internal/client_context.h b/pw_transfer/public/pw_transfer/internal/client_context.h
index 652d88c10..a84af3152 100644
--- a/pw_transfer/public/pw_transfer/internal/client_context.h
+++ b/pw_transfer/public/pw_transfer/internal/client_context.h
@@ -13,71 +13,24 @@
// the License.
#pragma once
-#include <variant>
-
-#include "pw_assert/assert.h"
#include "pw_function/function.h"
#include "pw_rpc/raw/client_reader_writer.h"
-#include "pw_stream/stream.h"
#include "pw_transfer/internal/context.h"
-namespace pw::transfer {
-
-class Client;
-
-namespace internal {
+namespace pw::transfer::internal {
-class ClientContext : public Context {
+class ClientContext final : public Context {
public:
- ClientContext()
- : internal::Context(OnCompletion),
- client_(nullptr),
- on_completion_(nullptr) {}
+ constexpr ClientContext() : on_completion_(nullptr) {}
- constexpr bool is_read_transfer() const { return type() == kReceive; }
- constexpr bool is_write_transfer() const { return type() == kTransmit; }
-
- Client& client() {
- PW_DASSERT(active());
- return *client_;
- }
-
- void StartRead(Client& client,
- uint32_t transfer_id,
- work_queue::WorkQueue& work_queue,
- EncodingBuffer& encoding_buffer,
- stream::Writer& writer,
- rpc::RawClientReaderWriter& stream,
- Function<void(Status)>&& on_completion,
- chrono::SystemClock::duration chunk_timeout);
-
- void StartWrite(Client& client,
- uint32_t transfer_id,
- work_queue::WorkQueue& work_queue,
- EncodingBuffer& encoding_buffer,
- stream::Reader& reader,
- rpc::RawClientReaderWriter& stream,
- Function<void(Status)>&& on_completion,
- chrono::SystemClock::duration chunk_timeout);
-
- void Finish(Status status) {
- PW_DASSERT(active());
- set_transfer_state(TransferState::kCompleted);
- if (on_completion_ != nullptr) {
- on_completion_(status);
- }
- client_ = nullptr;
+ void set_on_completion(Function<void(Status)>&& on_completion) {
+ on_completion_ = std::move(on_completion);
}
private:
- static Status OnCompletion(Context& ctx, Status status) {
- static_cast<ClientContext&>(ctx).Finish(status);
- return OkStatus();
- }
+ Status FinalCleanup(Status status) override;
- Client* client_;
Function<void(Status)> on_completion_;
};
-} // namespace internal
-} // namespace pw::transfer
+} // namespace pw::transfer::internal
diff --git a/pw_transfer/public/pw_transfer/internal/config.h b/pw_transfer/public/pw_transfer/internal/config.h
index b6e5cad5e..f144ac2a7 100644
--- a/pw_transfer/public/pw_transfer/internal/config.h
+++ b/pw_transfer/public/pw_transfer/internal/config.h
@@ -15,9 +15,7 @@
// Configuration macros for the transfer module.
#pragma once
-#include <array>
#include <cinttypes>
-#include <cstddef>
#include <limits>
#include "pw_chrono/system_clock.h"
@@ -28,43 +26,37 @@
#define PW_TRANSFER_DEFAULT_MAX_RETRIES 3
#endif // PW_TRANSFER_DEFAULT_MAX_RETRIES
+static_assert(PW_TRANSFER_DEFAULT_MAX_RETRIES > 0 &&
+ PW_TRANSFER_DEFAULT_MAX_RETRIES <=
+ std::numeric_limits<uint8_t>::max());
+
// The default amount of time, in milliseconds, to wait for a chunk to arrive
// before retrying. This can later be configured per-transfer.
#ifndef PW_TRANSFER_DEFAULT_TIMEOUT_MS
#define PW_TRANSFER_DEFAULT_TIMEOUT_MS 2000
#endif // PW_TRANSFER_DEFAULT_TIMEOUT_MS
-// The size of buffer to allocate in the transfer service/client.
-// TODO(pwbug/613): Use a different means to provide a buffer.
-#ifndef PW_TRANSFER_ENCODING_BUFFER_SIZE_BYTES
-#define PW_TRANSFER_ENCODING_BUFFER_SIZE_BYTES 512
-#endif // PW_TRANSFER_ENCODING_BUFFER_SIZE_BYTES
+static_assert(PW_TRANSFER_DEFAULT_TIMEOUT_MS > 0);
-static_assert(PW_TRANSFER_DEFAULT_MAX_RETRIES > 0 &&
- PW_TRANSFER_DEFAULT_MAX_RETRIES <=
- std::numeric_limits<uint8_t>::max());
+// The fractional position within a window at which a receive transfer should
+// extend its window size to minimize the amount of time the transmitter
+// spends blocked.
+//
+// For example, a divisor of 2 will extend the window when half of the
+// requested data has been received, a divisor of three will extend at a third
+// of the window, and so on.
+#ifndef PW_TRANSFER_DEFAULT_EXTEND_WINDOW_DIVISOR
+#define PW_TRANSFER_DEFAULT_EXTEND_WINDOW_DIVISOR 2
+#endif // PW_TRANSFER_DEFAULT_EXTEND_WINDOW_DIVISOR
-static_assert(PW_TRANSFER_DEFAULT_TIMEOUT_MS > 0);
+static_assert(PW_TRANSFER_DEFAULT_EXTEND_WINDOW_DIVISOR > 1);
-namespace pw::transfer {
-namespace cfg {
+namespace pw::transfer::cfg {
inline constexpr uint8_t kDefaultMaxRetries = PW_TRANSFER_DEFAULT_MAX_RETRIES;
inline constexpr chrono::SystemClock::duration kDefaultChunkTimeout =
std::chrono::milliseconds(PW_TRANSFER_DEFAULT_TIMEOUT_MS);
+inline constexpr uint32_t kDefaultExtendWindowDivisor =
+ PW_TRANSFER_DEFAULT_EXTEND_WINDOW_DIVISOR;
-} // namespace cfg
-
-namespace internal {
-
-// TODO(pwbug/613): Reconsider this buffer as part of the transfer refactor.
-using EncodingBuffer =
- std::array<std::byte, PW_TRANSFER_ENCODING_BUFFER_SIZE_BYTES>;
-
-} // namespace internal
-
-} // namespace pw::transfer
-
-#undef PW_TRANSFER_DEFAULT_MAX_RETRIES
-#undef PW_TRANSFER_DEFAULT_TIMEOUT_MS
-#undef PW_TRANSFER_ENCODING_BUFFER_SIZE_BYTES
+} // namespace pw::transfer::cfg
diff --git a/pw_transfer/public/pw_transfer/internal/context.h b/pw_transfer/public/pw_transfer/internal/context.h
index 5039325c4..f825419dd 100644
--- a/pw_transfer/public/pw_transfer/internal/context.h
+++ b/pw_transfer/public/pw_transfer/internal/context.h
@@ -1,4 +1,4 @@
-// Copyright 2021 The Pigweed Authors
+// Copyright 2022 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
@@ -16,56 +16,59 @@
#include <cinttypes>
#include <cstddef>
#include <limits>
+#include <optional>
#include "pw_assert/assert.h"
-#include "pw_chrono/system_timer.h"
+#include "pw_chrono/system_clock.h"
#include "pw_rpc/writer.h"
#include "pw_status/status.h"
#include "pw_stream/stream.h"
-#include "pw_sync/interrupt_spin_lock.h"
-#include "pw_sync/lock_annotations.h"
#include "pw_transfer/internal/chunk.h"
-#include "pw_transfer/internal/chunk_data_buffer.h"
-#include "pw_transfer/internal/config.h"
-#include "pw_work_queue/work_queue.h"
+#include "pw_transfer/internal/event.h"
+#include "pw_transfer/rate_estimate.h"
namespace pw::transfer::internal {
+class TransferThread;
+
class TransferParameters {
public:
constexpr TransferParameters(uint32_t pending_bytes,
- uint32_t max_chunk_size_bytes)
+ uint32_t max_chunk_size_bytes,
+ uint32_t extend_window_divisor)
: pending_bytes_(pending_bytes),
- max_chunk_size_bytes_(max_chunk_size_bytes) {
+ max_chunk_size_bytes_(max_chunk_size_bytes),
+ extend_window_divisor_(extend_window_divisor) {
PW_ASSERT(pending_bytes > 0);
PW_ASSERT(max_chunk_size_bytes > 0);
+ PW_ASSERT(extend_window_divisor > 1);
}
uint32_t pending_bytes() const { return pending_bytes_; }
+ void set_pending_bytes(uint32_t pending_bytes) {
+ pending_bytes_ = pending_bytes;
+ }
+
uint32_t max_chunk_size_bytes() const { return max_chunk_size_bytes_; }
+ void set_max_chunk_size_bytes(uint32_t max_chunk_size_bytes) {
+ max_chunk_size_bytes_ = max_chunk_size_bytes;
+ }
- // The fractional position within a window at which a receive transfer should
- // extend its window size to minimize the amount of time the transmitter
- // spends blocked.
- //
- // For example, a divisor of 2 will extend the window when half of the
- // requested data has been received, a divisor of three will extend at a third
- // of the window, and so on.
- //
- // TODO(frolv): Find a good threshold for this; maybe make it configurable?
- static constexpr uint32_t kExtendWindowDivisor = 2;
- static_assert(kExtendWindowDivisor > 1);
+ uint32_t extend_window_divisor() const { return extend_window_divisor_; }
+ void set_extend_window_divisor(uint32_t extend_window_divisor) {
+ PW_DASSERT(extend_window_divisor > 1);
+ extend_window_divisor_ = extend_window_divisor;
+ }
private:
uint32_t pending_bytes_;
uint32_t max_chunk_size_bytes_;
+ uint32_t extend_window_divisor_;
};
// Information about a single transfer.
class Context {
public:
- enum Type : bool { kTransmit, kReceive };
-
Context(const Context&) = delete;
Context(Context&&) = delete;
Context& operator=(const Context&) = delete;
@@ -74,46 +77,33 @@ class Context {
constexpr uint32_t transfer_id() const { return transfer_id_; }
// True if the context has been used for a transfer (it has an ID).
- bool initialized() {
- state_lock_.lock();
- bool initialized = transfer_state_ != TransferState::kInactive;
- state_lock_.unlock();
- return initialized;
+ bool initialized() const {
+ return transfer_state_ != TransferState::kInactive;
}
// True if the transfer is active.
- bool active() {
- state_lock_.lock();
- bool active = transfer_state_ >= TransferState::kData;
- state_lock_.unlock();
- return active;
+ bool active() const { return transfer_state_ >= TransferState::kWaiting; }
+
+ std::optional<chrono::SystemClock::time_point> timeout() const {
+ return active() && next_timeout_ != kNoTimeout
+ ? std::optional(next_timeout_)
+ : std::nullopt;
}
- // Starts a new transfer from an initialized context by sending the initial
- // transfer chunk. This is generally called from within a transfer client, as
- // it is unusual for a server to initiate a transfer.
- Status InitiateTransfer(const TransferParameters& max_parameters);
+ // Returns true if the transfer's most recently set timeout has passed.
+ bool timed_out() const {
+ std::optional<chrono::SystemClock::time_point> next_timeout = timeout();
+ return next_timeout.has_value() &&
+ chrono::SystemClock::now() >= next_timeout.value();
+ }
- // Extracts data from the provided chunk into the transfer context. This is
- // intended to be the immediate part of the transfer, run directly from within
- // the RPC message handler.
- //
- // Returns true if there is any deferred work required for this chunk (i.e.
- // ProcessChunk should be called to complete the operation).
- bool ReadChunkData(ChunkDataBuffer& buffer,
- const TransferParameters& max_parameters,
- const Chunk& chunk);
-
- // Handles the chunk from the previous invocation of ReadChunkData(). This
- // operation is intended to be deferred, running from a different context than
- // the RPC callback in which the chunk was received.
- void ProcessChunk(ChunkDataBuffer& buffer,
- const TransferParameters& max_parameters);
+ // Processes an event for this transfer.
+ void HandleEvent(const Event& event);
protected:
- using CompletionFunction = Status (*)(Context&, Status);
+ ~Context() = default;
- Context(CompletionFunction on_completion)
+ constexpr Context()
: transfer_id_(0),
flags_(0),
transfer_state_(TransferState::kInactive),
@@ -126,183 +116,178 @@ class Context {
window_end_offset_(0),
pending_bytes_(0),
max_chunk_size_bytes_(std::numeric_limits<uint32_t>::max()),
+ max_parameters_(nullptr),
+ thread_(nullptr),
last_chunk_offset_(0),
- timer_([this](chrono::SystemClock::time_point) { this->OnTimeout(); }),
chunk_timeout_(chrono::SystemClock::duration::zero()),
- work_queue_(nullptr),
- encoding_buffer_(nullptr),
- on_completion_(on_completion) {}
+ interchunk_delay_(chrono::SystemClock::for_at_least(
+ std::chrono::microseconds(kDefaultChunkDelayMicroseconds))),
+ next_timeout_(kNoTimeout) {}
+
+ constexpr TransferType type() const {
+ return static_cast<TransferType>(flags_ & kFlagsType);
+ }
+ private:
enum class TransferState : uint8_t {
// This ServerContext has never been used for a transfer. It is available
// for use for a transfer.
kInactive,
- // A transfer completed and the final status chunk was sent. The Context is
- // available for use for a new transfer. A receive transfer uses this state
- // to allow a transmitter to retry its last chunk if the final status chunk
+ // A transfer completed and the final status chunk was sent. The Context
+ // is
+ // available for use for a new transfer. A receive transfer uses this
+ // state
+ // to allow a transmitter to retry its last chunk if the final status
+ // chunk
// was dropped.
kCompleted,
- // Sending or receiving data for an active transfer.
- kData,
+ // Waiting for the other end to send a chunk.
+ kWaiting,
+ // Transmitting a window of data to a receiver.
+ kTransmitting,
// Recovering after one or more chunks was dropped in an active transfer.
kRecovery,
- // Hit a timeout and waiting for the timeout handler to run.
- kTimedOut,
};
- constexpr Type type() const { return static_cast<Type>(flags_ & kFlagsType); }
+ enum class TransmitAction {
+ // Start of a new transfer.
+ kBegin,
+ // Extend the current window length.
+ kExtend,
+ // Retransmit from a specified offset.
+ kRetransmit,
+ };
+
+ void set_transfer_state(TransferState state) { transfer_state_ = state; }
- void set_transfer_state(TransferState state) {
- state_lock_.lock();
- transfer_state_ = state;
- state_lock_.unlock();
+ // The transfer ID as unsigned instead of uint32_t so it can be used with %u.
+ unsigned id_for_log() const {
+ static_assert(sizeof(unsigned) >= sizeof(transfer_id_));
+ return static_cast<unsigned>(transfer_id_);
}
- // Begins a new transmit transfer from this context.
- // Precondition: context is not active.
- void InitializeForTransmit(
- uint32_t transfer_id,
- work_queue::WorkQueue& work_queue,
- EncodingBuffer& encoding_buffer,
- rpc::Writer& rpc_writer,
- stream::Reader& reader,
- chrono::SystemClock::duration chunk_timeout = cfg::kDefaultChunkTimeout,
- uint8_t max_retries = cfg::kDefaultMaxRetries) {
- Initialize(kTransmit,
- transfer_id,
- work_queue,
- encoding_buffer,
- rpc_writer,
- reader,
- chunk_timeout,
- max_retries);
+ stream::Reader& reader() {
+ PW_DASSERT(active() && type() == TransferType::kTransmit);
+ return static_cast<stream::Reader&>(*stream_);
}
- // Begins a new receive transfer from this context.
- // Precondition: context is not active.
- void InitializeForReceive(
- uint32_t transfer_id,
- work_queue::WorkQueue& work_queue,
- EncodingBuffer& encoding_buffer,
- rpc::Writer& rpc_writer,
- stream::Writer& writer,
- chrono::SystemClock::duration chunk_timeout = cfg::kDefaultChunkTimeout,
- uint8_t max_retries = cfg::kDefaultMaxRetries) {
- Initialize(kReceive,
- transfer_id,
- work_queue,
- encoding_buffer,
- rpc_writer,
- writer,
- chunk_timeout,
- max_retries);
+ stream::Writer& writer() {
+ PW_DASSERT(active() && type() == TransferType::kReceive);
+ return static_cast<stream::Writer&>(*stream_);
}
- // Calculates the maximum size of actual data that can be sent within a single
- // client write transfer chunk, accounting for the overhead of the transfer
- // protocol and RPC system.
+ // Calculates the maximum size of actual data that can be sent within a
+ // single client write transfer chunk, accounting for the overhead of the
+ // transfer protocol and RPC system.
//
// Note: This function relies on RPC protocol internals. This is generally a
- // *bad* idea, but is necessary here due to limitations of the RPC system and
- // its asymmetric ingress and egress paths.
+ // *bad* idea, but is necessary here due to limitations of the RPC system
+ // and its asymmetric ingress and egress paths.
//
// TODO(frolv): This should be investigated further and perhaps addressed
// within the RPC system, at the least through a helper function.
uint32_t MaxWriteChunkSize(uint32_t max_chunk_size_bytes,
uint32_t channel_id) const;
- private:
- enum TransmitAction : bool { kExtend, kRetransmit };
+ // Initializes a new transfer using new_transfer. The provided stream
+ // argument is used in place of the NewTransferEvent's stream. Only
+ // initializes state; no packets are sent.
+ //
+ // Precondition: context is not active.
+ void Initialize(const NewTransferEvent& new_transfer);
- void Initialize(Type type,
- uint32_t transfer_id,
- work_queue::WorkQueue& work_queue,
- EncodingBuffer& encoding_buffer,
- rpc::Writer& rpc_writer,
- stream::Stream& stream,
- chrono::SystemClock::duration chunk_timeout,
- uint8_t max_retries);
+ // Starts a new transfer from an initialized context by sending the initial
+ // transfer chunk. This is only used by transfer clients, as the transfer
+ // service cannot initiate transfers.
+ //
+ // Calls Finish(), which calls the on_completion callback, if initiating a
+ // transfer fails.
+ void InitiateTransferAsClient();
- stream::Reader& reader() {
- PW_DASSERT(active() && type() == kTransmit);
- return static_cast<stream::Reader&>(*stream_);
- }
+ // Starts a new transfer on the server after receiving a request from a
+ // client.
+ void StartTransferAsServer(const NewTransferEvent& new_transfer);
- stream::Writer& writer() {
- PW_DASSERT(active() && type() == kReceive);
- return static_cast<stream::Writer&>(*stream_);
- }
+ // Does final cleanup specific to the server or client. Returns whether the
+ // cleanup succeeded. An error in cleanup indicates that the transfer
+ // failed.
+ virtual Status FinalCleanup(Status status) = 0;
- // Sends the first chunk in a transmit transfer.
- Status SendInitialTransmitChunk();
-
- // Functions which extract relevant data from a chunk into the context.
- bool ReadTransmitChunk(const TransferParameters& max_parameters,
- const Chunk& chunk);
- bool ReadReceiveChunk(ChunkDataBuffer& buffer,
- const TransferParameters& max_parameters,
- const Chunk& chunk);
-
- // Functions which handle the last received chunk.
- void ProcessTransmitChunk();
- void ProcessReceiveChunk(ChunkDataBuffer& buffer,
- const TransferParameters& max_parameters);
-
- // In a transmit transfer, sends the next data chunk from the local stream.
- // Returns status indicating what to do next:
- //
- // OK - continue
- // OUT_OF_RANGE - done for now
- // other errors - abort transfer with this error
- //
- Status SendNextDataChunk();
+ // Processes a chunk in either a transfer or receive transfer.
+ void HandleChunkEvent(const ChunkEvent& event);
+
+ // Processes a chunk in a transmit transfer.
+ void HandleTransmitChunk(const Chunk& chunk);
+
+ // Processes a transfer parameters update in a transmit transfer.
+ void HandleTransferParametersUpdate(const Chunk& chunk);
- // In a receive transfer, processes the fields from a data chunk and stages
- // the data for a deferred write. Returns true if there is a deferred
- // operation to complete.
- bool HandleDataChunk(ChunkDataBuffer& buffer,
- const TransferParameters& max_parameters,
- const Chunk& chunk);
+ // Sends the next chunk in a transmit transfer, if any.
+ void TransmitNextChunk(bool retransmit_requested);
- // In a receive transfer, sends a parameters chunk telling the transmitter how
- // much data they can send.
- Status SendTransferParameters(TransmitAction action);
+ // Processes a chunk in a receive transfer.
+ void HandleReceiveChunk(const Chunk& chunk);
+
+  // Processes a data chunk received while in the kWaiting state.
+ void HandleReceivedData(const Chunk& chunk);
+
+ // Sends the first chunk in a transmit transfer.
+ void SendInitialTransmitChunk();
+
+ // In a receive transfer, sends a parameters chunk telling the transmitter
+ // how much data they can send.
+ void SendTransferParameters(TransmitAction action);
// Updates the current receive transfer parameters from the provided object,
// then sends them.
- Status UpdateAndSendTransferParameters(
- const TransferParameters& max_parameters, TransmitAction action);
+ void UpdateAndSendTransferParameters(TransmitAction action);
- void SendStatusChunk(Status status);
- void FinishAndSendStatus(Status status);
+ // Sends a final status chunk of a completed transfer without updating the
+ // the transfer. Sends status_, which MUST have been set by a previous
+ // Finish() call.
+ void SendFinalStatusChunk();
- void CancelTimer() {
- timer_.Cancel();
- retries_ = 0;
- }
+ // Marks the transfer as completed and calls FinalCleanup(). Sets status_ to
+ // the final status for this transfer. The transfer MUST be active when this
+ // is called.
+ void Finish(Status status);
+
+ // Encodes the specified chunk to the encode buffer and sends it with the
+ // rpc_writer_. Calls Finish() with an error if the operation fails.
+ void EncodeAndSendChunk(const Chunk& chunk);
- // Timeout function invoked from the timer context. This may occur in an
- // interrupt, so no real work can be done. Instead, sets state to timed out
- // and adds a job to run the timeout handler.
- void OnTimeout();
+ void SetTimeout(chrono::SystemClock::duration timeout);
+ void ClearTimeout() { next_timeout_ = kNoTimeout; }
- // The acutal timeout handler, invoked from within the work queue.
+ // Called when the transfer's timeout expires.
void HandleTimeout();
+ // Resends the last packet or aborts the transfer if the maximum retries has
+ // been exceeded.
+ void Retry();
+
static constexpr uint8_t kFlagsType = 1 << 0;
static constexpr uint8_t kFlagsDataSent = 1 << 1;
- // TODO(frolv): Make this value configurable per transfer.
static constexpr uint32_t kDefaultChunkDelayMicroseconds = 2000;
+ // How long to wait for the other side to ACK a final transfer chunk before
+ // resetting the context so that it can be reused. During this time, the
+ // status chunk will be re-sent for every non-ACK chunk received,
+ // continually notifying the other end that the transfer is over.
+ static constexpr chrono::SystemClock::duration kFinalChunkAckTimeout =
+ std::chrono::milliseconds(5000);
+
+ static constexpr chrono::SystemClock::time_point kNoTimeout =
+ chrono::SystemClock::time_point(chrono::SystemClock::duration(0));
+
uint32_t transfer_id_;
uint8_t flags_;
- TransferState transfer_state_ PW_GUARDED_BY(state_lock_);
+ TransferState transfer_state_;
uint8_t retries_;
uint8_t max_retries_;
- sync::InterruptSpinLock state_lock_;
-
+ // The stream from which to read or to which to write data.
stream::Stream* stream_;
rpc::Writer* rpc_writer_;
@@ -313,18 +298,25 @@ class Context {
uint32_t pending_bytes_;
uint32_t max_chunk_size_bytes_;
+ const TransferParameters* max_parameters_;
+ TransferThread* thread_;
+
union {
Status status_; // Used when state is kCompleted.
- uint32_t last_chunk_offset_; // Used in states kData and kRecovery.
+ uint32_t last_chunk_offset_; // Used in states kWaiting and kRecovery.
};
- // Timer used to handle timeouts waiting for chunks.
- chrono::SystemTimer timer_;
+ // How long to wait for a chunk from the other end.
chrono::SystemClock::duration chunk_timeout_;
- work_queue::WorkQueue* work_queue_;
- EncodingBuffer* encoding_buffer_;
- CompletionFunction on_completion_;
+ // How long to delay between transmitting subsequent data chunks within a
+ // window.
+ chrono::SystemClock::duration interchunk_delay_;
+
+ // Timestamp at which the transfer will next time out, or kNoTimeout.
+ chrono::SystemClock::time_point next_timeout_;
+
+ RateEstimate transfer_rate_;
};
} // namespace pw::transfer::internal
diff --git a/pw_transfer/public/pw_transfer/internal/event.h b/pw_transfer/public/pw_transfer/internal/event.h
new file mode 100644
index 000000000..b4cc6ba02
--- /dev/null
+++ b/pw_transfer/public/pw_transfer/internal/event.h
@@ -0,0 +1,105 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_chrono/system_clock.h"
+#include "pw_rpc/writer.h"
+#include "pw_stream/stream.h"
+
+namespace pw::transfer::internal {
+
+enum class TransferType : bool { kTransmit, kReceive };
+
+enum class TransferStream {
+ kClientRead,
+ kClientWrite,
+ kServerRead,
+ kServerWrite,
+};
+
+enum class EventType {
+ // Begins a new transfer in an available context.
+ kNewClientTransfer,
+ kNewServerTransfer,
+
+ // Processes an incoming chunk for a transfer.
+ kClientChunk,
+ kServerChunk,
+
+ // Runs the timeout handler for a transfer.
+ kClientTimeout,
+ kServerTimeout,
+
+ // Sends a status chunk to terminate a transfer. This does not call into the
+ // transfer context's completion handler; it is for out-of-band termination.
+ kSendStatusChunk,
+
+ // Updates one of the transfer thread's RPC streams.
+ kSetTransferStream,
+
+ // Manages the list of transfer handlers for a transfer service.
+ kAddTransferHandler,
+ kRemoveTransferHandler,
+
+ // For testing only: aborts the transfer thread.
+ kTerminate,
+};
+
+// Forward declarations required for events.
+class Handler;
+class TransferParameters;
+class TransferThread;
+
+struct NewTransferEvent {
+ TransferType type;
+ uint32_t transfer_id;
+ uint32_t handler_id;
+ rpc::Writer* rpc_writer;
+ const TransferParameters* max_parameters;
+ chrono::SystemClock::duration timeout;
+ uint32_t max_retries;
+ TransferThread* transfer_thread;
+
+ union {
+ stream::Stream* stream; // In client-side transfers.
+ Handler* handler; // In server-side transfers.
+ };
+};
+
+struct ChunkEvent {
+ uint32_t transfer_id;
+ const std::byte* data;
+ size_t size;
+};
+
+struct SendStatusChunkEvent {
+ uint32_t transfer_id;
+ Status::Code status;
+ TransferStream stream;
+};
+
+struct Event {
+ EventType type;
+
+ union {
+ NewTransferEvent new_transfer;
+ ChunkEvent chunk;
+ SendStatusChunkEvent send_status_chunk;
+ TransferStream set_transfer_stream;
+ Handler* add_transfer_handler;
+ Handler* remove_transfer_handler;
+ };
+};
+
+} // namespace pw::transfer::internal
diff --git a/pw_transfer/public/pw_transfer/internal/server_context.h b/pw_transfer/public/pw_transfer/internal/server_context.h
index f4fd65ea3..038feece6 100644
--- a/pw_transfer/public/pw_transfer/internal/server_context.h
+++ b/pw_transfer/public/pw_transfer/internal/server_context.h
@@ -1,4 +1,4 @@
-// Copyright 2021 The Pigweed Authors
+// Copyright 2022 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
@@ -18,78 +18,30 @@
#include "pw_result/result.h"
#include "pw_rpc/raw/server_reader_writer.h"
#include "pw_transfer/handler.h"
-#include "pw_transfer/internal/client_connection.h"
#include "pw_transfer/internal/context.h"
namespace pw::transfer::internal {
// Transfer context for use within the transfer service (server-side). Stores a
// pointer to a transfer handler when active to stream the transfer data.
-class ServerContext : public Context {
+class ServerContext final : public Context {
public:
- ServerContext() : Context(OnCompletion), type_(kRead), handler_(nullptr) {}
+ constexpr ServerContext() : handler_(nullptr) {}
- // Begins a new transfer with the specified type and handler. Calls into the
- // handler's Prepare method.
- //
- // Precondition: Context is not already active.
- Status Start(TransferType type,
- Handler& handler,
- work_queue::WorkQueue& work_queue,
- EncodingBuffer& encoding_buffer,
- rpc::RawServerReaderWriter& stream,
- chrono::SystemClock::duration timeout,
- uint8_t max_retries);
+ // Sets the handler. The handler isn't set by Context::Initialize() since
+ // ClientContexts don't track it.
+ void set_handler(Handler& handler) { handler_ = &handler; }
+ private:
// Ends the transfer with the given status, calling the handler's Finalize
// method. No chunks are sent.
//
// Returns DATA_LOSS if the finalize call fails.
//
// Precondition: Transfer context is active.
- Status Finish(Status status);
-
- private:
- static Status OnCompletion(Context& ctx, Status status) {
- return static_cast<ServerContext&>(ctx).Finish(status);
- }
+ Status FinalCleanup(Status status) override;
- TransferType type_;
Handler* handler_;
};
-// A fixed-size pool of allocatable transfer contexts.
-class ServerContextPool {
- public:
- ServerContextPool(TransferType type,
- IntrusiveList<internal::Handler>& handlers)
- : type_(type), handlers_(handlers) {}
-
- // Looks up an active context by ID. If none exists, tries to allocate and
- // start a new context.
- //
- // Errors:
- //
- // NOT_FOUND - No handler exists for the specified transfer ID.
- // RESOURCE_EXHAUSTED - Out of transfer context slots.
- //
- Result<ServerContext*> StartTransfer(uint32_t transfer_id,
- work_queue::WorkQueue& work_queue,
- EncodingBuffer& encoding_buffer,
- rpc::RawServerReaderWriter& stream,
- chrono::SystemClock::duration timeout,
- uint8_t max_retries);
-
- Result<ServerContext*> GetPendingTransfer(uint32_t transfer_id);
-
- private:
- // TODO(frolv): Initially, only one transfer at a time is supported. Once that
- // is updated, this should be made configurable.
- static constexpr int kMaxConcurrentTransfers = 1;
-
- TransferType type_;
- std::array<ServerContext, kMaxConcurrentTransfers> transfers_;
- IntrusiveList<internal::Handler>& handlers_;
-};
-
} // namespace pw::transfer::internal
diff --git a/pw_transfer/public/pw_transfer/rate_estimate.h b/pw_transfer/public/pw_transfer/rate_estimate.h
new file mode 100644
index 000000000..9832397b9
--- /dev/null
+++ b/pw_transfer/public/pw_transfer/rate_estimate.h
@@ -0,0 +1,42 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <chrono>
+#include <cstdint>
+#include <optional>
+
+#include "pw_chrono/system_clock.h"
+
+namespace pw::transfer {
+
+class RateEstimate {
+ public:
+ constexpr RateEstimate() : start_time_(std::nullopt), bytes_transferred_(0) {}
+
+ void Reset() {
+ start_time_ = chrono::SystemClock::now();
+ bytes_transferred_ = 0;
+ }
+
+ constexpr void Update(size_t new_bytes) { bytes_transferred_ += new_bytes; }
+
+ size_t GetRateBytesPerSecond() const;
+
+ private:
+ std::optional<chrono::SystemClock::time_point> start_time_;
+ size_t bytes_transferred_;
+};
+
+} // namespace pw::transfer
diff --git a/pw_transfer/public/pw_transfer/transfer.h b/pw_transfer/public/pw_transfer/transfer.h
index 5ed8fd1e9..f6e71c314 100644
--- a/pw_transfer/public/pw_transfer/transfer.h
+++ b/pw_transfer/public/pw_transfer/transfer.h
@@ -1,4 +1,4 @@
-// Copyright 2021 The Pigweed Authors
+// Copyright 2022 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
@@ -19,11 +19,10 @@
#include "pw_bytes/span.h"
#include "pw_transfer/handler.h"
-#include "pw_transfer/internal/client_connection.h"
#include "pw_transfer/internal/config.h"
#include "pw_transfer/internal/server_context.h"
#include "pw_transfer/transfer.raw_rpc.pb.h"
-#include "pw_work_queue/work_queue.h"
+#include "pw_transfer/transfer_thread.h"
namespace pw::transfer {
namespace internal {
@@ -54,17 +53,15 @@ class TransferService : public pw_rpc::raw::Transfer::Service<TransferService> {
// larger values could slow down a transfer in the event of repeated packet
// loss.
TransferService(
- work_queue::WorkQueue& work_queue,
- ByteSpan transfer_data_buffer,
+ TransferThread& transfer_thread,
uint32_t max_pending_bytes,
chrono::SystemClock::duration chunk_timeout = cfg::kDefaultChunkTimeout,
- uint8_t max_retries = cfg::kDefaultMaxRetries)
- : read_transfers_(internal::kRead, handlers_),
- write_transfers_(internal::kWrite, handlers_),
- work_queue_(work_queue),
- client_(
- encoding_buffer_, max_pending_bytes, transfer_data_buffer.size()),
- chunk_data_buffer_(transfer_data_buffer),
+ uint8_t max_retries = cfg::kDefaultMaxRetries,
+ uint32_t extend_window_divisor = cfg::kDefaultExtendWindowDivisor)
+ : max_parameters_(max_pending_bytes,
+ transfer_thread.max_chunk_size(),
+ extend_window_divisor),
+ thread_(transfer_thread),
chunk_timeout_(chunk_timeout),
max_retries_(max_retries) {}
@@ -75,23 +72,35 @@ class TransferService : public pw_rpc::raw::Transfer::Service<TransferService> {
TransferService& operator=(TransferService&&) = delete;
void Read(RawServerReaderWriter& reader_writer) {
- client_.InitializeRead(reader_writer, [this](ConstByteSpan message) {
- HandleChunk(message, internal::kRead);
+ reader_writer.set_on_next([this](ConstByteSpan message) {
+ HandleChunk(message, internal::TransferType::kTransmit);
});
+ thread_.SetServerReadStream(reader_writer);
}
void Write(RawServerReaderWriter& reader_writer) {
- client_.InitializeWrite(reader_writer, [this](ConstByteSpan message) {
- HandleChunk(message, internal::kWrite);
+ reader_writer.set_on_next([this](ConstByteSpan message) {
+ HandleChunk(message, internal::TransferType::kReceive);
});
+ thread_.SetServerWriteStream(reader_writer);
}
void RegisterHandler(internal::Handler& handler) {
- handlers_.push_front(handler);
+ thread_.AddTransferHandler(handler);
+ }
+
+ void set_max_pending_bytes(uint32_t max_pending_bytes) {
+ max_parameters_.set_pending_bytes(max_pending_bytes);
+ }
+
+ // Sets the maximum size for the data in a pw_transfer chunk. Note that the
+ // max chunk size must always fit within the transfer thread's chunk buffer.
+ void set_max_chunk_size_bytes(uint32_t max_chunk_size_bytes) {
+ max_parameters_.set_max_chunk_size_bytes(max_chunk_size_bytes);
}
void UnregisterHandler(internal::Handler& handler) {
- handlers_.remove(handler);
+ thread_.RemoveTransferHandler(handler);
}
void set_chunk_timeout(chrono::SystemClock::duration chunk_timeout) {
@@ -100,44 +109,23 @@ class TransferService : public pw_rpc::raw::Transfer::Service<TransferService> {
void set_max_retries(uint8_t max_retries) { max_retries_ = max_retries; }
- private:
- // Calls transfer.Finish() and sends the final status chunk.
- void FinishTransfer(internal::ServerContext& transfer, Status status);
+ Status set_extend_window_divisor(uint32_t extend_window_divisor) {
+ if (extend_window_divisor <= 1) {
+ return Status::InvalidArgument();
+ }
- // Sends a out data chunk for a read transfer. Returns true if the data was
- // sent successfully.
- bool SendNextReadChunk(internal::ServerContext& context);
+ max_parameters_.set_extend_window_divisor(extend_window_divisor);
+ return OkStatus();
+ }
+ private:
void HandleChunk(ConstByteSpan message, internal::TransferType type);
- // All registered transfer handlers.
- IntrusiveList<internal::Handler> handlers_;
-
- internal::ServerContextPool read_transfers_;
- internal::ServerContextPool write_transfers_;
-
- work_queue::WorkQueue& work_queue_;
- internal::EncodingBuffer encoding_buffer_;
-
- // Stores the RPC streams and parameters for communicating with the client.
- internal::ClientConnection client_;
-
- internal::ChunkDataBuffer chunk_data_buffer_;
+ internal::TransferParameters max_parameters_;
+ TransferThread& thread_;
chrono::SystemClock::duration chunk_timeout_;
uint8_t max_retries_;
};
-// A transfer service with its own buffer for transfer data.
-template <size_t kSizeBytes>
-class TransferServiceBuffer : public TransferService {
- public:
- constexpr TransferServiceBuffer(work_queue::WorkQueue& work_queue,
- uint32_t max_pending_bytes)
- : TransferService(work_queue, transfer_data_buffer_, max_pending_bytes) {}
-
- private:
- std::array<std::byte, kSizeBytes> transfer_data_buffer_;
-};
-
} // namespace pw::transfer
diff --git a/pw_transfer/public/pw_transfer/transfer_thread.h b/pw_transfer/public/pw_transfer/transfer_thread.h
new file mode 100644
index 000000000..9e21bba5c
--- /dev/null
+++ b/pw_transfer/public/pw_transfer/transfer_thread.h
@@ -0,0 +1,270 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <cstdint>
+#include <span>
+
+#include "pw_assert/assert.h"
+#include "pw_chrono/system_clock.h"
+#include "pw_function/function.h"
+#include "pw_preprocessor/compiler.h"
+#include "pw_rpc/raw/client_reader_writer.h"
+#include "pw_rpc/raw/server_reader_writer.h"
+#include "pw_sync/binary_semaphore.h"
+#include "pw_sync/timed_thread_notification.h"
+#include "pw_thread/thread_core.h"
+#include "pw_transfer/handler.h"
+#include "pw_transfer/internal/client_context.h"
+#include "pw_transfer/internal/context.h"
+#include "pw_transfer/internal/event.h"
+#include "pw_transfer/internal/server_context.h"
+
+namespace pw::transfer {
+namespace internal {
+
+class TransferThread : public thread::ThreadCore {
+ public:
+ TransferThread(std::span<ClientContext> client_transfers,
+ std::span<ServerContext> server_transfers,
+ ByteSpan chunk_buffer,
+ ByteSpan encode_buffer)
+ : client_transfers_(client_transfers),
+ server_transfers_(server_transfers),
+ chunk_buffer_(chunk_buffer),
+ encode_buffer_(encode_buffer) {}
+
+ void StartClientTransfer(TransferType type,
+ uint32_t transfer_id,
+ uint32_t handler_id,
+ stream::Stream* stream,
+ const TransferParameters& max_parameters,
+ Function<void(Status)>&& on_completion,
+ chrono::SystemClock::duration timeout,
+ uint8_t max_retries) {
+ StartTransfer(type,
+ transfer_id,
+ handler_id,
+ stream,
+ max_parameters,
+ std::move(on_completion),
+ timeout,
+ max_retries);
+ }
+
+ void StartServerTransfer(TransferType type,
+ uint32_t transfer_id,
+ uint32_t handler_id,
+ const TransferParameters& max_parameters,
+ chrono::SystemClock::duration timeout,
+ uint8_t max_retries) {
+ StartTransfer(type,
+ transfer_id,
+ handler_id,
+ /*stream=*/nullptr,
+ max_parameters,
+ /*on_completion=*/nullptr,
+ timeout,
+ max_retries);
+ }
+
+ void ProcessClientChunk(ConstByteSpan chunk) {
+ ProcessChunk(EventType::kClientChunk, chunk);
+ }
+
+ void ProcessServerChunk(ConstByteSpan chunk) {
+ ProcessChunk(EventType::kServerChunk, chunk);
+ }
+
+ void SetClientReadStream(rpc::RawClientReaderWriter& read_stream) {
+ SetClientStream(TransferStream::kClientRead, read_stream);
+ }
+
+ void SetClientWriteStream(rpc::RawClientReaderWriter& write_stream) {
+ SetClientStream(TransferStream::kClientWrite, write_stream);
+ }
+
+ void SetServerReadStream(rpc::RawServerReaderWriter& read_stream) {
+ SetServerStream(TransferStream::kServerRead, read_stream);
+ }
+
+ void SetServerWriteStream(rpc::RawServerReaderWriter& write_stream) {
+ SetServerStream(TransferStream::kServerWrite, write_stream);
+ }
+
+ void AddTransferHandler(Handler& handler) {
+ TransferHandlerEvent(EventType::kAddTransferHandler, handler);
+ }
+
+ void RemoveTransferHandler(Handler& handler) {
+ TransferHandlerEvent(EventType::kRemoveTransferHandler, handler);
+ }
+
+ size_t max_chunk_size() const { return chunk_buffer_.size(); }
+
+ // For testing only: terminates the transfer thread with a kTerminate event.
+ void Terminate();
+
+ // For testing only: blocks until the next event can be acquired, which means
+ // a previously enqueued event has been processed.
+ void WaitUntilEventIsProcessed() {
+ next_event_ownership_.acquire();
+ next_event_ownership_.release();
+ }
+
+ // For testing only: simulates a timeout event for a client transfer.
+ void SimulateClientTimeout(uint32_t transfer_id) {
+ SimulateTimeout(EventType::kClientTimeout, transfer_id);
+ }
+
+ // For testing only: simulates a timeout event for a server transfer.
+ void SimulateServerTimeout(uint32_t transfer_id) {
+ SimulateTimeout(EventType::kServerTimeout, transfer_id);
+ }
+
+ private:
+ friend class Context;
+
+ // Maximum amount of time between transfer thread runs.
+ static constexpr chrono::SystemClock::duration kMaxTimeout =
+ std::chrono::seconds(2);
+
+ // Finds an active server or client transfer.
+ template <typename T>
+ static Context* FindActiveTransfer(const std::span<T>& transfers,
+ uint32_t transfer_id) {
+ auto transfer = std::find_if(
+ transfers.begin(), transfers.end(), [transfer_id](auto& c) {
+ return c.initialized() && c.transfer_id() == transfer_id;
+ });
+ return transfer != transfers.end() ? &*transfer : nullptr;
+ }
+
+ void SimulateTimeout(EventType type, uint32_t transfer_id);
+
+ // Finds an new server or client transfer.
+ template <typename T>
+ static Context* FindNewTransfer(const std::span<T>& transfers,
+ uint32_t transfer_id) {
+ Context* new_transfer = nullptr;
+
+ for (Context& context : transfers) {
+ if (context.active()) {
+ if (context.transfer_id() == transfer_id) {
+ // Restart an already active transfer.
+ return &context;
+ }
+ } else {
+ // Store the inactive context as an option, but keep checking for the
+ // restart case.
+ new_transfer = &context;
+ }
+ }
+
+ return new_transfer;
+ }
+
+ const ByteSpan& encode_buffer() const { return encode_buffer_; }
+
+ void Run() final;
+
+ void HandleTimeouts();
+
+ rpc::Writer& stream_for(TransferStream stream) {
+ switch (stream) {
+ case TransferStream::kClientRead:
+ return client_read_stream_;
+ case TransferStream::kClientWrite:
+ return client_write_stream_;
+ case TransferStream::kServerRead:
+ return server_read_stream_;
+ case TransferStream::kServerWrite:
+ return server_write_stream_;
+ }
+ // An unknown TransferStream value was passed, which means this function
+ // was passed an invalid enum value.
+ PW_ASSERT(false);
+ }
+
+ // Returns the earliest timeout among all active transfers, up to kMaxTimeout.
+ chrono::SystemClock::time_point GetNextTransferTimeout() const;
+
+ void StartTransfer(TransferType type,
+ uint32_t transfer_id,
+ uint32_t handler_id,
+ stream::Stream* stream,
+ const TransferParameters& max_parameters,
+ Function<void(Status)>&& on_completion,
+ chrono::SystemClock::duration timeout,
+ uint8_t max_retries);
+
+ void ProcessChunk(EventType type, ConstByteSpan chunk);
+
+ void SetClientStream(TransferStream type, rpc::RawClientReaderWriter& stream);
+ void SetServerStream(TransferStream type, rpc::RawServerReaderWriter& stream);
+
+ void TransferHandlerEvent(EventType type, Handler& handler);
+
+ void HandleEvent(const Event& event);
+ Context* FindContextForEvent(const Event& event) const;
+
+ void SendStatusChunk(const SendStatusChunkEvent& event);
+
+ sync::TimedThreadNotification event_notification_;
+ sync::BinarySemaphore next_event_ownership_;
+
+ Event next_event_;
+ Function<void(Status)> staged_on_completion_;
+ rpc::RawClientReaderWriter staged_client_stream_;
+ rpc::RawServerReaderWriter staged_server_stream_;
+
+ rpc::RawClientReaderWriter client_read_stream_;
+ rpc::RawClientReaderWriter client_write_stream_;
+ rpc::RawServerReaderWriter server_read_stream_;
+ rpc::RawServerReaderWriter server_write_stream_;
+
+ std::span<ClientContext> client_transfers_;
+ std::span<ServerContext> server_transfers_;
+
+ // All registered transfer handlers.
+ IntrusiveList<Handler> handlers_;
+
+ // Buffer in which chunk data is staged for CHUNK events.
+ ByteSpan chunk_buffer_;
+
+ // Buffer into which responses are encoded. Only ever used from within the
+ // transfer thread, so no locking is required.
+ ByteSpan encode_buffer_;
+};
+
+} // namespace internal
+
+using TransferThread = internal::TransferThread;
+
+template <size_t kMaxConcurrentClientTransfers,
+ size_t kMaxConcurrentServerTransfers>
+class Thread final : public internal::TransferThread {
+ public:
+ Thread(ByteSpan chunk_buffer, ByteSpan encode_buffer)
+ : internal::TransferThread(
+ client_contexts_, server_contexts_, chunk_buffer, encode_buffer) {}
+
+ private:
+ std::array<internal::ClientContext, kMaxConcurrentClientTransfers>
+ client_contexts_;
+ std::array<internal::ServerContext, kMaxConcurrentServerTransfers>
+ server_contexts_;
+};
+
+} // namespace pw::transfer
diff --git a/pw_transfer/py/pw_transfer/transfer.py b/pw_transfer/py/pw_transfer/transfer.py
index a7067818c..06574f4e7 100644
--- a/pw_transfer/py/pw_transfer/transfer.py
+++ b/pw_transfer/py/pw_transfer/transfer.py
@@ -1,4 +1,4 @@
-# Copyright 2021 The Pigweed Authors
+# Copyright 2022 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
@@ -196,7 +196,10 @@ class Transfer(abc.ABC):
def _send_error(self, error: Status) -> None:
"""Sends an error chunk to the server and finishes the transfer."""
- self._send_chunk(Chunk(transfer_id=self.id, status=error.value))
+ self._send_chunk(
+ Chunk(transfer_id=self.id,
+ status=error.value,
+ type=Chunk.Type.TRANSFER_COMPLETION))
self.finish(error)
@@ -236,7 +239,7 @@ class WriteTransfer(Transfer):
return self._data
def _initial_chunk(self) -> Chunk:
- return Chunk(transfer_id=self.id)
+ return Chunk(transfer_id=self.id, type=Chunk.Type.TRANSFER_START)
async def _handle_data_chunk(self, chunk: Chunk) -> None:
"""Processes an incoming chunk from the server.
@@ -286,7 +289,8 @@ class WriteTransfer(Transfer):
retransmit = True
if chunk.HasField('type'):
- retransmit = chunk.type == Chunk.Type.PARAMETERS_RETRANSMIT
+ retransmit = (chunk.type == Chunk.Type.PARAMETERS_RETRANSMIT
+ or chunk.type == Chunk.Type.TRANSFER_START)
if chunk.offset > len(self.data):
# Bad offset; terminate the transfer.
@@ -339,7 +343,9 @@ class WriteTransfer(Transfer):
def _next_chunk(self) -> Chunk:
"""Returns the next Chunk message to send in the data transfer."""
- chunk = Chunk(transfer_id=self.id, offset=self._offset)
+ chunk = Chunk(transfer_id=self.id,
+ offset=self._offset,
+ type=Chunk.Type.TRANSFER_DATA)
max_bytes_in_chunk = min(self._max_chunk_size,
self._window_end_offset - self._offset)
@@ -400,7 +406,7 @@ class ReadTransfer(Transfer):
return bytes(self._data)
def _initial_chunk(self) -> Chunk:
- return self._transfer_parameters()
+ return self._transfer_parameters(Chunk.Type.TRANSFER_START)
async def _handle_data_chunk(self, chunk: Chunk) -> None:
"""Processes an incoming chunk from the server.
@@ -413,7 +419,8 @@ class ReadTransfer(Transfer):
# Initially, the transfer service only supports in-order transfers.
# If data is received out of order, request that the server
# retransmit from the previous offset.
- self._send_chunk(self._transfer_parameters())
+ self._send_chunk(
+ self._transfer_parameters(Chunk.Type.PARAMETERS_RETRANSMIT))
return
self._data += chunk.data
@@ -424,7 +431,9 @@ class ReadTransfer(Transfer):
if chunk.remaining_bytes == 0:
# No more data to read. Acknowledge receipt and finish.
self._send_chunk(
- Chunk(transfer_id=self.id, status=Status.OK.value))
+ Chunk(transfer_id=self.id,
+ status=Status.OK.value,
+ type=Chunk.Type.TRANSFER_COMPLETION))
self.finish(Status.OK)
return
@@ -442,6 +451,26 @@ class ReadTransfer(Transfer):
self._remaining_transfer_size + self._offset)
self._update_progress(self._offset, self._offset, total_size)
+ if chunk.window_end_offset != 0:
+ if chunk.window_end_offset < self._offset:
+ _LOG.error(
+ 'Transfer %d: transmitter sent invalid earlier end offset '
+ '%d (receiver offset %d)', self.id,
+ chunk.window_end_offset, self._offset)
+ self._send_error(Status.INTERNAL)
+ return
+
+ if chunk.window_end_offset > self._window_end_offset:
+ _LOG.error(
+ 'Transfer %d: transmitter sent invalid later end offset '
+ '%d (receiver end offset %d)', self.id,
+ chunk.window_end_offset, self._window_end_offset)
+ self._send_error(Status.INTERNAL)
+ return
+
+ self._window_end_offset = chunk.window_end_offset
+ self._pending_bytes -= chunk.window_end_offset - self._offset
+
remaining_window_size = self._window_end_offset - self._offset
extend_window = (remaining_window_size <= self._max_bytes_to_receive /
ReadTransfer.EXTEND_WINDOW_DIVISOR)
@@ -449,22 +478,22 @@ class ReadTransfer(Transfer):
if self._pending_bytes == 0:
# All pending data was received. Send out a new parameters chunk for
# the next block.
- self._send_chunk(self._transfer_parameters())
+ self._send_chunk(
+ self._transfer_parameters(Chunk.Type.PARAMETERS_RETRANSMIT))
elif extend_window:
- self._send_chunk(self._transfer_parameters(extend=True))
+ self._send_chunk(
+ self._transfer_parameters(Chunk.Type.PARAMETERS_CONTINUE))
def _retry_after_timeout(self) -> None:
- self._send_chunk(self._transfer_parameters())
+ self._send_chunk(
+ self._transfer_parameters(Chunk.Type.PARAMETERS_RETRANSMIT))
- def _transfer_parameters(self, extend: bool = False) -> Chunk:
+ def _transfer_parameters(self, chunk_type: Any) -> Chunk:
"""Sends an updated transfer parameters chunk to the server."""
self._pending_bytes = self._max_bytes_to_receive
self._window_end_offset = self._offset + self._max_bytes_to_receive
- chunk_type = (Chunk.Type.PARAMETERS_CONTINUE
- if extend else Chunk.Type.PARAMETERS_RETRANSMIT)
-
chunk = Chunk(transfer_id=self.id,
pending_bytes=self._pending_bytes,
window_end_offset=self._window_end_offset,
diff --git a/pw_transfer/py/tests/python_cpp_transfer_test.py b/pw_transfer/py/tests/python_cpp_transfer_test.py
index 4386bce3e..62e0d14eb 100755
--- a/pw_transfer/py/tests/python_cpp_transfer_test.py
+++ b/pw_transfer/py/tests/python_cpp_transfer_test.py
@@ -44,10 +44,13 @@ class TransferServiceIntegrationTest(unittest.TestCase):
self.directory = Path(self._tempdir.name)
command = (*self.test_server_command, str(self.directory))
+ self._outgoing_filter = rpc.PacketFilter('outgoing RPC')
+ self._incoming_filter = rpc.PacketFilter('incoming RPC')
self._context = rpc.HdlcRpcLocalServerAndClient(
command,
self.port, [transfer_pb2, test_server_pb2],
- for_testing=True)
+ outgoing_processor=self._outgoing_filter,
+ incoming_processor=self._incoming_filter)
service = self._context.client.channel(1).rpcs.pw.transfer.Transfer
self.manager = pw_transfer.Manager(
@@ -96,8 +99,9 @@ class TransferServiceIntegrationTest(unittest.TestCase):
def test_read_large_amount_of_data(self) -> None:
for _ in range(ITERATIONS):
- self.set_content(27, '~' * 512)
- self.assertEqual(self.manager.read(27), b'~' * 512)
+ size = 2**13 # TODO(hepler): Increase to 2**14 when it passes.
+ self.set_content(27, '~' * size)
+ self.assertEqual(self.manager.read(27), b'~' * size)
def test_write_unknown_id(self) -> None:
with self.assertRaises(pw_transfer.Error) as ctx:
@@ -147,12 +151,12 @@ class TransferServiceIntegrationTest(unittest.TestCase):
self.set_content(34, 'junk')
# Allow the initial packet and first chunk, then drop the second chunk.
- self._context.outgoing_packets.keep(2)
- self._context.outgoing_packets.drop(1)
+ self._outgoing_filter.keep(2)
+ self._outgoing_filter.drop(1)
# Allow the initial transfer parameters updates, then drop the next two.
- self._context.incoming_packets.keep(1)
- self._context.incoming_packets.drop(2)
+ self._incoming_filter.keep(1)
+ self._incoming_filter.drop(2)
with self.assertLogs('pw_transfer', 'DEBUG') as logs:
self.manager.write(34, _DATA_4096B)
@@ -168,8 +172,8 @@ class TransferServiceIntegrationTest(unittest.TestCase):
def test_write_regularly_drop_packets(self) -> None:
self.set_content(35, 'junk')
- self._context.outgoing_packets.drop_every(5) # drop one per window
- self._context.incoming_packets.drop_every(3)
+ self._outgoing_filter.drop_every(5) # drop one per window
+ self._incoming_filter.drop_every(3)
self.manager.write(35, _DATA_4096B)
@@ -183,16 +187,16 @@ class TransferServiceIntegrationTest(unittest.TestCase):
self.set_content(seed, 'junk')
rand = random.Random(seed)
- self._context.incoming_packets.randomly_drop(3, rand)
- self._context.outgoing_packets.randomly_drop(3, rand)
+ self._incoming_filter.randomly_drop(3, rand)
+ self._outgoing_filter.randomly_drop(3, rand)
data = bytes(
rand.randrange(256) for _ in range(rand.randrange(16384)))
self.manager.write(seed, data)
self.assertEqual(self.get_content(seed), data)
- self._context.incoming_packets.reset()
- self._context.outgoing_packets.reset()
+ self._incoming_filter.reset()
+ self._outgoing_filter.reset()
def _main(test_server_command: List[str], port: int,
diff --git a/pw_transfer/py/tests/transfer_test.py b/pw_transfer/py/tests/transfer_test.py
index 718b276bf..e73c09ddf 100644
--- a/pw_transfer/py/tests/transfer_test.py
+++ b/pw_transfer/py/tests/transfer_test.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright 2021 The Pigweed Authors
+# Copyright 2022 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
@@ -462,9 +462,12 @@ class TransferManagerTest(unittest.TestCase):
self.assertEqual(
self._sent_chunks,
[
- Chunk(transfer_id=22), # initial chunk
- Chunk(transfer_id=22), # retry 1
- Chunk(transfer_id=22), # retry 2
+ Chunk(transfer_id=22,
+ type=Chunk.Type.TRANSFER_START), # initial chunk
+ Chunk(transfer_id=22,
+ type=Chunk.Type.TRANSFER_START), # retry 1
+ Chunk(transfer_id=22,
+ type=Chunk.Type.TRANSFER_START), # retry 2
])
exception = context.exception
@@ -489,13 +492,16 @@ class TransferManagerTest(unittest.TestCase):
last_data_chunk = Chunk(transfer_id=22,
data=b'56789',
offset=5,
- remaining_bytes=0)
+ remaining_bytes=0,
+ type=Chunk.Type.TRANSFER_DATA)
self.assertEqual(
self._sent_chunks,
[
- Chunk(transfer_id=22), # start transfer
- Chunk(transfer_id=22, data=b'01234'),
+ Chunk(transfer_id=22, type=Chunk.Type.TRANSFER_START),
+ Chunk(transfer_id=22,
+ data=b'01234',
+ type=Chunk.Type.TRANSFER_DATA),
last_data_chunk, # last chunk
last_data_chunk, # retry 1
last_data_chunk, # retry 2
diff --git a/pw_transfer/rate_estimate.cc b/pw_transfer/rate_estimate.cc
new file mode 100644
index 000000000..09c26de65
--- /dev/null
+++ b/pw_transfer/rate_estimate.cc
@@ -0,0 +1,35 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_transfer/rate_estimate.h"
+
+namespace pw::transfer {
+
+size_t RateEstimate::GetRateBytesPerSecond() const {
+ if (!start_time_.has_value()) {
+ return 0;
+ }
+
+ auto elapsed_ms = std::chrono::duration_cast<std::chrono::milliseconds>(
+ chrono::SystemClock::now() - start_time_.value());
+ if (elapsed_ms.count() == 0) {
+ return 0;
+ }
+
+ constexpr unsigned int kMillsecondsPerSecond = 1000;
+ return (static_cast<uint64_t>(bytes_transferred_) * kMillsecondsPerSecond) /
+ elapsed_ms.count();
+}
+
+} // namespace pw::transfer
diff --git a/pw_transfer/server_context.cc b/pw_transfer/server_context.cc
index 9d3410974..353258f5b 100644
--- a/pw_transfer/server_context.cc
+++ b/pw_transfer/server_context.cc
@@ -1,4 +1,4 @@
-// Copyright 2021 The Pigweed Authors
+// Copyright 2022 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
@@ -25,55 +25,18 @@
namespace pw::transfer::internal {
-Status ServerContext::Start(TransferType type,
- Handler& handler,
- work_queue::WorkQueue& work_queue,
- EncodingBuffer& encoding_buffer,
- rpc::RawServerReaderWriter& stream,
- chrono::SystemClock::duration timeout,
- uint8_t max_retries) {
- PW_DCHECK(!active());
-
- PW_LOG_INFO("Starting transfer %u", static_cast<unsigned>(handler.id()));
-
- if (const Status status = handler.Prepare(type); !status.ok()) {
- PW_LOG_WARN("Transfer %u prepare failed with status %u",
- static_cast<unsigned>(handler.id()),
- status.code());
- return status.IsPermissionDenied() ? status : Status::DataLoss();
- }
-
- type_ = type;
- handler_ = &handler;
+Status ServerContext::FinalCleanup(const Status status) {
+ PW_DCHECK(active());
- if (type == kRead) {
- InitializeForTransmit(handler.id(),
- work_queue,
- encoding_buffer,
- stream,
- handler.reader(),
- timeout,
- max_retries);
- } else {
- InitializeForReceive(handler.id(),
- work_queue,
- encoding_buffer,
- stream,
- handler.writer(),
- timeout,
- max_retries);
+ // If no handler is set, then the Prepare call failed. Nothing to do.
+ if (handler_ == nullptr) {
+ return OkStatus();
}
- return OkStatus();
-}
-
-Status ServerContext::Finish(const Status status) {
- PW_DCHECK(active());
-
Handler& handler = *handler_;
- set_transfer_state(TransferState::kCompleted);
+ handler_ = nullptr;
- if (type_ == kRead) {
+ if (type() == TransferType::kTransmit) {
handler.FinalizeRead(status);
return OkStatus();
}
@@ -86,75 +49,8 @@ Status ServerContext::Finish(const Status status) {
static_cast<int>(finalized.code()));
return Status::DataLoss();
}
- return OkStatus();
-}
-
-Result<ServerContext*> ServerContextPool::StartTransfer(
- uint32_t transfer_id,
- work_queue::WorkQueue& work_queue,
- EncodingBuffer& encoding_buffer,
- rpc::RawServerReaderWriter& stream,
- chrono::SystemClock::duration timeout,
- uint8_t max_retries) {
- ServerContext* new_transfer = nullptr;
- // Check if the ID belongs to an active transfer. If not, pick an inactive
- // slot to start a new transfer.
- for (ServerContext& transfer : transfers_) {
- if (transfer.active()) {
- // Check if restarting a currently pending transfer.
- if (transfer.transfer_id() == transfer_id) {
- PW_LOG_DEBUG(
- "Received initial chunk for transfer %u which was already in "
- "progress; aborting and restarting",
- static_cast<unsigned>(transfer_id));
- transfer.Finish(Status::Aborted());
- new_transfer = &transfer;
- break;
- }
- } else {
- // Remember this but keep searching for an active transfer with this ID.
- new_transfer = &transfer;
- }
- }
-
- if (new_transfer == nullptr) {
- return Status::Unavailable();
- }
-
- // Try to start the new transfer by checking if a handler for it exists.
- auto handler = std::find_if(handlers_.begin(), handlers_.end(), [&](auto& h) {
- return h.id() == transfer_id;
- });
-
- if (handler == handlers_.end()) {
- return Status::NotFound();
- }
-
- PW_TRY(new_transfer->Start(type_,
- *handler,
- work_queue,
- encoding_buffer,
- stream,
- timeout,
- max_retries));
- return new_transfer;
-}
-
-Result<ServerContext*> ServerContextPool::GetPendingTransfer(
- uint32_t transfer_id) {
- auto transfer =
- std::find_if(transfers_.begin(), transfers_.end(), [=](auto& t) {
- return t.initialized() && t.transfer_id() == transfer_id;
- });
-
- if (transfer == transfers_.end()) {
- PW_LOG_DEBUG("Ignoring chunk for transfer %u, which is not pending",
- static_cast<unsigned>(transfer_id));
- return Status::FailedPrecondition();
- }
-
- return &(*transfer);
+ return OkStatus();
}
} // namespace pw::transfer::internal
diff --git a/pw_transfer/test_rpc_server.cc b/pw_transfer/test_rpc_server.cc
index 6b595e089..e0f8e6788 100644
--- a/pw_transfer/test_rpc_server.cc
+++ b/pw_transfer/test_rpc_server.cc
@@ -32,7 +32,6 @@
#include "pw_thread_stl/options.h"
#include "pw_transfer/transfer.h"
#include "pw_transfer_test/test_server.raw_rpc.pb.h"
-#include "pw_work_queue/work_queue.h"
namespace pw::transfer {
namespace {
@@ -84,9 +83,8 @@ class TestServerService
void set_directory(const char* directory) { directory_ = directory; }
- StatusWithSize ReloadTransferFiles(ConstByteSpan, ByteSpan) {
+ void ReloadTransferFiles(ConstByteSpan, rpc::RawUnaryResponder&) {
LoadFileHandlers();
- return StatusWithSize();
}
void LoadFileHandlers() {
@@ -117,10 +115,10 @@ class TestServerService
constexpr size_t kChunkSizeBytes = 256;
constexpr size_t kMaxReceiveSizeBytes = 1024;
-work_queue::WorkQueueWithBuffer<10> work_queue_;
-
-TransferServiceBuffer<kChunkSizeBytes> transfer_service(work_queue_,
- kMaxReceiveSizeBytes);
+std::array<std::byte, kChunkSizeBytes> chunk_buffer;
+std::array<std::byte, kChunkSizeBytes> encode_buffer;
+transfer::Thread<4, 4> transfer_thread(chunk_buffer, encode_buffer);
+TransferService transfer_service(transfer_thread, kMaxReceiveSizeBytes);
TestServerService test_server_service(transfer_service);
void RunServer(int socket_port, const char* directory) {
@@ -130,10 +128,10 @@ void RunServer(int socket_port, const char* directory) {
test_server_service.LoadFileHandlers();
rpc::system_server::Init();
- rpc::system_server::Server().RegisterService(test_server_service);
- rpc::system_server::Server().RegisterService(transfer_service);
+ rpc::system_server::Server().RegisterService(test_server_service,
+ transfer_service);
- thread::DetachedThread(thread::stl::Options(), work_queue_);
+ thread::DetachedThread(thread::stl::Options(), transfer_thread);
PW_LOG_INFO("Starting pw_rpc server");
PW_CHECK_OK(rpc::system_server::Start());
diff --git a/pw_transfer/transfer.cc b/pw_transfer/transfer.cc
index 50bd2612c..142ecb364 100644
--- a/pw_transfer/transfer.cc
+++ b/pw_transfer/transfer.cc
@@ -1,4 +1,4 @@
-// Copyright 2021 The Pigweed Authors
+// Copyright 2022 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
@@ -12,8 +12,6 @@
// License for the specific language governing permissions and limitations under
// the License.
-#define PW_LOG_MODULE_NAME "TRN"
-
#include "pw_transfer/transfer.h"
#include "pw_assert/check.h"
@@ -25,70 +23,24 @@ namespace pw::transfer {
void TransferService::HandleChunk(ConstByteSpan message,
internal::TransferType type) {
- // All incoming chunks in a client read transfer are transfer parameter
- // updates, except for the final chunk, which is an acknowledgement of
- // completion.
- //
- // Transfer parameters may contain the following fields:
- //
- // - transfer_id (required)
- // - pending_bytes (required)
- // - offset (required)
- // - max_chunk_size_bytes
- // - min_delay_microseconds (not yet supported)
- //
internal::Chunk chunk;
-
if (Status status = internal::DecodeChunk(message, chunk); !status.ok()) {
- // No special handling required here. The client will retransmit the chunk
- // when no response is received.
- PW_LOG_ERROR("Failed to decode incoming transfer chunk");
+ PW_LOG_ERROR("Failed to decode transfer chunk: %d", status.code());
return;
}
- internal::ServerContextPool& pool =
- type == internal::kRead ? read_transfers_ : write_transfers_;
- rpc::RawServerReaderWriter& stream =
- type == internal::kRead ? client_.read_stream() : client_.write_stream();
-
- Result<internal::ServerContext*> result =
- chunk.IsInitialChunk() ? pool.StartTransfer(chunk.transfer_id,
- work_queue_,
- encoding_buffer_,
- stream,
- chunk_timeout_,
- max_retries_)
- : pool.GetPendingTransfer(chunk.transfer_id);
- if (!result.ok()) {
- client_.SendStatusChunk(type, chunk.transfer_id, result.status());
- PW_LOG_ERROR("Error handling chunk for transfer %u: %d",
- static_cast<unsigned>(chunk.transfer_id),
- result.status().code());
- return;
+ if (chunk.IsInitialChunk()) {
+ // TODO(frolv): Right now, transfer ID and handler ID are the same thing.
+ // The transfer ID should be made into a unique session ID instead.
+ thread_.StartServerTransfer(type,
+ chunk.transfer_id,
+ chunk.transfer_id,
+ max_parameters_,
+ chunk_timeout_,
+ max_retries_);
}
- internal::ServerContext& transfer = *result.value();
-
- if (chunk.status.has_value()) {
- // Transfer has been terminated (successfully or not).
- const Status status = chunk.status.value();
- if (!status.ok()) {
- PW_LOG_ERROR("Receiver terminated transfer %u with status %d",
- static_cast<unsigned>(chunk.transfer_id),
- static_cast<int>(status.code()));
- }
- if (transfer.active()) {
- transfer.Finish(status).IgnoreError();
- }
- return;
- }
-
- if (transfer.ReadChunkData(
- chunk_data_buffer_, client_.max_parameters(), chunk)) {
- // Call this synchronously for now. Later, this will be deferred within a
- // work queue.
- transfer.ProcessChunk(chunk_data_buffer_, client_.max_parameters());
- }
+ thread_.ProcessServerChunk(message);
}
} // namespace pw::transfer
diff --git a/pw_transfer/transfer.proto b/pw_transfer/transfer.proto
index 8a9d35500..6f598a071 100644
--- a/pw_transfer/transfer.proto
+++ b/pw_transfer/transfer.proto
@@ -140,17 +140,24 @@ message Chunk {
// Chunk containing transfer data.
TRANSFER_DATA = 0;
- // First chunk of a transfer (only sent by the client). Currently unused.
+ // First chunk of a transfer (only sent by the client).
TRANSFER_START = 1;
- // Transfer parameters indicating that the sender should retransmit from the
- // specified offset.
+ // Transfer parameters indicating that the transmitter should retransmit
+ // from the specified offset.
PARAMETERS_RETRANSMIT = 2;
- // Transfer parameters telling the sender to continue sending up to index
- // `offset + pending_bytes` of data. If the sender is already beyond offset,
- // it does not have to rewind.
+ // Transfer parameters telling the transmitter to continue sending up to
+ // index `offset + pending_bytes` of data. If the transmitter is already
+ // beyond `offset`, it does not have to rewind.
PARAMETERS_CONTINUE = 3;
+
+ // Sender of the chunk is terminating the transfer.
+ TRANSFER_COMPLETION = 4;
+
+ // Acknowledge the completion of a transfer. Currently unused.
+ // TODO(konkers): Implement this behavior.
+ TRANSFER_COMPLETION_ACK = 5;
};
// The type of this chunk. This field should only be processed when present.
diff --git a/pw_transfer/transfer_test.cc b/pw_transfer/transfer_test.cc
index 69bed5bd0..76cfb0113 100644
--- a/pw_transfer/transfer_test.cc
+++ b/pw_transfer/transfer_test.cc
@@ -1,4 +1,4 @@
-// Copyright 2021 The Pigweed Authors
+// Copyright 2022 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
@@ -17,15 +17,26 @@
#include "gtest/gtest.h"
#include "pw_bytes/array.h"
#include "pw_rpc/raw/test_method_context.h"
+#include "pw_rpc/thread_testing.h"
+#include "pw_thread/thread.h"
+#include "pw_thread_stl/options.h"
#include "pw_transfer/transfer.pwpb.h"
#include "pw_transfer_private/chunk_testing.h"
namespace pw::transfer::test {
namespace {
+using namespace std::chrono_literals;
+
PW_MODIFY_DIAGNOSTICS_PUSH();
PW_MODIFY_DIAGNOSTIC(ignored, "-Wmissing-field-initializers");
+// TODO(frolv): Have a generic way to obtain a thread for testing on any system.
+thread::Options& TransferThreadOptions() {
+ static thread::stl::Options options;
+ return options;
+}
+
using internal::Chunk;
class TestMemoryReader : public stream::SeekableReader {
@@ -67,9 +78,14 @@ class SimpleReadTransfer final : public ReadOnlyHandler {
reader_(data) {}
Status PrepareRead() final {
- reader_.Seek(0);
- set_reader(reader_);
prepare_read_called = true;
+
+ if (!prepare_read_return_status.ok()) {
+ return prepare_read_return_status;
+ }
+
+ EXPECT_EQ(reader_.seek_status, reader_.Seek(0));
+ set_reader(reader_);
return OkStatus();
}
@@ -83,6 +99,7 @@ class SimpleReadTransfer final : public ReadOnlyHandler {
bool prepare_read_called;
bool finalize_read_called;
+ Status prepare_read_return_status;
Status finalize_read_status;
private:
@@ -95,32 +112,43 @@ class ReadTransfer : public ::testing::Test {
protected:
ReadTransfer(size_t max_chunk_size_bytes = 64)
: handler_(3, kData),
- ctx_(work_queue_,
- std::span(data_buffer_).first(max_chunk_size_bytes),
- 64) {
+ transfer_thread_(std::span(data_buffer_).first(max_chunk_size_bytes),
+ encode_buffer_),
+ ctx_(transfer_thread_, 64),
+ system_thread_(TransferThreadOptions(), transfer_thread_) {
ctx_.service().RegisterHandler(handler_);
ASSERT_FALSE(handler_.prepare_read_called);
ASSERT_FALSE(handler_.finalize_read_called);
ctx_.call(); // Open the read stream
+ transfer_thread_.WaitUntilEventIsProcessed();
+ }
+
+ ~ReadTransfer() {
+ transfer_thread_.Terminate();
+ system_thread_.join();
}
SimpleReadTransfer handler_;
+ Thread<1, 1> transfer_thread_;
PW_RAW_TEST_METHOD_CONTEXT(TransferService, Read) ctx_;
+ thread::Thread system_thread_;
std::array<std::byte, 64> data_buffer_;
-
- // Not currently used in the tests, so left uninitialized.
- work_queue::WorkQueueWithBuffer<1> work_queue_;
+ std::array<std::byte, 64> encode_buffer_;
};
TEST_F(ReadTransfer, SingleChunk) {
- ctx_.SendClientStream(
- EncodeChunk({.transfer_id = 3,
- .window_end_offset = 64,
- .pending_bytes = 64,
- .offset = 0,
- .type = Chunk::Type::kParametersRetransmit}));
+ rpc::test::WaitForPackets(ctx_.output(), 2, [this] {
+ ctx_.SendClientStream(EncodeChunk({.transfer_id = 3,
+ .window_end_offset = 64,
+ .pending_bytes = 64,
+ .offset = 0,
+ .type = Chunk::Type::kTransferStart}));
+
+ transfer_thread_.WaitUntilEventIsProcessed();
+ });
+
EXPECT_TRUE(handler_.prepare_read_called);
EXPECT_FALSE(handler_.finalize_read_called);
@@ -141,13 +169,22 @@ TEST_F(ReadTransfer, SingleChunk) {
EXPECT_EQ(c1.remaining_bytes.value(), 0u);
ctx_.SendClientStream(EncodeChunk({.transfer_id = 3, .status = OkStatus()}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
EXPECT_TRUE(handler_.finalize_read_called);
EXPECT_EQ(handler_.finalize_read_status, OkStatus());
}
TEST_F(ReadTransfer, PendingBytes_SingleChunk) {
- ctx_.SendClientStream(
- EncodeChunk({.transfer_id = 3, .pending_bytes = 64, .offset = 0}));
+ rpc::test::WaitForPackets(ctx_.output(), 2, [this] {
+ ctx_.SendClientStream(EncodeChunk({.transfer_id = 3,
+ .pending_bytes = 64,
+ .offset = 0,
+ .type = Chunk::Type::kTransferStart}));
+
+ transfer_thread_.WaitUntilEventIsProcessed();
+ });
+
EXPECT_TRUE(handler_.prepare_read_called);
EXPECT_FALSE(handler_.finalize_read_called);
@@ -168,17 +205,21 @@ TEST_F(ReadTransfer, PendingBytes_SingleChunk) {
EXPECT_EQ(c1.remaining_bytes.value(), 0u);
ctx_.SendClientStream(EncodeChunk({.transfer_id = 3, .status = OkStatus()}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
EXPECT_TRUE(handler_.finalize_read_called);
EXPECT_EQ(handler_.finalize_read_status, OkStatus());
}
TEST_F(ReadTransfer, MultiChunk) {
- ctx_.SendClientStream(
- EncodeChunk({.transfer_id = 3,
- .window_end_offset = 16,
- .pending_bytes = 16,
- .offset = 0,
- .type = Chunk::Type::kParametersRetransmit}));
+ ctx_.SendClientStream(EncodeChunk({.transfer_id = 3,
+ .window_end_offset = 16,
+ .pending_bytes = 16,
+ .offset = 0,
+ .type = Chunk::Type::kTransferStart}));
+
+ transfer_thread_.WaitUntilEventIsProcessed();
+
EXPECT_TRUE(handler_.prepare_read_called);
EXPECT_FALSE(handler_.finalize_read_called);
@@ -196,6 +237,8 @@ TEST_F(ReadTransfer, MultiChunk) {
.pending_bytes = 16,
.offset = 16,
.type = Chunk::Type::kParametersContinue}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(ctx_.total_responses(), 2u);
Chunk c1 = DecodeChunk(ctx_.responses()[1]);
@@ -210,6 +253,8 @@ TEST_F(ReadTransfer, MultiChunk) {
.pending_bytes = 16,
.offset = 32,
.type = Chunk::Type::kParametersContinue}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(ctx_.total_responses(), 3u);
Chunk c2 = DecodeChunk(ctx_.responses()[2]);
@@ -219,13 +264,52 @@ TEST_F(ReadTransfer, MultiChunk) {
EXPECT_EQ(c2.remaining_bytes.value(), 0u);
ctx_.SendClientStream(EncodeChunk({.transfer_id = 3, .status = OkStatus()}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
EXPECT_TRUE(handler_.finalize_read_called);
EXPECT_EQ(handler_.finalize_read_status, OkStatus());
}
+TEST_F(ReadTransfer, MultiChunk_RepeatedContinuePackets) {
+ ctx_.SendClientStream(EncodeChunk({.transfer_id = 3,
+ .window_end_offset = 16,
+ .pending_bytes = 16,
+ .offset = 0,
+ .type = Chunk::Type::kTransferStart}));
+
+ transfer_thread_.WaitUntilEventIsProcessed();
+
+ const auto continue_chunk =
+ EncodeChunk({.transfer_id = 3,
+ .window_end_offset = 24,
+ .pending_bytes = 8,
+ .offset = 16,
+ .type = Chunk::Type::kParametersContinue});
+ ctx_.SendClientStream(continue_chunk);
+
+ transfer_thread_.WaitUntilEventIsProcessed();
+
+ // Resend the CONTINUE packets that don't actually advance the window.
+ for (int i = 0; i < 3; ++i) {
+ ctx_.SendClientStream(continue_chunk);
+ transfer_thread_.WaitUntilEventIsProcessed();
+ }
+
+ ASSERT_EQ(ctx_.total_responses(), 2u); // Only sent one packet
+ Chunk c1 = DecodeChunk(ctx_.responses()[1]);
+
+ EXPECT_EQ(c1.transfer_id, 3u);
+ EXPECT_EQ(c1.offset, 16u);
+ ASSERT_EQ(c1.data.size(), 8u);
+ EXPECT_EQ(std::memcmp(c1.data.data(), kData.data() + 16, c1.data.size()), 0);
+}
+
TEST_F(ReadTransfer, PendingBytes_MultiChunk) {
ctx_.SendClientStream(
EncodeChunk({.transfer_id = 3, .pending_bytes = 16, .offset = 0}));
+
+ transfer_thread_.WaitUntilEventIsProcessed();
+
EXPECT_TRUE(handler_.prepare_read_called);
EXPECT_FALSE(handler_.finalize_read_called);
@@ -239,6 +323,8 @@ TEST_F(ReadTransfer, PendingBytes_MultiChunk) {
ctx_.SendClientStream(
EncodeChunk({.transfer_id = 3, .pending_bytes = 16, .offset = 16}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(ctx_.total_responses(), 2u);
Chunk c1 = DecodeChunk(ctx_.responses()[1]);
@@ -249,6 +335,8 @@ TEST_F(ReadTransfer, PendingBytes_MultiChunk) {
ctx_.SendClientStream(
EncodeChunk({.transfer_id = 3, .pending_bytes = 16, .offset = 32}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(ctx_.total_responses(), 3u);
Chunk c2 = DecodeChunk(ctx_.responses()[2]);
@@ -258,32 +346,38 @@ TEST_F(ReadTransfer, PendingBytes_MultiChunk) {
EXPECT_EQ(c2.remaining_bytes.value(), 0u);
ctx_.SendClientStream(EncodeChunk({.transfer_id = 3, .status = OkStatus()}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
EXPECT_TRUE(handler_.finalize_read_called);
EXPECT_EQ(handler_.finalize_read_status, OkStatus());
}
TEST_F(ReadTransfer, OutOfOrder_SeekingSupported) {
- ctx_.SendClientStream(
- EncodeChunk({.transfer_id = 3, .pending_bytes = 16, .offset = 0}));
+ rpc::test::WaitForPackets(ctx_.output(), 4, [this] {
+ ctx_.SendClientStream(
+ EncodeChunk({.transfer_id = 3, .pending_bytes = 16, .offset = 0}));
- ASSERT_EQ(ctx_.total_responses(), 1u);
- Chunk chunk = DecodeChunk(ctx_.responses().back());
- EXPECT_TRUE(
- std::equal(&kData[0], &kData[16], chunk.data.begin(), chunk.data.end()));
+ transfer_thread_.WaitUntilEventIsProcessed();
- ctx_.SendClientStream(
- EncodeChunk({.transfer_id = 3, .pending_bytes = 8, .offset = 2}));
+ Chunk chunk = DecodeChunk(ctx_.responses().back());
+ EXPECT_TRUE(std::equal(
+ &kData[0], &kData[16], chunk.data.begin(), chunk.data.end()));
- ASSERT_EQ(ctx_.total_responses(), 2u);
- chunk = DecodeChunk(ctx_.responses().back());
- EXPECT_TRUE(
- std::equal(&kData[2], &kData[10], chunk.data.begin(), chunk.data.end()));
+ ctx_.SendClientStream(
+ EncodeChunk({.transfer_id = 3, .pending_bytes = 8, .offset = 2}));
- ctx_.SendClientStream(
- EncodeChunk({.transfer_id = 3, .pending_bytes = 64, .offset = 17}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
+ chunk = DecodeChunk(ctx_.responses().back());
+ EXPECT_TRUE(std::equal(
+ &kData[2], &kData[10], chunk.data.begin(), chunk.data.end()));
+
+ ctx_.SendClientStream(
+ EncodeChunk({.transfer_id = 3, .pending_bytes = 64, .offset = 17}));
+ });
ASSERT_EQ(ctx_.total_responses(), 4u);
- chunk = DecodeChunk(ctx_.responses()[2]);
+ Chunk chunk = DecodeChunk(ctx_.responses()[2]);
EXPECT_TRUE(std::equal(
&kData[17], kData.end(), chunk.data.begin(), chunk.data.end()));
}
@@ -296,16 +390,22 @@ TEST_F(ReadTransfer, OutOfOrder_SeekingNotSupported_EndsWithUnimplemented) {
ctx_.SendClientStream(
EncodeChunk({.transfer_id = 3, .pending_bytes = 8, .offset = 2}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(ctx_.total_responses(), 2u);
Chunk chunk = DecodeChunk(ctx_.responses().back());
EXPECT_EQ(chunk.status, Status::Unimplemented());
}
TEST_F(ReadTransfer, MaxChunkSize_Client) {
- ctx_.SendClientStream(EncodeChunk({.transfer_id = 3,
- .pending_bytes = 64,
- .max_chunk_size_bytes = 8,
- .offset = 0}));
+ rpc::test::WaitForPackets(ctx_.output(), 5, [this] {
+ ctx_.SendClientStream(EncodeChunk({.transfer_id = 3,
+ .pending_bytes = 64,
+ .max_chunk_size_bytes = 8,
+ .offset = 0,
+ .type = Chunk::Type::kTransferStart}));
+ });
+
EXPECT_TRUE(handler_.prepare_read_called);
EXPECT_FALSE(handler_.finalize_read_called);
@@ -342,21 +442,59 @@ TEST_F(ReadTransfer, MaxChunkSize_Client) {
EXPECT_EQ(c4.remaining_bytes.value(), 0u);
ctx_.SendClientStream(EncodeChunk({.transfer_id = 3, .status = OkStatus()}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
EXPECT_TRUE(handler_.finalize_read_called);
EXPECT_EQ(handler_.finalize_read_status, OkStatus());
}
+TEST_F(ReadTransfer, HandlerIsClearedAfterTransfer) {
+ ctx_.SendClientStream(EncodeChunk({.transfer_id = 3,
+ .window_end_offset = 64,
+ .pending_bytes = 64,
+ .offset = 0,
+ .type = Chunk::Type::kTransferStart}));
+ ctx_.SendClientStream(EncodeChunk({.transfer_id = 3, .status = OkStatus()}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
+ ASSERT_EQ(ctx_.total_responses(), 1u);
+ ASSERT_TRUE(handler_.prepare_read_called);
+ ASSERT_TRUE(handler_.finalize_read_called);
+ ASSERT_EQ(OkStatus(), handler_.finalize_read_status);
+
+ // Now, clear state and start a second transfer
+ handler_.prepare_read_return_status = Status::FailedPrecondition();
+ handler_.prepare_read_called = false;
+ handler_.finalize_read_called = false;
+
+ ctx_.SendClientStream(EncodeChunk({.transfer_id = 3,
+ .window_end_offset = 64,
+ .pending_bytes = 64,
+ .offset = 0,
+ .type = Chunk::Type::kTransferStart}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
+ // Prepare failed, so the handler should not have been stored in the context,
+ // and finalize should not have been called.
+ ASSERT_TRUE(handler_.prepare_read_called);
+ ASSERT_FALSE(handler_.finalize_read_called);
+}
+
class ReadTransferMaxChunkSize8 : public ReadTransfer {
protected:
- ReadTransferMaxChunkSize8() : ReadTransfer(/*max_chunK_size_bytes=*/8) {}
+ ReadTransferMaxChunkSize8() : ReadTransfer(/*max_chunk_size_bytes=*/8) {}
};
TEST_F(ReadTransferMaxChunkSize8, MaxChunkSize_Server) {
// Client asks for max 16-byte chunks, but service places a limit of 8 bytes.
- ctx_.SendClientStream(EncodeChunk({.transfer_id = 3,
- .pending_bytes = 64,
- .max_chunk_size_bytes = 16,
- .offset = 0}));
+ rpc::test::WaitForPackets(ctx_.output(), 5, [this] {
+ ctx_.SendClientStream(EncodeChunk({.transfer_id = 3,
+ .pending_bytes = 64,
+ .max_chunk_size_bytes = 16,
+ .offset = 0,
+ .type = Chunk::Type::kTransferStart}));
+ });
+
EXPECT_TRUE(handler_.prepare_read_called);
EXPECT_FALSE(handler_.finalize_read_called);
@@ -393,13 +531,20 @@ TEST_F(ReadTransferMaxChunkSize8, MaxChunkSize_Server) {
EXPECT_EQ(c4.remaining_bytes.value(), 0u);
ctx_.SendClientStream(EncodeChunk({.transfer_id = 3, .status = OkStatus()}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
EXPECT_TRUE(handler_.finalize_read_called);
EXPECT_EQ(handler_.finalize_read_status, OkStatus());
}
TEST_F(ReadTransfer, ClientError) {
- ctx_.SendClientStream(
- EncodeChunk({.transfer_id = 3, .pending_bytes = 16, .offset = 0}));
+ ctx_.SendClientStream(EncodeChunk({.transfer_id = 3,
+ .pending_bytes = 16,
+ .offset = 0,
+ .type = Chunk::Type::kTransferStart}));
+
+ transfer_thread_.WaitUntilEventIsProcessed();
+
EXPECT_TRUE(handler_.prepare_read_called);
EXPECT_FALSE(handler_.finalize_read_called);
ASSERT_EQ(ctx_.total_responses(), 1u);
@@ -407,6 +552,8 @@ TEST_F(ReadTransfer, ClientError) {
// Send client error.
ctx_.SendClientStream(
EncodeChunk({.transfer_id = 3, .status = Status::OutOfRange()}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(ctx_.total_responses(), 1u);
EXPECT_TRUE(handler_.finalize_read_called);
EXPECT_EQ(handler_.finalize_read_status, Status::OutOfRange());
@@ -415,6 +562,8 @@ TEST_F(ReadTransfer, ClientError) {
TEST_F(ReadTransfer, MalformedParametersChunk) {
// pending_bytes is required in a parameters chunk.
ctx_.SendClientStream(EncodeChunk({.transfer_id = 3}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
EXPECT_TRUE(handler_.prepare_read_called);
EXPECT_TRUE(handler_.finalize_read_called);
EXPECT_EQ(handler_.finalize_read_status, Status::InvalidArgument());
@@ -427,8 +576,11 @@ TEST_F(ReadTransfer, MalformedParametersChunk) {
}
TEST_F(ReadTransfer, UnregisteredHandler) {
- ctx_.SendClientStream(
- EncodeChunk({.transfer_id = 11, .pending_bytes = 32, .offset = 0}));
+ ctx_.SendClientStream(EncodeChunk({.transfer_id = 11,
+ .pending_bytes = 32,
+ .offset = 0,
+ .type = Chunk::Type::kTransferStart}));
+ transfer_thread_.WaitUntilEventIsProcessed();
ASSERT_EQ(ctx_.total_responses(), 1u);
Chunk chunk = DecodeChunk(ctx_.responses()[0]);
@@ -442,6 +594,7 @@ TEST_F(ReadTransfer, IgnoresNonPendingTransfers) {
ctx_.SendClientStream(EncodeChunk(
{.transfer_id = 3, .offset = 0, .data = std::span(kData).first(10)}));
ctx_.SendClientStream(EncodeChunk({.transfer_id = 3, .status = OkStatus()}));
+ transfer_thread_.WaitUntilEventIsProcessed();
// Only start transfer for initial packet.
EXPECT_FALSE(handler_.prepare_read_called);
@@ -449,8 +602,11 @@ TEST_F(ReadTransfer, IgnoresNonPendingTransfers) {
}
TEST_F(ReadTransfer, AbortAndRestartIfInitialPacketIsReceived) {
- ctx_.SendClientStream(
- EncodeChunk({.transfer_id = 3, .pending_bytes = 16, .offset = 0}));
+ ctx_.SendClientStream(EncodeChunk({.transfer_id = 3,
+ .pending_bytes = 16,
+ .offset = 0,
+ .type = Chunk::Type::kTransferStart}));
+ transfer_thread_.WaitUntilEventIsProcessed();
ASSERT_EQ(ctx_.total_responses(), 1u);
@@ -459,7 +615,11 @@ TEST_F(ReadTransfer, AbortAndRestartIfInitialPacketIsReceived) {
handler_.prepare_read_called = false; // Reset so can check if called again.
ctx_.SendClientStream( // Resend starting chunk
- EncodeChunk({.transfer_id = 3, .pending_bytes = 16, .offset = 0}));
+ EncodeChunk({.transfer_id = 3,
+ .pending_bytes = 16,
+ .offset = 0,
+ .type = Chunk::Type::kTransferStart}));
+ transfer_thread_.WaitUntilEventIsProcessed();
ASSERT_EQ(ctx_.total_responses(), 2u);
@@ -471,6 +631,7 @@ TEST_F(ReadTransfer, AbortAndRestartIfInitialPacketIsReceived) {
ctx_.SendClientStream(
EncodeChunk({.transfer_id = 3, .pending_bytes = 16, .offset = 16}));
ctx_.SendClientStream(EncodeChunk({.transfer_id = 3, .status = OkStatus()}));
+ transfer_thread_.WaitUntilEventIsProcessed();
ASSERT_EQ(ctx_.total_responses(), 3u);
EXPECT_TRUE(handler_.finalize_read_called);
@@ -478,7 +639,10 @@ TEST_F(ReadTransfer, AbortAndRestartIfInitialPacketIsReceived) {
}
TEST_F(ReadTransfer, ZeroPendingBytesWithRemainingData_Aborts) {
- ctx_.SendClientStream(EncodeChunk({.transfer_id = 3, .pending_bytes = 0}));
+ ctx_.SendClientStream(EncodeChunk({.transfer_id = 3,
+ .pending_bytes = 0,
+ .type = Chunk::Type::kTransferStart}));
+ transfer_thread_.WaitUntilEventIsProcessed();
ASSERT_EQ(ctx_.total_responses(), 1u);
ASSERT_TRUE(handler_.finalize_read_called);
@@ -492,13 +656,17 @@ TEST_F(ReadTransfer, ZeroPendingBytesNoRemainingData_Completes) {
// Make the next read appear to be the end of the stream.
handler_.set_read_status(Status::OutOfRange());
- ctx_.SendClientStream(EncodeChunk({.transfer_id = 3, .pending_bytes = 0}));
+ ctx_.SendClientStream(EncodeChunk({.transfer_id = 3,
+ .pending_bytes = 0,
+ .type = Chunk::Type::kTransferStart}));
+ transfer_thread_.WaitUntilEventIsProcessed();
Chunk chunk = DecodeChunk(ctx_.responses().back());
EXPECT_EQ(chunk.transfer_id, 3u);
EXPECT_EQ(chunk.remaining_bytes, 0u);
ctx_.SendClientStream(EncodeChunk({.transfer_id = 3, .status = OkStatus()}));
+ transfer_thread_.WaitUntilEventIsProcessed();
ASSERT_EQ(ctx_.total_responses(), 1u);
ASSERT_TRUE(handler_.finalize_read_called);
@@ -506,8 +674,13 @@ TEST_F(ReadTransfer, ZeroPendingBytesNoRemainingData_Completes) {
}
TEST_F(ReadTransfer, SendsErrorIfChunkIsReceivedInCompletedState) {
- ctx_.SendClientStream(
- EncodeChunk({.transfer_id = 3, .pending_bytes = 64, .offset = 0}));
+ rpc::test::WaitForPackets(ctx_.output(), 2, [this] {
+ ctx_.SendClientStream(EncodeChunk({.transfer_id = 3,
+ .pending_bytes = 64,
+ .offset = 0,
+ .type = Chunk::Type::kTransferStart}));
+ });
+
EXPECT_TRUE(handler_.prepare_read_called);
EXPECT_FALSE(handler_.finalize_read_called);
@@ -528,6 +701,8 @@ TEST_F(ReadTransfer, SendsErrorIfChunkIsReceivedInCompletedState) {
EXPECT_EQ(c1.remaining_bytes.value(), 0u);
ctx_.SendClientStream(EncodeChunk({.transfer_id = 3, .status = OkStatus()}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
EXPECT_TRUE(handler_.finalize_read_called);
EXPECT_EQ(handler_.finalize_read_status, OkStatus());
@@ -537,6 +712,8 @@ TEST_F(ReadTransfer, SendsErrorIfChunkIsReceivedInCompletedState) {
ctx_.SendClientStream(
EncodeChunk({.transfer_id = 3, .pending_bytes = 48, .offset = 16}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(ctx_.total_responses(), 3u);
Chunk c2 = DecodeChunk(ctx_.responses()[2]);
@@ -557,7 +734,7 @@ class SimpleWriteTransfer final : public WriteOnlyHandler {
writer_(data) {}
Status PrepareWrite() final {
- writer_.Seek(0);
+ EXPECT_EQ(OkStatus(), writer_.Seek(0));
set_writer(writer_);
prepare_write_called = true;
return OkStatus();
@@ -587,27 +764,39 @@ class WriteTransfer : public ::testing::Test {
WriteTransfer(size_t max_bytes_to_receive = 64)
: buffer{},
handler_(7, buffer),
- ctx_(work_queue_, data_buffer_, max_bytes_to_receive) {
+ transfer_thread_(data_buffer_, encode_buffer_),
+ system_thread_(TransferThreadOptions(), transfer_thread_),
+ ctx_(transfer_thread_,
+ max_bytes_to_receive,
+ // Use a long timeout to avoid accidentally triggering timeouts.
+ std::chrono::minutes(1)) {
ctx_.service().RegisterHandler(handler_);
ASSERT_FALSE(handler_.prepare_write_called);
ASSERT_FALSE(handler_.finalize_write_called);
ctx_.call(); // Open the write stream
+ transfer_thread_.WaitUntilEventIsProcessed();
+ }
+
+ ~WriteTransfer() {
+ transfer_thread_.Terminate();
+ system_thread_.join();
}
std::array<std::byte, kData.size()> buffer;
SimpleWriteTransfer handler_;
- PW_RAW_TEST_METHOD_CONTEXT(TransferService, Write) ctx_;
+ Thread<1, 1> transfer_thread_;
+ thread::Thread system_thread_;
std::array<std::byte, 64> data_buffer_;
-
- // Not currently used in the tests, so left uninitialized.
- work_queue::WorkQueueWithBuffer<1> work_queue_;
+ std::array<std::byte, 64> encode_buffer_;
+ PW_RAW_TEST_METHOD_CONTEXT(TransferService, Write) ctx_;
};
TEST_F(WriteTransfer, SingleChunk) {
ctx_.SendClientStream(EncodeChunk({.transfer_id = 7}));
+ transfer_thread_.WaitUntilEventIsProcessed();
EXPECT_TRUE(handler_.prepare_write_called);
EXPECT_FALSE(handler_.finalize_write_called);
@@ -624,6 +813,8 @@ TEST_F(WriteTransfer, SingleChunk) {
.offset = 0,
.data = std::span(kData),
.remaining_bytes = 0}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(ctx_.total_responses(), 2u);
chunk = DecodeChunk(ctx_.responses()[1]);
EXPECT_EQ(chunk.transfer_id, 7u);
@@ -644,6 +835,7 @@ TEST_F(WriteTransfer, FinalizeFails) {
.offset = 0,
.data = std::span(kData),
.remaining_bytes = 0}));
+ transfer_thread_.WaitUntilEventIsProcessed();
ASSERT_EQ(ctx_.total_responses(), 2u);
Chunk chunk = DecodeChunk(ctx_.responses()[1]);
@@ -655,8 +847,35 @@ TEST_F(WriteTransfer, FinalizeFails) {
EXPECT_EQ(handler_.finalize_write_status, OkStatus());
}
+TEST_F(WriteTransfer, SendingFinalPacketFails) {
+ ctx_.SendClientStream(EncodeChunk({.transfer_id = 7}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
+ ctx_.output().set_send_status(Status::Unknown());
+
+ ctx_.SendClientStream<64>(EncodeChunk({.transfer_id = 7,
+ .offset = 0,
+ .data = std::span(kData),
+ .remaining_bytes = 0}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
+ // Should only have sent the transfer parameters.
+ ASSERT_EQ(ctx_.total_responses(), 1u);
+ Chunk chunk = DecodeChunk(ctx_.responses()[0]);
+ EXPECT_EQ(chunk.transfer_id, 7u);
+ ASSERT_TRUE(chunk.pending_bytes.has_value());
+ EXPECT_EQ(chunk.pending_bytes.value(), 32u);
+ ASSERT_TRUE(chunk.max_chunk_size_bytes.has_value());
+ EXPECT_EQ(chunk.max_chunk_size_bytes.value(), 37u);
+
+ // When FinalizeWrite() was called, the transfer was considered successful.
+ EXPECT_TRUE(handler_.finalize_write_called);
+ EXPECT_EQ(handler_.finalize_write_status, OkStatus());
+}
+
TEST_F(WriteTransfer, MultiChunk) {
ctx_.SendClientStream(EncodeChunk({.transfer_id = 7}));
+ transfer_thread_.WaitUntilEventIsProcessed();
EXPECT_TRUE(handler_.prepare_write_called);
EXPECT_FALSE(handler_.finalize_write_called);
@@ -669,12 +888,16 @@ TEST_F(WriteTransfer, MultiChunk) {
ctx_.SendClientStream<64>(EncodeChunk(
{.transfer_id = 7, .offset = 0, .data = std::span(kData).first(8)}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(ctx_.total_responses(), 1u);
ctx_.SendClientStream<64>(EncodeChunk({.transfer_id = 7,
.offset = 8,
.data = std::span(kData).subspan(8),
.remaining_bytes = 0}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(ctx_.total_responses(), 2u);
chunk = DecodeChunk(ctx_.responses()[1]);
EXPECT_EQ(chunk.transfer_id, 7u);
@@ -686,8 +909,71 @@ TEST_F(WriteTransfer, MultiChunk) {
EXPECT_EQ(std::memcmp(buffer.data(), kData.data(), kData.size()), 0);
}
+TEST_F(WriteTransfer, WriteFailsOnRetry) {
+ // Skip one packet to fail on a retry.
+ ctx_.output().set_send_status(Status::FailedPrecondition(), 1);
+
+ // Wait for 3 packets: initial params, retry attempt, final error
+ rpc::test::WaitForPackets(ctx_.output(), 3, [this] {
+ // Send only one client packet so the service times out.
+ ctx_.SendClientStream(EncodeChunk({.transfer_id = 7}));
+ transfer_thread_.SimulateServerTimeout(7); // Time out to trigger retry
+ });
+
+ // Attempted to send 3 packets, but the 2nd packet was dropped.
+ // Check that the last packet is an INTERNAL error from the RPC write failure.
+ ASSERT_EQ(ctx_.total_responses(), 2u);
+ Chunk chunk = DecodeChunk(ctx_.responses()[1]);
+ EXPECT_EQ(chunk.transfer_id, 7u);
+ ASSERT_TRUE(chunk.status.has_value());
+ EXPECT_EQ(chunk.status.value(), Status::Internal());
+}
+
+TEST_F(WriteTransfer, TimeoutInRecoveryState) {
+ ctx_.SendClientStream(EncodeChunk({.transfer_id = 7}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
+ ASSERT_EQ(ctx_.total_responses(), 1u);
+ Chunk chunk = DecodeChunk(ctx_.responses().back());
+ EXPECT_EQ(chunk.transfer_id, 7u);
+ EXPECT_EQ(chunk.offset, 0u);
+ ASSERT_TRUE(chunk.pending_bytes.has_value());
+ EXPECT_EQ(chunk.pending_bytes.value(), 32u);
+
+ constexpr std::span data(kData);
+
+ ctx_.SendClientStream<64>(
+ EncodeChunk({.transfer_id = 7, .offset = 0, .data = data.first(8)}));
+
+ // Skip offset 8 to enter a recovery state.
+ ctx_.SendClientStream<64>(EncodeChunk(
+ {.transfer_id = 7, .offset = 12, .data = data.subspan(12, 4)}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
+ // Recovery parameters should be sent for offset 8.
+ ASSERT_EQ(ctx_.total_responses(), 2u);
+ chunk = DecodeChunk(ctx_.responses().back());
+ EXPECT_EQ(chunk.transfer_id, 7u);
+ EXPECT_EQ(chunk.offset, 8u);
+ ASSERT_TRUE(chunk.pending_bytes.has_value());
+ EXPECT_EQ(chunk.pending_bytes.value(), 24u);
+
+ // Timeout while in the recovery state.
+ transfer_thread_.SimulateServerTimeout(7);
+ transfer_thread_.WaitUntilEventIsProcessed();
+
+ // Same recovery parameters should be re-sent.
+ ASSERT_EQ(ctx_.total_responses(), 3u);
+ chunk = DecodeChunk(ctx_.responses().back());
+ EXPECT_EQ(chunk.transfer_id, 7u);
+ EXPECT_EQ(chunk.offset, 8u);
+ ASSERT_TRUE(chunk.pending_bytes.has_value());
+ EXPECT_EQ(chunk.pending_bytes.value(), 24u);
+}
+
TEST_F(WriteTransfer, ExtendWindow) {
ctx_.SendClientStream(EncodeChunk({.transfer_id = 7}));
+ transfer_thread_.WaitUntilEventIsProcessed();
EXPECT_TRUE(handler_.prepare_write_called);
EXPECT_FALSE(handler_.finalize_write_called);
@@ -702,17 +988,24 @@ TEST_F(WriteTransfer, ExtendWindow) {
// Window starts at 32 bytes and should extend when half of that is sent.
ctx_.SendClientStream<64>(EncodeChunk(
{.transfer_id = 7, .offset = 0, .data = std::span(kData).first(4)}));
+ transfer_thread_.WaitUntilEventIsProcessed();
ASSERT_EQ(ctx_.total_responses(), 1u);
+
ctx_.SendClientStream<64>(EncodeChunk(
{.transfer_id = 7, .offset = 4, .data = std::span(kData).subspan(4, 4)}));
+ transfer_thread_.WaitUntilEventIsProcessed();
ASSERT_EQ(ctx_.total_responses(), 1u);
+
ctx_.SendClientStream<64>(EncodeChunk(
{.transfer_id = 7, .offset = 8, .data = std::span(kData).subspan(8, 4)}));
+ transfer_thread_.WaitUntilEventIsProcessed();
ASSERT_EQ(ctx_.total_responses(), 1u);
+
ctx_.SendClientStream<64>(
EncodeChunk({.transfer_id = 7,
.offset = 12,
.data = std::span(kData).subspan(12, 4)}));
+ transfer_thread_.WaitUntilEventIsProcessed();
ASSERT_EQ(ctx_.total_responses(), 2u);
// Extend parameters chunk.
@@ -727,8 +1020,9 @@ TEST_F(WriteTransfer, ExtendWindow) {
.offset = 16,
.data = std::span(kData).subspan(16),
.remaining_bytes = 0}));
- ASSERT_EQ(ctx_.total_responses(), 3u);
+ transfer_thread_.WaitUntilEventIsProcessed();
+ ASSERT_EQ(ctx_.total_responses(), 3u);
chunk = DecodeChunk(ctx_.responses()[2]);
EXPECT_EQ(chunk.transfer_id, 7u);
ASSERT_TRUE(chunk.status.has_value());
@@ -744,8 +1038,66 @@ class WriteTransferMaxBytes16 : public WriteTransfer {
WriteTransferMaxBytes16() : WriteTransfer(/*max_bytes_to_receive=*/16) {}
};
+TEST_F(WriteTransfer, TransmitterReducesWindow) {
+ ctx_.SendClientStream(EncodeChunk({.transfer_id = 7}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
+ EXPECT_TRUE(handler_.prepare_write_called);
+ EXPECT_FALSE(handler_.finalize_write_called);
+
+ ASSERT_EQ(ctx_.total_responses(), 1u);
+ Chunk chunk = DecodeChunk(ctx_.responses().back());
+ EXPECT_EQ(chunk.transfer_id, 7u);
+ EXPECT_EQ(chunk.window_end_offset, 32u);
+
+ // Send only 12 bytes and set that as the new end offset.
+ ctx_.SendClientStream<64>(EncodeChunk({.transfer_id = 7,
+ .window_end_offset = 12,
+ .offset = 0,
+ .data = std::span(kData).first(12)}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+ ASSERT_EQ(ctx_.total_responses(), 2u);
+
+ // Receiver should respond immediately with a retransmit chunk as the end of
+ // the window has been reached.
+ chunk = DecodeChunk(ctx_.responses().back());
+ EXPECT_EQ(chunk.transfer_id, 7u);
+ EXPECT_EQ(chunk.offset, 12u);
+ EXPECT_EQ(chunk.window_end_offset, 32u);
+ EXPECT_EQ(chunk.type, Chunk::Type::kParametersRetransmit);
+}
+
+TEST_F(WriteTransfer, TransmitterExtendsWindow_TerminatesWithInvalid) {
+ ctx_.SendClientStream(EncodeChunk({.transfer_id = 7}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
+ EXPECT_TRUE(handler_.prepare_write_called);
+ EXPECT_FALSE(handler_.finalize_write_called);
+
+ ASSERT_EQ(ctx_.total_responses(), 1u);
+ Chunk chunk = DecodeChunk(ctx_.responses().back());
+ EXPECT_EQ(chunk.transfer_id, 7u);
+ EXPECT_EQ(chunk.window_end_offset, 32u);
+
+ // Send only 12 bytes and set that as the new end offset.
+ ctx_.SendClientStream<64>(
+ EncodeChunk({.transfer_id = 7,
+ // Larger window end offset than the receiver's.
+ .window_end_offset = 48,
+ .offset = 0,
+ .data = std::span(kData).first(16)}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+ ASSERT_EQ(ctx_.total_responses(), 2u);
+
+ chunk = DecodeChunk(ctx_.responses().back());
+ EXPECT_EQ(chunk.transfer_id, 7u);
+ ASSERT_TRUE(chunk.status.has_value());
+ EXPECT_EQ(chunk.status.value(), Status::Internal());
+}
+
TEST_F(WriteTransferMaxBytes16, MultipleParameters) {
ctx_.SendClientStream(EncodeChunk({.transfer_id = 7}));
+ transfer_thread_.WaitUntilEventIsProcessed();
EXPECT_TRUE(handler_.prepare_write_called);
EXPECT_FALSE(handler_.finalize_write_called);
@@ -758,6 +1110,8 @@ TEST_F(WriteTransferMaxBytes16, MultipleParameters) {
ctx_.SendClientStream<64>(EncodeChunk(
{.transfer_id = 7, .offset = 0, .data = std::span(kData).first(8)}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(ctx_.total_responses(), 2u);
chunk = DecodeChunk(ctx_.responses()[1]);
EXPECT_EQ(chunk.transfer_id, 7u);
@@ -768,6 +1122,8 @@ TEST_F(WriteTransferMaxBytes16, MultipleParameters) {
ctx_.SendClientStream<64>(EncodeChunk(
{.transfer_id = 7, .offset = 8, .data = std::span(kData).subspan(8, 8)}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(ctx_.total_responses(), 3u);
chunk = DecodeChunk(ctx_.responses()[2]);
EXPECT_EQ(chunk.transfer_id, 7u);
@@ -780,6 +1136,8 @@ TEST_F(WriteTransferMaxBytes16, MultipleParameters) {
EncodeChunk({.transfer_id = 7,
.offset = 16,
.data = std::span(kData).subspan(16, 8)}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(ctx_.total_responses(), 4u);
chunk = DecodeChunk(ctx_.responses()[3]);
EXPECT_EQ(chunk.transfer_id, 7u);
@@ -792,6 +1150,8 @@ TEST_F(WriteTransferMaxBytes16, MultipleParameters) {
.offset = 24,
.data = std::span(kData).subspan(24),
.remaining_bytes = 0}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(ctx_.total_responses(), 5u);
chunk = DecodeChunk(ctx_.responses()[4]);
EXPECT_EQ(chunk.transfer_id, 7u);
@@ -806,6 +1166,7 @@ TEST_F(WriteTransferMaxBytes16, MultipleParameters) {
TEST_F(WriteTransferMaxBytes16, SetsDefaultPendingBytes) {
// Default max bytes is smaller than buffer.
ctx_.SendClientStream(EncodeChunk({.transfer_id = 7}));
+ transfer_thread_.WaitUntilEventIsProcessed();
ASSERT_EQ(ctx_.total_responses(), 1u);
Chunk chunk = DecodeChunk(ctx_.responses()[0]);
@@ -821,6 +1182,7 @@ TEST_F(WriteTransfer, SetsWriterPendingBytes) {
ctx_.service().RegisterHandler(handler_);
ctx_.SendClientStream(EncodeChunk({.transfer_id = 987}));
+ transfer_thread_.WaitUntilEventIsProcessed();
ASSERT_EQ(ctx_.total_responses(), 1u);
Chunk chunk = DecodeChunk(ctx_.responses()[0]);
@@ -830,6 +1192,7 @@ TEST_F(WriteTransfer, SetsWriterPendingBytes) {
TEST_F(WriteTransfer, UnexpectedOffset) {
ctx_.SendClientStream(EncodeChunk({.transfer_id = 7}));
+ transfer_thread_.WaitUntilEventIsProcessed();
EXPECT_TRUE(handler_.prepare_write_called);
EXPECT_FALSE(handler_.finalize_write_called);
@@ -843,12 +1206,16 @@ TEST_F(WriteTransfer, UnexpectedOffset) {
ctx_.SendClientStream<64>(EncodeChunk(
{.transfer_id = 7, .offset = 0, .data = std::span(kData).first(8)}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(ctx_.total_responses(), 1u);
ctx_.SendClientStream<64>(EncodeChunk({.transfer_id = 7,
.offset = 4, // incorrect
.data = std::span(kData).subspan(16),
.remaining_bytes = 0}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(ctx_.total_responses(), 2u);
chunk = DecodeChunk(ctx_.responses()[1]);
EXPECT_EQ(chunk.transfer_id, 7u);
@@ -860,6 +1227,8 @@ TEST_F(WriteTransfer, UnexpectedOffset) {
.offset = 8, // correct
.data = std::span(kData).subspan(8),
.remaining_bytes = 0}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(ctx_.total_responses(), 3u);
chunk = DecodeChunk(ctx_.responses()[2]);
EXPECT_EQ(chunk.transfer_id, 7u);
@@ -873,6 +1242,7 @@ TEST_F(WriteTransfer, UnexpectedOffset) {
TEST_F(WriteTransferMaxBytes16, TooMuchData) {
ctx_.SendClientStream(EncodeChunk({.transfer_id = 7}));
+ transfer_thread_.WaitUntilEventIsProcessed();
EXPECT_TRUE(handler_.prepare_write_called);
EXPECT_FALSE(handler_.finalize_write_called);
@@ -886,8 +1256,9 @@ TEST_F(WriteTransferMaxBytes16, TooMuchData) {
// pending_bytes = 16 but send 24
ctx_.SendClientStream<64>(EncodeChunk(
{.transfer_id = 7, .offset = 0, .data = std::span(kData).first(24)}));
- ASSERT_EQ(ctx_.total_responses(), 2u);
+ transfer_thread_.WaitUntilEventIsProcessed();
+ ASSERT_EQ(ctx_.total_responses(), 2u);
chunk = DecodeChunk(ctx_.responses()[1]);
EXPECT_EQ(chunk.transfer_id, 7u);
ASSERT_TRUE(chunk.status.has_value());
@@ -896,6 +1267,7 @@ TEST_F(WriteTransferMaxBytes16, TooMuchData) {
TEST_F(WriteTransfer, UnregisteredHandler) {
ctx_.SendClientStream(EncodeChunk({.transfer_id = 999}));
+ transfer_thread_.WaitUntilEventIsProcessed();
ASSERT_EQ(ctx_.total_responses(), 1u);
Chunk chunk = DecodeChunk(ctx_.responses()[0]);
@@ -906,6 +1278,7 @@ TEST_F(WriteTransfer, UnregisteredHandler) {
TEST_F(WriteTransfer, ClientError) {
ctx_.SendClientStream(EncodeChunk({.transfer_id = 7}));
+ transfer_thread_.WaitUntilEventIsProcessed();
EXPECT_TRUE(handler_.prepare_write_called);
EXPECT_FALSE(handler_.finalize_write_called);
@@ -918,6 +1291,8 @@ TEST_F(WriteTransfer, ClientError) {
ctx_.SendClientStream<64>(
EncodeChunk({.transfer_id = 7, .status = Status::DataLoss()}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
EXPECT_EQ(ctx_.total_responses(), 1u);
EXPECT_TRUE(handler_.finalize_write_called);
@@ -926,6 +1301,7 @@ TEST_F(WriteTransfer, ClientError) {
TEST_F(WriteTransfer, OnlySendParametersUpdateOnceAfterDrop) {
ctx_.SendClientStream(EncodeChunk({.transfer_id = 7}));
+ transfer_thread_.WaitUntilEventIsProcessed();
ASSERT_EQ(ctx_.total_responses(), 1u);
@@ -939,6 +1315,8 @@ TEST_F(WriteTransfer, OnlySendParametersUpdateOnceAfterDrop) {
{.transfer_id = 7, .offset = i, .data = data.subspan(i, 1)}));
}
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(ctx_.total_responses(), 2u);
Chunk chunk = DecodeChunk(ctx_.responses().back());
EXPECT_EQ(chunk.transfer_id, 7u);
@@ -949,6 +1327,7 @@ TEST_F(WriteTransfer, OnlySendParametersUpdateOnceAfterDrop) {
.offset = 1,
.data = data.subspan(1, 31),
.status = OkStatus()}));
+ transfer_thread_.WaitUntilEventIsProcessed();
EXPECT_TRUE(handler_.finalize_write_called);
EXPECT_EQ(handler_.finalize_write_status, OkStatus());
@@ -956,6 +1335,7 @@ TEST_F(WriteTransfer, OnlySendParametersUpdateOnceAfterDrop) {
TEST_F(WriteTransfer, ResendParametersIfSentRepeatedChunkDuringRecovery) {
ctx_.SendClientStream(EncodeChunk({.transfer_id = 7}));
+ transfer_thread_.WaitUntilEventIsProcessed();
ASSERT_EQ(ctx_.total_responses(), 1u);
@@ -967,16 +1347,21 @@ TEST_F(WriteTransfer, ResendParametersIfSentRepeatedChunkDuringRecovery) {
{.transfer_id = 7, .offset = i, .data = data.subspan(i, 1)}));
}
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(ctx_.total_responses(), 2u); // Resent transfer parameters once.
const auto last_chunk = EncodeChunk(
{.transfer_id = 7, .offset = kData.size() - 1, .data = data.last(1)});
ctx_.SendClientStream<64>(last_chunk);
+ transfer_thread_.WaitUntilEventIsProcessed();
// Resent transfer parameters since the packet is repeated
ASSERT_EQ(ctx_.total_responses(), 3u);
ctx_.SendClientStream<64>(last_chunk);
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(ctx_.total_responses(), 4u);
Chunk chunk = DecodeChunk(ctx_.responses().back());
@@ -987,6 +1372,7 @@ TEST_F(WriteTransfer, ResendParametersIfSentRepeatedChunkDuringRecovery) {
// Resumes normal operation when correct offset is sent.
ctx_.SendClientStream<64>(EncodeChunk(
{.transfer_id = 7, .offset = 0, .data = kData, .status = OkStatus()}));
+ transfer_thread_.WaitUntilEventIsProcessed();
EXPECT_TRUE(handler_.finalize_write_called);
EXPECT_EQ(handler_.finalize_write_status, OkStatus());
@@ -994,6 +1380,7 @@ TEST_F(WriteTransfer, ResendParametersIfSentRepeatedChunkDuringRecovery) {
TEST_F(WriteTransfer, ResendsStatusIfClientRetriesAfterStatusChunk) {
ctx_.SendClientStream(EncodeChunk({.transfer_id = 7}));
+ transfer_thread_.WaitUntilEventIsProcessed();
ASSERT_EQ(ctx_.total_responses(), 1u);
@@ -1001,6 +1388,7 @@ TEST_F(WriteTransfer, ResendsStatusIfClientRetriesAfterStatusChunk) {
.offset = 0,
.data = std::span(kData),
.remaining_bytes = 0}));
+ transfer_thread_.WaitUntilEventIsProcessed();
ASSERT_EQ(ctx_.total_responses(), 2u);
Chunk chunk = DecodeChunk(ctx_.responses().back());
@@ -1011,6 +1399,7 @@ TEST_F(WriteTransfer, ResendsStatusIfClientRetriesAfterStatusChunk) {
.offset = 0,
.data = std::span(kData),
.remaining_bytes = 0}));
+ transfer_thread_.WaitUntilEventIsProcessed();
ASSERT_EQ(ctx_.total_responses(), 3u);
chunk = DecodeChunk(ctx_.responses().back());
@@ -1018,36 +1407,14 @@ TEST_F(WriteTransfer, ResendsStatusIfClientRetriesAfterStatusChunk) {
EXPECT_EQ(chunk.status.value(), OkStatus());
}
-TEST_F(WriteTransfer, RejectsNonFinalChunksAfterCompleted) {
- ctx_.SendClientStream(EncodeChunk({.transfer_id = 7}));
-
- ASSERT_EQ(ctx_.total_responses(), 1u);
-
- ctx_.SendClientStream<64>(EncodeChunk({.transfer_id = 7,
- .offset = 0,
- .data = std::span(kData),
- .remaining_bytes = 0}));
-
- ASSERT_EQ(ctx_.total_responses(), 2u);
- Chunk chunk = DecodeChunk(ctx_.responses().back());
- ASSERT_TRUE(chunk.status.has_value());
- EXPECT_EQ(chunk.status.value(), OkStatus());
-
- ctx_.SendClientStream<64>( // Don't set remaining_bytes=0
- EncodeChunk({.transfer_id = 7, .offset = 0, .data = std::span(kData)}));
-
- ASSERT_EQ(ctx_.total_responses(), 3u);
- chunk = DecodeChunk(ctx_.responses().back());
- ASSERT_TRUE(chunk.status.has_value());
- EXPECT_EQ(chunk.status.value(), Status::FailedPrecondition());
-}
-
TEST_F(WriteTransfer, IgnoresNonPendingTransfers) {
ctx_.SendClientStream(EncodeChunk({.transfer_id = 7, .offset = 3}));
ctx_.SendClientStream(EncodeChunk(
{.transfer_id = 7, .offset = 0, .data = std::span(kData).first(10)}));
ctx_.SendClientStream(EncodeChunk({.transfer_id = 7, .status = OkStatus()}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
// Only start transfer for initial packet.
EXPECT_FALSE(handler_.prepare_write_called);
EXPECT_FALSE(handler_.finalize_write_called);
@@ -1055,11 +1422,13 @@ TEST_F(WriteTransfer, IgnoresNonPendingTransfers) {
TEST_F(WriteTransfer, AbortAndRestartIfInitialPacketIsReceived) {
ctx_.SendClientStream(EncodeChunk({.transfer_id = 7}));
+ transfer_thread_.WaitUntilEventIsProcessed();
ASSERT_EQ(ctx_.total_responses(), 1u);
ctx_.SendClientStream<64>(EncodeChunk(
{.transfer_id = 7, .offset = 0, .data = std::span(kData).first(8)}));
+ transfer_thread_.WaitUntilEventIsProcessed();
ASSERT_EQ(ctx_.total_responses(), 1u);
@@ -1069,6 +1438,7 @@ TEST_F(WriteTransfer, AbortAndRestartIfInitialPacketIsReceived) {
// Simulate client disappearing then restarting the transfer.
ctx_.SendClientStream(EncodeChunk({.transfer_id = 7}));
+ transfer_thread_.WaitUntilEventIsProcessed();
EXPECT_TRUE(handler_.prepare_write_called);
EXPECT_TRUE(handler_.finalize_write_called);
@@ -1082,6 +1452,8 @@ TEST_F(WriteTransfer, AbortAndRestartIfInitialPacketIsReceived) {
.offset = 0,
.data = std::span(kData),
.remaining_bytes = 0}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
ASSERT_EQ(ctx_.total_responses(), 3u);
EXPECT_TRUE(handler_.finalize_write_called);
@@ -1114,6 +1486,7 @@ TEST_F(ReadTransfer, PrepareError) {
ctx_.SendClientStream(
EncodeChunk({.transfer_id = 88, .pending_bytes = 128, .offset = 0}));
+ transfer_thread_.WaitUntilEventIsProcessed();
ASSERT_EQ(ctx_.total_responses(), 1u);
Chunk chunk = DecodeChunk(ctx_.responses()[0]);
@@ -1122,13 +1495,50 @@ TEST_F(ReadTransfer, PrepareError) {
EXPECT_EQ(chunk.status.value(), Status::DataLoss());
// Try starting the transfer again. It should work this time.
- ctx_.SendClientStream(
- EncodeChunk({.transfer_id = 88, .pending_bytes = 128, .offset = 0}));
- ASSERT_EQ(ctx_.total_responses(), 3u);
+ // TODO(frolv): This won't work until completion ACKs are supported.
+ if (false) {
+ ctx_.SendClientStream(
+ EncodeChunk({.transfer_id = 88, .pending_bytes = 128, .offset = 0}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
+ ASSERT_EQ(ctx_.total_responses(), 2u);
+ chunk = DecodeChunk(ctx_.responses()[1]);
+ EXPECT_EQ(chunk.transfer_id, 88u);
+ ASSERT_EQ(chunk.data.size(), kData.size());
+ EXPECT_EQ(std::memcmp(chunk.data.data(), kData.data(), chunk.data.size()),
+ 0);
+ }
+}
+
+TEST_F(WriteTransferMaxBytes16, Service_SetMaxPendingBytes) {
+ ctx_.SendClientStream(EncodeChunk({.transfer_id = 7}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
+ EXPECT_TRUE(handler_.prepare_write_called);
+ EXPECT_FALSE(handler_.finalize_write_called);
+
+ // First parameters chunk has default pending bytes of 16.
+ ASSERT_EQ(ctx_.total_responses(), 1u);
+ Chunk chunk = DecodeChunk(ctx_.responses()[0]);
+ EXPECT_EQ(chunk.transfer_id, 7u);
+ ASSERT_TRUE(chunk.pending_bytes.has_value());
+ EXPECT_EQ(chunk.pending_bytes.value(), 16u);
+
+ // Update the pending bytes value.
+ ctx_.service().set_max_pending_bytes(12);
+
+ ctx_.SendClientStream<64>(EncodeChunk(
+ {.transfer_id = 7, .offset = 0, .data = std::span(kData).first(8)}));
+ transfer_thread_.WaitUntilEventIsProcessed();
+
+ // Second parameters chunk should use the new max pending bytes.
+ ASSERT_EQ(ctx_.total_responses(), 2u);
chunk = DecodeChunk(ctx_.responses()[1]);
- EXPECT_EQ(chunk.transfer_id, 88u);
- ASSERT_EQ(chunk.data.size(), kData.size());
- EXPECT_EQ(std::memcmp(chunk.data.data(), kData.data(), chunk.data.size()), 0);
+ EXPECT_EQ(chunk.transfer_id, 7u);
+ EXPECT_EQ(chunk.offset, 8u);
+ EXPECT_EQ(chunk.window_end_offset, 20u);
+ ASSERT_TRUE(chunk.pending_bytes.has_value());
+ EXPECT_EQ(chunk.pending_bytes.value(), 12u);
}
PW_MODIFY_DIAGNOSTICS_POP();
diff --git a/pw_transfer/transfer_thread.cc b/pw_transfer/transfer_thread.cc
new file mode 100644
index 000000000..aac5cad35
--- /dev/null
+++ b/pw_transfer/transfer_thread.cc
@@ -0,0 +1,320 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#define PW_LOG_MODULE_NAME "TRN"
+
+#include "pw_transfer/transfer_thread.h"
+
+#include "pw_assert/check.h"
+#include "pw_log/log.h"
+#include "pw_transfer/internal/chunk.h"
+
+PW_MODIFY_DIAGNOSTICS_PUSH();
+PW_MODIFY_DIAGNOSTIC(ignored, "-Wmissing-field-initializers");
+
+namespace pw::transfer::internal {
+
+void TransferThread::Terminate() {
+ next_event_ownership_.acquire();
+ next_event_.type = EventType::kTerminate;
+ event_notification_.release();
+}
+
+void TransferThread::SimulateTimeout(EventType type, uint32_t transfer_id) {
+ next_event_ownership_.acquire();
+
+ next_event_.type = type;
+ next_event_.chunk = {};
+ next_event_.chunk.transfer_id = transfer_id;
+
+ event_notification_.release();
+
+ WaitUntilEventIsProcessed();
+}
+
+void TransferThread::Run() {
+ // Next event starts freed.
+ next_event_ownership_.release();
+
+ while (true) {
+ if (event_notification_.try_acquire_until(GetNextTransferTimeout())) {
+ if (next_event_.type == EventType::kTerminate) {
+ return;
+ }
+
+ HandleEvent(next_event_);
+
+ // Finished processing the event. Allow the next_event struct to be
+ // overwritten.
+ next_event_ownership_.release();
+ }
+
+ // Regardless of whether an event was received or not, check for any
+ // transfers which have timed out and process them if so.
+ for (Context& context : client_transfers_) {
+ if (context.timed_out()) {
+ context.HandleEvent({.type = EventType::kClientTimeout});
+ }
+ }
+ for (Context& context : server_transfers_) {
+ if (context.timed_out()) {
+ context.HandleEvent({.type = EventType::kServerTimeout});
+ }
+ }
+ }
+}
+
+chrono::SystemClock::time_point TransferThread::GetNextTransferTimeout() const {
+ chrono::SystemClock::time_point timeout =
+ chrono::SystemClock::TimePointAfterAtLeast(kMaxTimeout);
+
+ for (Context& context : client_transfers_) {
+ auto ctx_timeout = context.timeout();
+ if (ctx_timeout.has_value() && ctx_timeout.value() < timeout) {
+ timeout = ctx_timeout.value();
+ }
+ }
+ for (Context& context : server_transfers_) {
+ auto ctx_timeout = context.timeout();
+ if (ctx_timeout.has_value() && ctx_timeout.value() < timeout) {
+ timeout = ctx_timeout.value();
+ }
+ }
+
+ return timeout;
+}
+
+void TransferThread::StartTransfer(TransferType type,
+ uint32_t transfer_id,
+ uint32_t handler_id,
+ stream::Stream* stream,
+ const TransferParameters& max_parameters,
+ Function<void(Status)>&& on_completion,
+ chrono::SystemClock::duration timeout,
+ uint8_t max_retries) {
+ // Block until the last event has been processed.
+ next_event_ownership_.acquire();
+
+ bool is_client_transfer = stream != nullptr;
+
+ next_event_.type = is_client_transfer ? EventType::kNewClientTransfer
+ : EventType::kNewServerTransfer;
+ next_event_.new_transfer = {
+ .type = type,
+ .transfer_id = transfer_id,
+ .handler_id = handler_id,
+ .max_parameters = &max_parameters,
+ .timeout = timeout,
+ .max_retries = max_retries,
+ .transfer_thread = this,
+ };
+
+ staged_on_completion_ = std::move(on_completion);
+
+ // The transfer is initialized with either a stream (client-side) or a handler
+ // (server-side). If no stream is provided, try to find a registered handler
+ // with the specified ID.
+ if (is_client_transfer) {
+ next_event_.new_transfer.stream = stream;
+ next_event_.new_transfer.rpc_writer = &static_cast<rpc::Writer&>(
+ type == TransferType::kTransmit ? client_write_stream_
+ : client_read_stream_);
+ } else {
+ auto handler = std::find_if(handlers_.begin(),
+ handlers_.end(),
+ [&](auto& h) { return h.id() == handler_id; });
+ if (handler != handlers_.end()) {
+ next_event_.new_transfer.handler = &*handler;
+ next_event_.new_transfer.rpc_writer = &static_cast<rpc::Writer&>(
+ type == TransferType::kTransmit ? server_read_stream_
+ : server_write_stream_);
+ } else {
+ // No handler exists for the transfer: return a NOT_FOUND.
+ next_event_.type = EventType::kSendStatusChunk;
+ next_event_.send_status_chunk = {
+ .transfer_id = transfer_id,
+ .status = Status::NotFound().code(),
+ .stream = type == TransferType::kTransmit
+ ? TransferStream::kServerRead
+ : TransferStream::kServerWrite,
+ };
+ }
+ }
+
+ event_notification_.release();
+}
+
+void TransferThread::ProcessChunk(EventType type, ConstByteSpan chunk) {
+ // If this assert is hit, there is a bug in the transfer implementation.
+ // Contexts' max_chunk_size_bytes fields should be set based on the size of
+ // chunk_buffer_.
+ PW_CHECK(chunk.size() <= chunk_buffer_.size(),
+ "Transfer received a larger chunk than it can handle.");
+
+ Result<uint32_t> transfer_id = ExtractTransferId(chunk);
+ if (!transfer_id.ok()) {
+ PW_LOG_ERROR("Received a malformed chunk without a transfer ID");
+ return;
+ }
+
+ // Block until the last event has been processed.
+ next_event_ownership_.acquire();
+
+ std::memcpy(chunk_buffer_.data(), chunk.data(), chunk.size());
+
+ next_event_.type = type;
+ next_event_.chunk = {
+ .transfer_id = *transfer_id,
+ .data = chunk_buffer_.data(),
+ .size = chunk.size(),
+ };
+
+ event_notification_.release();
+}
+
+void TransferThread::SetClientStream(TransferStream type,
+ rpc::RawClientReaderWriter& stream) {
+ // Block until the last event has been processed.
+ next_event_ownership_.acquire();
+
+ next_event_.type = EventType::kSetTransferStream;
+ next_event_.set_transfer_stream = type;
+ staged_client_stream_ = std::move(stream);
+
+ event_notification_.release();
+}
+
+void TransferThread::SetServerStream(TransferStream type,
+ rpc::RawServerReaderWriter& stream) {
+ // Block until the last event has been processed.
+ next_event_ownership_.acquire();
+
+ next_event_.type = EventType::kSetTransferStream;
+ next_event_.set_transfer_stream = type;
+ staged_server_stream_ = std::move(stream);
+
+ event_notification_.release();
+}
+
+void TransferThread::TransferHandlerEvent(EventType type,
+ internal::Handler& handler) {
+ // Block until the last event has been processed.
+ next_event_ownership_.acquire();
+
+ next_event_.type = type;
+ if (type == EventType::kAddTransferHandler) {
+ next_event_.add_transfer_handler = &handler;
+ } else {
+ next_event_.remove_transfer_handler = &handler;
+ }
+
+ event_notification_.release();
+}
+
+void TransferThread::HandleEvent(const internal::Event& event) {
+ switch (event.type) {
+ case EventType::kSendStatusChunk:
+ SendStatusChunk(event.send_status_chunk);
+ break;
+
+ case EventType::kSetTransferStream:
+ switch (event.set_transfer_stream) {
+ case TransferStream::kClientRead:
+ client_read_stream_ = std::move(staged_client_stream_);
+ break;
+
+ case TransferStream::kClientWrite:
+ client_write_stream_ = std::move(staged_client_stream_);
+ break;
+
+ case TransferStream::kServerRead:
+ server_read_stream_ = std::move(staged_server_stream_);
+ break;
+
+ case TransferStream::kServerWrite:
+ server_write_stream_ = std::move(staged_server_stream_);
+ break;
+ }
+ return;
+
+ case EventType::kAddTransferHandler:
+ handlers_.push_front(*event.add_transfer_handler);
+ return;
+
+ case EventType::kRemoveTransferHandler:
+ handlers_.remove(*event.remove_transfer_handler);
+ return;
+
+ default:
+ // Other events are handled by individual transfer contexts.
+ break;
+ }
+
+ if (Context* ctx = FindContextForEvent(event); ctx != nullptr) {
+ if (event.type == EventType::kNewClientTransfer) {
+ // TODO(frolv): This is terrible.
+ static_cast<ClientContext*>(ctx)->set_on_completion(
+ std::move(staged_on_completion_));
+ }
+
+ ctx->HandleEvent(event);
+ }
+}
+
+Context* TransferThread::FindContextForEvent(
+ const internal::Event& event) const {
+ switch (event.type) {
+ case EventType::kNewClientTransfer:
+ return FindNewTransfer(client_transfers_, event.new_transfer.transfer_id);
+ case EventType::kNewServerTransfer:
+ return FindNewTransfer(server_transfers_, event.new_transfer.transfer_id);
+ case EventType::kClientChunk:
+ return FindActiveTransfer(client_transfers_, event.chunk.transfer_id);
+ case EventType::kServerChunk:
+ return FindActiveTransfer(server_transfers_, event.chunk.transfer_id);
+ case EventType::kClientTimeout: // Manually triggered client timeout
+ return FindActiveTransfer(client_transfers_, event.chunk.transfer_id);
+ case EventType::kServerTimeout: // Manually triggered server timeout
+ return FindActiveTransfer(server_transfers_, event.chunk.transfer_id);
+ default:
+ return nullptr;
+ }
+}
+
+void TransferThread::SendStatusChunk(
+ const internal::SendStatusChunkEvent& event) {
+ rpc::Writer& destination = stream_for(event.stream);
+
+ internal::Chunk chunk = {};
+ chunk.transfer_id = event.transfer_id;
+ chunk.status = event.status;
+
+ Result<ConstByteSpan> result = internal::EncodeChunk(chunk, chunk_buffer_);
+
+ if (!result.ok()) {
+ PW_LOG_ERROR("Failed to encode final chunk for transfer %u",
+ static_cast<unsigned>(event.transfer_id));
+ return;
+ }
+
+ if (!destination.Write(result.value()).ok()) {
+ PW_LOG_ERROR("Failed to send final chunk for transfer %u",
+ static_cast<unsigned>(event.transfer_id));
+ return;
+ }
+}
+
+} // namespace pw::transfer::internal
+
+PW_MODIFY_DIAGNOSTICS_POP();
diff --git a/pw_transfer/transfer_thread_test.cc b/pw_transfer/transfer_thread_test.cc
new file mode 100644
index 000000000..7890041ba
--- /dev/null
+++ b/pw_transfer/transfer_thread_test.cc
@@ -0,0 +1,218 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_transfer/transfer_thread.h"
+
+#include "gtest/gtest.h"
+#include "pw_assert/check.h"
+#include "pw_bytes/array.h"
+#include "pw_rpc/raw/client_testing.h"
+#include "pw_rpc/raw/test_method_context.h"
+#include "pw_rpc/thread_testing.h"
+#include "pw_thread/thread.h"
+#include "pw_thread_stl/options.h"
+#include "pw_transfer/handler.h"
+#include "pw_transfer/transfer.h"
+#include "pw_transfer/transfer.raw_rpc.pb.h"
+#include "pw_transfer_private/chunk_testing.h"
+
+namespace pw::transfer::test {
+namespace {
+
+using internal::Chunk;
+
+PW_MODIFY_DIAGNOSTICS_PUSH();
+PW_MODIFY_DIAGNOSTIC(ignored, "-Wmissing-field-initializers");
+
+// TODO(frolv): Have a generic way to obtain a thread for testing on any system.
+thread::Options& TransferThreadOptions() {
+ static thread::stl::Options options;
+ return options;
+}
+
+class TransferThreadTest : public ::testing::Test {
+ public:
+ TransferThreadTest()
+ : ctx_(transfer_thread_, 512),
+ max_parameters_(chunk_buffer_.size(),
+ chunk_buffer_.size(),
+ cfg::kDefaultExtendWindowDivisor),
+ transfer_thread_(chunk_buffer_, encode_buffer_),
+ system_thread_(TransferThreadOptions(), transfer_thread_) {}
+
+ ~TransferThreadTest() {
+ transfer_thread_.Terminate();
+ system_thread_.join();
+ }
+
+ protected:
+ PW_RAW_TEST_METHOD_CONTEXT(TransferService, Read) ctx_;
+
+ std::array<std::byte, 64> chunk_buffer_;
+ std::array<std::byte, 64> encode_buffer_;
+
+ rpc::RawClientTestContext<> rpc_client_context_;
+ internal::TransferParameters max_parameters_;
+
+ transfer::Thread<1, 1> transfer_thread_;
+
+ thread::Thread system_thread_;
+};
+
+class SimpleReadTransfer final : public ReadOnlyHandler {
+ public:
+ SimpleReadTransfer(uint32_t transfer_id, ConstByteSpan data)
+ : ReadOnlyHandler(transfer_id),
+ prepare_read_called(false),
+ finalize_read_called(false),
+ finalize_read_status(Status::Unknown()),
+ reader_(data) {}
+
+ Status PrepareRead() final {
+ PW_CHECK_OK(reader_.Seek(0));
+ set_reader(reader_);
+ prepare_read_called = true;
+ return OkStatus();
+ }
+
+ void FinalizeRead(Status status) final {
+ finalize_read_called = true;
+ finalize_read_status = status;
+ }
+
+ bool prepare_read_called;
+ bool finalize_read_called;
+ Status finalize_read_status;
+
+ private:
+ stream::MemoryReader reader_;
+};
+
+constexpr auto kData = bytes::Initialized<32>([](size_t i) { return i; });
+
+TEST_F(TransferThreadTest, AddTransferHandler) {
+ auto reader_writer = ctx_.reader_writer();
+ transfer_thread_.SetServerReadStream(reader_writer);
+
+ SimpleReadTransfer handler(3, kData);
+ transfer_thread_.AddTransferHandler(handler);
+
+ transfer_thread_.StartServerTransfer(internal::TransferType::kTransmit,
+ 3,
+ 3,
+ max_parameters_,
+ std::chrono::seconds(2),
+ 0);
+
+ transfer_thread_.WaitUntilEventIsProcessed();
+
+ EXPECT_TRUE(handler.prepare_read_called);
+}
+
+TEST_F(TransferThreadTest, RemoveTransferHandler) {
+ auto reader_writer = ctx_.reader_writer();
+ transfer_thread_.SetServerReadStream(reader_writer);
+
+ SimpleReadTransfer handler(3, kData);
+ transfer_thread_.AddTransferHandler(handler);
+ transfer_thread_.RemoveTransferHandler(handler);
+
+ transfer_thread_.StartServerTransfer(internal::TransferType::kTransmit,
+ 3,
+ 3,
+ max_parameters_,
+ std::chrono::seconds(2),
+ 0);
+
+ transfer_thread_.WaitUntilEventIsProcessed();
+
+ EXPECT_FALSE(handler.prepare_read_called);
+
+ ASSERT_EQ(ctx_.total_responses(), 1u);
+ auto chunk = DecodeChunk(ctx_.response());
+ EXPECT_EQ(chunk.transfer_id, 3u);
+ ASSERT_TRUE(chunk.status.has_value());
+ EXPECT_EQ(chunk.status.value(), Status::NotFound());
+}
+
+TEST_F(TransferThreadTest, ProcessChunk_SendsWindow) {
+ auto reader_writer = ctx_.reader_writer();
+ transfer_thread_.SetServerReadStream(reader_writer);
+
+ SimpleReadTransfer handler(3, kData);
+ transfer_thread_.AddTransferHandler(handler);
+
+ transfer_thread_.StartServerTransfer(internal::TransferType::kTransmit,
+ 3,
+ 3,
+ max_parameters_,
+ std::chrono::seconds(2),
+ 0);
+
+ rpc::test::WaitForPackets(ctx_.output(), 2, [this] {
+ // Well-formed transfer parameters chunk requesting a 16-byte window in
+ transfer_thread_.ProcessServerChunk(
+ EncodeChunk({.transfer_id = 3,
+ .window_end_offset = 16,
+ .pending_bytes = 16,
+ .max_chunk_size_bytes = 8,
+ .offset = 0,
+ .type = Chunk::Type::kParametersRetransmit}));
+ });
+
+ ASSERT_EQ(ctx_.total_responses(), 2u);
+ auto chunk = DecodeChunk(ctx_.responses()[0]);
+ EXPECT_EQ(chunk.transfer_id, 3u);
+ EXPECT_EQ(chunk.offset, 0u);
+ EXPECT_EQ(chunk.data.size(), 8u);
+ EXPECT_EQ(std::memcmp(chunk.data.data(), kData.data(), chunk.data.size()), 0);
+
+ chunk = DecodeChunk(ctx_.responses()[1]);
+ EXPECT_EQ(chunk.transfer_id, 3u);
+ EXPECT_EQ(chunk.offset, 8u);
+ EXPECT_EQ(chunk.data.size(), 8u);
+ EXPECT_EQ(std::memcmp(chunk.data.data(), kData.data() + 8, chunk.data.size()),
+ 0);
+}
+
+TEST_F(TransferThreadTest, ProcessChunk_Malformed) {
+ auto reader_writer = ctx_.reader_writer();
+ transfer_thread_.SetServerReadStream(reader_writer);
+
+ SimpleReadTransfer handler(3, kData);
+ transfer_thread_.AddTransferHandler(handler);
+
+ rpc::test::WaitForPackets(ctx_.output(), 1, [this] {
+ transfer_thread_.StartServerTransfer(internal::TransferType::kTransmit,
+ 3,
+ 3,
+ max_parameters_,
+ std::chrono::seconds(2),
+ 0);
+
+ // Malformed transfer parameters chunk without a pending_bytes field.
+ transfer_thread_.ProcessServerChunk(EncodeChunk({.transfer_id = 3}));
+ });
+
+ ASSERT_EQ(ctx_.total_responses(), 1u);
+ auto chunk = DecodeChunk(ctx_.response());
+ EXPECT_EQ(chunk.transfer_id, 3u);
+ ASSERT_TRUE(chunk.status.has_value());
+ EXPECT_EQ(chunk.status.value(), Status::InvalidArgument());
+}
+
+PW_MODIFY_DIAGNOSTICS_POP();
+
+} // namespace
+} // namespace pw::transfer::test
diff --git a/pw_transfer/ts/BUILD.bazel b/pw_transfer/ts/BUILD.bazel
index c84824f3a..ca639ae64 100644
--- a/pw_transfer/ts/BUILD.bazel
+++ b/pw_transfer/ts/BUILD.bazel
@@ -58,8 +58,8 @@ ts_library(
deps = [
":lib",
":transfer_proto_collection",
- "//pw_rpc:packet_proto_tspb",
"//pw_rpc/ts:lib",
+ "//pw_rpc/ts:packet_proto_tspb",
"//pw_status/ts:pw_status",
"//pw_transfer:transfer_proto_tspb",
"@npm//@types/jasmine",
diff --git a/pw_transfer/ts/transfer.ts b/pw_transfer/ts/transfer.ts
index 845959fa3..708f5cfcb 100644
--- a/pw_transfer/ts/transfer.ts
+++ b/pw_transfer/ts/transfer.ts
@@ -1,4 +1,4 @@
-// Copyright 2021 The Pigweed Authors
+// Copyright 2022 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
@@ -138,6 +138,7 @@ export abstract class Transfer {
const chunk = new Chunk();
chunk.setStatus(error);
chunk.setTransferId(this.id);
+ chunk.setType(Chunk.Type.TRANSFER_COMPLETION);
this.sendChunk(chunk);
this.finish(error);
}
@@ -252,25 +253,21 @@ export class ReadTransfer extends Transfer {
}
protected get initialChunk(): Chunk {
- return this.transferParameters();
+ return this.transferParameters(Chunk.Type.TRANSFER_START);
}
/** Builds an updated transfer parameters chunk to send the server. */
- private transferParameters(extend = false): Chunk {
+ private transferParameters(type: Chunk.TypeMap[keyof Chunk.TypeMap]): Chunk {
this.pendingBytes = this.maxBytesToReceive;
this.windowEndOffset = this.offset + this.maxBytesToReceive;
- const chunkType = extend
- ? Chunk.Type.PARAMETERS_CONTINUE
- : Chunk.Type.PARAMETERS_RETRANSMIT;
-
const chunk = new Chunk();
chunk.setTransferId(this.id);
chunk.setPendingBytes(this.pendingBytes);
chunk.setMaxChunkSizeBytes(this.maxChunkSize);
chunk.setOffset(this.offset);
chunk.setWindowEndOffset(this.windowEndOffset);
- chunk.setType(chunkType);
+ chunk.setType(type);
if (this.chunkDelayMicroS !== 0) {
chunk.setMinDelayMicroseconds(this.chunkDelayMicroS!);
@@ -289,7 +286,7 @@ export class ReadTransfer extends Transfer {
// Initially, the transfer service only supports in-order transfers.
// If data is received out of order, request that the server
// retransmit from the previous offset.
- this.sendChunk(this.transferParameters());
+ this.sendChunk(this.transferParameters(Chunk.Type.PARAMETERS_RETRANSMIT));
return;
}
@@ -304,10 +301,11 @@ export class ReadTransfer extends Transfer {
if (chunk.hasRemainingBytes()) {
if (chunk.getRemainingBytes() === 0) {
- // No more data to read. Aknowledge receipt and finish.
+ // No more data to read. Acknowledge receipt and finish.
const endChunk = new Chunk();
endChunk.setTransferId(this.id);
endChunk.setStatus(Status.OK);
+ endChunk.setType(Chunk.Type.TRANSFER_COMPLETION);
this.sendChunk(endChunk);
this.finish(Status.OK);
return;
@@ -323,6 +321,35 @@ export class ReadTransfer extends Transfer {
}
}
+ if (chunk.getWindowEndOffset() !== 0) {
+ if (chunk.getWindowEndOffset() < this.offset) {
+ console.error(
+ `Transfer ${
+ this.id
+ }: transmitter sent invalid earlier end offset ${chunk.getWindowEndOffset()} (receiver offset ${
+ this.offset
+ })`
+ );
+ this.sendError(Status.INTERNAL);
+ return;
+ }
+
+ if (chunk.getWindowEndOffset() > this.windowEndOffset) {
+ console.error(
+ `Transfer ${
+ this.id
+ }: transmitter sent invalid later end offset ${chunk.getWindowEndOffset()} (receiver end offset ${
+ this.windowEndOffset
+ })`
+ );
+ this.sendError(Status.INTERNAL);
+ return;
+ }
+
+ this.windowEndOffset = chunk.getWindowEndOffset();
+ this.pendingBytes -= chunk.getWindowEndOffset() - this.offset;
+ }
+
const remainingWindowSize = this.windowEndOffset - this.offset;
const extendWindow =
remainingWindowSize <=
@@ -337,14 +364,14 @@ export class ReadTransfer extends Transfer {
if (this.pendingBytes === 0) {
// All pending data was received. Send out a new parameters chunk
// for the next block.
- this.sendChunk(this.transferParameters());
+ this.sendChunk(this.transferParameters(Chunk.Type.PARAMETERS_RETRANSMIT));
} else if (extendWindow) {
- this.sendChunk(this.transferParameters(/*extend=*/ true));
+ this.sendChunk(this.transferParameters(Chunk.Type.PARAMETERS_CONTINUE));
}
}
protected retryAfterTimeout(): void {
- this.sendChunk(this.transferParameters());
+ this.sendChunk(this.transferParameters(Chunk.Type.PARAMETERS_RETRANSMIT));
}
}
@@ -377,6 +404,7 @@ export class WriteTransfer extends Transfer {
protected get initialChunk(): Chunk {
const chunk = new Chunk();
chunk.setTransferId(this.id);
+ chunk.setType(Chunk.Type.TRANSFER_START);
return chunk;
}
@@ -487,6 +515,8 @@ export class WriteTransfer extends Transfer {
const chunk = new Chunk();
chunk.setTransferId(this.id);
chunk.setOffset(this.offset);
+ chunk.setType(Chunk.Type.TRANSFER_DATA);
+
const maxBytesInChunk = Math.min(
this.maxChunkSize,
this.windowEndOffset - this.offset
diff --git a/pw_transfer/ts/transfer_test.ts b/pw_transfer/ts/transfer_test.ts
index 86ef5e049..1707a82f7 100644
--- a/pw_transfer/ts/transfer_test.ts
+++ b/pw_transfer/ts/transfer_test.ts
@@ -400,7 +400,9 @@ describe('Encoder', () => {
]);
await manager.write(4, textEncoder.encode('hello this is a message'));
- expect(receivedData()).toEqual(textEncoder.encode('hello this is a message'));
+ expect(receivedData()).toEqual(
+ textEncoder.encode('hello this is a message')
+ );
expect(sentChunks[1].getData()).toEqual(textEncoder.encode('hell'));
expect(sentChunks[2].getData()).toEqual(textEncoder.encode('o th'));
expect(sentChunks[3].getData()).toEqual(textEncoder.encode('is i'));
@@ -611,14 +613,17 @@ describe('Encoder', () => {
.catch(error => {
const expectedChunk1 = new Chunk();
expectedChunk1.setTransferId(22);
+ expectedChunk1.setType(Chunk.Type.TRANSFER_START);
const expectedChunk2 = new Chunk();
expectedChunk2.setTransferId(22);
expectedChunk2.setData(textEncoder.encode('01234'));
+ expectedChunk2.setType(Chunk.Type.TRANSFER_DATA);
const lastChunk = new Chunk();
lastChunk.setTransferId(22);
lastChunk.setData(textEncoder.encode('56789'));
lastChunk.setOffset(5);
lastChunk.setRemainingBytes(0);
+ lastChunk.setType(Chunk.Type.TRANSFER_DATA);
const expectedChunks = [
expectedChunk1,
diff --git a/pw_unit_test/BUILD.bazel b/pw_unit_test/BUILD.bazel
index 5064793b2..7ef41c747 100644
--- a/pw_unit_test/BUILD.bazel
+++ b/pw_unit_test/BUILD.bazel
@@ -14,6 +14,7 @@
load(
"//pw_build:pigweed.bzl",
+ "pw_cc_binary",
"pw_cc_library",
"pw_cc_test",
)
@@ -70,19 +71,32 @@ pw_cc_library(
],
)
-filegroup(
+pw_cc_library(
name = "logging_event_handler",
srcs = [
"logging_event_handler.cc",
+ ],
+ hdrs = [
"public/pw_unit_test/logging_event_handler.h",
],
+ includes = [
+ "public",
+ ],
+ deps = [
+ "//pw_log",
+ "//pw_unit_test",
+ ],
)
-filegroup(
+pw_cc_binary(
name = "logging_main",
srcs = [
"logging_main.cc",
],
+ deps = [
+ ":logging_event_handler",
+ "//pw_unit_test",
+ ],
)
pw_cc_library(
@@ -105,7 +119,7 @@ proto_library(
)
pw_proto_library(
- name = "unit_test_pwpb",
+ name = "unit_test_cc",
deps = [":unit_test_proto"],
)
@@ -121,12 +135,9 @@ pw_cc_library(
],
deps = [
":pw_unit_test",
- ":unit_test_pwpb",
+ ":unit_test_cc.pwpb",
+ ":unit_test_cc.raw_rpc",
"//pw_log",
- # TODO(hepler): RPC deps not used directly should be provided by the proto library
- "//pw_rpc",
- "//pw_rpc/raw:client_api",
- "//pw_rpc/raw:server_api",
],
)
diff --git a/pw_unit_test/docs.rst b/pw_unit_test/docs.rst
index 450998491..f96c56abc 100644
--- a/pw_unit_test/docs.rst
+++ b/pw_unit_test/docs.rst
@@ -26,16 +26,26 @@ Pigweed.
------------------
Writing unit tests
------------------
-
``pw_unit_test``'s interface is largely compatible with `Google Test`_. Refer to
the Google Test documentation for examples of to define unit test cases.
.. note::
- A lot of Google Test's more advanced features are not yet implemented. To
- request a feature addition, please
+ Many of Google Test's more advanced features are not yet implemented. Missing
+ features include:
+
+ * Any GoogleMock features (e.g. :c:macro:`EXPECT_THAT`)
+ * Floating point comparison macros (e.g. :c:macro:`EXPECT_FLOAT_EQ`)
+ * Death tests (e.g. :c:macro:`EXPECT_DEATH`); ``EXPECT_DEATH_IF_SUPPORTED``
+ does nothing but silently passes
+ * Value-parameterized tests
+
+ To request a feature addition, please
`let us know <mailto:pigweed@googlegroups.com>`_.
+ See `Using upstream Googletest and Googlemock` below for information
+ about using upstream Googletest instead.
+
------------------------
Using the test framework
------------------------
@@ -380,3 +390,20 @@ this module.
The size of the memory pool to use for test fixture instances. By default this
is set to 16K.
+
+Using upstream Googletest and Googlemock
+========================================
+
+If you want to use the full upstream Googletest/Googlemock, you must do the
+following:
+
+* Set the GN var ``dir_pw_third_party_googletest`` to the location of the
+ googletest source. You can use ``pw package install googletest`` to fetch the
+ source if desired.
+* Set the GN var ``pw_unit_test_MAIN = "//third_party/googletest:gmock_main"``
+* Set the GN var ``pw_unit_test_PUBLIC_DEPS = [ "//third_party/googletest" ]``
+
+.. note::
+
+ Not all unit tests build properly with upstream Googletest yet. This is a
+ work in progress.
diff --git a/pw_unit_test/framework.cc b/pw_unit_test/framework.cc
index c71fd9430..46d913b60 100644
--- a/pw_unit_test/framework.cc
+++ b/pw_unit_test/framework.cc
@@ -92,6 +92,9 @@ void Framework::EndCurrentTest() {
case TestResult::kFailure:
run_tests_summary_.failed_tests++;
break;
+ case TestResult::kSkipped:
+ run_tests_summary_.skipped_tests++;
+ break;
}
if (event_handler_ != nullptr) {
@@ -101,6 +104,14 @@ void Framework::EndCurrentTest() {
current_test_ = nullptr;
}
+void Framework::CurrentTestSkip(int line) {
+ if (current_result_ == TestResult::kSuccess) {
+ current_result_ = TestResult::kSkipped;
+ }
+ return CurrentTestExpectSimple(
+ "(test skipped)", "(test skipped)", line, true);
+}
+
void Framework::CurrentTestExpectSimple(const char* expression,
const char* evaluated_expression,
int line,
diff --git a/pw_unit_test/framework_test.cc b/pw_unit_test/framework_test.cc
index 1c0a1ce4d..d287859a5 100644
--- a/pw_unit_test/framework_test.cc
+++ b/pw_unit_test/framework_test.cc
@@ -78,6 +78,22 @@ TEST(PigweedTest, SucceedAndFailMacros) {
}
}
+TEST(PigweedTest, SkipMacro) {
+ GTEST_SKIP();
+ // This code should not run.
+ EXPECT_TRUE(false);
+}
+
+class SkipOnSetUpTest : public ::testing::Test {
+ public:
+ void SetUp() override { GTEST_SKIP(); }
+};
+
+TEST_F(SkipOnSetUpTest, FailTest) {
+ // This code should not run because the test was skipped in SetUp().
+ EXPECT_TRUE(false);
+}
+
class NonCopyable {
public:
NonCopyable(int value) : value_(value) {}
diff --git a/pw_unit_test/logging_event_handler.cc b/pw_unit_test/logging_event_handler.cc
index 7c71c832b..7e80c321c 100644
--- a/pw_unit_test/logging_event_handler.cc
+++ b/pw_unit_test/logging_event_handler.cc
@@ -31,6 +31,9 @@ void LoggingEventHandler::RunAllTestsEnd(
const RunTestsSummary& run_tests_summary) {
PW_LOG_INFO("[==========] Done running all tests.");
PW_LOG_INFO("[ PASSED ] %d test(s).", run_tests_summary.passed_tests);
+ if (run_tests_summary.skipped_tests) {
+ PW_LOG_WARN("[ SKIPPED ] %d test(s).", run_tests_summary.skipped_tests);
+ }
if (run_tests_summary.failed_tests) {
PW_LOG_ERROR("[ FAILED ] %d test(s).", run_tests_summary.failed_tests);
}
@@ -52,6 +55,10 @@ void LoggingEventHandler::TestCaseEnd(const TestCase& test_case,
PW_LOG_ERROR(
"[ FAILED ] %s.%s", test_case.suite_name, test_case.test_name);
break;
+ case TestResult::kSkipped:
+ PW_LOG_WARN(
+ "[ SKIPPED ] %s.%s", test_case.suite_name, test_case.test_name);
+ break;
}
}
diff --git a/pw_unit_test/public/pw_unit_test/event_handler.h b/pw_unit_test/public/pw_unit_test/event_handler.h
index 3a5a7101c..ea029ac3d 100644
--- a/pw_unit_test/public/pw_unit_test/event_handler.h
+++ b/pw_unit_test/public/pw_unit_test/event_handler.h
@@ -51,6 +51,8 @@ namespace unit_test {
enum class TestResult {
kSuccess = 0,
kFailure = 1,
+ // Test skipped at runtime. This is neither a success nor a failure.
+ kSkipped = 2,
};
struct TestCase {
diff --git a/pw_unit_test/public/pw_unit_test/framework.h b/pw_unit_test/public/pw_unit_test/framework.h
index 8b247268a..ab3594898 100644
--- a/pw_unit_test/public/pw_unit_test/framework.h
+++ b/pw_unit_test/public/pw_unit_test/framework.h
@@ -76,6 +76,11 @@
// Generates a fatal failure with a generic message.
#define GTEST_FAIL() return ADD_FAILURE()
+// Skips test at runtime, which is neither successful nor failed. Skip aborts
+// current function.
+#define GTEST_SKIP() \
+ return ::pw::unit_test::internal::Framework::Get().CurrentTestSkip(__LINE__)
+
// Define either macro to 1 to omit the definition of FAIL(), which is a
// generic name and clashes with some other libraries.
#if !(defined(GTEST_DONT_DEFINE_FAIL) && GTEST_DONT_DEFINE_FAIL)
@@ -110,6 +115,19 @@
#define RUN_ALL_TESTS() \
::pw::unit_test::internal::Framework::Get().RunAllTests()
+// Death tests are not supported. The *_DEATH_IF_SUPPORTED macros do nothing.
+#define GTEST_HAS_DEATH_TEST 0
+
+#define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
+ if (0) { \
+ static_cast<void>(statement); \
+ static_cast<void>(regex); \
+ } \
+ static_assert(true, "Macros must be terminated with a semicolon")
+
+#define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \
+ EXPECT_DEATH_IF_SUPPORTED(statement, regex)
+
namespace pw {
#if PW_CXX_STANDARD_IS_SUPPORTED(17)
@@ -200,6 +218,9 @@ class Framework {
bool ShouldRunTest(const TestInfo& test_info) const;
+ // Whether the current test is skipped.
+ bool IsSkipped() const { return current_result_ == TestResult::kSkipped; }
+
// Constructs an instance of a unit test class and runs the test.
//
// Tests are constructed within a static memory pool at run time instead of
@@ -268,6 +289,9 @@ class Framework {
return success;
}
+ // Skips the current test and dispatches an event for it.
+ void CurrentTestSkip(int line);
+
// Dispatches an event indicating the result of an expectation.
void CurrentTestExpectSimple(const char* expression,
const char* evaluated_expression,
@@ -307,7 +331,7 @@ class Framework {
// The current test case which is running.
const TestInfo* current_test_;
- // Overall result of the current test case (pass/fail).
+ // Overall result of the current test case (pass/fail/skip).
TestResult current_result_;
// Overall result of the ongoing test run, which covers multiple tests.
@@ -392,7 +416,10 @@ class Test {
// Runs the unit test.
void PigweedTestRun() {
SetUp();
- PigweedTestBody();
+ // TODO(deymo): Skip the test body if there's a fatal error in SetUp().
+ if (!Framework::Get().IsSkipped()) {
+ PigweedTestBody();
+ }
TearDown();
}
diff --git a/pw_unit_test/simple_printing_event_handler.cc b/pw_unit_test/simple_printing_event_handler.cc
index ab5cd74d2..5a84d578e 100644
--- a/pw_unit_test/simple_printing_event_handler.cc
+++ b/pw_unit_test/simple_printing_event_handler.cc
@@ -28,6 +28,9 @@ void SimplePrintingEventHandler::RunAllTestsEnd(
const RunTestsSummary& run_tests_summary) {
WriteLine("[==========] Done running all tests.");
WriteLine("[ PASSED ] %d test(s).", run_tests_summary.passed_tests);
+ if (run_tests_summary.skipped_tests) {
+ WriteLine("[ SKIPPED ] %d test(s).", run_tests_summary.skipped_tests);
+ }
if (run_tests_summary.failed_tests) {
WriteLine("[ FAILED ] %d test(s).", run_tests_summary.failed_tests);
}
@@ -49,6 +52,10 @@ void SimplePrintingEventHandler::TestCaseEnd(const TestCase& test_case,
WriteLine(
"[ FAILED ] %s.%s", test_case.suite_name, test_case.test_name);
break;
+ case TestResult::kSkipped:
+ WriteLine(
+ "[ SKIPPED ] %s.%s", test_case.suite_name, test_case.test_name);
+ break;
}
}
diff --git a/pw_watch/docs.rst b/pw_watch/docs.rst
index 94db83706..306bec2b9 100644
--- a/pw_watch/docs.rst
+++ b/pw_watch/docs.rst
@@ -48,8 +48,8 @@ override this behavior, provide the ``-C`` argument to ``pw watch``.
# Build the default target and start a docs server on http://127.0.0.1:5555
pw watch --serve-docs --serve-docs-port=5555
- # Build the default target and start a terminal application for showing logs
- pw watch --watch-app
+ # Build with a full screen terminal user interface similar to pw_console.
+ pw watch --fullscreen
``pw watch`` only rebuilds when a file that is not ignored by Git changes.
Adding exclusions to a ``.gitignore`` causes watch to ignore them, even if the
diff --git a/pw_watch/py/pw_watch/debounce.py b/pw_watch/py/pw_watch/debounce.py
index 82a52662c..3417b1e67 100644
--- a/pw_watch/py/pw_watch/debounce.py
+++ b/pw_watch/py/pw_watch/debounce.py
@@ -18,7 +18,7 @@ import logging
import threading
from abc import ABC, abstractmethod
-_LOG = logging.getLogger(__package__)
+_LOG = logging.getLogger('pw_watch')
class DebouncedFunction(ABC):
diff --git a/pw_watch/py/pw_watch/watch.py b/pw_watch/py/pw_watch/watch.py
index 59e384ac5..fe495efba 100755
--- a/pw_watch/py/pw_watch/watch.py
+++ b/pw_watch/py/pw_watch/watch.py
@@ -38,26 +38,38 @@ Usage examples:
import argparse
from dataclasses import dataclass
import errno
+from itertools import zip_longest
import logging
import os
from pathlib import Path
+import re
import shlex
import subprocess
import sys
import threading
from threading import Thread
-from typing import (Iterable, List, NamedTuple, NoReturn, Optional, Sequence,
- Tuple)
+from typing import (
+ Iterable,
+ List,
+ NamedTuple,
+ NoReturn,
+ Optional,
+ Sequence,
+ Tuple,
+)
import httpwatcher # type: ignore
from watchdog.events import FileSystemEventHandler # type: ignore[import]
from watchdog.observers import Observer # type: ignore[import]
-import pw_cli.log
+from prompt_toolkit.formatted_text.base import OneStyleAndTextTuple
+from prompt_toolkit.formatted_text import StyleAndTextTuples
+
import pw_cli.branding
import pw_cli.color
import pw_cli.env
+import pw_cli.log
import pw_cli.plugins
import pw_console.python_logging
@@ -65,7 +77,8 @@ from pw_watch.watch_app import WatchApp
from pw_watch.debounce import DebouncedFunction, Debouncer
_COLOR = pw_cli.color.colors()
-_LOG = logging.getLogger(__package__)
+_LOG = logging.getLogger('pw_watch')
+_NINJA_LOG = logging.getLogger('pw_watch_ninja_output')
_ERRNO_INOTIFY_LIMIT_REACHED = 28
# Suppress events under 'fsevents', generated by watchdog on every file
@@ -97,6 +110,8 @@ _FAIL_MESSAGE = """
░ ░ ░ ░ ░
"""
+_FULLSCREEN_STATUS_COLUMN_WIDTH = 10
+
# TODO(keir): Figure out a better strategy for exiting. The problem with the
# watcher is that doing a "clean exit" is slow. However, by directly exiting,
@@ -157,8 +172,10 @@ def git_ignored(file: Path) -> bool:
class PigweedBuildWatcher(FileSystemEventHandler, DebouncedFunction):
"""Process filesystem events and launch builds if necessary."""
+ # pylint: disable=too-many-instance-attributes
+ NINJA_BUILD_STEP = re.compile(
+ r'^\[(?P<step>[0-9]+)/(?P<total_steps>[0-9]+)\] (?P<action>.*)$')
- # pylint: disable=R0902 # too many instance attributes
def __init__(
self,
build_commands: Sequence[BuildCommand],
@@ -167,18 +184,26 @@ class PigweedBuildWatcher(FileSystemEventHandler, DebouncedFunction):
charset: WatchCharset = _ASCII_CHARSET,
restart: bool = True,
jobs: int = None,
- watch_app: bool = False,
+ fullscreen: bool = False,
+ banners: bool = True,
):
super().__init__()
+ self.banners = banners
+ self.status_message: Optional[OneStyleAndTextTuple] = None
+ self.result_message: Optional[StyleAndTextTuples] = None
self.current_stdout = ''
+ self.current_build_step = ''
+ self.current_build_percent = 0.0
+ self.current_build_errors = 0
self.patterns = patterns
self.ignore_patterns = ignore_patterns
self.build_commands = build_commands
self.charset: WatchCharset = charset
self.restart_on_changes = restart
- self.watch_app_enabled = watch_app
+ self.fullscreen_enabled = fullscreen
+ self.watch_app: Optional[WatchApp] = None
self._current_build: subprocess.Popen
self._extra_ninja_args = [] if jobs is None else [f'-j{jobs}']
@@ -190,16 +215,16 @@ class PigweedBuildWatcher(FileSystemEventHandler, DebouncedFunction):
self.matching_path: Optional[Path] = None
self.builds_succeeded: List[bool] = []
- if not self.watch_app_enabled:
+ if not self.fullscreen_enabled:
self.wait_for_keypress_thread = threading.Thread(
None, self._wait_for_enter)
self.wait_for_keypress_thread.start()
def rebuild(self):
- """ Manual rebuild command triggered from watch app."""
+ """ Rebuild command triggered from watch app."""
self._current_build.terminate()
self._current_build.wait()
- self.debouncer.press('Manual build requested...')
+ self.debouncer.press('Manual build requested')
def _wait_for_enter(self) -> NoReturn:
try:
@@ -245,8 +270,18 @@ class PigweedBuildWatcher(FileSystemEventHandler, DebouncedFunction):
if self.matching_path is None:
self.matching_path = matching_path
- self.debouncer.press(
- f'File change detected: {os.path.relpath(matching_path)}')
+ log_message = f'File change detected: {os.path.relpath(matching_path)}'
+ if self.restart_on_changes:
+ if self.fullscreen_enabled and self.watch_app:
+ self.watch_app.rebuild_on_filechange()
+ self.debouncer.press(f'{log_message} Triggering build...')
+ else:
+ _LOG.info('%s ; not rebuilding', log_message)
+
+ def _clear_screen(self) -> None:
+ if not self.fullscreen_enabled:
+ print('\033c', end='') # TODO(pwbug/38): Not Windows compatible.
+ sys.stdout.flush()
# Implementation of DebouncedFunction.run()
#
@@ -257,19 +292,24 @@ class PigweedBuildWatcher(FileSystemEventHandler, DebouncedFunction):
"""Run all the builds in serial and capture pass/fail for each."""
# Clear the screen and show a banner indicating the build is starting.
- if not self.watch_app_enabled:
- print('\033c', end='') # TODO(pwbug/38): Not Windows compatible.
- # TODO: fix the banner function to add the color on each line
- for line in pw_cli.branding.banner().splitlines():
- _LOG.info(line)
- _LOG.info(
- _COLOR.green(
- ' Watching for changes. Ctrl-C to exit; enter to rebuild'))
+ self._clear_screen()
+
+ if self.fullscreen_enabled:
+ self.create_result_message()
+ _LOG.info(
+ _COLOR.green(
+ 'Watching for changes. Ctrl-d to exit; enter to rebuild'))
+ else:
+ for line in pw_cli.branding.banner().splitlines():
+ _LOG.info(line)
+ _LOG.info(
+ _COLOR.green(
+ ' Watching for changes. Ctrl-C to exit; enter to rebuild')
+ )
_LOG.info('')
_LOG.info('Change detected: %s', self.matching_path)
- if not self.watch_app_enabled:
- print('\033c', end='') # TODO(pwbug/38): Not Windows compatible.
- sys.stdout.flush()
+
+ self._clear_screen()
self.builds_succeeded = []
num_builds = len(self.build_commands)
@@ -278,11 +318,12 @@ class PigweedBuildWatcher(FileSystemEventHandler, DebouncedFunction):
env = os.environ.copy()
# Force colors in Pigweed subcommands run through the watcher.
env['PW_USE_COLOR'] = '1'
+ # Force Ninja to output ANSI colors
+ env['CLICOLOR_FORCE'] = '1'
for i, cmd in enumerate(self.build_commands, 1):
index = f'[{i}/{num_builds}]'
self.builds_succeeded.append(self._run_build(index, cmd, env))
-
if self.builds_succeeded[-1]:
level = logging.INFO
tag = '(OK)'
@@ -291,6 +332,33 @@ class PigweedBuildWatcher(FileSystemEventHandler, DebouncedFunction):
tag = '(FAIL)'
_LOG.log(level, '%s Finished build: %s %s', index, cmd, tag)
+ self.create_result_message()
+
+ def create_result_message(self):
+ if not self.fullscreen_enabled:
+ return
+
+ self.result_message = []
+ first_building_target_found = False
+ for (succeeded, command) in zip_longest(self.builds_succeeded,
+ self.build_commands):
+ if succeeded:
+ self.result_message.append(
+ ('class:theme-fg-green',
+ 'OK'.rjust(_FULLSCREEN_STATUS_COLUMN_WIDTH)))
+ elif succeeded is None and not first_building_target_found:
+ first_building_target_found = True
+ self.result_message.append(
+ ('class:theme-fg-yellow',
+ 'Building'.rjust(_FULLSCREEN_STATUS_COLUMN_WIDTH)))
+ elif first_building_target_found:
+ self.result_message.append(
+ ('', ''.rjust(_FULLSCREEN_STATUS_COLUMN_WIDTH)))
+ else:
+ self.result_message.append(
+ ('class:theme-fg-red',
+ 'Failed'.rjust(_FULLSCREEN_STATUS_COLUMN_WIDTH)))
+ self.result_message.append(('', f' {command}\n'))
def _run_build(self, index: str, cmd: BuildCommand, env: dict) -> bool:
# Make sure there is a build.ninja file for Ninja to use.
@@ -315,7 +383,11 @@ class PigweedBuildWatcher(FileSystemEventHandler, DebouncedFunction):
def _execute_command(self, command: list, env: dict) -> bool:
"""Runs a command with a blank before/after for visual separation."""
- if self.watch_app_enabled:
+ self.current_build_errors = 0
+ self.status_message = (
+ 'class:theme-fg-yellow',
+ 'Building'.rjust(_FULLSCREEN_STATUS_COLUMN_WIDTH))
+ if self.fullscreen_enabled:
return self._execute_command_watch_app(command, env)
print()
self._current_build = subprocess.Popen(command, env=env)
@@ -325,6 +397,8 @@ class PigweedBuildWatcher(FileSystemEventHandler, DebouncedFunction):
def _execute_command_watch_app(self, command: list, env: dict) -> bool:
"""Runs a command with and outputs the logs."""
+ if not self.watch_app:
+ return False
self.current_stdout = ''
returncode = None
with subprocess.Popen(command,
@@ -333,15 +407,50 @@ class PigweedBuildWatcher(FileSystemEventHandler, DebouncedFunction):
stderr=subprocess.STDOUT,
errors='replace') as proc:
self._current_build = proc
- self.current_stdout += 'START\n'
+
+ # Empty line at the start.
+ _NINJA_LOG.info('')
while returncode is None:
- if proc.stdout:
- output = proc.stdout.readline()
- if output:
- self.current_stdout += output
+ if not proc.stdout:
+ continue
+
+ output = proc.stdout.readline()
+ self.current_stdout += output
+
+ line_match_result = self.NINJA_BUILD_STEP.match(output)
+ if line_match_result:
+ matches = line_match_result.groupdict()
+ self.current_build_step = line_match_result.group(0)
+ self.current_build_percent = float(
+ int(matches.get('step', 0)) /
+ int(matches.get('total_steps', 1)))
+
+ elif output.startswith(WatchApp.NINJA_FAILURE_TEXT):
+ _NINJA_LOG.critical(output.strip())
+ self.current_build_errors += 1
+
+ else:
+ # Mypy output mixes character encoding in its colored output
+ # due to it's use of the curses module retrieving the 'sgr0'
+ # (or exit_attribute_mode) capability from the host
+ # machine's terminfo database.
+ #
+ # This can result in this sequence ending up in STDOUT as
+ # b'\x1b(B\x1b[m'. (B tells terminals to interpret text as
+ # USASCII encoding but will appear in prompt_toolkit as a B
+ # character.
+ #
+ # The following replace calls will strip out those
+ # instances.
+ _NINJA_LOG.info(
+ output.replace('\x1b(B\x1b[m',
+ '').replace('\x1b[1m', '').strip())
+ self.watch_app.redraw_ui()
returncode = proc.poll()
- self.current_stdout += 'CHECK DONE\n'
+ # Empty line at the end.
+ _NINJA_LOG.info('')
+
return returncode == 0
# Implementation of DebouncedFunction.cancel()
@@ -357,43 +466,59 @@ class PigweedBuildWatcher(FileSystemEventHandler, DebouncedFunction):
def on_complete(self, cancelled: bool = False) -> None:
# First, use the standard logging facilities to report build status.
if cancelled:
+ self.status_message = (
+ '', 'Cancelled'.rjust(_FULLSCREEN_STATUS_COLUMN_WIDTH))
_LOG.error('Finished; build was interrupted')
elif all(self.builds_succeeded):
+ self.status_message = (
+ 'class:theme-fg-green',
+ 'Succeeded'.rjust(_FULLSCREEN_STATUS_COLUMN_WIDTH))
_LOG.info('Finished; all successful')
else:
+ self.status_message = (
+ 'class:theme-fg-red',
+ 'Failed'.rjust(_FULLSCREEN_STATUS_COLUMN_WIDTH))
_LOG.info('Finished; some builds failed')
- # Then, show a more distinct colored banner.
- if not cancelled:
- # Write out build summary table so you can tell which builds passed
- # and which builds failed.
- _LOG.info('')
- _LOG.info(' .------------------------------------')
- _LOG.info(' |')
- for (succeeded, cmd) in zip(self.builds_succeeded,
- self.build_commands):
- slug = (self.charset.slug_ok
- if succeeded else self.charset.slug_fail)
- _LOG.info(' | %s %s', slug, cmd)
- _LOG.info(' |')
- _LOG.info(" '------------------------------------")
- else:
- # Build was interrupted.
- _LOG.info('')
- _LOG.info(' .------------------------------------')
- _LOG.info(' |')
- _LOG.info(' | %s- interrupted', self.charset.slug_fail)
- _LOG.info(' |')
- _LOG.info(" '------------------------------------")
-
- # Show a large color banner so it is obvious what the overall result is.
- if all(self.builds_succeeded) and not cancelled:
- for line in _PASS_MESSAGE.splitlines():
- _LOG.info(_COLOR.green(line))
+ # Show individual build results for fullscreen app
+ if self.fullscreen_enabled:
+ self.create_result_message()
+ # For non-fullscreen pw watch
else:
- for line in _FAIL_MESSAGE.splitlines():
- _LOG.info(_COLOR.red(line))
-
+ # Show a more distinct colored banner.
+ if not cancelled:
+ # Write out build summary table so you can tell which builds
+ # passed and which builds failed.
+ _LOG.info('')
+ _LOG.info(' .------------------------------------')
+ _LOG.info(' |')
+ for (succeeded, cmd) in zip(self.builds_succeeded,
+ self.build_commands):
+ slug = (self.charset.slug_ok
+ if succeeded else self.charset.slug_fail)
+ _LOG.info(' | %s %s', slug, cmd)
+ _LOG.info(' |')
+ _LOG.info(" '------------------------------------")
+ else:
+ # Build was interrupted.
+ _LOG.info('')
+ _LOG.info(' .------------------------------------')
+ _LOG.info(' |')
+ _LOG.info(' | %s- interrupted', self.charset.slug_fail)
+ _LOG.info(' |')
+ _LOG.info(" '------------------------------------")
+
+ # Show a large color banner for the overall result.
+ if self.banners:
+ if all(self.builds_succeeded) and not cancelled:
+ for line in _PASS_MESSAGE.splitlines():
+ _LOG.info(_COLOR.green(line))
+ else:
+ for line in _FAIL_MESSAGE.splitlines():
+ _LOG.info(_COLOR.red(line))
+
+ if self.watch_app:
+ self.watch_app.redraw_ui()
self.matching_path = None
# Implementation of DebouncedFunction.on_keyboard_interrupt()
@@ -496,11 +621,18 @@ def add_parser_arguments(parser: argparse.ArgumentParser) -> None:
'--jobs',
type=int,
help="Number of cores to use; defaults to Ninja's default")
- parser.add_argument('--watch-app',
- dest='watch_app',
+ parser.add_argument('-f',
+ '--fullscreen',
action='store_true',
default=False,
- help='Start the watch app console.')
+ help='Use a fullscreen interface.')
+ parser.add_argument('--debug-logging',
+ action='store_true',
+ help='Enable debug logging.')
+ parser.add_argument('--no-banners',
+ dest='banners',
+ action='store_false',
+ help='Hide pass/fail banners.')
def _exit(code: int) -> NoReturn:
@@ -648,12 +780,21 @@ def get_common_excludes() -> List[Path]:
return exclude_list
-# pylint: disable=R0913, R0914 # too many arguments and local variables
-def watch_setup(default_build_targets: List[str], build_directories: List[str],
- patterns: str, ignore_patterns_string: str,
- exclude_list: List[Path], restart: bool, jobs: Optional[int],
- serve_docs: bool, serve_docs_port: int, serve_docs_path: Path,
- watch_app: bool):
+def watch_setup(
+ default_build_targets: List[str],
+ build_directories: List[str],
+ patterns: str,
+ ignore_patterns_string: str,
+ exclude_list: List[Path],
+ restart: bool,
+ jobs: Optional[int],
+ serve_docs: bool,
+ serve_docs_port: int,
+ serve_docs_path: Path,
+ fullscreen: bool,
+ banners: bool,
+ # pylint: disable=too-many-arguments
+) -> Tuple[str, PigweedBuildWatcher, List[Path]]:
"""Watches files and runs Ninja commands when they change."""
_LOG.info('Starting Pigweed build watcher')
@@ -666,6 +807,9 @@ def watch_setup(default_build_targets: List[str], build_directories: List[str],
# Preset exclude list for pigweed directory.
exclude_list += get_common_excludes()
+ # Add build directories to the exclude list.
+ exclude_list.extend(
+ Path(build_dir[0]).resolve() for build_dir in build_directories)
build_commands = [
BuildCommand(Path(build_dir[0]), tuple(build_dir[1:]))
@@ -729,12 +873,12 @@ def watch_setup(default_build_targets: List[str], build_directories: List[str],
charset=charset,
restart=restart,
jobs=jobs,
- watch_app=watch_app,
+ fullscreen=fullscreen,
+ banners=banners,
)
return path_to_log, event_handler, exclude_list
-# pylint: disable=R0914 # too many local variables
def watch(path_to_log: Path, event_handler: PigweedBuildWatcher,
exclude_list: List[Path]):
"""Watches files and runs Ninja commands when they change."""
@@ -786,10 +930,25 @@ def main() -> None:
formatter_class=argparse.RawDescriptionHelpFormatter)
add_parser_arguments(parser)
args = parser.parse_args()
- path_to_log, event_handler, exclude_list = watch_setup(**vars(args))
- if args.watch_app:
- watch_logfile = pw_console.python_logging.create_temp_log_file()
+ path_to_log, event_handler, exclude_list = watch_setup(
+ default_build_targets=args.default_build_targets,
+ build_directories=args.build_directories,
+ patterns=args.patterns,
+ ignore_patterns_string=args.ignore_patterns_string,
+ exclude_list=args.exclude_list,
+ restart=args.restart,
+ jobs=args.jobs,
+ serve_docs=args.serve_docs,
+ serve_docs_port=args.serve_docs_port,
+ serve_docs_path=args.serve_docs_path,
+ fullscreen=args.fullscreen,
+ banners=args.banners,
+ )
+
+ if args.fullscreen:
+ watch_logfile = (pw_console.python_logging.create_temp_log_file(
+ prefix=__package__))
pw_cli.log.install(
level=logging.DEBUG,
use_color=True,
@@ -803,12 +962,19 @@ def main() -> None:
args=(path_to_log, event_handler, exclude_list),
daemon=True)
watch_thread.start()
- watch_app = WatchApp(startup_args=vars(args),
- event_handler=event_handler,
- loggers=[_LOG])
+ watch_app = WatchApp(event_handler=event_handler,
+ debug_logging=args.debug_logging,
+ log_file_name=watch_logfile)
+
+ event_handler.watch_app = watch_app
watch_app.run()
else:
- watch(path_to_log, event_handler, exclude_list)
+ pw_cli.log.install(
+ level=logging.DEBUG if args.debug_logging else logging.INFO,
+ use_color=True,
+ hide_timestamp=False,
+ )
+ watch(Path(path_to_log), event_handler, exclude_list)
if __name__ == '__main__':
diff --git a/pw_watch/py/pw_watch/watch_app.py b/pw_watch/py/pw_watch/watch_app.py
index 4a530e785..61f7fd99c 100644
--- a/pw_watch/py/pw_watch/watch_app.py
+++ b/pw_watch/py/pw_watch/watch_app.py
@@ -13,225 +13,360 @@
# License for the specific language governing permissions and limitations under
# the License.
""" Prompt toolkit application for pw watch. """
-from typing import Any
-from pygments.lexers.special import TextLexer
+
+import asyncio
+import logging
+from pathlib import Path
+import re
+import sys
+from typing import List, NoReturn, Optional
+
from prompt_toolkit.application import Application
from prompt_toolkit.clipboard.pyperclip import PyperclipClipboard
-from prompt_toolkit.document import Document
+from prompt_toolkit.filters import Condition
from prompt_toolkit.history import (
FileHistory,
History,
ThreadedHistory,
)
-from prompt_toolkit.key_binding import KeyBindings, merge_key_bindings
+from prompt_toolkit.key_binding import (
+ KeyBindings,
+ KeyBindingsBase,
+ merge_key_bindings,
+)
from prompt_toolkit.layout import (
Dimension,
DynamicContainer,
+ Float,
+ FloatContainer,
FormattedTextControl,
HSplit,
Layout,
Window,
)
-from prompt_toolkit.lexers import PygmentsLexer
-from prompt_toolkit.widgets import SearchToolbar, TextArea
-from prompt_toolkit.styles import DynamicStyle, merge_styles
+from prompt_toolkit.layout.controls import BufferControl
+from prompt_toolkit.styles import DynamicStyle, merge_styles, Style
+from prompt_toolkit.formatted_text import StyleAndTextTuples
+
from pw_console.console_app import get_default_colordepth
from pw_console.console_prefs import ConsolePrefs
from pw_console.get_pw_console_app import PW_CONSOLE_APP_CONTEXTVAR
from pw_console.log_pane import LogPane
+from pw_console.plugin_mixin import PluginMixin
+from pw_console.quit_dialog import QuitDialog
import pw_console.style
-from pw_console.widgets import ToolbarButton, WindowPane, WindowPaneToolbar
+import pw_console.widgets.border
from pw_console.window_manager import WindowManager
-from pw_console.plugin_mixin import PluginMixin
+
+_NINJA_LOG = logging.getLogger('pw_watch_ninja_output')
+_LOG = logging.getLogger('pw_watch')
class WatchWindowManager(WindowManager):
def update_root_container_body(self):
- self.application.window_manager_container = self.create_root_container(
- )
+ self.application.window_manager_container = (
+ self.create_root_container())
-class RunHistoryPane(WindowPane):
- """Pigweed Console plugin window pane.
- Accepts text input in one window split and displays the output in another.
- """
- def __init__(
- self,
- application: Any,
- pane_title: str = 'Run History',
- ):
- super().__init__(pane_title=pane_title, application=application)
- self.application = application
- self.search_field = SearchToolbar() # For reverse search (ctrl-r)
- self.output_field = TextArea(
- style='class:output-field',
- text='pw_watch',
- focusable=True,
- focus_on_click=True,
- scrollbar=True,
- line_numbers=False,
- search_field=self.search_field,
- lexer=PygmentsLexer(TextLexer),
- )
- key_bindings = KeyBindings()
+class WatchApp(PluginMixin):
+ """Pigweed Watch main window application."""
+ # pylint: disable=too-many-instance-attributes
- self.output_field.control.key_bindings = key_bindings
- # Setup the bottom toolbar
- self.bottom_toolbar = WindowPaneToolbar(self)
- self.bottom_toolbar.add_button(
- ToolbarButton('Enter', 'Run Build', self.run_build))
- self.bottom_toolbar.add_button(
- ToolbarButton(description='Copy Output',
- mouse_handler=self.copy_all_output))
- self.bottom_toolbar.add_button(
- ToolbarButton('q', 'Quit', self.application.quit))
- self.container = self._create_pane_container(
- self.output_field,
- self.search_field,
- self.bottom_toolbar,
- )
+ NINJA_FAILURE_TEXT = '\033[31mFAILED: '
- # Trigger text input, same as hitting enter inside sefl.input_field.
- def run_build(self):
- self.application.run_build()
+ NINJA_BUILD_STEP = re.compile(
+ r"^\[(?P<step>[0-9]+)/(?P<total_steps>[0-9]+)\] (?P<action>.*)$")
- def update_output(self, text: str):
- # Add text to output buffer.
- cursor_position = self.output_field.document.cursor_position
- if cursor_position > len(text):
- cursor_position = len(text)
- self.output_field.buffer.set_document(
- Document(text=text, cursor_position=cursor_position))
+ def __init__(self,
+ event_handler,
+ debug_logging: bool = False,
+ log_file_name: Optional[str] = None):
- def copy_selected_output(self):
- clipboard_data = self.output_field.buffer.copy_selection()
- self.application.application.clipboard.set_data(clipboard_data)
+ self.event_handler = event_handler
- def copy_all_output(self):
- self.application.application.clipboard.set_text(
- self.output_field.buffer.text)
+ self.external_logfile: Optional[Path] = (Path(log_file_name)
+ if log_file_name else None)
+ self.color_depth = get_default_colordepth()
+ # Necessary for some of pw_console's window manager features to work
+ # such as mouse drag resizing.
+ PW_CONSOLE_APP_CONTEXTVAR.set(self) # type: ignore
-class WatchApp(PluginMixin):
- """Pigweed Watch main window application."""
+ self.prefs = ConsolePrefs()
- # pylint: disable=R0902 # too many instance attributes
- def __init__(self, startup_args, event_handler, loggers):
- # watch startup args
- self.startup_args = startup_args
- self.event_handler = event_handler
- self.color_depth = get_default_colordepth()
+ self.quit_dialog = QuitDialog(self, self.exit) # type: ignore
- PW_CONSOLE_APP_CONTEXTVAR.set(self)
- self.prefs = ConsolePrefs(project_file=False, user_file=False)
self.search_history_filename = self.prefs.search_history
# History instance for search toolbars.
self.search_history: History = ThreadedHistory(
- FileHistory(self.search_history_filename))
+ FileHistory(str(self.search_history_filename)))
self.window_manager = WatchWindowManager(self)
pw_console.python_logging.setup_python_logging()
- #log pane
- log_pane = LogPane(application=self, pane_title='log pane')
- for logger in loggers:
- log_pane.add_log_handler(logger, level_name='INFO')
- # disable table view to allow teriminal based coloring
- log_pane.table_view = False
- self.run_history_pane = RunHistoryPane(self)
+ self._build_error_count = 0
+ self._errors_in_output = False
+
+ self.ninja_log_pane = LogPane(application=self,
+ pane_title='Pigweed Watch')
+ self.ninja_log_pane.add_log_handler(_NINJA_LOG, level_name='INFO')
+ self.ninja_log_pane.add_log_handler(
+ _LOG, level_name=('DEBUG' if debug_logging else 'INFO'))
+ # Set python log format to just the message itself.
+ self.ninja_log_pane.log_view.log_store.formatter = logging.Formatter(
+ '%(message)s')
+ self.ninja_log_pane.table_view = False
+ # Enable line wrapping
+ self.ninja_log_pane.toggle_wrap_lines()
+ # Blank right side toolbar text
+ self.ninja_log_pane._pane_subtitle = ' '
+ self.ninja_log_view = self.ninja_log_pane.log_view
+
+ # Make tab and shift-tab search for next and previous error
+ next_error_bindings = KeyBindings()
+
+ @next_error_bindings.add('s-tab')
+ def _previous_error(_event):
+ self.jump_to_error(backwards=True)
+
+ @next_error_bindings.add('tab')
+ def _next_error(_event):
+ self.jump_to_error()
- # Bottom Window: pw watch log messages
- self.window_manager.add_pane(log_pane)
- # Top Window: Ninja build output
- self.window_manager.add_pane(self.run_history_pane)
+ existing_log_bindings: Optional[KeyBindingsBase] = (
+ self.ninja_log_pane.log_content_control.key_bindings)
+
+ key_binding_list: List[KeyBindingsBase] = []
+ if existing_log_bindings:
+ key_binding_list.append(existing_log_bindings)
+ key_binding_list.append(next_error_bindings)
+ self.ninja_log_pane.log_content_control.key_bindings = (
+ merge_key_bindings(key_binding_list))
+
+ self.window_manager.add_pane(self.ninja_log_pane)
self.window_manager_container = (
self.window_manager.create_root_container())
- self.root_container = HSplit([
- # The top toolbar.
- Window(
- content=FormattedTextControl(self.get_statusbar_text),
- height=Dimension.exact(1),
- style='',
- ),
- # The main content.
- DynamicContainer(lambda: self.window_manager_container),
- ])
- key_bindings = KeyBindings()
- @key_bindings.add('c-c')
- @key_bindings.add("q")
- def _quit(event):
- "Quit."
- loggers[0].info('Got quit signal; exiting...')
- event.app.exit()
+ self.status_bar_border_style = 'class:command-runner-border'
- @key_bindings.add("enter")
- def _quit(_):
+ self.root_container = FloatContainer(
+ HSplit([
+ pw_console.widgets.border.create_border(
+ HSplit([
+ # The top toolbar.
+ Window(
+ content=FormattedTextControl(
+ self.get_statusbar_text),
+ height=Dimension.exact(1),
+ style='class:toolbar_inactive',
+ ),
+ # Result Toolbar.
+ Window(
+ content=FormattedTextControl(
+ self.get_resultbar_text),
+ height=lambda: len(self.event_handler.
+ build_commands),
+ style='class:toolbar_inactive',
+ ),
+ ]),
+ border_style=lambda: self.status_bar_border_style,
+ base_style='class:toolbar_inactive',
+ left_margin_columns=1,
+ right_margin_columns=1,
+ ),
+ # The main content.
+ DynamicContainer(lambda: self.window_manager_container),
+ ]),
+ floats=[
+ Float(
+ content=self.quit_dialog,
+ top=2,
+ left=2,
+ ),
+ ],
+ )
+
+ key_bindings = KeyBindings()
+
+ @key_bindings.add('enter', filter=self.input_box_not_focused())
+ def _run_build(_event):
"Rebuild."
self.run_build()
- self.key_bindings = key_bindings
- self._current_theme = pw_console.style.generate_styles()
- self.application = Application(
- layout=Layout(self.root_container,
- focused_element=self.run_history_pane),
- key_bindings=merge_key_bindings([
- self.key_bindings,
- self.window_manager.key_bindings,
- ]),
- enable_page_navigation_bindings=True,
+ register = self.prefs.register_keybinding
+
+ @register('global.exit-no-confirmation', key_bindings)
+ def _quit_no_confirm(_event):
+ """Quit without confirmation."""
+ _LOG.info('Got quit signal; exiting...')
+ self.exit(0)
+
+ @register('global.exit-with-confirmation', key_bindings)
+ def _quit_with_confirm(_event):
+ """Quit with confirmation dialog."""
+ self.quit_dialog.open_dialog()
+
+ self.key_bindings = merge_key_bindings([
+ self.window_manager.key_bindings,
+ key_bindings,
+ ])
+
+ self.current_theme = pw_console.style.generate_styles(
+ self.prefs.ui_theme)
+ self.current_theme = merge_styles([
+ self.current_theme,
+ Style.from_dict({'search': 'bg:ansired ansiblack'}),
+ ])
+
+ self.layout = Layout(self.root_container,
+ focused_element=self.ninja_log_pane)
+
+ self.application: Application = Application(
+ layout=self.layout,
+ key_bindings=self.key_bindings,
mouse_support=True,
color_depth=self.color_depth,
clipboard=PyperclipClipboard(),
style=DynamicStyle(lambda: merge_styles([
- self._current_theme,
+ self.current_theme,
])),
full_screen=True,
)
self.plugin_init(
- plugin_callback=self.check_stdout,
- plugin_callback_frequency=1.0,
+ plugin_callback=self.check_build_status,
+ plugin_callback_frequency=0.5,
plugin_logger_name='pw_watch_stdout_checker',
)
+ def jump_to_error(self, backwards: bool = False) -> None:
+ if not self.ninja_log_pane.log_view.search_text:
+ self.ninja_log_pane.log_view.set_search_regex(
+ '^FAILED: ', False, None)
+ if backwards:
+ self.ninja_log_pane.log_view.search_backwards()
+ else:
+ self.ninja_log_pane.log_view.search_forwards()
+ self.ninja_log_pane.log_view.log_screen.reset_logs(
+ log_index=self.ninja_log_pane.log_view.log_index)
+
+ self.ninja_log_pane.log_view.move_selected_line_to_top()
+
def update_menu_items(self):
- # Required by the Window Manager Class.
- pass
+ """Required by the Window Manager Class."""
def redraw_ui(self):
"""Redraw the prompt_toolkit UI."""
if hasattr(self, 'application'):
- # Thread safe way of sending a repaint trigger to the input event
- # loop.
self.application.invalidate()
def focus_on_container(self, pane):
"""Set application focus to a specific container."""
self.application.layout.focus(pane)
+ def focused_window(self):
+ """Return the currently focused window."""
+ return self.application.layout.current_window
+
+ def command_runner_is_open(self) -> bool:
+ # pylint: disable=no-self-use
+ return False
+
+ def clear_ninja_log(self) -> None:
+ self.ninja_log_view.log_store.clear_logs()
+ self.ninja_log_view._restart_filtering() # pylint: disable=protected-access
+ self.ninja_log_view.view_mode_changed()
+
def run_build(self):
"""Manually trigger a rebuild."""
+ self.clear_ninja_log()
self.event_handler.rebuild()
+ def rebuild_on_filechange(self):
+ self.ninja_log_view.log_store.clear_logs()
+ self.ninja_log_view.view_mode_changed()
+
def get_statusbar_text(self):
- return [('class:logo', 'Pigweed Watch'),
- ('class:theme-fg-cyan',
- ' {} '.format(self.startup_args.get('build_directories')))]
+ status = self.event_handler.status_message
+ fragments = [('class:logo', 'Pigweed Watch')]
+ is_building = False
+ if status:
+ fragments = [status]
+ is_building = status[1].endswith('Building')
+ separator = ('', ' ')
+ self.status_bar_border_style = 'class:theme-fg-green'
+
+ if is_building:
+ percent = self.event_handler.current_build_percent
+ percent *= 100
+ fragments.append(separator)
+ fragments.append(('ansicyan', '{:.0f}%'.format(percent)))
+ self.status_bar_border_style = 'class:theme-fg-yellow'
+
+ if self.event_handler.current_build_errors > 0:
+ fragments.append(separator)
+ fragments.append(('', 'Errors:'))
+ fragments.append(
+ ('ansired', str(self.event_handler.current_build_errors)))
+ self.status_bar_border_style = 'class:theme-fg-red'
+
+ if is_building:
+ fragments.append(separator)
+ fragments.append(('', self.event_handler.current_build_step))
+
+ return fragments
+
+ def get_resultbar_text(self) -> StyleAndTextTuples:
+ result = self.event_handler.result_message
+ if not result:
+ result = [('', 'Loading...')]
+ return result
+
+ def exit(self, exit_code: int = 0) -> None:
+ log_file = self.external_logfile
+
+ def _really_exit(future: asyncio.Future) -> NoReturn:
+ if log_file:
+ # Print a message showing where logs were saved to.
+ print('Logs saved to: {}'.format(log_file.resolve()))
+ sys.exit(future.result())
- def quit(self):
- self.application.exit()
+ if self.application.future:
+ self.application.future.add_done_callback(_really_exit)
+ self.application.exit(result=exit_code)
- def check_stdout(self) -> bool:
- if self.event_handler.current_stdout:
- self.run_history_pane.update_output(
- self.event_handler.current_stdout)
+ def check_build_status(self) -> bool:
+ if not self.event_handler.current_stdout:
+ return False
+
+ if self._errors_in_output:
return True
- return False
+
+ if self.event_handler.current_build_errors > self._build_error_count:
+ self._errors_in_output = True
+ self.jump_to_error()
+
+ return True
def run(self):
self.plugin_start()
# Run the prompt_toolkit application
- self.application.run()
+ self.application.run(set_exception_handler=True)
+
+ def input_box_not_focused(self) -> Condition:
+ """Condition checking the focused control is not a text input field."""
+ @Condition
+ def _test() -> bool:
+ """Check if the currently focused control is an input buffer.
+
+ Returns:
+ bool: True if the currently focused control is not a text input
+ box. For example if the user presses enter when typing in
+ the search box, return False.
+ """
+ return not isinstance(self.application.layout.current_control,
+ BufferControl)
+
+ return _test
diff --git a/pw_web_ui/BUILD.bazel b/pw_web_ui/BUILD.bazel
index 97c30cade..1fe99a2ec 100644
--- a/pw_web_ui/BUILD.bazel
+++ b/pw_web_ui/BUILD.bazel
@@ -22,29 +22,17 @@ load("@rules_proto_grpc//js:defs.bzl", "js_proto_library")
package(default_visibility = ["//visibility:public"])
-filegroup(
- name = "echo_service",
- srcs = [
- "@//pw_rpc:echo",
- ],
-)
-
-proto_library(
- name = "rpc_protos",
- srcs = [
- ":echo_service",
- ],
-)
-
js_proto_library(
name = "rpc_protos_tspb",
- protos = [":rpc_protos"],
+ protos = [
+ "//pw_rpc:echo_proto",
+ ],
)
ts_proto_collection(
name = "web_proto_collection",
js_proto_library = "@//pw_web_ui:rpc_protos_tspb",
- proto_library = "@//pw_web_ui:rpc_protos",
+ proto_library = "@//pw_rpc:echo_proto",
)
ts_project(
diff --git a/targets/default_config.BUILD b/targets/default_config.BUILD
index fd1a1e752..2ed1d01f1 100644
--- a/targets/default_config.BUILD
+++ b/targets/default_config.BUILD
@@ -20,6 +20,11 @@ label_flag(
)
label_flag(
+ name = "pw_log_string_handler_backend",
+ build_setting_default = "@pigweed//pw_log_string:handler_backend_multiplexer",
+)
+
+label_flag(
name = "pw_assert_backend",
build_setting_default = "@pigweed//pw_assert:backend_multiplexer",
)
@@ -75,6 +80,11 @@ label_flag(
)
label_flag(
+ name = "pw_sync_timed_thread_notification_backend",
+ build_setting_default = "@pigweed//pw_sync:timed_thread_notification_backend_multiplexer",
+)
+
+label_flag(
name = "pw_interrupt_backend",
build_setting_default = "@pigweed//pw_interrupt:backend_multiplexer",
)
diff --git a/targets/emcraft_sf2_som/BUILD.bazel b/targets/emcraft_sf2_som/BUILD.bazel
new file mode 100644
index 000000000..26d04260a
--- /dev/null
+++ b/targets/emcraft_sf2_som/BUILD.bazel
@@ -0,0 +1,58 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load(
+ "//pw_build:pigweed.bzl",
+ "pw_cc_binary",
+ "pw_cc_library",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+pw_cc_library(
+ name = "pre_init",
+ srcs = [
+ "boot.cc",
+ "vector_table.c",
+ ],
+ hdrs = [
+ "config/FreeRTOSConfig.h",
+ "config/sf2_mss_hal_conf.h",
+ ],
+ deps = [
+ "//pw_boot",
+ "//pw_boot_cortex_m",
+ "//pw_malloc",
+ "//pw_preprocessor",
+ "//pw_string",
+ "//pw_sys_io_emcraft_sf2",
+ "//third_party/freertos",
+ "//third_party/smartfusion_mss",
+ ],
+)
+
+pw_cc_binary(
+ name = "demo",
+ srcs = [
+ "main.cc",
+ ],
+ deps = [
+ "//pw_thread:thread",
+ "//pw_thread:thread_core",
+ "//pw_thread_freertos:thread",
+ "//third_party/freertos",
+ ],
+)
diff --git a/targets/emcraft_sf2_som/BUILD.gn b/targets/emcraft_sf2_som/BUILD.gn
new file mode 100644
index 000000000..9d778698e
--- /dev/null
+++ b/targets/emcraft_sf2_som/BUILD.gn
@@ -0,0 +1,149 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+import("$dir_pw_docgen/docs.gni")
+import("$dir_pw_malloc/backend.gni")
+import("$dir_pw_system/system_target.gni")
+import("$dir_pw_third_party/smartfusion_mss/mss.gni")
+import("$dir_pw_tokenizer/backend.gni")
+import("$dir_pw_toolchain/generate_toolchain.gni")
+
+config("pw_malloc_active") {
+ if (pw_malloc_BACKEND != "") {
+ defines = [ "PW_MALLOC_ACTIVE=1" ]
+ }
+}
+
+if (current_toolchain != default_toolchain) {
+ pw_source_set("pre_init") {
+ configs = [ ":pw_malloc_active" ]
+ deps = [
+ "$dir_pw_boot",
+ "$dir_pw_boot_cortex_m",
+ "$dir_pw_malloc",
+ "$dir_pw_preprocessor",
+ "$dir_pw_string",
+ "$dir_pw_sys_io_emcraft_sf2",
+ "$dir_pw_system",
+ "$dir_pw_third_party/freertos",
+ ]
+ sources = [
+ "boot.cc",
+ "vector_table.c",
+ ]
+ }
+
+ config("config_includes") {
+ include_dirs = [ "config" ]
+ }
+
+ pw_source_set("sf2_mss_hal_config") {
+ public_configs = [ ":config_includes" ]
+ public =
+ [ "config/sf2_mss_hal_conf.h" ] # SKEYS likely want to put the MDDR
+ # config by cortex etc stuff here
+ }
+
+ pw_source_set("sf2_freertos_config") {
+ public_configs = [ ":config_includes" ]
+ public_deps = [ "$dir_pw_third_party/freertos:config_assert" ]
+ public = [ "config/FreeRTOSConfig.h" ]
+ }
+}
+
+# Configured for use with a first stage boot loader to configure DDR and
+# perform memory remapping.
+pw_system_target("emcraft_sf2_som") {
+ cpu = PW_SYSTEM_CPU.CORTEX_M3
+ scheduler = PW_SYSTEM_SCHEDULER.FREERTOS
+ link_deps = [ "$dir_pigweed/targets/emcraft_sf2_som:pre_init" ]
+
+ build_args = {
+ pw_log_BACKEND = dir_pw_log_tokenized
+ pw_tokenizer_GLOBAL_HANDLER_WITH_PAYLOAD_BACKEND =
+ "$dir_pw_system:log_backend.impl"
+ pw_third_party_freertos_CONFIG =
+ "$dir_pigweed/targets/emcraft_sf2_som:sf2_freertos_config"
+ pw_third_party_freertos_PORT = "$dir_pw_third_party/freertos:arm_cm3"
+ pw_sys_io_BACKEND = dir_pw_sys_io_emcraft_sf2
+
+ # Non-debug build for use with the boot loader.
+ pw_boot_cortex_m_LINK_CONFIG_DEFINES = [
+ "PW_BOOT_FLASH_BEGIN=0x00000200", # After vector table.
+
+ # TODO(skeys) Bootloader is capable of loading 16M of uncompressed code
+ # from SPI flash to external RAM. For now use the allocated eNVM flash
+ # (256K - Bootloader - InSystemProgrammer = 192K)
+ "PW_BOOT_FLASH_SIZE=0x30000",
+
+ # TODO(pwbug/219): Currently "pw_tokenizer/detokenize_test" requires at
+ # least 6K bytes in heap when using pw_malloc_freelist. The heap size
+ # required for tests should be investigated.
+ "PW_BOOT_HEAP_SIZE=4M",
+
+ # With external RAM remapped, we use the entire internal ram for the
+ # stack (64K).
+ "PW_BOOT_MIN_STACK_SIZE=1024K",
+
+ # Using external DDR RAM, we just need to make sure we go past our ROM
+ # sections.
+ "PW_BOOT_RAM_BEGIN=0xA1000000",
+
+ # We assume that the bootloader loaded all 16M of text.
+ "PW_BOOT_RAM_SIZE=48M",
+ "PW_BOOT_VECTOR_TABLE_BEGIN=0x00000000",
+ "PW_BOOT_VECTOR_TABLE_SIZE=512",
+ ]
+ }
+}
+
+# Debug target configured to work with MSS linker script and startup code.
+# TODO(skeys) Add linker script and config for debug builds using SoftConsole.
+pw_system_target("emcraft_sf2_som_debug") {
+ cpu = PW_SYSTEM_CPU.CORTEX_M3
+ scheduler = PW_SYSTEM_SCHEDULER.FREERTOS
+ link_deps = [ "$dir_pigweed/targets/emcraft_sf2_som:pre_init" ]
+
+ build_args = {
+ pw_log_BACKEND = dir_pw_log_tokenized
+ pw_tokenizer_GLOBAL_HANDLER_WITH_PAYLOAD_BACKEND =
+ "$dir_pw_system:log_backend.impl"
+ pw_third_party_freertos_CONFIG =
+ "$dir_pigweed/targets/emcraft_sf2_som:sf2_freertos_config"
+ pw_third_party_freertos_PORT = "$dir_pw_third_party/freertos:arm_cm3"
+ pw_sys_io_BACKEND = dir_pw_sys_io_emcraft_sf2
+
+ pw_boot_cortex_m_LINK_CONFIG_DEFINES = [
+ "PW_BOOT_FLASH_BEGIN=0x00000200",
+ "PW_BOOT_FLASH_SIZE=200K",
+
+ # TODO(pwbug/219): Currently "pw_tokenizer/detokenize_test" requires at
+ # least 6K bytes in heap when using pw_malloc_freelist. The heap size
+ # required for tests should be investigated.
+ "PW_BOOT_HEAP_SIZE=7K",
+ "PW_BOOT_MIN_STACK_SIZE=1K",
+ "PW_BOOT_RAM_BEGIN=0x20000000",
+ "PW_BOOT_RAM_SIZE=64K",
+ "PW_BOOT_VECTOR_TABLE_BEGIN=0x00000000",
+ "PW_BOOT_VECTOR_TABLE_SIZE=512",
+ ]
+ }
+}
+
+pw_doc_group("docs") {
+ sources = [ "target_docs.rst" ]
+}
diff --git a/targets/emcraft_sf2_som/boot.cc b/targets/emcraft_sf2_som/boot.cc
new file mode 100644
index 000000000..bb786c850
--- /dev/null
+++ b/targets/emcraft_sf2_som/boot.cc
@@ -0,0 +1,195 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_boot/boot.h"
+
+#include <array>
+
+#include "FreeRTOS.h"
+#include "config/sf2_mss_hal_conf.h"
+#include "m2sxxx.h"
+#include "pw_boot_cortex_m/boot.h"
+#include "pw_malloc/malloc.h"
+#include "pw_preprocessor/compiler.h"
+#include "pw_string/util.h"
+#include "pw_sys_io_emcraft_sf2/init.h"
+#include "pw_system/init.h"
+#include "system_m2sxxx.h"
+#include "task.h"
+
+#include liberosoc_CONFIG_FILE
+
+namespace {
+
+std::array<StackType_t, configMINIMAL_STACK_SIZE> freertos_idle_stack;
+StaticTask_t freertos_idle_tcb;
+
+std::array<StackType_t, configTIMER_TASK_STACK_DEPTH> freertos_timer_stack;
+StaticTask_t freertos_timer_tcb;
+
+std::array<char, configMAX_TASK_NAME_LEN> temp_thread_name_buffer;
+
+} // namespace
+
+// Functions needed when configGENERATE_RUN_TIME_STATS is on.
+extern "C" void configureTimerForRunTimeStats(void) {}
+extern "C" unsigned long getRunTimeCounterValue(void) { return 10 /* FIXME */; }
+// uwTick is an uint32_t incremented each Systick interrupt 1ms. uwTick is used
+// to execute HAL_Delay function.
+
+// Required for configCHECK_FOR_STACK_OVERFLOW.
+extern "C" void vApplicationStackOverflowHook(TaskHandle_t, char* pcTaskName) {
+ pw::string::Copy(pcTaskName, temp_thread_name_buffer);
+ PW_CRASH("Stack OVF for task %s", temp_thread_name_buffer.data());
+}
+
+// Required for configUSE_TIMERS: provides static storage for the FreeRTOS
+// timer service task. Must hand out the *timer* buffers; previously this
+// returned the idle-task buffers (and the idle hook returned the timer
+// buffers), so the two tasks ran on each other's stacks.
+extern "C" void vApplicationGetTimerTaskMemory(
+    StaticTask_t** ppxTimerTaskTCBBuffer,
+    StackType_t** ppxTimerTaskStackBuffer,
+    uint32_t* pulTimerTaskStackSize) {
+  *ppxTimerTaskTCBBuffer = &freertos_timer_tcb;
+  *ppxTimerTaskStackBuffer = freertos_timer_stack.data();
+  *pulTimerTaskStackSize = freertos_timer_stack.size();
+}
+
+// Required for configSUPPORT_STATIC_ALLOCATION: static storage for the
+// idle task.
+extern "C" void vApplicationGetIdleTaskMemory(
+    StaticTask_t** ppxIdleTaskTCBBuffer,
+    StackType_t** ppxIdleTaskStackBuffer,
+    uint32_t* pulIdleTaskStackSize) {
+  *ppxIdleTaskTCBBuffer = &freertos_idle_tcb;
+  *ppxIdleTaskStackBuffer = freertos_idle_stack.data();
+  *pulIdleTaskStackSize = freertos_idle_stack.size();
+}
+
+extern "C" void pw_boot_PreStaticMemoryInit() {
+#if SF2_MSS_NO_BOOTLOADER
+ SystemInit();
+ // Initialize DDR
+ // inclusive-language: disable
+ MDDR->core.ddrc.DYN_SOFT_RESET_CR = 0x0000;
+ MDDR->core.ddrc.DYN_REFRESH_1_CR = 0x27de;
+ MDDR->core.ddrc.DYN_REFRESH_2_CR = 0x030f;
+ MDDR->core.ddrc.DYN_POWERDOWN_CR = 0x0002;
+ MDDR->core.ddrc.DYN_DEBUG_CR = 0x0000;
+ MDDR->core.ddrc.MODE_CR = 0x00C1;
+ MDDR->core.ddrc.ADDR_MAP_BANK_CR = 0x099f;
+ MDDR->core.ddrc.ECC_DATA_MASK_CR = 0x0000;
+ MDDR->core.ddrc.ADDR_MAP_COL_1_CR = 0x3333;
+ MDDR->core.ddrc.ADDR_MAP_COL_2_CR = 0xffff;
+ MDDR->core.ddrc.ADDR_MAP_ROW_1_CR = 0x7777;
+ MDDR->core.ddrc.ADDR_MAP_ROW_2_CR = 0x0fff;
+ MDDR->core.ddrc.INIT_1_CR = 0x0001;
+ MDDR->core.ddrc.CKE_RSTN_CYCLES_CR[0] = 0x4242;
+ MDDR->core.ddrc.CKE_RSTN_CYCLES_CR[1] = 0x0008;
+ MDDR->core.ddrc.INIT_MR_CR = 0x0033;
+ MDDR->core.ddrc.INIT_EMR_CR = 0x0020;
+ MDDR->core.ddrc.INIT_EMR2_CR = 0x0000;
+ MDDR->core.ddrc.INIT_EMR3_CR = 0x0000;
+ MDDR->core.ddrc.DRAM_BANK_TIMING_PARAM_CR = 0x00c0;
+ MDDR->core.ddrc.DRAM_RD_WR_LATENCY_CR = 0x0023;
+ MDDR->core.ddrc.DRAM_RD_WR_PRE_CR = 0x0235;
+ MDDR->core.ddrc.DRAM_MR_TIMING_PARAM_CR = 0x0064;
+ MDDR->core.ddrc.DRAM_RAS_TIMING_CR = 0x0108;
+ MDDR->core.ddrc.DRAM_RD_WR_TRNARND_TIME_CR = 0x0178;
+ MDDR->core.ddrc.DRAM_T_PD_CR = 0x0033;
+ MDDR->core.ddrc.DRAM_BANK_ACT_TIMING_CR = 0x1947;
+ MDDR->core.ddrc.ODT_PARAM_1_CR = 0x0010;
+ MDDR->core.ddrc.ODT_PARAM_2_CR = 0x0000;
+ MDDR->core.ddrc.ADDR_MAP_COL_3_CR = 0x3300;
+ MDDR->core.ddrc.MODE_REG_RD_WR_CR = 0x0000;
+ MDDR->core.ddrc.MODE_REG_DATA_CR = 0x0000;
+ MDDR->core.ddrc.PWR_SAVE_1_CR = 0x0514;
+ MDDR->core.ddrc.PWR_SAVE_2_CR = 0x0000;
+ MDDR->core.ddrc.ZQ_LONG_TIME_CR = 0x0200;
+ MDDR->core.ddrc.ZQ_SHORT_TIME_CR = 0x0040;
+ MDDR->core.ddrc.ZQ_SHORT_INT_REFRESH_MARGIN_CR[0] = 0x0012;
+ MDDR->core.ddrc.ZQ_SHORT_INT_REFRESH_MARGIN_CR[1] = 0x0002;
+ MDDR->core.ddrc.PERF_PARAM_1_CR = 0x4000;
+ MDDR->core.ddrc.HPR_QUEUE_PARAM_CR[0] = 0x80f8;
+ MDDR->core.ddrc.HPR_QUEUE_PARAM_CR[1] = 0x0007;
+ MDDR->core.ddrc.LPR_QUEUE_PARAM_CR[0] = 0x80f8;
+ MDDR->core.ddrc.LPR_QUEUE_PARAM_CR[1] = 0x0007;
+ MDDR->core.ddrc.WR_QUEUE_PARAM_CR = 0x0200;
+ MDDR->core.ddrc.PERF_PARAM_2_CR = 0x0001;
+ MDDR->core.ddrc.PERF_PARAM_3_CR = 0x0000;
+ MDDR->core.ddrc.DFI_RDDATA_EN_CR = 0x0003;
+ MDDR->core.ddrc.DFI_MIN_CTRLUPD_TIMING_CR = 0x0003;
+ MDDR->core.ddrc.DFI_MAX_CTRLUPD_TIMING_CR = 0x0040;
+ MDDR->core.ddrc.DFI_WR_LVL_CONTROL_CR[0] = 0x0000;
+ MDDR->core.ddrc.DFI_WR_LVL_CONTROL_CR[1] = 0x0000;
+ MDDR->core.ddrc.DFI_RD_LVL_CONTROL_CR[0] = 0x0000;
+ MDDR->core.ddrc.DFI_RD_LVL_CONTROL_CR[1] = 0x0000;
+ MDDR->core.ddrc.DFI_CTRLUPD_TIME_INTERVAL_CR = 0x0309;
+ MDDR->core.ddrc.AXI_FABRIC_PRI_ID_CR = 0x0000;
+ MDDR->core.ddrc.ECC_INT_CLR_REG = 0x0000;
+
+ MDDR->core.phy.LOOPBACK_TEST_CR = 0x0000;
+ MDDR->core.phy.CTRL_SLAVE_RATIO_CR = 0x0080;
+ MDDR->core.phy.DATA_SLICE_IN_USE_CR = 0x0003;
+ MDDR->core.phy.DQ_OFFSET_CR[0] = 0x00000000;
+ MDDR->core.phy.DQ_OFFSET_CR[2] = 0x0000;
+ MDDR->core.phy.DLL_LOCK_DIFF_CR = 0x000B;
+ MDDR->core.phy.FIFO_WE_SLAVE_RATIO_CR[0] = 0x0040;
+ MDDR->core.phy.FIFO_WE_SLAVE_RATIO_CR[1] = 0x0401;
+ MDDR->core.phy.FIFO_WE_SLAVE_RATIO_CR[2] = 0x4010;
+ MDDR->core.phy.FIFO_WE_SLAVE_RATIO_CR[3] = 0x0000;
+ MDDR->core.phy.LOCAL_ODT_CR = 0x0001;
+ MDDR->core.phy.RD_DQS_SLAVE_RATIO_CR[0] = 0x0040;
+ MDDR->core.phy.RD_DQS_SLAVE_RATIO_CR[1] = 0x0401;
+ MDDR->core.phy.RD_DQS_SLAVE_RATIO_CR[2] = 0x4010;
+ MDDR->core.phy.WR_DATA_SLAVE_RATIO_CR[0] = 0x0040;
+ MDDR->core.phy.WR_DATA_SLAVE_RATIO_CR[1] = 0x0401;
+ MDDR->core.phy.WR_DATA_SLAVE_RATIO_CR[2] = 0x4010;
+ MDDR->core.phy.WR_RD_RL_CR = 0x0021;
+ MDDR->core.phy.RDC_WE_TO_RE_DELAY_CR = 0x0003;
+ MDDR->core.phy.USE_FIXED_RE_CR = 0x0001;
+ MDDR->core.phy.USE_RANK0_DELAYS_CR = 0x0001;
+ MDDR->core.phy.CONFIG_CR = 0x0009;
+ MDDR->core.phy.DYN_RESET_CR = 0x01;
+ MDDR->core.ddrc.DYN_SOFT_RESET_CR = 0x01;
+ // inclusive-language: enable
+ // Wait for config
+ while ((MDDR->core.ddrc.DDRC_SR) == 0x0000) {
+ }
+#endif
+}
+
+extern "C" void pw_boot_PreStaticConstructorInit() {
+ // TODO(skeys) add "#if no_bootLoader" and the functions needed for init.
+
+#if PW_MALLOC_ACTIVE
+ pw_MallocInit(&pw_boot_heap_low_addr, &pw_boot_heap_high_addr);
+#endif // PW_MALLOC_ACTIVE
+ pw_sys_io_Init();
+}
+
+// TODO(amontanez): pw_boot_PreMainInit() should get renamed to
+// pw_boot_FinalizeBoot or similar when main() is removed.
+extern "C" void pw_boot_PreMainInit() {
+ pw::system::Init();
+ vTaskStartScheduler();
+ PW_UNREACHABLE;
+}
+
+// This `main()` stub prevents another main function from being linked since
+// this target deliberately doesn't run `main()`.
+extern "C" int main() {}
+
+extern "C" PW_NO_RETURN void pw_boot_PostMain() {
+ // In case main() returns, just sit here until the device is reset.
+ while (true) {
+ }
+ PW_UNREACHABLE;
+}
diff --git a/targets/emcraft_sf2_som/config/FreeRTOSConfig.h b/targets/emcraft_sf2_som/config/FreeRTOSConfig.h
new file mode 100644
index 000000000..c024e3e60
--- /dev/null
+++ b/targets/emcraft_sf2_som/config/FreeRTOSConfig.h
@@ -0,0 +1,81 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <stdint.h>
+
+// Externally defined variables that must be forward-declared for FreeRTOS to
+// use them.
+extern uint32_t SystemCoreClock;
+extern void configureTimerForRunTimeStats(void);
+extern unsigned long getRunTimeCounterValue(void);
+
+#define configSUPPORT_DYNAMIC_ALLOCATION 0
+#define configSUPPORT_STATIC_ALLOCATION 1
+
+#define configUSE_16_BIT_TICKS 0
+#define configUSE_CO_ROUTINES 0
+#define configUSE_IDLE_HOOK 0
+#define configUSE_MUTEXES 1
+#define configUSE_PORT_OPTIMISED_TASK_SELECTION 1
+#define configUSE_PREEMPTION 1
+#define configUSE_TICK_HOOK 0
+#define configUSE_TIMERS 1
+#define configUSE_TRACE_FACILITY 1
+
+#define configGENERATE_RUN_TIME_STATS 1
+#define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS configureTimerForRunTimeStats
+#define portGET_RUN_TIME_COUNTER_VALUE getRunTimeCounterValue
+
+#define configCHECK_FOR_STACK_OVERFLOW 2
+#define configCPU_CLOCK_HZ (SystemCoreClock)
+#define configENABLE_BACKWARD_COMPATIBILITY 0
+#define configMAX_CO_ROUTINE_PRIORITIES (2)
+#define configMAX_PRIORITIES (7)
+#define configMAX_TASK_NAME_LEN (16)
+#define configMESSAGE_BUFFER_LENGTH_TYPE size_t
+#define configMINIMAL_STACK_SIZE ((uint16_t)128)
+#define configQUEUE_REGISTRY_SIZE 8
+#define configRECORD_STACK_HIGH_ADDRESS 1
+#define configTICK_RATE_HZ ((TickType_t)1000)
+#define configTIMER_QUEUE_LENGTH 10
+#define configTIMER_TASK_PRIORITY (6)
+#define configTIMER_TASK_STACK_DEPTH 512
+
+/* __NVIC_PRIO_BITS in CMSIS */
+#define configPRIO_BITS 4
+
+#define configLIBRARY_LOWEST_INTERRUPT_PRIORITY 15
+#define configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY 5
+#define configKERNEL_INTERRUPT_PRIORITY \
+ (configLIBRARY_LOWEST_INTERRUPT_PRIORITY << (8 - configPRIO_BITS))
+#define configMAX_SYSCALL_INTERRUPT_PRIORITY \
+ (configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY << (8 - configPRIO_BITS))
+
+#define INCLUDE_uxTaskPriorityGet 1
+#define INCLUDE_vTaskCleanUpResources 0
+#define INCLUDE_vTaskDelay 1
+#define INCLUDE_vTaskDelayUntil 0
+#define INCLUDE_vTaskDelete 1
+#define INCLUDE_vTaskPrioritySet 1
+#define INCLUDE_vTaskSuspend 1
+#define INCLUDE_xTaskGetSchedulerState 1
+
+// Instead of defining configASSERT(), include a header that provides a
+// definition that redirects to pw_assert.
+#include "pw_third_party/freertos/config_assert.h"
+
+#define vPortSVCHandler SVC_Handler
+#define xPortPendSVHandler PendSV_Handler
+#define xPortSysTickHandler SysTick_Handler
diff --git a/targets/emcraft_sf2_som/config/sf2_mss_hal_conf.h b/targets/emcraft_sf2_som/config/sf2_mss_hal_conf.h
new file mode 100644
index 000000000..0e82d6856
--- /dev/null
+++ b/targets/emcraft_sf2_som/config/sf2_mss_hal_conf.h
@@ -0,0 +1,25 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#pragma once
+
+#if (MSS_SYS_MDDR_CONFIG_BY_CORTEX == 1)
+#error "Please turn off DDR initialization! See the comment in this file above."
+#endif
+
+#define HAL_GPIO_MODULE_ENABLED
+#include "mss_gpio/mss_gpio.h"
+
+#define HAL_UART_MODULE_ENABLED
+#include "mss_uart/mss_uart.h"
diff --git a/targets/emcraft_sf2_som/target_docs.rst b/targets/emcraft_sf2_som/target_docs.rst
new file mode 100644
index 000000000..adcc077e8
--- /dev/null
+++ b/targets/emcraft_sf2_som/target_docs.rst
@@ -0,0 +1,35 @@
+.. _target-emcraft-sf2-som:
+
+------------
+SmartFusion2
+------------
+The Emcraft SmartFusion2 system-on-module target configuration
+uses FreeRTOS and the Microchip MSS HAL rather than a from-the-ground-up
+baremetal approach.
+
+-----
+Setup
+-----
+To use this target, pigweed must be set up to use FreeRTOS and the Microchip
+MSS HAL for the SmartFusion series. The supported repositories can be
+downloaded via ``pw package``, and then the build must be manually configured
+to point to the locations the repositories were downloaded to.
+
+.. code:: sh
+
+ pw package install freertos
+ pw package install smartfusion_mss
+ pw package install nanopb
+
+ gn args out
+ # Add these lines, replacing ${PW_ROOT} with the path to the location that
+ # Pigweed is checked out at.
+ dir_pw_third_party_freertos = "${PW_ROOT}/.environment/packages/freertos"
+ dir_pw_third_party_smartfusion_mss =
+ "${PW_ROOT}/.environment/packages/smartfusion_mss"
+ dir_pw_third_party_nanopb = "${PW_ROOT}/.environment/packages/nanopb"
+
+Building and running the demo
+=============================
+This target does not yet build as part of Pigweed, but will later be
+available through the pw_system_demo build target.
diff --git a/targets/emcraft_sf2_som/vector_table.c b/targets/emcraft_sf2_som/vector_table.c
new file mode 100644
index 000000000..a4f5f7fc4
--- /dev/null
+++ b/targets/emcraft_sf2_som/vector_table.c
@@ -0,0 +1,75 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <stdbool.h>
+
+#include "pw_boot/boot.h"
+#include "pw_boot_cortex_m/boot.h"
+#include "pw_preprocessor/compiler.h"
+
+// Default handler to insert into the ARMv7-M vector table (below).
+// This function exists for convenience. If a device isn't doing what you
+// expect, it might have hit a fault and ended up here.
+static void DefaultFaultHandler(void) {
+ while (true) {
+ // Wait for debugger to attach.
+ }
+}
+
+// This is the device's interrupt vector table. It's not referenced in any
+// code because the platform (SmartFusion) expects this table to be present at
+// the beginning of flash. The exact address is specified in the pw_boot_armv7m
+// configuration as part of the target config.
+//
+// For more information, see ARMv7-M Architecture Reference Manual DDI 0403E.b
+// section B1.5.3.
+
+// This typedef is for convenience when building the vector table. With the
+// exception of SP_main (0th entry in the vector table), all the entries of the
+// vector table are function pointers.
+typedef void (*InterruptHandler)(void);
+
+// Interrupt handlers critical for OS operation.
+void SVC_Handler(void);
+void PendSV_Handler(void);
+void SysTick_Handler(void);
+
+PW_KEEP_IN_SECTION(".vector_table")
+const InterruptHandler vector_table[] = {
+ // The starting location of the stack pointer.
+ // This address is NOT an interrupt handler/function pointer, it is simply
+ // the address that the main stack pointer should be initialized to. The
+ // value is reinterpret casted because it needs to be in the vector table.
+ [0] = (InterruptHandler)(&pw_boot_stack_high_addr),
+
+ // Reset handler, dictates how to handle reset interrupt. This is the
+ // address that the Program Counter (PC) is initialized to at boot.
+ [1] = pw_boot_Entry,
+
+ // NMI handler.
+ [2] = DefaultFaultHandler,
+ // HardFault handler.
+ [3] = DefaultFaultHandler,
+ // 4-6: Specialized fault handlers.
+ // 7-10: Reserved.
+ // SVCall handler.
+ [11] = SVC_Handler,
+ // DebugMon handler.
+ [12] = DefaultFaultHandler,
+ // 13: Reserved.
+ // PendSV handler.
+ [14] = PendSV_Handler,
+ // SysTick handler.
+ [15] = SysTick_Handler,
+};
diff --git a/targets/host/macos.gni b/targets/host/macos.gni
index 5624f2689..62c938000 100644
--- a/targets/host/macos.gni
+++ b/targets/host/macos.gni
@@ -14,11 +14,6 @@
import("host_common.gni")
-declare_args() {
- # Specifies the toolchain to use for this build.
- pw_target_toolchain = "$dir_pw_toolchain:host_clang_og"
-}
-
pw_executable_config.bloaty_config_file =
get_path_info("macos.bloaty", "abspath")
diff --git a/targets/host/pigweed_internal/BUILD.gn b/targets/host/pigweed_internal/BUILD.gn
new file mode 100644
index 000000000..295536edf
--- /dev/null
+++ b/targets/host/pigweed_internal/BUILD.gn
@@ -0,0 +1,28 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pigweed/targets/host/target_toolchains.gni")
+import("$dir_pw_toolchain/generate_toolchain.gni")
+
+assert(
+ "//targets/host/pigweed_internal/" == get_path_info("./", "abspath"),
+ "The host toolchains in targets/host/pigweed_internal may only be used " +
+ "for building upstream Pigweed. Use the host toolchains in " +
+ "targets/host for downstream code.")
+
+generate_toolchains("internal_host_toolchains") {
+ toolchains = pw_internal_host_toolchains
+}
diff --git a/targets/host/target_docs.rst b/targets/host/target_docs.rst
index b3d1564ab..cadbb9b03 100644
--- a/targets/host/target_docs.rst
+++ b/targets/host/target_docs.rst
@@ -1,37 +1,83 @@
.. _target-host:
-----
+====
host
-----
+====
The Pigweed host target is used for unit testing and some host side tooling.
+----------
+Toolchains
+----------
+Pigweed provides several preconfigured toolchains for compiling for the host.
+
+.. list-table::
+
+ * - Toolchain name
+ - GN path
+ - Compiler
+ - Optimization
+ * - ``host_clang_debug``
+ - ``//targets/host:host_clang_debug``
+ - Clang
+ - ``-Og``
+ * - ``host_clang_size_optimized``
+ - ``//targets/host:host_clang_size_optimized``
+ - Clang
+ - ``-Os``
+ * - ``host_clang_speed_optimized``
+ - ``//targets/host:host_clang_speed_optimized``
+ - Clang
+ - ``-O2``
+ * - ``host_gcc_debug``
+ - ``//targets/host:host_gcc_debug``
+ - GCC
+ - ``-Og``
+ * - ``host_gcc_size_optimized``
+ - ``//targets/host:host_gcc_size_optimized``
+ - GCC
+ - ``-Os``
+ * - ``host_gcc_speed_optimized``
+ - ``//targets/host:host_gcc_speed_optimized``
+ - GCC
+ - ``-O2``
+
+These toolchains may be used directly by downstream projects if desired. For
+upstream builds, Pigweed uses internal-only variants of these toolchains. The
+upstream toolchains are defined in ``//targets/host/pigweed_internal`` and are
+prefixed with ``pw_strict_``. The upstream toolchains may not be used by
+downstream projects.
+
+--------
Building
-========
-To build for this target, invoke ninja with the top-level "host" group as the
-target to build.
+--------
+To build for the host with a default configuration, invoke Ninja with the
+top-level ``host`` group as the target to build.
-.. code:: sh
+.. code-block:: sh
$ ninja -C out host
-There are two host toolchains, and both of them can be manually invoked by
-replacing `host` with `host_clang` or `host_gcc`. Not all toolchains are
-supported on all platforms. Unless working specifically on one toolchain, it is
-recommended to leave this to the default.
+``host`` may be replaced with ``host_clang``, ``host_gcc``,
+``host_clang_debug``, etc. to build with a more specific host toolchain. Not all
+toolchains are supported on all platforms. Unless working specifically on one
+toolchain, it is recommended to use the default.
+
+-------------
Running Tests
-=============
+-------------
Tests are automatically run as part of the host build, but if you desire to
manually run tests, you may invoke them from a shell directly.
Example:
-... code:: sh
+.. code-block:: sh
$ ./out/host_[compiler]_debug/obj/pw_status/status_test
+----------
RPC server
-==========
+----------
The host target implements a system RPC server that runs over a local socket,
defaulting to port 33000. To communicate with a process running the host RPC
server, use ``pw rpc -s localhost:33000 <protos>``.
diff --git a/targets/host/target_toolchains.gni b/targets/host/target_toolchains.gni
index ee5669a9a..15c08acb4 100644
--- a/targets/host/target_toolchains.gni
+++ b/targets/host/target_toolchains.gni
@@ -1,4 +1,4 @@
-# Copyright 2020 The Pigweed Authors
+# Copyright 2022 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
@@ -123,12 +123,12 @@ _gcc_default_configs = [
"$dir_pw_toolchain/host_gcc:threading_support",
]
-pw_target_toolchain_host = {
- _excluded_members = [
- "defaults",
- "name",
- ]
+_excluded_members = [
+ "defaults",
+ "name",
+]
+pw_target_toolchain_host = {
clang_debug = {
name = "host_clang_debug"
_toolchain_base = pw_toolchain_host_clang.debug
@@ -291,3 +291,86 @@ pw_target_toolchain_host_list = [
pw_target_toolchain_host.gcc_speed_optimized,
pw_target_toolchain_host.gcc_size_optimized,
]
+
+# Additional configuration intended only for upstream Pigweed use.
+_pigweed_internal = {
+ pw_status_CONFIG = "$dir_pw_status:check_if_used"
+}
+
+# Host toolchains exclusively for upstream Pigweed use. To give upstream Pigweed
+# flexibility in how it compiles code, these toolchains may not be used by
+# downstream projects.
+pw_internal_host_toolchains = [
+ {
+ name = "pw_strict_host_clang_debug"
+ _toolchain_base = pw_toolchain_host_clang.debug
+ forward_variables_from(_toolchain_base, "*", _excluded_members)
+ defaults = {
+ forward_variables_from(_toolchain_base.defaults, "*")
+ forward_variables_from(_host_common, "*")
+ forward_variables_from(_pigweed_internal, "*")
+ forward_variables_from(_os_specific_config, "*")
+ default_configs += _clang_default_configs
+ }
+ },
+ {
+ name = "pw_strict_host_clang_speed_optimized"
+ _toolchain_base = pw_toolchain_host_clang.speed_optimized
+ forward_variables_from(_toolchain_base, "*", _excluded_members)
+ defaults = {
+ forward_variables_from(_toolchain_base.defaults, "*")
+ forward_variables_from(_host_common, "*")
+ forward_variables_from(_pigweed_internal, "*")
+ forward_variables_from(_os_specific_config, "*")
+ default_configs += _clang_default_configs
+ }
+ },
+ {
+ name = "pw_strict_host_clang_size_optimized"
+ _toolchain_base = pw_toolchain_host_clang.size_optimized
+ forward_variables_from(_toolchain_base, "*", _excluded_members)
+ defaults = {
+ forward_variables_from(_toolchain_base.defaults, "*")
+ forward_variables_from(_host_common, "*")
+ forward_variables_from(_pigweed_internal, "*")
+ forward_variables_from(_os_specific_config, "*")
+ default_configs += _clang_default_configs
+ }
+ },
+ {
+ name = "pw_strict_host_gcc_debug"
+ _toolchain_base = pw_toolchain_host_gcc.debug
+ forward_variables_from(_toolchain_base, "*", _excluded_members)
+ defaults = {
+ forward_variables_from(_toolchain_base.defaults, "*")
+ forward_variables_from(_host_common, "*")
+ forward_variables_from(_pigweed_internal, "*")
+ forward_variables_from(_os_specific_config, "*")
+ default_configs += _gcc_default_configs
+ }
+ },
+ {
+ name = "pw_strict_host_gcc_speed_optimized"
+ _toolchain_base = pw_toolchain_host_gcc.speed_optimized
+ forward_variables_from(_toolchain_base, "*", _excluded_members)
+ defaults = {
+ forward_variables_from(_toolchain_base.defaults, "*")
+ forward_variables_from(_host_common, "*")
+ forward_variables_from(_pigweed_internal, "*")
+ forward_variables_from(_os_specific_config, "*")
+ default_configs += _gcc_default_configs
+ }
+ },
+ {
+ name = "pw_strict_host_gcc_size_optimized"
+ _toolchain_base = pw_toolchain_host_gcc.size_optimized
+ forward_variables_from(_toolchain_base, "*", _excluded_members)
+ defaults = {
+ forward_variables_from(_toolchain_base.defaults, "*")
+ forward_variables_from(_host_common, "*")
+ forward_variables_from(_pigweed_internal, "*")
+ forward_variables_from(_os_specific_config, "*")
+ default_configs += _gcc_default_configs
+ }
+ },
+]
diff --git a/targets/host_device_simulator/target_docs.rst b/targets/host_device_simulator/target_docs.rst
index 0de6f016c..494a4b9aa 100644
--- a/targets/host_device_simulator/target_docs.rst
+++ b/targets/host_device_simulator/target_docs.rst
@@ -35,7 +35,7 @@ run with the following commands:
ninja -C out pw_system_demo
- ./out/host_device_simulator.size_optimized/obj/pw_system/bin/system_example
+ ./out/host_device_simulator.speed_optimized/obj/pw_system/bin/system_example
To communicate with the launched process, use
-``pw rpc -s localhost:33000 --proto-globs pw_rpc/echo.proto``.
+``pw-system-console -s localhost:33000 --proto-globs pw_rpc/echo.proto``.
diff --git a/targets/rp2040/BUILD.bazel b/targets/rp2040/BUILD.bazel
new file mode 100644
index 000000000..76bd682b4
--- /dev/null
+++ b/targets/rp2040/BUILD.bazel
@@ -0,0 +1,36 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load(
+ "//pw_build:pigweed.bzl",
+ "pw_cc_library",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+# This is just a stub to silence warnings saying that pico_logging_test_main.cc
+# is missing from the bazel build. There's no plans yet to do a Bazel build for
+# the Pi Pico.
+pw_cc_library(
+ name = "pico_logging_test_main",
+ srcs = [
+ "pico_logging_test_main.cc",
+ ],
+ deps = [
+ "//pw_unit_test",
+ "//pw_unit_test:logging_event_handler",
+ ],
+)
diff --git a/targets/rp2040/BUILD.gn b/targets/rp2040/BUILD.gn
new file mode 100644
index 000000000..9f0bbdb10
--- /dev/null
+++ b/targets/rp2040/BUILD.gn
@@ -0,0 +1,79 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+import("$dir_pw_docgen/docs.gni")
+import("$dir_pw_toolchain/arm_gcc/toolchains.gni")
+import("$dir_pw_toolchain/generate_toolchain.gni")
+
+if (current_toolchain != default_toolchain) {
+ pw_source_set("pico_logging_test_main") {
+ # Required because the pico SDK can't properly propagate -Wno-undef and
+ # -Wno-unused-function because of Pigweed's very unusual default_configs
+ # behavior.
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ deps = [
+ "$PICO_ROOT/src/common/pico_base",
+ "$PICO_ROOT/src/common/pico_stdlib",
+ "$dir_pw_unit_test:logging_event_handler",
+ "$dir_pw_unit_test:pw_unit_test",
+ ]
+ sources = [ "pico_logging_test_main.cc" ]
+ }
+}
+
+generate_toolchain("rp2040") {
+ _excluded_members = [
+ "defaults",
+ "name",
+ ]
+ _toolchain_base = pw_toolchain_arm_gcc.cortex_m0plus_size_optimized
+ forward_variables_from(_toolchain_base, "*", _excluded_members)
+ final_binary_extension = ".elf"
+
+ # For now, no Pigweed configurations set up.
+ defaults = {
+ forward_variables_from(_toolchain_base.defaults, "*")
+
+ pw_build_EXECUTABLE_TARGET_TYPE = "pico_executable"
+ pw_build_EXECUTABLE_TARGET_TYPE_FILE =
+ get_path_info("pico_executable.gni", "abspath")
+ pw_unit_test_MAIN = "$dir_pigweed/targets/rp2040:pico_logging_test_main"
+ pw_assert_BACKEND = dir_pw_assert_basic
+ pw_log_BACKEND = dir_pw_log_basic
+ pw_sys_io_BACKEND = "$dir_pw_sys_io_stdio"
+
+ pw_sync_INTERRUPT_SPIN_LOCK_BACKEND =
+ "$dir_pw_sync_baremetal:interrupt_spin_lock"
+ pw_sync_MUTEX_BACKEND = "$dir_pw_sync_baremetal:mutex"
+
+ # Silence GN variable overwrite warning.
+ pw_build_LINK_DEPS = []
+
+ pw_build_LINK_DEPS = [
+ "$dir_pw_assert:impl",
+ "$dir_pw_log:impl",
+ ]
+
+ current_cpu = "arm"
+ current_os = ""
+ }
+}
+
+pw_doc_group("target_docs") {
+ sources = [ "target_docs.rst" ]
+}
diff --git a/targets/rp2040/pico_executable.gni b/targets/rp2040/pico_executable.gni
new file mode 100644
index 000000000..f8afcf16e
--- /dev/null
+++ b/targets/rp2040/pico_executable.gni
@@ -0,0 +1,25 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+# Executable wrapper that allows the 2nd stage bootloader to strip link deps.
+template("pico_executable") {
+ target("executable", target_name) {
+ forward_variables_from(invoker, "*")
+ if (defined(no_link_deps) && no_link_deps) {
+ public_deps -= [ "$dir_pw_build:link_deps" ]
+ }
+ }
+}
diff --git a/targets/rp2040/pico_logging_test_main.cc b/targets/rp2040/pico_logging_test_main.cc
new file mode 100644
index 000000000..1c5c92c59
--- /dev/null
+++ b/targets/rp2040/pico_logging_test_main.cc
@@ -0,0 +1,24 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pico/stdlib.h"
+#include "pw_unit_test/framework.h"
+#include "pw_unit_test/logging_event_handler.h"
+
+int main() {
+ setup_default_uart();
+ pw::unit_test::LoggingEventHandler handler;
+ pw::unit_test::RegisterEventHandler(&handler);
+ return RUN_ALL_TESTS();
+}
diff --git a/targets/rp2040/target_docs.rst b/targets/rp2040/target_docs.rst
new file mode 100644
index 000000000..0546f724b
--- /dev/null
+++ b/targets/rp2040/target_docs.rst
@@ -0,0 +1,63 @@
+.. _target-raspberry-pi-pico:
+
+-----------------
+Raspberry Pi Pico
+-----------------
+.. warning::
+ This target is in an early state and is under active development. Usability
+ is not very polished, and many features/configuration options that work in
+  the upstream Pi Pico CMake build have not yet been ported to the GN build.
+
+-----
+Setup
+-----
+To use this target, Pigweed must be set up to build against the Raspberry Pi
+Pico SDK. This can be downloaded via ``pw package``, and then the build must be
+manually configured to point to the location of the downloaded SDK.
+
+.. code:: sh
+
+ pw package install pico_sdk
+
+ gn args out
+ # Add these lines, replacing ${PW_ROOT} with the path to the location that
+ # Pigweed is checked out at.
+ PICO_SRC_DIR = "${PW_ROOT}/.environment/packages/pico_sdk"
+
+-----
+Usage
+-----
+The Pi Pico is currently configured to output logs and test results over UART
+via GPIO 1 and 2 (TX and RX, respectively) at a baud rate of 115200. Because
+of this, you'll need a USB TTL adapter to communicate with the Pi Pico.
+
+Once the pico SDK is configured, the Pi Pico will build as part of the default
+GN build:
+
+.. code:: sh
+
+ ninja -C out
+
+Pigweed's build will produce ELF files for each unit test built for the Pi Pico.
+While ELF files can be flashed to a Pi Pico via SWD, it's slightly easier to
+use the Pi Pico's bootloader to flash the firmware as a UF2 file.
+
+Pigweed does not yet build/provide the elf2uf2 utility used to convert
+ELF files to UF2 files. This tool can be built from within the Pi Pico SDK with
+the following command:
+
+.. code:: sh
+
+ mkdir build && cd build && cmake -G Ninja ../ && ninja
+ # Copy the tool so it's visible in your PATH.
+ cp elf2uf2/elf2uf2 $HOME/bin/elf2uf2
+
+Flashing
+========
+Flashing the Pi Pico is as easy as 1-2-3:
+
+#. Create a UF2 file from an ELF file using ``elf2uf2``.
+#. While holding the button on the Pi Pico, connect the Pico to your computer
+ via the micro USB port.
+#. Copy the UF2 to the RPI-RP2 volume that enumerated when you connected the
+ Pico.
diff --git a/targets/stm32f429i_disc1_stm32cube/BUILD.gn b/targets/stm32f429i_disc1_stm32cube/BUILD.gn
index 0e55048a2..818692857 100644
--- a/targets/stm32f429i_disc1_stm32cube/BUILD.gn
+++ b/targets/stm32f429i_disc1_stm32cube/BUILD.gn
@@ -71,14 +71,15 @@ pw_system_target("stm32f429i_disc1_stm32cube") {
link_deps = [ "$dir_pigweed/targets/stm32f429i_disc1_stm32cube:pre_init" ]
build_args = {
pw_log_BACKEND = dir_pw_log_tokenized
- pw_tokenizer_GLOBAL_HANDLER_WITH_PAYLOAD_BACKEND = "//pw_system:log"
+ pw_tokenizer_GLOBAL_HANDLER_WITH_PAYLOAD_BACKEND =
+ "$dir_pw_system:log_backend.impl"
pw_third_party_freertos_CONFIG = "$dir_pigweed/targets/stm32f429i_disc1_stm32cube:stm32f4xx_freertos_config"
pw_third_party_freertos_PORT = "$dir_pw_third_party/freertos:arm_cm4f"
pw_sys_io_BACKEND = dir_pw_sys_io_stm32cube
dir_pw_third_party_stm32cube = dir_pw_third_party_stm32cube_f4
pw_third_party_stm32cube_PRODUCT = "STM32F429xx"
pw_third_party_stm32cube_CONFIG =
- "//targets/stm32f429i_disc1_stm32cube:stm32f4xx_hal_config"
+ "$dir_pigweed/targets/stm32f429i_disc1_stm32cube:stm32f4xx_hal_config"
pw_third_party_stm32cube_CORE_INIT = ""
pw_boot_cortex_m_LINK_CONFIG_DEFINES = [
"PW_BOOT_FLASH_BEGIN=0x08000200",
diff --git a/targets/stm32f429i_disc1_stm32cube/boot.cc b/targets/stm32f429i_disc1_stm32cube/boot.cc
index 81eae3298..8f00947bf 100644
--- a/targets/stm32f429i_disc1_stm32cube/boot.cc
+++ b/targets/stm32f429i_disc1_stm32cube/boot.cc
@@ -90,21 +90,21 @@ extern "C" void vApplicationStackOverflowHook(TaskHandle_t, char* pcTaskName) {
// Required for configUSE_TIMERS.
extern "C" void vApplicationGetTimerTaskMemory(
- StaticTask_t** ppxIdleTaskTCBBuffer,
- StackType_t** ppxIdleTaskStackBuffer,
- uint32_t* pulIdleTaskStackSize) {
- *ppxIdleTaskTCBBuffer = &freertos_idle_tcb;
- *ppxIdleTaskStackBuffer = freertos_idle_stack.data();
- *pulIdleTaskStackSize = freertos_idle_stack.size();
+ StaticTask_t** ppxTimerTaskTCBBuffer,
+ StackType_t** ppxTimerTaskStackBuffer,
+ uint32_t* pulTimerTaskStackSize) {
+ *ppxTimerTaskTCBBuffer = &freertos_timer_tcb;
+ *ppxTimerTaskStackBuffer = freertos_timer_stack.data();
+ *pulTimerTaskStackSize = freertos_timer_stack.size();
}
extern "C" void vApplicationGetIdleTaskMemory(
StaticTask_t** ppxIdleTaskTCBBuffer,
StackType_t** ppxIdleTaskStackBuffer,
uint32_t* pulIdleTaskStackSize) {
- *ppxIdleTaskTCBBuffer = &freertos_timer_tcb;
- *ppxIdleTaskStackBuffer = freertos_timer_stack.data();
- *pulIdleTaskStackSize = freertos_timer_stack.size();
+ *ppxIdleTaskTCBBuffer = &freertos_idle_tcb;
+ *ppxIdleTaskStackBuffer = freertos_idle_stack.data();
+ *pulIdleTaskStackSize = freertos_idle_stack.size();
}
extern "C" void pw_boot_PreStaticMemoryInit() {}
diff --git a/targets/stm32f429i_disc1_stm32cube/target_docs.rst b/targets/stm32f429i_disc1_stm32cube/target_docs.rst
index 182948387..e7e46225b 100644
--- a/targets/stm32f429i_disc1_stm32cube/target_docs.rst
+++ b/targets/stm32f429i_disc1_stm32cube/target_docs.rst
@@ -3,15 +3,22 @@
---------------------------
stm32f429i-disc1: STM32Cube
---------------------------
+.. warning::
+ This target is in a very preliminary state and is under active development.
+ This demo gives a preview of the direction we are heading with
+ :ref:`pw_system<module-pw_system>`, but it is not yet ready for production
+ use.
+
+
The STMicroelectronics STM32F429I-DISC1 development board is currently Pigweed's
primary target for on-device testing and development. This target configuration
-uses FreeRTOS and the STM32Cube HAL rather than a from-the-ground-up baremetal
-approach.
+uses :ref:`pw_system<module-pw_system>` on top of FreeRTOS and the STM32Cube HAL
+rather than a from-the-ground-up baremetal approach.
-----
Setup
-----
-To use this target, pigweed must be set up to use FreeRTOS and the STM32Cube HAL
+To use this target, Pigweed must be set up to use FreeRTOS and the STM32Cube HAL
for the STM32F4 series. The supported repositories can be downloaded via
``pw package``, and then the build must be manually configured to point to the
locations the repositories were downloaded to.
@@ -45,7 +52,10 @@ via the Pigweed console:
.. code:: sh
- pw-system-console -d /dev/ttyACM0 -b 115200 --proto-globs pw_rpc/echo.proto --token-databases out/stm32f429i_disc1_stm32cube.size_optimized/obj/pw_system/bin/system_example.elf
+ pw-system-console -d /dev/{ttyX} -b 115200 --proto-globs pw_rpc/echo.proto --token-databases out/stm32f429i_disc1_stm32cube.size_optimized/obj/pw_system/bin/system_example.elf
+
+Replace ``{ttyX}`` with the appropriate device on your machine. On Linux this
+may look like ``ttyACM0``, and on a Mac it may look like ``cu.usbmodem***``.
When the console opens, try sending an Echo RPC request. You should get back
the same message you sent to the device.
@@ -54,3 +64,5 @@ the same message you sent to the device.
>>> device.rpcs.pw.rpc.EchoService.Echo(msg="Hello, Pigweed!")
(Status.OK, pw.rpc.EchoMessage(msg='Hello, Pigweed!'))
+
+You are now up and running!
diff --git a/third_party/freertos/BUILD.gn b/third_party/freertos/BUILD.gn
index 1b243da58..48749ccc9 100644
--- a/third_party/freertos/BUILD.gn
+++ b/third_party/freertos/BUILD.gn
@@ -119,6 +119,25 @@ if (dir_pw_third_party_freertos == "") {
]
}
+ # ARM CM7 port of FreeRTOS
+ config("arm_cm7_includes") {
+ include_dirs = [ "$dir_pw_third_party_freertos/portable/GCC/ARM_CM7/r0p1" ]
+ visibility = [ ":arm_cm7" ]
+ }
+
+ pw_source_set("arm_cm7") {
+ public_configs = [
+ ":arm_cm7_includes",
+ ":public_includes",
+ ]
+ public_deps = [ pw_third_party_freertos_CONFIG ]
+ public =
+ [ "$dir_pw_third_party_freertos/portable/GCC/ARM_CM7/r0p1/portmacro.h" ]
+ sources =
+ [ "$dir_pw_third_party_freertos/portable/GCC/ARM_CM7/r0p1/port.c" ]
+ configs = [ ":disable_warnings" ]
+ }
+
# ARM CM4F port of FreeRTOS.
config("arm_cm4f_includes") {
include_dirs = [ "$dir_pw_third_party_freertos/portable/GCC/ARM_CM4F" ]
@@ -136,6 +155,23 @@ if (dir_pw_third_party_freertos == "") {
sources = [ "$dir_pw_third_party_freertos/portable/GCC/ARM_CM4F/port.c" ]
configs = [ ":disable_warnings" ]
}
+
+ # ARM CM3 port of FreeRTOS.
+ config("arm_cm3_includes") {
+ include_dirs = [ "$dir_pw_third_party_freertos/portable/GCC/ARM_CM3" ]
+ visibility = [ ":arm_cm3" ]
+ }
+
+ pw_source_set("arm_cm3") {
+ public_configs = [
+ ":arm_cm3_includes",
+ ":public_includes",
+ ]
+ public_deps = [ pw_third_party_freertos_CONFIG ]
+ public = [ "$dir_pw_third_party_freertos/portable/GCC/ARM_CM3/portmacro.h" ]
+ sources = [ "$dir_pw_third_party_freertos/portable/GCC/ARM_CM3/port.c" ]
+ configs = [ ":disable_warnings" ]
+ }
}
config("public_include_path") {
diff --git a/third_party/freertos/CMakeLists.txt b/third_party/freertos/CMakeLists.txt
index 5f1538a92..3208003dd 100644
--- a/third_party/freertos/CMakeLists.txt
+++ b/third_party/freertos/CMakeLists.txt
@@ -109,6 +109,21 @@ pw_add_module_library(pw_third_party.freertos.freertos_tasks
${disable_tasks_statics}
)
+# ARM CM7 port of FreeRTOS.
+pw_add_module_library(pw_third_party.freertos.arm_cm7
+ HEADERS
+ ${dir_pw_third_party_freertos}/portable/GCC/ARM_CM7/r0p1/portmacro.h
+ PUBLIC_DEPS
+ ${pw_third_party_freertos_CONFIG}
+ PUBLIC_INCLUDES
+ ${dir_pw_third_party_freertos}/include
+ ${dir_pw_third_party_freertos}/portable/GCC/ARM_CM7/r0p1
+ SOURCES
+ ${dir_pw_third_party_freertos}/portable/GCC/ARM_CM7/r0p1/port.c
+ PRIVATE_DEPS
+ pw_third_party.freertos.disable_warnings
+)
+
# ARM CM4F port of FreeRTOS.
pw_add_module_library(pw_third_party.freertos.arm_cm4f
HEADERS
diff --git a/third_party/pico_sdk/gn/BUILD.gn b/third_party/pico_sdk/gn/BUILD.gn
new file mode 100644
index 000000000..f411b8033
--- /dev/null
+++ b/third_party/pico_sdk/gn/BUILD.gn
@@ -0,0 +1,27 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# These warnings need to be disabled when using strict warnings.
+#
+# TODO(amontanez): Just applying these flags to Pi Pico source sets does not
+# work because of Pigweed's default_configs notion and how it orders flags.
+# Removing Pigweed's strict warnings config is the only working solution for
+# now.
+config("disable_warnings") {
+ cflags = [
+ "-Wno-undef",
+ "-Wno-unused-function",
+ ]
+ asmflags = cflags
+}
diff --git a/third_party/pico_sdk/gn/generate_config_header.gni b/third_party/pico_sdk/gn/generate_config_header.gni
new file mode 100644
index 000000000..0cbc4e0e1
--- /dev/null
+++ b/third_party/pico_sdk/gn/generate_config_header.gni
@@ -0,0 +1,66 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# Generates a pico/config_autogen.h file as part of a source set that provides
+# the required include directory as a public config.
+#
+# Example contents:
+#
+# // AUTO GENERATED BY A GN generate_config_header TARGET
+# #include "boards/pico.h"
+# #include "cmsis/rename_exceptions.h"
+#
+# Arguments:
+# config_header_files (required): Includes that should be written to the
+# generated header file.
+template("generate_config_header") {
+ assert(defined(invoker.config_header_files), "No headers provided")
+
+ _generated_header_dir = "${target_gen_dir}/${target_name}_include"
+ _generated_header_path = "${_generated_header_dir}/pico/config_autogen.h"
+
+ # Provide the include path so the header is exposed when targets depend on
+ # the generate_config_header target.
+ config("${target_name}.public_include_dirs") {
+ include_dirs = [ "${_generated_header_dir}" ]
+ }
+
+ # Actually generate config_autogen.h.
+ generated_file("${target_name}.generated_header") {
+ outputs = [ "${_generated_header_path}" ]
+ _lines = [ "// AUTO GENERATED BY A GN generate_config_header TARGET" ]
+ foreach(_header, invoker.config_header_files) {
+ _lines += [ "#include \"${_header}\"" ]
+ }
+
+ # Join with newline.
+ _NEWLINE_CHAR = "$0x0A"
+ contents = string_join(_NEWLINE_CHAR, _lines)
+ }
+
+ # This source set bundles up the generated header such that depending on
+ # this template will allow targets to include "pico/config_autogen.h".
+ pw_source_set("${target_name}") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":${target_name}.public_include_dirs" ]
+ deps = [ ":${target_name}.generated_header" ]
+ public = [ "${_generated_header_path}" ]
+ forward_variables_from(invoker, "*", [ "config_header_files" ])
+ }
+}
diff --git a/third_party/pico_sdk/pi_pico.gni b/third_party/pico_sdk/pi_pico.gni
new file mode 100644
index 000000000..dde153fb8
--- /dev/null
+++ b/third_party/pico_sdk/pi_pico.gni
@@ -0,0 +1,34 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+declare_args() {
+ # PIGWEED ONLY: Since Pigweed doesn't host 3p code, this points to the actual
+ # location of the Pi Pico source. If the GN build is ever upstreamed, this
+ # variable would not be needed.
+ PICO_SRC_DIR = ""
+}
+
+# Actual Pi Pico build configuration options.
+declare_args() {
+ PICO_BARE_METAL = false
+ PICO_BOARD = "\"rp2040\""
+ PICO_BOARD_HEADER_DIR = get_path_info("src/boards", "abspath")
+
+ # TODO(amontanez): This needs to be thought through fully.
+ PICO_GENERATED_CONFIG = get_path_info("src/rp2040:rp2040_config", "abspath")
+
+ # TODO(amontanez): This needs to be thought through fully, but can wait until
+ # a Pi Pico successor that requires it.
+ PICO_PLATFORM_DIR = get_path_info("src/rp2040", "abspath")
+}
diff --git a/third_party/pico_sdk/src/BUILD.gn b/third_party/pico_sdk/src/BUILD.gn
new file mode 100644
index 000000000..62b395275
--- /dev/null
+++ b/third_party/pico_sdk/src/BUILD.gn
@@ -0,0 +1,23 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# TODO(amontanez): If a successor to the RP2040 comes out, this might need to
+# be a little smarter about what code is pulled in.
+group("pico_sdk") {
+ public_deps = [
+ "common",
+ "rp2040",
+ "rp2_common",
+ ]
+}
diff --git a/third_party/pico_sdk/src/boards/BUILD.gn b/third_party/pico_sdk/src/boards/BUILD.gn
new file mode 100644
index 000000000..d92764e2b
--- /dev/null
+++ b/third_party/pico_sdk/src/boards/BUILD.gn
@@ -0,0 +1,58 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/boards"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("boards") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public = [
+ "${_CWD}/include/boards/adafruit_feather_rp2040.h",
+ "${_CWD}/include/boards/adafruit_itsybitsy_rp2040.h",
+ "${_CWD}/include/boards/adafruit_qtpy_rp2040.h",
+ "${_CWD}/include/boards/adafruit_trinkey_qt2040.h",
+ "${_CWD}/include/boards/arduino_nano_rp2040_connect.h",
+ "${_CWD}/include/boards/melopero_shake_rp2040.h",
+ "${_CWD}/include/boards/none.h",
+ "${_CWD}/include/boards/pico.h",
+ "${_CWD}/include/boards/pimoroni_interstate75.h",
+ "${_CWD}/include/boards/pimoroni_keybow2040.h",
+ "${_CWD}/include/boards/pimoroni_pga2040.h",
+ "${_CWD}/include/boards/pimoroni_picolipo_16mb.h",
+ "${_CWD}/include/boards/pimoroni_picolipo_4mb.h",
+ "${_CWD}/include/boards/pimoroni_picosystem.h",
+ "${_CWD}/include/boards/pimoroni_plasma2040.h",
+ "${_CWD}/include/boards/pimoroni_tiny2040.h",
+ "${_CWD}/include/boards/pybstick26_rp2040.h",
+ "${_CWD}/include/boards/sparkfun_micromod.h",
+ "${_CWD}/include/boards/sparkfun_promicro.h",
+ "${_CWD}/include/boards/sparkfun_thingplus.h",
+ "${_CWD}/include/boards/vgaboard.h",
+ "${_CWD}/include/boards/waveshare_rp2040_lcd_0.96.h",
+ "${_CWD}/include/boards/waveshare_rp2040_plus_16mb.h",
+ "${_CWD}/include/boards/waveshare_rp2040_plus_4mb.h",
+ "${_CWD}/include/boards/waveshare_rp2040_zero.h",
+ ]
+}
diff --git a/third_party/pico_sdk/src/common/BUILD.gn b/third_party/pico_sdk/src/common/BUILD.gn
new file mode 100644
index 000000000..3dda4de96
--- /dev/null
+++ b/third_party/pico_sdk/src/common/BUILD.gn
@@ -0,0 +1,34 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+group("common") {
+ public_deps = [
+ "boot_picoboot",
+ "boot_uf2",
+ "pico_base",
+ "pico_usb_reset_interface",
+ ]
+
+ if (!PICO_BARE_METAL) {
+ public_deps += [
+ "pico_binary_info",
+ "pico_bit_ops",
+ "pico_divider",
+ "pico_stdlib",
+ "pico_sync",
+ "pico_time",
+ "pico_util",
+ ]
+ }
+}
diff --git a/third_party/pico_sdk/src/common/boot_picoboot/BUILD.gn b/third_party/pico_sdk/src/common/boot_picoboot/BUILD.gn
new file mode 100644
index 000000000..2e141d4f2
--- /dev/null
+++ b/third_party/pico_sdk/src/common/boot_picoboot/BUILD.gn
@@ -0,0 +1,35 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/common/boot_picoboot"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("boot_picoboot") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+
+ # Optionally requires a dep on "pico/platform.h"
+
+ public = [ "${_CWD}/include/boot/picoboot.h" ]
+}
diff --git a/third_party/pico_sdk/src/common/boot_uf2/BUILD.gn b/third_party/pico_sdk/src/common/boot_uf2/BUILD.gn
new file mode 100644
index 000000000..2e6468b90
--- /dev/null
+++ b/third_party/pico_sdk/src/common/boot_uf2/BUILD.gn
@@ -0,0 +1,32 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/common/boot_uf2"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("boot_uf2") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public = [ "${_CWD}/include/boot/uf2.h" ]
+}
diff --git a/third_party/pico_sdk/src/common/pico_base/BUILD.gn b/third_party/pico_sdk/src/common/pico_base/BUILD.gn
new file mode 100644
index 000000000..73e176dc0
--- /dev/null
+++ b/third_party/pico_sdk/src/common/pico_base/BUILD.gn
@@ -0,0 +1,66 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/common/pico_base"
+
+import("generate_version_header.gni")
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+config("board_define") {
+ defines = [
+ "PICO_BOARD=${PICO_BOARD}",
+ "PICO_ON_DEVICE=1",
+ "PICO_NO_HARDWARE=0",
+ "PICO_BUILD=1",
+ ]
+}
+
+generate_version_header("version") {
+ version_major = PICO_SDK_VERSION_MAJOR
+ version_minor = PICO_SDK_VERSION_MINOR
+ version_revision = PICO_SDK_VERSION_REVISION
+ version_string = PICO_SDK_VERSION_STRING
+}
+
+pw_source_set("pico_base") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [
+ ":board_define",
+ ":public_include_dirs",
+ ]
+ public = [
+ "${_CWD}/include/pico.h",
+ "${_CWD}/include/pico/assert.h",
+ "${_CWD}/include/pico/config.h",
+ "${_CWD}/include/pico/error.h",
+ "${_CWD}/include/pico/types.h",
+ ]
+ public_deps = [
+ ":version",
+ "${PICO_GENERATED_CONFIG}",
+ "${PICO_ROOT}/src/rp2_common/pico_platform:headers",
+ ]
+ allow_circular_includes_from =
+ [ "${PICO_ROOT}/src/rp2_common/pico_platform:headers" ]
+}
diff --git a/third_party/pico_sdk/src/common/pico_base/generate_version_header.gni b/third_party/pico_sdk/src/common/pico_base/generate_version_header.gni
new file mode 100644
index 000000000..189bf7e8f
--- /dev/null
+++ b/third_party/pico_sdk/src/common/pico_base/generate_version_header.gni
@@ -0,0 +1,80 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+declare_args() {
+ PICO_SDK_VERSION_MAJOR = 1
+ PICO_SDK_VERSION_MINOR = 3
+ PICO_SDK_VERSION_REVISION = 0
+}
+
+# GN-ism: To reference earlier args, this needs to be in a separate block.
+declare_args() {
+ PICO_SDK_VERSION_STRING = "${PICO_SDK_VERSION_MAJOR}.${PICO_SDK_VERSION_MINOR}.${PICO_SDK_VERSION_REVISION}"
+}
+
+template("generate_version_header") {
+ assert(defined(invoker.version_major))
+ assert(defined(invoker.version_minor))
+ assert(defined(invoker.version_revision))
+ assert(defined(invoker.version_string))
+
+ _generated_header_dir = "${target_gen_dir}/${target_name}_include"
+ _generated_header_path = "${_generated_header_dir}/pico/version.h"
+
+ config("${target_name}.public_include_dirs") {
+ include_dirs = [ "${_generated_header_dir}" ]
+ }
+
+ generated_file("${target_name}.generated_header") {
+ outputs = [ "${_generated_header_path}" ]
+ _lines = [
+ "// ---------------------------------------",
+ "// THIS FILE IS AUTOGENERATED; DO NOT EDIT",
+ "// ---------------------------------------",
+ "",
+ "#ifndef _PICO_VERSION_H",
+ "#define _PICO_VERSION_H",
+ "",
+ "#define PICO_SDK_VERSION_MAJOR ${invoker.version_major}",
+ "#define PICO_SDK_VERSION_MINOR ${invoker.version_minor}",
+ "#define PICO_SDK_VERSION_REVISION ${invoker.version_revision}",
+ "#define PICO_SDK_VERSION_STRING \"${invoker.version_string}\"",
+ "",
+ "#endif",
+ ]
+
+ # Join with newline.
+ _NEWLINE_CHAR = "$0x0A"
+ contents = string_join(_NEWLINE_CHAR, _lines)
+ }
+
+ pw_source_set("${target_name}") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":${target_name}.public_include_dirs" ]
+ deps = [ ":${target_name}.generated_header" ]
+ public = [ "${_generated_header_path}" ]
+ forward_variables_from(invoker,
+ "*",
+ [
+ "version_major",
+ "version_minor",
+ "version_revision",
+ "version_string",
+ ])
+ }
+}
diff --git a/third_party/pico_sdk/src/common/pico_binary_info/BUILD.gn b/third_party/pico_sdk/src/common/pico_binary_info/BUILD.gn
new file mode 100644
index 000000000..86fbbb510
--- /dev/null
+++ b/third_party/pico_sdk/src/common/pico_binary_info/BUILD.gn
@@ -0,0 +1,38 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/common/pico_binary_info"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("pico_binary_info") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [ "${PICO_ROOT}/src/common/pico_base" ]
+ public = [
+ "${_CWD}/include/pico/binary_info.h",
+ "${_CWD}/include/pico/binary_info/code.h",
+ "${_CWD}/include/pico/binary_info/defs.h",
+ "${_CWD}/include/pico/binary_info/structure.h",
+ ]
+}
diff --git a/third_party/pico_sdk/src/common/pico_bit_ops/BUILD.gn b/third_party/pico_sdk/src/common/pico_bit_ops/BUILD.gn
new file mode 100644
index 000000000..7a55fb15b
--- /dev/null
+++ b/third_party/pico_sdk/src/common/pico_bit_ops/BUILD.gn
@@ -0,0 +1,33 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/common/pico_bit_ops"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("pico_bit_ops") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [ "${PICO_ROOT}/src/common/pico_base" ]
+ public = [ "${_CWD}/include/pico/bit_ops.h" ]
+}
diff --git a/third_party/pico_sdk/src/common/pico_divider/BUILD.gn b/third_party/pico_sdk/src/common/pico_divider/BUILD.gn
new file mode 100644
index 000000000..dea76d4a4
--- /dev/null
+++ b/third_party/pico_sdk/src/common/pico_divider/BUILD.gn
@@ -0,0 +1,36 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/common/pico_divider"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("pico_divider") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2_common/hardware_divider",
+ ]
+ public = [ "${_CWD}/include/pico/divider.h" ]
+}
diff --git a/third_party/pico_sdk/src/common/pico_stdlib/BUILD.gn b/third_party/pico_sdk/src/common/pico_stdlib/BUILD.gn
new file mode 100644
index 000000000..721254d83
--- /dev/null
+++ b/third_party/pico_sdk/src/common/pico_stdlib/BUILD.gn
@@ -0,0 +1,59 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/common/pico_stdlib"
+
+import("pico_stdio.gni")
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+ defines = [ "${PICO_STDIO}=1" ]
+}
+
+pw_source_set("headers") {
+  remove_configs = [ "$dir_pw_build:strict_warnings" ]
+  public_configs = [ ":public_include_dirs" ]
+  public_deps = [
+    "${PICO_ROOT}/src/common/pico_base",
+    "${PICO_ROOT}/src/common/pico_time",
+    "${PICO_ROOT}/src/rp2_common/hardware_gpio",
+    "${PICO_ROOT}/src/rp2_common/hardware_uart",
+    "${PICO_ROOT}/src/rp2_common/pico_stdio",
+  ]
+
+  if (PICO_STDIO == ENUM_LIB_PICO_STDIO.UART) {
+    public_deps += [ "${PICO_ROOT}/src/rp2_common/pico_stdio_uart" ]
+  } else if (PICO_STDIO == ENUM_LIB_PICO_STDIO.USB) {
+    public_deps += [ "${PICO_ROOT}/src/rp2_common/pico_stdio_usb" ]
+  } else if (PICO_STDIO == ENUM_LIB_PICO_STDIO.SEMIHOSTING) {
+    public_deps += [ "${PICO_ROOT}/src/rp2_common/pico_stdio_semihosting" ]
+  }
+  # Root the header at ${_CWD}: it lives in the Pico SDK checkout, not here.
+  public = [ "${_CWD}/include/pico/stdlib.h" ]
+}
+
+pw_source_set("pico_stdlib") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_deps = [ ":headers" ]
+
+ # Ensure the pico stdlib implementation is linked in.
+ deps = [ "${PICO_ROOT}/src/rp2_common/pico_stdlib" ]
+}
diff --git a/third_party/pico_sdk/src/common/pico_stdlib/pico_stdio.gni b/third_party/pico_sdk/src/common/pico_stdlib/pico_stdio.gni
new file mode 100644
index 000000000..8d1f27b03
--- /dev/null
+++ b/third_party/pico_sdk/src/common/pico_stdlib/pico_stdio.gni
@@ -0,0 +1,24 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+ENUM_LIB_PICO_STDIO = {
+ UART = "LIB_PICO_STDIO_UART"
+ USB = "LIB_PICO_STDIO_USB"
+ SEMIHOSTING = "LIB_PICO_STDIO_SEMIHOSTING"
+}
+
+# TODO(amontanez): This looks like a facade. Rethink?
+declare_args() {
+ PICO_STDIO = ENUM_LIB_PICO_STDIO.UART
+}
diff --git a/third_party/pico_sdk/src/common/pico_sync/BUILD.gn b/third_party/pico_sdk/src/common/pico_sync/BUILD.gn
new file mode 100644
index 000000000..4fe1f46fa
--- /dev/null
+++ b/third_party/pico_sdk/src/common/pico_sync/BUILD.gn
@@ -0,0 +1,49 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/common/pico_sync"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("pico_sync") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/common/pico_time:headers",
+ "${PICO_ROOT}/src/rp2_common/hardware_sync",
+ ]
+ public = [
+ "${_CWD}/include/pico/critical_section.h",
+ "${_CWD}/include/pico/lock_core.h",
+ "${_CWD}/include/pico/mutex.h",
+ "${_CWD}/include/pico/sem.h",
+ "${_CWD}/include/pico/sync.h",
+ ]
+ sources = [
+ "${_CWD}/critical_section.c",
+ "${_CWD}/lock_core.c",
+ "${_CWD}/mutex.c",
+ "${_CWD}/sem.c",
+ ]
+}
diff --git a/third_party/pico_sdk/src/common/pico_time/BUILD.gn b/third_party/pico_sdk/src/common/pico_time/BUILD.gn
new file mode 100644
index 000000000..ca0dff6b9
--- /dev/null
+++ b/third_party/pico_sdk/src/common/pico_time/BUILD.gn
@@ -0,0 +1,49 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/common/pico_time"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("headers") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2_common/hardware_timer",
+ ]
+ public = [
+ "${_CWD}/include/pico/time.h",
+ "${_CWD}/include/pico/timeout_helper.h",
+ ]
+}
+
+pw_source_set("pico_time") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_deps = [ ":headers" ]
+ deps = [ "${PICO_ROOT}/src/common/pico_util" ]
+ sources = [
+ "${_CWD}/time.c",
+ "${_CWD}/timeout_helper.c",
+ ]
+}
diff --git a/third_party/pico_sdk/src/common/pico_usb_reset_interface/BUILD.gn b/third_party/pico_sdk/src/common/pico_usb_reset_interface/BUILD.gn
new file mode 100644
index 000000000..2c3abd89b
--- /dev/null
+++ b/third_party/pico_sdk/src/common/pico_usb_reset_interface/BUILD.gn
@@ -0,0 +1,32 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/common/pico_usb_reset_interface"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("pico_usb_reset_interface") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public = [ "${_CWD}/include/pico/usb_reset_interface.h" ]
+}
diff --git a/third_party/pico_sdk/src/common/pico_util/BUILD.gn b/third_party/pico_sdk/src/common/pico_util/BUILD.gn
new file mode 100644
index 000000000..eed47bde9
--- /dev/null
+++ b/third_party/pico_sdk/src/common/pico_util/BUILD.gn
@@ -0,0 +1,47 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/common/pico_util"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("pico_util") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/common/pico_sync",
+ "${PICO_ROOT}/src/rp2_common/hardware_sync",
+ ]
+ public = [
+ "${_CWD}/include/pico/util/datetime.h",
+ "${_CWD}/include/pico/util/pheap.h",
+ "${_CWD}/include/pico/util/queue.h",
+ ]
+ sources = [
+ "${_CWD}/datetime.c",
+ "${_CWD}/doc.h",
+ "${_CWD}/pheap.c",
+ "${_CWD}/queue.c",
+ ]
+}
diff --git a/third_party/pico_sdk/src/rp2040/BUILD.gn b/third_party/pico_sdk/src/rp2040/BUILD.gn
new file mode 100644
index 000000000..c33009740
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2040/BUILD.gn
@@ -0,0 +1,37 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+import("${PICO_ROOT}/gn/generate_config_header.gni")
+group("rp2040") {
+ public_deps = [
+ "hardware_regs",
+ "hardware_structs",
+ ]
+}
+
+generate_config_header("rp2040_config") {
+ public_deps = [
+ "${PICO_ROOT}/src/boards",
+ "${PICO_ROOT}/src/rp2_common/cmsis:rename_exceptions",
+ ]
+ config_header_files = [
+ "boards/pico.h",
+ "cmsis/rename_exceptions.h",
+ ]
+}
diff --git a/third_party/pico_sdk/src/rp2040/hardware_regs/BUILD.gn b/third_party/pico_sdk/src/rp2040/hardware_regs/BUILD.gn
new file mode 100644
index 000000000..739d4bb38
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2040/hardware_regs/BUILD.gn
@@ -0,0 +1,77 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2040/hardware_regs"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("platform_defs") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public = [ "${_CWD}/include/hardware/platform_defs.h" ]
+}
+
+pw_source_set("hardware_regs") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ ":platform_defs",
+ "${PICO_ROOT}/src/rp2_common/pico_platform:headers",
+ ]
+ public = [
+ "${_CWD}/include/hardware/regs/adc.h",
+ "${_CWD}/include/hardware/regs/addressmap.h",
+ "${_CWD}/include/hardware/regs/busctrl.h",
+ "${_CWD}/include/hardware/regs/clocks.h",
+ "${_CWD}/include/hardware/regs/dma.h",
+ "${_CWD}/include/hardware/regs/dreq.h",
+ "${_CWD}/include/hardware/regs/i2c.h",
+ "${_CWD}/include/hardware/regs/intctrl.h",
+ "${_CWD}/include/hardware/regs/io_bank0.h",
+ "${_CWD}/include/hardware/regs/io_qspi.h",
+ "${_CWD}/include/hardware/regs/m0plus.h",
+ "${_CWD}/include/hardware/regs/pads_bank0.h",
+ "${_CWD}/include/hardware/regs/pads_qspi.h",
+ "${_CWD}/include/hardware/regs/pio.h",
+ "${_CWD}/include/hardware/regs/pll.h",
+ "${_CWD}/include/hardware/regs/psm.h",
+ "${_CWD}/include/hardware/regs/pwm.h",
+ "${_CWD}/include/hardware/regs/resets.h",
+ "${_CWD}/include/hardware/regs/rosc.h",
+ "${_CWD}/include/hardware/regs/rtc.h",
+ "${_CWD}/include/hardware/regs/sio.h",
+ "${_CWD}/include/hardware/regs/spi.h",
+ "${_CWD}/include/hardware/regs/ssi.h",
+ "${_CWD}/include/hardware/regs/syscfg.h",
+ "${_CWD}/include/hardware/regs/sysinfo.h",
+ "${_CWD}/include/hardware/regs/tbman.h",
+ "${_CWD}/include/hardware/regs/timer.h",
+ "${_CWD}/include/hardware/regs/uart.h",
+ "${_CWD}/include/hardware/regs/usb.h",
+ "${_CWD}/include/hardware/regs/usb_device_dpram.h",
+ "${_CWD}/include/hardware/regs/vreg_and_chip_reset.h",
+ "${_CWD}/include/hardware/regs/watchdog.h",
+ "${_CWD}/include/hardware/regs/xip.h",
+ "${_CWD}/include/hardware/regs/xosc.h",
+ ]
+}
diff --git a/third_party/pico_sdk/src/rp2040/hardware_structs/BUILD.gn b/third_party/pico_sdk/src/rp2040/hardware_structs/BUILD.gn
new file mode 100644
index 000000000..b472f5e2c
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2040/hardware_structs/BUILD.gn
@@ -0,0 +1,69 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2040/hardware_structs"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_structs") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/rp2040/hardware_regs",
+ "${PICO_ROOT}/src/rp2040/hardware_regs:platform_defs",
+ "${PICO_ROOT}/src/rp2_common/hardware_base",
+ ]
+ public = [
+ "${_CWD}/include/hardware/structs/adc.h",
+ "${_CWD}/include/hardware/structs/bus_ctrl.h",
+ "${_CWD}/include/hardware/structs/clocks.h",
+ "${_CWD}/include/hardware/structs/dma.h",
+ "${_CWD}/include/hardware/structs/i2c.h",
+ "${_CWD}/include/hardware/structs/interp.h",
+ "${_CWD}/include/hardware/structs/iobank0.h",
+ "${_CWD}/include/hardware/structs/ioqspi.h",
+ "${_CWD}/include/hardware/structs/mpu.h",
+ "${_CWD}/include/hardware/structs/pads_qspi.h",
+ "${_CWD}/include/hardware/structs/padsbank0.h",
+ "${_CWD}/include/hardware/structs/pio.h",
+ "${_CWD}/include/hardware/structs/pll.h",
+ "${_CWD}/include/hardware/structs/psm.h",
+ "${_CWD}/include/hardware/structs/pwm.h",
+ "${_CWD}/include/hardware/structs/resets.h",
+ "${_CWD}/include/hardware/structs/rosc.h",
+ "${_CWD}/include/hardware/structs/rtc.h",
+ "${_CWD}/include/hardware/structs/scb.h",
+ "${_CWD}/include/hardware/structs/sio.h",
+ "${_CWD}/include/hardware/structs/spi.h",
+ "${_CWD}/include/hardware/structs/ssi.h",
+ "${_CWD}/include/hardware/structs/syscfg.h",
+ "${_CWD}/include/hardware/structs/systick.h",
+ "${_CWD}/include/hardware/structs/timer.h",
+ "${_CWD}/include/hardware/structs/uart.h",
+ "${_CWD}/include/hardware/structs/usb.h",
+ "${_CWD}/include/hardware/structs/vreg_and_chip_reset.h",
+ "${_CWD}/include/hardware/structs/watchdog.h",
+ "${_CWD}/include/hardware/structs/xip_ctrl.h",
+ "${_CWD}/include/hardware/structs/xosc.h",
+ ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/BUILD.gn b/third_party/pico_sdk/src/rp2_common/BUILD.gn
new file mode 100644
index 000000000..494af4a4e
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/BUILD.gn
@@ -0,0 +1,73 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+group("rp2_common") {
+ public_deps = [
+ "hardware_adc",
+ "hardware_base",
+ "hardware_claim",
+ "hardware_clocks",
+ "hardware_divider",
+ "hardware_dma",
+ "hardware_exception",
+ "hardware_flash",
+ "hardware_gpio",
+ "hardware_i2c",
+ "hardware_interp",
+ "hardware_irq",
+ "hardware_pio",
+ "hardware_pll",
+ "hardware_pwm",
+ "hardware_resets",
+ "hardware_rtc",
+ "hardware_spi",
+ "hardware_sync",
+ "hardware_timer",
+ "hardware_uart",
+ "hardware_vreg",
+ "hardware_watchdog",
+ "hardware_xosc",
+ "pico_bootrom",
+ "pico_platform",
+ ]
+
+ if (!PICO_BARE_METAL) {
+ public_deps += [
+ "boot_stage2",
+ "cmsis",
+ "pico_bit_ops",
+ "pico_bootsel_via_double_reset",
+ "pico_divider",
+ "pico_double",
+ "pico_fix",
+ "pico_float",
+ "pico_int64_ops",
+ "pico_malloc",
+ "pico_mem_ops",
+ "pico_multicore",
+ "pico_printf",
+ "pico_runtime",
+ "pico_standard_link",
+ "pico_stdio",
+ "pico_stdio_semihosting",
+ "pico_stdio_uart",
+ "pico_stdio_usb",
+ "pico_stdlib",
+ "pico_unique_id",
+ "tinyusb",
+ ]
+ # Not a real library:
+ # pico_cxx_options
+ }
+}
diff --git a/third_party/pico_sdk/src/rp2_common/boot_stage2/BUILD.gn b/third_party/pico_sdk/src/rp2_common/boot_stage2/BUILD.gn
new file mode 100644
index 000000000..d23802a89
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/boot_stage2/BUILD.gn
@@ -0,0 +1,114 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/exec.gni")
+import("$dir_pw_build/python_action.gni")
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/boot_stage2"
+
+config("public_include_dirs") {
+ include_dirs = [
+ "${_CWD}/include",
+ "${_CWD}/asminclude",
+ ]
+}
+
+pw_executable("boot_stage2_elf") {
+ _linker_script_path = rebase_path("${_CWD}/boot_stage2.ld", root_build_dir)
+
+ # Compile as position-independent.
+ cflags = [ "-fPIC" ]
+ asmflags = cflags
+
+ ldflags = cflags
+ ldflags += [
+ "-T${_linker_script_path}",
+ "-nostartfiles",
+
+ # Unfortunately, this is not properly applied to compiler flags thanks to
+ # `default_configs`.
+ "-Wl,--no-gc-sections",
+ ]
+
+ public_configs = [ ":public_include_dirs" ]
+
+ # The upstream boot_stage2.ld doesn't specify the binary entry point or
+ # mark the required sections as KEEP(), so they're optimized out with
+ # Pigweed's aggressive default optimizations.
+ remove_configs = [
+ "$dir_pw_build:reduced_size",
+ "$dir_pw_build:strict_warnings",
+ ]
+ no_link_deps = true
+
+ public = [ "${_CWD}/include/boot_stage2/config.h" ]
+
+ deps = [ "${PICO_ROOT}/src/common/pico_base" ]
+
+ # The correct assembly file is pulled in by compile_time_choice.S.
+ inputs = [
+ "${_CWD}/boot_stage2.ld",
+ "${_CWD}/boot2_at25sf128a.S",
+ "${_CWD}/boot2_generic_03h.S",
+ "${_CWD}/boot2_is25lp080.S",
+ "${_CWD}/boot2_usb_blinky.S",
+ "${_CWD}/boot2_w25q080.S",
+ "${_CWD}/boot2_w25x10cl.S",
+ ]
+ sources = [ "${_CWD}/compile_time_choice.S" ]
+}
+
+pw_exec("boot_stage2_bin") {
+ _out_bin = "${target_out_dir}/boot_stage2.bin"
+ program = "arm-none-eabi-objcopy"
+ args = [
+ "-Obinary",
+ "<TARGET_FILE(:boot_stage2_elf)>",
+ rebase_path(_out_bin, root_build_dir),
+ ]
+ outputs = [ _out_bin ]
+ deps = [ ":boot_stage2_elf" ]
+}
+
+pw_python_action("boot_stage2_padded") {
+ _src_bin = get_target_outputs(":boot_stage2_bin")
+ _out_asm = "${target_out_dir}/boot_stage2.S"
+ script = "${_CWD}/pad_checksum"
+ args = [
+ "-s",
+ "0xffffffff",
+ rebase_path(_src_bin[0], root_build_dir),
+ rebase_path(_out_asm, root_build_dir),
+ ]
+ outputs = [ _out_asm ]
+ deps = [ ":boot_stage2_bin" ]
+}
+
+pw_source_set("boot_stage2_asm") {
+ deps = [ ":boot_stage2_padded" ]
+ sources = get_target_outputs(":boot_stage2_padded")
+}
+
+group("boot_stage2") {
+ public_deps = [
+ ":boot_stage2_asm",
+ ":boot_stage2_elf",
+ ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/cmsis/BUILD.gn b/third_party/pico_sdk/src/rp2_common/cmsis/BUILD.gn
new file mode 100644
index 000000000..94c8f41fa
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/cmsis/BUILD.gn
@@ -0,0 +1,59 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/cmsis"
+
+config("public_include_dirs") {
+ include_dirs = [
+ "${_CWD}/include",
+ "${_CWD}/stub/CMSIS/Core/Include",
+ "${_CWD}/stub/CMSIS/Device/RaspberryPi/RP2040/Include",
+ ]
+}
+
+pw_source_set("rename_exceptions") {
+  remove_configs = [ "$dir_pw_build:strict_warnings" ]
+  public_configs = [ ":public_include_dirs" ]
+  public = [ "${_CWD}/include/cmsis/rename_exceptions.h" ]  # header lives in the SDK checkout
+}
+
+# TODO(amontanez): The CMSIS stub should probably be more configurable to match
+# CMake.
+pw_source_set("cmsis") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public = [
+ "${_CWD}/stub/CMSIS/Core/Include/cmsis_armcc.h",
+ "${_CWD}/stub/CMSIS/Core/Include/cmsis_armclang.h",
+ "${_CWD}/stub/CMSIS/Core/Include/cmsis_armclang_ltm.h",
+ "${_CWD}/stub/CMSIS/Core/Include/cmsis_compiler.h",
+ "${_CWD}/stub/CMSIS/Core/Include/cmsis_gcc.h",
+ "${_CWD}/stub/CMSIS/Core/Include/cmsis_iccarm.h",
+ "${_CWD}/stub/CMSIS/Core/Include/cmsis_version.h",
+ "${_CWD}/stub/CMSIS/Core/Include/core_cm0plus.h",
+ "${_CWD}/stub/CMSIS/Core/Include/mpu_armv7.h",
+ "${_CWD}/stub/CMSIS/Device/RaspberryPi/RP2040/Include/RP2040.h",
+ "${_CWD}/stub/CMSIS/Device/RaspberryPi/RP2040/Include/system_RP2040.h",
+ ]
+ deps = [ "${PICO_ROOT}/src/rp2_common/hardware_clocks" ]
+ sources =
+ [ "${_CWD}/stub/CMSIS/Device/RaspberryPi/RP2040/Source/system_RP2040.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/hardware_adc/BUILD.gn b/third_party/pico_sdk/src/rp2_common/hardware_adc/BUILD.gn
new file mode 100644
index 000000000..7a9364651
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/hardware_adc/BUILD.gn
@@ -0,0 +1,39 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/hardware_adc"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_adc") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2040/hardware_structs",
+ "${PICO_ROOT}/src/rp2_common/hardware_gpio",
+ ]
+ deps = [ "${PICO_ROOT}/src/rp2_common/hardware_resets" ]
+ public = [ "${_CWD}/include/hardware/adc.h" ]
+ sources = [ "${_CWD}/adc.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/hardware_base/BUILD.gn b/third_party/pico_sdk/src/rp2_common/hardware_base/BUILD.gn
new file mode 100644
index 000000000..00909d44c
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/hardware_base/BUILD.gn
@@ -0,0 +1,36 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/hardware_base"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_base") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2040/hardware_regs",
+ ]
+ public = [ "${_CWD}/include/hardware/address_mapped.h" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/hardware_claim/BUILD.gn b/third_party/pico_sdk/src/rp2_common/hardware_claim/BUILD.gn
new file mode 100644
index 000000000..9ced82c0d
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/hardware_claim/BUILD.gn
@@ -0,0 +1,41 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/hardware_claim"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_claim") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2_common/hardware_sync",
+ ]
+
+ # hardware_claim and hardware_sync circularly depend on each other.
+ allow_circular_includes_from = [ "${PICO_ROOT}/src/rp2_common/hardware_sync" ]
+
+ public = [ "${_CWD}/include/hardware/claim.h" ]
+ sources = [ "${_CWD}/claim.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/hardware_clocks/BUILD.gn b/third_party/pico_sdk/src/rp2_common/hardware_clocks/BUILD.gn
new file mode 100644
index 000000000..dd4eb4791
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/hardware_clocks/BUILD.gn
@@ -0,0 +1,54 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/hardware_clocks"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_clocks") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public = [ "${_CWD}/include/hardware/clocks.h" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2040/hardware_structs",
+ ]
+ deps = [
+ "${PICO_ROOT}/src/rp2040/hardware_regs",
+ "${PICO_ROOT}/src/rp2040/hardware_regs:platform_defs",
+ "${PICO_ROOT}/src/rp2_common/hardware_gpio",
+ "${PICO_ROOT}/src/rp2_common/hardware_irq",
+ "${PICO_ROOT}/src/rp2_common/hardware_pll",
+ "${PICO_ROOT}/src/rp2_common/hardware_watchdog",
+ "${PICO_ROOT}/src/rp2_common/hardware_xosc",
+ ]
+
+ # hardware_pll and hardware_clocks circularly depend on each other.
+ # hardware_xosc and hardware_clocks circularly depend on each other.
+ allow_circular_includes_from = [
+ "${PICO_ROOT}/src/rp2_common/hardware_pll",
+ "${PICO_ROOT}/src/rp2_common/hardware_xosc",
+ ]
+
+ sources = [ "${_CWD}/clocks.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/hardware_divider/BUILD.gn b/third_party/pico_sdk/src/rp2_common/hardware_divider/BUILD.gn
new file mode 100644
index 000000000..25e1a5fca
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/hardware_divider/BUILD.gn
@@ -0,0 +1,44 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/hardware_divider"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_divider") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2040/hardware_structs",
+ ]
+ deps = [
+ "${PICO_ROOT}/src/rp2040/hardware_regs",
+ "${PICO_ROOT}/src/rp2_common/pico_platform",
+ ]
+ public = [
+ "${_CWD}/include/hardware/divider.h",
+ "${_CWD}/include/hardware/divider_helper.S",
+ ]
+ sources = [ "${_CWD}/divider.S" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/hardware_dma/BUILD.gn b/third_party/pico_sdk/src/rp2_common/hardware_dma/BUILD.gn
new file mode 100644
index 000000000..33ca25c33
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/hardware_dma/BUILD.gn
@@ -0,0 +1,39 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/hardware_dma"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_dma") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2040/hardware_regs",
+ "${PICO_ROOT}/src/rp2040/hardware_structs",
+ ]
+ deps = [ "${PICO_ROOT}/src/rp2_common/hardware_claim" ]
+ public = [ "${_CWD}/include/hardware/dma.h" ]
+ sources = [ "${_CWD}/dma.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/hardware_exception/BUILD.gn b/third_party/pico_sdk/src/rp2_common/hardware_exception/BUILD.gn
new file mode 100644
index 000000000..cb8e2dee1
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/hardware_exception/BUILD.gn
@@ -0,0 +1,43 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/hardware_exception"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_exception") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2040/hardware_regs",
+ "${PICO_ROOT}/src/rp2_common/hardware_base",
+ ]
+ deps = [
+ "${PICO_ROOT}/src/common/pico_sync",
+ "${PICO_ROOT}/src/rp2040/hardware_regs:platform_defs",
+ "${PICO_ROOT}/src/rp2040/hardware_structs",
+ ]
+ public = [ "${_CWD}/include/hardware/exception.h" ]
+ sources = [ "${_CWD}/exception.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/hardware_flash/BUILD.gn b/third_party/pico_sdk/src/rp2_common/hardware_flash/BUILD.gn
new file mode 100644
index 000000000..6e3e19014
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/hardware_flash/BUILD.gn
@@ -0,0 +1,39 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/hardware_flash"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_flash") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [ "${PICO_ROOT}/src/common/pico_base" ]
+ deps = [
+ "${PICO_ROOT}/src/common/pico_sync",
+ "${PICO_ROOT}/src/rp2040/hardware_structs",
+ "${PICO_ROOT}/src/rp2_common/pico_bootrom",
+ ]
+ public = [ "${_CWD}/include/hardware/flash.h" ]
+ sources = [ "${_CWD}/flash.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/hardware_gpio/BUILD.gn b/third_party/pico_sdk/src/rp2_common/hardware_gpio/BUILD.gn
new file mode 100644
index 000000000..193f3236a
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/hardware_gpio/BUILD.gn
@@ -0,0 +1,45 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/hardware_gpio"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_gpio") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2040/hardware_structs",
+ ]
+ deps = [
+ # TODO(amontanez): This is off by default, properly control with
+ # configuration.
+ # "${PICO_ROOT}/src/common/pico_binary_info",
+
+ "${PICO_ROOT}/src/rp2_common/hardware_irq",
+ "${PICO_ROOT}/src/rp2_common/hardware_sync",
+ ]
+ public = [ "${_CWD}/include/hardware/gpio.h" ]
+ sources = [ "${_CWD}/gpio.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/hardware_i2c/BUILD.gn b/third_party/pico_sdk/src/rp2_common/hardware_i2c/BUILD.gn
new file mode 100644
index 000000000..1c3f717a5
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/hardware_i2c/BUILD.gn
@@ -0,0 +1,43 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/hardware_i2c"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_i2c") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/common/pico_time",
+ "${PICO_ROOT}/src/rp2040/hardware_structs",
+ ]
+ deps = [
+ "${PICO_ROOT}/src/common/pico_time",
+ "${PICO_ROOT}/src/rp2_common/hardware_clocks",
+ "${PICO_ROOT}/src/rp2_common/hardware_resets",
+ ]
+ public = [ "${_CWD}/include/hardware/i2c.h" ]
+ sources = [ "${_CWD}/i2c.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/hardware_interp/BUILD.gn b/third_party/pico_sdk/src/rp2_common/hardware_interp/BUILD.gn
new file mode 100644
index 000000000..6a2dec8c6
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/hardware_interp/BUILD.gn
@@ -0,0 +1,39 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/hardware_interp"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_interp") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2040/hardware_regs",
+ "${PICO_ROOT}/src/rp2040/hardware_structs",
+ ]
+ deps = [ "${PICO_ROOT}/src/rp2_common/hardware_claim" ]
+ public = [ "${_CWD}/include/hardware/interp.h" ]
+ sources = [ "${_CWD}/interp.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/hardware_irq/BUILD.gn b/third_party/pico_sdk/src/rp2_common/hardware_irq/BUILD.gn
new file mode 100644
index 000000000..a21900ee0
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/hardware_irq/BUILD.gn
@@ -0,0 +1,53 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/hardware_irq"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_irq") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2040/hardware_regs",
+ "${PICO_ROOT}/src/rp2_common/hardware_base",
+ ]
+ deps = [
+ "${PICO_ROOT}/src/common/pico_sync",
+ "${PICO_ROOT}/src/rp2040/hardware_regs:platform_defs",
+ "${PICO_ROOT}/src/rp2040/hardware_structs",
+ ]
+
+ # There's a dependency cycle with:
+ # hardware_irq->pico_sync->pico_time->hardware_timer->hardware_irq
+ deps += [ "${PICO_ROOT}/src/rp2_common/hardware_timer" ]
+ allow_circular_includes_from =
+ [ "${PICO_ROOT}/src/rp2_common/hardware_timer" ]
+
+ public = [ "${_CWD}/include/hardware/irq.h" ]
+ sources = [
+ "${_CWD}/irq.c",
+ "${_CWD}/irq_handler_chain.S",
+ ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/hardware_pio/BUILD.gn b/third_party/pico_sdk/src/rp2_common/hardware_pio/BUILD.gn
new file mode 100644
index 000000000..f1fc257df
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/hardware_pio/BUILD.gn
@@ -0,0 +1,44 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/hardware_pio"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_pio") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2040/hardware_regs",
+ "${PICO_ROOT}/src/rp2040/hardware_structs",
+ "${PICO_ROOT}/src/rp2_common/hardware_base",
+ "${PICO_ROOT}/src/rp2_common/hardware_gpio",
+ ]
+ deps = [ "${PICO_ROOT}/src/rp2_common/hardware_claim" ]
+ public = [
+ "${_CWD}/include/hardware/pio.h",
+ "${_CWD}/include/hardware/pio_instructions.h",
+ ]
+ sources = [ "${_CWD}/pio.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/hardware_pll/BUILD.gn b/third_party/pico_sdk/src/rp2_common/hardware_pll/BUILD.gn
new file mode 100644
index 000000000..837611dff
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/hardware_pll/BUILD.gn
@@ -0,0 +1,43 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/hardware_pll"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_pll") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2040/hardware_structs",
+ ]
+ deps = [ "${PICO_ROOT}/src/rp2_common/hardware_resets" ]
+
+ # hardware_pll and hardware_clocks circularly depend on each other.
+ configs =
+ [ "${PICO_ROOT}/src/rp2_common/hardware_clocks:public_include_dirs" ]
+
+ public = [ "${_CWD}/include/hardware/pll.h" ]
+ sources = [ "${_CWD}/pll.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/hardware_pwm/BUILD.gn b/third_party/pico_sdk/src/rp2_common/hardware_pwm/BUILD.gn
new file mode 100644
index 000000000..968ef3e8f
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/hardware_pwm/BUILD.gn
@@ -0,0 +1,37 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/hardware_pwm"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_pwm") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2040/hardware_regs",
+ "${PICO_ROOT}/src/rp2040/hardware_structs",
+ ]
+ public = [ "${_CWD}/include/hardware/pwm.h" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/hardware_resets/BUILD.gn b/third_party/pico_sdk/src/rp2_common/hardware_resets/BUILD.gn
new file mode 100644
index 000000000..92beff00e
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/hardware_resets/BUILD.gn
@@ -0,0 +1,36 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/hardware_resets"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_resets") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2040/hardware_structs",
+ ]
+ public = [ "${_CWD}/include/hardware/resets.h" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/hardware_rtc/BUILD.gn b/third_party/pico_sdk/src/rp2_common/hardware_rtc/BUILD.gn
new file mode 100644
index 000000000..b59a0f2bf
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/hardware_rtc/BUILD.gn
@@ -0,0 +1,42 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/hardware_rtc"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_rtc") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2040/hardware_structs",
+ ]
+ deps = [
+ "${PICO_ROOT}/src/rp2_common/hardware_clocks",
+ "${PICO_ROOT}/src/rp2_common/hardware_irq",
+ "${PICO_ROOT}/src/rp2_common/hardware_resets",
+ ]
+ public = [ "${_CWD}/include/hardware/rtc.h" ]
+ sources = [ "${_CWD}/rtc.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/hardware_spi/BUILD.gn b/third_party/pico_sdk/src/rp2_common/hardware_spi/BUILD.gn
new file mode 100644
index 000000000..42f559747
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/hardware_spi/BUILD.gn
@@ -0,0 +1,43 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/hardware_spi"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_spi") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/common/pico_time",
+ "${PICO_ROOT}/src/rp2040/hardware_regs",
+ "${PICO_ROOT}/src/rp2040/hardware_structs",
+ ]
+ deps = [
+ "${PICO_ROOT}/src/rp2_common/hardware_clocks",
+ "${PICO_ROOT}/src/rp2_common/hardware_resets",
+ ]
+ public = [ "${_CWD}/include/hardware/spi.h" ]
+ sources = [ "${_CWD}/spi.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/hardware_sync/BUILD.gn b/third_party/pico_sdk/src/rp2_common/hardware_sync/BUILD.gn
new file mode 100644
index 000000000..442aab30b
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/hardware_sync/BUILD.gn
@@ -0,0 +1,42 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/hardware_sync"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_sync") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2040/hardware_regs",
+ "${PICO_ROOT}/src/rp2_common/hardware_base",
+ ]
+
+ # hardware_claim and hardware_sync circularly depend on each other.
+ configs = [ "${PICO_ROOT}/src/rp2_common/hardware_claim:public_include_dirs" ]
+
+ public = [ "${_CWD}/include/hardware/sync.h" ]
+ sources = [ "${_CWD}/sync.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/hardware_timer/BUILD.gn b/third_party/pico_sdk/src/rp2_common/hardware_timer/BUILD.gn
new file mode 100644
index 000000000..6abf5acbc
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/hardware_timer/BUILD.gn
@@ -0,0 +1,45 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/hardware_timer"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_timer") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2040/hardware_structs",
+ ]
+ deps = [
+ "${PICO_ROOT}/src/rp2_common/hardware_claim",
+ "${PICO_ROOT}/src/rp2_common/hardware_sync",
+ ]
+
+ # There's a dependency cycle with:
+ # hardware_irq->pico_sync->pico_time->hardware_timer->hardware_irq
+ configs = [ "${PICO_ROOT}/src/rp2_common/hardware_irq:public_include_dirs" ]
+ public = [ "${_CWD}/include/hardware/timer.h" ]
+ sources = [ "${_CWD}/timer.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/hardware_uart/BUILD.gn b/third_party/pico_sdk/src/rp2_common/hardware_uart/BUILD.gn
new file mode 100644
index 000000000..0c71aae95
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/hardware_uart/BUILD.gn
@@ -0,0 +1,45 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/hardware_uart"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_uart") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2040/hardware_regs",
+ "${PICO_ROOT}/src/rp2040/hardware_structs",
+ ]
+ deps = [
+ "${PICO_ROOT}/src/rp2040/hardware_regs:platform_defs",
+ "${PICO_ROOT}/src/rp2_common/hardware_base",
+ "${PICO_ROOT}/src/rp2_common/hardware_clocks",
+ "${PICO_ROOT}/src/rp2_common/hardware_resets",
+ "${PICO_ROOT}/src/rp2_common/hardware_timer",
+ ]
+ public = [ "${_CWD}/include/hardware/uart.h" ]
+ sources = [ "${_CWD}/uart.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/hardware_vreg/BUILD.gn b/third_party/pico_sdk/src/rp2_common/hardware_vreg/BUILD.gn
new file mode 100644
index 000000000..1815fc078
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/hardware_vreg/BUILD.gn
@@ -0,0 +1,37 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/hardware_vreg"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_vreg") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2040/hardware_structs",
+ ]
+ public = [ "${_CWD}/include/hardware/vreg.h" ]
+ sources = [ "${_CWD}/vreg.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/hardware_watchdog/BUILD.gn b/third_party/pico_sdk/src/rp2_common/hardware_watchdog/BUILD.gn
new file mode 100644
index 000000000..644f72291
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/hardware_watchdog/BUILD.gn
@@ -0,0 +1,37 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/hardware_watchdog"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_watchdog") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2040/hardware_structs",
+ ]
+ public = [ "${_CWD}/include/hardware/watchdog.h" ]
+ sources = [ "${_CWD}/watchdog.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/hardware_xosc/BUILD.gn b/third_party/pico_sdk/src/rp2_common/hardware_xosc/BUILD.gn
new file mode 100644
index 000000000..a2d77d862
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/hardware_xosc/BUILD.gn
@@ -0,0 +1,43 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/hardware_xosc"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("hardware_xosc") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2040/hardware_structs",
+ ]
+ deps = [ "${PICO_ROOT}/src/rp2040/hardware_regs:platform_defs" ]
+
+ # hardware_xosc and hardware_clocks circularly depend on each other, so only
+ # pull in hardware_clocks' headers here rather than a full dependency.
+ configs =
+ [ "${PICO_ROOT}/src/rp2_common/hardware_clocks:public_include_dirs" ]
+
+ public = [ "${_CWD}/include/hardware/xosc.h" ]
+ sources = [ "${_CWD}/xosc.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/pico_bit_ops/BUILD.gn b/third_party/pico_sdk/src/rp2_common/pico_bit_ops/BUILD.gn
new file mode 100644
index 000000000..f799a721a
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/pico_bit_ops/BUILD.gn
@@ -0,0 +1,31 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/pico_bit_ops"
+
+pw_source_set("pico_bit_ops") {
+ deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2_common/pico_bootrom",
+ "${PICO_ROOT}/src/rp2_common/pico_platform",
+ ]
+ sources = [ "${_CWD}/bit_ops_eabi.S" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/pico_bootrom/BUILD.gn b/third_party/pico_sdk/src/rp2_common/pico_bootrom/BUILD.gn
new file mode 100644
index 000000000..d1d28a2f3
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/pico_bootrom/BUILD.gn
@@ -0,0 +1,37 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/pico_bootrom"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("pico_bootrom") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [ "${PICO_ROOT}/src/common/pico_base" ]
+ public = [
+ "${_CWD}/include/pico/bootrom.h",
+ "${_CWD}/include/pico/bootrom/sf_table.h",
+ ]
+ sources = [ "${_CWD}/bootrom.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/pico_bootsel_via_double_reset/BUILD.gn b/third_party/pico_sdk/src/rp2_common/pico_bootsel_via_double_reset/BUILD.gn
new file mode 100644
index 000000000..5f615278d
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/pico_bootsel_via_double_reset/BUILD.gn
@@ -0,0 +1,32 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/pico_bootsel_via_double_reset"
+
+pw_source_set("pico_bootsel_via_double_reset") {
+ deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/common/pico_binary_info",
+ "${PICO_ROOT}/src/rp2_common/pico_bootrom",
+ "${PICO_ROOT}/src/rp2_common/pico_time",
+ ]
+ sources = [ "${_CWD}/pico_bootsel_via_double_reset.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/pico_divider/BUILD.gn b/third_party/pico_sdk/src/rp2_common/pico_divider/BUILD.gn
new file mode 100644
index 000000000..e231a8210
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/pico_divider/BUILD.gn
@@ -0,0 +1,30 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/pico_divider"
+
+pw_source_set("pico_divider") {
+ deps = [
+ "${PICO_ROOT}/src/rp2040/hardware_regs",
+ "${PICO_ROOT}/src/rp2_common/hardware_divider",
+ ]
+ sources = [ "${_CWD}/divider.S" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/pico_double/BUILD.gn b/third_party/pico_sdk/src/rp2_common/pico_double/BUILD.gn
new file mode 100644
index 000000000..ef7feb76d
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/pico_double/BUILD.gn
@@ -0,0 +1,47 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/pico_double"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("pico_double") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2_common/pico_bootrom",
+ ]
+ deps = [
+ "${PICO_ROOT}/src/rp2_common/hardware_divider",
+ "${PICO_ROOT}/src/rp2_common/pico_platform",
+ ]
+ public = [ "${_CWD}/include/pico/double.h" ]
+ sources = [
+ "${_CWD}/double_aeabi.S",
+ "${_CWD}/double_init_rom.c",
+ "${_CWD}/double_math.c",
+ "${_CWD}/double_none.S",
+ "${_CWD}/double_v1_rom_shim.S",
+ ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/pico_fix/BUILD.gn b/third_party/pico_sdk/src/rp2_common/pico_fix/BUILD.gn
new file mode 100644
index 000000000..f9397b18e
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/pico_fix/BUILD.gn
@@ -0,0 +1,17 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+group("pico_fix") {
+ public_deps = [ "rp2040_usb_device_enumeration" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/pico_fix/rp2040_usb_device_enumeration/BUILD.gn b/third_party/pico_sdk/src/rp2_common/pico_fix/rp2040_usb_device_enumeration/BUILD.gn
new file mode 100644
index 000000000..99d8cd466
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/pico_fix/rp2040_usb_device_enumeration/BUILD.gn
@@ -0,0 +1,39 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/pico_fix/rp2040_usb_device_enumeration"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("rp2040_usb_device_enumeration") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2040/hardware_structs",
+ "${PICO_ROOT}/src/rp2_common/hardware_gpio",
+ "${PICO_ROOT}/src/rp2_common/pico_time",
+ ]
+ public = [ "${_CWD}/include/pico/fix/rp2040_usb_device_enumeration.h" ]
+ sources = [ "${_CWD}/rp2040_usb_device_enumberation.c" ]  # NOTE(review): "enumberation" spelling appears intentional — it matches the source filename shipped in the pinned pico-sdk; verify against the SDK checkout before "fixing".
+}
diff --git a/third_party/pico_sdk/src/rp2_common/pico_float/BUILD.gn b/third_party/pico_sdk/src/rp2_common/pico_float/BUILD.gn
new file mode 100644
index 000000000..04c0fdf00
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/pico_float/BUILD.gn
@@ -0,0 +1,47 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/pico_float"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("pico_float") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2_common/pico_bootrom",
+ ]
+ deps = [
+ "${PICO_ROOT}/src/rp2_common/hardware_divider",
+ "${PICO_ROOT}/src/rp2_common/pico_platform",
+ ]
+ public = [ "${_CWD}/include/pico/float.h" ]
+ sources = [
+ "${_CWD}/float_aeabi.S",
+ "${_CWD}/float_init_rom.c",
+ "${_CWD}/float_math.c",
+ "${_CWD}/float_none.S",
+ "${_CWD}/float_v1_rom_shim.S",
+ ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/pico_int64_ops/BUILD.gn b/third_party/pico_sdk/src/rp2_common/pico_int64_ops/BUILD.gn
new file mode 100644
index 000000000..6458280d9
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/pico_int64_ops/BUILD.gn
@@ -0,0 +1,35 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/pico_int64_ops"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("pico_int64_ops") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [ "${PICO_ROOT}/src/common/pico_base" ]
+ deps = [ "${PICO_ROOT}/src/rp2_common/pico_platform" ]
+ public = [ "${_CWD}/include/pico/int64_ops.h" ]
+ sources = [ "${_CWD}/pico_int64_ops_aeabi.S" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/pico_malloc/BUILD.gn b/third_party/pico_sdk/src/rp2_common/pico_malloc/BUILD.gn
new file mode 100644
index 000000000..ad1afbed9
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/pico_malloc/BUILD.gn
@@ -0,0 +1,37 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/pico_malloc"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("pico_malloc") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/common/pico_sync",
+ ]
+ public = [ "${_CWD}/include/pico/malloc.h" ]
+ sources = [ "${_CWD}/pico_malloc.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/pico_mem_ops/BUILD.gn b/third_party/pico_sdk/src/rp2_common/pico_mem_ops/BUILD.gn
new file mode 100644
index 000000000..df40c7edd
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/pico_mem_ops/BUILD.gn
@@ -0,0 +1,41 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/pico_mem_ops"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("pico_mem_ops") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [ "${PICO_ROOT}/src/common/pico_base" ]
+ deps = [
+ "${PICO_ROOT}/src/rp2_common/pico_bootrom",
+ "${PICO_ROOT}/src/rp2_common/pico_platform",
+ ]
+ public = [ "${_CWD}/include/pico/mem_ops.h" ]
+ sources = [
+ "${_CWD}/mem_ops.c",
+ "${_CWD}/mem_ops_aeabi.S",
+ ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/pico_multicore/BUILD.gn b/third_party/pico_sdk/src/rp2_common/pico_multicore/BUILD.gn
new file mode 100644
index 000000000..d154954f5
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/pico_multicore/BUILD.gn
@@ -0,0 +1,45 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/pico_multicore"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("pico_multicore") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/common/pico_sync",
+ "${PICO_ROOT}/src/rp2040/hardware_structs",
+ ]
+ deps = [
+ "${PICO_ROOT}/src/rp2040/hardware_regs",
+ "${PICO_ROOT}/src/rp2_common/hardware_claim",
+ "${PICO_ROOT}/src/rp2_common/hardware_irq",
+ "${PICO_ROOT}/src/rp2_common/hardware_sync",
+ "${PICO_ROOT}/src/rp2_common/pico_runtime",
+ ]
+ public = [ "${_CWD}/include/pico/multicore.h" ]
+ sources = [ "${_CWD}/pico_multicore.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/pico_platform/BUILD.gn b/third_party/pico_sdk/src/rp2_common/pico_platform/BUILD.gn
new file mode 100644
index 000000000..593196dc4
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/pico_platform/BUILD.gn
@@ -0,0 +1,44 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/pico_platform"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("headers") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [ "${PICO_ROOT}/src/rp2040/hardware_regs:platform_defs" ]
+ configs = [ "${PICO_ROOT}/src/rp2_common/pico_platform:public_include_dirs" ]  # NOTE(review): resolves to the same label as ":public_include_dirs" already in public_configs — looks redundant; confirm and consider removing.
+ public = [
+ "${_CWD}/include/pico/asm_helper.S",
+ "${_CWD}/include/pico/platform.h",
+ ]
+}
+
+pw_source_set("pico_platform") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_deps = [ ":headers" ]
+ deps = [ "${PICO_ROOT}/src/rp2_common/hardware_base" ]
+ sources = [ "${_CWD}/platform.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/pico_printf/BUILD.gn b/third_party/pico_sdk/src/rp2_common/pico_printf/BUILD.gn
new file mode 100644
index 000000000..00453c48b
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/pico_printf/BUILD.gn
@@ -0,0 +1,43 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/pico_printf"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("pico_printf_none") {
+ deps = [
+ "${PICO_ROOT}/src/rp2_common/pico_bootrom",
+ "${PICO_ROOT}/src/rp2_common/pico_platform",
+ ]
+ sources = [ "${_CWD}/printf_none.S" ]
+}
+
+pw_source_set("pico_printf") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [ "${PICO_ROOT}/src/common/pico_base" ]
+ deps = [ "${PICO_ROOT}/src/rp2_common/pico_platform" ]
+ public = [ "${_CWD}/include/pico/printf.h" ]
+ sources = [ "${_CWD}/printf.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/pico_runtime/BUILD.gn b/third_party/pico_sdk/src/rp2_common/pico_runtime/BUILD.gn
new file mode 100644
index 000000000..f0c42c69e
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/pico_runtime/BUILD.gn
@@ -0,0 +1,46 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/pico_runtime"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("pico_runtime") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+
+ deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/common/pico_sync",
+ "${PICO_ROOT}/src/common/pico_time",
+ "${PICO_ROOT}/src/rp2040/hardware_regs",
+ "${PICO_ROOT}/src/rp2040/hardware_structs",
+ "${PICO_ROOT}/src/rp2_common/hardware_clocks",
+ "${PICO_ROOT}/src/rp2_common/hardware_irq",
+ "${PICO_ROOT}/src/rp2_common/hardware_resets",
+ "${PICO_ROOT}/src/rp2_common/pico_bootrom",
+ "${PICO_ROOT}/src/rp2_common/pico_printf",
+ ]
+ public = [ "${_CWD}/include/pico/runtime.h" ]
+ sources = [ "${_CWD}/runtime.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/pico_standard_link/BUILD.gn b/third_party/pico_sdk/src/rp2_common/pico_standard_link/BUILD.gn
new file mode 100644
index 000000000..457f93d8e
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/pico_standard_link/BUILD.gn
@@ -0,0 +1,46 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/pico_standard_link"
+
+# TODO(amontanez): Not all linker script configurations are supported yet.
+
+config("linker_script") {
+ _linker_script_path = rebase_path("${_CWD}/memmap_default.ld", root_build_dir)
+ ldflags = [ "-T${_linker_script_path}" ]
+}
+
+pw_source_set("pico_standard_link") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ all_dependent_configs = [ ":linker_script" ]
+ inputs = [ "${_CWD}/memmap_default.ld" ]
+ deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/common/pico_binary_info",
+ "${PICO_ROOT}/src/rp2040/hardware_regs",
+ "${PICO_ROOT}/src/rp2_common/boot_stage2",
+ ]
+ sources = [
+ "${_CWD}/binary_info.c",
+ "${_CWD}/crt0.S",
+ "${_CWD}/new_delete.cpp",
+ ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/pico_stdio/BUILD.gn b/third_party/pico_sdk/src/rp2_common/pico_stdio/BUILD.gn
new file mode 100644
index 000000000..38e2c69ca
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/pico_stdio/BUILD.gn
@@ -0,0 +1,73 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/pico_stdio"
+
+import("${PICO_ROOT}/src/common/pico_stdlib/pico_stdio.gni")
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+config("printf_wrappers") {
+ ldflags = [
+ "-Wl,--wrap=printf",
+ "-Wl,--wrap=vprintf",
+ "-Wl,--wrap=puts",
+ "-Wl,--wrap=putchar",
+ "-Wl,--wrap=getchar",
+ ]
+}
+
+# TODO(amontanez): This is definitely a facade. For now, just have header and
+# header+impl build targets to simulate.
+pw_source_set("headers") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_base",
+ "${PICO_ROOT}/src/rp2_common/pico_platform",
+ ]
+
+ public = [
+ "${_CWD}/include/pico/stdio.h",
+ "${_CWD}/include/pico/stdio/driver.h",
+ ]
+}
+
+pw_source_set("pico_stdio") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ all_dependent_configs = [ ":printf_wrappers" ]
+ public_deps = [ ":headers" ]
+ deps = [
+ "${PICO_ROOT}/src/common/pico_sync",
+ "${PICO_ROOT}/src/common/pico_time",
+ "${PICO_ROOT}/src/rp2_common/pico_printf",
+ ]
+ if (PICO_STDIO == ENUM_LIB_PICO_STDIO.UART) {
+ deps += [ "${PICO_ROOT}/src/rp2_common/pico_stdio_uart" ]
+ } else if (PICO_STDIO == ENUM_LIB_PICO_STDIO.USB) {
+ deps += [ "${PICO_ROOT}/src/rp2_common/pico_stdio_usb" ]
+ } else if (PICO_STDIO == ENUM_LIB_PICO_STDIO.SEMIHOSTING) {
+ deps += [ "${PICO_ROOT}/src/rp2_common/pico_stdio_semihosting" ]
+ }
+ sources = [ "${_CWD}/stdio.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/pico_stdio_semihosting/BUILD.gn b/third_party/pico_sdk/src/rp2_common/pico_stdio_semihosting/BUILD.gn
new file mode 100644
index 000000000..5590ad224
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/pico_stdio_semihosting/BUILD.gn
@@ -0,0 +1,35 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/pico_stdio_semihosting"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("pico_stdio_semihosting") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [ "${PICO_ROOT}/src/rp2_common/pico_stdio:headers" ]
+ deps = [ "${PICO_ROOT}/src/common/pico_binary_info" ]
+ public = [ "${_CWD}/include/pico/stdio_semihosting.h" ]
+  sources = [ "${_CWD}/stdio_semihosting.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/pico_stdio_uart/BUILD.gn b/third_party/pico_sdk/src/rp2_common/pico_stdio_uart/BUILD.gn
new file mode 100644
index 000000000..788a0de14
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/pico_stdio_uart/BUILD.gn
@@ -0,0 +1,41 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/pico_stdio_uart"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("pico_stdio_uart") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/rp2_common/hardware_uart",
+ "${PICO_ROOT}/src/rp2_common/pico_stdio:headers",
+ ]
+ deps = [
+ "${PICO_ROOT}/src/common/pico_binary_info",
+ "${PICO_ROOT}/src/rp2_common/hardware_gpio",
+ ]
+ public = [ "${_CWD}/include/pico/stdio_uart.h" ]
+ sources = [ "${_CWD}/stdio_uart.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/pico_stdio_usb/BUILD.gn b/third_party/pico_sdk/src/rp2_common/pico_stdio_usb/BUILD.gn
new file mode 100644
index 000000000..30a3434d7
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/pico_stdio_usb/BUILD.gn
@@ -0,0 +1,52 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/pico_stdio_usb"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("pico_stdio_usb") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [
+ "${PICO_ROOT}/src/common/pico_usb_reset_interface",
+ "${PICO_ROOT}/src/rp2_common/pico_stdio:headers",
+ ]
+ deps = [
+ "${PICO_ROOT}/src/common/pico_sync",
+ "${PICO_ROOT}/src/common/pico_time",
+ "${PICO_ROOT}/src/rp2_common/hardware_irq",
+ ]
+
+ # TODO(amontanez): Still needs a dependency on tinyusb.
+ public = [
+ "${_CWD}/include/pico/stdio_usb.h",
+ "${_CWD}/include/pico/stdio_usb/reset_interface.h",
+ "${_CWD}/include/tusb_config.h",
+ ]
+ sources = [
+ "${_CWD}/reset_interface.c",
+ "${_CWD}/stdio_usb.c",
+ "${_CWD}/stdio_usb_descriptors.c",
+ ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/pico_stdlib/BUILD.gn b/third_party/pico_sdk/src/rp2_common/pico_stdlib/BUILD.gn
new file mode 100644
index 000000000..448d1d758
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/pico_stdlib/BUILD.gn
@@ -0,0 +1,51 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/pico_stdlib"
+
+import("${PICO_ROOT}/src/common/pico_stdlib/pico_stdio.gni")
+
+pw_source_set("pico_stdlib") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ deps = [
+ "${PICO_ROOT}/src/common/pico_stdlib:headers",
+ "${PICO_ROOT}/src/rp2_common/hardware_clocks",
+ "${PICO_ROOT}/src/rp2_common/hardware_pll",
+ ]
+
+ # These libraries must be linked in for this to work, even though this does
+ # not #include anything from these:
+ deps += [
+ "${PICO_ROOT}/src/common/pico_time",
+ "${PICO_ROOT}/src/rp2_common/pico_platform",
+ "${PICO_ROOT}/src/rp2_common/pico_runtime",
+ "${PICO_ROOT}/src/rp2_common/pico_standard_link",
+ "${PICO_ROOT}/src/rp2_common/pico_stdio",
+ ]
+
+ if (PICO_STDIO == ENUM_LIB_PICO_STDIO.UART) {
+ deps += [ "${PICO_ROOT}/src/rp2_common/pico_stdio_uart" ]
+ } else {
+ deps += [ "${PICO_ROOT}/src/common/pico_binary_info" ]
+ }
+
+ sources = [ "${_CWD}/stdlib.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/pico_unique_id/BUILD.gn b/third_party/pico_sdk/src/rp2_common/pico_unique_id/BUILD.gn
new file mode 100644
index 000000000..a3f7b5750
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/pico_unique_id/BUILD.gn
@@ -0,0 +1,35 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pi_pico.gni")
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+# TODO(amontanez): This can go away if the GN build can be upstreamed to the
+# Pi Pico repo.
+_CWD = "${PICO_SRC_DIR}/src/rp2_common/pico_unique_id"
+
+config("public_include_dirs") {
+ include_dirs = [ "${_CWD}/include" ]
+}
+
+pw_source_set("pico_unique_id") {
+ remove_configs = [ "$dir_pw_build:strict_warnings" ]
+ public_configs = [ ":public_include_dirs" ]
+ public_deps = [ "${PICO_ROOT}/src/common/pico_base" ]
+ deps = [ "${PICO_ROOT}/src/rp2_common/hardware_flash" ]
+ public = [ "${_CWD}/include/pico/unique_id.h" ]
+ sources = [ "${_CWD}/unique_id.c" ]
+}
diff --git a/third_party/pico_sdk/src/rp2_common/tinyusb/BUILD.gn b/third_party/pico_sdk/src/rp2_common/tinyusb/BUILD.gn
new file mode 100644
index 000000000..32680d51a
--- /dev/null
+++ b/third_party/pico_sdk/src/rp2_common/tinyusb/BUILD.gn
@@ -0,0 +1,15 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# TODO(amontanez): Build shim for TinyUSB
diff --git a/third_party/rules_proto_grpc/BUILD.bazel b/third_party/rules_proto_grpc/BUILD.bazel
new file mode 100644
index 000000000..a524793e9
--- /dev/null
+++ b/third_party/rules_proto_grpc/BUILD.bazel
@@ -0,0 +1,15 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+exports_files(["internal_proto.bzl"])
diff --git a/third_party/rules_proto_grpc/internal_proto.bzl b/third_party/rules_proto_grpc/internal_proto.bzl
new file mode 100644
index 000000000..cab19d405
--- /dev/null
+++ b/third_party/rules_proto_grpc/internal_proto.bzl
@@ -0,0 +1,237 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+"""Backend implementation for 'pw_protobuf_compiler/proto.bzl'"""
+
+# Apache License, Version 2.0, January 2004, http://www.apache.org/licenses/
+# Adapted from: https://github.com/rules-proto-grpc/rules_proto_grpc/
+# Files adapted:
+# - rules_proto_grpc/cpp/cpp_grpc_library.bzl
+# - rules_proto_grpc/cpp/cpp_grpc_compile.bzl
+# These two files have been adapted for use in Pigweed and combined into this
+# file.
+
+load("@rules_proto//proto:defs.bzl", "ProtoInfo")
+load(
+ "@rules_proto_grpc//:defs.bzl",
+ "ProtoLibraryAspectNodeInfo",
+ "ProtoPluginInfo",
+ "proto_compile_aspect_attrs",
+ "proto_compile_aspect_impl",
+ "proto_compile_attrs",
+ "proto_compile_impl",
+)
+load("@rules_proto_grpc//internal:filter_files.bzl", "filter_files")
+
+# Create compile rule
+def _proto_compiler_aspect(plugin_group, prefix):
+ return aspect(
+ implementation = proto_compile_aspect_impl,
+ provides = [ProtoLibraryAspectNodeInfo],
+ attr_aspects = ["deps"],
+ attrs = dict(
+ proto_compile_aspect_attrs,
+ _plugins = attr.label_list(
+ doc = "List of protoc plugins to apply",
+ providers = [ProtoPluginInfo],
+ default = plugin_group,
+ ),
+ _prefix = attr.string(
+ doc = "String used to disambiguate aspects when generating \
+outputs",
+ default = prefix,
+ ),
+ ),
+ toolchains = [str(Label("@rules_proto_grpc//protobuf:toolchain_type"))],
+ )
+
+def _proto_compiler_rule(plugin_group, aspect):
+ return rule(
+ implementation = proto_compile_impl,
+ attrs = dict(
+ proto_compile_attrs,
+ _plugins = attr.label_list(
+ doc = "List of protoc plugins to apply",
+ providers = [ProtoPluginInfo],
+ default = plugin_group,
+ ),
+ protos = attr.label_list(
+ providers = [ProtoInfo],
+ doc = "List of proto_library targets.",
+ ),
+ deps = attr.label_list(
+ doc = "List of proto_library targets. Prefer protos.",
+ aspects = [aspect],
+ ),
+ ),
+ toolchains = [str(Label("@rules_proto_grpc//protobuf:toolchain_type"))],
+ )
+
+nanopb_compile_aspect = _proto_compiler_aspect(
+ [Label("//pw_rpc:nanopb_plugin")],
+ "nanopb_proto_compile_aspect",
+)
+nanopb_compile = _proto_compiler_rule(
+ [Label("//pw_rpc:nanopb_plugin")],
+ nanopb_compile_aspect,
+)
+
+pwpb_compile_aspect = _proto_compiler_aspect(
+ [Label("@pigweed//pw_protobuf:pw_cc_plugin")],
+ "pwpb_proto_compile_aspect",
+)
+pwpb_compile = _proto_compiler_rule(
+ [Label("@pigweed//pw_protobuf:pw_cc_plugin")],
+ pwpb_compile_aspect,
+)
+
+raw_rpc_compile_aspect = _proto_compiler_aspect(
+ [Label("@pigweed//pw_rpc:pw_cc_plugin_raw")],
+ "raw_rpc_proto_compile_aspect",
+)
+raw_rpc_compile = _proto_compiler_rule(
+ [Label("@pigweed//pw_rpc:pw_cc_plugin_raw")],
+ raw_rpc_compile_aspect,
+)
+
+nanopb_rpc_compile_aspect = _proto_compiler_aspect(
+ [
+ Label("@pigweed//pw_rpc:pw_cc_plugin_nanopb_rpc"),
+ Label("//pw_rpc:nanopb_plugin"),
+ ],
+ "nanopb_rpc_proto_compile_aspect",
+)
+nanopb_rpc_compile = _proto_compiler_rule(
+ [
+ Label("@pigweed//pw_rpc:pw_cc_plugin_nanopb_rpc"),
+ Label("//pw_rpc:nanopb_plugin"),
+ ],
+ nanopb_rpc_compile_aspect,
+)
+
+PLUGIN_INFO = {
+ "nanopb": {
+ "compiler": nanopb_compile,
+ "deps": ["@com_github_nanopb_nanopb//:nanopb"],
+ "has_srcs": True,
+ # TODO: Find a way to get Nanopb to generate nested structs.
+ # Otherwise add the manual tag to the resulting library,
+ # preventing it from being built unless directly depended on.
+ # e.g. The 'Pigweed' message in
+ # pw_protobuf/pw_protobuf_test_protos/full_test.proto will fail to
+ # compile as it has a self referring nested message. According to
+ # the docs
+ # https://jpa.kapsi.fi/nanopb/docs/reference.html#proto-file-options
+            # and https://github.com/nanopb/nanopb/issues/433 it seems like it
+ # should be possible to configure nanopb to generate nested structs.
+ "additional_tags": ["manual"],
+ },
+ "nanopb_rpc": {
+ "compiler": nanopb_rpc_compile,
+ "deps": [
+ "@com_github_nanopb_nanopb//:nanopb",
+ "@pigweed//pw_rpc/nanopb:server_api",
+ "@pigweed//pw_rpc/nanopb:client_api",
+ "@pigweed//pw_rpc",
+ ],
+ "has_srcs": True,
+ # See above todo.
+ "additional_tags": ["manual"],
+ },
+ "pwpb": {
+ "compiler": pwpb_compile,
+ "deps": ["@pigweed//pw_protobuf"],
+ "has_srcs": False,
+ "additional_tags": [],
+ },
+ "raw_rpc": {
+ "compiler": raw_rpc_compile,
+ "deps": [
+ "@pigweed//pw_rpc",
+ "@pigweed//pw_rpc/raw:server_api",
+ "@pigweed//pw_rpc/raw:client_api",
+ ],
+ "has_srcs": False,
+ "additional_tags": [],
+ },
+}
+
+def pw_proto_library(name, **kwargs): # buildifier: disable=function-docstring
+ for plugin_name, info in PLUGIN_INFO.items():
+ name_pb = name + "_pb" + "." + plugin_name
+ additional_tags = [
+ tag
+ for tag in info["additional_tags"]
+ if tag not in kwargs.get("tags", [])
+ ]
+ info["compiler"](
+ name = name_pb,
+ tags = additional_tags,
+ # Forward deps and verbose tags to implementation
+ verbose = kwargs.get("verbose", 0),
+ deps = kwargs.get("deps", []),
+ protos = kwargs.get("protos", []),
+ )
+
+ # Filter files to sources and headers
+ filter_files(
+ name = name_pb + "_srcs",
+ target = name_pb,
+ extensions = ["c", "cc", "cpp", "cxx"],
+ tags = additional_tags,
+ )
+
+ filter_files(
+ name = name_pb + "_hdrs",
+ target = name_pb,
+ extensions = ["h"],
+ tags = additional_tags,
+ )
+
+ # Cannot use pw_cc_library here as it will add cxxopts.
+ # Note that the srcs attribute here is passed in as a DefaultInfo
+ # object, which is not supported by pw_cc_library.
+ native.cc_library(
+ name = name + "." + plugin_name,
+ hdrs = [name_pb + "_hdrs"],
+ includes = [name_pb],
+ alwayslink = kwargs.get("alwayslink"),
+ copts = kwargs.get("copts", []),
+ defines = kwargs.get("defines", []),
+ srcs = [name_pb + "_srcs"] if info["has_srcs"] else [],
+ deps = info["deps"],
+ include_prefix = kwargs.get("include_prefix", ""),
+ linkopts = kwargs.get("linkopts", []),
+ linkstatic = kwargs.get("linkstatic", True),
+ local_defines = kwargs.get("local_defines", []),
+ nocopts = kwargs.get("nocopts", ""),
+ visibility = kwargs.get("visibility"),
+ tags = kwargs.get("tags", []) + additional_tags,
+ )
+
+ if "manual" in kwargs.get("tags", []):
+ additional_tags = []
+ else:
+ additional_tags = ["manual"]
+
+ # Combine all plugins into a single library.
+ native.cc_library(
+ name = name,
+ deps = [
+ name + "." + plugin_name
+ for plugin_name in PLUGIN_INFO.keys()
+ ],
+ tags = kwargs.get("tags", []) + additional_tags,
+ **{k: v for k, v in kwargs.items() if k not in ["deps", "protos"]}
+ )
diff --git a/third_party/smartfusion_mss/BUILD.bazel b/third_party/smartfusion_mss/BUILD.bazel
new file mode 100644
index 000000000..bf4dfa0d7
--- /dev/null
+++ b/third_party/smartfusion_mss/BUILD.bazel
@@ -0,0 +1,40 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load(
+ "//pw_build:pigweed.bzl",
+ "pw_cc_library",
+)
+
+# Ready-made configurations
+liberosoc_configs = [
+ ("default", "configs/config_default.h"),
+ ("debug", "configs/config_debug.h"),
+]
+
+# Config targets.
+[
+ pw_cc_library(
+ name = "%s_config" % config_name,
+ hdrs = [
+ config_header,
+ "configs/config_pigweed_common.h",
+ ],
+ copts = ["-Dmss_CONFIG_FILE=\"%s\"" % config_header],
+ includes = ["."],
+ )
+ for config_name, config_header in liberosoc_configs
+]
+
+# TODO(skeys): Add build recipe for the library.
diff --git a/third_party/smartfusion_mss/BUILD.gn b/third_party/smartfusion_mss/BUILD.gn
new file mode 100644
index 000000000..2e3c10888
--- /dev/null
+++ b/third_party/smartfusion_mss/BUILD.gn
@@ -0,0 +1,112 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+import("$dir_pw_build/linker_script.gni")
+import("$dir_pw_build/target_types.gni")
+import("$dir_pw_third_party/smartfusion_mss/mss.gni")
+
+declare_args() {
+ pw_target_smartfusion2_LINK_CONFIG_DEFINES = []
+}
+
+if (dir_pw_third_party_smartfusion_mss != "") {
+ # The list currently includes all source files for build.
+ smartfusion_mss_sources = [
+ "exported_firmware/CMSIS/startup_gcc/startup_m2sxxx.S",
+ "exported_firmware/CMSIS/system_m2sxxx.c",
+ "exported_firmware/drivers/mss_can/mss_can.c",
+ "exported_firmware/drivers/mss_ethernet_mac/m88e1340_phy.c",
+ "exported_firmware/drivers/mss_ethernet_mac/mss_ethernet_mac.c",
+ "exported_firmware/drivers/mss_gpio/mss_gpio.c",
+ "exported_firmware/drivers/mss_hpdma/mss_hpdma.c",
+ "exported_firmware/drivers/mss_i2c/mss_i2c.c",
+ "exported_firmware/drivers/mss_nvm/mss_nvm.c",
+ "exported_firmware/drivers/mss_rtc/mss_rtc.c",
+ "exported_firmware/drivers/mss_spi/mss_spi.c",
+ "exported_firmware/drivers/mss_sys_services/mss_comblk.c",
+ "exported_firmware/drivers/mss_sys_services/mss_sys_services.c",
+ "exported_firmware/drivers/mss_uart/mss_uart.c",
+ "exported_firmware/drivers/mss_usb/mss_usb_common_cif.c",
+ "exported_firmware/drivers/mss_usb/mss_usb_device.c",
+ "exported_firmware/drivers/mss_usb/mss_usb_device_cdc.c",
+ "exported_firmware/drivers/mss_usb/mss_usb_device_cif.c",
+ "exported_firmware/drivers/mss_usb/mss_usb_device_hid.c",
+ "exported_firmware/drivers/mss_usb/mss_usb_device_msd.c",
+ "exported_firmware/drivers/mss_usb/mss_usb_device_printer.c",
+ "exported_firmware/drivers/mss_usb/mss_usb_device_rndis.c",
+ "exported_firmware/drivers/mss_usb/mss_usb_device_vendor.c",
+ "exported_firmware/drivers/mss_usb/mss_usb_host.c",
+ "exported_firmware/drivers/mss_usb/mss_usb_host_cif.c",
+ "exported_firmware/drivers/mss_usb/mss_usb_host_msc.c",
+ "exported_firmware/drivers_config/sys_config/sys_config.c",
+ ]
+
+ liberosoc_configs = [
+ {
+ name = "default"
+ config_header = "configs/config_default.h"
+ },
+ {
+ name = "debug"
+ config_header = "configs/config_debug.h"
+ },
+ ]
+
+ foreach(ele, liberosoc_configs) {
+ config_name = ele.name + "_config"
+ config(config_name) {
+ # Custom config file is specified by macro liberosoc_CONFIG_FILE
+ # for liberosoc
+ defines = [ "liberosoc_CONFIG_FILE=\"${ele.config_header}\"" ]
+ }
+
+ srcset_name = ele.name + "_config_srcset"
+ pw_source_set(srcset_name) {
+ public = [
+ "configs/config_pigweed_common.h",
+ ele.config_header,
+ ]
+ public_configs = [
+ ":${config_name}",
+ ":smartfusion_mss_common_config",
+ ]
+ }
+ }
+
+ config("smartfusion_mss_common_config") {
+ include_dirs = [
+ "$dir_pw_third_party_smartfusion_mss/exported_firmware/CMSIS/V4.5/Include",
+ "$dir_pw_third_party_smartfusion_mss/exported_firmware/drivers",
+ "$dir_pw_third_party_smartfusion_mss/exported_firmware/CMSIS",
+ "$dir_pw_third_party/smartfusion_mss",
+ ]
+ cflags = [
+ "-Wno-error=cast-qual",
+ "-Wno-error=redundant-decls",
+ "-w",
+ ]
+ }
+
+ pw_source_set("smartfusion_mss") {
+ sources = []
+ foreach(source, smartfusion_mss_sources) {
+ sources += [ "$dir_pw_third_party_smartfusion_mss/" + source ]
+ }
+ public_deps = [ ":${pw_third_party_smartfusion_mss_CONFIG}_config_srcset" ]
+ }
+} else {
+ group("smartfusion_mss") {
+ }
+}
diff --git a/third_party/smartfusion_mss/README.md b/third_party/smartfusion_mss/README.md
new file mode 100644
index 000000000..7532545e2
--- /dev/null
+++ b/third_party/smartfusion_mss/README.md
@@ -0,0 +1,6 @@
+# LiberoSoC Library
+
+This folder provides build scripts and configuration recipes for building the
+SmartFusion2 Microcontroller Subsystem (MSS) library. The source code must be
+downloaded by the user or installed via pw_package (`pw package install
+sf2mss`). For the GN build, set `dir_pw_third_party_smartfusion_mss` to the path of the source code.
diff --git a/third_party/smartfusion_mss/configs/config_debug.h b/third_party/smartfusion_mss/configs/config_debug.h
new file mode 100644
index 000000000..7eb69608e
--- /dev/null
+++ b/third_party/smartfusion_mss/configs/config_debug.h
@@ -0,0 +1,19 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#pragma once
+
+#include "configs/config_pigweed_common.h"
+
+#define SF2_MSS_NO_BOOTLOADER 1
diff --git a/third_party/smartfusion_mss/configs/config_default.h b/third_party/smartfusion_mss/configs/config_default.h
new file mode 100644
index 000000000..30aee591f
--- /dev/null
+++ b/third_party/smartfusion_mss/configs/config_default.h
@@ -0,0 +1,17 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#pragma once
+
+#include "configs/config_pigweed_common.h"
diff --git a/third_party/smartfusion_mss/configs/config_pigweed_common.h b/third_party/smartfusion_mss/configs/config_pigweed_common.h
new file mode 100644
index 000000000..3f4f6ea45
--- /dev/null
+++ b/third_party/smartfusion_mss/configs/config_pigweed_common.h
@@ -0,0 +1,20 @@
+// Copyright 2022 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Common configuration shared by all SmartFusion2 MSS library configs in
+// Pigweed (e.g. config_default.h and config_debug.h). Project-wide
+// configuration macros for the MSS library belong here rather than in the
+// per-config headers.
+
+#pragma once
diff --git a/third_party/smartfusion_mss/mss.gni b/third_party/smartfusion_mss/mss.gni
new file mode 100644
index 000000000..dd971781a
--- /dev/null
+++ b/third_party/smartfusion_mss/mss.gni
@@ -0,0 +1,24 @@
+# Copyright 2022 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+declare_args() {
+  # Path to the SmartFusion2 MSS (LiberoSoC) source code. When set, a
+  # pw_source_set for the MSS library is created at
+  # "$dir_pw_third_party/smartfusion_mss".
+ dir_pw_third_party_smartfusion_mss = ""
+
+  # Configuration for the MSS library. Must be the name of one of the
+  # `liberosoc_configs` entries in the BUILD.gn file (e.g. "default" or "debug").
+ pw_third_party_smartfusion_mss_CONFIG = "default"
+}
diff --git a/third_party/stm32cube/BUILD.gn b/third_party/stm32cube/BUILD.gn
index 44a801419..c541fc859 100644
--- a/third_party/stm32cube/BUILD.gn
+++ b/third_party/stm32cube/BUILD.gn
@@ -16,145 +16,160 @@ import("//build_overrides/pigweed.gni")
import("$dir_pw_build/linker_script.gni")
import("$dir_pw_build/target_types.gni")
-
import("$dir_pw_third_party/stm32cube/stm32cube.gni")
-assert(dir_pw_third_party_stm32cube != "",
- "The target must specify the stm32cube directory.")
-
-stm32cube_builder_script =
- "$dir_pw_stm32cube_build/py/pw_stm32cube_build/__main__.py"
+if (dir_pw_third_party_stm32cube == "") {
+ group("linker_script_template") {
+ }
+ group("core_init_template") {
+ }
+ group("cmsis_init_template") {
+ }
+ group("hal_config_template") {
+ }
+ group("stm32cube_headers") {
+ }
+ group("stm32cube") {
+ }
+} else {
+ stm32cube_builder_script =
+ "$dir_pw_stm32cube_build/py/pw_stm32cube_build/__main__.py"
-rebased_dir_pw_third_party_stm32cube = rebase_path(dir_pw_third_party_stm32cube)
+ rebased_dir_pw_third_party_stm32cube =
+ rebase_path(dir_pw_third_party_stm32cube)
-find_files_args = [
- "find_files",
- rebased_dir_pw_third_party_stm32cube,
- pw_third_party_stm32cube_PRODUCT,
-]
-if (pw_third_party_stm32cube_CORE_INIT ==
- "$dir_pw_third_party/stm32cube:core_init_template") {
- find_files_args += [ "--init" ]
-}
+ find_files_args = [
+ "find_files",
+ rebased_dir_pw_third_party_stm32cube,
+ pw_third_party_stm32cube_PRODUCT,
+ ]
+ if (pw_third_party_stm32cube_CORE_INIT ==
+ "$dir_pw_third_party/stm32cube:core_init_template") {
+ find_files_args += [ "--init" ]
+ }
-# This script finds the files relavent for the current product.
-files = exec_script(stm32cube_builder_script,
- find_files_args,
- "scope",
- [ "$rebased_dir_pw_third_party_stm32cube/files.txt" ])
-
-if (pw_third_party_stm32cube_CORE_INIT ==
- "$dir_pw_third_party/stm32cube:core_init_template") {
- assert(files.gcc_linker != "" || files.iar_linker != "",
- "No linker file found")
-
- gcc_linker = files.gcc_linker
- if (gcc_linker == "") {
- gcc_linker = "$target_gen_dir/linker.ld"
- gcc_linker_str = exec_script(stm32cube_builder_script,
- [
- "icf_to_ld",
- files.iar_linker,
- ],
- "string",
- [ files.iar_linker ])
- write_file(gcc_linker, gcc_linker_str)
- }
-
- startup_file = "$target_gen_dir/startup.s"
- startup_file_str = exec_script(stm32cube_builder_script,
- [
- "inject_init",
- files.startup,
- ],
- "string",
- [ files.startup ])
- write_file(startup_file, startup_file_str)
-
- pw_linker_script("linker_script_template") {
- linker_script = gcc_linker
- }
-
- pw_source_set("core_init_template") {
- deps = [ ":linker_script_template" ]
- sources = [ startup_file ]
+  # This script finds the files relevant for the current product.
+ files = exec_script(stm32cube_builder_script,
+ find_files_args,
+ "scope",
+ [ "$rebased_dir_pw_third_party_stm32cube/files.txt" ])
+
+ if (pw_third_party_stm32cube_CORE_INIT ==
+ "$dir_pw_third_party/stm32cube:core_init_template") {
+ assert(files.gcc_linker != "" || files.iar_linker != "",
+ "No linker file found")
+
+ gcc_linker = files.gcc_linker
+ if (gcc_linker == "") {
+ gcc_linker = "$target_gen_dir/linker.ld"
+ gcc_linker_str = exec_script(stm32cube_builder_script,
+ [
+ "icf_to_ld",
+ files.iar_linker,
+ ],
+ "string",
+ [ files.iar_linker ])
+ write_file(gcc_linker, gcc_linker_str)
+ }
+
+ startup_file = "$target_gen_dir/startup.s"
+ startup_file_str = exec_script(stm32cube_builder_script,
+ [
+ "inject_init",
+ files.startup,
+ ],
+ "string",
+ [ files.startup ])
+ write_file(startup_file, startup_file_str)
+
+ pw_linker_script("linker_script_template") {
+ linker_script = gcc_linker
+ }
+
+ pw_source_set("core_init_template") {
+ deps = [ ":linker_script_template" ]
+ sources = [ startup_file ]
+ }
}
-}
-pw_source_set("hal_timebase_template") {
- deps = [ ":stm32cube_headers" ]
- sources = [ "$dir_pw_third_party_stm32cube/hal_driver/Src/${files.family}_hal_timebase_tim_template.c" ]
-}
+ pw_source_set("hal_timebase_template") {
+ deps = [ ":stm32cube_headers" ]
+ sources = [ "$dir_pw_third_party_stm32cube/hal_driver/Src/${files.family}_hal_timebase_tim_template.c" ]
+ }
-pw_source_set("cmsis_init_template") {
- deps = [ ":stm32cube_headers" ]
- sources = [ "$dir_pw_third_party_stm32cube/cmsis_device/Source/Templates/system_${files.family}.c" ]
-}
+ pw_source_set("cmsis_init_template") {
+ deps = [ ":stm32cube_headers" ]
+ sources = [ "$dir_pw_third_party_stm32cube/cmsis_device/Source/Templates/system_${files.family}.c" ]
+ }
-# Generate a stub config header that points to the correct template.
-write_file("$target_gen_dir/template_config/${files.family}_hal_conf.h",
- "#include \"${files.family}_hal_conf_template.h\"")
-config("hal_config_template_includes") {
- include_dirs = [ "$target_gen_dir/template_config" ]
-}
-pw_source_set("hal_config_template") {
- public_configs = [ ":hal_config_template_includes" ]
+ # Generate a stub config header that points to the correct template.
+ write_file("$target_gen_dir/template_config/${files.family}_hal_conf.h",
+ "#include \"${files.family}_hal_conf_template.h\"")
+ config("hal_config_template_includes") {
+ include_dirs = [ "$target_gen_dir/template_config" ]
+ }
+ pw_source_set("hal_config_template") {
+ public_configs = [ ":hal_config_template_includes" ]
- # This is to make sure GN properly detects changes to these files. The
- # generated file shouldn't change, but the file it redirects to might.
- public = [ "$target_gen_dir/template_config/${files.family}_hal_conf.h" ]
- inputs = [ "$dir_pw_third_party_stm32cube/hal_driver/Inc/${files.family}_hal_conf_template.h" ]
-}
+ # This is to make sure GN properly detects changes to these files. The
+ # generated file shouldn't change, but the file it redirects to might.
+ public = [ "$target_gen_dir/template_config/${files.family}_hal_conf.h" ]
+ inputs = [ "$dir_pw_third_party_stm32cube/hal_driver/Inc/${files.family}_hal_conf_template.h" ]
+ }
-config("flags") {
- cflags = [ "-Wno-unused-parameter" ]
- cflags_c = [
- "-Wno-redundant-decls",
- "-Wno-sign-compare",
- "-Wno-old-style-declaration",
- "-Wno-maybe-uninitialized",
- "-Wno-undef",
- "-Wno-implicit-function-declaration",
- ]
- defines = [
- "USE_HAL_DRIVER",
- files.product_define,
- "STM32CUBE_HEADER=\"${files.family}.h\"",
- "__ARMCC_VERSION=0", # workaround for bug at stm32l552xx.h:1303
- ]
- visibility = [ ":*" ]
-}
+ config("flags") {
+ cflags = [ "-Wno-unused-parameter" ]
+ cflags_c = [
+ "-Wno-redundant-decls",
+ "-Wno-sign-compare",
+ "-Wno-old-style-declaration",
+ "-Wno-maybe-uninitialized",
+ "-Wno-undef",
+ "-Wno-implicit-function-declaration",
+ ]
+ defines = [
+ "USE_HAL_DRIVER",
+ files.product_define,
+ "STM32CUBE_HEADER=\"${files.family}.h\"",
+ "__ARMCC_VERSION=0", # workaround for bug at stm32l552xx.h:1303
+ ]
+ visibility = [ ":*" ]
+ }
-config("public_include_paths") {
- include_dirs = files.include_dirs
- include_dirs += [ "public" ]
- visibility = [ ":*" ]
-}
+ config("public_include_paths") {
+ include_dirs = files.include_dirs
+ include_dirs += [ "public" ]
+ visibility = [ ":*" ]
+ }
-# Only libraries that implement parts of the stm32cube hal should depend on
-# this. If you just want to depend on the hal, depend on stm32cube directly.
-pw_source_set("stm32cube_headers") {
- public_configs = [
- ":flags",
- ":public_include_paths",
- ]
- public = [
- "public/stm32cube/init.h",
- "public/stm32cube/stm32cube.h",
- ]
- public += files.headers
- public_deps = [ pw_third_party_stm32cube_CONFIG ]
- visibility = [ ":*" ]
-}
+ # Only libraries that implement parts of the stm32cube hal should depend on
+ # this. If you just want to depend on the hal, depend on stm32cube directly.
+ pw_source_set("stm32cube_headers") {
+ public_configs = [
+ ":flags",
+ ":public_include_paths",
+ ]
+ public = [
+ "public/stm32cube/init.h",
+ "public/stm32cube/stm32cube.h",
+ ]
+ public += files.headers
+ public_deps = [ pw_third_party_stm32cube_CONFIG ]
+ visibility = [ ":*" ]
+ if (pw_third_party_stm32cube_CORE_INIT != "") {
+ visibility += [ pw_third_party_stm32cube_CORE_INIT ]
+ }
+ }
-pw_source_set("stm32cube") {
- public_deps = [ ":stm32cube_headers" ]
- sources = files.sources
- deps = [
- pw_third_party_stm32cube_CMSIS_INIT,
- pw_third_party_stm32cube_TIMEBASE,
- ]
- if (pw_third_party_stm32cube_CORE_INIT != "") {
- deps += [ pw_third_party_stm32cube_CORE_INIT ]
+ pw_source_set("stm32cube") {
+ public_deps = [ ":stm32cube_headers" ]
+ sources = files.sources
+ deps = [
+ pw_third_party_stm32cube_CMSIS_INIT,
+ pw_third_party_stm32cube_TIMEBASE,
+ ]
+ if (pw_third_party_stm32cube_CORE_INIT != "") {
+ deps += [ pw_third_party_stm32cube_CORE_INIT ]
+ }
}
}
diff --git a/zephyr/OWNERS b/zephyr/OWNERS
new file mode 100644
index 000000000..2230ec264
--- /dev/null
+++ b/zephyr/OWNERS
@@ -0,0 +1 @@
+peress@google.com